import unittest, logging
from pyquante2 import molecule, rhf, uhf, rohf, cuhf, h2, h2o, lih, li, oh, ch4, basisset
from pyquante2.ints.integrals import libint_twoe_integrals, twoe_integrals_compressed
from pyquante2.geo.molecule import read_xyz
from pyquante2.scf.iterators import SCFIterator, AveragingIterator, USCFIterator, ROSCFIterator
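# Each test below follows the same pattern: build a basis set for a molecule,
# construct a Hamiltonian (rhf/uhf/rohf/cuhf), converge it with the matching
# SCF iterator, and compare the converged energy against an NWCHEM 6.5
# reference value to the stated number of decimal places.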
class test_scf(unittest.TestCase):
"""reference energies obtained from NWCHEM 6.5"""
def test_h2(self):
bfs = basisset(h2,'sto-3g')
hamiltonian = rhf(bfs)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -1.117099435262, 7)
def test_h2_631(self):
bfs = basisset(h2,'6-31gss')
hamiltonian = rhf(bfs)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -1.131333590574, 7)
def test_lih(self):
bfs = basisset(lih,'sto-3g')
hamiltonian = rhf(bfs)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -7.860746149768, 6)
def test_lih_averaging(self):
bfs = basisset(lih,'sto-3g')
hamiltonian = rhf(bfs)
iterator = AveragingIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -7.860746149768, 6)
def test_h4(self):
h4 = molecule([
(1, 0.00000000, 0.00000000, 0.36628549),
(1, 0.00000000, 0.00000000, -0.36628549),
(1, 0.00000000, 4.00000000, 0.36628549),
(1, 0.00000000, 4.00000000, -0.36628549),
],
units='Angstrom')
bfs = basisset(h4,'sto-3g')
hamiltonian = rhf(bfs)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -2.234185358600, 7)
# This is not quite equal to 2x the h2 energy, but very close
def test_h2o(self):
bfs = basisset(h2o,'sto-3g')
hamiltonian = rhf(bfs)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -74.959857776754, 5)
def test_h2o_averaging(self):
bfs = basisset(h2o,'sto-3g')
hamiltonian = rhf(bfs)
iterator = AveragingIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -74.959857776754, 5)
def test_oh(self):
bfs = basisset(oh,'sto-3g')
hamiltonian = uhf(bfs)
iterator = USCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -74.360233544941, 4)
def test_li(self):
bfs = basisset(li,'sto-3g')
hamiltonian = uhf(bfs)
iterator = USCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -7.315525981280, 6)
class test_libint_rhf(unittest.TestCase):
"""reference energies obtained from NWCHEM 6.5"""
def test_CH4(self):
"""CH4 symmetry Td"""
bfs = basisset(ch4,'sto-3g')
hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -39.726862723517, 6)
def test_C2H2Cl2(self):
"""C2H2Cl2 symmetry C2H"""
C2H2Cl2 = read_xyz('./molfiles/C2H2Cl2.xyz')
bfs = basisset(C2H2Cl2,'sto-3g')
hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -967.533150337277, 4)
def test_H2O_4(self):
"""H2O tethramer symmetry S4"""
H2O4 = read_xyz('./molfiles/H2O_4.xyz')
bfs = basisset(H2O4,'sto-3g')
hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -299.909789863537, 5)
def test_BrF5(self):
"""BrF5 symmetry C4v"""
BrF5 = read_xyz('./molfiles/BrF5.xyz')
bfs = basisset(BrF5,'sto-3g')
hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -3035.015731331871, 4)
def test_HBr(self):
"""HBr"""
HBr = read_xyz('./molfiles/HBr.xyz')
bfs = basisset(HBr,'sto-3g')
hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -2545.887434128302, 4)
def test_C8H8(self):
"""C8H8"""
C8H8 = read_xyz('./molfiles/C8H8.xyz')
bfs = basisset(C8H8,'sto-6g')
hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -306.765545547300, 5)
def test_N8(self):
"""N8"""
N8 = read_xyz('./molfiles/N8.xyz')
bfs = basisset(N8,'cc-pvdz')
hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -434.992755329296, 5)
class test_unstable(unittest.TestCase):
"""Unstable RHF convergence.
Different NWCHEM energy with and without autosym.
"""
def test_B12(self):
"""B12 symmetry Ih"""
B12 = read_xyz('./molfiles/B12.xyz')
bfs = basisset(B12,'sto-3g')
hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -290.579419642829, 0)
def test_CrCO6(self):
# FAIL
"""Cr(CO)6 symmetry Oh
Reference: Whitaker, A.; Jeffery, J. W. Acta Cryst. 1967, 23, 977. DOI: 10.1107/S0365110X67004153
"""
CrCO6 = read_xyz('./molfiles/CrCO6.xyz')
bfs = basisset(CrCO6,'sto-3g')
hamiltonian = rohf(bfs, twoe_factory=libint_twoe_integrals)
iterator = ROSCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -1699.539642257497, 0)
def test_C24(self):
# FAIL
"""C24 symmetry Th"""
C24 = read_xyz('./molfiles/C24.xyz')
bfs = basisset(C24,'sto-3g')
hamiltonian = rhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = SCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -890.071915453874, 0)
class test_libint_uhf(unittest.TestCase):
"""reference energies obtained from NWCHEM 6.5"""
def test_CF3(self):
"""CF3 radical"""
CF3 = read_xyz('./molfiles/CF3.xyz')
bfs = basisset(CF3,'sto-3g')
hamiltonian = uhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = USCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -331.480688906400, 5)
class test_libint_rohf(unittest.TestCase):
"""reference energies obtained from NWCHEM 6.5"""
def test_CH3(self):
"""CH3 radical"""
CH3 = read_xyz('./molfiles/CH3.xyz')
bfs = basisset(CH3,'sto-3g')
hamiltonian = rohf(bfs, twoe_factory=libint_twoe_integrals)
iterator = ROSCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -38.9493, 5)
def test_CF3(self):
"""CF3 radical"""
CF3 = read_xyz('./molfiles/CF3.xyz')
bfs = basisset(CF3,'sto-3g')
hamiltonian = rohf(bfs, twoe_factory=libint_twoe_integrals)
iterator = ROSCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -331.479340943449, 5)
def test_oh(self):
bfs = basisset(oh,'sto-3g')
hamiltonian = rohf(bfs)
iterator = ROSCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -74.359151530162, 5)
def test_N8(self):
"""N8"""
N8 = read_xyz('./molfiles/N8.xyz')
bfs = basisset(N8,'cc-pvdz')
hamiltonian = rohf(bfs, twoe_factory=libint_twoe_integrals)
iterator = ROSCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -434.992755329296, 5)
class test_libint_cuhf(unittest.TestCase):
"""use UHF energy reference"""
def test_CH3(self):
"""CH3 radical"""
CH3 = read_xyz('./molfiles/CH3.xyz')
bfs = basisset(CH3,'sto-3g')
hamiltonian = cuhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = USCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -38.952023222533, 5)
def test_CF3(self):
"""CF3 radical"""
CF3 = read_xyz('./molfiles/CF3.xyz')
bfs = basisset(CF3,'sto-3g')
hamiltonian = cuhf(bfs, twoe_factory=libint_twoe_integrals)
iterator = USCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -331.480688906400, 5)
def test_oh(self):
bfs = basisset(oh,'sto-3g')
hamiltonian = cuhf(bfs)
iterator = USCFIterator(hamiltonian)
iterator.converge()
self.assertTrue(iterator.converged)
self.assertAlmostEqual(iterator.energy, -74.360233544941, 4)
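# Test-suite driver: loads the TestCase classes above into unittest suites and
# runs them with a TextTestRunner.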
def runsuite(verbose=True):
# To use psyco, uncomment this line:
#import psyco; psyco.full()
verbosity = 2 if verbose else 1
# Logging is configured at the DEBUG level for maximum output; comment out the next line for less:
logging.basicConfig(format="%(message)s",level=logging.DEBUG)
suite1 = unittest.TestLoader().loadTestsFromTestCase(test_scf)
suite2 = unittest.TestLoader().loadTestsFromTestCase(test_libint_rhf)
suite3 = unittest.TestLoader().loadTestsFromTestCase(test_unstable)
suite4 = unittest.TestLoader().loadTestsFromTestCase(test_libint_uhf)
suite5 = unittest.TestLoader().loadTestsFromTestCase(test_libint_rohf)
suite6 = unittest.TestLoader().loadTestsFromTestCase(test_libint_cuhf)
alltests = unittest.TestSuite([suite1, suite2, suite3, suite4, suite5, suite6])
unittest.TextTestRunner(verbosity=verbosity).run(alltests)
# Running without verbosity is equivalent to replacing the above
# two lines with the following:
#unittest.main()
def debugsuite():
import cProfile,pstats
cProfile.run('runsuite()','prof')
prof = pstats.Stats('prof')
prof.strip_dirs().sort_stats('time').print_stats(15)
if __name__ == '__main__':
import sys
if "-d" in sys.argv:
debugsuite()
else:
runsuite()
"""
Web Service protocol for OpenNSA.
Author: Henrik Thostrup Jensen <htj@nordu.net>
Copyright: NORDUnet (2011)
"""
import uuid
from opennsa.protocols.webservice.ext import twistedsuds
WSDL_PROVIDER = 'file://%s/ogf_nsi_connection_provider_v1_0.wsdl'
WSDL_REQUESTER = 'file://%s/ogf_nsi_connection_requester_v1_0.wsdl'
URN_UUID_PREFIX = 'urn:uuid:'
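# Helpers: utcTime() renders a datetime as an ISO 8601 UTC timestamp without
# fractional seconds (e.g. 2011-10-05T14:00:00Z), and createCorrelationId()
# builds a urn:uuid correlation id from a freshly generated UUID.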
def utcTime(dt):
return dt.isoformat().rsplit('.',1)[0] + 'Z'
def createCorrelationId():
return URN_UUID_PREFIX + str(uuid.uuid1())
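# Client for invoking the NSI provider operations (reserve, provision, release,
# terminate, query) on a remote NSA through the SUDS-generated SOAP bindings.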
class ProviderClient:
def __init__(self, reply_to, wsdl_dir, ctx_factory=None):
self.reply_to = reply_to
self.client = twistedsuds.TwistedSUDSClient(WSDL_PROVIDER % wsdl_dir, ctx_factory=ctx_factory)
def _createGenericRequestType(self, requester_nsa, provider_nsa, connection_id):
req = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}GenericRequestType')
req.requesterNSA = requester_nsa.urn()
req.providerNSA = provider_nsa.urn()
req.connectionId = connection_id
return req
def reserve(self, correlation_id, requester_nsa, provider_nsa, session_security_attr, global_reservation_id, description, connection_id, service_parameters):
res_req = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}ReserveType')
res_req.requesterNSA = requester_nsa.urn()
res_req.providerNSA = provider_nsa.urn()
#<sessionSecurityAttr>
# <ns5:Attribute Name="globalUserName" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:basic">
# <ns5:AttributeValue xsi:type="xs:string" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">jrv@internet2.edu</ns5:AttributeValue>
# </ns5:Attribute>
# <ns5:Attribute Name="role" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:basic">
# <ns5:AttributeValue xsi:type="xs:string" xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">AuthorizedUser</ns5:AttributeValue>
# </ns5:Attribute>
#</sessionSecurityAttr>
# OK, giving up for now, SUDS refuses to put the right namespace on this
#user_attr = self.client.createType('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute')
#user_attr._Name = 'globalUserName'
#user_attr._NameFormat = 'urn:oasis:names:tc:SAML:2.0:attrname-format:basic'
#user_attr.AttributeValue = ['jrv@internet2.edu']
#role_attr = self.client.createType('{urn:oasis:names:tc:SAML:2.0:assertion}Attribute')
#role_attr._Name = 'role'
#role_attr._NameFormat = 'urn:oasis:names:tc:SAML:2.0:attrname-format:basic'
#role_attr.AttributeValue = ['AuthorizedUser']
#res_req.sessionSecurityAttr['Attribute'] = [ user_attr, role_attr ]
res_req.reservation.globalReservationId = global_reservation_id
res_req.reservation.description = description
res_req.reservation.connectionId = connection_id
res_req.reservation.path.directionality = service_parameters.directionality
res_req.reservation.path.sourceSTP.stpId = service_parameters.source_stp.urn()
#res_req.reservation.path.sourceSTP.stpSpecAttrs.guaranteed = ['123' ]
#res_req.reservation.path.sourceSTP.stpSpecAttrs.preferred = ['abc', 'def']
res_req.reservation.path.destSTP.stpId = service_parameters.dest_stp.urn()
res_req.reservation.serviceParameters.schedule.startTime = utcTime(service_parameters.start_time)
res_req.reservation.serviceParameters.schedule.endTime = utcTime(service_parameters.end_time)
res_req.reservation.serviceParameters.bandwidth.desired = service_parameters.bandwidth.desired
res_req.reservation.serviceParameters.bandwidth.minimum = service_parameters.bandwidth.minimum
res_req.reservation.serviceParameters.bandwidth.maximum = service_parameters.bandwidth.maximum
#res_req.reservation.serviceParameters.serviceAttributes.guaranteed = [ '1a' ]
#res_req.reservation.serviceParameters.serviceAttributes.preferred = [ '2c', '3d' ]
d = self.client.invoke(provider_nsa.url(), 'reserve', correlation_id, self.reply_to, res_req)
return d
def provision(self, correlation_id, requester_nsa, provider_nsa, session_security_attr, connection_id):
req = self._createGenericRequestType(requester_nsa, provider_nsa, connection_id)
d = self.client.invoke(provider_nsa.url(), 'provision', correlation_id, self.reply_to, req)
return d
def release(self, correlation_id, requester_nsa, provider_nsa, session_security_attr, connection_id):
req = self._createGenericRequestType(requester_nsa, provider_nsa, connection_id)
d = self.client.invoke(provider_nsa.url(), 'release', correlation_id, self.reply_to, req)
return d
def terminate(self, correlation_id, requester_nsa, provider_nsa, session_security_attr, connection_id):
req = self._createGenericRequestType(requester_nsa, provider_nsa, connection_id)
d = self.client.invoke(provider_nsa.url(), 'terminate', correlation_id, self.reply_to, req)
return d
def query(self, correlation_id, requester_nsa, provider_nsa, session_security_attr, operation="Summary", connection_ids=None, global_reservation_ids=None):
req = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}QueryType')
#print req
req.requesterNSA = requester_nsa.urn()
req.providerNSA = provider_nsa.urn()
req.operation = operation
req.queryFilter.connectionId = connection_ids or []
req.queryFilter.globalReservationId = global_reservation_ids or []
#print req
d = self.client.invoke(provider_nsa.url(), 'query', correlation_id, self.reply_to, req)
return d
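# Client for sending the corresponding confirmed/failed callbacks back to a
# requester NSA, using the requester WSDL.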
class RequesterClient:
def __init__(self, wsdl_dir, ctx_factory=None):
self.client = twistedsuds.TwistedSUDSClient(WSDL_REQUESTER % wsdl_dir, ctx_factory=ctx_factory)
def _createGenericConfirmType(self, requester_nsa, provider_nsa, global_reservation_id, connection_id):
conf = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}GenericConfirmedType')
conf.requesterNSA = requester_nsa
conf.providerNSA = provider_nsa
conf.globalReservationId = global_reservation_id
conf.connectionId = connection_id
return conf
def reserveConfirmed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, global_reservation_id, description, connection_id, service_parameters):
#correlation_id = self._createCorrelationId()
res_conf = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}ReserveConfirmedType')
res_conf.requesterNSA = requester_nsa
res_conf.providerNSA = provider_nsa
res_conf.reservation.globalReservationId = global_reservation_id
res_conf.reservation.description = description
res_conf.reservation.connectionId = connection_id
#res_conf.reservation.connectionState = 'Reserved' # not sure why this doesn't work
res_conf.reservation.serviceParameters.schedule.startTime = utcTime(service_parameters.start_time)
res_conf.reservation.serviceParameters.schedule.endTime = utcTime(service_parameters.end_time)
res_conf.reservation.serviceParameters.bandwidth.desired = service_parameters.bandwidth.desired
res_conf.reservation.serviceParameters.bandwidth.minimum = service_parameters.bandwidth.minimum
res_conf.reservation.serviceParameters.bandwidth.maximum = service_parameters.bandwidth.maximum
res_conf.reservation.path.directionality = service_parameters.directionality
res_conf.reservation.path.sourceSTP.stpId = service_parameters.source_stp.urn()
res_conf.reservation.path.destSTP.stpId = service_parameters.dest_stp.urn()
d = self.client.invoke(requester_uri, 'reserveConfirmed', correlation_id, res_conf)
return d
def reserveFailed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, global_reservation_id, connection_id, connection_state, error_msg):
res_fail = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}GenericFailedType')
nsi_ex = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}ServiceExceptionType')
res_fail.requesterNSA = requester_nsa
res_fail.providerNSA = provider_nsa
res_fail.globalReservationId = global_reservation_id
res_fail.connectionId = connection_id
res_fail.connectionState = connection_state
nsi_ex.errorId = 'RESERVATION_FAILURE'
nsi_ex.text = error_msg
res_fail.serviceException = nsi_ex
d = self.client.invoke(requester_uri, 'reserveFailed', correlation_id, res_fail)
return d
def provisionConfirmed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, global_reservation_id, connection_id):
conf = self._createGenericConfirmType(requester_nsa, provider_nsa, global_reservation_id, connection_id)
d = self.client.invoke(requester_uri, 'provisionConfirmed', correlation_id, conf)
return d
def provisionFailed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, global_reservation_id, connection_id, connection_state, error_msg):
gft = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}GenericFailedType')
net = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}ServiceExceptionType')
gft.requesterNSA = requester_nsa
gft.providerNSA = provider_nsa
gft.globalReservationId = global_reservation_id
gft.connectionId = connection_id
gft.connectionState = connection_state
net.errorId = 'PROVISION_FAILURE'
net.text = error_msg
gft.serviceException = net
d = self.client.invoke(requester_uri, 'provisionFailed', correlation_id, gft)
return d
def releaseConfirmed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, global_reservation_id, connection_id):
conf = self._createGenericConfirmType(requester_nsa, provider_nsa, global_reservation_id, connection_id)
d = self.client.invoke(requester_uri, 'releaseConfirmed', correlation_id, conf)
return d
def releaseFailed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, global_reservation_id, connection_id, connection_state, error_msg):
gft = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}GenericFailedType')
net = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}ServiceExceptionType')
gft.requesterNSA = requester_nsa
gft.providerNSA = provider_nsa
gft.globalReservationId = global_reservation_id
gft.connectionId = connection_id
gft.connectionState = connection_state
net.errorId = 'RELEASE_FAILURE'
net.text = error_msg
gft.serviceException = net
d = self.client.invoke(requester_uri, 'releaseFailed', correlation_id, gft)
return d
def terminateConfirmed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, global_reservation_id, connection_id):
conf = self._createGenericConfirmType(requester_nsa, provider_nsa, global_reservation_id, connection_id)
d = self.client.invoke(requester_uri, 'terminateConfirmed', correlation_id, conf)
return d
def terminateFailed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, global_reservation_id, connection_id, connection_state, error_msg):
gft = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}GenericFailedType')
net = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}ServiceExceptionType')
gft.requesterNSA = requester_nsa
gft.providerNSA = provider_nsa
gft.globalReservationId = global_reservation_id
gft.connectionId = connection_id
gft.connectionState = connection_state
net.errorId = 'TERMINATE_FAILURE'
net.text = error_msg
gft.serviceException = net
d = self.client.invoke(requester_uri, 'terminateFailed', correlation_id, gft)
return d
def queryConfirmed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, operation, connections):
res = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}QueryConfirmedType')
res.requesterNSA = requester_nsa
res.providerNSA = provider_nsa
if operation == "Summary":
qsrs = []
for conn in connections:
qsr = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}QuerySummaryResultType')
#print qsr
qsr.globalReservationId = conn.global_reservation_id
qsr.description = conn.description
qsr.connectionId = conn.connection_id
qsr.connectionState = conn.state()
qsr.path.sourceSTP.stpId = conn.source_stp.urn()
qsr.path.destSTP.stpId = conn.dest_stp.urn()
qsr.serviceParameters.schedule.startTime = utcTime(conn.service_parameters.start_time)
qsr.serviceParameters.schedule.endTime = utcTime(conn.service_parameters.end_time)
qsr.serviceParameters.bandwidth.desired = conn.service_parameters.bandwidth.desired
qsr.serviceParameters.bandwidth.minimum = conn.service_parameters.bandwidth.minimum
qsr.serviceParameters.bandwidth.maximum = conn.service_parameters.bandwidth.maximum
def createOrderedSTP(stp, rank):
ostp = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}OrderedServiceTerminationPointType')
ostp.stpId = stp.urn()
ostp._order = rank
return ostp
# create list of all stps, but skip source and dest stp
stps = [ stp for sc in conn.connections() for stp in sc.stps() ] [1:-1]
for i, stp in enumerate(stps):
qsr.path.stpList.stp.append( createOrderedSTP(stp, i) )
qsrs.append(qsr)
res.reservationSummary = qsrs
elif operation == "Details":
qdr = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}QueryDetailsResultType')
#print qdr
qdr.globalReservationId = '123'
res.reservationDetails = [ qdr ]
else:
raise ValueError('Invalid query operation type')
d = self.client.invoke(requester_uri, 'queryConfirmed', correlation_id, res)
return d
def queryFailed(self, requester_uri, correlation_id, requester_nsa, provider_nsa, error_msg):
print "CLIENT QUERY FAILED"
qft = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}QueryFailedType')
net = self.client.createType('{http://schemas.ogf.org/nsi/2011/10/connection/types}ServiceExceptionType')
qft.requesterNSA = requester_nsa
qft.providerNSA = provider_nsa
net.errorId = 'QUERY_FAILURE'
net.text = error_msg
qft.serviceException = net
d = self.client.invoke(requester_uri, 'queryFailed', correlation_id, qft)
return d
#===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import lit
import lit.formats
import os
import pipes
import re
import shutil
import subprocess
def _supportsVerify(config):
"""
Determine whether clang-verify is supported by the given configuration.
This is done by checking whether the %{cxx} substitution in that
configuration supports certain compiler flags.
"""
command = "%{{cxx}} -xc++ {} -Werror -fsyntax-only -Xclang -verify-ignore-unexpected".format(os.devnull)
command = lit.TestRunner.applySubstitutions([command], config.substitutions,
recursion_limit=config.recursiveExpansionLimit)[0]
devNull = open(os.devnull, 'w')
result = subprocess.call(command, shell=True, stdout=devNull, stderr=devNull)
return result == 0
def _getTempPaths(test):
"""
Return the values to use for the %T and %t substitutions, respectively.
The difference between this and Lit's default behavior is that we guarantee
that %T is a path unique to the test being run.
"""
tmpDir, _ = lit.TestRunner.getTempPaths(test)
_, testName = os.path.split(test.getExecPath())
tmpDir = os.path.join(tmpDir, testName + '.dir')
tmpBase = os.path.join(tmpDir, 't')
return tmpDir, tmpBase
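# Illustrative example (paths are hypothetical): for a test named foo.pass.cpp,
# this would return something like
#   tmpDir  = <build>/test/.../Output/foo.pass.cpp.dir
#   tmpBase = <build>/test/.../Output/foo.pass.cpp.dir/t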
def parseScript(test, preamble):
"""
Extract the script from a test, with substitutions applied.
Returns a list of commands ready to be executed.
- test
The lit.Test to parse.
- preamble
A list of commands to perform before any command in the test.
These commands can contain unexpanded substitutions, but they
must not be of the form 'RUN:' -- they must be proper commands
once substituted.
"""
# Get the default substitutions
tmpDir, tmpBase = _getTempPaths(test)
useExternalSh = True
substitutions = lit.TestRunner.getDefaultSubstitutions(test, tmpDir, tmpBase,
normalize_slashes=useExternalSh)
# Add the %{build} and %{run} convenience substitutions
substitutions.append(('%{build}', '%{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe'))
substitutions.append(('%{run}', '%{exec} %t.exe'))
# Parse the test file, including custom directives
additionalCompileFlags = []
fileDependencies = []
parsers = [
lit.TestRunner.IntegratedTestKeywordParser('FILE_DEPENDENCIES:',
lit.TestRunner.ParserKind.LIST,
initial_value=fileDependencies),
lit.TestRunner.IntegratedTestKeywordParser('ADDITIONAL_COMPILE_FLAGS:',
lit.TestRunner.ParserKind.LIST,
initial_value=additionalCompileFlags)
]
scriptInTest = lit.TestRunner.parseIntegratedTestScript(test, additional_parsers=parsers,
require_script=not preamble)
if isinstance(scriptInTest, lit.Test.Result):
return scriptInTest
script = []
# For each file dependency in FILE_DEPENDENCIES, inject a command to copy
# that file to the execution directory. Execute the copy from %S to allow
# relative paths from the test directory.
for dep in fileDependencies:
script += ['%dbg(SETUP) cd %S && cp {} %T'.format(dep)]
script += preamble
script += scriptInTest
# Add compile flags specified with ADDITIONAL_COMPILE_FLAGS.
substitutions = [(s, x + ' ' + ' '.join(additionalCompileFlags)) if s == '%{compile_flags}'
else (s, x) for (s, x) in substitutions]
# Perform substitutions in the script itself.
script = lit.TestRunner.applySubstitutions(script, substitutions,
recursion_limit=test.config.recursiveExpansionLimit)
return script
class CxxStandardLibraryTest(lit.formats.TestFormat):
"""
Lit test format for the C++ Standard Library conformance test suite.
This test format is based on top of the ShTest format -- it basically
creates a shell script performing the right operations (compile/link/run)
based on the extension of the test file it encounters. It supports files
with the following extensions:
FOO.pass.cpp - Compiles, links and runs successfully
FOO.pass.mm - Same as .pass.cpp, but for Objective-C++
FOO.run.fail.cpp - Compiles and links successfully, but fails at runtime
FOO.compile.pass.cpp - Compiles successfully, link and run not attempted
FOO.compile.fail.cpp - Does not compile successfully
FOO.link.pass.cpp - Compiles and links successfully, run not attempted
FOO.link.fail.cpp - Compiles successfully, but fails to link
FOO.sh.<anything> - A builtin Lit Shell test
FOO.verify.cpp - Compiles with clang-verify. This type of test is
automatically marked as UNSUPPORTED if the compiler
does not support Clang-verify.
FOO.fail.cpp - Compiled with clang-verify if clang-verify is
supported, and equivalent to a .compile.fail.cpp
test otherwise. This is supported only for backwards
compatibility with the test suite.
Substitution requirements
===============================
The test format operates by assuming that each test's configuration provides
the following substitutions, which it will reuse in the shell scripts it
constructs:
%{cxx} - A command that can be used to invoke the compiler
%{compile_flags} - Flags to use when compiling a test case
%{link_flags} - Flags to use when linking a test case
%{flags} - Flags to use either when compiling or linking a test case
%{exec} - A command to prefix the execution of executables
Note that when building an executable (as opposed to only compiling a source
file), all three of %{flags}, %{compile_flags} and %{link_flags} will be used
in the same command line. In other words, the test format doesn't perform
separate compilation and linking steps in this case.
Additional supported directives
===============================
In addition to everything that's supported in Lit ShTests, this test format
also understands the following directives inside test files:
// FILE_DEPENDENCIES: file, directory, /path/to/file
This directive expresses that the test requires the provided files
or directories in order to run. An example is a test that requires
some test input stored in a data file. When a test file contains
such a directive, this test format will collect them and copy them
to the directory represented by %T. The intent is that %T contains
all the inputs necessary to run the test, such that e.g. execution
on a remote host can be done by simply copying %T to the host.
// ADDITIONAL_COMPILE_FLAGS: flag1, flag2, flag3
This directive will cause the provided flags to be added to the
%{compile_flags} substitution for the test that contains it. This
allows adding special compilation flags without having to use a
.sh.cpp test, which would be more powerful but perhaps overkill.
Additional provided substitutions and features
==============================================
The test format will define the following substitutions for use inside tests:
%{build}
Expands to a command-line that builds the current source
file with the %{flags}, %{compile_flags} and %{link_flags}
substitutions, and that produces an executable named %t.exe.
%{run}
Equivalent to `%{exec} %t.exe`. This is intended to be used
in conjunction with the %{build} substitution.
"""
def getTestsInDirectory(self, testSuite, pathInSuite, litConfig, localConfig):
SUPPORTED_SUFFIXES = ['[.]pass[.]cpp$', '[.]pass[.]mm$', '[.]run[.]fail[.]cpp$',
'[.]compile[.]pass[.]cpp$', '[.]compile[.]fail[.]cpp$',
'[.]link[.]pass[.]cpp$', '[.]link[.]fail[.]cpp$',
'[.]sh[.][^.]+$',
'[.]verify[.]cpp$',
'[.]fail[.]cpp$']
sourcePath = testSuite.getSourcePath(pathInSuite)
for filename in os.listdir(sourcePath):
# Ignore dot files and excluded tests.
if filename.startswith('.') or filename in localConfig.excludes:
continue
filepath = os.path.join(sourcePath, filename)
if not os.path.isdir(filepath):
if any([re.search(ext, filename) for ext in SUPPORTED_SUFFIXES]):
yield lit.Test.Test(testSuite, pathInSuite + (filename,), localConfig)
def _checkBaseSubstitutions(self, substitutions):
substitutions = [s for (s, _) in substitutions]
for s in ['%{cxx}', '%{compile_flags}', '%{link_flags}', '%{flags}', '%{exec}']:
assert s in substitutions, "Required substitution {} was not provided".format(s)
def _disableWithModules(self, test):
with open(test.getSourcePath(), 'rb') as f:
contents = f.read()
return b'#define _LIBCPP_ASSERT' in contents
def execute(self, test, litConfig):
self._checkBaseSubstitutions(test.config.substitutions)
VERIFY_FLAGS = '-Xclang -verify -Xclang -verify-ignore-unexpected=note -ferror-limit=0'
supportsVerify = _supportsVerify(test.config)
filename = test.path_in_suite[-1]
# TODO(ldionne): We currently disable tests that re-define _LIBCPP_ASSERT
# when we run with modules enabled. Instead, we should
# split the part that does a death test outside of the
# test, and only disable that part when modules are
# enabled.
if '-fmodules' in test.config.available_features and self._disableWithModules(test):
return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test {} is unsupported when modules are enabled'.format(test.getFullName()))
if re.search('[.]sh[.][^.]+$', filename):
steps = [ ] # The steps are already in the script
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.pass.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.compile.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.pass.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.link.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} -c -o %t.o",
"%dbg(LINKED WITH) ! %{cxx} %t.o %{flags} %{link_flags} -o %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.run.fail.cpp'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
"%dbg(EXECUTED AS) %{exec} ! %t.exe"
]
return self._executeShTest(test, litConfig, steps)
elif filename.endswith('.verify.cpp'):
if not supportsVerify:
return lit.Test.Result(lit.Test.UNSUPPORTED,
"Test {} requires support for Clang-verify, which isn't supported by the compiler".format(test.getFullName()))
steps = [
# Note: Use -Wno-error to make sure diagnostics are not promoted to errors,
# since treating them as errors doesn't make sense for clang-verify tests.
"%dbg(COMPILED WITH) %{{cxx}} %s %{{flags}} %{{compile_flags}} -fsyntax-only -Wno-error {}".format(VERIFY_FLAGS)
]
return self._executeShTest(test, litConfig, steps)
# Make sure to check these ones last, since they will match other
# suffixes above too.
elif filename.endswith('.pass.cpp') or filename.endswith('.pass.mm'):
steps = [
"%dbg(COMPILED WITH) %{cxx} %s %{flags} %{compile_flags} %{link_flags} -o %t.exe",
"%dbg(EXECUTED AS) %{exec} %t.exe"
]
return self._executeShTest(test, litConfig, steps)
# This is like a .verify.cpp test when clang-verify is supported,
# otherwise it's like a .compile.fail.cpp test. This is only provided
# for backwards compatibility with the test suite.
elif filename.endswith('.fail.cpp'):
if supportsVerify:
steps = [
"%dbg(COMPILED WITH) %{{cxx}} %s %{{flags}} %{{compile_flags}} -fsyntax-only -Wno-error {}".format(VERIFY_FLAGS)
]
else:
steps = [
"%dbg(COMPILED WITH) ! %{cxx} %s %{flags} %{compile_flags} -fsyntax-only"
]
return self._executeShTest(test, litConfig, steps)
else:
return lit.Test.Result(lit.Test.UNRESOLVED, "Unknown test suffix for '{}'".format(filename))
# Utility function to add compile flags in lit.local.cfg files.
def addCompileFlags(self, config, *flags):
string = ' '.join(flags)
config.substitutions = [(s, x + ' ' + string) if s == '%{compile_flags}' else (s, x) for (s, x) in config.substitutions]
def _executeShTest(self, test, litConfig, steps):
if test.config.unsupported:
return lit.Test.Result(lit.Test.UNSUPPORTED, 'Test is unsupported')
script = parseScript(test, steps)
if isinstance(script, lit.Test.Result):
return script
if litConfig.noExecute:
return lit.Test.Result(lit.Test.XFAIL if test.isExpectedToFail() else lit.Test.PASS)
else:
_, tmpBase = _getTempPaths(test)
useExternalSh = True
return lit.TestRunner._runShTest(test, litConfig, useExternalSh, script, tmpBase)
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Q
from django.utils import timezone
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from djblets.db.fields import CounterField, JSONField
from djblets.db.managers import ConcurrencyManager
from reviewboard.admin.read_only import is_site_read_only_for
@python_2_unicode_compatible
class BaseComment(models.Model):
"""The base class for all comment types."""
OPEN = 'O'
RESOLVED = 'R'
DROPPED = 'D'
VERIFYING_RESOLVED = 'A'
VERIFYING_DROPPED = 'B'
ISSUE_STATUSES = (
(OPEN, _('Open')),
(RESOLVED, _('Resolved')),
(DROPPED, _('Dropped')),
(VERIFYING_RESOLVED, _('Waiting for verification to resolve')),
(VERIFYING_DROPPED, _('Waiting for verification to drop')),
)
ISSUE_STATUS_TO_STRING = {
OPEN: 'open',
RESOLVED: 'resolved',
DROPPED: 'dropped',
VERIFYING_RESOLVED: 'verifying-resolved',
VERIFYING_DROPPED: 'verifying-dropped',
}
ISSUE_STRING_TO_STATUS = {
'open': OPEN,
'resolved': RESOLVED,
'dropped': DROPPED,
'verifying-resolved': VERIFYING_RESOLVED,
'verifying-dropped': VERIFYING_DROPPED,
}
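# issue_status_to_string() and issue_string_to_status() below convert between
# these one-character database values and the string form used by the API,
# e.g. OPEN <-> 'open'.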
issue_opened = models.BooleanField(_('Issue Opened'), default=False)
issue_status = models.CharField(_('Issue Status'),
max_length=1,
choices=ISSUE_STATUSES,
blank=True,
null=True,
db_index=True)
reply_to = models.ForeignKey('self', blank=True, null=True,
related_name='replies',
verbose_name=_('Reply To'))
timestamp = models.DateTimeField(_('Timestamp'), default=timezone.now)
text = models.TextField(_('Comment Text'))
rich_text = models.BooleanField(_('Rich Text'), default=False)
extra_data = JSONField(null=True)
# Set this up with a ConcurrencyManager to help prevent race conditions.
objects = ConcurrencyManager()
@staticmethod
def issue_status_to_string(status):
"""Return a string representation of the status field.
Args:
status (unicode):
The value of the ``issue_status`` field.
Returns:
unicode:
A string representation of the status used for the API and other
interfaces.
"""
try:
return BaseComment.ISSUE_STATUS_TO_STRING[status]
except KeyError:
return ''
@staticmethod
def issue_string_to_status(status):
"""Return a DB representation of the given status string.
Args:
status (unicode):
The status string to convert.
Returns:
unicode:
A value suitable for storing in the ``issue_status`` field.
"""
try:
return BaseComment.ISSUE_STRING_TO_STATUS[status]
except KeyError:
raise Exception('Invalid issue status "%s"' % status)
def _get_require_verification(self):
return self.extra_data.get('require_verification', False)
def _set_require_verification(self, value):
if not isinstance(value, bool):
raise ValueError('require_verification must be a bool')
self.extra_data['require_verification'] = value
require_verification = property(
_get_require_verification, _set_require_verification,
doc='Whether this comment requires verification before closing.')
def __init__(self, *args, **kwargs):
"""Initialize the comment.
Args:
*args (tuple):
Positional arguments to pass through to the model
initialization.
**kwargs (dict):
Keyword arguments to pass through to the model
initialization.
"""
super(BaseComment, self).__init__(*args, **kwargs)
self._loaded_issue_status = self.issue_status
def get_review_request(self):
"""Return this comment's review request.
Returns:
reviewboard.reviews.models.review_request.ReviewRequest:
The review request that this comment was made on.
"""
if hasattr(self, '_review_request'):
return self._review_request
else:
return self.get_review().review_request
def get_review(self):
"""Return this comment's review.
Returns:
reviewboard.reviews.models.review.Review:
The review containing this comment.
"""
if hasattr(self, '_review'):
return self._review
else:
return self.review.get()
def get_review_url(self):
"""Return the URL to view this comment.
Returns:
unicode:
The absolute URL to view this comment in the web UI.
"""
return '%s#%s%d' % (self.get_review_request().get_absolute_url(),
self.anchor_prefix, self.id)
def is_reply(self):
"""Return whether this comment is a reply to another comment.
Returns:
bool:
True if the comment is a reply.
"""
return self.reply_to_id is not None
is_reply.boolean = True
def is_accessible_by(self, user):
"""Return whether the user can access this comment.
Args:
user (django.contrib.auth.models.User):
The user being checked.
Returns:
bool:
True if the given user can access this comment.
"""
return self.get_review().is_accessible_by(user)
def is_mutable_by(self, user):
"""Return whether the user can modify this comment.
Args:
user (django.contrib.auth.models.User):
The user being checked.
Returns:
bool:
True if the given user can modify this comment.
"""
return self.get_review().is_mutable_by(user)
def public_replies(self, user=None):
"""Return the public replies to this comment.
Args:
user (django.contrib.auth.models.User, optional):
A user to filter by, if desired. If specified, only replies
authored by this user will be returned.
Returns:
list of reviewboard.reviews.models.base_comment.BaseComment:
The public replies to this comment.
"""
if hasattr(self, '_replies'):
return self._replies
if user and user.is_authenticated:
return self.replies.filter(Q(review__public=True) |
Q(review__user=user))
else:
return self.replies.filter(review__public=True)
def can_change_issue_status(self, user):
"""Return whether the user can change the issue status.
Currently, this is allowed for:
- The user who owns the review request.
- The user who opened the issue (posted the comment).
Args:
user (django.contrib.auth.models.User):
The user being checked.
Returns:
bool:
True if the given user is allowed to change the issue status.
"""
if not (user and user.is_authenticated):
return False
return ((self.get_review_request().is_mutable_by(user) or
user.pk == self.get_review().user_id) and
not is_site_read_only_for(user))
def can_verify_issue_status(self, user):
"""Return whether the user can verify the issue status.
Currently this is allowed for:
- The user who opened the issue.
- Administrators.
Args:
user (django.contrib.auth.models.User):
The user being checked.
Returns:
bool:
True if the given user is allowed to verify the issue status.
"""
if not (user and user.is_authenticated):
return False
review = self.get_review()
local_site = review.review_request.local_site
return (user.is_superuser or
user.pk == review.user_id or
(local_site and local_site.is_mutable_by(user)))
def save(self, **kwargs):
"""Save the comment.
Args:
**kwargs (dict):
Keyword arguments passed to the method (unused).
"""
from reviewboard.reviews.models.review_request import ReviewRequest
self.timestamp = timezone.now()
super(BaseComment, self).save()
try:
# Update the review timestamp, but only if it's a draft.
# Otherwise, resolving an issue will change the timestamp of
# the review.
review = self.get_review()
if not review.public:
review.timestamp = self.timestamp
review.save()
else:
if (not self.is_reply() and
self.issue_opened and
self._loaded_issue_status != self.issue_status):
# The user has toggled the issue status of this comment,
# so update the issue counts for the review request.
old_field = ReviewRequest.ISSUE_COUNTER_FIELDS[
self._loaded_issue_status]
new_field = ReviewRequest.ISSUE_COUNTER_FIELDS[
self.issue_status]
if old_field != new_field:
CounterField.increment_many(
self.get_review_request(),
{
old_field: -1,
new_field: 1,
})
q = ReviewRequest.objects.filter(pk=review.review_request_id)
q.update(last_review_activity_timestamp=self.timestamp)
except ObjectDoesNotExist:
pass
def __str__(self):
"""Return a string representation of the comment.
Returns:
unicode:
A string representation of the comment.
"""
return self.text
class Meta:
abstract = True
app_label = 'reviews'
ordering = ['timestamp']
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data parser and processing.
Parse image and ground truths in a dataset to training targets and package them
into (image, labels) tuple for RetinaNet.
T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar
Focal Loss for Dense Object Detection. arXiv:1708.02002
"""
import tensorflow.compat.v1 as tf
from dataloader import anchor
from dataloader import mode_keys as ModeKeys
from dataloader import tf_example_decoder
from utils import box_utils
from utils import dataloader_utils
from utils import input_utils
# Currently there are import errors related to AutoAugment and TF 2.x,
# so we guard the import with a try/except.
try:
from utils import autoaugment_utils # pylint: disable=g-import-not-at-top
AUTOAUG_IMPORTED = True
except ImportError:
AUTOAUG_IMPORTED = False
class Parser(object):
"""Parser to parse an image and its annotations into a dictionary of tensors."""
def __init__(self,
output_size,
min_level,
max_level,
num_scales,
aspect_ratios,
anchor_size,
match_threshold=0.5,
unmatched_threshold=0.5,
aug_rand_hflip=False,
aug_scale_min=1.0,
aug_scale_max=1.0,
aug_policy='',
skip_crowd_during_training=True,
max_num_instances=100,
use_bfloat16=True,
regenerate_source_id=False,
mode=None):
"""Initializes parameters for parsing annotations in the dataset.
Args:
output_size: `Tensor` or `list` for [height, width] of the output image. The
output_size should be divisible by the largest feature stride 2^max_level.
min_level: `int` number of minimum level of the output feature pyramid.
max_level: `int` number of maximum level of the output feature pyramid.
num_scales: `int` number of intermediate scales added on each level.
For instance, num_scales=2 adds one additional intermediate anchor scale,
giving scales [2^0, 2^0.5] on each level.
aspect_ratios: `list` of float numbers representing the aspect ratios of
anchors added on each level. Each number indicates the ratio of width to
height. For instance, aspect_ratios=[1.0, 2.0, 0.5] adds three anchors
on each scale level.
anchor_size: `float` number representing the scale of size of the base
anchor to the feature stride 2^level.
match_threshold: `float` number between 0 and 1 representing the
lower-bound threshold to assign positive labels for anchors. An anchor
with a score over the threshold is labeled positive.
unmatched_threshold: `float` number between 0 and 1 representing the
upper-bound threshold to assign negative labels for anchors. An anchor
with a score below the threshold is labeled negative.
aug_rand_hflip: `bool`, if True, augment training with random
horizontal flip.
aug_scale_min: `float`, the minimum scale applied to `output_size` for
data augmentation during training.
aug_scale_max: `float`, the maximum scale applied to `output_size` for
data augmentation during training.
aug_policy: `str`, the augmentation policy to use.
This can be an autoaugment policy name, for example 'v0'.
An empty string indicates no augmentation policy.
The augment policy is independent from `aug_rand_hflip`,
`aug_scale_min`, and `aug_scale_max`.
skip_crowd_during_training: `bool`, if True, skip annotations labeled with
`is_crowd` equal to 1 during training.
max_num_instances: `int`, the maximum number of instances in an
image. The groundtruth data will be padded to `max_num_instances`.
use_bfloat16: `bool`, if True, cast output image to tf.bfloat16.
regenerate_source_id: `bool`, if True, the TF example decoder will use a
hashed value of `image/encoded` for `image/source_id`.
mode: a ModeKeys. Specifies if this is training, evaluation, prediction or
prediction with groundtruths in the outputs.
"""
self._mode = mode
self._max_num_instances = max_num_instances
self._skip_crowd_during_training = skip_crowd_during_training
self._is_training = (mode == ModeKeys.TRAIN)
self._example_decoder = tf_example_decoder.TfExampleDecoder(
include_mask=False, regenerate_source_id=regenerate_source_id)
# Anchor.
self._output_size = output_size
self._min_level = min_level
self._max_level = max_level
self._num_scales = num_scales
self._aspect_ratios = aspect_ratios
self._anchor_size = anchor_size
self._match_threshold = match_threshold
self._unmatched_threshold = unmatched_threshold
# Data augmentation.
self._aug_rand_hflip = aug_rand_hflip
self._aug_scale_min = aug_scale_min
self._aug_scale_max = aug_scale_max
self._aug_policy = aug_policy
# Device.
self._use_bfloat16 = use_bfloat16
# Data is parsed differently depending on the ModeKeys value.
if mode == ModeKeys.TRAIN:
self._parse_fn = self._parse_train_data
elif mode == ModeKeys.EVAL:
self._parse_fn = self._parse_eval_data
elif mode == ModeKeys.PREDICT or mode == ModeKeys.PREDICT_WITH_GT:
self._parse_fn = self._parse_predict_data
else:
raise ValueError('mode is not defined.')
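# The _parse_* methods below share a common flow: normalize and resize the
# image, convert groundtruth boxes to coordinates on the scaled image, and
# (where training targets are needed) assign anchor targets with
# anchor.AnchorLabeler.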
def __call__(self, value):
"""Parses data to an image and associated training labels.
Args:
value: a string tensor holding a serialized tf.Example proto.
Returns:
image: image tensor that is preprocessed to have normalized values and
dimensions [output_size[0], output_size[1], 3].
labels:
cls_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, anchors_per_location]. The height_l and
width_l represent the dimensions of the class logits at the l-th level.
box_targets: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, anchors_per_location * 4]. The height_l and
width_l represent the dimensions of the bounding box regression output at
the l-th level.
num_positives: number of positive anchors in the image.
anchor_boxes: ordered dictionary with keys
[min_level, min_level+1, ..., max_level]. The values are tensors with
shape [height_l, width_l, 4] representing anchor boxes at each level.
image_info: a 2D `Tensor` that encodes the information of the image and
the applied preprocessing. It is in the format of
[[original_height, original_width], [scaled_height, scaled_width],
[y_scale, x_scale], [y_offset, x_offset]].
groundtruths:
source_id: source image id. Default value -1 if the source id is empty
in the groundtruth annotation.
boxes: groundtruth bounding box annotations. The box is represented in
[y1, x1, y2, x2] format. The tensor is padded with -1 to the fixed
dimension [self._max_num_instances, 4].
classes: groundtruth classes annotations. The tensor is padded with
-1 to the fixed dimension [self._max_num_instances].
areas: groundtruth areas annotations. The tensor is padded with -1
to the fixed dimension [self._max_num_instances].
is_crowds: groundtruth annotations to indicate if an annotation
represents a group of instances by value {0, 1}. The tensor is
padded with 0 to the fixed dimension [self._max_num_instances].
"""
with tf.name_scope('parser'):
data = self._example_decoder.decode(value)
return self._parse_fn(data)
def _parse_train_data(self, data):
"""Parses data for training and evaluation."""
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
is_crowds = data['groundtruth_is_crowd']
# Skips annotations with `is_crowd` = True.
if self._skip_crowd_during_training and self._is_training:
num_groundtruths = tf.shape(classes)[0]
with tf.control_dependencies([num_groundtruths, is_crowds]):
indices = tf.cond(
tf.greater(tf.size(is_crowds), 0),
lambda: tf.where(tf.logical_not(is_crowds))[:, 0],
lambda: tf.cast(tf.range(num_groundtruths), tf.int64))
classes = tf.gather(classes, indices)
boxes = tf.gather(boxes, indices)
# Gets original image and its size.
image = data['image']
if self._aug_policy:
if AUTOAUG_IMPORTED:
image, boxes = autoaugment_utils.distort_image_with_autoaugment(
image, boxes, self._aug_policy)
else:
raise ImportError('Unable to get autoaugment_utils, likely due '
'to incompatibility with TF 2.X.')
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Flips image randomly during training.
if self._aug_rand_hflip:
image, boxes = input_utils.random_horizontal_flip(image, boxes)
# Converts boxes from normalized coordinates to pixel coordinates.
# Now the coordinates of boxes are w.r.t. the original image.
boxes = box_utils.denormalize_boxes(boxes, image_shape)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=self._aug_scale_min,
aug_scale_max=self._aug_scale_max)
image_height, image_width, _ = image.get_shape().as_list()
# Resizes and crops boxes.
# Now the coordinates of boxes are w.r.t the scaled image.
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
# Filters out ground truth boxes that are all zeros.
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
# Assigns anchor targets.
# Note that after the target assignment, box targets are absolute pixel
# offsets w.r.t. the scaled image.
input_anchor = anchor.Anchor(
self._min_level, self._max_level, self._num_scales,
self._aspect_ratios, self._anchor_size, (image_height, image_width))
anchor_labeler = anchor.AnchorLabeler(
input_anchor, self._match_threshold, self._unmatched_threshold)
(cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(
boxes,
tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
# Packs labels for model_fn outputs.
labels = {
'cls_targets': cls_targets,
'box_targets': box_targets,
'anchor_boxes': input_anchor.multilevel_boxes,
'num_positives': num_positives,
'image_info': image_info,
}
return image, labels
def _parse_eval_data(self, data):
"""Parses data for training and evaluation."""
groundtruths = {}
classes = data['groundtruth_classes']
boxes = data['groundtruth_boxes']
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_utils.denormalize_boxes(boxes, image_shape)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
image_height, image_width, _ = image.get_shape().as_list()
# Resizes and crops boxes.
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
# Filters out ground truth boxes that are all zeros.
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
# Assigns anchors.
input_anchor = anchor.Anchor(
self._min_level, self._max_level, self._num_scales,
self._aspect_ratios, self._anchor_size, (image_height, image_width))
anchor_labeler = anchor.AnchorLabeler(
input_anchor, self._match_threshold, self._unmatched_threshold)
(cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(
boxes,
tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
# Sets up groundtruth data for evaluation.
groundtruths = {
'source_id': data['source_id'],
'height': data['height'],
'width': data['width'],
'num_groundtruths': tf.shape(data['groundtruth_classes']),
'boxes': box_utils.denormalize_boxes(
data['groundtruth_boxes'], image_shape),
'classes': data['groundtruth_classes'],
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
groundtruths['source_id'] = dataloader_utils.process_source_id(
groundtruths['source_id'])
groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
# Packs labels for model_fn outputs.
labels = {
'cls_targets': cls_targets,
'box_targets': box_targets,
'anchor_boxes': input_anchor.multilevel_boxes,
'num_positives': num_positives,
'image_info': image_info,
'groundtruths': groundtruths,
}
return image, labels
def _parse_predict_data(self, data):
"""Parses data for prediction."""
# Gets original image and its size.
image = data['image']
image_shape = tf.shape(image)[0:2]
# Normalizes image with mean and std pixel values.
image = input_utils.normalize_image(image)
# Resizes and crops image.
image, image_info = input_utils.resize_and_crop_image(
image,
self._output_size,
padded_size=input_utils.compute_padded_size(
self._output_size, 2 ** self._max_level),
aug_scale_min=1.0,
aug_scale_max=1.0)
image_height, image_width, _ = image.get_shape().as_list()
# If bfloat16 is used, casts input image to tf.bfloat16.
if self._use_bfloat16:
image = tf.cast(image, dtype=tf.bfloat16)
# Computes anchor boxes.
input_anchor = anchor.Anchor(
self._min_level, self._max_level, self._num_scales,
self._aspect_ratios, self._anchor_size, (image_height, image_width))
labels = {
'anchor_boxes': input_anchor.multilevel_boxes,
'image_info': image_info,
}
# If mode is PREDICT_WITH_GT, returns groundtruths and training targets
# in labels.
if self._mode == ModeKeys.PREDICT_WITH_GT:
# Converts boxes from normalized coordinates to pixel coordinates.
boxes = box_utils.denormalize_boxes(
data['groundtruth_boxes'], image_shape)
groundtruths = {
'source_id': data['source_id'],
'height': data['height'],
'width': data['width'],
'num_detections': tf.shape(data['groundtruth_classes']),
'boxes': boxes,
'classes': data['groundtruth_classes'],
'areas': data['groundtruth_area'],
'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
}
groundtruths['source_id'] = dataloader_utils.process_source_id(
groundtruths['source_id'])
groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
groundtruths, self._max_num_instances)
labels['groundtruths'] = groundtruths
# Computes training objective for evaluation loss.
classes = data['groundtruth_classes']
image_scale = image_info[2, :]
offset = image_info[3, :]
boxes = input_utils.resize_and_crop_boxes(
boxes, image_scale, image_info[1, :], offset)
# Filters out ground truth boxes that are all zeros.
indices = box_utils.get_non_empty_box_indices(boxes)
boxes = tf.gather(boxes, indices)
classes = tf.gather(classes, indices)
# Assigns anchors.
anchor_labeler = anchor.AnchorLabeler(
input_anchor, self._match_threshold, self._unmatched_threshold)
(cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(
boxes,
tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
labels['cls_targets'] = cls_targets
labels['box_targets'] = box_targets
labels['num_positives'] = num_positives
return {
'images': image,
'labels': labels,
}
|
|
##
#TOPIC: MINIMIZERS
#Program code that generates minimizers from an input file.
#The file, the minimizer length (k), the number of k-mers inspected in each window (w), and the length of the substrings of the input sequence on which minimizers are searched
# are given on the command line. Example call with k = 20, w = 15 and substring length 60 over the file e_coli.fa: >python minimizer_python_2.py e_coli.fa 20 15 60
#After it finishes, the program prints the number of minimizers found and the running time to the console, and writes the minimizers found to the file minimizers.txt
#
#Sven Srebot
##
import sys
import time
from collections import defaultdict
from memory_profiler import profile
#
#The fifo class implements a sliding window of fixed size. The window size depends on the parameters the program is run with (k + w - 1);
#when a symbol is added to a full window, the symbol at the leftmost position is dropped and the new symbol is appended at the rightmost position.
#
class fifo():
def __init__(self, w, k):
self.list = []
self.w = w
self.k = k
self.size = self.w + self.k - 1
def add(self, val):
if len(self.list) == self.size:
self.list = self.list[1:]
self.list.append(val)
def last_k_1(self):
return self.list[self.w:]
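#
#A minimal usage sketch of the fifo window above (illustrative only, not part of the original program):
#with w = 3 and k = 2 the window holds w + k - 1 = 4 symbols and, once full, drops the leftmost
#symbol whenever a new one is appended on the right.
#
def _fifo_window_example():
    window = fifo(3, 2)
    for symbol in "ACGTA":
        window.add(symbol)
    # After five additions only the last four symbols remain: ['C', 'G', 'T', 'A'].
    return window.list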
#
#The minimizer() class implements all the functions involved in generating the minimizers
#
class minimizer():
def __init__(self, w, k, file_name, sub_size):
self.w = w
self.k = k
self.num = 0
self.input = []
self.size = self.w + self.k - 1
self.window = fifo(w, k)
self.sub_size = sub_size
self.br = 0
self.minimized_dict = defaultdict(list)
self.file = open(str(file_name), "r")
self.lessA = ["C"]
self.lessT_U = ["C", "A"]
self.lessG = ["C", "A", "T", "U"]
self.all = ["C", "A", "T", "U", "G"]
self.lessT1 = ["G"]
self.lessA1 = ["G", "T"]
self.lessC1 = ["G", "T", "A"]
self.i = 0
def read_n(self, n):
for a in range(0, n):
new = self.file.read(1)
if new == "\n":
break
else:
self.window.add(new)
return (a + 1)
#
#The reverse_compl() function is called after minimizer generation over the input sequence has finished; it builds the reverse complement of the input sequence
#
def reverse_compl(self):
#print self.input
rev1 = reversed(self.input)
rev = []
self.input = []
for a in rev1:
rev.append(a)
for a in rev:
if a == "A":
self.input.append("T")
elif a == "C":
self.input.append("G")
elif a == "G":
self.input.append("C")
elif a == "T" or a == "U":
self.input.append("A")
return
#
#The compare() function compares the minimizer found in the current window with a new candidate minimizer.
#The comparison is not plain alphabetical ordering: at even minimizer positions (0, 2, 4, 6, ...) the letter C has the lowest weight, followed by A, then T, then G,
# while at odd minimizer positions (1, 3, 5, 7, ...) the letter G has the lowest weight, followed by T, then A, then C.
#Consequently the smallest possible string of length 3 is 'CGC' and the largest is 'GCG' (a standalone sketch of this ordering follows the minimizer class below).
#
def compare(self, i):
pos = i + self.k
last_position = 0
same = 0
for j in range(0, self.k):
i_j = i + j
if j%2 == 0:
if self.best[j] == self.window.list[i_j]:
if same == 0:
last_position = (self.p + i)
same = same + 1
if same == self.k and self.last_position != last_position:
self.best = self.window.list[i:pos]
self.position = i
break
continue
elif j == 0 or same >= j:
if self.best[j] not in self.all and self.window.list[i_j] in self.all:
self.best = self.window.list[i:pos]
self.position = i
break
elif self.best[j] == "C":
break
elif self.best[j] == "A":
if self.window.list[i_j] in self.lessA:
self.best = self.window.list[i:pos]
self.position = i
break
elif self.best[j] == "T" or self.best[j] == "U":
if self.window.list[i_j] in self.lessT_U:
self.best = self.window.list[i:pos]
self.position = i
break
elif self.best[j] == "G":
if self.window.list[i_j] in self.lessG:
self.best = self.window.list[i:pos]
self.position = i
break
else:
break
else:
break
else:
if self.best[j] == self.window.list[i_j]:
if same == 0:
last_position = (self.p + i)
same = same + 1
if same == self.k and self.last_position != last_position:
self.best = self.window.list[i:pos]
self.position = i
break
continue
elif j == 0 or same >= j:
if self.best[j] not in self.all and self.window.list[i_j] in self.all:
self.best = self.window.list[i:pos]
self.position = i
break
elif self.best[j] == "G":
break
elif self.best[j] == "T" or self.best[j] == "U":
if self.window.list[i_j] in self.lessT1:
self.best = self.window.list[i:pos]
self.position = i
break
elif self.best[j] == "A":
if self.window.list[i_j] in self.lessA1:
self.best = self.window.list[i:pos]
self.position = i
break
elif self.best[j] == "C":
if self.window.list[i_j] in self.lessC1:
self.best = self.window.list[i:pos]
self.position = i
break
else:
break
else:
break
#
#The check_all() function finds the best k-mer of the whole window by repeatedly calling compare(). It is invoked when the current minimizer drops out of the window, or at the start of the algorithm
#
def check_all(self):
self.best = self.window.list[0:self.k]
best = []
best = self.best
for i in range(1, self.w):
self.compare(i)
if best != self.best:
pass
else:
self.position = 0
return
#
#The check() function tests whether the last k-mer in the window is better than the most recently added minimizer; if it is, it becomes the new minimizer.
#
def check(self):
i = self.size - self.k
self.compare(i)
return
#
#check_pos_substring() checks whether the window has reached the end of the substring being processed; returns 1 if so, 0 otherwise
#
def check_pos_substring(self):
if (self.pos_substring % (self.sub_size)) == 0 and self.pos_substring != 0:
return 1
else:
return 0
#
#The minimize() function invokes the substring-processing routine for every substring of the input file
#
def minimize(self, checker, inverz):
self.inverz = inverz
self.i = self.i + 1
self.last_position = -1
self.pos_substring = 0
self.no_substring = 0
result, checker = self.process_substring(checker)
while result == 1 and checker == 0:
self.no_substring = self.no_substring + 1
self.best = []
self.last_position = -1
self.pos_substring = 0
self.window.list = []
result, checker = self.process_substring(checker)
self.file.close()
return result, checker
#
#The end() function emits the final minimizer of a substring
#
def end(self):
l = len(self.window.list)
k_end = self.window.list[(l - self.k):]
if k_end != self.best:
self.best = k_end
self.b = ""
for a in k_end:
self.b = self.b + str(a)
self.minimized_dict[self.b].append((self.no_substring, self.pos_substring - self.k, self.inverz))
self.num = self.num + 1
#
#The process_substring() function reads new symbols until it reaches the end of the substring; while reading, it slides the window and searches it for minimizers
#
def process_substring(self, checker):
max_size = 0
k_end = []
if checker == 1:
new = ">"
else:
new = self.file.read(1)
if new == "\n" or new == " " or new == "":
new = self.file.read(1)
if new == "\n" or new == " " or new == "":
return 1, 1
br = 0
self.p = 1
if new == ">":
self.com = self.file.readline()
new = self.file.read(1)
self.window.add(new)
self.input.append(new)
self.br = self.br + 1
self.pos_substring = self.pos_substring + 1
max_size = self.check_pos_substring()
best = 0
case = 0
e = 0
lp = -1
while new in self.all:
new = self.file.read(1)
before = self.window.list
if new == "\n" or new == " " or new == "":
new = self.file.read(1)
if new == "\n" or new == " " or new == "":
return 1, 1
if new == ">":
return 2, 1
if new in self.all:
self.window.add(new)
self.p = self.p + 1
else:
return 1, 1
if self.inverz == 0:
self.input.append(new)
self.br = self.br + 1
self.pos_substring = self.pos_substring + 1
max_size = self.check_pos_substring()
if (len(self.window.list) == self.k and e == 0):
self.position = 0
k_start = self.window.list[0:self.k]
self.best = k_start
self.b = ""
for a in self.best:
self.b = self.b + str(a)
self.minimized_dict[self.b].append((self.no_substring, 0, self.inverz))
self.num = self.num + 1
self.best = k_start
best = self.best
e = 1
if len(self.window.list) == self.size:
lp = self.last_position
self.check_all()
if ((best == 0) or (best != self.best) or (best == self.best and lp != self.last_position)):
self.b = ""
for a in self.best:
self.b = self.b + str(a)
self.minimized_dict[self.b].append((self.no_substring, self.pos_substring - self.size + self.position, self.inverz))
self.num = self.num + 1
self.last_position = self.pos_substring - self.size + self.position
pos = self.position
case = 0
if max_size == 0:
for check in range(0, pos):
new = self.file.read(1)
if new == "\n" or new == " " or new == "":
new = self.file.read(1)
if new == "\n" or new == " " or new == "":
return 1, 1
if new == ">":
return 2, 1
if new in self.all:
if self.inverz == 0:
self.input.append(new)
self.br = self.br + 1
self.window.add(new)
self.pos_substring = self.pos_substring + 1
max_size = self.check_pos_substring()
self.p = self.p + 1
best = self.best
lp = self.last_position
self.check()
if (best != self.best) or (best == self.best and lp != self.last_position):
self.b = ""
for a in self.best:
self.b = self.b + str(a)
self.minimized_dict[self.b].append((self.no_substring, self.pos_substring - self.size + self.position, self.inverz))
self.num = self.num + 1
self.last_position = self.pos_substring - self.size + self.position
case = 1
if max_size == 1:
self.end()
max_size = 0
return 1, 0
else:
self.end()
max_size = 0
return 1, 0
best = self.best
lp = self.last_position
if max_size == 1:
self.end()
max_size = 0
return 1, 0
if new == ">":
return 2, 1
return 1, 0
#
#The print_dict() function writes all minimizers that were found to a file
#
def print_dict(self, output):
for key in self.minimized_dict:
output.write( key + ": " + "\n")
for value in self.minimized_dict[key]:
for a in range(0, self.k):
output.write(" ")
output.write("(" + str(value[0]) + ", " + str(value[1]) + ", " + str(value[2]) + ")" + "\n")
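#
#An illustrative re-statement (not part of the original program) of the ordering that compare() above
#implements: even positions use the weight order C < A < T/U < G, odd positions use G < T/U < A < C,
#so for 3-mers the smallest string is 'CGC' and the largest is 'GCG'.
#
def _minimizer_order_key(kmer):
    even_rank = {"C": 0, "A": 1, "T": 2, "U": 2, "G": 3}
    odd_rank = {"G": 0, "T": 1, "U": 1, "A": 2, "C": 3}
    return tuple(even_rank[c] if i % 2 == 0 else odd_rank[c]
                 for i, c in enumerate(kmer))
#
#For example, sorted(["GCG", "ACA", "CGC"], key=_minimizer_order_key) returns ['CGC', 'ACA', 'GCG'].
#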
#
#The main() function calls minimize() first for the input sequence and then for its reverse complement, then writes the minimizers to a file
# and measures the running time.
#
@profile
def main(argv):
start = time.time()
file_name = str(sys.argv[1])
k = int(sys.argv[2])
w = int(sys.argv[3])
sub_size = int(sys.argv[4])
checker = 0
out = "minimizers.txt"
output = open(out, "w+")
minimizer1 = minimizer(w, k, file_name, sub_size)
result, checker = minimizer1.minimize(0, 0)
minimizer1.window.list = []
while result == 2:
result, checker = minimizer1.minimize(checker, 0)
output.write(str(minimizer1.com) + "\n")
minimizer1.minimized = []
minimizer1.window.list = []
if result != 2:
new = minimizer1.file.read(1)
minimizer1.reverse_compl()
minimizer1.window.list = []
complement = "complement.txt"
compl = open(complement, "w+")
compl.write(">" + str(minimizer1.com))
for element in minimizer1.input:
compl.write(str(element))
compl.close()
minimizer1.file = open("complement.txt", "r")
result, checker = minimizer1.minimize(0, 1)
minimizer1.minimized = []
minimizer1.window.list = []
while result == 2:
result, checker = minimizer1.minimize(checker, 1)
minimizer1.minimized = []
minimizer1.window.list = []
if result != 2:
new = minimizer1.file.read(1)
end = time.time()
print "Number of minimizers(total): ", minimizer1.num
print "Time: " + str((round(end - start, 5))) + " s"
minimizer1.print_dict(output)
return
if __name__ == "__main__":
main(sys.argv[1:])
|
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Syncs a database table to the `DocType` (metadata)
.. note:: This module is only used internally
"""
import os
import frappe
from frappe import _
from frappe.utils import cstr
type_map = {
'Currency': ('decimal', '18,6')
,'Int': ('int', '11')
,'Float': ('decimal', '18,6')
,'Percent': ('decimal', '18,6')
,'Check': ('int', '1')
,'Small Text': ('text', '')
,'Long Text': ('longtext', '')
,'Code': ('text', '')
,'Text Editor': ('text', '')
,'Date': ('date', '')
,'Datetime': ('datetime', '6')
,'Time': ('time', '6')
,'Text': ('text', '')
,'Data': ('varchar', '255')
,'Link': ('varchar', '255')
,'Dynamic Link':('varchar', '255')
,'Password': ('varchar', '255')
,'Select': ('varchar', '255')
,'Read Only': ('varchar', '255')
,'Attach': ('varchar', '255')
}
default_columns = ['name', 'creation', 'modified', 'modified_by', 'owner', 'docstatus', 'parent',\
'parentfield', 'parenttype', 'idx']
default_shortcuts = ['_Login', '__user', '_Full Name', 'Today', '__today']
# -------------------------------------------------
# Class database table
# -------------------------------------------------
class DbTable:
def __init__(self, doctype, prefix = 'tab'):
self.doctype = doctype
self.name = prefix + doctype
self.columns = {}
self.current_columns = {}
# lists for change
self.add_column = []
self.change_type = []
self.add_index = []
self.drop_index = []
self.set_default = []
# load
self.get_columns_from_docfields()
def create(self):
add_text = ''
# columns
column_defs = self.get_column_definitions()
if column_defs: add_text += ',\n'.join(column_defs) + ',\n'
# index
index_defs = self.get_index_definitions()
if index_defs: add_text += ',\n'.join(index_defs) + ',\n'
# create table
frappe.db.sql("""create table `%s` (
name varchar(255) not null primary key,
creation datetime(6),
modified datetime(6),
modified_by varchar(255),
owner varchar(255),
docstatus int(1) default '0',
parent varchar(255),
parentfield varchar(255),
parenttype varchar(255),
idx int(8),
%sindex parent(parent))
ENGINE=InnoDB
CHARACTER SET=utf8""" % (self.name, add_text))
def get_columns_from_docfields(self):
"""
get columns from docfields and custom fields
"""
fl = frappe.db.sql("SELECT * FROM tabDocField WHERE parent = %s", self.doctype, as_dict = 1)
try:
custom_fl = frappe.db.sql("""\
SELECT * FROM `tabCustom Field`
WHERE dt = %s AND docstatus < 2""", (self.doctype,), as_dict=1)
if custom_fl: fl += custom_fl
except Exception, e:
if e.args[0]!=1146: # ignore no custom field
raise
for f in fl:
self.columns[f['fieldname']] = DbColumn(self, f['fieldname'],
f['fieldtype'], f.get('length'), f.get('default'),
f.get('search_index'), f.get('options'))
def get_columns_from_db(self):
self.show_columns = frappe.db.sql("desc `%s`" % self.name)
for c in self.show_columns:
self.current_columns[c[0]] = {'name': c[0], 'type':c[1], 'index':c[3], 'default':c[4]}
def get_column_definitions(self):
column_list = [] + default_columns
ret = []
for k in self.columns.keys():
if k not in column_list:
d = self.columns[k].get_definition()
if d:
ret.append('`'+ k+ '` ' + d)
column_list.append(k)
return ret
def get_index_definitions(self):
ret = []
for key, col in self.columns.items():
if col.set_index and col.fieldtype in type_map and \
type_map.get(col.fieldtype)[0] not in ('text', 'longtext'):
ret.append('index `' + key + '`(`' + key + '`)')
return ret
# GET foreign keys
def get_foreign_keys(self):
fk_list = []
txt = frappe.db.sql("show create table `%s`" % self.name)[0][1]
for line in txt.split('\n'):
if line.strip().startswith('CONSTRAINT') and line.find('FOREIGN')!=-1:
try:
fk_list.append((line.split('`')[3], line.split('`')[1]))
except IndexError:
pass
return fk_list
# Drop foreign keys
def drop_foreign_keys(self):
if not self.drop_foreign_key:
return
fk_list = self.get_foreign_keys()
# make dictionary of constraint names
fk_dict = {}
for f in fk_list:
fk_dict[f[0]] = f[1]
# drop
for col in self.drop_foreign_key:
frappe.db.sql("set foreign_key_checks=0")
frappe.db.sql("alter table `%s` drop foreign key `%s`" % (self.name, fk_dict[col.fieldname]))
frappe.db.sql("set foreign_key_checks=1")
def sync(self):
if not self.name in DbManager(frappe.db).get_tables_list(frappe.db.cur_db_name):
self.create()
else:
self.alter()
def alter(self):
self.get_columns_from_db()
for col in self.columns.values():
col.check(self.current_columns.get(col.fieldname, None))
query = []
for col in self.add_column:
query.append("add column `{}` {}".format(col.fieldname, col.get_definition()))
for col in self.change_type:
query.append("change `{}` `{}` {}".format(col.fieldname, col.fieldname, col.get_definition()))
for col in self.add_index:
# if the index key does not exist
if not frappe.db.sql("show index from `%s` where key_name = %s" %
(self.name, '%s'), col.fieldname):
query.append("add index `{}`(`{}`)".format(col.fieldname, col.fieldname))
for col in self.drop_index:
if col.fieldname != 'name': # primary key
# if index key exists
if frappe.db.sql("show index from `%s` where key_name = %s" %
(self.name, '%s'), col.fieldname):
query.append("drop index `{}`".format(col.fieldname))
for col in list(set(self.set_default).difference(set(self.change_type))):
if col.fieldname=="name":
continue
if not col.default:
col_default = "null"
else:
col_default = '"{}"'.format(col.default.replace('"', '\\"'))
query.append('alter column `{}` set default {}'.format(col.fieldname, col_default))
if query:
frappe.db.sql("alter table `{}` {}".format(self.name, ", ".join(query)))
class DbColumn:
def __init__(self, table, fieldname, fieldtype, length, default, set_index, options):
self.table = table
self.fieldname = fieldname
self.fieldtype = fieldtype
self.length = length
self.set_index = set_index
self.default = default
self.options = options
def get_definition(self, with_default=1):
ret = get_definition(self.fieldtype)
if with_default and self.default and (self.default not in default_shortcuts) \
and not self.default.startswith(":") and ret not in ['text', 'longtext']:
ret += ' default "' + self.default.replace('"', '\\"') + '"'
return ret
def check(self, current_def):
column_def = self.get_definition(0)
# no columns
if not column_def:
return
# to add?
if not current_def:
self.fieldname = validate_column_name(self.fieldname)
self.table.add_column.append(self)
return
# type
if current_def['type'] != column_def:
self.table.change_type.append(self)
# index
else:
if (current_def['index'] and not self.set_index):
self.table.drop_index.append(self)
if (not current_def['index'] and self.set_index and not (column_def in ['text', 'longtext'])):
self.table.add_index.append(self)
# default
if (self.default_changed(current_def) and (self.default not in default_shortcuts) and not cstr(self.default).startswith(":") and not (column_def in ['text','longtext'])):
self.table.set_default.append(self)
def default_changed(self, current_def):
if "decimal" in current_def['type']:
try:
return float(current_def['default'])!=float(self.default)
except TypeError:
return True
else:
return current_def['default'] != self.default
class DbManager:
"""
Basically, a wrapper for oft-used MySQL commands, like SHOW TABLES, SHOW DATABASES, SHOW VARIABLES, etc.
#TODO:
0. Simplify / create settings for the restore database source folder
0a. Merge restore database and extract_sql(from frappe_server_tools).
1. Setter and getter for different mysql variables.
2. Setter and getter for mysql variables at global level??
"""
def __init__(self,db):
"""
Pass root_conn here for access to all databases.
"""
if db:
self.db = db
def get_variables(self,regex):
"""
Get variables that match the passed pattern regex
"""
return list(self.db.sql("SHOW VARIABLES LIKE '%s'"%regex))
def get_table_schema(self,table):
"""
Just returns the output of DESC `table`.
"""
return list(self.db.sql("DESC `%s`"%table))
def get_tables_list(self,target=None):
"""get list of tables"""
if target:
self.db.use(target)
return [t[0] for t in self.db.sql("SHOW TABLES")]
def create_user(self,user,password):
#Create user if it doesn't exist.
try:
if password:
self.db.sql("CREATE USER '%s'@'localhost' IDENTIFIED BY '%s';" % (user[:16], password))
else:
self.db.sql("CREATE USER '%s'@'localhost';"%user[:16])
except Exception:
raise
def delete_user(self,target):
# delete user if exists
try:
self.db.sql("DROP USER '%s'@'localhost';" % target)
except Exception, e:
if e.args[0]==1396:
pass
else:
raise
def create_database(self,target):
if target in self.get_database_list():
self.drop_database(target)
self.db.sql("CREATE DATABASE IF NOT EXISTS `%s` ;" % target)
def drop_database(self,target):
self.db.sql("DROP DATABASE IF EXISTS `%s`;"%target)
def grant_all_privileges(self,target,user):
self.db.sql("GRANT ALL PRIVILEGES ON `%s`.* TO '%s'@'localhost';" % (target, user))
self.db.sql("GRANT SELECT ON %s.* to 'smarttailor'@'localhost';" %(target))
def grant_select_privilges(self,db,table,user):
if table:
self.db.sql("GRANT SELECT ON %s.%s to '%s'@'localhost';" % (db,table,user))
else:
self.db.sql("GRANT SELECT ON %s.* to '%s'@'localhost';" % (db,user))
def flush_privileges(self):
self.db.sql("FLUSH PRIVILEGES")
def get_database_list(self):
"""get list of databases"""
return [d[0] for d in self.db.sql("SHOW DATABASES")]
def restore_database(self,target,source,user,password):
from frappe.utils import make_esc
esc = make_esc('$ ')
os.system("mysql -u %s -p%s %s < %s" % \
(esc(user), esc(password), esc(target), source))
def drop_table(self,table_name):
"""drop table if exists"""
if not table_name in self.get_tables_list():
return
self.db.sql("DROP TABLE IF EXISTS %s "%(table_name))
def validate_column_name(n):
n = n.replace(' ','_').strip().lower()
import re
if re.search("[\W]", n):
frappe.throw(_("Fieldname {0} can only contain letters, numbers, spaces and underscores").format(n))
return n
def updatedb(dt):
"""
Syncs a `DocType` to the table
* creates if required
* updates columns
* updates indices
"""
res = frappe.db.sql("select ifnull(issingle, 0) from tabDocType where name=%s", (dt,))
if not res:
raise Exception, 'Wrong doctype "%s" in updatedb' % dt
if not res[0][0]:
frappe.db.commit()
tab = DbTable(dt, 'tab')
tab.sync()
frappe.db.begin()
def remove_all_foreign_keys():
frappe.db.sql("set foreign_key_checks = 0")
frappe.db.commit()
for t in frappe.db.sql("select name from tabDocType where ifnull(issingle,0)=0"):
dbtab = DbTable(t[0])
try:
fklist = dbtab.get_foreign_keys()
except Exception, e:
if e.args[0]==1146:
fklist = []
else:
raise
for f in fklist:
frappe.db.sql("alter table `tab%s` drop foreign key `%s`" % (t[0], f[1]))
def get_definition(fieldtype):
d = type_map.get(fieldtype)
if not d:
return
ret = d[0]
if d[1]:
ret += '(' + d[1] + ')'
return ret
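# A small illustration (not part of the original module) of how type_map feeds get_definition():
# a 'Currency' field maps to decimal(18,6), a 'Data' field to varchar(255), and a fieldtype that is
# missing from type_map (e.g. 'Section Break') yields None, so no column is created for it.
def _column_definition_examples():
    return {
        'Currency': get_definition('Currency'),            # 'decimal(18,6)'
        'Data': get_definition('Data'),                     # 'varchar(255)'
        'Section Break': get_definition('Section Break'),   # None (not in type_map)
    }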
def add_column(doctype, column_name, fieldtype):
frappe.db.commit()
frappe.db.sql("alter table `tab%s` add column %s %s" % (doctype,
column_name, get_definition(fieldtype)))
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Marks tasks APIs."""
import datetime
from typing import Iterable
from sqlalchemy import or_
from airflow.models.baseoperator import BaseOperator
from airflow.models.dagrun import DagRun
from airflow.models.taskinstance import TaskInstance
from airflow.operators.subdag import SubDagOperator
from airflow.utils import timezone
from airflow.utils.session import provide_session
from airflow.utils.state import State
from airflow.utils.types import DagRunType
def _create_dagruns(dag, execution_dates, state, run_type):
"""
Infers from the dates which dag runs need to be created and does so.
:param dag: the dag to create dag runs for
:param execution_dates: list of execution dates to evaluate
:param state: the state to set the dag run to
:param run_type: the dag run type; used to construct the dag run id: {run_type}__{execution_date}
:return: newly created and existing dag runs for the execution dates supplied
"""
# find out if we need to create any dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, execution_date=execution_dates)
dates_to_create = list(set(execution_dates) - {dag_run.execution_date for dag_run in dag_runs})
for date in dates_to_create:
dag_run = dag.create_dagrun(
execution_date=date,
start_date=timezone.utcnow(),
external_trigger=False,
state=state,
run_type=run_type,
)
dag_runs.append(dag_run)
return dag_runs
@provide_session
def set_state(
tasks: Iterable[BaseOperator],
execution_date: datetime.datetime,
upstream: bool = False,
downstream: bool = False,
future: bool = False,
past: bool = False,
state: str = State.SUCCESS,
commit: bool = False,
session=None,
): # pylint: disable=too-many-arguments,too-many-locals
"""
Set the state of a task instance and if needed its relatives. Can set state
for future tasks (calculated from execution_date) and retroactively
for past tasks. Will verify integrity of past dag runs in order to create
tasks that did not exist. It will not create dag runs that are missing
on the schedule (but it will do so for subdag dag runs if needed).
:param tasks: the iterable of tasks from which to work. task.task.dag needs to be set
:param execution_date: the execution date from which to start looking
:param upstream: Mark all parents (upstream tasks)
:param downstream: Mark all siblings (downstream tasks) of task_id, including SubDags
:param future: Mark all future tasks on the interval of the dag up until
last execution date.
:param past: Retroactively mark all tasks starting from start_date of the DAG
:param state: State to which the tasks need to be set
:param commit: Commit tasks to be altered to the database
:param session: database session
:return: list of tasks that have been created and updated
"""
if not tasks:
return []
if not timezone.is_localized(execution_date):
raise ValueError(f"Received non-localized date {execution_date}")
task_dags = {task.dag for task in tasks}
if len(task_dags) > 1:
raise ValueError(f"Received tasks from multiple DAGs: {task_dags}")
dag = next(iter(task_dags))
if dag is None:
raise ValueError("Received tasks with no DAG")
dates = get_execution_dates(dag, execution_date, future, past)
task_ids = list(find_task_relatives(tasks, downstream, upstream))
confirmed_dates = verify_dag_run_integrity(dag, dates)
sub_dag_run_ids = get_subdag_runs(dag, session, state, task_ids, commit, confirmed_dates)
# now look for the task instances that are affected
qry_dag = get_all_dag_task_query(dag, session, state, task_ids, confirmed_dates)
if commit:
tis_altered = qry_dag.with_for_update().all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += qry_sub_dag.with_for_update().all()
for task_instance in tis_altered:
task_instance.state = state
if state in State.finished:
task_instance.end_date = timezone.utcnow()
task_instance.set_duration()
else:
tis_altered = qry_dag.all()
if sub_dag_run_ids:
qry_sub_dag = all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates)
tis_altered += qry_sub_dag.all()
return tis_altered
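# A minimal usage sketch (not part of this module's public surface). It assumes a DAG object `dag`
# that contains a task with task_id "my_task" and a timezone-aware `logical_date`; both names are
# hypothetical. With commit=False the call only reports which task instances would be changed.
def _set_state_usage_sketch(dag, logical_date):
    task = dag.get_task("my_task")  # hypothetical task id
    return set_state(
        tasks=[task],
        execution_date=logical_date,
        downstream=True,
        state=State.SUCCESS,
        commit=False,
    )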
# Flake and pylint disagree about correct indents here
def all_subdag_tasks_query(sub_dag_run_ids, session, state, confirmed_dates): # noqa: E123
"""Get *all* tasks of the sub dags"""
qry_sub_dag = (
session.query(TaskInstance)
.filter(TaskInstance.dag_id.in_(sub_dag_run_ids), TaskInstance.execution_date.in_(confirmed_dates))
.filter(or_(TaskInstance.state.is_(None), TaskInstance.state != state))
) # noqa: E123
return qry_sub_dag
def get_all_dag_task_query(dag, session, state, task_ids, confirmed_dates):
"""Get all tasks of the main dag that will be affected by a state change"""
qry_dag = (
session.query(TaskInstance)
.filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.execution_date.in_(confirmed_dates),
TaskInstance.task_id.in_(task_ids), # noqa: E123
)
.filter(or_(TaskInstance.state.is_(None), TaskInstance.state != state))
)
return qry_dag
def get_subdag_runs(dag, session, state, task_ids, commit, confirmed_dates):
"""Go through subdag operators and create dag runs. We will only work
within the scope of the subdag. We won't propagate to the parent dag,
but we will propagate from parent to subdag.
"""
dags = [dag]
sub_dag_ids = []
while dags:
current_dag = dags.pop()
for task_id in task_ids:
if not current_dag.has_task(task_id):
continue
current_task = current_dag.get_task(task_id)
if isinstance(current_task, SubDagOperator) or current_task.task_type == "SubDagOperator":
# this works as a kind of integrity check
# it creates missing dag runs for subdag operators,
# maybe this should be moved to dagrun.verify_integrity
dag_runs = _create_dagruns(
current_task.subdag,
execution_dates=confirmed_dates,
state=State.RUNNING,
run_type=DagRunType.BACKFILL_JOB,
)
verify_dagruns(dag_runs, commit, state, session, current_task)
dags.append(current_task.subdag)
sub_dag_ids.append(current_task.subdag.dag_id)
return sub_dag_ids
def verify_dagruns(dag_runs, commit, state, session, current_task):
"""Verifies integrity of dag_runs.
:param dag_runs: dag runs to verify
:param commit: whether dag runs state should be updated
:param state: state of the dag_run to set if commit is True
:param session: session to use
:param current_task: current task
:return:
"""
for dag_run in dag_runs:
dag_run.dag = current_task.subdag
dag_run.verify_integrity()
if commit:
dag_run.state = state
session.merge(dag_run)
def verify_dag_run_integrity(dag, dates):
"""
Verify the integrity of the dag runs in case a task was added or removed,
and set the confirmed execution dates, as they might differ
from what was provided.
"""
confirmed_dates = []
dag_runs = DagRun.find(dag_id=dag.dag_id, execution_date=dates)
for dag_run in dag_runs:
dag_run.dag = dag
dag_run.verify_integrity()
confirmed_dates.append(dag_run.execution_date)
return confirmed_dates
def find_task_relatives(tasks, downstream, upstream):
"""Yield task ids and optionally ancestor and descendant ids."""
for task in tasks:
yield task.task_id
if downstream:
for relative in task.get_flat_relatives(upstream=False):
yield relative.task_id
if upstream:
for relative in task.get_flat_relatives(upstream=True):
yield relative.task_id
def get_execution_dates(dag, execution_date, future, past):
"""Returns dates of DAG execution"""
latest_execution_date = dag.get_latest_execution_date()
if latest_execution_date is None:
raise ValueError(f"DAG {dag.dag_id} has no dag runs; cannot compute execution dates")
# determine date range of dag runs and tasks to consider
end_date = latest_execution_date if future else execution_date
if 'start_date' in dag.default_args:
start_date = dag.default_args['start_date']
elif dag.start_date:
start_date = dag.start_date
else:
start_date = execution_date
start_date = execution_date if not past else start_date
if dag.schedule_interval == '@once':
dates = [start_date]
elif not dag.schedule_interval:
# If schedule_interval is None, we need to look at the existing DagRuns when the user wants
# future or past runs.
dag_runs = dag.get_dagruns_between(start_date=start_date, end_date=end_date)
dates = sorted({d.execution_date for d in dag_runs})
else:
dates = dag.date_range(start_date=start_date, end_date=end_date)
return dates
@provide_session
def _set_dag_run_state(dag_id, execution_date, state, session=None):
"""
Helper method that set dag run state in the DB.
:param dag_id: dag_id of target dag run
:param execution_date: the execution date from which to start looking
:param state: target state
:param session: database session
"""
dag_run = (
session.query(DagRun).filter(DagRun.dag_id == dag_id, DagRun.execution_date == execution_date).one()
)
dag_run.state = state
if state == State.RUNNING:
dag_run.start_date = timezone.utcnow()
dag_run.end_date = None
else:
dag_run.end_date = timezone.utcnow()
session.merge(dag_run)
@provide_session
def set_dag_run_state_to_success(dag, execution_date, commit=False, session=None):
"""
Set the dag run for a specific execution date and its task instances
to success.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
:raises: ValueError if dag or execution_date is invalid
"""
if not dag or not execution_date:
return []
# Mark the dag run to success.
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.SUCCESS, session)
# Mark all task instances of the dag run to success.
for task in dag.tasks:
task.dag = dag
return set_state(
tasks=dag.tasks, execution_date=execution_date, state=State.SUCCESS, commit=commit, session=session
)
@provide_session
def set_dag_run_state_to_failed(dag, execution_date, commit=False, session=None):
"""
Set the dag run for a specific execution date and its running task instances
to failed.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
:raises: AssertionError if dag or execution_date is invalid
"""
if not dag or not execution_date:
return []
# Mark the dag run to failed.
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.FAILED, session)
# Mark only RUNNING task instances.
task_ids = [task.task_id for task in dag.tasks]
tis = (
session.query(TaskInstance)
.filter(
TaskInstance.dag_id == dag.dag_id,
TaskInstance.execution_date == execution_date,
TaskInstance.task_id.in_(task_ids),
)
.filter(TaskInstance.state == State.RUNNING)
)
task_ids_of_running_tis = [task_instance.task_id for task_instance in tis]
tasks = []
for task in dag.tasks:
if task.task_id not in task_ids_of_running_tis:
continue
task.dag = dag
tasks.append(task)
return set_state(
tasks=tasks, execution_date=execution_date, state=State.FAILED, commit=commit, session=session
)
@provide_session
def set_dag_run_state_to_running(dag, execution_date, commit=False, session=None):
"""
Set the dag run for a specific execution date to running.
:param dag: the DAG of which to alter state
:param execution_date: the execution date from which to start looking
:param commit: commit DAG and tasks to be altered to the database
:param session: database session
:return: If commit is true, list of tasks that have been updated,
otherwise list of tasks that will be updated
"""
res = []
if not dag or not execution_date:
return res
# Mark the dag run to running.
if commit:
_set_dag_run_state(dag.dag_id, execution_date, State.RUNNING, session)
# To keep the return type consistent with the other similar functions.
return res
|
|
"""Python version compatibility code."""
import enum
import functools
import inspect
import re
import sys
from contextlib import contextmanager
from inspect import Parameter
from inspect import signature
from pathlib import Path
from typing import Any
from typing import Callable
from typing import Generic
from typing import Optional
from typing import Tuple
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
import attr
from _pytest.outcomes import fail
from _pytest.outcomes import TEST_OUTCOME
if TYPE_CHECKING:
from typing import NoReturn
from typing_extensions import Final
_T = TypeVar("_T")
_S = TypeVar("_S")
# fmt: off
# Singleton type for NOTSET, as described in:
# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions
class NotSetType(enum.Enum):
token = 0
NOTSET: "Final" = NotSetType.token # noqa: E305
# fmt: on
if sys.version_info >= (3, 8):
from importlib import metadata as importlib_metadata
else:
import importlib_metadata # noqa: F401
def _format_args(func: Callable[..., Any]) -> str:
return str(signature(func))
# The type of re.compile objects is not exposed in Python.
REGEX_TYPE = type(re.compile(""))
def is_generator(func: object) -> bool:
genfunc = inspect.isgeneratorfunction(func)
return genfunc and not iscoroutinefunction(func)
def iscoroutinefunction(func: object) -> bool:
"""Return True if func is a coroutine function (a function defined with async
def syntax, and doesn't contain yield), or a function decorated with
@asyncio.coroutine.
Note: copied and modified from Python 3.5's builtin coroutines.py to avoid
importing asyncio directly, which in turns also initializes the "logging"
module as a side-effect (see issue #8).
"""
return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False)
def is_async_function(func: object) -> bool:
"""Return True if the given function seems to be an async function or
an async generator."""
return iscoroutinefunction(func) or inspect.isasyncgenfunction(func)
def getlocation(function, curdir: Optional[str] = None) -> str:
function = get_real_func(function)
fn = Path(inspect.getfile(function))
lineno = function.__code__.co_firstlineno
if curdir is not None:
try:
relfn = fn.relative_to(curdir)
except ValueError:
pass
else:
return "%s:%d" % (relfn, lineno + 1)
return "%s:%d" % (fn, lineno + 1)
def num_mock_patch_args(function) -> int:
"""Return number of arguments used up by mock arguments (if any)."""
patchings = getattr(function, "patchings", None)
if not patchings:
return 0
mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object())
ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object())
return len(
[
p
for p in patchings
if not p.attribute_name
and (p.new is mock_sentinel or p.new is ut_mock_sentinel)
]
)
def getfuncargnames(
function: Callable[..., Any],
*,
name: str = "",
is_method: bool = False,
cls: Optional[type] = None,
) -> Tuple[str, ...]:
"""Return the names of a function's mandatory arguments.
Should return the names of all function arguments that:
* Aren't bound to an instance or type as in instance or class methods.
* Don't have default values.
* Aren't bound with functools.partial.
* Aren't replaced with mocks.
The is_method and cls arguments indicate that the function should
be treated as a bound method even though it's not, unless (in the
case of cls) the function is a static method.
The name parameter should be the original name in which the function was collected.
"""
# TODO(RonnyPfannschmidt): This function should be refactored when we
# revisit fixtures. The fixture mechanism should ask the node for
# the fixture names, and not try to obtain directly from the
# function object well after collection has occurred.
# The parameters attribute of a Signature object contains an
# ordered mapping of parameter names to Parameter instances. This
# creates a tuple of the names of the parameters that don't have
# defaults.
try:
parameters = signature(function).parameters
except (ValueError, TypeError) as e:
fail(
f"Could not determine arguments of {function!r}: {e}", pytrace=False,
)
arg_names = tuple(
p.name
for p in parameters.values()
if (
p.kind is Parameter.POSITIONAL_OR_KEYWORD
or p.kind is Parameter.KEYWORD_ONLY
)
and p.default is Parameter.empty
)
if not name:
name = function.__name__
# If this function should be treated as a bound method even though
# it's passed as an unbound method or function, remove the first
# parameter name.
if is_method or (
cls and not isinstance(cls.__dict__.get(name, None), staticmethod)
):
arg_names = arg_names[1:]
# Remove any names that will be replaced with mocks.
if hasattr(function, "__wrapped__"):
arg_names = arg_names[num_mock_patch_args(function) :]
return arg_names
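# A small illustration (not part of pytest's API) of what getfuncargnames() reports: only mandatory,
# unbound, non-defaulted parameters are returned, so a defaulted parameter such as "scope" below is
# skipped. The helper and its inner function are hypothetical.
def _getfuncargnames_example() -> Tuple[str, ...]:
    def fixture_like(request, tmp_path, scope="function"):
        pass

    # Returns ("request", "tmp_path").
    return getfuncargnames(fixture_like)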
if sys.version_info < (3, 7):
@contextmanager
def nullcontext():
yield
else:
from contextlib import nullcontext as nullcontext # noqa: F401
def get_default_arg_names(function: Callable[..., Any]) -> Tuple[str, ...]:
# Note: this code intentionally mirrors the code at the beginning of
# getfuncargnames, to get the arguments which were excluded from its result
# because they had default values.
return tuple(
p.name
for p in signature(function).parameters.values()
if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
and p.default is not Parameter.empty
)
_non_printable_ascii_translate_table = {
i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127)
}
_non_printable_ascii_translate_table.update(
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
)
def _translate_non_printable(s: str) -> str:
return s.translate(_non_printable_ascii_translate_table)
STRING_TYPES = bytes, str
def _bytes_to_ascii(val: bytes) -> str:
return val.decode("ascii", "backslashreplace")
def ascii_escaped(val: Union[bytes, str]) -> str:
r"""If val is pure ASCII, return it as an str, otherwise, escape
bytes objects into a sequence of escaped bytes:
b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6'
and escapes unicode objects into a sequence of escaped unicode
ids, e.g.:
r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944'
Note:
The obvious "v.decode('unicode-escape')" will return
valid UTF-8 unicode if it finds them in bytes, but we
want to return escaped bytes for any byte, even if they match
a UTF-8 string.
"""
if isinstance(val, bytes):
ret = _bytes_to_ascii(val)
else:
ret = val.encode("unicode_escape").decode("ascii")
return _translate_non_printable(ret)
@attr.s
class _PytestWrapper:
"""Dummy wrapper around a function object for internal use only.
Used to correctly unwrap the underlying function object when we are
creating fixtures, because we wrap the function object ourselves with a
decorator to issue warnings when the fixture function is called directly.
"""
obj = attr.ib()
def get_real_func(obj):
"""Get the real function object of the (possibly) wrapped object by
functools.wraps or functools.partial."""
start_obj = obj
for i in range(100):
# __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function
# to trigger a warning if it gets called directly instead of by pytest: we don't
# want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774)
new_obj = getattr(obj, "__pytest_wrapped__", None)
if isinstance(new_obj, _PytestWrapper):
obj = new_obj.obj
break
new_obj = getattr(obj, "__wrapped__", None)
if new_obj is None:
break
obj = new_obj
else:
from _pytest._io.saferepr import saferepr
raise ValueError(
("could not find real function of {start}\nstopped at {current}").format(
start=saferepr(start_obj), current=saferepr(obj)
)
)
if isinstance(obj, functools.partial):
obj = obj.func
return obj
def get_real_method(obj, holder):
"""Attempt to obtain the real function object that might be wrapping
``obj``, while at the same time returning a bound method to ``holder`` if
the original object was a bound method."""
try:
is_method = hasattr(obj, "__func__")
obj = get_real_func(obj)
except Exception: # pragma: no cover
return obj
if is_method and hasattr(obj, "__get__") and callable(obj.__get__):
obj = obj.__get__(holder)
return obj
def getimfunc(func):
try:
return func.__func__
except AttributeError:
return func
def safe_getattr(object: Any, name: str, default: Any) -> Any:
"""Like getattr but return default upon any Exception or any OutcomeException.
Attribute access can potentially fail for 'evil' Python objects.
See issue #214.
It catches OutcomeException because of #2490 (issue #580), new outcomes
are derived from BaseException instead of Exception (for more details
check #2707).
"""
try:
return getattr(object, name, default)
except TEST_OUTCOME:
return default
def safe_isclass(obj: object) -> bool:
"""Ignore any exception raised by inspect.isclass()."""
try:
return inspect.isclass(obj)
except Exception:
return False
if TYPE_CHECKING:
if sys.version_info >= (3, 8):
from typing import final as final
else:
from typing_extensions import final as final
elif sys.version_info >= (3, 8):
from typing import final as final
else:
def final(f):
return f
if sys.version_info >= (3, 8):
from functools import cached_property as cached_property
else:
from typing import overload
from typing import Type
class cached_property(Generic[_S, _T]):
__slots__ = ("func", "__doc__")
def __init__(self, func: Callable[[_S], _T]) -> None:
self.func = func
self.__doc__ = func.__doc__
@overload
def __get__(
self, instance: None, owner: Optional[Type[_S]] = ...
) -> "cached_property[_S, _T]":
...
@overload
def __get__(self, instance: _S, owner: Optional[Type[_S]] = ...) -> _T:
...
def __get__(self, instance, owner=None):
if instance is None:
return self
value = instance.__dict__[self.func.__name__] = self.func(instance)
return value
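# A tiny usage sketch of cached_property (illustrative only; the class below is hypothetical):
# the decorated method runs once per instance and its result is then served from the instance
# __dict__ on subsequent accesses.
class _CachedPropertyExample:
    def __init__(self) -> None:
        self.calls = 0

    @cached_property
    def answer(self) -> int:
        self.calls += 1
        return 42


# Accessing _CachedPropertyExample().answer twice on the same instance computes the value only once.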
# Perform exhaustiveness checking.
#
# Consider this example:
#
# MyUnion = Union[int, str]
#
# def handle(x: MyUnion) -> int:
# if isinstance(x, int):
# return 1
# elif isinstance(x, str):
# return 2
# else:
# raise Exception('unreachable')
#
# Now suppose we add a new variant:
#
# MyUnion = Union[int, str, bytes]
#
# After doing this, we must remember to go and update the handle
# function to handle the new variant.
#
# With `assert_never` we can do better:
#
# // raise Exception('unreachable')
# return assert_never(x)
#
# Now, if we forget to handle the new variant, the type-checker will emit a
# compile-time error, instead of the runtime error we would have gotten
# previously.
#
# This also works for Enums (if you use `is` to compare) and Literals.
def assert_never(value: "NoReturn") -> "NoReturn":
assert False, "Unhandled value: {} ({})".format(value, type(value).__name__)
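# A worked example of the exhaustiveness pattern described above (illustrative only; _Color and
# _handle_color are hypothetical and unused elsewhere in this module).
class _Color(enum.Enum):
    RED = "red"
    BLUE = "blue"


def _handle_color(color: _Color) -> int:
    if color is _Color.RED:
        return 1
    elif color is _Color.BLUE:
        return 2
    else:
        # If a new member is added to _Color, the type checker reports this call because
        # `color` can no longer be narrowed away, instead of failing only at runtime.
        return assert_never(color)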
|
|
import sublime
from ...component_index import component_index
from ... import utils
from . import cfc_utils
projects = {}
def build_project_map(project_name):
global projects
data = {}
path_completions, constructor_completions = make_completions(project_name)
data["path_completions"] = path_completions
data["constructor_completions"] = constructor_completions
projects[project_name] = data
def make_completions(project_name):
dot_paths = component_index.get_dot_paths(project_name)
path_map = map_paths(dot_paths)
path_completions = {}
constructor_completions = {}
for k in path_map:
path_completions[k] = []
constructor_completions[k] = []
for c in sorted(path_map[k], key=lambda i: i["path_part"]):
path_completions[k].append(
make_completion(c, k, dot_paths, project_name, False)
)
constructor_completions[k].append(
make_completion(c, k, dot_paths, project_name, True)
)
return path_completions, constructor_completions
def make_completion(path_part_dict, key, dot_paths, project_name, constructor):
completion = path_part_dict["path_part"]
if path_part_dict["is_cfc"] and constructor:
full_key = key + ("." if len(key) > 0 else "") + completion
constructor_completion = component_index.get_completions_by_file_path(
project_name, dot_paths[full_key.lower()]["file_path"]
)["constructor"]
if constructor_completion:
completion = completion + constructor_completion.content[4:]
else:
completion = completion + "()"
if not path_part_dict["is_cfc"]:
completion += "."
return (
path_part_dict["path_part"]
+ "\t"
+ ("cfc" if path_part_dict["is_cfc"] else "cfc path"),
completion,
)
def map_paths(dot_paths):
path_map = {}
for path_key in dot_paths:
path_parts = dot_paths[path_key]["dot_path"].split(".")
for i in range(len(path_parts)):
key = ".".join(path_parts[:i]).lower()
if key not in path_map:
path_map[key] = []
is_cfc = i == len(path_parts) - 1
path_part_dict = {"path_part": path_parts[i], "is_cfc": is_cfc}
if path_part_dict not in path_map[key]:
path_map[key].append(path_part_dict)
return path_map
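# A small illustration (not part of the plugin) of the mapping map_paths() builds. It assumes the
# dot_paths argument looks like the component index output consumed above: lowercased keys mapping
# to dicts that carry the original "dot_path". Every path prefix maps to its next segment, and only
# the final segment is flagged as a CFC.
def _map_paths_example():
    dot_paths = {"models.user": {"dot_path": "models.User"}}  # hypothetical entry
    # Returns:
    # {"": [{"path_part": "models", "is_cfc": False}],
    #  "models": [{"path_part": "User", "is_cfc": True}]}
    return map_paths(dot_paths)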
def get_tag_attributes(cfml_view):
if not cfml_view.project_name or cfml_view.project_name not in projects:
return None
if cfml_view.view.match_selector(
cfml_view.position - 1,
"meta.class.inheritance.cfml -entity.other.inherited-class.cfml",
):
cfc_path = ""
folder_cfc_path = cfc_utils.get_folder_cfc_path(cfml_view, cfc_path)
completions = []
completions.extend(
get_completions(cfml_view.project_name, cfc_path, "path_completions")
)
completions.extend(
get_completions(cfml_view.project_name, folder_cfc_path, "path_completions")
)
if len(completions) > 0:
return cfml_view.CompletionList(completions, 2, True)
if cfml_view.view.match_selector(
cfml_view.position - 1, "entity.other.inherited-class.cfml"
):
r = utils.get_scope_region_containing_point(
cfml_view.view, cfml_view.position - 1, "entity.other.inherited-class.cfml"
)
r = sublime.Region(r.begin(), cfml_view.position - len(cfml_view.prefix))
cfc_path = ".".join(cfml_view.view.substr(r).split(".")[:-1])
folder_cfc_path = cfc_utils.get_folder_cfc_path(cfml_view, cfc_path)
completions = []
completions.extend(
get_completions(cfml_view.project_name, cfc_path, "path_completions")
)
completions.extend(
get_completions(cfml_view.project_name, folder_cfc_path, "path_completions")
)
if len(completions) > 0:
return cfml_view.CompletionList(completions, 2, True)
def get_script_completions(cfml_view):
if not cfml_view.project_name or cfml_view.project_name not in projects:
return None
if cfml_view.view.match_selector(
cfml_view.position,
"meta.function-call.support.createcomponent.cfml string.quoted",
):
r = utils.get_scope_region_containing_point(
cfml_view.view, cfml_view.position, "string.quoted"
)
r = sublime.Region(r.begin(), cfml_view.position + 1)
cfc_path = cfml_view.view.substr(r)
if cfc_path[0] not in ['"', "'"] or cfc_path[-1] not in ['"', "'"]:
return None
cfc_path = ".".join(cfc_path[1:-1].split(".")[:-1])
folder_cfc_path = cfc_utils.get_folder_cfc_path(cfml_view, cfc_path)
completions = []
completions.extend(
get_completions(cfml_view.project_name, cfc_path, "path_completions")
)
completions.extend(
get_completions(cfml_view.project_name, folder_cfc_path, "path_completions")
)
if len(completions) > 0:
return cfml_view.CompletionList(completions, 2, True)
if cfml_view.view.match_selector(
cfml_view.position - 1, "meta.instance.constructor.cfml"
):
r = utils.get_scope_region_containing_point(
cfml_view.view, cfml_view.position - 1, "meta.instance.constructor.cfml"
)
r = sublime.Region(r.begin(), cfml_view.position - len(cfml_view.prefix))
cfc_path = ".".join(cfml_view.view.substr(r)[4:].split(".")[:-1])
folder_cfc_path = cfc_utils.get_folder_cfc_path(cfml_view, cfc_path)
completions = []
completions.extend(
get_completions(cfml_view.project_name, cfc_path, "constructor_completions")
)
completions.extend(
get_completions(
cfml_view.project_name, folder_cfc_path, "constructor_completions"
)
)
if len(completions) > 0:
return cfml_view.CompletionList(completions, 2, True)
return None
def get_dot_completions(cfml_view):
if not cfml_view.project_name or len(cfml_view.dot_context) == 0:
return None
component_selector = "meta.function-call.support.createcomponent.cfml"
constructor_selector = "meta.instance.constructor.cfml"
component_name = None
if cfml_view.dot_context[
0
].name == "createobject" and cfml_view.view.match_selector(
cfml_view.prefix_start - 2, component_selector
):
component_name = cfc_utils.get_component_name(
cfml_view.view.substr(cfml_view.dot_context[0].args_region)
)
elif cfml_view.view.match_selector(
cfml_view.prefix_start - 2, constructor_selector
):
component_name = ".".join([s.name for s in reversed(cfml_view.dot_context)])
elif cfc_utils.is_possible_cfc_instance(cfml_view.dot_context):
# look for variable assignment, it might be an instantiated component
component_tuple = cfc_utils.find_cfc_by_var_assignment(
cfml_view, cfml_view.prefix_start, cfml_view.dot_context[0].name
)
if component_tuple[0] is not None:
component_name = component_tuple[0]
if component_name:
completions = get_completions_by_component_name(cfml_view, component_name)
if completions:
return cfml_view.CompletionList(completions, 1, True)
return None
def get_completions(project_name, cfc_path, completion_type):
if (
cfc_path is not None
and cfc_path.lower() in projects[project_name][completion_type]
):
return projects[project_name][completion_type][cfc_path.lower()]
return []
def get_completions_by_component_name(cfml_view, component_name):
comp = component_index.get_completions_by_dot_path(
cfml_view.project_name, component_name.lower()
)
if not comp:
folder_cfc_path = cfc_utils.get_folder_cfc_path(cfml_view, component_name)
if folder_cfc_path:
comp = component_index.get_completions_by_dot_path(
cfml_view.project_name, folder_cfc_path
)
if comp:
filtered_completions = []
for completion in comp["functions"]:
if not completion.private:
filtered_completions.append(
(completion.key + "\t" + completion.hint, completion.content)
)
return filtered_completions
return None
|
|
# -*- coding: utf-8 -*-
from lxml.etree import Element
from .namespaces import STREAM_NS_URI, STREAM_ERROR_NS_URI, STANZA_ERROR_NS_URI
from .stanzas import XML_LANG
'''
Concrete :class:StanzaError types.
* BadRequestStanzaError
* ConflictStanzaError
* FeatureNotImplementedStanzaError
* ForbiddenStanzaError
* GoneStanzaError
* InternalServerErrorStanzaError
* ItemNotFoundStanzaError
* JidMalformedStanzaError
* NotAcceptableStanzaError
* NotAllowedStanzaError
* NotAuthorizedStanzaError
* PolicyViolationStanzaError
* RecipientUnavailableStanzaError
* RedirectStanzaError
* RegistrationRequiredStanzaError
* RemoteServerNotFoundStanzaError
* RemoteServerTimeoutStanzaError
* ResourceConstraintStanzaError
* ServiceUnavailableStanzaError
* SubscriptionRequiredStanzaError
* UndefinedConditionStanzaError
* UnexpectedRequestStanzaError
Concrete :code:StreamError types.
* BadFormatStreamError
* BadNamespacePrefixStreamError
* ConflictStreamError
* ConnectionTimeoutStreamError
* HostGoneStreamError
* HostUnknownStreamError
* ImproperAddressingStreamError
* InternalServerErrorStreamError
* InvalidFromStreamError
* InvalidNamespaceStreamError
* InvalidXmlStreamError
* NotAuthorizedStreamError
* NotWellFormedStreamError
* PolicyViolationStreamError
* RemoteConnectionFailedStreamError
* ResetStreamError
* ResourceConstraintStreamError
* RestrictedXmlStreamError
* SeeOtherHostStreamError
* SystemShutdownStreamError
* UndefinedConditionStreamError
* UnsupportedEncodingStreamError
* UnsupportedFeatureStreamError
* UnsupportedStanzaTypeStreamError
* UnsupportedVersionStreamError
'''
class Error(RuntimeError):
'''A general exception type for errors in the vexmpp domain.'''
TYPE_AUTH = "auth"
TYPE_CANCEL = "cancel"
TYPE_CONTINUE = "continue"
TYPE_MODIFY = "modify"
TYPE_WAIT = "wait"
# Tuple format (name, type, default-text, default_lang)
STANZA_ERRORS = (
("bad-request", TYPE_MODIFY, "", None),
("conflict", TYPE_CANCEL, "", None),
("feature-not-implemented", TYPE_CANCEL, "", None),
("forbidden", TYPE_AUTH, "", None),
("gone", TYPE_CANCEL, "", None),
("internal-server-error", TYPE_CANCEL, "", None),
("item-not-found", TYPE_CANCEL, "", None),
("jid-malformed", TYPE_MODIFY, "", None),
("not-acceptable", TYPE_MODIFY, "", None),
("not-allowed", TYPE_CANCEL, "", None),
("not-authorized", TYPE_AUTH, "", None),
("policy-violation", TYPE_MODIFY, "", None),
("recipient-unavailable", TYPE_WAIT, "", None),
("redirect", TYPE_MODIFY, "", None),
("registration-required", TYPE_AUTH, "", None),
("remote-server-not-found", TYPE_CANCEL, "", None),
("remote-server-timeout", TYPE_WAIT, "", None),
("resource-constraint", TYPE_WAIT, "", None),
("service-unavailable", TYPE_CANCEL, "", None),
("subscription-required", TYPE_AUTH, "", None),
("undefined-condition", TYPE_AUTH, "", None),
("unexpected-request", TYPE_WAIT, "", None),
)
# Tuple format (name, default-text, default_lang)
STREAM_ERRORS = (
("bad-format", "", None),
("bad-namespace-prefix", "", None),
("conflict", "", None),
("connection-timeout", "", None),
("host-gone", "", None),
("host-unknown", "", None),
("improper-addressing", "", None),
("internal-server-error", "", None),
("invalid-from", "", None),
("invalid-namespace", "", None),
("invalid-xml", "", None),
("not-authorized", "", None),
("not-well-formed", "", None),
("policy-violation", "", None),
("remote-connection-failed", "", None),
("reset", "", None),
("resource-constraint", "", None),
("restricted-xml", "", None),
("see-other-host", "", None),
("system-shutdown", "", None),
("undefined-condition", "", None),
("unsupported-encoding", "", None),
("unsupported-feature", "", None),
("unsupported-stanza-type", "", None),
("unsupported-version", "", None),
)
class XmppError(RuntimeError):
'''An error for XMPP logic errors.'''
cond = None
text = None
lang = None
app_err = None
def __str__(self):
return "%s (text: %s [lang: %s]): %s" % (self.cond, self.text,
self.lang, self.app_err)
class StanzaError(XmppError):
type = None
def __init__(self):
raise NotImplementedError("Instantiate a concrete error type")
def __str__(self):
return "%s (type: %s text: %s [lang: %s]): %s" % (self.cond, self.type,
self.text, self.lang,
self.app_err)
@property
def xml(self):
assert(self.cond)
e = Element("error")
nsmap = {None: STANZA_ERROR_NS_URI}
e.append(Element("{%s}%s" % (STANZA_ERROR_NS_URI, self.cond),
nsmap=nsmap))
e[0].attrib["type"] = self.type
if self.text:
txt = Element("{%s}text" % STANZA_ERROR_NS_URI, nsmap=nsmap)
if self.lang:
txt.attrib[XML_LANG] = self.lang
txt.text = self.text
e.append(txt)
if self.app_err:
e.append(self.app_err)
return e
class StreamError(XmppError):
def __init__(self):
raise NotImplementedError("Instantiate a concrete error type")
@property
def xml(self):
assert(self.cond)
e = Element("{%s}error" % STREAM_NS_URI,
nsmap={"stream": STREAM_NS_URI})
nsmap = {None: STREAM_ERROR_NS_URI}
e.append(Element("{%s}%s" % (STREAM_ERROR_NS_URI, self.cond),
nsmap=nsmap))
if self.text:
txt = Element("{%s}text" % STREAM_ERROR_NS_URI, nsmap=nsmap)
if self.lang:
txt.attrib[XML_LANG] = self.lang
txt.text = self.text
e.append(txt)
if self.app_err:
e.append(self.app_err)
return e
def _makeClassName(cond, stanza_err):
'''Simple util for turning stanza/stream error conditions into class
names.'''
class_name = ""
    next_letter_cap = True
    for c in cond:
        if next_letter_cap:
            assert(c != '-')
            c = c.upper()
            next_letter_cap = False
        elif c == '-':
            next_letter_cap = True
continue
class_name += c
if stanza_err:
return "%sStanzaError" % class_name
else:
return "%sStreamError" % class_name
# Make concrete error class types for all the defined errors.
_global_dict = globals()
for _cond, _type, _txt, _lang in STANZA_ERRORS:
_name = _makeClassName(_cond, True)
def stanzaErrCtor(self, text=None, type=None, lang=None, app_err=None):
# Use constructor values or defer to the Class vars
self.type = type or self.__class__.type
self.text = text or self.__class__.text
self.lang = lang or self.__class__.lang
self.app_err = (app_err if app_err is not None
else self.__class__.app_err)
_global_dict[_name] = type(_name, (StanzaError,),
{"cond": _cond,
"type": _type,
"text": _txt,
"lang": _lang,
"app_err": None,
"__init__": stanzaErrCtor,
})
for _cond, _txt, _lang in STREAM_ERRORS:
_name = _makeClassName(_cond, False)
def streamErrCtor(self, text=None, lang=None, app_err=None):
self.text = text or self.__class__.text
self.lang = lang or self.__class__.lang
self.app_err = app_err or self.__class__.app_err
_global_dict[_name] = type(_name, (StreamError,),
{"cond": _cond,
"text": _txt,
"lang": _lang,
"app_err": None,
"__init__": streamErrCtor,
})
del _global_dict
##
# Make a concrete instance of \c StreamError based on \a xml.
#
# \param xml The <stream:error> Element
# \throws ValueError Thrown if \a xml is not a stream:error or lacks
# a condition.
# \returns A concrete instance of \c StreamError or
# \c UndefinedConditionStreamError if the condition is not
#          recognized.
def makeStreamError(xml):
return _makeConcreteError(xml)
def makeStanzaError(xml):
return _makeConcreteError(xml)
def _makeConcreteError(xml):
stream_error, stanza_error = False, False
if xml.xpath("/stream:error", namespaces={"stream": STREAM_NS_URI}):
stream_error = True
error_ns = STREAM_ERROR_NS_URI
elif xml.tag == "error":
stanza_error = True
error_ns = STANZA_ERROR_NS_URI
elif (xml.getchildren() and
xml.getchildren()[0].tag.startswith("{%s}" % STANZA_ERROR_NS_URI)):
        # Many stream features will wrap a stanza error in a feature-specific
# parent (xep 198, <failed>, e.g.).
stanza_error = True
error_ns = STANZA_ERROR_NS_URI
else:
raise ValueError("xml must be a stream:error or stanza error (no ns!)")
cond, type_, text, lang, app_err = None, None, None, None, None
for child in list(xml):
if child.tag == "{%s}text" % error_ns and child.text:
text = child.text
lang = child.attrib[XML_LANG] if XML_LANG in child.attrib \
else None
elif child.tag.startswith("{%s}" % error_ns):
cond = child
if stanza_error and "type" in xml.attrib:
type_ = xml.attrib["type"]
else:
app_err = child
if cond is None and app_err is not None:
# Not properly namespacing the condition is common
cond = app_err
cond.tag = "{%s}%s" % (error_ns, cond.tag)
if stream_error:
cond = cond.tag[2 + len(STREAM_ERROR_NS_URI):]
else:
cond = cond.tag[2 + len(STANZA_ERROR_NS_URI):]
Class = _makeClassName(cond, stanza_error)
try:
Class = globals()[Class]
except KeyError:
if stream_error:
Class = globals()["UndefinedConditionStreamError"]
else:
Class = globals()["UndefinedConditionStanzaError"]
if stream_error:
return Class(text=text, lang=lang, app_err=app_err)
else:
return Class(type=type_, text=text, lang=lang, app_err=app_err)
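def _exampleErrorRoundTrip():
    '''Illustrative sketch, not part of the original module: build a concrete
    stanza error, serialize it to XML, and parse it back with makeStanzaError().
    The error text below is made up.'''
    err = ServiceUnavailableStanzaError(text="Try again later", lang="en")
    parsed = makeStanzaError(err.xml)
    assert isinstance(parsed, ServiceUnavailableStanzaError)
    return parsed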
|
|
#
#
# Copyright (C) 2006, 2007, 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility functions for processes.
"""
import os
import sys
import subprocess
import errno
import select
import logging
import signal
import resource
from cStringIO import StringIO
from ganeti import errors
from ganeti import constants
from ganeti import compat
from ganeti.utils import retry as utils_retry
from ganeti.utils import wrapper as utils_wrapper
from ganeti.utils import text as utils_text
from ganeti.utils import io as utils_io
from ganeti.utils import algo as utils_algo
#: when set to True, L{RunCmd} is disabled
_no_fork = False
(_TIMEOUT_NONE,
_TIMEOUT_TERM,
_TIMEOUT_KILL) = range(3)
def DisableFork():
"""Disables the use of fork(2).
"""
global _no_fork # pylint: disable=W0603
_no_fork = True
class RunResult(object):
"""Holds the result of running external programs.
@type exit_code: int
@ivar exit_code: the exit code of the program, or None (if the program
didn't exit())
@type signal: int or None
@ivar signal: the signal that caused the program to finish, or None
(if the program wasn't terminated by a signal)
@type stdout: str
@ivar stdout: the standard output of the program
@type stderr: str
@ivar stderr: the standard error of the program
@type failed: boolean
@ivar failed: True in case the program was
terminated by a signal or exited with a non-zero exit code
@type failed_by_timeout: boolean
@ivar failed_by_timeout: True in case the program was
terminated by timeout
@ivar fail_reason: a string detailing the termination reason
"""
__slots__ = ["exit_code", "signal", "stdout", "stderr",
"failed", "failed_by_timeout", "fail_reason", "cmd"]
def __init__(self, exit_code, signal_, stdout, stderr, cmd, timeout_action,
timeout):
self.cmd = cmd
self.exit_code = exit_code
self.signal = signal_
self.stdout = stdout
self.stderr = stderr
self.failed = (signal_ is not None or exit_code != 0)
self.failed_by_timeout = timeout_action != _TIMEOUT_NONE
fail_msgs = []
if self.signal is not None:
fail_msgs.append("terminated by signal %s" % self.signal)
elif self.exit_code is not None:
fail_msgs.append("exited with exit code %s" % self.exit_code)
else:
fail_msgs.append("unable to determine termination reason")
if timeout_action == _TIMEOUT_TERM:
fail_msgs.append("terminated after timeout of %.2f seconds" % timeout)
elif timeout_action == _TIMEOUT_KILL:
fail_msgs.append(("force termination after timeout of %.2f seconds"
" and linger for another %.2f seconds") %
(timeout, constants.CHILD_LINGER_TIMEOUT))
if fail_msgs and self.failed:
self.fail_reason = utils_text.CommaJoin(fail_msgs)
else:
self.fail_reason = None
if self.failed:
logging.debug("Command '%s' failed (%s); output: %s",
self.cmd, self.fail_reason, self.output)
def _GetOutput(self):
"""Returns the combined stdout and stderr for easier usage.
"""
return self.stdout + self.stderr
output = property(_GetOutput, None, None, "Return full output")
def _BuildCmdEnvironment(env, reset):
"""Builds the environment for an external program.
"""
if reset:
cmd_env = {}
else:
cmd_env = os.environ.copy()
cmd_env["LC_ALL"] = "C"
if env is not None:
cmd_env.update(env)
return cmd_env
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
interactive=False, timeout=None, noclose_fds=None,
input_fd=None, postfork_fn=None):
"""Execute a (shell) command.
The command should not read from its standard input, as it will be
closed.
@type cmd: string or list
@param cmd: Command to run
@type env: dict
@param env: Additional environment variables
@type output: str
@param output: if desired, the output of the command can be
saved in a file instead of the RunResult instance; this
parameter denotes the file name (if not None)
@type cwd: string
@param cwd: if specified, will be used as the working
directory for the command; the default will be /
@type reset_env: boolean
@param reset_env: whether to reset or keep the default os environment
@type interactive: boolean
@param interactive: whether we pipe stdin, stdout and stderr
(default behaviour) or run the command interactive
@type timeout: int
@param timeout: If not None, timeout in seconds until child process gets
killed
@type noclose_fds: list
@param noclose_fds: list of additional (fd >=3) file descriptors to leave
open for the child process
@type input_fd: C{file}-like object or numeric file descriptor
@param input_fd: File descriptor for process' standard input
@type postfork_fn: Callable receiving PID as parameter
@param postfork_fn: Callback run after fork but before timeout
@rtype: L{RunResult}
@return: RunResult instance
@raise errors.ProgrammerError: if we call this when forks are disabled
"""
if _no_fork:
raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
if output and interactive:
raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
" not be provided at the same time")
if not (output is None or input_fd is None):
# The current logic in "_RunCmdFile", which is used when output is defined,
# does not support input files (not hard to implement, though)
raise errors.ProgrammerError("Parameters 'output' and 'input_fd' can"
" not be used at the same time")
if isinstance(cmd, basestring):
strcmd = cmd
shell = True
else:
cmd = [str(val) for val in cmd]
strcmd = utils_text.ShellQuoteArgs(cmd)
shell = False
if output:
logging.info("RunCmd %s, output file '%s'", strcmd, output)
else:
logging.info("RunCmd %s", strcmd)
cmd_env = _BuildCmdEnvironment(env, reset_env)
try:
if output is None:
out, err, status, timeout_action = _RunCmdPipe(cmd, cmd_env, shell, cwd,
interactive, timeout,
noclose_fds, input_fd,
postfork_fn=postfork_fn)
else:
if postfork_fn:
raise errors.ProgrammerError("postfork_fn is not supported if output"
" should be captured")
assert input_fd is None
timeout_action = _TIMEOUT_NONE
status = _RunCmdFile(cmd, cmd_env, shell, output, cwd, noclose_fds)
out = err = ""
except OSError, err:
if err.errno == errno.ENOENT:
raise errors.OpExecError("Can't execute '%s': not found (%s)" %
(strcmd, err))
else:
raise
if status >= 0:
exitcode = status
signal_ = None
else:
exitcode = None
signal_ = -status
return RunResult(exitcode, signal_, out, err, strcmd, timeout_action, timeout)
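def _ExampleRunCmdUsage():
  """Illustrative usage sketch, not part of the original module.
  Shows how a L{RunResult} returned by L{RunCmd} is typically consumed; the
  command below is arbitrary.
  """
  result = RunCmd(["/bin/true"], timeout=30)
  if result.failed:
    logging.error("Command %s failed (%s): %s", result.cmd,
                  result.fail_reason, result.output)
  return result.exit_code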
def SetupDaemonEnv(cwd="/", umask=077):
"""Setup a daemon's environment.
This should be called between the first and second fork, due to
setsid usage.
@param cwd: the directory to which to chdir
@param umask: the umask to setup
"""
os.chdir(cwd)
os.umask(umask)
os.setsid()
def SetupDaemonFDs(output_file, output_fd):
"""Setups up a daemon's file descriptors.
@param output_file: if not None, the file to which to redirect
stdout/stderr
@param output_fd: if not None, the file descriptor for stdout/stderr
"""
# check that at most one is defined
assert [output_file, output_fd].count(None) >= 1
# Open /dev/null (read-only, only for stdin)
devnull_fd = os.open(os.devnull, os.O_RDONLY)
output_close = True
if output_fd is not None:
output_close = False
elif output_file is not None:
# Open output file
try:
output_fd = os.open(output_file,
os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
except EnvironmentError, err:
raise Exception("Opening output file failed: %s" % err)
else:
output_fd = os.open(os.devnull, os.O_WRONLY)
# Redirect standard I/O
os.dup2(devnull_fd, 0)
os.dup2(output_fd, 1)
os.dup2(output_fd, 2)
if devnull_fd > 2:
utils_wrapper.CloseFdNoError(devnull_fd)
if output_close and output_fd > 2:
utils_wrapper.CloseFdNoError(output_fd)
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
pidfile=None):
"""Start a daemon process after forking twice.
@type cmd: string or list
@param cmd: Command to run
@type env: dict
@param env: Additional environment variables
@type cwd: string
@param cwd: Working directory for the program
@type output: string
@param output: Path to file in which to save the output
@type output_fd: int
@param output_fd: File descriptor for output
@type pidfile: string
@param pidfile: Process ID file
@rtype: int
@return: Daemon process ID
@raise errors.ProgrammerError: if we call this when forks are disabled
"""
if _no_fork:
raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
" disabled")
if output and not (bool(output) ^ (output_fd is not None)):
raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
" specified")
if isinstance(cmd, basestring):
cmd = ["/bin/sh", "-c", cmd]
strcmd = utils_text.ShellQuoteArgs(cmd)
if output:
logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
else:
logging.debug("StartDaemon %s", strcmd)
cmd_env = _BuildCmdEnvironment(env, False)
# Create pipe for sending PID back
(pidpipe_read, pidpipe_write) = os.pipe()
try:
try:
# Create pipe for sending error messages
(errpipe_read, errpipe_write) = os.pipe()
try:
try:
# First fork
pid = os.fork()
if pid == 0:
try:
# Child process, won't return
_StartDaemonChild(errpipe_read, errpipe_write,
pidpipe_read, pidpipe_write,
cmd, cmd_env, cwd,
output, output_fd, pidfile)
finally:
# Well, maybe child process failed
os._exit(1) # pylint: disable=W0212
finally:
utils_wrapper.CloseFdNoError(errpipe_write)
# Wait for daemon to be started (or an error message to
# arrive) and read up to 100 KB as an error message
errormsg = utils_wrapper.RetryOnSignal(os.read, errpipe_read,
100 * 1024)
finally:
utils_wrapper.CloseFdNoError(errpipe_read)
finally:
utils_wrapper.CloseFdNoError(pidpipe_write)
# Read up to 128 bytes for PID
pidtext = utils_wrapper.RetryOnSignal(os.read, pidpipe_read, 128)
finally:
utils_wrapper.CloseFdNoError(pidpipe_read)
# Try to avoid zombies by waiting for child process
try:
os.waitpid(pid, 0)
except OSError:
pass
if errormsg:
raise errors.OpExecError("Error when starting daemon process: %r" %
errormsg)
try:
return int(pidtext)
except (ValueError, TypeError), err:
raise errors.OpExecError("Error while trying to parse PID %r: %s" %
(pidtext, err))
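# Illustrative usage sketch, not part of the original module; the daemon
# command, log file and PID file below are hypothetical.
#   pid = StartDaemon(["/usr/sbin/mydaemon", "--foreground"],
#                     output="/var/log/mydaemon.log",
#                     pidfile="/var/run/mydaemon.pid")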
def _StartDaemonChild(errpipe_read, errpipe_write,
pidpipe_read, pidpipe_write,
args, env, cwd,
output, fd_output, pidfile):
"""Child process for starting daemon.
"""
try:
# Close parent's side
utils_wrapper.CloseFdNoError(errpipe_read)
utils_wrapper.CloseFdNoError(pidpipe_read)
# First child process
SetupDaemonEnv()
# And fork for the second time
pid = os.fork()
if pid != 0:
# Exit first child process
os._exit(0) # pylint: disable=W0212
# Make sure pipe is closed on execv* (and thereby notifies
# original process)
utils_wrapper.SetCloseOnExecFlag(errpipe_write, True)
# List of file descriptors to be left open
noclose_fds = [errpipe_write]
# Open PID file
if pidfile:
fd_pidfile = utils_io.WritePidFile(pidfile)
# Keeping the file open to hold the lock
noclose_fds.append(fd_pidfile)
utils_wrapper.SetCloseOnExecFlag(fd_pidfile, False)
else:
fd_pidfile = None
SetupDaemonFDs(output, fd_output)
# Send daemon PID to parent
utils_wrapper.RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
# Close all file descriptors except stdio and error message pipe
CloseFDs(noclose_fds=noclose_fds)
# Change working directory
os.chdir(cwd)
if env is None:
os.execvp(args[0], args)
else:
os.execvpe(args[0], args, env)
except: # pylint: disable=W0702
try:
# Report errors to original process
WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
except: # pylint: disable=W0702
# Ignore errors in error handling
pass
os._exit(1) # pylint: disable=W0212
def WriteErrorToFD(fd, err):
"""Possibly write an error message to a fd.
@type fd: None or int (file descriptor)
@param fd: if not None, the error will be written to this fd
@param err: string, the error message
"""
if fd is None:
return
if not err:
err = "<unknown error>"
utils_wrapper.RetryOnSignal(os.write, fd, err)
def _CheckIfAlive(child):
"""Raises L{utils_retry.RetryAgain} if child is still alive.
@raises utils_retry.RetryAgain: If child is still alive
"""
if child.poll() is None:
raise utils_retry.RetryAgain()
def _WaitForProcess(child, timeout):
"""Waits for the child to terminate or until we reach timeout.
"""
try:
utils_retry.Retry(_CheckIfAlive, (1.0, 1.2, 5.0), max(0, timeout),
args=[child])
except utils_retry.RetryTimeout:
pass
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive, timeout, noclose_fds,
input_fd, postfork_fn=None,
_linger_timeout=constants.CHILD_LINGER_TIMEOUT):
"""Run a command and return its output.
@type cmd: string or list
@param cmd: Command to run
@type env: dict
@param env: The environment to use
@type via_shell: bool
@param via_shell: if we should run via the shell
@type cwd: string
@param cwd: the working directory for the program
@type interactive: boolean
@param interactive: Run command interactive (without piping)
@type timeout: int
  @param timeout: Timeout in seconds after which the program gets terminated
@type noclose_fds: list
@param noclose_fds: list of additional (fd >=3) file descriptors to leave
open for the child process
@type input_fd: C{file}-like object or numeric file descriptor
@param input_fd: File descriptor for process' standard input
@type postfork_fn: Callable receiving PID as parameter
@param postfork_fn: Function run after fork but before timeout
@rtype: tuple
  @return: (out, err, status, timeout_action)
"""
poller = select.poll()
if interactive:
stderr = None
stdout = None
else:
stderr = subprocess.PIPE
stdout = subprocess.PIPE
if input_fd:
stdin = input_fd
elif interactive:
stdin = None
else:
stdin = subprocess.PIPE
if noclose_fds:
preexec_fn = lambda: CloseFDs(noclose_fds)
close_fds = False
else:
preexec_fn = None
close_fds = True
child = subprocess.Popen(cmd, shell=via_shell,
stderr=stderr,
stdout=stdout,
stdin=stdin,
close_fds=close_fds, env=env,
cwd=cwd,
preexec_fn=preexec_fn)
if postfork_fn:
postfork_fn(child.pid)
out = StringIO()
err = StringIO()
linger_timeout = None
if timeout is None:
poll_timeout = None
else:
poll_timeout = utils_algo.RunningTimeout(timeout, True).Remaining
msg_timeout = ("Command %s (%d) run into execution timeout, terminating" %
(cmd, child.pid))
msg_linger = ("Command %s (%d) run into linger timeout, killing" %
(cmd, child.pid))
timeout_action = _TIMEOUT_NONE
# subprocess: "If the stdin argument is PIPE, this attribute is a file object
# that provides input to the child process. Otherwise, it is None."
assert (stdin == subprocess.PIPE) ^ (child.stdin is None), \
"subprocess' stdin did not behave as documented"
if not interactive:
if child.stdin is not None:
child.stdin.close()
poller.register(child.stdout, select.POLLIN)
poller.register(child.stderr, select.POLLIN)
fdmap = {
child.stdout.fileno(): (out, child.stdout),
child.stderr.fileno(): (err, child.stderr),
}
for fd in fdmap:
utils_wrapper.SetNonblockFlag(fd, True)
while fdmap:
if poll_timeout:
pt = poll_timeout() * 1000
if pt < 0:
if linger_timeout is None:
logging.warning(msg_timeout)
if child.poll() is None:
timeout_action = _TIMEOUT_TERM
utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid,
signal.SIGTERM)
linger_timeout = \
utils_algo.RunningTimeout(_linger_timeout, True).Remaining
pt = linger_timeout() * 1000
if pt < 0:
break
else:
pt = None
pollresult = utils_wrapper.RetryOnSignal(poller.poll, pt)
for fd, event in pollresult:
if event & select.POLLIN or event & select.POLLPRI:
data = fdmap[fd][1].read()
# no data from read signifies EOF (the same as POLLHUP)
if not data:
poller.unregister(fd)
del fdmap[fd]
continue
fdmap[fd][0].write(data)
if (event & select.POLLNVAL or event & select.POLLHUP or
event & select.POLLERR):
poller.unregister(fd)
del fdmap[fd]
if timeout is not None:
assert callable(poll_timeout)
# We have no I/O left but it might still run
if child.poll() is None:
_WaitForProcess(child, poll_timeout())
# Terminate if still alive after timeout
if child.poll() is None:
if linger_timeout is None:
logging.warning(msg_timeout)
timeout_action = _TIMEOUT_TERM
utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGTERM)
lt = _linger_timeout
else:
lt = linger_timeout()
_WaitForProcess(child, lt)
# Okay, still alive after timeout and linger timeout? Kill it!
if child.poll() is None:
timeout_action = _TIMEOUT_KILL
logging.warning(msg_linger)
utils_wrapper.IgnoreProcessNotFound(os.kill, child.pid, signal.SIGKILL)
out = out.getvalue()
err = err.getvalue()
status = child.wait()
return out, err, status, timeout_action
def _RunCmdFile(cmd, env, via_shell, output, cwd, noclose_fds):
"""Run a command and save its output to a file.
@type cmd: string or list
@param cmd: Command to run
@type env: dict
@param env: The environment to use
@type via_shell: bool
@param via_shell: if we should run via the shell
@type output: str
@param output: the filename in which to save the output
@type cwd: string
@param cwd: the working directory for the program
@type noclose_fds: list
@param noclose_fds: list of additional (fd >=3) file descriptors to leave
open for the child process
@rtype: int
@return: the exit status
"""
fh = open(output, "a")
if noclose_fds:
preexec_fn = lambda: CloseFDs(noclose_fds + [fh.fileno()])
close_fds = False
else:
preexec_fn = None
close_fds = True
try:
child = subprocess.Popen(cmd, shell=via_shell,
stderr=subprocess.STDOUT,
stdout=fh,
stdin=subprocess.PIPE,
close_fds=close_fds, env=env,
cwd=cwd,
preexec_fn=preexec_fn)
child.stdin.close()
status = child.wait()
finally:
fh.close()
return status
def RunParts(dir_name, env=None, reset_env=False):
"""Run Scripts or programs in a directory
@type dir_name: string
@param dir_name: absolute path to a directory
@type env: dict
@param env: The environment to use
@type reset_env: boolean
@param reset_env: whether to reset or keep the default os environment
@rtype: list of tuples
@return: list of (name, (one of RUNDIR_STATUS), RunResult)
"""
rr = []
try:
dir_contents = utils_io.ListVisibleFiles(dir_name)
except OSError, err:
logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
return rr
for relname in sorted(dir_contents):
fname = utils_io.PathJoin(dir_name, relname)
if not (constants.EXT_PLUGIN_MASK.match(relname) is not None and
utils_wrapper.IsExecutable(fname)):
rr.append((relname, constants.RUNPARTS_SKIP, None))
else:
try:
result = RunCmd([fname], env=env, reset_env=reset_env)
except Exception, err: # pylint: disable=W0703
rr.append((relname, constants.RUNPARTS_ERR, str(err)))
else:
rr.append((relname, constants.RUNPARTS_RUN, result))
return rr
def _GetProcStatusPath(pid):
"""Returns the path for a PID's proc status file.
@type pid: int
@param pid: Process ID
@rtype: string
"""
return "/proc/%d/status" % pid
def GetProcCmdline(pid):
"""Returns the command line of a pid as a list of arguments.
@type pid: int
@param pid: Process ID
@rtype: list of string
@raise EnvironmentError: If the process does not exist
"""
proc_path = "/proc/%d/cmdline" % pid
with open(proc_path, 'r') as f:
nulled_cmdline = f.read()
# Individual arguments are separated by nul chars in the contents of the proc
# file
return nulled_cmdline.split('\x00')
def IsProcessAlive(pid):
"""Check if a given pid exists on the system.
@note: zombie status is not handled, so zombie processes
will be returned as alive
@type pid: int
@param pid: the process ID to check
@rtype: boolean
@return: True if the process exists
"""
def _TryStat(name):
try:
os.stat(name)
return True
except EnvironmentError, err:
if err.errno in (errno.ENOENT, errno.ENOTDIR):
return False
elif err.errno == errno.EINVAL:
raise utils_retry.RetryAgain(err)
raise
assert isinstance(pid, int), "pid must be an integer"
if pid <= 0:
return False
# /proc in a multiprocessor environment can have strange behaviors.
# Retry the os.stat a few times until we get a good result.
try:
return utils_retry.Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
args=[_GetProcStatusPath(pid)])
except utils_retry.RetryTimeout, err:
err.RaiseInner()
def IsDaemonAlive(name):
"""Determines whether a daemon is alive
@type name: string
@param name: daemon name
@rtype: boolean
@return: True if daemon is running, False otherwise
"""
return IsProcessAlive(utils_io.ReadPidFile(utils_io.DaemonPidFileName(name)))
def _ParseSigsetT(sigset):
"""Parse a rendered sigset_t value.
This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
function.
@type sigset: string
@param sigset: Rendered signal set from /proc/$pid/status
@rtype: set
@return: Set of all enabled signal numbers
"""
result = set()
signum = 0
for ch in reversed(sigset):
chv = int(ch, 16)
# The following could be done in a loop, but it's easier to read and
# understand in the unrolled form
if chv & 1:
result.add(signum + 1)
if chv & 2:
result.add(signum + 2)
if chv & 4:
result.add(signum + 3)
if chv & 8:
result.add(signum + 4)
signum += 4
return result
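# For example (illustrative): in a rendered sigset of "0000000000014005" the
# bits for signals 1, 3, 15 and 17 are set, so
# _ParseSigsetT("0000000000014005") == set([1, 3, 15, 17]).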
def _GetProcStatusField(pstatus, field):
"""Retrieves a field from the contents of a proc status file.
@type pstatus: string
@param pstatus: Contents of /proc/$pid/status
@type field: string
@param field: Name of field whose value should be returned
@rtype: string
"""
for line in pstatus.splitlines():
parts = line.split(":", 1)
if len(parts) < 2 or parts[0] != field:
continue
return parts[1].strip()
return None
def IsProcessHandlingSignal(pid, signum, status_path=None):
"""Checks whether a process is handling a signal.
@type pid: int
@param pid: Process ID
@type signum: int
@param signum: Signal number
@rtype: bool
"""
if status_path is None:
status_path = _GetProcStatusPath(pid)
try:
proc_status = utils_io.ReadFile(status_path)
except EnvironmentError, err:
# In at least one case, reading /proc/$pid/status failed with ESRCH.
if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
return False
raise
sigcgt = _GetProcStatusField(proc_status, "SigCgt")
if sigcgt is None:
raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
# Now check whether signal is handled
return signum in _ParseSigsetT(sigcgt)
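# Illustrative sketch, not part of the original module: checking whether the
# current process has installed a handler for SIGTERM.
#   handles_term = IsProcessHandlingSignal(os.getpid(), signal.SIGTERM)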
def Daemonize(logfile):
"""Daemonize the current process.
This detaches the current process from the controlling terminal and
runs it in the background as a daemon.
@type logfile: str
@param logfile: the logfile to which we should redirect stdout/stderr
@rtype: tuple; (int, callable)
@return: File descriptor of pipe(2) which must be closed to notify parent
process and a callable to reopen log files
"""
# pylint: disable=W0212
# yes, we really want os._exit
# TODO: do another attempt to merge Daemonize and StartDaemon, or at
# least abstract the pipe functionality between them
# Create pipe for sending error messages
(rpipe, wpipe) = os.pipe()
# this might fail
pid = os.fork()
if (pid == 0): # The first child.
SetupDaemonEnv()
# this might fail
pid = os.fork() # Fork a second child.
if (pid == 0): # The second child.
utils_wrapper.CloseFdNoError(rpipe)
else:
# exit() or _exit()? See below.
os._exit(0) # Exit parent (the first child) of the second child.
else:
utils_wrapper.CloseFdNoError(wpipe)
# Wait for daemon to be started (or an error message to
# arrive) and read up to 100 KB as an error message
errormsg = utils_wrapper.RetryOnSignal(os.read, rpipe, 100 * 1024)
if errormsg:
sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
rcode = 1
else:
rcode = 0
os._exit(rcode) # Exit parent of the first child.
reopen_fn = compat.partial(SetupDaemonFDs, logfile, None)
# Open logs for the first time
reopen_fn()
return (wpipe, reopen_fn)
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
waitpid=False):
"""Kill a process given by its pid.
@type pid: int
@param pid: The PID to terminate.
@type signal_: int
@param signal_: The signal to send, by default SIGTERM
@type timeout: int
@param timeout: The timeout after which, if the process is still alive,
a SIGKILL will be sent. If not positive, no such checking
will be done
@type waitpid: boolean
@param waitpid: If true, we should waitpid on this process after
sending signals, since it's our own child and otherwise it
would remain as zombie
"""
def _helper(pid, signal_, wait):
"""Simple helper to encapsulate the kill/waitpid sequence"""
if utils_wrapper.IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
try:
os.waitpid(pid, os.WNOHANG)
except OSError:
pass
if pid <= 0:
# kill with pid=0 == suicide
raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
if not IsProcessAlive(pid):
return
_helper(pid, signal_, waitpid)
if timeout <= 0:
return
def _CheckProcess():
if not IsProcessAlive(pid):
return
try:
(result_pid, _) = os.waitpid(pid, os.WNOHANG)
except OSError:
raise utils_retry.RetryAgain()
if result_pid > 0:
return
raise utils_retry.RetryAgain()
try:
# Wait up to $timeout seconds
utils_retry.Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
except utils_retry.RetryTimeout:
pass
if IsProcessAlive(pid):
# Kill process if it's still alive
_helper(pid, signal.SIGKILL, waitpid)
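# Illustrative usage sketch, not part of the original module; the PID below is
# hypothetical.
#   KillProcess(12345, signal_=signal.SIGTERM, timeout=30)
#   # Escalates to SIGKILL if the process is still alive after the timeout.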
def RunInSeparateProcess(fn, *args):
"""Runs a function in a separate process.
Note: Only boolean return values are supported.
@type fn: callable
@param fn: Function to be called
@rtype: bool
@return: Function's result
"""
pid = os.fork()
if pid == 0:
# Child process
try:
# In case the function uses temporary files
utils_wrapper.ResetTempfileModule()
# Call function
result = int(bool(fn(*args)))
assert result in (0, 1)
except: # pylint: disable=W0702
logging.exception("Error while calling function in separate process")
# 0 and 1 are reserved for the return value
result = 33
os._exit(result) # pylint: disable=W0212
# Parent process
# Avoid zombies and check exit code
(_, status) = os.waitpid(pid, 0)
if os.WIFSIGNALED(status):
exitcode = None
signum = os.WTERMSIG(status)
else:
exitcode = os.WEXITSTATUS(status)
signum = None
if not (exitcode in (0, 1) and signum is None):
raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
(exitcode, signum))
return bool(exitcode)
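def _ExampleRunInSeparateProcess():
  """Illustrative sketch, not part of the original module.
  Runs a trivial boolean check in a forked child; the callable below is made
  up for demonstration and only its truth value is propagated back.
  """
  return RunInSeparateProcess(lambda: os.getpid() > 0)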
def CloseFDs(noclose_fds=None):
"""Close file descriptors.
This closes all file descriptors above 2 (i.e. except
stdin/out/err).
@type noclose_fds: list or None
@param noclose_fds: if given, it denotes a list of file descriptor
that should not be closed
"""
# Default maximum for the number of available file descriptors.
if 'SC_OPEN_MAX' in os.sysconf_names:
try:
MAXFD = os.sysconf('SC_OPEN_MAX')
if MAXFD < 0:
MAXFD = 1024
except OSError:
MAXFD = 1024
else:
MAXFD = 1024
maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
if (maxfd == resource.RLIM_INFINITY):
maxfd = MAXFD
# Iterate through and close all file descriptors (except the standard ones)
for fd in range(3, maxfd):
if noclose_fds and fd in noclose_fds:
continue
utils_wrapper.CloseFdNoError(fd)
|
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
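# Illustrative sketch, not part of the original file: for a non-Express 2013+
# install on a 64-bit OS, an instance "vs" of VisualStudioVersion would return
# something like [r'C:\...\VC\vcvarsall.bat', 'amd64_x86'] from
# vs.SetupScript('x86'); the path shown is hypothetical.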
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError as e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
  try:
    # Python 2
    from _winreg import HKEY_LOCAL_MACHINE, OpenKey, QueryValueEx
  except ImportError:
    # Python 3
    from winreg import HKEY_LOCAL_MACHINE, OpenKey, QueryValueEx
  try:
    root, subkey = key.split('\\', 1)
    assert root == 'HKLM'  # Only need HKLM for now.
    with OpenKey(HKEY_LOCAL_MACHINE, subkey) as hkey:
return QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
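# Illustrative sketch, not part of the original file: reading the Visual Studio
# 2013 install directory (the key is one of those probed below); returns None
# if the key or value does not exist.
#   path = _RegistryGetValue(r'HKLM\Software\Microsoft\VisualStudio\12.0',
#                            'InstallDir')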
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-14 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
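# Illustrative usage sketch, not part of the original file:
#   vs = SelectVisualStudioVersion('2013')
#   vs.Description()       # "Visual Studio 2013"
#   vs.ProjectExtension()  # ".vcxproj"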
|
|
from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.template import Context, Template
from django.test import TestCase, ignore_warnings, override_settings
from django.test.client import RequestFactory
from django.urls import reverse
from django.utils import formats, six
from django.utils.deprecation import RemovedInDjango20Warning
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, ConcertAdmin,
CustomPaginationAdmin, CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, EmptyValueChildAdmin, EventAdmin,
FilteredChildAdmin, GroupAdmin, InvitationAdmin,
NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin, SwallowAdmin,
site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, Concert, CustomIdUser, Event,
Genre, Group, Invitation, Membership, Musician, OrderedObject, Parent,
Quartet, Swallow, SwallowOneToOne, UnorderedObject,
)
def get_changelist_args(modeladmin, **kwargs):
m = modeladmin
args = (
kwargs.pop('list_display', m.list_display),
kwargs.pop('list_display_links', m.list_display_links),
kwargs.pop('list_filter', m.list_filter),
kwargs.pop('date_hierarchy', m.date_hierarchy),
kwargs.pop('search_fields', m.search_fields),
kwargs.pop('list_select_related', m.list_select_related),
kwargs.pop('list_per_page', m.list_per_page),
kwargs.pop('list_max_show_all', m.list_max_show_all),
kwargs.pop('list_editable', m.list_editable),
m,
)
assert not kwargs, "Unexpected kwarg %s" % kwargs
return args
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
def setUp(self):
self.factory = RequestFactory()
def _create_superuser(self, username):
return User.objects.create_superuser(username=username, email='a@b.com', password='xxx')
def _mocked_authenticated_request(self, url, user):
request = self.factory.get(url)
request.user = user
return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, custom_site)
request = self.factory.get('/child/')
cl = ChangeList(
request, Child,
*get_changelist_args(m, list_select_related=m.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'parent': {}})
def test_select_related_as_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'player': {}})
def test_select_related_as_empty_tuple(self):
ia = InvitationAdmin(Invitation, custom_site)
ia.list_select_related = ()
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertIs(cl.queryset.query.select_related, False)
def test_get_select_related_custom_method(self):
class GetListSelectRelatedAdmin(admin.ModelAdmin):
list_display = ('band', 'player')
def get_list_select_related(self, request):
return ('band', 'player')
ia = GetListSelectRelatedAdmin(Invitation, custom_site)
request = self.factory.get('/invitation/')
cl = ChangeList(
request, Child,
*get_changelist_args(ia, list_select_related=ia.get_list_select_related(request))
)
self.assertEqual(cl.queryset.query.select_related, {'player': {}, 'band': {}})
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">-</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_on_admin_site(self):
"""
Test that empty value display can be set on AdminSite
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
# Set a new empty display value on AdminSite.
admin.site.empty_value_display = '???'
m = ChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">???</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_set_empty_value_display_in_model_admin(self):
"""
Test that empty value display can be set in ModelAdmin or individual fields.
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = EmptyValueChildAdmin(Child, admin.site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-age_display">&dagger;</td><td class="field-age">-empty-</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
        Verifies that the inclusion tag result_list generates a table with the
        default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = (
'<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th>'
'<td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
)
self.assertNotEqual(table_output.find(row_html), -1, 'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, *get_changelist_args(m))
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = (
'<div class="hiddenfields">'
'<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" />'
'</div>'
) % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = (
'<input name="form-0-name" value="name" class="vTextField" '
'maxlength="30" type="text" id="id_form-0-name" />'
)
self.assertInHTML(
'<td class="field-name">%s</td>' % editable_name_field,
table_output,
msg_prefix='Failed to find "name" list_editable field',
)
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, custom_site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
with self.assertRaises(IncorrectLookupParameters):
ChangeList(request, Child, *get_changelist_args(m))
@ignore_warnings(category=RemovedInDjango20Warning)
def test_result_list_with_allow_tags(self):
"""
Test for deprecation of allow_tags attribute
"""
new_parent = Parent.objects.create(name='parent')
for i in range(2):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, custom_site)
def custom_method(self, obj=None):
return 'Unsafe html <br />'
custom_method.allow_tags = True
# Add custom method with allow_tags attribute
m.custom_method = custom_method
m.list_display = ['id', 'name', 'parent', 'custom_method']
cl = ChangeList(request, Child, *get_changelist_args(m))
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
custom_field_html = '<td class="field-custom_method">Unsafe html <br /></td>'
self.assertInHTML(custom_field_html, table_output)
def test_custom_paginator(self):
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/')
m = CustomPaginationAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.get_results(request)
self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, custom_site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, *get_changelist_args(m))
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, *get_changelist_args(m))
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_at_second_level_in_list_filter(self):
"""
When using a ManyToMany in list_filter at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={'group__members': lead.pk})
cl = ChangeList(request, Concert, *get_changelist_args(m))
cl.get_results(request)
# There's only one Concert instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Model managed in the
admin inherits from the one that defines the relationship.
"""
lead = Musician.objects.create(name='John')
four = Quartet.objects.create(name='The Beatles')
Membership.objects.create(group=four, music=lead, role='lead voice')
Membership.objects.create(group=four, music=lead, role='guitar player')
m = QuartetAdmin(Quartet, custom_site)
request = self.factory.get('/quartet/', data={'members': lead.pk})
cl = ChangeList(request, Quartet, *get_changelist_args(m))
cl.get_results(request)
# There's only one Quartet instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Target of the relationship
inherits from another.
"""
lead = ChordsMusician.objects.create(name='Player A')
three = ChordsBand.objects.create(name='The Chords Trio')
Invitation.objects.create(band=three, player=lead, instrument='guitar')
Invitation.objects.create(band=three, player=lead, instrument='bass')
m = ChordsBandAdmin(ChordsBand, custom_site)
request = self.factory.get('/chordsband/', data={'members': lead.pk})
cl = ChangeList(request, ChordsBand, *get_changelist_args(m))
cl.get_results(request)
# There's only one ChordsBand instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
"""
Regression tests for #15819: if a field listed in list_filter
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
# Two children with the same name
Child.objects.create(parent=parent, name='Daniel')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
cl = ChangeList(request, Parent, *get_changelist_args(m))
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
"""
Regression tests for #15819: if a field listed in search_fields
is a non-unique related object, distinct() must be called.
"""
parent = Parent.objects.create(name='Mary')
Child.objects.create(parent=parent, name='Danielle')
Child.objects.create(parent=parent, name='Daniel')
m = ParentAdmin(Parent, custom_site)
request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
cl = ChangeList(request, Parent, *get_changelist_args(m))
# Make sure distinct() was called
self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_many_to_many_at_second_level_in_search_fields(self):
"""
When using a ManyToMany in search_fields at the second level behind a
ForeignKey, distinct() must be called and results shouldn't appear more
than once.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Concert.objects.create(name='Woodstock', group=band)
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = ConcertAdmin(Concert, custom_site)
request = self.factory.get('/concert/', data={SEARCH_VAR: 'vox'})
cl = ChangeList(request, Concert, *get_changelist_args(m))
# There's only one Concert instance
self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
"""
Regression tests for #12893: pagination in the admin changelist doesn't
use the queryset set by the ModelAdmin.
"""
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
request = self.factory.get('/child/')
# Test default queryset
m = ChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
self.assertEqual(cl.queryset.count(), 60)
self.assertEqual(cl.paginator.count, 60)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
# Test custom queryset
m = FilteredChildAdmin(Child, custom_site)
cl = ChangeList(request, Child, *get_changelist_args(m))
self.assertEqual(cl.queryset.count(), 30)
self.assertEqual(cl.paginator.count, 30)
self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
"""
Regression test for #13196: output of functions should be localized
in the changelist.
"""
superuser = User.objects.create_superuser(username='super', email='super@localhost', password='secret')
self.client.force_login(superuser)
event = Event.objects.create(date=datetime.date.today())
response = self.client.get(reverse('admin:admin_changelist_event_changelist'))
self.assertContains(response, formats.localize(event.date))
self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
"""
Regression tests for #14206: dynamic list_display support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertNotContains(response, 'Parent object')
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ['name', 'age'])
self.assertEqual(list_display_links, ['name'])
# Test with user 'parents'
m = DynamicListDisplayChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
custom_site.unregister(Child)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['parent'])
# Test default implementation
custom_site.register(Child, ChildAdmin)
m = custom_site._registry[Child]
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertContains(response, 'Parent object')
def test_show_all(self):
parent = Parent.objects.create(name='anything')
for i in range(30):
Child.objects.create(name='name %s' % i, parent=parent)
Child.objects.create(name='filtered %s' % i, parent=parent)
# Add "show all" parameter to request
request = self.factory.get('/child/', data={ALL_VAR: ''})
# Test valid "show all" request (number of total objects is under max)
m = ChildAdmin(Child, custom_site)
m.list_max_show_all = 200
# 200 is the max we'll pass to ChangeList
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.get_results(request)
self.assertEqual(len(cl.result_list), 60)
# Test invalid "show all" request (number of total objects over max)
# falls back to paginated pages
m = ChildAdmin(Child, custom_site)
m.list_max_show_all = 30
# 30 is the max we'll pass to ChangeList for this test
cl = ChangeList(request, Child, *get_changelist_args(m))
cl.get_results(request)
self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
"""
Regression tests for #16257: dynamic list_display_links support.
"""
parent = Parent.objects.create(name='parent')
for i in range(1, 10):
Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
m = DynamicListDisplayLinksChildAdmin(Child, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/child/', superuser)
response = m.changelist_view(request)
for i in range(1, 10):
link = reverse('admin:admin_changelist_child_change', args=(i,))
self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
self.assertEqual(list_display, ('parent', 'name', 'age'))
self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
"""#15185 -- Allow no links from the 'change list' view grid."""
p = Parent.objects.create(name='parent')
m = NoListDisplayLinksParentAdmin(Parent, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/parent/', superuser)
response = m.changelist_view(request)
link = reverse('admin:admin_changelist_parent_change', args=(p.pk,))
self.assertNotContains(response, '<a href="%s">' % link)
def test_tuple_list_display(self):
"""
Regression test for #17128
(ChangeList failing under Python 2.5 after r16319)
"""
swallow = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow2 = Swallow.objects.create(origin='Africa', load='12.34', speed='22.2')
swallow_o2o = SwallowOneToOne.objects.create(swallow=swallow2)
model_admin = SwallowAdmin(Swallow, custom_site)
superuser = self._create_superuser('superuser')
request = self._mocked_authenticated_request('/swallow/', superuser)
response = model_admin.changelist_view(request)
# just want to ensure it doesn't blow up during rendering
self.assertContains(response, six.text_type(swallow.origin))
self.assertContains(response, six.text_type(swallow.load))
self.assertContains(response, six.text_type(swallow.speed))
# Reverse one-to-one relations should work.
self.assertContains(response, '<td class="field-swallowonetoone">-</td>')
self.assertContains(response, '<td class="field-swallowonetoone">%s</td>' % swallow_o2o)
def test_multiuser_edit(self):
"""
Simultaneous edits of list_editable fields on the changelist by
different users must not result in one user's edits creating a new
object instead of modifying the correct existing object (#11313).
"""
# To replicate this issue, simulate the following steps:
# 1. User1 opens an admin changelist with list_editable fields.
# 2. User2 edits object "Foo" such that it moves to another page in
# the pagination order and saves.
# 3. User1 edits object "Foo" and saves.
# 4. The edit made by User1 does not get applied to object "Foo" but
# instead is used to create a new object (bug).
# For this test, order the changelist by the 'speed' attribute and
# display 3 objects per page (SwallowAdmin.list_per_page = 3).
# Setup the test to reflect the DB state after step 2 where User2 has
# edited the first swallow object's speed from '4' to '1'.
a = Swallow.objects.create(origin='Swallow A', load=4, speed=1)
b = Swallow.objects.create(origin='Swallow B', load=2, speed=2)
c = Swallow.objects.create(origin='Swallow C', load=5, speed=5)
d = Swallow.objects.create(origin='Swallow D', load=9, speed=9)
superuser = self._create_superuser('superuser')
self.client.force_login(superuser)
changelist_url = reverse('admin:admin_changelist_swallow_changelist')
# Send the POST from User1 for step 3. It's still using the changelist
# ordering from before User2's edits in step 2.
data = {
'form-TOTAL_FORMS': '3',
'form-INITIAL_FORMS': '3',
'form-MIN_NUM_FORMS': '0',
'form-MAX_NUM_FORMS': '1000',
'form-0-id': str(d.pk),
'form-1-id': str(c.pk),
'form-2-id': str(a.pk),
'form-0-load': '9.0',
'form-0-speed': '9.0',
'form-1-load': '5.0',
'form-1-speed': '5.0',
'form-2-load': '5.0',
'form-2-speed': '4.0',
'_save': 'Save',
}
response = self.client.post(changelist_url, data, follow=True, extra={'o': '-2'})
# The object User1 edited in step 3 is displayed on the changelist and
# has the correct edits applied.
self.assertContains(response, '1 swallow was changed successfully.')
self.assertContains(response, a.origin)
a.refresh_from_db()
self.assertEqual(a.load, float(data['form-2-load']))
self.assertEqual(a.speed, float(data['form-2-speed']))
b.refresh_from_db()
self.assertEqual(b.load, 2)
self.assertEqual(b.speed, 2)
c.refresh_from_db()
self.assertEqual(c.load, float(data['form-1-load']))
self.assertEqual(c.speed, float(data['form-1-speed']))
d.refresh_from_db()
self.assertEqual(d.load, float(data['form-0-load']))
self.assertEqual(d.speed, float(data['form-0-speed']))
# No new swallows were created.
self.assertEqual(len(Swallow.objects.all()), 4)
def test_deterministic_order_for_unordered_model(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model doesn't have any default ordering defined.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
UnorderedObject.objects.create(id=counter, bool=True)
class UnorderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(UnorderedObject, UnorderedObjectAdmin)
model_admin = UnorderedObjectAdmin(UnorderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(UnorderedObject)
# When no order is defined at all, everything is ordered by '-pk'.
check_results_order()
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
UnorderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
UnorderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
UnorderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
UnorderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
UnorderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
"""
Ensure that the primary key is systematically used in the ordering of
the changelist's results to guarantee a deterministic order, even
when the Model has a manager that defines a default ordering.
Refs #17198.
"""
superuser = self._create_superuser('superuser')
for counter in range(1, 51):
OrderedObject.objects.create(id=counter, bool=True, number=counter)
class OrderedObjectAdmin(admin.ModelAdmin):
list_per_page = 10
def check_results_order(ascending=False):
custom_site.register(OrderedObject, OrderedObjectAdmin)
model_admin = OrderedObjectAdmin(OrderedObject, custom_site)
counter = 0 if ascending else 51
for page in range(0, 5):
request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
response = model_admin.changelist_view(request)
for result in response.context_data['cl'].result_list:
counter += 1 if ascending else -1
self.assertEqual(result.id, counter)
custom_site.unregister(OrderedObject)
# When no order is defined at all, use the model's default ordering (i.e. 'number')
check_results_order(ascending=True)
# When an order field is defined but multiple records have the same
# value for that field, make sure everything gets ordered by -pk as well.
OrderedObjectAdmin.ordering = ['bool']
check_results_order()
# When order fields are defined, including the pk itself, use them.
OrderedObjectAdmin.ordering = ['bool', '-pk']
check_results_order()
OrderedObjectAdmin.ordering = ['bool', 'pk']
check_results_order(ascending=True)
OrderedObjectAdmin.ordering = ['-id', 'bool']
check_results_order()
OrderedObjectAdmin.ordering = ['id', 'bool']
check_results_order(ascending=True)
def test_dynamic_list_filter(self):
"""
Regression tests for ticket #17646: dynamic list_filter support.
"""
parent = Parent.objects.create(name='parent')
for i in range(10):
Child.objects.create(name='child %s' % i, parent=parent)
user_noparents = self._create_superuser('noparents')
user_parents = self._create_superuser('parents')
# Test with user 'noparents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_noparents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
# Test with user 'parents'
m = DynamicListFilterChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', user_parents)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
child = self._create_superuser('child')
m = DynamicSearchFieldsChildAdmin(Child, custom_site)
request = self._mocked_authenticated_request('/child/', child)
response = m.changelist_view(request)
self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
"""
Regression tests for ticket #15653: ensure the number of pages
generated for changelist views is correct.
"""
# instantiating and setting up ChangeList object
m = GroupAdmin(Group, custom_site)
request = self.factory.get('/group/')
cl = ChangeList(request, Group, *get_changelist_args(m))
per_page = cl.list_per_page = 10
for page_num, objects_count, expected_page_range in [
(0, per_page, []),
(0, per_page * 2, list(range(2))),
(5, per_page * 11, list(range(11))),
(5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
(6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
(6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
]:
# assuming we have exactly `objects_count` objects
Group.objects.all().delete()
for i in range(objects_count):
Group.objects.create(name='test band')
# setting page number and calculating page range
cl.page_num = page_num
cl.get_results(request)
real_page_range = pagination(cl)['page_range']
self.assertListEqual(
expected_page_range,
list(real_page_range),
)
def test_object_tools_displayed_no_add_permission(self):
"""
When ModelAdmin.has_add_permission() returns False, the object-tools
block is still shown.
"""
superuser = self._create_superuser('superuser')
m = EventAdmin(Event, custom_site)
request = self._mocked_authenticated_request('/event/', superuser)
self.assertFalse(m.has_add_permission(request))
response = m.changelist_view(request)
self.assertIn('<ul class="object-tools">', response.rendered_content)
# The "Add" button inside the object-tools shouldn't appear.
self.assertNotIn('Add ', response.rendered_content)
class AdminLogNodeTestCase(TestCase):
def test_get_admin_log_templatetag_custom_user(self):
"""
Regression test for ticket #20088: admin log depends on User model
having id field as primary key.
The old implementation raised an AttributeError when trying to use
the id field.
"""
context = Context({'user': CustomIdUser()})
template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
template = Template(template_string)
# Rendering should be '' since this templatetag just logs;
# it doesn't render any string.
self.assertEqual(template.render(context), '')
def test_get_admin_log_templatetag_no_user(self):
"""
The {% get_admin_log %} tag should work without specifying a user.
"""
user = User(username='jondoe', password='secret', email='super@example.com')
user.save()
ct = ContentType.objects.get_for_model(User)
LogEntry.objects.log_action(user.pk, ct.pk, user.pk, repr(user), 1)
t = Template(
'{% load log %}'
'{% get_admin_log 100 as admin_log %}'
'{% for entry in admin_log %}'
'{{ entry|safe }}'
'{% endfor %}'
)
self.assertEqual(t.render(Context({})), 'Added "<User: jondoe>".')
@override_settings(ROOT_URLCONF='admin_changelist.urls')
class SeleniumTests(AdminSeleniumTestCase):
available_apps = ['admin_changelist'] + AdminSeleniumTestCase.available_apps
def setUp(self):
User.objects.create_superuser(username='super', password='secret', email=None)
def test_add_row_selection(self):
"""
Ensure that the status line for selected rows gets updated correctly (#22038)
"""
self.admin_login(username='super', password='secret')
self.selenium.get(self.live_server_url + reverse('admin:auth_user_changelist'))
form_id = '#changelist-form'
# Test amount of rows in the Changelist
rows = self.selenium.find_elements_by_css_selector(
'%s #result_list tbody tr' % form_id)
self.assertEqual(len(rows), 1)
# Test current selection
selection_indicator = self.selenium.find_element_by_css_selector(
'%s .action-counter' % form_id)
self.assertEqual(selection_indicator.text, "0 of 1 selected")
# Select a row and check again
row_selector = self.selenium.find_element_by_css_selector(
'%s #result_list tbody tr:first-child .action-select' % form_id)
row_selector.click()
self.assertEqual(selection_indicator.text, "1 of 1 selected")
|
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Helper functions for Copy Number Variations (CNV).
"""
import sys
import logging
import os.path as op
import numpy as np
import numpy.ma as ma
import pandas as pd
import pysam
from collections import Counter, defaultdict
from itertools import groupby
from multiprocessing import Pool
from random import choice
from pybedtools import BedTool, cleanup, set_tempdir
from jcvi.algorithms.formula import get_kmeans
from jcvi.apps.grid import MakeManager
from jcvi.utils.aws import glob_s3, push_to_s3, sync_from_s3
from jcvi.utils.cbook import percentage
from jcvi.apps.base import OptionParser, ActionDispatcher, getfilesize, mkdir, popen, sh
autosomes = ["chr{}".format(x) for x in range(1, 23)]
sexsomes = ["chrX", "chrY"]
allsomes = autosomes + sexsomes
# See: http://www.ncbi.nlm.nih.gov/projects/genome/assembly/grc/human/
PAR = [("chrX", 10001, 2781479), ("chrX", 155701383, 156030895)]
class CopyNumberSegment(object):
def __init__(self, chr, rr, tag, mean_cn, realbins, is_PAR=False):
self.chr = chr
self.rr = rr
self.start = rr[0] * 1000
self.end = rr[1] * 1000
self.span = self.end - self.start
self.tag = tag
self.mean_cn = mean_cn
self.realbins = realbins
self.is_PAR = is_PAR
def __str__(self):
mb = self.rr / 1000.0
coords = "{}:{}-{}Mb".format(self.chr, format_float(mb[0]), format_float(mb[1]))
if self.is_PAR:
coords += ":PAR"
msg = "[{}] {} CN={} bins={}".format(
self.tag, coords, self.mean_cn, self.realbins
)
if self.realbins >= 10000: # Mark segments longer than 10K bins ~ 10Mb
msg += "*"
return msg
@property
def bedline(self):
return "\t".join(
str(x)
for x in (self.chr, self.start, self.end, self.tag, self.span, self.mean_cn)
)
class CopyNumberHMM(object):
def __init__(
self, workdir, betadir="beta", mu=0.003, sigma=10, step=0.1, threshold=0.2
):
self.model = self.initialize(mu=mu, sigma=sigma, step=step)
self.workdir = workdir
self.betadir = betadir
if not op.exists(betadir):
sync_from_s3("s3://hli-mv-data-science/htang/ccn/beta", target_dir=betadir)
self.mu = mu
self.sigma = sigma
self.step = step
self.threshold = threshold
def run(self, samplekey, chrs=allsomes):
if isinstance(chrs, str):
chrs = [chrs]
allevents = []
for chr in chrs:
X, Z, clen, events = self.run_one(samplekey, chr)
allevents.extend(events)
return allevents
def run_one(self, samplekey, chr):
cov = np.fromfile(
"{}/{}-cn/{}.{}.cn".format(self.workdir, samplekey, samplekey, chr)
)
beta = np.fromfile("beta/{}.beta".format(chr))
std = np.fromfile("beta/{}.std".format(chr))
# Check if the two arrays have different dimensions
clen, blen = cov.shape[0], beta.shape[0]
tlen = max(clen, blen)
if tlen > clen:
cov = np.array(list(cov) + [np.nan] * (tlen - clen))
elif tlen > blen:
beta = np.array(list(beta) + [np.nan] * (tlen - blen))
clen, blen = cov.shape[0], beta.shape[0]
assert clen == blen, "cov ({}) and correction ({}) not same dimension".format(
clen, blen
)
normalized = cov / beta
fixed = normalized.copy()
fixed[np.where(std > self.threshold)] = np.nan
X = fixed
Z = self.predict(X)
med_cn = np.median(fixed[np.isfinite(fixed)])
print(chr, med_cn)
# Annotate segments
segments = self.annotate_segments(Z)
events = []
for mean_cn, rr in segments:
ss = fixed[rr[0] : rr[1]]
realbins = np.sum(np.isfinite(ss))
# Determine whether this is an outlier
segment = self.tag(chr, mean_cn, rr, med_cn, realbins)
if segment:
events.append((mean_cn, rr, segment))
events.sort(key=lambda x: x[-1].start)
# Send some debug info to screen
for mean_cn, rr, segment in events:
print(segment)
return X, Z, clen, events
def tag(self, chr, mean_cn, rr, med_cn, realbins, base=2):
around_0 = around_value(mean_cn, 0)
around_1 = around_value(mean_cn, 1)
around_2 = around_value(mean_cn, 2)
if realbins <= 1: # Remove singleton bins
return
if chr == "chrX":
start, end = rr
is_PAR = end < 5000 or start > 155000
if med_cn < 1.25: # Male
# PAR ~ 2, rest ~ 1
if is_PAR:
base = 2
if around_2:
return
else:
base = 1
if around_1:
return
else:
# All ~ 2
if around_2:
return
elif chr == "chrY":
if med_cn < 0.25: # Female
base = 0
if around_0:
return
else:
base = 1
if around_1:
return
else:
if around_2:
return
tag = "DUP" if mean_cn > base else "DEL"
segment = CopyNumberSegment(chr, rr, tag, mean_cn, realbins, is_PAR=False)
return segment
def initialize(self, mu, sigma, step):
from hmmlearn import hmm
# Initial population probability
n = int(10 / step)
startprob = 1.0 / n * np.ones(n)
transmat = mu * np.ones((n, n))
np.fill_diagonal(transmat, 1 - (n - 1) * mu)
# The means of each component
means = np.arange(0, step * n, step)
means.resize((n, 1, 1))
# The covariance of each component
covars = sigma * np.ones((n, 1, 1))
# Build an HMM instance and set parameters
model = hmm.GaussianHMM(n_components=n, covariance_type="full")
# Instead of fitting it from the data, we directly set the estimated
# parameters, the means and covariance of the components
model.startprob_ = startprob
model.transmat_ = transmat
model.means_ = means
model.covars_ = covars
return model
def predict(self, X):
# Handle missing values
X = ma.masked_invalid(X)
mask = X.mask
dX = ma.compressed(X).reshape(-1, 1)
dZ = self.model.predict(dX)
Z = np.array([np.nan for _ in range(X.shape[0])])
Z[~mask] = dZ
Z = ma.masked_invalid(Z)
return Z * self.step
def annotate_segments(self, Z):
"""Report the copy number and start-end segment"""
# We need a way to go from compressed indices to original indices
P = Z.copy()
P[~np.isfinite(P)] = -1
_, mapping = np.unique(np.cumsum(P >= 0), return_index=True)
dZ = Z.compressed()
uniq, idx = np.unique(dZ, return_inverse=True)
segments = []
for i, mean_cn in enumerate(uniq):
if not np.isfinite(mean_cn):
continue
for rr in contiguous_regions(idx == i):
segments.append((mean_cn, mapping[rr]))
return segments
def plot(
self, samplekey, chrs=allsomes, color=None, dx=None, ymax=8, ms=2, alpha=0.7
):
from brewer2mpl import get_map
import matplotlib.pyplot as plt
props = dict(boxstyle="round", facecolor="wheat", alpha=0.2)
if isinstance(chrs, str):
chrs = [chrs]
f, axs = plt.subplots(1, len(chrs), sharey=True)
if not isinstance(axs, np.ndarray):
axs = np.array([axs])
plt.tight_layout()
if color is None:
color = choice(get_map("Set2", "qualitative", 8).mpl_colors)
for region, ax in zip(chrs, axs):
chr, start, end = parse_region(region)
X, Z, clen, events = self.run_one(samplekey, chr)
ax.plot(X, ".", label="observations", ms=ms, mfc=color, alpha=alpha)
ax.plot(Z, "k.", label="hidden", ms=6)
if start is None and end is None:
ax.set_xlim(0, clen)
else:
ax.set_xlim(start / 1000, end / 1000)
ax.set_ylim(0, ymax)
ax.set_xlabel("1Kb bins")
title = "{} {}".format(samplekey.split("_")[1], chr)
if dx:
title += " ({})".format(dx)
ax.set_title(title)
# The final calls
yy = 0.9
abnormal = [x for x in events if x[-1]]
if len(abnormal) > 5:
yinterval = 0.02
size = 10
else:
yinterval = 0.05
size = 12
for mean_cn, rr, event in events:
if mean_cn > ymax:
continue
ax.text(np.mean(rr), mean_cn + 0.2, mean_cn, ha="center", bbox=props)
if event is None:
continue
ax.text(
0.5,
yy,
str(event).rsplit(" ", 1)[0],
color="r",
ha="center",
transform=ax.transAxes,
size=size,
)
yy -= yinterval
axs[0].set_ylabel("Copy number")
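# Minimal driver sketch for CopyNumberHMM (illustrative only, never called in
# this module). It assumes the layout described in the hmm() action below: a
# `workdir/sample_key-cn/` folder with per-chromosome .cn files and a local
# `beta/` directory with per-bin scaling factors. The helper name and the
# single-chromosome restriction are purely for illustration.
def _demo_copy_number_hmm(workdir, sample_key):
    model = CopyNumberHMM(workdir=workdir, mu=0.003, sigma=0.1, threshold=1)
    events = model.run(sample_key, chrs="chr22")  # one chromosome keeps it fast
    # Each event is (mean_cn, (start_bin, end_bin), CopyNumberSegment)
    return [segment.bedline for _, _, segment in events if segment is not None]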
def parse_region(region):
if ":" not in region:
return region, None, None
chr, start_end = region.split(":")
start, end = start_end.split("-")
return chr, int(start), int(end)
def contiguous_regions(condition):
"""Finds contiguous True regions of the boolean array "condition". Returns
a 2D array where the first column is the start index of the region and the
second column is the end index."""
# Find the indices of changes in "condition"
d = np.diff(condition)
(idx,) = d.nonzero()
# We need to start things after the change in "condition". Therefore,
# we'll shift the index by 1 to the right.
idx += 1
if condition[0]:
# If the start of condition is True prepend a 0
idx = np.r_[0, idx]
if condition[-1]:
# If the end of condition is True, append the length of the array
idx = np.r_[idx, condition.size] # Edit
# Reshape the result into two columns
idx.shape = (-1, 2)
return idx
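# Tiny worked example of contiguous_regions (illustrative only, never called):
# runs of True in a boolean mask are reported as [start, end) index pairs.
def _demo_contiguous_regions():
    condition = np.array([False, True, True, False, True])
    return contiguous_regions(condition)  # array([[1, 3], [4, 5]])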
def format_float(f):
s = "{:.3f}".format(f)
return s.rstrip("0").rstrip(".")
def around_value(s, mu, max_dev=0.25):
return mu - max_dev < s < mu + max_dev
def main():
actions = (
("cib", "convert bam to cib"),
("coverage", "plot coverage along chromosome"),
("cn", "correct cib according to GC content"),
("mergecn", "compile matrix of GC-corrected copy numbers"),
("hmm", "run cnv segmentation"),
# Gene copy number
("exonunion", "collapse overlapping exons within the same gene"),
("gcn", "gene copy number based on Canvas results"),
("summarycanvas", "count different tags in Canvas vcf"),
# Interact with CCN script
("batchccn", "run CCN script in batch"),
("batchcn", "run HMM in batch"),
("plot", "plot some chromosomes for visual proof"),
# Benchmark, training, etc.
("sweep", "write a number of commands to sweep parameter space"),
("compare", "compare cnv output to ground truths"),
# Plots
("gcdepth", "plot GC content vs depth for genomic bins"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def gcdepth(args):
"""
%prog gcdepth sample_name tag
Plot GC content vs depth for genomic bins. Inputs are mosdepth outputs:
- NA12878_S1.mosdepth.global.dist.txt
- NA12878_S1.mosdepth.region.dist.txt
- NA12878_S1.regions.bed.gz
- NA12878_S1.regions.bed.gz.csi
- NA12878_S1.regions.gc.bed.gz
A sample mosdepth.sh script might look like:
```
#!/bin/bash
LD_LIBRARY_PATH=mosdepth/htslib/ mosdepth/mosdepth $1 \\
bams/$1.bam -t 4 -c chr1 -n --by 1000
bedtools nuc -fi GRCh38/WholeGenomeFasta/genome.fa \\
-bed $1.regions.bed.gz \\
| pigz -c > $1.regions.gc.bed.gz
```
"""
import hashlib
from jcvi.algorithms.formula import MAD_interval
from jcvi.graphics.base import latex, plt, savefig, set2
p = OptionParser(gcdepth.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
sample_name, tag = args
# The tag is added to the title and also provides a deterministic (hashed) color
coloridx = int(hashlib.sha256(tag.encode("utf-8")).hexdigest(), 16) % len(set2)
color = set2[coloridx]
# mosdepth outputs a table that we can use to plot the relationship
gcbedgz = sample_name + ".regions.gc.bed.gz"
df = pd.read_csv(gcbedgz, delimiter="\t")
mf = df.loc[:, ("4_usercol", "6_pct_gc")]
mf.columns = ["depth", "gc"]
# We discard any bins that are gaps
mf = mf[(mf["depth"] > 0.001) | (mf["gc"] > 0.001)]
# Create GC bins
gcbins = defaultdict(list)
for i, row in mf.iterrows():
gcp = int(round(row["gc"] * 100))
gcbins[gcp].append(row["depth"])
gcd = sorted((k * 0.01, MAD_interval(v)) for (k, v) in gcbins.items())
gcd_x, gcd_y = zip(*gcd)
m, lo, hi = zip(*gcd_y)
# Plot
plt.plot(
mf["gc"],
mf["depth"],
".",
color="lightslategray",
ms=2,
mec="lightslategray",
alpha=0.1,
)
patch = plt.fill_between(
gcd_x,
lo,
hi,
facecolor=color,
alpha=0.25,
zorder=10,
linewidth=0.0,
label="Median +/- MAD band",
)
plt.plot(gcd_x, m, "-", color=color, lw=2, zorder=20)
ax = plt.gca()
ax.legend(handles=[patch], loc="best")
ax.set_xlim(0, 1)
ax.set_ylim(0, 100)
ax.set_title("{} ({})".format(latex(sample_name), tag))
ax.set_xlabel("GC content")
ax.set_ylabel("Depth")
savefig(sample_name + ".gcdepth.png")
def exonunion(args):
"""
%prog exonunion gencode.v26.annotation.exon.bed
Collapse overlapping exons within the same gene. File
`gencode.v26.annotation.exon.bed` can be generated by:
$ zcat gencode.v26.annotation.gtf.gz | awk 'OFS="\t" {if ($3=="exon")
{print $1,$4-1,$5,$10,$12,$14,$16,$7}}' | tr -d '";'
"""
p = OptionParser(exonunion.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(gencodebed,) = args
beds = BedTool(gencodebed)
# fields[3] is gene_id; fields[6] is gene_name
for g, gb in groupby(beds, key=lambda x: x.fields[3]):
gb = BedTool(gb)
sys.stdout.write(str(gb.sort().merge(c="4,5,6,7", o=",".join(["first"] * 4))))
def get_gain_loss_summary(vcffile):
"""Extract Canvas:GAIN/LOSS/REF/LOH tags"""
from cyvcf2 import VCF
counter = Counter()
for v in VCF(vcffile):
tag = v.ID.split(":")[1]
counter[tag] += 1
return counter
def summarycanvas(args):
"""
%prog summarycanvas output.vcf.gz
Generate tag counts (GAIN/LOSS/REF/LOH) of segments in Canvas output.
"""
p = OptionParser(summarycanvas.__doc__)
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
for vcffile in args:
counter = get_gain_loss_summary(vcffile)
pf = op.basename(vcffile).split(".")[0]
print(
pf
+ " "
+ " ".join("{}:{}".format(k, v) for k, v in sorted(counter.items()))
)
def parse_segments(vcffile):
"""Extract all copy number segments from a CANVAS file
VCF line looks like:
chr1 788879 Canvas:GAIN:chr1:788880-821005 N <CNV> 2 q10
SVTYPE=CNV;END=821005;CNVLEN=32126 RC:BC:CN:MCC 157:4:3:2
"""
from io import StringIO
from cyvcf2 import VCF
output = StringIO()
for v in VCF(vcffile):
chrom = v.CHROM
start = v.start
end = v.INFO.get("END") - 1
(cn,) = v.format("CN")[0]
print("\t".join(str(x) for x in (chrom, start, end, cn)), file=output)
beds = BedTool(output.getvalue(), from_string=True)
return beds
def counter_mean_and_median(counter):
"""Calculate the mean and median value of a counter"""
if not counter:
return np.nan, np.nan
total = sum(v for k, v in counter.items())
mid = total / 2
weighted_sum = 0
items_seen = 0
median_found = False
for k, v in sorted(counter.items()):
weighted_sum += k * v
items_seen += v
if not median_found and items_seen >= mid:
median = k
median_found = True
mean = weighted_sum * 1.0 / total
return mean, median
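# Quick illustrative check of counter_mean_and_median (not part of the
# pipeline): a Counter mapping copy number -> bases observed at that copy number.
def _demo_counter_mean_and_median():
    counter = Counter({2: 200, 3: 100})  # 200 bases at CN=2, 100 at CN=3
    return counter_mean_and_median(counter)  # (~2.33, 2)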
def counter_format(counter):
"""Pretty print a counter so that it appears as: "2:200,3:100,4:20" """
if not counter:
return "na"
return ",".join("{}:{}".format(*z) for z in sorted(counter.items()))
def gcn(args):
"""
%prog gcn gencode.v26.exonunion.bed data/*.vcf.gz
Compile gene copy number based on CANVAS results.
"""
p = OptionParser(gcn.__doc__)
p.set_cpus()
p.set_tmpdir(tmpdir="tmp")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
exonbed = args[0]
canvasvcfs = args[1:]
tsvfile = opts.outfile
tmpdir = opts.tmpdir
mkdir(tmpdir)
set_tempdir(tmpdir)
df = vcf_to_df(canvasvcfs, exonbed, opts.cpus)
for suffix in (".avgcn", ".medcn"):
df_to_tsv(df, tsvfile, suffix)
def vcf_to_df_worker(arg):
"""Convert CANVAS vcf to a dict, single thread"""
canvasvcf, exonbed, i = arg
logging.debug("Working on job {}: {}".format(i, canvasvcf))
samplekey = op.basename(canvasvcf).split(".")[0].rsplit("_", 1)[0]
d = {"SampleKey": samplekey}
exons = BedTool(exonbed)
cn = parse_segments(canvasvcf)
overlaps = exons.intersect(cn, wao=True)
gcn_store = {}
for ov in overlaps:
# Example of ov.fields:
# [u'chr1', u'11868', u'12227', u'ENSG00000223972.5',
# u'ENST00000456328.2', u'transcribed_unprocessed_pseudogene',
# u'DDX11L1', u'.', u'-1', u'-1', u'.', u'0']
gene_name = "|".join((ov.fields[6], ov.fields[3], ov.fields[5]))
if gene_name not in gcn_store:
gcn_store[gene_name] = defaultdict(int)
cn = ov.fields[-2]
if cn == ".":
continue
cn = int(cn)
if cn > 10:
cn = 10
amt = int(ov.fields[-1])
gcn_store[gene_name][cn] += amt
for k, v in sorted(gcn_store.items()):
v_mean, v_median = counter_mean_and_median(v)
d[k + ".avgcn"] = v_mean
d[k + ".medcn"] = v_median
cleanup()
return d
def vcf_to_df(canvasvcfs, exonbed, cpus):
"""Compile a number of vcf files into tsv file for easy manipulation"""
df = pd.DataFrame()
p = Pool(processes=cpus)
results = []
args = [(x, exonbed, i) for (i, x) in enumerate(canvasvcfs)]
r = p.map_async(vcf_to_df_worker, args, callback=results.append)
r.wait()
for res in results:
df = df.append(res, ignore_index=True)
return df
def df_to_tsv(df, tsvfile, suffix):
"""Serialize the dataframe as a tsv"""
tsvfile += suffix
columns = ["SampleKey"] + sorted(x for x in df.columns if x.endswith(suffix))
tf = df.reindex(columns=columns)
tf = tf.sort_values("SampleKey")
tf.to_csv(tsvfile, sep="\t", index=False, float_format="%.4g", na_rep="na")
print(
"TSV output written to `{}` (# samples={})".format(tsvfile, tf.shape[0]),
file=sys.stderr,
)
def coverage(args):
"""
%prog coverage *.coverage
Plot coverage along chromosome. The coverage file can be generated with:
$ samtools depth a.bam > a.coverage
The plot is a simple line plot using matplotlib.
"""
from jcvi.graphics.base import savefig
p = OptionParser(coverage.__doc__)
opts, args, iopts = p.set_image_options(args, format="png")
if len(args) != 1:
sys.exit(not p.print_help())
(covfile,) = args
df = pd.read_csv(covfile, sep="\t", names=["Ref", "Position", "Depth"])
xlabel, ylabel = "Position", "Depth"
df.plot(xlabel, ylabel, color="g")
image_name = covfile + "." + iopts.format
savefig(image_name)
def plot(args):
"""
%prog plot workdir sample chr1,chr2
Plot some chromosomes for visual proof. Separate multiple chromosomes with
commas. The folder workdir/sample-cn/ must exist.
"""
from jcvi.graphics.base import savefig
p = OptionParser(plot.__doc__)
opts, args, iopts = p.set_image_options(args, figsize="8x7", format="png")
if len(args) != 3:
sys.exit(not p.print_help())
workdir, sample_key, chrs = args
chrs = chrs.split(",")
hmm = CopyNumberHMM(workdir=workdir)
hmm.plot(sample_key, chrs=chrs)
image_name = sample_key + "_cn." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def sweep(args):
"""
%prog sweep workdir 102340_NA12878
Write a number of commands to sweep parameter space.
"""
p = OptionParser(sweep.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, sample_key = args
golden_ratio = (1 + 5 ** 0.5) / 2
cmd = "python -m jcvi.variation.cnv hmm {} {}".format(workdir, sample_key)
cmd += " --mu {:.5f} --sigma {:.3f} --threshold {:.3f}"
mus = [0.00012 * golden_ratio ** x for x in range(10)]
sigmas = [0.0012 * golden_ratio ** x for x in range(20)]
thresholds = [0.1 * golden_ratio ** x for x in range(10)]
print(mus, file=sys.stderr)
print(sigmas, file=sys.stderr)
print(thresholds, file=sys.stderr)
for mu in mus:
for sigma in sigmas:
for threshold in thresholds:
tcmd = cmd.format(mu, sigma, threshold)
print(tcmd)
def compare_worker(arg):
cnvoutput, truths = arg
cmd = "intersectBed -f .5 -F .5"
cmd += " -a {} -b {} | wc -l".format(cnvoutput, truths)
nlines = int(popen(cmd, debug=False).read())
target_lines = len([x for x in open(cnvoutput)])
truths_lines = len([x for x in open(truths)])
precision = nlines * 100.0 / target_lines
recall = nlines * 100.0 / truths_lines
d = "\t".join(
str(x)
for x in (
cnvoutput,
truths,
nlines,
target_lines,
truths_lines,
precision,
recall,
)
)
return d
def compare(args):
"""
%prog compare NA12878_array_hg38.bed *.seg
Compare cnv output to known ground truths.
"""
p = OptionParser(compare.__doc__)
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
truths = args[0]
cnvoutputs = args[1:]
cpus = min(len(cnvoutputs), opts.cpus)
p = Pool(processes=cpus)
results = []
files = [(x, truths) for x in cnvoutputs]
r = p.map_async(compare_worker, files, callback=results.append)
r.wait()
for res in results:
print("\n".join(res))
def bam_to_cib(arg):
bamfile, seq, samplekey = arg
bam = pysam.AlignmentFile(bamfile, "rb")
name, length = seq["SN"], seq["LN"]
logging.debug("Computing depth for {} (length={})".format(name, length))
pileup = bam.pileup(name)
a = np.ones(length, dtype=np.int8) * -128
for x in pileup:
a[x.reference_pos] = min(x.nsegments, 255) - 128
cibfile = op.join(samplekey, "{}.{}.cib".format(samplekey, name))
a.tofile(cibfile)
logging.debug("Depth written to `{}`".format(cibfile))
def cib(args):
"""
%prog cib bamfile samplekey
Convert BAM to CIB (a binary storage of int8 per base).
"""
p = OptionParser(cib.__doc__)
p.add_option("--prefix", help="Report seqids with this prefix only")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bamfile, samplekey = args
mkdir(samplekey)
bam = pysam.AlignmentFile(bamfile, "rb")
refs = [x for x in bam.header["SQ"]]
prefix = opts.prefix
if prefix:
refs = [x for x in refs if x["SN"].startswith(prefix)]
task_args = []
for r in refs:
task_args.append((bamfile, r, samplekey))
cpus = min(opts.cpus, len(task_args))
logging.debug("Use {} cpus".format(cpus))
p = Pool(processes=cpus)
for _ in p.imap(bam_to_cib, task_args):
continue
def batchcn(args):
"""
%prog batchcn workdir samples.csv
Run CNV segmentation caller in batch mode. Scans a workdir.
"""
p = OptionParser(batchcn.__doc__)
p.add_option(
"--upload",
default="s3://hli-mv-data-science/htang/ccn",
help="Upload cn and seg results to s3",
)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, samples = args
upload = opts.upload
store = upload + "/{}/*.seg".format(workdir)
computed = [op.basename(x).split(".")[0] for x in glob_s3(store)]
computed = set(computed)
# Generate a bunch of cn commands
fp = open(samples)
nskipped = ntotal = 0
cmd = "python -m jcvi.variation.cnv cn --hmm --cleanup {}".format(workdir)
for row in fp:
samplekey, path = row.strip().split(",")
ntotal += 1
if samplekey in computed:
nskipped += 1
continue
print(" ".join((cmd, samplekey, path)))
logging.debug("Skipped: {}".format(percentage(nskipped, ntotal)))
def hmm(args):
"""
%prog hmm workdir sample_key
Run the CNV segmentation caller. The workdir must contain a subfolder called
`sample_key-cn` that holds the CN profile for each chromosome. A `beta`
directory that contains the scaling factor for each bin must also be present
in the current directory.
"""
p = OptionParser(hmm.__doc__)
p.add_option("--mu", default=0.003, type="float", help="Transition probability")
p.add_option(
"--sigma",
default=0.1,
type="float",
help="Standard deviation of Gaussian emission distribution",
)
p.add_option(
"--threshold",
default=1,
type="float",
help="Standard deviation must be < this in the baseline population",
)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
workdir, sample_key = args
model = CopyNumberHMM(
workdir=workdir, mu=opts.mu, sigma=opts.sigma, threshold=opts.threshold
)
events = model.run(sample_key)
params = ".mu-{}.sigma-{}.threshold-{}".format(opts.mu, opts.sigma, opts.threshold)
hmmfile = op.join(workdir, sample_key + params + ".seg")
fw = open(hmmfile, "w")
nevents = 0
for mean_cn, rr, event in events:
if event is None:
continue
print(" ".join((event.bedline, sample_key)), file=fw)
nevents += 1
fw.close()
logging.debug(
"A total of {} aberrant events written to `{}`".format(nevents, hmmfile)
)
return hmmfile
def batchccn(args):
"""
%prog batchccn test.csv
Run CCN script in batch. Write makefile.
"""
p = OptionParser(batchccn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(csvfile,) = args
mm = MakeManager()
pf = op.basename(csvfile).split(".")[0]
mkdir(pf)
header = next(open(csvfile))
header = None if header.strip().endswith(".bam") else "infer"
logging.debug("Header={}".format(header))
df = pd.read_csv(csvfile, header=header)
cmd = "perl /mnt/software/ccn_gcn_hg38_script/ccn_gcn_hg38.pl"
cmd += " -n {} -b {}"
cmd += " -o {} -r hg38".format(pf)
for i, (sample_key, bam) in df.iterrows():
cmdi = cmd.format(sample_key, bam)
outfile = "{}/{}/{}.ccn".format(pf, sample_key, sample_key)
mm.add(csvfile, outfile, cmdi)
mm.write()
def mergecn(args):
"""
%prog mergecn FACE.csv
Compile a matrix of GC-corrected copy numbers. Place a list of folders in the
csv file. Each folder will be scanned, one chromosome after another.
"""
p = OptionParser(mergecn.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(csvfile,) = args
samples = [x.replace("-cn", "").strip().strip("/") for x in open(csvfile)]
betadir = "beta"
mkdir(betadir)
for seqid in allsomes:
names = [
op.join(s + "-cn", "{}.{}.cn".format(op.basename(s), seqid))
for s in samples
]
arrays = [np.fromfile(name, dtype=np.float64) for name in names]
shapes = [x.shape[0] for x in arrays]
med_shape = np.median(shapes)
arrays = [x for x in arrays if x.shape[0] == med_shape]
ploidy = 2 if seqid not in ("chrY", "chrM") else 1
if seqid in sexsomes:
chr_med = [np.median([x for x in a if x > 0]) for a in arrays]
chr_med = np.array(chr_med)
idx = get_kmeans(chr_med, k=2)
zero_med = np.median(chr_med[idx == 0])
one_med = np.median(chr_med[idx == 1])
logging.debug(
"K-means with {} c0:{} c1:{}".format(seqid, zero_med, one_med)
)
higher_idx = 1 if one_med > zero_med else 0
# Use the higher mean coverage component
arrays = np.array(arrays)[idx == higher_idx]
arrays = [[x] for x in arrays]
ar = np.concatenate(arrays)
print(seqid, ar.shape)
rows, columns = ar.shape
beta = []
std = []
for j in range(columns):
a = ar[:, j]
beta.append(np.median(a))
std.append(np.std(a) / np.mean(a))
beta = np.array(beta) / ploidy
betafile = op.join(betadir, "{}.beta".format(seqid))
beta.tofile(betafile)
stdfile = op.join(betadir, "{}.std".format(seqid))
std = np.array(std)
std.tofile(stdfile)
logging.debug("Written to `{}`".format(betafile))
ar.tofile("{}.bin".format(seqid))
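# Illustrative sketch of the sex-chromosome split above (toy numbers, never
# called): chrX median coverages form two clusters (roughly 2x apart between
# sexes), and get_kmeans() is used exactly as in mergecn() to keep the
# higher-coverage component.
def _demo_split_by_coverage():
    chr_med = np.array([0.9, 1.0, 1.1, 1.9, 2.0, 2.1])
    idx = get_kmeans(chr_med, k=2)
    zero_med = np.median(chr_med[idx == 0])
    one_med = np.median(chr_med[idx == 1])
    higher_idx = 1 if one_med > zero_med else 0
    return chr_med[idx == higher_idx]  # the higher-coverage component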
def is_matching_gz(origfile, gzfile):
if not op.exists(origfile):
return False
if not op.exists(gzfile):
return False
return getfilesize(origfile) == getfilesize(gzfile)
def load_cib(cibfile, n=1000):
cibgzfile = cibfile + ".gz"
# Try to unzip if the cib is not found, or if the cib does not match the cib.gz
if not op.exists(cibfile) or not is_matching_gz(cibfile, cibgzfile):
if op.exists(cibgzfile):
cibfile = cibgzfile
if cibfile.endswith(".gz"):
sh("pigz -d -k -f {}".format(cibfile))
cibfile = cibfile.replace(".gz", "")
if not op.exists(cibfile):
return
cib = np.fromfile(cibfile, dtype=np.int8) + 128
rm = pd.Series(cib).rolling(n, min_periods=n // 2).mean().values
a = rm[n - 1 :: n].copy()
del cib
del rm
return a
def build_gc_array(fastafile="/mnt/ref/hg38.upper.fa", gcdir="gc", n=1000):
from pyfasta import Fasta
f = Fasta(fastafile)
mkdir(gcdir)
for seqid in allsomes:
if seqid not in f:
logging.debug("Seq {} not found. Continue anyway.".format(seqid))
continue
c = np.array(f[seqid])
gc = (c == "G") | (c == "C") # If base is GC
rr = ~(c == "N") # If base is real
mgc = pd.Series(gc).rolling(n, min_periods=n // 2).sum().values[n - 1 :: n]
mrr = pd.Series(rr).rolling(n, min_periods=n // 2).sum().values[n - 1 :: n]
gc_pct = np.rint(mgc * 100 / mrr)
gc_pct = np.asarray(gc_pct, dtype=np.uint8)
arfile = op.join(gcdir, "{}.{}.gc".format(seqid, n))
gc_pct.tofile(arfile)
print(seqid, gc_pct, arfile, file=sys.stderr)
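# Illustrative sketch of the per-bin GC computation done in build_gc_array
# (toy sequence, single bin; not called anywhere): count G/C among the non-N
# bases in a bin and express it as a rounded percentage.
def _demo_gc_pct(seq="ACGTNNGGCC"):
    c = np.array(list(seq))
    gc = (c == "G") | (c == "C")  # G or C bases
    rr = ~(c == "N")  # real (non-N) bases
    return np.rint(gc.sum() * 100 / rr.sum())  # 75.0 -> 6 GC out of 8 real bases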
def cn(args):
"""
%prog cn workdir 102340_NA12878 \
s3://hli-bix-us-west-2/kubernetes/wf-root-test/102340_NA12878/lpierce-ccn_gcn-v2/
Download CCN output folder and convert cib to copy number per 1Kb.
"""
p = OptionParser(cn.__doc__)
p.add_option(
"--binsize", default=1000, type="int", help="Window size along chromosome"
)
p.add_option(
"--cleanup",
default=False,
action="store_true",
help="Clean up downloaded s3 folder",
)
p.add_option(
"--hmm",
default=False,
action="store_true",
help="Run HMM caller after computing CN",
)
p.add_option(
"--upload",
default="s3://hli-mv-data-science/htang/ccn",
help="Upload cn and seg results to s3",
)
p.add_option("--rebuildgc", help="Rebuild GC directory rather than pulling from S3")
opts, args = p.parse_args(args)
if len(args) == 2:
workdir, sample_key = args
s3dir = None
elif len(args) == 3:
workdir, sample_key, s3dir = args
else:
sys.exit(not p.print_help())
n = opts.binsize
rebuildgc = opts.rebuildgc
mkdir(workdir)
sampledir = op.join(workdir, sample_key)
if s3dir:
sync_from_s3(s3dir, target_dir=sampledir)
assert op.exists(sampledir), "Directory {} doesn't exist!".format(sampledir)
cndir = op.join(workdir, sample_key + "-cn")
if op.exists(cndir):
logging.debug("Directory {} exists. Skipped.".format(cndir))
return
gcdir = "gc"
if rebuildgc:
build_gc_array(fastafile=rebuildgc, n=n, gcdir=gcdir)
if not op.exists(gcdir):
sync_from_s3("s3://hli-mv-data-science/htang/ccn/gc", target_dir=gcdir)
# Build GC correction table
gc_bin = defaultdict(list)
gc_med = {}
coverage = []
for seqid in allsomes:
gcfile = op.join(gcdir, "{}.{}.gc".format(seqid, n))
if not op.exists(gcfile):
logging.error("File {} not found. Continue anyway.".format(gcfile))
continue
gc = np.fromfile(gcfile, dtype=np.uint8)
cibfile = op.join(sampledir, "{}.{}.cib".format(sample_key, seqid))
cib = load_cib(cibfile)
print(seqid, gc.shape[0], cib.shape[0], file=sys.stderr)
if seqid in autosomes:
for gci, k in zip(gc, cib):
gc_bin[gci].append(k)
coverage.append((seqid, gc, cib))
for gci, k in gc_bin.items():
nonzero_k = [x for x in k if x]
gc_med[gci] = med = np.median(nonzero_k) / 2
print(gci, len(nonzero_k), med, file=sys.stderr)
mkdir(cndir)
apply_fun = np.vectorize(gc_med.get)
# Apply the GC correction over coverage
for seqid, gc, cib in coverage:
nitems = cib.shape[0]
beta = apply_fun(gc[:nitems])
beta_cn = cib / beta
cnfile = op.join(cndir, "{}.{}.cn".format(sample_key, seqid))
beta_cn.tofile(cnfile)
# Run HMM caller if asked
segfile = hmm([workdir, sample_key]) if opts.hmm else None
upload = opts.upload
if upload:
push_to_s3(upload, cndir)
if segfile:
push_to_s3(upload, segfile)
if opts.cleanup:
import shutil
shutil.rmtree(sampledir)
shutil.rmtree(cndir)
if __name__ == "__main__":
main()
|
|
from typing import Optional
import numpy as np
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_downcast_numeric
from pandas.core.dtypes.common import (
ensure_object,
is_datetime_or_timedelta_dtype,
is_decimal,
is_integer_dtype,
is_number,
is_numeric_dtype,
is_scalar,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCIndex,
ABCSeries,
)
import pandas as pd
from pandas.core.arrays.numeric import NumericArray
def to_numeric(arg, errors="raise", downcast=None):
"""
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
to obtain other dtypes.
Please note that precision loss may occur if really large numbers
are passed in. Due to the internal limitations of `ndarray`, if
numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
passed in, it is very likely they will be converted to float so that
they can stored in an `ndarray`. These warnings apply similarly to
`Series` since it internally leverages `ndarray`.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Argument to be converted.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaN.
- If 'ignore', then invalid parsing will return the input.
downcast : {'integer', 'signed', 'unsigned', 'float'}, default None
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
possible according to the following rules:
- 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
- 'unsigned': smallest unsigned int dtype (min.: np.uint8)
- 'float': smallest float dtype (min.: np.float32)
As this behaviour is separate from the core conversion to
numeric values, any errors raised during the downcasting
will be surfaced regardless of the value of the 'errors' input.
In addition, downcasting will only occur if the size
of the resulting data's dtype is strictly larger than
the dtype it is to be cast to, so if none of the dtypes
checked satisfy that specification, no downcasting will be
performed on the data.
Returns
-------
ret
Numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
DataFrame.convert_dtypes : Convert dtypes.
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
>>> pd.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 -3.0
dtype: float32
>>> pd.to_numeric(s, downcast='signed')
0 1
1 2
2 -3
dtype: int8
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='ignore')
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> pd.to_numeric(s, errors='coerce')
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float64
Downcasting of nullable integer and floating dtypes is supported:
>>> s = pd.Series([1, 2, 3], dtype="Int64")
>>> pd.to_numeric(s, downcast="integer")
0 1
1 2
2 3
dtype: Int8
>>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64")
>>> pd.to_numeric(s, downcast="float")
0 1.0
1 2.1
2 3.0
dtype: Float32
"""
if downcast not in (None, "integer", "signed", "unsigned", "float"):
raise ValueError("invalid downcasting method provided")
if errors not in ("ignore", "raise", "coerce"):
raise ValueError("invalid error value specified")
is_series = False
is_index = False
is_scalars = False
if isinstance(arg, ABCSeries):
is_series = True
values = arg.values
elif isinstance(arg, ABCIndex):
is_index = True
if needs_i8_conversion(arg.dtype):
values = arg.asi8
else:
values = arg.values
elif isinstance(arg, (list, tuple)):
values = np.array(arg, dtype="O")
elif is_scalar(arg):
if is_decimal(arg):
return float(arg)
if is_number(arg):
return arg
is_scalars = True
values = np.array([arg], dtype="O")
elif getattr(arg, "ndim", 1) > 1:
raise TypeError("arg must be a list, tuple, 1-d array, or Series")
else:
values = arg
# GH33013: for IntegerArray & FloatingArray extract non-null values for casting
# save mask to reconstruct the full array after casting
mask: Optional[np.ndarray] = None
if isinstance(values, NumericArray):
mask = values._mask
values = values._data[~mask]
values_dtype = getattr(values, "dtype", None)
if is_numeric_dtype(values_dtype):
pass
elif is_datetime_or_timedelta_dtype(values_dtype):
values = values.view(np.int64)
else:
values = ensure_object(values)
coerce_numeric = errors not in ("ignore", "raise")
try:
values, _ = lib.maybe_convert_numeric(
values, set(), coerce_numeric=coerce_numeric
)
except (ValueError, TypeError):
if errors == "raise":
raise
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
if downcast is not None and is_numeric_dtype(values.dtype):
typecodes = None
if downcast in ("integer", "signed"):
typecodes = np.typecodes["Integer"]
elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0):
typecodes = np.typecodes["UnsignedInteger"]
elif downcast == "float":
typecodes = np.typecodes["Float"]
# pandas support goes only to np.float32,
# as float dtypes smaller than that are
# extremely rare and not well supported
float_32_char = np.dtype(np.float32).char
float_32_ind = typecodes.index(float_32_char)
typecodes = typecodes[float_32_ind:]
if typecodes is not None:
# from smallest to largest
for dtype in typecodes:
dtype = np.dtype(dtype)
if dtype.itemsize <= values.dtype.itemsize:
values = maybe_downcast_numeric(values, dtype)
# successful conversion
if values.dtype == dtype:
break
# GH33013: for IntegerArray & FloatingArray need to reconstruct masked array
if mask is not None:
data = np.zeros(mask.shape, dtype=values.dtype)
data[~mask] = values
from pandas.core.arrays import (
FloatingArray,
IntegerArray,
)
klass = IntegerArray if is_integer_dtype(data.dtype) else FloatingArray
values = klass(data, mask.copy())
if is_series:
return arg._constructor(values, index=arg.index, name=arg.name)
elif is_index:
# because we want to coerce to numeric if possible,
# do not use _shallow_copy
return pd.Index(values, name=arg.name)
elif is_scalars:
return values[0]
else:
return values
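# Hedged usage sketch (not part of the original module; assumes the numpy/lib imports
# that sit at the top of the real pandas file): a quick manual check of the list and
# scalar code paths, complementing the Series examples in the docstring above.
if __name__ == "__main__":
    # 'badger' cannot be parsed, so errors="coerce" turns it into NaN
    print(to_numeric(["1.5", "7", "badger"], errors="coerce"))
    # a scalar string round-trips to a plain number; downcast picks the smallest dtype
    print(to_numeric("42", downcast="unsigned"))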
|
|
# Copyright (c) 2007-2009 The PyAMF Project.
# See LICENSE.txt for details.
"""
Google App Engine adapter module.
Sets up basic type mapping and class mappings for using the Datastore API
in Google App Engine.
@see: U{Datastore API on Google App Engine (external)
<http://code.google.com/appengine/docs/datastore>}
@since: 0.3.1
"""
from google.appengine.ext import db
from google.appengine.ext.db import polymodel
import datetime
import pyamf
from pyamf.util import imports
from pyamf.adapters import util
class ModelStub(object):
"""
This class represents a L{db.Model} or L{db.Expando} class as the typed
object is being read from the AMF stream. Once the attributes have been
read from the stream and through the magic of Python, the instance of this
class will be converted into the correct type.
@ivar klass: The referenced class either L{db.Model} or L{db.Expando}.
This is used so we can proxy some of the method calls during decoding.
@type klass: L{db.Model} or L{db.Expando}
@see: L{DataStoreClassAlias.applyAttributes}
"""
def __init__(self, klass):
self.klass = klass
def properties(self):
return self.klass.properties()
def dynamic_properties(self):
return []
class GAEReferenceCollection(dict):
"""
This helper class holds a dict of klass to key/objects loaded from the
Datastore.
@since: 0.4.1
"""
def _getClass(self, klass):
if not issubclass(klass, (db.Model, db.Expando)):
raise TypeError('expected db.Model/db.Expando class, got %s' % (klass,))
if klass not in self.keys():
self[klass] = {}
return self[klass]
def getClassKey(self, klass, key):
"""
Return an instance based on klass/key.
If an instance cannot be found then L{KeyError} is raised.
@param klass: The class of the instance.
@param key: The key of the instance.
@return: The instance linked to the C{klass}/C{key}.
@rtype: Instance of L{klass}.
"""
if not isinstance(key, basestring):
            raise TypeError('basestring type expected for key, got %s' % (repr(key),))
d = self._getClass(klass)
return d[key]
def addClassKey(self, klass, key, obj):
"""
Adds an object to the collection, based on klass and key.
@param klass: The class of the object.
@param key: The datastore key of the object.
@param obj: The loaded instance from the datastore.
"""
if not isinstance(key, basestring):
            raise TypeError('basestring type expected for key, got %s' % (repr(key),))
d = self._getClass(klass)
d[key] = obj
class DataStoreClassAlias(pyamf.ClassAlias):
"""
    This class contains all the business logic to interact with Google's
    Datastore APIs. Any L{db.Model} or L{db.Expando} classes will use this
    class alias for encoding/decoding.
    We also add a number of indexes to the encoder context to aggressively
    decrease the number of Datastore API calls that we need to make.
"""
# The name of the attribute used to represent the key
KEY_ATTR = '_key'
def _compile_base_class(self, klass):
if klass in (db.Model, polymodel.PolyModel):
return
pyamf.ClassAlias._compile_base_class(self, klass)
def getCustomProperties(self):
props = [self.KEY_ATTR]
self.reference_properties = {}
self.properties = {}
reverse_props = []
for name, prop in self.klass.properties().iteritems():
self.properties[name] = prop
props.append(name)
if isinstance(prop, db.ReferenceProperty):
self.reference_properties[name] = prop
if issubclass(self.klass, polymodel.PolyModel):
del self.properties['_class']
props.remove('_class')
        # check if the property is defined as a collection_name. These types
# of properties are read-only and the datastore freaks out if you
# attempt to meddle with it. We delete the attribute entirely ..
for name, value in self.klass.__dict__.iteritems():
if isinstance(value, db._ReverseReferenceProperty):
reverse_props.append(name)
self.static_attrs.update(props)
self.encodable_properties.update(self.properties.keys())
self.decodable_properties.update(self.properties.keys())
self.readonly_attrs.update(reverse_props)
if not self.reference_properties:
self.reference_properties = None
if not self.properties:
self.properties = None
def getEncodableAttributes(self, obj, codec=None):
sa, da = pyamf.ClassAlias.getEncodableAttributes(self, obj, codec=codec)
sa[self.KEY_ATTR] = str(obj.key()) if obj.is_saved() else None
gae_objects = getGAEObjects(codec.context) if codec else None
if self.reference_properties and gae_objects:
for name, prop in self.reference_properties.iteritems():
klass = prop.reference_class
key = prop.get_value_for_datastore(obj)
if not key:
continue
key = str(key)
try:
sa[name] = gae_objects.getClassKey(klass, key)
except KeyError:
ref_obj = getattr(obj, name)
gae_objects.addClassKey(klass, key, ref_obj)
sa[name] = ref_obj
if da:
for k, v in da.copy().iteritems():
if k.startswith('_'):
del da[k]
if not da:
da = {}
for attr in obj.dynamic_properties():
da[attr] = getattr(obj, attr)
if not da:
da = None
return sa, da
def createInstance(self, codec=None):
return ModelStub(self.klass)
def getDecodableAttributes(self, obj, attrs, codec=None):
try:
key = attrs[self.KEY_ATTR]
except KeyError:
key = attrs[self.KEY_ATTR] = None
attrs = pyamf.ClassAlias.getDecodableAttributes(self, obj, attrs, codec=codec)
del attrs[self.KEY_ATTR]
new_obj = None
# attempt to load the object from the datastore if KEY_ATTR exists.
if key and codec:
new_obj = loadInstanceFromDatastore(self.klass, key, codec)
# clean up the stub
if isinstance(obj, ModelStub) and hasattr(obj, 'klass'):
del obj.klass
if new_obj:
obj.__dict__ = new_obj.__dict__.copy()
obj.__class__ = self.klass
apply_init = True
if self.properties:
for k in [k for k in attrs.keys() if k in self.properties.keys()]:
prop = self.properties[k]
v = attrs[k]
if isinstance(prop, db.FloatProperty) and isinstance(v, (int, long)):
attrs[k] = float(v)
elif isinstance(prop, db.ListProperty) and v is None:
attrs[k] = []
elif isinstance(v, datetime.datetime):
# Date/Time Property fields expect specific types of data
# whereas PyAMF only decodes into datetime.datetime objects.
if isinstance(prop, db.DateProperty):
attrs[k] = v.date()
elif isinstance(prop, db.TimeProperty):
attrs[k] = v.time()
if new_obj is None and isinstance(v, ModelStub) and prop.required and k in self.reference_properties:
apply_init = False
del attrs[k]
# If the object does not exist in the datastore, we must fire the
# class constructor. This sets internal attributes that pyamf has
# no business messing with ..
if new_obj is None and apply_init is True:
obj.__init__(**attrs)
return attrs
def getGAEObjects(context):
"""
Returns a reference to the C{gae_objects} on the context. If it doesn't
exist then it is created.
@param context: The context to load the C{gae_objects} index from.
@type context: Instance of L{pyamf.BaseContext}
@return: The C{gae_objects} index reference.
@rtype: Instance of L{GAEReferenceCollection}
@since: 0.4.1
"""
if not hasattr(context, 'gae_objects'):
context.gae_objects = GAEReferenceCollection()
return context.gae_objects
def loadInstanceFromDatastore(klass, key, codec=None):
"""
Attempt to load an instance from the datastore, based on C{klass}
and C{key}. We create an index on the codec's context (if it exists)
so we can check that first before accessing the datastore.
@param klass: The class that will be loaded from the datastore.
@type klass: Sub-class of L{db.Model} or L{db.Expando}
@param key: The key which is used to uniquely identify the instance in the
datastore.
@type key: C{str}
@param codec: The codec to reference the C{gae_objects} index. If
    supplied, the codec must have a context attribute.
@type codec: Instance of L{pyamf.BaseEncoder} or L{pyamf.BaseDecoder}
@return: The loaded instance from the datastore.
@rtype: Instance of C{klass}.
@since: 0.4.1
"""
if not issubclass(klass, (db.Model, db.Expando)):
raise TypeError('expected db.Model/db.Expando class, got %s' % (klass,))
if not isinstance(key, basestring):
        raise TypeError('string expected for key, got %s' % (repr(key),))
key = str(key)
if codec is None:
return klass.get(key)
gae_objects = getGAEObjects(codec.context)
try:
return gae_objects.getClassKey(klass, key)
except KeyError:
pass
obj = klass.get(key)
gae_objects.addClassKey(klass, key, obj)
return obj
def writeGAEObject(self, object, *args, **kwargs):
"""
The GAE Datastore creates new instances of objects for each get request.
This is a problem for PyAMF as it uses the id(obj) of the object to do
reference checking.
We could just ignore the problem, but the objects are conceptually the
same so the effort should be made to attempt to resolve references for a
given object graph.
We create a new map on the encoder context object which contains a dict of
C{object.__class__: {key1: object1, key2: object2, .., keyn: objectn}}. We
use the datastore key to do the reference checking.
@since: 0.4.1
"""
if not (isinstance(object, db.Model) and object.is_saved()):
self.writeNonGAEObject(object, *args, **kwargs)
return
context = self.context
kls = object.__class__
s = str(object.key())
gae_objects = getGAEObjects(context)
try:
referenced_object = gae_objects.getClassKey(kls, s)
except KeyError:
referenced_object = object
gae_objects.addClassKey(kls, s, object)
self.writeNonGAEObject(referenced_object, *args, **kwargs)
def install_gae_reference_model_hook(mod):
"""
Called when L{pyamf.amf0} or L{pyamf.amf3} are imported. Attaches the
L{writeGAEObject} method to the C{Encoder} class in that module.
@param mod: The module imported.
@since: 0.4.1
"""
if not hasattr(mod.Encoder, 'writeNonGAEObject'):
mod.Encoder.writeNonGAEObject = mod.Encoder.writeObject
mod.Encoder.writeObject = writeGAEObject
# initialise the module here: hook into pyamf
pyamf.add_type(db.Query, util.to_list)
pyamf.register_alias_type(DataStoreClassAlias, db.Model, db.Expando)
# hook the L{writeGAEObject} method to the Encoder class on import
imports.when_imported('pyamf.amf0', install_gae_reference_model_hook)
imports.when_imported('pyamf.amf3', install_gae_reference_model_hook)
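# Hedged usage sketch, kept as comments so the adapter module stays side-effect free at
# import time. It assumes a running GAE environment, a hypothetical 'Pet' model, and
# PyAMF's top-level pyamf.encode() helper, which returns a buffer of AMF bytes.
#
#     class Pet(db.Model):
#         name = db.StringProperty()
#
#     pet = Pet(name='Rex')
#     pet.put()
#     amf_bytes = pyamf.encode(pet).getvalue()  # encoded via DataStoreClassAlias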
|
|
from __future__ import absolute_import
from datetime import timedelta
import pytest
import logging
import re
import mock
from tornado import gen
from tornado.ioloop import PeriodicCallback, IOLoop
from tornado.httpclient import HTTPError
import bokeh.server.server as server
from bokeh.application import Application
from bokeh.application.handlers import Handler
from bokeh.model import Model
from bokeh.core.properties import List, String
from bokeh.client import pull_session
from bokeh.server.server import Server
from bokeh.util.session_id import check_session_id_signature
from .utils import ManagedServerLoop, url, ws_url, http_get, websocket_open
logging.basicConfig(level=logging.DEBUG)
def test__create_hosts_whitelist_no_host():
hosts = server._create_hosts_whitelist(None, 1000)
assert hosts == ["localhost:1000"]
hosts = server._create_hosts_whitelist([], 1000)
assert hosts == ["localhost:1000"]
def test__create_hosts_whitelist_host_value_with_port_use_port():
hosts = server._create_hosts_whitelist(["foo:1000"], 1000)
assert hosts == ["foo:1000"]
hosts = server._create_hosts_whitelist(["foo:1000","bar:2100"], 1000)
assert hosts == ["foo:1000","bar:2100"]
def test__create_hosts_whitelist_host_without_port_use_port_80():
hosts = server._create_hosts_whitelist(["foo"], 1000)
assert hosts == ["foo:80"]
hosts = server._create_hosts_whitelist(["foo","bar"], 1000)
assert hosts == ["foo:80","bar:80"]
def test__create_hosts_whitelist_host_non_int_port_raises():
with pytest.raises(ValueError):
server._create_hosts_whitelist(["foo:xyz"], 1000)
def test__create_hosts_whitelist_bad_host_raises():
with pytest.raises(ValueError):
server._create_hosts_whitelist([""], 1000)
with pytest.raises(ValueError):
server._create_hosts_whitelist(["a:b:c"], 1000)
with pytest.raises(ValueError):
server._create_hosts_whitelist([":80"], 1000)
@gen.coroutine
def async_value(value):
yield gen.moment # this ensures we actually return to the loop
raise gen.Return(value)
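# Hedged aside (not part of the test suite): a coroutine like async_value can be driven
# to completion synchronously with Tornado's run_sync, e.g.
#
#     from tornado.ioloop import IOLoop
#     assert IOLoop().run_sync(lambda: async_value(42)) == 42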
class HookListModel(Model):
hooks = List(String)
class HookTestHandler(Handler):
def __init__(self):
super(HookTestHandler, self).__init__()
self.load_count = 0
self.unload_count = 0
self.session_creation_async_value = 0
self.hooks = []
self.server_periodic_remover = None
self.session_periodic_remover = None
def modify_document(self, doc):
# this checks that the session created hook has run
# and session destroyed has not.
assert self.session_creation_async_value == 3
doc.title = "Modified"
doc.roots[0].hooks.append("modify")
self.hooks.append("modify")
def on_server_loaded(self, server_context):
assert len(server_context.sessions) == 0
self.load_count += 1
self.hooks.append("server_loaded")
server_context.add_next_tick_callback(self.on_next_tick_server)
server_context.add_timeout_callback(self.on_timeout_server, 2)
server_context.add_periodic_callback(self.on_periodic_server, 3)
def remover():
server_context.remove_periodic_callback(self.on_periodic_server)
self.server_periodic_remover = remover
def on_server_unloaded(self, server_context):
self.unload_count += 1
self.hooks.append("server_unloaded")
# important to test that this can be async
@gen.coroutine
def on_session_created(self, session_context):
@gen.coroutine
def setup_document(doc):
# session creation hook is allowed to init the document
# before any modify_document() handlers kick in
from bokeh.document import DEFAULT_TITLE
hook_list = HookListModel()
assert doc.title == DEFAULT_TITLE
assert len(doc.roots) == 0
hook_list.hooks.append("session_created")
doc.add_root(hook_list)
self.session_creation_async_value = yield async_value(1)
self.session_creation_async_value = yield async_value(2)
self.session_creation_async_value = yield async_value(3)
yield session_context.with_locked_document(setup_document)
server_context = session_context.server_context
server_context.add_next_tick_callback(self.on_next_tick_session)
server_context.add_timeout_callback(self.on_timeout_session, 2)
server_context.add_periodic_callback(self.on_periodic_session, 3)
def remover():
server_context.remove_periodic_callback(self.on_periodic_session)
self.session_periodic_remover = remover
self.hooks.append("session_created")
# this has to be async too
@gen.coroutine
def on_session_destroyed(self, session_context):
@gen.coroutine
def shutdown_document(doc):
doc.roots[0].hooks.append("session_destroyed")
self.session_creation_async_value = yield async_value(4)
self.session_creation_async_value = yield async_value(5)
self.session_creation_async_value = yield async_value(6)
yield session_context.with_locked_document(shutdown_document)
self.hooks.append("session_destroyed")
def on_next_tick_server(self):
self.hooks.append("next_tick_server")
def on_timeout_server(self):
self.hooks.append("timeout_server")
def on_periodic_server(self):
self.hooks.append("periodic_server")
self.server_periodic_remover()
def on_next_tick_session(self):
self.hooks.append("next_tick_session")
def on_timeout_session(self):
self.hooks.append("timeout_session")
def on_periodic_session(self):
self.hooks.append("periodic_session")
self.session_periodic_remover()
def test__lifecycle_hooks():
application = Application()
handler = HookTestHandler()
application.add(handler)
with ManagedServerLoop(application, check_unused_sessions_milliseconds=30) as server:
# wait for server callbacks to run before we mix in the
# session, this keeps the test deterministic
def check_done():
if len(handler.hooks) == 4:
server.io_loop.stop()
server_load_checker = PeriodicCallback(check_done, 1,
io_loop=server.io_loop)
server_load_checker.start()
server.io_loop.start()
server_load_checker.stop()
# now we create a session
client_session = pull_session(session_id='test__lifecycle_hooks',
url=url(server),
io_loop=server.io_loop)
client_doc = client_session.document
assert len(client_doc.roots) == 1
server_session = server.get_session('/', client_session.id)
server_doc = server_session.document
assert len(server_doc.roots) == 1
client_session.close()
# expire the session quickly rather than after the
# usual timeout
server_session.request_expiration()
def on_done():
server.io_loop.stop()
server.io_loop.call_later(0.1, on_done)
server.io_loop.start()
assert handler.hooks == ["server_loaded",
"next_tick_server",
"timeout_server",
"periodic_server",
"session_created",
"next_tick_session",
"modify",
"timeout_session",
"periodic_session",
"session_destroyed",
"server_unloaded"]
client_hook_list = client_doc.roots[0]
server_hook_list = server_doc.roots[0]
assert handler.load_count == 1
assert handler.unload_count == 1
assert handler.session_creation_async_value == 6
assert client_doc.title == "Modified"
assert server_doc.title == "Modified"
# the client session doesn't see the event that adds "session_destroyed" since
# we shut down at that point.
assert client_hook_list.hooks == ["session_created", "modify"]
assert server_hook_list.hooks == ["session_created", "modify", "session_destroyed"]
def test_get_sessions():
application = Application()
with ManagedServerLoop(application) as server:
server_sessions = server.get_sessions('/')
assert len(server_sessions) == 0
http_get(server.io_loop, url(server))
server_sessions = server.get_sessions('/')
assert len(server_sessions) == 1
http_get(server.io_loop, url(server))
server_sessions = server.get_sessions('/')
assert len(server_sessions) == 2
server_sessions = server.get_sessions()
assert len(server_sessions) == 2
with pytest.raises(ValueError):
server.get_sessions("/foo")
with ManagedServerLoop({"/foo": application, "/bar": application}) as server:
http_get(server.io_loop, url(server) + "foo")
server_sessions = server.get_sessions('/foo')
assert len(server_sessions) == 1
server_sessions = server.get_sessions('/bar')
assert len(server_sessions) == 0
server_sessions = server.get_sessions()
assert len(server_sessions) == 1
http_get(server.io_loop, url(server) + "foo")
server_sessions = server.get_sessions('/foo')
assert len(server_sessions) == 2
server_sessions = server.get_sessions('/bar')
assert len(server_sessions) == 0
server_sessions = server.get_sessions()
assert len(server_sessions) == 2
http_get(server.io_loop, url(server) + "bar")
server_sessions = server.get_sessions('/foo')
assert len(server_sessions) == 2
server_sessions = server.get_sessions('/bar')
assert len(server_sessions) == 1
server_sessions = server.get_sessions()
assert len(server_sessions) == 3
def test__request_in_session_context():
application = Application()
with ManagedServerLoop(application) as server:
response = http_get(server.io_loop,
url(server) + "?foo=10")
html = response.body
sessionid = extract_sessionid_from_json(html)
server_session = server.get_session('/', sessionid)
server_doc = server_session.document
session_context = server_doc.session_context
# do we have a request
assert session_context.request is not None
def test__request_in_session_context_has_arguments():
application = Application()
with ManagedServerLoop(application) as server:
response = http_get(server.io_loop,
url(server) + "?foo=10")
html = response.body
sessionid = extract_sessionid_from_json(html)
server_session = server.get_session('/', sessionid)
server_doc = server_session.document
session_context = server_doc.session_context
# test if we can get the argument from the request
assert session_context.request.arguments['foo'] == [b'10']
def test__no_request_arguments_in_session_context():
application = Application()
with ManagedServerLoop(application) as server:
response = http_get(server.io_loop,
url(server))
html = response.body
sessionid = extract_sessionid_from_json(html)
server_session = server.get_session('/', sessionid)
server_doc = server_session.document
session_context = server_doc.session_context
# if we do not pass any arguments to the url, the request arguments
# should be empty
assert len(session_context.request.arguments) == 0
# examples:
# "sessionid" : "NzlNoPfEYJahnPljE34xI0a5RSTaU1Aq1Cx5"
# 'sessionid':'NzlNoPfEYJahnPljE34xI0a5RSTaU1Aq1Cx5'
sessionid_in_json = re.compile("""["']sessionid["'] *: *["']([^"]+)["']""")
def extract_sessionid_from_json(html):
from six import string_types
if not isinstance(html, string_types):
import codecs
html = codecs.decode(html, 'utf-8')
match = sessionid_in_json.search(html)
return match.group(1)
# examples:
# "use_for_title" : false
# 'use_for_title':true
use_for_title_in_json = re.compile("""["']use_for_title["'] *: *(false|true)""")
def extract_use_for_title_from_json(html):
from six import string_types
if not isinstance(html, string_types):
import codecs
html = codecs.decode(html, 'utf-8')
match = use_for_title_in_json.search(html)
return match.group(1)
def autoload_url(server):
return url(server) + \
"autoload.js?bokeh-protocol-version=1.0&bokeh-autoload-element=foo"
def test_use_xheaders():
application = Application()
with ManagedServerLoop(application, use_xheaders=True) as server:
assert server._http.xheaders == True
def test__autocreate_session_autoload():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
response = http_get(server.io_loop,
autoload_url(server))
js = response.body
sessionid = extract_sessionid_from_json(js)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert sessionid == sessions[0].id
def test__no_set_title_autoload():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
response = http_get(server.io_loop,
autoload_url(server))
js = response.body
use_for_title = extract_use_for_title_from_json(js)
assert use_for_title == "false"
def test__autocreate_session_doc():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
response = http_get(server.io_loop,
url(server))
html = response.body
sessionid = extract_sessionid_from_json(html)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert sessionid == sessions[0].id
def test__no_autocreate_session_websocket():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
websocket_open(server.io_loop,
ws_url(server) + "?bokeh-protocol-version=1.0")
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__use_provided_session_autoload():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
response = http_get(server.io_loop,
autoload_url(server) + "&bokeh-session-id=" + expected)
js = response.body
sessionid = extract_sessionid_from_json(js)
assert expected == sessionid
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert expected == sessions[0].id
def test__use_provided_session_doc():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
response = http_get(server.io_loop,
url(server) + "?bokeh-session-id=" + expected)
html = response.body
sessionid = extract_sessionid_from_json(html)
assert expected == sessionid
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert expected == sessions[0].id
def test__use_provided_session_websocket():
application = Application()
with ManagedServerLoop(application) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
url = ws_url(server) + \
"?bokeh-protocol-version=1.0" + \
"&bokeh-session-id=" + expected
websocket_open(server.io_loop,
url)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert expected == sessions[0].id
def test__autocreate_signed_session_autoload():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='foo') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
response = http_get(server.io_loop,
autoload_url(server))
js = response.body
sessionid = extract_sessionid_from_json(js)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert sessionid == sessions[0].id
assert check_session_id_signature(sessionid, signed=True, secret_key='foo')
def test__autocreate_signed_session_doc():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='foo') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
response = http_get(server.io_loop,
url(server))
html = response.body
sessionid = extract_sessionid_from_json(html)
sessions = server.get_sessions('/')
assert 1 == len(sessions)
assert sessionid == sessions[0].id
assert check_session_id_signature(sessionid, signed=True, secret_key='foo')
def test__reject_unsigned_session_autoload():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
with (pytest.raises(HTTPError)) as info:
http_get(server.io_loop,
autoload_url(server) + "&bokeh-session-id=" + expected)
assert 'Invalid session ID' in repr(info.value)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__reject_unsigned_session_doc():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
with (pytest.raises(HTTPError)) as info:
http_get(server.io_loop, url(server) + "?bokeh-session-id=" + expected)
assert 'Invalid session ID' in repr(info.value)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__reject_unsigned_session_websocket():
application = Application()
with ManagedServerLoop(application, sign_sessions=True, secret_key='bar') as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
expected = 'foo'
url = ws_url(server) + \
"?bokeh-protocol-version=1.0" + \
"&bokeh-session-id=" + expected
websocket_open(server.io_loop,
url)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__no_generate_session_autoload():
application = Application()
with ManagedServerLoop(application, generate_session_ids=False) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
with (pytest.raises(HTTPError)) as info:
http_get(server.io_loop, autoload_url(server))
assert 'No bokeh-session-id provided' in repr(info.value)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__no_generate_session_doc():
application = Application()
with ManagedServerLoop(application, generate_session_ids=False) as server:
sessions = server.get_sessions('/')
assert 0 == len(sessions)
with (pytest.raises(HTTPError)) as info:
http_get(server.io_loop, url(server))
assert 'No bokeh-session-id provided' in repr(info.value)
sessions = server.get_sessions('/')
assert 0 == len(sessions)
def test__server_multiple_processes():
with mock.patch('tornado.process.fork_processes') as tornado_fp:
application = Application()
with ManagedServerLoop(application, num_procs=3):
pass
tornado_fp.assert_called_with(3)
def test__existing_ioloop_with_multiple_processes_exception():
application = Application()
ioloop_instance = IOLoop.instance() ; ioloop_instance # silence flake8
with pytest.raises(RuntimeError):
with ManagedServerLoop(application, num_procs=3):
pass
def test__actual_port_number():
application = Application()
with ManagedServerLoop(application, port=0) as server:
port = server.port
assert port > 0
http_get(server.io_loop, url(server))
def test__ioloop_not_forcibly_stopped():
# Issue #5494
application = Application()
loop = IOLoop()
loop.make_current()
server = Server(application, ioloop=loop)
server.start()
result = []
def f():
server.unlisten()
server.stop()
# If server.stop() were to stop the Tornado IO loop,
# g() wouldn't be called and `result` would remain empty.
loop.add_timeout(timedelta(seconds=0.01), g)
def g():
result.append(None)
loop.stop()
loop.add_callback(f)
loop.start()
assert result == [None]
|
|
###
# Copyright (c) 2002-2005, Jeremiah Fincher
# Copyright (c) 2008, James McCoy
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions, and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author of this software nor the name of
# contributors to this software may be used to endorse or promote products
# derived from this software without specific prior written consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
###
import os
import time
import random
import shutil
import os.path
from . import crypt
from .iter import ifilter
def contents(filename):
return file(filename).read()
def open(filename, mode='wb', *args, **kwargs):
"""filename -> file object.
Returns a file object for filename, creating as many directories as may be
necessary. I.e., if the filename is ./foo/bar/baz, and . exists, and ./foo
exists, but ./foo/bar does not exist, bar will be created before opening
baz in it.
"""
if mode not in ('w', 'wb'):
raise ValueError, 'utils.file.open expects to write.'
(dirname, basename) = os.path.split(filename)
os.makedirs(dirname)
return file(filename, mode, *args, **kwargs)
def copy(src, dst):
"""src, dst -> None
Copies src to dst, using this module's 'open' function to open dst.
"""
srcfd = file(src)
dstfd = open(dst, 'wb')
shutil.copyfileobj(srcfd, dstfd)
def writeLine(fd, line):
fd.write(line)
if not line.endswith('\n'):
fd.write('\n')
def readLines(filename):
fd = file(filename)
try:
return [line.rstrip('\r\n') for line in fd.readlines()]
finally:
fd.close()
def touch(filename):
fd = file(filename, 'w')
fd.close()
def mktemp(suffix=''):
"""Gives a decent random string, suitable for a filename."""
r = random.Random()
m = crypt.md5(suffix)
r.seed(time.time())
s = str(r.getstate())
period = random.random()
now = start = time.time()
while start + period < now:
time.sleep() # Induce a context switch, if possible.
now = time.time()
m.update(str(random.random()))
m.update(s)
m.update(str(now))
s = m.hexdigest()
return crypt.sha(s + str(time.time())).hexdigest() + suffix
def nonCommentLines(fd):
for line in fd:
if not line.startswith('#'):
yield line
def nonEmptyLines(fd):
return ifilter(str.strip, fd)
def nonCommentNonEmptyLines(fd):
return nonEmptyLines(nonCommentLines(fd))
def chunks(fd, size):
return iter(lambda : fd.read(size), '')
## chunk = fd.read(size)
## while chunk:
## yield chunk
## chunk = fd.read(size)
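# Hedged usage sketch (hypothetical file name and placeholder callable): read a file in
# fixed-size pieces without loading it all at once.
#
#     for chunk in chunks(file('some.log', 'rb'), 4096):
#         do_something_with(chunk)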
class AtomicFile(file):
"""Used for files that need to be atomically written -- i.e., if there's a
failure, the original file remains, unmodified. mode must be 'w' or 'wb'"""
class default(object): # Holder for values.
# Callables?
tmpDir = None
backupDir = None
makeBackupIfSmaller = True
allowEmptyOverwrite = True
def __init__(self, filename, mode='w', allowEmptyOverwrite=None,
makeBackupIfSmaller=None, tmpDir=None, backupDir=None):
if tmpDir is None:
tmpDir = force(self.default.tmpDir)
if backupDir is None:
backupDir = force(self.default.backupDir)
if makeBackupIfSmaller is None:
makeBackupIfSmaller = force(self.default.makeBackupIfSmaller)
if allowEmptyOverwrite is None:
allowEmptyOverwrite = force(self.default.allowEmptyOverwrite)
if mode not in ('w', 'wb'):
raise ValueError, format('Invalid mode: %q', mode)
self.rolledback = False
self.allowEmptyOverwrite = allowEmptyOverwrite
self.makeBackupIfSmaller = makeBackupIfSmaller
self.filename = filename
self.backupDir = backupDir
if tmpDir is None:
# If not given a tmpDir, we'll just put a random token on the end
# of our filename and put it in the same directory.
self.tempFilename = '%s.%s' % (self.filename, mktemp())
else:
# If given a tmpDir, we'll get the basename (just the filename, no
# directory), put our random token on the end, and put it in tmpDir
tempFilename = '%s.%s' % (os.path.basename(self.filename), mktemp())
self.tempFilename = os.path.join(tmpDir, tempFilename)
# This doesn't work because of the uncollectable garbage effect.
# self.__parent = super(AtomicFile, self)
super(AtomicFile, self).__init__(self.tempFilename, mode)
def rollback(self):
if not self.closed:
super(AtomicFile, self).close()
if os.path.exists(self.tempFilename):
os.remove(self.tempFilename)
self.rolledback = True
def close(self):
if not self.rolledback:
super(AtomicFile, self).close()
# We don't mind writing an empty file if the file we're overwriting
# doesn't exist.
newSize = os.path.getsize(self.tempFilename)
originalExists = os.path.exists(self.filename)
if newSize or self.allowEmptyOverwrite or not originalExists:
if originalExists:
oldSize = os.path.getsize(self.filename)
if self.makeBackupIfSmaller and newSize < oldSize:
now = int(time.time())
backupFilename = '%s.backup.%s' % (self.filename, now)
if self.backupDir is not None:
backupFilename = os.path.basename(backupFilename)
backupFilename = os.path.join(self.backupDir,
backupFilename)
shutil.copy(self.filename, backupFilename)
# We use shutil.move here instead of os.rename because
# the latter doesn't work on Windows when self.filename
# (the target) already exists. shutil.move handles those
# intricacies for us.
# This raises IOError if we can't write to the file. Since
# in *nix, it only takes write perms to the *directory* to
# rename a file (and shutil.move will use os.rename if
# possible), we first check if we have the write permission
# and only then do we write.
fd = file(self.filename, 'a')
fd.close()
shutil.move(self.tempFilename, self.filename)
else:
raise ValueError, 'AtomicFile.close called after rollback.'
def __del__(self):
# We rollback because if we're deleted without being explicitly closed,
# that's bad. We really should log this here, but as of yet we've got
# no logging facility in utils. I've got some ideas for this, though.
self.rollback()
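# Hedged usage sketch (assumes the surrounding supybot.utils package so force() and
# format() resolve): write a file atomically, falling back to rollback() on error.
#
#     fd = AtomicFile('plugins.conf')
#     try:
#         fd.write('some configuration\n')
#         fd.close()      # the temp file is moved over plugins.conf only now
#     except Exception:
#         fd.rollback()   # the original plugins.conf is left untouched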
# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:
|
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Paul Nilsson, paul.nilsson@cern.ch, 2018-2019
import os
from xml.dom import minidom
from xml.etree import ElementTree
from pilot.util.filehandling import write_file
import logging
logger = logging.getLogger(__name__)
def create_input_file_metadata(file_dictionary, workdir, filename="PoolFileCatalog.xml"):
"""
Create a Pool File Catalog for the files listed in the input dictionary.
The function creates properly formatted XML (pretty printed) and writes the XML to file.
Format:
dictionary = {'guid': 'pfn', ..}
->
    <!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">
    <POOLFILECATALOG>
    <File ID="guid">
    <physical>
    <pfn filetype="ROOT_All" name="surl"/>
    </physical>
    <logical/>
    </File>
    </POOLFILECATALOG>
:param file_dictionary: file dictionary.
:param workdir: job work directory (string).
:param filename: PFC file name (string).
:return: xml (string)
"""
# create the file structure
data = ElementTree.Element('POOLFILECATALOG')
for fileid in list(file_dictionary.keys()): # Python 2/3
_file = ElementTree.SubElement(data, 'File')
_file.set('ID', fileid)
_physical = ElementTree.SubElement(_file, 'physical')
_pfn = ElementTree.SubElement(_physical, 'pfn')
_pfn.set('filetype', 'ROOT_All')
_pfn.set('name', file_dictionary.get(fileid))
ElementTree.SubElement(_file, 'logical')
# create a new XML file with the results
xml = ElementTree.tostring(data, encoding='utf8')
xml = minidom.parseString(xml).toprettyxml(indent=" ")
# add escape character for & (needed for google turls)
if '&' in xml:
        xml = xml.replace('&', '&amp;')
# stitch in the DOCTYPE
xml = xml.replace('<POOLFILECATALOG>', '<!DOCTYPE POOLFILECATALOG SYSTEM "InMemory">\n<POOLFILECATALOG>')
write_file(os.path.join(workdir, filename), xml, mute=False)
return xml
def get_file_info_from_xml(workdir, filename="PoolFileCatalog.xml"):
"""
Return a file info dictionary based on the metadata in the given XML file.
The file info dictionary is used to replace the input file LFN list in the job parameters with the full PFNs
which are needed for direct access in production jobs.
Example of PoolFileCatalog.xml:
<?xml version="1.0" ?>
<POOLFILECATALOG>
<File ID="4ACC5018-2EA3-B441-BC11-0C0992847FD1">
<physical>
<pfn filetype="ROOT_ALL" name="root://dcgftp.usatlas.bnl.gov:1096//../AOD.11164242._001522.pool.root.1"/>
</physical>
<logical/>
</File>
</POOLFILECATALOG>
which gives the following dictionary:
{'AOD.11164242._001522.pool.root.1': ['root://dcgftp.usatlas.bnl.gov:1096//../AOD.11164242._001522.pool.root.1',
'4ACC5018-2EA3-B441-BC11-0C0992847FD1']}
:param workdir: directory of PoolFileCatalog.xml (string).
:param filename: file name (default: PoolFileCatalog.xml) (string).
:return: dictionary { LFN: [PFN, GUID], .. }
"""
file_info_dictionary = {}
tree = ElementTree.parse(os.path.join(workdir, filename))
root = tree.getroot()
# root.tag = POOLFILECATALOG
for child in root:
# child.tag = 'File', child.attrib = {'ID': '4ACC5018-2EA3-B441-BC11-0C0992847FD1'}
guid = child.attrib['ID']
for grandchild in child:
# grandchild.tag = 'physical', grandchild.attrib = {}
for greatgrandchild in grandchild:
# greatgrandchild.tag = 'pfn', greatgrandchild.attrib = {'filetype': 'ROOT_ALL', 'name': 'root://dcgftp.usatlas.bnl ..'}
pfn = greatgrandchild.attrib['name']
lfn = os.path.basename(pfn)
file_info_dictionary[lfn] = [pfn, guid]
return file_info_dictionary
def get_metadata_from_xml(workdir, filename="metadata.xml"):
"""
Parse the payload metadata.xml file.
Example of metadata.xml:
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE POOLFILECATALOG SYSTEM 'InMemory'>
<POOLFILECATALOG>
<File ID="D2A6D6F4-ADB2-B140-9C2E-D2D5C099B342">
<logical>
<lfn name="RDO_011a43ba-7c98-488d-8741-08da579c5de7.root"/>
</logical>
<metadata att_name="geometryVersion" att_value="ATLAS-R2-2015-03-01-00"/>
<metadata att_name="conditionsTag" att_value="OFLCOND-RUN12-SDR-19"/>
<metadata att_name="size" att_value="3250143"/>
<metadata att_name="events" att_value="3"/>
<metadata att_name="beamType" att_value="collisions"/>
<metadata att_name="fileType" att_value="RDO"/>
</File>
</POOLFILECATALOG>
which gives the following dictionary:
{'RDO_011a43ba-7c98-488d-8741-08da579c5de7.root': {'conditionsTag': 'OFLCOND-RUN12-SDR-19',
'beamType': 'collisions', 'fileType': 'RDO', 'geometryVersion': 'ATLAS-R2-2015-03-01-00', 'events': '3',
'size': '3250143'}}
:param workdir: payload work directory (string).
:param filename: metadata file name (string).
:return: metadata dictionary.
"""
# metadata_dictionary = { lfn: { att_name1: att_value1, .. }, ..}
metadata_dictionary = {}
path = os.path.join(workdir, filename)
if not os.path.exists(path):
logger.warning('file does not exist: %s' % path)
return metadata_dictionary
tree = ElementTree.parse(path)
root = tree.getroot()
# root.tag = POOLFILECATALOG
for child in root:
# child.tag = 'File', child.attrib = {'ID': '4ACC5018-2EA3-B441-BC11-0C0992847FD1'}
lfn = ""
guid = child.attrib['ID'] if 'ID' in child.attrib else None
for grandchild in child:
# grandchild.tag = 'logical', grandchild.attrib = {}
if grandchild.tag == 'logical':
for greatgrandchild in grandchild:
# greatgrandchild.tag = lfn
# greatgrandchild.attrib = lfn {'name': 'RDO_011a43ba-7c98-488d-8741-08da579c5de7.root'}
lfn = greatgrandchild.attrib.get('name')
metadata_dictionary[lfn] = {}
elif grandchild.tag == 'metadata':
# grandchild.attrib = {'att_name': 'events', 'att_value': '3'}
name = grandchild.attrib.get('att_name')
value = grandchild.attrib.get('att_value')
metadata_dictionary[lfn][name] = value
else:
# unknown metadata entry
pass
if guid:
metadata_dictionary[lfn]['guid'] = guid
return metadata_dictionary
def get_number_of_events(metadata_dictionary, filename=''):
"""
Get the number of events for the given file from the metadata dictionary (from metadata.xml).
:param metadata_dictionary: dictionary from parsed metadata.xml file.
:param filename: file name for which the number of events relates to (string).
:return: number of events (int). -1 is returned if the events could not be extracted from the dictionary.
"""
nevents = -1
if filename != '' and filename in metadata_dictionary:
try:
nevents = int(metadata_dictionary[filename].get('events'))
        except (ValueError, TypeError) as e:
logger.warning('failed to convert number of events to int: %s' % e)
else:
logger.warning('number of events could not be extracted from metadata dictionary (based on metadata.xml)')
return nevents
def get_total_number_of_events(metadata_dictionary):
"""
Get the total number of events for all files in the metadata dictionary.
:param metadata_dictionary: dictionary from parsed metadata.xml file.
:return: total number of processed events (int).
"""
nevents = 0
for filename in metadata_dictionary:
_nevents = get_number_of_events(metadata_dictionary, filename=filename)
if _nevents != -1:
nevents += _nevents
return nevents
def get_guid(metadata_dictionary, filename=''):
"""
Get the guid from the metadata dictionary for the given LFN.
:param metadata_dictionary: dictionary from parsed metadata.xml file.
    :param filename: file name to which the guid relates (string).
:return: guid (string, None is returned if guid could not be extracted).
"""
guid = None
if filename != '' and filename in metadata_dictionary:
try:
guid = metadata_dictionary[filename].get('guid')
except ValueError as e:
logger.warning('failed to get guid from xml: %s' % e)
else:
logger.warning('guid could not be extracted from metadata dictionary (based on metadata.xml)')
return guid
def get_guid_from_xml(metadata_dictionary, lfn):
"""
Get the guid for the given LFN in the metadata dictionary.
:param metadata_dictionary: dictionary from parsed metadata.xml file.
:param lfn: LFN (string).
    :return: guid (string, None is returned if the LFN is not in the dictionary).
"""
guid = None
for filename in metadata_dictionary:
if filename == lfn:
guid = get_guid(metadata_dictionary, filename=filename)
return guid
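# Hedged round-trip sketch (hypothetical GUID and PFN values; relies on write_file from
# the pilot package imported above): create a PoolFileCatalog and read it back.
if __name__ == '__main__':
    import tempfile
    workdir = tempfile.mkdtemp()
    create_input_file_metadata(
        {'4ACC5018-2EA3-B441-BC11-0C0992847FD1': 'root://some.host//path/AOD.pool.root.1'},
        workdir)
    # expected: {'AOD.pool.root.1': ['root://some.host//path/AOD.pool.root.1', '4ACC5018-...']}
    print(get_file_info_from_xml(workdir))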
|
|
import pytest
from peewee_validates import DEFAULT_MESSAGES
from peewee_validates import ModelValidator
from peewee_validates import ValidationError
from peewee_validates import ManyModelChoiceField
from tests.models import BasicFields
from tests.models import ComplexPerson
from tests.models import Course
from tests.models import Organization
from tests.models import Person
from tests.models import Student
def test_not_instance():
with pytest.raises(AttributeError):
ModelValidator(Person)
def test_instance():
instance = Person()
validator = ModelValidator(instance)
valid = validator.validate({'name': 'tim'})
assert valid
assert validator.data['name'] == 'tim'
def test_required():
validator = ModelValidator(Person())
valid = validator.validate()
assert not valid
assert validator.errors['name'] == DEFAULT_MESSAGES['required']
def test_clean():
class TestValidator(ModelValidator):
def clean(self, data):
super().clean(data)
data['name'] += 'awesome'
return data
validator = TestValidator(Person())
valid = validator.validate({'name': 'tim'})
assert valid
assert validator.data['name'] == 'timawesome'
def test_clean_error():
class TestValidator(ModelValidator):
def clean(self, data):
raise ValidationError('required')
validator = TestValidator(Person())
valid = validator.validate({'name': 'tim'})
assert not valid
assert validator.data['name'] == 'tim'
assert validator.errors['__base__'] == DEFAULT_MESSAGES['required']
def test_choices():
validator = ModelValidator(ComplexPerson(name='tim'))
valid = validator.validate({'organization': 1, 'gender': 'S'})
assert not valid
assert validator.errors['gender'] == DEFAULT_MESSAGES['one_of'].format(choices='M, F')
assert 'name' not in validator.errors
valid = validator.validate({'organization': 1, 'gender': 'M'})
assert valid
def test_default():
validator = ModelValidator(BasicFields())
valid = validator.validate()
assert not valid
assert validator.data['field1'] == 'Tim'
assert validator.errors['field2'] == DEFAULT_MESSAGES['required']
assert validator.errors['field3'] == DEFAULT_MESSAGES['required']
def test_related_required_missing():
validator = ModelValidator(ComplexPerson(name='tim', gender='M'))
valid = validator.validate({'organization': 999})
assert not valid
assert validator.errors['organization'] == DEFAULT_MESSAGES['related'].format(field='id', values=999)
valid = validator.validate({'organization': None})
assert not valid
assert validator.errors['organization'] == DEFAULT_MESSAGES['required']
valid = validator.validate()
assert not valid
assert validator.errors['organization'] == DEFAULT_MESSAGES['required']
def test_related_optional_missing():
validator = ModelValidator(ComplexPerson(name='tim', gender='M', organization=1))
valid = validator.validate({'pay_grade': 999})
assert not valid
assert validator.errors['pay_grade'] == DEFAULT_MESSAGES['related'].format(field='id', values=999)
valid = validator.validate({'pay_grade': None})
assert valid
valid = validator.validate()
assert valid
def test_related_required_int():
org = Organization.create(name='new1')
validator = ModelValidator(ComplexPerson(name='tim', gender='M'))
valid = validator.validate({'organization': org.id})
assert valid
def test_related_required_instance():
org = Organization.create(name='new1')
validator = ModelValidator(ComplexPerson(name='tim', gender='M'))
valid = validator.validate({'organization': org})
assert valid
def test_related_required_dict():
org = Organization.create(name='new1')
validator = ModelValidator(ComplexPerson(name='tim', gender='M'))
valid = validator.validate({'organization': {'id': org.id}})
assert valid
def test_related_required_dict_missing():
validator = ModelValidator(ComplexPerson(name='tim', gender='M'))
validator.validate({'organization': {}})
assert validator.errors['organization'] == DEFAULT_MESSAGES['required']
def test_related_optional_dict_missing():
validator = ModelValidator(ComplexPerson(name='tim', gender='M', organization=1))
valid = validator.validate({'pay_grade': {}})
assert valid
def test_unique():
person = Person.create(name='tim')
validator = ModelValidator(Person(name='tim'))
valid = validator.validate({'gender': 'M'})
assert not valid
assert validator.errors['name'] == DEFAULT_MESSAGES['unique']
validator = ModelValidator(person)
valid = validator.validate({'gender': 'M'})
assert valid
def test_unique_index():
obj1 = BasicFields.create(field1='one', field2='two', field3='three')
obj2 = BasicFields(field1='one', field2='two', field3='three')
validator = ModelValidator(obj2)
valid = validator.validate()
assert not valid
assert validator.errors['field1'] == DEFAULT_MESSAGES['index']
assert validator.errors['field2'] == DEFAULT_MESSAGES['index']
validator = ModelValidator(obj1)
valid = validator.validate()
assert valid
def test_validate_only():
obj = BasicFields(field1='one')
validator = ModelValidator(obj)
valid = validator.validate(only=('field1', ))
assert valid
def test_save():
obj = BasicFields(field1='one', field2='124124', field3='1232314')
validator = ModelValidator(obj)
valid = validator.validate({'field1': 'updated'})
assert valid
validator.save()
assert obj.id
assert obj.field1 == 'updated'
def test_m2m_empty():
validator = ModelValidator(Student(name='tim'))
valid = validator.validate()
assert valid
valid = validator.validate({'courses': []})
assert valid
def test_m2m_missing():
validator = ModelValidator(Student(name='tim'))
valid = validator.validate({'courses': [1, 33]})
assert not valid
assert validator.errors['courses'] == DEFAULT_MESSAGES['related'].format(field='id', values=[1, 33])
def test_m2m_ints():
validator = ModelValidator(Student(name='tim'))
c1 = Course.create(name='course1')
c2 = Course.create(name='course2')
valid = validator.validate({'courses': [c1.id, c2.id]})
print(validator.errors)
assert valid
valid = validator.validate({'courses': c1.id})
assert valid
valid = validator.validate({'courses': str(c1.id)})
assert valid
def test_m2m_instances():
validator = ModelValidator(Student(name='tim'))
c1 = Course.create(name='course1')
c2 = Course.create(name='course2')
valid = validator.validate({'courses': [c1, c2]})
assert valid
valid = validator.validate({'courses': c1})
assert valid
def test_m2m_dicts():
validator = ModelValidator(Student(name='tim'))
c1 = Course.create(name='course1')
c2 = Course.create(name='course2')
valid = validator.validate({'courses': [{'id': c1.id}, {'id': c2.id}]})
assert valid
valid = validator.validate({'courses': {'id': c1.id}})
assert valid
def test_m2m_dicts_blank():
validator = ModelValidator(Student(name='tim'))
valid = validator.validate({'courses': [{}, {}]})
assert valid
valid = validator.validate({'courses': {}})
assert valid
def test_m2m_save():
obj = Student(name='tim')
validator = ModelValidator(obj)
c1 = Course.create(name='course1')
c2 = Course.create(name='course2')
valid = validator.validate({'courses': [c1, c2]})
assert valid
validator.save()
assert obj.id
assert c1 in obj.courses
assert c2 in obj.courses
def test_m2m_save_blank():
obj = Student(name='tim')
validator = ModelValidator(obj)
valid = validator.validate({'courses': [{}, {}]})
assert valid
validator.save()
assert obj.id
def test_overrides():
class CustomValidator(ModelValidator):
students = ManyModelChoiceField(Student.select(), Student.name)
Student.create(name='tim')
Student.create(name='bob')
obj = Course.create(name='course1')
validator = CustomValidator(obj)
data = {'students': [{'name': 'tim'}, 'bob']}
valid = validator.validate(data)
print(validator.errors)
assert valid
validator.save()
assert obj.id
assert len(obj.students) == 2
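# Hedged aside (assumes the same tests.models fixtures): typical standalone use of
# ModelValidator outside pytest.
#
#     validator = ModelValidator(Person(name='tim'))
#     if validator.validate({'name': 'tim'}):
#         validator.save()
#     else:
#         print(validator.errors)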
|
|
# Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
Pyste version %s
Usage:
pyste [options] interface-files
where options are:
--module=<name> The name of the module that will be generated;
defaults to the first interface filename, without
the extension.
-I <path> Add an include path
-D <symbol> Define symbol
--multiple Create various cpps, instead of only one
(useful during development)
--out=<name> Specify output filename (default: <module>.cpp)
in --multiple mode, this will be a directory
--no-using Do not declare "using namespace boost";
use explicit declarations instead
--pyste-ns=<name> Set the namespace where new types will be declared;
default is the empty namespace
--debug Writes the xml for each file parsed in the current
directory
--cache-dir=<dir> Directory for cache files (speeds up future runs)
--only-create-cache Recreates all caches (doesn't generate code).
--generate-main Generates the _main.cpp file (in multiple mode)
--file-list A file with one pyste file per line. Use as a
substitute for passing the files in the command
line.
--gccxml-path=<path> Path to gccxml executable (default: gccxml)
--no-default-include Do not use INCLUDE environment variable for include
files to pass along gccxml.
-h, --help Print this help and exit
-v, --version Print version information
"""
import sys
import os
import getopt
import exporters
import SingleCodeUnit
import MultipleCodeUnit
import infos
import exporterutils
import settings
import gc
import sys
from policies import *
from CppParser import CppParser, CppParserError
import time
import declarations
__version__ = '0.9.30'
def RecursiveIncludes(include):
    'Return a list containing the include dir and all its subdirectories'
dirs = [include]
def visit(arg, dir, names):
# ignore CVS dirs
if os.path.split(dir)[1] != 'CVS':
dirs.append(dir)
os.path.walk(include, visit, None)
return dirs
def GetDefaultIncludes():
if 'INCLUDE' in os.environ:
include = os.environ['INCLUDE']
return include.split(os.pathsep)
else:
return []
def ProcessIncludes(includes):
if sys.platform == 'win32':
index = 0
for include in includes:
includes[index] = include.replace('\\', '/')
index += 1
def ReadFileList(filename):
f = file(filename)
files = []
try:
for line in f:
line = line.strip()
if line:
files.append(line)
finally:
f.close()
return files
def ParseArguments():
def Usage():
print __doc__ % __version__
sys.exit(1)
try:
options, files = getopt.getopt(
sys.argv[1:],
'R:I:D:vh',
['module=', 'multiple', 'out=', 'no-using', 'pyste-ns=', 'debug', 'cache-dir=',
'only-create-cache', 'version', 'generate-main', 'file-list=', 'help',
'gccxml-path=', 'no-default-include'])
except getopt.GetoptError, e:
print
print 'ERROR:', e
Usage()
default_includes = GetDefaultIncludes()
includes = []
defines = []
module = None
out = None
multiple = False
cache_dir = None
create_cache = False
generate_main = False
gccxml_path = 'gccxml'
for opt, value in options:
if opt == '-I':
includes.append(value)
elif opt == '-D':
defines.append(value)
elif opt == '-R':
includes.extend(RecursiveIncludes(value))
elif opt == '--module':
module = value
elif opt == '--out':
out = value
elif opt == '--no-using':
settings.namespaces.python = 'boost::python::'
settings.USING_BOOST_NS = False
elif opt == '--pyste-ns':
settings.namespaces.pyste = value + '::'
elif opt == '--debug':
settings.DEBUG = True
elif opt == '--multiple':
multiple = True
elif opt == '--cache-dir':
cache_dir = value
elif opt == '--only-create-cache':
create_cache = True
elif opt == '--file-list':
files += ReadFileList(value)
elif opt in ['-h', '--help']:
Usage()
elif opt in ['-v', '--version']:
print 'Pyste version %s' % __version__
sys.exit(2)
elif opt == '--generate-main':
generate_main = True
elif opt == '--gccxml-path':
gccxml_path = value
elif opt == '--no-default-include':
default_includes = []
else:
print 'Unknown option:', opt
Usage()
includes[0:0] = default_includes
if not files:
Usage()
if not module:
module = os.path.splitext(os.path.basename(files[0]))[0]
if not out:
out = module
if not multiple:
out += '.cpp'
for file in files:
d = os.path.dirname(os.path.abspath(file))
if d not in sys.path:
sys.path.append(d)
if create_cache and not cache_dir:
print 'Error: Use --cache-dir to indicate where to create the cache files!'
Usage()
sys.exit(3)
if generate_main and not multiple:
print 'Error: --generate-main only valid in multiple mode.'
Usage()
sys.exit(3)
ProcessIncludes(includes)
return includes, defines, module, out, files, multiple, cache_dir, create_cache, \
generate_main, gccxml_path
def PCHInclude(*headers):
code = '\n'.join(['#include <%s>' % x for x in headers])
infos.CodeInfo(code, 'pchinclude')
def CreateContext():
    'Create the context where an interface file will be executed'
context = {}
context['Import'] = Import
# infos
context['Function'] = infos.FunctionInfo
context['Class'] = infos.ClassInfo
context['Include'] = lambda header: infos.CodeInfo('#include <%s>\n' % header, 'include')
context['PCHInclude'] = PCHInclude
context['Template'] = infos.ClassTemplateInfo
context['Enum'] = infos.EnumInfo
context['AllFromHeader'] = infos.HeaderInfo
context['Var'] = infos.VarInfo
# functions
context['rename'] = infos.rename
context['set_policy'] = infos.set_policy
context['exclude'] = infos.exclude
context['set_wrapper'] = infos.set_wrapper
context['use_shared_ptr'] = infos.use_shared_ptr
context['use_auto_ptr'] = infos.use_auto_ptr
context['holder'] = infos.holder
context['add_method'] = infos.add_method
context['final'] = infos.final
context['export_values'] = infos.export_values
# policies
context['return_internal_reference'] = return_internal_reference
context['with_custodian_and_ward'] = with_custodian_and_ward
context['return_value_policy'] = return_value_policy
context['reference_existing_object'] = reference_existing_object
context['copy_const_reference'] = copy_const_reference
context['copy_non_const_reference'] = copy_non_const_reference
context['return_opaque_pointer'] = return_opaque_pointer
context['manage_new_object'] = manage_new_object
context['return_by_value'] = return_by_value
context['return_self'] = return_self
# utils
context['Wrapper'] = exporterutils.FunctionWrapper
context['declaration_code'] = lambda code: infos.CodeInfo(code, 'declaration-outside')
context['module_code'] = lambda code: infos.CodeInfo(code, 'module')
context['class_code'] = infos.class_code
return context
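# The names installed above form the small DSL that pyste interface files are
# written in. A minimal interface file might look like the following sketch
# (header, class, and member names are hypothetical):
#
#   Include('mylib/widget.h')
#   Widget = Class('Widget', 'mylib/widget.h')
#   rename(Widget.GetName, 'get_name')
#   set_policy(Widget.Parent, return_internal_reference())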
def Begin():
# parse arguments
includes, defines, module, out, interfaces, multiple, cache_dir, create_cache, generate_main, gccxml_path = ParseArguments()
# run pyste scripts
for interface in interfaces:
ExecuteInterface(interface)
# create the parser
parser = CppParser(includes, defines, cache_dir, declarations.version, gccxml_path)
try:
if not create_cache:
if not generate_main:
return GenerateCode(parser, module, out, interfaces, multiple)
else:
return GenerateMain(module, out, OrderInterfaces(interfaces))
else:
return CreateCaches(parser)
finally:
parser.Close()
def CreateCaches(parser):
# There is one cache file per interface so we organize the headers
# by interfaces. For each interface collect the tails from the
# exporters sharing the same header.
tails = JoinTails(exporters.exporters)
# now for each interface file take each header, and using the tail
# get the declarations and cache them.
for interface, header in tails:
tail = tails[(interface, header)]
declarations = parser.ParseWithGCCXML(header, tail)
cachefile = parser.CreateCache(header, interface, tail, declarations)
print 'Cached', cachefile
return 0
_imported_count = {} # interface => count
def ExecuteInterface(interface):
old_interface = exporters.current_interface
if not os.path.exists(interface):
if old_interface and os.path.exists(old_interface):
d = os.path.dirname(old_interface)
interface = os.path.join(d, interface)
if not os.path.exists(interface):
raise IOError, "Cannot find interface file %s."%interface
_imported_count[interface] = _imported_count.get(interface, 0) + 1
exporters.current_interface = interface
context = CreateContext()
context['INTERFACE_FILE'] = os.path.abspath(interface)
execfile(interface, context)
exporters.current_interface = old_interface
def Import(interface):
exporters.importing = True
ExecuteInterface(interface)
exporters.importing = False
def JoinTails(exports):
'''Returns a dict of {(interface, header): tail}, where tail is the
joining of all tails of all exports for the header.
'''
tails = {}
for export in exports:
interface = export.interface_file
header = export.Header()
tail = export.Tail() or ''
if (interface, header) in tails:
all_tails = tails[(interface,header)]
all_tails += '\n' + tail
tails[(interface, header)] = all_tails
else:
tails[(interface, header)] = tail
return tails
def OrderInterfaces(interfaces):
interfaces_order = [(_imported_count[x], x) for x in interfaces]
interfaces_order.sort()
interfaces_order.reverse()
return [x for _, x in interfaces_order]
def GenerateMain(module, out, interfaces):
codeunit = MultipleCodeUnit.MultipleCodeUnit(module, out)
codeunit.GenerateMain(interfaces)
return 0
def GenerateCode(parser, module, out, interfaces, multiple):
# prepare to generate the wrapper code
if multiple:
codeunit = MultipleCodeUnit.MultipleCodeUnit(module, out)
else:
codeunit = SingleCodeUnit.SingleCodeUnit(module, out)
# stop referencing the exporters here
exports = exporters.exporters
exporters.exporters = None
exported_names = dict([(x.Name(), None) for x in exports])
# order the exports
order = {}
for export in exports:
if export.interface_file in order:
order[export.interface_file].append(export)
else:
order[export.interface_file] = [export]
exports = []
interfaces_order = OrderInterfaces(interfaces)
for interface in interfaces_order:
exports.extend(order[interface])
del order
del interfaces_order
# now generate the code in the correct order
#print exported_names
tails = JoinTails(exports)
for i in xrange(len(exports)):
export = exports[i]
interface = export.interface_file
header = export.Header()
if header:
tail = tails[(interface, header)]
declarations, parsed_header = parser.Parse(header, interface, tail)
else:
declarations = []
parsed_header = None
ExpandTypedefs(declarations, exported_names)
export.SetDeclarations(declarations)
export.SetParsedHeader(parsed_header)
if multiple:
codeunit.SetCurrent(export.interface_file, export.Name())
export.GenerateCode(codeunit, exported_names)
# force collect of cyclic references
exports[i] = None
del declarations
del export
gc.collect()
# finally save the code unit
codeunit.Save()
if not multiple:
print 'Module %s generated' % module
return 0
def ExpandTypedefs(decls, exported_names):
'''Check if the names in exported_names are a typedef, and add the real class
name in the dict.
'''
for name in exported_names.keys():
for decl in decls:
if isinstance(decl, declarations.Typedef):
exported_names[decl.type.FullName()] = None
def UsePsyco():
'Tries to use psyco if possible'
try:
import psyco
psyco.profile()
except: pass
def main():
start = time.clock()
UsePsyco()
status = Begin()
print '%0.2f seconds' % (time.clock()-start)
sys.exit(status)
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Bitcoin test framework primitive and message structures
CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
data structures that should map to corresponding structures in
bitcoin/primitives
msg_block, msg_tx, msg_headers, etc.:
data structures that represent network messages
ser_*, deser_*: functions that handle serialization/deserialization.
Classes use __slots__ to ensure extraneous attributes aren't accidentally added
by tests, compromising their intended effect.
"""
from codecs import encode
import copy
import hashlib
from io import BytesIO
import random
import socket
import struct
import time
from test_framework.siphash import siphash256
from test_framework.util import hex_str_to_bytes, assert_equal
MIN_VERSION_SUPPORTED = 60001
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37)
MAX_LOCATOR_SZ = 101
MAX_BLOCK_BASE_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out
NODE_NETWORK = (1 << 0)
# NODE_GETUTXO = (1 << 1)
# NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_NETWORK_LIMITED = (1 << 10)
MSG_TX = 1
MSG_BLOCK = 2
MSG_WITNESS_FLAG = 1 << 30
MSG_TYPE_MASK = 0xffffffff >> 2
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit
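# Illustrative helper (not part of the original framework): round-trips a value
# through the CompactSize encoding above. Lengths below 253 use a single byte;
# larger values use a 0xfd/0xfe/0xff marker followed by a 2/4/8 byte
# little-endian integer.
def _compact_size_roundtrip_example(value=70015):
    encoded = ser_compact_size(value)  # b'\xfe\x7f\x11\x01\x00' for 70015
    decoded = deser_compact_size(BytesIO(encoded))
    assert decoded == value
    return encoded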
def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
def ser_string(s):
return ser_compact_size(len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
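# 256-bit integers are (de)serialized as eight little-endian 32-bit words, so
# the round trip deser_uint256(BytesIO(ser_uint256(x))) == x holds for any
# 0 <= x < 2**256.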
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
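# The "compact" form is the nBits encoding used in block headers: the top byte
# is a size and the low three bytes are a mantissa. For example, the
# difficulty-1 value 0x1d00ffff expands to 0xffff << (8 * (0x1d - 3)), i.e. a
# target whose big-endian hex representation starts with 00000000ffff.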
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
# ser_function_name: Allow for an alternate serialization function on the
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
else:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(hex_str_to_bytes(hex_string)))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return obj.serialize().hex()
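# Typical round trip when exchanging transactions with a node over RPC
# (tx_hex stands for any hex-encoded transaction string, e.g. the output of
# the getrawtransaction RPC):
#
#   tx = FromHex(CTransaction(), tx_hex)
#   assert ToHex(tx) == tx_hex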
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress:
__slots__ = ("ip", "nServices", "pchReserved", "port", "time")
def __init__(self):
self.time = 0
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f, with_time=True):
if with_time:
self.time = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self, with_time=True):
r = b""
if with_time:
r += struct.pack("<i", self.time)
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv:
__slots__ = ("hash", "type")
typemap = {
0: "Error",
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator:
__slots__ = ("nVersion", "vHave")
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint:
__slots__ = ("hash", "n")
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn:
__slots__ = ("nSequence", "prevout", "scriptSig")
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), self.scriptSig.hex(),
self.nSequence)
class CTxOut:
__slots__ = ("nValue", "scriptPubKey")
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
self.scriptPubKey.hex())
class CScriptWitness:
__slots__ = ("stack",)
def __init__(self):
# stack is a vector of strings
self.stack = []
def __repr__(self):
return "CScriptWitness(%s)" % \
(",".join([x.hex() for x in self.stack]))
def is_null(self):
if self.stack:
return False
return True
class CTxInWitness:
__slots__ = ("scriptWitness",)
def __init__(self):
self.scriptWitness = CScriptWitness()
def deserialize(self, f):
self.scriptWitness.stack = deser_string_vector(f)
def serialize(self):
return ser_string_vector(self.scriptWitness.stack)
def __repr__(self):
return repr(self.scriptWitness)
def is_null(self):
return self.scriptWitness.is_null()
class CTxWitness:
__slots__ = ("vtxinwit",)
def __init__(self):
self.vtxinwit = []
def deserialize(self, f):
for i in range(len(self.vtxinwit)):
self.vtxinwit[i].deserialize(f)
def serialize(self):
r = b""
# This is different than the usual vector serialization --
# we omit the length of the vector, which is required to be
# the same length as the transaction's vin vector.
for x in self.vtxinwit:
r += x.serialize()
return r
def __repr__(self):
return "CTxWitness(%s)" % \
(';'.join([repr(x) for x in self.vtxinwit]))
def is_null(self):
for x in self.vtxinwit:
if not x.is_null():
return False
return True
class CTransaction:
__slots__ = ("hash", "nLockTime", "nVersion", "sha256", "vin", "vout",
"wit")
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.wit = CTxWitness()
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = tx.sha256
self.hash = tx.hash
self.wit = copy.deepcopy(tx.wit)
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
flags = 0
if len(self.vin) == 0:
flags = struct.unpack("<B", f.read(1))[0]
# Not sure why flags can't be zero, but this
# matches the implementation in bitcoind
if (flags != 0):
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
else:
self.wit = CTxWitness()
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize_without_witness(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
# Only serialize with witness when explicitly called for
def serialize_with_witness(self):
flags = 0
if not self.wit.is_null():
flags |= 1
r = b""
r += struct.pack("<i", self.nVersion)
if flags:
dummy = []
r += ser_vector(dummy)
r += struct.pack("<B", flags)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
if flags & 1:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
for i in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
return r
# Regular serialization is with witness -- must explicitly
# call serialize_without_witness to exclude witness data.
def serialize(self):
return self.serialize_with_witness()
# Recalculate the txid (transaction hash without witness)
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.hash
# We will only cache the serialization without witness in
# self.sha256 and self.hash -- those are expected to be the txid.
def calc_sha256(self, with_witness=False):
if with_witness:
# Don't cache the result, just return it
return uint256_from_str(hash256(self.serialize_with_witness()))
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize_without_witness()))
self.hash = encode(hash256(self.serialize_without_witness())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)
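# The extended serialization produced by serialize_with_witness() follows the
# BIP 144 layout: nVersion, a zero "marker" byte (an empty vin vector), a flags
# byte, vin, vout, one witness stack per input, and finally nLockTime. The txid
# returned by rehash()/calc_sha256() always commits to the legacy layout
# without the marker, flags, or witness data.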
class CBlockHeader:
__slots__ = ("hash", "hashMerkleRoot", "hashPrevBlock", "nBits", "nNonce",
"nTime", "nVersion", "sha256")
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
BLOCK_HEADER_SIZE = len(CBlockHeader().serialize())
assert_equal(BLOCK_HEADER_SIZE, 80)
class CBlock(CBlockHeader):
__slots__ = ("vtx",)
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += super(CBlock, self).serialize()
if with_witness:
r += ser_vector(self.vtx, "serialize_with_witness")
else:
r += ser_vector(self.vtx, "serialize_without_witness")
return r
# Calculate the merkle root given a vector of transaction hashes
@classmethod
def get_merkle_root(cls, hashes):
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
return self.get_merkle_root(hashes)
def calc_witness_merkle_root(self):
# For witness root purposes, the hash of the
# coinbase, with witness, is defined to be 0...0
hashes = [ser_uint256(0)]
for tx in self.vtx[1:]:
# Calculate the hashes with witness data
hashes.append(ser_uint256(tx.calc_sha256(True)))
return self.get_merkle_root(hashes)
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
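# A common test pattern with the class above (block construction helpers such
# as create_block live elsewhere in the framework and are assumed here):
#
#   block.hashMerkleRoot = block.calc_merkle_root()
#   block.solve()        # grind nNonce until the header hash meets the target
#   assert block.is_valid()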
class PrefilledTransaction:
__slots__ = ("index", "tx")
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx
def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)
def serialize(self, with_witness=True):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r
def serialize_without_witness(self):
return self.serialize(with_witness=False)
def serialize_with_witness(self):
return self.serialize(with_witness=True)
def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))
# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "prefilled_txn_length",
"shortids", "shortids_length")
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []
def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)
# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn, "serialize_without_witness")
return r
def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))
# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
__slots__ = ()
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)
# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs:
__slots__ = ("header", "nonce", "prefilled_txn", "shortids", "use_witness")
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False
if p2pheaders_and_shortids is not None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index
def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret
def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]
# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list=None, use_witness=False):
if prefill_list is None:
prefill_list = [0]
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))
def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))
class BlockTransactionsRequest:
__slots__ = ("blockhash", "indexes")
def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r
# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x
def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
def __repr__(self):
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
class BlockTransactions:
__slots__ = ("blockhash", "transactions")
def __init__(self, blockhash=0, transactions = None):
self.blockhash = blockhash
self.transactions = transactions if transactions is not None else []
def deserialize(self, f):
self.blockhash = deser_uint256(f)
self.transactions = deser_vector(f, CTransaction)
def serialize(self, with_witness=True):
r = b""
r += ser_uint256(self.blockhash)
if with_witness:
r += ser_vector(self.transactions, "serialize_with_witness")
else:
r += ser_vector(self.transactions, "serialize_without_witness")
return r
def __repr__(self):
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
class CPartialMerkleTree:
__slots__ = ("nTransactions", "vBits", "vHash")
def __init__(self):
self.nTransactions = 0
self.vHash = []
self.vBits = []
def deserialize(self, f):
self.nTransactions = struct.unpack("<i", f.read(4))[0]
self.vHash = deser_uint256_vector(f)
vBytes = deser_string(f)
self.vBits = []
for i in range(len(vBytes) * 8):
self.vBits.append(vBytes[i//8] & (1 << (i % 8)) != 0)
def serialize(self):
r = b""
r += struct.pack("<i", self.nTransactions)
r += ser_uint256_vector(self.vHash)
vBytesArray = bytearray([0x00] * ((len(self.vBits) + 7)//8))
for i in range(len(self.vBits)):
vBytesArray[i // 8] |= self.vBits[i] << (i % 8)
r += ser_string(bytes(vBytesArray))
return r
def __repr__(self):
return "CPartialMerkleTree(nTransactions=%d, vHash=%s, vBits=%s)" % (self.nTransactions, repr(self.vHash), repr(self.vBits))
class CMerkleBlock:
__slots__ = ("header", "txn")
def __init__(self):
self.header = CBlockHeader()
self.txn = CPartialMerkleTree()
def deserialize(self, f):
self.header.deserialize(f)
self.txn.deserialize(f)
def serialize(self):
r = b""
r += self.header.serialize()
r += self.txn.serialize()
return r
def __repr__(self):
return "CMerkleBlock(header=%s, txn=%s)" % (repr(self.header), repr(self.txn))
# Objects that correspond to messages on the wire
class msg_version:
__slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices",
"nStartingHeight", "nTime", "nVersion", "strSubVer")
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = NODE_NETWORK | NODE_WITNESS
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
self.nRelay = MY_RELAY
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f, False)
self.addrFrom = CAddress()
self.addrFrom.deserialize(f, False)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
if self.nVersion >= 70001:
# Relay field is optional for version 70001 onwards
try:
self.nRelay = struct.unpack("<b", f.read(1))[0]
except:
self.nRelay = 0
else:
self.nRelay = 0
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize(False)
r += self.addrFrom.serialize(False)
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
r += struct.pack("<b", self.nRelay)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight, self.nRelay)
class msg_verack:
__slots__ = ()
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr:
__slots__ = ("addrs",)
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_inv:
__slots__ = ("inv",)
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata:
__slots__ = ("inv",)
command = b"getdata"
def __init__(self, inv=None):
self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks:
__slots__ = ("locator", "hashstop")
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx:
__slots__ = ("tx",)
command = b"tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize_without_witness()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_witness_tx(msg_tx):
__slots__ = ()
def serialize(self):
return self.tx.serialize_with_witness()
class msg_block:
__slots__ = ("block",)
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
# for cases where a user needs tighter control over what is sent over the wire
# note that the user must supply the name of the command, and the data
class msg_generic:
__slots__ = ("command", "data")
def __init__(self, command, data=None):
self.command = command
self.data = data
def serialize(self):
return self.data
def __repr__(self):
return "msg_generic()"
class msg_no_witness_block(msg_block):
__slots__ = ()
def serialize(self):
return self.block.serialize(with_witness=False)
class msg_getaddr:
__slots__ = ()
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping:
__slots__ = ("nonce",)
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong:
__slots__ = ("nonce",)
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool:
__slots__ = ()
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_notfound:
__slots__ = ("vec", )
command = b"notfound"
def __init__(self, vec=None):
self.vec = vec or []
def deserialize(self, f):
self.vec = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.vec)
def __repr__(self):
return "msg_notfound(vec=%s)" % (repr(self.vec))
class msg_sendheaders:
__slots__ = ()
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders:
__slots__ = ("hashstop", "locator",)
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers:
__slots__ = ("headers",)
command = b"headers"
def __init__(self, headers=None):
self.headers = headers if headers is not None else []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject:
__slots__ = ("code", "data", "message", "reason")
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_feefilter:
__slots__ = ("feerate",)
command = b"feefilter"
def __init__(self, feerate=0):
self.feerate = feerate
def deserialize(self, f):
self.feerate = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.feerate)
return r
def __repr__(self):
return "msg_feefilter(feerate=%08x)" % self.feerate
class msg_sendcmpct:
__slots__ = ("announce", "version")
command = b"sendcmpct"
def __init__(self):
self.announce = False
self.version = 1
def deserialize(self, f):
self.announce = struct.unpack("<?", f.read(1))[0]
self.version = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<?", self.announce)
r += struct.pack("<Q", self.version)
return r
def __repr__(self):
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
class msg_cmpctblock:
__slots__ = ("header_and_shortids",)
command = b"cmpctblock"
def __init__(self, header_and_shortids = None):
self.header_and_shortids = header_and_shortids
def deserialize(self, f):
self.header_and_shortids = P2PHeaderAndShortIDs()
self.header_and_shortids.deserialize(f)
def serialize(self):
r = b""
r += self.header_and_shortids.serialize()
return r
def __repr__(self):
return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)
class msg_getblocktxn:
__slots__ = ("block_txn_request",)
command = b"getblocktxn"
def __init__(self):
self.block_txn_request = None
def deserialize(self, f):
self.block_txn_request = BlockTransactionsRequest()
self.block_txn_request.deserialize(f)
def serialize(self):
r = b""
r += self.block_txn_request.serialize()
return r
def __repr__(self):
return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))
class msg_blocktxn:
__slots__ = ("block_transactions",)
command = b"blocktxn"
def __init__(self):
self.block_transactions = BlockTransactions()
def deserialize(self, f):
self.block_transactions.deserialize(f)
def serialize(self):
r = b""
r += self.block_transactions.serialize()
return r
def __repr__(self):
return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))
class msg_no_witness_blocktxn(msg_blocktxn):
__slots__ = ()
def serialize(self):
return self.block_transactions.serialize(with_witness=False)
|
|
#!/usr/bin/env python3
"""
Open a shell over MAVLink.
@author: Beat Kueng (beat-kueng@gmx.net)
"""
from __future__ import print_function
import sys, select
import termios
from timeit import default_timer as timer
from argparse import ArgumentParser
try:
from pymavlink import mavutil
except ImportError as e:
print("Failed to import pymavlink: " + str(e))
print("")
print("You may need to install it with:")
print(" pip3 install --user pymavlink")
print("")
sys.exit(1)
try:
import serial
except ImportError as e:
print("Failed to import pyserial: " + str(e))
print("")
print("You may need to install it with:")
print(" pip3 install --user pyserial")
print("")
sys.exit(1)
class MavlinkSerialPort():
'''an object that looks like a serial port, but
transmits using mavlink SERIAL_CONTROL packets'''
def __init__(self, portname, baudrate, devnum=0, debug=0):
self.baudrate = 0
self._debug = debug
self.buf = ''
self.port = devnum
self.debug("Connecting with MAVLink to %s ..." % portname)
self.mav = mavutil.mavlink_connection(portname, autoreconnect=True, baud=baudrate)
self.mav.wait_heartbeat()
self.debug("HEARTBEAT OK\n")
self.debug("Locked serial device\n")
def debug(self, s, level=1):
'''write some debug text'''
if self._debug >= level:
print(s)
def write(self, b):
'''write some bytes'''
self.debug("sending '%s' (0x%02x) of len %u\n" % (b, ord(b[0]), len(b)), 2)
while len(b) > 0:
n = len(b)
if n > 70:
n = 70
buf = [ord(x) for x in b[:n]]
buf.extend([0]*(70-len(buf)))
self.mav.mav.serial_control_send(self.port,
mavutil.mavlink.SERIAL_CONTROL_FLAG_EXCLUSIVE |
mavutil.mavlink.SERIAL_CONTROL_FLAG_RESPOND,
0,
0,
n,
buf)
b = b[n:]
def close(self):
self.mav.mav.serial_control_send(self.port, 0, 0, 0, 0, [0]*70)
def _recv(self):
'''read some bytes into self.buf'''
m = self.mav.recv_match(condition='SERIAL_CONTROL.count!=0',
type='SERIAL_CONTROL', blocking=True,
timeout=0.03)
if m is not None:
if self._debug > 2:
print(m)
data = m.data[:m.count]
self.buf += ''.join(str(chr(x)) for x in data)
def read(self, n):
'''read some bytes'''
if len(self.buf) == 0:
self._recv()
if len(self.buf) > 0:
if n > len(self.buf):
n = len(self.buf)
ret = self.buf[:n]
self.buf = self.buf[n:]
if self._debug >= 2:
for b in ret:
self.debug("read 0x%x" % ord(b), 2)
return ret
return ''
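# Minimal usage sketch for the wrapper above, mirroring what main() does below
# (the connection string and baud rate are illustrative):
#
#   port = MavlinkSerialPort('0.0.0.0:14550', 57600, devnum=10)
#   port.write('ls\n')
#   print(port.read(4096))
#   port.close()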
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('port', metavar='PORT', nargs='?', default = None,
help='Mavlink port name: serial: DEVICE[,BAUD], udp: IP:PORT, tcp: tcp:IP:PORT. Eg: \
/dev/ttyUSB0 or 0.0.0.0:14550. Auto-detect serial if not given.')
parser.add_argument("--baudrate", "-b", dest="baudrate", type=int,
help="Mavlink port baud rate (default=57600)", default=57600)
args = parser.parse_args()
    if args.port is None:
if sys.platform == "darwin":
args.port = "/dev/tty.usbmodem01"
else:
serial_list = mavutil.auto_detect_serial(preferred_list=['*FTDI*',
"*Arduino_Mega_2560*", "*3D_Robotics*", "*USB_to_UART*", '*PX4*', '*FMU*', "*Gumstix*"])
if len(serial_list) == 0:
print("Error: no serial connection found")
return
if len(serial_list) > 1:
print('Auto-detected serial ports are:')
for port in serial_list:
print(" {:}".format(port))
print('Using port {:}'.format(serial_list[0]))
args.port = serial_list[0].device
print("Connecting to MAVLINK...")
mav_serialport = MavlinkSerialPort(args.port, args.baudrate, devnum=10)
mav_serialport.write('\n') # make sure the shell is started
# setup the console, so we can read one char at a time
fd_in = sys.stdin.fileno()
old_attr = termios.tcgetattr(fd_in)
new_attr = termios.tcgetattr(fd_in)
new_attr[3] = new_attr[3] & ~termios.ECHO # lflags
new_attr[3] = new_attr[3] & ~termios.ICANON
try:
termios.tcsetattr(fd_in, termios.TCSANOW, new_attr)
cur_line = ''
command_history = []
cur_history_index = 0
def erase_last_n_chars(N):
if N == 0: return
CURSOR_BACK_N = '\x1b['+str(N)+'D'
ERASE_END_LINE = '\x1b[K'
sys.stdout.write(CURSOR_BACK_N + ERASE_END_LINE)
next_heartbeat_time = timer()
while True:
while True:
i, o, e = select.select([sys.stdin], [], [], 0)
if not i: break
ch = sys.stdin.read(1)
# provide a simple shell with command history
if ch == '\n':
if len(cur_line) > 0:
# erase current text (mavlink shell will echo it as well)
erase_last_n_chars(len(cur_line))
# add to history
if len(command_history) == 0 or command_history[-1] != cur_line:
command_history.append(cur_line)
if len(command_history) > 50:
del command_history[0]
cur_history_index = len(command_history)
mav_serialport.write(cur_line+'\n')
cur_line = ''
                elif ord(ch) == 127: # backspace (DEL)
if len(cur_line) > 0:
erase_last_n_chars(1)
cur_line = cur_line[:-1]
sys.stdout.write(ch)
elif ord(ch) == 27:
ch = sys.stdin.read(1) # skip one
ch = sys.stdin.read(1)
if ch == 'A': # arrow up
if cur_history_index > 0:
cur_history_index -= 1
elif ch == 'B': # arrow down
if cur_history_index < len(command_history):
cur_history_index += 1
# TODO: else: support line editing
erase_last_n_chars(len(cur_line))
if cur_history_index == len(command_history):
cur_line = ''
else:
cur_line = command_history[cur_history_index]
sys.stdout.write(cur_line)
elif ord(ch) > 3:
cur_line += ch
sys.stdout.write(ch)
sys.stdout.flush()
data = mav_serialport.read(4096)
if data and len(data) > 0:
sys.stdout.write(data)
sys.stdout.flush()
# handle heartbeat sending
heartbeat_time = timer()
if heartbeat_time > next_heartbeat_time:
mav_serialport.mav.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS,
mavutil.mavlink.MAV_AUTOPILOT_GENERIC, 0, 0, 0)
next_heartbeat_time = heartbeat_time + 1
except serial.serialutil.SerialException as e:
print(e)
except KeyboardInterrupt:
mav_serialport.close()
finally:
termios.tcsetattr(fd_in, termios.TCSADRAIN, old_attr)
if __name__ == '__main__':
main()
|
|
import os
import os.path as op
import logging
import shutil
import json
import subprocess
from Bio import SeqIO, AlignIO
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.PDB import PDBIO
from kmtools.system_tools import switch_paths
from . import (
conf, errors, structure_tools, structure_analysis,
call_modeller, call_tcoffee, call_foldx
)
logger = logging.getLogger(__name__)
class Model:
"""Structural homology model.
Parameters
----------
sequence_file
fasta file containing the sequence of the protein that should be mutated.
structure_file
pdb file containing the structure to be used as a template for homology modelling.
modeller_results_file
Precalculated data from a previous modeller run.
"""
def __init__(self, sequence_file, structure_file, modeller_results_file=None):
logger.debug('Initialising a Model instance with parameters:')
logger.debug('sequence_file: {}:'.format(sequence_file))
logger.debug('structure_file: {}:'.format(structure_file))
# Target sequences
self.sequence_file = sequence_file
self.sequence_seqrecords = list(SeqIO.parse(self.sequence_file, 'fasta'))
self.sequence_id = op.splitext(op.basename(self.sequence_file))[0].replace(':', '.')
self._validate_sequence_seqrecords()
logger.debug('sequence_seqrecords: {}'.format(self.sequence_seqrecords))
# Template structures
self.structure_file = structure_file
self.structure = structure_tools.get_pdb_structure(self.structure_file)
self.structure_id = self.structure.id.replace(':', '.')
self.structure_seqrecords = [
SeqRecord(
id='{}{}'.format(self.structure_id, chain.id),
seq=Seq(
structure_tools
.get_chain_sequence_and_numbering(chain, include_hetatms=True)[0])
) for chain in self.structure[0].child_list
]
self.chain_ids = [chain.id for chain in self.structure.child_list[0].child_list]
logger.debug('structure_seqrecords: {}'.format(self.structure_seqrecords))
# Homology modelling
if self.sequence_id == self.structure_id:
self.sequence_id += '_sequence'
self.model_id = '{}-{}'.format(self.sequence_id, self.structure_id)
# Check for precalculated data
self.modeller_results_file = op.join(conf.CONFIGS['model_dir'], self.model_id + '.json')
if (modeller_results_file is not None and
modeller_results_file != self.modeller_results_file):
logger.debug(
'Copying precalculated modeller results file from {} to {}...'
.format(modeller_results_file, self.modeller_results_file)
)
shutil.copy(modeller_results_file, self.modeller_results_file)
if op.isfile(self.modeller_results_file):
logger.debug(
'Loading precalculated modeller results from file: %s',
self.modeller_results_file
)
with open(self.modeller_results_file) as ifh:
self.modeller_results = json.load(ifh)
else:
logger.debug('Creating sequence alignments and building a homology model')
self._create_alignments_and_model()
# Save model into a json file for faster future use
with open(self.modeller_results_file, 'w') as ofh:
json.dump(self.modeller_results, ofh)
# Get interacting amino acids and interface area
self.modeller_structure = (
structure_tools.get_pdb_structure(
op.join(conf.CONFIGS['unique_temp_dir'], self.modeller_results['model_file']))
)
self.modeller_chain_ids = [
chain.id for chain in self.modeller_structure[0]
]
self._analyse_core()
if len(self.sequence_seqrecords) > 1:
self._analyse_interface()
self.mutations = {}
self.errors = []
@property
def core_or_interface(self):
if len(self.sequence_seqrecords) == 1:
return 'core'
else:
return 'interface'
def _validate_sequence_seqrecords(self):
if len(self.sequence_seqrecords) > 2:
message = (
"ELASPIC is designed to predict the effect of mutations on the folding "
"of a single domain or the interaction between two domains. It cannot predict "
"the effect of mutations on the interaction between more than two domains. "
)
logger.warning(message)
def _align_with_tcoffee(self, sequence_seqrec, structure_seqrec):
alignment_fasta_file = op.join(
conf.CONFIGS['tcoffee_dir'],
'{}-{}.fasta'.format(sequence_seqrec.id, structure_seqrec.id)
)
with open(alignment_fasta_file, 'w') as ofh:
SeqIO.write([sequence_seqrec, structure_seqrec], ofh, 'fasta')
tc = call_tcoffee.TCoffee(
alignment_fasta_file, pdb_file=self.structure_file, mode='3dcoffee')
alignment_output_file = tc.align()
return alignment_output_file
def _create_pir_alignment(self):
pir_alignment_file = op.join(conf.CONFIGS['model_dir'], self.model_id + '.pir')
with open(pir_alignment_file, 'w') as ofh:
write_to_pir_alignment(
ofh, 'sequence', self.sequence_id,
'/'.join(str(seqrec.seq) for seqrec in self.sequence_seqrecords_aligned)
)
write_to_pir_alignment(
ofh, 'structure', self.structure_id,
'/'.join(str(seqrec.seq) for seqrec in self.structure_seqrecords_aligned)
)
return pir_alignment_file
def _create_alignments_and_model(self):
# Align sequence to structure.
alignment_files = []
domain_def_offsets = []
model_domain_defs = []
alignment_stats = []
self.sequence_seqrecords_aligned, self.structure_seqrecords_aligned = [], []
for sequence_seqrec, structure_seqrec in zip(
self.sequence_seqrecords, self.structure_seqrecords):
if str(sequence_seqrec.seq) != str(structure_seqrec.seq):
# Sequence and structure are different, so perform alignment
alignment_output_file = self._align_with_tcoffee(sequence_seqrec, structure_seqrec)
alignment = AlignIO.read(alignment_output_file, 'fasta')
assert len(alignment) == 2
# Check to make sure that the sequence does not have very large overhangs
# over the structure.
# TODO: Do something similar for very large gaps
# (long region of sequence without structure)
# Right now Modeller will try to model those regions as loops (which end up looking
                # very unnatural).
domain_def_offset = get_alignment_overhangs(alignment)
if any(domain_def_offset):
logger.debug(
'Shortening uniprot domain sequence because the alignment had large '
'overhangs... (domain_def_offset: {})'.format(domain_def_offset)
)
cut_from_start = domain_def_offset[0] if domain_def_offset[0] else None
cut_from_end = -domain_def_offset[1] if domain_def_offset[1] else None
sequence_seqrec.seq = (
Seq(str(sequence_seqrec.seq)[cut_from_start:cut_from_end])
)
alignment_output_file = (
self._align_with_tcoffee(sequence_seqrec, structure_seqrec)
)
alignment = AlignIO.read(alignment_output_file, 'fasta')
assert len(alignment) == 2
# Analyse the quality of the alignment
alignment_identity, alignment_coverage, __, __ = analyze_alignment(alignment)
alignment_score = score_alignment(alignment_identity, alignment_coverage)
# Save results
alignment_stats.append(
(alignment_identity, alignment_coverage, alignment_score)
)
alignment_files.append(alignment_output_file)
self.sequence_seqrecords_aligned.append(alignment[0])
self.structure_seqrecords_aligned.append(alignment[1])
else:
# Sequence and structure are the same; no need for alignment. Save dummy results.
alignment_stats.append((1.0, 1.0, 1.0,))
alignment_output_file = (
op.join(
conf.CONFIGS['model_dir'],
'{}-{}.aln'.format(sequence_seqrec.id, structure_seqrec.id))
)
with open(alignment_output_file, 'w') as ofh:
SeqIO.write([sequence_seqrec, structure_seqrec], ofh, 'clustal')
alignment_files.append(alignment_output_file)
domain_def_offset = (0, 0,)
self.sequence_seqrecords_aligned.append(sequence_seqrec)
self.structure_seqrecords_aligned.append(structure_seqrec)
# either way
domain_def_offsets.append(domain_def_offset)
model_domain_def = (
(domain_def_offset[0] + 1,
domain_def_offset[0] + 1 + len(sequence_seqrec), )
)
model_domain_defs.append(model_domain_def)
        # Add the HETATM chain if necessary.
assert len(self.sequence_seqrecords_aligned) == len(self.structure_seqrecords_aligned)
# TODO: This looks wrong...
if len(self.structure_seqrecords) == len(self.structure_seqrecords_aligned) + 1:
self.sequence_seqrecords_aligned.append(self.structure_seqrecords[-1])
self.structure_seqrecords_aligned.append(self.structure_seqrecords[-1])
# Write *.pir alignment.
self.pir_alignment_file = self._create_pir_alignment()
logger.debug('Created pir alignment: {}'.format(self.pir_alignment_file))
# Run modeller.
self.modeller_results = run_modeller(
self.pir_alignment_file, self.sequence_id, self.structure_id,
new_chains=''.join(self.chain_ids)
)
# Save additional alignment info
self.modeller_results['alignment_files'] = [
op.relpath(f, conf.CONFIGS['unique_temp_dir'])
for f in alignment_files]
assert len(domain_def_offsets) <= 2
self.modeller_results['domain_def_offsets'] = domain_def_offsets
assert len(model_domain_defs) <= 2
self.modeller_results['model_domain_defs'] = model_domain_defs
self.modeller_results['alignment_stats'] = alignment_stats
def _analyse_core(self):
# Run the homology model through msms and get dataframes with all the
# per atom and per residue SASA values
analyze_structure = structure_analysis.AnalyzeStructure(
op.join(conf.CONFIGS['unique_temp_dir'], self.modeller_results['model_file']),
conf.CONFIGS['modeller_dir']
)
__, seasa_by_chain_separately, __, seasa_by_residue_separately = (
analyze_structure.get_seasa()
)
# Get SASA only for amino acids in the chain of interest
def _filter_df(df, chain_id, resname, resnum):
df2 = df[
(df['pdb_chain'] == chain_id) &
(df['res_name'] == resname) &
(df['res_num'] == resnum)
]
return df2.iloc[0]['rel_sasa']
self.relative_sasa_scores = {}
for chain_id in self.modeller_chain_ids:
self.relative_sasa_scores[chain_id] = []
chain = self.modeller_structure[0][chain_id]
for residue in chain:
if residue.resname in structure_tools.AAA_DICT:
relative_sasa_score = _filter_df(
seasa_by_residue_separately,
chain_id=chain_id,
resname=residue.resname,
resnum=(str(residue.id[1]) + residue.id[2].strip())
)
self.relative_sasa_scores[chain_id].append(relative_sasa_score)
number_of_aa = len(structure_tools.get_chain_sequence_and_numbering(chain)[0])
if (number_of_aa != len(self.relative_sasa_scores[chain_id])):
logger.error(
'Chain has {} non-hetatm AA, but we have SASA score for only {} AA.'
.format(number_of_aa, len(self.relative_sasa_scores[chain_id]))
)
raise errors.MSMSError()
def _analyse_interface(self):
# Get a dictionary of interacting residues
interacting_residues = (
structure_tools.get_interacting_residues(self.modeller_structure[0], r_cutoff=6.0)
)
_interacting_residues_complement = dict()
for key, values in interacting_residues.items():
for value in values:
_interacting_residues_complement.setdefault(value, set()).add(key)
interacting_residues.update(_interacting_residues_complement)
# Get interacting residues (and interacting resnum) for chain 1 and 2
def _get_a2b_contacts(a_idx, b_idx):
# key[2] holds the AA index (0-based); key[3] holds the AA residue number.
a2b_contacts = set()
for key in interacting_residues:
if key[0] == a_idx:
for value in interacting_residues[key]:
if value[0] == b_idx:
a2b_contacts.add(tuple(key[2:]))
return a2b_contacts
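# Note on the key layout (inferred from the surrounding code, not a documented
# contract): key[0] is the chain index, key[2] the AA index and key[3] the
# residue number, and the last element is compared against one-letter residue
# codes below, so the keys appear to look like
# (chain_idx, <chain_id?>, aa_idx, residue_number, aa_letter) and the tuples
# kept here are (aa_idx, residue_number, aa_letter).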
a2b_contacts = _get_a2b_contacts(0, 1)
b2a_contacts = _get_a2b_contacts(1, 0)
if not a2b_contacts or not b2a_contacts:
logger.error('Chains are not interacting!')
logger.error("interacting_residues: {}".format(interacting_residues))
logger.error('a2b_contacts: {}'.format(a2b_contacts))
logger.error('b2a_contacts: {}'.format(b2a_contacts))
raise errors.ChainsNotInteractingError()
def _validate_a2b_contacts(a2b_contacts, chain_idx):
logger.debug('Validating chain {} interacting AA...'.format(chain_idx))
interface_aa_a = ''.join([
i[2] for i in a2b_contacts
])
try:
interface_aa_b = ''.join([
str(self.sequence_seqrecords[chain_idx].seq)[i[0]]
for i in a2b_contacts
])
except IndexError as e:
logger.error('{}: {}'.format(type(e), e))
interface_aa_b = None
logger.debug('interface_aa_a: {}'.format(interface_aa_a))
logger.debug('interface_aa_b: {}'.format(interface_aa_b))
logger.debug('a2b_contacts: {}'.format(a2b_contacts))
logger.debug(
'self.sequence_seqrecords[chain_idx].seq: {}'
.format(self.sequence_seqrecords[chain_idx].seq)
)
logger.debug(
"domain_def_offsets: {}".format(self.modeller_results['domain_def_offsets']))
if interface_aa_a != interface_aa_b:
raise errors.InterfaceMismatchError()
_validate_a2b_contacts(a2b_contacts, 0)
_validate_a2b_contacts(b2a_contacts, 1)
# Using residue indexes
# self.interacting_aa_1 and self.interacting_aa_2 store the POSITION of the residue
# (1-based; e.g. [1, 2, 3] means the first three residues are interacting)
self.interacting_aa_1 = sorted(i[0] + 1 for i in a2b_contacts)
self.interacting_aa_2 = sorted(i[0] + 1 for i in b2a_contacts)
# Interface area
analyze_structure = structure_analysis.AnalyzeStructure(
op.join(conf.CONFIGS['unique_temp_dir'], self.modeller_results['model_file']),
conf.CONFIGS['modeller_dir']
)
(self.interface_area_hydrophobic,
self.interface_area_hydrophilic,
self.interface_area_total) = (
analyze_structure.get_interface_area(self.modeller_chain_ids[:2])
)
def mutate(self, sequence_idx, mutation):
"""Introduce mutation into model.
Parameters
----------
sequence_idx : int
Integer describing whether the mutation is on the first domain (`0`)
or on the second domain (`1`).
Raises
------
MutationOutsideDomainError
MutationOutsideInterfaceError
"""
if (sequence_idx, mutation) in self.mutations:
return self.mutations[(sequence_idx, mutation)]
protein_id = self.sequence_seqrecords[sequence_idx].id
chain_id = self.modeller_structure.child_list[0].child_list[sequence_idx].id
mutation_errors = ''
# Domain definitions, in case not the entire sequence was modelled
domain_def_offset = self.modeller_results['domain_def_offsets'][sequence_idx]
domain_def = self.modeller_results['model_domain_defs'][sequence_idx]
logger.debug("domain_def_offset: %s", domain_def_offset)
logger.debug("domain_def: %s", domain_def)
# domain_def = (
# domain_def_offset[0],
# len(self.sequence_seqrecords[sequence_idx].seq) - domain_def_offset[1]
# )
mutation_pos = int(mutation[1:-1]) - domain_def[0] + 1
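# Illustrative example of the offset arithmetic (hypothetical numbers): if the
# modelled domain starts at residue 10 of the full sequence (domain_def[0] == 10),
# the mutation 'A12G' at full-sequence position 12 maps to
# mutation_pos == 12 - 10 + 1 == 3, i.e. the third residue of the model.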
if mutation_pos > (domain_def[1] - domain_def[0] + 1):
raise errors.MutationOutsideDomainError()
position_modeller = (
structure_tools.convert_position_to_resid(
self.modeller_structure[0][chain_id],
[mutation_pos])[0]
)
mutation_modeller = (mutation[0] + str(position_modeller) + mutation[-1])
logger.debug('mutation: {}'.format(mutation))
logger.debug('position_modeller: {}'.format(position_modeller))
logger.debug('mutation_modeller: {}'.format(mutation_modeller))
if len(self.sequence_seqrecords) == 1:
partner_chain_idx = None
partner_protein_id = ''
partner_chain_id = None
else:
# TODO: slight hack getting partner_chain_idx
partner_chain_idx = [
i for i in range(len(self.sequence_seqrecords)) if i != sequence_idx
][0]
partner_protein_id = self.sequence_seqrecords[partner_chain_idx].id
partner_chain_id = (
self.modeller_structure.child_list[0].child_list[partner_chain_idx].id
)
logger.debug('sequence_idx: {}'.format(sequence_idx))
logger.debug('partner_chain_idx: {}'.format(partner_chain_idx))
if sequence_idx == 0:
logger.debug('interacting_aa_1: {}'.format(self.interacting_aa_1))
if int(mutation[1:-1]) not in self.interacting_aa_1:
raise errors.MutationOutsideInterfaceError()
elif sequence_idx == 1:
logger.debug('interacting_aa_2: {}'.format(self.interacting_aa_2))
if int(mutation[1:-1]) not in self.interacting_aa_2:
raise errors.MutationOutsideInterfaceError()
else:
logger.warning(
"Can't make sure that a mutation is inside an interface if there are only "
"two chains!"
)
mutation_id = '{}-{}-{}'.format(protein_id, partner_protein_id, mutation)
if mutation_errors:
results = dict(
protein_id=protein_id,
sequence_idx=sequence_idx,
chain_modeller=chain_id,
partner_chain_id=partner_chain_id,
mutation_id=mutation_id,
mutation_domain=mutation,
mutation_errors=mutation_errors,
)
self.mutations[(sequence_idx, mutation)] = results
return results
# ...
logger.debug('Running mutation with mutation_id: {}'.format(mutation_id))
logger.debug('chain_id: {}'.format(chain_id))
logger.debug('partner_chain_id: {}'.format(partner_chain_id))
#######################################################################
# Create a folder for all mutation data.
mutation_dir = op.join(conf.CONFIGS['model_dir'], 'mutations', mutation_id)
os.makedirs(mutation_dir, exist_ok=True)
shutil.copy(op.join(conf.CONFIGS['data_dir'], 'rotabase.txt'), mutation_dir)
#######################################################################
# Copy the homology model to the mutation folder
model_file = op.join(mutation_dir, op.basename(self.modeller_results['model_file']))
shutil.copy(
op.join(conf.CONFIGS['unique_temp_dir'], self.modeller_results['model_file']),
model_file)
#######################################################################
# 2nd: use the 'Repair' feature of FoldX to optimise the structure
fX = call_foldx.FoldX(model_file, chain_id, mutation_dir)
repairedPDB_wt = fX('RepairPDB')
#######################################################################
# 3rd: introduce the mutation using FoldX
mutCodes = [mutation_modeller[0] + chain_id + mutation_modeller[1:], ]
logger.debug('Mutcodes for foldx: {}'.format(mutCodes))
# Introduce the mutation using foldX
fX_wt = call_foldx.FoldX(repairedPDB_wt, chain_id, mutation_dir)
repairedPDB_wt_list, repairedPDB_mut_list = fX_wt('BuildModel', mutCodes)
logger.debug('repairedPDB_wt_list: %s' % str(repairedPDB_wt_list))
logger.debug('repairedPDB_mut_list: %s' % str(repairedPDB_mut_list))
wt_chain_sequences = structure_tools.get_structure_sequences(repairedPDB_wt_list[0])
mut_chain_sequences = structure_tools.get_structure_sequences(repairedPDB_mut_list[0])
logger.debug('wt_chain_sequences: %s' % str(wt_chain_sequences))
logger.debug('mut_chain_sequences: %s' % str(mut_chain_sequences))
# Copy the foldX wildtype and mutant pdb files (use the first model if there are multiple)
model_file_wt = op.join(mutation_dir, mutation_id + '-wt.pdb')
model_file_mut = op.join(mutation_dir, mutation_id + '-mut.pdb')
shutil.copy(repairedPDB_wt_list[0], model_file_wt)
shutil.copy(repairedPDB_mut_list[0], model_file_mut)
#######################################################################
# 4th: set up the classes for the wildtype and the mutant structures
fX_wt_list = list()
for wPDB in repairedPDB_wt_list:
fX_wt_list.append(call_foldx.FoldX(wPDB, chain_id, mutation_dir))
fX_mut_list = list()
for mPDB in repairedPDB_mut_list:
fX_mut_list.append(call_foldx.FoldX(mPDB, chain_id, mutation_dir))
#######################################################################
# 5th: Calculate energies
assert len(fX_wt_list) == 1
stability_values_wt = ','.join(
'{}'.format(f) for f in fX_wt_list[0]('Stability')
)
assert len(fX_mut_list) == 1
stability_values_mut = ','.join(
'{}'.format(f) for f in fX_mut_list[0]('Stability')
)
if len(self.sequence_seqrecords) == 1:
complex_stability_values_wt = None
complex_stability_values_mut = None
else:
assert len(fX_wt_list) == 1
complex_stability_values_wt = ','.join(
'{}'.format(f) for f in fX_wt_list[0]('AnalyseComplex')
)
assert len(fX_mut_list) == 1
complex_stability_values_mut = ','.join(
'{}'.format(f) for f in fX_mut_list[0]('AnalyseComplex')
)
#######################################################################
# 6: Calculate all other relevant properties
# (This also verifies that mutations match mutated residues in pdb structures).
analyze_structure_wt = structure_analysis.AnalyzeStructure(
repairedPDB_wt_list[0], mutation_dir,
)
analyze_structure_results_wt = analyze_structure_wt(
chain_id, mutation_modeller, partner_chain_id)
analyze_structure_mut = structure_analysis.AnalyzeStructure(
repairedPDB_mut_list[0], mutation_dir,
)
analyze_structure_results_mut = analyze_structure_mut(
chain_id, mutation_modeller, partner_chain_id)
logger.debug('analyze_structure_results_wt: {}'.format(analyze_structure_results_wt))
logger.debug('analyze_structure_results_mut: {}'.format(analyze_structure_results_mut))
#######################################################################
# 7th: Collect all results for the wildtype and the mutant
results = dict(
protein_id=protein_id,
sequence_idx=sequence_idx,
chain_modeller=chain_id,
partner_chain_id=partner_chain_id,
mutation_id=mutation_id,
mutation_domain=mutation,
mutation_errors=mutation_errors,
#
mutation_dir=mutation_dir,
mutation_modeller=mutation_modeller,
mutation_foldx=','.join(mutCodes),
model_file_wt=model_file_wt,
model_file_mut=model_file_mut,
stability_energy_wt=stability_values_wt,
stability_energy_mut=stability_values_mut,
analyse_complex_energy_wt=complex_stability_values_wt,
analyse_complex_energy_mut=complex_stability_values_mut,
)
for key, value in analyze_structure_results_wt.items():
results[key + '_wt'] = value
for key, value in analyze_structure_results_mut.items():
results[key + '_mut'] = value
# Another exit point
self.mutations[(sequence_idx, mutation)] = results
return results
@property
def result(self):
result = dict(
model_id=self.model_id,
structure_file=op.relpath(self.structure_file, conf.CONFIGS['unique_temp_dir']),
structure_id=self.structure_id,
sequence_file=op.relpath(self.sequence_file, conf.CONFIGS['unique_temp_dir']),
sequence_id=self.sequence_id,
chain_ids=tuple(self.chain_ids),
mutations=self.mutations,
modeller_results_file=op.relpath(
self.modeller_results_file, conf.CONFIGS['unique_temp_dir']),
modeller_chain_ids=tuple(self.modeller_chain_ids),
relative_sasa_scores=self.relative_sasa_scores,
core_or_interface=self.core_or_interface,
)
# Dump modeller results
for key, value in self.modeller_results.items():
result[key] = value
# For interfaces
if len(self.sequence_seqrecords) > 1:
result_interface = dict(
interacting_aa_1=self.interacting_aa_1,
interacting_aa_2=self.interacting_aa_2,
interface_area_hydrophobic=self.interface_area_hydrophobic,
interface_area_hydrophilic=self.interface_area_hydrophilic,
interface_area_total=self.interface_area_total,
)
result.update(result_interface)
return result
def perform_alignment(self, uniprot_seqrecord, pdb_seqrecord, mode, path_to_data):
"""
"""
# Perform the alignment
t_coffee_parameters = [
uniprot_seqrecord,
pdb_seqrecord,
mode,
logger,
]
logger.debug(
"Calling t_coffee with parameters:\n" +
', '.join(['{}'.format(x) for x in t_coffee_parameters]))
tcoffee = call_tcoffee.tcoffee_alignment(*t_coffee_parameters)
alignments = tcoffee.align()
assert len(alignments) == 1
alignment = alignments[0]
# Save the alignment
logger.debug(alignment)
alignment_filename = alignment[0].id + '_' + alignment[1].id + '.aln'
try:
AlignIO.write(
alignment, self.unique_temp_folder + 'tcoffee/' + alignment_filename, 'clustal')
except IndexError as e:
raise errors.EmptyPDBSequenceError('{}: {}'.format(type(e), e))
temp_save_path = self.temp_archive_path + path_to_data
subprocess.check_call("mkdir -p '{}'".format(temp_save_path), shell=True)
subprocess.check_call("cp -f '{}' '{}'".format(
self.unique_temp_folder + 'tcoffee/' + alignment_filename,
temp_save_path + alignment_filename), shell=True)
return alignment, alignment_filename
def analyze_alignment(alignment, pdb_contact_idxs=[]):
"""Return scores describing the qualit of the alignment.
Returns
-------
identity : float <= 1
Core identity.
coverage : float <= 1
Core coverage.
if_identity : float <= 1
Interface identity.
if_coverage : float <= 1
Interface coverage.
"""
pdb_aa_idx = -1
sequence_1_length = 0
sequence_1_identity = 0
sequence_1_coverage = 0
interface_1_identity = 0
interface_1_coverage = 0
for aa_1, aa_2 in zip(*alignment):
is_interface = False
# Check if the amino acid falls in a gap
if aa_1 == '-':
continue
sequence_1_length += 1
# Check if the template is in a gap
if aa_2 == '-':
continue
pdb_aa_idx += 1
if pdb_aa_idx in pdb_contact_idxs:
is_interface = True # This is an interface amino acid
sequence_1_coverage += 1 # Count as coverage
if is_interface:
interface_1_coverage += 1 # Count as coverage
# Check if the template is identical
if aa_1 != aa_2:
continue
sequence_1_identity += 1 # Count as identity
if is_interface:
interface_1_identity += 1 # Count as identity
identity = sequence_1_identity / float(sequence_1_length)
coverage = sequence_1_coverage / float(sequence_1_length)
assert identity <= 1
assert coverage <= 1
if pdb_contact_idxs:
if_identity = interface_1_identity / float(len(pdb_contact_idxs))
if_coverage = interface_1_coverage / float(len(pdb_contact_idxs))
assert if_identity <= 1
assert if_coverage <= 1
else:
if_identity = None
if_coverage = None
return identity, coverage, if_identity, if_coverage
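# Worked example for `analyze_alignment` (illustrative, not part of the pipeline):
# for the toy alignment
#     query:    A C - G T
#     template: A C G G -
# the query contributes 4 residues (the query gap is skipped), 3 of which are
# covered by the template and identical to it, so identity == coverage == 3/4,
# and if_identity / if_coverage stay None because no pdb_contact_idxs were given.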
def score_alignment(identity, coverage, alpha=0.95):
"""T-score from the interactome3d paper."""
return alpha * (identity) * (coverage) + (1.0 - alpha) * (coverage)
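# Numerical sketch for `score_alignment` (hypothetical values): with
# identity=0.8 and coverage=0.9, the score is
# 0.95 * 0.8 * 0.9 + 0.05 * 0.9 == 0.729, i.e. it is dominated by the
# identity * coverage term with a small coverage-only bonus.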
def get_alignment_overhangs(alignment):
"""Remove gap overhangs from the alignments.
There are cases where no template sequence is availible for a big chunk
of the protein. Return the number of amino acids that should be removed
from the start and end of the query sequence in order to match the template.
"""
n_gaps_start = 0
n_gaps_end = 0
for aa_query, aa_template in zip(*alignment):
if aa_query != '-' and aa_template == '-':
n_gaps_start += 1
else:
break
for aa_query, aa_template in reversed(list(zip(*alignment))):
if aa_query != '-' and aa_template == '-':
n_gaps_end += 1
else:
break
return n_gaps_start, n_gaps_end
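# Example for `get_alignment_overhangs` (illustrative): for
#     query:    M K L A C D
#     template: - - - A C D
# the first three query residues have no template counterpart, so the function
# returns (3, 0) and the caller trims three residues from the start of the
# query sequence before re-aligning.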
def write_to_pir_alignment(pir_alignment_filehandle, seq_type, seq_name, seq):
"""Write the `*.pir` alignment compatible with modeller.
Parameters
----------
seq_type : str
One of: ['sequence', 'structure'], in that order.
seq_name : str
Name to appear in the alignment.
seq : str
Alignment sequence.
"""
pir_alignment_filehandle.write('>P1;' + seq_name + '\n')
pir_alignment_filehandle.write(seq_type + ':' + seq_name + ':.:.:.:.::::\n')
pir_alignment_filehandle.write(seq + '*')
pir_alignment_filehandle.write('\n\n')
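# For reference, a single entry written by `write_to_pir_alignment` looks like
# (the sequence name here is made up):
#
#     >P1;Q9Y6K9
#     sequence:Q9Y6K9:.:.:.:.::::
#     MKLACD--EF*
#
# followed by a blank line; one such block is written for the target sequence
# and one for the template structure, in that order.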
def run_modeller(
pir_alignment_file, target_id, template_id, new_chains='ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
"""
"""
logger.debug(
"Calling modeller with parameters:\n" +
'pir_alignment_file: {}\n'.format([pir_alignment_file]) +
'target_id: {}\n'.format(target_id) +
'template_id: {}\n'.format(template_id)
)
modeller = call_modeller.Modeller(
[pir_alignment_file], target_id, template_id, conf.CONFIGS['unique_temp_dir'])
with switch_paths(conf.CONFIGS['modeller_dir']):
norm_dope, pdb_filename = modeller.run()
raw_model_file = op.join(conf.CONFIGS['modeller_dir'], pdb_filename)
# If there is only one chain in the pdb, label that chain 'A'
io = PDBIO()
structure = structure_tools.get_pdb_structure(raw_model_file)
chains = structure[0].child_list
logger.debug('Modeller chain ids: ' + ', '.join(chain.id for chain in chains))
for i in range(len(chains)):
chains[i].id = new_chains[i]
logger.debug('Corrected chain ids: ' + ', '.join(chain.id for chain in chains))
io.set_structure(structure)
model_file = op.splitext(pir_alignment_file)[0] + '.pdb'
io.save(model_file)
results = {
'model_file': op.relpath(model_file, conf.CONFIGS['unique_temp_dir']),
'raw_model_file': op.relpath(raw_model_file, conf.CONFIGS['unique_temp_dir']),
'norm_dope': norm_dope,
'pir_alignment_file': op.relpath(pir_alignment_file, conf.CONFIGS['unique_temp_dir']),
}
return results
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import exceptions
from tempest.test import attr
class ServerRescueTestJSON(base.BaseV2ComputeTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
cls.set_network_resources(network=True, subnet=True, router=True)
super(ServerRescueTestJSON, cls).setUpClass()
cls.device = 'vdf'
# Floating IP creation
resp, body = cls.floating_ips_client.create_floating_ip()
cls.floating_ip_id = str(body['id']).strip()
cls.floating_ip = str(body['ip']).strip()
# Security group creation
cls.sg_name = data_utils.rand_name('sg')
cls.sg_desc = data_utils.rand_name('sg-desc')
resp, cls.sg = \
cls.security_groups_client.create_security_group(cls.sg_name,
cls.sg_desc)
cls.sg_id = cls.sg['id']
# Create a volume and wait for it to become ready for attach
resp, cls.volume = cls.volumes_extensions_client.create_volume(
1, display_name=data_utils.rand_name(cls.__name__ + '_volume'))
cls.volumes_extensions_client.wait_for_volume_status(
cls.volume['id'], 'available')
# Server for positive tests
resp, server = cls.create_test_server(wait_until='BUILD')
resp, resc_server = cls.create_test_server(wait_until='ACTIVE')
cls.server_id = server['id']
cls.password = server['adminPass']
cls.servers_client.wait_for_server_status(cls.server_id, 'ACTIVE')
# Server for negative tests
cls.rescue_id = resc_server['id']
cls.rescue_password = resc_server['adminPass']
cls.servers_client.rescue_server(
cls.rescue_id, adminPass=cls.rescue_password)
cls.servers_client.wait_for_server_status(cls.rescue_id, 'RESCUE')
def setUp(self):
super(ServerRescueTestJSON, self).setUp()
@classmethod
def tearDownClass(cls):
# Deleting the floating IP which is created in this method
cls.floating_ips_client.delete_floating_ip(cls.floating_ip_id)
cls.delete_volume(cls.volume['id'])
resp, cls.sg = cls.security_groups_client.delete_security_group(
cls.sg_id)
super(ServerRescueTestJSON, cls).tearDownClass()
def tearDown(self):
super(ServerRescueTestJSON, self).tearDown()
def _detach(self, server_id, volume_id):
self.servers_client.detach_volume(server_id, volume_id)
self.volumes_extensions_client.wait_for_volume_status(volume_id,
'available')
def _unrescue(self, server_id):
resp, body = self.servers_client.unrescue_server(server_id)
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
def _unpause(self, server_id):
resp, body = self.servers_client.unpause_server(server_id)
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
@attr(type='smoke')
def test_rescue_unrescue_instance(self):
resp, body = self.servers_client.rescue_server(
self.server_id, adminPass=self.password)
self.assertEqual(200, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
resp, body = self.servers_client.unrescue_server(self.server_id)
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'ACTIVE')
@attr(type=['negative', 'gate'])
def test_rescue_paused_instance(self):
# Rescue a paused server
resp, body = self.servers_client.pause_server(
self.server_id)
self.addCleanup(self._unpause, self.server_id)
self.assertEqual(202, resp.status)
self.servers_client.wait_for_server_status(self.server_id, 'PAUSED')
self.assertRaises(exceptions.Conflict,
self.servers_client.rescue_server,
self.server_id)
@attr(type=['negative', 'gate'])
def test_rescued_vm_reboot(self):
self.assertRaises(exceptions.Conflict, self.servers_client.reboot,
self.rescue_id, 'HARD')
@attr(type=['negative', 'gate'])
def test_rescue_non_existent_server(self):
# Rescue a non-existing server
self.assertRaises(exceptions.NotFound,
self.servers_client.rescue_server,
'999erra43')
@attr(type=['negative', 'gate'])
def test_rescued_vm_rebuild(self):
self.assertRaises(exceptions.Conflict,
self.servers_client.rebuild,
self.rescue_id,
self.image_ref_alt)
@attr(type=['negative', 'gate'])
def test_rescued_vm_attach_volume(self):
# Rescue the server
self.servers_client.rescue_server(self.server_id,
adminPass=self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
self.addCleanup(self._unrescue, self.server_id)
# Attach the volume to the server
self.assertRaises(exceptions.Conflict,
self.servers_client.attach_volume,
self.server_id,
self.volume['id'],
device='/dev/%s' % self.device)
@attr(type=['negative', 'gate'])
def test_rescued_vm_detach_volume(self):
# Attach the volume to the server
self.servers_client.attach_volume(self.server_id,
self.volume['id'],
device='/dev/%s' % self.device)
self.volumes_extensions_client.wait_for_volume_status(
self.volume['id'], 'in-use')
# Rescue the server
self.servers_client.rescue_server(self.server_id,
adminPass=self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
# addCleanup is a LIFO queue
self.addCleanup(self._detach, self.server_id, self.volume['id'])
self.addCleanup(self._unrescue, self.server_id)
# Detach the volume from the server expecting failure
self.assertRaises(exceptions.Conflict,
self.servers_client.detach_volume,
self.server_id,
self.volume['id'])
@attr(type='gate')
def test_rescued_vm_associate_dissociate_floating_ip(self):
# Rescue the server
self.servers_client.rescue_server(
self.server_id, adminPass=self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
self.addCleanup(self._unrescue, self.server_id)
# Association of floating IP to a rescued vm
client = self.floating_ips_client
resp, body = client.associate_floating_ip_to_server(self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
# Disassociation of floating IP that was associated in this method
resp, body = \
client.disassociate_floating_ip_from_server(self.floating_ip,
self.server_id)
self.assertEqual(202, resp.status)
@attr(type='gate')
def test_rescued_vm_add_remove_security_group(self):
# Rescue the server
self.servers_client.rescue_server(
self.server_id, adminPass=self.password)
self.servers_client.wait_for_server_status(self.server_id, 'RESCUE')
self.addCleanup(self._unrescue, self.server_id)
# Add Security group
resp, body = self.servers_client.add_security_group(self.server_id,
self.sg_name)
self.assertEqual(202, resp.status)
# Delete Security group
resp, body = self.servers_client.remove_security_group(self.server_id,
self.sg_name)
self.assertEqual(202, resp.status)
class ServerRescueTestXML(ServerRescueTestJSON):
_interface = 'xml'
|
|
# Copyright 2017,2018,2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from six.moves import range
import copy
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
import nnabla.parametric_functions as PF
from nnabla.testing import assert_allclose, clear_called_flag_recorder
from nbla_test_utils import list_context
@pytest.mark.parametrize("seed", [313])
def test_graph_logreg(seed):
rng = np.random.RandomState(seed)
x = nn.Variable([2, 3, 4], need_grad=True)
w = nn.Variable([12, 5], need_grad=True)
b = nn.Variable([5], need_grad=True)
t = nn.Variable([2, 1])
x.d = rng.randn(*x.shape)
w.d = rng.randn(*w.shape)
b.d = rng.randn(*b.shape)
t.d = rng.randint(0, 5, size=t.shape)
nn.set_default_context(nn.Context())
# Forwardprop by definition
with nn.auto_forward():
z = F.affine(x, w, b, 1)
l = F.softmax_cross_entropy(z, t, 1)
L = F.mean(l)
# Backprop
# Diffs should be initialized since gradients are always accumulated
x.g = 0
w.g = 0
b.g = 0
L.backward(clear_buffer=True)
x.g = rng.randn(*x.shape)
inputs = [x, w, b]
from nbla_test_utils import \
compute_analytical_and_numerical_grad_graph as grads
agrad, ngrad = grads(L, inputs, 1e-3)
assert_allclose(ngrad, agrad, atol=1e-2)
@pytest.mark.parametrize("seed", [311])
@pytest.mark.parametrize("model", ["mlp", "recurrent", "convolution"])
def test_graph_model(model, seed):
np.random.seed(313)
rng = np.random.RandomState(seed)
x = nn.Variable([2, 3, 4, 4], need_grad=True)
t = nn.Variable([2, 1])
x.d = rng.randn(*x.shape)
t.d = rng.randint(0, 5, size=t.shape)
nn.set_default_context(nn.Context())
# Forwardprop by definition
nn.clear_parameters()
if model == "mlp":
with nn.parameter_scope('fc1'):
z = PF.affine(x, 3)
z2 = F.relu(z, inplace=True)
with nn.parameter_scope('fc2'):
z3 = PF.affine(z2, 5)
elif model == "recurrent":
with nn.parameter_scope('fc1'):
z = PF.affine(x, 8)
z2 = F.relu(z, inplace=True)
h = z2
for _ in range(2):
with nn.parameter_scope('fc2'):
h = PF.affine(h, 8)
h = F.relu(h, inplace=True)
with nn.parameter_scope('fc3'):
z3 = PF.affine(h, 5)
elif model == "convolution":
with nn.parameter_scope('conv1'):
z = PF.convolution(x, 3, (2, 2))
z2 = F.relu(z, inplace=True)
with nn.parameter_scope('fc2'):
z3 = PF.affine(z2, 5)
else:
raise ValueError()
l = F.softmax_cross_entropy(z3, t, 1)
L = F.mean(l)
# Forwardprop
L.forward(clear_no_need_grad=True)
# Backprop
# Diffs should be initialized since gradients are always accumulated
x.grad.zero()
L.backward(clear_buffer=True)
x.g = rng.randn(*x.shape)
parameters = nn.get_parameters()
for param in parameters.values():
param.grad.zero()
inputs = [x] + list(parameters.values())
from nbla_test_utils import \
compute_analytical_and_numerical_grad_graph as grads
agrad, ngrad = grads(L, inputs, 1e-3)
assert_allclose(ngrad, agrad, atol=1.05e-2)
@pytest.mark.parametrize("seed", [311])
def test_graph_unlink_backward(seed):
rng = np.random.RandomState(seed)
x0 = nn.Variable([2, 4], need_grad=True)
x1 = nn.Variable([2, 4], need_grad=True)
x0.d = rng.randn(*x0.shape)
x1.d = rng.randn(*x1.shape)
x0.grad.zero()
x1.grad.zero()
with nn.auto_forward():
with nn.parameter_scope("fc0"):
h0 = PF.affine(x0, 2)
with nn.parameter_scope("fc1"):
h1 = PF.affine(x1, 2)
h0.need_grad = False
h = h0 + h1
with nn.parameter_scope("fc"):
y = PF.affine(h, 1)
y.backward(clear_buffer=True)
assert np.all(x0.g == 0)
assert not np.all(x1.g == 0)
@pytest.mark.parametrize("seed", [311])
def test_graph_clear_buffer(seed):
np.random.seed(313)
rng = np.random.RandomState(seed)
x = nn.Variable([2, 3, 4, 4])
t = nn.Variable([2, 1])
x.d = rng.randn(*x.shape)
t.d = rng.randint(0, 5, size=t.shape)
# Network definition
nn.set_default_context(nn.Context())
nn.clear_parameters()
x1 = x + 1
x2 = x1 - 1
with nn.parameter_scope('conv1'):
z = PF.convolution(x2, 3, (2, 2))
z2 = F.relu(z, inplace=True)
with nn.parameter_scope('fc2'):
z3 = PF.affine(z2, 5)
l = F.softmax_cross_entropy(z3, t, 1)
L = F.mean(l)
# Forwardprop
import tempfile
import os
tmpd = tempfile.mkdtemp()
nn.save_parameters(os.path.join(tmpd, 'parameter.h5'))
first = False
for cnng in [False, True]:
for cb in [False, True]:
_ = nn.load_parameters(os.path.join(tmpd, 'parameter.h5'))
for v in nn.get_parameters().values():
v.grad.zero()
L.forward(clear_no_need_grad=cnng)
L.backward(clear_buffer=cb)
if not first:
first = True
g = list(nn.get_parameters().values())[0].g.copy()
else:
g2 = list(nn.get_parameters().values())[0].g.copy()
import platform
if platform.machine() == 'ppc64le':
pytest.skip("This test fails on ppc64le")
assert np.all(g == g2)
@pytest.mark.parametrize("seed", [311])
@pytest.mark.parametrize("clear_buffer", [True, False])
def test_graph_rewire(seed, clear_buffer):
nn.clear_parameters()
# A. defining graph definition utility
def mlp2(x, scope):
with nn.parameter_scope(scope):
h = F.tanh(PF.affine(x, 10, name='a1'))
h = F.tanh(PF.affine(h, 10, name='a1'))
return h
# A. Create a graph A.
xa = nn.Variable((2, 10), need_grad=True)
ya = mlp2(xa, 'a')
# B. Create a graph B.
xb = nn.Variable((2, 10), need_grad=True)
yb = mlp2(xb, 'b')
# C. Create directly connected graph.
xc = nn.Variable((2, 10))
yc = mlp2(mlp2(xc, 'a'), 'b')
# D. Rewire the graphs A and B.
xb.rewire_on(ya)
# E. Check whether the results are the same.
rng = np.random.RandomState(seed)
data = rng.randn(*xa.shape)
xa.d = data
xc.d = data
params = nn.get_parameters()
def zero_grad():
for p in params.values():
p.grad.zero()
def backup_params():
return [p.g.copy() for p in params.values()]
# Checking forward
yb.forward(clear_no_need_grad=clear_buffer)
yc.forward(clear_no_need_grad=clear_buffer)
assert_allclose(yb.d, yc.d)
# Checking backward
zero_grad()
yb.backward(clear_buffer=clear_buffer)
gb = backup_params()
zero_grad()
yc.backward(clear_buffer=clear_buffer)
gc = backup_params()
assert_allclose(xa.d, xc.d)
for b, c in zip(gb, gc):
assert_allclose(b, c)
def test_deleted_outputs():
rng = np.random.RandomState(313)
x = nn.Variable((2, 3, 4, 5))
h, m, v = PF.batch_normalization(x, output_stat=True)
del m
x.d = rng.randn(*x.shape).astype(np.float32)
h.forward()
h.backward()
def test_function_hook():
'''
Testing function hooks in forward and backward
'''
x = nn.Variable.from_numpy_array(
np.zeros((2, 3), dtype=np.float32)).apply(need_grad=True)
x.grad.zero()
h = x + 2
h.data.zero()
h.grad.zero()
y = h * 0.5
y.data.zero()
def forward_pre_hook(f):
assert_allclose(f.outputs[0].d, 0)
def forward_post_hook(f):
if f.info.type_name == 'AddScalar':
assert_allclose(f.outputs[0].d, 2)
if f.info.type_name == 'MulScalar':
assert_allclose(f.outputs[0].d, 1)
def backward_pre_hook(f):
assert_allclose(f.inputs[0].g, 0)
def backward_post_hook(f):
# Both h and x grad will be 0.5
assert_allclose(f.inputs[0].g, 0.5)
y.forward(function_pre_hook=forward_pre_hook,
function_post_hook=forward_post_hook)
y.backward(function_pre_hook=backward_pre_hook,
function_post_hook=backward_post_hook)
x.grad.zero()
z = x * 0.1
# Just a smoke test that forward_all accepts function hooks
nn.forward_all((y, z), function_pre_hook=lambda f: None,
function_post_hook=lambda f: None)
@pytest.mark.parametrize("seed", [313])
def test_shared_variable_on_same_function(seed):
rng = np.random.RandomState(313)
xd = rng.randn(2, 3)
x = nn.Variable.from_numpy_array(xd).apply(need_grad=True)
x.grad.zero()
y = x * x * x
y.forward()
y.backward()
assert_allclose(x.g, 3 * xd ** 2)
@pytest.mark.parametrize("seed", [313])
def test_function_context(seed):
rng = np.random.RandomState(313)
xd = rng.randn(2, 3)
x = nn.Variable.from_numpy_array(xd)
ctx1 = nn.Context(backend=['cpu:float'],
array_class='CpuCachedArray', device_id='1')
with nn.context_scope(ctx1):
y = F.relu(x)
ctx0 = nn.Context(backend=['cpu:float'],
array_class='CpuCachedArray', device_id='0')
# TODO: use id or hash if we determine the spec
assert str(ctx0) != str(ctx1)
assert str(ctx1) == str(y.parent.context)
with nn.context_scope(y.parent.context):
z = F.relu(x)
assert str(y.parent.context) == str(z.parent.context)
def test_no_need_grad_backward():
'''
This tests a previously existing bug where an
intermediate variable with need_grad=False yet required
to compute a gradient in a function has been unexpectedly cleared.
'''
nn.prefer_cached_array(False)
x = nn.Variable(tuple(), need_grad=False)
y = nn.Variable(tuple(), need_grad=True)
z = nn.Variable(tuple(), need_grad=False)
xx = x * 1
yy = y * 1
zz = z * 1
a = xx * 3
b = xx * yy
c = xx * zz
d = a * b * c
x.data.fill(1)
y.data.fill(2)
z.data.fill(0.5)
hook = None # lambda f: print(f, list(map(lambda x: x.d, f.inputs)))
d.forward(clear_no_need_grad=True, function_pre_hook=hook)
y.grad.zero()
d.backward(clear_buffer=True, function_pre_hook=hook)
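# Why 1.5 (derivation for the assert below): d = (3*xx) * (xx*yy) * (xx*zz)
# = 3 * x^3 * y * z, so dd/dy = 3 * x^3 * z = 3 * 1 * 0.5 = 1.5 with the
# fill values set above.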
assert np.isclose(y.g, 1.5)
@pytest.mark.parametrize("clear_buffer", [False, True])
def test_no_need_grad_forward(clear_buffer):
'''
This tests a previously existing bug where an intermediate variable
has been unexpectedly cleared before the end of life if
it is used in an in-place function and
another function at the same time.
'''
import nnabla as nn
import nnabla.functions as F
nn.prefer_cached_array(False)
x = nn.Variable(tuple(), need_grad=False)
xx = x * 1
a = xx.reshape(x.shape)
b = xx * 1
d = a * b
x.data.fill(1)
d.forward(clear_no_need_grad=True, clear_buffer=clear_buffer)
assert np.isclose(d.d, 1.0)
def test_no_need_grad_forward_double():
'''
This tests a previously existing bug where a variable used
twice by a single function caused an unexpected clear due to
incorrect count of function references.
'''
import nnabla as nn
import nnabla.functions as F
nn.prefer_cached_array(False)
x = nn.Variable(tuple())
xx = x * 1
y = xx * xx
z = xx * 1
a = y * z
x.data.fill(1)
a.forward(clear_no_need_grad=True)
assert np.isclose(a.d, 1.0)
class TestClearInput():
def check_input_data_clear_called_flags(self, answer):
result = clear_called_flag_recorder.get_input_clear_called_flags()
assert len(result) == len(answer)
for i, flags in enumerate(answer):
assert len(result[i]) == len(flags)
for j, flag in enumerate(flags):
assert flag == result[i][j][0]
def setup_method(self):
clear_called_flag_recorder.activate_clear_called_flag_recorder()
def teardown_method(self):
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Test for clearing input in a network of two layers.
def test_clear_input_if_no_need_grad0(self):
x1 = nn.Variable([1, 5], need_grad=True)
xx1 = F.identity(x1)
y1 = F.add_scalar(xx1)
answer = []
answer.append([False])
answer.append([True])
y1.forward(clear_no_need_grad=True)
self.check_input_data_clear_called_flags(answer)
# Test for clearing input in a network of three layers.
def test_clear_input_if_no_need_grad1(self):
x1 = nn.Variable([1, 5], need_grad=True)
xx1 = F.identity(x1)
y1 = F.add_scalar(xx1)
y2 = F.add_scalar(y1)
answer = []
answer.append([False])
answer.append([True])
answer.append([True])
y2.forward(clear_no_need_grad=True)
self.check_input_data_clear_called_flags(answer)
# Test the case where an input is not cleared when it is required for backward at the previous layer function.
def test_clear_input_if_no_need_grad2(self):
x1 = nn.Variable([1, 5], need_grad=True)
xx1 = F.identity(x1) # (1)
y1 = F.tanh(xx1) # (2)
y2 = F.add_scalar(y1) # (3)
answer = []
answer.append([False])
answer.append([True])
answer.append([False])
# y1 must not be cleared after (3) because y1 is required for backward of (2).
y2.forward(clear_no_need_grad=True)
self.check_input_data_clear_called_flags(answer)
# Test for a variable shared with two layer functions.
# Check if it is cleared after the both functions finish to use it.
def test_clear_input_if_no_need_grad_branch0(self):
x1 = nn.Variable([1, 5], need_grad=True)
x2 = nn.Variable([1, 5], need_grad=True)
xx1 = F.identity(x1)
y1 = F.add_scalar(xx1) # (1)
y2 = F.add_scalar(xx1) # (2)
y3 = F.add2(y1, y2) # (3)
answer = []
answer.append([False])
answer.append([False]) # (1) does not clear xx1
answer.append([True]) # (2) clears xx1
answer.append([True, True])
y3.forward(clear_no_need_grad=True)
self.check_input_data_clear_called_flags(answer)
# Test for a variable shared with mul2 and add2.
# add2 does not require it as input for backward, but mul2 does.
def test_clear_input_if_no_need_grad_branch1(self):
x1 = nn.Variable([1, 5], need_grad=True)
x2 = nn.Variable([1, 5], need_grad=True)
x3 = nn.Variable([1, 5], need_grad=True)
xx1 = F.identity(x1)
xx2 = F.identity(x2)
y1 = F.mul2(xx1, xx2) # (1)
xx3 = F.identity(x3)
y2 = F.add2(xx2, xx3) # (2)
y3 = F.add2(y1, y2) # (3)
answer = []
answer.append([False])
answer.append([False])
answer.append([False, False]) # (1)
answer.append([False])
answer.append([False, True]) # (2) use xx2 in backward
answer.append([True, True]) # (3)
y3.forward(clear_no_need_grad=True)
self.check_input_data_clear_called_flags(answer)
# Test for only clearing bias in convolution.
def test_clear_input_if_no_need_grad_convolution(self):
x1 = nn.Variable([1, 1, 2], need_grad=True)
x2 = nn.Variable([1, 1, 2], need_grad=True)
x3 = nn.Variable([1], need_grad=True)
inp = F.identity(x1)
weight = F.identity(x2)
bias = F.identity(x3)
y = F.convolution(inp, weight, bias) # (1)
answer = []
answer.append([False])
answer.append([False])
answer.append([False])
answer.append([False, False, True]) # (1) clears bias
y.forward(clear_no_need_grad=True)
self.check_input_data_clear_called_flags(answer)
# Test for only clearing beta in batch_normalization.
@pytest.mark.parametrize("batch_stat", [False, True])
def test_clear_input_if_no_need_grad_batch_normalization(self, batch_stat):
x1 = nn.Variable([1, 1, 2], need_grad=True)
x2 = nn.Variable([1, 1, 1], need_grad=True)
x3 = nn.Variable([1, 1, 1], need_grad=True)
x4 = nn.Variable([1, 1, 1], need_grad=True)
x5 = nn.Variable([1, 1, 1], need_grad=True)
x = F.identity(x1)
beta = F.identity(x2)
gamma = F.identity(x3)
if batch_stat:
y = F.batch_normalization(
x, beta, gamma, x4, x5, batch_stat=batch_stat)
else:
mean = F.identity(x4)
var = F.identity(x5)
y = F.batch_normalization(
x, beta, gamma, mean, var, batch_stat=batch_stat)
answer = []
answer.append([False])
answer.append([False])
answer.append([False])
if not batch_stat:
answer.append([False])
answer.append([False])
answer.append([False, True, False, False, False])
y.forward(clear_no_need_grad=True)
self.check_input_data_clear_called_flags(answer)
class TestClearOutputGrad():
def check_grad_cleared_flags(self, answer):
result = clear_called_flag_recorder.get_output_clear_called_flags()
assert len(result) == len(answer)
for i, flags in enumerate(answer):
assert len(result[i]) == len(flags)
for j, flag in enumerate(flags):
assert flag == result[i][j][1]
def setup_method(self):
clear_called_flag_recorder.activate_clear_called_flag_recorder()
def teardown_method(self):
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Test for the type of grad given to backward.
@pytest.mark.parametrize("grad", [1, None, np.ndarray([1]), nn.NdArray([1])])
def test_clear_output_grad_argument(self, grad):
x1 = nn.Variable([1], need_grad=True)
xx1 = F.identity(x1)
y1 = F.add_scalar(xx1)
answer_grad = []
if grad is None or isinstance(grad, nn.NdArray):
answer_grad.append([False]) # y1
else:
answer_grad.append([True]) # y1
answer_grad.append([True]) # xx1
y1.forward(clear_no_need_grad=True)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
clear_called_flag_recorder.activate_clear_called_flag_recorder()
y1.backward(clear_buffer=True, grad=grad)
self.check_grad_cleared_flags(answer_grad)
assert y1.grad.clear_called == False
# Test for an inplaced variable.
def test_clear_output_grad_inplace(self):
x1 = nn.Variable([1], need_grad=True)
xx1 = F.identity(x1)
y1 = F.add_scalar(xx1, inplace=True)
y2 = F.add_scalar(y1)
answer_grad = []
answer_grad.append([True])
answer_grad.append([True])
answer_grad.append([True])
y2.forward(clear_no_need_grad=True)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
clear_called_flag_recorder.activate_clear_called_flag_recorder()
y2.backward(clear_buffer=True)
self.check_grad_cleared_flags(answer_grad)
# Test for a variable shared with two layer functions.
def test_clear_output_grad_shared_variable(self):
x1 = nn.Variable([1], need_grad=True)
xx1 = F.identity(x1)
y1 = F.add_scalar(xx1)
y2 = F.add_scalar(xx1)
y3 = F.add2(y1, y2)
answer_grad = []
answer_grad.append([True])
answer_grad.append([True])
answer_grad.append([True])
answer_grad.append([True])
y3.forward(clear_no_need_grad=True)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
clear_called_flag_recorder.activate_clear_called_flag_recorder()
y3.backward(clear_buffer=True)
self.check_grad_cleared_flags(answer_grad)
# Test for a persistent variable.
def test_clear_output_grad_persistent(self):
x1 = nn.Variable([1], need_grad=True)
xx1 = F.identity(x1)
y1 = F.add_scalar(xx1)
y2 = F.add_scalar(y1)
xx1.persistent = True
y2.persistent = True
answer_grad = []
answer_grad.append([False]) # y2
answer_grad.append([True]) # y1
answer_grad.append([False]) # xx1
y2.forward(clear_no_need_grad=True)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
clear_called_flag_recorder.activate_clear_called_flag_recorder()
y2.backward(clear_buffer=True)
self.check_grad_cleared_flags(answer_grad)
# Test for the input variables of sink.
# In the case where Function::prohibit_clear_input_buffers returns true,
# these inputs must not be cleared from any function.
def test_clear_output_grad_prohibit_clear_input(self):
x1 = nn.Variable([1], need_grad=True)
xx1 = F.identity(x1)
y1 = F.add_scalar(xx1)
y2 = F.add_scalar(xx1)
y3 = F.sink(y1, y2)
answer_grad = []
answer_grad.append([True]) # y3
answer_grad.append([False]) # y2
answer_grad.append([False]) # y1
answer_grad.append([True]) # xx1
y3.forward(clear_no_need_grad=True)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
clear_called_flag_recorder.activate_clear_called_flag_recorder()
y3.backward(clear_buffer=True)
self.check_grad_cleared_flags(answer_grad)
class TestRecomputation():
def teardown_method(self):
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
def check_input_data_clear_called_flags(self, answer):
result = clear_called_flag_recorder.get_input_clear_called_flags()
assert len(result) == len(answer)
for i, flags in enumerate(answer):
assert len(result[i]) == len(flags)
for j, flag in enumerate(flags):
assert flag == result[i][j][0]
def check_recomputation(self, seed, graph, inputs):
def forward_backward_and_get_grads(y):
# Initialize grads
for input in inputs:
if input.need_grad:
input.grad.zero()
y.forward(clear_no_need_grad=True)
y.backward(clear_buffer=True)
# Get grads
grads = []
for input in inputs:
if input.need_grad:
grads.append(copy.deepcopy(input.g))
return grads
# Set random input data.
rng = np.random.RandomState(seed)
for input in inputs:
input.d = rng.randn(*input.shape)
# Calculate reference grads.
y_ref = graph(*inputs)
# Disable recompute flags for generating reference grads.
def disable_recompute_flag(f):
for input in f.inputs:
input.apply(recompute=False)
y_ref.visit(disable_recompute_flag)
grads_expected = forward_backward_and_get_grads(y_ref)
y = graph(*inputs)
grads_actual = forward_backward_and_get_grads(y)
for a, e in zip(grads_actual, grads_expected):
assert_allclose(a, e, rtol=0, atol=0)
# Check setting up recompute flag.
def test_recompute_flag(self):
x0 = nn.Variable((1, 1), need_grad=True)
x1 = F.sin(x0).apply(recompute=True)
x2 = F.sin(x1).apply(recompute=False)
x3 = F.sin(x2)
assert x0.recompute == False
assert x1.recompute == True
assert x2.recompute == False
assert x3.recompute == False
# Check whether input data is cleared when recompute flag is True.
def test_clear_input_data(self):
x0 = nn.Variable((1, 1), need_grad=True)
# `F.sin` input data is always needed for grad calculation
x1 = F.sin(x0).apply(recompute=True)
x2 = F.sin(x1).apply(recompute=False)
x3 = F.sin(x2)
answer = []
answer.append([False]) # x0
answer.append([True]) # x1
answer.append([False]) # x2
clear_called_flag_recorder.activate_clear_called_flag_recorder()
x3.forward(clear_no_need_grad=True)
self.check_input_data_clear_called_flags(answer)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Check clearing an output which needs `setup_recompute` for recomputation.
def test_clearing_without_recompute_flag(self):
x0 = nn.Variable((1, 128, 128), need_grad=True)
x1 = F.sin(x0).apply(recompute=True)
x2 = F.dropout(x1)
x3 = F.sin(x2).apply(recompute=True)
x4 = F.sin(x3).apply(recompute=True)
y = F.identity(x4)
# Skip this code temporarily since it causes random crashes
# when running CI tests on Windows 10 with nnabla-cuda-ext.
pytest.skip(
'Skipped due to random crashes when running CI tests on Windows 10 with nnabla-cuda-ext')
y.forward(clear_no_need_grad=True)
x2.data.clear()
with pytest.raises(RuntimeError, match="Failed `called_setup_recompute_`"):
# x2.data cannot be recomputed correctly since `setup_recompute` is not called during forward propagation.
# Backward should raise when some intermediate variables are cleared by user.
y.backward()
# Check recomputed data value.
@pytest.mark.parametrize("seed", [313])
def test_recomputed_data_value(self, seed):
rng = np.random.RandomState(seed)
a0 = nn.Variable((2, 3), need_grad=True)
b0 = nn.Variable((2, 3), need_grad=True)
a0.d = rng.randn(*a0.shape)
b0.d = rng.randn(*b0.shape)
a1 = F.sin(a0).apply(recompute=True)
a2 = F.sin(a1)
a3 = F.sin(a2)
b1 = F.sin(b0)
b2 = F.sin(b1).apply(recompute=True)
b3 = F.sin(b2)
c0 = F.mul2(a3, b3).apply(recompute=True)
c1 = F.sin(c0)
# Forward
# Get output data which will be recomputed.
ref_data = [] # data of a0, b2 and c0 will be stored.
def get_output_data(nnabla_func):
outputs = nnabla_func.outputs
for output in outputs:
if output.recompute:
ref_data.append(copy.deepcopy(output.d))
c1.forward(function_post_hook=get_output_data)
# Backward
# Get recomputed data
act_data = []
def get_recomputed_data(nnabla_func):
inputs = nnabla_func.inputs
for input in inputs:
if input.recompute:
act_data.append(copy.deepcopy(input.d))
c1.backward(function_pre_hook=get_recomputed_data)
# Make the order the same as `ref_data`.
act_data.reverse()
# Check recomputed data
for act, ref in zip(act_data, ref_data):
assert_allclose(act, ref, rtol=0, atol=0)
@pytest.mark.parametrize("seed", [313])
def test_grad_value_simple(self, seed):
x = nn.Variable((2, 3), need_grad=True)
inputs = (x,)
def graph(x):
y = F.sin(x).apply(recompute=True)
y = F.cos(y)
return y
self.check_recomputation(seed, graph, inputs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("need_grad_x1", [False, True])
@pytest.mark.parametrize("need_grad_x2", [False, True])
def test_grad_value_with_branch(self, seed, need_grad_x1, need_grad_x2):
x1 = nn.Variable((2, 3), need_grad=need_grad_x1)
x2 = nn.Variable((2, 3), need_grad=need_grad_x2)
inputs = (x1, x2)
def graph(x1, x2):
x1 = F.identity(x1).apply(recompute=True)
x2 = F.identity(x2).apply(recompute=True)
y = F.mul2(x1, x2)
y = F.identity(y)
return y
self.check_recomputation(seed, graph, inputs)
# Check `setup_recompute`
@pytest.mark.parametrize("seed", [313])
def test_grad_value_with_random_function(self, seed):
x1 = nn.Variable((2, 3), need_grad=True)
inputs = (x1,)
def graph(x1):
x1 = F.identity(x1).apply(recompute=True)
x2 = F.randn(shape=x1.shape, seed=123).apply(recompute=True)
x3 = F.rand(shape=x1.shape, seed=456).apply(recompute=True)
y = F.mul2(x1, x2).apply(recompute=True)
y = F.mul2(y, x3).apply(recompute=True)
y = F.identity(y)
return y
self.check_recomputation(seed, graph, inputs)
@pytest.mark.parametrize("seed", [313])
def test_grad_value_with_output_dependent_function(self, seed):
"""
Gradient values are tested for the function which depends on output data.
Here, we test a following case that variable `h` will be recomputed and
its data is needed for the `F.swish` backward.
x -> F.swish -> h -> F.interpolate -> y
"""
def graph(x0):
# F.swish -> F.interpolate
x1 = F.swish(x0)
x1.apply(recompute=True)
x2 = F.interpolate(x1, scale=(2,))
return x2
x = nn.Variable((2, 3), need_grad=True)
inputs = (x,)
self.check_recomputation(seed, graph, inputs)
@pytest.mark.parametrize("seed", [313])
def test_with_persistent_flag(self, seed):
x = nn.Variable((2, 3), need_grad=True)
inputs = (x,)
def graph(x0):
x1 = F.sin(x0).apply(recompute=True)
# Set `recompute` and `persistent` flag at the same time
x2 = F.sin(x1).apply(recompute=True, persistent=True)
x3 = F.sin(x2).apply(recompute=True)
y = F.sin(x3)
return y
y = graph(x)
# Trace data clearing during forward propagation.
clear_called_flag_recorder.activate_clear_called_flag_recorder()
y.forward(clear_no_need_grad=True)
expected = [
[False], # x0: graph input
[True], # x1: Cleared because `recompute=True`
[False], # x2: Not cleared because `persistent=True`
[True], # x3: Cleared because `recompute=True`
]
self.check_input_data_clear_called_flags(expected)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Check grad value
self.check_recomputation(seed, graph, inputs)
@pytest.mark.parametrize("seed", [313])
def test_with_inplacing(self, seed):
x = nn.Variable((2, 3), need_grad=True)
inputs = (x,)
def graph(x0):
x1 = F.sin(x0).apply(recompute=True)
# Set `recompute` flag to the inplaced variable.
x2 = F.reshape(x1, (3, 2), inplace=True).apply(recompute=True)
x3 = F.sin(x2).apply(recompute=True)
y = F.sin(x3)
return y
y = graph(x)
# Trace data clearing during forward propagation.
clear_called_flag_recorder.activate_clear_called_flag_recorder()
y.forward(clear_no_need_grad=True)
expected = [
[False], # x0: graph input
[False], # x1: Not cleared because inplaced data
[False], # x2: Not cleared because inplaced data
[True], # x3: Cleared because `recompute=True`
]
self.check_input_data_clear_called_flags(expected)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Check grad value
self.check_recomputation(seed, graph, inputs)
# Check clear of recomputed data on the subgraph which is not back-propagated.
def test_clear_data_on_not_bwd_path(self):
a0 = nn.Variable((2, 3), need_grad=True)
a1 = F.identity(a0).apply(recompute=True)
a2 = F.sin(a1).apply(recompute=True)
# These three variables are not back-propagated.
b0 = nn.Variable((2, 3), need_grad=False)
b1 = F.identity(b0).apply(recompute=True)
b2 = F.sin(b1).apply(recompute=True)
c1 = F.add2(a2, b2).apply(recompute=True)
c2 = F.sin(c1)
# Forward
clear_called_flag_recorder.activate_clear_called_flag_recorder()
c2.forward(clear_no_need_grad=True)
# Data which will be recomputed must be cleared during forward propagation.
expected = [
[False], # a0
[True], # a1
[False], # b0
[True], # b1
[True, True], # a2, b2
[True], # c1
]
self.check_input_data_clear_called_flags(expected)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Backward
clear_called_flag_recorder.activate_clear_called_flag_recorder()
c2.backward(clear_buffer=True)
# b1 is not on backward path and must be cleared during recomputation.
expected = [
# Recomputation
[False], # a0
[False], # a1
[False], # b0
[True], # b1 (not on backward path) must be cleared
[True, True], # a2, b2
[False], # c1
# Backward propagation
[True, True], # a2, b2
[False], # a1
[False], # a0
]
self.check_input_data_clear_called_flags(expected)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Check clearing of data not needed for grad calculation during recomputation.
def test_clear_no_need_grad_during_recomputation(self):
x0 = nn.Variable((2, 3), need_grad=True)
x1 = F.identity(x0).apply(recompute=True)
# x2.data must be cleared just after recomputation because it is not needed for backward propagation.
x2 = F.sin(x1).apply(recompute=True)
x3 = F.identity(x2).apply(recompute=True)
x4 = F.sin(x3)
# Forward
clear_called_flag_recorder.activate_clear_called_flag_recorder()
x4.forward(clear_no_need_grad=True)
# All intermediate data must be cleared.
expected = [
[False], # x0
[True], # x1
[True], # x2
[True], # x3
]
self.check_input_data_clear_called_flags(expected)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Backward
clear_called_flag_recorder.activate_clear_called_flag_recorder()
x4.backward(clear_buffer=True)
expected = [
# Recomputation
[False], # x0
[False], # x1
[True],  # x2: not needed for grad calculation
# Backward propagation
[False], # x3
[True], # x2
[False], # x1
[False], # x0
]
self.check_input_data_clear_called_flags(expected)
clear_called_flag_recorder.deactivate_clear_called_flag_recorder()
# Check recompute recursion stops at checkpoint.
def test_checkpoint(self):
x0 = nn.Variable((2, 3), need_grad=True)
x1 = F.sin(x0).apply(recompute=True)
x2 = F.sin(x1).apply(recompute=True)
x3 = F.sin(x2) # Checkpoint 1 (recompute == False)
x4 = F.sin(x3).apply(recompute=True)
x5 = F.sin(x4).apply(recompute=True)
x6 = F.sin(x5) # Checkpoint 2 (recompute == False)
x7 = F.sin(x6).apply(recompute=True)
x8 = F.sin(x7).apply(recompute=True)
# All intermediate data except checkpoints will be cleared during forward propagation.
x8.forward(clear_no_need_grad=True)
# Trace clear_called flags of `x2` and `x5` during backward propagation.
# The clear_called flag changes from True to False when the data is recomputed.
act_flags = []
def get_clear_called_flags(nnabla_func):
act_flags.append([x2.data.clear_called, x5.data.clear_called])
x8.backward(function_post_hook=get_clear_called_flags)
ref_flags = [
# [x2, x5] clear_called flags
[True, True], # After F.sin(x7) backward
[True, True], # After F.sin(x6) backward
[True, False], # After F.sin(x5) backward
[True, False], # After F.sin(x4) backward
[True, False], # After F.sin(x3) backward
[False, False], # After F.sin(x2) backward
[False, False], # After F.sin(x1) backward
[False, False], # After F.sin(x0) backward
]
assert(ref_flags == act_flags)
# Test unnecessary recomputation with single recomputation recursion.
def test_unnecessary_traverse_0(self):
# No need grad path
a0 = nn.Variable((2, 3), need_grad=False)
a1 = F.sin(a0).apply(recompute=True)
# Need grad path
b0 = nn.Variable((2, 3), need_grad=True)
b1 = F.sin(b0).apply(recompute=True)
# branch
c = F.add2(a1, b1)
# Check whether unnecessary recomputation for `a1.data` is performed.
c.forward(clear_no_need_grad=True)
assert(a1.data.clear_called == True)
assert(b1.data.clear_called == True)
# Run backward without clearing buffers to check whether recomputation is performed by inspecting the `clear_called` flag.
c.backward(clear_buffer=False)
# a1.data is still cleared. (Recalculation is not performed)
assert(a1.data.clear_called == True)
# b1.data is set. (Recalculation is performed)
assert(b1.data.clear_called == False)
# Test recomputation recursion depth.
def test_unnecessary_traverse_1(self):
a0 = nn.Variable((2, 3), need_grad=False)
# `a1` will not be recomputed since `a2` will not be cleared.
a1 = F.sin(a0).apply(recompute=True)
a2 = F.cos(a1)
a3 = F.sin(a2).apply(recompute=True) # 'a3` will be recomputed.
b0 = nn.Variable((2, 3), need_grad=True).apply(recompute=True)
b1 = F.identity(b0).apply(recompute=True)
c = F.mul2(a3, b1).apply(recompute=True)
# Check recomputation recursion stops when `a3.data` is calculated.
c.forward(clear_buffer=False)
# `a1.data` is cleared because `recompute` flag is `true`.
assert(a1.data.clear_called == True)
# `a2.data` is not cleared because `recompute` flag is `false`.
assert(a2.data.clear_called == False)
c.backward(clear_buffer=False)
# If the recursive call reached to `a1`, `a1.data` should be set by recomputation.
# However, the recursive call stops at `a2` whose data is not cleared.
assert(a1.data.clear_called == True)
# Test unnecessary recomputation for whole graph.
def test_unnecessary_traverse_2(self):
def fail_with_not_cleared_data(nnabla_func):
inputs = nnabla_func.inputs
for input in inputs:
if input.parent is None:
continue
if not input.data.clear_called:
# Not cleared (recomputed) data is found.
pytest.fail()
# Prepare a graph that does not need any recomputation.
x1 = nn.Variable((2, 3), need_grad=True)
x1 = F.identity(x1).apply(recompute=True)
x2 = nn.Variable((2, 3), need_grad=True)
x2 = F.identity(x2).apply(recompute=True)
y = F.add2(x1, x2).apply(recompute=True)
y = F.identity(y).apply(recompute=True)
# Check unnecessary recomputation.
y.forward(clear_no_need_grad=True)
y.backward(function_pre_hook=fail_with_not_cleared_data)
@pytest.mark.parametrize("recompute_flag", [False, True])
def test_with_statement_variable_creation(self, recompute_flag):
"""
Test for setting recompute flags with Python `with` statement.
"""
# Create a new Variable
x1 = nn.Variable((2, 3))
assert x1.recompute == False
with nn.recompute(recompute_flag):
# Create Variable by `__cinit__()`
y1 = nn.Variable((2, 3))
assert y1.recompute == recompute_flag
# Create Variable by `create_from_cvariable()`
y2 = x1.reshape((3, 2), unlink=True)
assert y2.recompute == recompute_flag
# Create Variable by `create_from_cg_variable()`
y3 = F.relu(x1)
assert y3.recompute == recompute_flag
# Create Variable by `from_numpy_array()`
data = np.array((2, 3))
y4 = nn.Variable.from_numpy_array(data)
assert y4.recompute == recompute_flag
# Create Variable by `get_unlinked_variable()`
y5 = x1.get_unlinked_variable()
assert y5.recompute == recompute_flag
# Recompute flag for referenced Variable must not be overwritten.
# More detailed tests are performed by `test_nested_with_statement`.
y6 = x1
assert y6.recompute == False
# Direct function connection
y7 = F.relu(F.relu(x1))
# Create a new Variable after with statement
x2 = nn.Variable((2, 3))
assert x2.recompute == False
# Check the recompute flag of the forcibly fetched Python Variable.
assert y7.parent.inputs[0].recompute == recompute_flag
# Check default recompute flag for nn.recompute()
with nn.recompute():
x = nn.Variable((2, 3))
assert x.recompute == True
# Recompute flag for first nest
@pytest.mark.parametrize("f1", [False, True])
# Recompute flag for second nest
@pytest.mark.parametrize("f2", [False, True])
# Recompute flag for third nest
@pytest.mark.parametrize("f3", [False, True])
def test_nested_with_statement(self, f1, f2, f3):
"""
Test for nested Python `with` statements of recomputation.
"""
x0 = nn.Variable((2, 3))
assert x0.recompute == False
# Nest 1
with nn.recompute(f1):
x1 = nn.Variable((2, 3))
x0_1 = x0
assert x1.recompute == f1
assert x0_1.recompute == False
# Nest 2
with nn.recompute(f2):
x2 = nn.Variable((2, 3))
x0_2 = x0
x1_2 = x1
assert x2.recompute == f2
assert x0_2.recompute == False
assert x1_2.recompute == f1
# Nest 3
with nn.recompute(f3):
x3 = nn.Variable((2, 3))
x0_3 = x0
x1_3 = x1
x2_3 = x2
assert x3.recompute == f3
assert x0_3.recompute == False
assert x1_3.recompute == f1
assert x2_3.recompute == f2
x2 = nn.Variable((2, 3))
x0_2 = x0
x1_2 = x1
assert x2.recompute == f2
assert x0_2.recompute == False
assert x1_2.recompute == f1
x1 = nn.Variable((2, 3))
x0_1 = x0
assert x1.recompute == f1
assert x0_1.recompute == False
x0 = nn.Variable((2, 3))
assert x0.recompute == False
# Recompute flag for first `with` block
@pytest.mark.parametrize("f1", [False, True])
# Recompute flag for second `with` block
@pytest.mark.parametrize("f2", [False, True])
def test_sequential_with_statement(self, f1, f2):
"""
Test for sequential use of `with` statements.
"""
x = nn.Variable((2, 3))
assert x.recompute == False
# First `with` block
with nn.recompute(f1):
y = F.relu(x)
assert y.recompute == f1
y = F.sin(y)
assert y.recompute == f1
assert y.recompute == f1
y = F.relu(y)
assert y.recompute == False
# Second `with` block
with nn.recompute(f2):
y = F.relu(x)
assert y.recompute == f2
y = F.sin(y)
assert y.recompute == f2
assert y.recompute == f2
y = F.relu(y)
assert y.recompute == False
@pytest.mark.parametrize("recompute_flag", [False, True])
def test_recompute_fn_decorator(self, recompute_flag):
"""
Test for setting recompute flags with function decorator `nn.recompute_fn()`.
"""
# Specifying recompute flag
@nn.recompute_fn(recompute_flag)
def func2(x):
assert x.recompute == False
y = F.relu(x)
assert y.recompute == recompute_flag
return y
# Check recompute flags
x2 = nn.Variable((2, 3))
assert x2.recompute == False
y2 = func2(x2)
assert y2.recompute == recompute_flag
def test_recompute_fn_decorator_default_use(self):
"""
Test for setting recompute flags with function decorator `nn.recompute_fn()` without specifying recompute flag.
"""
# Default usage
@nn.recompute_fn()
def func1(x):
assert x.recompute == False
y = F.relu(x)
assert y.recompute == True
return y
# Check recompute flags
x1 = nn.Variable((2, 3))
assert x1.recompute == False
y1 = func1(x1)
assert y1.recompute == True
@pytest.mark.parametrize("recompute_flag", [False, True])
def test_recompute_fn_decorator_multiple_inputs_outputs(self, recompute_flag):
"""
Test for the use of `nn.recompute_fn()` with a function which has multiple inputs, outputs, args and kwargs.
"""
# Define sample function with multiple inputs and outputs
@nn.recompute_fn(recompute_flag)
def func(x1, x2, val, axis, reverse=False, alpha=0.2):
# Check args and kwargs passed correctly
assert val == 3.14
assert axis == 0
assert reverse == True
assert alpha == 0.3
y1 = F.cumsum(x1, axis, reverse=reverse)
y2 = x2 * val
y3 = y1 + y2
y3 = F.leaky_relu(y3, alpha=alpha)
# Check recompute flags for variables defined inside this function
assert y1.recompute == recompute_flag
assert y2.recompute == recompute_flag
assert y3.recompute == recompute_flag
return y2, y3
x1 = nn.Variable((2, 3))
x2 = nn.Variable((2, 3))
y1, y2 = func(x1, x2, 3.14, 0, alpha=0.3, reverse=True)
assert y1.recompute == recompute_flag
assert y2.recompute == recompute_flag
# Recompute flag for outer function
@pytest.mark.parametrize("f0", [False, True])
# Recompute flag for first inner function
@pytest.mark.parametrize("f1", [False, True])
# Recompute flag for second inner function
@pytest.mark.parametrize("f2", [False, True])
def test_nested_recompute_fn_decorator(self, f0, f1, f2):
"""
Test for setting recompute flags with nested function decorator `nn.recompute_fn()`.
"""
# First sub function
@nn.recompute_fn(f1)
def func1(x):
assert x.recompute == f0
y = F.relu(x)
assert y.recompute == f1
return y
# Second sub function
@nn.recompute_fn(f2)
def func2(x):
assert x.recompute == f0
y = F.sin(x)
assert y.recompute == f2
return y
# Main function
@nn.recompute_fn(f0)
def func0(x):
assert x.recompute == False
y = F.identity(x)
assert y.recompute == f0
# First inner function call
y = func1(y)
assert y.recompute == f1
y = F.relu(y)
assert y.recompute == f0
# Second inner function call
y = func2(y)
assert y.recompute == f2
y = F.identity(y)
assert y.recompute == f0
return y
# Call main function
x = nn.Variable((2, 3))
y = func0(x)
assert y.recompute == f0
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("func, num_inputs", [
(F.relu, 1),
(F.leaky_relu, 1),
(F.random_erase, 1),
(F.add2, 2),
(F.bc_add2, 2),
(F.sub2, 2),
(F.add_scalar, 1),
(F.mul_scalar, 1),
])
def test_obsolete_inplace_option(inplace, func, num_inputs):
'''
This test confirms the construction of the graph.
Since F.log_softmax requires its output for the backward calculation, the graph cannot be constructed if that output is in-placed.
'''
x0 = nn.Variable((2, 3, 4, 5), need_grad=True)
x1 = nn.Variable((2, 3, 4, 5), need_grad=True)
if num_inputs == 1:
y = F.identity(x0)
y = F.log_softmax(y)
y = func(y, inplace=inplace)
y.forward()
y.backward()
elif num_inputs == 2:
y0 = F.identity(x0)
y1 = F.identity(x1)
y0 = F.log_softmax(y0)
y1 = F.log_softmax(y1)
y = func(y0, y1, inplace=inplace)
y.forward()
y.backward()
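# A minimal usage sketch (not part of the test suite; the helper name below is
# ours): marking intermediate variables with `recompute=True` lets
# forward(clear_no_need_grad=True) clear their data during the forward pass,
# and backward() recomputes them on demand, trading extra compute for memory.
def _example_recompute_chain():
    x = nn.Variable((2, 3), need_grad=True)
    h = F.relu(x).apply(recompute=True)
    h = F.sin(h).apply(recompute=True)
    y = F.identity(h)
    y.forward(clear_no_need_grad=True)  # recompute-flagged data is cleared here
    y.backward()  # cleared data is recomputed when needed for gradients
    return y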
|
|
###############################################################################
# EllipsoidalPotential.py: base class for potentials corresponding to
# density profiles that are stratified on
# ellipsoids:
#
# \rho(x,y,z) ~ \rho(m)
#
# with m^2 = x^2+y^2/b^2+z^2/c^2
#
###############################################################################
import hashlib
import numpy
from scipy import integrate
from ..util import coords, conversion
from ..util import _rotate_to_arbitrary_vector
from .Potential import Potential, check_potential_inputs_not_arrays
class EllipsoidalPotential(Potential):
"""Base class for potentials corresponding to density profiles that are stratified on ellipsoids:
.. math::
\\rho(x,y,z) \\equiv \\rho(m^2)
where :math:`m^2 = x^2+y^2/b^2+z^2/c^2`. Note that :math:`b` and :math:`c` are defined to be the axis ratios (rather than using :math:`m^2 = x^2/a^2+y^2/b^2+z^2/c^2` as is common).
Implement a specific density distribution with this form by inheriting from this class and defining functions ``_mdens(self,m)`` (the density as a function of ``m``), ``_mdens_deriv(self,m)`` (the derivative of the density as a function of ``m``), and ``_psi(self,m)``, which is:
.. math::
\\psi(m) = -\\int_{m^2}^\\infty d m^2 \\rho(m^2)
See PerfectEllipsoidPotential for an example and `Merritt & Fridman (1996) <http://adsabs.harvard.edu/abs/1996ApJ...460..136M>`_ for the formalism.
"""
def __init__(self,amp=1.,
b=1.,c=1.,
zvec=None,pa=None,glorder=50,
ro=None,vo=None,amp_units=None):
"""
NAME:
__init__
PURPOSE:
initialize an ellipsoidal potential
INPUT:
amp - amplitude to be applied to the potential (default: 1); can be a Quantity with units that depend on the specific spheroidal potential
b - y-to-x axis ratio of the density
c - z-to-x axis ratio of the density
zvec= (None) If set, a unit vector that corresponds to the z axis
pa= (None) If set, the position angle of the x axis (rad or Quantity)
glorder= (50) if set, compute the relevant force and potential integrals with Gaussian quadrature of this order
amp_units - ('mass', 'velocity2', 'density') type of units that amp should have if it has units (passed to Potential.__init__)
ro=, vo= distance and velocity scales for translation into internal units (default from configuration file)
OUTPUT:
(none)
HISTORY:
2018-08-06 - Started - Bovy (UofT)
"""
Potential.__init__(self,amp=amp,ro=ro,vo=vo,amp_units=amp_units)
# Setup axis ratios
self._b= b
self._c= c
self._b2= self._b**2.
self._c2= self._c**2.
self._force_hash= None
# Setup rotation
self._setup_zvec_pa(zvec,pa)
# Setup integration
self._setup_gl(glorder)
if not self._aligned or numpy.fabs(self._b-1.) > 10.**-10.:
self.isNonAxi= True
return None
def _setup_zvec_pa(self,zvec,pa):
if not pa is None:
pa= conversion.parse_angle(pa)
if zvec is None and (pa is None or numpy.fabs(pa) < 10.**-10.):
self._aligned= True
else:
self._aligned= False
if not pa is None:
pa_rot= numpy.array([[numpy.cos(pa),numpy.sin(pa),0.],
[-numpy.sin(pa),numpy.cos(pa),0.],
[0.,0.,1.]])
else:
pa_rot= numpy.eye(3)
if not zvec is None:
if not isinstance(zvec,numpy.ndarray):
zvec= numpy.array(zvec)
zvec/= numpy.sqrt(numpy.sum(zvec**2.))
zvec_rot= _rotate_to_arbitrary_vector(\
numpy.array([[0.,0.,1.]]),zvec,inv=True)[0]
else:
zvec_rot= numpy.eye(3)
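# Combined rotation applied to (x,y,z): zvec_rot (which aligns the given zvec
# with the z axis) acts first, followed by the position-angle rotation pa_rot.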
self._rot= numpy.dot(pa_rot,zvec_rot)
return None
def _setup_gl(self,glorder):
self._glorder= glorder
if self._glorder is None:
self._glx, self._glw= None, None
else:
self._glx, self._glw=\
numpy.polynomial.legendre.leggauss(self._glorder)
# Interval change
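# leggauss returns nodes and weights for the interval [-1, 1]; rescale them
# to [0, 1], the range of the s variable used in _potInt/_forceInt/_2ndDerivInt.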
self._glx= 0.5*self._glx+0.5
self._glw*= 0.5
return None
@check_potential_inputs_not_arrays
def _evaluate(self,R,z,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,z
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
Phi(R,z)
HISTORY:
2016-05-30 - Started - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
if numpy.isinf(R): y= 0.
if self._aligned:
return self._evaluate_xyz(x,y,z)
else:
xyzp= numpy.dot(self._rot,numpy.array([x,y,z]))
return self._evaluate_xyz(xyzp[0],xyzp[1],xyzp[2])
def _evaluate_xyz(self,x,y,z):
"""Evaluation of the potential as a function of (x,y,z) in the
aligned coordinate frame"""
return 2.*numpy.pi*self._b*self._c\
*_potInt(x,y,z,self._psi,
self._b2,self._c2,glx=self._glx,glw=self._glw)
@check_potential_inputs_not_arrays
def _Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2016-06-09 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
# Compute all rectangular forces
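# The three rectangular force components are cached, keyed on an md5 hash of
# (x,y,z), so that _Rforce, _phiforce and _zforce evaluated at the same point
# reuse a single set of force integrals.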
new_hash= hashlib.md5(numpy.array([x,y,z])).hexdigest()
if new_hash == self._force_hash:
Fx= self._cached_Fx
Fy= self._cached_Fy
Fz= self._cached_Fz
else:
if self._aligned:
xp, yp, zp= x, y, z
else:
xyzp= numpy.dot(self._rot,numpy.array([x,y,z]))
xp, yp, zp= xyzp[0], xyzp[1], xyzp[2]
Fx= self._force_xyz(xp,yp,zp,0)
Fy= self._force_xyz(xp,yp,zp,1)
Fz= self._force_xyz(xp,yp,zp,2)
self._force_hash= new_hash
self._cached_Fx= Fx
self._cached_Fy= Fy
self._cached_Fz= Fz
if not self._aligned:
Fxyz= numpy.dot(self._rot.T,numpy.array([Fx,Fy,Fz]))
Fx, Fy= Fxyz[0], Fxyz[1]
return numpy.cos(phi)*Fx+numpy.sin(phi)*Fy
@check_potential_inputs_not_arrays
def _phiforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_phiforce
PURPOSE:
evaluate the azimuthal force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the azimuthal force
HISTORY:
2016-06-09 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
# Compute all rectangular forces
new_hash= hashlib.md5(numpy.array([x,y,z])).hexdigest()
if new_hash == self._force_hash:
Fx= self._cached_Fx
Fy= self._cached_Fy
Fz= self._cached_Fz
else:
if self._aligned:
xp, yp, zp= x, y, z
else:
xyzp= numpy.dot(self._rot,numpy.array([x,y,z]))
xp, yp, zp= xyzp[0], xyzp[1], xyzp[2]
Fx= self._force_xyz(xp,yp,zp,0)
Fy= self._force_xyz(xp,yp,zp,1)
Fz= self._force_xyz(xp,yp,zp,2)
self._force_hash= new_hash
self._cached_Fx= Fx
self._cached_Fy= Fy
self._cached_Fz= Fz
if not self._aligned:
Fxyz= numpy.dot(self._rot.T,numpy.array([Fx,Fy,Fz]))
Fx, Fy= Fxyz[0], Fxyz[1]
return R*(-numpy.sin(phi)*Fx+numpy.cos(phi)*Fy)
@check_potential_inputs_not_arrays
def _zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
_zforce
PURPOSE:
evaluate the vertical force for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the vertical force
HISTORY:
2016-06-09 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
# Compute all rectangular forces
new_hash= hashlib.md5(numpy.array([x,y,z])).hexdigest()
if new_hash == self._force_hash:
Fx= self._cached_Fx
Fy= self._cached_Fy
Fz= self._cached_Fz
else:
if self._aligned:
xp, yp, zp= x, y, z
else:
xyzp= numpy.dot(self._rot,numpy.array([x,y,z]))
xp, yp, zp= xyzp[0], xyzp[1], xyzp[2]
Fx= self._force_xyz(xp,yp,zp,0)
Fy= self._force_xyz(xp,yp,zp,1)
Fz= self._force_xyz(xp,yp,zp,2)
self._force_hash= new_hash
self._cached_Fx= Fx
self._cached_Fy= Fy
self._cached_Fz= Fz
if not self._aligned:
Fxyz= numpy.dot(self._rot.T,numpy.array([Fx,Fy,Fz]))
Fz= Fxyz[2]
return Fz
def _force_xyz(self,x,y,z,i):
"""Evaluation of the i-th force component as a function of (x,y,z)"""
return -4.*numpy.pi*self._b*self._c\
*_forceInt(x,y,z,
lambda m: self._mdens(m),
self._b2,self._c2,i,glx=self._glx,glw=self._glw)
@check_potential_inputs_not_arrays
def _R2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_R2deriv
PURPOSE:
evaluate the second radial derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second radial derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
if not self._aligned:
raise NotImplementedError("2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa); use RotateAndTiltWrapperPotential for this functionality instead")
phixx= self._2ndderiv_xyz(x,y,z,0,0)
phixy= self._2ndderiv_xyz(x,y,z,0,1)
phiyy= self._2ndderiv_xyz(x,y,z,1,1)
return numpy.cos(phi)**2.*phixx+numpy.sin(phi)**2.*phiyy\
+2.*numpy.cos(phi)*numpy.sin(phi)*phixy
@check_potential_inputs_not_arrays
def _Rzderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rzderiv
PURPOSE:
evaluate the mixed radial, vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the mixed radial, vertical derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
if not self._aligned:
raise NotImplementedError("2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)")
phixz= self._2ndderiv_xyz(x,y,z,0,2)
phiyz= self._2ndderiv_xyz(x,y,z,1,2)
return numpy.cos(phi)*phixz+numpy.sin(phi)*phiyz
@check_potential_inputs_not_arrays
def _z2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_z2deriv
PURPOSE:
evaluate the second vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second vertical derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
if not self._aligned:
raise NotImplementedError("2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)")
return self._2ndderiv_xyz(x,y,z,2,2)
@check_potential_inputs_not_arrays
def _phi2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_phi2deriv
PURPOSE:
evaluate the second azimuthal derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the second azimuthal derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
if not self._aligned:
raise NotImplementedError("2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)")
Fx= self._force_xyz(x,y,z,0)
Fy= self._force_xyz(x,y,z,1)
phixx= self._2ndderiv_xyz(x,y,z,0,0)
phixy= self._2ndderiv_xyz(x,y,z,0,1)
phiyy= self._2ndderiv_xyz(x,y,z,1,1)
return R**2.*(numpy.sin(phi)**2.*phixx+numpy.cos(phi)**2.*phiyy\
-2.*numpy.cos(phi)*numpy.sin(phi)*phixy)\
+R*(numpy.cos(phi)*Fx+numpy.sin(phi)*Fy)
@check_potential_inputs_not_arrays
def _Rphideriv(self,R,z,phi=0.,t=0.):
"""
NAME:
_Rphideriv
PURPOSE:
evaluate the mixed radial, azimuthal derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the mixed radial, azimuthal derivative
HISTORY:
2016-06-15 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
if not self._aligned:
raise NotImplementedError("2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)")
Fx= self._force_xyz(x,y,z,0)
Fy= self._force_xyz(x,y,z,1)
phixx= self._2ndderiv_xyz(x,y,z,0,0)
phixy= self._2ndderiv_xyz(x,y,z,0,1)
phiyy= self._2ndderiv_xyz(x,y,z,1,1)
return R*numpy.cos(phi)*numpy.sin(phi)*\
(phiyy-phixx)+R*numpy.cos(2.*phi)*phixy\
+numpy.sin(phi)*Fx-numpy.cos(phi)*Fy
@check_potential_inputs_not_arrays
def _phizderiv(self,R,z,phi=0.,t=0.):
"""
NAME:
_phizderiv
PURPOSE:
evaluate the mixed azimuthal, vertical derivative for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the mixed azimuthal, vertical derivative
HISTORY:
2021-04-30 - Written - Bovy (UofT)
"""
if not self.isNonAxi:
phi= 0.
x,y,z= coords.cyl_to_rect(R,phi,z)
if not self._aligned:
raise NotImplementedError("2nd potential derivatives of TwoPowerTriaxialPotential not implemented for rotated coordinated frames (non-trivial zvec and pa; use RotateAndTiltWrapperPotential for this functionality instead)")
phixz= self._2ndderiv_xyz(x,y,z,0,2)
phiyz= self._2ndderiv_xyz(x,y,z,1,2)
return R*(numpy.cos(phi)*phiyz-numpy.sin(phi)*phixz)
def _2ndderiv_xyz(self,x,y,z,i,j):
"""General 2nd derivative of the potential as a function of (x,y,z)
in the aligned coordinate frame"""
return 4.*numpy.pi*self._b*self._c\
*_2ndDerivInt(x,y,z,
lambda m: self._mdens(m),
lambda m: self._mdens_deriv(m),
self._b2,self._c2,i,j,glx=self._glx,glw=self._glw)
@check_potential_inputs_not_arrays
def _dens(self,R,z,phi=0.,t=0.):
"""
NAME:
_dens
PURPOSE:
evaluate the density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the density
HISTORY:
2018-08-06 - Written - Bovy (UofT)
"""
x,y,z= coords.cyl_to_rect(R,phi,z)
if self._aligned:
xp, yp, zp= x, y, z
else:
xyzp= numpy.dot(self._rot,numpy.array([x,y,z]))
xp, yp, zp= xyzp[0], xyzp[1], xyzp[2]
m= numpy.sqrt(xp**2.+yp**2./self._b2+zp**2./self._c2)
return self._mdens(m)
def _mass(self,R,z=None,t=0.):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R (and z) for this potential; if z=None, integrate to z=inf
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2021-03-08 - Written - Bovy (UofT)
"""
if not z is None: raise AttributeError # Hack to fall back to general
return 4.*numpy.pi*self._b*self._c\
*integrate.quad(lambda m: m**2.*self._mdens(m),0,R)[0]
def OmegaP(self):
"""
NAME:
OmegaP
PURPOSE:
return the pattern speed
INPUT:
(none)
OUTPUT:
pattern speed
HISTORY:
2016-05-31 - Written - Bovy (UofT)
"""
return 0.
def _potInt(x,y,z,psi,b2,c2,glx=None,glw=None):
"""int_0^\infty [psi(m)-psi(\infy)]/sqrt([1+tau]x[b^2+tau]x[c^2+tau])dtau"""
def integrand(s):
t= 1/s**2.-1.
return psi(numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t)))\
/numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.))
if glx is None:
return integrate.quad(integrand,0.,1.)[0]
else:
return numpy.sum(glw*integrand(glx))
def _forceInt(x,y,z,dens,b2,c2,i,glx=None,glw=None):
"""Integral that gives the force in x,y,z"""
def integrand(s):
t= 1/s**2.-1.
return dens(numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t)))\
*(x/(1.+t)*(i==0)+y/(b2+t)*(i==1)+z/(c2+t)*(i==2))\
/numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.))
if glx is None:
return integrate.quad(integrand,0.,1.)[0]
else:
return numpy.sum(glw*integrand(glx))
def _2ndDerivInt(x,y,z,dens,densDeriv,b2,c2,i,j,glx=None,glw=None):
"""Integral that gives the 2nd derivative of the potential in x,y,z"""
def integrand(s):
t= 1/s**2.-1.
m= numpy.sqrt(x**2./(1.+t)+y**2./(b2+t)+z**2./(c2+t))
return (densDeriv(m)
*(x/(1.+t)*(i==0)+y/(b2+t)*(i==1)+z/(c2+t)*(i==2))
*(x/(1.+t)*(j==0)+y/(b2+t)*(j==1)+z/(c2+t)*(j==2))/m\
+dens(m)*(i==j)*((1./(1.+t)*(i==0)+1./(b2+t)*(i==1)+1./(c2+t)*(i==2))))\
/numpy.sqrt((1.+(b2-1.)*s**2.)*(1.+(c2-1.)*s**2.))
if glx is None:
return integrate.quad(integrand,0.,1.)[0]
else:
return numpy.sum(glw*integrand(glx))
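# A minimal sketch (the class name and density are ours, not part of galpy) of
# how a concrete potential plugs into EllipsoidalPotential: supply the density
# rho(m), its derivative, and psi(m) = -\int_{m^2}^\infty dm^2 rho(m); for
# rho(m) = exp(-m^2) all three are analytic.
class _ExampleGaussianEllipsoidalPotential(EllipsoidalPotential):
    """rho(m) = amp * exp(-m^2), stratified on ellipsoids (illustration only)"""
    def _mdens(self, m):
        return numpy.exp(-m**2.)
    def _mdens_deriv(self, m):
        return -2.*m*numpy.exp(-m**2.)
    def _psi(self, m):
        # psi(m) = -\int_{m^2}^\infty exp(-u) du = -exp(-m^2)
        return -numpy.exp(-m**2.)
# Such a subclass is then evaluated entirely through the base-class machinery,
# e.g. pot = _ExampleGaussianEllipsoidalPotential(b=0.8, c=0.6).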
|
|
from __future__ import absolute_import
import itertools
import operator
import os
from plumbum.lib import six
from abc import abstractmethod, abstractproperty
import warnings
from functools import reduce
class FSUser(int):
"""A special object that represents a file-system user. It derives from ``int``, so it behaves
just like a number (``uid``/``gid``), but also has a ``.name`` attribute that holds the
string-name of the user, if given (otherwise ``None``)
"""
def __new__(cls, val, name=None):
self = int.__new__(cls, val)
self.name = name
return self
class Path(str, six.ABC):
"""An abstraction over file system paths. This class is abstract, and the two implementations
are :class:`LocalPath <plumbum.machines.local.LocalPath>` and
:class:`RemotePath <plumbum.path.remote.RemotePath>`.
"""
CASE_SENSITIVE = True
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, str(self))
def __div__(self, other):
"""Joins two paths"""
return self.join(other)
__truediv__ = __div__
__getitem__ = __div__
def __floordiv__(self, expr):
"""Returns a (possibly empty) list of paths that matched the glob-pattern under this path"""
return self.glob(expr)
def __iter__(self):
"""Iterate over the files in this directory"""
return iter(self.list())
def __eq__(self, other):
if isinstance(other, Path):
return self._get_info() == other._get_info()
elif isinstance(other, str):
if self.CASE_SENSITIVE:
return str(self) == other
else:
return str(self).lower() == other.lower()
else:
return NotImplemented
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __hash__(self):
if self.CASE_SENSITIVE:
return hash(str(self))
else:
return hash(str(self).lower())
def __nonzero__(self):
return bool(str(self))
__bool__ = __nonzero__
def __fspath__(self):
"""Added for Python 3.6 support"""
return str(self)
def __contains__(self, item):
"""Paths should support checking to see if an file or folder is in them."""
try:
return (self / item.name).exists()
except AttributeError:
return (self / item).exists()
@abstractmethod
def _form(self, *parts):
pass
def up(self, count=1):
"""Go up in ``count`` directories (the default is 1)"""
return self.join("../" * count)
def walk(self, filter=lambda p: True,
dir_filter=lambda p: True): # @ReservedAssignment
"""traverse all (recursive) sub-elements under this directory, that match the given filter.
By default, the filter accepts everything; you can provide a custom filter function that
takes a path as an argument and returns a boolean
:param filter: the filter (predicate function) for matching results. Only paths matching
this predicate are returned. Defaults to everything.
:param dir_filter: the filter (predicate function) for matching directories. Only directories
matching this predicate are recursed into. Defaults to everything.
"""
for p in self.list():
if filter(p):
yield p
if p.is_dir() and dir_filter(p):
for p2 in p.walk(filter, dir_filter):
yield p2
@abstractproperty
def name(self):
"""The basename component of this path"""
@property
def basename(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use .name instead", DeprecationWarning)
return self.name
@abstractproperty
def stem(self):
"""The name without an extension, or the last component of the path"""
@abstractproperty
def dirname(self):
"""The dirname component of this path"""
@abstractproperty
def root(self):
"""The root of the file tree (`/` on Unix)"""
@abstractproperty
def drive(self):
"""The drive letter (on Windows)"""
@abstractproperty
def suffix(self):
"""The suffix of this file"""
@abstractproperty
def suffixes(self):
"""This is a list of all suffixes"""
@abstractproperty
def uid(self):
"""The user that owns this path. The returned value is a :class:`FSUser <plumbum.path.FSUser>`
object which behaves like an ``int`` (as expected from ``uid``), but it also has a ``.name``
attribute that holds the string-name of the user"""
@abstractproperty
def gid(self):
"""The group that owns this path. The returned value is a :class:`FSUser <plumbum.path.FSUser>`
object which behaves like an ``int`` (as expected from ``gid``), but it also has a ``.name``
attribute that holds the string-name of the group"""
@abstractmethod
def as_uri(self, scheme=None):
"""Returns a universal resource identifier. Use ``scheme`` to force a scheme."""
@abstractmethod
def _get_info(self):
pass
@abstractmethod
def join(self, *parts):
"""Joins this path with any number of paths"""
@abstractmethod
def list(self):
"""Returns the files in this directory"""
@abstractmethod
def iterdir(self):
"""Returns an iterator over the directory. Might be slightly faster on Python 3.5 than .list()"""
@abstractmethod
def is_dir(self):
"""Returns ``True`` if this path is a directory, ``False`` otherwise"""
def isdir(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use .is_dir() instead", DeprecationWarning)
return self.is_dir()
@abstractmethod
def is_file(self):
"""Returns ``True`` if this path is a regular file, ``False`` otherwise"""
def isfile(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use .is_file() instead", DeprecationWarning)
return self.is_file()
def islink(self):
"""Included for compatibility with older Plumbum code"""
warnings.warn("Use is_symlink instead", DeprecationWarning)
return self.is_symlink()
@abstractmethod
def is_symlink(self):
"""Returns ``True`` if this path is a symbolic link, ``False`` otherwise"""
@abstractmethod
def exists(self):
"""Returns ``True`` if this path exists, ``False`` otherwise"""
@abstractmethod
def stat(self):
"""Returns the os.stats for a file"""
pass
@abstractmethod
def with_name(self, name):
"""Returns a path with the name replaced"""
@abstractmethod
def with_suffix(self, suffix, depth=1):
"""Returns a path with the suffix replaced. Up to last ``depth`` suffixes will be
replaced. None will replace all suffixes. If there are fewer than ``depth`` suffixes,
this will replace all suffixes. ``.tar.gz`` is an example where ``depth=2`` or
``depth=None`` is useful"""
def preferred_suffix(self, suffix):
"""Adds a suffix if one does not currently exist (otherwise, no change). Useful
for loading files with a default suffix"""
if len(self.suffixes) > 0:
return self
else:
return self.with_suffix(suffix)
@abstractmethod
def glob(self, pattern):
"""Returns a (possibly empty) list of paths that matched the glob-pattern under this path"""
@abstractmethod
def delete(self):
"""Deletes this path (recursively, if a directory)"""
@abstractmethod
def move(self, dst):
"""Moves this path to a different location"""
def rename(self, newname):
"""Renames this path to the ``new name`` (only the basename is changed)"""
return self.move(self.up() / newname)
@abstractmethod
def copy(self, dst, override=False):
"""Copies this path (recursively, if a directory) to the destination path. Raises TypeError if
dst exists and override is False."""
@abstractmethod
def mkdir(self, mode=0o777, parents=True, exist_ok=True):
"""
Creates a directory at this path.
:param mode: **Currently only implemented for local paths!** Numeric mode to use for directory
creation, which may be ignored on some systems. The current implementation
reproduces the behavior of ``os.mkdir`` (i.e., the current umask is first masked
out), but this may change for remote paths. As with ``os.mkdir``, it is recommended
to call :func:`chmod` explicitly if you need to be sure.
:param parents: If this is true (the default), the directory's parents will also be created if
necessary.
:param exist_ok: If this is true (the default), no exception will be raised if the directory
already exists (otherwise ``OSError``).
Note that the defaults for ``parents`` and ``exist_ok`` are the opposite of what they are in
Python's own ``pathlib`` - this is to maintain backwards-compatibility with Plumbum's behaviour
from before they were implemented.
"""
@abstractmethod
def open(self, mode="r"):
"""opens this path as a file"""
@abstractmethod
def read(self, encoding=None):
"""returns the contents of this file. By default the data is binary (``bytes``), but you can
specify the encoding, e.g., ``'latin1'`` or ``'utf8'``"""
@abstractmethod
def write(self, data, encoding=None):
"""writes the given data to this file. By default the data is expected to be binary (``bytes``),
but you can specify the encoding, e.g., ``'latin1'`` or ``'utf8'``"""
@abstractmethod
def touch(self):
"""Update the access time. Creates an empty file if none exists."""
@abstractmethod
def chown(self, owner=None, group=None, recursive=None):
"""Change ownership of this path.
:param owner: The owner to set (either ``uid`` or ``username``), optional
:param group: The group to set (either ``gid`` or ``groupname``), optional
:param recursive: whether to change ownership of all contained files and subdirectories.
Only meaningful when ``self`` is a directory. If ``None``, the value
will default to ``True`` if ``self`` is a directory, ``False`` otherwise.
"""
@abstractmethod
def chmod(self, mode):
"""Change the mode of path to the numeric mode.
:param mode: file mode as for os.chmod
"""
@staticmethod
def _access_mode_to_flags(mode,
flags={
"f": os.F_OK,
"w": os.W_OK,
"r": os.R_OK,
"x": os.X_OK
}):
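# Accepts either a numeric mode (returned unchanged) or a string such as
# "rw", which is translated into the bitwise-or of the corresponding
# os.*_OK flags (e.g. "rw" -> os.R_OK | os.W_OK).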
if isinstance(mode, str):
mode = reduce(operator.or_, [flags[m] for m in mode.lower()], 0)
return mode
@abstractmethod
def access(self, mode=0):
"""Test file existence or permission bits
:param mode: a bitwise-or of access bits, or a string-representation thereof:
``'f'``, ``'x'``, ``'r'``, ``'w'`` for ``os.F_OK``, ``os.X_OK``,
``os.R_OK``, ``os.W_OK``
"""
@abstractmethod
def link(self, dst):
"""Creates a hard link from ``self`` to ``dst``
:param dst: the destination path
"""
@abstractmethod
def symlink(self, dst):
"""Creates a symbolic link from ``self`` to ``dst``
:param dst: the destination path
"""
@abstractmethod
def unlink(self):
"""Deletes a symbolic link"""
def split(self, *dummy_args, **dummy_kargs):
"""Splits the path on directory separators, yielding a list of directories, e.g,
``"/var/log/messages"`` will yield ``['var', 'log', 'messages']``.
"""
parts = []
path = self
while path != path.dirname:
parts.append(path.name)
path = path.dirname
return parts[::-1]
@property
def parts(self):
"""Splits the directory into parts, including the base directroy, returns a tuple"""
return tuple([self.drive + self.root] + self.split())
def relative_to(self, source):
"""Computes the "relative path" require to get from ``source`` to ``self``. They satisfy the invariant
``source_path + (target_path - source_path) == target_path``. For example::
/var/log/messages - /var/log/messages = []
/var/log/messages - /var = [log, messages]
/var/log/messages - / = [var, log, messages]
/var/log/messages - /var/tmp = [.., log, messages]
/var/log/messages - /opt = [.., var, log, messages]
/var/log/messages - /opt/lib = [.., .., var, log, messages]
"""
if isinstance(source, str):
source = self._form(source)
parts = self.split()
baseparts = source.split()
ancestors = len(
list(
itertools.takewhile(lambda p: p[0] == p[1],
zip(parts, baseparts))))
return RelativePath([".."] * (len(baseparts) - ancestors) +
parts[ancestors:])
def __sub__(self, other):
"""Same as ``self.relative_to(other)``"""
return self.relative_to(other)
def _glob(self, pattern, fn):
"""Applies a glob string or list/tuple/iterable to the current path, using ``fn``"""
if isinstance(pattern, str):
return fn(pattern)
else:
results = []
for single_pattern in pattern:
results.extend(fn(single_pattern))
return sorted(list(set(results)))
def resolve(self, strict=False):
"""Added to allow pathlib like syntax. Does nothing since
Plumbum paths are always absolute. Does not (currently) resolve
symlinks."""
# TODO: Resolve symlinks here
return self
@property
def parents(self):
"""Pathlib like sequence of ancestors"""
join = lambda x, y: self._form(x) / y
as_list = (reduce(join, self.parts[:i], self.parts[0])
for i in range(len(self.parts) - 1, 0, -1))
return tuple(as_list)
@property
def parent(self):
"""Pathlib like parent of the path."""
return self.parents[0]
class RelativePath(object):
"""
Relative paths are the "delta" required to get from one path to another.
Note that relative paths do not point at anything, and thus are not paths.
Therefore they are system agnostic (but closed under addition)
Paths are always absolute and point at "something", whether existent or not.
Relative paths are created by subtracting paths (``Path.relative_to``)
"""
def __init__(self, parts):
self.parts = parts
def __str__(self):
return "/".join(self.parts)
def __iter__(self):
return iter(self.parts)
def __len__(self):
return len(self.parts)
def __getitem__(self, index):
return self.parts[index]
def __repr__(self):
return "RelativePath(%r)" % (self.parts, )
def __eq__(self, other):
return str(self) == str(other)
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return str(self) > str(other)
def __ge__(self, other):
return str(self) >= str(other)
def __lt__(self, other):
return str(self) < str(other)
def __le__(self, other):
return str(self) <= str(other)
def __hash__(self):
return hash(str(self))
def __nonzero__(self):
return bool(str(self))
__bool__ = __nonzero__
def up(self, count=1):
return RelativePath(self.parts[:-count])
def __radd__(self, path):
return path.join(*self.parts)
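# A small illustrative check (ours, not part of plumbum) of RelativePath
# behaviour as described in Path.relative_to: parts join with "/", up() drops
# trailing components, and comparison is by string value.
if __name__ == "__main__":
    _rel = RelativePath(["..", "log", "messages"])
    assert str(_rel) == "../log/messages"
    assert list(_rel) == ["..", "log", "messages"]
    assert str(_rel.up()) == "../log"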
|
|
# Copyright 2016 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from __future__ import absolute_import
from builtins import object
from future.utils import iteritems, with_metaclass
from past.builtins import basestring
import bisect
import contextlib
import copy
import hashlib
import inspect
import json
import keyword
import os
import re
import sys
import types
from functools import wraps
import attr
from google.protobuf import message
import gevent
from .config_types import Path
from .internal import engine_step
from .internal.attr_util import attr_dict_type
from .internal.warn import escape
from .recipe_test_api import DisabledTestData, ModuleTestData
from .third_party import luci_context
from .third_party.logdog import streamname
from .third_party.logdog.bootstrap import ButlerBootstrap, NotBootstrappedError
from .engine_types import StepPresentation, freeze, FrozenDict
from .util import ModuleInjectionSite
# TODO(iannucci): Rationalize the use of this in downstream scripts.
from .util import Placeholder # pylint: disable=unused-import
class UnknownRequirementError(Exception):
"""Raised by a requirement function when the referenced requirement is
unknown.
"""
def __init__(self, req):
super(UnknownRequirementError, self).__init__(
'Unknown requirement [%s]' % (req,))
self.typ = req._typ
self.name = req._name
class _UnresolvedRequirement(object):
"""Internal placeholder type for an unresolved module/recipe requirement."""
def __init__(self, typ, name):
self._typ = typ
self._name = name
def __str__(self):
return '%s:%s' % (self._typ, self._name)
def __getattr__(self, key):
raise AttributeError(
'Cannot reference [%s] in unresolved requirement [%s]' % (
key, str(self,)))
def __call__(self, *args, **kwargs):
raise AttributeError('Cannot call unresolved requirement [%s]' % (
str(self,)))
def RequireClient(name):
"""Returns: A dependency injection placeholder for a recipe engine client.
Recipes and Recipe APIs can call this function to install a placeholder for
the dependency injection of a recipe engine client. This dependency will be
noted by the recipe engine and resolved prior to recipe execution.
Clients are intended to be used to interface between the recipe engine and
low-level modules (e.g., "step"). As a general rule of thumb, higher-level
modules should not use clients and interface with the low-level modules
instead.
Recipe engine clients are referenced by name and resolved directly by the
recipe engine. Modules must require them as class member variables in their
recipe API subclass, and recipes must require them as top-level variables.
For example:
class MyCollRecipeApi(recipe_api.RecipeApi):
step_client = recipe_api.RequireClient('step')
def do_something(self):
self.step_client.whatever()
Args:
name (str): the name of the recipe engine client to install.
"""
return _UnresolvedRequirement('client', name)
@attr.s(frozen=True, slots=True)
class LUCIContextClient(object):
"""A recipe engine client which reads/writes the LUCI_CONTEXT."""
IDENT = 'lucictx'
ENV_KEY = luci_context.ENV_KEY
initial_context = attr.ib(validator=attr_dict_type(str, (dict, FrozenDict)),
factory=dict, converter=freeze)
class PathsClient(object):
"""A recipe engine client which exposes all known base paths.
In particular, you can use this client to discover all known:
* recipe resource path
* loaded module resource paths
* loaded recipe repo paths
"""
IDENT = 'paths'
def __init__(self, start_dir):
self.paths = []
self.path_strings = []
self._start_dir = start_dir
def _initialize_with_recipe_api(self, root_api):
"""This method is called once before the start of every recipe.
It is passed the recipe's `api` object. This method crawls the api object
and extracts every resource base path it can find."""
paths_found = {}
def add_found(path):
if path is not None:
paths_found[str(path)] = path
search_set = [root_api]
found_api_id_set = {id(root_api)}
while search_set:
api = search_set.pop()
add_found(api.resource())
add_found(api.repo_resource())
for name in dir(api.m):
sub_api = getattr(api.m, name)
if not isinstance(sub_api, RecipeApiPlain):
continue
if id(sub_api) not in found_api_id_set:
found_api_id_set.add(id(sub_api))
search_set.append(sub_api)
# transpose
# [(path_string, path), ...]
# into
# ([path_string, ...], [path, ...])
for path_string, path in sorted(iteritems(paths_found)):
self.path_strings.append(path_string)
self.paths.append(path)
def find_longest_prefix(self, target, sep):
"""Identifies a known resource path which would contain the `target` path.
sep must be the current path separator (can vary from os.path.sep when
running under simulation).
Returns (str(Path), Path) if the prefix path is found, or (None, None) if no
such prefix exists.
"""
idx = bisect.bisect_left(self.path_strings, target)
if idx == len(self.paths):
return (None, None) # off the end
sPath, path = self.path_strings[idx], self.paths[idx]
if target == sPath:
return sPath, path
if idx > 0:
sPath, path = self.path_strings[idx-1], self.paths[idx-1]
if target.startswith(sPath+sep):
return sPath, path
return (None, None)
@property
def start_dir(self):
"""Returns the START_DIR for this recipe execution."""
return self._start_dir
class PropertiesClient(object):
"""A recipe engine client representing the recipe engine properties."""
IDENT = 'properties'
def __init__(self, properties):
self._properties = properties
def get_properties(self):
return copy.deepcopy(self._properties)
class StepClient(object):
"""A recipe engine client representing step running and introspection."""
IDENT = 'step'
StepConfig = engine_step.StepConfig
EnvAffix = engine_step.EnvAffix
def __init__(self, engine):
self._engine = engine
def previous_step_result(self):
"""Allows api.step to get the active result from any context.
This always returns the innermost nested step that is still open --
presumably the one that just failed if we are in an exception handler."""
active_step_data = self._engine.active_step
if not active_step_data:
raise ValueError(
'No steps have been run yet, and you are asking for a previous step '
'result.')
return active_step_data
def parent_step(self, name_tokens):
"""Opens a parent step.
Returns a contextmanager object yielding (StepPresentation, List[StepData]).
Refer to RecipeEngine.parent_step for details.
"""
return self._engine.parent_step(name_tokens)
def run_step(self, step):
"""
Runs a step from a StepConfig.
Args:
* step (StepConfig) - The step to run.
Returns:
A StepData object containing the result of the finished step.
"""
assert isinstance(step, engine_step.StepConfig)
return self._engine.run_step(step)
def close_non_parent_step(self):
"""Closes the currently active non-parent step, if any."""
return self._engine.close_non_parent_step()
@attr.s(frozen=True, slots=True)
class ConcurrencyClient(object):
IDENT = 'concurrency'
supports_concurrency = attr.ib() # type: bool
_spawn_impl = attr.ib() # type: f(func, args, kwargs) -> Greenlet
def spawn(self, func, args, kwargs, greenlet_name):
return self._spawn_impl(func, args, kwargs, greenlet_name)
class WarningClient(object):
IDENT = 'warning'
def __init__(self, recorder, recipe_deps):
from .internal.warn import record # Avoid early proto import
if recorder != record.NULL_WARNING_RECORDER and (
not isinstance(recorder, record.WarningRecorder)):
raise ValueError('Expected either an instance of WarningRecorder '
'or NULL_WARNING_RECORDER sentinel. Got type '
'(%s): %r' % (type(recorder), recorder))
self._recorder = recorder
# A repo may be located inside another repo (e.g., deps repos are generally
# inside the main repo), so we start with the repo with the longest
# path to decide which repo contains the issuer file.
self._repo_paths = sorted(
((repo_name, repo.path)
for repo_name, repo in iteritems(recipe_deps.repos)),
key=lambda r: r[1],
reverse=True,
)
@escape.escape_all_warnings
def record_execution_warning(self, name):
"""Captures the current stack and records an execution warning."""
cur_stack = [frame_tup[0] for frame_tup in inspect.stack()]
cur_stack.extend(getattr(gevent.getcurrent(), 'spawning_frames', ()))
self._recorder.record_execution_warning(name, cur_stack)
def record_import_warning(self, name, importer):
"""Records import warning during DEPS resolution."""
self._recorder.record_import_warning(name, importer)
def resolve_warning(self, name, issuer_file):
"""Returns the fully-qualified warning name for the given warning.
The repo that contains the issuer_file is considered as where the
warning is defined.
Args:
* name (str): the warning name to be resolved. If fully-qualified name
is provided, returns as it is.
* issuer_file (str): The file path where warning is issued.
Raise ValueError if none of the repo contains the issuer_file.
"""
if '/' in name:
return name
abs_issuer_path = os.path.abspath(issuer_file)
for repo_name, repo_path in self._repo_paths:
if abs_issuer_path.startswith(repo_path):
return '/'.join((repo_name, name))
raise ValueError('Failed to resolve warning: %r issued in %s. To '
'disambiguate, please provide fully-qualified warning name '
'(i.e. $repo_name/WARNING_NAME)' % (name, abs_issuer_path))
def escape_frame_function(self, warning, frame):
"""Escapes the function the given frame executes from warning attribution.
"""
loc = escape.FuncLoc.from_code_obj(frame.f_code)
if '/' in warning:
pattern = re.compile('^%s$' % warning)
else:
pattern = re.compile('^.+/%s$' % warning)
escaped_warnings = escape.WARNING_ESCAPE_REGISTRY.get(loc, ())
if pattern not in escaped_warnings:
escaped_warnings = (pattern,) + escaped_warnings
escape.WARNING_ESCAPE_REGISTRY[loc] = escaped_warnings
# Exports warning escape decorators
escape_warnings = escape.escape_warnings
escape_all_warnings = escape.escape_all_warnings
ignore_warnings = escape.ignore_warnings
class StepFailure(Exception):
"""
This is the base class for all step failures.
Raising a StepFailure counts as 'running a step' for the purpose of
infer_composite_step's logic.
FIXME: This class is used as a general way to fail, but it should be split up.
See crbug.com/892792 for more information.
FIXME: These exceptions should be made into more-normal exceptions (e.g.
the way reason_message is overridden by subclasses is very strange).
"""
def __init__(self, name_or_reason, result=None):
# Raising a StepFailure counts as running a step.
_DEFER_CONTEXT.mark_ran_step()
self.exc_result = None # default to None
if result:
self.name = name_or_reason
self.result = result
self.reason = self.reason_message()
# TODO(iannucci): This hasattr stuff is pretty bogus. This is attempting
# to detect when 'result' was a StepData. However AggregatedStepFailure
# passes in something else.
if hasattr(result, 'exc_result'):
self.exc_result = result.exc_result
if self.exc_result.had_timeout:
self.reason += ' (timeout)'
if self.exc_result.was_cancelled:
self.reason += ' (canceled)'
self.reason += ' (retcode: {!r})'.format(self.exc_result.retcode)
else:
self.name = None
self.result = None
self.reason = name_or_reason
super(StepFailure, self).__init__(self.reason)
def reason_message(self):
return 'Step({!r})'.format(self.name)
@property
def was_cancelled(self):
"""
Returns True if this exception was caused by a cancellation event
(see ExecutionResult.was_cancelled).
If this was a manual failure, returns None.
"""
if not self.exc_result:
return None
return self.exc_result.was_cancelled
@property
def had_timeout(self):
"""
Returns True if this exception was caused by a timeout. If this was a manual
failure, returns None.
"""
if not self.exc_result:
return None
return self.exc_result.had_timeout
@property
def retcode(self):
"""
Returns the retcode of the step which failed. If this was a manual
failure, returns None
"""
if not self.exc_result:
return None
return self.exc_result.retcode
class StepWarning(StepFailure):
"""
A subclass of StepFailure, which still fails the build, but which is
a warning. Need to figure out how exactly this will be useful.
"""
def reason_message(self): # pragma: no cover
return "Warning: Step({!r})".format(self.name)
class InfraFailure(StepFailure):
"""
A subclass of StepFailure.
Raised for any non-failure, non-success cases, e.g.
* Step failed to start due to missing executable
* Step timed out
* Step was canceled
* Step was marked as `infra_step`, or run in a context with `infra_steps`
set and returned a not-ok retcode.
"""
def reason_message(self):
return "Infra Failure: Step({!r})".format(self.name)
class AggregatedStepFailure(StepFailure):
def __init__(self, result):
super(AggregatedStepFailure, self).__init__(
"Aggregate step failure.", result=result)
def reason_message(self):
msg = "{!r} out of {!r} aggregated steps failed: ".format(
len(self.result.failures), len(self.result.all_results))
msg += ', '.join((f.reason or f.name) for f in self.result.failures)
return msg
class AggregatedResult(object):
"""Holds the result of an aggregated run of steps.
Currently this is only used internally by defer_results, but it may be exposed
to the consumer of defer_results at some point in the future. For now it's
expected to be easier for defer_results consumers to do their own result
aggregation, as they may need to pick and choose (or label) which results they
really care about.
"""
def __init__(self):
self.successes = []
self.failures = []
self.contains_infra_failure = False
self.contains_cancelled = False
# Needs to be here to be able to treat this as a step result
self.retcode = None
@property
def all_results(self):
"""
Return a list of two item tuples (x, y), where
x is whether or not the step succeeded, and
y is the result of the run
"""
res = [(True, result) for result in self.successes]
res.extend([(False, result) for result in self.failures])
return res
def add_success(self, result):
self.successes.append(result)
return DeferredResult(result, None)
def add_failure(self, exception):
if isinstance(exception, InfraFailure):
self.contains_infra_failure = True
self.contains_cancelled = (
self.contains_cancelled or exception.was_cancelled)
self.failures.append(exception)
return DeferredResult(None, exception)
class DeferredResult(object):
def __init__(self, result, failure):
self._result = result
self._failure = failure
@property
def is_ok(self):
return self._failure is None
def get_result(self):
if not self.is_ok:
raise self.get_error()
return self._result
def get_error(self):
return self._failure
class _DEFER_CONTEXT_OBJ(object):
"""This object keeps track of state pertaining to the behavior of
defer_results and composite_step.
"""
def __init__(self):
"""The object starts in a state where no steps have been run, and there's no
current aggregated_result."""
self._ran_step = [False]
self._aggregated_result = [None]
@property
def ran_step(self):
"""Returns True if a step has run within this defer_results context."""
return self._ran_step[-1]
def mark_ran_step(self):
"""Marks that a step has run within this defer_results context."""
self._ran_step[-1] = True
@property
def aggregated_result(self):
"""Returns the current AggregatedResult() or None, if we're not currently
deferring results."""
return self._aggregated_result[-1]
@contextlib.contextmanager
def begin_aggregate(self):
"""Begins aggregating new results. Use with a with statement:
with _DEFER_CONTEXT.begin_aggregate() as agg:
...
Where `agg` is the AggregatedResult() for that with section.
"""
try:
yield self._enter(AggregatedResult())
finally:
self._exit()
@contextlib.contextmanager
def begin_normal(self):
"""Returns the context to normal (stop aggregating results).
with _DEFER_CONTEXT.begin_normal():
...
"""
try:
yield self._enter(None)
finally:
self._exit()
def _enter(self, agg):
self._ran_step.append(False)
self._aggregated_result.append(agg)
return agg
def _exit(self):
self._ran_step.pop()
self._aggregated_result.pop()
_DEFER_CONTEXT = _DEFER_CONTEXT_OBJ()
def non_step(func):
"""A decorator which prevents a method from automatically being wrapped as
an infer_composite_step by RecipeApiMeta.
This is needed for utility methods which don't run any steps, but which are
invoked within the context of a defer_results().
@see infer_composite_step, defer_results, RecipeApiMeta
"""
assert not hasattr(func, "_skip_inference"), \
"Double-wrapped method %r?" % func
func._skip_inference = True # pylint: disable=protected-access
return func
_skip_inference = non_step
def infer_composite_step(func):
"""A decorator which possibly makes this step act as a single step, for the
purposes of the defer_results function.
Behaves as if this function were wrapped by composite_step, unless this
function:
* is already wrapped by non_step
* returns a result without calling api.step
* raises an exception which is not derived from StepFailure
In any of these cases, this function will behave like a normal function.
This decorator is automatically applied by RecipeApiMeta (or by inheriting
from RecipeApi). If you want to declare a method's behavior explicitly, you
may decorate it with either composite_step or with non_step.
"""
if getattr(func, "_skip_inference", False):
return func
@_skip_inference # to prevent double-wraps
@wraps(func)
@escape.escape_all_warnings
def _inner(*a, **kw):
agg = _DEFER_CONTEXT.aggregated_result
# We're not deferring results, so run the function normally.
if agg is None:
return func(*a, **kw)
# Stop deferring results within this function; the ultimate result of the
# function will be added to our parent context's aggregated results and
# we'll return a DeferredResult.
with _DEFER_CONTEXT.begin_normal():
try:
ret = func(*a, **kw)
# This is how we differ from composite_step; if we didn't actually run
# a step or throw a StepFailure, return normally.
if not _DEFER_CONTEXT.ran_step:
return ret
return agg.add_success(ret)
except StepFailure as ex:
return agg.add_failure(ex)
_inner.__original = func
return _inner
def composite_step(func):
"""A decorator which makes this step act as a single step, for the purposes of
the defer_results function.
This means that this function will not quit during the middle of its execution
because of a StepFailure, if there is an aggregator active.
You may use this decorator explicitly if infer_composite_step is detecting
the behavior of your method incorrectly to force it to behave as a step. You
may also need to use this if your Api class inherits from RecipeApiPlain and
so doesn't have its methods automatically wrapped by infer_composite_step.
"""
@_skip_inference # to avoid double-wraps
@wraps(func)
@escape.escape_all_warnings
def _inner(*a, **kw):
# composite_steps always count as running a step.
_DEFER_CONTEXT.mark_ran_step()
agg = _DEFER_CONTEXT.aggregated_result
# If we're not aggregating
if agg is None:
return func(*a, **kw)
# Stop deferring results within this function; the ultimate result of the
# function will be added to our parent context's aggregated results and
# we'll return a DeferredResult.
with _DEFER_CONTEXT.begin_normal():
try:
return agg.add_success(func(*a, **kw))
except StepFailure as ex:
return agg.add_failure(ex)
_inner.__original = func
return _inner
@contextlib.contextmanager
def defer_results():
"""
Use this to defer step results in your code. All steps which would previously
return a result or throw an exception will instead return a DeferredResult.
Any exceptions which were thrown during execution will be thrown when either:
a. You call get_result() on the step's result.
b. You exit the lexical scope inside of the with statement
Example:
with defer_results():
api.step('a', ..)
api.step('b', ..)
result = api.m.module.im_a_composite_step(...)
api.m.echo('the data is', result.get_result())
If 'a' fails, 'b' and 'im a composite step' will still run.
If 'im a composite step' fails, then the get_result() call will raise
an exception.
If you don't try to use the result (don't call get_result()), an aggregate
failure will still be raised once you exit the lexical scope inside
the with statement.
"""
assert _DEFER_CONTEXT.aggregated_result is None, (
"may not call defer_results in an active defer_results context")
with _DEFER_CONTEXT.begin_aggregate() as agg:
yield
if agg.failures:
raise AggregatedStepFailure(agg)
class RecipeApiMeta(type):
WHITELIST = ('__init__',)
def __new__(mcs, name, bases, attrs):
"""Automatically wraps all methods of subclasses of RecipeApi with
@infer_composite_step. This allows defer_results to work as intended without
manually decorating every method.
"""
wrap = lambda f: infer_composite_step(f) if f else f
for attr in attrs:
if attr in RecipeApiMeta.WHITELIST:
continue
val = attrs[attr]
if isinstance(val, types.FunctionType):
attrs[attr] = wrap(val)
elif isinstance(val, property):
attrs[attr] = property(
wrap(val.fget),
wrap(val.fset),
wrap(val.fdel),
val.__doc__)
return super(RecipeApiMeta, mcs).__new__(mcs, name, bases, attrs)
class RecipeApiPlain(object):
"""
Framework class for handling recipe_modules.
Inherit from this in your recipe_modules/<name>/api.py . This class provides
wiring for your config context (in self.c and methods), and for dependency
injection (in self.m).
Dependency injection takes place in load_recipe_modules() in loader.py.
USE RecipeApi INSTEAD, UNLESS your RecipeApi subclass derives from something
which defines its own __metaclass__. Deriving from RecipeApi instead of
RecipeApiPlain allows your RecipeApi subclass to automatically work with
defer_results without needing to decorate every method with
@infer_composite_step.
"""
def __init__(self, module=None, test_data=DisabledTestData(), **_kwargs):
"""Note: Injected dependencies are NOT available in __init__()."""
super(RecipeApiPlain, self).__init__()
self._module = module
assert isinstance(test_data, (ModuleTestData, DisabledTestData))
self._test_data = test_data
# If we're the 'root' api, inject directly into 'self'.
# Otherwise inject into 'self.m'
if not isinstance(module, types.ModuleType):
self.m = self
else:
self.m = ModuleInjectionSite(self)
# If our module has a test api, it gets injected here.
self.test_api = None
# Config goes here.
self.c = None
def initialize(self):
"""
Initializes the recipe module after it has been instantiated with all
dependencies injected and available.
"""
pass
def get_config_defaults(self): # pylint: disable=R0201
"""
Allows your api to dynamically determine static default values for configs.
"""
return {}
def make_config(self, config_name=None, optional=False, **CONFIG_VARS):
"""Returns a 'config blob' for the current API."""
return self.make_config_params(config_name, optional, **CONFIG_VARS)[0]
def _get_config_item(self, config_name, optional=False):
"""Get the config item for a given name.
If `config_name` does not refer to a config item for the current module,
the behavior is determined by the value of `optional`:
* if optional is True, then None will be returned
* else a KeyError will be raised with an error message containing
`config_name`, the name of the api's module and the list of the api's
module's config names.
"""
ctx = self._module.CONFIG_CTX
try:
return ctx.CONFIG_ITEMS[config_name]
except KeyError:
if optional:
return None
raise KeyError(
'%s is not the name of a configuration for module %s: %s' % (
config_name, self._module.__name__, sorted(ctx.CONFIG_ITEMS)))
def make_config_params(self, config_name, optional=False, **CONFIG_VARS):
"""Returns a 'config blob' for the current API, and the computed params
for all dependent configurations.
The params have the following order of precedence. Each subsequent param
is dict.update'd into the final parameters, so the order is from lowest to
highest precedence on a per-key basis:
* if config_name in CONFIG_CTX
* get_config_defaults()
* CONFIG_CTX[config_name].DEFAULT_CONFIG_VARS()
* CONFIG_VARS
* else
* get_config_defaults()
* CONFIG_VARS
"""
generic_params = self.get_config_defaults() # generic defaults
generic_params.update(CONFIG_VARS) # per-invocation values
ctx = self._module.CONFIG_CTX
if optional and not ctx:
return None, generic_params
assert ctx, '%s has no config context' % self
params = self.get_config_defaults() # generic defaults
itm = None
if config_name:
itm = self._get_config_item(config_name, optional)
if not itm:
return None, generic_params
if itm:
params.update(itm.DEFAULT_CONFIG_VARS()) # per-item defaults
params.update(CONFIG_VARS) # per-invocation values
base = ctx.CONFIG_SCHEMA(**params)
if config_name is None:
return base, params
else:
return itm(base), params
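# Worked illustration (not from the original engine) of the precedence above,
# assuming a hypothetical config item 'example_cfg':
#   get_config_defaults()                           -> {'BUILD_TYPE': 'Release'}
#   CONFIG_CTX['example_cfg'].DEFAULT_CONFIG_VARS() -> {'TARGET_BITS': 64}
#   make_config_params('example_cfg', TARGET_BITS=32)
#     => params == {'BUILD_TYPE': 'Release', 'TARGET_BITS': 32}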
def set_config(self, config_name=None, optional=False, **CONFIG_VARS):
"""Sets the modules and its dependencies to the named configuration."""
assert self._module
config, _ = self.make_config_params(config_name, optional, **CONFIG_VARS)
if config:
self.c = config
def apply_config(self, config_name, config_object=None, optional=False):
"""Apply a named configuration to the provided config object or self."""
itm = self._get_config_item(config_name)
itm(config_object or self.c, optional=optional)
def resource(self, *path):
"""Returns path to a file under <recipe module>/resources/ directory.
Args:
path: path relative to module's resources/ directory.
"""
# TODO(vadimsh): Verify that file exists. Including a case like:
# module.resource('dir').join('subdir', 'file.py')
return self._module.RESOURCE_DIRECTORY.join(*path)
def repo_resource(self, *path):
"""Returns a resource path, where path is relative to the root of
the recipe repo where this module is defined.
"""
return self._module.REPO_ROOT.join(*path)
@property
def name(self):
return self._module.NAME
class RecipeApi(with_metaclass(RecipeApiMeta, RecipeApiPlain)):
pass
class RecipeScriptApi(RecipeApiPlain, ModuleInjectionSite):
# TODO(dnj): Delete this and make recipe scripts use standard recipe APIs.
pass
# This is a sentinel object for the Property system. This allows users to
# specify a default of None that will actually be respected.
PROPERTY_SENTINEL = object()
class BoundProperty(object):
"""
A bound, named version of a Property.
A BoundProperty is different than a Property, in that it requires a name,
as well as all of the arguments to be provided. It's intended to be
the declaration of the Property, with no mutation, so the logic about
what a property does is very clear.
The reason there is a distinction between this and a Property is because
we want the user interface for defining properties to be
PROPERTIES = {
'prop_name': Property(),
}
We don't want to have to duplicate the name in both the key of the dictionary
and the Property constructor call, so we need to modify this dictionary
before we actually use it, and inject knowledge into it about its name. We
don't want to actually mutate this though, since we're striving for immutable,
declarative code, so instead we generate a new BoundProperty object from the
defined Property object.
"""
MODULE_PROPERTY = 'module'
RECIPE_PROPERTY = 'recipe'
@staticmethod
def legal_module_property_name(name, full_decl_name):
"""
Returns True if this is a special '$repo_name/module' property name.
"""
repo_name, module = full_decl_name.split('::', 1)
return name == '$%s/%s' % (repo_name, module)
@staticmethod
def legal_name(name, is_param_name=False):
"""
Returns True if this name is a legal property name.
is_param_name determines whether this is the name of a property or a
param_name. See the constructor documentation for more information.
The rules are as follows:
* Cannot start with an underscore.
This is for internal arguments, namely _engine (for the step module).
* Cannot be 'self'
This is to avoid conflict with recipe modules, which use the name self.
* Cannot be a python keyword
"""
if name.startswith('_'):
return False
if name in ('self',):
return False
if keyword.iskeyword(name):
return False
regex = r'^[a-zA-Z][a-zA-Z0-9_]*$' if is_param_name else (
r'^[a-zA-Z][.\w-]*$')
return bool(re.match(regex, name))
def __init__(self, default, from_environ, help, kind, name, property_type,
full_decl_name, param_name=None):
"""
Constructor for BoundProperty.
Args:
default (jsonish): The default value for this Property. Must be
JSON-encodable or PROPERTY_SENTINEL.
from_environ (str|None): If given, specifies an environment variable to
grab the default property value from before falling back to the
hardcoded default. If the property value is explicitly passed to the
recipe, it still takes precedence over the environment. If you rely on
this, 'kind' must be string-compatible (since environ contains strings).
help (str): The help text for this Property.
kind (type|ConfigBase): The type of this Property. You can either pass in
a raw python type, or a Config Type, using the recipe engine config
system.
name (str): The name of this Property.
property_type (str): One of RECIPE_PROPERTY or MODULE_PROPERTY.
full_decl_name (str): The fully qualified name of the recipe or module
where this property is defined. This has the form of:
repo_name::module_name
repo_name::path/to/recipe
param_name (str|None): The name of the python function parameter this
property should be stored in. Can be used to allow for dotted property
names, e.g.
PROPERTIES = {
'foo.bar.bam': Property(param_name="bizbaz")
}
"""
assert property_type in (self.RECIPE_PROPERTY, self.MODULE_PROPERTY), \
property_type
# first, check if this is a special '$repo_name/module' property type
# declaration.
is_module_property = (
property_type is self.MODULE_PROPERTY and
self.legal_module_property_name(name, full_decl_name))
if not (is_module_property or BoundProperty.legal_name(name)):
raise ValueError("Illegal name '{}'.".format(name))
param_name = param_name or name
if not BoundProperty.legal_name(param_name, is_param_name=True):
raise ValueError("Illegal param_name '{}'.".format(param_name))
if default is not PROPERTY_SENTINEL:
try:
json.dumps(default)
except:
raise TypeError('default=%r is not json-encodable' % (default,))
self.__default = default
self.__from_environ = from_environ
self.__help = help
self.__kind = kind
self.__name = name
self.__property_type = property_type
self.__param_name = param_name
self.__full_decl_name = full_decl_name
@property
def name(self):
return self.__name
@property
def param_name(self):
return self.__param_name
@property
def default(self):
if self.__default is PROPERTY_SENTINEL:
return self.__default
return copy.deepcopy(self.__default)
@property
def from_environ(self):
return self.__from_environ
@property
def kind(self):
return self.__kind
@property
def help(self):
return self.__help
@property
def full_decl_name(self):
return self.__full_decl_name
def interpret(self, value, environ):
"""
Interprets the value for this Property.
Args:
value: The value to interpret. May be None, which means no explicit value
is provided and we should grab a default.
environ: An environment dict to use for grabbing values for properties
that use 'from_environ'.
Returns:
The value to use for this property. Raises an error if
this property has no valid interpretation.
"""
# Pick from environment if not given explicitly.
if value is PROPERTY_SENTINEL and self.__from_environ:
value = environ.get(self.__from_environ, PROPERTY_SENTINEL)
# If have a value (passed explicitly or through environ), check its type.
if value is not PROPERTY_SENTINEL:
if self.kind is not None:
# The config system handles type checking for us here.
self.kind.set_val(value)
return value
if self.__default is not PROPERTY_SENTINEL:
return self.default
raise ValueError(
"No default specified and no value provided for '{}' from {} '{}'".format(
self.name, self.__property_type, self.full_decl_name))
class Property(object):
def __init__(self, default=PROPERTY_SENTINEL, from_environ=None, help="",
kind=None, param_name=None):
"""
Constructor for Property.
Args:
default: The default value for this Property. Note: A default
value of None is allowed. To have no default value, omit
this argument. This must be a valid JSON-encodable object.
from_environ: If given, specifies an environment variable to grab the
default property value from before falling back to the
hardcoded default. If the property value is explicitly
passed to the recipe, it still takes precedence over the
environment. If you rely on this, 'kind' must be
string-compatible (since environ contains strings).
help: The help text for this Property.
kind: The type of this Property. You can either pass in a raw python
type, or a Config Type, using the recipe engine config system.
"""
if default is not PROPERTY_SENTINEL:
try:
json.dumps(default)
except:
raise TypeError('default=%r is not json-encodable' % (default,))
if from_environ is not None:
if not isinstance(from_environ, basestring):
raise TypeError('from_environ=%r must be a string' % (from_environ,))
self._default = default
self._from_environ = from_environ
self.help = help
self.param_name = param_name
# NOTE: late import to avoid early protobuf import
from .config import Single
if isinstance(kind, type):
if sys.version_info.major < 3 and kind in (str, unicode):
kind = basestring
kind = Single(kind)
self.kind = kind
def bind(self, name, property_type, full_decl_name):
"""
Gets the BoundProperty version of this Property. Requires a name.
"""
return BoundProperty(
self._default, self._from_environ, self.help, self.kind, name,
property_type, full_decl_name, self.param_name)
class UndefinedPropertyException(TypeError):
pass
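# A minimal sketch (not part of the engine) of how a Property declared in a
# PROPERTIES dict gets bound and interpreted; the recipe name and values below
# are hypothetical.
def _example_property_binding():
    """Illustrates Property -> BoundProperty -> interpreted value."""
    prop = Property(default='linux', kind=str, help='Target OS to build for.')
    bound = prop.bind(
        'target_os', BoundProperty.RECIPE_PROPERTY, 'my_repo::path/to/recipe')
    # No explicit value and no environment override, so the declared default
    # comes back.
    return bound.interpret(PROPERTY_SENTINEL, environ={})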
|
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import constants as q_const
from neutron.common import rpc as q_rpc
from neutron.common import topics
from neutron.db import agents_db
from neutron.db import api as db_api
from neutron.db import dhcp_rpc_base
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron import manager
from neutron.openstack.common import log
from neutron.openstack.common.rpc import proxy
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers import type_tunnel
# REVISIT(kmestery): Allow the type and mechanism drivers to supply the
# mixins and eventually remove the direct dependencies on type_tunnel.
LOG = log.getLogger(__name__)
TAP_DEVICE_PREFIX = 'tap'
TAP_DEVICE_PREFIX_LENGTH = 3
class RpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin,
sg_db_rpc.SecurityGroupServerRpcCallbackMixin,
type_tunnel.TunnelRpcCallbackMixin):
RPC_API_VERSION = '1.1'
# history
# 1.0 Initial version (from openvswitch/linuxbridge)
# 1.1 Support Security Group RPC
def __init__(self, notifier, type_manager):
# REVISIT(kmestery): This depends on the first three super classes
# not having their own __init__ functions. If an __init__() is added
# to one, this could break. Fix this and add a unit test to cover this
# case in H3.
super(RpcCallbacks, self).__init__(notifier, type_manager)
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
@classmethod
def _device_to_port_id(cls, device):
# REVISIT(rkukura): Consider calling into MechanismDrivers to
# process device names, or having MechanismDrivers supply list
# of device prefixes to strip.
if device.startswith(TAP_DEVICE_PREFIX):
return device[TAP_DEVICE_PREFIX_LENGTH:]
else:
return device
@classmethod
def get_port_from_device(cls, device):
port_id = cls._device_to_port_id(device)
port = db.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def get_device_details(self, rpc_context, **kwargs):
"""Agent requests device details."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s details requested by agent "
"%(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port_id = self._device_to_port_id(device)
session = db_api.get_session()
with session.begin(subtransactions=True):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s not found in database"),
{'device': device, 'agent_id': agent_id})
return {'device': device}
segments = db.get_network_segments(session, port.network_id)
if not segments:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s has network %(network_id)s with "
"no segments"),
{'device': device,
'agent_id': agent_id,
'network_id': port.network_id})
return {'device': device}
binding = db.ensure_port_binding(session, port.id)
if not binding.segment:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s on network %(network_id)s not "
"bound, vif_type: %(vif_type)s"),
{'device': device,
'agent_id': agent_id,
'network_id': port.network_id,
'vif_type': binding.vif_type})
return {'device': device}
segment = self._find_segment(segments, binding.segment)
if not segment:
LOG.warning(_("Device %(device)s requested by agent "
"%(agent_id)s on network %(network_id)s "
"invalid segment, vif_type: %(vif_type)s"),
{'device': device,
'agent_id': agent_id,
'network_id': port.network_id,
'vif_type': binding.vif_type})
return {'device': device}
new_status = (q_const.PORT_STATUS_BUILD if port.admin_state_up
else q_const.PORT_STATUS_DOWN)
if port.status != new_status:
port.status = new_status
entry = {'device': device,
'network_id': port.network_id,
'port_id': port.id,
'admin_state_up': port.admin_state_up,
'network_type': segment[api.NETWORK_TYPE],
'segmentation_id': segment[api.SEGMENTATION_ID],
'physical_network': segment[api.PHYSICAL_NETWORK]}
LOG.debug(_("Returning: %s"), entry)
return entry
def _find_segment(self, segments, segment_id):
for segment in segments:
if segment[api.ID] == segment_id:
return segment
def update_device_down(self, rpc_context, **kwargs):
"""Device no longer exists on agent."""
# TODO(garyk) - live migration and port status
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s no longer exists at agent "
"%(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port_id = self._device_to_port_id(device)
plugin = manager.NeutronManager.get_plugin()
port_exists = plugin.update_port_status(rpc_context, port_id,
q_const.PORT_STATUS_DOWN)
return {'device': device,
'exists': port_exists}
def update_device_up(self, rpc_context, **kwargs):
"""Device is up on agent."""
agent_id = kwargs.get('agent_id')
device = kwargs.get('device')
LOG.debug(_("Device %(device)s up at agent %(agent_id)s"),
{'device': device, 'agent_id': agent_id})
port_id = self._device_to_port_id(device)
plugin = manager.NeutronManager.get_plugin()
plugin.update_port_status(rpc_context, port_id,
q_const.PORT_STATUS_ACTIVE)
class AgentNotifierApi(proxy.RpcProxy,
sg_rpc.SecurityGroupAgentRpcApiMixin,
type_tunnel.TunnelAgentRpcApiMixin):
"""Agent side of the openvswitch rpc API.
API version history:
1.0 - Initial version.
1.1 - Added get_active_networks_info, create_dhcp_port,
update_dhcp_port, and removed get_dhcp_port methods.
"""
BASE_RPC_API_VERSION = '1.1'
def __init__(self, topic):
super(AgentNotifierApi, self).__init__(
topic=topic, default_version=self.BASE_RPC_API_VERSION)
self.topic_network_delete = topics.get_topic_name(topic,
topics.NETWORK,
topics.DELETE)
self.topic_port_update = topics.get_topic_name(topic,
topics.PORT,
topics.UPDATE)
def network_delete(self, context, network_id):
self.fanout_cast(context,
self.make_msg('network_delete',
network_id=network_id),
topic=self.topic_network_delete)
def port_update(self, context, port, network_type, segmentation_id,
physical_network):
self.fanout_cast(context,
self.make_msg('port_update',
port=port,
network_type=network_type,
segmentation_id=segmentation_id,
physical_network=physical_network),
topic=self.topic_port_update)
|
|
import demistomock as demisto
from CommonServerPython import *
''' IMPORTS '''
import base64
import json
import time
import devodsconnector as ds
import concurrent.futures
import tempfile
import urllib.parse
import re
import os
from datetime import datetime
from devo.sender import Lookup, SenderConfigSSL, Sender
from typing import List, Dict, Set
from devodsconnector import error_checking
from functools import partial
''' GLOBAL VARS '''
ALLOW_INSECURE = demisto.params().get('insecure', False)
READER_ENDPOINT = demisto.params().get('reader_endpoint', None)
READER_OAUTH_TOKEN = demisto.params().get('reader_oauth_token', None)
WRITER_RELAY = demisto.params().get('writer_relay', None)
WRITER_CREDENTIALS = demisto.params().get('writer_credentials', None)
LINQ_LINK_BASE = demisto.params().get('linq_link_base', "https://us.devo.com/welcome")
FETCH_INCIDENTS_FILTER = demisto.params().get('fetch_incidents_filters', None)
FETCH_INCIDENTS_DEDUPE = demisto.params().get('fetch_incidents_deduplication', None)
TIMEOUT = demisto.params().get('timeout', '60')
PORT = arg_to_number(demisto.params().get('port', '443') or '443')
HEALTHCHECK_WRITER_RECORD = [{'hello': 'world', 'from': 'demisto-integration'}]
HEALTHCHECK_WRITER_TABLE = 'test.keep.free'
RANGE_PATTERN = re.compile('^[0-9]+ [a-zA-Z]+')
TIMESTAMP_PATTERN = re.compile('^[0-9]+')
TIMESTAMP_PATTERN_MILLI = re.compile('^[0-9]+.[0-9]+')
ALERTS_QUERY = '''
from
siem.logtrust.alert.info
select
eventdate,
alertHost,
domain,
priority,
context,
category,
status,
alertId,
srcIp,
srcPort,
srcHost,
dstIp,
dstPort,
dstHost,
application,
engine,
extraData
'''
HEALTHCHECK_QUERY = '''
from
test.keep.free
select
*
'''
SEVERITY_LEVELS_MAP = {
'1': 0.5,
'2': 1,
'3': 2,
'4': 3,
'5': 4,
'informational': 0.5,
'low': 1,
'medium': 2,
'high': 3,
'critical': 4
}
''' HELPER FUNCTIONS '''
def alert_to_incident(alert):
alert_severity = float(1)
alert_name = alert['context'].split('.')[-1]
alert_description = None
alert_occurred = demisto_ISO(float(alert['eventdate']) / 1000)
alert_labels = []
if demisto.get(alert['extraData'], 'alertPriority'):
alert_severity = SEVERITY_LEVELS_MAP[str(alert['extraData']['alertPriority']).lower()]
if demisto.get(alert['extraData'], 'alertName'):
alert_name = alert['extraData']['alertName']
if demisto.get(alert['extraData'], 'alertDescription'):
alert_description = alert['extraData']['alertDescription']
new_alert: Dict = {
'devo.metadata.alert': {}
}
for key in alert:
if key == 'extraData':
continue
new_alert['devo.metadata.alert'][key] = alert[key]
alert_labels.append({'type': f'devo.metadata.alert.{key}', 'value': str(alert[key])})
for key in alert['extraData']:
new_alert[key] = alert['extraData'][key]
alert_labels.append({'type': f'{key}', 'value': str(alert['extraData'][key])})
incident = {
'name': alert_name,
'severity': alert_severity,
'details': alert_description,
'occurred': alert_occurred,
'labels': alert_labels,
'rawJSON': json.dumps(new_alert)
}
return incident
# Monkey patching for backwards compatibility
def get_types(self, linq_query, start, ts_format):
type_map = self._make_type_map(ts_format)
stop = self._to_unix(start)
start = stop - 1
response = self._query(linq_query, start=start, stop=stop, mode='json/compact', limit=1)
try:
data = json.loads(response)
error_checking.check_status(data)
except ValueError:
raise Exception('API V2 response error')
col_data = data['object']['m']
type_dict = {c: type_map[v['type']] for c, v in col_data.items()}
return type_dict
def build_link(query, start_ts_milli, end_ts_milli, mode='loxcope', linq_base=None):
myb64str = base64.b64encode((json.dumps({
'query': query,
'mode': mode,
'dates': {
'from': start_ts_milli,
'to': end_ts_milli
}
}).encode('ascii'))).decode()
if linq_base:
url = linq_base + f"/#/vapps/app.custom.queryApp_dev?&targetQuery={myb64str}"
else:
url = LINQ_LINK_BASE + f"/#/vapps/app.custom.queryApp_dev?&targetQuery={myb64str}"
return url
def check_configuration():
# Check all related settings, if they are set.
# Basic functionality check of the integration.
list(ds.Reader(oauth_token=READER_OAUTH_TOKEN, end_point=READER_ENDPOINT, verify=not ALLOW_INSECURE)
.query(HEALTHCHECK_QUERY, start=int(time.time() - 1), stop=int(time.time()), output='dict'))
if WRITER_RELAY and WRITER_CREDENTIALS:
creds = get_writer_creds()
Sender(SenderConfigSSL(address=(WRITER_RELAY, PORT),
key=creds['key'].name, cert=creds['crt'].name, chain=creds['chain'].name))\
.send(tag=HEALTHCHECK_WRITER_TABLE, msg=f'{HEALTHCHECK_WRITER_RECORD}')
if FETCH_INCIDENTS_FILTER:
alert_filters = check_type(FETCH_INCIDENTS_FILTER, dict)
assert alert_filters['type'] in ['AND', 'OR'], 'Missing key:"type" or unsupported value in fetch_incidents_filters'
filters = check_type(alert_filters['filters'], list)
for filt in filters:
assert filt['key'], 'Missing key: "key" in fetch_incidents_filters.filters configuration'
assert filt['operator'] in ['=', '/=', '>', '<', '>=', '<=', 'and', 'or', '->'], 'Missing key: "operator"'\
' or unsupported operator in fetch_incidents_filters.filters configuration'
assert filt['value'], 'Missing key:"value" in fetch_incidents_filters.filters configuration'
if FETCH_INCIDENTS_DEDUPE:
dedupe_conf = check_type(FETCH_INCIDENTS_DEDUPE, dict)
assert isinstance(dedupe_conf['cooldown'], (int, float)), 'Invalid fetch_incidents_deduplication configuration'
return True
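# Example (illustrative; the keys and operators follow the asserts above) of a
# valid fetch_incidents_filters parameter value:
#
#   {
#     "type": "AND",
#     "filters": [
#       {"key": "priority", "operator": ">=", "value": "4"},
#       {"key": "status",   "operator": "=",  "value": "0"}
#     ]
#   }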
def check_type(input, tar_type):
if isinstance(input, str):
input = json.loads(input)
if not isinstance(input, tar_type):
raise ValueError(f'input should either be a json string of a {tar_type} or a {tar_type}')
elif isinstance(input, tar_type):
pass
else:
raise ValueError(f'input should either be a json string of a {tar_type} or a {tar_type}')
return input
# Converts an epoch timestamp (in seconds) to an ISO 8601 string
def demisto_ISO(s_epoch):
if s_epoch >= 0:
return datetime.utcfromtimestamp(s_epoch).strftime("%Y-%m-%dT%H:%M:%S.%fZ")
return s_epoch
# We assume timestamp_from and timestamp_to share the same format, or that timestamp_to is None
def get_time_range(timestamp_from, timestamp_to):
if isinstance(timestamp_from, (int, float)):
t_from = timestamp_from
if timestamp_to is None:
t_to = time.time()
else:
t_to = timestamp_to
elif isinstance(timestamp_from, str):
if re.fullmatch(RANGE_PATTERN, timestamp_from):
t_range = parse_date_range(timestamp_from)
t_from = t_range[0].timestamp()
t_to = t_range[1].timestamp()
elif re.fullmatch(TIMESTAMP_PATTERN, timestamp_from) or re.fullmatch(TIMESTAMP_PATTERN_MILLI, timestamp_from):
t_from = float(timestamp_from)
if timestamp_to is None:
t_to = time.time()
else:
t_to = float(timestamp_to)
else:
t_from = date_to_timestamp(timestamp_from) / 1000
if timestamp_to is None:
t_to = time.time()
else:
t_to = date_to_timestamp(timestamp_to) / 1000
elif isinstance(timestamp_from, datetime):
t_from = timestamp_from.timestamp()
if timestamp_to is None:
t_to = time.time()
else:
t_to = timestamp_to.timestamp()
return (t_from, t_to)
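# A small sketch (not part of the original integration) of the timestamp
# formats accepted above; parse_date_range and date_to_timestamp come from the
# CommonServerPython star import.
def _example_time_ranges():
    # Epoch seconds given as a number; 'to' defaults to the current time.
    numeric = get_time_range(1609459200, None)
    # Relative range string, handled by parse_date_range.
    relative = get_time_range('2 days', None)
    # Epochs given as strings (seconds, optionally with a fractional part).
    explicit = get_time_range('1609459200.5', '1609545600')
    return numeric, relative, explicit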
def get_writer_creds():
if WRITER_RELAY is None:
raise ValueError('writer_relay is not set in your Devo Integration')
if WRITER_CREDENTIALS is None:
raise ValueError('writer_credentials are not set in your Devo Integration')
write_credentials = check_type(WRITER_CREDENTIALS, dict)
assert write_credentials['key'], 'Required key: "key" is not present in writer credentials'
assert write_credentials['crt'], 'Required key: "crt" is not present in writer credentials'
assert write_credentials['chain'], 'Required key: "chain" is not present in writer credentials'
# Limitation in the Devo DS Connector SDK: it currently requires file paths for credentials.
# File-handle type objects may be accepted in the future.
key_tmp = tempfile.NamedTemporaryFile(mode='w')
crt_tmp = tempfile.NamedTemporaryFile(mode='w')
chain_tmp = tempfile.NamedTemporaryFile(mode='w')
key_tmp.write(write_credentials['key'])
crt_tmp.write(write_credentials['crt'])
chain_tmp.write(write_credentials['chain'])
key_tmp.flush()
crt_tmp.flush()
chain_tmp.flush()
creds = {
'key': key_tmp,
'crt': crt_tmp,
'chain': chain_tmp
}
return creds
def parallel_query_helper(sub_query, append_list, timestamp_from, timestamp_to):
append_list.extend(list(ds.Reader(oauth_token=READER_OAUTH_TOKEN, end_point=READER_ENDPOINT,
verify=not ALLOW_INSECURE)
.query(sub_query, start=float(timestamp_from), stop=float(timestamp_to),
output='dict', ts_format='iso')))
''' FUNCTIONS '''
def fetch_incidents():
last_run = demisto.getLastRun()
alert_query = ALERTS_QUERY
to_time = time.time()
dedupe_config = None
alerts_list: Dict = {}
new_last_run: Dict = {
'from_time': to_time
}
if FETCH_INCIDENTS_FILTER:
alert_filters = check_type(FETCH_INCIDENTS_FILTER, dict)
if alert_filters['type'] == 'AND':
filter_string = ' , '.join([f'{filt["key"]} {filt["operator"]} "{urllib.parse.quote(filt["value"])}"'
for filt in alert_filters['filters']])
elif alert_filters['type'] == 'OR':
filter_string = ' or '.join([f'{filt["key"]} {filt["operator"]} "{urllib.parse.quote(filt["value"])}"'
for filt in alert_filters['filters']])
alert_query = f'{alert_query} where {filter_string}'
from_time = to_time - 3600
if 'from_time' in last_run:
from_time = float(last_run['from_time'])
if FETCH_INCIDENTS_DEDUPE:
dedupe_config = check_type(FETCH_INCIDENTS_DEDUPE, dict)
if 'alerts_list' in last_run:
alerts_list = last_run['alerts_list']
alerts_list = {k: v for k, v in alerts_list.items() if alerts_list[k] >= (to_time - float(dedupe_config['cooldown']))}
# Execute the query and get the events.
# Reverse the list so that the most recent event is kept when de-duping, if needed.
events = list(ds.Reader(oauth_token=READER_OAUTH_TOKEN, end_point=READER_ENDPOINT,
verify=not ALLOW_INSECURE, timeout=int(TIMEOUT))
.query(alert_query, start=float(from_time), stop=float(to_time),
output='dict', ts_format='timestamp'))[::-1]
deduped_events: List[Dict] = []
if FETCH_INCIDENTS_DEDUPE:
# Skip events already seen within the rolling de-duplication window
for event in events:
if any(de['context'] == event['context'] for de in deduped_events):
continue
if event['context'] in alerts_list:
continue
deduped_events.append(event)
alerts_list[event['context']] = event['eventdate']
events = deduped_events
new_last_run['alerts_list'] = alerts_list
# convert the events to demisto incident
incidents = []
for event in events:
event['extraData'] = json.loads(event['extraData'])
for ed in event['extraData']:
event['extraData'][ed] = urllib.parse.unquote_plus(event['extraData'][ed])
inc = alert_to_incident(event)
incidents.append(inc)
demisto.setLastRun(new_last_run)
# this command will create incidents in Demisto
demisto.incidents(incidents)
return incidents
def run_query_command():
to_query = demisto.args()['query']
timestamp_from = demisto.args()['from']
timestamp_to = demisto.args().get('to', None)
write_context = demisto.args()['writeToContext'].lower()
query_timeout = int(demisto.args().get('queryTimeout', TIMEOUT))
linq_base = demisto.args().get('linqLinkBase', None)
time_range = get_time_range(timestamp_from, timestamp_to)
results = list(ds.Reader(oauth_token=READER_OAUTH_TOKEN, end_point=READER_ENDPOINT, verify=not ALLOW_INSECURE,
timeout=query_timeout)
.query(to_query, start=float(time_range[0]), stop=float(time_range[1]),
output='dict', ts_format='iso'))
querylink = {'DevoTableLink': build_link(to_query, int(1000 * float(time_range[0])),
int(1000 * float(time_range[1])), linq_base=linq_base)}
entry = {
'Type': entryTypes['note'],
'Contents': results,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown']
}
entry_linq = {
'Type': entryTypes['note'],
'Contents': querylink,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown']
}
if len(results) == 0:
entry['HumanReadable'] = 'No results found'
entry['Devo.QueryResults'] = None
entry['Devo.QueryLink'] = querylink
return entry
headers = list(results[0].keys())
md = tableToMarkdown('Devo query results', results, headers)
entry['HumanReadable'] = md
md_linq = tableToMarkdown('Link to Devo Query', {'DevoTableLink': f'[Devo Direct Link]({querylink["DevoTableLink"]})'})
entry_linq['HumanReadable'] = md_linq
if write_context == 'true':
entry['EntryContext'] = {
'Devo.QueryResults': createContext(results)
}
entry_linq['EntryContext'] = {
'Devo.QueryLink': createContext(querylink)
}
return [entry, entry_linq]
def get_alerts_command():
timestamp_from = demisto.args()['from']
timestamp_to = demisto.args().get('to', None)
alert_filters = demisto.args().get('filters', None)
write_context = demisto.args()['writeToContext'].lower()
query_timeout = int(demisto.args().get('queryTimeout', TIMEOUT))
linq_base = demisto.args().get('linqLinkBase', None)
alert_query = ALERTS_QUERY
time_range = get_time_range(timestamp_from, timestamp_to)
if alert_filters:
alert_filters = check_type(alert_filters, dict)
if alert_filters['type'] == 'AND':
filter_string = ', '\
.join([f'{filt["key"]} {filt["operator"]} "{urllib.parse.quote(filt["value"])}"'
for filt in alert_filters['filters']])
elif alert_filters['type'] == 'OR':
filter_string = ' or '\
.join([f'{filt["key"]} {filt["operator"]} "{urllib.parse.quote(filt["value"])}"'
for filt in alert_filters['filters']])
alert_query = f'{alert_query} where {filter_string}'
results = list(ds.Reader(oauth_token=READER_OAUTH_TOKEN, end_point=READER_ENDPOINT,
verify=not ALLOW_INSECURE, timeout=query_timeout)
.query(alert_query, start=float(time_range[0]), stop=float(time_range[1]),
output='dict', ts_format='iso'))
querylink = {'DevoTableLink': build_link(alert_query, int(1000 * float(time_range[0])),
int(1000 * float(time_range[1])), linq_base=linq_base)}
for res in results:
res['extraData'] = json.loads(res['extraData'])
for ed in res['extraData']:
res['extraData'][ed] = urllib.parse.unquote_plus(res['extraData'][ed])
entry = {
'Type': entryTypes['note'],
'Contents': results,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown']
}
entry_linq = {
'Type': entryTypes['note'],
'Contents': querylink,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown']
}
if len(results) == 0:
entry['HumanReadable'] = 'No results found'
entry['Devo.AlertsResults'] = None
entry_linq['Devo.QueryLink'] = querylink
return entry
headers = list(results[0].keys())
md = tableToMarkdown('Devo query results', results, headers)
entry['HumanReadable'] = md
md_linq = tableToMarkdown('Link to Devo Query', {'DevoTableLink': f'[Devo Direct Link]({querylink["DevoTableLink"]})'})
entry_linq['HumanReadable'] = md_linq
if write_context == 'true':
entry['EntryContext'] = {
'Devo.AlertsResults': createContext(results)
}
entry_linq['EntryContext'] = {
'Devo.QueryLink': createContext(querylink)
}
return [entry, entry_linq]
def multi_table_query_command():
tables_to_query = check_type(demisto.args()['tables'], list)
search_token = demisto.args()['searchToken']
timestamp_from = demisto.args()['from']
timestamp_to = demisto.args().get('to', None)
limit = int(demisto.args().get('limit', 50))
write_context = demisto.args()['writeToContext'].lower()
query_timeout = int(demisto.args().get('queryTimeout', TIMEOUT))
time_range = get_time_range(timestamp_from, timestamp_to)
futures = []
all_results: List[Dict] = []
sub_queries = []
ds_read = ds.Reader(oauth_token=READER_OAUTH_TOKEN, end_point=READER_ENDPOINT,
verify=not ALLOW_INSECURE, timeout=query_timeout)
ds_read.get_types = partial(get_types, ds_read)
for table in tables_to_query:
fields = ds_read.get_types(f'from {table} select *', 'now', 'iso').keys()
clauses = [f"( isnotnull({field}) and str({field})->\"" + search_token + "\")" for field in fields]
sub_queries.append("from " + table + " where" + " or ".join(clauses) + " select *")
with concurrent.futures.ThreadPoolExecutor(max_workers=10) as executor:
for q in sub_queries:
futures.append(executor.submit(parallel_query_helper, q, all_results, time_range[0], time_range[1]))
concurrent.futures.wait(futures)
entry = {
'Type': entryTypes['note'],
'Contents': all_results,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown']
}
if len(all_results) == 0:
entry['HumanReadable'] = 'No results found'
return entry
if limit == 0:
pass
else:
all_results = all_results[:limit]
headers: Set = set().union(*(r.keys() for r in all_results))
md = tableToMarkdown('Devo query results', all_results, headers)
entry['HumanReadable'] = md
if write_context == 'true':
entry['EntryContext'] = {
'Devo.MultiResults': createContext(all_results)
}
return entry
def write_to_table_command():
table_name = demisto.args()['tableName']
records = check_type(demisto.args()['records'], list)
linq_base = demisto.args().get('linqLinkBase', None)
creds = get_writer_creds()
linq = f"from {table_name}"
sender = Sender(SenderConfigSSL(address=(WRITER_RELAY, PORT),
key=creds['key'].name, cert=creds['crt'].name, chain=creds['chain'].name))
for r in records:
try:
sender.send(tag=table_name, msg=json.dumps(r))
except TypeError:
sender.send(tag=table_name, msg=f"{r}")
querylink = {'DevoTableLink': build_link(linq, int(1000 * time.time()) - 3600000,
int(1000 * time.time()), linq_base=linq_base)}
entry = {
'Type': entryTypes['note'],
'Contents': {'recordsWritten': records},
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {
'Devo.RecordsWritten': records,
'Devo.LinqQuery': linq
}
}
entry_linq = {
'Type': entryTypes['note'],
'Contents': querylink,
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {
'Devo.QueryLink': createContext(querylink)
}
}
md = tableToMarkdown('Entries to load into Devo', records)
entry['HumanReadable'] = md
md_linq = tableToMarkdown('Link to Devo Query', {'DevoTableLink': f'[Devo Direct Link]({querylink["DevoTableLink"]})'})
entry_linq['HumanReadable'] = md_linq
return [entry, entry_linq]
def write_to_lookup_table_command():
lookup_table_name = demisto.args()['lookupTableName']
headers = check_type(demisto.args()['headers'], list)
records = check_type(demisto.args()['records'], list)
creds = get_writer_creds()
engine_config = SenderConfigSSL(address=(WRITER_RELAY, PORT),
key=creds['key'].name,
cert=creds['crt'].name,
chain=creds['chain'].name)
try:
con = Sender(config=engine_config, timeout=60)
lookup = Lookup(name=lookup_table_name,
historic_tag=None,
con=con)
# Order sensitive list
pHeaders = json.dumps(headers)
lookup.send_control('START', pHeaders, 'INC')
for r in records:
lookup.send_data_line(key=r['key'], fields=r['values'])
lookup.send_control('END', pHeaders, 'INC')
finally:
con.flush_buffer()
con.socket.shutdown(0)
entry = {
'Type': entryTypes['note'],
'Contents': {'recordsWritten': records},
'ContentsFormat': formats['json'],
'ReadableContentsFormat': formats['markdown'],
'EntryContext': {
'Devo.RecordsWritten': records
}
}
md = tableToMarkdown('Entries to load into Devo', records)
entry['HumanReadable'] = md
return [entry]
''' EXECUTION CODE '''
try:
if ALLOW_INSECURE:
os.environ['CURL_CA_BUNDLE'] = ''
os.environ['PYTHONWARNINGS'] = 'ignore:Unverified HTTPS request'
handle_proxy()
if demisto.command() == 'test-module':
check_configuration()
demisto.results('ok')
elif demisto.command() == 'fetch-incidents':
fetch_incidents()
elif demisto.command() == 'devo-run-query':
demisto.results(run_query_command())
elif demisto.command() == 'devo-get-alerts':
demisto.results(get_alerts_command())
elif demisto.command() == 'devo-multi-table-query':
demisto.results(multi_table_query_command())
elif demisto.command() == 'devo-write-to-table':
demisto.results(write_to_table_command())
elif demisto.command() == 'devo-write-to-lookup-table':
demisto.results(write_to_lookup_table_command())
except Exception as e:
return_error('Failed to execute command {}. Error: {}'.format(demisto.command(), str(e)))
|
|
"""This module implements functions that have to do with number theory."""
import random
from functools import reduce  # used by sum_divisors and num_divisors below
from operator import mul
_stock_primes = [2, 3, 5, 7, 11, 13, 17, 19]
def int_pow(x, n):
"""Raise x to the power n (if n is negative a ValueError is raised).
int_pow(0, 0) is defined to be 1.
"""
if n < 0:
raise ValueError("n must be non-negative")
elif n == 0:
return 1
else:
if n % 2 == 0:
tmp = int_pow(x, n // 2)
return tmp * tmp
else:
return x * int_pow(x, n - 1)
def mod_exp(b, e, m):
"""Calculate b to the e modulo m."""
if e < 0:
raise ValueError("e must be non-negative")
elif e == 0:
return 1
else:
if e % 2 == 0:
tmp = mod_exp(b, e // 2, m)
return tmp * tmp % m
else:
return b * mod_exp(b, e - 1, m) % m
def miller_rabin(n, k):
"""Declare n probably prime with probability at most 1/4^k (if returns true)
otherwise declare n composite (if returns false).
"""
if n <= 4:
raise ValueError("n must be greater than 4")
d = n - 1
s = 0
while d % 2 == 0:
d = d // 2
s += 1
for _ in range(k):
a = random.randint(2, n - 2)
x = mod_exp(a, d, n)
if x in (1, n - 1):
continue
next_loop = False
for r in range(1, s):
x = x * x % n
if x == 1:
return False #composite
if x == n - 1:
next_loop = True
break
if not next_loop:
return False #composite
return True #probably prime
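# A minimal sketch (not part of the original module): a convenience wrapper
# that handles the small values miller_rabin refuses and defers to it for
# everything else.
def _probably_prime(n, k=20):
    """Return True if n is prime (n <= 4) or probably prime (n > 4)."""
    if n <= 4:
        return n in (2, 3)
    return miller_rabin(n, k)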
def prime_sieve(n):
"""Calculate all primes up to and including n, and return the list of those
primes. If n is negative, a ValueError is raised.
"""
if n < 0:
raise ValueError("n must be non-negative")
candidates = list(range(n+1))
finish = int(n**0.5)
for i in range(2, finish+1):
if candidates[i]:
candidates[i*i::i] = [None] * len(candidates[i*i::i])
return [i for i in candidates[2:] if i]
def prime(n, primes=_stock_primes):
"""Checks if an integer n is a prime number. If primes is provided,
these can be used to speed up the test."""
if n < 2:
return False
for p in primes:
if p * p > n:
return True
if n % p == 0:
return False
p = primes[-1] + 2
while p * p <= n:
if n % p == 0:
return False
p = p + 2
return True
def isqrt(n):
"""Calculate the integer part of the square root of a natural number n.
Uses a binary search to find the integer square root, and so runs
logarithmically. If n is negative, a ValueError is raised.
"""
if n < 0:
raise ValueError("n must be non-negative")
a, b = 0, n+1
while b - a != 1:
mid = (a + b) // 2
if mid*mid <= n:
a = mid
else:
b = mid
return a
def perfect_square(n):
"""Calculate if an integer is a perfect square. Constant time complexity
for most numbers due to modulo tests. Worst case time complexity is logn
when the square of the isqrt is checked against n.
"""
#negative values cannot be perfect squares
if n < 0:
return False
#quick residue checks modulo 8, 32, 128 and 256; note the parentheses, since
#'!=' binds tighter than '&' in Python
if ((n & 7) != 1) and ((n & 31) != 4) and ((n & 127) != 16) and ((n & 191) != 0):
return False
#check that n is a square modulo 9, 5, 7, 13, and 17
if n % 9 not in (0, 1, 4, 7): return False
if n % 5 not in (0, 1, 4): return False
if n % 7 not in (0, 1, 2, 4): return False
if n % 13 not in (0, 1, 3, 4, 9, 10, 12): return False
if n % 17 not in (0, 1, 2, 4, 8, 9, 13, 15, 16): return False
#check using isqrt
i = isqrt(n)
return i*i == n
def decomp_sieve(n):
"""Calculate the prime decomposition for each number up and including n,
and return the prime decompositions in a list indexed by that number.
"""
result = [dict() for i in range(n+1)]
p = 2
while p <= n:
for pk in range(p, n+1, p):
result[pk][p] = 1
palpha = p*p
while palpha <= n:
for palphak in range(palpha, n+1, palpha):
result[palphak][p] += 1
palpha *= p
while p <= n and result[p]:
p += 1
return result
def decomp(n, primes=_stock_primes):
"""Find the prime decomposition of a natural number. The result is returned
as a dictionary whose keys are powers and values are primes.
E.g. decomp(12) -> {2:2, 3:1}
A list of primes should be provided, with primes at least up to the square
root of n. If the prime list doesn't go that high, a ValueError will be
raised if any primes geater than the square root of the highest prime
provided enters n.
"""
if n < 1:
raise ValueError("n must be positive")
record = {}
if n == 1:
return record
for p in primes:
power = 0
while n % p == 0:
power += 1
n = n // p
if power != 0:
record[p] = power
if p * p > n:
if n != 1:
record[n] = 1 #this is the last prime in the record
return record
#we have run out of primes to check...
last_p = primes[-1]
if last_p * last_p > n:
record[n] = 1
else:
raise ValueError("not enough prime numbers in primes")
def factors(pd):
"""Yields all factors of a number given its prime decomposition."""
if pd:
prime, power = pd.popitem()
vals = [int_pow(prime, i) for i in range(power + 1)]
for partial_factor in factors(pd):
for val in vals:
yield val * partial_factor
pd[prime] = power
else:
yield 1
def sum_divisors(pd):
"""Calculate the lowercase sigma function (sum of divisors) of a natural
number given its prime decomposition pd.
"""
if pd == {}: #decomp corresponds to 1
return 1
else:
return reduce(mul, [(int_pow(p, pd[p]+1)-1) // (p-1) for p in pd])
def num_divisors(pd):
"""Calculates the tau function (number of divisors) of a natural number
given its prime decomposition pd.
"""
if pd == {}:
return 1
else:
return reduce(mul, [pd[p] + 1 for p in pd])
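# Worked example (a sketch, not part of the original module): for n = 360 the
# decomposition is {2: 3, 3: 2, 5: 1}, so sigma(360) = 15 * 13 * 6 = 1170 and
# tau(360) = 4 * 3 * 2 = 24. Copies are passed to factors() because it
# temporarily pops items from the dict while generating.
def _example_divisor_functions():
    pd = decomp(360, prime_sieve(20))
    assert pd == {2: 3, 3: 2, 5: 1}
    assert sum_divisors(dict(pd)) == 1170
    assert num_divisors(dict(pd)) == 24
    assert sorted(factors(dict(pd)))[:4] == [1, 2, 3, 4]
    return pd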
def nominal_record(n, base):
"""Calculate the digital record of a natural number n with a certain
base.
"""
if n < 1:
raise ValueError("n must be >= 1")
if base < 2:
raise ValueError("base must be >= 2")
record = []
while n > 0:
record.insert(0, n % base)
n = n // base
return record
def eval_nominal_record(record, base):
place_value = 1
value = 0
for digit in reversed(record):
value += digit * place_value
place_value *= base
return value
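# Round-trip sketch (not part of the original module): eval_nominal_record is
# the inverse of nominal_record for any base >= 2.
def _example_nominal_record():
    digits = nominal_record(2021, 16)             # [7, 14, 5], i.e. 0x7E5
    assert eval_nominal_record(digits, 16) == 2021
    return digits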
|
|
import string
import envi.memory as e_mem
import envi.memcanvas as e_canvas
import envi.memcanvas.renderers as e_render
from PyQt4 import QtGui, QtCore
class VQLineEdit(QtGui.QLineEdit):
'''
Has an additional signal to emit a signal on release of every keypress.
'''
keyReleased = QtCore.pyqtSignal(QtGui.QKeyEvent)
def keyReleaseEvent(self, event):
self.keyReleased.emit(event)
QtGui.QLineEdit.keyReleaseEvent(self, event)
class MemNavWidget(QtGui.QWidget):
userChanged = QtCore.pyqtSignal(str, str)
def __init__(self):
QtGui.QWidget.__init__(self)
self.expr_entry = QtGui.QLineEdit()
self.esize_entry = QtGui.QLineEdit()
hbox1 = QtGui.QHBoxLayout()
hbox1.setMargin(2)
hbox1.setSpacing(4)
hbox1.addWidget(self.expr_entry)
hbox1.addWidget(self.esize_entry)
self.setLayout(hbox1)
self.expr_entry.returnPressed.connect(self.emitUserChangedSignal)
self.esize_entry.returnPressed.connect(self.emitUserChangedSignal)
def emitUserChangedSignal(self):
'''
Emits signal when user manually enters new expressions in the expr or
size field and presses enter.
'''
expr = str(self.expr_entry.text())
size = str(self.esize_entry.text())
self.userChanged.emit(expr, size)
def setValues(self, expr, esize):
'''
Called externally to allow programmatic way to update the expr or size
field. Does not emit the changed signal.
'''
self.expr_entry.setText(expr)
self.esize_entry.setText(esize)
def getValues(self):
return str(self.expr_entry.text()), str(self.esize_entry.text())
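# Usage sketch (not part of the original module): wiring MemNavWidget into a
# parent widget. Assumes a running QApplication and a handler slot that takes
# the (expr, size) strings emitted by userChanged.
def _example_nav_wiring(handler):
    nav = MemNavWidget()
    nav.userChanged.connect(handler)
    nav.setValues('0x1000', '0x40')  # programmatic update; does not emit
    return nav.getValues()           # -> ('0x1000', '0x40')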
class MemWriteWindow(QtGui.QWidget):
'''
GUI for the writemem CLI command.
'''
renderRequest = QtCore.pyqtSignal(str, str)
# button to write memory was clicked (va, bytez)
writeToMemory = QtCore.pyqtSignal(str, str)
def __init__(self, expr='', esize='', emu=None, parent=None):
QtGui.QWidget.__init__(self, parent=parent)
self.modes = ['ascii', 'hex', 'regex', 'utf-8', 'utf-16-le',
'utf-16-be']
rend_orig = e_render.ByteRend()
self.canvas_orig = e_canvas.StringMemoryCanvas(None)
self.canvas_orig.addRenderer('bytes', rend_orig)
rend_new = e_render.ByteRend()
self.canvas_new = e_canvas.StringMemoryCanvas(None)
self.canvas_new.addRenderer('bytes', rend_new)
hbox1 = QtGui.QHBoxLayout()
self.nav = MemNavWidget()
self.nav.userChanged.connect(self.renderMemory)
self.renderRequest.connect(self.nav.setValues)
hbox1.addWidget(self.nav)
hbox2 = QtGui.QHBoxLayout()
self.hex_edit = QtGui.QPlainTextEdit()
self.hex_edit.setWordWrapMode(QtGui.QTextOption.NoWrap)
self.hex_edit.setReadOnly(True)
font = QtGui.QFont('Courier') # should use actual memcanvas
self.hex_edit.setFont(font)
hbox2.addWidget(self.hex_edit)
vbox1 = QtGui.QVBoxLayout()
vbox1.addLayout(hbox1)
vbox1.addLayout(hbox2)
gbox1 = QtGui.QGroupBox('Original Bytes')
gbox1.setLayout(vbox1)
hbox3 = QtGui.QHBoxLayout()
mode_label = QtGui.QLabel('Input:')
self.mode_combo = QtGui.QComboBox()
self.mode_combo.addItems(self.modes)
self.mode_combo.currentIndexChanged.connect(self.encodingChanged)
hbox3.addWidget(mode_label)
hbox3.addWidget(self.mode_combo, alignment=QtCore.Qt.AlignLeft)
hbox3.addStretch(1)
hbox4 = QtGui.QHBoxLayout()
data_label = QtGui.QLabel('Bytes:')
self.data_edit = VQLineEdit()
self.data_edit.keyReleased.connect(self.keyReleasedSlot)
hbox4.addWidget(data_label)
hbox4.addWidget(self.data_edit)
vbox2 = QtGui.QVBoxLayout()
vbox2.addLayout(hbox3)
vbox2.addLayout(hbox4)
gbox2 = QtGui.QGroupBox('New Bytes')
gbox2.setLayout(vbox2)
hbox5 = QtGui.QHBoxLayout()
self.hex_preview = QtGui.QPlainTextEdit()
self.hex_preview.setWordWrapMode(QtGui.QTextOption.NoWrap)
self.hex_preview.setReadOnly(True)
self.hex_preview.setFont(font)
hbox5.addWidget(self.hex_preview)
vbox3 = QtGui.QVBoxLayout()
vbox3.addLayout(hbox5)
gbox3 = QtGui.QGroupBox('Result Preview')
gbox3.setLayout(vbox3)
hbox6 = QtGui.QHBoxLayout()
button = QtGui.QPushButton('Write Memory')
button.clicked.connect(self.buttonClicked)
hbox6.addWidget(button)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(gbox1)
vbox.addWidget(gbox2)
vbox.addWidget(gbox3)
vbox.addLayout(hbox6)
self.setLayout(vbox)
self.setWindowTitle('Memory Write')
self.resize(650, 500)
self.data_edit.setFocus()
self.emu = emu
self.renderMemory(expr, esize)
def renderMemory(self, expr=None, esize=None, emu=None):
if emu != None:
self.emu = emu
curexpr, cur_esize = self.nav.getValues()
if expr == None:
expr = curexpr
if esize == None:
esize = cur_esize
self.renderRequest.emit(expr, esize)
try:
# str() for QString -> ascii strings
va = self.emu.parseExpression(str(expr))
size = self.emu.parseExpression(str(esize))
bytez = self.emu.readMemory(va, size)
self.updateHexOrig(va, bytez)
encoding = str(self.mode_combo.currentText())
rbytes = str(self.data_edit.text())
erbytes = self.encodeData(rbytes, encoding)
# The encoded bytes are bigger than the amount we are displaying.
if len(erbytes) > len(bytez):
self.hex_preview.setPlainText('too many bytes, change size, encoding or input')
return
bytez = erbytes + bytez[len(erbytes):]
self.updateHexPreview(va, bytez)
except Exception as e:
self.hex_preview.setPlainText(str(e))
def keyReleasedSlot(self, event):
encoding = self.mode_combo.currentText()
self.encodingChanged(None)
def encodingChanged(self, idx):
encoding = str(self.mode_combo.currentText())
validator = None
if encoding == 'hex':
# only clear the box if there are non-hex chars
# before setting the validator.
txt = str(self.data_edit.text())
if not all(c in string.hexdigits for c in txt):
self.data_edit.setText('')
regex = QtCore.QRegExp('^[0-9A-Fa-f]+$')
validator = QtGui.QRegExpValidator(regex)
self.data_edit.setValidator(validator)
self.renderMemory()
def encodeData(self, txt, encoding):
if encoding == 'hex' and (len(txt) % 2) != 0:
txt = txt[:-1] # trim last if odd length
if encoding == 'hex':
if not all(c in string.hexdigits for c in txt):
return None
return txt.decode(encoding)
elif encoding == 'regex':
return None
return txt.encode(encoding)
def updateHexOrig(self, va, bytez):
if bytez == None:
self.hex_edit.setPlainText('')
return
self.canvas_orig.clearCanvas()
mem = e_mem.MemoryObject()
mem.addMemoryMap(va, e_mem.MM_READ, '', bytez)
self.canvas_orig.mem = mem
self.canvas_orig.renderMemory(va, len(bytez))
self.hex_edit.setPlainText(str(self.canvas_orig))
def updateHexPreview(self, va, bytez):
if bytez == None:
self.hex_preview.setPlainText('')
return
self.canvas_new.clearCanvas()
mem = e_mem.MemoryObject()
mem.addMemoryMap(va, e_mem.MM_READ, '', bytez)
self.canvas_new.mem = mem
self.canvas_new.renderMemory(va, len(bytez))
self.hex_preview.setPlainText(str(self.canvas_new))
def buttonClicked(self):
curexpr, cur_esize = self.nav.getValues()
encoding = str(self.mode_combo.currentText())
rbytes = str(self.data_edit.text())
erbytes = self.encodeData(rbytes, encoding)
hexbytes = erbytes.encode('hex')
self.writeToMemory.emit(curexpr, hexbytes)
def getValues(self):
return self.nav.getValues()
def setValues(self, expr, esize):
self.nav.setValues(expr, esize)
class MockEmu(object):
def parseExpression(self, expr):
return long(eval(expr, {}, {}))
def readMemory(self, va, size):
return '\x90' * size
def main():
import sys
app = QtGui.QApplication([])
w = MemWriteWindow('0x1234', '0xff', emu=MockEmu())
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation of DDEC charges based on data parsed by cclib."""
import copy
import random
import numpy
import logging
import math
import os
import sys
from cclib.method.calculationmethod import Method
from cclib.method.stockholder import Stockholder
from cclib.method.volume import electrondensity_spin
from cclib.parser.utils import convertor
from cclib.parser.utils import find_package
from typing import List
class MissingInputError(Exception):
pass
class ConvergenceError(Exception):
pass
class DDEC6(Stockholder):
"""DDEC6 charges."""
# All of these are required for DDEC6 charges.
required_attrs = ("homos", "mocoeffs", "nbasis", "gbasis")
def __init__(
self,
data,
volume,
proatom_path=None,
progress=None,
convergence_level=1e-10,
max_iteration=50,
loglevel=logging.INFO,
logname="Log",
):
""" Initialize DDEC6 object.
Inputs are:
data -- ccData object that describe target molecule.
volume -- Volume object that describe target Cartesian grid.
proatom_path -- path to proatom densities
(directory containing atoms.h5 in horton or c2_001_001_000_400_075.txt in chargemol)
convergence_level -- convergence level to use for conditioning densities in step 3
max_iteration -- maximum number of iterations used to optimize phi in steps 3-6
Note:
Proatom densities are used in DDEC6 algorithm in a similar way to other stockholder
partitioning methods. They are used as references to appropriately partition the
total densities (which is the density stored in cube files). Proatom densities are
densities obtained for single atom or ion in a radial grid that originates from
the atom or ion.
In DDEC6 algorithm, stockholder partitioning is heavily modified to ensure that the
total densities that are partitioned resemble the proatom densities and to prevent
the numerical algorithm from failing to converge.
"""
super(DDEC6, self).__init__(data, volume, proatom_path, progress, loglevel, logname)
self.convergence_level = convergence_level
self.max_iteration = max_iteration
if numpy.sum(self.data.coreelectrons) != 0:
# TODO: Pseudopotentials should be added back
pass
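# Usage sketch (not part of this module; the file name and grid construction
# are assumptions). DDEC6 is driven from parsed cclib data plus a Volume grid:
#
#   from cclib.io import ccread
#   data = ccread("calculation.out")     # any parsed cclib job
#   vol = ...                            # a cclib Volume spanning the molecule
#   ddec = DDEC6(data, vol, proatom_path="/path/to/proatom/densities")
#   ddec.calculate()
#   ddec.fragcharges                     # one DDEC6 charge per atom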
def __str__(self):
"""Return a string representation of the object."""
return "DDEC6 charges of {}".format(self.data)
def __repr__(self):
"""Return a representation of the object."""
return "DDEC6({})".format(self.data)
def _check_required_attributes(self):
super(DDEC6, self)._check_required_attributes()
def _cartesian_dist(self, pt1, pt2):
""" Small utility function that calculates Euclidian distance between two points
pt1 and pt2 are numpy arrays representing a point in Cartesian coordinates. """
return numpy.sqrt(numpy.dot(pt1 - pt2, pt1 - pt2))
def _read_proatom(
self, directory, atom_num, charge  # type: str  # type: int  # type: float
):
return super(DDEC6, self)._read_proatom(directory, atom_num, charge)
def calculate(self, indices=None, fupdate=0.05):
"""
Calculate DDEC6 charges based on doi: 10.1039/c6ra04656h paper.
Cartesian, uniformly spaced grids are assumed for this function.
"""
super(DDEC6, self).calculate()
# Notify user about the total charge in the density grid
integrated_density = self.charge_density.integrate()
self.logger.info(
"Total charge density in the grid is {}. If this does not match what is expected, using a finer grid may help.".format(
integrated_density
)
)
# * STEP 1 *
# Carry out step 1 of DDEC6 algorithm [Determining reference charge value]
# Refer to equations 49-57 in doi: 10.1039/c6ra04656h
self.logger.info("Creating first reference charges. (Step 1/7)")
(
reference_charges,
localized_charges,
stockholder_charges,
) = self.calculate_reference_charges()
self.reference_charges = [reference_charges]
self._localized_charges = [localized_charges]
self._stockholder_charges = [stockholder_charges]
# * STEP 2 *
# Load new proatom densities based on the reference charges determined in step 1.
self.logger.info("Creating second reference charges. (Step 2/7)")
self.proatom_density = []
self.radial_grid_r = []
for i, atom_number in enumerate(self.data.atomnos):
density, r = self._read_proatom(
self.proatom_path, atom_number, float(self.reference_charges[0][i])
)
self.proatom_density.append(density)
self.radial_grid_r.append(r)
# Carry out step 2 of DDEC6 algorithm [Determining ion charge value again]
ref, loc, stock = self.calculate_reference_charges()
self.reference_charges.append(ref)
self._localized_charges.append(loc)
self._stockholder_charges.append(stock)
# * STEP 3 *
# Load new proatom densities based on the reference charges determined in step 2.
self.proatom_density = []
self.radial_grid_r = []
self._cond_density = []
for i, atom_number in enumerate(self.data.atomnos):
density, r = self._read_proatom(
self.proatom_path, atom_number, float(self.reference_charges[1][i])
)
self.proatom_density.append(density)
self.radial_grid_r.append(r)
# Carry out step 3 of DDEC6 algorithm [Determine conditioned charge density and tau]
self.logger.info("Conditioning charge densities. (Step 3/7)")
self.condition_densities()
        # Steps 4 through 7 contain similar routines. The comments that precede each step
        # explain the differences among them.
# * STEP 4 *
# In step 4, calculate w, u, g, and h but always skip kappa updates.
self.logger.info("Optimizing grid weights. (Step 4/7)")
        self._kappa = [0.0] * self.data.natom
# N_A is assigned number of electrons determined using equation 72.
self.N_A = []
# u_A is determined using equation 77.
self.u_A = []
self.N_A.append(self._calculate_w_and_u())
# Calculate G_A and H_A based on S4.3 in doi: 10.1039/c6ra04656h
self.reshape_G()
self.calculate_H()
# Update weights (w_A) using equation 96 in doi: 10.1039/c6ra04656h
        # self._cond_density is first created in step 3 by conditioning on the total densities
        # as described in Figure S1. Then, this quantity is updated in every step that follows,
        # up to the last step of the algorithm, in which the weights placed on the grid are
        # iteratively updated.
for atomi in range(self.data.natom):
self._cond_density[atomi] = math.exp(self._kappa[atomi]) * self._h[atomi]
# Update rho_cond for next iteration
self._update_rho_cond()
# * STEPS 5 and 6 *
        # In steps 5 and 6, calculate w and u. Then, if update_kappa is found to be true, do
        # not continue to the next step. Otherwise, calculate g and h. In both cases, calculate
        # the new rho_cond.
steps = 5
self._update_kappa = False
while steps < 7:
self.logger.info("Optimizing grid weights. (Step {}/7)".format(steps))
self.N_A.append(self._calculate_w_and_u())
# Determine whether kappa needs to be updated or not based on Figure S4.2
# of doi: 10.1039/c6ra04656h
self._update_kappa = self._check_kappa()
if not self._update_kappa:
# Increment steps
steps = steps + 1
# Calculate G_A and H_A based on S4.3 in doi: 10.1039/c6ra04656h
self.reshape_G()
self.calculate_H()
else:
# `steps` not incremented in this case
# First update kappa based on equation 93
                kappa_new = numpy.asarray(self._kappa) - numpy.asarray(self.N_A[-1]) / numpy.asarray(self.u_A[-1])
self._kappa = [x if x > 0 else 0.0 for x in kappa_new]
# Update weights (w_A) using equation 96 in doi: 10.1039/c6ra04656h
            # self._cond_density is first created in step 3 by conditioning on the total
            # densities as described in Figure S1. Then, this quantity is updated in every step
            # that follows, up to the last step of the algorithm, in which the weights placed
            # on the grid are iteratively updated.
for atomi in range(self.data.natom):
self._cond_density[atomi] = math.exp(self._kappa[atomi]) * self._h[atomi]
# Update rho_cond for next iteration
self._update_rho_cond()
# * STEP 7 *
# In step 7, calculate w and u. Then, stop to calculate reference_charges.
self.logger.info("Optimizing grid weights. (Step 7/7)")
self.N_A.append(self._calculate_w_and_u())
# Finally, store calculated DDEC6 charges in fragcharges
self.logger.info("Creating fragcharges: array[1]")
self.fragcharges = numpy.array(self.data.atomnos - self.N_A[-1], dtype=float)
def _check_kappa(self):
""" Return whether kappa needs to be updated or not based on Figure S4.2
of doi: 10.1039/c6ra04656h
"""
if numpy.any([x if x < -1e-5 else 0 for x in self.N_A[-1]]):
return True
elif (
self._update_kappa
            and numpy.any(numpy.diff(self.N_A)[-1] < 1e-5)  # change in N_A during the last cycle
            and numpy.any(numpy.diff(self.N_A)[-2] < 1e-5)  # change in N_A during the
            # second-to-last cycle
):
self._kappa = [0.0 for x in self.data.atomnos]
return False
else:
return self._update_kappa
def calculate_reference_charges(self):
""" Calculate reference charges from proatom density and molecular density
[STEP 1 and 2]
Function returns calculated reference charges, localized charges, and stockholder
charges.
"""
        # Dimensions of the Cartesian grid
ngridx, ngridy, ngridz = self.charge_density.data.shape
grid_shape = (self.data.natom, ngridx, ngridy, ngridz)
stockholder_w = numpy.zeros(grid_shape)
localized_w = numpy.zeros(grid_shape)
self.closest_r_index = numpy.zeros(grid_shape, dtype=int)
indices = numpy.asanyarray(
tuple(
(x, y, z)
for x in range(ngridx)
for y in range(ngridy)
for z in range(ngridz)
)
)
coordinates = self.charge_density.coordinates(indices)
for atomi in range(self.data.natom):
            # Index of the closest radial grid point for every Cartesian grid point
self.closest_r_index[atomi] = numpy.argmin(
numpy.abs(
self.radial_grid_r[atomi][..., numpy.newaxis] - numpy.linalg.norm(self.data.atomcoords[-1][atomi] - coordinates, axis=1)
),
axis=0
).reshape((ngridx, ngridy, ngridz))
# Equation 54 in doi: 10.1039/c6ra04656h
stockholder_w[atomi] = self.proatom_density[atomi][self.closest_r_index[atomi]]
# Equation 55 in doi: 10.1039/c6ra04656h
localized_w = numpy.power(stockholder_w, 4)
# Equation 53 in doi: 10.1039/c6ra04656h
stockholder_bigW = numpy.sum(stockholder_w, axis=0)
localized_bigW = numpy.sum(localized_w, axis=0)
reference_charges = numpy.zeros((self.data.natom))
localizedcharges = numpy.zeros((self.data.natom))
stockholdercharges = numpy.zeros((self.data.natom))
for atomi in range(self.data.natom):
# Equation 52 and 51 in doi: 10.1039/c6ra04656h
localizedcharges[atomi] = self.data.atomnos[atomi] - self.charge_density.integrate(
weights=(localized_w[atomi] / localized_bigW)
)
stockholdercharges[atomi] = self.data.atomnos[atomi] - self.charge_density.integrate(
weights=(stockholder_w[atomi] / stockholder_bigW)
)
# In DDEC6, weights of 1/3 and 2/3 are assigned for stockholder and localized charges.
# (Equation 50 and 58 in doi: 10.1039/c6ra04656h)
reference_charges[atomi] = (stockholdercharges[atomi] / 3.0) + (
localizedcharges[atomi] * 2.0 / 3.0
)
return reference_charges, localizedcharges, stockholdercharges
def condition_densities(self):
""" Calculate conditioned densities
[STEP 3]
"""
        # Dimensions of the Cartesian grid
ngridx, ngridy, ngridz = self.charge_density.data.shape
self._rho_ref = numpy.zeros((ngridx, ngridy, ngridz))
for atomi in range(self.data.natom):
# rho_ref -- Equation 41 in doi: 10.1039/c6ra04656h
self._rho_ref += self.proatom_density[atomi][
self.closest_r_index[atomi]
]
self._candidates_bigPhi = []
self._candidates_phi = []
# Initial conditions are detailed in Figure S1 in doi: 10.1039/c6ra04656h
phiAI = numpy.zeros_like(self.data.atomnos, dtype=float)
bigphiAI = numpy.zeros_like(self.data.atomnos, dtype=float)
self._y_a = []
for atomi in range(self.data.natom):
# y_a -- equation 40 in doi: 10.1039/c6ra04656h
self._y_a.append(self._ya(self.proatom_density, atomi))
# rho_A^cond -- equation S97 in doi: 10.1039/c6ra04656h
self._cond_density.append(
self._y_a[atomi] + bigphiAI[atomi] * numpy.sqrt(self._y_a[atomi])
)
# Monotonic Decrease Condition (as expressed in equation S99)
self._cond_density[atomi] = numpy.minimum.accumulate(self._cond_density[atomi])
# phi_A^I -- Equation S100 in doi: 10.1039/c6ra04656h
phiAI[atomi] = (
self._integrate_from_radial([self._cond_density[atomi]], [atomi])
- self.data.atomnos[atomi]
+ self.reference_charges[-1][atomi]
)
self._candidates_bigPhi.append([bigphiAI[atomi]])
self._candidates_phi.append([phiAI[atomi]])
# Attempt to find the point where phiAI is zero iteratively
# Refer to S101 in doi: 10.1039/c6ra04656h
self._candidates_phi[atomi], self._candidates_bigPhi[atomi] = self._converge_phi(
phiAI[atomi], 1, atomi
)
# Perform parabolic fit to find optimized phiAI
# Refer to Figure S1 in doi: 10.1039/c6ra04656h
bigphiAI[atomi] = self._parabolic_fit(self._y_a[atomi], 1, atomi)
# Set final conditioned density using chosen Phi
self._cond_density[atomi] = self._update_phiai(
self._y_a[atomi], bigphiAI[atomi], atomi
)[1]
self.logger.info("Calculating tau and combined conditioned densities.")
# Calculate tau(r) and rho^cond(r)
# Refer to equation 65 and 66 in doi: 10.1039/c6ra04656h
# Assign rho^cond on grid using generator object
self.rho_cond = copy.deepcopy(self.charge_density)
self.rho_cond.data = numpy.zeros_like(self.rho_cond.data, dtype=float)
rho_cond_sqrt = numpy.zeros_like(self.rho_cond.data, dtype=float)
        # Dimensions of the Cartesian grid
ngridx, ngridy, ngridz = self.charge_density.data.shape
self._leftterm = numpy.zeros((self.data.natom, ngridx, ngridy, ngridz), dtype=float)
# rho_cond_cartesian is rho^cond projected on Cartesian grid
# (used for Step 4 calculation)
self._rho_cond_cartesian = numpy.zeros(
(self.data.natom, ngridx, ngridy, ngridz), dtype=float
)
self.tau = []
# rho_cond -- equation 65 in doi: 10.1039/c6ra04656h
for atomi in range(self.data.natom):
self.rho_cond.data += self._cond_density[atomi][
self.closest_r_index[atomi]
]
rho_cond_sqrt = numpy.sqrt(self.rho_cond.data)
for atomi in range(self.data.natom):
self.tau.append(numpy.zeros_like(self.proatom_density[atomi], dtype=float))
# leftterm is the first spherical average term in equation 66.
# <rho^cond_A(r_A) / sqrt(rho^cond(r))>
self._rho_cond_cartesian[atomi] = self._cond_density[atomi][
self.closest_r_index[atomi]
]
self._leftterm[atomi] = self._rho_cond_cartesian[atomi] / rho_cond_sqrt
for radiusi in range(len(self.tau[atomi])):
grid_filter = self.closest_r_index[atomi] == radiusi
num_grid_filter = numpy.count_nonzero(grid_filter)
if num_grid_filter < 1:
self.tau[atomi][radiusi] = 0.0
else:
leftaverage = numpy.sum(grid_filter * self._leftterm[atomi]) / num_grid_filter
rightaverage = numpy.sum(grid_filter * rho_cond_sqrt) / num_grid_filter
if leftaverage < 1e-20:
self.tau[atomi][radiusi] = 0.0
else:
self.tau[atomi][radiusi] = numpy.divide(
leftaverage,
rightaverage,
out=numpy.zeros_like(leftaverage),
where=rightaverage != 0.0,
)
            # Make tau monotonically decreasing
self.tau[atomi] = numpy.maximum.accumulate(self.tau[atomi][::-1])[::-1]
def _ya(self, proatom_density, atomi):
# Function that calculates Y_a^avg
# See Eq. 40-41 in doi: 10.1039/c6ra04656h
rho_ref = self._rho_ref
# Y_a^avg -- Equation 40 in doi: 10.1039/c6ra04656h
ya = numpy.zeros_like(proatom_density[atomi], dtype=float)
weights = self.charge_density.data / rho_ref
for radiusi in range(len(ya)):
grid_filter = self.closest_r_index[atomi] == radiusi
num_grid_filter = numpy.count_nonzero(grid_filter)
if num_grid_filter < 1:
ya[radiusi] = 0.0
else:
spherical_avg = numpy.sum(grid_filter * weights) / num_grid_filter
ya[radiusi] = proatom_density[atomi][radiusi] * spherical_avg
        # Make y_a monotonically decreasing
# Refer to module_reshaping_functions.f08::77-79
ya = numpy.maximum.accumulate(ya[::-1])[::-1]
ya = numpy.minimum.accumulate(ya)
# Normalize y_a (see module_DDEC6_valence_iterator.f08::284)
nelec = self._integrate_from_radial([ya], [atomi])
ya *= (self.data.atomnos[atomi] - self.reference_charges[-1][atomi]) / nelec
return ya
def _calculate_w_and_u(self):
""" Calculate weights placed on each integration grid point
[STEP 4-7]
"""
# From equation 67, w_A(r_A) = self._cond_density
# From equation 8, W(r) = self.rho_cond for the rest of this function
ngridx, ngridy, ngridz = self.charge_density.data.shape
# Evaluate rho_A(r_A) from equation 68, rho_A^avg(r_A) from equation 69,
# theta(r_A) from equation 70, _wavg from equation 71,
# N_A from equation 72, rho_wavg from equation 73, and u_A from equation 77.
self._rho_A = []
self._rho_A_avg = []
self._theta = []
self._wavg = []
N_A = []
u_A = []
self.rho_wavg = []
for atomi in range(self.data.natom):
self._rho_A.append(copy.deepcopy(self.charge_density))
# Equation 68
self._rho_A[atomi].data = numpy.divide(
self.charge_density.data * self._rho_cond_cartesian[atomi],
self.rho_cond.data,
out=numpy.zeros_like(self.charge_density.data, dtype=float),
where=self.rho_cond.data != 0,
)
self._rho_A_avg.append(numpy.zeros_like(self.proatom_density[atomi], dtype=float))
self._theta.append(numpy.zeros_like(self.proatom_density[atomi], dtype=float))
self._wavg.append(numpy.zeros_like(self.proatom_density[atomi], dtype=float))
# Equation 69, 70, and 71
self._rho_A_avg[atomi] = self._spherical_average_from_cartesian(
self._rho_A[atomi].data, atomi, self.radial_grid_r[atomi]
)
self._rho_A_avg[atomi] = numpy.maximum.accumulate(self._rho_A_avg[atomi][::-1])[::-1]
self._theta[atomi] = self._spherical_average_from_cartesian(
(
1
- numpy.divide(
self._rho_cond_cartesian[atomi],
self.rho_cond.data,
out=numpy.zeros_like(self.rho_cond.data, dtype=float),
where=self.rho_cond.data != 0,
)
)
* self._rho_A[atomi].data,
atomi,
self.radial_grid_r[atomi],
)
self._theta[atomi] = numpy.maximum.accumulate(self._theta[atomi][::-1])[::-1]
self._wavg[atomi] = self._spherical_average_from_cartesian(
numpy.divide(
self._rho_cond_cartesian[atomi],
self.rho_cond.data,
out=numpy.zeros_like(self.rho_cond.data, dtype=float),
where=self.rho_cond.data != 0,
),
atomi,
self.radial_grid_r[atomi],
)
self._wavg[atomi] = numpy.maximum.accumulate(self._wavg[atomi][::-1])[::-1]
# Equation 72, 73, and 77
N_A.append(self._rho_A[atomi].integrate())
self.rho_wavg.append(
(self._theta[atomi] + self._rho_A_avg[atomi] * self._wavg[atomi] / 5.0)
/ (1.0 - (4.0 / 5.0) * self._wavg[atomi])
)
u_A.append(self._integrate_from_radial([self._theta[atomi]], [atomi]))
self.u_A.append(u_A)
return N_A
def reshape_G(self):
""" Calculate G_A(r_A) and reshape densities
This is a quantity introduced in DDEC6 as a constraint preventing the tails from being
too diffuse.
[STEP 4-7]
"""
self._candidates_bigPhi = []
self._candidates_phi = []
# Initial conditions are detailed in Figure S3 in doi: 10.1039/c6ra04656h
phiAII = numpy.zeros_like(self.data.atomnos, dtype=float)
bigphiAII = numpy.zeros_like(self.data.atomnos, dtype=float)
self._g = []
self._eta = []
for atomi in range(self.data.natom):
# G_A -- equation S102 in doi: 10.1039/c6ra04656h
self._g.append(numpy.zeros_like(self.proatom_density[atomi], dtype=float))
self._g[atomi] = self.rho_wavg[atomi] + bigphiAII[atomi] * numpy.sqrt(
self.rho_wavg[atomi]
)
# Exponential constraint (as expressed in equation S105)
self._eta.append((1 - (self.tau[atomi]) ** 2) * 1.75 * convertor(1, "Angstrom", "bohr"))
exp_applied = self._g[atomi][:-1] * numpy.exp(
-1 * self._eta[atomi][1:] * numpy.diff(self.radial_grid_r[atomi])
)
for radiusi in range(1, len(self._g[atomi])):
self._g[atomi][radiusi] = min(self._g[atomi][radiusi], exp_applied[radiusi - 1])
# phi_A^II -- Equation S106 in doi: 10.1039/c6ra04656h
phiAII[atomi] = self._integrate_from_radial(
[self._g[atomi] - self.rho_wavg[atomi]], [atomi]
)
self._candidates_bigPhi.append([bigphiAII[atomi]])
self._candidates_phi.append([phiAII[atomi]])
            # Attempt to find the point where phiAII is zero iteratively
# Refer to S101 in doi: 10.1039/c6ra04656h
self._candidates_phi[atomi], self._candidates_bigPhi[atomi] = self._converge_phi(
phiAII[atomi], 1, atomi
)
            # Perform parabolic fit to find optimized phiAII
            # Refer to Figure S3 in doi: 10.1039/c6ra04656h
bigphiAII[atomi] = self._parabolic_fit(self.rho_wavg[atomi], 2, atomi)
# Set final G_A value using chosen Phi
self._g[atomi] = self._update_phiaii(self.rho_wavg[atomi], bigphiAII[atomi], atomi)[1]
def calculate_H(self):
""" Calculate H_A(r_A)
This is a quantity introduced in DDEC6 as a constraint preventing the tails from being
too contracted.
[STEP 4-7]
"""
self._h = []
for atomi in range(len(self._g)):
# First set H_est as G_A
self._h.append(self._g[atomi])
# Determine eta_upper using equation 86 in doi: 10.1039/c6ra04656h
# and apply upper limit using equation 91.
temp = (
1 - (self.tau[atomi]) ** 2 + self.convergence_level
) # convergence_level is added to avoid divide-by-zero in next line for highly polar molecules.
eta = 2.5 * convertor(1, "Angstrom", "bohr") / temp
exp_applied = self._h[atomi][:-1] * numpy.exp(
-1 * eta[1:] * numpy.diff(self.radial_grid_r[atomi])
)
for radiusi in range(1, len(self._h[atomi])):
self._h[atomi][radiusi] = max(self._h[atomi][radiusi], exp_applied[radiusi - 1])
# Normalize using equation 92 in doi: 10.1039/c6ra04656h.
self._h[atomi] = (
self._h[atomi]
* self._integrate_from_radial([self._g[atomi]], [atomi])
/ self._integrate_from_radial([self._h[atomi]], [atomi])
)
def _update_phiai(self, ya, bigphiAI, atomi):
# Update phi^a_i and quantity that directly follows (cond_density) in each step of
# iterative optimization. (Refer to Figure S1)
# Re-evaluate cond_density
if isinstance(bigphiAI, float) and not numpy.isinf(bigphiAI):
cond_density = ya + bigphiAI * numpy.sqrt(ya)
else:
cond_density = ya
# Monotonic Decrease Condition
cond_density = numpy.minimum.accumulate(cond_density)
# Re-evaluate phi_AI
phiAI = (
self._integrate_from_radial([cond_density], [atomi])
- self.data.atomnos[atomi]
+ self.reference_charges[-1][atomi]
)
return phiAI, cond_density
def _update_phiaii(self, rhowavg, bigphiAI, atomi):
# Update phi^a_ii and quantity that directly follows (G_A) in each step of
# iterative optimization. (Refer to Figure S3)
# Re-evaluate g_a
# Equations can be found in Figure S3.
ga = rhowavg + bigphiAI * numpy.sqrt(rhowavg)
# Exponential Decrease Condition
exp_applied = ga[:-1] * numpy.exp(
-1 * self._eta[atomi][1:] * numpy.diff(self.radial_grid_r[atomi])
)
for radiusi in range(1, len(ga)):
ga[radiusi] = min(ga[radiusi], exp_applied[radiusi - 1])
# Re-evaluate phi_AII
phiAII = self._integrate_from_radial([ga - rhowavg], [atomi])
return phiAII, ga
def _integrate_from_radial(self, radial_density_list, atom_list):
# Function that reads in list of radial densities, projects it on Cartesian grid,
# and returns integrated value
grid = copy.deepcopy(self.charge_density)
grid.data = numpy.zeros_like(grid.data, dtype=float)
for density, atomi in zip(radial_density_list, atom_list):
grid.data += density[self.closest_r_index[atomi]]
return grid.integrate()
def _spherical_average_from_cartesian(self, cartesian_grid, atom_index, radius_list):
spherical_average = numpy.zeros(len(radius_list))
for radiusi in range(len(radius_list)):
grid_filter = self.closest_r_index[atom_index] == radiusi
num_grid_filter = numpy.count_nonzero(grid_filter)
if num_grid_filter < 1:
average = 0.0
else:
average = numpy.sum(grid_filter * cartesian_grid) / num_grid_filter
if average < self.convergence_level:
average = 0.0
spherical_average[radiusi] = average
return spherical_average
def _update_rho_cond(self):
# Update total weights on Cartesian grid using equation 65 in doi: 10.1039/c6ra04656h
ngridx, ngridy, ngridz = self.charge_density.data.shape
self.rho_cond.data = numpy.zeros_like(self.rho_cond.data, dtype=float)
self._rho_cond_cartesian = numpy.zeros(
(self.data.natom, ngridx, ngridy, ngridz), dtype=float
)
for atomi in range(self.data.natom):
self.rho_cond.data += self._cond_density[atomi][
self.closest_r_index[atomi]
]
self._rho_cond_cartesian[atomi] = self._cond_density[atomi][
self.closest_r_index[atomi]
]
def _converge_phi(self, phiA, superscript, atomi):
""" Update phi until it is positive.
This is used in step 3 (for phi_A^I) and in steps 4-6 (for phi_A^II).
--- Inputs ---
phiA Either phi_A^I or phi_A^II
superscript 1 when calculating phi_I (STEP 3)
2 when calculating phi_II (STEPS 4-6)
atomi Index of target atom as in ccData object
Refer to Equation S101, Figure S1 and S3 for an overview.
"""
# Initial value of bigphi is zero (Equation S100)
bigphiA = 0.0
# List to store candidate values for parabolic fitting
candidates_phi = [phiA]
candidates_bigphi = [bigphiA]
while phiA <= 0:
# Iterative algorithm until convergence
# Refer to S101 in doi: 10.1039/c6ra04656h
if superscript == 1:
temp = self._integrate_from_radial([numpy.sqrt(self._y_a[atomi])], [atomi])
elif superscript == 2:
temp = self._integrate_from_radial([numpy.sqrt(self.rho_wavg[atomi])], [atomi])
bigphiA = 2 * bigphiA - phiA / temp
# When Phi is updated, related quantities are updated as well
# Refer to S100 in doi: 10.1039/c6ra04656h [Step 3]
# or S102 in doi: 10.1039/c6ra04656h [Steps 4-6]
if superscript == 1:
phiA, self._cond_density[atomi] = self._update_phiai(
self._y_a[atomi], bigphiA, atomi
)
elif superscript == 2:
phiA, self._g[atomi] = self._update_phiaii(self.rho_wavg[atomi], bigphiA, atomi)
candidates_phi.append(phiA)
candidates_bigphi.append(bigphiA)
return candidates_phi, candidates_bigphi
def _parabolic_fit(self, pseudodensity, superscript, atomi):
""" Optimize phi using parabolic fitting.
This is used in step 3 (for phi_A^I) and in steps 4-6 (for phi_A^II).
        --- Inputs ---
        pseudodensity   Radial density used to re-evaluate phi
                        (y_a for phi_A^I in STEP 3, rho_wavg for phi_A^II in STEPS 4-6)
        superscript     1 when calculating phi_I (STEP 3)
                        2 when calculating phi_II (STEPS 4-6)
        atomi           Index of target atom as in ccData object
Refer to Figure S1 and S3 for an overview.
"""
# Set update methods for phi_A^I or phi_A^II
if superscript == 1:
def update(pdens, bigPhi, atomi):
return self._update_phiai(pdens, bigPhi, atomi)
elif superscript == 2:
def update(pdens, bigPhi, atomi):
                return self._update_phiaii(pdens, bigPhi, atomi)
        # lowerbigPhi is the bigPhi that yields the negative phi closest to zero.
        # upperbigPhi is the bigPhi that yields the smallest positive phi.
# The point here is to find two phi values that are closest to zero (from positive side
# and negative side respectively).
self._candidates_phi[atomi] = numpy.array(self._candidates_phi[atomi], dtype=float)
self._candidates_bigPhi[atomi] = numpy.array(self._candidates_bigPhi[atomi], dtype=float)
if numpy.count_nonzero(self._candidates_phi[atomi] < 0) > 0:
# If there is at least one candidate phi that is negative
lower_ind = numpy.where(
self._candidates_phi[atomi]
== self._candidates_phi[atomi][self._candidates_phi[atomi] < 0].max()
)[0][0]
lowerbigPhi = self._candidates_bigPhi[atomi][lower_ind]
lowerphi = self._candidates_phi[atomi][lower_ind]
else: # assign some large negative number otherwise
lowerbigPhi = numpy.NINF
lowerphi = numpy.NINF
if numpy.count_nonzero(self._candidates_phi[atomi] > 0) > 0:
# If there is at least one candidate phi that is positive
upper_ind = numpy.where(
self._candidates_phi[atomi]
== self._candidates_phi[atomi][self._candidates_phi[atomi] > 0].min()
)[0][0]
upperbigPhi = self._candidates_bigPhi[atomi][upper_ind]
upperphi = self._candidates_phi[atomi][upper_ind]
else: # assign some large positive number otherwise
upperbigPhi = numpy.PINF
upperphi = numpy.PINF
for iteration in range(self.max_iteration):
# Flow diagram on Figure S1 in doi: 10.1039/c6ra04656h details the procedure.
# Find midpoint between positive bigPhi that yields phi closest to zero and negative
# bigPhi closest to zero. Then, evaluate phi.
            # This can be thought of as a bisection (linear) step, in contrast to the parabolic
            # fitting below.
midbigPhi = (lowerbigPhi + upperbigPhi) / 2.0
midphi = update(pseudodensity, midbigPhi, atomi)[0]
# Exit conditions -- if any of three phi values are within the convergence level.
if abs(lowerphi) < self.convergence_level:
return lowerbigPhi
elif abs(upperphi) < self.convergence_level:
return upperbigPhi
elif abs(midphi) < self.convergence_level:
return midbigPhi
# Parabolic fitting as described on Figure S1 in doi: 10.1039/c6ra04656h
# Type casting here converts from size 1 numpy.ndarray to float
xpts = numpy.array(
[float(lowerbigPhi), float(midbigPhi), float(upperbigPhi)], dtype=float
)
ypts = numpy.array([float(lowerphi), float(midphi), float(upperphi)], dtype=float)
fit = numpy.polyfit(xpts, ypts, 2)
roots = numpy.roots(fit) # max two roots (bigPhi) from parabolic fitting
# Find phi for two bigPhis that were obtained from parabolic fitting.
belowphi = update(pseudodensity, roots.min(), atomi)[0]
abovephi = update(pseudodensity, roots.max(), atomi)[0]
# If phi values from parabolically fitted bigPhis lie within the convergence level,
# exit the iterative algorithm.
if abs(abovephi) < self.convergence_level:
return roots.min()
elif abs(belowphi) < self.convergence_level:
return roots.max()
else:
# Otherwise, corrected phi value is obtained in a way that cuts the numerical
# search domain in half in each iteration.
if 3 * abs(abovephi) < abs(belowphi):
corbigPhi = roots.max() - 2.0 * abovephi * (roots.max() - roots.min()) / (
abovephi - belowphi
)
elif 3 * abs(belowphi) < abs(abovephi):
corbigPhi = roots.min() - 2.0 * belowphi * (roots.max() - roots.min()) / (
abovephi - belowphi
)
else:
corbigPhi = (roots.max() + roots.min()) / 2.0
                # New candidates for phi and bigPhi are taken as the bigPhi whose phi is the
                # negative value closest to zero and the bigPhi whose phi is the smallest
                # positive value. This is analogous to how the first candidate phi values
                # are evaluated.
corphi = update(pseudodensity, corbigPhi, atomi)[0]
self._candidates_bigPhi[atomi] = numpy.array(
[lowerbigPhi, midbigPhi, upperbigPhi, roots.max(), roots.min(), corbigPhi,],
dtype=float,
)
self._candidates_phi[atomi] = numpy.array(
[lowerphi, midphi, upperphi, abovephi, belowphi, corphi], dtype=float
)
# Set new upperphi and lowerphi
lower_ind = numpy.where(
self._candidates_phi[atomi]
== self._candidates_phi[atomi][self._candidates_phi[atomi] < 0].max()
)[0][0]
upper_ind = numpy.where(
self._candidates_phi[atomi]
== self._candidates_phi[atomi][self._candidates_phi[atomi] > 0].min()
)[0][0]
lowerphi = self._candidates_phi[atomi][lower_ind]
upperphi = self._candidates_phi[atomi][upper_ind]
# If new lowerphi or upperphi values are within convergence level, exit the
# iterative algorithm. Otherwise, start new linear/parabolic fitting.
if abs(lowerphi) < self.convergence_level:
return self._candidates_bigPhi[atomi][lower_ind]
elif abs(upperphi) < self.convergence_level:
return self._candidates_bigPhi[atomi][upper_ind]
else:
# Fitting needs to continue in this case.
lowerbigPhi = self._candidates_bigPhi[atomi][lower_ind]
lowerphi = self._candidates_phi[atomi][lower_ind]
upperbigPhi = self._candidates_bigPhi[atomi][upper_ind]
upperphi = self._candidates_phi[atomi][upper_ind]
# Raise Exception if convergence is not achieved within max_iteration.
raise ConvergenceError("Iterative conditioning failed to converge.")
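# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the class above). `data` and `vol`
# are placeholders for a parsed ccData object and a Volume holding the
# molecule's electron density on a Cartesian grid; obtaining them is outside
# the scope of this module.
#
#     analysis = DDEC6(data, vol, "/path/to/proatom/densities")
#     analysis.calculate()
#     analysis.fragcharges        # final DDEC6 net atomic charges
#     analysis.reference_charges  # reference charges from steps 1 and 2
# ---------------------------------------------------------------------------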
|
|
##########################################################################
#
# Copyright (c) 2014-2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import stat
import shutil
import unittest
import six
import time
import inspect
import functools
import imath
import IECore
import Gaffer
import GafferTest
import GafferDispatch
import GafferDispatchTest
class LocalDispatcherTest( GafferTest.TestCase ) :
def __createLocalDispatcher( self ) :
result = GafferDispatch.LocalDispatcher()
result["jobsDirectory"].setValue( self.temporaryDirectory() )
return result
def testDispatcherRegistration( self ) :
self.assertIn( "Local", GafferDispatch.Dispatcher.registeredDispatchers() )
self.assertIsInstance( GafferDispatch.Dispatcher.create( "Local" ), GafferDispatch.LocalDispatcher )
def testDispatch( self ) :
dispatcher = self.__createLocalDispatcher()
fileName = self.temporaryDirectory() + "/result.txt"
def createWriter( text ) :
node = GafferDispatchTest.TextWriter()
node["mode"].setValue( "a" )
node["fileName"].setValue( fileName )
node["text"].setValue( text + " on ${frame};" )
return node
# Create a tree of dependencies for execution:
# n1 requires:
# - n2 requires:
# -n2a
# -n2b
s = Gaffer.ScriptNode()
s["n1"] = createWriter( "n1" )
s["n2"] = createWriter( "n2" )
s["n2a"] = createWriter( "n2a" )
s["n2b"] = createWriter( "n2b" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n2a"]["task"] )
s["n2"]["preTasks"][1].setInput( s["n2b"]["task"] )
# No files should exist yet
self.assertEqual( os.path.isfile( fileName ), False )
# Executing n1 should trigger execution of all of them
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
		# Executing n1 and anything else should be the same as just n1, but forcing n2b execution puts it before n2a
os.remove( fileName )
dispatcher.dispatch( [ s["n2b"], s["n1"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2b on ${frame};n2a on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
# Executing all nodes should be the same as just n1
os.remove( fileName )
dispatcher.dispatch( [ s["n2"], s["n2b"], s["n1"], s["n2a"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
# Executing a sub-branch (n2) should only trigger execution in that branch
os.remove( fileName )
dispatcher.dispatch( [ s["n2"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};" )
self.assertEqual( text, expectedText )
		# Executing a leaf node should not trigger other executions.
os.remove( fileName )
dispatcher.dispatch( [ s["n2b"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = s.context().substitute( "n2b on ${frame};" )
self.assertEqual( text, expectedText )
def testDispatchDifferentFrame( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
context = Gaffer.Context( s.context() )
context.setFrame( s.context().getFrame() + 10 )
with context :
self.__createLocalDispatcher().dispatch( [ s["n1"] ] )
fileName = context.substitute( s["n1"]["fileName"].getValue() )
self.assertTrue( os.path.isfile( fileName ) )
with open( fileName, "r" ) as f :
text = f.read()
self.assertEqual( text, "%s on %d" % ( s["n1"].getName(), context.getFrame() ) )
def testDispatchFullRange( self ) :
dispatcher = self.__createLocalDispatcher()
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.FullRange )
frameList = IECore.FrameList.parse( "5-7" )
fileName = self.temporaryDirectory() + "/result.txt"
def createWriter( text ) :
node = GafferDispatchTest.TextWriter()
node["mode"].setValue( "a" )
node["fileName"].setValue( fileName )
node["text"].setValue( text + " on ${frame};" )
return node
# Create a tree of dependencies for execution:
# n1 requires:
# - n2 requires:
# -n2a
# -n2b
s = Gaffer.ScriptNode()
s["frameRange"]["start"].setValue( 5 )
s["frameRange"]["end"].setValue( 7 )
s["n1"] = createWriter( "n1" )
s["n2"] = createWriter( "n2" )
s["n2a"] = createWriter( "n2a" )
s["n2b"] = createWriter( "n2b" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n2a"]["task"] )
s["n2"]["preTasks"][1].setInput( s["n2b"]["task"] )
# No files should exist yet
self.assertEqual( os.path.isfile( fileName ), False )
# Executing n1 should trigger execution of all of them
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
		# Executing a leaf node should not trigger other executions.
os.remove( fileName )
dispatcher.dispatch( [ s["n2b"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2b on ${frame};" )
self.assertEqual( text, expectedText )
def testDispatchCustomRange( self ) :
dispatcher = self.__createLocalDispatcher()
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CustomRange )
frameList = IECore.FrameList.parse( "2-6x2" )
dispatcher["frameRange"].setValue( str(frameList) )
fileName = self.temporaryDirectory() + "/result.txt"
def createWriter( text ) :
node = GafferDispatchTest.TextWriter()
node["mode"].setValue( "a" )
node["fileName"].setValue( fileName )
node["text"].setValue( text + " on ${frame};" )
return node
# Create a tree of dependencies for execution:
# n1 requires:
# - n2 requires:
# -n2a
# -n2b
s = Gaffer.ScriptNode()
s["n1"] = createWriter( "n1" )
s["n2"] = createWriter( "n2" )
s["n2a"] = createWriter( "n2a" )
s["n2b"] = createWriter( "n2b" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n2a"]["task"] )
s["n2"]["preTasks"][1].setInput( s["n2b"]["task"] )
# No files should exist yet
self.assertEqual( os.path.isfile( fileName ), False )
# Executing n1 should trigger execution of all of them
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
		# Executing a leaf node should not trigger other executions.
os.remove( fileName )
dispatcher.dispatch( [ s["n2b"] ] )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2b on ${frame};" )
self.assertEqual( text, expectedText )
def testDispatchBadCustomRange( self ) :
dispatcher = self.__createLocalDispatcher()
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CustomRange )
dispatcher["frameRange"].setValue( "notAFrameRange" )
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
self.assertRaises( RuntimeError, dispatcher.dispatch, [ s["n1"] ] )
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
def testContextVariation( self ) :
s = Gaffer.ScriptNode()
context = Gaffer.Context( s.context() )
context["script:name"] = "notTheRealScriptName"
context["textWriter:replace"] = IECore.StringVectorData( [ " ", "\n" ] )
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/${script:name}_####.txt" )
s["n1"]["text"].setValue( "${script:name} on ${frame}" )
fileName = context.substitute( s["n1"]["fileName"].getValue() )
self.assertFalse( os.path.isfile( fileName ) )
with context :
self.__createLocalDispatcher().dispatch( [ s["n1"] ] )
self.assertTrue( os.path.isfile( fileName ) )
self.assertTrue( os.path.basename( fileName ).startswith( context["script:name"] ) )
with open( fileName, "r" ) as f :
text = f.read()
expected = "%s on %d" % ( context["script:name"], context.getFrame() )
expected = expected.replace( context["textWriter:replace"][0], context["textWriter:replace"][1] )
self.assertEqual( text, expected )
def testDispatcherSignals( self ) :
preCs = GafferTest.CapturingSlot( GafferDispatch.Dispatcher.preDispatchSignal() )
self.assertEqual( len( preCs ), 0 )
		dispatchCs = GafferTest.CapturingSlot( GafferDispatch.Dispatcher.dispatchSignal() )
self.assertEqual( len( dispatchCs ), 0 )
postCs = GafferTest.CapturingSlot( GafferDispatch.Dispatcher.postDispatchSignal() )
self.assertEqual( len( postCs ), 0 )
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( len( preCs ), 1 )
self.assertTrue( preCs[0][0].isSame( dispatcher ) )
self.assertEqual( preCs[0][1], [ s["n1"] ] )
self.assertEqual( len( dispatchCs ), 1 )
self.assertTrue( dispatchCs[0][0].isSame( dispatcher ) )
self.assertEqual( dispatchCs[0][1], [ s["n1"] ] )
self.assertEqual( len( postCs ), 1 )
self.assertTrue( postCs[0][0].isSame( dispatcher ) )
self.assertEqual( postCs[0][1], [ s["n1"] ] )
def testExecuteInBackground( self ) :
preCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.preDispatchSignal() )
self.assertEqual( len( preCs ), 0 )
dispatchCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.dispatchSignal() )
self.assertEqual( len( dispatchCs ), 0 )
postCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.postDispatchSignal() )
self.assertEqual( len( postCs ), 0 )
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n1"] ] )
# the dispatching started and finished
self.assertEqual( len( preCs ), 1 )
self.assertEqual( len( dispatchCs ), 1 )
self.assertEqual( len( postCs ), 1 )
# but the execution hasn't finished yet
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
# wait long enough to finish execution
self.assertEqual( len(dispatcher.jobPool().jobs()), 1 )
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
self.assertTrue( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
def testMixedImmediateAndBackground( self ) :
preCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.preDispatchSignal() )
self.assertEqual( len( preCs ), 0 )
dispatchCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.dispatchSignal() )
self.assertEqual( len( dispatchCs ), 0 )
postCs = GafferTest.CapturingSlot( GafferDispatch.LocalDispatcher.postDispatchSignal() )
self.assertEqual( len( postCs ), 0 )
fileName = self.temporaryDirectory() + "/result.txt"
def createWriter( text ) :
node = GafferDispatchTest.TextWriter()
node["mode"].setValue( "a" )
node["fileName"].setValue( fileName )
node["text"].setValue( text + " on ${frame};" )
return node
s = Gaffer.ScriptNode()
# Create a tree of dependencies for execution:
# n1 requires:
# - n2 requires:
# -n2a
# -n2b
# - n3
s = Gaffer.ScriptNode()
s["n1"] = createWriter( "n1" )
s["n2"] = createWriter( "n2" )
# force the entire n2 tree to execute in the foreground
s["n2"]["dispatcher"]["immediate"].setValue( True )
s["n2a"] = createWriter( "n2a" )
s["n2b"] = createWriter( "n2b" )
s["n3"] = createWriter( "n3" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n1"]["preTasks"][1].setInput( s["n3"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n2a"]["task"] )
s["n2"]["preTasks"][1].setInput( s["n2b"]["task"] )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CustomRange )
frameList = IECore.FrameList.parse( "2-6x2" )
dispatcher["frameRange"].setValue( str(frameList) )
dispatcher.dispatch( [ s["n1"] ] )
# the dispatching started and finished
self.assertEqual( len( preCs ), 1 )
self.assertEqual( len( dispatchCs ), 1 )
self.assertEqual( len( postCs ), 1 )
# all the foreground execution has finished
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
expectedText = ""
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n2a on ${frame};n2b on ${frame};n2 on ${frame};" )
self.assertEqual( text, expectedText )
# wait long enough for background execution to finish
self.assertEqual( len(dispatcher.jobPool().jobs()), 1 )
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
self.assertEqual( os.path.isfile( fileName ), True )
with open( fileName, "r" ) as f :
text = f.read()
# don't reset the expectedText since we're still appending
for frame in frameList.asList() :
context = Gaffer.Context( s.context() )
context.setFrame( frame )
expectedText += context.substitute( "n3 on ${frame};n1 on ${frame};" )
self.assertEqual( text, expectedText )
def testMultipleDispatchers( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher2 = self.__createLocalDispatcher()
dispatcher2["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n1"] ] )
c = s.context()
c.setFrame( 2 )
with c :
dispatcher2.dispatch( [ s["n1"] ] )
# wait long enough for background execution to finish
self.assertEqual( len(dispatcher.jobPool().jobs()), 2 )
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
self.assertTrue( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
self.assertTrue( os.path.isfile( c.substitute( s["n1"]["fileName"].getValue() ) ) )
def testFailure( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
s["n2"] = GafferDispatchTest.TextWriter()
s["n2"]["fileName"].setValue( "" )
s["n2"]["text"].setValue( "n2 on ${frame}" )
s["n3"] = GafferDispatchTest.TextWriter()
s["n3"]["fileName"].setValue( self.temporaryDirectory() + "/n3_####.txt" )
s["n3"]["text"].setValue( "n3 on ${frame}" )
s["n1"]["preTasks"][0].setInput( s["n2"]["task"] )
s["n2"]["preTasks"][0].setInput( s["n3"]["task"] )
dispatcher = self.__createLocalDispatcher()
# fails because n2 doesn't have a valid fileName
six.assertRaisesRegex( self, RuntimeError, "No such file or directory", functools.partial( dispatcher.dispatch, [ s["n1"] ] ) )
# it still cleans up the JobPool
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
# n3 executed correctly
self.assertTrue( os.path.isfile( s.context().substitute( s["n3"]["fileName"].getValue() ) ) )
with open( s.context().substitute( s["n3"]["fileName"].getValue() ), "r" ) as f :
text = f.read()
self.assertEqual( text, "n3 on %d" % s.context().getFrame() )
# n2 failed, so n1 never executed
self.assertFalse( os.path.isfile( s.context().substitute( s["n2"]["fileName"].getValue() ) ) )
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
self.tearDown()
dispatcher["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n1"] ] )
# wait long enough for background execution to finish
self.assertEqual( len(dispatcher.jobPool().jobs()), 1 )
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
# n3 executed correctly
self.assertTrue( os.path.isfile( s.context().substitute( s["n3"]["fileName"].getValue() ) ) )
with open( s.context().substitute( s["n3"]["fileName"].getValue() ), "r" ) as f :
text = f.read()
self.assertEqual( text, "n3 on %d" % s.context().getFrame() )
# n2 failed, so n1 never executed
self.assertFalse( os.path.isfile( s.context().substitute( s["n2"]["fileName"].getValue() ) ) )
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
def testKill( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/n1_####.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
dispatcher.dispatch( [ s["n1"] ] )
self.assertEqual( len(dispatcher.jobPool().jobs()), 1 )
# the execution hasn't finished yet
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
# kill the job
dispatcher.jobPool().jobs()[0].kill()
# wait long enough for the process to die
dispatcher.jobPool().waitForAll()
self.assertEqual( len(dispatcher.jobPool().jobs()), 0 )
# make sure it never wrote the file
self.assertFalse( os.path.isfile( s.context().substitute( s["n1"]["fileName"].getValue() ) ) )
def testSpacesInContext( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.TextWriter()
s["n"]["fileName"].setValue( self.temporaryDirectory() + "/test.txt" )
s["n"]["text"].setValue( "${test}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
c = Gaffer.Context()
c["test"] = "i am a string with spaces"
with c :
dispatcher.dispatch( [ s["n"] ] )
dispatcher.jobPool().waitForAll()
text = "".join( open( self.temporaryDirectory() + "/test.txt" ).readlines() )
self.assertEqual( text, "i am a string with spaces" )
def testUIContextEntriesIgnored( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.TextWriter()
s["n"]["fileName"].setValue( self.temporaryDirectory() + "/out.txt" )
s["n"]["text"].setValue( "${foo} ${ui:foo}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
c = Gaffer.Context()
c["ui:foo"] = "uiFoo"
c["foo"] = "foo"
with c :
dispatcher.dispatch( [ s["n"] ] )
dispatcher.jobPool().waitForAll()
text = "".join( open( self.temporaryDirectory() + "/out.txt" ).readlines() )
self.assertEqual( text, "foo " )
def testContextLockedDuringBackgroundDispatch( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/out.txt" )
s["n1"]["text"].setValue( "n1 on ${frame} with ${foo}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
c = Gaffer.Context( s.context() )
c["foo"] = "foo"
with c :
dispatcher.dispatch( [ s["n1"] ] )
self.assertFalse( os.path.isfile( self.temporaryDirectory() + "/out.txt" ) )
foo = s["variables"].addChild( Gaffer.NameValuePlug( "foo", IECore.StringData( "foo" ) ) )
dispatcher.jobPool().waitForAll()
self.assertTrue( os.path.isfile( self.temporaryDirectory() + "/out.txt" ) )
text = "".join( open( self.temporaryDirectory() + "/out.txt" ).readlines() )
self.assertEqual( text, "n1 on 1 with foo" )
def testNodeNamesLockedDuringBackgroundDispatch( self ) :
s = Gaffer.ScriptNode()
s["n1"] = GafferDispatchTest.TextWriter()
s["n1"]["fileName"].setValue( self.temporaryDirectory() + "/out.txt" )
s["n1"]["text"].setValue( "n1 on ${frame}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n1"] ] )
self.assertFalse( os.path.isfile( self.temporaryDirectory() + "/out.txt" ) )
s["n1"].setName( "n2" )
dispatcher.jobPool().waitForAll()
self.assertTrue( os.path.isfile( self.temporaryDirectory() + "/out.txt" ) )
text = "".join( open( self.temporaryDirectory() + "/out.txt" ).readlines() )
self.assertEqual( text, "n1 on 1" )
def testIgnoreScriptLoadErrors( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferDispatchTest.TextWriter()
s["n"]["fileName"].setValue( self.temporaryDirectory() + "/scriptLoadErrorTest.txt" )
s["n"]["text"].setValue( "test" )
# because this doesn't have the dynamic flag set,
# it won't serialise/load properly.
s["n"]["user"]["badPlug"] = Gaffer.IntPlug()
s["n"]["user"]["badPlug"].setValue( 10 )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher.dispatch( [ s["n"] ] )
dispatcher.jobPool().waitForAll()
self.assertFalse( os.path.isfile( self.temporaryDirectory() + "/scriptLoadErrorTest.txt" ) )
dispatcher["ignoreScriptLoadErrors"].setValue( True )
dispatcher.dispatch( [ s["n"] ] )
dispatcher.jobPool().waitForAll()
self.assertTrue( os.path.isfile( self.temporaryDirectory() + "/scriptLoadErrorTest.txt" ) )
def testBackgroundBatchesCanAccessJobDirectory( self ) :
s = Gaffer.ScriptNode()
s["w"] = GafferDispatchTest.TextWriter()
s["w"]["fileName"].setValue( "${dispatcher:jobDirectory}/test.####.txt" )
s["w"]["text"].setValue( "w on ${frame} from ${dispatcher:jobDirectory}" )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CustomRange )
frameList = IECore.FrameList.parse( "2-6x2" )
dispatcher["frameRange"].setValue( str(frameList) )
dispatcher.dispatch( [ s["w"] ] )
dispatcher.jobPool().waitForAll()
# a single dispatch should have the same job directory for all batches
jobDir = dispatcher["jobsDirectory"].getValue() + "/000000"
self.assertEqual( next( open( "%s/test.0002.txt" % jobDir ) ), "w on 2 from %s" % jobDir )
self.assertEqual( next( open( "%s/test.0004.txt" % jobDir ) ), "w on 4 from %s" % jobDir )
self.assertEqual( next( open( "%s/test.0006.txt" % jobDir ) ), "w on 6 from %s" % jobDir )
def testEnvironmentCommand( self ) :
s = Gaffer.ScriptNode()
testFile = os.path.join( self.temporaryDirectory(), "test" )
s["c"] = GafferDispatch.SystemCommand()
s["c"]["command"].setValue( r"echo HELLO \$GAFFERDISPATCHTEST_ENVVAR > " + testFile )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
dispatcher.dispatch( [ s["c"] ] )
dispatcher.jobPool().waitForAll()
with open( testFile ) as f :
self.assertEqual( f.readlines(), [ "HELLO\n" ] )
dispatcher["environmentCommand"].setValue( "env GAFFERDISPATCHTEST_ENVVAR=WORLD" )
dispatcher.dispatch( [ s["c"] ] )
dispatcher.jobPool().waitForAll()
with open( testFile ) as f :
self.assertEqual( f.readlines(), [ "HELLO WORLD\n" ] )
def testEnvironmentCommandSubstitutions( self ) :
s = Gaffer.ScriptNode()
testFile = os.path.join( self.temporaryDirectory(), "test" )
s["c"] = GafferDispatch.SystemCommand()
s["c"]["command"].setValue( r"echo HELLO \$GAFFERDISPATCHTEST_ENVVAR > " + testFile )
dispatcher = self.__createLocalDispatcher()
dispatcher["executeInBackground"].setValue( True )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
dispatcher["environmentCommand"].setValue( "env GAFFERDISPATCHTEST_ENVVAR=$world" )
with Gaffer.Context() as c :
c["world"] = "WORLD"
dispatcher.dispatch( [ s["c"] ] )
dispatcher.jobPool().waitForAll()
with open( testFile ) as f :
self.assertEqual( f.readlines(), [ "HELLO WORLD\n" ] )
def testScaling( self ) :
# See DispatcherTest.testScaling for details.
s = Gaffer.ScriptNode()
lastTask = None
for i in range( 0, 5 ) :
perFrame = GafferDispatch.PythonCommand()
perFrame["command"].setValue( "context.getFrame()" )
s["perFrame%d" % i] = perFrame
if lastTask is not None :
perFrame["preTasks"][0].setInput( lastTask["task"] )
perSequence = GafferDispatch.PythonCommand()
perSequence["command"].setValue( "pass" )
perSequence["sequence"].setValue( True )
perSequence["preTasks"][0].setInput( perFrame["task"] )
s["perSequence%d" % i] = perSequence
lastTask = perSequence
d = self.__createLocalDispatcher()
d["framesMode"].setValue( d.FramesMode.CustomRange )
d["frameRange"].setValue( "1-1000" )
clock = time.process_time if six.PY3 else time.clock
t = clock()
d.dispatch( [ lastTask ] )
timeLimit = 6
if Gaffer.isDebug():
timeLimit *= 2
self.assertLess( clock() - t, timeLimit )
d["executeInBackground"].setValue( True )
d.dispatch( [ lastTask ] )
t = clock()
d.jobPool().jobs()[0].kill()
self.assertLess( clock() - t, 1 )
d.jobPool().waitForAll()
def testImathContextVariable( self ) :
s = Gaffer.ScriptNode()
s["t"] = GafferDispatchTest.TextWriter()
s["t"]["fileName"].setValue( self.temporaryDirectory() + "/test.txt" )
s["e"] = Gaffer.Expression()
s["e"].setExpression( inspect.cleandoc(
"""
c = context["c"]
parent["t"]["text"] = "{0} {1} {2}".format( *c )
"""
) )
s["v"] = GafferDispatch.TaskContextVariables()
s["v"]["variables"].addChild( Gaffer.NameValuePlug( "c", imath.Color3f( 0, 1, 2 ) ) )
s["v"]["preTasks"][0].setInput( s["t"]["task"] )
d = self.__createLocalDispatcher()
d["executeInBackground"].setValue( True )
d.dispatch( [ s["v"] ] )
d.jobPool().waitForAll()
self.assertEqual(
open( s["t"]["fileName"].getValue() ).read(),
"0.0 1.0 2.0"
)
def testNestedDispatchBorrowingOuterJobDirectory( self ) :
s = Gaffer.ScriptNode()
s["nestedTask"] = GafferDispatchTest.TextWriter()
s["nestedTask"]["fileName"].setValue( self.temporaryDirectory() + "/nested.txt" )
s["nestedTask"]["text"].setValue( "${dispatcher:jobDirectory} : ${dispatcher:scriptFileName}" )
s["dispatchTask"] = GafferDispatch.PythonCommand()
s["dispatchTask"]["command"].setValue( inspect.cleandoc(
"""
import GafferDispatch
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher.dispatch( [ self.parent()["nestedTask"] ] )
"""
) )
s["outerTask"] = GafferDispatchTest.TextWriter()
s["outerTask"]["preTasks"][0].setInput( s["dispatchTask"]["task"] )
s["outerTask"]["fileName"].setValue( self.temporaryDirectory() + "/outer.txt" )
s["outerTask"]["text"].setValue( "${dispatcher:jobDirectory} : ${dispatcher:scriptFileName}" )
d = self.__createLocalDispatcher()
d["executeInBackground"].setValue( True )
d.dispatch( [ s["outerTask"] ] )
d.jobPool().waitForAll()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/nested.txt" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/outer.txt" ) )
self.assertEqual(
open( self.temporaryDirectory() + "/nested.txt" ).readlines(),
open( self.temporaryDirectory() + "/outer.txt" ).readlines(),
)
if __name__ == "__main__":
unittest.main()
|
|
'''
Created on Oct 13, 2014
@author: Abanoub Milad Nassief
'''
def build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score):
"""
Takes as input a set of characters alphabet
and three scores diag_score, off_diag_score,
and dash_score. The function returns a dictionary
of dictionaries whose entries are indexed by pairs
of characters in alphabet plus '-'. The score for
any entry indexed by one or more dashes is dash_score.
The score for the remaining diagonal entries is diag_score.
Finally, the score for the remaining off-diagonal entries is off_diag_score
"""
    # Copy the alphabet so the caller's set is not mutated when '-' is added.
    alphabet = set(alphabet)
    alphabet.add('-')
    scoring_matrix = {}
    for first_ltr in alphabet:
        temp = {}
        for sec_ltr in alphabet:
            if first_ltr == sec_ltr and first_ltr != '-':
                temp[sec_ltr] = diag_score
            elif first_ltr == '-' or sec_ltr == '-':
                temp[sec_ltr] = dash_score
            else:
                temp[sec_ltr] = off_diag_score
        scoring_matrix[first_ltr] = temp
    return scoring_matrix
def compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag):
"""
Takes as input two sequences seq_x and seq_y
whose elements share a common alphabet with
the scoring matrix scoring_matrix. The function
computes and returns the alignment matrix for
seq_x and seq_y as described in the Homework.
If global_flag is True, each entry of the alignment
matrix is computed using the method described in Question 8 of the Homework.
If global_flag is False, each entry is computed
using the method described in Question 12 of the Homework.
"""
rows = len(seq_x) + 1
cols = len(seq_y) + 1
    # Initialize the alignment matrix with zeros.
    s_matrix = [[0 for _ in range(cols)] for _ in range(rows)]
if global_flag:
for row in range(1, rows):
s_matrix[row][0] = s_matrix[row - 1][0] + scoring_matrix[seq_x[row - 1]]['-']
for col in range(1, cols):
s_matrix[0][col] = s_matrix[0][col - 1] + scoring_matrix['-'][seq_y[col - 1]]
for row in range(1, rows):
for col in range(1, cols):
s_matrix[row][col] = max(s_matrix[row - 1][col - 1]
+ scoring_matrix[seq_x[row - 1]][seq_y[col - 1]],
max(s_matrix[row - 1][col]
+ scoring_matrix[seq_x[row - 1]]['-'],
s_matrix[row][col - 1]
+ scoring_matrix['-'][seq_y[col - 1]]))
else:
for row in range(1, rows):
s_matrix[row][0] = max(0, s_matrix[row - 1][0] +
scoring_matrix[seq_x[row - 1]]['-'])
for col in range(1, cols):
s_matrix[0][col] = max(0, s_matrix[0][col - 1] +
scoring_matrix['-'][seq_y[col - 1]])
for row in range(1, rows):
for col in range(1, cols):
s_matrix[row][col] = max(s_matrix[row - 1][col - 1]
+ scoring_matrix[seq_x[row - 1]][seq_y[col - 1]],
max(s_matrix[row - 1][col]
+ scoring_matrix[seq_x[row - 1]]['-'],
s_matrix[row][col - 1]
+ scoring_matrix['-'][seq_y[col - 1]]))
if s_matrix[row][col] < 0:
s_matrix[row][col] = 0
return s_matrix
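# Worked example (hand-traced, added for illustration only): with the scoring
# matrix from build_scoring_matrix(set(['A', 'C', 'T', 'G']), 10, 4, -6), the
# global alignment matrix (global_flag=True) for seq_x='A', seq_y='AT' is
#
#   [[ 0, -6, -12],
#    [-6, 10,   4]]
#
# following the recurrence S[i][j] = max(S[i-1][j-1] + score(x_i, y_j),
#                                        S[i-1][j]   + score(x_i, '-'),
#                                        S[i][j-1]   + score('-', y_j)).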
def compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):
"""
    Implements the method ComputeAlignment discussed in Question 9 of the Homework.
Takes as input two sequences seq_x and seq_y whose elements
share a common alphabet with the scoring matrix scoring_matrix.
This function computes a global alignment of seq_x and seq_y using
the global alignment matrix alignment_matrix.
The function returns a tuple of the form (score, align_x, align_y)
where score is the score of the global alignment align_x and align_y.
Note that align_x and align_y should have the same length and may
include the padding character '-'.
"""
rows = len(seq_x)
cols = len(seq_y)
par_x = ''
par_y = ''
score = 0
while rows != 0 and cols != 0:
if alignment_matrix[rows][cols] == \
alignment_matrix[rows - 1][cols - 1] + \
scoring_matrix[seq_x[rows - 1]][seq_y[cols - 1]]:
par_x = seq_x[rows - 1] + par_x
par_y = seq_y[cols - 1] + par_y
score += scoring_matrix[seq_x[rows - 1]][seq_y[cols - 1]]
rows -= 1
cols -= 1
else:
if alignment_matrix[rows][cols] == \
alignment_matrix[rows - 1][cols] + \
scoring_matrix[seq_x[rows - 1]]['-']:
par_x = seq_x[rows - 1] + par_x
par_y = '-' + par_y
score += scoring_matrix[seq_x[rows - 1]]['-']
rows -= 1
else:
par_x = '-' + par_x
par_y = seq_y[cols - 1] + par_y
score += scoring_matrix['-'][seq_y[cols - 1]]
cols -= 1
while rows != 0:
par_x = seq_x[rows - 1] + par_x
par_y = '-' + par_y
score += scoring_matrix[seq_x[rows - 1]]['-']
rows -= 1
while cols != 0:
par_x = '-' + par_x
par_y = seq_y[cols - 1] + par_y
score += scoring_matrix['-'][seq_y[cols - 1]]
cols -= 1
return (score, par_x, par_y)
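# Continuing the worked example above (hand-traced, for illustration only):
# tracing back through that matrix, compute_global_alignment should return
# (4, 'A-', 'AT') -- 'A' is matched with 'A' (+10) and a dash is paired with
# 'T' (-6), for a total score of 4.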
def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):
"""
This second function will compute an optimal local alignment
starting at the maximum entry of the local alignment matrix
and working backwards to zero as described in Question 13
of the Homework.
Takes as input two sequences seq_x and seq_y whose elements
share a common alphabet with the scoring matrix scoring_matrix.
This function computes a local alignment of seq_x and seq_y using
the local alignment matrix alignment_matrix.
The function returns a tuple of the form (score, align_x, align_y)
where score is the score of the optimal local alignment align_x and
align_y. Note that align_x and align_y should have the same length
and may include the padding character '-'.
"""
max_val = float("-inf")
max_tuple = (0, 0)
for row in range(len(seq_x) + 1):
for col in range(len(seq_y) + 1):
if alignment_matrix[row][col] > max_val:
max_val = alignment_matrix[row][col]
max_tuple = (row, col)
par_x = ''
par_y = ''
score = 0
rows = max_tuple[0]
cols = max_tuple[1]
while rows != 0 and cols != 0 and alignment_matrix[rows][cols] != 0:
if alignment_matrix[rows][cols] == \
alignment_matrix[rows - 1][cols - 1] + \
scoring_matrix[seq_x[rows - 1]][seq_y[cols - 1]]:
par_x = seq_x[rows - 1] + par_x
par_y = seq_y[cols - 1] + par_y
score += scoring_matrix[seq_x[rows - 1]][seq_y[cols - 1]]
rows -= 1
cols -= 1
else:
if alignment_matrix[rows][cols] == \
alignment_matrix[rows - 1][cols] + \
scoring_matrix[seq_x[rows - 1]]['-']:
par_x = seq_x[rows - 1] + par_x
par_y = '-' + par_y
score += scoring_matrix[seq_x[rows - 1]]['-']
rows -= 1
else:
par_x = '-' + par_x
par_y = seq_y[cols - 1] + par_y
score += scoring_matrix['-'][seq_y[cols - 1]]
cols -= 1
return (score, par_x, par_y)
# m1=build_scoring_matrix(set(['A','C','T','G']), 10, 4, -6)
# m2=compute_alignment_matrix('AA', 'TAAT', m1, False)
# print(compute_local_alignment('AA', 'TAAT', m1, m2))
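# Hand-traced expected output of the commented-out example above (for
# reference only): the best local alignment of 'AA' and 'TAAT' under that
# scoring matrix pairs 'AA' with the middle 'AA' of 'TAAT', so the call
# should print (20, 'AA', 'AA').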
|
|
# -*- coding: utf-8 -*-
import contextlib
import hashlib
import random
import urllib
import requests
from requests_oauthlib import OAuth1
from .exceptions import LinkedInError
from .models import AccessToken, LinkedInInvitation, LinkedInMessage
from .utils import enum, to_utf8, raise_for_error, json, StringIO
__all__ = ['LinkedInAuthentication', 'LinkedInApplication', 'PERMISSIONS']
PERMISSIONS = enum('Permission',
BASIC_PROFILE='r_basicprofile',
FULL_PROFILE='r_fullprofile',
EMAIL_ADDRESS='r_emailaddress',
NETWORK='r_network',
CONTACT_INFO='r_contactinfo',
NETWORK_UPDATES='rw_nus',
GROUPS='rw_groups',
MESSAGES='w_messages')
ENDPOINTS = enum('LinkedInURL',
PEOPLE='https://api.linkedin.com/v1/people',
PEOPLE_SEARCH='https://api.linkedin.com/v1/people-search',
GROUPS='https://api.linkedin.com/v1/groups',
POSTS='https://api.linkedin.com/v1/posts',
COMPANIES='https://api.linkedin.com/v1/companies',
COMPANY_SEARCH='https://api.linkedin.com/v1/company-search',
JOBS='https://api.linkedin.com/v1/jobs',
JOB_SEARCH='https://api.linkedin.com/v1/job-search')
NETWORK_UPDATES = enum('NetworkUpdate',
APPLICATION='APPS',
COMPANY='CMPY',
CONNECTION='CONN',
JOB='JOBS',
GROUP='JGRP',
PICTURE='PICT',
EXTENDED_PROFILE='PRFX',
CHANGED_PROFILE='PRFU',
SHARED='SHAR',
VIRAL='VIRL')
class LinkedInDeveloperAuthentication(object):
"""
Uses all four credentials provided by LinkedIn as part of an OAuth 1.0a
flow that provides instant API access with no redirects/approvals required.
Useful for situations in which users would like to access their own data or
during the development process.
"""
    def __init__(self, consumer_key, consumer_secret, user_token, user_secret,
                 redirect_uri, permissions=None):
        self.consumer_key = consumer_key
        self.consumer_secret = consumer_secret
        self.user_token = user_token
        self.user_secret = user_secret
        self.redirect_uri = redirect_uri
        self.permissions = permissions or []
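# Illustrative sketch (all credential values below are placeholders): with the
# four OAuth 1.0a credentials, requests are signed directly and no browser
# redirect step is needed.
#
#   dev_auth = LinkedInDeveloperAuthentication(
#       'CONSUMER_KEY', 'CONSUMER_SECRET', 'USER_TOKEN', 'USER_SECRET',
#       'http://localhost:8000/callback',
#       permissions=[PERMISSIONS.BASIC_PROFILE])
#   application = LinkedInApplication(authentication=dev_auth)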
class LinkedInAuthentication(object):
"""
Implements a standard OAuth 2.0 flow that involves redirection for users to
authorize the application to access account data.
"""
AUTHORIZATION_URL = 'https://www.linkedin.com/uas/oauth2/authorization'
ACCESS_TOKEN_URL = 'https://www.linkedin.com/uas/oauth2/accessToken'
def __init__(self, key, secret, redirect_uri, permissions=None):
self.key = key
self.secret = secret
self.redirect_uri = redirect_uri
self.permissions = permissions or []
self.state = None
self.authorization_code = None
self.token = None
self._error = None
@property
def authorization_url(self):
self.state = self._make_new_state()
qd = {'response_type': 'code',
'client_id': self.key,
'scope': (' '.join(self.permissions)).strip(),
'state': self.state,
'redirect_uri': self.redirect_uri}
        # urlencode uses quote_plus when encoding the query string, so we
        # encode the query string ourselves here using urllib.quote instead.
qsl = ['%s=%s' % (urllib.quote(k), urllib.quote(v)) for k, v in qd.items()]
return '%s?%s' % (self.AUTHORIZATION_URL, '&'.join(qsl))
@property
def last_error(self):
return self._error
def _make_new_state(self):
return hashlib.md5(
'%s%s' % (random.randrange(0, 2 ** 63), self.secret)).hexdigest()
def get_access_token(self, timeout=60):
assert self.authorization_code, 'You must first get the authorization code'
qd = {'grant_type': 'authorization_code',
'code': self.authorization_code,
'redirect_uri': self.redirect_uri,
'client_id': self.key,
'client_secret': self.secret}
response = requests.post(self.ACCESS_TOKEN_URL, data=qd, timeout=timeout)
raise_for_error(response)
response = response.json()
self.token = AccessToken(response['access_token'], response['expires_in'])
return self.token
class LinkedInSelector(object):
@classmethod
def parse(cls, selector):
with contextlib.closing(StringIO()) as result:
if type(selector) == dict:
for k, v in selector.items():
result.write('%s:(%s)' % (to_utf8(k), cls.parse(v)))
elif type(selector) in (list, tuple):
result.write(','.join(map(cls.parse, selector)))
else:
result.write(to_utf8(selector))
return result.getvalue()
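# Illustrative examples of the selector syntax produced by LinkedInSelector.parse:
#
#   LinkedInSelector.parse(['id', 'first-name'])           -> 'id,first-name'
#   LinkedInSelector.parse({'positions': ['title']})        -> 'positions:(title)'
#   LinkedInSelector.parse({'positions': ['title', 'id']})  -> 'positions:(title,id)'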
class LinkedInApplication(object):
BASE_URL = 'https://api.linkedin.com'
def __init__(self, authentication=None, token=None):
assert authentication or token, 'Either authentication instance or access token is required'
self.authentication = authentication
if not self.authentication:
self.authentication = LinkedInAuthentication('', '', '')
self.authentication.token = AccessToken(token, None)
def make_request(self, method, url, data=None, params=None, headers=None,
timeout=60):
if headers is None:
headers = {'x-li-format': 'json', 'Content-Type': 'application/json'}
else:
headers.update({'x-li-format': 'json', 'Content-Type': 'application/json'})
if params is None:
params = {}
kw = dict(data=data, params=params,
headers=headers, timeout=timeout)
if isinstance(self.authentication, LinkedInDeveloperAuthentication):
# Let requests_oauthlib.OAuth1 do *all* of the work here
auth = OAuth1(self.authentication.consumer_key, self.authentication.consumer_secret,
self.authentication.user_token, self.authentication.user_secret)
kw.update({'auth': auth})
else:
params.update({'oauth2_access_token': self.authentication.token.access_token})
return requests.request(method.upper(), url, **kw)
def get_profile(self, member_id=None, member_url=None, selectors=None,
params=None, headers=None):
if member_id:
if type(member_id) is list:
# Batch request, ids as CSV.
url = '%s::(%s)' % (ENDPOINTS.PEOPLE,
','.join(member_id))
else:
url = '%s/id=%s' % (ENDPOINTS.PEOPLE, str(member_id))
elif member_url:
url = '%s/url=%s' % (ENDPOINTS.PEOPLE, urllib.quote_plus(member_url))
else:
url = '%s/~' % ENDPOINTS.PEOPLE
if selectors:
url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def search_profile(self, selectors=None, params=None, headers=None):
if selectors:
url = '%s:(%s)' % (ENDPOINTS.PEOPLE_SEARCH,
LinkedInSelector.parse(selectors))
else:
url = ENDPOINTS.PEOPLE_SEARCH
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_picture_urls(self, member_id=None, member_url=None,
params=None, headers=None):
if member_id:
url = '%s/id=%s/picture-urls::(original)' % (ENDPOINTS.PEOPLE, str(member_id))
elif member_url:
url = '%s/url=%s/picture-urls::(original)' % (ENDPOINTS.PEOPLE,
urllib.quote_plus(member_url))
else:
url = '%s/~/picture-urls::(original)' % ENDPOINTS.PEOPLE
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_connections(self, member_id=None, member_url=None, selectors=None,
params=None, headers=None):
if member_id:
url = '%s/id=%s/connections' % (ENDPOINTS.PEOPLE, str(member_id))
elif member_url:
url = '%s/url=%s/connections' % (ENDPOINTS.PEOPLE,
urllib.quote_plus(member_url))
else:
url = '%s/~/connections' % ENDPOINTS.PEOPLE
if selectors:
url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_memberships(self, member_id=None, member_url=None, group_id=None,
selectors=None, params=None, headers=None):
if member_id:
url = '%s/id=%s/group-memberships' % (ENDPOINTS.PEOPLE, str(member_id))
elif member_url:
url = '%s/url=%s/group-memberships' % (ENDPOINTS.PEOPLE,
urllib.quote_plus(member_url))
else:
url = '%s/~/group-memberships' % ENDPOINTS.PEOPLE
if group_id:
url = '%s/%s' % (url, str(group_id))
if selectors:
url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_group(self, group_id, selectors=None, params=None, headers=None):
url = '%s/%s' % (ENDPOINTS.GROUPS, str(group_id))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_posts(self, group_id, post_ids=None, selectors=None, params=None,
headers=None):
url = '%s/%s/posts' % (ENDPOINTS.GROUPS, str(group_id))
if post_ids:
url = '%s::(%s)' % (url, ','.join(map(str, post_ids)))
if selectors:
url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def join_group(self, group_id):
url = '%s/~/group-memberships/%s' % (ENDPOINTS.PEOPLE, str(group_id))
response = self.make_request('PUT', url,
data=json.dumps({'membershipState': {'code': 'member'}}))
raise_for_error(response)
return True
def leave_group(self, group_id):
url = '%s/~/group-memberships/%s' % (ENDPOINTS.PEOPLE, str(group_id))
response = self.make_request('DELETE', url)
raise_for_error(response)
return True
def submit_group_post(self, group_id, title, summary, submitted_url,
submitted_image_url, content_title, description):
post = {
'title': title, 'summary': summary,
'content': {
'submitted-url': submitted_url,
'submitted-image-url': submitted_image_url,
'title': content_title,
'description': description
}
}
url = '%s/%s/posts' % (ENDPOINTS.GROUPS, str(group_id))
response = self.make_request('POST', url, data=json.dumps(post))
raise_for_error(response)
return True
    def like_post(self, post_id, is_liked=True):
        url = '%s/%s/relation-to-viewer/is-liked' % (ENDPOINTS.POSTS, str(post_id))
        try:
            response = self.make_request('PUT', url, data=json.dumps(is_liked))
            response.raise_for_status()
        except (requests.ConnectionError, requests.HTTPError) as error:
            raise LinkedInError(error.message)
        return True
    def follow_post(self, post_id, is_following=True):
        url = '%s/%s/relation-to-viewer/is-following' % (ENDPOINTS.POSTS, str(post_id))
        try:
            response = self.make_request('PUT', url, data=json.dumps(is_following))
            response.raise_for_status()
        except (requests.ConnectionError, requests.HTTPError) as error:
            raise LinkedInError(error.message)
        return True
    def comment_on_post(self, post_id, comment):
        comment = {'comment': comment}
        url = '%s/%s/comments' % (ENDPOINTS.POSTS, str(post_id))
        try:
            response = self.make_request('POST', url, data=json.dumps(comment))
            response.raise_for_status()
        except (requests.ConnectionError, requests.HTTPError) as error:
            raise LinkedInError(error.message)
        return True
    def get_company_by_email_domain(self, email_domain, params=None, headers=None):
        url = '%s?email-domain=%s' % (ENDPOINTS.COMPANIES, email_domain)
        response = self.make_request('GET', url, params=params, headers=headers)
        raise_for_error(response)
        return response.json()
def get_companies(self, company_ids=None, universal_names=None, selectors=None,
params=None, headers=None):
identifiers = []
url = ENDPOINTS.COMPANIES
if company_ids:
identifiers += map(str, company_ids)
if universal_names:
identifiers += ['universal-name=%s' % un for un in universal_names]
if identifiers:
url = '%s::(%s)' % (url, ','.join(identifiers))
if selectors:
url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_company_updates(self, company_id, params=None, headers=None):
url = '%s/%s/updates' % (ENDPOINTS.COMPANIES, str(company_id))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_company_products(self, company_id, selectors=None, params=None,
headers=None):
url = '%s/%s/products' % (ENDPOINTS.COMPANIES, str(company_id))
if selectors:
url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def follow_company(self, company_id):
url = '%s/~/following/companies' % ENDPOINTS.PEOPLE
post = {'id': company_id}
response = self.make_request('POST', url, data=json.dumps(post))
raise_for_error(response)
return True
def unfollow_company(self, company_id):
url = '%s/~/following/companies/id=%s' % (ENDPOINTS.PEOPLE, str(company_id))
response = self.make_request('DELETE', url)
raise_for_error(response)
return True
def search_company(self, selectors=None, params=None, headers=None):
url = ENDPOINTS.COMPANY_SEARCH
if selectors:
url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
    def get_job(self, job_id, selectors=None, params=None, headers=None):
        url = '%s/%s' % (ENDPOINTS.JOBS, str(job_id))
        if selectors:
            url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_job_bookmarks(self, selectors=None, params=None, headers=None):
url = '%s/~/job-bookmarks' % ENDPOINTS.PEOPLE
if selectors:
url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def search_job(self, selectors=None, params=None, headers=None):
url = ENDPOINTS.JOB_SEARCH
if selectors:
url = '%s:(%s)' % (url, LinkedInSelector.parse(selectors))
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def submit_share(self, comment=None, title=None, description=None,
submitted_url=None, submitted_image_url=None,
visibility_code='anyone'):
post = {
'visibility': {
'code': visibility_code,
},
}
if comment is not None:
post['comment'] = comment
if title is not None and submitted_url is not None:
post['content'] = {
'title': title,
'submitted-url': submitted_url,
'submitted-image-url': submitted_image_url,
'description': description,
}
url = '%s/~/shares' % ENDPOINTS.PEOPLE
response = self.make_request('POST', url, data=json.dumps(post))
raise_for_error(response)
return response.json()
def get_network_updates(self, types, member_id=None,
self_scope=True, params=None, headers=None):
if member_id:
url = '%s/id=%s/network/updates' % (ENDPOINTS.PEOPLE,
str(member_id))
else:
url = '%s/~/network/updates' % ENDPOINTS.PEOPLE
if not params:
params = {}
if types:
params.update({'type': types})
if self_scope is True:
params.update({'scope': 'self'})
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_network_update(self, types, update_key,
self_scope=True, params=None, headers=None):
url = '%s/~/network/updates/key=%s' % (ENDPOINTS.PEOPLE, str(update_key))
if not params:
params = {}
if types:
params.update({'type': types})
if self_scope is True:
params.update({'scope': 'self'})
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def get_network_status(self, params=None, headers=None):
url = '%s/~/network/network-stats' % ENDPOINTS.PEOPLE
response = self.make_request('GET', url, params=params, headers=headers)
raise_for_error(response)
return response.json()
def send_invitation(self, invitation):
assert type(invitation) == LinkedInInvitation, 'LinkedInInvitation required'
url = '%s/~/mailbox' % ENDPOINTS.PEOPLE
response = self.make_request('POST', url,
data=json.dumps(invitation.json))
raise_for_error(response)
return True
def send_message(self, message):
        assert type(message) == LinkedInMessage, 'LinkedInMessage required'
url = '%s/~/mailbox' % ENDPOINTS.PEOPLE
response = self.make_request('POST', url,
data=json.dumps(message.json))
raise_for_error(response)
return True
def comment_on_update(self, update_key, comment):
comment = {'comment': comment}
url = '%s/~/network/updates/key=%s/update-comments' % (ENDPOINTS.PEOPLE, update_key)
response = self.make_request('POST', url, data=json.dumps(comment))
raise_for_error(response)
return True
def like_update(self, update_key, is_liked=True):
url = '%s/~/network/updates/key=%s/is-liked' % (ENDPOINTS.PEOPLE, update_key)
response = self.make_request('PUT', url, data=json.dumps(is_liked))
raise_for_error(response)
return True
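# Illustrative end-to-end sketch of the OAuth 2.0 flow implemented above
# (key, secret, redirect URI and token values are placeholders):
#
#   authentication = LinkedInAuthentication(
#       'API_KEY', 'API_SECRET', 'http://localhost:8000/callback',
#       permissions=[PERMISSIONS.BASIC_PROFILE])
#   print(authentication.authorization_url)  # send the user here to authorize
#   # ...after the redirect, store the 'code' query parameter:
#   authentication.authorization_code = 'CODE_FROM_REDIRECT'
#   token = authentication.get_access_token()
#   application = LinkedInApplication(authentication)
#   profile = application.get_profile(selectors=['id', 'first-name', 'last-name'])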
|
|
import os
import locale
import codecs
import nose
import numpy as np
from numpy import iinfo
import pandas as pd
from pandas import (date_range, Index, _np_version_under1p9)
import pandas.util.testing as tm
from pandas.tools.util import cartesian_product, to_numeric
CURRENT_LOCALE = locale.getlocale()
LOCALE_OVERRIDE = os.environ.get('LOCALE_OVERRIDE', None)
class TestCartesianProduct(tm.TestCase):
def test_simple(self):
x, y = list('ABC'), [1, 22]
result1, result2 = cartesian_product([x, y])
expected1 = np.array(['A', 'A', 'B', 'B', 'C', 'C'])
expected2 = np.array([1, 22, 1, 22, 1, 22])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_datetimeindex(self):
# regression test for GitHub issue #6439
# make sure that the ordering on datetimeindex is consistent
x = date_range('2000-01-01', periods=2)
result1, result2 = [Index(y).day for y in cartesian_product([x, x])]
expected1 = np.array([1, 1, 2, 2], dtype=np.int32)
expected2 = np.array([1, 2, 1, 2], dtype=np.int32)
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
def test_empty(self):
# product of empty factors
X = [[], [0, 1], []]
Y = [[], [], ['a', 'b', 'c']]
for x, y in zip(X, Y):
expected1 = np.array([], dtype=np.asarray(x).dtype)
expected2 = np.array([], dtype=np.asarray(y).dtype)
result1, result2 = cartesian_product([x, y])
tm.assert_numpy_array_equal(result1, expected1)
tm.assert_numpy_array_equal(result2, expected2)
# empty product (empty input):
result = cartesian_product([])
expected = []
tm.assert_equal(result, expected)
def test_invalid_input(self):
invalid_inputs = [1, [1], [1, 2], [[1], 2],
'a', ['a'], ['a', 'b'], [['a'], 'b']]
msg = "Input must be a list-like of list-likes"
for X in invalid_inputs:
tm.assertRaisesRegexp(TypeError, msg, cartesian_product, X=X)
class TestLocaleUtils(tm.TestCase):
@classmethod
def setUpClass(cls):
super(TestLocaleUtils, cls).setUpClass()
cls.locales = tm.get_locales()
if not cls.locales:
raise nose.SkipTest("No locales found")
tm._skip_if_windows()
@classmethod
def tearDownClass(cls):
super(TestLocaleUtils, cls).tearDownClass()
del cls.locales
def test_get_locales(self):
# all systems should have at least a single locale
assert len(tm.get_locales()) > 0
def test_get_locales_prefix(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test filtering locale prefixes")
first_locale = self.locales[0]
assert len(tm.get_locales(prefix=first_locale[:2])) > 0
def test_set_locale(self):
if len(self.locales) == 1:
raise nose.SkipTest("Only a single locale found, no point in "
"trying to test setting another locale")
if LOCALE_OVERRIDE is None:
lang, enc = 'it_CH', 'UTF-8'
elif LOCALE_OVERRIDE == 'C':
lang, enc = 'en_US', 'ascii'
else:
lang, enc = LOCALE_OVERRIDE.split('.')
enc = codecs.lookup(enc).name
new_locale = lang, enc
if not tm._can_set_locale(new_locale):
with tm.assertRaises(locale.Error):
with tm.set_locale(new_locale):
pass
else:
with tm.set_locale(new_locale) as normalized_locale:
new_lang, new_enc = normalized_locale.split('.')
new_enc = codecs.lookup(enc).name
normalized_locale = new_lang, new_enc
self.assertEqual(normalized_locale, new_locale)
current_locale = locale.getlocale()
self.assertEqual(current_locale, CURRENT_LOCALE)
class TestToNumeric(tm.TestCase):
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assertRaisesRegexp(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with tm.assertRaisesRegexp(ValueError, msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with tm.assertRaisesRegexp(ValueError, msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
def test_type_check(self):
# GH 11776
df = pd.DataFrame({'a': [1, -3.14, 7], 'b': ['4', '5', '6']})
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df)
for errors in ['ignore', 'raise', 'coerce']:
with tm.assertRaisesRegexp(TypeError, "1-d array"):
to_numeric(df, errors=errors)
def test_scalar(self):
self.assertEqual(pd.to_numeric(1), 1)
self.assertEqual(pd.to_numeric(1.1), 1.1)
self.assertEqual(pd.to_numeric('1'), 1)
self.assertEqual(pd.to_numeric('1.1'), 1.1)
with tm.assertRaises(ValueError):
to_numeric('XX', errors='raise')
self.assertEqual(to_numeric('XX', errors='ignore'), 'XX')
self.assertTrue(np.isnan(to_numeric('XX', errors='coerce')))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetimelike(self):
for tz in [None, 'US/Eastern', 'Asia/Tokyo']:
idx = pd.date_range('20130101', periods=3, tz=tz, name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# ToDo: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with self.assertRaisesRegexp(TypeError, "Invalid object type"):
pd.to_numeric(s)
def test_downcast(self):
# see gh-13352
mixed_data = ['1', 2, 3]
int_data = [1, 2, 3]
date_data = np.array(['1970-01-02', '1970-01-03',
'1970-01-04'], dtype='datetime64[D]')
invalid_downcast = 'unsigned-integer'
msg = 'invalid downcasting method provided'
smallest_int_dtype = np.dtype(np.typecodes['Integer'][0])
smallest_uint_dtype = np.dtype(np.typecodes['UnsignedInteger'][0])
# support below np.float32 is rare and far between
float_32_char = np.dtype(np.float32).char
smallest_float_dtype = float_32_char
for data in (mixed_data, int_data, date_data):
with self.assertRaisesRegexp(ValueError, msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data)
tm.assert_numpy_array_equal(res, expected)
res = pd.to_numeric(data, downcast=None)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_int_dtype)
for signed_downcast in ('integer', 'signed'):
res = pd.to_numeric(data, downcast=signed_downcast)
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_uint_dtype)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
expected = np.array([1, 2, 3], dtype=smallest_float_dtype)
res = pd.to_numeric(data, downcast='float')
tm.assert_numpy_array_equal(res, expected)
# if we can't successfully cast the given
# data to a numeric dtype, do not bother
# with the downcast parameter
data = ['foo', 2, 3]
expected = np.array(data, dtype=object)
res = pd.to_numeric(data, errors='ignore',
downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an unsigned integer because
# we have a negative number
data = ['-1', 2, 3]
expected = np.array([-1, 2, 3], dtype=np.int64)
res = pd.to_numeric(data, downcast='unsigned')
tm.assert_numpy_array_equal(res, expected)
# cannot cast to an integer (signed or unsigned)
# because we have a float number
data = ['1.1', 2, 3]
expected = np.array([1.1, 2, 3], dtype=np.float64)
for downcast in ('integer', 'signed', 'unsigned'):
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
# the smallest integer dtype need not be np.(u)int8
data = ['256', 257, 258]
for downcast, expected_dtype in zip(
['integer', 'signed', 'unsigned'],
[np.int16, np.int16, np.uint16]):
expected = np.array([256, 257, 258], dtype=expected_dtype)
res = pd.to_numeric(data, downcast=downcast)
tm.assert_numpy_array_equal(res, expected)
def test_downcast_limits(self):
# Test the limits of each downcast. Bug: #14401.
# Check to make sure numpy is new enough to run this test.
if _np_version_under1p9:
raise nose.SkipTest("Numpy version is under 1.9")
i = 'integer'
u = 'unsigned'
dtype_downcast_min_max = [
('int8', i, [iinfo(np.int8).min, iinfo(np.int8).max]),
('int16', i, [iinfo(np.int16).min, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int32).min, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int64).min, iinfo(np.int64).max]),
('uint8', u, [iinfo(np.uint8).min, iinfo(np.uint8).max]),
('uint16', u, [iinfo(np.uint16).min, iinfo(np.uint16).max]),
('uint32', u, [iinfo(np.uint32).min, iinfo(np.uint32).max]),
# Test will be skipped until there is more uint64 support.
# ('uint64', u, [iinfo(uint64).min, iinfo(uint64).max]),
('int16', i, [iinfo(np.int8).min, iinfo(np.int8).max + 1]),
('int32', i, [iinfo(np.int16).min, iinfo(np.int16).max + 1]),
('int64', i, [iinfo(np.int32).min, iinfo(np.int32).max + 1]),
('int16', i, [iinfo(np.int8).min - 1, iinfo(np.int16).max]),
('int32', i, [iinfo(np.int16).min - 1, iinfo(np.int32).max]),
('int64', i, [iinfo(np.int32).min - 1, iinfo(np.int64).max]),
('uint16', u, [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),
('uint32', u, [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),
# Test will be skipped until there is more uint64 support.
# ('uint64', u, [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]),
]
for dtype, downcast, min_max in dtype_downcast_min_max:
series = pd.to_numeric(pd.Series(min_max), downcast=downcast)
tm.assert_equal(series.dtype, dtype)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
|
|
# Copyright 2011 Justin Santa Barbara
# Copyright 2014 NetApp, Inc.
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import errno
import json
import socket
import time
import ddt
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from oslo_utils import uuidutils
import paramiko
import six
from webob import exc
import manila
from manila.common import constants
from manila import context
from manila.db import api as db
from manila import exception
from manila import test
from manila import utils
CONF = cfg.CONF
@ddt.ddt
class GenericUtilsTestCase(test.TestCase):
def test_service_is_up(self):
fts_func = datetime.datetime.fromtimestamp
fake_now = 1000
down_time = 5
self.flags(service_down_time=down_time)
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=fts_func(fake_now))):
# Up (equal)
service = {'updated_at': fts_func(fake_now - down_time),
'created_at': fts_func(fake_now - down_time)}
result = utils.service_is_up(service)
self.assertTrue(result)
timeutils.utcnow.assert_called_once_with()
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=fts_func(fake_now))):
# Up
service = {'updated_at': fts_func(fake_now - down_time + 1),
'created_at': fts_func(fake_now - down_time + 1)}
result = utils.service_is_up(service)
self.assertTrue(result)
timeutils.utcnow.assert_called_once_with()
with mock.patch.object(timeutils, 'utcnow',
mock.Mock(return_value=fts_func(fake_now))):
# Down
service = {'updated_at': fts_func(fake_now - down_time - 1),
'created_at': fts_func(fake_now - down_time - 1)}
result = utils.service_is_up(service)
self.assertFalse(result)
timeutils.utcnow.assert_called_once_with()
def test_is_eventlet_bug105(self):
fake_dns = mock.Mock()
fake_dns.getaddrinfo.side_effect = socket.gaierror(errno.EBADF)
with mock.patch.dict('sys.modules', {
'eventlet.support.greendns': fake_dns}):
self.assertTrue(utils.is_eventlet_bug105())
self.assertTrue(fake_dns.getaddrinfo.called)
def test_is_eventlet_bug105_neg(self):
fake_dns = mock.Mock()
fake_dns.getaddrinfo.return_value = [
(socket.AF_INET6, socket.SOCK_STREAM, 0, '', (u'127.0.0.1', 80)),
]
with mock.patch.dict('sys.modules', {
'eventlet.support.greendns': fake_dns}):
self.assertFalse(utils.is_eventlet_bug105())
fake_dns.getaddrinfo.assert_called_once_with('::1', 80)
@ddt.data(['ssh', '-D', 'my_name@name_of_remote_computer'],
['echo', '"quoted arg with space"'],
['echo', "'quoted arg with space'"])
def test_check_ssh_injection(self, cmd):
cmd_list = cmd
self.assertIsNone(utils.check_ssh_injection(cmd_list))
@ddt.data(['ssh', 'my_name@ name_of_remote_computer'],
['||', 'my_name@name_of_remote_computer'],
['cmd', 'virus;ls'],
['cmd', '"arg\"withunescaped"'],
['cmd', 'virus;"quoted argument"'],
['echo', '"quoted argument";rm -rf'],
['echo', "'quoted argument `rm -rf`'"],
['echo', '"quoted";virus;"quoted"'],
['echo', '"quoted";virus;\'quoted\''])
def test_check_ssh_injection_on_error0(self, cmd):
self.assertRaises(exception.SSHInjectionThreat,
utils.check_ssh_injection, cmd)
@ddt.data(
(("3G", "G"), 3.0),
(("4.1G", "G"), 4.1),
(("4,1G", "G"), 4.1),
(("5.23G", "G"), 5.23),
(("5,23G", "G"), 5.23),
(("9728M", "G"), 9.5),
(("8192K", "G"), 0.0078125),
(("2T", "G"), 2048.0),
(("2.1T", "G"), 2150.4),
(("2,1T", "G"), 2150.4),
(("3P", "G"), 3145728.0),
(("3.4P", "G"), 3565158.4),
(("3,4P", "G"), 3565158.4),
(("9728M", "M"), 9728.0),
(("9728.2381T", "T"), 9728.2381),
(("9728,2381T", "T"), 9728.2381),
(("0", "G"), 0.0),
(("512", "M"), 0.00048828125),
(("2097152.", "M"), 2.0),
((".1024", "K"), 0.0001),
((",1024", "K"), 0.0001),
(("2048G", "T"), 2.0),
(("65536G", "P"), 0.0625),
)
@ddt.unpack
def test_translate_string_size_to_float_positive(self, request, expected):
actual = utils.translate_string_size_to_float(*request)
self.assertEqual(expected, actual)
@ddt.data(
(None, "G"),
("fake", "G"),
("1fake", "G"),
("2GG", "G"),
("1KM", "G"),
("K1M", "G"),
("M1K", "G"),
("1.2fake", "G"),
("1,2fake", "G"),
("2.2GG", "G"),
("1.1KM", "G"),
("K2.2M", "G"),
("K2,2M", "G"),
("M2.2K", "G"),
("M2,2K", "G"),
("", "G"),
(23, "G"),
(23.0, "G"),
)
@ddt.unpack
def test_translate_string_size_to_float_negative(self, string, multiplier):
actual = utils.translate_string_size_to_float(string, multiplier)
self.assertIsNone(actual)
class MonkeyPatchTestCase(test.TestCase):
"""Unit test for utils.monkey_patch()."""
def setUp(self):
super(MonkeyPatchTestCase, self).setUp()
self.example_package = 'manila.tests.monkey_patch_example.'
self.flags(
monkey_patch=True,
monkey_patch_modules=[self.example_package + 'example_a' + ':'
+ self.example_package
+ 'example_decorator'])
def test_monkey_patch(self):
utils.monkey_patch()
manila.tests.monkey_patch_example.CALLED_FUNCTION = []
from manila.tests.monkey_patch_example import example_a
from manila.tests.monkey_patch_example import example_b
self.assertEqual('Example function', example_a.example_function_a())
exampleA = example_a.ExampleClassA()
exampleA.example_method()
ret_a = exampleA.example_method_add(3, 5)
self.assertEqual(8, ret_a)
self.assertEqual('Example function', example_b.example_function_b())
exampleB = example_b.ExampleClassB()
exampleB.example_method()
ret_b = exampleB.example_method_add(3, 5)
self.assertEqual(8, ret_b)
package_a = self.example_package + 'example_a.'
self.assertIn(package_a + 'example_function_a',
manila.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertIn(package_a + 'ExampleClassA.example_method',
manila.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertIn(package_a + 'ExampleClassA.example_method_add',
manila.tests.monkey_patch_example.CALLED_FUNCTION)
package_b = self.example_package + 'example_b.'
self.assertNotIn(package_b + 'example_function_b',
manila.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertNotIn(package_b + 'ExampleClassB.example_method',
manila.tests.monkey_patch_example.CALLED_FUNCTION)
self.assertNotIn(package_b + 'ExampleClassB.example_method_add',
manila.tests.monkey_patch_example.CALLED_FUNCTION)
class FakeSSHClient(object):
def __init__(self):
self.id = uuidutils.generate_uuid()
self.transport = FakeTransport()
def set_missing_host_key_policy(self, policy):
pass
def connect(self, ip, port=22, username=None, password=None,
key_filename=None, look_for_keys=None, timeout=10):
pass
def get_transport(self):
return self.transport
def close(self):
pass
def __call__(self, *args, **kwargs):
pass
class FakeSock(object):
def settimeout(self, timeout):
pass
class FakeTransport(object):
def __init__(self):
self.active = True
self.sock = FakeSock()
def set_keepalive(self, timeout):
pass
def is_active(self):
return self.active
class SSHPoolTestCase(test.TestCase):
"""Unit test for SSH Connection Pool."""
def test_single_ssh_connect(self):
with mock.patch.object(paramiko, "SSHClient",
mock.Mock(return_value=FakeSSHClient())):
sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test",
password="test", min_size=1, max_size=1)
with sshpool.item() as ssh:
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
self.assertEqual(first_id, second_id)
paramiko.SSHClient.assert_called_once_with()
def test_create_ssh_with_password(self):
fake_ssh_client = mock.Mock()
ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test",
password="test")
with mock.patch.object(paramiko, "SSHClient",
return_value=fake_ssh_client):
ssh_pool.create()
fake_ssh_client.connect.assert_called_once_with(
"127.0.0.1", port=22, username="test",
password="test", key_filename=None, look_for_keys=False,
timeout=10)
def test_create_ssh_with_key(self):
path_to_private_key = "/fakepath/to/privatekey"
fake_ssh_client = mock.Mock()
ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test",
privatekey="/fakepath/to/privatekey")
with mock.patch.object(paramiko, "SSHClient",
return_value=fake_ssh_client):
ssh_pool.create()
fake_ssh_client.connect.assert_called_once_with(
"127.0.0.1", port=22, username="test", password=None,
key_filename=path_to_private_key, look_for_keys=False,
timeout=10)
def test_create_ssh_with_nothing(self):
fake_ssh_client = mock.Mock()
ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test")
with mock.patch.object(paramiko, "SSHClient",
return_value=fake_ssh_client):
ssh_pool.create()
fake_ssh_client.connect.assert_called_once_with(
"127.0.0.1", port=22, username="test", password=None,
key_filename=None, look_for_keys=True,
timeout=10)
def test_create_ssh_error_connecting(self):
attrs = {'connect.side_effect': paramiko.SSHException, }
fake_ssh_client = mock.Mock(**attrs)
ssh_pool = utils.SSHPool("127.0.0.1", 22, 10, "test")
with mock.patch.object(paramiko, "SSHClient",
return_value=fake_ssh_client):
self.assertRaises(exception.SSHException, ssh_pool.create)
fake_ssh_client.connect.assert_called_once_with(
"127.0.0.1", port=22, username="test", password=None,
key_filename=None, look_for_keys=True,
timeout=10)
    def test_closed_reopened_ssh_connections(self):
with mock.patch.object(paramiko, "SSHClient",
mock.Mock(return_value=FakeSSHClient())):
sshpool = utils.SSHPool("127.0.0.1", 22, 10, "test",
password="test", min_size=1, max_size=2)
with sshpool.item() as ssh:
first_id = ssh.id
with sshpool.item() as ssh:
second_id = ssh.id
# Close the connection and test for a new connection
ssh.get_transport().active = False
self.assertEqual(first_id, second_id)
paramiko.SSHClient.assert_called_once_with()
# Expected new ssh pool
with mock.patch.object(paramiko, "SSHClient",
mock.Mock(return_value=FakeSSHClient())):
with sshpool.item() as ssh:
third_id = ssh.id
self.assertNotEqual(first_id, third_id)
paramiko.SSHClient.assert_called_once_with()
@ddt.ddt
class CidrToNetmaskTestCase(test.TestCase):
"""Unit test for cidr to netmask."""
@ddt.data(
('10.0.0.0/0', '0.0.0.0'),
('10.0.0.0/24', '255.255.255.0'),
('10.0.0.0/5', '248.0.0.0'),
('10.0.0.0/32', '255.255.255.255'),
('10.0.0.1', '255.255.255.255'),
)
@ddt.unpack
def test_cidr_to_netmask(self, cidr, expected_netmask):
result = utils.cidr_to_netmask(cidr)
self.assertEqual(expected_netmask, result)
@ddt.data(
'10.0.0.0/33',
'',
'10.0.0.555/33'
)
def test_cidr_to_netmask_invalid(self, cidr):
self.assertRaises(exception.InvalidInput, utils.cidr_to_netmask, cidr)
@ddt.ddt
class CidrToPrefixLenTestCase(test.TestCase):
"""Unit test for cidr to prefix length."""
@ddt.data(
('10.0.0.0/0', 0),
('10.0.0.0/24', 24),
('10.0.0.1', 32),
('fdf8:f53b:82e1::1/0', 0),
('fdf8:f53b:82e1::1/64', 64),
('fdf8:f53b:82e1::1', 128),
)
@ddt.unpack
def test_cidr_to_prefixlen(self, cidr, expected_prefixlen):
result = utils.cidr_to_prefixlen(cidr)
self.assertEqual(expected_prefixlen, result)
@ddt.data(
'10.0.0.0/33',
'',
'10.0.0.555/33',
'fdf8:f53b:82e1::1/129',
'fdf8:f53b:82e1::fffff'
)
def test_cidr_to_prefixlen_invalid(self, cidr):
self.assertRaises(exception.InvalidInput,
utils.cidr_to_prefixlen, cidr)
@ddt.ddt
class ParseBoolValueTestCase(test.TestCase):
@ddt.data(
('t', True),
('on', True),
('1', True),
('false', False),
('n', False),
('no', False),
('0', False),)
@ddt.unpack
def test_bool_with_valid_string(self, string, value):
fake_dict = {'fake_key': string}
result = utils.get_bool_from_api_params('fake_key', fake_dict)
self.assertEqual(value, result)
@ddt.data('None', 'invalid', 'falses')
def test_bool_with_invalid_string(self, string):
fake_dict = {'fake_key': string}
self.assertRaises(exc.HTTPBadRequest,
utils.get_bool_from_api_params,
'fake_key', fake_dict)
@ddt.data('undefined', None)
def test_bool_with_key_not_found_raise_error(self, def_val):
fake_dict = {'fake_key1': 'value1'}
self.assertRaises(exc.HTTPBadRequest,
utils.get_bool_from_api_params,
'fake_key2',
fake_dict,
def_val)
@ddt.data((False, False, False),
(True, True, False),
('true', True, False),
('false', False, False),
('undefined', 'undefined', False),
(False, False, True),
('true', True, True))
@ddt.unpack
def test_bool_with_key_not_found(self, def_val, expected, strict):
fake_dict = {'fake_key1': 'value1'}
invalid_default = utils.get_bool_from_api_params('fake_key2',
fake_dict,
def_val,
strict)
self.assertEqual(expected, invalid_default)
@ddt.ddt
class IsValidIPVersion(test.TestCase):
"""Test suite for function 'is_valid_ip_address'."""
@ddt.data('0.0.0.0', '255.255.255.255', '192.168.0.1')
def test_valid_v4(self, addr):
for vers in (4, '4'):
self.assertTrue(utils.is_valid_ip_address(addr, vers))
@ddt.data(
'2001:cdba:0000:0000:0000:0000:3257:9652',
'2001:cdba:0:0:0:0:3257:9652',
'2001:cdba::3257:9652')
def test_valid_v6(self, addr):
for vers in (6, '6'):
self.assertTrue(utils.is_valid_ip_address(addr, vers))
@ddt.data(
{'addr': '1.1.1.1', 'vers': 3},
{'addr': '1.1.1.1', 'vers': 5},
{'addr': '1.1.1.1', 'vers': 7},
{'addr': '2001:cdba::3257:9652', 'vers': '3'},
{'addr': '2001:cdba::3257:9652', 'vers': '5'},
{'addr': '2001:cdba::3257:9652', 'vers': '7'})
@ddt.unpack
def test_provided_invalid_version(self, addr, vers):
self.assertRaises(
exception.ManilaException, utils.is_valid_ip_address, addr, vers)
def test_provided_none_version(self):
self.assertRaises(TypeError, utils.is_valid_ip_address, '', None)
@ddt.data(None, 'fake', '1.1.1.1')
def test_provided_invalid_v6_address(self, addr):
for vers in (6, '6'):
self.assertFalse(utils.is_valid_ip_address(addr, vers))
@ddt.data(None, 'fake', '255.255.255.256', '2001:cdba::3257:9652', '')
def test_provided_invalid_v4_address(self, addr):
for vers in (4, '4'):
self.assertFalse(utils.is_valid_ip_address(addr, vers))
class Comparable(utils.ComparableMixin):
def __init__(self, value):
self.value = value
def _cmpkey(self):
return self.value
class TestComparableMixin(test.TestCase):
def setUp(self):
super(TestComparableMixin, self).setUp()
self.one = Comparable(1)
self.two = Comparable(2)
def test_lt(self):
self.assertTrue(self.one < self.two)
self.assertFalse(self.two < self.one)
self.assertFalse(self.one < self.one)
def test_le(self):
self.assertTrue(self.one <= self.two)
self.assertFalse(self.two <= self.one)
self.assertTrue(self.one <= self.one)
def test_eq(self):
self.assertFalse(self.one == self.two)
self.assertFalse(self.two == self.one)
self.assertTrue(self.one == self.one)
def test_ge(self):
self.assertFalse(self.one >= self.two)
self.assertTrue(self.two >= self.one)
self.assertTrue(self.one >= self.one)
def test_gt(self):
self.assertFalse(self.one > self.two)
self.assertTrue(self.two > self.one)
self.assertFalse(self.one > self.one)
def test_ne(self):
self.assertTrue(self.one != self.two)
self.assertTrue(self.two != self.one)
self.assertFalse(self.one != self.one)
def test_compare(self):
self.assertEqual(NotImplemented,
self.one._compare(1, self.one._cmpkey))
class TestRetryDecorator(test.TestCase):
def test_no_retry_required(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval=2,
retries=3,
backoff_rate=2)
def succeeds():
self.counter += 1
return 'success'
ret = succeeds()
self.assertFalse(mock_sleep.called)
self.assertEqual('success', ret)
self.assertEqual(1, self.counter)
def test_no_retry_required_random(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval=2,
retries=3,
backoff_rate=2,
wait_random=True)
def succeeds():
self.counter += 1
return 'success'
ret = succeeds()
self.assertFalse(mock_sleep.called)
self.assertEqual('success', ret)
self.assertEqual(1, self.counter)
def test_retries_once_random(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate,
wait_random=True)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.ManilaException(data='fake')
else:
return 'success'
ret = fails_once()
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
self.assertTrue(mock_sleep.called)
def test_retries_once(self):
self.counter = 0
interval = 2
backoff_rate = 2
retries = 3
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate)
def fails_once():
self.counter += 1
if self.counter < 2:
raise exception.ManilaException(data='fake')
else:
return 'success'
ret = fails_once()
self.assertEqual('success', ret)
self.assertEqual(2, self.counter)
self.assertEqual(1, mock_sleep.call_count)
mock_sleep.assert_called_with(interval * backoff_rate)
def test_limit_is_reached(self):
self.counter = 0
retries = 3
interval = 2
backoff_rate = 4
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
interval,
retries,
backoff_rate)
def always_fails():
self.counter += 1
raise exception.ManilaException(data='fake')
self.assertRaises(exception.ManilaException,
always_fails)
self.assertEqual(retries, self.counter)
expected_sleep_arg = []
for i in range(retries):
if i > 0:
interval *= backoff_rate
expected_sleep_arg.append(float(interval))
mock_sleep.assert_has_calls(map(mock.call, expected_sleep_arg))
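        # Worked example of the expected backoff sequence built above (added
        # for illustration): with interval=2, backoff_rate=4 and retries=3 the
        # expected sleep calls are 2.0, 2*4=8.0 and 8*4=32.0 seconds.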
def test_wrong_exception_no_retry(self):
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException)
def raise_unexpected_error():
raise ValueError("value error")
self.assertRaises(ValueError, raise_unexpected_error)
self.assertFalse(mock_sleep.called)
def test_wrong_retries_num(self):
self.assertRaises(ValueError, utils.retry, exception.ManilaException,
retries=-1)
def test_max_backoff_sleep(self):
self.counter = 0
with mock.patch.object(time, 'sleep') as mock_sleep:
@utils.retry(exception.ManilaException,
retries=0,
backoff_rate=2,
backoff_sleep_max=4)
def fails_then_passes():
self.counter += 1
if self.counter < 5:
raise exception.ManilaException(data='fake')
else:
return 'success'
self.assertEqual('success', fails_then_passes())
mock_sleep.assert_has_calls(map(mock.call, [2, 4, 4, 4]))
@ddt.ddt
class RequireDriverInitializedTestCase(test.TestCase):
@ddt.data(True, False)
def test_require_driver_initialized(self, initialized):
class FakeDriver(object):
@property
def initialized(self):
return initialized
class FakeException(Exception):
pass
class FakeManager(object):
driver = FakeDriver()
@utils.require_driver_initialized
def call_me(self):
raise FakeException(
"Should be raised only if manager.driver.initialized "
"('%s') is equal to 'True'." % initialized)
if initialized:
expected_exception = FakeException
else:
expected_exception = exception.DriverNotInitialized
self.assertRaises(expected_exception, FakeManager().call_me)
@ddt.ddt
class ShareMigrationHelperTestCase(test.TestCase):
"""Tests DataMigrationHelper."""
def setUp(self):
super(ShareMigrationHelperTestCase, self).setUp()
self.context = context.get_admin_context()
def test_wait_for_access_update(self):
sid = 1
fake_share_instances = [
{
'id': sid,
'access_rules_status': constants.SHARE_INSTANCE_RULES_SYNCING,
},
{
'id': sid,
'access_rules_status': constants.STATUS_ACTIVE,
},
]
self.mock_object(time, 'sleep')
self.mock_object(db, 'share_instance_get',
mock.Mock(side_effect=fake_share_instances))
utils.wait_for_access_update(self.context, db,
fake_share_instances[0], 1)
db.share_instance_get.assert_has_calls(
[mock.call(mock.ANY, sid), mock.call(mock.ANY, sid)]
)
time.sleep.assert_called_once_with(1.414)
@ddt.data(
(
{
'id': '1',
'access_rules_status': constants.SHARE_INSTANCE_RULES_ERROR,
},
exception.ShareMigrationFailed
),
(
{
'id': '1',
'access_rules_status': constants.SHARE_INSTANCE_RULES_SYNCING,
},
exception.ShareMigrationFailed
),
)
@ddt.unpack
def test_wait_for_access_update_invalid(self, fake_instance, expected_exc):
self.mock_object(time, 'sleep')
self.mock_object(db, 'share_instance_get',
mock.Mock(return_value=fake_instance))
now = time.time()
timeout = now + 100
self.mock_object(time, 'time',
mock.Mock(side_effect=[now, timeout]))
self.assertRaises(expected_exc,
utils.wait_for_access_update, self.context,
db, fake_instance, 1)
@ddt.ddt
class ConvertStrTestCase(test.TestCase):
def test_convert_str_str_input(self):
self.mock_object(utils.encodeutils, 'safe_encode')
input_value = six.text_type("string_input")
output_value = utils.convert_str(input_value)
if six.PY2:
utils.encodeutils.safe_encode.assert_called_once_with(input_value)
self.assertEqual(
utils.encodeutils.safe_encode.return_value, output_value)
else:
self.assertEqual(0, utils.encodeutils.safe_encode.call_count)
self.assertEqual(input_value, output_value)
def test_convert_str_bytes_input(self):
self.mock_object(utils.encodeutils, 'safe_encode')
if six.PY2:
input_value = six.binary_type("binary_input")
else:
input_value = six.binary_type("binary_input", "utf-8")
output_value = utils.convert_str(input_value)
if six.PY2:
utils.encodeutils.safe_encode.assert_called_once_with(input_value)
self.assertEqual(
utils.encodeutils.safe_encode.return_value, output_value)
else:
self.assertEqual(0, utils.encodeutils.safe_encode.call_count)
self.assertIsInstance(output_value, six.string_types)
self.assertEqual(six.text_type("binary_input"), output_value)
@ddt.ddt
class TestDisableNotifications(test.TestCase):
def test_do_nothing_getter(self):
"""Test any attribute will always return the same instance (self)."""
donothing = utils.DoNothing()
self.assertIs(donothing, donothing.anyname)
def test_do_nothing_caller(self):
"""Test calling the object will always return the same instance."""
donothing = utils.DoNothing()
self.assertIs(donothing, donothing())
def test_do_nothing_json_serializable(self):
"""Test calling the object will always return the same instance."""
donothing = utils.DoNothing()
self.assertEqual('""', json.dumps(donothing))
@utils.if_notifications_enabled
def _decorated_method(self):
return mock.sentinel.success
def test_if_notification_enabled_when_enabled(self):
"""Test method is called when notifications are enabled."""
result = self._decorated_method()
self.assertEqual(mock.sentinel.success, result)
@ddt.data([], ['noop'], ['noop', 'noop'])
def test_if_notification_enabled_when_disabled(self, driver):
"""Test method is not called when notifications are disabled."""
self.override_config('driver', driver,
group='oslo_messaging_notifications')
result = self._decorated_method()
self.assertEqual(utils.DO_NOTHING, result)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import csv
import shutil
import requests
import scraperwiki
# Below as a helper for namespaces.
# Looks like a horrible hack.
dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0]
sys.path.append(dir)
from calc_collect import calc
from ga_collect import datecalc
from ga_collect import ga_collect
from app_config import load as L
from utilities import prompt_format as I
from utilities import store_records as S
from datetime import date, timedelta, datetime
def create_tables():
'''Creating the tables of the new database.'''
sql_statements = {
'funnel': 'CREATE TABLE IF NOT EXISTS funnel(metricid TEXT, period TEXT, period_start_date TEXT, period_end_date TEXT, period_type TEXT, value REAL, PRIMARY KEY(metricid,period))',
'metrics': 'CREATE TABLE IF NOT EXISTS metrics(metricid TEXT, calculated INTEGER, name TEXT, description TEXT, operand1metricid TEXT, operand1periodoffset TEXT, operand2metricid TEXT, operand2periodoffset TEXT, operation TEXT, calcsortorder TEXT)'
# "_log": 'CREATE TABLE IF NOT EXISTS _log(date TEXT, script TEXT, metricid TEXT, success TEXT, log_file TEXT)'
}
for table in sql_statements:
try:
query = scraperwiki.sqlite.execute(sql_statements[table])
print "%s Table `%s` created." % (I.item('prompt_bullet'), str(table))
except Exception as e:
print e
return False
print "%s Database created successfully.\n" % I.item('prompt_success')
return True
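# A minimal sketch of what one `funnel` row looks like and how it could be
# written straight to the local sqlite store. The real collectors below go
# through utilities.store_records (S.StoreRecords), whose implementation is
# not shown here, so the scraperwiki.sqlite.save() call and the period label
# format are illustrative assumptions, not the collector's actual storage path.
def _example_store_funnel_row():
    example_row = {
        'metricid': 'ckan-number-of-datasets',  # metric id used further below
        'period': '2014-W30',                   # assumed period label format
        'period_start_date': '2014-07-21',
        'period_end_date': '2014-07-27',
        'period_type': 'w',
        'value': 1234.0,
    }
    # unique_keys mirrors the PRIMARY KEY(metricid, period) declared above.
    scraperwiki.sqlite.save(unique_keys=['metricid', 'period'],
                            data=example_row, table_name='funnel')
    return example_row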
def sync_metrics():
'''Sync the metrics metadata with the new database.'''
# Download
data_dir = os.path.split(dir)[0]
path = os.path.join(data_dir, "temp", "metrics.csv")
gdocs_url = "https://docs.google.com/spreadsheets/d/14kZ2Cj_IaBP1J4KZqhHTOb67HmymC5otSSV_3ji_Ssg/export?format=csv"
r = requests.get(gdocs_url, stream = True)
if r.status_code == 200:
with open(path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
# Read file and store in database.
try:
with open(path) as csvfile:
reader = csv.DictReader(csvfile)
records = []
for row in reader:
records.append(row)
except Exception as e:
print e
return False
try:
print "%s Syncing `metrics` table." % I.item('prompt_bullet')
S.StoreRecords(records, table = "metrics")
print "%s successfully synced `metrics` table.\n" % I.item('prompt_success')
return True
except Exception as e:
print e
print "%s failed to sync `metrics` table." % I.item('prompt_error')
return False
def collect_previous_ga_data(verbose = False, test_data = False):
'''Collecting historical Google Analytics data with the new database.'''
counter = 0
period_date = date.today()
# Google Analytics only has data available
# from 2014-05-25, not earlier.
while period_date > date(2014, 5, 25):
period_date = date.today() - timedelta(weeks=counter)
counter += 1
try:
print "%s collecting data for week %s of %s" % (I.item('prompt_bullet'), period_date.isocalendar()[1], period_date.isocalendar()[0])
records = ga_collect.collect_ga_data(period_date)
S.StoreRecords(data = records, table = "funnel")
if test_data is True and counter > 1:
return records
        except Exception as e:
            if verbose:
                print e
            print "%s Google Analytics failed to run." % I.item('prompt_error')
            return False
print "%s Google Analytics collection ran successfully." % I.item('prompt_success')
return True
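# A stdlib-only sketch of the date walk performed above: start from today and
# step back one week at a time until the 2014-05-25 floor is reached. It only
# approximates the loop above (which also emits the first date at or below the
# floor) and exists purely to make the iteration count easy to eyeball.
def _example_week_anchors(start=None, floor=date(2014, 5, 25)):
    start = start or date.today()
    anchors = []
    counter = 0
    current = start
    while current > floor:
        anchors.append(current)
        counter += 1
        current = start - timedelta(weeks=counter)
    return anchors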
def collect_previous_ckan_data(test_data = False):
'''Syncing historical CKAN data with the newly installed database.'''
#
# TODO: This is a major failure point.
# This collector relies on data collected
# by a very old collector written in R
# and hosted in ScraperWiki.
#
data_dir = os.path.split(dir)[0]
path = os.path.join(data_dir, "temp", "ckan_data.csv")
u = "https://ds-ec2.scraperwiki.com/7c6jufm/bwbcvvxuynjbrx2/cgi-bin/csv/ckan_dataset_data.csv"
r = requests.get(u, stream = True)
if r.status_code == 200:
with open(path, 'wb') as f:
r.raw.decode_content = True
shutil.copyfileobj(r.raw, f)
# Read file and store in database.
try:
print "%s Fetching CKAN historical data." % I.item('prompt_bullet')
with open(path) as csvfile:
reader = csv.DictReader(csvfile)
records = []
for row in reader:
user = {
'metricid': 'ckan-number-of-users',
'period': row["date"],
'period_start_date': row["date"],
'period_end_date': row["date"],
'period_type': "d",
'value': row["number_of_users"]
}
orgs = {
'metricid': 'ckan-number-of-orgs',
'period': row["date"],
'period_start_date': row["date"],
'period_end_date': row["date"],
'period_type': "d",
'value': row["number_of_organizations"]
}
datasets = {
'metricid': 'ckan-number-of-datasets',
'period': row["date"],
'period_start_date': row["date"],
'period_end_date': row["date"],
'period_type': "d",
'value': row["number_of_datasets"]
}
records.append(user)
records.append(orgs)
records.append(datasets)
record_date = datetime.strptime(row["date"], "%Y-%m-%d")
if record_date == datecalc.period_start_date(date = record_date, period_type = "w"):
record_week = datecalc.get_period(date = record_date, period_type = "w")
#
# Adding weekly records to the
# record collection.
#
user["period"] = record_week
user["period_type"] = "w"
orgs["period"] = record_week
orgs["period_type"] = "w"
datasets["period"] = record_week
datasets["period_type"] = "w"
records.append(user)
records.append(orgs)
records.append(datasets)
#
# Store records in database.
#
print "%s Storing CKAN historical data (%s records)." % (I.item('prompt_bullet'), len(records))
S.StoreRecords(records, table = "funnel")
if test_data:
return records
except Exception as e:
print e
return False
print "%s Successfully collected historic CKAN records." % I.item('prompt_success')
return True
def run_historical_calculations():
'''Making the calculations.'''
print "%s Making historical calculations." % I.item('prompt_bullet')
try:
calc.get_initial_setup_data()
    except Exception as e:
        print e
        print "%s failed to perform historical calculations." % I.item('prompt_error')
        return False
    print "%s successfully performed historical calculations.\n" % I.item('prompt_success')
    return True
def main():
# Setting up database.
create_tables()
sync_metrics()
# Collecting previous data.
collect_previous_ga_data()
collect_previous_ckan_data()
# Running historic calculations.
run_historical_calculations()
if __name__ == '__main__':
main()
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
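    # forwards() copies the existing plain ManyToMany `members` relations into
    # explicit OrganizationMembership/ProjectMembership through-model rows;
    # backwards() pushes those rows back onto the plain `members` relations.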
def forwards(self, orm):
for org in orm.Organization.objects.all():
for user in org.members.all():
orm.OrganizationMembership.objects.create(organization=org,
user=user)
for proj in orm.Project.objects.all():
for user in proj.members.all():
orm.ProjectMembership.objects.create(project=proj,
user=user)
def backwards(self, orm):
for membership in orm.OrganizationMembership.objects.all():
membership.organization.members.add(membership.user)
for membership in orm.ProjectMembership.objects.all():
membership.project.members.add(membership.user)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'storybase_asset.asset': {
'Meta': {'object_name': 'Asset'},
'asset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'asset_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'assets'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'assets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'section_specific': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'})
},
'storybase_asset.dataset': {
'Meta': {'object_name': 'DataSet'},
'attribution': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'dataset_created': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'dataset_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'links_to_file': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'to': "orm['auth.User']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'})
},
'storybase_geo.geolevel': {
'Meta': {'object_name': 'GeoLevel'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'storybase_geo.location': {
'Meta': {'object_name': 'Location'},
'address': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'address2': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': "orm['auth.User']"}),
'point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
'postcode': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'raw': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'storybase_geo.place': {
'Meta': {'object_name': 'Place'},
'boundary': ('django.contrib.gis.db.models.fields.MultiPolygonField', [], {'null': 'True', 'blank': 'True'}),
'children': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['storybase_geo.Place']", 'null': 'True', 'through': "orm['storybase_geo.PlaceRelation']", 'blank': 'True'}),
'geolevel': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'places'", 'null': 'True', 'to': "orm['storybase_geo.GeoLevel']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'place_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_geo.placerelation': {
'Meta': {'unique_together': "(('parent', 'child'),)", 'object_name': 'PlaceRelation'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_parent'", 'to': "orm['storybase_geo.Place']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'place_child'", 'to': "orm['storybase_geo.Place']"})
},
'storybase_story.story': {
'Meta': {'object_name': 'Story'},
'allow_connected': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'stories'", 'null': 'True', 'to': "orm['auth.User']"}),
'byline': ('django.db.models.fields.TextField', [], {}),
'contact_info': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'datasets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_asset.DataSet']"}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_stories'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'license': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'locations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Location']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'places': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_geo.Place']"}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_user.Project']"}),
'published': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'related_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'related_to'", 'blank': 'True', 'through': "orm['storybase_story.StoryRelation']", 'to': "orm['storybase_story.Story']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '10'}),
'story_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'structure_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'template_story': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'template_for'", 'null': 'True', 'to': "orm['storybase_story.Story']"}),
'topics': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'stories'", 'blank': 'True', 'to': "orm['storybase_taxonomy.Category']"})
},
'storybase_story.storyrelation': {
'Meta': {'object_name': 'StoryRelation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'relation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'relation_type': ('django.db.models.fields.CharField', [], {'default': "'connected'", 'max_length': '25'}),
'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'target'", 'to': "orm['storybase_story.Story']"}),
'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'source'", 'to': "orm['storybase_story.Story']"})
},
'storybase_taxonomy.category': {
'Meta': {'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['storybase_taxonomy.Category']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'storybase_taxonomy.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'}),
'tag_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_taxonomy.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'storybase_taxonomy_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['storybase_taxonomy.Tag']"})
},
'storybase_user.organization': {
'Meta': {'object_name': 'Organization'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_organizations'", 'blank': 'True', 'through': "orm['storybase_user.OrganizationStory']", 'to': "orm['storybase_story.Story']"}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_organizations'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['auth.User']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organization_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.organizationmembership': {
'Meta': {'object_name': 'OrganizationMembership'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member_type': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '140'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'storybase_user.organizationstory': {
'Meta': {'object_name': 'OrganizationStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.organizationtranslation': {
'Meta': {'unique_together': "(('organization', 'language'),)", 'object_name': 'OrganizationTranslation'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Organization']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_user.project': {
'Meta': {'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'curated_stories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'curated_in_projects'", 'blank': 'True', 'through': "orm['storybase_user.ProjectStory']", 'to': "orm['storybase_story.Story']"}),
'featured_assets': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'featured_in_projects'", 'blank': 'True', 'to': "orm['storybase_asset.Asset']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_edited': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['auth.User']"}),
'on_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'organizations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'projects'", 'blank': 'True', 'to': "orm['storybase_user.Organization']"}),
'project_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'website_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'storybase_user.projectmembership': {
'Meta': {'object_name': 'ProjectMembership'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member_type': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '140'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'storybase_user.projectstory': {
'Meta': {'object_name': 'ProjectStory'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'story': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_story.Story']"}),
'weight': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'storybase_user.projecttranslation': {
'Meta': {'unique_together': "(('project', 'language'),)", 'object_name': 'ProjectTranslation'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'en'", 'max_length': '15'}),
'name': ('storybase.fields.ShortTextField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['storybase_user.Project']"}),
'translation_id': ('uuidfield.fields.UUIDField', [], {'unique': 'True', 'max_length': '32', 'blank': 'True'})
},
'storybase_user.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notify_admin': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_digest': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_story_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_story_featured': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_updates': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'profile_id': ('uuidfield.fields.UUIDField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '32', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
}
}
complete_apps = ['storybase_user']
symmetrical = True
|
|
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from unittest import TestCase
from mock.mock import patch, MagicMock
from only_for_platform import get_platform, not_for_platform, os_distro_value, PLATFORM_WINDOWS
import os
if get_platform() != PLATFORM_WINDOWS:
with patch.object(os, "geteuid", return_value=0):
from resource_management.core import sudo
reload(sudo)
from ambari_commons.os_check import OSCheck
from resource_management.core import Environment
from resource_management.core.system import System
from resource_management.core.source import StaticFile
from resource_management.core.source import DownloadSource
from resource_management.core.source import Template
from resource_management.core.source import InlineTemplate
from ambari_jinja2 import UndefinedError, TemplateNotFound
import urllib2
@patch.object(OSCheck, "os_distribution", new = MagicMock(return_value = os_distro_value))
class TestContentSources(TestCase):
@patch.object(os.path, "isfile")
@patch.object(os.path, "join")
def test_static_file_absolute_path(self, join_mock, is_file_mock):
"""
Testing StaticFile source with absolute path
"""
sudo.read_file = lambda path: 'content'
is_file_mock.return_value = True
with Environment("/base") as env:
static_file = StaticFile("/absolute/path/file")
content = static_file.get_content()
self.assertEqual('content', content)
self.assertEqual(is_file_mock.call_count, 1)
self.assertEqual(join_mock.call_count, 0)
@patch.object(os.path, "isfile")
@patch.object(os.path, "join")
def test_static_file_relative_path(self, join_mock, is_file_mock):
"""
Testing StaticFile source with relative path
"""
sudo.read_file = lambda path: 'content'
is_file_mock.return_value = True
with Environment("/base") as env:
static_file = StaticFile("relative/path/file")
content = static_file.get_content()
self.assertEqual('content', content)
self.assertEqual(is_file_mock.call_count, 1)
self.assertEqual(join_mock.call_count, 1)
join_mock.assert_called_with('/base', 'files', 'relative/path/file')
@patch.object(urllib2, "build_opener")
@patch.object(urllib2, "Request")
@patch.object(os.path, "exists")
def test_download_source_get_content_nocache(self, exists_mock, request_mock, opener_mock):
"""
Testing DownloadSource.get_content without cache
"""
exists_mock.return_value = True
web_file_mock = MagicMock()
web_file_mock.read.return_value = 'web_content'
opener_mock.return_value.open = MagicMock(return_value=web_file_mock)
with Environment("/base", tmp_dir='/var/tmp/downloads') as env:
download_source = DownloadSource("http://download/source", redownload_files=True)
content = download_source.get_content()
self.assertEqual('web_content', content)
self.assertEqual(opener_mock.call_count, 1)
request_mock.assert_called_with('http://download/source')
self.assertEqual(web_file_mock.read.call_count, 1)
@patch("__builtin__.open")
@patch.object(urllib2, "Request")
@patch.object(urllib2, "build_opener")
@patch.object(os, "makedirs")
@patch.object(os.path, "exists")
@patch("resource_management.core.sudo.create_file")
def test_download_source_get_content_cache_new(self, create_mock, exists_mock, makedirs_mock, opener_mock, request_mock, open_mock):
"""
Testing DownloadSource.get_content with cache on non-cached resource
"""
exists_mock.side_effect = [True, False]
web_file_mock = MagicMock()
web_file_mock.read.return_value = 'web_content'
opener_mock.return_value.open = MagicMock(return_value=web_file_mock)
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = 'content'
open_mock.return_value = file_mock
with Environment("/base", tmp_dir='/var/tmp/downloads') as env:
download_source = DownloadSource("http://download/source", redownload_files=False)
content = download_source.get_content()
self.assertEqual('web_content', content)
self.assertEqual(opener_mock.call_count, 1)
request_mock.assert_called_with('http://download/source')
self.assertEqual(web_file_mock.read.call_count, 1)
@patch("__builtin__.open")
@patch.object(os.path, "exists")
def test_download_source_get_content_cache_existent(self, exists_mock, open_mock):
"""
Testing DownloadSource.get_content with cache on cached resource
"""
exists_mock.side_effect = [True, True]
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = 'cached_content'
open_mock.return_value = file_mock
with Environment("/base", tmp_dir='/var/tmp/downloads') as env:
download_source = DownloadSource("http://download/source", redownload_files=False)
content = download_source.get_content()
self.assertEqual('cached_content', content)
self.assertEqual(open_mock.call_count, 1)
self.assertEqual(file_mock.read.call_count, 1)
@patch("__builtin__.open")
@patch.object(os.path, "getmtime")
@patch.object(os.path, "exists")
def test_template_loader(self, exists_mock, getmtime_mock, open_mock):
"""
Testing template loader on existent file
"""
exists_mock.return_value = True
getmtime_mock.return_value = 10
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = 'template content'
open_mock.return_value = file_mock
with Environment("/base") as env:
template = Template("test.j2")
self.assertEqual(open_mock.call_count, 1)
open_mock.assert_called_with('/base/templates/test.j2', 'rb')
self.assertEqual(getmtime_mock.call_count, 1)
getmtime_mock.assert_called_with('/base/templates/test.j2')
@patch.object(os.path, "exists")
def test_template_loader_fail(self, exists_mock):
"""
Testing template loader on non-existent file
"""
exists_mock.return_value = False
try:
with Environment("/base") as env:
template = Template("test.j2")
self.fail("Template should fail with nonexistent template file")
except TemplateNotFound:
pass
@patch("__builtin__.open")
@patch.object(os.path, "getmtime")
@patch.object(os.path, "exists")
def test_template_loader_absolute_path(self, exists_mock, getmtime_mock, open_mock):
"""
Testing template loader with absolute file-path
"""
exists_mock.return_value = True
getmtime_mock.return_value = 10
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = 'template content'
open_mock.return_value = file_mock
with Environment("/base") as env:
template = Template("/absolute/path/test.j2")
self.assertEqual(open_mock.call_count, 1)
open_mock.assert_called_with('/absolute/path/test.j2', 'rb')
self.assertEqual(getmtime_mock.call_count, 1)
getmtime_mock.assert_called_with('/absolute/path/test.j2')
@patch("__builtin__.open")
@patch.object(os.path, "getmtime")
@patch.object(os.path, "exists")
def test_template_loader_arguments(self, exists_mock, getmtime_mock, open_mock):
"""
Testing template loader additional arguments in template and absolute file-path
"""
exists_mock.return_value = True
getmtime_mock.return_value = 10
file_mock = MagicMock(name = 'file_mock')
file_mock.__enter__.return_value = file_mock
file_mock.read.return_value = '{{test_arg1}} template content'
open_mock.return_value = file_mock
with Environment("/base") as env:
template = Template("/absolute/path/test.j2", [], test_arg1 = "test")
content = template.get_content()
self.assertEqual(open_mock.call_count, 1)
self.assertEqual(u'test template content', content)
open_mock.assert_called_with('/absolute/path/test.j2', 'rb')
self.assertEqual(getmtime_mock.call_count, 1)
getmtime_mock.assert_called_with('/absolute/path/test.j2')
def test_inline_template(self):
"""
Testing InlineTemplate
"""
with Environment("/base") as env:
template = InlineTemplate("{{test_arg1}} template content", [], test_arg1 = "test")
content = template.get_content()
self.assertEqual(u'test template content', content)
def test_template_imports(self):
"""
Testing Template additional imports
"""
try:
with Environment("/base") as env:
template = InlineTemplate("{{test_arg1}} template content {{os.path.join(path[0],path[1])}}", [], test_arg1 = "test", path = ["/one","two"])
content = template.get_content()
self.fail("Template.get_content should fail when evaluating unknown import")
except UndefinedError:
pass
with Environment("/base") as env:
template = InlineTemplate("{{test_arg1}} template content {{os.path.join(path[0],path[1])}}", [os], test_arg1 = "test", path = ["/one","two"])
content = template.get_content()
self.assertEqual(u'test template content /one/two', content)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
################################################################################
#
# RMG - Reaction Mechanism Generator
#
# Copyright (c) 2002-2010 Prof. William H. Green (whgreen@mit.edu) and the
# RMG Team (rmg_dev@mit.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the 'Software'),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
################################################################################
"""
This module contains functionality for working with kinetics "rate rules",
which provide rate coefficient parameters for various combinations of
functional groups.
"""
import os.path
import re
import codecs
import math
from copy import deepcopy
from rmgpy.data.base import Database, Entry, DatabaseError, getAllCombinations
from rmgpy.quantity import Quantity, ScalarQuantity
from rmgpy.reaction import Reaction
from rmgpy.kinetics import ArrheniusEP
from .common import KineticsError, saveEntry, \
BIMOLECULAR_KINETICS_FAMILIES, UNIMOLECULAR_KINETICS_FAMILIES
################################################################################
class KineticsRules(Database):
"""
A class for working with a set of "rate rules" for a RMG kinetics family.
"""
def __init__(self, label='', name='', shortDesc='', longDesc=''):
Database.__init__(self, label=label, name=name, shortDesc=shortDesc, longDesc=longDesc)
def __repr__(self):
return '<KineticsRules "{0}">'.format(self.label)
def loadEntry(self,
index,
kinetics=None,
degeneracy=1,
label='',
duplicate=False,
reversible=True,
reference=None,
referenceType='',
shortDesc='',
longDesc='',
rank=None,
):
entry = Entry(
index = index,
label = label,
# item = reaction,
data = kinetics,
reference = reference,
referenceType = referenceType,
shortDesc = shortDesc,
longDesc = longDesc.strip(),
rank = rank,
)
try:
self.entries[label].append(entry)
except KeyError:
self.entries[label] = [entry]
return entry
def saveEntry(self, f, entry):
"""
Write the given `entry` in the thermo database to the file object `f`.
"""
return saveEntry(f, entry)
def processOldLibraryEntry(self, data):
"""
Process a list of parameters `data` as read from an old-style RMG
thermo database, returning the corresponding kinetics object.
"""
# This is hardcoding of reaction families!
label = os.path.split(self.label)[-2]
if label in BIMOLECULAR_KINETICS_FAMILIES:
Aunits = 'cm^3/(mol*s)'
elif label in UNIMOLECULAR_KINETICS_FAMILIES:
Aunits = 's^-1'
else:
raise Exception('Unable to determine preexponential units for old reaction family "{0}".'.format(self.label))
try:
Tmin, Tmax = data[0].split('-')
Tmin = (float(Tmin),"K")
Tmax = (float(Tmax),"K")
except ValueError:
Tmin = (float(data[0]),"K")
Tmax = None
A, n, alpha, E0, dA, dn, dalpha, dE0 = data[1:9]
A = float(A)
if dA[0] == '*':
A = Quantity(A,Aunits,'*|/',float(dA[1:]))
else:
dA = float(dA)
if dA != 0:
A = Quantity(A,Aunits,'+|-',dA)
else:
A = Quantity(A,Aunits)
n = float(n); dn = float(dn)
if dn != 0:
n = Quantity(n,'','+|-',dn)
else:
n = Quantity(n,'')
alpha = float(alpha); dalpha = float(dalpha)
if dalpha != 0:
alpha = Quantity(alpha,'','+|-',dalpha)
else:
alpha = Quantity(alpha,'')
E0 = float(E0); dE0 = float(dE0)
if dE0 != 0:
E0 = Quantity(E0,'kcal/mol','+|-',dE0)
else:
E0 = Quantity(E0,'kcal/mol')
rank = int(data[9])
return ArrheniusEP(A=A, n=n, alpha=alpha, E0=E0, Tmin=Tmin, Tmax=Tmax), rank
def loadOld(self, path, groups, numLabels):
"""
Load a set of old rate rules for kinetics groups into this depository.
"""
# Parse the old library
entries = self.parseOldLibrary(os.path.join(path, 'rateLibrary.txt'), numParameters=10, numLabels=numLabels)
self.entries = {}
for entry in entries:
index, label, data, shortDesc = entry
if isinstance(data, (str,unicode)):
kinetics = data
rank = 0
elif isinstance(data, tuple) and len(data) == 2:
kinetics, rank = data
else:
raise DatabaseError('Unexpected data {0!r} for entry {1!s}.'.format(data, entry))
reactants = [groups.entries[l].item for l in label.split(';')]
item = Reaction(reactants=reactants, products=[])
entry = Entry(
index = index,
label = label,
item = item,
data = kinetics,
rank = rank,
shortDesc = shortDesc
)
try:
self.entries[label].append(entry)
except KeyError:
self.entries[label] = [entry]
self.__loadOldComments(path)
def __loadOldComments(self, path):
"""
Load a set of old comments from the ``comments.txt`` file for the old
kinetics groups. This function assumes that the groups have already
been loaded.
"""
index = 'General' #mops up comments before the first rate ID
re_underline = re.compile('^\-+')
comments = {}
comments[index] = ''
# Load the comments into a temporary dictionary for now
# If no comments file then do nothing
try:
f = codecs.open(os.path.join(path, 'comments.rst'), 'r', 'utf-8')
except IOError:
return
for line in f:
match = re_underline.match(line)
if match:
index = f.next().strip()
assert line.rstrip() == f.next().rstrip(), "Overline didn't match underline"
if not comments.has_key(index):
comments[index] = ''
line = f.next()
comments[index] += line
f.close()
# Transfer the comments to the longDesc attribute of the associated entry
entries = self.getEntries()
unused = []
for index, longDesc in comments.iteritems():
try:
index = int(index)
except ValueError:
unused.append(index)
if isinstance(index, int):
for entry in entries:
if entry.index == index:
entry.longDesc = longDesc
break
#else:
# unused.append(str(index))
# Any unused comments are placed in the longDesc attribute of the depository
self.longDesc = comments['General'] + '\n'
unused.remove('General')
for index in unused:
try:
self.longDesc += comments[index] + '\n'
except KeyError:
import pdb; pdb.set_trace()
def saveOld(self, path, groups):
"""
Save a set of old rate rules for kinetics groups from this depository.
"""
# This is hardcoding of reaction families!
label = os.path.split(self.label)[-2]
if label in BIMOLECULAR_KINETICS_FAMILIES:
factor = 1.0e6
elif label in UNIMOLECULAR_KINETICS_FAMILIES:
factor = 1.0
else:
raise ValueError('Unable to determine preexponential units for old reaction family "{0}".'.format(self.label))
entries = self.getEntries()
flib = codecs.open(os.path.join(path, 'rateLibrary.txt'), 'w', 'utf-8')
flib.write('// The format for the data in this rate library\n')
flib.write('Arrhenius_EP\n\n')
fcom = codecs.open(os.path.join(path, 'comments.rst'), 'w', 'utf-8')
fcom.write('-------\n')
fcom.write('General\n')
fcom.write('-------\n')
fcom.write(self.longDesc.strip() + '\n\n')
for entry in entries:
flib.write('{0:<5d} '.format(entry.index))
line = ''
for label in entry.label.split(';'):
line = line + '{0:<23} '.format(label)
flib.write(line)
if len(line)>48: # make long lines line up in 10-space columns
flib.write(' '*(10-len(line)%10))
if entry.data.Tmax is None:
if re.match('\d+\-\d+',str(entry.data.Tmin).strip()):
# Tmin contains string of Trange
Trange = '{0} '.format(entry.data.Tmin)
elif isinstance(entry.data.Tmin, ScalarQuantity):
# Tmin is a temperature. Make range 1 degree either side!
Trange = '{0:4g}-{1:g} '.format(entry.data.Tmin.value_si-1, entry.data.Tmin.value_si+1)
else:
# Range is missing, but we have to put something:
Trange = ' 1-9999 '
else:
Trange = '{0:4g}-{1:g} '.format(entry.data.Tmin.value_si, entry.data.Tmax.value_si)
flib.write('{0:<12}'.format(Trange))
flib.write('{0:11.2e} {1:9.2f} {2:9.2f} {3:11.2f} '.format(
entry.data.A.value_si * factor,
entry.data.n.value_si,
entry.data.alpha.value_si,
entry.data.E0.value_si / 4184.
))
if entry.data.A.isUncertaintyMultiplicative():
flib.write('*{0:<6g} '.format(entry.data.A.uncertainty_si))
else:
flib.write('{0:<7g} '.format(entry.data.A.uncertainty_si * factor))
flib.write('{0:6g} {1:6g} {2:6g} '.format(
entry.data.n.uncertainty_si,
entry.data.alpha.uncertainty_si,
entry.data.E0.uncertainty_si / 4184.
))
if not entry.rank:
entry.rank = 0
flib.write(u' {0:<4d} {1}\n'.format(entry.rank, entry.shortDesc))
fcom.write('------\n')
fcom.write('{0}\n'.format(entry.index))
fcom.write('------\n')
fcom.write(entry.longDesc.strip() + '\n\n')
flib.close()
fcom.close()
def getEntries(self):
"""
Return a list of all of the entries in the rate rules database,
sorted by index.
"""
entries = []
for e in self.entries.values():
if isinstance(e,list):
entries.extend(e)
else:
entries.append(e)
entries.sort(key=lambda x: x.index)
return entries
def getEntriesToSave(self):
"""
Return a sorted list of all of the entries in the rate rules database
to save.
"""
return self.getEntries()
def hasRule(self, template):
"""
Return ``True`` if a rate rule with the given `template` currently
exists, or ``False`` otherwise.
"""
return self.getRule(template) is not None
def getRule(self, template):
"""
Return the exact rate rule with the given `template`, or ``None`` if no
corresponding entry exists.
"""
entries = self.getAllRules(template)
if len(entries) == 1:
return entries[0]
elif len(entries) > 1:
if any([entry.rank > 0 for entry in entries]):
entries = [entry for entry in entries if entry.rank > 0]
entries.sort(key=lambda x: (x.rank, x.index))
return entries[0]
else:
entries.sort(key=lambda x: x.index)
return entries[0]
else:
return None
def getAllRules(self, template):
"""
Return all of the exact rate rules with the given `template`. Raises a
:class:`ValueError` if no corresponding entry exists.
"""
entries = []
templateLabels = ';'.join([group.label for group in template])
try:
entries.extend(self.entries[templateLabels])
except KeyError:
pass
family = os.path.split(self.label)[0] # i.e. self.label = 'R_Recombination/rules'
if family.lower() == 'r_recombination':
template.reverse()
templateLabels = ';'.join([group.label for group in template])
try:
entries.extend(self.entries[templateLabels])
except KeyError:
pass
template.reverse()
return entries
def fillRulesByAveragingUp(self, rootTemplate, alreadyDone):
"""
Fill in gaps in the kinetics rate rules by averaging child nodes.
"""
rootLabel = ';'.join([g.label for g in rootTemplate])
if rootLabel in alreadyDone:
return alreadyDone[rootLabel]
# See if we already have a rate rule for this exact template
entry = self.getRule(rootTemplate)
if entry is not None and entry.rank > 0:
# We already have a rate rule for this exact template
# If the entry has rank of zero, then we have so little faith
# in it that we'd rather use an averaged value if possible
# Since this entry does not have a rank of zero, we keep its
# value
alreadyDone[rootLabel] = entry.data
return entry.data
# Recursively descend to the child nodes
childrenList = [[group] for group in rootTemplate]
for group in childrenList:
parent = group.pop(0)
if len(parent.children) > 0:
group.extend(parent.children)
else:
group.append(parent)
childrenList = getAllCombinations(childrenList)
kineticsList = []
for template in childrenList:
label = ';'.join([g.label for g in template])
if template == rootTemplate:
continue
if label in alreadyDone:
kinetics = alreadyDone[label]
else:
kinetics = self.fillRulesByAveragingUp(template, alreadyDone)
if kinetics is not None:
kineticsList.append([kinetics, template])
if len(kineticsList) > 0:
# We found one or more results! Let's average them together
kinetics = self.__getAverageKinetics([k for k, t in kineticsList])
if len(kineticsList) > 1:
kinetics.comment += 'Average of ({0})'.format(
' + '.join(k.comment if k.comment != '' else ';'.join(g.label for g in t) for k, t in kineticsList))
else:
k,t = kineticsList[0]
kinetics.comment += k.comment if k.comment != '' else ';'.join(g.label for g in t)
entry = Entry(
index = 0,
label = rootLabel,
item = rootTemplate,
data = kinetics,
rank = 10, # Indicates this is an averaged estimate
)
self.entries[entry.label] = [entry]
alreadyDone[rootLabel] = entry.data
return entry.data
alreadyDone[rootLabel] = None
return None
def __getAverageKinetics(self, kineticsList):
"""
        Based on averaging log k. For the most complex case (ArrheniusEP):
        k = A * T^n * exp(-(E0 + alpha*H) / (R*T))
        log k = log(A) + n*log(T) - (E0 + alpha*H) / (2.303*R*T)
        Hence we average n, E0, and alpha arithmetically, but we
        average log A (i.e. take the geometric average of A).
"""
logA = 0.0; n = 0.0; E0 = 0.0; alpha = 0.0
count = len(kineticsList)
for kinetics in kineticsList:
logA += math.log10(kinetics.A.value_si)
n += kinetics.n.value_si
alpha += kinetics.alpha.value_si
E0 += kinetics.E0.value_si
logA /= count
n /= count
alpha /= count
E0 /= count
Aunits = kineticsList[0].A.units
if Aunits == 'cm^3/(mol*s)' or Aunits == 'cm^3/(molecule*s)' or Aunits == 'm^3/(molecule*s)':
Aunits = 'm^3/(mol*s)'
elif Aunits == 'cm^6/(mol^2*s)' or Aunits == 'cm^6/(molecule^2*s)' or Aunits == 'm^6/(molecule^2*s)':
Aunits = 'm^6/(mol^2*s)'
elif Aunits == 's^-1' or Aunits == 'm^3/(mol*s)' or Aunits == 'm^6/(mol^2*s)':
pass
else:
raise Exception('Invalid units {0} for averaging kinetics.'.format(Aunits))
averagedKinetics = ArrheniusEP(
A = (10**logA,Aunits),
n = n,
alpha = alpha,
E0 = (E0*0.001,"kJ/mol"),
)
return averagedKinetics
def estimateKinetics(self, template, degeneracy=1):
"""
Determine the appropriate kinetics for a reaction with the given
`template` using rate rules.
"""
def getTemplateLabel(template):
# Get string format of the template in the form "(leaf1,leaf2)"
return '({0})'.format(';'.join([g.label for g in template]))
originalLeaves = getTemplateLabel(template)
templateList = [template]
while len(templateList) > 0:
kineticsList = []
for t in templateList:
entry = self.getRule(t)
if entry is None: continue
kinetics = deepcopy(entry.data)
kineticsList.append([kinetics, t])
if len(kineticsList) > 0:
if len(kineticsList) == 1:
kinetics, t = kineticsList[0]
# Check whether the exact rate rule for the original template (most specific
# leaves) were found or not.
matchedLeaves = getTemplateLabel(t)
if matchedLeaves == originalLeaves:
if 'Average' in kinetics.comment:
kinetics.comment += 'Estimated using an average'
else:
kinetics.comment += 'Exact match found'
else:
# Using a more general node to estimate original template
if kinetics.comment:
kinetics.comment += '\n'
kinetics.comment +='Estimated using template ' + matchedLeaves
else:
# We found one or more results! Let's average them together
kinetics = self.__getAverageKinetics([k for k, t in kineticsList])
kinetics.comment += 'Estimated using average of templates {0}'.format(
' + '.join([getTemplateLabel(t) for k, t in kineticsList]),
)
kinetics.comment += ' for rate rule ' + originalLeaves
kinetics.A.value_si *= degeneracy
if degeneracy > 1:
kinetics.comment += "\n"
kinetics.comment += "Multiplied by reaction path degeneracy {0}".format(degeneracy)
return kinetics, entry if 'Exact' in kinetics.comment else None
else:
# No results found
templateList0 = templateList
templateList = []
for template0 in templateList0:
for index in range(len(template0)):
if not template0[index].parent:
                            # We're at the top-level node in this subtree
continue
t = template0[:]
t[index] = t[index].parent
if t not in templateList:
templateList.append(t)
# If we're here then we couldn't estimate any kinetics, which is an exception
raise KineticsError('Unable to determine kinetics for reaction with template {0}.'.format(template))
|
|
#!/usr/bin/env python
#
# Copyright 2011 Joseph Rawlings
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
import webapp2
import os
import jinja2
import hmac
import string
import re
import random
import hashlib
import json
from google.appengine.ext import db
template_dir = os.path.join(os.path.dirname(__file__), 'templates/unit5')
jinja_env = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = False)
jinja_env_escaped = jinja2.Environment(loader = jinja2.FileSystemLoader(template_dir), autoescape = True)
SECRET = 'mysecret'
def make_salt(size=5, chars=string.ascii_letters):
return ''.join(random.choice(chars) for x in range(size))
def make_pw_hash(name, pw, salt = None):
if not salt:
salt = make_salt()
return '%s|%s' % (hashlib.sha256(name + pw + salt).hexdigest(), salt)
def valid_pw(name, pw, h):
salt = h.split('|')[1]
return h == make_pw_hash(name, pw, salt)
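# A minimal, self-contained sketch of the salted-hash round trip implemented
# by make_pw_hash()/valid_pw() above: the stored value is "<sha256 hex>|<salt>",
# so verification re-hashes the candidate password with the stored salt. The
# username and passwords here are made up for illustration.
def _example_password_round_trip():
    stored = make_pw_hash('alice', 'hunter2')           # '<hex digest>|<salt>'
    assert valid_pw('alice', 'hunter2', stored)         # correct password
    assert not valid_pw('alice', 'wrong-pass', stored)  # wrong password
    return stored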
def hash_str(s):
return hmac.new(SECRET, s).hexdigest()
def make_secure_val(s):
return "%s|%s" % (s, hash_str(s))
def check_secure_val(h):
val = h.split('|')[0]
return val if h == make_secure_val(val) else None
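# Likewise, the user_id cookie is stored as "<value>|<HMAC(SECRET, value)>" and
# check_secure_val() only returns the value while the signature still matches.
# A small sketch with a made-up user id:
def _example_cookie_round_trip():
    cookie_val = make_secure_val('42')          # e.g. '42|<hex digest>'
    assert check_secure_val(cookie_val) == '42'
    assert check_secure_val('42|tampered') is None
    return cookie_val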
# RegEx for the username field
USERNAME_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
# RegEx for the password field
PASSWORD_RE = re.compile(r"^.{3,20}$")
# RegEx for the Email field
EMAIL_RE = re.compile(r"^[\S]+@[\S]+\.[\S]+$")
# Validation for Usernames
def valid_username(username):
return USERNAME_RE.match(username)
# Validation for Passwords
def valid_password(password):
return PASSWORD_RE.match(password)
# Validation for Emails
def valid_email(email):
return EMAIL_RE.match(email)
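# Illustrative inputs for the validators above, derived from the regexes
# themselves rather than any external spec: usernames are 3-20 characters of
# [a-zA-Z0-9_-], passwords are any 3-20 characters, emails are loosely "x@y.z".
def _example_validation():
    assert valid_username('joe_rawlings')
    assert not valid_username('ab')            # too short
    assert valid_password('s3cret')
    assert not valid_password('xy')            # too short
    assert valid_email('user@example.com')
    assert not valid_email('not-an-email')     # no "@" present
    return True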
# Generic webapp Handler with some helper functions
class Handler(webapp2.RequestHandler):
def write(self, *a, **kw):
self.response.out.write(*a, **kw)
def render_str(self, template, **params):
t = jinja_env.get_template(template)
return t.render(params)
def render_str_escaped(self, template, **params):
t = jinja_env_escaped.get_template(template)
return t.render(params)
def render(self, template, **kw):
self.write(self.render_str(template, **kw))
def render_content(self, template, **kw):
content = self.render_str_escaped(template, **kw)
self.render("index.html", content=content)
def render_json(self, content):
# set the content type as JSON
self.response.headers['Content-Type'] = 'application/json'
# write out content as JSON and handle date objects with the default handler for now
self.response.out.write(json.dumps(content, default= lambda obj: obj.isoformat()))
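# render_json above leans on the default= hook so that datetime properties
# (e.g. 'created' on the models below) serialize cleanly; roughly:
#
#   import datetime, json
#   json.dumps({'created': datetime.datetime(2012, 1, 1)},
#              default=lambda obj: obj.isoformat())
#   # -> '{"created": "2012-01-01T00:00:00"}'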
# Model object representing a Blog Entry
class Entry(db.Model):
content = db.TextProperty(required = True)
subject = db.StringProperty(required = True)
created = db.DateTimeProperty(auto_now_add = True)
# Model object representing a User of the Blog
class User(db.Model):
username = db.StringProperty(required = True)
password_hash = db.StringProperty(required = True)
email = db.StringProperty(required = False)
created = db.DateTimeProperty(auto_now_add = True)
# Handler for the main page of the Blog
#
# Will check for /.json routing and render
# the appropriate content type
class Unit5Handler(Handler):
def render_posts(self, args):
cursor = db.GqlQuery("SELECT * FROM Entry ORDER BY created DESC")
if args == '/.json':
# map the _entity which contains the raw model attributes
self.render_json(content=[e._entity for e in cursor])
else:
# render straight html using templates
self.render_content("post.html", entries=cursor)
def get(self, args):
self.render_posts(args)
# Handler for a specific Blog Entry
#
# Will check for .json routing and render
# the appropriate content type
class Unit5EntryHandler(Handler):
def get(self, entry_id, args):
# retrieve the Entry from the Database
        entry = Entry.get_by_id(int(entry_id))
if args == '.json':
# use the _entity which contains the raw model attributes
self.render_json(content=entry._entity)
else:
# render straight html using templates
self.render_content("post.html", entries=[entry])
# Handler for logging out of the Blog
#
# Also clears out the user_id cookie
class Unit5LogoutHandler(Handler):
def get(self):
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
self.redirect("/unit5/signup")
# Handler for logging in to the Blog
#
# 1) Check against the DB to see if the User exists
# 2) Validate the User's password against the stored Hash/Salt
# 3) Set the user_id login cookie
# 4) Redirect the User to the Welcome page
#
# If the login attempt fails, reset the user_id cookie and show the login page again
class Unit5LoginHandler(Handler):
def get(self):
self.render_content("login.html")
def post(self):
username = self.request.get('username')
password = self.request.get('password')
users = db.GqlQuery("SELECT * FROM User WHERE username = :1", username, limit=1)
if users.count() == 1 and valid_pw(users[0].username, password, users[0].password_hash):
self.response.headers.add_header('Set-Cookie', 'user_id=%s; Path=/' % make_secure_val(str(users[0].key().id())))
self.redirect("/unit5/welcome")
else:
self.response.headers.add_header('Set-Cookie', 'user_id=; Path=/')
login_error="Invalid login"
self.render_content("login.html", error=login_error)
class Unit5SignupHandler(Handler):
def get(self):
self.render_content("signup.html")
def post(self):
username = self.request.get('username')
password = self.request.get('password')
verify = self.request.get('verify')
email = self.request.get('email')
username_error = ""
password_error = ""
verify_error = ""
email_error = ""
if not valid_username(username):
username_error = "That's not a valid username."
if not valid_password(password):
password_error = "That wasn't a valid password."
if not password == verify:
verify_error = "Your passwords didn't match."
if email and not valid_email(email):
email_error = "That's not a valid email"
        if username_error or password_error or verify_error or email_error:
self.render_content("signup.html"
, username=username
, username_error=username_error
, password_error=password_error
, verify_error=verify_error
, email=email
, email_error=email_error)
else:
user = User(username=username, password_hash=make_pw_hash(username, password), email=email)
user.put()
self.response.headers.add_header('Set-Cookie', 'user_id=%s; Path=/' % make_secure_val(str(user.key().id())))
self.redirect("/unit5/welcome")
class Unit5WelcomeHandler(Handler):
def get(self):
user_id = 0
user = None
user_id_str = self.request.cookies.get('user_id')
if user_id_str:
user_id = check_secure_val(user_id_str)
if not user_id:
self.redirect("/unit5/signup")
else:
            user = User.get_by_id(int(user_id))
self.render_content("welcome.html", user=user)
class Unit5NewPostHandler(Handler):
def render_new_post(self, subject="", content="", error=""):
self.render_content("new_post.html", subject=subject, content=content, error=error)
def get(self):
self.render_new_post()
def post(self):
subject = self.request.get("subject")
content = self.request.get("content")
if subject and content:
entry = Entry(subject = subject, content = content)
entry.put()
self.redirect("/unit5/" + str(entry.key().id()))
else:
error = "subject and content, please!"
self.render_new_post(subject=subject, content=content, error = error)
# All WebApp Handlers
app = webapp2.WSGIApplication([
# render HTML or JSON depending on the suffix provided
('/unit5(/.json)?', Unit5Handler)
, ('/unit5/newpost', Unit5NewPostHandler)
, ('/unit5/login', Unit5LoginHandler)
, ('/unit5/logout', Unit5LogoutHandler)
    , ('/unit5/signup', Unit5SignupHandler)
, ('/unit5/welcome', Unit5WelcomeHandler)
# render HTML or JSON depending on the suffix provided
, ('/unit5/(\d+)(.json)?', Unit5EntryHandler)
], debug=True)
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: swift_conn
# You'll see swift_conn passed around a few places in this file. This is the
# source httplib connection of whatever it is attached to.
# It is used when early termination of reading from the connection should
# happen, such as when a range request is satisfied but there's still more the
# source connection would like to send. To prevent having to read all the data
# that could be left, the source connection can be close()d and then reads
# commence to empty out any buffers.
# These shenanigans are to ensure all related objects can be garbage
# collected. We've seen objects hang around forever otherwise.
import os
import time
import functools
import inspect
import itertools
from swift import gettext_ as _
from urllib import quote
from eventlet import spawn_n, GreenPile
from eventlet.queue import Queue, Empty, Full
from eventlet.timeout import Timeout
from swift.common.wsgi import make_pre_authed_env
from swift.common.utils import normalize_timestamp, config_true_value, \
public, split_path, list_from_csv, GreenthreadSafeIterator, \
quorum_size
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ChunkReadTimeout, ConnectionTimeout
from swift.common.http import is_informational, is_success, is_redirection, \
is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \
HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \
HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED
from swift.common.swob import Request, Response, HeaderKeyDict
def update_headers(response, headers):
"""
Helper function to update headers in the response.
:param response: swob.Response object
:param headers: dictionary headers
"""
if hasattr(headers, 'items'):
headers = headers.items()
for name, value in headers:
if name == 'etag':
response.headers[name] = value.replace('"', '')
elif name not in ('date', 'content-length', 'content-type',
'connection', 'x-put-timestamp', 'x-delete-after'):
response.headers[name] = value
def source_key(resp):
"""
Provide the timestamp of the swift http response as a floating
point value. Used as a sort key.
:param resp: httplib response object
"""
return float(resp.getheader('x-put-timestamp') or
resp.getheader('x-timestamp') or 0)
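# Example with two hypothetical backend responses: the one carrying the newer
# X-PUT-Timestamp sorts last, so sorting by source_key and popping picks the
# freshest copy (a response with neither header sorts as 0, i.e. oldest):
#
#   source_key(resp_new)   # X-PUT-Timestamp: 1300000000.00000 -> 1300000000.0
#   source_key(resp_old)   # X-Timestamp:     1200000000.00000 -> 1200000000.0
#   sorted([resp_old, resp_new], key=source_key)[-1]   # resp_new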
def delay_denial(func):
"""
Decorator to declare which methods should have any swift.authorize call
delayed. This is so the method can load the Request object up with
additional information that may be needed by the authorization system.
:param func: function for which authorization will be delayed
"""
func.delay_denial = True
@functools.wraps(func)
def wrapped(*a, **kw):
return func(*a, **kw)
return wrapped
def get_account_memcache_key(account):
cache_key, env_key = _get_cache_key(account, None)
return cache_key
def get_container_memcache_key(account, container):
if not container:
raise ValueError("container not provided")
cache_key, env_key = _get_cache_key(account, container)
return cache_key
def headers_to_account_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of account info based on response headers.
"""
headers = dict((k.lower(), v) for k, v in dict(headers).iteritems())
return {
'status': status_int,
        # 'container_count' anomaly:
        # older code sometimes expected an int and sometimes a string here;
        # the current code stores a str (or None) and converts to int where
        # needed (e.g. in get_account_info and Controller.account_info)
'container_count': headers.get('x-account-container-count'),
'total_object_count': headers.get('x-account-object-count'),
'bytes': headers.get('x-account-bytes-used'),
'meta': dict((key[15:], value)
for key, value in headers.iteritems()
if key.startswith('x-account-meta-'))
}
def headers_to_container_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of container info based on response headers.
"""
headers = dict((k.lower(), v) for k, v in dict(headers).iteritems())
return {
'status': status_int,
'read_acl': headers.get('x-container-read'),
'write_acl': headers.get('x-container-write'),
'sync_key': headers.get('x-container-sync-key'),
'object_count': headers.get('x-container-object-count'),
'bytes': headers.get('x-container-bytes-used'),
'versions': headers.get('x-versions-location'),
'cors': {
'allow_origin': headers.get(
'x-container-meta-access-control-allow-origin'),
'expose_headers': headers.get(
'x-container-meta-access-control-expose-headers'),
'max_age': headers.get(
'x-container-meta-access-control-max-age')
},
'meta': dict((key[17:], value)
for key, value in headers.iteritems()
if key.startswith('x-container-meta-'))
}
def headers_to_object_info(headers, status_int=HTTP_OK):
"""
Construct a cacheable dict of object info based on response headers.
"""
headers = dict((k.lower(), v) for k, v in dict(headers).iteritems())
info = {'status': status_int,
'length': headers.get('content-length'),
'type': headers.get('content-type'),
'etag': headers.get('etag'),
'meta': dict((key[14:], value)
for key, value in headers.iteritems()
if key.startswith('x-object-meta-'))
}
return info
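# Illustration with made-up headers: the 'x-object-meta-' prefix (14 chars) is
# stripped, so user metadata comes back keyed by its bare name:
#
#   headers_to_object_info({'Content-Length': '1024',
#                           'Content-Type': 'text/plain',
#                           'ETag': 'abc123',
#                           'X-Object-Meta-Color': 'blue'})
#   # -> {'status': 200, 'length': '1024', 'type': 'text/plain',
#   #     'etag': 'abc123', 'meta': {'color': 'blue'}}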
def cors_validation(func):
"""
Decorator to check if the request is a CORS request and if so, if it's
valid.
:param func: function to check
"""
@functools.wraps(func)
def wrapped(*a, **kw):
controller = a[0]
req = a[1]
# The logic here was interpreted from
# http://www.w3.org/TR/cors/#resource-requests
# Is this a CORS request?
req_origin = req.headers.get('Origin', None)
if req_origin:
# Yes, this is a CORS request so test if the origin is allowed
container_info = \
controller.container_info(controller.account_name,
controller.container_name, req)
cors_info = container_info.get('cors', {})
# Call through to the decorated method
resp = func(*a, **kw)
# Expose,
# - simple response headers,
# http://www.w3.org/TR/cors/#simple-response-header
# - swift specific: etag, x-timestamp, x-trans-id
# - user metadata headers
# - headers provided by the user in
# x-container-meta-access-control-expose-headers
expose_headers = ['cache-control', 'content-language',
'content-type', 'expires', 'last-modified',
'pragma', 'etag', 'x-timestamp', 'x-trans-id']
for header in resp.headers:
if header.startswith('X-Container-Meta') or \
header.startswith('X-Object-Meta'):
expose_headers.append(header.lower())
if cors_info.get('expose_headers'):
expose_headers.extend(
[header_line.strip()
for header_line in cors_info['expose_headers'].split(' ')
if header_line.strip()])
resp.headers['Access-Control-Expose-Headers'] = \
', '.join(expose_headers)
# The user agent won't process the response if the Allow-Origin
# header isn't included
resp.headers['Access-Control-Allow-Origin'] = req_origin
return resp
else:
# Not a CORS request so make the call as normal
return func(*a, **kw)
return wrapped
def get_object_info(env, app, path=None, swift_source=None):
"""
Get the info structure for an object, based on env and app.
This is useful to middlewares.
Note: This call bypasses auth. Success does not imply that the
request has authorization to the object.
"""
(version, account, container, obj) = \
split_path(path or env['PATH_INFO'], 4, 4, True)
info = _get_object_info(app, env, account, container, obj,
swift_source=swift_source)
if not info:
info = headers_to_object_info({}, 0)
return info
def get_container_info(env, app, swift_source=None):
"""
Get the info structure for a container, based on env and app.
This is useful to middlewares.
Note: This call bypasses auth. Success does not imply that the
    request has authorization to the container.
"""
(version, account, container, unused) = \
split_path(env['PATH_INFO'], 3, 4, True)
info = get_info(app, env, account, container, ret_not_found=True,
swift_source=swift_source)
if not info:
info = headers_to_container_info({}, 0)
return info
def get_account_info(env, app, swift_source=None):
"""
Get the info structure for an account, based on env and app.
This is useful to middlewares.
Note: This call bypasses auth. Success does not imply that the
    request has authorization to the account.
"""
(version, account, _junk, _junk) = \
split_path(env['PATH_INFO'], 2, 4, True)
info = get_info(app, env, account, ret_not_found=True,
swift_source=swift_source)
if not info:
info = headers_to_account_info({}, 0)
if info.get('container_count') is None:
info['container_count'] = 0
else:
info['container_count'] = int(info['container_count'])
return info
def _get_cache_key(account, container):
"""
Get the keys for both memcache (cache_key) and env (env_key)
where info about accounts and containers is cached
:param account: The name of the account
:param container: The name of the container (or None if account)
    :returns: a tuple of (cache_key, env_key)
"""
if container:
cache_key = 'container/%s/%s' % (account, container)
else:
cache_key = 'account/%s' % account
    # Use a unique environment cache key per account and per container.
# This allows caching both account and container and ensures that when we
# copy this env to form a new request, it won't accidentally reuse the
# old container or account info
env_key = 'swift.%s' % cache_key
return cache_key, env_key
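# Key layout produced by this helper (account/container names are examples):
#
#   _get_cache_key('AUTH_test', None)
#   # -> ('account/AUTH_test', 'swift.account/AUTH_test')
#   _get_cache_key('AUTH_test', 'pics')
#   # -> ('container/AUTH_test/pics', 'swift.container/AUTH_test/pics')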
def get_object_env_key(account, container, obj):
"""
    Get the env key (env_key) under which info about an object is cached
:param account: The name of the account
:param container: The name of the container
:param obj: The name of the object
    :returns: a string env_key
"""
env_key = 'swift.object/%s/%s/%s' % (account,
container, obj)
return env_key
def _set_info_cache(app, env, account, container, resp):
"""
Cache info in both memcache and env.
Caching is used to avoid unnecessary calls to account & container servers.
This is a private function that is being called by GETorHEAD_base and
by clear_info_cache.
Any attempt to GET or HEAD from the container/account server should use
    the GETorHEAD_base interface which would then set the cache.
:param app: the application object
:param account: the unquoted account name
    :param container: the unquoted container name or None
:param resp: the response received or None if info cache should be cleared
"""
if container:
cache_time = app.recheck_container_existence
else:
cache_time = app.recheck_account_existence
cache_key, env_key = _get_cache_key(account, container)
if resp:
if resp.status_int == HTTP_NOT_FOUND:
cache_time *= 0.1
elif not is_success(resp.status_int):
cache_time = None
else:
cache_time = None
    # Next, actually set both memcache and the env cache
memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
if not cache_time:
env.pop(env_key, None)
if memcache:
memcache.delete(cache_key)
return
if container:
info = headers_to_container_info(resp.headers, resp.status_int)
else:
info = headers_to_account_info(resp.headers, resp.status_int)
if memcache:
memcache.set(cache_key, info, time=cache_time)
env[env_key] = info
def _set_object_info_cache(app, env, account, container, obj, resp):
"""
    Cache object info in env. Do not cache object information in
memcache. This is an intentional omission as it would lead
to cache pressure. This is a per-request cache.
Caching is used to avoid unnecessary calls to object servers.
This is a private function that is being called by GETorHEAD_base.
Any attempt to GET or HEAD from the object server should use
the GETorHEAD_base interface which would then set the cache.
:param app: the application object
:param account: the unquoted account name
:param container: the unquoted container name or None
    :param obj: the unquoted object name or None
:param resp: the response received or None if info cache should be cleared
"""
env_key = get_object_env_key(account, container, obj)
if not resp:
env.pop(env_key, None)
return
info = headers_to_object_info(resp.headers, resp.status_int)
env[env_key] = info
def clear_info_cache(app, env, account, container=None):
"""
Clear the cached info in both memcache and env
:param app: the application object
:param account: the account name
    :param container: the container name, or None to clear only the account info
"""
_set_info_cache(app, env, account, container, None)
def _get_info_cache(app, env, account, container=None):
"""
Get the cached info from env or memcache (if used) in that order
Used for both account and container info
A private function used by get_info
:param app: the application object
:param env: the environment used by the current request
    :returns: the cached info or None if not cached
"""
cache_key, env_key = _get_cache_key(account, container)
if env_key in env:
return env[env_key]
memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
if memcache:
info = memcache.get(cache_key)
if info:
env[env_key] = info
return info
return None
def _prepare_pre_auth_info_request(env, path, swift_source):
"""
Prepares a pre authed request to obtain info using a HEAD.
:param env: the environment used by the current request
:param path: The unquoted request path
:param swift_source: value for swift.source in WSGI environment
:returns: the pre authed request
"""
# Set the env for the pre_authed call without a query string
newenv = make_pre_authed_env(env, 'HEAD', path, agent='Swift',
query_string='', swift_source=swift_source)
# Note that Request.blank expects quoted path
return Request.blank(quote(path), environ=newenv)
def get_info(app, env, account, container=None, ret_not_found=False,
swift_source=None):
"""
Get the info about accounts or containers
Note: This call bypasses auth. Success does not imply that the
request has authorization to the info.
:param app: the application object
:param env: the environment used by the current request
:param account: The unquoted name of the account
:param container: The unquoted name of the container (or None if account)
:returns: the cached info or None if cannot be retrieved
"""
info = _get_info_cache(app, env, account, container)
if info:
if ret_not_found or is_success(info['status']):
return info
return None
# Not in cache, let's try the account servers
path = '/v1/%s' % account
if container:
        # First check that the account itself exists
if not get_info(app, env, account):
return None
path += '/' + container
req = _prepare_pre_auth_info_request(
env, path, (swift_source or 'GET_INFO'))
# Whenever we do a GET/HEAD, the GETorHEAD_base will set the info in
# the environment under environ[env_key] and in memcache. We will
# pick the one from environ[env_key] and use it to set the caller env
resp = req.get_response(app)
cache_key, env_key = _get_cache_key(account, container)
try:
info = resp.environ[env_key]
env[env_key] = info
if ret_not_found or is_success(info['status']):
return info
except (KeyError, AttributeError):
pass
return None
def _get_object_info(app, env, account, container, obj, swift_source=None):
"""
Get the info about object
Note: This call bypasses auth. Success does not imply that the
request has authorization to the info.
:param app: the application object
:param env: the environment used by the current request
:param account: The unquoted name of the account
:param container: The unquoted name of the container
:param obj: The unquoted name of the object
:returns: the cached info or None if cannot be retrieved
"""
env_key = get_object_env_key(account, container, obj)
info = env.get(env_key)
if info:
return info
    # Not in cache, let's try the object servers
path = '/v1/%s/%s/%s' % (account, container, obj)
req = _prepare_pre_auth_info_request(env, path, swift_source)
# Whenever we do a GET/HEAD, the GETorHEAD_base will set the info in
# the environment under environ[env_key]. We will
# pick the one from environ[env_key] and use it to set the caller env
resp = req.get_response(app)
try:
info = resp.environ[env_key]
env[env_key] = info
return info
except (KeyError, AttributeError):
pass
return None
class Controller(object):
"""Base WSGI controller class for the proxy"""
server_type = 'Base'
# Ensure these are all lowercase
pass_through_headers = []
def __init__(self, app):
"""
Creates a controller attached to an application instance
:param app: the application instance
"""
self.account_name = None
self.app = app
self.trans_id = '-'
self._allowed_methods = None
@property
def allowed_methods(self):
if self._allowed_methods is None:
self._allowed_methods = set()
all_methods = inspect.getmembers(self, predicate=inspect.ismethod)
for name, m in all_methods:
if getattr(m, 'publicly_accessible', False):
self._allowed_methods.add(name)
return self._allowed_methods
def _x_remove_headers(self):
"""
Returns a list of headers that must not be sent to the backend
        :returns: a list of headers
"""
return []
def transfer_headers(self, src_headers, dst_headers):
"""
        Transfer legal headers from an original client request to a dictionary
that will be used as headers by the backend request
:param src_headers: A dictionary of the original client request headers
:param dst_headers: A dictionary of the backend request headers
"""
st = self.server_type.lower()
x_remove = 'x-remove-%s-meta-' % st
dst_headers.update((k.lower().replace('-remove', '', 1), '')
for k in src_headers
if k.lower().startswith(x_remove) or
k.lower() in self._x_remove_headers())
x_meta = 'x-%s-meta-' % st
dst_headers.update((k.lower(), v)
for k, v in src_headers.iteritems()
if k.lower() in self.pass_through_headers or
k.lower().startswith(x_meta))
def generate_request_headers(self, orig_req=None, additional=None,
transfer=False):
"""
        Create a dict of headers to be used in the backend request
:param orig_req: the original request sent by the client to the proxy
:param additional: additional headers to send to the backend
:param transfer: If True, transfer headers from original client request
:returns: a dictionary of headers
"""
# Use the additional headers first so they don't overwrite the headers
# we require.
headers = HeaderKeyDict(additional) if additional else HeaderKeyDict()
if transfer:
self.transfer_headers(orig_req.headers, headers)
headers.setdefault('x-timestamp', normalize_timestamp(time.time()))
if orig_req:
referer = orig_req.as_referer()
else:
referer = ''
headers['x-trans-id'] = self.trans_id
headers['connection'] = 'close'
headers['user-agent'] = 'proxy-server %s' % os.getpid()
headers['referer'] = referer
return headers
def error_occurred(self, node, msg):
"""
        Handle logging and bookkeeping of errors for a node.
:param node: dictionary of node to handle errors for
:param msg: error message
"""
node['errors'] = node.get('errors', 0) + 1
node['last_error'] = time.time()
self.app.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
def exception_occurred(self, node, typ, additional_info):
"""
Handle logging of generic exceptions.
:param node: dictionary of node to log the error for
:param typ: server type
:param additional_info: additional information to log
"""
self.app.logger.exception(
_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: '
'%(info)s'),
{'type': typ, 'ip': node['ip'], 'port': node['port'],
'device': node['device'], 'info': additional_info})
def error_limited(self, node):
"""
Check if the node is currently error limited.
:param node: dictionary of node to check
:returns: True if error limited, False otherwise
"""
now = time.time()
if 'errors' not in node:
return False
if 'last_error' in node and node['last_error'] < \
now - self.app.error_suppression_interval:
del node['last_error']
if 'errors' in node:
del node['errors']
return False
limited = node['errors'] > self.app.error_suppression_limit
if limited:
self.app.logger.debug(
_('Node error limited %(ip)s:%(port)s (%(device)s)'), node)
return limited
def error_limit(self, node, msg):
"""
Mark a node as error limited. This immediately pretends the
node received enough errors to trigger error suppression. Use
this for errors like Insufficient Storage. For other errors
use :func:`error_occurred`.
:param node: dictionary of node to error limit
:param msg: error message
"""
node['errors'] = self.app.error_suppression_limit + 1
node['last_error'] = time.time()
self.app.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'),
{'msg': msg, 'ip': node['ip'],
'port': node['port'], 'device': node['device']})
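    # Sketch of how error_limit and error_limited interact (node is a
    # hypothetical ring node dict; the limits come from the app config):
    #
    #   self.error_limit(node, 'ERROR Insufficient Storage')
    #   self.error_limited(node)   # True, and stays True until
    #                              # error_suppression_interval seconds have
    #                              # passed since node['last_error']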
def account_info(self, account, req=None):
"""
Get account information, and also verify that the account exists.
:param account: name of the account to get the info for
:param req: caller's HTTP request context object (optional)
:returns: tuple of (account partition, account nodes, container_count)
or (None, None, None) if it does not exist
"""
partition, nodes = self.app.account_ring.get_nodes(account)
if req:
env = getattr(req, 'environ', {})
else:
env = {}
info = get_info(self.app, env, account)
if not info:
return None, None, None
if info.get('container_count') is None:
container_count = 0
else:
container_count = int(info['container_count'])
return partition, nodes, container_count
def container_info(self, account, container, req=None):
"""
        Get container information, and thereby verify container existence.
This will also verify account existence.
:param account: account name for the container
:param container: container name to look up
:param req: caller's HTTP request context object (optional)
:returns: dict containing at least container partition ('partition'),
                  container nodes ('nodes'), container read
acl ('read_acl'), container write acl ('write_acl'),
and container sync key ('sync_key').
Values are set to None if the container does not exist.
"""
part, nodes = self.app.container_ring.get_nodes(account, container)
if req:
env = getattr(req, 'environ', {})
else:
env = {}
info = get_info(self.app, env, account, container)
if not info:
info = headers_to_container_info({}, 0)
info['partition'] = None
info['nodes'] = None
else:
info['partition'] = part
info['nodes'] = nodes
return info
def iter_nodes(self, ring, partition, node_iter=None):
"""
Yields nodes for a ring partition, skipping over error
limited nodes and stopping at the configurable number of
nodes. If a node yielded subsequently gets error limited, an
extra node will be yielded to take its place.
Note that if you're going to iterate over this concurrently from
multiple greenthreads, you'll want to use a
swift.common.utils.GreenthreadSafeIterator to serialize access.
Otherwise, you may get ValueErrors from concurrent access. (You also
may not, depending on how logging is configured, the vagaries of
socket IO and eventlet, and the phase of the moon.)
        :param ring: ring to get nodes from
:param partition: ring partition to yield nodes for
:param node_iter: optional iterable of nodes to try. Useful if you
want to filter or reorder the nodes.
"""
part_nodes = ring.get_part_nodes(partition)
if node_iter is None:
#chain part nodes and handoff nodes
node_iter = itertools.chain(part_nodes,
ring.get_more_nodes(partition))
num_primary_nodes = len(part_nodes)
# Use of list() here forcibly yanks the first N nodes (the primary
# nodes) from node_iter, so the rest of its values are handoffs.
primary_nodes = self.app.sort_nodes(
list(itertools.islice(node_iter, num_primary_nodes)))
handoff_nodes = node_iter
nodes_left = self.app.request_node_count(ring)
for node in primary_nodes:
if not self.error_limited(node):
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
handoffs = 0
for node in handoff_nodes:
if not self.error_limited(node):
handoffs += 1
if self.app.log_handoffs:
self.app.logger.increment('handoff_count')
self.app.logger.warning(
'Handoff requested (%d)' % handoffs)
if handoffs == len(primary_nodes):
self.app.logger.increment('handoff_all_count')
yield node
if not self.error_limited(node):
nodes_left -= 1
if nodes_left <= 0:
return
def _make_request(self, nodes, part, method, path, headers, query,
logger_thread_locals):
"""
        Sends the HTTP request to one node at a time, walking the given node
        iterator (primary nodes first, then handoffs) until a definitive
        (non-informational, non-server-error) response is received.
:param nodes: an iterator of the backend server and handoff servers
:param part: the partition number
:param method: the method to send to the backend
:param path: the path to send to the backend
        :param headers: a dict of headers for the backend request
:param query: query string to send to the backend.
:param logger_thread_locals: The thread local values to be set on the
self.app.logger to retain transaction
logging information.
        :returns: a tuple of (status, reason, headers, body) from the first
                  usable response, or None if no node returned one
"""
self.app.logger.thread_locals = logger_thread_locals
for node in nodes:
try:
start_node_timing = time.time()
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(node['ip'], node['port'],
node['device'], part, method, path,
headers=headers, query_string=query)
conn.node = node
self.app.set_node_timing(node, time.time() - start_node_timing)
with Timeout(self.app.node_timeout):
resp = conn.getresponse()
if not is_informational(resp.status) and \
not is_server_error(resp.status):
return resp.status, resp.reason, resp.getheaders(), \
resp.read()
elif resp.status == HTTP_INSUFFICIENT_STORAGE:
self.error_limit(node, _('ERROR Insufficient Storage'))
except (Exception, Timeout):
self.exception_occurred(node, self.server_type,
_('Trying to %(method)s %(path)s') %
{'method': method, 'path': path})
def make_requests(self, req, ring, part, method, path, headers,
query_string=''):
"""
Sends an HTTP request to multiple nodes and aggregates the results.
It attempts the primary nodes concurrently, then iterates over the
handoff nodes as needed.
:param req: a request sent by the client
:param ring: the ring used for finding backend servers
:param part: the partition number
:param method: the method to send to the backend
:param path: the path to send to the backend
:param headers: a list of dicts, where each dict represents one
backend request that should be made.
:param query_string: optional query string to send to the backend
:returns: a swob.Response object
"""
start_nodes = ring.get_part_nodes(part)
nodes = GreenthreadSafeIterator(self.iter_nodes(ring, part))
pile = GreenPile(len(start_nodes))
for head in headers:
pile.spawn(self._make_request, nodes, part, method, path,
head, query_string, self.app.logger.thread_locals)
response = [resp for resp in pile if resp]
while len(response) < len(start_nodes):
response.append((HTTP_SERVICE_UNAVAILABLE, '', '', ''))
statuses, reasons, resp_headers, bodies = zip(*response)
return self.best_response(req, statuses, reasons, bodies,
'%s %s' % (self.server_type, req.method),
headers=resp_headers)
def best_response(self, req, statuses, reasons, bodies, server_type,
etag=None, headers=None):
"""
Given a list of responses from several servers, choose the best to
return to the API.
:param req: swob.Request object
:param statuses: list of statuses returned
:param reasons: list of reasons for each status
:param bodies: bodies of each response
:param server_type: type of server the responses came from
:param etag: etag
:param headers: headers of each response
:returns: swob.Response object with the correct status, body, etc. set
"""
resp = Response(request=req)
if len(statuses):
for hundred in (HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST):
hstatuses = \
[s for s in statuses if hundred <= s < hundred + 100]
if len(hstatuses) >= quorum_size(len(statuses)):
status = max(hstatuses)
status_index = statuses.index(status)
resp.status = '%s %s' % (status, reasons[status_index])
resp.body = bodies[status_index]
if headers:
update_headers(resp, headers[status_index])
if etag:
resp.headers['etag'] = etag.strip('"')
return resp
self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'),
{'type': server_type, 'statuses': statuses})
        resp.status = '503 Service Unavailable'
return resp
@public
def GET(self, req):
"""
Handler for HTTP GET requests.
:param req: The client request
:returns: the response to the client
"""
return self.GETorHEAD(req)
@public
def HEAD(self, req):
"""
Handler for HTTP HEAD requests.
:param req: The client request
:returns: the response to the client
"""
return self.GETorHEAD(req)
def _make_app_iter_reader(self, node, source, queue, logger_thread_locals):
"""
Reads from the source and places data in the queue. It expects
        something else to be reading from the queue and, if nothing does within
self.app.client_timeout seconds, the process will be aborted.
:param node: The node dict that the source is connected to, for
logging/error-limiting purposes.
:param source: The httplib.Response object to read from.
:param queue: The eventlet.queue.Queue to place read source data into.
:param logger_thread_locals: The thread local values to be set on the
self.app.logger to retain transaction
logging information.
"""
self.app.logger.thread_locals = logger_thread_locals
success = True
try:
try:
while True:
with ChunkReadTimeout(self.app.node_timeout):
chunk = source.read(self.app.object_chunk_size)
if not chunk:
break
queue.put(chunk, timeout=self.app.client_timeout)
except Full:
self.app.logger.warn(
_('Client did not read from queue within %ss') %
self.app.client_timeout)
self.app.logger.increment('client_timeouts')
success = False
except (Exception, Timeout):
self.exception_occurred(node, _('Object'),
_('Trying to read during GET'))
success = False
finally:
# Ensure the queue getter gets a terminator.
queue.resize(2)
queue.put(success)
# Close-out the connection as best as possible.
if getattr(source, 'swift_conn', None):
self.close_swift_conn(source)
def _make_app_iter(self, node, source):
"""
Returns an iterator over the contents of the source (via its read
func). There is also quite a bit of cleanup to ensure garbage
collection works and the underlying socket of the source is closed.
:param source: The httplib.Response object this iterator should read
from.
:param node: The node the source is reading from, for logging purposes.
"""
try:
# Spawn reader to read from the source and place in the queue.
# We then drop any reference to the source or node, for garbage
# collection purposes.
queue = Queue(1)
spawn_n(self._make_app_iter_reader, node, source, queue,
self.app.logger.thread_locals)
source = node = None
while True:
chunk = queue.get(timeout=self.app.node_timeout)
if isinstance(chunk, bool): # terminator
success = chunk
if not success:
raise Exception(_('Failed to read all data'
' from the source'))
break
yield chunk
except Empty:
raise ChunkReadTimeout()
except (GeneratorExit, Timeout):
self.app.logger.warn(_('Client disconnected on read'))
except Exception:
self.app.logger.exception(_('Trying to send to client'))
raise
def close_swift_conn(self, src):
"""
Force close the http connection to the backend.
:param src: the response from the backend
"""
try:
# Since the backends set "Connection: close" in their response
# headers, the response object (src) is solely responsible for the
# socket. The connection object (src.swift_conn) has no references
# to the socket, so calling its close() method does nothing, and
# therefore we don't do it.
#
# Also, since calling the response's close() method might not
# close the underlying socket but only decrement some
# reference-counter, we have a special method here that really,
# really kills the underlying socket with a close() syscall.
src.nuke_from_orbit() # it's the only way to be sure
except Exception:
pass
def is_good_source(self, src):
"""
Indicates whether or not the request made to the backend found
what it was looking for.
:param src: the response from the backend
:returns: True if found, False if not
"""
return is_success(src.status) or is_redirection(src.status)
def autocreate_account(self, env, account):
"""
Autocreate an account
:param env: the environment of the request leading to this autocreate
:param account: the unquoted account name
"""
partition, nodes = self.app.account_ring.get_nodes(account)
path = '/%s' % account
headers = {'X-Timestamp': normalize_timestamp(time.time()),
'X-Trans-Id': self.trans_id,
'Connection': 'close'}
resp = self.make_requests(Request.blank('/v1' + path),
self.app.account_ring, partition, 'PUT',
path, [headers] * len(nodes))
if is_success(resp.status_int):
self.app.logger.info('autocreate account %r' % path)
clear_info_cache(self.app, env, account)
else:
self.app.logger.warning('Could not autocreate account %r' % path)
def GETorHEAD_base(self, req, server_type, ring, partition, path):
"""
Base handler for HTTP GET or HEAD requests.
:param req: swob.Request object
:param server_type: server type
:param ring: the ring to obtain nodes from
:param partition: partition
:param path: path for the request
:returns: swob.Response object
"""
statuses = []
reasons = []
bodies = []
source_headers = []
sources = []
        # if x-newest is set, the proxy fetches the newest replica it can find
        # from all nodes; otherwise it stops at the first good replica
newest = config_true_value(req.headers.get('x-newest', 'f'))
headers = self.generate_request_headers(req, additional=req.headers)
for node in self.iter_nodes(ring, partition):
start_node_timing = time.time()
try:
with ConnectionTimeout(self.app.conn_timeout):
conn = http_connect(
node['ip'], node['port'], node['device'], partition,
req.method, path, headers=headers,
query_string=req.query_string)
self.app.set_node_timing(node, time.time() - start_node_timing)
with Timeout(self.app.node_timeout):
possible_source = conn.getresponse()
# See NOTE: swift_conn at top of file about this.
possible_source.swift_conn = conn
except (Exception, Timeout):
self.exception_occurred(
node, server_type, _('Trying to %(method)s %(path)s') %
{'method': req.method, 'path': req.path})
continue
if self.is_good_source(possible_source):
# 404 if we know we don't have a synced copy
if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
statuses.append(HTTP_NOT_FOUND)
reasons.append('')
bodies.append('')
source_headers.append('')
self.close_swift_conn(possible_source)
else:
statuses.append(possible_source.status)
reasons.append(possible_source.reason)
bodies.append('')
source_headers.append('')
sources.append((possible_source, node))
if not newest: # one good source is enough
break
else:
statuses.append(possible_source.status)
reasons.append(possible_source.reason)
bodies.append(possible_source.read())
source_headers.append(possible_source.getheaders())
if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
self.error_limit(node, _('ERROR Insufficient Storage'))
elif is_server_error(possible_source.status):
self.error_occurred(node, _('ERROR %(status)d %(body)s '
'From %(type)s Server') %
{'status': possible_source.status,
'body': bodies[-1][:1024],
'type': server_type})
res = None
if sources:
sources.sort(key=lambda s: source_key(s[0]))
            # pop() now yields the source with the newest 'x-put-timestamp' or 'x-timestamp'
source, node = sources.pop()
for src, _junk in sources:
self.close_swift_conn(src)
res = Response(request=req)
if req.method == 'GET' and \
source.status in (HTTP_OK, HTTP_PARTIAL_CONTENT):
res.app_iter = self._make_app_iter(node, source)
# See NOTE: swift_conn at top of file about this.
res.swift_conn = source.swift_conn
res.status = source.status
update_headers(res, source.getheaders())
if not res.environ:
res.environ = {}
res.environ['swift_x_timestamp'] = \
source.getheader('x-timestamp')
res.accept_ranges = 'bytes'
res.content_length = source.getheader('Content-Length')
if source.getheader('Content-Type'):
res.charset = None
res.content_type = source.getheader('Content-Type')
if not res:
res = self.best_response(req, statuses, reasons, bodies,
'%s %s' % (server_type, req.method),
headers=source_headers)
try:
(account, container) = split_path(req.path_info, 1, 2)
_set_info_cache(self.app, req.environ, account, container, res)
except ValueError:
pass
try:
(account, container, obj) = split_path(req.path_info, 3, 3, True)
_set_object_info_cache(self.app, req.environ, account,
container, obj, res)
except ValueError:
pass
return res
def is_origin_allowed(self, cors_info, origin):
"""
Is the given Origin allowed to make requests to this resource
:param cors_info: the resource's CORS related metadata headers
:param origin: the origin making the request
:return: True or False
"""
allowed_origins = set()
if cors_info.get('allow_origin'):
allowed_origins.update(
[a.strip()
for a in cors_info['allow_origin'].split(' ')
if a.strip()])
if self.app.cors_allow_origin:
allowed_origins.update(self.app.cors_allow_origin)
return origin in allowed_origins or '*' in allowed_origins
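    # Example with made-up metadata: the allow list is space separated and '*'
    # acts as a wildcard; an empty list falls back to app.cors_allow_origin:
    #
    #   cors = {'allow_origin': 'http://foo.example http://bar.example'}
    #   self.is_origin_allowed(cors, 'http://foo.example')             # True
    #   self.is_origin_allowed({'allow_origin': '*'}, 'http://x.test') # True
    #   self.is_origin_allowed({}, 'http://foo.example')
    #   # True only if the proxy's cors_allow_origin config allows it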
@public
def OPTIONS(self, req):
"""
Base handler for OPTIONS requests
:param req: swob.Request object
:returns: swob.Response object
"""
# Prepare the default response
headers = {'Allow': ', '.join(self.allowed_methods)}
resp = Response(status=200, request=req, headers=headers)
# If this isn't a CORS pre-flight request then return now
req_origin_value = req.headers.get('Origin', None)
if not req_origin_value:
return resp
# This is a CORS preflight request so check it's allowed
try:
container_info = \
self.container_info(self.account_name,
self.container_name, req)
except AttributeError:
# This should only happen for requests to the Account. A future
# change could allow CORS requests to the Account level as well.
return resp
cors = container_info.get('cors', {})
# If the CORS origin isn't allowed return a 401
if not self.is_origin_allowed(cors, req_origin_value) or (
req.headers.get('Access-Control-Request-Method') not in
self.allowed_methods):
resp.status = HTTP_UNAUTHORIZED
return resp
# Allow all headers requested in the request. The CORS
# specification does leave the door open for this, as mentioned in
# http://www.w3.org/TR/cors/#resource-preflight-requests
        # Note: since the list of requested headers can be unbounded,
        # simply echoing them back is acceptable.
allow_headers = set()
if req.headers.get('Access-Control-Request-Headers'):
allow_headers.update(
list_from_csv(req.headers['Access-Control-Request-Headers']))
# Populate the response with the CORS preflight headers
headers['access-control-allow-origin'] = req_origin_value
if cors.get('max_age') is not None:
headers['access-control-max-age'] = cors.get('max_age')
headers['access-control-allow-methods'] = \
', '.join(self.allowed_methods)
if allow_headers:
headers['access-control-allow-headers'] = ', '.join(allow_headers)
resp.headers = headers
return resp
|
|
# -*- coding: utf-8 -*-
import copy
import datetime
from blist import sortedlist
from util import add_raw_postfix
from util import dt_to_ts
from util import EAException
from util import elastalert_logger
from util import elasticsearch_client
from util import format_index
from util import hashable
from util import lookup_es_key
from util import new_get_event_ts
from util import pretty_ts
from util import ts_now
from util import ts_to_dt
class RuleType(object):
""" The base class for a rule type.
The class must implement add_data and add any matches to self.matches.
:param rules: A rule configuration.
"""
required_options = frozenset()
def __init__(self, rules, args=None):
self.matches = []
self.rules = rules
self.occurrences = {}
self.rules['owner'] = self.rules.get('owner', '')
self.rules['priority'] = self.rules.get('priority', '2')
def add_data(self, data):
""" The function that the ElastAlert client calls with results from ES.
Data is a list of dictionaries, from Elasticsearch.
:param data: A list of events, each of which is a dictionary of terms.
"""
raise NotImplementedError()
def add_match(self, event):
""" This function is called on all matching events. Rules use it to add
extra information about the context of a match. Event is a dictionary
containing terms directly from Elasticsearch and alerts will report
all of the information.
:param event: The matching event, a dictionary of terms.
"""
# Convert datetime's back to timestamps
ts = self.rules.get('timestamp_field')
if ts in event:
event[ts] = dt_to_ts(event[ts])
self.matches.append(event)
def get_match_str(self, match):
""" Returns a string that gives more context about a match.
:param match: The matching event, a dictionary of terms.
:return: A user facing string describing the match.
"""
return ''
def garbage_collect(self, timestamp):
        Gets called periodically to remove old data that is useless beyond the given timestamp.
May also be used to compute things in the absence of new data.
:param timestamp: A timestamp indicating the rule has been run up to that point.
"""
pass
def add_count_data(self, counts):
""" Gets called when a rule has use_count_query set to True. Called to add data from querying to the rule.
:param counts: A dictionary mapping timestamps to hit counts.
"""
raise NotImplementedError()
def add_terms_data(self, terms):
""" Gets called when a rule has use_terms_query set to True.
:param terms: A list of buckets with a key, corresponding to query_key, and the count """
raise NotImplementedError()
class CompareRule(RuleType):
""" A base class for matching a specific term by passing it to a compare function """
required_options = frozenset(['compare_key'])
def compare(self, event):
""" An event is a match iff this returns true """
raise NotImplementedError()
def add_data(self, data):
# If compare returns true, add it as a match
for event in data:
if self.compare(event):
self.add_match(event)
class BlacklistRule(CompareRule):
""" A CompareRule where the compare function checks a given key against a blacklist """
required_options = frozenset(['compare_key', 'blacklist'])
def compare(self, event):
term = lookup_es_key(event, self.rules['compare_key'])
if term in self.rules['blacklist']:
return True
return False
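# A minimal, illustrative configuration for BlacklistRule (field names and
# values are invented):
#
#   rule = BlacklistRule({'compare_key': 'username',
#                         'blacklist': ['guest', 'anonymous'],
#                         'timestamp_field': '@timestamp'})
#   rule.add_data([{'username': 'guest'}, {'username': 'alice'}])
#   len(rule.matches)   # 1 -- only the blacklisted event matched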
class WhitelistRule(CompareRule):
""" A CompareRule where the compare function checks a given term against a whitelist """
required_options = frozenset(['compare_key', 'whitelist', 'ignore_null'])
def compare(self, event):
term = lookup_es_key(event, self.rules['compare_key'])
if term is None:
return not self.rules['ignore_null']
if term not in self.rules['whitelist']:
return True
return False
class ChangeRule(CompareRule):
""" A rule that will store values for a certain term and match if those values change """
required_options = frozenset(['query_key', 'compare_key', 'ignore_null'])
change_map = {}
occurrence_time = {}
def compare(self, event):
key = hashable(lookup_es_key(event, self.rules['query_key']))
val = lookup_es_key(event, self.rules['compare_key'])
if not val and self.rules['ignore_null']:
return False
changed = False
# If we have seen this key before, compare it to the new value
if key in self.occurrences:
changed = self.occurrences[key] != val
if changed:
self.change_map[key] = (self.occurrences[key], val)
# If using timeframe, only return true if the time delta is < timeframe
if key in self.occurrence_time:
changed = event[self.rules['timestamp_field']] - self.occurrence_time[key] <= self.rules['timeframe']
# Update the current value and time
self.occurrences[key] = val
if 'timeframe' in self.rules:
self.occurrence_time[key] = event[self.rules['timestamp_field']]
return changed
def add_match(self, match):
# TODO this is not technically correct
# if the term changes multiple times before an alert is sent
# this data will be overwritten with the most recent change
change = self.change_map.get(hashable(lookup_es_key(match, self.rules['query_key'])))
extra = {}
if change:
extra = {'old_value': change[0],
'new_value': change[1]}
super(ChangeRule, self).add_match(dict(match.items() + extra.items()))
class FrequencyRule(RuleType):
""" A rule that matches if num_events number of events occur within a timeframe """
required_options = frozenset(['num_events', 'timeframe'])
def __init__(self, *args):
super(FrequencyRule, self).__init__(*args)
self.ts_field = self.rules.get('timestamp_field', '@timestamp')
self.get_ts = new_get_event_ts(self.ts_field)
self.attach_related = self.rules.get('attach_related', False)
def add_count_data(self, data):
""" Add count data to the rule. Data should be of the form {ts: count}. """
if len(data) > 1:
raise EAException('add_count_data can only accept one count at a time')
(ts, count), = data.items()
event = ({self.ts_field: ts}, count)
self.occurrences.setdefault('all', EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event)
self.check_for_match('all')
def add_terms_data(self, terms):
for timestamp, buckets in terms.iteritems():
for bucket in buckets:
event = ({self.ts_field: timestamp,
self.rules['query_key']: bucket['key']}, bucket['doc_count'])
self.occurrences.setdefault(bucket['key'], EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event)
self.check_for_match(bucket['key'])
def add_data(self, data):
if 'query_key' in self.rules:
qk = self.rules['query_key']
else:
qk = None
for event in data:
if qk:
key = hashable(lookup_es_key(event, qk))
else:
# If no query_key, we use the key 'all' for all events
key = 'all'
# Store the timestamps of recent occurrences, per key
self.occurrences.setdefault(key, EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append((event, 1))
self.check_for_match(key, end=False)
# We call this multiple times with the 'end' parameter because subclasses
# may or may not want to check while only partial data has been added
if key in self.occurrences: # could have been emptied by previous check
self.check_for_match(key, end=True)
def check_for_match(self, key, end=False):
# Match if, after removing old events, we hit num_events.
# the 'end' parameter depends on whether this was called from the
# middle or end of an add_data call and is used in subclasses
if self.occurrences[key].count() >= self.rules['num_events']:
event = self.occurrences[key].data[-1][0]
if self.attach_related:
event['related_events'] = [data[0] for data in self.occurrences[key].data[:-1]]
self.add_match(event)
self.occurrences.pop(key)
def garbage_collect(self, timestamp):
""" Remove all occurrence data that is beyond the timeframe away """
stale_keys = []
for key, window in self.occurrences.iteritems():
if timestamp - lookup_es_key(window.data[-1][0], self.ts_field) > self.rules['timeframe']:
stale_keys.append(key)
map(self.occurrences.pop, stale_keys)
def get_match_str(self, match):
lt = self.rules.get('use_local_time')
match_ts = lookup_es_key(match, self.ts_field)
starttime = pretty_ts(dt_to_ts(ts_to_dt(match_ts) - self.rules['timeframe']), lt)
endtime = pretty_ts(match_ts, lt)
message = 'At least %d events occurred between %s and %s\n\n' % (self.rules['num_events'],
starttime,
endtime)
return message
class AnyRule(RuleType):
""" A rule that will match on any input data """
def add_data(self, data):
for datum in data:
self.add_match(datum)
class EventWindow(object):
    """ A container for holding event counts for rules which need a
    chronologically ordered event window. """
def __init__(self, timeframe, onRemoved=None, getTimestamp=new_get_event_ts('@timestamp')):
self.timeframe = timeframe
self.onRemoved = onRemoved
self.get_ts = getTimestamp
self.data = sortedlist(key=self.get_ts)
self.running_count = 0
def clear(self):
self.data = sortedlist(key=self.get_ts)
self.running_count = 0
def append(self, event):
""" Add an event to the window. Event should be of the form (dict, count).
This will also pop the oldest events and call onRemoved on them until the
window size is less than timeframe. """
self.data.add(event)
self.running_count += event[1]
while self.duration() >= self.timeframe:
oldest = self.data[0]
self.data.remove(oldest)
self.running_count -= oldest[1]
self.onRemoved and self.onRemoved(oldest)
def duration(self):
""" Get the size in timedelta of the window. """
if not self.data:
return datetime.timedelta(0)
return self.get_ts(self.data[-1]) - self.get_ts(self.data[0])
def count(self):
""" Count the number of events in the window. """
return self.running_count
def __iter__(self):
return iter(self.data)
    def append_middle(self, event):
        """ Insert an event into its chronological position in the window.
        Since self.data is a sortedlist keyed on the event timestamp, add()
        places the event correctly without any manual rotation. """
        self.data.add(event)
        self.running_count += event[1]
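# Small usage sketch for EventWindow (timestamps and counts are invented):
# events older than `timeframe` relative to the newest event are dropped as
# new ones arrive.
#
#   import datetime
#   get_ts = lambda pair: pair[0]['@timestamp']
#   w = EventWindow(datetime.timedelta(minutes=5), getTimestamp=get_ts)
#   t0 = datetime.datetime(2015, 1, 1, 12, 0, 0)
#   w.append(({'@timestamp': t0}, 1))
#   w.append(({'@timestamp': t0 + datetime.timedelta(minutes=10)}, 1))
#   w.count()   # 1 -- the first event fell outside the 5 minute window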
class SpikeRule(RuleType):
""" A rule that uses two sliding windows to compare relative event frequency. """
required_options = frozenset(['timeframe', 'spike_height', 'spike_type'])
def __init__(self, *args):
super(SpikeRule, self).__init__(*args)
self.timeframe = self.rules['timeframe']
self.ref_windows = {}
self.cur_windows = {}
self.ts_field = self.rules.get('timestamp_field', '@timestamp')
self.get_ts = new_get_event_ts(self.ts_field)
self.first_event = {}
self.skip_checks = {}
self.ref_window_filled_once = False
def add_count_data(self, data):
""" Add count data to the rule. Data should be of the form {ts: count}. """
if len(data) > 1:
raise EAException('add_count_data can only accept one count at a time')
for ts, count in data.iteritems():
self.handle_event({self.ts_field: ts}, count, 'all')
def add_terms_data(self, terms):
for timestamp, buckets in terms.iteritems():
for bucket in buckets:
count = bucket['doc_count']
event = {self.ts_field: timestamp,
self.rules['query_key']: bucket['key']}
key = bucket['key']
self.handle_event(event, count, key)
def add_data(self, data):
for event in data:
qk = self.rules.get('query_key', 'all')
if qk != 'all':
qk = hashable(lookup_es_key(event, qk))
if qk is None:
qk = 'other'
self.handle_event(event, 1, qk)
def clear_windows(self, qk, event):
# Reset the state and prevent alerts until windows filled again
self.cur_windows[qk].clear()
self.ref_windows[qk].clear()
self.first_event.pop(qk)
self.skip_checks[qk] = event[self.ts_field] + self.rules['timeframe'] * 2
def handle_event(self, event, count, qk='all'):
self.first_event.setdefault(qk, event)
self.ref_windows.setdefault(qk, EventWindow(self.timeframe, getTimestamp=self.get_ts))
self.cur_windows.setdefault(qk, EventWindow(self.timeframe, self.ref_windows[qk].append, self.get_ts))
self.cur_windows[qk].append((event, count))
# Don't alert if ref window has not yet been filled for this key AND
if event[self.ts_field] - self.first_event[qk][self.ts_field] < self.rules['timeframe'] * 2:
# ElastAlert has not been running long enough for any alerts OR
if not self.ref_window_filled_once:
return
# This rule is not using alert_on_new_data (with query_key) OR
if not (self.rules.get('query_key') and self.rules.get('alert_on_new_data')):
return
# An alert for this qk has recently fired
if qk in self.skip_checks and event[self.ts_field] < self.skip_checks[qk]:
return
else:
self.ref_window_filled_once = True
if self.find_matches(self.ref_windows[qk].count(), self.cur_windows[qk].count()):
# skip over placeholder events which have count=0
for match, count in self.cur_windows[qk].data:
if count:
break
self.add_match(match, qk)
self.clear_windows(qk, match)
def add_match(self, match, qk):
extra_info = {}
spike_count = self.cur_windows[qk].count()
reference_count = self.ref_windows[qk].count()
extra_info = {'spike_count': spike_count,
'reference_count': reference_count}
match = dict(match.items() + extra_info.items())
super(SpikeRule, self).add_match(match)
def find_matches(self, ref, cur):
""" Determines if an event spike or dip happening. """
# Apply threshold limits
if (cur < self.rules.get('threshold_cur', 0) or
ref < self.rules.get('threshold_ref', 0)):
return False
spike_up, spike_down = False, False
if cur <= ref / self.rules['spike_height']:
spike_down = True
if cur >= ref * self.rules['spike_height']:
spike_up = True
if (self.rules['spike_type'] in ['both', 'up'] and spike_up) or \
(self.rules['spike_type'] in ['both', 'down'] and spike_down):
return True
return False
def get_match_str(self, match):
message = 'An abnormal number (%d) of events occurred around %s.\n' % (match['spike_count'],
pretty_ts(match[self.rules['timestamp_field']], self.rules.get('use_local_time')))
message += 'Preceding that time, there were only %d events within %s\n\n' % (match['reference_count'], self.rules['timeframe'])
return message
def garbage_collect(self, ts):
# Windows are sized according to their newest event
# This is a placeholder to accurately size windows in the absence of events
for qk in self.cur_windows.keys():
# If we haven't seen this key in a long time, forget it
if qk != 'all' and self.ref_windows[qk].count() == 0 and self.cur_windows[qk].count() == 0:
self.cur_windows.pop(qk)
self.ref_windows.pop(qk)
continue
placeholder = {self.ts_field: ts}
# The placeholder may trigger an alert, in which case, qk will be expected
if qk != 'all':
placeholder.update({self.rules['query_key']: qk})
self.handle_event(placeholder, 0, qk)
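# Worked example (standalone sketch, not part of ElastAlert): the threshold
# arithmetic used by SpikeRule.find_matches above. spike_height=3 and the
# counts are invented; float counts avoid Python 2 integer division.
def _spike_threshold_example():
    spike_height = 3
    ref, cur = 10.0, 31.0
    spike_up = cur >= ref * spike_height      # 31 >= 30  -> True
    spike_down = cur <= ref / spike_height    # 31 <= 3.3 -> False
    # With spike_type 'both' or 'up', this combination would produce a match
    return spike_up or spike_down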
class FlatlineRule(FrequencyRule):
""" A rule that matches when there is a low number of events given a timeframe. """
required_options = frozenset(['timeframe', 'threshold'])
def __init__(self, *args):
super(FlatlineRule, self).__init__(*args)
self.threshold = self.rules['threshold']
# Dictionary mapping query keys to the first events
self.first_event = {}
def check_for_match(self, key, end=True):
# This function gets called between every added document with end=True after the last
# We ignore the calls before the end because they may trigger false positives
if not end:
return
most_recent_ts = self.get_ts(self.occurrences[key].data[-1])
if self.first_event.get(key) is None:
self.first_event[key] = most_recent_ts
# Don't check for matches until timeframe has elapsed
if most_recent_ts - self.first_event[key] < self.rules['timeframe']:
return
# Match if, after removing old events, the count has dropped below the threshold
count = self.occurrences[key].count()
if count < self.rules['threshold']:
# Do a deep-copy, otherwise we lose the datetime type in the timestamp field of the last event
event = copy.deepcopy(self.occurrences[key].data[-1][0])
event.update(key=key, count=count)
self.add_match(event)
# After adding this match, leave the occurrences windows alone since it will
# be pruned in the next add_data or garbage_collect, but reset the first_event
# so that alerts continue to fire until the threshold is passed again.
least_recent_ts = self.get_ts(self.occurrences[key].data[0])
timeframe_ago = most_recent_ts - self.rules['timeframe']
self.first_event[key] = min(least_recent_ts, timeframe_ago)
def get_match_str(self, match):
ts = match[self.rules['timestamp_field']]
lt = self.rules.get('use_local_time')
message = 'An abnormally low number of events occurred around %s.\n' % (pretty_ts(ts, lt))
message += 'Between %s and %s, there were fewer than %s events.\n\n' % (pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']), lt),
pretty_ts(ts, lt),
self.rules['threshold'])
return message
def garbage_collect(self, ts):
# We add an event with a count of zero to the EventWindow for each key. This will cause the EventWindow
# to remove events that occurred more than one `timeframe` ago, and call onRemoved on them.
default = ['all'] if 'query_key' not in self.rules else []
for key in self.occurrences.keys() or default:
self.occurrences.setdefault(key, EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(({self.ts_field: ts}, 0))
self.first_event.setdefault(key, ts)
self.check_for_match(key)
class NewTermsRule(RuleType):
""" Alerts on a new value in a list of fields. """
def __init__(self, rule, args=None):
super(NewTermsRule, self).__init__(rule, args)
self.seen_values = {}
# Allow the use of query_key or fields
if 'fields' not in self.rules:
if 'query_key' not in self.rules:
raise EAException("fields or query_key must be specified")
self.fields = self.rules['query_key']
else:
self.fields = self.rules['fields']
if not self.fields:
raise EAException("fields must not be an empty list")
if type(self.fields) != list:
self.fields = [self.fields]
if self.rules.get('use_terms_query') and (
len(self.fields) != 1 or len(self.fields) == 1 and type(self.fields[0]) == list
):
raise EAException("use_terms_query can only be used with a single non-composite field")
try:
self.get_all_terms(args)
except Exception as e:
# Refuse to start if we cannot get existing terms
raise EAException('Error searching for existing terms: %s' % (repr(e)))
def get_all_terms(self, args):
""" Performs a terms aggregation for each field to get every existing term. """
self.es = elasticsearch_client(self.rules)
window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30}))
field_name = {"field": "", "size": 2147483647} # Integer.MAX_VALUE
query_template = {"aggs": {"values": {"terms": field_name}}}
if args and args.start:
end = ts_to_dt(args.start)
else:
end = ts_now()
start = end - window_size
step = datetime.timedelta(**self.rules.get('window_step_size', {'days': 1}))
for field in self.fields:
tmp_start = start
tmp_end = min(start + step, end)
time_filter = {self.rules['timestamp_field']: {'lt': dt_to_ts(tmp_end), 'gte': dt_to_ts(tmp_start)}}
query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
query = {'aggs': {'filtered': query_template}}
# For composite keys, we will need to perform sub-aggregations
if type(field) == list:
self.seen_values.setdefault(tuple(field), [])
level = query_template['aggs']
# Iterate on each part of the composite key and add a sub aggs clause to the elastic search query
for i, sub_field in enumerate(field):
level['values']['terms']['field'] = add_raw_postfix(sub_field)
if i < len(field) - 1:
# If we have more fields after the current one, then set up the next nested structure
level['values']['aggs'] = {'values': {'terms': copy.deepcopy(field_name)}}
level = level['values']['aggs']
else:
self.seen_values.setdefault(field, [])
# For non-composite keys, only a single agg is needed
field_name['field'] = add_raw_postfix(field)
# Query the entire time range in small chunks
while tmp_start < end:
if self.rules.get('use_strftime_index'):
index = format_index(self.rules['index'], tmp_start, tmp_end)
else:
index = self.rules['index']
res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout='50s')
if 'aggregations' in res:
buckets = res['aggregations']['filtered']['values']['buckets']
if type(field) == list:
# For composite keys, make the lookup based on all fields
# Make it a tuple since it can be hashed and used in dictionary lookups
for bucket in buckets:
# We need to walk down the hierarchy and obtain the value at each level
self.seen_values[tuple(field)] += self.flatten_aggregation_hierarchy(bucket)
else:
keys = [bucket['key'] for bucket in buckets]
self.seen_values[field] += keys
else:
self.seen_values.setdefault(field, [])
if tmp_start == tmp_end:
break
tmp_start = tmp_end
tmp_end = min(tmp_start + step, end)
time_filter[self.rules['timestamp_field']] = {'lt': dt_to_ts(tmp_end), 'gte': dt_to_ts(tmp_start)}
for key, values in self.seen_values.iteritems():
if not values:
if type(key) == tuple:
# If we don't have any results, it could either be because of the absence of any baseline data
# OR it may be because the composite key contained a non-primitive type. Either way, give the
# end-users a heads up to help them debug what might be going on.
elastalert_logger.warning((
'No results were found from all sub-aggregations. This can either indicate that there is '
'no baseline data OR that a non-primitive field was used in a composite key.'
))
else:
elastalert_logger.info('Found no values for %s' % (key))
continue
self.seen_values[key] = list(set(values))
elastalert_logger.info('Found %s unique values for %s' % (len(values), key))
def flatten_aggregation_hierarchy(self, root, hierarchy_tuple=()):
""" For nested aggregations, the results come back in the following format:
{
"aggregations" : {
"filtered" : {
"doc_count" : 37,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "1.1.1.1", # IP address (root)
"doc_count" : 13,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "80", # Port (sub-aggregation)
"doc_count" : 3,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "ack", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
}, {
"key" : "syn", # Reason (sub-aggregation, leaf-node)
"doc_count" : 1
} ]
}
}, {
"key" : "82", # Port (sub-aggregation)
"doc_count" : 3,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "ack", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
}, {
"key" : "syn", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
} ]
}
} ]
}
}, {
"key" : "2.2.2.2", # IP address (root)
"doc_count" : 4,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "443", # Port (sub-aggregation)
"doc_count" : 3,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "ack", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
}, {
"key" : "syn", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
} ]
}
} ]
}
} ]
}
}
}
}
Each level will either have more values and buckets, or it will be a leaf node
We'll ultimately return a flattened list with the hierarchies appended as strings,
e.g the above snippet would yield a list with:
[
('1.1.1.1', '80', 'ack'),
('1.1.1.1', '80', 'syn'),
('1.1.1.1', '82', 'ack'),
('1.1.1.1', '82', 'syn'),
('2.2.2.2', '443', 'ack'),
('2.2.2.2', '443', 'syn')
]
A similar formatting will be performed in the add_data method and used as the basis for comparison
"""
results = []
# There are more aggregation hierarchies left. Traverse them.
if 'values' in root:
results += self.flatten_aggregation_hierarchy(root['values']['buckets'], hierarchy_tuple + (root['key'],))
else:
# We've gotten to a sub-aggregation, which may have further sub-aggregations
# See if we need to traverse further
for node in root:
if 'values' in node:
results += self.flatten_aggregation_hierarchy(node, hierarchy_tuple)
else:
results.append(hierarchy_tuple + (node['key'],))
return results
def add_data(self, data):
for document in data:
for field in self.fields:
value = ()
lookup_field = field
if type(field) == list:
# For composite keys, make the lookup based on all fields
# Make it a tuple since it can be hashed and used in dictionary lookups
lookup_field = tuple(field)
for sub_field in field:
lookup_result = lookup_es_key(document, sub_field)
if not lookup_result:
value = None
break
value += (lookup_result,)
else:
value = lookup_es_key(document, field)
if not value and self.rules.get('alert_on_missing_field'):
document['missing_field'] = lookup_field
self.add_match(copy.deepcopy(document))
elif value:
if value not in self.seen_values[lookup_field]:
document['new_field'] = lookup_field
self.add_match(copy.deepcopy(document))
self.seen_values[lookup_field].append(value)
def add_terms_data(self, terms):
# With terms query, len(self.fields) is always 1 and the 0'th entry is always a string
field = self.fields[0]
for timestamp, buckets in terms.iteritems():
for bucket in buckets:
if bucket['doc_count']:
if bucket['key'] not in self.seen_values[field]:
match = {field: bucket['key'],
self.rules['timestamp_field']: timestamp,
'new_field': field}
self.add_match(match)
self.seen_values[field].append(bucket['key'])
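# Standalone sketch (not part of ElastAlert): how get_all_terms above walks the
# terms window in window_step_size chunks. Dates and step are invented; a
# three-day window with one-day steps yields three consecutive chunks.
def _window_chunk_example():
    import datetime
    end = datetime.datetime(2015, 1, 4)
    start = end - datetime.timedelta(days=3)
    step = datetime.timedelta(days=1)
    chunks = []
    tmp_start, tmp_end = start, min(start + step, end)
    while tmp_start < end:
        chunks.append((tmp_start, tmp_end))
        if tmp_start == tmp_end:
            break
        tmp_start, tmp_end = tmp_end, min(tmp_end + step, end)
    return chunks   # three consecutive one-day (start, end) pairs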
class CardinalityRule(RuleType):
""" A rule that matches if cardinality of a field is above or below a threshold within a timeframe """
required_options = frozenset(['timeframe', 'cardinality_field'])
def __init__(self, *args):
super(CardinalityRule, self).__init__(*args)
if 'max_cardinality' not in self.rules and 'min_cardinality' not in self.rules:
raise EAException("CardinalityRule must have one of either max_cardinality or min_cardinality")
self.ts_field = self.rules.get('timestamp_field', '@timestamp')
self.cardinality_field = self.rules['cardinality_field']
self.cardinality_cache = {}
self.first_event = {}
self.timeframe = self.rules['timeframe']
def add_data(self, data):
qk = self.rules.get('query_key')
for event in data:
if qk:
key = hashable(lookup_es_key(event, qk))
else:
# If no query_key, we use the key 'all' for all events
key = 'all'
self.cardinality_cache.setdefault(key, {})
self.first_event.setdefault(key, event[self.ts_field])
value = hashable(lookup_es_key(event, self.cardinality_field))
if value is not None:
# Store this timestamp as most recent occurrence of the term
self.cardinality_cache[key][value] = event[self.ts_field]
self.check_for_match(key, event)
def check_for_match(self, key, event, gc=True):
# Check to see if we are past max/min_cardinality for a given key
timeframe_elapsed = event[self.ts_field] - self.first_event.get(key, event[self.ts_field]) > self.timeframe
if (len(self.cardinality_cache[key]) > self.rules.get('max_cardinality', float('inf')) or
(len(self.cardinality_cache[key]) < self.rules.get('min_cardinality', float('-inf')) and timeframe_elapsed)):
# If there might be a match, run garbage collect first, as outdated terms are only removed in GC
# Only run it if there might be a match so it doesn't impact performance
if gc:
self.garbage_collect(event[self.ts_field])
self.check_for_match(key, event, False)
else:
self.first_event.pop(key, None)
self.add_match(event)
def garbage_collect(self, timestamp):
""" Remove all occurrence data that is beyond the timeframe away """
for qk, terms in self.cardinality_cache.items():
for term, last_occurence in terms.items():
if timestamp - last_occurence > self.rules['timeframe']:
self.cardinality_cache[qk].pop(term)
# Create a placeholder event in case a min_cardinality match has occurred
if 'min_cardinality' in self.rules:
event = {self.ts_field: timestamp}
if 'query_key' in self.rules:
event.update({self.rules['query_key']: qk})
self.check_for_match(qk, event, False)
def get_match_str(self, match):
lt = self.rules.get('use_local_time')
starttime = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules['timeframe']), lt)
endtime = pretty_ts(match[self.ts_field], lt)
if 'max_cardinality' in self.rules:
message = ('A maximum of %d unique %s(s) occurred since last alert or between %s and %s\n\n' % (self.rules['max_cardinality'],
self.rules['cardinality_field'],
starttime, endtime))
else:
message = ('Less than %d unique %s(s) occurred since last alert or between %s and %s\n\n' % (self.rules['min_cardinality'],
self.rules['cardinality_field'],
starttime, endtime))
return message
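# Worked example (standalone sketch, not part of ElastAlert): the core
# comparison made by CardinalityRule.check_for_match. max_cardinality and the
# user values below are invented for illustration.
def _cardinality_example():
    max_cardinality = 2
    cache = {}                                 # mirrors cardinality_cache[key]
    for user in ('alice', 'bob', 'carol'):
        cache[user] = '2015-01-01T00:00:00Z'   # last time each value was seen
    return len(cache) > max_cardinality        # True -> a match would be added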
|
|
#
# SimpleXMLWriter
# $Id: SimpleXMLWriter.py 2312 2005-03-02 18:13:39Z fredrik $
#
# a simple XML writer
#
# history:
# 2001-12-28 fl created
# 2002-11-25 fl fixed attribute encoding
# 2002-12-02 fl minor fixes for 1.5.2
# 2004-06-17 fl added pythondoc markup
# 2004-07-23 fl added flush method (from Jay Graves)
# 2004-10-03 fl added declaration method
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The SimpleXMLWriter module is
#
# Copyright (c) 2001-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Tools to write XML files, without having to deal with encoding
# issues, well-formedness, etc.
# <p>
# The current version does not provide built-in support for
# namespaces. To create files using namespaces, you have to provide
# "xmlns" attributes and explicitly add prefixes to tags and
# attributes.
#
# <h3>Patterns</h3>
#
# The following example generates a small XHTML document.
# <pre>
#
# from elementtree.SimpleXMLWriter import XMLWriter
# import sys
#
# w = XMLWriter(sys.stdout)
#
# html = w.start("html")
#
# w.start("head")
# w.element("title", "my document")
# w.element("meta", name="generator", value="my application 1.0")
# w.end()
#
# w.start("body")
# w.element("h1", "this is a heading")
# w.element("p", "this is a paragraph")
#
# w.start("p")
# w.data("this is ")
# w.element("b", "bold")
# w.data(" and ")
# w.element("i", "italic")
# w.data(".")
# w.end("p")
#
# w.close(html)
# </pre>
##
import re, sys, string
try:
unicode("")
except NameError:
def encode(s, encoding):
# 1.5.2: application must use the right encoding
return s
_escape = re.compile(r"[&<>\"\x80-\xff]+") # 1.5.2
else:
def encode(s, encoding):
return s.encode(encoding)
_escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
def encode_entity(text, pattern=_escape):
# map reserved and non-ascii characters to numerical entities
def escape_entities(m):
out = []
for char in m.group():
out.append("&#%d;" % ord(char))
return string.join(out, "")
return encode(pattern.sub(escape_entities, text), "ascii")
del _escape
#
# the following functions assume an ascii-compatible encoding
# (or "utf-16")
def escape_cdata(s, encoding=None, replace=string.replace):
s = replace(s, "&", "&")
s = replace(s, "<", "<")
s = replace(s, ">", ">")
if encoding:
try:
return encode(s, encoding)
except UnicodeError:
return encode_entity(s)
return s
def escape_attrib(s, encoding=None, replace=string.replace):
s = replace(s, "&", "&")
s = replace(s, "'", "'")
s = replace(s, "\"", """)
s = replace(s, "<", "<")
s = replace(s, ">", ">")
if encoding:
try:
return encode(s, encoding)
except UnicodeError:
return encode_entity(s)
return s
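# Quick usage sketch (not part of the original module): what the escape
# helpers above produce for plain ASCII input.
def _escape_examples():
    assert escape_cdata("x < y & z") == "x &lt; y &amp; z"
    assert escape_attrib('say "hi" & <bye>') == "say &quot;hi&quot; &amp; &lt;bye&gt;"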
##
# XML writer class.
#
# @param file A file or file-like object. This object must implement
# a <b>write</b> method that takes an 8-bit string.
# @param encoding Optional encoding.
class XMLWriter:
def __init__(self, file, encoding="us-ascii"):
if not hasattr(file, "write"):
file = open(file, "w")
self.__write = file.write
if hasattr(file, "flush"):
self.flush = file.flush
self.__open = 0 # true if start tag is open
self.__tags = []
self.__data = []
self.__encoding = encoding
def __flush(self):
# flush internal buffers
if self.__open:
self.__write(">")
self.__open = 0
if self.__data:
data = string.join(self.__data, "")
self.__write(escape_cdata(data, self.__encoding))
self.__data = []
##
# Writes an XML declaration.
def declaration(self):
encoding = self.__encoding
if encoding == "us-ascii" or encoding == "utf-8":
self.__write("<?xml version='1.0'?>\n")
else:
self.__write("<?xml version='1.0' encoding='%s'?>\n" % encoding)
##
# Opens a new element. Attributes can be given as keyword
# arguments, or as a string/string dictionary. You can pass in
# 8-bit strings or Unicode strings; the former are assumed to use
# the encoding passed to the constructor. The method returns an
# opaque identifier that can be passed to the <b>close</b> method,
# to close all open elements up to and including this one.
#
# @param tag Element tag.
# @param attrib Attribute dictionary. Alternatively, attributes
# can be given as keyword arguments.
# @return An element identifier.
def start(self, tag, attrib={}, **extra):
self.__flush()
tag = escape_cdata(tag, self.__encoding)
self.__data = []
self.__tags.append(tag)
self.__write("<%s" % tag)
if attrib or extra:
attrib = attrib.copy()
attrib.update(extra)
attrib = attrib.items()
attrib.sort()
for k, v in attrib:
k = escape_cdata(k, self.__encoding)
v = escape_attrib(v, self.__encoding)
self.__write(" %s=\"%s\"" % (k, v))
self.__open = 1
return len(self.__tags)-1
##
# Adds a comment to the output stream.
#
# @param comment Comment text, as an 8-bit string or Unicode string.
def comment(self, comment):
self.__flush()
self.__write("<!-- %s -->\n" % escape_cdata(comment, self.__encoding))
##
# Adds character data to the output stream.
#
# @param text Character data, as an 8-bit string or Unicode string.
def data(self, text):
self.__data.append(text)
##
# Adds unparsed character data to the output stream.
#
# @param text Character data, as an 8-bit string or Unicode string.
def cdata(self, text):
try:
text = encode(text, self.__encoding)
except UnicodeError:
text = encode_entity(text)
self.__flush()
self.__write('<![CDATA[%s]]>' % text)
##
# Closes the current element (opened by the most recent call to
# <b>start</b>).
#
# @param tag Element tag. If given, the tag must match the start
# tag. If omitted, the current element is closed.
def end(self, tag=None):
if tag:
assert self.__tags, "unbalanced end(%s)" % tag
assert escape_cdata(tag, self.__encoding) == self.__tags[-1],\
"expected end(%s), got %s" % (self.__tags[-1], tag)
else:
assert self.__tags, "unbalanced end()"
tag = self.__tags.pop()
if self.__data:
self.__flush()
elif self.__open:
self.__open = 0
self.__write(" />")
return
self.__write("</%s>" % tag)
##
# Closes open elements, up to (and including) the element identified
# by the given identifier.
#
# @param id Element identifier, as returned by the <b>start</b> method.
def close(self, id):
while len(self.__tags) > id:
self.end()
##
# Adds an entire element. This is the same as calling <b>start</b>,
# <b>data</b>, and <b>end</b> in sequence. The <b>text</b> argument
# can be omitted.
def element(self, tag, text=None, attrib={}, **extra):
apply(self.start, (tag, attrib), extra)
if text:
self.data(text)
self.end()
##
# Flushes the output stream.
def flush(self):
pass # replaced by the constructor
|
|
"""Storage providers backends for Memory caching."""
import re
import os
import os.path
import datetime
import json
import shutil
import warnings
import collections
import operator
import threading
from abc import ABCMeta, abstractmethod
from .backports import concurrency_safe_rename
from .disk import mkdirp, memstr_to_bytes, rm_subdirs
from . import numpy_pickle
CacheItemInfo = collections.namedtuple('CacheItemInfo',
'path size last_access')
def concurrency_safe_write(object_to_write, filename, write_func):
"""Writes an object into a unique file in a concurrency-safe way."""
thread_id = id(threading.current_thread())
temporary_filename = '{}.thread-{}-pid-{}'.format(
filename, thread_id, os.getpid())
write_func(object_to_write, temporary_filename)
return temporary_filename
class StoreBackendBase(metaclass=ABCMeta):
"""Helper Abstract Base Class which defines all methods that
a StorageBackend must implement."""
location = None
@abstractmethod
def _open_item(self, f, mode):
"""Opens an item on the store and return a file-like object.
This method is private and only used by the StoreBackendMixin object.
Parameters
----------
f: a file-like object
The file-like object where an item is stored and retrieved
mode: string, optional
the mode in which the file-like object is opened allowed valued are
'rb', 'wb'
Returns
-------
a file-like object
"""
@abstractmethod
def _item_exists(self, location):
"""Checks if an item location exists in the store.
This method is private and only used by the StoreBackendMixin object.
Parameters
----------
location: string
The location of an item. On a filesystem, this corresponds to the
absolute path, including the filename, of a file.
Returns
-------
True if the item exists, False otherwise
"""
@abstractmethod
def _move_item(self, src, dst):
"""Moves an item from src to dst in the store.
This method is private and only used by the StoreBackendMixin object.
Parameters
----------
src: string
The source location of an item
dst: string
The destination location of an item
"""
@abstractmethod
def create_location(self, location):
"""Creates a location on the store.
Parameters
----------
location: string
The location in the store. On a filesystem, this corresponds to a
directory.
"""
@abstractmethod
def clear_location(self, location):
"""Clears a location on the store.
Parameters
----------
location: string
The location in the store. On a filesystem, this corresponds to a
directory or a filename absolute path
"""
@abstractmethod
def get_items(self):
"""Returns the whole list of items available in the store.
Returns
-------
The list of items identified by their ids (e.g filename in a
filesystem).
"""
@abstractmethod
def configure(self, location, verbose=0, backend_options=dict()):
"""Configures the store.
Parameters
----------
location: string
The base location used by the store. On a filesystem, this
corresponds to a directory.
verbose: int
The level of verbosity of the store
backend_options: dict
Contains a dictionary of named parameters used to configure the
store backend.
"""
class StoreBackendMixin(object):
"""Class providing all logic for managing the store in a generic way.
The StoreBackend subclass has to implement 3 methods: create_location,
clear_location and configure. The StoreBackend also has to provide
a private _open_item, _item_exists and _move_item methods. The _open_item
method has to have the same signature as the builtin open and return a
file-like object.
"""
def load_item(self, path, verbose=1, msg=None):
"""Load an item from the store given its path as a list of
strings."""
full_path = os.path.join(self.location, *path)
if verbose > 1:
if verbose < 10:
print('{0}...'.format(msg))
else:
print('{0} from {1}'.format(msg, full_path))
mmap_mode = (None if not hasattr(self, 'mmap_mode')
else self.mmap_mode)
filename = os.path.join(full_path, 'output.pkl')
if not self._item_exists(filename):
raise KeyError("Non-existing item (may have been "
"cleared).\nFile %s does not exist" % filename)
# file-like object cannot be used when mmap_mode is set
if mmap_mode is None:
with self._open_item(filename, "rb") as f:
item = numpy_pickle.load(f)
else:
item = numpy_pickle.load(filename, mmap_mode=mmap_mode)
return item
def dump_item(self, path, item, verbose=1):
"""Dump an item in the store at the path given as a list of
strings."""
try:
item_path = os.path.join(self.location, *path)
if not self._item_exists(item_path):
self.create_location(item_path)
filename = os.path.join(item_path, 'output.pkl')
if verbose > 10:
print('Persisting in %s' % item_path)
def write_func(to_write, dest_filename):
with self._open_item(dest_filename, "wb") as f:
numpy_pickle.dump(to_write, f,
compress=self.compress)
self._concurrency_safe_write(item, filename, write_func)
except: # noqa: E722
" Race condition in the creation of the directory "
def clear_item(self, path):
"""Clear the item at the path, given as a list of strings."""
item_path = os.path.join(self.location, *path)
if self._item_exists(item_path):
self.clear_location(item_path)
def contains_item(self, path):
"""Check if there is an item at the path, given as a list of
strings"""
item_path = os.path.join(self.location, *path)
filename = os.path.join(item_path, 'output.pkl')
return self._item_exists(filename)
def get_item_info(self, path):
"""Return information about item."""
return {'location': os.path.join(self.location,
*path)}
def get_metadata(self, path):
"""Return actual metadata of an item."""
try:
item_path = os.path.join(self.location, *path)
filename = os.path.join(item_path, 'metadata.json')
with self._open_item(filename, 'rb') as f:
return json.loads(f.read().decode('utf-8'))
except: # noqa: E722
return {}
def store_metadata(self, path, metadata):
"""Store metadata of a computation."""
try:
item_path = os.path.join(self.location, *path)
self.create_location(item_path)
filename = os.path.join(item_path, 'metadata.json')
def write_func(to_write, dest_filename):
with self._open_item(dest_filename, "wb") as f:
f.write(json.dumps(to_write).encode('utf-8'))
self._concurrency_safe_write(metadata, filename, write_func)
except: # noqa: E722
pass
def contains_path(self, path):
"""Check cached function is available in store."""
func_path = os.path.join(self.location, *path)
return self.object_exists(func_path)
def clear_path(self, path):
"""Clear all items with a common path in the store."""
func_path = os.path.join(self.location, *path)
if self._item_exists(func_path):
self.clear_location(func_path)
def store_cached_func_code(self, path, func_code=None):
"""Store the code of the cached function."""
func_path = os.path.join(self.location, *path)
if not self._item_exists(func_path):
self.create_location(func_path)
if func_code is not None:
filename = os.path.join(func_path, "func_code.py")
with self._open_item(filename, 'wb') as f:
f.write(func_code.encode('utf-8'))
def get_cached_func_code(self, path):
"""Store the code of the cached function."""
path += ['func_code.py', ]
filename = os.path.join(self.location, *path)
try:
with self._open_item(filename, 'rb') as f:
return f.read().decode('utf-8')
except: # noqa: E722
raise
def get_cached_func_info(self, path):
"""Return information related to the cached function if it exists."""
return {'location': os.path.join(self.location, *path)}
def clear(self):
"""Clear the whole store content."""
self.clear_location(self.location)
def reduce_store_size(self, bytes_limit):
"""Reduce store size to keep it under the given bytes limit."""
items_to_delete = self._get_items_to_delete(bytes_limit)
for item in items_to_delete:
if self.verbose > 10:
print('Deleting item {0}'.format(item))
try:
self.clear_location(item.path)
except OSError:
# Even with ignore_errors=True shutil.rmtree can raise OSError
# with:
# [Errno 116] Stale file handle if another process has deleted
# the folder already.
pass
def _get_items_to_delete(self, bytes_limit):
"""Get items to delete to keep the store under a size limit."""
if isinstance(bytes_limit, str):
bytes_limit = memstr_to_bytes(bytes_limit)
items = self.get_items()
size = sum(item.size for item in items)
to_delete_size = size - bytes_limit
if to_delete_size < 0:
return []
# We want to delete first the cache items that were accessed a
# long time ago
items.sort(key=operator.attrgetter('last_access'))
items_to_delete = []
size_so_far = 0
for item in items:
if size_so_far > to_delete_size:
break
items_to_delete.append(item)
size_so_far += item.size
return items_to_delete
def _concurrency_safe_write(self, to_write, filename, write_func):
"""Writes an object into a file in a concurrency-safe way."""
temporary_filename = concurrency_safe_write(to_write,
filename, write_func)
self._move_item(temporary_filename, filename)
def __repr__(self):
"""Printable representation of the store location."""
return '{class_name}(location="{location}")'.format(
class_name=self.__class__.__name__, location=self.location)
class FileSystemStoreBackend(StoreBackendBase, StoreBackendMixin):
"""A StoreBackend used with local or network file systems."""
_open_item = staticmethod(open)
_item_exists = staticmethod(os.path.exists)
_move_item = staticmethod(concurrency_safe_rename)
def clear_location(self, location):
"""Delete location on store."""
if (location == self.location):
rm_subdirs(location)
else:
shutil.rmtree(location, ignore_errors=True)
def create_location(self, location):
"""Create object location on store"""
mkdirp(location)
def get_items(self):
"""Returns the whole list of items available in the store."""
items = []
for dirpath, _, filenames in os.walk(self.location):
is_cache_hash_dir = re.match('[a-f0-9]{32}',
os.path.basename(dirpath))
if is_cache_hash_dir:
output_filename = os.path.join(dirpath, 'output.pkl')
try:
last_access = os.path.getatime(output_filename)
except OSError:
try:
last_access = os.path.getatime(dirpath)
except OSError:
# The directory has already been deleted
continue
last_access = datetime.datetime.fromtimestamp(last_access)
try:
full_filenames = [os.path.join(dirpath, fn)
for fn in filenames]
dirsize = sum(os.path.getsize(fn)
for fn in full_filenames)
except OSError:
# Either output_filename or one of the files in
# dirpath does not exist any more. We assume this
# directory is being cleaned by another process already
continue
items.append(CacheItemInfo(dirpath, dirsize,
last_access))
return items
def configure(self, location, verbose=1, backend_options=None):
"""Configure the store backend.
For this backend, valid store options are 'compress' and 'mmap_mode'
"""
if backend_options is None:
backend_options = {}
# setup location directory
self.location = location
if not os.path.exists(self.location):
mkdirp(self.location)
# item can be stored compressed for faster I/O
self.compress = backend_options.get('compress', False)
# FileSystemStoreBackend can be used with mmap_mode options under
# certain conditions.
mmap_mode = backend_options.get('mmap_mode')
if self.compress and mmap_mode is not None:
warnings.warn('Compressed items cannot be memmapped in a '
'filesystem store. Option will be ignored.',
stacklevel=2)
self.mmap_mode = mmap_mode
self.verbose = verbose
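# Usage sketch (illustrative only, not part of joblib): items are addressed by
# a list of path segments and stored as <location>/<segments...>/output.pkl.
# The cache directory argument and the path segments below are invented names.
def _filesystem_store_example(cache_dir):
    backend = FileSystemStoreBackend()
    backend.configure(location=cache_dir, verbose=0,
                      backend_options={'compress': False})
    backend.dump_item(['func_hash', 'args_hash'], {'answer': 42})
    assert backend.contains_item(['func_hash', 'args_hash'])
    return backend.load_item(['func_hash', 'args_hash'])   # -> {'answer': 42}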
|
|
#!/usr/bin/env python
#
# Copyright 2015 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""\
Command line parameter parsing classes for the example applications.
BaseCmdLineParser contains common arguments to all examples, and sets up a 3
step framework for parsing: init, setupArguments() and parseArguments().
TVTesterCmdLineParser subclasses BaseCmdLineParser adding arguments specific
to exampleTVTester.py
CsaTesterCmdLineParser subclasses BaseCmdLineParser adding arguments specific
to exampleCsaTester.py
"""
import re
import sys
import argparse
import json
import arduino
import dvbcss.util
def ToleranceOrNone(value):
"""\
:param value: None, or a string containing a float that is >= 0 representing tolerance in milliseconds
:returns: None, or tolerance in units of seconds.
"""
if value is None:
return None
else:
if re.match(r"^[0-9]+(?:\.[0-9]+)?", value):
return float(value)/1000.0
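# Examples (illustrative only): command-line tolerances are given in
# milliseconds and converted to seconds by the helper above.
def _tolerance_examples():
    assert ToleranceOrNone(None) is None
    assert ToleranceOrNone("5") == 0.005      # 5 ms expressed in seconds
    assert ToleranceOrNone("2.5") == 0.0025   # fractional milliseconds accepted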
class BaseCmdLineParser(object):
"""\
Usage:
1. initialise
2. call setupArguments()
3. call parseArguments()
Parsed arguments will be in the `args` attribute
Subclass to add more arguments:
* initialisation puts an argparse.ArgumentParser() into self.parser
* override setupArguments() to add more arguments - before and/or after
calling the superclass implementation of setupArguments() to determine
the order.
* override parseArguments() to add additional parsing steps. Call the
superclass implementation of parseArguments() first.
"""
def __init__(self, desc):
super(BaseCmdLineParser,self).__init__()
self.parser = argparse.ArgumentParser(description=desc)
# setup some defaults
self.PPM=500
# if no time specified, we'll calculate time based on number of pins
self.MEASURE_SECS = -1
self.TOLERANCE = None
def setupArguments(self):
"""\
Setup the arguments used by the command line parser.
Must be called once (and only once) before parsing.
"""
self.parser.add_argument("timelineSelector", type=str, help="The timelineSelector for the timeline to be used (e.g. \"urn:dvb:css:timeline:pts\" for PTS).")
self.parser.add_argument("unitsPerTick", type=int, help="The denominator for the timeline tickrate (e.g. 1 for most timelines, such as PTS).")
self.parser.add_argument("unitsPerSec", type=int, help="The numerator for the timeline tickrate (e.g. 90000 for PTS).")
self.parser.add_argument("videoStartTicks", type=int, help="The timeline tick value corresponding to when the first frame of the test video sequence is expected to be shown.")
self.parser.add_argument("--measureSecs", dest="measureSecs", type=int, nargs=1, help="Duration of measurement period (default is max time possible given number of pins to sample", default=[self.MEASURE_SECS])
self.parser.add_argument("--light0", dest="light0_metadatafile", type=str, nargs=1, help="Measure light sensor input 0 and compare to expected flash timings in the named JSON metadata file.")
self.parser.add_argument("--light1", dest="light1_metadatafile", type=str, nargs=1, help="Measure light sensor input 1 and compare to expected flash timings in the named JSON metadata file.")
self.parser.add_argument("--audio0", dest="audio0_metadatafile", type=str, nargs=1, help="Measure audio input 0 and compare to expected beep timings in the named JSON metadata file.")
self.parser.add_argument("--audio1", dest="audio1_metadatafile", type=str, nargs=1, help="Measure audio input 1 and compare to expected beep timings in the named JSON metadata file.")
self.parser.add_argument("--mfe", \
"--maxfreqerror", dest="maxFreqError", type=int, action="store",default=self.PPM,help="Set the maximum frequency error for the local wall clock in ppm (default="+str(self.PPM)+")")
self.parser.add_argument("--toleranceTest",dest="toleranceSecs",type=ToleranceOrNone, action="store", nargs=1,help="Do a pass/fail test on whether sync is accurate to within this specified tolerance, in milliseconds. Test is not performed if this is not specified.",default=[self.TOLERANCE])
def parseArguments(self, args=None):
"""\
Parse and process arguments.
:param args: The arguments to process as a list of strings. If not provided, defaults to processing sys.argv
"""
if args is None:
self.args = self.parser.parse_args()
else:
self.args = self.parser.parse_args(args)
self.args.timelineClockFrequency = float(self.args.unitsPerSec) / self.args.unitsPerTick
# dictionary that maps from pin name to json metadata file
self.pinMetadataFilenames = {
"LIGHT_0" : self.args.light0_metadatafile,
"LIGHT_1" : self.args.light1_metadatafile,
"AUDIO_0" : self.args.audio0_metadatafile,
"AUDIO_1" : self.args.audio1_metadatafile
}
# load in the expected times for each pin being sampled, and also build a list of which pins are being sampled
self.pinExpectedTimes, self.pinEventDurations = _loadExpectedTimeMetadata(self.pinMetadataFilenames)
self.pinsToMeasure = self.pinExpectedTimes.keys()
if len(self.pinsToMeasure) == 0:
sys.stderr.write("\nAborting. No light sensor or audio inputs have been specified.\n\n")
sys.exit(1)
# see if the requested time for measuring can be accommodated by the system
self.measurerTime = arduino.checkCaptureTimeAchievable(self.args.measureSecs[0], len(self.pinsToMeasure))
if self.measurerTime < 0:
sys.stderr.write("\nAborting. The combination of measured time and pins to measure exceeds the measurement system's capabilities.\n\n")
sys.exit(1)
def _loadExpectedTimeMetadata(pinMetadataFilenames):
"""\
Given an input dictionary mapping pin names to filename, load the
expected flash/beep times data from the filename and return a dict mapping
pin names to the expected timing list.
:param pinMetadataFilenames: dict mapping pin names to either None or a list
containing a single string which is the filename of the metadata json to load from.
:returns: dict mapping pin names to lists containing expected flash/beep times
read from the metadata file. For pins that have a None value, there will be
no entry in the dict.
"""
pinExpectedTimes = {}
pinEventDurations = {}
try:
for pinName in pinMetadataFilenames:
argValue = pinMetadataFilenames[pinName]
if argValue is not None:
filename=argValue[0]
f=open(filename)
metadata = json.load(f)
f.close()
pinExpectedTimes[pinName] = metadata["eventCentreTimes"]
if "AUDIO" in pinName:
pinEventDurations[pinName] = metadata["approxBeepDurationSecs"]
elif "LIGHT" in pinName:
pinEventDurations[pinName] = metadata["approxFlashDurationSecs"]
else:
raise ValueError("Did not recognise pin type (audio or light). Could not determine which field to read from metadata")
except IOError:
sys.stderr.write("\nCould not open one of the specified JSON metadata files.\n\n")
sys.exit(1)
except ValueError:
sys.stderr.write("\nError parsing contents of one of the JSON metadata files. Is it correct JSON?\n\n")
sys.exit(1)
return pinExpectedTimes, pinEventDurations
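# Illustrative metadata shape (field names taken from the reads above; the
# numeric values are invented): what one entry of pinMetadataFilenames points at.
_EXAMPLE_LIGHT_METADATA = {
    "eventCentreTimes": [0.5, 1.5, 2.5],   # seconds at which flashes are expected
    "approxFlashDurationSecs": 0.033,      # AUDIO_* pins use "approxBeepDurationSecs" instead
}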
class TVTesterCmdLineParser(BaseCmdLineParser):
def __init__(self):
"""\
parse the command line arguments for the TV testing system
"""
# defaults for command line arguments
self.DEFAULT_WC_BIND=("0.0.0.0","random")
desc = "Measures synchronisation timing for a TV using the DVB CSS protocols. Does this by pretending to be the CSA and using an external Arduino microcontroller to take measurements."
super(TVTesterCmdLineParser,self).__init__(desc)
def setupArguments(self):
# add argument to beginning of list (called before superclass method)
self.parser.add_argument("contentIdStem", type=str, help="The contentIdStem the measurement system will use when requesting a timeline from the TV, (e.g. \"\" will match all content IDs)")
# let the superclass add its arguments
super(TVTesterCmdLineParser,self).setupArguments()
# add arguments to end of set of arguments (called after superclass method)
self.parser.add_argument("tsUrl", action="store", type=dvbcss.util.wsUrl_str, nargs=1, help="ws:// URL of TV's CSS-TS end point")
self.parser.add_argument("wcUrl", action="store", type=dvbcss.util.udpUrl_str, nargs=1, help="udp://<host>:<port> URL of TV's CSS-WC end point")
self.parser.add_argument("wcBindAddr",action="store", type=dvbcss.util.iphost_str, nargs="?",help="IP address or host name to bind WC client to (default="+str(self.DEFAULT_WC_BIND[0])+")",default=self.DEFAULT_WC_BIND[0])
self.parser.add_argument("wcBindPort",action="store", type=dvbcss.util.port_int_or_random, nargs="?",help="Port number to bind WC client to (default="+str(self.DEFAULT_WC_BIND[1])+")",default=self.DEFAULT_WC_BIND[1])
def parseArguments(self, args=None):
# let the superclass do the argument parsing and parse the pin data
super(TVTesterCmdLineParser,self).parseArguments(args)
self.wcBind = (self.args.wcBindAddr, self.args.wcBindPort)
def printTestSetup(self):
"""\
print out the test setup
"""
print
print "Scenario setup:"
for pin in self.pinsToMeasure:
print " Measuring input %s using expected timings from : %s" % (pin, self.pinMetadataFilenames[pin][0])
print
print " TS server at : %s" % self.args.tsUrl
print " WC server at : %s" % self.args.wcUrl
print " Content id stem asked of the TV : %s" % self.args.contentIdStem
print " Timeline selector asked of TV : %s" % self.args.timelineSelector
print
print " Assuming TV will be at start of video when timeline at : %d ticks" % (self.args.videoStartTicks)
print
print " When go is pressed, will begin measuring immediately for %d seconds" % self.measurerTime
print
if self.args.toleranceSecs[0] is not None:
print " Will report if TV is accurate within a tolerance of : %f milliseconds" % (self.args.toleranceSecs[0]*1000.0)
print
class CsaTesterCmdLineParser(BaseCmdLineParser):
def __init__(self):
"""\
parse the command line arguments for the CSA testing system
"""
# defaults for command line arguments
self.ADDR="127.0.0.1"
self.PORT_WC=6677
self.PORT_WS=7681
self.WAIT_SECS=5.0
desc = "Measures synchronisation timing for a Companion Screen using the DVB CSS protocols. Does this by pretending to be the TV Device and using an external Arduino microcontroller to take measurements."
super(CsaTesterCmdLineParser,self).__init__(desc)
def setupArguments(self):
# add argument to beginning of list (called before superclass method)
self.parser.add_argument("contentId", type=str, help="The contentId the measurement system will pretend to be playing (e.g. \"urn:github.com/bbc/dvbcss-synctiming:sync-timing-test-sequence\")")
# let the superclass add its arguments
super(CsaTesterCmdLineParser,self).setupArguments()
# add arguments to end of set of arguments (called after superclass method)
self.parser.add_argument("--waitSecs", dest="waitSecs", type=float, nargs=1, help="Number of seconds to wait before beginning to measure after timeline is unpaused (default=%4.2f)" % self.WAIT_SECS, default=[self.WAIT_SECS])
self.parser.add_argument("--addr", dest="addr", type=dvbcss.util.iphost_str, nargs=1, help="IP address or host name to bind to (default=\""+str(self.ADDR)+"\")",default=[self.ADDR])
self.parser.add_argument("--wc-port", dest="portwc", type=dvbcss.util.port_int, nargs=1, help="Port number for wall clock server to listen on (default="+str(self.PORT_WC)+")",default=[self.PORT_WC])
self.parser.add_argument("--ws-port", dest="portwebsocket", type=dvbcss.util.port_int, nargs=1, help="Port number for web socket server to listen on (default="+str(self.PORT_WS)+")",default=[self.PORT_WS])
def parseArguments(self, args=None):
# let the superclass do the argument parsing and parse the pin data
super(CsaTesterCmdLineParser,self).parseArguments(args)
def printTestSetup(self, ciiUrl, wcUrl, tsUrl):
"""\
print out the test setup
"""
print
print "Scenario setup:"
for pin in self.pinsToMeasure:
print " Measuring input %s using expected timings from : %s" % (pin, self.pinMetadataFilenames[pin][0])
print
print " CII server at : %s" % ciiUrl
print " TS server at : %s" % tsUrl
print " WC server at : %s" % wcUrl
print " Pretending to have content id : %s" % self.args.contentId
print " Pretending to have timeline : %s" % self.args.timelineSelector
print " ... with tick rate : %d/%d ticks per second" % (self.args.unitsPerSec, self.args.unitsPerTick)
print
print " Will begin with timeline at : %d ticks" % (self.args.videoStartTicks)
print " Assuming CSA will be at start of video when timeline at : %d ticks" % (self.args.videoStartTicks)
print
print " When go is pressed, will wait for : %f seconds" % self.args.waitSecs[0]
print " ... then unpause the timeline and measure for: %d seconds" % self.measurerTime
print
if self.args.toleranceSecs[0] is not None:
print " Will report if CSA is accurate within a tolerance of : %f milliseconds" % (self.args.toleranceSecs[0]*1000.0)
print
|
|
from concurrent.futures import ThreadPoolExecutor
import importlib
import logging
from numbers import Number
from operator import add
import os
import psutil
import sys
from time import sleep
import traceback
import asyncio
import dask
from dask import delayed
from dask.utils import format_bytes
from dask.system import CPU_COUNT
import pytest
from tlz import pluck, sliding_window, first
from distributed import (
Client,
Nanny,
get_client,
default_client,
get_worker,
Reschedule,
wait,
)
from distributed.compatibility import WINDOWS
from distributed.core import rpc, CommClosedError
from distributed.scheduler import Scheduler
from distributed.metrics import time
from distributed.worker import (
Worker,
error_message,
logger,
parse_memory_limit,
)
from distributed.utils import tmpfile, TimeoutError
from distributed.utils_test import ( # noqa: F401
cleanup,
inc,
mul,
gen_cluster,
div,
dec,
slowinc,
gen_test,
captured_logger,
)
from distributed.utils_test import ( # noqa: F401
client,
loop,
nodebug,
cluster_fixture,
s,
a,
b,
)
@pytest.mark.asyncio
async def test_worker_nthreads(cleanup):
async with Scheduler() as s:
async with Worker(s.address) as w:
assert w.executor._max_workers == CPU_COUNT
@gen_cluster()
async def test_str(s, a, b):
assert a.address in str(a)
assert a.address in repr(a)
assert str(a.nthreads) in str(a)
assert str(a.nthreads) in repr(a)
assert str(len(a.executing)) in repr(a)
@pytest.mark.asyncio
async def test_identity(cleanup):
async with Scheduler() as s:
async with Worker(s.address) as w:
ident = w.identity(None)
assert "Worker" in ident["type"]
assert ident["scheduler"] == s.address
assert isinstance(ident["nthreads"], int)
assert isinstance(ident["memory_limit"], Number)
@gen_cluster(client=True)
async def test_worker_bad_args(c, s, a, b):
class NoReprObj:
""" This object cannot be properly represented as a string. """
def __str__(self):
raise ValueError("I have no str representation.")
def __repr__(self):
raise ValueError("I have no repr representation.")
x = c.submit(NoReprObj, workers=a.address)
await wait(x)
assert not a.executing
assert a.data
def bad_func(*args, **kwargs):
1 / 0
class MockLoggingHandler(logging.Handler):
"""Mock logging handler to check for expected logs."""
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
"debug": [],
"info": [],
"warning": [],
"error": [],
"critical": [],
}
hdlr = MockLoggingHandler()
old_level = logger.level
logger.setLevel(logging.DEBUG)
logger.addHandler(hdlr)
y = c.submit(bad_func, x, k=x, workers=b.address)
await wait(y)
assert not b.executing
assert y.status == "error"
# Make sure job died because of bad func and not because of bad
# argument.
with pytest.raises(ZeroDivisionError):
await y
tb = await y._traceback()
assert any("1 / 0" in line for line in pluck(3, traceback.extract_tb(tb)) if line)
assert "Compute Failed" in hdlr.messages["warning"][0]
logger.setLevel(old_level)
# Now we check that both workers are still alive.
xx = c.submit(add, 1, 2, workers=a.address)
yy = c.submit(add, 3, 4, workers=b.address)
results = await c._gather([xx, yy])
assert tuple(results) == (3, 7)
@pytest.mark.slow
@gen_cluster()
async def dont_test_delete_data_with_missing_worker(c, a, b):
bad = "127.0.0.1:9001" # this worker doesn't exist
c.who_has["z"].add(bad)
c.who_has["z"].add(a.address)
c.has_what[bad].add("z")
c.has_what[a.address].add("z")
a.data["z"] = 5
cc = rpc(ip=c.ip, port=c.port)
await cc.delete_data(keys=["z"]) # TODO: this hangs for a while
assert "z" not in a.data
assert not c.who_has["z"]
assert not c.has_what[bad]
assert not c.has_what[a.address]
await cc.close_rpc()
@gen_cluster(client=True)
async def test_upload_file(c, s, a, b):
assert not os.path.exists(os.path.join(a.local_directory, "foobar.py"))
assert not os.path.exists(os.path.join(b.local_directory, "foobar.py"))
assert a.local_directory != b.local_directory
with rpc(a.address) as aa, rpc(b.address) as bb:
await asyncio.gather(
aa.upload_file(filename="foobar.py", data=b"x = 123"),
bb.upload_file(filename="foobar.py", data="x = 123"),
)
assert os.path.exists(os.path.join(a.local_directory, "foobar.py"))
assert os.path.exists(os.path.join(b.local_directory, "foobar.py"))
def g():
import foobar
return foobar.x
future = c.submit(g, workers=a.address)
result = await future
assert result == 123
await c.close()
await s.close(close_workers=True)
assert not os.path.exists(os.path.join(a.local_directory, "foobar.py"))
@pytest.mark.skip(reason="don't yet support uploading pyc files")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_upload_file_pyc(c, s, w):
with tmpfile() as dirname:
os.mkdir(dirname)
with open(os.path.join(dirname, "foo.py"), mode="w") as f:
f.write("def f():\n return 123")
sys.path.append(dirname)
try:
import foo
assert foo.f() == 123
pyc = importlib.util.cache_from_source(os.path.join(dirname, "foo.py"))
assert os.path.exists(pyc)
await c.upload_file(pyc)
def g():
import foo
return foo.x
future = c.submit(g)
result = await future
assert result == 123
finally:
sys.path.remove(dirname)
@gen_cluster(client=True)
async def test_upload_egg(c, s, a, b):
eggname = "testegg-1.0.0-py3.4.egg"
local_file = __file__.replace("test_worker.py", eggname)
assert not os.path.exists(os.path.join(a.local_directory, eggname))
assert not os.path.exists(os.path.join(b.local_directory, eggname))
assert a.local_directory != b.local_directory
await c.upload_file(filename=local_file)
assert os.path.exists(os.path.join(a.local_directory, eggname))
assert os.path.exists(os.path.join(b.local_directory, eggname))
def g(x):
import testegg
return testegg.inc(x)
future = c.submit(g, 10, workers=a.address)
result = await future
assert result == 10 + 1
await c.close()
await s.close()
await a.close()
await b.close()
assert not os.path.exists(os.path.join(a.local_directory, eggname))
@gen_cluster(client=True)
async def test_upload_pyz(c, s, a, b):
pyzname = "mytest.pyz"
local_file = __file__.replace("test_worker.py", pyzname)
assert not os.path.exists(os.path.join(a.local_directory, pyzname))
assert not os.path.exists(os.path.join(b.local_directory, pyzname))
assert a.local_directory != b.local_directory
await c.upload_file(filename=local_file)
assert os.path.exists(os.path.join(a.local_directory, pyzname))
assert os.path.exists(os.path.join(b.local_directory, pyzname))
def g(x):
from mytest import mytest
return mytest.inc(x)
future = c.submit(g, 10, workers=a.address)
result = await future
assert result == 10 + 1
await c.close()
await s.close()
await a.close()
await b.close()
assert not os.path.exists(os.path.join(a.local_directory, pyzname))
@pytest.mark.xfail(reason="Still lose time to network I/O")
@gen_cluster(client=True)
async def test_upload_large_file(c, s, a, b):
pytest.importorskip("crick")
await asyncio.sleep(0.05)
with rpc(a.address) as aa:
await aa.upload_file(filename="myfile.dat", data=b"0" * 100000000)
await asyncio.sleep(0.05)
assert a.digests["tick-duration"].components[0].max() < 0.050
@gen_cluster()
async def test_broadcast(s, a, b):
with rpc(s.address) as cc:
results = await cc.broadcast(msg={"op": "ping"})
assert results == {a.address: b"pong", b.address: b"pong"}
@gen_test()
async def test_worker_with_port_zero():
s = await Scheduler(port=8007)
w = await Worker(s.address)
assert isinstance(w.port, int)
assert w.port > 1024
await w.close()
@pytest.mark.asyncio
async def test_worker_port_range(cleanup):
async with Scheduler() as s:
port = "9867:9868"
async with Worker(s.address, port=port) as w1:
assert w1.port == 9867 # Selects first port in range
async with Worker(s.address, port=port) as w2:
assert w2.port == 9868 # Selects next port in range
with pytest.raises(
ValueError, match="Could not start Worker"
): # No more ports left
async with Worker(s.address, port=port):
pass
@pytest.mark.slow
@pytest.mark.asyncio
async def test_worker_waits_for_scheduler(cleanup):
w = Worker("127.0.0.1:8724")
try:
await asyncio.wait_for(w, 3)
except TimeoutError:
pass
else:
assert False
assert w.status not in ("closed", "running")
await w.close(timeout=0.1)
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_worker_task_data(c, s, w):
x = delayed(2)
xx = c.persist(x)
await wait(xx)
assert w.data[x.key] == 2
def test_error_message():
class MyException(Exception):
def __init__(self, a, b):
self.args = (a + b,)
def __str__(self):
return "MyException(%s)" % self.args
msg = error_message(MyException("Hello", "World!"))
assert "Hello" in str(msg["exception"])
max_error_len = 100
with dask.config.set({"distributed.admin.max-error-length": max_error_len}):
msg = error_message(RuntimeError("-" * max_error_len))
assert len(msg["text"]) <= max_error_len
assert len(msg["text"]) < max_error_len * 2
msg = error_message(RuntimeError("-" * max_error_len * 20))
cut_text = msg["text"].replace("('Long error message', '", "")[:-2]
assert len(cut_text) == max_error_len
max_error_len = 1000000
with dask.config.set({"distributed.admin.max-error-length": max_error_len}):
msg = error_message(RuntimeError("-" * max_error_len * 2))
cut_text = msg["text"].replace("('Long error message', '", "")[:-2]
assert len(cut_text) == max_error_len
assert len(msg["text"]) > 10100 # default + 100
@gen_cluster(client=True)
async def test_chained_error_message(c, s, a, b):
def chained_exception_fn():
class MyException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return "MyException(%s)" % self.msg
exception = MyException("Foo")
inner_exception = MyException("Bar")
try:
raise inner_exception
except Exception as e:
raise exception from e
f = c.submit(chained_exception_fn)
try:
await f
except Exception as e:
assert e.__cause__ is not None
assert "Bar" in str(e.__cause__)
@gen_cluster()
async def test_gather(s, a, b):
b.data["x"] = 1
b.data["y"] = 2
with rpc(a.address) as aa:
resp = await aa.gather(who_has={"x": [b.address], "y": [b.address]})
assert resp["status"] == "OK"
assert a.data["x"] == b.data["x"]
assert a.data["y"] == b.data["y"]
@pytest.mark.asyncio
async def test_io_loop(cleanup):
async with Scheduler(port=0) as s:
async with Worker(s.address, loop=s.loop) as w:
assert w.io_loop is s.loop
@gen_cluster(client=True, nthreads=[])
async def test_spill_to_disk(c, s):
np = pytest.importorskip("numpy")
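    # The default memory target fraction is 0.6, so memory_limit=1200 / 0.6
    # leaves roughly 1200 bytes of in-memory capacity before least-recently
    # used keys start spilling to disk.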
w = await Worker(
s.address,
loop=s.loop,
memory_limit=1200 / 0.6,
memory_pause_fraction=None,
memory_spill_fraction=None,
)
x = c.submit(np.random.randint, 0, 255, size=500, dtype="u1", key="x")
await wait(x)
y = c.submit(np.random.randint, 0, 255, size=500, dtype="u1", key="y")
await wait(y)
assert set(w.data) == {x.key, y.key}
assert set(w.data.memory) == {x.key, y.key}
assert set(w.data.fast) == set(w.data.memory)
z = c.submit(np.random.randint, 0, 255, size=500, dtype="u1", key="z")
await wait(z)
assert set(w.data) == {x.key, y.key, z.key}
assert set(w.data.memory) == {y.key, z.key}
assert set(w.data.disk) == {x.key} or set(w.data.slow) == {x.key, y.key}
assert set(w.data.fast) == set(w.data.memory)
assert set(w.data.slow) == set(w.data.disk)
await x
assert set(w.data.memory) == {x.key, z.key}
assert set(w.data.disk) == {y.key} or set(w.data.slow) == {x.key, y.key}
assert set(w.data.fast) == set(w.data.memory)
assert set(w.data.slow) == set(w.data.disk)
await w.close()
@gen_cluster(client=True)
async def test_access_key(c, s, a, b):
def f(i):
from distributed.worker import thread_state
return thread_state.key
futures = [c.submit(f, i, key="x-%d" % i) for i in range(20)]
results = await c._gather(futures)
assert list(results) == ["x-%d" % i for i in range(20)]
@gen_cluster(client=True)
async def test_run_dask_worker(c, s, a, b):
def f(dask_worker=None):
return dask_worker.id
response = await c._run(f)
assert response == {a.address: a.id, b.address: b.id}
@gen_cluster(client=True)
async def test_run_coroutine_dask_worker(c, s, a, b):
async def f(dask_worker=None):
await asyncio.sleep(0.001)
return dask_worker.id
response = await c.run(f)
assert response == {a.address: a.id, b.address: b.id}
@gen_cluster(client=True, nthreads=[])
async def test_Executor(c, s):
with ThreadPoolExecutor(2) as e:
w = Worker(s.address, executor=e)
assert w.executor is e
w = await w
future = c.submit(inc, 1)
result = await future
assert result == 2
assert e._threads # had to do some work
await w.close()
@pytest.mark.skip(
reason="Other tests leak memory, so process-level checks trigger immediately"
)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1)],
timeout=30,
worker_kwargs={"memory_limit": 10e6},
)
async def test_spill_by_default(c, s, w):
da = pytest.importorskip("dask.array")
x = da.ones(int(10e6 * 0.7), chunks=1e6, dtype="u1")
y = c.persist(x)
await wait(y)
assert len(w.data.disk) # something is on disk
del x, y
@gen_cluster(nthreads=[("127.0.0.1", 1)], worker_kwargs={"reconnect": False})
async def test_close_on_disconnect(s, w):
await s.close()
start = time()
while w.status != "closed":
await asyncio.sleep(0.01)
assert time() < start + 5
@pytest.mark.asyncio
async def test_memory_limit_auto():
async with Scheduler() as s:
async with Worker(s.address, nthreads=1) as a, Worker(
s.address, nthreads=2
) as b, Worker(s.address, nthreads=100) as c, Worker(
s.address, nthreads=200
) as d:
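            # The automatic memory limit scales with nthreads but is capped at
            # the total system memory, so the 100- and 200-thread workers end
            # up with the same limit.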
assert isinstance(a.memory_limit, Number)
assert isinstance(b.memory_limit, Number)
if CPU_COUNT > 1:
assert a.memory_limit < b.memory_limit
assert c.memory_limit == d.memory_limit
@gen_cluster(client=True)
async def test_inter_worker_communication(c, s, a, b):
[x, y] = await c._scatter([1, 2], workers=a.address)
future = c.submit(add, x, y, workers=b.address)
result = await future
assert result == 3
@gen_cluster(client=True)
async def test_clean(c, s, a, b):
x = c.submit(inc, 1, workers=a.address)
y = c.submit(inc, x, workers=b.address)
await y
collections = [
a.tasks,
a.task_state,
a.startstops,
a.data,
a.nbytes,
a.durations,
a.priorities,
a.types,
a.threads,
]
    for collection in collections:
        assert collection
x.release()
y.release()
while x.key in a.task_state:
await asyncio.sleep(0.01)
    for collection in collections:
        assert not collection
@gen_cluster(client=True)
async def test_message_breakup(c, s, a, b):
n = 100000
a.target_message_size = 10 * n
b.target_message_size = 10 * n
xs = [c.submit(mul, b"%d" % i, n, workers=a.address) for i in range(30)]
y = c.submit(lambda *args: None, xs, workers=b.address)
await y
assert 2 <= len(b.incoming_transfer_log) <= 20
assert 2 <= len(a.outgoing_transfer_log) <= 20
assert all(msg["who"] == b.address for msg in a.outgoing_transfer_log)
assert all(msg["who"] == a.address for msg in a.incoming_transfer_log)
@gen_cluster(client=True)
async def test_types(c, s, a, b):
assert not a.types
assert not b.types
x = c.submit(inc, 1, workers=a.address)
await wait(x)
assert a.types[x.key] == int
y = c.submit(inc, x, workers=b.address)
await wait(y)
assert b.types == {x.key: int, y.key: int}
await c._cancel(y)
start = time()
while y.key in b.data:
await asyncio.sleep(0.01)
assert time() < start + 5
assert y.key not in b.types
@gen_cluster()
async def test_system_monitor(s, a, b):
assert b.monitor
b.monitor.update()
@gen_cluster(
client=True, nthreads=[("127.0.0.1", 2, {"resources": {"A": 1}}), ("127.0.0.1", 1)]
)
async def test_restrictions(c, s, a, b):
# Resource restrictions
x = c.submit(inc, 1, resources={"A": 1})
await x
assert a.resource_restrictions == {x.key: {"A": 1}}
await c._cancel(x)
while x.key in a.task_state:
await asyncio.sleep(0.01)
assert a.resource_restrictions == {}
@pytest.mark.xfail
@gen_cluster(client=True)
async def test_clean_nbytes(c, s, a, b):
L = [delayed(inc)(i) for i in range(10)]
for i in range(5):
L = [delayed(add)(x, y) for x, y in sliding_window(2, L)]
total = delayed(sum)(L)
future = c.compute(total)
await wait(future)
await asyncio.sleep(1)
assert len(a.nbytes) + len(b.nbytes) == 1
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 20)
async def test_gather_many_small(c, s, a, *workers):
a.total_out_connections = 2
futures = await c._scatter(list(range(100)))
assert all(w.data for w in workers)
def f(*args):
return 10
future = c.submit(f, *futures, workers=a.address)
await wait(future)
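    # The transfer log should show every request-dep event before the first
    # receive-dep event, i.e. dependency requests were grouped rather than
    # interleaved with responses.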
types = list(pluck(0, a.log))
req = [i for i, t in enumerate(types) if t == "request-dep"]
recv = [i for i, t in enumerate(types) if t == "receive-dep"]
assert min(recv) > max(req)
assert a.comm_nbytes == 0
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_multiple_transfers(c, s, w1, w2, w3):
x = c.submit(inc, 1, workers=w1.address)
y = c.submit(inc, 2, workers=w2.address)
z = c.submit(add, x, y, workers=w3.address)
await wait(z)
r = w3.startstops[z.key]
transfers = [t for t in r if t["action"] == "transfer"]
assert len(transfers) == 2
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 3)
async def test_share_communication(c, s, w1, w2, w3):
x = c.submit(mul, b"1", int(w3.target_message_size + 1), workers=w1.address)
y = c.submit(mul, b"2", int(w3.target_message_size + 1), workers=w2.address)
await wait([x, y])
await c._replicate([x, y], workers=[w1.address, w2.address])
z = c.submit(add, x, y, workers=w3.address)
await wait(z)
assert len(w3.incoming_transfer_log) == 2
assert w1.outgoing_transfer_log
assert w2.outgoing_transfer_log
@gen_cluster(client=True)
async def test_dont_overlap_communications_to_same_worker(c, s, a, b):
x = c.submit(mul, b"1", int(b.target_message_size + 1), workers=a.address)
y = c.submit(mul, b"2", int(b.target_message_size + 1), workers=a.address)
await wait([x, y])
z = c.submit(add, x, y, workers=b.address)
await wait(z)
assert len(b.incoming_transfer_log) == 2
l1, l2 = b.incoming_transfer_log
assert l1["stop"] < l2["start"]
@pytest.mark.avoid_travis
@gen_cluster(client=True)
async def test_log_exception_on_failed_task(c, s, a, b):
with tmpfile() as fn:
fh = logging.FileHandler(fn)
try:
from distributed.worker import logger
logger.addHandler(fh)
future = c.submit(div, 1, 0)
await wait(future)
await asyncio.sleep(0.1)
fh.flush()
with open(fn) as f:
text = f.read()
assert "ZeroDivisionError" in text
assert "Exception" in text
finally:
logger.removeHandler(fh)
@gen_cluster(client=True)
async def test_clean_up_dependencies(c, s, a, b):
x = delayed(inc)(1)
y = delayed(inc)(2)
xx = delayed(inc)(x)
yy = delayed(inc)(y)
z = delayed(add)(xx, yy)
zz = c.persist(z)
await wait(zz)
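    # Once the final result is in memory the intermediate keys should be
    # released, leaving only zz.key on the workers.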
start = time()
while len(a.data) + len(b.data) > 1:
await asyncio.sleep(0.01)
assert time() < start + 2
assert set(a.data) | set(b.data) == {zz.key}
@gen_cluster(client=True)
async def test_hold_onto_dependents(c, s, a, b):
x = c.submit(inc, 1, workers=a.address)
y = c.submit(inc, x, workers=b.address)
await wait(y)
assert x.key in b.data
await c._cancel(y)
await asyncio.sleep(0.1)
assert x.key in b.data
@pytest.mark.slow
@gen_cluster(client=False, nthreads=[])
async def test_worker_death_timeout(s):
with dask.config.set({"distributed.comm.timeouts.connect": "1s"}):
await s.close()
w = Worker(s.address, death_timeout=1)
with pytest.raises(TimeoutError) as info:
await w
assert "Worker" in str(info.value)
assert "timed out" in str(info.value) or "failed to start" in str(info.value)
assert w.status == "closed"
@gen_cluster(client=True)
async def test_stop_doing_unnecessary_work(c, s, a, b):
futures = c.map(slowinc, range(1000), delay=0.01)
await asyncio.sleep(0.1)
del futures
start = time()
while a.executing:
await asyncio.sleep(0.01)
assert time() - start < 0.5
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)])
async def test_priorities(c, s, w):
values = []
for i in range(10):
a = delayed(slowinc)(i, dask_key_name="a-%d" % i, delay=0.01)
a1 = delayed(inc)(a, dask_key_name="a1-%d" % i)
a2 = delayed(inc)(a1, dask_key_name="a2-%d" % i)
b1 = delayed(dec)(a, dask_key_name="b1-%d" % i) # <<-- least favored
values.append(a2)
values.append(b1)
futures = c.compute(values)
await wait(futures)
log = [
t[0]
for t in w.log
if t[1] == "executing" and t[2] == "memory" and not t[0].startswith("finalize")
]
assert any(key.startswith("b1") for key in log[: len(log) // 2])
@gen_cluster(client=True)
async def test_heartbeats(c, s, a, b):
x = s.workers[a.address].last_seen
start = time()
await asyncio.sleep(a.periodic_callbacks["heartbeat"].callback_time / 1000 + 0.1)
while s.workers[a.address].last_seen == x:
await asyncio.sleep(0.01)
assert time() < start + 2
assert a.periodic_callbacks["heartbeat"].callback_time < 1000
@pytest.mark.parametrize("worker", [Worker, Nanny])
def test_worker_dir(worker):
with tmpfile() as fn:
@gen_cluster(client=True, worker_kwargs={"local_directory": fn})
async def test_worker_dir(c, s, a, b):
directories = [w.local_directory for w in s.workers.values()]
assert all(d.startswith(fn) for d in directories)
assert len(set(directories)) == 2 # distinct
test_worker_dir()
@gen_cluster(client=True)
async def test_dataframe_attribute_error(c, s, a, b):
class BadSize:
def __init__(self, data):
self.data = data
def __sizeof__(self):
raise TypeError("Hello")
future = c.submit(BadSize, 123)
result = await future
assert result.data == 123
@gen_cluster(client=True)
async def test_fail_write_to_disk(c, s, a, b):
class Bad:
def __getstate__(self):
raise TypeError()
def __sizeof__(self):
return int(100e9)
future = c.submit(Bad)
await wait(future)
assert future.status == "error"
with pytest.raises(TypeError):
await future
futures = c.map(inc, range(10))
results = await c._gather(futures)
assert results == list(map(inc, range(10)))
@pytest.mark.skip(reason="Our logic here is faulty")
@gen_cluster(
nthreads=[("127.0.0.1", 2)], client=True, worker_kwargs={"memory_limit": 10e9}
)
async def test_fail_write_many_to_disk(c, s, a):
a.validate = False
await asyncio.sleep(0.1)
assert not a.paused
class Bad:
def __init__(self, x):
pass
def __getstate__(self):
raise TypeError()
def __sizeof__(self):
return int(2e9)
futures = c.map(Bad, range(11))
future = c.submit(lambda *args: 123, *futures)
await wait(future)
with pytest.raises(Exception) as info:
await future
# workers still operational
result = await c.submit(inc, 1, workers=a.address)
assert result == 2
@gen_cluster()
async def test_pid(s, a, b):
assert s.workers[a.address].pid == os.getpid()
@gen_cluster(client=True)
async def test_get_client(c, s, a, b):
def f(x):
cc = get_client()
future = cc.submit(inc, x)
return future.result()
assert default_client() is c
future = c.submit(f, 10, workers=a.address)
result = await future
assert result == 11
assert a._client
assert not b._client
assert a._client is c
assert default_client() is c
a_client = a._client
for i in range(10):
await wait(c.submit(f, i))
assert a._client is a_client
def test_get_client_sync(client):
def f(x):
cc = get_client()
future = cc.submit(inc, x)
return future.result()
future = client.submit(f, 10)
assert future.result() == 11
@gen_cluster(client=True)
async def test_get_client_coroutine(c, s, a, b):
async def f():
client = await get_client()
future = client.submit(inc, 10)
result = await future
return result
results = await c.run(f)
assert results == {a.address: 11, b.address: 11}
def test_get_client_coroutine_sync(client, s, a, b):
async def f():
client = await get_client()
future = client.submit(inc, 10)
result = await future
return result
results = client.run(f)
assert results == {a["address"]: 11, b["address"]: 11}
@gen_cluster()
async def test_global_workers(s, a, b):
n = len(Worker._instances)
w = first(Worker._instances)
assert w is a or w is b
@pytest.mark.skipif(WINDOWS, reason="file descriptors")
@gen_cluster(nthreads=[])
async def test_worker_fds(s):
psutil = pytest.importorskip("psutil")
await asyncio.sleep(0.05)
    before = psutil.Process().num_fds()
    worker = await Worker(s.address, loop=s.loop)
    await asyncio.sleep(0.1)
    start = time()
    while psutil.Process().num_fds() <= before:
        await asyncio.sleep(0.01)
        assert time() < start + 1
    await worker.close()
    start = time()
    while psutil.Process().num_fds() > before:
        await asyncio.sleep(0.01)
        assert time() < start + 0.5
@gen_cluster(nthreads=[])
async def test_service_hosts_match_worker(s):
async with Worker(s.address, host="tcp://0.0.0.0") as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
async with Worker(
s.address, host="tcp://127.0.0.1", dashboard_address="0.0.0.0:0"
) as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] in ("::", "0.0.0.0")
async with Worker(s.address, host="tcp://127.0.0.1") as w:
sock = first(w.http_server._sockets.values())
assert sock.getsockname()[0] == "127.0.0.1"
@gen_cluster(nthreads=[])
async def test_start_services(s):
async with Worker(s.address, dashboard_address=1234) as w:
assert w.http_server.port == 1234
@gen_test()
async def test_scheduler_file():
with tmpfile() as fn:
s = await Scheduler(scheduler_file=fn, port=8009)
w = await Worker(scheduler_file=fn)
assert set(s.workers) == {w.address}
await w.close()
s.stop()
@gen_cluster(client=True)
async def test_scheduler_delay(c, s, a, b):
old = a.scheduler_delay
assert abs(a.scheduler_delay) < 0.3
assert abs(b.scheduler_delay) < 0.3
await asyncio.sleep(a.periodic_callbacks["heartbeat"].callback_time / 1000 + 0.3)
assert a.scheduler_delay != old
@gen_cluster(client=True)
async def test_statistical_profiling(c, s, a, b):
futures = c.map(slowinc, range(10), delay=0.1)
await wait(futures)
profile = a.profile_keys["slowinc"]
assert profile["count"]
@pytest.mark.slow
@nodebug
@gen_cluster(
client=True,
timeout=30,
config={
"distributed.worker.profile.interval": "1ms",
"distributed.worker.profile.cycle": "100ms",
},
)
async def test_statistical_profiling_2(c, s, a, b):
da = pytest.importorskip("dask.array")
while True:
x = da.random.random(1000000, chunks=(10000,))
y = (x + x * 2) - x.sum().persist()
await wait(y)
profile = await a.get_profile()
text = str(profile)
if profile["count"] and "sum" in text and "random" in text:
break
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
client=True,
worker_kwargs={"memory_monitor_interval": 10},
)
async def test_robust_to_bad_sizeof_estimates(c, s, a):
np = pytest.importorskip("numpy")
memory = psutil.Process().memory_info().rss
a.memory_limit = memory / 0.7 + 400e6
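    # BadAccounting under-reports its size via __sizeof__, so per-key
    # accounting is useless and the worker must fall back to measured process
    # memory to decide when to spill.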
class BadAccounting:
def __init__(self, data):
self.data = data
def __sizeof__(self):
return 10
def f(n):
x = np.ones(int(n), dtype="u1")
result = BadAccounting(x)
return result
futures = c.map(f, [100e6] * 8, pure=False)
start = time()
while not a.data.disk:
await asyncio.sleep(0.1)
assert time() < start + 5
@pytest.mark.slow
@pytest.mark.xfail(
sys.version_info[:2] == (3, 8),
reason="Sporadic failure on Python 3.8",
strict=False,
)
@gen_cluster(
nthreads=[("127.0.0.1", 2)],
client=True,
worker_kwargs={
"memory_monitor_interval": 10,
"memory_spill_fraction": False, # don't spill
"memory_target_fraction": False,
"memory_pause_fraction": 0.5,
},
timeout=20,
)
async def test_pause_executor(c, s, a):
memory = psutil.Process().memory_info().rss
a.memory_limit = memory / 0.5 + 200e6
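    # With memory_pause_fraction=0.5, the ~400 MB allocation in f() should
    # push process memory past half of this limit and pause task execution.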
np = pytest.importorskip("numpy")
def f():
x = np.ones(int(400e6), dtype="u1")
sleep(1)
with captured_logger(logging.getLogger("distributed.worker")) as logger:
future = c.submit(f)
futures = c.map(slowinc, range(30), delay=0.1)
start = time()
while not a.paused:
await asyncio.sleep(0.01)
assert time() < start + 4, (
format_bytes(psutil.Process().memory_info().rss),
format_bytes(a.memory_limit),
len(a.data),
)
out = logger.getvalue()
assert "memory" in out.lower()
assert "pausing" in out.lower()
assert sum(f.status == "finished" for f in futures) < 4
await wait(futures)
@gen_cluster(client=True, worker_kwargs={"profile_cycle_interval": "50 ms"})
async def test_statistical_profiling_cycle(c, s, a, b):
futures = c.map(slowinc, range(20), delay=0.05)
await wait(futures)
await asyncio.sleep(0.01)
end = time()
assert len(a.profile_history) > 3
x = await a.get_profile(start=time() + 10, stop=time() + 20)
assert not x["count"]
x = await a.get_profile(start=0, stop=time() + 10)
recent = a.profile_recent["count"]
actual = sum(p["count"] for _, p in a.profile_history) + a.profile_recent["count"]
x2 = await a.get_profile(start=0, stop=time() + 10)
assert x["count"] <= actual <= x2["count"]
y = await a.get_profile(start=end - 0.300, stop=time())
assert 0 < y["count"] <= x["count"]
@gen_cluster(client=True)
async def test_get_current_task(c, s, a, b):
def some_name():
return get_worker().get_current_task()
result = await c.submit(some_name)
assert result.startswith("some_name")
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 2)
async def test_reschedule(c, s, a, b):
s.extensions["stealing"]._pc.stop()
a_address = a.address
def f(x):
sleep(0.1)
if get_worker().address == a_address:
raise Reschedule()
futures = c.map(f, range(4))
futures2 = c.map(slowinc, range(10), delay=0.1, workers=a.address)
await wait(futures)
assert all(f.key in b.data for f in futures)
@pytest.mark.asyncio
async def test_deque_handler(cleanup):
from distributed.worker import logger
async with Scheduler() as s:
async with Worker(s.address) as w:
deque_handler = w._deque_handler
logger.info("foo456")
assert deque_handler.deque
msg = deque_handler.deque[-1]
assert "distributed.worker" in deque_handler.format(msg)
assert any(msg.msg == "foo456" for msg in deque_handler.deque)
@gen_cluster(nthreads=[], client=True)
async def test_avoid_memory_monitor_if_zero_limit(c, s):
worker = await Worker(
s.address, loop=s.loop, memory_limit=0, memory_monitor_interval=10
)
assert type(worker.data) is dict
assert "memory" not in worker.periodic_callbacks
future = c.submit(inc, 1)
assert (await future) == 2
await asyncio.sleep(worker.memory_monitor_interval / 1000)
await c.submit(inc, 2) # worker doesn't pause
await worker.close()
@gen_cluster(
nthreads=[("127.0.0.1", 1)],
config={
"distributed.worker.memory.spill": False,
"distributed.worker.memory.target": False,
},
)
async def test_dict_data_if_no_spill_to_disk(s, w):
assert type(w.data) is dict
def test_get_worker_name(client):
def f():
get_client().submit(inc, 1).result()
client.run(f)
def func(dask_scheduler):
return list(dask_scheduler.clients)
start = time()
while not any("worker" in n for n in client.run_on_scheduler(func)):
sleep(0.1)
assert time() < start + 10
@gen_cluster(nthreads=[("127.0.0.1", 1)], worker_kwargs={"memory_limit": "2e3 MB"})
async def test_parse_memory_limit(s, w):
assert w.memory_limit == 2e9
@gen_cluster(nthreads=[], client=True)
async def test_scheduler_address_config(c, s):
with dask.config.set({"scheduler-address": s.address}):
worker = await Worker(loop=s.loop)
assert worker.scheduler.address == s.address
await worker.close()
@pytest.mark.slow
@gen_cluster(client=True)
async def test_wait_for_outgoing(c, s, a, b):
np = pytest.importorskip("numpy")
x = np.random.random(10000000)
future = await c.scatter(x, workers=a.address)
y = c.submit(inc, future, workers=b.address)
await wait(y)
assert len(b.incoming_transfer_log) == len(a.outgoing_transfer_log) == 1
bb = b.incoming_transfer_log[0]["duration"]
aa = a.outgoing_transfer_log[0]["duration"]
ratio = aa / bb
assert 1 / 3 < ratio < 3
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(
nthreads=[("127.0.0.1", 1), ("127.0.0.1", 1), ("127.0.0.2", 1)], client=True
)
async def test_prefer_gather_from_local_address(c, s, w1, w2, w3):
x = await c.scatter(123, workers=[w1.address, w3.address], broadcast=True)
y = c.submit(inc, x, workers=[w2.address])
await wait(y)
assert any(d["who"] == w2.address for d in w1.outgoing_transfer_log)
assert not any(d["who"] == w2.address for d in w3.outgoing_transfer_log)
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1)] * 20,
timeout=30,
config={"distributed.worker.connections.incoming": 1},
)
async def test_avoid_oversubscription(c, s, *workers):
np = pytest.importorskip("numpy")
x = c.submit(np.random.random, 1000000, workers=[workers[0].address])
await wait(x)
futures = [c.submit(len, x, pure=False, workers=[w.address]) for w in workers[1:]]
await wait(futures)
# Original worker not responsible for all transfers
assert len(workers[0].outgoing_transfer_log) < len(workers) - 2
# Some other workers did some work
assert len([w for w in workers if len(w.outgoing_transfer_log) > 0]) >= 3
@gen_cluster(client=True, worker_kwargs={"metrics": {"my_port": lambda w: w.port}})
async def test_custom_metrics(c, s, a, b):
assert s.workers[a.address].metrics["my_port"] == a.port
assert s.workers[b.address].metrics["my_port"] == b.port
@gen_cluster(client=True)
async def test_register_worker_callbacks(c, s, a, b):
# preload function to run
def mystartup(dask_worker):
dask_worker.init_variable = 1
def mystartup2():
import os
os.environ["MY_ENV_VALUE"] = "WORKER_ENV_VALUE"
return "Env set."
# Check that preload function has been run
def test_import(dask_worker):
return hasattr(dask_worker, "init_variable")
# and dask_worker.init_variable == 1
def test_startup2():
import os
return os.getenv("MY_ENV_VALUE", None) == "WORKER_ENV_VALUE"
# Nothing has been run yet
result = await c.run(test_import)
assert list(result.values()) == [False] * 2
result = await c.run(test_startup2)
assert list(result.values()) == [False] * 2
# Start a worker and check that startup is not run
worker = await Worker(s.address, loop=s.loop)
result = await c.run(test_import, workers=[worker.address])
assert list(result.values()) == [False]
await worker.close()
# Add a preload function
response = await c.register_worker_callbacks(setup=mystartup)
assert len(response) == 2
    # Check it has been run on the existing workers
result = await c.run(test_import)
assert list(result.values()) == [True] * 2
    # Start a worker and check the callback is run on it
worker = await Worker(s.address, loop=s.loop)
result = await c.run(test_import, workers=[worker.address])
assert list(result.values()) == [True]
await worker.close()
# Register another preload function
response = await c.register_worker_callbacks(setup=mystartup2)
assert len(response) == 2
# Check it has been run
result = await c.run(test_startup2)
assert list(result.values()) == [True] * 2
    # Start a worker and check the callbacks are run on it
worker = await Worker(s.address, loop=s.loop)
result = await c.run(test_import, workers=[worker.address])
assert list(result.values()) == [True]
result = await c.run(test_startup2, workers=[worker.address])
assert list(result.values()) == [True]
await worker.close()
@gen_cluster(client=True)
async def test_register_worker_callbacks_err(c, s, a, b):
with pytest.raises(ZeroDivisionError):
await c.register_worker_callbacks(setup=lambda: 1 / 0)
@gen_cluster(nthreads=[])
async def test_data_types(s):
w = await Worker(s.address, data=dict)
assert isinstance(w.data, dict)
await w.close()
data = dict()
w = await Worker(s.address, data=data)
assert w.data is data
await w.close()
class Data(dict):
def __init__(self, x, y):
self.x = x
self.y = y
w = await Worker(s.address, data=(Data, {"x": 123, "y": 456}))
assert w.data.x == 123
assert w.data.y == 456
await w.close()
@gen_cluster(nthreads=[])
async def test_local_directory(s):
with tmpfile() as fn:
with dask.config.set(temporary_directory=fn):
w = await Worker(s.address)
assert w.local_directory.startswith(fn)
assert "dask-worker-space" in w.local_directory
@pytest.mark.skipif(
not sys.platform.startswith("linux"), reason="Need 127.0.0.2 to mean localhost"
)
@gen_cluster(nthreads=[], client=True)
async def test_host_address(c, s):
w = await Worker(s.address, host="127.0.0.2")
assert "127.0.0.2" in w.address
await w.close()
n = await Nanny(s.address, host="127.0.0.3")
assert "127.0.0.3" in n.address
assert "127.0.0.3" in n.worker_address
await n.close()
def test_resource_limit(monkeypatch):
assert parse_memory_limit("250MiB", 1, total_cores=1) == 1024 * 1024 * 250
new_limit = 1024 * 1024 * 200
import distributed.worker
monkeypatch.setattr(distributed.system, "MEMORY_LIMIT", new_limit)
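    # parse_memory_limit caps the requested limit at the detected system
    # memory, so the 250 MiB request is clamped to the patched 200 MiB value.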
assert parse_memory_limit("250MiB", 1, total_cores=1) == new_limit
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_interface_async(loop, Worker):
from distributed.utils import get_ip_interface
psutil = pytest.importorskip("psutil")
if_names = sorted(psutil.net_if_addrs())
for if_name in if_names:
try:
ipv4_addr = get_ip_interface(if_name)
except ValueError:
pass
else:
if ipv4_addr == "127.0.0.1":
break
else:
pytest.skip(
"Could not find loopback interface. "
"Available interfaces are: %s." % (if_names,)
)
async with Scheduler(interface=if_name) as s:
assert s.address.startswith("tcp://127.0.0.1")
async with Worker(s.address, interface=if_name) as w:
assert w.address.startswith("tcp://127.0.0.1")
assert w.ip == "127.0.0.1"
async with Client(s.address, asynchronous=True) as c:
info = c.scheduler_info()
assert "tcp://127.0.0.1" in info["address"]
assert all("127.0.0.1" == d["host"] for d in info["workers"].values())
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_protocol_from_scheduler_address(Worker):
ucp = pytest.importorskip("ucp")
async with Scheduler(protocol="ucx") as s:
assert s.address.startswith("ucx://")
async with Worker(s.address) as w:
assert w.address.startswith("ucx://")
async with Client(s.address, asynchronous=True) as c:
info = c.scheduler_info()
assert info["address"].startswith("ucx://")
@pytest.mark.asyncio
@pytest.mark.parametrize("Worker", [Worker, Nanny])
async def test_worker_listens_on_same_interface_by_default(Worker):
async with Scheduler(host="localhost") as s:
assert s.ip in {"127.0.0.1", "localhost"}
async with Worker(s.address) as w:
assert s.ip == w.ip
@gen_cluster(client=True)
async def test_close_gracefully(c, s, a, b):
futures = c.map(slowinc, range(200), delay=0.1)
while not b.data:
await asyncio.sleep(0.1)
mem = set(b.data)
proc = set(b.executing)
await b.close_gracefully()
assert b.status == "closed"
assert b.address not in s.workers
assert mem.issubset(set(a.data))
for key in proc:
assert s.tasks[key].state in ("processing", "memory")
@pytest.mark.slow
@pytest.mark.asyncio
async def test_lifetime(cleanup):
async with Scheduler() as s:
async with Worker(s.address) as a, Worker(s.address, lifetime="1 seconds") as b:
async with Client(s.address, asynchronous=True) as c:
futures = c.map(slowinc, range(200), delay=0.1)
await asyncio.sleep(1.5)
assert b.status != "running"
await b.finished()
assert set(b.data).issubset(a.data) # successfully moved data over
@gen_cluster(client=True, worker_kwargs={"lifetime": "10s", "lifetime_stagger": "2s"})
async def test_lifetime_stagger(c, s, a, b):
assert a.lifetime != b.lifetime
assert 8 <= a.lifetime <= 12
assert 8 <= b.lifetime <= 12
@pytest.mark.asyncio
async def test_bad_metrics(cleanup):
def bad_metric(w):
raise Exception("Hello")
async with Scheduler() as s:
async with Worker(s.address, metrics={"bad": bad_metric}) as w:
assert "bad" not in s.workers[w.address].metrics
@pytest.mark.asyncio
async def test_bad_startup(cleanup):
def bad_startup(w):
raise Exception("Hello")
async with Scheduler() as s:
try:
w = await Worker(s.address, startup_information={"bad": bad_startup})
except Exception:
pytest.fail("Startup exception was raised")
@pytest.mark.asyncio
async def test_update_latency(cleanup):
async with await Scheduler() as s:
async with await Worker(s.address) as w:
original = w.latency
await w.heartbeat()
assert original != w.latency
if w.digests is not None:
assert w.digests["latency"].size() > 0
@pytest.mark.asyncio
@pytest.mark.parametrize("reconnect", [True, False])
async def test_heartbeat_comm_closed(cleanup, monkeypatch, reconnect):
with captured_logger("distributed.worker", level=logging.WARNING) as logger:
async with await Scheduler() as s:
def bad_heartbeat_worker(*args, **kwargs):
raise CommClosedError()
async with await Worker(s.address, reconnect=reconnect) as w:
# Trigger CommClosedError during worker heartbeat
monkeypatch.setattr(
w.scheduler, "heartbeat_worker", bad_heartbeat_worker
)
await w.heartbeat()
if reconnect:
assert w.status == "running"
else:
assert w.status == "closed"
assert "Heartbeat to scheduler failed" in logger.getvalue()
|
|
#!/usr/bin/env python3
import socket
from util import ip4_range, reassemble4_ether
import unittest
from framework import VppTestCase, VppTestRunner
from template_bd import BridgeDomain
from scapy.layers.l2 import Ether
from scapy.packet import Raw
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN
from vpp_ip_route import VppIpRoute, VppRoutePath
from vpp_ip import INVALID_INDEX
class TestVxlanGbp(VppTestCase):
""" VXLAN GBP Test Case """
@property
def frame_request(self):
""" Ethernet frame modeling a generic request """
return (Ether(src='00:00:00:00:00:01', dst='00:00:00:00:00:02') /
IP(src='1.2.3.4', dst='4.3.2.1') /
UDP(sport=10000, dport=20000) /
Raw(b'\xa5' * 100))
@property
def frame_reply(self):
""" Ethernet frame modeling a generic reply """
return (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
IP(src='4.3.2.1', dst='1.2.3.4') /
UDP(sport=20000, dport=10000) /
Raw(b'\xa5' * 100))
def encapsulate(self, pkt, vni):
"""
Encapsulate the original payload frame by adding VXLAN GBP header with
its UDP, IP and Ethernet fields
"""
return (Ether(src=self.pg0.remote_mac, dst=self.pg0.local_mac) /
IP(src=self.pg0.remote_ip4, dst=self.pg0.local_ip4) /
UDP(sport=self.dport, dport=self.dport, chksum=0) /
VXLAN(vni=vni, flags=self.flags, gpflags=self.gpflags,
gpid=self.sclass) / pkt)
def ip_range(self, start, end):
""" range of remote ip's """
return ip4_range(self.pg0.remote_ip4, start, end)
def decapsulate(self, pkt):
"""
Decapsulate the original payload frame by removing VXLAN header
"""
        # check that the G and I flags are set
self.assertEqual(pkt[VXLAN].flags, int('0x88', 16))
return pkt[VXLAN].payload
# Method for checking VXLAN GBP encapsulation.
#
def check_encapsulation(self, pkt, vni, local_only=False, mcast_pkt=False):
# TODO: add error messages
# Verify source MAC is VPP_MAC and destination MAC is MY_MAC resolved
# by VPP using ARP.
self.assertEqual(pkt[Ether].src, self.pg0.local_mac)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[Ether].dst, self.pg0.remote_mac)
else:
self.assertEqual(pkt[Ether].dst, type(self).mcast_mac)
# Verify VXLAN GBP tunnel source IP is VPP_IP and destination IP is
# MY_IP.
self.assertEqual(pkt[IP].src, self.pg0.local_ip4)
if not local_only:
if not mcast_pkt:
self.assertEqual(pkt[IP].dst, self.pg0.remote_ip4)
else:
self.assertEqual(pkt[IP].dst, type(self).mcast_ip4)
# Verify UDP destination port is VXLAN GBP 48879, source UDP port could
# be arbitrary.
self.assertEqual(pkt[UDP].dport, type(self).dport)
# Verify UDP checksum
self.assert_udp_checksum_valid(pkt)
# Verify VNI
# pkt.show()
self.assertEqual(pkt[VXLAN].vni, vni)
# Verify Source Class
self.assertEqual(pkt[VXLAN].gpid, 0)
@classmethod
def create_vxlan_gbp_flood_test_bd(cls, vni, n_ucast_tunnels):
        # Create n_ucast_tunnels unicast VXLAN GBP tunnels under the bridge domain
ip_range_start = 10
ip_range_end = ip_range_start + n_ucast_tunnels
next_hop_address = cls.pg0.remote_ip4
for dest_ip4 in ip4_range(cls.pg0.remote_ip4,
ip_range_start,
ip_range_end):
# add host route so dest_ip4 will not be resolved
rip = VppIpRoute(cls, dest_ip4, 32,
[VppRoutePath(next_hop_address,
INVALID_INDEX)],
register=False)
rip.add_vpp_config()
r = cls.vapi.vxlan_gbp_tunnel_add_del(
tunnel={
'src': cls.pg0.local_ip4,
'dst': dest_ip4,
'vni': vni,
'instance': INVALID_INDEX,
'mcast_sw_if_index': INVALID_INDEX,
'mode': 1,
},
is_add=1
)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=vni)
# Class method to start the VXLAN GBP test case.
# Overrides setUpClass method in VppTestCase class.
# Python try..except statement is used to ensure that the tear down of
# the class will be executed even if exception is raised.
# @param cls The class pointer.
@classmethod
def setUpClass(cls):
super(TestVxlanGbp, cls).setUpClass()
try:
cls.dport = 48879
cls.flags = 0x88
cls.gpflags = 0x0
cls.sclass = 0
            # Create 4 pg interfaces.
cls.create_pg_interfaces(range(4))
for pg in cls.pg_interfaces:
pg.admin_up()
# Configure IPv4 addresses on VPP pg0.
cls.pg0.config_ip4()
# Resolve MAC address for VPP's IP address on pg0.
cls.pg0.resolve_arp()
# Create VXLAN GBP VTEP on VPP pg0, and put vxlan_gbp_tunnel0 and
# pg1 into BD.
cls.single_tunnel_bd = 1
cls.single_tunnel_vni = 0xabcde
r = cls.vapi.vxlan_gbp_tunnel_add_del(
tunnel={
'src': cls.pg0.local_ip4,
'dst': cls.pg0.remote_ip4,
'vni': cls.single_tunnel_vni,
'instance': INVALID_INDEX,
'mcast_sw_if_index': INVALID_INDEX,
'mode': 1,
},
is_add=1
)
cls.vapi.sw_interface_set_l2_bridge(rx_sw_if_index=r.sw_if_index,
bd_id=cls.single_tunnel_bd)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg1.sw_if_index,
bd_id=cls.single_tunnel_bd)
            # Number of unicast tunnels used when testing flooding
cls.n_ucast_tunnels = 2
# Setup vni 3 to test unicast flooding
cls.ucast_flood_bd = 3
cls.create_vxlan_gbp_flood_test_bd(cls.ucast_flood_bd,
cls.n_ucast_tunnels)
cls.vapi.sw_interface_set_l2_bridge(
rx_sw_if_index=cls.pg3.sw_if_index,
bd_id=cls.ucast_flood_bd)
except Exception:
super(TestVxlanGbp, cls).tearDownClass()
raise
@classmethod
def tearDownClass(cls):
super(TestVxlanGbp, cls).tearDownClass()
def assert_eq_pkts(self, pkt1, pkt2):
""" Verify the Ether, IP, UDP, payload are equal in both
packets
"""
self.assertEqual(pkt1[Ether].src, pkt2[Ether].src)
self.assertEqual(pkt1[Ether].dst, pkt2[Ether].dst)
self.assertEqual(pkt1[IP].src, pkt2[IP].src)
self.assertEqual(pkt1[IP].dst, pkt2[IP].dst)
self.assertEqual(pkt1[UDP].sport, pkt2[UDP].sport)
self.assertEqual(pkt1[UDP].dport, pkt2[UDP].dport)
self.assertEqual(pkt1[Raw], pkt2[Raw])
def test_decap(self):
""" Decapsulation test
Send encapsulated frames from pg0
Verify receipt of decapsulated frames on pg1
"""
encapsulated_pkt = self.encapsulate(self.frame_request,
self.single_tunnel_vni)
self.pg0.add_stream([encapsulated_pkt, ])
self.pg1.enable_capture()
self.pg_start()
# Pick first received frame and check if it's the non-encapsulated
# frame
out = self.pg1.get_capture(1)
pkt = out[0]
self.assert_eq_pkts(pkt, self.frame_request)
def test_encap(self):
""" Encapsulation test
Send frames from pg1
Verify receipt of encapsulated frames on pg0
"""
self.pg1.add_stream([self.frame_reply])
self.pg0.enable_capture()
self.pg_start()
# Pick first received frame and check if it's correctly encapsulated.
out = self.pg0.get_capture(1)
pkt = out[0]
self.check_encapsulation(pkt, self.single_tunnel_vni)
payload = self.decapsulate(pkt)
self.assert_eq_pkts(payload, self.frame_reply)
def test_ucast_flood(self):
""" Unicast flood test
Send frames from pg3
Verify receipt of encapsulated frames on pg0
"""
self.pg3.add_stream([self.frame_reply])
self.pg0.enable_capture()
self.pg_start()
# Get packet from each tunnel and assert it's correctly encapsulated.
out = self.pg0.get_capture(self.n_ucast_tunnels)
for pkt in out:
self.check_encapsulation(pkt, self.ucast_flood_bd, True)
payload = self.decapsulate(pkt)
self.assert_eq_pkts(payload, self.frame_reply)
def test_encap_big_packet(self):
""" Encapsulation test send big frame from pg1
Verify receipt of encapsulated frames on pg0
"""
self.vapi.sw_interface_set_mtu(self.pg0.sw_if_index, [1500, 0, 0, 0])
frame = (Ether(src='00:00:00:00:00:02', dst='00:00:00:00:00:01') /
IP(src='4.3.2.1', dst='1.2.3.4') /
UDP(sport=20000, dport=10000) /
Raw(b'\xa5' * 1450))
self.pg1.add_stream([frame])
self.pg0.enable_capture()
self.pg_start()
# Pick first received frame and check if it's correctly encapsulated.
out = self.pg0.get_capture(2)
pkt = reassemble4_ether(out)
self.check_encapsulation(pkt, self.single_tunnel_vni)
payload = self.decapsulate(pkt)
self.assert_eq_pkts(payload, frame)
# Method to define VPP actions before tear down of the test case.
# Overrides tearDown method in VppTestCase class.
# @param self The object pointer.
def tearDown(self):
super(TestVxlanGbp, self).tearDown()
def show_commands_at_teardown(self):
self.logger.info(self.vapi.cli("show bridge-domain 1 detail"))
self.logger.info(self.vapi.cli("show bridge-domain 3 detail"))
self.logger.info(self.vapi.cli("show vxlan-gbp tunnel"))
self.logger.info(self.vapi.cli("show error"))
if __name__ == '__main__':
unittest.main(testRunner=VppTestRunner)
|
|
"""Config flow for HVV integration."""
import logging
from pygti.auth import GTI_DEFAULT_HOST
from pygti.exceptions import CannotConnect, InvalidAuth
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_OFFSET, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
import homeassistant.helpers.config_validation as cv
from .const import ( # pylint:disable=unused-import
CONF_FILTER,
CONF_REAL_TIME,
CONF_STATION,
DOMAIN,
)
from .hub import GTIHub
_LOGGER = logging.getLogger(__name__)
SCHEMA_STEP_USER = vol.Schema(
{
vol.Required(CONF_HOST, default=GTI_DEFAULT_HOST): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
)
SCHEMA_STEP_STATION = vol.Schema({vol.Required(CONF_STATION): str})
SCHEMA_STEP_OPTIONS = vol.Schema(
{
vol.Required(CONF_FILTER): vol.In([]),
vol.Required(CONF_OFFSET, default=0): cv.positive_int,
vol.Optional(CONF_REAL_TIME, default=True): bool,
}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for HVV."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize component."""
self.hub = None
self.data = None
self.stations = {}
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
session = aiohttp_client.async_get_clientsession(self.hass)
self.hub = GTIHub(
user_input[CONF_HOST],
user_input[CONF_USERNAME],
user_input[CONF_PASSWORD],
session,
)
try:
response = await self.hub.authenticate()
_LOGGER.debug("Init gti: %r", response)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
if not errors:
self.data = user_input
return await self.async_step_station()
return self.async_show_form(
step_id="user", data_schema=SCHEMA_STEP_USER, errors=errors
)
async def async_step_station(self, user_input=None):
"""Handle the step where the user inputs his/her station."""
if user_input is not None:
errors = {}
check_name = await self.hub.gti.checkName(
{"theName": {"name": user_input[CONF_STATION]}, "maxList": 20}
)
stations = check_name.get("results")
self.stations = {
f"{station.get('name')}": station
for station in stations
if station.get("type") == "STATION"
}
if not self.stations:
errors["base"] = "no_results"
return self.async_show_form(
step_id="station", data_schema=SCHEMA_STEP_STATION, errors=errors
)
# schema
return await self.async_step_station_select()
return self.async_show_form(step_id="station", data_schema=SCHEMA_STEP_STATION)
async def async_step_station_select(self, user_input=None):
"""Handle the step where the user inputs his/her station."""
schema = vol.Schema({vol.Required(CONF_STATION): vol.In(list(self.stations))})
if user_input is None:
return self.async_show_form(step_id="station_select", data_schema=schema)
self.data.update({"station": self.stations[user_input[CONF_STATION]]})
title = self.data[CONF_STATION]["name"]
return self.async_create_entry(title=title, data=self.data)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get options flow."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options flow handler."""
def __init__(self, config_entry):
"""Initialize HVV Departures options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
self.departure_filters = {}
self.hub = None
async def async_step_init(self, user_input=None):
"""Manage the options."""
errors = {}
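        # Fetch the available departure filters from the GTI API on first use;
        # they are keyed by index so the selection can be round-tripped
        # through the multi-select form field.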
if not self.departure_filters:
departure_list = {}
self.hub = self.hass.data[DOMAIN][self.config_entry.entry_id]
try:
departure_list = await self.hub.gti.departureList(
{
"station": self.config_entry.data[CONF_STATION],
"time": {"date": "heute", "time": "jetzt"},
"maxList": 5,
"maxTimeOffset": 200,
"useRealtime": True,
"returnFilters": True,
}
)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
if not errors:
self.departure_filters = {
str(i): departure_filter
for i, departure_filter in enumerate(departure_list.get("filter"))
}
if user_input is not None and not errors:
options = {
CONF_FILTER: [
self.departure_filters[x] for x in user_input[CONF_FILTER]
],
CONF_OFFSET: user_input[CONF_OFFSET],
CONF_REAL_TIME: user_input[CONF_REAL_TIME],
}
return self.async_create_entry(title="", data=options)
if CONF_FILTER in self.config_entry.options:
old_filter = [
i
for (i, f) in self.departure_filters.items()
if f in self.config_entry.options.get(CONF_FILTER)
]
else:
old_filter = []
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(CONF_FILTER, default=old_filter): cv.multi_select(
{
key: f"{departure_filter['serviceName']}, {departure_filter['label']}"
for key, departure_filter in self.departure_filters.items()
}
),
vol.Required(
CONF_OFFSET,
default=self.config_entry.options.get(CONF_OFFSET, 0),
): cv.positive_int,
vol.Optional(
CONF_REAL_TIME,
default=self.config_entry.options.get(CONF_REAL_TIME, True),
): bool,
}
),
errors=errors,
)
|
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a Bokeh Application Handler to build up documents by running
the code from ``main.py`` or ``main.ipynb`` files in specified directories.
The directory may also optionally contain:
* A ``server_lifecycle.py`` module to provide lifecycle callbacks for the
application and sessions.
* A ``static`` subdirectory containing app-specific static resources to
serve.
* A ``theme.yaml`` file containing a Bokeh theme to automatically apply to
all new documents.
* A ``templates`` subdirectory containing templates for app display
A full directory layout might look like:
.. code-block:: none
myapp
|
+---main.py
+---server_lifecycle.py
+---static
+---theme.yaml
+---templates
+---index.html
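
To build a Bokeh application from such a directory programmatically, a
minimal sketch might look like this (``Application`` and ``DirectoryHandler``
are the public Bokeh APIs; the ``myapp`` path below is only a placeholder):

.. code-block:: python

    from bokeh.application import Application
    from bokeh.application.handlers import DirectoryHandler

    handler = DirectoryHandler(filename="/path/to/myapp")
    app = Application(handler)
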
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import sys
from os.path import basename, dirname, exists, join
# External imports
from jinja2 import Environment, FileSystemLoader
# Bokeh imports
from .code_runner import CodeRunner
from .handler import Handler
from .notebook import NotebookHandler
from .script import ScriptHandler
from .server_lifecycle import ServerLifecycleHandler
from .server_request_handler import ServerRequestHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'DirectoryHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class DirectoryHandler(Handler):
''' Load an application directory which modifies a Document.
'''
def __init__(self, *args, **kwargs):
'''
Keywords:
filename (str) : a path to an application directory with either "main.py" or "main.ipynb"
argv (list[str], optional) : a list of string arguments to make available as sys.argv to main.py
'''
super().__init__(*args, **kwargs)
if 'filename' not in kwargs:
raise ValueError('Must pass a filename to DirectoryHandler')
src_path = kwargs['filename']
argv = kwargs.get('argv', [])
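        # If the directory contains an __init__.py, run it as a package so
        # that main.py and any hook modules can use relative imports against it.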
init_py = join(src_path, '__init__.py')
if exists(init_py):
self._package_runner = CodeRunner(open(init_py).read(), init_py, argv)
self._package = self._package_runner.new_module()
sys.modules[self._package.__name__] = self._package
else:
self._package_runner = None
self._package = None
main_py = join(src_path, 'main.py')
main_ipy = join(src_path, 'main.ipynb')
if exists(main_py) and exists(main_ipy):
log.warning("Found both 'main.py' and 'main.ipynb' in %s, using 'main.py'" % (src_path))
main = main_py
elif exists(main_py):
main = main_py
elif exists(main_ipy):
main = main_ipy
else:
raise ValueError("No 'main.py' or 'main.ipynb' in %s" % (src_path))
self._path = src_path
self._main = main
handler = NotebookHandler if main.endswith('.ipynb') else ScriptHandler
self._main_handler = handler(filename=self._main, argv=argv, package=self._package)
hooks = None
app_hooks = join(src_path, 'app_hooks.py')
lifecycle = join(src_path, 'server_lifecycle.py')
if exists(app_hooks) and exists(lifecycle):
raise ValueError("Directory style apps can provide either server_lifecycle.py or app_hooks.py, not both.")
elif exists(lifecycle):
hooks = lifecycle
elif exists(app_hooks):
hooks = app_hooks
if hooks is not None:
self._lifecycle = hooks
self._lifecycle_handler = ServerLifecycleHandler(filename=self._lifecycle, argv=argv, package=self._package)
else:
self._lifecycle = None
self._lifecycle_handler = Handler() # no-op handler
        if exists(app_hooks):
            self._request_handler = ServerRequestHandler(filename=app_hooks, argv=argv, package=self._package)
        else:
            self._request_handler = Handler() # no-op handler
self._theme = None
themeyaml = join(src_path, 'theme.yaml')
if exists(themeyaml):
from bokeh.themes import Theme
self._theme = Theme(filename=themeyaml)
appstatic = join(src_path, 'static')
if exists(appstatic):
self._static = appstatic
self._template = None
appindex = join(src_path, 'templates', 'index.html')
if exists(appindex):
env = Environment(loader=FileSystemLoader(dirname(appindex)))
self._template = env.get_template('index.html')
# Properties --------------------------------------------------------------
@property
def error(self):
''' If the handler fails, may contain a related error message.
'''
return self._main_handler.error or self._lifecycle_handler.error
@property
def error_detail(self):
''' If the handler fails, may contain a traceback or other details.
'''
return self._main_handler.error_detail or self._lifecycle_handler.error_detail
@property
def failed(self):
''' ``True`` if the handler failed to modify the doc
'''
return self._main_handler.failed or self._lifecycle_handler.failed
@property
def safe_to_fork(self):
''' Whether it is still safe for the Bokeh server to fork new workers.
``False`` if the configured code (script, notebook, etc.) has already
been run.
'''
return self._main_handler.safe_to_fork
# Public methods ----------------------------------------------------------
def modify_document(self, doc):
''' Execute the configured ``main.py`` or ``main.ipynb`` to modify the
document.
This method will also search the app directory for any theme or
template files, and automatically configure the document with them
if they are found.
'''
if self._lifecycle_handler.failed:
return
# Note: we do NOT copy self._theme, which assumes the Theme
# class is immutable (has no setters)
if self._theme is not None:
doc.theme = self._theme
if self._template is not None:
doc.template = self._template
# This internal handler should never add a template
self._main_handler.modify_document(doc)
def on_server_loaded(self, server_context):
        ''' Execute ``on_server_loaded`` from ``server_lifecycle.py`` (if
it is defined) when the server is first started.
Args:
server_context (ServerContext) :
'''
if self._package_runner and self._package:
self._package_runner.run(self._package)
return self._lifecycle_handler.on_server_loaded(server_context)
def on_server_unloaded(self, server_context):
''' Execute ``on_server_unloaded`` from ``server_lifecycle.py`` (if
it is defined) when the server cleanly exits. (Before stopping the
server's ``IOLoop``.)
Args:
server_context (ServerContext) :
.. warning::
In practice this code may not run, since servers are often killed
by a signal.
'''
return self._lifecycle_handler.on_server_unloaded(server_context)
def on_session_created(self, session_context):
''' Execute ``on_session_created`` from ``server_lifecycle.py`` (if
it is defined) when a new session is created.
Args:
session_context (SessionContext) :
'''
return self._lifecycle_handler.on_session_created(session_context)
def on_session_destroyed(self, session_context):
''' Execute ``on_session_destroyed`` from ``server_lifecycle.py`` (if
it is defined) when a session is destroyed.
Args:
session_context (SessionContext) :
'''
return self._lifecycle_handler.on_session_destroyed(session_context)
def process_request(self, request):
''' Processes incoming HTTP request returning a dictionary of
additional data to add to the session_context.
Args:
request: HTTP request
Returns:
A dictionary of JSON serializable data to be included on
the session context.
'''
return self._request_handler.process_request(request)
def url_path(self):
''' The last path component for the basename of the path to the
configured directory.
'''
if self.failed:
return None
else:
# TODO should fix invalid URL characters
return '/' + basename(self._path)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
|
import flask
import flask_login
from ..models import Repository, Organization
from ..security import permissions
#: Manage controller blueprint
manage = flask.Blueprint('manage', __name__, url_prefix='/manage')
@manage.route('')
@flask_login.login_required
@permissions.actions.manage_dashboard.require(403)
def dashboard():
"""Management zone dashboard (GET handler)"""
ext_master = flask.current_app.container.get('ext_master')
tabs = {}
ext_master.call('view_manage_dashboard_tabs', tabs_dict=tabs)
tabs = sorted(tabs.values())
active_tab = flask.request.args.get('tab', tabs[0].id)
return flask.render_template(
'manage/dashboard.html', tabs=tabs, active_tab=active_tab
)
@manage.route('/profile/update')
@flask_login.login_required
@permissions.actions.manage_profile_update.require(403)
def profile_update():
"""Update user info from GitHub (GET handler)
.. todo:: protect from updating too often
"""
db = flask.current_app.container.get('db')
gh_api = flask.current_app.container.get(
'gh_api', token=flask.session['github_token']
)
user_data = gh_api.get('/user').data
gh_user = flask_login.current_user.github_user
gh_user.update_from_dict(user_data)
db.session.commit()
return flask.redirect(flask.url_for('manage.dashboard', tab='profile'))
@manage.route('/repositories')
@flask_login.login_required
@permissions.actions.manage_repos.require(403)
def repositories():
"""List user repositories from GitHub (GET handler)"""
page = int(flask.request.args.get('page', 0))
gh_api = flask.current_app.container.get(
'gh_api', token=flask.session['github_token']
)
gh_repos = gh_api.get('/user/repos', page=page)
user = flask_login.current_user.github_user
active_ids = [repo.github_id for repo in user.repositories]
return flask.render_template(
'manage/repos.html', repos=gh_repos.data,
actual_page=gh_repos.actual_page, total_pages=gh_repos.total_pages,
Repository=Repository, active_ids=active_ids
)
def get_repo_if_admin(db, full_name):
"""Retrieve repository from db and return if
current user is admin (owner or member)
:param db: database connection where are repos stored
:type db: ``flask_sqlalchemy.SQLAlchemy``
:param full_name: full name of desired repository
:type full_name: str
:return: repository if found, None otherwise
:rtype: ``repocribro.models.Repository`` or None
"""
user = flask_login.current_user.github_user
repo = db.session.query(Repository).filter_by(
full_name=full_name
).first()
if repo is None:
return None
if repo.owner == user or user in repo.members:
return repo
return None
@manage.route('/repository/<path:full_name>')
@flask_login.login_required
@permissions.actions.manage_repo.require(403)
def repository_detail(full_name):
"""Repository detail (GET handler)"""
db = flask.current_app.container.get('db')
repo = get_repo_if_admin(db, full_name)
if repo is None:
flask.abort(404)
return flask.render_template(
'manage/repo.html', repo=repo, Repository=Repository
)
def has_good_webhook(gh_api, repo):
"""Check webhook at GitHub for repo
:param gh_api: GitHub API client for communication
:type gh_api: ``repocribro.github.GitHubAPI``
:param repo: Repository which webhook should be checked
:type repo: ``repocribro.models.Repository``
:return: If webhook is already in good shape
:rtype: bool
.. todo:: move somewhere else, check registered events
"""
if repo.webhook_id is None:
return False
webhook = gh_api.webhook_get(repo.full_name, repo.webhook_id)
return webhook.is_ok
def update_webhook(gh_api, repo):
"""Update webhook at GitHub for repo if needed
:param gh_api: GitHub API client for communication
:type gh_api: ``repocribro.github.GitHubAPI``
:param repo: Repository which webhook should be updated
:type repo: ``repocribro.models.Repository``
:return: If webhook is now in good shape
:rtype: bool
.. todo:: move somewhere else
"""
if not has_good_webhook(gh_api, repo):
repo.webhook_id = None
if repo.webhook_id is None:
# Create new webhook
webhook = gh_api.webhook_create(
repo.full_name,
flask.url_for(gh_api.WEBHOOK_CONTROLLER, _external=True)
)
if webhook is None:
return False
repo.webhook_id = webhook['id']
return True
@manage.route('/repository/activate', methods=['POST'])
@flask_login.login_required
@permissions.actions.manage_repo_activate.require(403)
def repository_activate():
"""Activate repo in app from GitHub (POST handler)
.. todo:: protect from activating too often
"""
db = flask.current_app.container.get('db')
gh_api = flask.current_app.container.get(
'gh_api', token=flask.session['github_token']
)
full_name = flask.request.form.get('full_name')
visibility_type = flask.request.form.get('enable', type=int)
if visibility_type not in (
Repository.VISIBILITY_HIDDEN,
Repository.VISIBILITY_PRIVATE,
Repository.VISIBILITY_PUBLIC
):
flask.flash('You\'ve requested something weird...', 'error')
return flask.redirect(flask.url_for('manage.repositories'))
gh_repo = gh_api.get('/repos/' + full_name)
if not gh_repo.is_ok:
flask.flash('Repository not found at GitHub', 'error')
return flask.redirect(flask.url_for('manage.repositories'))
if not gh_repo.data['permissions']['admin']:
flask.flash('You are not admin of that repository', 'error')
return flask.redirect(flask.url_for('manage.repositories'))
user = flask_login.current_user.github_user
repo = db.session.query(Repository).filter_by(
full_name=full_name
).first()
is_personal_repo = gh_repo.data['owner']['id'] == user.github_id
if repo is None:
if is_personal_repo:
repo = Repository.create_from_dict(gh_repo.data, user)
else:
org_login = gh_repo.data['owner']['login']
org = db.session.query(Organization).filter_by(
login=org_login
).first()
if org is None:
gh_org = gh_api.get('/orgs/'+org_login)
org = Organization.create_from_dict(gh_org.data)
repo = Repository.create_from_dict(gh_repo.data, org)
repo.members.append(user)
db.session.add(org)
db.session.add(repo)
else:
if not is_personal_repo and user not in repo.members:
repo.members.append(user)
gh_repo_langs = gh_api.get('/repos/' + full_name + '/languages')
repo.update_from_dict(gh_repo.data)
    if gh_repo_langs.is_ok:
repo.update_languages(gh_repo_langs.data)
if not update_webhook(gh_api, repo):
        flask.flash('We were unable to create a webhook for that repository. '
                    'There may be an old one left over; please remove it and '
                    'try this procedure again.',
'warning')
repo.visibility_type = visibility_type
if repo.is_hidden:
repo.generate_secret()
db.session.commit()
return flask.redirect(
flask.url_for('manage.repository_detail', full_name=repo.full_name)
)
@manage.route('/repository/deactivate', methods=['POST'])
@flask_login.login_required
@permissions.actions.manage_repo_deactivate.require(403)
def repository_deactivate():
"""Deactivate repo in app from GitHub (POST handler)"""
db = flask.current_app.container.get('db')
gh_api = flask.current_app.container.get(
'gh_api', token=flask.session['github_token']
)
full_name = flask.request.form.get('full_name')
repo = get_repo_if_admin(db, full_name)
if repo is None:
flask.abort(404)
if repo.webhook_id is not None:
if gh_api.webhook_delete(repo.full_name, repo.webhook_id):
flask.flash('Webhook has been deactivated',
'success')
else:
flask.flash('GitHub couldn\'t delete the webhook. Please do it '
                        'manually within the GitHub web application.',
'warning')
repo.webhook_id = None
db.session.commit()
else:
        flask.flash('There is no webhook registered within the app', 'info')
return flask.redirect(
flask.url_for('manage.repository_detail', full_name=repo.full_name)
)
@manage.route('/repository/delete', methods=['POST'])
@flask_login.login_required
@permissions.actions.manage_repo_delete.require(403)
def repository_delete():
"""Delete repo (in app) from GitHub (POST handler)
.. todo:: consider deleting org repository if there are more members
"""
db = flask.current_app.container.get('db')
gh_api = flask.current_app.container.get(
'gh_api', token=flask.session['github_token']
)
full_name = flask.request.form.get('full_name')
repo = get_repo_if_admin(db, full_name)
if repo is None:
flask.abort(404)
if repo.webhook_id is not None:
if gh_api.webhook_delete(repo.full_name, repo.webhook_id):
flask.flash('Webhook has been deactivated',
'success')
else:
flask.flash('GitHub couldn\'t delete the webhook. Please do it '
                        'manually within the GitHub web application.',
'warning')
repo.webhook_id = None
db.session.commit()
db.session.delete(repo)
db.session.commit()
    flask.flash('Repository {} has been deleted within the app.'.format(full_name),
'success')
return flask.redirect(flask.url_for('manage.repositories'))
@manage.route('/repository/update', methods=['POST'])
@flask_login.login_required
@permissions.actions.manage_repo_update.require(403)
def repository_update():
"""Update repo info from GitHub (POST handler)
.. todo:: protect from updating too often
"""
db = flask.current_app.container.get('db')
gh_api = flask.current_app.container.get(
'gh_api', token=flask.session['github_token']
)
full_name = flask.request.form.get('full_name')
repo = get_repo_if_admin(db, full_name)
if repo is None:
flask.abort(404)
gh_repo = gh_api.get('/repos/' + full_name)
if gh_repo.is_ok:
repo.update_from_dict(gh_repo.data)
gh_repo_langs = gh_api.get('/repos/' + full_name + '/languages')
if gh_repo_langs.is_ok:
repo.update_languages(gh_repo_langs.data)
db.session.commit()
else:
        flask.flash('GitHub doesn\'t know about this repository. '
                    'Try again later or remove the repository from the app.',
'error')
return flask.redirect(
flask.url_for('manage.repository_detail', full_name=repo.full_name)
)
@manage.route('/organizations')
@flask_login.login_required
@permissions.actions.manage_orgs.require(403)
def organizations():
"""List user organizations from GitHub (GET handler)"""
page = int(flask.request.args.get('page', 0))
gh_api = flask.current_app.container.get(
'gh_api', token=flask.session['github_token']
)
gh_orgs = gh_api.get('/user/orgs', page=page)
orgs_link = gh_api.app_connections_link
return flask.render_template(
'manage/orgs.html', orgs=gh_orgs.data,
actual_page=gh_orgs.actual_page, total_pages=gh_orgs.total_pages,
orgs_link=orgs_link,
)
@manage.route('/organization/<login>')
@flask_login.login_required
@permissions.actions.manage_org.require(403)
def organization(login):
"""List organization repositories for activation
    .. todo:: register organization in repocribro
    .. todo:: own profile page of organization
"""
ORG_REPOS_URL = '/orgs/{}/repos?type=member'
page = int(flask.request.args.get('page', 0))
gh_api = flask.current_app.container.get(
'gh_api', token=flask.session['github_token']
)
gh_repos = gh_api.get(ORG_REPOS_URL.format(login), page=page)
user = flask_login.current_user.github_user
active_ids = [repo.github_id for repo in user.repositories]
return flask.render_template(
'manage/repos.html', repos=gh_repos.data,
actual_page=gh_repos.actual_page, total_pages=gh_repos.total_pages,
Repository=Repository, active_ids=active_ids,
repos_type=login+' (organization)'
)
@manage.route('/organization/<login>/update')
@flask_login.login_required
@permissions.actions.manage_org_update.require(403)
def organization_update(login):
"""Update organization
    .. todo:: update org profile
"""
ORG_REPOS_URL = '/orgs/{}/repos?type=member'
gh_api = flask.current_app.container.get(
'gh_api', token=flask.session['github_token']
)
db = flask.current_app.container.get('db')
org = db.session.query(Organization).filter_by(login=login).first()
if org is None:
flask.abort(404)
gh_repos = gh_api.get(ORG_REPOS_URL.format(login))
if gh_repos.is_ok and len(gh_repos.data) > 0:
gh_org = gh_api.get('/orgs/'+login)
if gh_org.is_ok:
org.update_from_dict(gh_org.data)
db.session.commit()
else:
flask.flash('GitHub doesn\'t know about this organization.',
'error')
else:
        flask.flash('You cannot update organizations where you are not '
                    'a member of at least one repository.',
'error')
return flask.redirect(
flask.url_for('manage.organizations')
)
@manage.route('/organization/<login>/delete')
@flask_login.login_required
@permissions.actions.manage_org_delete.require(403)
def organization_delete(login):
"""Delete organization (if no repositories)
    .. todo:: delete org profile
"""
return flask.abort(501)
|
|
# Copyright 2009-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the database module."""
import datetime
import re
import sys
import warnings
sys.path[0:0] = [""]
from bson.code import Code
from bson.codec_options import CodecOptions
from bson.int64 import Int64
from bson.regex import Regex
from bson.dbref import DBRef
from bson.objectid import ObjectId
from bson.py3compat import u, string_type, text_type, PY3
from bson.son import SON
from pymongo import (MongoClient,
ALL,
auth,
OFF,
SLOW_ONLY,
helpers)
from pymongo.collection import Collection
from pymongo.database import Database
from pymongo.errors import (CollectionInvalid,
ConfigurationError,
ExecutionTimeout,
InvalidName,
OperationFailure)
from pymongo.read_preferences import ReadPreference
from pymongo.write_concern import WriteConcern
from test import (client_context,
SkipTest,
unittest,
host,
port,
IntegrationTest)
from test.utils import (ignore_deprecations,
remove_all_users,
rs_or_single_client_noauth,
rs_or_single_client,
server_started_with_auth)
if PY3:
long = int
class TestDatabaseNoConnect(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = MongoClient(host, port, connect=False)
def test_name(self):
self.assertRaises(TypeError, Database, self.client, 4)
self.assertRaises(InvalidName, Database, self.client, "my db")
self.assertRaises(InvalidName, Database, self.client, "my\x00db")
self.assertRaises(InvalidName, Database,
self.client, u("my\u0000db"))
self.assertEqual("name", Database(self.client, "name").name)
def test_equality(self):
self.assertNotEqual(Database(self.client, "test"),
Database(self.client, "mike"))
self.assertEqual(Database(self.client, "test"),
Database(self.client, "test"))
# Explicitly test inequality
self.assertFalse(Database(self.client, "test") !=
Database(self.client, "test"))
def test_get_coll(self):
db = Database(self.client, "pymongo_test")
self.assertEqual(db.test, db["test"])
self.assertEqual(db.test, Collection(db, "test"))
self.assertNotEqual(db.test, Collection(db, "mike"))
self.assertEqual(db.test.mike, db["test.mike"])
def test_get_collection(self):
codec_options = CodecOptions(tz_aware=True)
write_concern = WriteConcern(w=2, j=True)
coll = self.client.pymongo_test.get_collection(
'foo', codec_options, ReadPreference.SECONDARY, write_concern)
self.assertEqual('foo', coll.name)
self.assertEqual(codec_options, coll.codec_options)
self.assertEqual(ReadPreference.SECONDARY, coll.read_preference)
self.assertEqual(write_concern, coll.write_concern)
def test_getattr(self):
db = self.client.pymongo_test
self.assertTrue(isinstance(db['_does_not_exist'], Collection))
with self.assertRaises(AttributeError) as context:
db._does_not_exist
# Message should be: "AttributeError: Database has no attribute
# '_does_not_exist'. To access the _does_not_exist collection,
# use database['_does_not_exist']".
self.assertIn("has no attribute '_does_not_exist'",
str(context.exception))
def test_iteration(self):
self.assertRaises(TypeError, next, self.client.pymongo_test)
class TestDatabase(IntegrationTest):
def test_repr(self):
self.assertEqual(repr(Database(self.client, "pymongo_test")),
"Database(%r, %s)" % (self.client,
repr(u("pymongo_test"))))
def test_create_collection(self):
db = Database(self.client, "pymongo_test")
db.test.insert_one({"hello": "world"})
self.assertRaises(CollectionInvalid, db.create_collection, "test")
db.drop_collection("test")
self.assertRaises(TypeError, db.create_collection, 5)
self.assertRaises(TypeError, db.create_collection, None)
self.assertRaises(InvalidName, db.create_collection, "coll..ection")
test = db.create_collection("test")
self.assertTrue(u("test") in db.collection_names())
test.insert_one({"hello": u("world")})
self.assertEqual(db.test.find_one()["hello"], "world")
db.drop_collection("test.foo")
db.create_collection("test.foo")
self.assertTrue(u("test.foo") in db.collection_names())
self.assertRaises(CollectionInvalid, db.create_collection, "test.foo")
def test_collection_names(self):
db = Database(self.client, "pymongo_test")
db.test.insert_one({"dummy": u("object")})
db.test.mike.insert_one({"dummy": u("object")})
colls = db.collection_names()
self.assertTrue("test" in colls)
self.assertTrue("test.mike" in colls)
for coll in colls:
self.assertTrue("$" not in coll)
colls_without_systems = db.collection_names(False)
for coll in colls_without_systems:
self.assertTrue(not coll.startswith("system."))
def test_drop_collection(self):
db = Database(self.client, "pymongo_test")
self.assertRaises(TypeError, db.drop_collection, 5)
self.assertRaises(TypeError, db.drop_collection, None)
db.test.insert_one({"dummy": u("object")})
self.assertTrue("test" in db.collection_names())
db.drop_collection("test")
self.assertFalse("test" in db.collection_names())
db.test.insert_one({"dummy": u("object")})
self.assertTrue("test" in db.collection_names())
db.drop_collection(u("test"))
self.assertFalse("test" in db.collection_names())
db.test.insert_one({"dummy": u("object")})
self.assertTrue("test" in db.collection_names())
db.drop_collection(db.test)
self.assertFalse("test" in db.collection_names())
db.test.insert_one({"dummy": u("object")})
self.assertTrue("test" in db.collection_names())
db.test.drop()
self.assertFalse("test" in db.collection_names())
db.test.drop()
db.drop_collection(db.test.doesnotexist)
def test_validate_collection(self):
db = self.client.pymongo_test
self.assertRaises(TypeError, db.validate_collection, 5)
self.assertRaises(TypeError, db.validate_collection, None)
db.test.insert_one({"dummy": u("object")})
self.assertRaises(OperationFailure, db.validate_collection,
"test.doesnotexist")
self.assertRaises(OperationFailure, db.validate_collection,
db.test.doesnotexist)
self.assertTrue(db.validate_collection("test"))
self.assertTrue(db.validate_collection(db.test))
self.assertTrue(db.validate_collection(db.test, full=True))
self.assertTrue(db.validate_collection(db.test, scandata=True))
self.assertTrue(db.validate_collection(db.test, scandata=True, full=True))
self.assertTrue(db.validate_collection(db.test, True, True))
@client_context.require_no_mongos
def test_profiling_levels(self):
db = self.client.pymongo_test
self.assertEqual(db.profiling_level(), OFF) # default
self.assertRaises(ValueError, db.set_profiling_level, 5.5)
self.assertRaises(ValueError, db.set_profiling_level, None)
self.assertRaises(ValueError, db.set_profiling_level, -1)
self.assertRaises(TypeError, db.set_profiling_level, SLOW_ONLY, 5.5)
self.assertRaises(TypeError, db.set_profiling_level, SLOW_ONLY, '1')
db.set_profiling_level(SLOW_ONLY)
self.assertEqual(db.profiling_level(), SLOW_ONLY)
db.set_profiling_level(ALL)
self.assertEqual(db.profiling_level(), ALL)
db.set_profiling_level(OFF)
self.assertEqual(db.profiling_level(), OFF)
db.set_profiling_level(SLOW_ONLY, 50)
self.assertEqual(50, db.command("profile", -1)['slowms'])
db.set_profiling_level(ALL, -1)
self.assertEqual(-1, db.command("profile", -1)['slowms'])
db.set_profiling_level(OFF, 100) # back to default
self.assertEqual(100, db.command("profile", -1)['slowms'])
@client_context.require_no_mongos
def test_profiling_info(self):
db = self.client.pymongo_test
db.set_profiling_level(ALL)
db.test.find_one()
db.set_profiling_level(OFF)
info = db.profiling_info()
self.assertTrue(isinstance(info, list))
# Check if we're going to fail because of SERVER-4754, in which
# profiling info isn't collected if mongod was started with --auth
if server_started_with_auth(self.client):
raise SkipTest(
"We need SERVER-4754 fixed for the rest of this test to pass"
)
self.assertTrue(len(info) >= 1)
# These basically clue us in to server changes.
self.assertTrue(isinstance(info[0]['responseLength'], int))
self.assertTrue(isinstance(info[0]['millis'], int))
self.assertTrue(isinstance(info[0]['client'], string_type))
self.assertTrue(isinstance(info[0]['user'], string_type))
self.assertTrue(isinstance(info[0]['ns'], string_type))
self.assertTrue(isinstance(info[0]['op'], string_type))
self.assertTrue(isinstance(info[0]["ts"], datetime.datetime))
@client_context.require_no_mongos
def test_errors(self):
with ignore_deprecations():
# We must call getlasterror, etc. on same socket as last operation.
db = rs_or_single_client(maxPoolSize=1).pymongo_test
db.reset_error_history()
self.assertEqual(None, db.error())
self.assertEqual(None, db.previous_error())
db.command("forceerror", check=False)
self.assertTrue(db.error())
self.assertTrue(db.previous_error())
db.command("forceerror", check=False)
self.assertTrue(db.error())
prev_error = db.previous_error()
self.assertEqual(prev_error["nPrev"], 1)
del prev_error["nPrev"]
prev_error.pop("lastOp", None)
error = db.error()
error.pop("lastOp", None)
# getLastError includes "connectionId" in recent
# server versions, getPrevError does not.
error.pop("connectionId", None)
self.assertEqual(error, prev_error)
db.test.find_one()
self.assertEqual(None, db.error())
self.assertTrue(db.previous_error())
self.assertEqual(db.previous_error()["nPrev"], 2)
db.reset_error_history()
self.assertEqual(None, db.error())
self.assertEqual(None, db.previous_error())
def test_command(self):
db = self.client.admin
self.assertEqual(db.command("buildinfo"), db.command({"buildinfo": 1}))
# We use 'aggregate' as our example command, since it's an easy way to
# retrieve a BSON regex from a collection using a command. But until
# MongoDB 2.3.2, aggregation turned regexes into strings: SERVER-6470.
@client_context.require_version_min(2, 3, 2)
def test_command_with_regex(self):
db = self.client.pymongo_test
db.test.drop()
db.test.insert_one({'r': re.compile('.*')})
db.test.insert_one({'r': Regex('.*')})
result = db.command('aggregate', 'test', pipeline=[])
for doc in result['result']:
self.assertTrue(isinstance(doc['r'], Regex))
def test_password_digest(self):
self.assertRaises(TypeError, auth._password_digest, 5)
self.assertRaises(TypeError, auth._password_digest, True)
self.assertRaises(TypeError, auth._password_digest, None)
self.assertTrue(isinstance(auth._password_digest("mike", "password"),
text_type))
self.assertEqual(auth._password_digest("mike", "password"),
u("cd7e45b3b2767dc2fa9b6b548457ed00"))
self.assertEqual(auth._password_digest("mike", "password"),
auth._password_digest(u("mike"), u("password")))
self.assertEqual(auth._password_digest("Gustave", u("Dor\xe9")),
u("81e0e2364499209f466e75926a162d73"))
@client_context.require_auth
def test_authenticate_add_remove_user(self):
# "self.client" is logged in as root.
auth_db = self.client.pymongo_test
db = rs_or_single_client_noauth().pymongo_test
# Configuration errors
self.assertRaises(ValueError, auth_db.add_user, "user", '')
self.assertRaises(TypeError, auth_db.add_user, "user", 'password', 15)
self.assertRaises(TypeError, auth_db.add_user,
"user", 'password', 'True')
self.assertRaises(ConfigurationError, auth_db.add_user,
"user", 'password', True, roles=['read'])
if client_context.version.at_least(2, 5, 3, -1):
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
self.assertRaises(DeprecationWarning, auth_db.add_user,
"user", "password")
self.assertRaises(DeprecationWarning, auth_db.add_user,
"user", "password", True)
with ignore_deprecations():
self.assertRaises(ConfigurationError, auth_db.add_user,
"user", "password", digestPassword=True)
# Add / authenticate / remove
auth_db.add_user("mike", "password", roles=["dbOwner"])
self.addCleanup(remove_all_users, auth_db)
self.assertRaises(TypeError, db.authenticate, 5, "password")
self.assertRaises(TypeError, db.authenticate, "mike", 5)
self.assertRaises(OperationFailure,
db.authenticate, "mike", "not a real password")
self.assertRaises(OperationFailure,
db.authenticate, "faker", "password")
db.authenticate("mike", "password")
db.logout()
# Unicode name and password.
db.authenticate(u("mike"), u("password"))
db.logout()
auth_db.remove_user("mike")
self.assertRaises(OperationFailure,
db.authenticate, "mike", "password")
# Add / authenticate / change password
self.assertRaises(OperationFailure,
db.authenticate, "Gustave", u("Dor\xe9"))
auth_db.add_user("Gustave", u("Dor\xe9"), roles=["dbOwner"])
db.authenticate("Gustave", u("Dor\xe9"))
# Change password.
auth_db.add_user("Gustave", "password", roles=["dbOwner"])
db.logout()
self.assertRaises(OperationFailure,
db.authenticate, "Gustave", u("Dor\xe9"))
self.assertTrue(db.authenticate("Gustave", u("password")))
if not client_context.version.at_least(2, 5, 3, -1):
# Add a readOnly user
with ignore_deprecations():
auth_db.add_user("Ross", "password", read_only=True)
db.logout()
db.authenticate("Ross", u("password"))
self.assertTrue(
auth_db.system.users.find({"readOnly": True}).count())
@client_context.require_auth
def test_make_user_readonly(self):
# "self.client" is logged in as root.
auth_db = self.client.pymongo_test
db = rs_or_single_client_noauth().pymongo_test
# Make a read-write user.
auth_db.add_user('jesse', 'pw')
self.addCleanup(remove_all_users, auth_db)
# Check that we're read-write by default.
db.authenticate('jesse', 'pw')
db.collection.insert_one({})
db.logout()
# Make the user read-only.
auth_db.add_user('jesse', 'pw', read_only=True)
db.authenticate('jesse', 'pw')
self.assertRaises(OperationFailure, db.collection.insert_one, {})
@client_context.require_version_min(2, 5, 3, -1)
@client_context.require_auth
def test_default_roles(self):
# "self.client" is logged in as root.
auth_admin = self.client.admin
auth_admin.add_user('test_default_roles', 'pass')
self.addCleanup(auth_admin.remove_user, 'test_default_roles')
info = auth_admin.command(
'usersInfo', 'test_default_roles')['users'][0]
self.assertEqual("root", info['roles'][0]['role'])
# Read only "admin" user
auth_admin.add_user('ro-admin', 'pass', read_only=True)
self.addCleanup(auth_admin.remove_user, 'ro-admin')
info = auth_admin.command('usersInfo', 'ro-admin')['users'][0]
self.assertEqual("readAnyDatabase", info['roles'][0]['role'])
# "Non-admin" user
auth_db = self.client.pymongo_test
auth_db.add_user('user', 'pass')
self.addCleanup(remove_all_users, auth_db)
info = auth_db.command('usersInfo', 'user')['users'][0]
self.assertEqual("dbOwner", info['roles'][0]['role'])
# Read only "Non-admin" user
auth_db.add_user('ro-user', 'pass', read_only=True)
info = auth_db.command('usersInfo', 'ro-user')['users'][0]
self.assertEqual("read", info['roles'][0]['role'])
@client_context.require_version_min(2, 5, 3, -1)
@client_context.require_auth
def test_new_user_cmds(self):
# "self.client" is logged in as root.
auth_db = self.client.pymongo_test
auth_db.add_user("amalia", "password", roles=["userAdmin"])
self.addCleanup(auth_db.remove_user, "amalia")
db = rs_or_single_client_noauth().pymongo_test
db.authenticate("amalia", "password")
# This tests the ability to update user attributes.
db.add_user("amalia", "new_password",
customData={"secret": "koalas"})
user_info = db.command("usersInfo", "amalia")
self.assertTrue(user_info["users"])
amalia_user = user_info["users"][0]
self.assertEqual(amalia_user["user"], "amalia")
self.assertEqual(amalia_user["customData"], {"secret": "koalas"})
@client_context.require_auth
def test_authenticate_multiple(self):
# "self.client" is logged in as root.
self.client.drop_database("pymongo_test")
self.client.drop_database("pymongo_test1")
admin_db_auth = self.client.admin
users_db_auth = self.client.pymongo_test
# Non-root client.
client = rs_or_single_client_noauth()
admin_db = client.admin
users_db = client.pymongo_test
other_db = client.pymongo_test1
self.assertRaises(OperationFailure, users_db.test.find_one)
if client_context.version.at_least(2, 5, 3, -1):
admin_db_auth.add_user('ro-admin', 'pass',
roles=["userAdmin", "readAnyDatabase"])
else:
admin_db_auth.add_user('ro-admin', 'pass', read_only=True)
self.addCleanup(admin_db_auth.remove_user, 'ro-admin')
users_db_auth.add_user('user', 'pass',
roles=["userAdmin", "readWrite"])
self.addCleanup(remove_all_users, users_db_auth)
# Regular user should be able to query its own db, but
# no other.
users_db.authenticate('user', 'pass')
self.assertEqual(0, users_db.test.count())
self.assertRaises(OperationFailure, other_db.test.find_one)
# Admin read-only user should be able to query any db,
# but not write.
admin_db.authenticate('ro-admin', 'pass')
self.assertEqual(0, other_db.test.count())
self.assertRaises(OperationFailure,
other_db.test.insert_one, {})
# Close all sockets.
client.close()
# We should still be able to write to the regular user's db.
self.assertTrue(users_db.test.delete_many({}))
# And read from other dbs...
self.assertEqual(0, other_db.test.count())
# But still not write to other dbs.
self.assertRaises(OperationFailure,
other_db.test.insert_one, {})
def test_id_ordering(self):
# PyMongo attempts to have _id show up first
# when you iterate key/value pairs in a document.
        # This isn't reliable since Python dicts don't
        # guarantee any particular order. This will never
        # work right in Jython or any Python environment
        # with hash randomization enabled (e.g. tox).
db = self.client.pymongo_test
db.test.drop()
db.test.insert_one(SON([("hello", "world"),
("_id", 5)]))
db = self.client.get_database(
"pymongo_test", codec_options=CodecOptions(document_class=SON))
cursor = db.test.find()
for x in cursor:
for (k, v) in x.items():
self.assertEqual(k, "_id")
break
def test_deref(self):
db = self.client.pymongo_test
db.test.drop()
self.assertRaises(TypeError, db.dereference, 5)
self.assertRaises(TypeError, db.dereference, "hello")
self.assertRaises(TypeError, db.dereference, None)
self.assertEqual(None, db.dereference(DBRef("test", ObjectId())))
obj = {"x": True}
key = db.test.insert_one(obj).inserted_id
self.assertEqual(obj, db.dereference(DBRef("test", key)))
self.assertEqual(obj,
db.dereference(DBRef("test", key, "pymongo_test")))
self.assertRaises(ValueError,
db.dereference, DBRef("test", key, "foo"))
self.assertEqual(None, db.dereference(DBRef("test", 4)))
obj = {"_id": 4}
db.test.insert_one(obj)
self.assertEqual(obj, db.dereference(DBRef("test", 4)))
def test_deref_kwargs(self):
db = self.client.pymongo_test
db.test.drop()
db.test.insert_one({"_id": 4, "foo": "bar"})
db = self.client.get_database(
"pymongo_test", codec_options=CodecOptions(document_class=SON))
self.assertEqual(SON([("foo", "bar")]),
db.dereference(DBRef("test", 4),
projection={"_id": False}))
@client_context.require_no_auth
def test_eval(self):
db = self.client.pymongo_test
db.test.drop()
self.assertRaises(TypeError, db.eval, None)
self.assertRaises(TypeError, db.eval, 5)
self.assertRaises(TypeError, db.eval, [])
self.assertEqual(3, db.eval("function (x) {return x;}", 3))
self.assertEqual(3, db.eval(u("function (x) {return x;}"), 3))
self.assertEqual(None,
db.eval("function (x) {db.test.save({y:x});}", 5))
self.assertEqual(db.test.find_one()["y"], 5)
self.assertEqual(5, db.eval("function (x, y) {return x + y;}", 2, 3))
self.assertEqual(5, db.eval("function () {return 5;}"))
self.assertEqual(5, db.eval("2 + 3;"))
self.assertEqual(5, db.eval(Code("2 + 3;")))
self.assertRaises(OperationFailure, db.eval, Code("return i;"))
self.assertEqual(2, db.eval(Code("return i;", {"i": 2})))
self.assertEqual(5, db.eval(Code("i + 3;", {"i": 2})))
self.assertRaises(OperationFailure, db.eval, "5 ++ 5;")
# TODO some of these tests belong in the collection level testing.
def test_insert_find_one(self):
db = self.client.pymongo_test
db.test.drop()
a_doc = SON({"hello": u("world")})
a_key = db.test.insert_one(a_doc).inserted_id
self.assertTrue(isinstance(a_doc["_id"], ObjectId))
self.assertEqual(a_doc["_id"], a_key)
self.assertEqual(a_doc, db.test.find_one({"_id": a_doc["_id"]}))
self.assertEqual(a_doc, db.test.find_one(a_key))
self.assertEqual(None, db.test.find_one(ObjectId()))
self.assertEqual(a_doc, db.test.find_one({"hello": u("world")}))
self.assertEqual(None, db.test.find_one({"hello": u("test")}))
b = db.test.find_one()
b["hello"] = u("mike")
db.test.replace_one({"_id": b["_id"]}, b)
self.assertNotEqual(a_doc, db.test.find_one(a_key))
self.assertEqual(b, db.test.find_one(a_key))
self.assertEqual(b, db.test.find_one())
count = 0
for _ in db.test.find():
count += 1
self.assertEqual(count, 1)
def test_long(self):
db = self.client.pymongo_test
db.test.drop()
db.test.insert_one({"x": long(9223372036854775807)})
retrieved = db.test.find_one()['x']
self.assertEqual(Int64(9223372036854775807), retrieved)
self.assertIsInstance(retrieved, Int64)
db.test.delete_many({})
db.test.insert_one({"x": Int64(1)})
retrieved = db.test.find_one()['x']
self.assertEqual(Int64(1), retrieved)
self.assertIsInstance(retrieved, Int64)
def test_delete(self):
db = self.client.pymongo_test
db.test.drop()
db.test.insert_one({"x": 1})
db.test.insert_one({"x": 2})
db.test.insert_one({"x": 3})
length = 0
for _ in db.test.find():
length += 1
self.assertEqual(length, 3)
db.test.delete_one({"x": 1})
length = 0
for _ in db.test.find():
length += 1
self.assertEqual(length, 2)
db.test.delete_one(db.test.find_one())
db.test.delete_one(db.test.find_one())
self.assertEqual(db.test.find_one(), None)
db.test.insert_one({"x": 1})
db.test.insert_one({"x": 2})
db.test.insert_one({"x": 3})
self.assertTrue(db.test.find_one({"x": 2}))
db.test.delete_one({"x": 2})
self.assertFalse(db.test.find_one({"x": 2}))
self.assertTrue(db.test.find_one())
db.test.delete_many({})
self.assertFalse(db.test.find_one())
@client_context.require_no_auth
def test_system_js(self):
db = self.client.pymongo_test
db.system.js.delete_many({})
self.assertEqual(0, db.system.js.count())
db.system_js.add = "function(a, b) { return a + b; }"
self.assertEqual('add', db.system.js.find_one()['_id'])
self.assertEqual(1, db.system.js.count())
self.assertEqual(6, db.system_js.add(1, 5))
del db.system_js.add
self.assertEqual(0, db.system.js.count())
db.system_js['add'] = "function(a, b) { return a + b; }"
self.assertEqual('add', db.system.js.find_one()['_id'])
self.assertEqual(1, db.system.js.count())
self.assertEqual(6, db.system_js['add'](1, 5))
del db.system_js['add']
self.assertEqual(0, db.system.js.count())
self.assertRaises(OperationFailure, db.system_js.add, 1, 5)
# TODO right now CodeWScope doesn't work w/ system js
# db.system_js.scope = Code("return hello;", {"hello": 8})
# self.assertEqual(8, db.system_js.scope())
self.assertRaises(OperationFailure, db.system_js.non_existant)
# XXX: Broken in V8, works in SpiderMonkey
if not client_context.version.at_least(2, 3, 0):
db.system_js.no_param = Code("return 5;")
self.assertEqual(5, db.system_js.no_param())
def test_system_js_list(self):
db = self.client.pymongo_test
db.system.js.delete_many({})
self.assertEqual([], db.system_js.list())
db.system_js.foo = "function() { return 'blah'; }"
self.assertEqual(["foo"], db.system_js.list())
db.system_js.bar = "function() { return 'baz'; }"
self.assertEqual(set(["foo", "bar"]), set(db.system_js.list()))
del db.system_js.foo
self.assertEqual(["bar"], db.system_js.list())
def test_command_response_without_ok(self):
# Sometimes (SERVER-10891) the server's response to a badly-formatted
# command document will have no 'ok' field. We should raise
# OperationFailure instead of KeyError.
self.assertRaises(OperationFailure,
helpers._check_command_response, {})
try:
helpers._check_command_response({'$err': 'foo'})
except OperationFailure as e:
self.assertEqual(e.args[0], 'foo')
else:
self.fail("_check_command_response didn't raise OperationFailure")
def test_mongos_response(self):
error_document = {
'ok': 0,
'errmsg': 'outer',
'raw': {'shard0/host0,host1': {'ok': 0, 'errmsg': 'inner'}}}
with self.assertRaises(OperationFailure) as context:
helpers._check_command_response(error_document)
self.assertEqual('inner', str(context.exception))
# If a shard has no primary and you run a command like dbstats, which
# cannot be run on a secondary, mongos's response includes empty "raw"
# errors. See SERVER-15428.
error_document = {
'ok': 0,
'errmsg': 'outer',
'raw': {'shard0/host0,host1': {}}}
with self.assertRaises(OperationFailure) as context:
helpers._check_command_response(error_document)
self.assertEqual('outer', str(context.exception))
# Raw error has ok: 0 but no errmsg. Not a known case, but test it.
error_document = {
'ok': 0,
'errmsg': 'outer',
'raw': {'shard0/host0,host1': {'ok': 0}}}
with self.assertRaises(OperationFailure) as context:
helpers._check_command_response(error_document)
self.assertEqual('outer', str(context.exception))
@client_context.require_version_min(2, 5, 3, -1)
@client_context.require_test_commands
def test_command_max_time_ms(self):
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="alwaysOn")
try:
db = self.client.pymongo_test
db.command('count', 'test')
self.assertRaises(ExecutionTimeout, db.command,
'count', 'test', maxTimeMS=1)
pipeline = [{'$project': {'name': 1, 'count': 1}}]
# Database command helper.
db.command('aggregate', 'test', pipeline=pipeline)
self.assertRaises(ExecutionTimeout, db.command,
'aggregate', 'test',
pipeline=pipeline, maxTimeMS=1)
# Collection helper.
db.test.aggregate(pipeline=pipeline)
self.assertRaises(ExecutionTimeout,
db.test.aggregate, pipeline, maxTimeMS=1)
finally:
self.client.admin.command("configureFailPoint",
"maxTimeAlwaysTimeOut",
mode="off")
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utils related to keras metrics."""
from enum import Enum
import functools
import weakref
from keras import backend
from keras.utils import losses_utils
from keras.utils import tf_utils
from keras.utils.generic_utils import to_list
import numpy as np
import tensorflow.compat.v2 as tf
NEG_INF = -1e10
class Reduction(Enum):
"""Types of metrics reduction.
Contains the following values:
* `SUM`: Scalar sum of weighted values.
* `SUM_OVER_BATCH_SIZE`: Scalar sum of weighted values divided by
number of elements.
* `WEIGHTED_MEAN`: Scalar sum of weighted values divided by sum of weights.
"""
SUM = 'sum'
SUM_OVER_BATCH_SIZE = 'sum_over_batch_size'
WEIGHTED_MEAN = 'weighted_mean'
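# Illustrative sketch, not part of the original module: the arithmetic behind
# the three reduction modes above, in plain Python. `_demo_reduction` and its
# arguments are invented names used only for illustration.
def _demo_reduction(values, weights, reduction):
  """Reduce a list of values/weights according to a `Reduction` member."""
  weighted = [v * w for v, w in zip(values, weights)]
  if reduction == Reduction.SUM:
    return sum(weighted)
  if reduction == Reduction.SUM_OVER_BATCH_SIZE:
    return sum(weighted) / len(weighted)
  if reduction == Reduction.WEIGHTED_MEAN:
    return sum(weighted) / sum(weights)
  raise ValueError(f'Unknown reduction: {reduction}')
# E.g. values=[1., 2.] and weights=[1., 3.] give SUM=7.0,
# SUM_OVER_BATCH_SIZE=3.5 and WEIGHTED_MEAN=1.75.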
def update_state_wrapper(update_state_fn):
"""Decorator to wrap metric `update_state()` with `add_update()`.
Args:
update_state_fn: function that accumulates metric statistics.
Returns:
Decorated function that wraps `update_state_fn()` with `add_update()`.
"""
def decorated(metric_obj, *args, **kwargs):
"""Decorated function with `add_update()`."""
strategy = tf.distribute.get_strategy()
for weight in metric_obj.weights:
if (backend.is_tpu_strategy(strategy) and
not strategy.extended.variable_created_in_scope(weight)
and not tf.distribute.in_cross_replica_context()):
raise ValueError(
'Trying to run metric.update_state in replica context when '
'the metric was not created in TPUStrategy scope. '
            'Make sure the keras Metric is created in TPUStrategy scope.')
with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
update_op = update_state_fn(*args, **kwargs)
if update_op is not None: # update_op will be None in eager execution.
metric_obj.add_update(update_op)
return update_op
return tf.__internal__.decorator.make_decorator(update_state_fn, decorated)
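# A hedged note, not from the source: judging from the signature above,
# `decorated` receives the metric object as its first argument but calls
# `update_state_fn` *without* it, so the wrapper is meant to be applied to an
# already bound `update_state` and then re-bound to the instance, e.g.:
#
#   import types
#   bound_fn = metric_obj.update_state                 # bound method
#   wrapped = update_state_wrapper(bound_fn)           # expects metric_obj first
#   metric_obj.update_state = types.MethodType(wrapped, metric_obj)
#
# Here `metric_obj` stands for any Metric instance; this is a sketch of one
# plausible wiring, not a statement about how Keras itself attaches it.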
def result_wrapper(result_fn):
"""Decorator to wrap metric `result()` function in `merge_call()`.
Result computation is an idempotent operation that simply calculates the
metric value using the state variables.
  If metric state variables are distributed across replicas/devices and
  `result()` is requested from the context of one device, this function wraps
  `result()` in a distribution strategy `merge_call()`. With this,
the metric state variables will be aggregated across devices.
Args:
result_fn: function that computes the metric result.
Returns:
Decorated function that wraps `result_fn()` in distribution strategy
`merge_call()`.
"""
def decorated(metric_obj, *args):
"""Decorated function with merge_call."""
replica_context = tf.distribute.get_replica_context()
# The purpose of using `merge_call` to call `result()` is to trigger cross
# replica aggregation of metric state variables (SyncOnReadVariable). After
# we introduced `variable_sync_on_read_context`, in principle there is no
# need to use `merge_call` here. However the branch still exists because:
#
# 1. Keras V1 training code sometimes assumes `result_t` is the same tensor
# across replicas (achieved by `merge_call`). With
# `variable_sync_on_read_context` each replica gets their own tensors
# residing on replica's device, thus breaking the assumption.
    # 2. Keras compile/fit creates a tf.function (a.k.a. train_function) that returns
# the metric values of the first replica. With
# `variable_sync_on_read_context` since each replica gets their own
# tensors, the metric result tensors on the non-first replicas are not in
# the return value of train_function, making TF graph optimizer prune the
# branch that computes and aggregates those metric results. As a result,
# if NCCL is used to do the aggregation, the program will hang because
# NCCL ops are only launched on the non-pruned first replica.
#
# We condition on strategy_supports_no_merge_call() since we know if it is
# True, the program uses `jit_compile` to compile replica fn, meaning it is
# not V1 training (hence #1 is okay), and no pruning will happen as
# compiled functions are not inlined (hence #2 is okay).
if (replica_context is None or
tf.__internal__.distribute.strategy_supports_no_merge_call()):
with tf.__internal__.distribute.variable_sync_on_read_context():
raw_result = result_fn(*args)
# Results need to be wrapped in a `tf.identity` op to ensure
# correct execution order.
if isinstance(raw_result,
(tf.Tensor, tf.Variable, float, int)):
result_t = tf.identity(raw_result)
elif isinstance(raw_result, dict):
result_t = {
key: tf.identity(value)
for key, value in raw_result.items()
}
else:
try:
result_t = tf.identity(raw_result)
except (ValueError, TypeError):
raise RuntimeError(
'The output of `metric.result()` can only be a single '
'Tensor/Variable, or a dict of Tensors/Variables. '
f'For metric {metric_obj.name}, got result {raw_result}.')
else:
# TODO(psv): Test distribution of metrics using different distribution
# strategies.
# Creating a wrapper for merge_fn. merge_call invokes the given merge_fn
# with distribution object as the first parameter. We create a wrapper
# here so that the result function need not have that parameter.
def merge_fn_wrapper(distribution, merge_fn, *args):
# We will get `PerReplica` merge function. Taking the first one as all
# are identical copies of the function that we had passed below.
result = distribution.experimental_local_results(merge_fn)[0](*args)
# Wrapping result in identity so that control dependency between
# update_op from `update_state` and result works in case result returns
# a tensor.
return tf.identity(result)
# Wrapping result in merge_call. merge_call is used when we want to leave
# replica mode and compute a value in cross replica mode.
result_t = replica_context.merge_call(
merge_fn_wrapper, args=(result_fn,) + args)
# We are saving the result op here to be used in train/test execution
# functions. This basically gives the result op that was generated with a
# control dep to the updates for these workflows.
metric_obj._call_result = result_t
return result_t
return tf.__internal__.decorator.make_decorator(result_fn, decorated)
def weakmethod(method):
"""Creates a weak reference to the bound method."""
cls = method.im_class
func = method.im_func
instance_ref = weakref.ref(method.im_self)
@functools.wraps(method)
def inner(*args, **kwargs):
return func.__get__(instance_ref(), cls)(*args, **kwargs)
del method
return inner
def assert_thresholds_range(thresholds):
if thresholds is not None:
invalid_thresholds = [t for t in thresholds if t is None or t < 0 or t > 1]
if invalid_thresholds:
raise ValueError(
f'Threshold values must be in [0, 1]. Received: {invalid_thresholds}')
def parse_init_thresholds(thresholds, default_threshold=0.5):
if thresholds is not None:
assert_thresholds_range(to_list(thresholds))
thresholds = to_list(default_threshold if thresholds is None else thresholds)
return thresholds
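# Illustrative sketch, not part of the original module: typical outcomes of
# `parse_init_thresholds`. `_demo_parse_init_thresholds` is an invented name.
def _demo_parse_init_thresholds():
  assert parse_init_thresholds(None) == [0.5]             # falls back to default
  assert parse_init_thresholds(0.3) == [0.3]              # scalar wrapped in list
  assert parse_init_thresholds([0.1, 0.9]) == [0.1, 0.9]  # list passed through
  try:
    parse_init_thresholds([1.5])                          # outside [0, 1]
  except ValueError:
    pass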
class ConfusionMatrix(Enum):
TRUE_POSITIVES = 'tp'
FALSE_POSITIVES = 'fp'
TRUE_NEGATIVES = 'tn'
FALSE_NEGATIVES = 'fn'
class AUCCurve(Enum):
"""Type of AUC Curve (ROC or PR)."""
ROC = 'ROC'
PR = 'PR'
@staticmethod
def from_str(key):
if key in ('pr', 'PR'):
return AUCCurve.PR
elif key in ('roc', 'ROC'):
return AUCCurve.ROC
else:
raise ValueError(
f'Invalid AUC curve value: "{key}". '
'Expected values are ["PR", "ROC"]')
class AUCSummationMethod(Enum):
"""Type of AUC summation method.
  https://en.wikipedia.org/wiki/Riemann_sum
Contains the following values:
* 'interpolation': Applies mid-point summation scheme for `ROC` curve. For
`PR` curve, interpolates (true/false) positives but not the ratio that is
precision (see Davis & Goadrich 2006 for details).
* 'minoring': Applies left summation for increasing intervals and right
summation for decreasing intervals.
* 'majoring': Applies right summation for increasing intervals and left
summation for decreasing intervals.
"""
INTERPOLATION = 'interpolation'
MAJORING = 'majoring'
MINORING = 'minoring'
@staticmethod
def from_str(key):
if key in ('interpolation', 'Interpolation'):
return AUCSummationMethod.INTERPOLATION
elif key in ('majoring', 'Majoring'):
return AUCSummationMethod.MAJORING
elif key in ('minoring', 'Minoring'):
return AUCSummationMethod.MINORING
else:
raise ValueError(
f'Invalid AUC summation method value: "{key}". '
'Expected values are ["interpolation", "majoring", "minoring"]')
def _update_confusion_matrix_variables_optimized(
variables_to_update,
y_true,
y_pred,
thresholds,
multi_label=False,
sample_weights=None,
label_weights=None,
thresholds_with_epsilon=False):
"""Update confusion matrix variables with memory efficient alternative.
  Note that the thresholds need to be evenly distributed within the list, e.g.,
  the diff between consecutive elements is the same.
To compute TP/FP/TN/FN, we are measuring a binary classifier
C(t) = (predictions >= t)
at each threshold 't'. So we have
TP(t) = sum( C(t) * true_labels )
FP(t) = sum( C(t) * false_labels )
But, computing C(t) requires computation for each t. To make it fast,
observe that C(t) is a cumulative integral, and so if we have
thresholds = [t_0, ..., t_{n-1}]; t_0 < ... < t_{n-1}
where n = num_thresholds, and if we can compute the bucket function
B(i) = Sum( (predictions == t), t_i <= t < t{i+1} )
then we get
C(t_i) = sum( B(j), j >= i )
which is the reversed cumulative sum in tf.cumsum().
We can compute B(i) efficiently by taking advantage of the fact that
our thresholds are evenly distributed, in that
width = 1.0 / (num_thresholds - 1)
thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]
Given a prediction value p, we can map it to its bucket by
bucket_index(p) = floor( p * (num_thresholds - 1) )
so we can use tf.math.unsorted_segment_sum() to update the buckets in one
pass.
Consider following example:
y_true = [0, 0, 1, 1]
y_pred = [0.1, 0.5, 0.3, 0.9]
thresholds = [0.0, 0.5, 1.0]
num_buckets = 2 # [0.0, 1.0], (1.0, 2.0]
bucket_index(y_pred) = tf.math.floor(y_pred * num_buckets)
= tf.math.floor([0.2, 1.0, 0.6, 1.8])
= [0, 0, 0, 1]
  # The meaning of this bucket is that if any of the labels is true,
  # then 1 will be added to the bucket with the corresponding index.
  # E.g., if the label for 0.2 is true, then 1 will be added to bucket 0. If the
  # label for 1.8 is true, then 1 will be added to bucket 1.
  #
  # Note the second item "1.0" is floored to 0, since the value needs to be
  # strictly larger than the bucket lower bound.
  # In the implementation, we use tf.math.ceil() - 1 to achieve this.
tp_bucket_value = tf.math.unsorted_segment_sum(true_labels, bucket_indices,
num_segments=num_thresholds)
= [1, 1, 0]
  # For [1, 1, 0] here, it means there is 1 true value contributed by bucket 0,
  # and 1 value contributed by bucket 1. When we aggregate them together,
  # the result becomes [a + b + c, b + c, c], since larger thresholds always
  # contribute to the value for smaller thresholds.
true_positive = tf.math.cumsum(tp_bucket_value, reverse=True)
= [2, 1, 0]
This implementation exhibits a run time and space complexity of O(T + N),
where T is the number of thresholds and N is the size of predictions.
  Metrics that rely on the standard implementation instead exhibit a complexity
  of O(T * N).
Args:
variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
and corresponding variables to update as values.
y_true: A floating point `Tensor` whose shape matches `y_pred`. Will be cast
to `bool`.
y_pred: A floating point `Tensor` of arbitrary shape and whose values are in
the range `[0, 1]`.
    thresholds: A sorted floating point `Tensor` with values in `[0, 1]`.
      It needs to be evenly distributed (the diff between consecutive elements
      needs to be the same).
multi_label: Optional boolean indicating whether multidimensional
prediction/labels should be treated as multilabel responses, or flattened
      into a single label. When True, the values of `variables_to_update` must
have a second dimension equal to the number of labels in y_true and
y_pred, and those tensors must not be RaggedTensors.
sample_weights: Optional `Tensor` whose rank is either 0, or the same rank
as `y_true`, and must be broadcastable to `y_true` (i.e., all dimensions
must be either `1`, or the same as the corresponding `y_true` dimension).
label_weights: Optional tensor of non-negative weights for multilabel
data. The weights are applied when calculating TP, FP, FN, and TN without
explicit multilabel handling (i.e. when the data is to be flattened).
    thresholds_with_epsilon: Optional boolean indicating whether the leading and
      trailing thresholds have any epsilon added for floating point imprecision.
      It will change how we handle the leading and trailing buckets.
Returns:
Update op.
"""
num_thresholds = thresholds.shape.as_list()[0]
if sample_weights is None:
sample_weights = 1.0
else:
sample_weights = tf.__internal__.ops.broadcast_weights(
tf.cast(sample_weights, dtype=y_pred.dtype), y_pred)
if not multi_label:
sample_weights = tf.reshape(sample_weights, [-1])
if label_weights is None:
label_weights = 1.0
else:
label_weights = tf.expand_dims(label_weights, 0)
label_weights = tf.__internal__.ops.broadcast_weights(label_weights,
y_pred)
if not multi_label:
label_weights = tf.reshape(label_weights, [-1])
weights = tf.multiply(sample_weights, label_weights)
  # We shouldn't need this, but just in case there are prediction values that
  # fall outside the range [0.0, 1.0].
y_pred = tf.clip_by_value(y_pred,
clip_value_min=0.0, clip_value_max=1.0)
y_true = tf.cast(tf.cast(y_true, tf.bool), y_true.dtype)
if not multi_label:
y_true = tf.reshape(y_true, [-1])
y_pred = tf.reshape(y_pred, [-1])
true_labels = tf.multiply(y_true, weights)
false_labels = tf.multiply((1.0 - y_true), weights)
# Compute the bucket indices for each prediction value.
  # Since the prediction value has to be strictly greater than the threshold,
  # e.g., with buckets [0, 0.5], (0.5, 1], the value 0.5 belongs to the first
  # bucket. We have to use math.ceil(val) - 1 for the bucket index.
bucket_indices = tf.math.ceil(y_pred * (num_thresholds - 1)) - 1
if thresholds_with_epsilon:
    # In this case, the first bucket should actually be taken into account,
    # since any prediction in [0.0, 1.0] should be larger than the first
    # threshold. We change the bucket value from -1 to 0.
bucket_indices = tf.nn.relu(bucket_indices)
bucket_indices = tf.cast(bucket_indices, tf.int32)
if multi_label:
    # We need to run the bucket segment sum for each of the label classes. In
    # the multi_label case, the rank of the label is 2. We first transpose it so
    # that the label dim becomes the first and we can run through them in
    # parallel.
true_labels = tf.transpose(true_labels)
false_labels = tf.transpose(false_labels)
bucket_indices = tf.transpose(bucket_indices)
def gather_bucket(label_and_bucket_index):
label, bucket_index = label_and_bucket_index[0], label_and_bucket_index[1]
return tf.math.unsorted_segment_sum(
data=label, segment_ids=bucket_index, num_segments=num_thresholds)
tp_bucket_v = tf.vectorized_map(
gather_bucket, (true_labels, bucket_indices))
fp_bucket_v = tf.vectorized_map(
gather_bucket, (false_labels, bucket_indices))
tp = tf.transpose(
tf.cumsum(tp_bucket_v, reverse=True, axis=1))
fp = tf.transpose(
tf.cumsum(fp_bucket_v, reverse=True, axis=1))
else:
tp_bucket_v = tf.math.unsorted_segment_sum(
data=true_labels, segment_ids=bucket_indices,
num_segments=num_thresholds)
fp_bucket_v = tf.math.unsorted_segment_sum(
data=false_labels, segment_ids=bucket_indices,
num_segments=num_thresholds)
tp = tf.cumsum(tp_bucket_v, reverse=True)
fp = tf.cumsum(fp_bucket_v, reverse=True)
# fn = sum(true_labels) - tp
# tn = sum(false_labels) - fp
if (ConfusionMatrix.TRUE_NEGATIVES in variables_to_update or
ConfusionMatrix.FALSE_NEGATIVES in variables_to_update):
if multi_label:
total_true_labels = tf.reduce_sum(true_labels, axis=1)
total_false_labels = tf.reduce_sum(false_labels, axis=1)
else:
total_true_labels = tf.reduce_sum(true_labels)
total_false_labels = tf.reduce_sum(false_labels)
update_ops = []
if ConfusionMatrix.TRUE_POSITIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.TRUE_POSITIVES]
update_ops.append(variable.assign_add(tp))
if ConfusionMatrix.FALSE_POSITIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.FALSE_POSITIVES]
update_ops.append(variable.assign_add(fp))
if ConfusionMatrix.TRUE_NEGATIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.TRUE_NEGATIVES]
tn = total_false_labels - fp
update_ops.append(variable.assign_add(tn))
if ConfusionMatrix.FALSE_NEGATIVES in variables_to_update:
variable = variables_to_update[ConfusionMatrix.FALSE_NEGATIVES]
fn = total_true_labels - tp
update_ops.append(variable.assign_add(fn))
return tf.group(update_ops)
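# Illustrative NumPy sketch, not part of the original module: it reproduces the
# worked example from the docstring above to make the bucketing math concrete.
# `_demo_bucketed_confusion_counts` is an invented name.
def _demo_bucketed_confusion_counts():
  y_true = np.array([0., 0., 1., 1.])
  y_pred = np.array([0.1, 0.5, 0.3, 0.9])
  num_thresholds = 3                      # thresholds = [0.0, 0.5, 1.0]
  # Strictly-greater bucketing via ceil(p * (T - 1)) - 1, as in the code above.
  bucket = np.ceil(y_pred * (num_thresholds - 1)).astype(int) - 1
  tp_bucket = np.bincount(bucket, weights=y_true, minlength=num_thresholds)
  fp_bucket = np.bincount(bucket, weights=1. - y_true, minlength=num_thresholds)
  # Reversed cumulative sum: larger thresholds contribute to smaller ones.
  tp = np.cumsum(tp_bucket[::-1])[::-1]   # -> [2., 1., 0.]
  fp = np.cumsum(fp_bucket[::-1])[::-1]   # -> [2., 0., 0.]
  return tp, fp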
def is_evenly_distributed_thresholds(thresholds):
"""Check if the thresholds list is evenly distributed.
  We could leverage evenly distributed thresholds to use less memory when
  calculating metrics like AUC, where each individual threshold needs to be
  evaluated.
Args:
    thresholds: A python list or tuple, or 1D numpy array whose values are
      in [0, 1].
Returns:
boolean, whether the values in the inputs are evenly distributed.
"""
# Check the list value and see if it is evenly distributed.
num_thresholds = len(thresholds)
if num_thresholds < 3:
return False
even_thresholds = np.arange(num_thresholds,
dtype=np.float32) / (num_thresholds - 1)
return np.allclose(thresholds, even_thresholds, atol=backend.epsilon())
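# Illustrative sketch, not part of the original module:
#   is_evenly_distributed_thresholds(np.array([0.0, 0.5, 1.0]))  -> True
#   is_evenly_distributed_thresholds(np.array([0.0, 0.3, 1.0]))  -> False
#   is_evenly_distributed_thresholds(np.array([0.0, 1.0]))       -> False (< 3 items)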
def update_confusion_matrix_variables(variables_to_update,
y_true,
y_pred,
thresholds,
top_k=None,
class_id=None,
sample_weight=None,
multi_label=False,
label_weights=None,
thresholds_distributed_evenly=False):
"""Returns op to update the given confusion matrix variables.
For every pair of values in y_true and y_pred:
true_positive: y_true == True and y_pred > thresholds
false_negatives: y_true == True and y_pred <= thresholds
true_negatives: y_true == False and y_pred <= thresholds
false_positive: y_true == False and y_pred > thresholds
The results will be weighted and added together. When multiple thresholds are
provided, we will repeat the same for every threshold.
For estimation of these metrics over a stream of data, the function creates an
`update_op` operation that updates the given variables.
If `sample_weight` is `None`, weights default to 1.
Use weights of 0 to mask values.
Args:
variables_to_update: Dictionary with 'tp', 'fn', 'tn', 'fp' as valid keys
and corresponding variables to update as values.
y_true: A `Tensor` whose shape matches `y_pred`. Will be cast to `bool`.
y_pred: A floating point `Tensor` of arbitrary shape and whose values are in
the range `[0, 1]`.
thresholds: A float value, float tensor, python list, or tuple of float
thresholds in `[0, 1]`, or NEG_INF (used when top_k is set).
top_k: Optional int, indicates that the positive labels should be limited to
the top k predictions.
class_id: Optional int, limits the prediction and labels to the class
specified by this argument.
sample_weight: Optional `Tensor` whose rank is either 0, or the same rank as
`y_true`, and must be broadcastable to `y_true` (i.e., all dimensions must
be either `1`, or the same as the corresponding `y_true` dimension).
multi_label: Optional boolean indicating whether multidimensional
prediction/labels should be treated as multilabel responses, or flattened
      into a single label. When True, the values of `variables_to_update` must
have a second dimension equal to the number of labels in y_true and
y_pred, and those tensors must not be RaggedTensors.
label_weights: (optional) tensor of non-negative weights for multilabel
data. The weights are applied when calculating TP, FP, FN, and TN without
explicit multilabel handling (i.e. when the data is to be flattened).
thresholds_distributed_evenly: Boolean, whether the thresholds are evenly
distributed within the list. An optimized method will be used if this is
the case. See _update_confusion_matrix_variables_optimized() for more
details.
Returns:
Update op.
Raises:
ValueError: If `y_pred` and `y_true` have mismatched shapes, or if
`sample_weight` is not `None` and its shape doesn't match `y_pred`, or if
`variables_to_update` contains invalid keys.
"""
if multi_label and label_weights is not None:
raise ValueError('`label_weights` for multilabel data should be handled '
'outside of `update_confusion_matrix_variables` when '
'`multi_label` is True.')
if variables_to_update is None:
return
if not any(
key for key in variables_to_update if key in list(ConfusionMatrix)):
raise ValueError(
'Please provide at least one valid confusion matrix '
'variable to update. Valid variable key options are: '
f'"{list(ConfusionMatrix)}". Received: "{variables_to_update.keys()}"')
variable_dtype = list(variables_to_update.values())[0].dtype
y_true = tf.cast(y_true, dtype=variable_dtype)
y_pred = tf.cast(y_pred, dtype=variable_dtype)
if thresholds_distributed_evenly:
    # Check whether the thresholds have any leading or trailing epsilon added
    # for floating point imprecision. The leading and trailing thresholds will
    # be handled a bit differently as corner cases.
# At this point, thresholds should be a list/array with more than 2 items,
# and ranged between [0, 1]. See is_evenly_distributed_thresholds() for more
# details.
thresholds_with_epsilon = thresholds[0] < 0.0 or thresholds[-1] > 1.0
thresholds = tf.convert_to_tensor(
thresholds, dtype=variable_dtype)
num_thresholds = thresholds.shape.as_list()[0]
if multi_label:
one_thresh = tf.equal(
tf.cast(1, dtype=tf.int32),
tf.rank(thresholds),
name='one_set_of_thresholds_cond')
else:
[y_pred,
y_true], _ = ragged_assert_compatible_and_get_flat_values([y_pred, y_true],
sample_weight)
one_thresh = tf.cast(True, dtype=tf.bool)
invalid_keys = [
key for key in variables_to_update if key not in list(ConfusionMatrix)
]
if invalid_keys:
raise ValueError(
f'Invalid keys: "{invalid_keys}". '
f'Valid variable key options are: "{list(ConfusionMatrix)}"')
with tf.control_dependencies([
tf.debugging.assert_greater_equal(
y_pred,
tf.cast(0.0, dtype=y_pred.dtype),
message='predictions must be >= 0'),
tf.debugging.assert_less_equal(
y_pred,
tf.cast(1.0, dtype=y_pred.dtype),
message='predictions must be <= 1')
]):
if sample_weight is None:
y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true)
else:
sample_weight = tf.cast(sample_weight, dtype=variable_dtype)
y_pred, y_true, sample_weight = (
losses_utils.squeeze_or_expand_dimensions(
y_pred, y_true, sample_weight=sample_weight))
y_pred.shape.assert_is_compatible_with(y_true.shape)
if top_k is not None:
y_pred = _filter_top_k(y_pred, top_k)
if class_id is not None:
y_true = y_true[..., class_id]
y_pred = y_pred[..., class_id]
if thresholds_distributed_evenly:
return _update_confusion_matrix_variables_optimized(
variables_to_update, y_true, y_pred, thresholds,
multi_label=multi_label, sample_weights=sample_weight,
label_weights=label_weights,
thresholds_with_epsilon=thresholds_with_epsilon)
pred_shape = tf.shape(y_pred)
num_predictions = pred_shape[0]
if y_pred.shape.ndims == 1:
num_labels = 1
else:
num_labels = tf.math.reduce_prod(pred_shape[1:], axis=0)
thresh_label_tile = tf.where(one_thresh, num_labels,
tf.ones([], dtype=tf.int32))
# Reshape predictions and labels, adding a dim for thresholding.
if multi_label:
predictions_extra_dim = tf.expand_dims(y_pred, 0)
labels_extra_dim = tf.expand_dims(
tf.cast(y_true, dtype=tf.bool), 0)
else:
# Flatten predictions and labels when not multilabel.
predictions_extra_dim = tf.reshape(y_pred, [1, -1])
labels_extra_dim = tf.reshape(
tf.cast(y_true, dtype=tf.bool), [1, -1])
# Tile the thresholds for every prediction.
if multi_label:
thresh_pretile_shape = [num_thresholds, 1, -1]
thresh_tiles = [1, num_predictions, thresh_label_tile]
data_tiles = [num_thresholds, 1, 1]
else:
thresh_pretile_shape = [num_thresholds, -1]
thresh_tiles = [1, num_predictions * num_labels]
data_tiles = [num_thresholds, 1]
thresh_tiled = tf.tile(
tf.reshape(thresholds, thresh_pretile_shape),
tf.stack(thresh_tiles))
# Tile the predictions for every threshold.
preds_tiled = tf.tile(predictions_extra_dim, data_tiles)
# Compare predictions and threshold.
pred_is_pos = tf.greater(preds_tiled, thresh_tiled)
# Tile labels by number of thresholds
label_is_pos = tf.tile(labels_extra_dim, data_tiles)
if sample_weight is not None:
sample_weight = tf.__internal__.ops.broadcast_weights(
tf.cast(sample_weight, dtype=variable_dtype), y_pred)
weights_tiled = tf.tile(
tf.reshape(sample_weight, thresh_tiles), data_tiles)
else:
weights_tiled = None
if label_weights is not None and not multi_label:
label_weights = tf.expand_dims(label_weights, 0)
label_weights = tf.__internal__.ops.broadcast_weights(label_weights,
y_pred)
label_weights_tiled = tf.tile(
tf.reshape(label_weights, thresh_tiles), data_tiles)
if weights_tiled is None:
weights_tiled = label_weights_tiled
else:
weights_tiled = tf.multiply(weights_tiled, label_weights_tiled)
update_ops = []
def weighted_assign_add(label, pred, weights, var):
label_and_pred = tf.cast(
tf.logical_and(label, pred), dtype=var.dtype)
if weights is not None:
label_and_pred *= tf.cast(weights, dtype=var.dtype)
return var.assign_add(tf.reduce_sum(label_and_pred, 1))
loop_vars = {
ConfusionMatrix.TRUE_POSITIVES: (label_is_pos, pred_is_pos),
}
update_tn = ConfusionMatrix.TRUE_NEGATIVES in variables_to_update
update_fp = ConfusionMatrix.FALSE_POSITIVES in variables_to_update
update_fn = ConfusionMatrix.FALSE_NEGATIVES in variables_to_update
if update_fn or update_tn:
pred_is_neg = tf.logical_not(pred_is_pos)
loop_vars[ConfusionMatrix.FALSE_NEGATIVES] = (label_is_pos, pred_is_neg)
if update_fp or update_tn:
label_is_neg = tf.logical_not(label_is_pos)
loop_vars[ConfusionMatrix.FALSE_POSITIVES] = (label_is_neg, pred_is_pos)
if update_tn:
loop_vars[ConfusionMatrix.TRUE_NEGATIVES] = (label_is_neg, pred_is_neg)
for matrix_cond, (label, pred) in loop_vars.items():
if matrix_cond in variables_to_update:
update_ops.append(
weighted_assign_add(label, pred, weights_tiled,
variables_to_update[matrix_cond]))
return tf.group(update_ops)
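# Illustrative sketch (not part of the original module): a minimal call to
# update_confusion_matrix_variables, assuming that is the enclosing function's
# name as its own error message suggests. The accumulator variables have one
# slot per threshold, and eager TF2 execution is assumed.
def _example_update_confusion_matrix_variables():
  tp = tf.Variable(tf.zeros(2))  # true positives, one entry per threshold
  fp = tf.Variable(tf.zeros(2))  # false positives, one entry per threshold
  update_confusion_matrix_variables(
      variables_to_update={
          ConfusionMatrix.TRUE_POSITIVES: tp,
          ConfusionMatrix.FALSE_POSITIVES: fp,
      },
      y_true=tf.constant([0.0, 1.0, 1.0, 0.0]),
      y_pred=tf.constant([0.1, 0.8, 0.6, 0.4]),
      thresholds=[0.3, 0.7])
  return tp, fp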
def _filter_top_k(x, k):
"""Filters top-k values in the last dim of x and set the rest to NEG_INF.
Used for computing top-k prediction values in dense labels (which has the same
shape as predictions) for recall and precision top-k metrics.
Args:
x: tensor with any dimensions.
k: the number of values to keep.
Returns:
tensor with same shape and dtype as x.
"""
_, top_k_idx = tf.math.top_k(x, k, sorted=False)
top_k_mask = tf.reduce_sum(
tf.one_hot(top_k_idx, tf.shape(x)[-1], axis=-1), axis=-2)
return x * top_k_mask + NEG_INF * (1 - top_k_mask)
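# Illustrative sketch (not part of the original module): _filter_top_k keeps
# the k largest values in each row and pushes the rest down to NEG_INF so they
# can never cross a threshold. Eager TF2 execution is assumed.
def _example_filter_top_k():
  x = tf.constant([[0.1, 0.4, 0.3, 0.2]])
  # Only 0.4 and 0.3 survive; 0.1 and 0.2 are replaced by NEG_INF.
  return _filter_top_k(x, k=2)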
def ragged_assert_compatible_and_get_flat_values(values, mask=None):
"""If ragged, it checks the compatibility and then returns the flat_values.
Note: If two tensors are dense, it does not check their compatibility.
Note: Although two ragged tensors with different ragged ranks could have
identical overall rank and dimension sizes and hence be compatible,
we do not support those cases.
Args:
values: A list of potentially ragged tensors of the same ragged_rank.
mask: A potentially ragged tensor of the same ragged_rank as the elements in
values.
Returns:
A tuple in which the first element is the list of tensors and the second
is the mask tensor. ([values], mask). The mask and the elements in values
are equal to the flat_values of the input arguments (if they were ragged).
"""
if isinstance(values, list):
is_all_ragged = \
all(isinstance(rt, tf.RaggedTensor) for rt in values)
is_any_ragged = \
any(isinstance(rt, tf.RaggedTensor) for rt in values)
else:
is_all_ragged = isinstance(values, tf.RaggedTensor)
is_any_ragged = is_all_ragged
if (is_all_ragged and
((mask is None) or isinstance(mask, tf.RaggedTensor))):
to_be_stripped = False
if not isinstance(values, list):
values = [values]
to_be_stripped = True
# NOTE: we leave the flat_values compatibility to
# tf.TensorShape `assert_is_compatible_with`
# check if both dynamic dimensions are equal and then use the flat_values.
nested_row_split_list = [rt.nested_row_splits for rt in values]
assertion_list = _assert_splits_match(nested_row_split_list)
# if both are ragged, sample_weights should also be ragged with the same dims.
if isinstance(mask, tf.RaggedTensor):
assertion_list_for_mask = _assert_splits_match(
[nested_row_split_list[0], mask.nested_row_splits])
with tf.control_dependencies(assertion_list_for_mask):
mask = tf.expand_dims(mask.flat_values, -1)
# values has at least 1 element.
flat_values = []
for value in values:
with tf.control_dependencies(assertion_list):
flat_values.append(tf.expand_dims(value.flat_values, -1))
values = flat_values[0] if to_be_stripped else flat_values
elif is_any_ragged:
raise TypeError('Some of the inputs are not tf.RaggedTensor. '
f'Input received: {values}')
# values are empty, or values are not ragged while the mask is ragged.
elif isinstance(mask, tf.RaggedTensor):
raise TypeError('Ragged mask is not allowed with non-ragged inputs. '
f'Input received: {values}, mask received: {mask}')
return values, mask
def _assert_splits_match(nested_splits_lists):
"""Checks that the given splits lists are identical.
Performs static tests to ensure that the given splits lists are identical,
and returns a list of control dependency op tensors that check that they are
fully identical.
Args:
nested_splits_lists: A list of nested_splits_lists, where each split_list is
a list of `splits` tensors from a `RaggedTensor`, ordered from outermost
ragged dimension to innermost ragged dimension.
Returns:
A list of control dependency op tensors.
Raises:
ValueError: If the splits are not identical.
"""
error_msg = ('Inputs must have identical ragged splits. '
f'Input received: {nested_splits_lists}')
for splits_list in nested_splits_lists:
if len(splits_list) != len(nested_splits_lists[0]):
raise ValueError(error_msg)
return [
tf.debugging.assert_equal(s1, s2, message=error_msg) # pylint: disable=g-complex-comprehension
for splits_list in nested_splits_lists[1:]
for (s1, s2) in zip(nested_splits_lists[0], splits_list)
]
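# Illustrative sketch (not part of the original module): flattening compatible
# ragged inputs before a confusion-matrix update. Eager TF2 execution is
# assumed.
def _example_ragged_flatten():
  y_pred = tf.ragged.constant([[0.9, 0.2], [0.7]])
  y_true = tf.ragged.constant([[1.0, 0.0], [1.0]])
  # Both tensors share the same row splits, so their flat_values are returned
  # (each with a trailing dim of 1 added); the mask passes through as None.
  [y_pred_flat, y_true_flat], mask = (
      ragged_assert_compatible_and_get_flat_values([y_pred, y_true], mask=None))
  return y_pred_flat, y_true_flat, mask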
|
|
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_log import log as logging
import sqlalchemy.orm as sa_orm
from glance.common import exception as exc
from glance.db.sqlalchemy.metadef_api\
import namespace as namespace_api
from glance.db.sqlalchemy.metadef_api\
import resource_type as resource_type_api
from glance.db.sqlalchemy.metadef_api\
import utils as metadef_utils
from glance.db.sqlalchemy import models_metadef as models
LOG = logging.getLogger(__name__)
def _to_db_dict(namespace_id, resource_type_id, model_dict):
"""transform a model dict to a metadef_namespace_resource_type dict"""
db_dict = {'namespace_id': namespace_id,
'resource_type_id': resource_type_id,
'properties_target': model_dict['properties_target'],
'prefix': model_dict['prefix']}
return db_dict
def _to_model_dict(resource_type_name, ns_res_type_dict):
"""transform a metadef_namespace_resource_type dict to a model dict"""
model_dict = {'name': resource_type_name,
'properties_target': ns_res_type_dict['properties_target'],
'prefix': ns_res_type_dict['prefix'],
'created_at': ns_res_type_dict['created_at'],
'updated_at': ns_res_type_dict['updated_at']}
return model_dict
def _set_model_dict(resource_type_name, properties_target, prefix,
created_at, updated_at):
"""return a model dict set with the passed in key values"""
model_dict = {'name': resource_type_name,
'properties_target': properties_target,
'prefix': prefix,
'created_at': created_at,
'updated_at': updated_at}
return model_dict
def _get(context, namespace_name, resource_type_name,
namespace_id, resource_type_id, session):
"""Get a namespace resource_type association"""
# visibility check assumed done in calling routine via namespace_get
try:
query = session.query(models.MetadefNamespaceResourceType).\
filter_by(namespace_id=namespace_id,
resource_type_id=resource_type_id)
db_rec = query.one()
except sa_orm.exc.NoResultFound:
msg = ("The metadata definition resource-type association of"
" resource_type=%(resource_type_name)s to"
" namespace_name=%(namespace_name)s was not found."
% {'resource_type_name': resource_type_name,
'namespace_name': namespace_name})
LOG.debug(msg)
raise exc.MetadefResourceTypeAssociationNotFound(
resource_type_name=resource_type_name,
namespace_name=namespace_name)
return db_rec
def _create_association(
context, namespace_name, resource_type_name, values, session):
"""Create an association, raise if it already exists."""
namespace_resource_type_rec = models.MetadefNamespaceResourceType()
metadef_utils.drop_protected_attrs(
models.MetadefNamespaceResourceType, values)
# values['updated_at'] = timeutils.utcnow() # TS mixin should do this
namespace_resource_type_rec.update(values.copy())
try:
namespace_resource_type_rec.save(session=session)
except db_exc.DBDuplicateEntry:
msg = ("The metadata definition resource-type association of"
" resource_type=%(resource_type_name)s to"
" namespace=%(namespace_name)s, already exists."
% {'resource_type_name': resource_type_name,
'namespace_name': namespace_name})
LOG.debug(msg)
raise exc.MetadefDuplicateResourceTypeAssociation(
resource_type_name=resource_type_name,
namespace_name=namespace_name)
return namespace_resource_type_rec.to_dict()
def _delete(context, namespace_name, resource_type_name,
namespace_id, resource_type_id, session):
"""Delete a resource type association or raise if not found."""
db_rec = _get(context, namespace_name, resource_type_name,
namespace_id, resource_type_id, session)
session.delete(db_rec)
session.flush()
return db_rec.to_dict()
def get(context, namespace_name, resource_type_name, session):
"""Get a resource_type associations; raise if not found"""
namespace = namespace_api.get(
context, namespace_name, session)
resource_type = resource_type_api.get(
context, resource_type_name, session)
found = _get(context, namespace_name, resource_type_name,
namespace['id'], resource_type['id'], session)
return _to_model_dict(resource_type_name, found)
def get_all_by_namespace(context, namespace_name, session):
"""List resource_type associations by namespace, raise if not found"""
# namespace get raises an exception if not visible
namespace = namespace_api.get(
context, namespace_name, session)
db_recs = (
session.query(models.MetadefResourceType)
.join(models.MetadefResourceType.associations)
.filter_by(namespace_id=namespace['id'])
.values(models.MetadefResourceType.name,
models.MetadefNamespaceResourceType.properties_target,
models.MetadefNamespaceResourceType.prefix,
models.MetadefNamespaceResourceType.created_at,
models.MetadefNamespaceResourceType.updated_at))
model_dict_list = []
for name, properties_target, prefix, created_at, updated_at in db_recs:
model_dict_list.append(
_set_model_dict
(name, properties_target, prefix, created_at, updated_at)
)
return model_dict_list
def create(context, namespace_name, values, session):
"""Create an association, raise if already exists or ns not found."""
namespace = namespace_api.get(
context, namespace_name, session)
# if the resource_type does not exist, create it
resource_type_name = values['name']
metadef_utils.drop_protected_attrs(
models.MetadefNamespaceResourceType, values)
try:
resource_type = resource_type_api.get(
context, resource_type_name, session)
except exc.NotFound:
resource_type = None
LOG.debug("Creating resource-type %s" % resource_type_name)
if resource_type is None:
resource_type_dict = {'name': resource_type_name, 'protected': False}
resource_type = resource_type_api.create(
context, resource_type_dict, session)
# Create the association record, set the field values
ns_resource_type_dict = _to_db_dict(
namespace['id'], resource_type['id'], values)
new_rec = _create_association(context, namespace_name, resource_type_name,
ns_resource_type_dict, session)
return _to_model_dict(resource_type_name, new_rec)
def delete(context, namespace_name, resource_type_name, session):
"""Delete an association or raise if not found"""
namespace = namespace_api.get(
context, namespace_name, session)
resource_type = resource_type_api.get(
context, resource_type_name, session)
deleted = _delete(context, namespace_name, resource_type_name,
namespace['id'], resource_type['id'], session)
return _to_model_dict(resource_type_name, deleted)
def delete_namespace_content(context, namespace_id, session):
"""Use this def only if the ns for the id has been verified as visible"""
count = 0
query = session.query(models.MetadefNamespaceResourceType)\
.filter_by(namespace_id=namespace_id)
count = query.delete(synchronize_session='fetch')
return count
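# Illustrative sketch (not part of the original module): a hypothetical caller
# associating a resource type with a namespace via the helpers above. The
# namespace name, resource type name and prefix are made-up values; `context`
# and `session` are assumed to come from glance's DB API layer.
def _example_associate_resource_type(context, session):
    values = {'name': 'OS::Nova::Server',   # resource type to associate
              'prefix': 'hw_',              # prefix applied to its properties
              'properties_target': None}
    # Creates the resource type record if needed, then the association.
    return create(context, 'HypotheticalNamespace', values, session)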
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MetricBaselineOperations:
"""MetricBaselineOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2018_09_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_uri: str,
metric_name: str,
timespan: Optional[str] = None,
interval: Optional[datetime.timedelta] = None,
aggregation: Optional[str] = None,
sensitivities: Optional[str] = None,
result_type: Optional[Union[str, "_models.ResultType"]] = None,
metricnamespace: Optional[str] = None,
filter: Optional[str] = None,
**kwargs: Any
) -> "_models.BaselineResponse":
"""**Gets the baseline values for a specific metric**.
:param resource_uri: The identifier of the resource. It has the following structure:
subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}.
For example:
subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1.
:type resource_uri: str
:param metric_name: The name of the metric to retrieve the baseline for.
:type metric_name: str
:param timespan: The timespan of the query. It is a string with the following format
'startDateTime_ISO/endDateTime_ISO'.
:type timespan: str
:param interval: The interval (i.e. timegrain) of the query.
:type interval: ~datetime.timedelta
:param aggregation: The aggregation type of the metric to retrieve the baseline for.
:type aggregation: str
:param sensitivities: The list of sensitivities (comma separated) to retrieve.
:type sensitivities: str
:param result_type: Allows retrieving only metadata of the baseline. On data request all
information is retrieved.
:type result_type: str or ~$(python-base-namespace).v2018_09_01.models.ResultType
:param metricnamespace: Metric namespace to query metric definitions for.
:type metricnamespace: str
:param filter: The **$filter** is used to describe a set of dimensions with their concrete
values which produce a specific metric's time series, for which a baseline is requested.
:type filter: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BaselineResponse, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2018_09_01.models.BaselineResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BaselineResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
'metricName': self._serialize.url("metric_name", metric_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if timespan is not None:
query_parameters['timespan'] = self._serialize.query("timespan", timespan, 'str')
if interval is not None:
query_parameters['interval'] = self._serialize.query("interval", interval, 'duration')
if aggregation is not None:
query_parameters['aggregation'] = self._serialize.query("aggregation", aggregation, 'str')
if sensitivities is not None:
query_parameters['sensitivities'] = self._serialize.query("sensitivities", sensitivities, 'str')
if result_type is not None:
query_parameters['resultType'] = self._serialize.query("result_type", result_type, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if metricnamespace is not None:
query_parameters['metricnamespace'] = self._serialize.query("metricnamespace", metricnamespace, 'str')
if filter is not None:
query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('BaselineResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/baseline/{metricName}'} # type: ignore
async def calculate_baseline(
self,
resource_uri: str,
time_series_information: "_models.TimeSeriesInformation",
**kwargs: Any
) -> "_models.CalculateBaselineResponse":
"""**Lists the baseline values for a resource**.
:param resource_uri: The identifier of the resource. It has the following structure:
subscriptions/{subscriptionName}/resourceGroups/{resourceGroupName}/providers/{providerName}/{resourceName}.
For example:
subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1.
:type resource_uri: str
:param time_series_information: Information that needs to be specified to calculate a baseline
on a time series.
:type time_series_information: ~$(python-base-namespace).v2018_09_01.models.TimeSeriesInformation
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CalculateBaselineResponse, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2018_09_01.models.CalculateBaselineResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CalculateBaselineResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.calculate_baseline.metadata['url'] # type: ignore
path_format_arguments = {
'resourceUri': self._serialize.url("resource_uri", resource_uri, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(time_series_information, 'TimeSeriesInformation')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CalculateBaselineResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
calculate_baseline.metadata = {'url': '/{resourceUri}/providers/Microsoft.Insights/calculatebaseline'} # type: ignore
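# Illustrative sketch (not part of the generated client): one hypothetical way
# to call the operation defined above. The operations object, resource URI,
# metric name and timespan values are all assumptions; in practice an instance
# of MetricBaselineOperations is attached to a monitor management client.
async def _example_get_baseline(metric_baseline_operations):
    baseline = await metric_baseline_operations.get(
        resource_uri=("subscriptions/00000000-0000-0000-0000-000000000000/"
                      "resourceGroups/rg/providers/Microsoft.Compute/"
                      "virtualMachines/vm1"),
        metric_name="Percentage CPU",
        timespan="2018-09-01T00:00:00Z/2018-09-02T00:00:00Z",
        aggregation="Average",
    )
    return baseline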
|
|
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Algorithm Protocol
===================
For a class to be passed as a trading algorithm to the
:py:class:`zipline.lines.SimulatedTrading` zipline it must follow an
implementation protocol. Examples of this algorithm protocol are provided
below.
The algorithm must expose methods:
- initialize: method that takes no args, no returns. Simply called to
enable the algorithm to set any internal state needed.
- get_sid_filter: method that takes no args, and returns a list of valid
sids. List must have a length between 1 and 10. If None is returned the
filter will block all events.
- handle_data: method that accepts a :py:class:`zipline.protocol.BarData`
of the current state of the simulation universe. An example data object:
.. This outputs the table as an HTML table but for some reason there
is no bounding box. Make the previous paragraph's ending colon a
double colon to turn this back into a blockquoted table in ASCII art.
+-----------------+--------------+----------------+-------------------+
| | sid(133) | sid(134) | sid(135) |
+=================+==============+================+===================+
| price | $10.10 | $22.50 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| volume | 10,000 | 5,000 | 50,000 |
+-----------------+--------------+----------------+-------------------+
| mvg_avg_30 | $9.97 | $22.61 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| dt | 6/30/2012 | 6/30/2011 | 6/29/2012 |
+-----------------+--------------+----------------+-------------------+
- set_order: method that accepts a callable. Will be set as the value of the
order method of trading_client. An algorithm can then place orders with a
valid sid and a number of shares::
self.order(sid(133), share_count)
- set_performance: property which can be set equal to the
cumulative_trading_performance property of the trading_client. An
algorithm can then check position information with the
Portfolio object::
self.Portfolio[sid(133)]['cost_basis']
- set_transact_setter: method that accepts a callable. Will
be set as the value of the set_transact_setter method of
the trading_client. This allows an algorithm to change the
slippage model used to predict transactions based on orders
and trade events.
"""
from copy import deepcopy
import numpy as np
from nose.tools import assert_raises
from six.moves import range
from six import itervalues
from zipline.algorithm import TradingAlgorithm
from zipline.api import FixedSlippage
from zipline.errors import UnsupportedOrderParameters
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
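# Illustrative sketch (not one of the original test algorithms): a minimal
# algorithm following the protocol described in the module docstring above.
# The sid value 133 matches the docstring's example table; the share count is
# an arbitrary assumption.
class MinimalProtocolExampleAlgorithm(TradingAlgorithm):
    def initialize(self):
        self.placed = False
    def get_sid_filter(self):
        # Only events for sid 133 reach handle_data.
        return [133]
    def handle_data(self, data):
        # Place a single order the first time a bar arrives.
        if not self.placed:
            self.order(133, 10)
            self.placed = True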
class TestAlgorithm(TradingAlgorithm):
"""
This algorithm will send a specified number of orders, to allow unit tests
to verify the orders sent/received, transactions created, and positions
at the close of a simulation.
"""
def initialize(self,
sid,
amount,
order_count,
sid_filter=None,
slippage=None):
self.count = order_count
self.sid = sid
self.amount = amount
self.incr = 0
if sid_filter:
self.sid_filter = sid_filter
else:
self.sid_filter = [self.sid]
if slippage is not None:
self.set_slippage(slippage)
def handle_data(self, data):
# place an order for amount shares of sid
if self.incr < self.count:
self.order(self.sid, self.amount)
self.incr += 1
class HeavyBuyAlgorithm(TradingAlgorithm):
"""
This algorithm orders `amount` shares of `sid` on every bar, to allow unit
tests to verify the orders sent/received, transactions created, and positions
at the close of a simulation.
"""
def initialize(self, sid, amount):
self.sid = sid
self.amount = amount
self.incr = 0
def handle_data(self, data):
# place an order for amount shares of sid
self.order(self.sid, self.amount)
self.incr += 1
class NoopAlgorithm(TradingAlgorithm):
"""
Does nothing (dolce far niente).
"""
def get_sid_filter(self):
return []
def initialize(self):
pass
def set_transact_setter(self, txn_sim_callable):
pass
def handle_data(self, data):
pass
class ExceptionAlgorithm(TradingAlgorithm):
"""
Throw an exception from the method name specified in the
constructor.
"""
def initialize(self, throw_from, sid):
self.throw_from = throw_from
self.sid = sid
if self.throw_from == "initialize":
raise Exception("Algo exception in initialize")
else:
pass
def set_portfolio(self, portfolio):
if self.throw_from == "set_portfolio":
raise Exception("Algo exception in set_portfolio")
else:
pass
def handle_data(self, data):
if self.throw_from == "handle_data":
raise Exception("Algo exception in handle_data")
else:
pass
def get_sid_filter(self):
if self.throw_from == "get_sid_filter":
raise Exception("Algo exception in get_sid_filter")
else:
return [self.sid]
def set_transact_setter(self, txn_sim_callable):
pass
class DivByZeroAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.sid = sid
self.incr = 0
def handle_data(self, data):
self.incr += 1
if self.incr > 4:
5 / 0
pass
class TooMuchProcessingAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.sid = sid
def handle_data(self, data):
# Unless we're running on some sort of
# supercomputer this will hit timeout.
for i in range(1000000000):
self.foo = i
class TimeoutAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.sid = sid
self.incr = 0
def handle_data(self, data):
if self.incr > 4:
import time
time.sleep(100)
pass
class RecordAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
record(name, self.incr, 'name2', 2, name3=self.incr)
class TestOrderAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.incr += 1
self.order(0, 1)
class TestOrderInstantAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
self.last_price = None
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
self.last_price, "Orders was not filled at last price."
self.incr += 2
self.order_value(0, data[0].price * 2.)
self.last_price = data[0].price
class TestOrderStyleForwardingAlgorithm(TradingAlgorithm):
"""
Test Algorithm for verifying that ExecutionStyles are properly forwarded by
order API helper methods. Pass the name of the method to be tested as a
string parameter to this algorithm's constructor.
"""
def __init__(self, *args, **kwargs):
self.method_name = kwargs.pop('method_name')
super(TestOrderStyleForwardingAlgorithm, self)\
.__init__(*args, **kwargs)
def initialize(self):
self.incr = 0
self.last_price = None
def handle_data(self, data):
if self.incr == 0:
assert len(self.portfolio.positions.keys()) == 0
method_to_check = getattr(self, self.method_name)
method_to_check(0, data[0].price, style=StopLimitOrder(10, 10))
assert len(self.blotter.open_orders[0]) == 1
result = self.blotter.open_orders[0][0]
assert result.limit == 10
assert result.stop == 10
self.incr += 1
class TestOrderValueAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
self.sale_price = None
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.incr += 2
self.order_value(0, data[0].price * 2.)
class TestTargetAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.target_shares = np.random.randint(1, 30)
self.order_target(0, self.target_shares)
class TestOrderPercentAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.order(0, 10)
self.target_shares = 10
return
else:
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.order_percent(0, .001)
self.target_shares += np.floor((.001 *
self.portfolio.portfolio_value)
/ data[0].price)
class TestTargetPercentAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.target_shares = 1
else:
assert np.round(self.portfolio.portfolio_value * 0.002) == \
self.portfolio.positions[0]['amount'] * self.sale_price, \
"Orders not filled correctly."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.sale_price = data[0].price
self.order_target_percent(0, .002)
class TestTargetValueAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.order(0, 10)
self.target_shares = 10
return
else:
print(self.portfolio)
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.order_target_value(0, 20)
self.target_shares = np.round(20 / data[0].price)
############################
# TradingControl Test Algos#
############################
class SetMaxPositionSizeAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, max_shares=None, max_notional=None):
self.order_count = 0
self.set_max_position_size(sid=sid,
max_shares=max_shares,
max_notional=max_notional)
class SetMaxOrderSizeAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, max_shares=None, max_notional=None):
self.order_count = 0
self.set_max_order_size(sid=sid,
max_shares=max_shares,
max_notional=max_notional)
class SetMaxOrderCountAlgorithm(TradingAlgorithm):
def initialize(self, count):
self.order_count = 0
self.set_max_order_count(count)
class SetLongOnlyAlgorithm(TradingAlgorithm):
def initialize(self):
self.order_count = 0
self.set_long_only()
from zipline.transforms import BatchTransform, batch_transform
class TestRegisterTransformAlgorithm(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.set_slippage(FixedSlippage())
def handle_data(self, data):
pass
class AmbitiousStopLimitAlgorithm(TradingAlgorithm):
"""
Algorithm that tries to buy with extremely low stops/limits and tries to
sell with extremely high versions of same. Should not end up with any
positions for reasonable data.
"""
def initialize(self, *args, **kwargs):
self.sid = kwargs.pop('sid')
def handle_data(self, data):
########
# Buys #
########
# Buy with low limit, shouldn't trigger.
self.order(self.sid, 100, limit_price=1)
# But with high stop, shouldn't trigger
self.order(self.sid, 100, stop_price=10000000)
# Buy with high limit (should trigger) but also high stop (should
# prevent trigger).
self.order(self.sid, 100, limit_price=10000000, stop_price=10000000)
# Buy with low stop (should trigger), but also low limit (should
# prevent trigger).
self.order(self.sid, 100, limit_price=1, stop_price=1)
#########
# Sells #
#########
# Sell with high limit, shouldn't trigger.
self.order(self.sid, -100, limit_price=1000000)
# Sell with low stop, shouldn't trigger.
self.order(self.sid, -100, stop_price=1)
# Sell with low limit (should trigger), but also high stop (should
# prevent trigger).
self.order(self.sid, -100, limit_price=1000000, stop_price=1000000)
# Sell with low limit (should trigger), but also low stop (should
# prevent trigger).
self.order(self.sid, -100, limit_price=1, stop_price=1)
###################
# Rounding Checks #
###################
self.order(self.sid, 100, limit_price=.00000001)
self.order(self.sid, -100, stop_price=.00000001)
##########################################
# Algorithm using simple batch transforms
class ReturnPriceBatchTransform(BatchTransform):
def get_value(self, data):
assert data.shape[1] == self.window_length, \
"data shape={0} does not equal window_length={1} for data={2}".\
format(data.shape[1], self.window_length, data)
return data.price
@batch_transform
def return_price_batch_decorator(data):
return data.price
@batch_transform
def return_args_batch_decorator(data, *args, **kwargs):
return args, kwargs
@batch_transform
def return_data(data, *args, **kwargs):
return data
@batch_transform
def uses_ufunc(data, *args, **kwargs):
# ufuncs like np.log should not crash
return np.log(data)
@batch_transform
def price_multiple(data, multiplier, extra_arg=1):
return data.price * multiplier * extra_arg
class BatchTransformAlgorithm(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.refresh_period = kwargs.pop('refresh_period', 1)
self.window_length = kwargs.pop('window_length', 3)
self.args = args
self.kwargs = kwargs
self.history_return_price_class = []
self.history_return_price_decorator = []
self.history_return_args = []
self.history_return_arbitrary_fields = []
self.history_return_nan = []
self.history_return_sid_filter = []
self.history_return_field_filter = []
self.history_return_field_no_filter = []
self.history_return_ticks = []
self.history_return_not_full = []
self.return_price_class = ReturnPriceBatchTransform(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_price_decorator = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_args_batch = return_args_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_arbitrary_fields = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_nan = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True
)
self.return_sid_filter = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True,
sids=[0]
)
self.return_field_filter = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True,
fields=['price']
)
self.return_field_no_filter = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True
)
self.return_not_full = return_data(
refresh_period=1,
window_length=self.window_length,
compute_only_full=False
)
self.uses_ufunc = uses_ufunc(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.price_multiple = price_multiple(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.iter = 0
self.set_slippage(FixedSlippage())
def handle_data(self, data):
self.history_return_price_class.append(
self.return_price_class.handle_data(data))
self.history_return_price_decorator.append(
self.return_price_decorator.handle_data(data))
self.history_return_args.append(
self.return_args_batch.handle_data(
data, *self.args, **self.kwargs))
self.history_return_not_full.append(
self.return_not_full.handle_data(data))
self.uses_ufunc.handle_data(data)
# check that calling transforms with the same arguments
# is idempotent
self.price_multiple.handle_data(data, 1, extra_arg=1)
if self.price_multiple.full:
pre = self.price_multiple.rolling_panel.get_current().shape[0]
result1 = self.price_multiple.handle_data(data, 1, extra_arg=1)
post = self.price_multiple.rolling_panel.get_current().shape[0]
assert pre == post, "batch transform is appending redundant events"
result2 = self.price_multiple.handle_data(data, 1, extra_arg=1)
assert result1 is result2, "batch transform is not idempotent"
# check that calling transform with the same data, but
# different supplemental arguments results in new
# results.
result3 = self.price_multiple.handle_data(data, 2, extra_arg=1)
assert result1 is not result3, \
"batch transform is not updating for new args"
result4 = self.price_multiple.handle_data(data, 1, extra_arg=2)
assert result1 is not result4,\
"batch transform is not updating for new kwargs"
new_data = deepcopy(data)
for sid in new_data:
new_data[sid]['arbitrary'] = 123
self.history_return_arbitrary_fields.append(
self.return_arbitrary_fields.handle_data(new_data))
# nan every second event price
if self.iter % 2 == 0:
self.history_return_nan.append(
self.return_nan.handle_data(data))
else:
nan_data = deepcopy(data)
for sid in nan_data.iterkeys():
nan_data[sid].price = np.nan
self.history_return_nan.append(
self.return_nan.handle_data(nan_data))
self.iter += 1
# Add a new sid to check that it does not get included
extra_sid_data = deepcopy(data)
extra_sid_data[1] = extra_sid_data[0]
self.history_return_sid_filter.append(
self.return_sid_filter.handle_data(extra_sid_data)
)
# Add a field to check that it does not get included
extra_field_data = deepcopy(data)
extra_field_data[0]['ignore'] = extra_sid_data[0]['price']
self.history_return_field_filter.append(
self.return_field_filter.handle_data(extra_field_data)
)
self.history_return_field_no_filter.append(
self.return_field_no_filter.handle_data(extra_field_data)
)
class BatchTransformAlgorithmMinute(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.refresh_period = kwargs.pop('refresh_period', 1)
self.window_length = kwargs.pop('window_length', 3)
self.args = args
self.kwargs = kwargs
self.history = []
self.batch_transform = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False,
bars='minute'
)
def handle_data(self, data):
self.history.append(self.batch_transform.handle_data(data))
class SetPortfolioAlgorithm(TradingAlgorithm):
"""
An algorithm that tries to set the portfolio directly.
The portfolio should be treated as a read-only object
within the algorithm.
"""
def initialize(self, *args, **kwargs):
pass
def handle_data(self, data):
self.portfolio = 3
class TALIBAlgorithm(TradingAlgorithm):
"""
An algorithm that applies a TA-Lib transform. The transform object can be
passed at initialization with the 'talib' keyword argument. The results are
stored in the talib_results dictionary, keyed by transform.
"""
def initialize(self, *args, **kwargs):
if 'talib' not in kwargs:
raise KeyError('No TA-LIB transform specified '
'(use keyword \'talib\').')
elif not isinstance(kwargs['talib'], (list, tuple)):
self.talib_transforms = (kwargs['talib'],)
else:
self.talib_transforms = kwargs['talib']
self.talib_results = dict((t, []) for t in self.talib_transforms)
def handle_data(self, data):
for t in self.talib_transforms:
result = t.handle_data(data)
if result is None:
if len(t.talib_fn.output_names) == 1:
result = np.nan
else:
result = (np.nan,) * len(t.talib_fn.output_names)
self.talib_results[t].append(result)
class EmptyPositionsAlgorithm(TradingAlgorithm):
"""
An algorithm that ensures that 'phantom' positions do not appear in
portfolio.positions in the case that a position has been entered
and fully exited.
"""
def initialize(self, *args, **kwargs):
self.ordered = False
self.exited = False
def handle_data(self, data):
if not self.ordered:
for s in data:
self.order(s, 100)
self.ordered = True
if not self.exited:
amounts = [pos.amount for pos
in itervalues(self.portfolio.positions)]
if (
all([(amount == 100) for amount in amounts]) and
(len(amounts) == len(data.keys()))
):
for stock in self.portfolio.positions:
self.order(stock, -100)
self.exited = True
# Should be 0 when all positions are exited.
self.record(num_positions=len(self.portfolio.positions))
class InvalidOrderAlgorithm(TradingAlgorithm):
"""
An algorithm that tries to make various invalid order calls, verifying that
appropriate exceptions are raised.
"""
def initialize(self, *args, **kwargs):
self.sid = kwargs.pop('sids')[0]
def handle_data(self, data):
from zipline.api import (
order_percent,
order_target,
order_target_percent,
order_target_value,
order_value,
)
for style in [MarketOrder(), LimitOrder(10),
StopOrder(10), StopLimitOrder(10, 10)]:
with assert_raises(UnsupportedOrderParameters):
order(self.sid, 10, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order(self.sid, 10, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_value(self.sid, 300, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_value(self.sid, 300, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_percent(self.sid, .1, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_percent(self.sid, .1, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target(self.sid, 100, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target(self.sid, 100, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_value(self.sid, 100, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_value(self.sid, 100, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_percent(self.sid, .2, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_percent(self.sid, .2, stop_price=10, style=style)
##############################
# Quantopian style algorithms
from zipline.api import (order,
set_slippage,
record)
# Noop algo
def initialize_noop(context):
pass
def handle_data_noop(context, data):
pass
# API functions
def initialize_api(context):
context.incr = 0
context.sale_price = None
set_slippage(FixedSlippage())
def handle_data_api(context, data):
if context.incr == 0:
assert 0 not in context.portfolio.positions
else:
assert context.portfolio.positions[0]['amount'] == \
context.incr, "Orders not filled immediately."
assert context.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
context.incr += 1
order(0, 1)
record(incr=context.incr)
###########################
# AlgoScripts as strings
noop_algo = """
# Noop algo
def initialize(context):
pass
def handle_data(context, data):
pass
"""
api_algo = """
from zipline.api import (order,
set_slippage,
FixedSlippage,
record)
def initialize(context):
context.incr = 0
context.sale_price = None
set_slippage(FixedSlippage())
def handle_data(context, data):
if context.incr == 0:
assert 0 not in context.portfolio.positions
else:
assert context.portfolio.positions[0]['amount'] == \
context.incr, "Orders not filled immediately."
assert context.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
context.incr += 1
order(0, 1)
record(incr=context.incr)
"""
api_get_environment_algo = """
from zipline.api import get_environment, order, symbol
def initialize(context):
context.environment = get_environment()
handle_data = lambda context, data: order(symbol(0), 1)
"""
api_symbol_algo = """
from zipline.api import (order,
symbol)
def initialize(context):
pass
def handle_data(context, data):
order(symbol(0), 1)
"""
call_order_in_init = """
from zipline.api import (order)
def initialize(context):
order(0, 10)
pass
def handle_data(context, data):
pass
"""
access_portfolio_in_init = """
def initialize(context):
var = context.portfolio.cash
pass
def handle_data(context, data):
pass
"""
access_account_in_init = """
def initialize(context):
var = context.account.settled_cash
pass
def handle_data(context, data):
pass
"""
call_all_order_methods = """
from zipline.api import (order,
order_value,
order_percent,
order_target,
order_target_value,
order_target_percent)
def initialize(context):
pass
def handle_data(context, data):
order(0, 10)
order_value(0, 300)
order_percent(0, .1)
order_target(0, 100)
order_target_value(0, 100)
order_target_percent(0, .2)
"""
record_variables = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(incr=context.incr)
"""
record_float_magic = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(data=float('%s'))
"""
|
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Object for relabeling."""
# pylint: disable=unused-variable
# pylint: disable=undefined-variable
# pylint: disable=wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from hal.labeler.labeler_utils import *
class Labeler:
"""A object that approximates the oracle.
Attributes:
generated_label_num: number of lables generated per transition
max_sequence_length: maximum length of sequence generated
temperature: temperature of sampling
encoder: encoder of the captioning model
decoder: decoder of the captioning model
answering_encoder: encoder of the answering model
answering_decoder: decoder of the answering model
answering_projection_layer: final projection layer of the answering model
"""
def __init__(self, labeler_config, name='labeler'):
"""Initializes the labeler.
Args:
labeler_config: configuration of the labeler
name: optional name
"""
self._name = name
self.generated_label_num = labeler_config['generated_label_num']
self.max_sequence_length = labeler_config['max_sequence_length']
self.temperature = labeler_config['sampling_temperature']
def set_captioning_model(self, labeler_config, saved_weight_path=None):
"""Set up the captinong model and maybe load weights.
Args:
labeler_config: configuration of the labeler
saved_weight_path: optional path where weights are loaded from
"""
self.encoder = get_captioning_encoder(labeler_config['captioning_encoder'])
self.decoder = get_captioning_decoder(labeler_config['captioning_decoder'])
if saved_weight_path:
ckpt = tf.train.Checkpoint(encoder=self.encoder, decoder=self.decoder)
latest = tf.train.latest_checkpoint(saved_weight_path)
assert latest, 'Captioning model ckpt not found in {}.'.format(
saved_weight_path)
print('Loading captioning model from: {}'.format(latest))
ckpt.restore(latest)
def set_answering_model(self, labeler_config, saved_weight_path=None):
"""Set up and load models of the answering model and maybe load weights.
Args:
labeler_config: configuration of the labeler
saved_weight_path: optional path where weights are loaded from
"""
self.answering_encoder = get_answering_encoder(
labeler_config['answering_encoder'])
self.answering_decoder = get_answering_decoder(
labeler_config['answering_decoder'])
self.answering_projection_layer = tf.keras.layers.Dense(
1, activation='sigmoid', name='answering_projection')
if saved_weight_path:
ckpt = tf.train.Checkpoint(
encoder=self.answering_encoder,
decoder=self.answering_decoder,
projection_layer=self.answering_projection_layer)
latest = tf.train.latest_checkpoint(saved_weight_path)
assert latest, 'Answering model ckpt not found in {}.'.format(
saved_weight_path)
print('Loading answering model from: {}'.format(latest))
ckpt.restore(latest)
def label_trajectory(self, trajectory, null_token=None):
"""Generate valid instructions for a trajectory of transitions.
Args:
trajectory: a trajectory of transitions
null_token: optional token that indicates the transition has no label
Returns:
labels for each transition in the trajectory, if any
"""
instructions = self.label_batch_transition(trajectory)
post_achieved_indicator = self.verify_batch_observation_batch_instruction(
trajectory[:, 1], instructions)
pre_achieved_indicator = self.verify_batch_observation_batch_instruction(
trajectory[:, 0], instructions)
filtered_inst = []
# prune the false answers
for i in range(len(trajectory)):
if null_token:
all_token = instructions[i].flatten()
num_null = np.float32(all_token == null_token).sum()
if num_null > (self.generated_label_num / 3.):
filtered_inst.append([])
continue
filtered_transition_inst = []
for pre, achieved, inst in zip(pre_achieved_indicator[i],
post_achieved_indicator[i],
instructions[i]):
if achieved > 0.5 and pre < 0.5 and null_token not in inst:
filtered_transition_inst.append(list(inst)[1:]) # cut sos symbol
filtered_inst.append(filtered_transition_inst)
return filtered_inst
def label_transition(self, obs, obs_next):
"""Generate an instruction for two neighboring frames.
Args:
obs: one frame
obs_next: the subsequent frame
Returns:
possible labels for that transition
"""
instructions = self.label_batch_transition([(obs, obs_next)])
return instructions[0]
def label_batch_transition(self, transition_pairs):
"""Generate a batch of instructions for a batch of transitions.
Args:
transition_pairs: a batch of (obs, obs_next)
Returns:
possible labels for each transition
"""
transition_pairs = tf.convert_to_tensor(transition_pairs)
result = self._label_batch_transition(transition_pairs).numpy()
return result
def _label_batch_transition(self, transition_pairs_tensor):
"""Generate instructions from a batch of transitions."""
num_pairs = len(transition_pairs_tensor)
transition_pairs = self.encoder.preprocess(transition_pairs_tensor)
features = self.encoder(transition_pairs)
features = tf.expand_dims(features, axis=1)
features_shape = tf.shape(features)
features_rank = len(features_shape)
tile_spec = tf.Variable(tf.ones([features_rank], dtype=tf.int32))
tile_spec[1].assign(self.generated_label_num)
features = tf.tile(features, tile_spec)
# tile each pair for generated label num times
features = tf.reshape(features, tf.concat([[-1], features_shape[2:]],
axis=0))
hidden = self.decoder.reset_state(batch_size=self.generated_label_num *
num_pairs)
result = [np.array([[1]] * self.generated_label_num * num_pairs)]
dec_input = tf.Variable(result[0], dtype=tf.int32)
for _ in tf.range(1, self.max_sequence_length):
# passing the features through the decoder
predictions, hidden, _ = self.decoder(dec_input, features, hidden)
predicted_id = tf.random.categorical(
predictions / self.temperature, 1, dtype=tf.int32).numpy()
result.append(predicted_id)
dec_input = predicted_id
result = tf.transpose(tf.squeeze(tf.stack(result), axis=-1), [1, 0])
result = tf.reshape(
result, (num_pairs, self.generated_label_num, self.max_sequence_length))
return result
def verify_batch_observation_batch_instruction(self, batch_obs, batch_inst):
"""Verify whether each instruction fits each observation.
Args:
batch_obs: a batch of observations
batch_inst: a batch with a single label for each transition
Returns:
an array of booleans indicating whether each instruction is valid for each
transition
"""
bo_t = tf.convert_to_tensor(batch_obs)
bi_t = tf.convert_to_tensor(batch_inst)
return self._verify_batch_observation_batch_instruction(bo_t, bi_t)
def verify_instruction(self, obs, instruction):
"""Verify a single instruction fits a single observation.
Args:
obs: a single observation
instruction: a tokenized instruction
Returns:
whether the instruction is valid for the observation
"""
obs, inst = tf.convert_to_tensor(obs), tf.convert_to_tensor(instruction)
inst = tf.expand_dims(inst, axis=0)
return self._verify_observation_batch_instruction(obs, inst)
@tf.function
def _verify_observation_batch_instruction(self, obs, batch_inst):
"""Verify if a single observation satisfy a batch of instruction."""
batch_size = len(batch_inst)
batch_inst = tf.expand_dims(batch_inst, axis=-1)
features = self.answering_encoder(tf.expand_dims(obs, axis=0))
features = tf.concat([features] * batch_size, axis=0)
hidden = self.answering_decoder.reset_state(batch_size=batch_size)
for i in tf.range(self.max_sequence_length):
_, hidden, _ = self.answering_decoder(batch_inst[:, i], features, hidden)
batch_answer = self.answering_projection_layer(hidden)
return batch_answer
@tf.function
def _verify_batch_observation_batch_instruction(self, batch_obs, batch_inst):
"""Verify if an batch of observation satisfy a batch of instruction."""
batch_size = len(batch_inst)
batch_inst = tf.expand_dims(batch_inst, axis=-1)
features = self.answering_encoder(batch_obs)
hidden = self.answering_decoder.reset_state(batch_size=batch_size)
for i in tf.range(self.max_sequence_length):
_, hidden, _ = self.answering_decoder(batch_inst[:, i], features, hidden)
batch_answer = self.answering_projection_layer(hidden)
return batch_answer
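# Illustrative sketch (not part of the class above): _label_batch_transition() samples
# several candidate instructions per transition by repeating each encoded pair
# `generated_label_num` times before decoding. A minimal, self-contained version of that
# tile-and-flatten step, using hypothetical names and assuming rank-2 [batch, dim] features:
import numpy as np
import tensorflow as tf


def tile_features_for_sampling(features, samples_per_item):
    """Repeat each feature row `samples_per_item` times along the batch axis."""
    features = tf.expand_dims(features, axis=1)                # [B, 1, D]
    features = tf.tile(features, [1, samples_per_item, 1])     # [B, K, D]
    return tf.reshape(features, [-1, tf.shape(features)[-1]])  # [B * K, D]


if __name__ == '__main__':
    demo = tf.constant(np.arange(6, dtype=np.float32).reshape(2, 3))
    print(tile_features_for_sampling(demo, 4).shape)  # (8, 3)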
|
|
#!/usr/bin/env python
#
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Packages admin handler."""
import datetime
import httplib
from simian.mac import admin
from simian.mac import common
from simian.mac import models
from simian.mac.common import auth
DEFAULT_PACKAGE_LOG_FETCH_LIMIT = 25
class Packages(admin.AdminHandler):
"""Handler for /admin/packages."""
DATASTORE_MODEL = models.PackageInfo
LOGGING_MODEL = models.AdminPackageLog
TEMPLATE = 'packages.html'
REPORT_TYPE = 'packages'
LOG_REPORT_TYPE = 'package_logs'
def get(self, report=None):
"""GET handler."""
auth.DoUserAuth()
if report == 'logs':
self._DisplayLogs()
else:
historical = self.request.get('historical') == '1'
applesus = self.request.get('applesus') == '1'
if historical or applesus:
self._DisplayPackagesListFromCache(applesus=applesus)
else:
self._DisplayPackagesList()
def _GetPackageQuery(self):
"""Build query."""
all_packages = self.request.get('all_packages') == '1'
query = self.DATASTORE_MODEL.all()
if self.REPORT_TYPE == 'packages' and not all_packages:
query.filter('catalogs IN', common.TRACKS)
return query
def _DisplayPackagesList(self):
"""Displays list of all installs/removals/etc."""
installs, counts_mtime = models.ReportsCache.GetInstallCounts()
pending, pending_mtime = models.ReportsCache.GetPendingCounts()
packages = []
all_packages = self.request.get('all_packages') == '1'
query = self._GetPackageQuery()
for p in query:
if not p.plist:
self.error(httplib.FORBIDDEN)
self.response.out.write('Package %s has a broken plist!' % p.filename)
return
pkg = {}
pkg['count'] = installs.get(p.munki_name, {}).get('install_count', 'N/A')
pkg['fail_count'] = installs.get(p.munki_name, {}).get(
'install_fail_count', 'N/A')
pkg['pending_count'] = pending.get(p.munki_name, 'N/A')
pkg['duration_seconds_avg'] = installs.get(p.munki_name, {}).get(
'duration_seconds_avg', None) or 'N/A'
pkg['unattended'] = p.plist.get('unattended_install', False)
pkg['unattended_uninstall'] = p.plist.get('unattended_uninstall', False)
force_install_after_date = p.plist.get('force_install_after_date', None)
if force_install_after_date:
pkg['force_install_after_date'] = force_install_after_date
pkg['catalogs'] = p.catalog_matrix
pkg['manifests'] = p.manifest_matrix
pkg['munki_name'] = p.munki_name or p.plist.GetMunkiName()
pkg['filename'] = p.filename
pkg['file_size'] = p.plist.get('installer_item_size', 0) * 1024
pkg['install_types'] = p.install_types
pkg['manifest_mod_access'] = p.manifest_mod_access
pkg['description'] = p.description
pkg['plist_is_signed'] = p.plist_is_signed()
packages.append(pkg)
packages.sort(key=lambda pkg: pkg['munki_name'].lower())
self.Render(self.TEMPLATE,
{'packages': packages, 'counts_mtime': counts_mtime,
'pending_mtime': pending_mtime,
'report_type': self.REPORT_TYPE,
'active_pkg': self.request.GET.get('activepkg'),
'is_support_user': auth.IsSupportUser(),
'can_upload': auth.HasPermission(auth.UPLOAD),
'is_admin': auth.IsAdminUser(),
'all_packages': all_packages,})
def _DisplayPackagesListFromCache(self, applesus=False):
installs, counts_mtime = models.ReportsCache.GetInstallCounts()
pkgs = []
names = installs.keys()
names.sort()
for name in names:
install = installs[name]
if applesus and install.get('applesus', False):
d = {'name': name,
'count': install.get('install_count', 'N/A'),
'fail_count': install.get('install_fail_count', 'N/A'),
'duration_seconds_avg': install.get('duration_seconds_avg', 'N/A')}
pkgs.append(d)
elif not applesus and not install['applesus']:
d = {'name': name,
'count': install.get('install_count', 'N/A'),
'fail_count': install.get('install_fail_count', 'N/A'),
'duration_seconds_avg': install.get('duration_seconds_avg', 'N/A')}
pkgs.append(d)
if applesus:
report_type = 'apple_historical'
else:
report_type = 'packages_historical'
self.Render(
self.TEMPLATE,
{'packages': pkgs, 'counts_mtime': counts_mtime,
'applesus': applesus, 'cached_pkgs_list': True,
'report_type': report_type})
def _DisplayLogs(self):
"""Displays all models.AdminPackageLog entities."""
key_id = self.request.get('plist')
if key_id:
try:
key_id = int(key_id)
except ValueError:
self.error(httplib.NOT_FOUND)
return
log = self.LOGGING_MODEL.get_by_id(key_id)
if self.request.get('format') == 'xml':
self.response.headers['Content-Type'] = 'text/xml; charset=utf-8'
self.response.out.write(log.plist)
else:
time = datetime.datetime.strftime(log.mtime, '%Y-%m-%d %H:%M:%S')
title = 'plist for Package Log <b>%s - %s</b>' % (log.filename, time)
raw_xml = '/admin/packages/logs?plist=%d&format=xml' % key_id
self.Render(
'plist.html',
{'plist_type': 'package_log',
'xml': admin.XmlToHtml(log.plist.GetXml()),
'title': title,
'raw_xml_link': raw_xml,
})
else:
filename = self.request.get('filename')
query = self.LOGGING_MODEL.all()
if filename:
query.filter('filename =', filename)
query.order('-mtime')
logs = self.Paginate(query, DEFAULT_PACKAGE_LOG_FETCH_LIMIT)
formatted_logs = []
for log in logs:
formatted_log = {}
formatted_log['data'] = log
if (hasattr(log, 'proposed_catalogs')
and hasattr(log, 'proposed_manifest')):
formatted_log['catalogs'] = common.util.MakeTrackMatrix(
log.catalogs, log.proposed_catalogs)
formatted_log['manifests'] = common.util.MakeTrackMatrix(
log.manifests, log.proposed_manifests)
else:
formatted_log['catalogs'] = common.util.MakeTrackMatrix(log.catalogs)
formatted_log['manifests'] = common.util.MakeTrackMatrix(
log.manifests)
formatted_logs.append(formatted_log)
self.Render(
'package_logs.html',
{'logs': formatted_logs,
'report_type': self.LOG_REPORT_TYPE,
'filename': filename})
class PackageProposals(Packages):
"""Handler for /admin/proposals."""
DATASTORE_MODEL = models.PackageInfoProposal
LOGGING_MODEL = models.AdminPackageProposalLog
TEMPLATE = 'packages.html'
LOG_REPORT_TYPE = 'proposal_logs'
REPORT_TYPE = 'proposals'
def _GetPackageQuery(self):
return self.DATASTORE_MODEL.all()
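# Illustrative sketch (not part of the handlers above): _DisplayPackagesList() tolerates
# packages that are missing from the cached install counts by chaining dict.get() calls
# with 'N/A' fallbacks. A standalone version of that lookup, using hypothetical names:
def _cached_count(installs, munki_name, field, default='N/A'):
  """Return installs[munki_name][field], falling back to `default` at either level."""
  return installs.get(munki_name, {}).get(field, default)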
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 Metacloud, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from oslo.config import cfg
import webob
from nova.api.openstack.compute.contrib import security_group_default_rules
from nova.api.openstack import wsgi
from nova import context
import nova.db
from nova import test
from nova.tests.api.openstack import fakes
CONF = cfg.CONF
class AttrDict(dict):
def __getattr__(self, k):
return self[k]
def security_group_default_rule_template(**kwargs):
rule = kwargs.copy()
rule.setdefault('ip_protocol', 'TCP')
rule.setdefault('from_port', 22)
rule.setdefault('to_port', 22)
rule.setdefault('cidr', '10.10.10.0/24')
return rule
def security_group_default_rule_db(security_group_default_rule, id=None):
attrs = security_group_default_rule.copy()
if id is not None:
attrs['id'] = id
return AttrDict(attrs)
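# Illustrative usage sketch (not part of the original tests): keyword arguments passed to
# security_group_default_rule_template() override the setdefault() defaults, and
# security_group_default_rule_db() wraps the resulting dict for attribute-style access.
def _example_default_rule_usage():
    rule = security_group_default_rule_template(from_port=80, to_port=80)
    assert rule['ip_protocol'] == 'TCP' and rule['to_port'] == 80
    db_rule = security_group_default_rule_db(rule, id=1)
    assert db_rule.id == 1 and db_rule.cidr == '10.10.10.0/24'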
class TestSecurityGroupDefaultRules(test.TestCase):
def setUp(self):
super(TestSecurityGroupDefaultRules, self).setUp()
self.controller = \
security_group_default_rules.SecurityGroupDefaultRulesController()
def test_create_security_group_default_rule(self):
sgr = security_group_default_rule_template()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
sgr_dict = dict(security_group_default_rule=sgr)
res_dict = self.controller.create(req, sgr_dict)
security_group_default_rule = res_dict['security_group_default_rule']
self.assertEqual(security_group_default_rule['ip_protocol'],
sgr['ip_protocol'])
self.assertEqual(security_group_default_rule['from_port'],
sgr['from_port'])
self.assertEqual(security_group_default_rule['to_port'],
sgr['to_port'])
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
sgr['cidr'])
def test_create_security_group_default_rule_with_no_to_port(self):
sgr = security_group_default_rule_template()
del sgr['to_port']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_from_port(self):
sgr = security_group_default_rule_template()
del sgr['from_port']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_ip_protocol(self):
sgr = security_group_default_rule_template()
del sgr['ip_protocol']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_cidr(self):
sgr = security_group_default_rule_template()
del sgr['cidr']
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.create(req,
{'security_group_default_rule': sgr})
security_group_default_rule = res_dict['security_group_default_rule']
self.assertNotEquals(security_group_default_rule['id'], 0)
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
'0.0.0.0/0')
def test_create_security_group_default_rule_with_blank_to_port(self):
sgr = security_group_default_rule_template(to_port='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_from_port(self):
sgr = security_group_default_rule_template(from_port='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_ip_protocol(self):
sgr = security_group_default_rule_template(ip_protocol='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_blank_cidr(self):
sgr = security_group_default_rule_template(cidr='')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.create(req,
{'security_group_default_rule': sgr})
security_group_default_rule = res_dict['security_group_default_rule']
self.assertNotEquals(security_group_default_rule['id'], 0)
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
'0.0.0.0/0')
def test_create_security_group_default_rule_non_numerical_to_port(self):
sgr = security_group_default_rule_template(to_port='invalid')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_non_numerical_from_port(self):
sgr = security_group_default_rule_template(from_port='invalid')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_ip_protocol(self):
sgr = security_group_default_rule_template(ip_protocol='invalid')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_cidr(self):
sgr = security_group_default_rule_template(cidr='10.10.2222.0/24')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_to_port(self):
sgr = security_group_default_rule_template(to_port='666666')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_invalid_from_port(self):
sgr = security_group_default_rule_template(from_port='666666')
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_create_security_group_default_rule_with_no_body(self):
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, None)
def test_create_duplicate_security_group_default_rule(self):
sgr = security_group_default_rule_template()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.controller.create(req, {'security_group_default_rule': sgr})
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.create,
req, {'security_group_default_rule': sgr})
def test_security_group_default_rules_list(self):
self.test_create_security_group_default_rule()
rules = [dict(id=1,
ip_protocol='TCP',
from_port=22,
to_port=22,
ip_range=dict(cidr='10.10.10.0/24'))]
expected = {'security_group_default_rules': rules}
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.index(req)
self.assertEqual(res_dict, expected)
def test_default_security_group_default_rule_show(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
res_dict = self.controller.show(req, '1')
security_group_default_rule = res_dict['security_group_default_rule']
self.assertEqual(security_group_default_rule['ip_protocol'],
sgr['ip_protocol'])
self.assertEqual(security_group_default_rule['to_port'],
sgr['to_port'])
self.assertEqual(security_group_default_rule['from_port'],
sgr['from_port'])
self.assertEqual(security_group_default_rule['ip_range']['cidr'],
sgr['cidr'])
def test_delete_security_group_default_rule(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
self.called = False
def security_group_default_rule_destroy(context, id):
self.called = True
def return_security_group_default_rule(context, id):
self.assertEqual(sgr['id'], id)
return security_group_default_rule_db(sgr)
self.stubs.Set(nova.db, 'security_group_default_rule_destroy',
security_group_default_rule_destroy)
self.stubs.Set(nova.db, 'security_group_default_rule_get',
return_security_group_default_rule)
req = fakes.HTTPRequest.blank(
'/v2/fake/os-security-group-default-rules', use_admin_context=True)
self.controller.delete(req, '1')
self.assertTrue(self.called)
def test_security_group_ensure_default(self):
sgr = security_group_default_rule_template(id=1)
self.test_create_security_group_default_rule()
ctxt = context.get_admin_context()
setattr(ctxt, 'project_id', 'new_project_id')
sg = nova.db.security_group_ensure_default(ctxt)
rules = nova.db.security_group_rule_get_by_security_group(ctxt, sg.id)
security_group_rule = rules[0]
self.assertEqual(sgr['id'], security_group_rule.id)
self.assertEqual(sgr['ip_protocol'], security_group_rule.protocol)
self.assertEqual(sgr['from_port'], security_group_rule.from_port)
self.assertEqual(sgr['to_port'], security_group_rule.to_port)
self.assertEqual(sgr['cidr'], security_group_rule.cidr)
class TestSecurityGroupDefaultRulesXMLDeserializer(test.TestCase):
def setUp(self):
super(TestSecurityGroupDefaultRulesXMLDeserializer, self).setUp()
deserializer = security_group_default_rules.\
SecurityGroupDefaultRulesXMLDeserializer()
self.deserializer = deserializer
def test_create_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"to_port": "22",
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_to_port_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_from_port_request(self):
serial_request = """
<security_group_default_rule>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"to_port": "22",
"ip_protocol": "TCP",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_ip_protocol_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<cidr>10.10.10.0/24</cidr>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"to_port": "22",
"cidr": "10.10.10.0/24"
},
}
self.assertEqual(request['body'], expected)
def test_create_no_cidr_request(self):
serial_request = """
<security_group_default_rule>
<from_port>22</from_port>
<to_port>22</to_port>
<ip_protocol>TCP</ip_protocol>
</security_group_default_rule>"""
request = self.deserializer.deserialize(serial_request)
expected = {
"security_group_default_rule": {
"from_port": "22",
"to_port": "22",
"ip_protocol": "TCP",
},
}
self.assertEqual(request['body'], expected)
class TestSecurityGroupDefaultRuleXMLSerializer(test.TestCase):
def setUp(self):
super(TestSecurityGroupDefaultRuleXMLSerializer, self).setUp()
self.namespace = wsgi.XMLNS_V11
self.rule_serializer =\
security_group_default_rules.SecurityGroupDefaultRuleTemplate()
self.index_serializer =\
security_group_default_rules.SecurityGroupDefaultRulesTemplate()
def _tag(self, elem):
tagname = elem.tag
self.assertEqual(tagname[0], '{')
tmp = tagname.partition('}')
namespace = tmp[0][1:]
self.assertEqual(namespace, self.namespace)
return tmp[2]
def _verify_security_group_default_rule(self, raw_rule, tree):
self.assertEqual(raw_rule['id'], tree.get('id'))
seen = set()
expected = set(['ip_protocol', 'from_port', 'to_port', 'ip_range',
'ip_range/cidr'])
for child in tree:
child_tag = self._tag(child)
seen.add(child_tag)
if child_tag == 'ip_range':
for gr_child in child:
gr_child_tag = self._tag(gr_child)
self.assertIn(gr_child_tag, raw_rule[child_tag])
seen.add('%s/%s' % (child_tag, gr_child_tag))
self.assertEqual(gr_child.text,
raw_rule[child_tag][gr_child_tag])
else:
self.assertEqual(child.text, raw_rule[child_tag])
self.assertEqual(seen, expected)
def test_rule_serializer(self):
raw_rule = dict(id='123',
ip_protocol='TCP',
from_port='22',
to_port='22',
ip_range=dict(cidr='10.10.10.0/24'))
rule = dict(security_group_default_rule=raw_rule)
text = self.rule_serializer.serialize(rule)
tree = etree.fromstring(text)
self.assertEqual('security_group_default_rule', self._tag(tree))
self._verify_security_group_default_rule(raw_rule, tree)
def test_index_serializer(self):
rules = [dict(id='123',
ip_protocol='TCP',
from_port='22',
to_port='22',
ip_range=dict(cidr='10.10.10.0/24')),
dict(id='234',
ip_protocol='UDP',
from_port='23456',
to_port='234567',
ip_range=dict(cidr='10.12.0.0/18')),
dict(id='345',
ip_protocol='tcp',
from_port='3456',
to_port='4567',
ip_range=dict(cidr='192.168.1.0/32'))]
rules_dict = dict(security_group_default_rules=rules)
text = self.index_serializer.serialize(rules_dict)
tree = etree.fromstring(text)
self.assertEqual('security_group_default_rules', self._tag(tree))
self.assertEqual(len(rules), len(tree))
for idx, child in enumerate(tree):
self._verify_security_group_default_rule(rules[idx], child)
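# Standalone sketch (stdlib only, not part of the tests above) of the namespace handling in
# _tag(): ElementTree serializes namespaced tags as "{namespace}name", so splitting on the
# first "}" separates the namespace URI from the local tag name. Names are illustrative.
import xml.etree.ElementTree as _demo_etree


def _split_namespaced_tag(elem):
    namespace, _, local_name = elem.tag[1:].partition('}')
    return namespace, local_name


if __name__ == '__main__':
    _demo_root = _demo_etree.fromstring(
        '<security_group_default_rule xmlns="http://example.com/ns"/>')
    print(_split_namespaced_tag(_demo_root))
    # ('http://example.com/ns', 'security_group_default_rule')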
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains the Apache Livy hook."""
import json
import re
from enum import Enum
from typing import Any, Dict, List, Optional, Sequence, Union
import requests
from airflow.exceptions import AirflowException
from airflow.providers.http.hooks.http import HttpHook
from airflow.utils.log.logging_mixin import LoggingMixin
class BatchState(Enum):
"""Batch session states"""
NOT_STARTED = 'not_started'
STARTING = 'starting'
RUNNING = 'running'
IDLE = 'idle'
BUSY = 'busy'
SHUTTING_DOWN = 'shutting_down'
ERROR = 'error'
DEAD = 'dead'
KILLED = 'killed'
SUCCESS = 'success'
class LivyHook(HttpHook, LoggingMixin):
"""
Hook for Apache Livy through the REST API.
:param livy_conn_id: reference to a pre-defined Livy Connection.
:param extra_options: A dictionary of options passed to Livy.
:param extra_headers: A dictionary of headers passed to the HTTP request to livy.
.. seealso::
For more details refer to the Apache Livy API reference:
https://livy.apache.org/docs/latest/rest-api.html
"""
TERMINAL_STATES = {
BatchState.SUCCESS,
BatchState.DEAD,
BatchState.KILLED,
BatchState.ERROR,
}
_def_headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
conn_name_attr = 'livy_conn_id'
default_conn_name = 'livy_default'
conn_type = 'livy'
hook_name = 'Apache Livy'
def __init__(
self,
livy_conn_id: str = default_conn_name,
extra_options: Optional[Dict[str, Any]] = None,
extra_headers: Optional[Dict[str, Any]] = None,
) -> None:
super().__init__(http_conn_id=livy_conn_id)
self.extra_headers = extra_headers or {}
self.extra_options = extra_options or {}
def get_conn(self, headers: Optional[Dict[str, Any]] = None) -> Any:
"""
Returns http session for use with requests
:param headers: additional headers to be passed through as a dictionary
:return: requests session
:rtype: requests.Session
"""
tmp_headers = self._def_headers.copy() # setting default headers
if headers:
tmp_headers.update(headers)
return super().get_conn(tmp_headers)
def run_method(
self,
endpoint: str,
method: str = 'GET',
data: Optional[Any] = None,
headers: Optional[Dict[str, Any]] = None,
retry_args: Optional[Dict[str, Any]] = None,
) -> Any:
"""
Wrapper for HttpHook that allows changing the HTTP method on the same HttpHook instance
:param method: http method
:param endpoint: endpoint
:param data: request payload
:param headers: headers
:param retry_args: Arguments which define the retry behaviour.
See Tenacity documentation at https://github.com/jd/tenacity
:return: http response
:rtype: requests.Response
"""
if method not in ('GET', 'POST', 'PUT', 'DELETE', 'HEAD'):
raise ValueError(f"Invalid http method '{method}'")
if not self.extra_options:
self.extra_options = {'check_response': False}
back_method = self.method
self.method = method
try:
if retry_args:
result = self.run_with_advanced_retry(
endpoint=endpoint,
data=data,
headers=headers,
extra_options=self.extra_options,
_retry_args=retry_args,
)
else:
result = self.run(endpoint, data, headers, self.extra_options)
finally:
self.method = back_method
return result
def post_batch(self, *args: Any, **kwargs: Any) -> Any:
"""
Perform request to submit batch
:return: batch session id
:rtype: int
"""
batch_submit_body = json.dumps(self.build_post_batch_body(*args, **kwargs))
if self.base_url is None:
# need to init self.base_url
self.get_conn()
self.log.info("Submitting job %s to %s", batch_submit_body, self.base_url)
response = self.run_method(
method='POST', endpoint='/batches', data=batch_submit_body, headers=self.extra_headers
)
self.log.debug("Got response: %s", response.text)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
raise AirflowException(
"Could not submit batch. "
f"Status code: {err.response.status_code}. Message: '{err.response.text}'"
)
batch_id = self._parse_post_response(response.json())
if batch_id is None:
raise AirflowException("Unable to parse the batch session id")
self.log.info("Batch submitted with session id: %d", batch_id)
return batch_id
def get_batch(self, session_id: Union[int, str]) -> Any:
"""
Fetch info about the specified batch
:param session_id: identifier of the batch session
:return: response body
:rtype: dict
"""
self._validate_session_id(session_id)
self.log.debug("Fetching info for batch session %d", session_id)
response = self.run_method(endpoint=f'/batches/{session_id}')
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.log.warning("Got status code %d for session %d", err.response.status_code, session_id)
raise AirflowException(
f"Unable to fetch batch with id: {session_id}. Message: {err.response.text}"
)
return response.json()
def get_batch_state(
self, session_id: Union[int, str], retry_args: Optional[Dict[str, Any]] = None
) -> BatchState:
"""
Fetch the state of the specified batch
:param session_id: identifier of the batch session
:param retry_args: Arguments which define the retry behaviour.
See Tenacity documentation at https://github.com/jd/tenacity
:return: batch state
:rtype: BatchState
"""
self._validate_session_id(session_id)
self.log.debug("Fetching info for batch session %d", session_id)
response = self.run_method(endpoint=f'/batches/{session_id}/state', retry_args=retry_args)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.log.warning("Got status code %d for session %d", err.response.status_code, session_id)
raise AirflowException(
f"Unable to fetch batch with id: {session_id}. Message: {err.response.text}"
)
jresp = response.json()
if 'state' not in jresp:
raise AirflowException(f"Unable to get state for batch with id: {session_id}")
return BatchState(jresp['state'])
def delete_batch(self, session_id: Union[int, str]) -> Any:
"""
Delete the specified batch
:param session_id: identifier of the batch session
:return: response body
:rtype: dict
"""
self._validate_session_id(session_id)
self.log.info("Deleting batch session %d", session_id)
response = self.run_method(method='DELETE', endpoint=f'/batches/{session_id}')
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.log.warning("Got status code %d for session %d", err.response.status_code, session_id)
raise AirflowException(
f"Could not kill the batch with session id: {session_id}. Message: {err.response.text}"
)
return response.json()
def get_batch_logs(self, session_id: Union[int, str], log_start_position, log_batch_size) -> Any:
"""
Gets the session logs for a specified batch.
:param session_id: identifier of the batch session
:param log_start_position: Position from where to pull the logs
:param log_batch_size: Number of lines to pull in one batch
:return: response body
:rtype: dict
"""
self._validate_session_id(session_id)
log_params = {'from': log_start_position, 'size': log_batch_size}
response = self.run_method(endpoint=f'/batches/{session_id}/log', data=log_params)
try:
response.raise_for_status()
except requests.exceptions.HTTPError as err:
self.log.warning("Got status code %d for session %d", err.response.status_code, session_id)
raise AirflowException(
f"Could not fetch the logs for batch with session id: {session_id}. "
f"Message: {err.response.text}"
)
return response.json()
def dump_batch_logs(self, session_id: Union[int, str]) -> Any:
"""
Dumps the session logs for a specified batch
:param session_id: identifier of the batch session
:return: response body
:rtype: dict
"""
self.log.info("Fetching the logs for batch session with id: %d", session_id)
log_start_line = 0
log_total_lines = 0
log_batch_size = 100
while log_start_line <= log_total_lines:
# Livy log endpoint is paginated.
response = self.get_batch_logs(session_id, log_start_line, log_batch_size)
log_total_lines = self._parse_request_response(response, 'total')
log_start_line += log_batch_size
log_lines = self._parse_request_response(response, 'log')
for log_line in log_lines:
self.log.info(log_line)
@staticmethod
def _validate_session_id(session_id: Union[int, str]) -> None:
"""
Validate that the session id is an int
:param session_id: session id
"""
try:
int(session_id)
except (TypeError, ValueError):
raise TypeError("'session_id' must be an integer")
@staticmethod
def _parse_post_response(response: Dict[Any, Any]) -> Any:
"""
Parse batch response for batch id
:param response: response body
:return: session id
:rtype: int
"""
return response.get('id')
@staticmethod
def _parse_request_response(response: Dict[Any, Any], parameter) -> Any:
"""
Parse the batch response for the value of a given parameter
:param response: response body
:param parameter: name of the parameter to extract
:return: value of the parameter
:rtype: Union[int, list]
"""
return response.get(parameter)
@staticmethod
def build_post_batch_body(
file: str,
args: Optional[Sequence[Union[str, int, float]]] = None,
class_name: Optional[str] = None,
jars: Optional[List[str]] = None,
py_files: Optional[List[str]] = None,
files: Optional[List[str]] = None,
archives: Optional[List[str]] = None,
name: Optional[str] = None,
driver_memory: Optional[str] = None,
driver_cores: Optional[Union[int, str]] = None,
executor_memory: Optional[str] = None,
executor_cores: Optional[int] = None,
num_executors: Optional[Union[int, str]] = None,
queue: Optional[str] = None,
proxy_user: Optional[str] = None,
conf: Optional[Dict[Any, Any]] = None,
) -> Any:
"""
Build the post batch request body.
For more information about the format refer to
.. seealso:: https://livy.apache.org/docs/latest/rest-api.html
:param file: Path of the file containing the application to execute (required).
:param proxy_user: User to impersonate when running the job.
:param class_name: Application Java/Spark main class (string).
:param args: Command line arguments for the application.
:param jars: Jars to be used in this session.
:param py_files: Python files to be used in this session.
:param files: Files to be used in this session.
:param driver_memory: Amount of memory to use for the driver process (string).
:param driver_cores: Number of cores to use for the driver process (int).
:param executor_memory: Amount of memory to use per executor process (string).
:param executor_cores: Number of cores to use for each executor (int).
:param num_executors: Number of executors to launch for this session (int).
:param archives: Archives to be used in this session.
:param queue: The name of the YARN queue to which the application is submitted (string).
:param name: The name of this session (string).
:param conf: Spark configuration properties.
:return: request body
:rtype: dict
"""
body: Dict[str, Any] = {'file': file}
if proxy_user:
body['proxyUser'] = proxy_user
if class_name:
body['className'] = class_name
if args and LivyHook._validate_list_of_stringables(args):
body['args'] = [str(val) for val in args]
if jars and LivyHook._validate_list_of_stringables(jars):
body['jars'] = jars
if py_files and LivyHook._validate_list_of_stringables(py_files):
body['pyFiles'] = py_files
if files and LivyHook._validate_list_of_stringables(files):
body['files'] = files
if driver_memory and LivyHook._validate_size_format(driver_memory):
body['driverMemory'] = driver_memory
if driver_cores:
body['driverCores'] = driver_cores
if executor_memory and LivyHook._validate_size_format(executor_memory):
body['executorMemory'] = executor_memory
if executor_cores:
body['executorCores'] = executor_cores
if num_executors:
body['numExecutors'] = num_executors
if archives and LivyHook._validate_list_of_stringables(archives):
body['archives'] = archives
if queue:
body['queue'] = queue
if name:
body['name'] = name
if conf and LivyHook._validate_extra_conf(conf):
body['conf'] = conf
return body
@staticmethod
def _validate_size_format(size: str) -> bool:
"""
Validate size format.
:param size: size value
:return: true if valid format
:rtype: bool
"""
if size and not (isinstance(size, str) and re.match(r'^\d+[kmgt]b?$', size, re.IGNORECASE)):
raise ValueError(f"Invalid java size format for string'{size}'")
return True
@staticmethod
def _validate_list_of_stringables(vals: Sequence[Union[str, int, float]]) -> bool:
"""
Check the values in the provided list can be converted to strings.
:param vals: list to validate
:return: true if valid
:rtype: bool
"""
if (
vals is None
or not isinstance(vals, (tuple, list))
or any(1 for val in vals if not isinstance(val, (str, int, float)))
):
raise ValueError("List of strings expected")
return True
@staticmethod
def _validate_extra_conf(conf: Dict[Any, Any]) -> bool:
"""
Check configuration values are either strings or ints.
:param conf: configuration variable
:return: true if valid
:rtype: bool
"""
if conf:
if not isinstance(conf, dict):
raise ValueError("'conf' argument must be a dict")
if any(True for k, v in conf.items() if not (v and isinstance(v, str) or isinstance(v, int))):
raise ValueError("'conf' values must be either strings or ints")
return True
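# Standalone sketch (not part of the hook above): the memory-size check in
# LivyHook._validate_size_format() boils down to the regex below -- an integer followed by a
# k/m/g/t unit and an optional trailing "b", case-insensitively (e.g. "512m", "1G", "2gb").
import re as _demo_re

_JAVA_SIZE_RE = _demo_re.compile(r'^\d+[kmgt]b?$', _demo_re.IGNORECASE)


def _looks_like_java_size(value) -> bool:
    """Return True if `value` is a string in Java/Spark memory-size format."""
    return bool(isinstance(value, str) and _JAVA_SIZE_RE.match(value))


if __name__ == '__main__':
    print([_looks_like_java_size(v) for v in ('512m', '1G', '2gb', '10', 'large')])
    # [True, True, True, False, False]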
|
|
# -*- coding: utf-8 -*-
"""
XForm survey question type mapping dictionary module.
"""
from pyxform.xls2json import QuestionTypesReader, print_pyobj_to_json
def generate_new_dict():
"""
This is just here in case there is ever any need to generate the question
type dictionary from all.xls again.
It shouldn't be called as part of any application.
"""
path_to_question_types = "/pyxform/question_types/all.xls"
json_dict = QuestionTypesReader(path_to_question_types).to_json_dict()
print_pyobj_to_json(json_dict, "new_question_type_dict.json")
QUESTION_TYPE_DICT = {
"q picture": {
"control": {"tag": "upload", "mediatype": "image/*"},
"bind": {"type": "binary"},
},
"photo": {
"control": {"tag": "upload", "mediatype": "image/*"},
"bind": {"type": "binary"},
},
"add date time prompt": {"control": {"tag": "input"}, "bind": {"type": "dateTime"}},
"add audio prompt": {
"control": {"tag": "upload", "mediatype": "audio/*"},
"bind": {"type": "binary"},
},
"q date time": {"control": {"tag": "input"}, "bind": {"type": "dateTime"}},
"phonenumber": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "phonenumber",
}
},
"get start time": {
"bind": {
"jr:preload": "timestamp",
"type": "dateTime",
"jr:preloadParams": "start",
}
},
"add select multiple prompt using": {
"control": {"tag": "select"},
"bind": {"type": "string"},
},
"add note prompt": {
"control": {"tag": "input"},
"bind": {"readonly": "true()", "type": "string"},
},
"calculate": {"bind": {"type": "string"}},
"acknowledge": {"control": {"tag": "trigger"}, "bind": {"type": "string"}},
"location": {"control": {"tag": "input"}, "bind": {"type": "geopoint"}},
"text": {"control": {"tag": "input"}, "bind": {"type": "string"}},
"select all that apply from": {
"control": {"tag": "select"},
"bind": {"type": "string"},
},
"simserial": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "simserial",
}
},
"string": {"control": {"tag": "input"}, "bind": {"type": "string"}},
"q string": {"control": {"tag": "input"}, "bind": {"type": "string"}},
"imei": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "deviceid",
}
},
"integer": {"control": {"tag": "input"}, "bind": {"type": "int"}},
"datetime": {"control": {"tag": "input"}, "bind": {"type": "dateTime"}},
"q note": {
"control": {"tag": "input"},
"bind": {"readonly": "true()", "type": "string"},
},
"subscriber id": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "subscriberid",
}
},
"decimal": {"control": {"tag": "input"}, "bind": {"type": "decimal"}},
"dateTime": {"control": {"tag": "input"}, "bind": {"type": "dateTime"}},
"q audio": {
"control": {"tag": "upload", "mediatype": "audio/*"},
"bind": {"type": "binary"},
},
"q geopoint": {"control": {"tag": "input"}, "bind": {"type": "geopoint"}},
"q geoshape": {"control": {"tag": "input"}, "bind": {"type": "geoshape"}},
"q geotrace": {"control": {"tag": "input"}, "bind": {"type": "geotrace"}},
"q image": {
"control": {"tag": "upload", "mediatype": "image/*"},
"bind": {"type": "binary"},
},
"get today": {
"bind": {"jr:preload": "date", "type": "date", "jr:preloadParams": "today"}
},
"video": {
"control": {"tag": "upload", "mediatype": "video/*"},
"bind": {"type": "binary"},
},
"q acknowledge": {"control": {"tag": "trigger"}, "bind": {"type": "string"}},
"add video prompt": {
"control": {"tag": "upload", "mediatype": "video/*"},
"bind": {"type": "binary"},
},
"number of days in last month": {
"control": {"tag": "input"},
"bind": {"type": "int", "constraint": "0 <= . and . <= 31"},
"hint": "Enter a number 0-31.",
},
"get sim id": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "simserial",
}
},
"q location": {"control": {"tag": "input"}, "bind": {"type": "geopoint"}},
"select one": {"control": {"tag": "select1"}, "bind": {"type": "string"}},
"select one external": {"control": {"tag": "input"}, "bind": {"type": "string"}},
"add image prompt": {
"control": {"tag": "upload", "mediatype": "image/*"},
"bind": {"type": "binary"},
},
"select all that apply": {"control": {"tag": "select"}, "bind": {"type": "string"}},
"get end time": {
"bind": {
"jr:preload": "timestamp",
"type": "dateTime",
"jr:preloadParams": "end",
}
},
"barcode": {"control": {"tag": "input"}, "bind": {"type": "barcode"}},
"q video": {
"control": {"tag": "upload", "mediatype": "video/*"},
"bind": {"type": "binary"},
},
"geopoint": {"control": {"tag": "input"}, "bind": {"type": "geopoint"}},
"geoshape": {"control": {"tag": "input"}, "bind": {"type": "geoshape"}},
"geotrace": {"control": {"tag": "input"}, "bind": {"type": "geotrace"}},
"select multiple from": {"control": {"tag": "select"}, "bind": {"type": "string"}},
"end time": {
"bind": {
"jr:preload": "timestamp",
"type": "dateTime",
"jr:preloadParams": "end",
}
},
"device id": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "deviceid",
}
},
"subscriberid": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "subscriberid",
}
},
"q barcode": {"control": {"tag": "input"}, "bind": {"type": "barcode"}},
"q select": {"control": {"tag": "select"}, "bind": {"type": "string"}},
"select one using": {"control": {"tag": "select1"}, "bind": {"type": "string"}},
"rank": {"control": {"tag": "odk:rank"}, "bind": {"type": "odk:rank"}},
"image": {
"control": {"tag": "upload", "mediatype": "image/*"},
"bind": {"type": "binary"},
},
"q int": {"control": {"tag": "input"}, "bind": {"type": "int"}},
"add text prompt": {"control": {"tag": "input"}, "bind": {"type": "string"}},
"add date prompt": {"control": {"tag": "input"}, "bind": {"type": "date"}},
"q calculate": {"bind": {"type": "string"}},
"start": {
"bind": {
"jr:preload": "timestamp",
"type": "dateTime",
"jr:preloadParams": "start",
}
},
"trigger": {"control": {"tag": "trigger"}},
"add acknowledge prompt": {
"control": {"tag": "trigger"},
"bind": {"type": "string"},
},
"percentage": {
"control": {"tag": "input"},
"bind": {"type": "int", "constraint": "0 <= . and . <= 100"},
},
"get phone number": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "phonenumber",
}
},
"today": {
"bind": {"jr:preload": "date", "type": "date", "jr:preloadParams": "today"}
},
"gps": {"control": {"tag": "input"}, "bind": {"type": "geopoint"}},
"q date": {"control": {"tag": "input"}, "bind": {"type": "date"}},
"sim id": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "simserial",
}
},
"add decimal prompt": {"control": {"tag": "input"}, "bind": {"type": "decimal"}},
"number of days in last six months": {
"control": {"tag": "input"},
"bind": {"type": "int", "constraint": "0 <= . and . <= 183"},
"hint": "Enter a number 0-183.",
},
"deviceid": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "deviceid",
}
},
"int": {"control": {"tag": "input"}, "bind": {"type": "int"}},
"add barcode prompt": {"control": {"tag": "input"}, "bind": {"type": "barcode"}},
"select multiple using": {"control": {"tag": "select"}, "bind": {"type": "string"}},
"q decimal": {"control": {"tag": "input"}, "bind": {"type": "decimal"}},
"end": {
"bind": {
"jr:preload": "timestamp",
"type": "dateTime",
"jr:preloadParams": "end",
}
},
"add calculate prompt": {"bind": {"type": "string"}},
"add dateTime prompt": {"control": {"tag": "input"}, "bind": {"type": "dateTime"}},
"note": {
"control": {"tag": "input"},
"bind": {"readonly": "true()", "type": "string"},
},
"add location prompt": {"control": {"tag": "input"}, "bind": {"type": "geopoint"}},
"get subscriber id": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "subscriberid",
}
},
"phone number": {
"control": {"tag": "input"},
"bind": {"type": "string", "constraint": "regex(., '^\\d*$')"},
"hint": "Enter numbers only.",
},
"get device id": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "deviceid",
}
},
"add integer prompt": {"control": {"tag": "input"}, "bind": {"type": "int"}},
"q dateTime": {"control": {"tag": "input"}, "bind": {"type": "dateTime"}},
"date": {"control": {"tag": "input"}, "bind": {"type": "date"}},
"q select1": {"control": {"tag": "select1"}, "bind": {"type": "string"}},
"start time": {
"bind": {
"jr:preload": "timestamp",
"type": "dateTime",
"jr:preloadParams": "start",
}
},
"number of days in last year": {
"control": {"tag": "input"},
"bind": {"type": "int", "constraint": "0 <= . and . <= 365"},
"hint": "Enter a number 0-365.",
},
"date time": {"control": {"tag": "input"}, "bind": {"type": "dateTime"}},
"time": {"control": {"tag": "input"}, "bind": {"type": "time"}},
"audio": {
"control": {"tag": "upload", "mediatype": "audio/*"},
"bind": {"type": "binary"},
},
"add select one prompt using": {
"control": {"tag": "select1"},
"bind": {"type": "string"},
},
"hidden": {"bind": {"type": "string"}},
"uri:subscriberid": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "uri:subscriberid",
}
},
"uri:phonenumber": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "uri:phonenumber",
}
},
"uri:simserial": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "uri:simserial",
}
},
"uri:deviceid": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "uri:deviceid",
}
},
"username": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "username",
}
},
"uri:username": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "uri:username",
}
},
"email": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "email",
}
},
"uri:email": {
"bind": {
"jr:preload": "property",
"type": "string",
"jr:preloadParams": "uri:email",
}
},
"osm": {
"control": {"tag": "upload", "mediatype": "osm/*"},
"bind": {"type": "binary"},
},
"file": {
"control": {"tag": "upload", "mediatype": "application/*"},
"bind": {"type": "binary"},
},
"add file prompt": {
"control": {"tag": "upload", "mediatype": "application/*"},
"bind": {"type": "binary"},
},
"range": {"control": {"tag": "range"}, "bind": {"type": "int"}},
"audit": {"bind": {"type": "binary"}},
"xml-external": {
# Only effect is to add an external instance.
},
"csv-external": {
# Only effect is to add an external instance.
},
"start-geopoint": {
"control": {"tag": "action"},
"bind": {"type": "geopoint"},
"action": {"name": "odk:setgeopoint", "event": "odk-instance-first-load"},
},
"background-audio": {
"control": {"tag": "action"},
"bind": {"type": "binary"},
"action": {"name": "odk:recordaudio", "event": "odk-instance-load"},
},
}
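# Illustrative lookup sketch (not part of pyxform itself): each entry above maps a survey
# question type to the body control tag (if any) and the <bind> attributes associated with it.
def _describe_question_type(type_name):
    entry = QUESTION_TYPE_DICT.get(type_name, {})
    control = entry.get("control", {}).get("tag", "(no body control)")
    bind_type = entry.get("bind", {}).get("type", "(no bind type)")
    return "%s: control=%s, bind type=%s" % (type_name, control, bind_type)


if __name__ == "__main__":
    for _name in ("text", "select one", "geopoint", "start"):
        print(_describe_question_type(_name))
    # text: control=input, bind type=string
    # select one: control=select1, bind type=string
    # geopoint: control=input, bind type=geopoint
    # start: control=(no body control), bind type=dateTime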
|
|
#!/usr/bin/env python3
"""
This script compares the interfaces of two versions of Mbed TLS, looking
for backward incompatibilities between two different Git revisions within
an Mbed TLS repository. It must be run from the root of a Git working tree.
For the source (API) and runtime (ABI) interface compatibility, this script
is a small wrapper around the abi-compliance-checker and abi-dumper tools,
applying them to compare the header and library files.
For the storage format, this script compares the automatically generated
storage tests and the manual read tests, and complains if there is a
reduction in coverage. A change in test data will be signaled as a
coverage reduction since the old test data is no longer present. A change in
how test data is presented will be signaled as well; this would be a false
positive.
The results of the API/ABI comparison are either formatted as HTML and stored
at a configurable location, or are given as a brief list of problems.
Returns 0 on success, 1 on non-compliance, and 2 if there is an error
while running the script.
You must run this test from an Mbed TLS root.
"""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import re
import sys
import traceback
import shutil
import subprocess
import argparse
import logging
import tempfile
import fnmatch
from types import SimpleNamespace
import xml.etree.ElementTree as ET
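# Illustrative helper sketch (not used by AbiChecker below): the storage-format coverage
# check described in the module docstring reduces to a set difference -- every normalized
# test-case key present in the old revision must still exist in the new one.
def _missing_storage_tests(old_tests, new_tests):
    """Return the old test-case keys that are absent from the new revision, sorted."""
    return sorted(frozenset(old_tests).difference(new_tests))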
class AbiChecker:
"""API and ABI checker."""
def __init__(self, old_version, new_version, configuration):
"""Instantiate the API/ABI checker.
old_version: RepoVersion containing details to compare against
new_version: RepoVersion containing details to check
configuration.report_dir: directory for output files
configuration.keep_all_reports: if false, delete old reports
configuration.brief: if true, output shorter report to stdout
configuration.check_abi: if true, compare ABIs
configuration.check_api: if true, compare APIs
configuration.check_storage: if true, compare storage format tests
configuration.skip_file: path to file containing symbols and types to skip
"""
self.repo_path = "."
self.log = None
self.verbose = configuration.verbose
self._setup_logger()
self.report_dir = os.path.abspath(configuration.report_dir)
self.keep_all_reports = configuration.keep_all_reports
self.can_remove_report_dir = not (os.path.exists(self.report_dir) or
self.keep_all_reports)
self.old_version = old_version
self.new_version = new_version
self.skip_file = configuration.skip_file
self.check_abi = configuration.check_abi
self.check_api = configuration.check_api
if self.check_abi != self.check_api:
raise Exception('Checking API without ABI or vice versa is not supported')
self.check_storage_tests = configuration.check_storage
self.brief = configuration.brief
self.git_command = "git"
self.make_command = "make"
@staticmethod
def check_repo_path():
if not all(os.path.isdir(d) for d in ["include", "library", "tests"]):
raise Exception("Must be run from Mbed TLS root")
def _setup_logger(self):
self.log = logging.getLogger()
if self.verbose:
self.log.setLevel(logging.DEBUG)
else:
self.log.setLevel(logging.INFO)
self.log.addHandler(logging.StreamHandler())
@staticmethod
def check_abi_tools_are_installed():
for command in ["abi-dumper", "abi-compliance-checker"]:
if not shutil.which(command):
raise Exception("{} not installed, aborting".format(command))
def _get_clean_worktree_for_git_revision(self, version):
"""Make a separate worktree with version.revision checked out.
Do not modify the current worktree."""
git_worktree_path = tempfile.mkdtemp()
if version.repository:
self.log.debug(
"Checking out git worktree for revision {} from {}".format(
version.revision, version.repository
)
)
fetch_output = subprocess.check_output(
[self.git_command, "fetch",
version.repository, version.revision],
cwd=self.repo_path,
stderr=subprocess.STDOUT
)
self.log.debug(fetch_output.decode("utf-8"))
worktree_rev = "FETCH_HEAD"
else:
self.log.debug("Checking out git worktree for revision {}".format(
version.revision
))
worktree_rev = version.revision
worktree_output = subprocess.check_output(
[self.git_command, "worktree", "add", "--detach",
git_worktree_path, worktree_rev],
cwd=self.repo_path,
stderr=subprocess.STDOUT
)
self.log.debug(worktree_output.decode("utf-8"))
version.commit = subprocess.check_output(
[self.git_command, "rev-parse", "HEAD"],
cwd=git_worktree_path,
stderr=subprocess.STDOUT
).decode("ascii").rstrip()
self.log.debug("Commit is {}".format(version.commit))
return git_worktree_path
def _update_git_submodules(self, git_worktree_path, version):
"""If the crypto submodule is present, initialize it.
If version.crypto_revision exists, update it to that revision;
otherwise update it to the default revision."""
update_output = subprocess.check_output(
[self.git_command, "submodule", "update", "--init", '--recursive'],
cwd=git_worktree_path,
stderr=subprocess.STDOUT
)
self.log.debug(update_output.decode("utf-8"))
if not (os.path.exists(os.path.join(git_worktree_path, "crypto"))
and version.crypto_revision):
return
if version.crypto_repository:
fetch_output = subprocess.check_output(
[self.git_command, "fetch", version.crypto_repository,
version.crypto_revision],
cwd=os.path.join(git_worktree_path, "crypto"),
stderr=subprocess.STDOUT
)
self.log.debug(fetch_output.decode("utf-8"))
crypto_rev = "FETCH_HEAD"
else:
crypto_rev = version.crypto_revision
checkout_output = subprocess.check_output(
[self.git_command, "checkout", crypto_rev],
cwd=os.path.join(git_worktree_path, "crypto"),
stderr=subprocess.STDOUT
)
self.log.debug(checkout_output.decode("utf-8"))
def _build_shared_libraries(self, git_worktree_path, version):
"""Build the shared libraries in the specified worktree."""
my_environment = os.environ.copy()
my_environment["CFLAGS"] = "-g -Og"
my_environment["SHARED"] = "1"
if os.path.exists(os.path.join(git_worktree_path, "crypto")):
my_environment["USE_CRYPTO_SUBMODULE"] = "1"
make_output = subprocess.check_output(
[self.make_command, "lib"],
env=my_environment,
cwd=git_worktree_path,
stderr=subprocess.STDOUT
)
self.log.debug(make_output.decode("utf-8"))
for root, _dirs, files in os.walk(git_worktree_path):
for file in fnmatch.filter(files, "*.so"):
version.modules[os.path.splitext(file)[0]] = (
os.path.join(root, file)
)
@staticmethod
def _pretty_revision(version):
if version.revision == version.commit:
return version.revision
else:
return "{} ({})".format(version.revision, version.commit)
def _get_abi_dumps_from_shared_libraries(self, version):
"""Generate the ABI dumps for the specified git revision.
The shared libraries must have been built and the module paths
present in version.modules."""
for mbed_module, module_path in version.modules.items():
output_path = os.path.join(
self.report_dir, "{}-{}-{}.dump".format(
mbed_module, version.revision, version.version
)
)
abi_dump_command = [
"abi-dumper",
module_path,
"-o", output_path,
"-lver", self._pretty_revision(version),
]
abi_dump_output = subprocess.check_output(
abi_dump_command,
stderr=subprocess.STDOUT
)
self.log.debug(abi_dump_output.decode("utf-8"))
version.abi_dumps[mbed_module] = output_path
@staticmethod
def _normalize_storage_test_case_data(line):
"""Eliminate cosmetic or irrelevant details in storage format test cases."""
line = re.sub(r'\s+', r'', line)
return line
def _read_storage_tests(self,
directory,
filename,
is_generated,
storage_tests):
"""Record storage tests from the given file.
Populate the storage_tests dictionary with test cases read from
filename under directory.
"""
at_paragraph_start = True
description = None
full_path = os.path.join(directory, filename)
with open(full_path) as fd:
for line_number, line in enumerate(fd, 1):
line = line.strip()
if not line:
at_paragraph_start = True
continue
if line.startswith('#'):
continue
if at_paragraph_start:
description = line.strip()
at_paragraph_start = False
continue
if line.startswith('depends_on:'):
continue
# We've reached a test case data line
test_case_data = self._normalize_storage_test_case_data(line)
if not is_generated:
# In manual test data, only look at read tests.
function_name = test_case_data.split(':', 1)[0]
if 'read' not in function_name.split('_'):
continue
metadata = SimpleNamespace(
filename=filename,
line_number=line_number,
description=description
)
storage_tests[test_case_data] = metadata
@staticmethod
def _list_generated_test_data_files(git_worktree_path):
"""List the generated test data files."""
output = subprocess.check_output(
['tests/scripts/generate_psa_tests.py', '--list'],
cwd=git_worktree_path,
).decode('ascii')
return [line for line in output.split('\n') if line]
def _get_storage_format_tests(self, version, git_worktree_path):
"""Record the storage format tests for the specified git version.
The storage format tests are the test suite data files whose name
contains "storage_format".
The version must be checked out at git_worktree_path.
This function creates or updates the generated data files.
"""
# Existing test data files. This may be missing some automatically
# generated files if they haven't been generated yet.
storage_data_files = set(glob.glob(
'tests/suites/test_suite_*storage_format*.data'
))
# Discover and (re)generate automatically generated data files.
to_be_generated = set()
for filename in self._list_generated_test_data_files(git_worktree_path):
if 'storage_format' in filename:
storage_data_files.add(filename)
to_be_generated.add(filename)
subprocess.check_call(
['tests/scripts/generate_psa_tests.py'] + sorted(to_be_generated),
cwd=git_worktree_path,
)
for test_file in sorted(storage_data_files):
self._read_storage_tests(git_worktree_path,
test_file,
test_file in to_be_generated,
version.storage_tests)
def _cleanup_worktree(self, git_worktree_path):
"""Remove the specified git worktree."""
shutil.rmtree(git_worktree_path)
worktree_output = subprocess.check_output(
[self.git_command, "worktree", "prune"],
cwd=self.repo_path,
stderr=subprocess.STDOUT
)
self.log.debug(worktree_output.decode("utf-8"))
def _get_abi_dump_for_ref(self, version):
"""Generate the interface information for the specified git revision."""
git_worktree_path = self._get_clean_worktree_for_git_revision(version)
self._update_git_submodules(git_worktree_path, version)
if self.check_abi:
self._build_shared_libraries(git_worktree_path, version)
self._get_abi_dumps_from_shared_libraries(version)
if self.check_storage_tests:
self._get_storage_format_tests(version, git_worktree_path)
self._cleanup_worktree(git_worktree_path)
def _remove_children_with_tag(self, parent, tag):
children = parent.getchildren()
for child in children:
if child.tag == tag:
parent.remove(child)
else:
self._remove_children_with_tag(child, tag)
def _remove_extra_detail_from_report(self, report_root):
for tag in ['test_info', 'test_results', 'problem_summary',
'added_symbols', 'affected']:
self._remove_children_with_tag(report_root, tag)
for report in report_root:
for problems in report.getchildren()[:]:
if not problems.getchildren():
report.remove(problems)
def _abi_compliance_command(self, mbed_module, output_path):
"""Build the command to run to analyze the library mbed_module.
The report will be placed in output_path."""
abi_compliance_command = [
"abi-compliance-checker",
"-l", mbed_module,
"-old", self.old_version.abi_dumps[mbed_module],
"-new", self.new_version.abi_dumps[mbed_module],
"-strict",
"-report-path", output_path,
]
if self.skip_file:
abi_compliance_command += ["-skip-symbols", self.skip_file,
"-skip-types", self.skip_file]
if self.brief:
abi_compliance_command += ["-report-format", "xml",
"-stdout"]
return abi_compliance_command
def _is_library_compatible(self, mbed_module, compatibility_report):
"""Test if the library mbed_module has remained compatible.
Append a message regarding compatibility to compatibility_report."""
output_path = os.path.join(
self.report_dir, "{}-{}-{}.html".format(
mbed_module, self.old_version.revision,
self.new_version.revision
)
)
try:
subprocess.check_output(
self._abi_compliance_command(mbed_module, output_path),
stderr=subprocess.STDOUT
)
except subprocess.CalledProcessError as err:
if err.returncode != 1:
raise err
if self.brief:
self.log.info(
"Compatibility issues found for {}".format(mbed_module)
)
report_root = ET.fromstring(err.output.decode("utf-8"))
self._remove_extra_detail_from_report(report_root)
self.log.info(ET.tostring(report_root).decode("utf-8"))
else:
self.can_remove_report_dir = False
compatibility_report.append(
"Compatibility issues found for {}, "
"for details see {}".format(mbed_module, output_path)
)
return False
compatibility_report.append(
"No compatibility issues for {}".format(mbed_module)
)
if not (self.keep_all_reports or self.brief):
os.remove(output_path)
return True
@staticmethod
def _is_storage_format_compatible(old_tests, new_tests,
compatibility_report):
"""Check whether all tests present in old_tests are also in new_tests.
Append a message regarding compatibility to compatibility_report.
"""
missing = frozenset(old_tests.keys()).difference(new_tests.keys())
for test_data in sorted(missing):
metadata = old_tests[test_data]
compatibility_report.append(
'Test case from {} line {} "{}" has disappeared: {}'.format(
metadata.filename, metadata.line_number,
metadata.description, test_data
)
)
compatibility_report.append(
'FAIL: {}/{} storage format test cases have changed or disappeared.'.format(
len(missing), len(old_tests)
) if missing else
'PASS: All {} storage format test cases are preserved.'.format(
len(old_tests)
)
)
compatibility_report.append(
'Info: number of storage format tests cases: {} -> {}.'.format(
len(old_tests), len(new_tests)
)
)
return not missing
def get_abi_compatibility_report(self):
"""Generate a report of the differences between the reference ABI
and the new ABI. ABI dumps from self.old_version and self.new_version
must be available."""
compatibility_report = ["Checking evolution from {} to {}".format(
self._pretty_revision(self.old_version),
self._pretty_revision(self.new_version)
)]
compliance_return_code = 0
if self.check_abi:
shared_modules = list(set(self.old_version.modules.keys()) &
set(self.new_version.modules.keys()))
for mbed_module in shared_modules:
if not self._is_library_compatible(mbed_module,
compatibility_report):
compliance_return_code = 1
if self.check_storage_tests:
if not self._is_storage_format_compatible(
self.old_version.storage_tests,
self.new_version.storage_tests,
compatibility_report):
compliance_return_code = 1
for version in [self.old_version, self.new_version]:
for mbed_module, mbed_module_dump in version.abi_dumps.items():
os.remove(mbed_module_dump)
if self.can_remove_report_dir:
os.rmdir(self.report_dir)
self.log.info("\n".join(compatibility_report))
return compliance_return_code
def check_for_abi_changes(self):
"""Generate a report of ABI differences
between self.old_rev and self.new_rev."""
self.check_repo_path()
if self.check_api or self.check_abi:
self.check_abi_tools_are_installed()
self._get_abi_dump_for_ref(self.old_version)
self._get_abi_dump_for_ref(self.new_version)
return self.get_abi_compatibility_report()
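# Illustrative sketch, not part of the original script: exercising the
# AbiChecker._is_storage_format_compatible() staticmethod above with toy
# dictionaries shaped like the ones _read_storage_tests() builds.  The test
# case string and file name below are hypothetical.
def _example_storage_compatibility_check():
    from types import SimpleNamespace
    old_tests = {
        'read_key:"0001":PASS': SimpleNamespace(
            filename='test_suite_example_storage_format.data',
            line_number=3,
            description='example storage format read test',
        ),
    }
    new_tests = {}  # the old test case is missing from the new version
    report = []
    compatible = AbiChecker._is_storage_format_compatible(
        old_tests, new_tests, report)
    # compatible is False here, and report lists the disappeared test case
    # followed by the FAIL summary and the Info line.
    return compatible, report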
def run_main():
try:
parser = argparse.ArgumentParser(
description=__doc__
)
parser.add_argument(
"-v", "--verbose", action="store_true",
help="set verbosity level",
)
parser.add_argument(
"-r", "--report-dir", type=str, default="reports",
help="directory where reports are stored, default is reports",
)
parser.add_argument(
"-k", "--keep-all-reports", action="store_true",
help="keep all reports, even if there are no compatibility issues",
)
parser.add_argument(
"-o", "--old-rev", type=str, help="revision for old version.",
required=True,
)
parser.add_argument(
"-or", "--old-repo", type=str, help="repository for old version."
)
parser.add_argument(
"-oc", "--old-crypto-rev", type=str,
help="revision for old crypto submodule."
)
parser.add_argument(
"-ocr", "--old-crypto-repo", type=str,
help="repository for old crypto submodule."
)
parser.add_argument(
"-n", "--new-rev", type=str, help="revision for new version",
required=True,
)
parser.add_argument(
"-nr", "--new-repo", type=str, help="repository for new version."
)
parser.add_argument(
"-nc", "--new-crypto-rev", type=str,
help="revision for new crypto version"
)
parser.add_argument(
"-ncr", "--new-crypto-repo", type=str,
help="repository for new crypto submodule."
)
parser.add_argument(
"-s", "--skip-file", type=str,
help=("path to file containing symbols and types to skip "
"(typically \"-s identifiers\" after running "
"\"tests/scripts/list-identifiers.sh --internal\")")
)
parser.add_argument(
"--check-abi",
action='store_true', default=True,
help="Perform ABI comparison (default: yes)"
)
parser.add_argument("--no-check-abi", action='store_false', dest='check_abi')
parser.add_argument(
"--check-api",
action='store_true', default=True,
help="Perform API comparison (default: yes)"
)
parser.add_argument("--no-check-api", action='store_false', dest='check_api')
parser.add_argument(
"--check-storage",
action='store_true', default=True,
help="Perform storage tests comparison (default: yes)"
)
parser.add_argument("--no-check-storage", action='store_false', dest='check_storage')
parser.add_argument(
"-b", "--brief", action="store_true",
help="output only the list of issues to stdout, instead of a full report",
)
abi_args = parser.parse_args()
if os.path.isfile(abi_args.report_dir):
print("Error: {} is not a directory".format(abi_args.report_dir))
parser.exit()
old_version = SimpleNamespace(
version="old",
repository=abi_args.old_repo,
revision=abi_args.old_rev,
commit=None,
crypto_repository=abi_args.old_crypto_repo,
crypto_revision=abi_args.old_crypto_rev,
abi_dumps={},
storage_tests={},
modules={}
)
new_version = SimpleNamespace(
version="new",
repository=abi_args.new_repo,
revision=abi_args.new_rev,
commit=None,
crypto_repository=abi_args.new_crypto_repo,
crypto_revision=abi_args.new_crypto_rev,
abi_dumps={},
storage_tests={},
modules={}
)
configuration = SimpleNamespace(
verbose=abi_args.verbose,
report_dir=abi_args.report_dir,
keep_all_reports=abi_args.keep_all_reports,
brief=abi_args.brief,
check_abi=abi_args.check_abi,
check_api=abi_args.check_api,
check_storage=abi_args.check_storage,
skip_file=abi_args.skip_file
)
abi_check = AbiChecker(old_version, new_version, configuration)
return_code = abi_check.check_for_abi_changes()
sys.exit(return_code)
except Exception: # pylint: disable=broad-except
# Print the backtrace and exit explicitly so as to exit with
# status 2, not 1.
traceback.print_exc()
sys.exit(2)
if __name__ == "__main__":
run_main()
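# Illustrative sketch, not part of the original script: driving AbiChecker
# programmatically instead of via run_main()'s CLI parsing.  The revision
# names "mbedtls-2.28.0" and "development" are hypothetical placeholders;
# the SimpleNamespace fields mirror the ones built in run_main() above.
def _example_programmatic_abi_check():
    from types import SimpleNamespace
    old_version = SimpleNamespace(
        version="old", repository=None, revision="mbedtls-2.28.0",
        commit=None, crypto_repository=None, crypto_revision=None,
        abi_dumps={}, storage_tests={}, modules={})
    new_version = SimpleNamespace(
        version="new", repository=None, revision="development",
        commit=None, crypto_repository=None, crypto_revision=None,
        abi_dumps={}, storage_tests={}, modules={})
    configuration = SimpleNamespace(
        verbose=True, report_dir="reports", keep_all_reports=False,
        brief=True, check_abi=True, check_api=True, check_storage=True,
        skip_file=None)
    checker = AbiChecker(old_version, new_version, configuration)
    # Returns 0 if no incompatibilities were found, 1 otherwise.
    return checker.check_for_abi_changes()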
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid as stdlib_uuid
from lxml import etree
import webob
from nova.api.openstack.compute import consoles
from nova.compute import vm_states
from nova import console
from nova import db
from nova import exception
from nova.openstack.common import timeutils
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests import matchers
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class FakeInstanceDB(object):
def __init__(self):
self.instances_by_id = {}
self.ids_by_uuid = {}
self.max_id = 0
def return_server_by_id(self, context, id):
if id not in self.instances_by_id:
self._add_server(id=id)
return dict(self.instances_by_id[id])
def return_server_by_uuid(self, context, uuid):
if uuid not in self.ids_by_uuid:
self._add_server(uuid=uuid)
return dict(self.instances_by_id[self.ids_by_uuid[uuid]])
def _add_server(self, id=None, uuid=None):
if id is None:
id = self.max_id + 1
if uuid is None:
uuid = str(stdlib_uuid.uuid4())
instance = stub_instance(id, uuid=uuid)
self.instances_by_id[id] = instance
self.ids_by_uuid[uuid] = id
if id > self.max_id:
self.max_id = id
def stub_instance(id, user_id='fake', project_id='fake', host=None,
vm_state=None, task_state=None,
reservation_id="", uuid=FAKE_UUID, image_ref="10",
flavor_id="1", name=None, key_name='',
access_ipv4=None, access_ipv6=None, progress=0):
if host is not None:
host = str(host)
if key_name:
key_data = 'FAKE'
else:
key_data = ''
# ReservationID isn't sent back, hack it in there.
server_name = name or "server%s" % id
if reservation_id != "":
server_name = "reservation_%s" % (reservation_id, )
instance = {
"id": int(id),
"created_at": datetime.datetime(2010, 10, 10, 12, 0, 0),
"updated_at": datetime.datetime(2010, 11, 11, 11, 0, 0),
"admin_pass": "",
"user_id": user_id,
"project_id": project_id,
"image_ref": image_ref,
"kernel_id": "",
"ramdisk_id": "",
"launch_index": 0,
"key_name": key_name,
"key_data": key_data,
"vm_state": vm_state or vm_states.BUILDING,
"task_state": task_state,
"memory_mb": 0,
"vcpus": 0,
"root_gb": 0,
"hostname": "",
"host": host,
"instance_type": {},
"user_data": "",
"reservation_id": reservation_id,
"mac_address": "",
"scheduled_at": timeutils.utcnow(),
"launched_at": timeutils.utcnow(),
"terminated_at": timeutils.utcnow(),
"availability_zone": "",
"display_name": server_name,
"display_description": "",
"locked": False,
"metadata": [],
"access_ip_v4": access_ipv4,
"access_ip_v6": access_ipv6,
"uuid": uuid,
"progress": progress}
return instance
class ConsolesControllerTest(test.TestCase):
def setUp(self):
super(ConsolesControllerTest, self).setUp()
self.flags(verbose=True)
self.instance_db = FakeInstanceDB()
self.stubs.Set(db, 'instance_get',
self.instance_db.return_server_by_id)
self.stubs.Set(db, 'instance_get_by_uuid',
self.instance_db.return_server_by_uuid)
self.uuid = str(stdlib_uuid.uuid4())
self.url = '/v2/fake/servers/%s/consoles' % self.uuid
self.controller = consoles.Controller()
def test_create_console(self):
def fake_create_console(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
return {}
self.stubs.Set(console.api.API, 'create_console', fake_create_console)
req = fakes.HTTPRequest.blank(self.url)
self.controller.create(req, self.uuid)
def test_show_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool, instance_name='inst-0001')
expected = {'console': {'id': 20,
'port': 'fake_port',
'host': 'fake_hostname',
'password': 'fake_password',
'instance_name': 'inst-0001',
'console_type': 'fake_type'}}
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
res_dict = self.controller.show(req, self.uuid, '20')
self.assertThat(res_dict, matchers.DictMatches(expected))
def test_show_console_unknown_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_show_console_unknown_instance(self):
def fake_get_console(cons_self, context, instance_id, console_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
req, self.uuid, '20')
def test_list_consoles(self):
def fake_get_consoles(cons_self, context, instance_id):
self.assertEqual(instance_id, self.uuid)
pool1 = dict(console_type='fake_type',
public_hostname='fake_hostname')
cons1 = dict(id=10, password='fake_password',
port='fake_port', pool=pool1)
pool2 = dict(console_type='fake_type2',
public_hostname='fake_hostname2')
cons2 = dict(id=11, password='fake_password2',
port='fake_port2', pool=pool2)
return [cons1, cons2]
expected = {'consoles':
[{'console': {'id': 10, 'console_type': 'fake_type'}},
{'console': {'id': 11, 'console_type': 'fake_type2'}}]}
self.stubs.Set(console.api.API, 'get_consoles', fake_get_consoles)
req = fakes.HTTPRequest.blank(self.url)
res_dict = self.controller.index(req, self.uuid)
self.assertThat(res_dict, matchers.DictMatches(expected))
def test_delete_console(self):
def fake_get_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
pool = dict(console_type='fake_type',
public_hostname='fake_hostname')
return dict(id=console_id, password='fake_password',
port='fake_port', pool=pool)
def fake_delete_console(cons_self, context, instance_id, console_id):
self.assertEqual(instance_id, self.uuid)
self.assertEqual(console_id, 20)
self.stubs.Set(console.api.API, 'get_console', fake_get_console)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.controller.delete(req, self.uuid, '20')
def test_delete_console_unknown_console(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.ConsoleNotFound(console_id=console_id)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.uuid, '20')
def test_delete_console_unknown_instance(self):
def fake_delete_console(cons_self, context, instance_id, console_id):
raise exception.InstanceNotFound(instance_id=instance_id)
self.stubs.Set(console.api.API, 'delete_console', fake_delete_console)
req = fakes.HTTPRequest.blank(self.url + '/20')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, self.uuid, '20')
class TestConsolesXMLSerializer(test.TestCase):
def test_show(self):
fixture = {'console': {'id': 20,
'password': 'fake_password',
'port': 'fake_port',
'host': 'fake_hostname',
'console_type': 'fake_type'}}
output = consoles.ConsoleTemplate().serialize(fixture)
res_tree = etree.XML(output)
self.assertEqual(res_tree.tag, 'console')
self.assertEqual(res_tree.xpath('id')[0].text, '20')
self.assertEqual(res_tree.xpath('port')[0].text, 'fake_port')
self.assertEqual(res_tree.xpath('host')[0].text, 'fake_hostname')
self.assertEqual(res_tree.xpath('password')[0].text, 'fake_password')
self.assertEqual(res_tree.xpath('console_type')[0].text, 'fake_type')
def test_index(self):
fixture = {'consoles': [{'console': {'id': 10,
'console_type': 'fake_type'}},
{'console': {'id': 11,
'console_type': 'fake_type2'}}]}
output = consoles.ConsolesTemplate().serialize(fixture)
res_tree = etree.XML(output)
self.assertEqual(res_tree.tag, 'consoles')
self.assertEqual(len(res_tree), 2)
self.assertEqual(res_tree[0].tag, 'console')
self.assertEqual(res_tree[1].tag, 'console')
self.assertEqual(len(res_tree[0]), 1)
self.assertEqual(res_tree[0][0].tag, 'console')
self.assertEqual(len(res_tree[1]), 1)
self.assertEqual(res_tree[1][0].tag, 'console')
self.assertEqual(res_tree[0][0].xpath('id')[0].text, '10')
self.assertEqual(res_tree[1][0].xpath('id')[0].text, '11')
self.assertEqual(res_tree[0][0].xpath('console_type')[0].text,
'fake_type')
self.assertEqual(res_tree[1][0].xpath('console_type')[0].text,
'fake_type2')
|
|
from webapp2_extras.appengine.auth.models import User
from google.appengine.ext import ndb
from google.appengine.ext.ndb import msgprop
from web.alittlecloser_api_messages import MediaJsonFinalResponse
import json
from alittlecloser_api_messages import ConnectionResponseMessage, SingleConnectionReponseMessage, CommentsResponseMessage, MediaJsonFinalResponse, LatLngMessage
class User(User):
"""
Universal user model. Can be used with App Engine's default users API,
own auth or third party authentication methods (OpenID, OAuth etc).
based on https://gist.github.com/kylefinley
"""
#: Creation date.
created = ndb.DateTimeProperty(auto_now_add=True)
#: Modification date.
updated = ndb.DateTimeProperty(auto_now=True)
#: User defined unique name, also used as key_name.
# Not used by OpenID
username = ndb.StringProperty()
#: User Name
name = ndb.StringProperty()
#: User Last Name
last_name = ndb.StringProperty()
#: User email
email = ndb.StringProperty()
#: Hashed password. Only set for own authentication.
# Not required because third party authentication
# doesn't use password.
password = ndb.StringProperty()
#: User Country
country = ndb.StringProperty()
#: User TimeZone
tz = ndb.StringProperty()
#: Account activation verifies email
activated = ndb.BooleanProperty(default=False)
    type = ndb.StringProperty()
    loc_name = ndb.StringProperty()
    imgs = ndb.StringProperty()
    requests = ndb.StringProperty()
    summary = ndb.StringProperty()
@classmethod
def get_by_email(cls, email):
"""Returns a user object based on an email.
:param email:
String representing the user email. Examples:
:returns:
A user object.
"""
return cls.query(cls.email == email).get()
@classmethod
def create_resend_token(cls, user_id):
entity = cls.token_model.create(user_id, 'resend-activation-mail')
return entity.token
@classmethod
def validate_resend_token(cls, user_id, token):
return cls.validate_token(user_id, 'resend-activation-mail', token)
@classmethod
def delete_resend_token(cls, user_id, token):
cls.token_model.get_key(user_id, 'resend-activation-mail', token).delete()
def get_social_providers_names(self):
social_user_objects = SocialUser.get_by_user(self.key)
result = []
# import logging
for social_user_object in social_user_objects:
# logging.error(social_user_object.extra_data['screen_name'])
result.append(social_user_object.provider)
return result
def get_social_providers_info(self):
providers = self.get_social_providers_names()
result = {'used': [], 'unused': []}
for k,v in SocialUser.PROVIDERS_INFO.items():
if k in providers:
result['used'].append(v)
else:
result['unused'].append(v)
return result
@classmethod
def get_user_by_id(cls, user_id):
return cls.get_by_id(int(user_id))
class LogVisit(ndb.Model):
user = ndb.KeyProperty(kind=User)
uastring = ndb.StringProperty()
ip = ndb.StringProperty()
timestamp = ndb.StringProperty()
class LogEmail(ndb.Model):
sender = ndb.StringProperty(
required=True)
to = ndb.StringProperty(
required=True)
subject = ndb.StringProperty(
required=True)
body = ndb.TextProperty()
when = ndb.DateTimeProperty()
class SocialUser(ndb.Model):
PROVIDERS_INFO = { # uri is for OpenID only (not OAuth)
'google': {'name': 'google', 'label': 'Google', 'uri': 'gmail.com'},
'github': {'name': 'github', 'label': 'Github', 'uri': ''},
'facebook': {'name': 'facebook', 'label': 'Facebook', 'uri': ''},
'linkedin': {'name': 'linkedin', 'label': 'LinkedIn', 'uri': ''},
'myopenid': {'name': 'myopenid', 'label': 'MyOpenid', 'uri': 'myopenid.com'},
'twitter': {'name': 'twitter', 'label': 'Twitter', 'uri': ''},
'yahoo': {'name': 'yahoo', 'label': 'Yahoo!', 'uri': 'yahoo.com'},
}
user = ndb.KeyProperty(kind=User)
provider = ndb.StringProperty()
uid = ndb.StringProperty()
extra_data = ndb.JsonProperty()
@classmethod
def get_by_user(cls, user):
return cls.query(cls.user == user).fetch()
@classmethod
def get_by_user_and_provider(cls, user, provider):
return cls.query(cls.user == user, cls.provider == provider).get()
@classmethod
def get_by_provider_and_uid(cls, provider, uid):
return cls.query(cls.provider == provider, cls.uid == uid).get()
@classmethod
def check_unique_uid(cls, provider, uid):
# pair (provider, uid) should be unique
test_unique_provider = cls.get_by_provider_and_uid(provider, uid)
if test_unique_provider is not None:
return False
else:
return True
@classmethod
def check_unique_user(cls, provider, user):
# pair (user, provider) should be unique
test_unique_user = cls.get_by_user_and_provider(user, provider)
if test_unique_user is not None:
return False
else:
return True
@classmethod
def check_unique(cls, user, provider, uid):
# pair (provider, uid) should be unique and pair (user, provider) should be unique
return cls.check_unique_uid(provider, uid) and cls.check_unique_user(provider, user)
@staticmethod
def open_id_providers():
return [k for k,v in SocialUser.PROVIDERS_INFO.items() if v['uri']]
class Connection(ndb.Model):
"""
Model for the creation of connection results
"""
#: Creation date.
created = ndb.DateTimeProperty(auto_now_add=True)
#: Modification date.
updated = ndb.DateTimeProperty(auto_now=True)
# Link to blog posts about, with some metadata about the post
blog_url = ndb.JsonProperty()
# T/f for if connection is completed
completed = ndb.BooleanProperty(default=False)
# Date completed
completion_date = ndb.DateTimeProperty()
    #Connection stage (0=submitted, 1=accepted, 2=completed)
connection_stage = ndb.IntegerProperty(default=0)
#Lets us know which table to lookup the user or person in
connection_type = ndb.StringProperty()
#json_obj of related media
media = ndb.JsonProperty()
#latitude of connection person/thing
latitude = ndb.FloatProperty()
#longitude of connection person/thing
longitude = ndb.FloatProperty()
#Location geopoint
loc_geopt = ndb.GeoPtProperty()
#name of the locations
loc_name = ndb.StringProperty()
#exact address for viewing only by the admins
private_loc = ndb.StringProperty()
#map marker color
marker_color = ndb.StringProperty()
#map marker size
marker_size = ndb.StringProperty(default="medium")
#map marker symbol
marker_symbol = ndb.StringProperty()
#Key of user requesting
user_key = ndb.KeyProperty()
#name of user requesting
user_name = ndb.StringProperty()
#picture of user requesting
user_picture = ndb.StringProperty()
#personalized message on request
personalized_message = ndb.StringProperty()
#primary media item
primary_media = ndb.StringProperty()
#IP from requesting user
ip = ndb.StringProperty()
#UA string of user requesting
uastring = ndb.TextProperty()
#posts related to this one (populated by analytics)
related_post_id = ndb.StringProperty()
    #Why the request is being made (private, for admins)
request_reason = ndb.StringProperty()
#public summary of the request
summary = ndb.StringProperty()
#admin added body
body = ndb.StringProperty()
#title of the request
title = ndb.StringProperty()
#social media share links
social_media_json = ndb.StringProperty()
#request type - give or get?
type = ndb.StringProperty()
#connection key to the user receiving the request
personthing_key = ndb.KeyProperty()
#name of person receiving item
personthing_name = ndb.StringProperty()
# media binary for api
media_binary = msgprop.MessageProperty(MediaJsonFinalResponse, repeated=True)
def to_message(self, media_response):
"""Turns the Connection entity into a ProtoRPC object.
"""
if self.personthing_key:
self.personthing_id = str(self.personthing_key.id())
else:
self.personthing_id = ""
if self.user_key:
self.user_id = self.user_key.id()
else:
self.user_id = None
return ConnectionResponseMessage(title = self.title,
type = self.type,
blog_url=self.blog_url,
completed = self.completed,
completion_date = self.completion_date,
connection_stage = self.connection_stage,
connection_type = self.connection_type,
latitude = self.latitude,
longitude = self.longitude,
loc_name = self.loc_name,
marker_color = self.marker_color,
marker_size = self.marker_size,
marker_symbol = self.marker_symbol,
media = media_response,
user_id = self.user_id,
user_name = self.user_name,
user_picture = self.user_picture,
personthing_id = self.personthing_id,
personthing_name = self.personthing_name,
primary_media = self.primary_media,
summary = self.summary,
req_reason = self.request_reason,
social_media_json = self.social_media_json,
created = self.created,
updated = self.updated,
id = self.key.id())
def to_message_no_media(self):
"""Turns the Connection entity into a ProtoRPC object.
"""
if self.personthing_key:
self.personthing_id = str(self.personthing_key.id())
else:
self.personthing_id = ""
if self.user_key:
self.user_id = self.user_key.id()
else:
self.user_id = None
return ConnectionResponseMessage(title = self.title,
type = self.type,
blog_url=self.blog_url,
completed = self.completed,
completion_date = self.completion_date,
connection_stage = self.connection_stage,
connection_type = self.connection_type,
latitude = self.latitude,
longitude = self.longitude,
loc_name = self.loc_name,
marker_color = self.marker_color,
marker_size = self.marker_size,
marker_symbol = self.marker_symbol,
user_id = self.user_id,
user_name = self.user_name,
user_picture = self.user_picture,
personthing_id = self.personthing_id,
personthing_name = self.personthing_name,
primary_media = self.primary_media,
summary = self.summary,
req_reason = self.request_reason,
social_media_json = self.social_media_json,
created = self.created,
updated = self.updated,
id = self.key.id())
def to_indiv_message(self,media_response):
"""Turns the Connection entity into a ProtoRPC object.
"""
if self.personthing_key:
self.personthing_id = str(self.personthing_key.id())
else:
self.personthing_id = ""
if self.user_key:
self.user_id = self.user_key.id()
else:
self.user_id = None
return SingleConnectionReponseMessage(title = self.title,
type = self.type,
blog_url=self.blog_url,
completed = self.completed,
completion_date = self.completion_date,
connection_stage = self.connection_stage,
connection_type = self.connection_type,
latitude = self.latitude,
longitude = self.longitude,
loc_name = self.loc_name,
marker_color = self.marker_color,
marker_size = self.marker_size,
marker_symbol = self.marker_symbol,
media = media_response,
user_id = self.user_id,
user_name = self.user_name,
user_picture = self.user_picture,
personthing_id = self.personthing_id,
personthing_name = self.personthing_name,
primary_media = self.primary_media,
summary = self.summary,
req_reason = self.request_reason,
body = self.body,
social_media_json = self.social_media_json,
created = self.created,
updated = self.updated,
id = self.key.id())
@classmethod
def get_connections(cls, curs, limit, filter_dictionary):
qry_var = cls.query()
for k,v in filter_dictionary.iteritems():
qry_var_new = qry_var.filter(cls._properties[k] == v)
qry_var = qry_var_new
return qry_var.order(-cls.connection_stage, -cls.created).fetch_page(limit, start_cursor=curs)
@classmethod
def get_connection_by_id(cls, connection_id):
return cls.get_by_id(int(connection_id))
@classmethod
def get_connection_by_title(cls, title_name):
return cls.query(cls.title == title_name).fetch()
@classmethod
def get_connections_by_user_id_status(cls, user_key):
return cls.query(ndb.AND(cls.user_key == user_key, cls.connection_stage == 0)).fetch()
class PeopleThing(ndb.Model):
"""
Model for the person or thing that is getting the gift
"""
#: Creation date.
created = ndb.DateTimeProperty(auto_now_add=True)
#: Modification date.
updated = ndb.DateTimeProperty(auto_now=True)
#json_obj of related media
media = ndb.JsonProperty(default="{'media': 'none'}")
#latitude of connection person/thing
latitude = ndb.FloatProperty(default=0)
#longitude of connection person/thing
longitude = ndb.FloatProperty(default=0)
#name of location of the person/thing
loc_name = ndb.StringProperty()
#Key to user that created
user_key = ndb.KeyProperty()
#name of user that created
user_name = ndb.StringProperty()
#Name of the person/thing
name = ndb.StringProperty()
#age
age = ndb.IntegerProperty()
#type of thing/person
type = ndb.StringProperty()
@classmethod
def get_person_by_id(cls, personthing_id):
return cls.get_by_id(int(personthing_id))
class Relationships(ndb.Model):
"""
Model for the relationship between people, users, and things
"""
#: Creation date.
created = ndb.DateTimeProperty(auto_now_add=True)
#: Modification date.
updated = ndb.DateTimeProperty(auto_now=True)
#Key to user that created
user_key = ndb.KeyProperty()
#name of user that created
user_name = ndb.StringProperty()
#user or peoplething
type = ndb.StringProperty()
#key to the people table
people_key = ndb.KeyProperty()
#name of the personthing
people_name = ndb.StringProperty()
#type of relationship (mom, home, brother)
relationship_type = ndb.StringProperty()
#relationship start date
relationship_start_date = ndb.DateTimeProperty()
#relationship end date
relationship_end_date = ndb.DateTimeProperty()
@classmethod
def get_relationship_by_id(cls, relationship_id):
return cls.get_by_id(int(relationship_id))
class Comments(ndb.Model):
"""
Model for the comments about the connections
"""
#: Creation date.
created = ndb.DateTimeProperty(auto_now_add=True)
#: Modification date.
updated = ndb.DateTimeProperty(auto_now=True)
#json_obj of related media
media = ndb.JsonProperty()
#Key to user that created
user_key = ndb.KeyProperty()
#name of user that created
user_name = ndb.StringProperty()
#type of post -- photo, video, html
post_type = ndb.StringProperty()
#post tags
tags = ndb.StringProperty()
#title of post
title = ndb.StringProperty()
#body of the blog post
body = ndb.StringProperty()
#person key post is associated with
personthing_key = ndb.KeyProperty()
#name of personthing
personthing_name = ndb.StringProperty()
#key to connection
connection_key = ndb.KeyProperty()
#title of connection
connection_title = ndb.StringProperty()
#social media sharing
social_media_json = ndb.JsonProperty()
#IP from requesting user
ip = ndb.StringProperty()
#UA string of user requesting
uastring = ndb.TextProperty()
def to_comment_message(self):
"""Turns the Connection entity into a ProtoRPC object.
"""
if self.personthing_key:
self.personthing_id = str(self.personthing_key.id())
else:
self.personthing_id = ""
if self.user_key:
self.user_id = str(self.user_key.id())
else:
self.user_id = ""
if self.connection_key:
self.connection_id = str(self.connection_key.id())
else:
self.connection_id = ""
return CommentsResponseMessage(title = self.title,
post_type = self.post_type,
body = self.body,
media = self.media,
tags = self.tags,
personthing_id = self.personthing_id,
personthing_name = self.personthing_name,
user_id = self.user_id,
user_name = self.user_name,
connection_id = self.connection_id,
connection_title = self.connection_title,
social_media_json = self.social_media_json,
created = self.created,
updated = self.updated,
comment_id = self.key.id())
@classmethod
def get_comment_by_id(cls, comment_id):
return cls.get_by_id(int(comment_id))
@classmethod
def get_comments(cls, curs, limit, filter_dictionary):
qry_var = cls.query()
for k,v in filter_dictionary.iteritems():
qry_var_new = qry_var.filter(cls._properties[k] == v)
qry_var = qry_var_new
return qry_var.order(-cls.created).fetch_page(limit, start_cursor=curs)
class ApiKeys(ndb.Model):
#: Creation date.
created = ndb.DateTimeProperty(auto_now_add=True)
#: Modification date.
updated = ndb.DateTimeProperty(auto_now=True)
#user id the key is associated with
user_id = ndb.IntegerProperty()
#user_name
user_name = ndb.StringProperty()
#total calls
total_calls = ndb.IntegerProperty(default=0)
#role of the user
role = ndb.StringProperty()
@classmethod
def get_apikey_by_id(cls, apikey_id):
return cls.get_by_id(int(apikey_id))
@classmethod
def get_apikey_by_user_id(cls, user_id):
if user_id:
return cls.query(cls.user_id==int(user_id)).fetch()[0]
class Locations(ndb.Model):
#: Creation date.
created = ndb.DateTimeProperty(auto_now_add=True)
#: Modification date.
updated = ndb.DateTimeProperty(auto_now=True)
# Link to blog posts about, with some metadata about the post
blog_url = ndb.JsonProperty()
#latitude of connection person/thing
latitude = ndb.FloatProperty()
#longitude of connection person/thing
longitude = ndb.FloatProperty()
#Location geopoint
loc_geopt = ndb.GeoPtProperty()
#name of the locations
loc_name = ndb.StringProperty()
#IP from requesting user
ip = ndb.StringProperty()
#UA string of user requesting
uastring = ndb.TextProperty()
#title of the request
title = ndb.StringProperty()
    #type of the location
type = ndb.StringProperty()
@classmethod
def get_locations(cls, curs, limit, filter_dictionary):
qry_var = cls.query()
for k,v in filter_dictionary.iteritems():
qry_var_new = qry_var.filter(cls._properties[k] == v)
qry_var = qry_var_new
return qry_var.order(-cls.created).fetch_page(limit, start_cursor=curs)
def to_location_message(self):
"""Turns the Connection entity into a ProtoRPC object.
"""
return LatLngMessage(title = self.title,
created = self.created,
updated = self.updated,
blog_url = self.blog_url,
latitude = self.latitude,
longitude = self.longitude,
loc_name = self.loc_name,
type = self.type)
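# Illustrative sketch, not part of the original models: cursor-based paging
# over Connection entities with the get_connections() classmethod above.
# fetch_page() returns a (results, cursor, more) triple; passing curs=None
# starts from the beginning.  The 'completed' filter key and page size of 20
# are arbitrary choices for the example.
def _example_page_through_connections():
    messages = []
    curs = None
    while True:
        results, curs, more = Connection.get_connections(
            curs, 20, {'completed': True})
        # to_message_no_media() builds the ProtoRPC response without the
        # media payload, as defined on Connection above.
        messages.extend(c.to_message_no_media() for c in results)
        if not more or curs is None:
            break
    return messages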
|
|
# sqlalchemy/exc.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Exceptions used with SQLAlchemy.
The base exception class is :exc:`.SQLAlchemyError`. Exceptions which are
raised as a result of DBAPI exceptions are all subclasses of
:exc:`.DBAPIError`.
"""
class SQLAlchemyError(Exception):
"""Generic error class."""
class ArgumentError(SQLAlchemyError):
"""Raised when an invalid or conflicting function argument is supplied.
This error generally corresponds to construction time state errors.
"""
class NoSuchModuleError(ArgumentError):
"""Raised when a dynamically-loaded module (usually a database dialect)
of a particular name cannot be located."""
class NoForeignKeysError(ArgumentError):
"""Raised when no foreign keys can be located between two selectables
during a join."""
class AmbiguousForeignKeysError(ArgumentError):
"""Raised when more than one foreign key matching can be located
between two selectables during a join."""
class CircularDependencyError(SQLAlchemyError):
"""Raised by topological sorts when a circular dependency is detected.
There are two scenarios where this error occurs:
* In a Session flush operation, if two objects are mutually dependent
on each other, they can not be inserted or deleted via INSERT or
DELETE statements alone; an UPDATE will be needed to post-associate
or pre-deassociate one of the foreign key constrained values.
The ``post_update`` flag described at :ref:`post_update` can resolve
this cycle.
* In a :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey`
or :class:`.ForeignKeyConstraint` objects mutually refer to each
other. Apply the ``use_alter=True`` flag to one or both,
see :ref:`use_alter`.
"""
def __init__(self, message, cycles, edges, msg=None):
if msg is None:
message += " (%s)" % ", ".join(repr(s) for s in cycles)
else:
message = msg
SQLAlchemyError.__init__(self, message)
self.cycles = cycles
self.edges = edges
def __reduce__(self):
return self.__class__, (None, self.cycles,
self.edges, self.args[0])
class CompileError(SQLAlchemyError):
"""Raised when an error occurs during SQL compilation"""
class UnsupportedCompilationError(CompileError):
"""Raised when an operation is not supported by the given compiler.
.. versionadded:: 0.8.3
"""
def __init__(self, compiler, element_type):
super(UnsupportedCompilationError, self).__init__(
"Compiler %r can't render element of type %s" %
(compiler, element_type))
class IdentifierError(SQLAlchemyError):
"""Raised when a schema name is beyond the max character limit"""
class DisconnectionError(SQLAlchemyError):
"""A disconnect is detected on a raw DB-API connection.
This error is raised and consumed internally by a connection pool. It can
be raised by the :meth:`.PoolEvents.checkout` event so that the host pool
forces a retry; the exception will be caught three times in a row before
the pool gives up and raises :class:`~sqlalchemy.exc.InvalidRequestError`
regarding the connection attempt.
"""
class TimeoutError(SQLAlchemyError):
"""Raised when a connection pool times out on getting a connection."""
class InvalidRequestError(SQLAlchemyError):
"""SQLAlchemy was asked to do something it can't do.
This error generally corresponds to runtime state errors.
"""
class NoInspectionAvailable(InvalidRequestError):
"""A subject passed to :func:`sqlalchemy.inspection.inspect` produced
no context for inspection."""
class ResourceClosedError(InvalidRequestError):
"""An operation was requested from a connection, cursor, or other
object that's in a closed state."""
class NoSuchColumnError(KeyError, InvalidRequestError):
"""A nonexistent column is requested from a ``RowProxy``."""
class NoReferenceError(InvalidRequestError):
"""Raised by ``ForeignKey`` to indicate a reference cannot be resolved."""
class NoReferencedTableError(NoReferenceError):
"""Raised by ``ForeignKey`` when the referred ``Table`` cannot be
located.
"""
def __init__(self, message, tname):
NoReferenceError.__init__(self, message)
self.table_name = tname
def __reduce__(self):
return self.__class__, (self.args[0], self.table_name)
class NoReferencedColumnError(NoReferenceError):
"""Raised by ``ForeignKey`` when the referred ``Column`` cannot be
located.
"""
def __init__(self, message, tname, cname):
NoReferenceError.__init__(self, message)
self.table_name = tname
self.column_name = cname
def __reduce__(self):
return self.__class__, (self.args[0], self.table_name,
self.column_name)
class NoSuchTableError(InvalidRequestError):
"""Table does not exist or is not visible to a connection."""
class UnboundExecutionError(InvalidRequestError):
"""SQL was attempted without a database connection to execute it on."""
class DontWrapMixin(object):
"""A mixin class which, when applied to a user-defined Exception class,
will not be wrapped inside of :exc:`.StatementError` if the error is
emitted within the process of executing a statement.
E.g.::
from sqlalchemy.exc import DontWrapMixin
class MyCustomException(Exception, DontWrapMixin):
pass
class MySpecialType(TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
if value == 'invalid':
raise MyCustomException("invalid!")
"""
# Moved to orm.exc; compatibility definition installed by orm import until 0.6
UnmappedColumnError = None
class StatementError(SQLAlchemyError):
"""An error occurred during execution of a SQL statement.
:class:`StatementError` wraps the exception raised
during execution, and features :attr:`.statement`
and :attr:`.params` attributes which supply context regarding
the specifics of the statement which had an issue.
The wrapped exception object is available in
the :attr:`.orig` attribute.
"""
statement = None
"""The string SQL statement being invoked when this exception occurred."""
params = None
"""The parameter list being used when this exception occurred."""
orig = None
"""The DBAPI exception object."""
def __init__(self, message, statement, params, orig):
SQLAlchemyError.__init__(self, message)
self.statement = statement
self.params = params
self.orig = orig
self.detail = []
def add_detail(self, msg):
self.detail.append(msg)
def __reduce__(self):
return self.__class__, (self.args[0], self.statement,
self.params, self.orig)
def __str__(self):
from sqlalchemy.sql import util
details = [SQLAlchemyError.__str__(self)]
if self.statement:
details.append("[SQL: %r]" % self.statement)
if self.params:
params_repr = util._repr_params(self.params, 10)
details.append("[parameters: %r]" % params_repr)
return ' '.join([
"(%s)" % det for det in self.detail
] + details)
def __unicode__(self):
return self.__str__()
class DBAPIError(StatementError):
"""Raised when the execution of a database operation fails.
Wraps exceptions raised by the DB-API underlying the
database operation. Driver-specific implementations of the standard
DB-API exception types are wrapped by matching sub-types of SQLAlchemy's
:class:`DBAPIError` when possible. DB-API's ``Error`` type maps to
:class:`DBAPIError` in SQLAlchemy, otherwise the names are identical. Note
that there is no guarantee that different DB-API implementations will
raise the same exception type for any given error condition.
:class:`DBAPIError` features :attr:`~.StatementError.statement`
and :attr:`~.StatementError.params` attributes which supply context
regarding the specifics of the statement which had an issue, for the
typical case when the error was raised within the context of
emitting a SQL statement.
The wrapped exception object is available in the
:attr:`~.StatementError.orig` attribute. Its type and properties are
DB-API implementation specific.
"""
@classmethod
def instance(cls, statement, params,
orig, dbapi_base_err,
connection_invalidated=False,
dialect=None):
# Don't ever wrap these, just return them directly as if
# DBAPIError didn't exist.
if (isinstance(orig, BaseException) and
not isinstance(orig, Exception)) or \
isinstance(orig, DontWrapMixin):
return orig
if orig is not None:
# not a DBAPI error, statement is present.
# raise a StatementError
if not isinstance(orig, dbapi_base_err) and statement:
return StatementError(
"(%s.%s) %s" %
(orig.__class__.__module__, orig.__class__.__name__,
orig),
statement, params, orig
)
glob = globals()
for super_ in orig.__class__.__mro__:
name = super_.__name__
if dialect:
name = dialect.dbapi_exception_translation_map.get(
name, name)
if name in glob and issubclass(glob[name], DBAPIError):
cls = glob[name]
break
return cls(statement, params, orig, connection_invalidated)
def __reduce__(self):
return self.__class__, (self.statement, self.params,
self.orig, self.connection_invalidated)
def __init__(self, statement, params, orig, connection_invalidated=False):
try:
text = str(orig)
except Exception as e:
text = 'Error in str() of DB-API-generated exception: ' + str(e)
StatementError.__init__(
self,
'(%s.%s) %s' % (
orig.__class__.__module__, orig.__class__.__name__, text, ),
statement,
params,
orig
)
self.connection_invalidated = connection_invalidated
class InterfaceError(DBAPIError):
"""Wraps a DB-API InterfaceError."""
class DatabaseError(DBAPIError):
"""Wraps a DB-API DatabaseError."""
class DataError(DatabaseError):
"""Wraps a DB-API DataError."""
class OperationalError(DatabaseError):
"""Wraps a DB-API OperationalError."""
class IntegrityError(DatabaseError):
"""Wraps a DB-API IntegrityError."""
class InternalError(DatabaseError):
"""Wraps a DB-API InternalError."""
class ProgrammingError(DatabaseError):
"""Wraps a DB-API ProgrammingError."""
class NotSupportedError(DatabaseError):
"""Wraps a DB-API NotSupportedError."""
# Warnings
class SADeprecationWarning(DeprecationWarning):
"""Issued once per usage of a deprecated API."""
class SAPendingDeprecationWarning(PendingDeprecationWarning):
"""Issued once per usage of a deprecated API."""
class SAWarning(RuntimeWarning):
"""Issued at runtime."""
|
|
"""
IPython/Jupyter Notebook progressbar decorator for iterators.
Includes a default `range` iterator printing to `stderr`.
Usage:
>>> from tqdm.notebook import trange, tqdm
>>> for i in trange(10):
... ...
"""
# future division is important to divide integers and get as
# a result precise floating numbers (instead of truncated int)
from __future__ import absolute_import, division
# import compatibility functions and utilities
import re
import sys
from weakref import proxy
# to inherit from the tqdm class
from .std import tqdm as std_tqdm
from .utils import _range
if True: # pragma: no cover
# import IPython/Jupyter base widget and display utilities
IPY = 0
try: # IPython 4.x
import ipywidgets
IPY = 4
except ImportError: # IPython 3.x / 2.x
IPY = 32
import warnings
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore', message=".*The `IPython.html` package has been deprecated.*")
try:
import IPython.html.widgets as ipywidgets # NOQA: F401
except ImportError:
pass
try: # IPython 4.x / 3.x
if IPY == 32:
from IPython.html.widgets import HTML
from IPython.html.widgets import FloatProgress as IProgress
from IPython.html.widgets import HBox
IPY = 3
else:
from ipywidgets import HTML
from ipywidgets import FloatProgress as IProgress
from ipywidgets import HBox
except ImportError:
try: # IPython 2.x
from IPython.html.widgets import HTML
from IPython.html.widgets import ContainerWidget as HBox
from IPython.html.widgets import FloatProgressWidget as IProgress
IPY = 2
except ImportError:
IPY = 0
IProgress = None
HBox = object
try:
from IPython.display import display # , clear_output
except ImportError:
pass
# HTML encoding
try: # Py3
from html import escape
except ImportError: # Py2
from cgi import escape
__author__ = {"github.com/": ["lrq3000", "casperdcl", "alexanderkuk"]}
__all__ = ['tqdm_notebook', 'tnrange', 'tqdm', 'trange']
class TqdmHBox(HBox):
"""`ipywidgets.HBox` with a pretty representation"""
def _repr_json_(self, pretty=None):
pbar = getattr(self, 'pbar', None)
if pbar is None:
return {}
d = pbar.format_dict
if pretty is not None:
d["ascii"] = not pretty
return d
def __repr__(self, pretty=False):
pbar = getattr(self, 'pbar', None)
if pbar is None:
return super(TqdmHBox, self).__repr__()
return pbar.format_meter(**self._repr_json_(pretty))
def _repr_pretty_(self, pp, *_, **__):
pp.text(self.__repr__(True))
class tqdm_notebook(std_tqdm):
"""
Experimental IPython/Jupyter Notebook widget using tqdm!
"""
@staticmethod
def status_printer(_, total=None, desc=None, ncols=None):
"""
Manage the printing of an IPython/Jupyter Notebook progress bar widget.
"""
# Fallback to text bar if there's no total
# DEPRECATED: replaced with an 'info' style bar
# if not total:
# return super(tqdm_notebook, tqdm_notebook).status_printer(file)
# fp = file
# Prepare IPython progress bar
if IProgress is None: # #187 #451 #558 #872
raise ImportError(
"IProgress not found. Please update jupyter and ipywidgets."
" See https://ipywidgets.readthedocs.io/en/stable"
"/user_install.html")
if total:
pbar = IProgress(min=0, max=total)
else: # No total? Show info style bar with no progress tqdm status
pbar = IProgress(min=0, max=1)
pbar.value = 1
pbar.bar_style = 'info'
if ncols is None:
pbar.layout.width = "20px"
ltext = HTML()
rtext = HTML()
if desc:
ltext.value = desc
container = TqdmHBox(children=[ltext, pbar, rtext])
# Prepare layout
if ncols is not None: # use default style of ipywidgets
# ncols could be 100, "100px", "100%"
ncols = str(ncols) # ipywidgets only accepts string
try:
if int(ncols) > 0: # isnumeric and positive
ncols += 'px'
except ValueError:
pass
pbar.layout.flex = '2'
container.layout.width = ncols
container.layout.display = 'inline-flex'
container.layout.flex_flow = 'row wrap'
return container
def display(self, msg=None, pos=None,
# additional signals
close=False, bar_style=None, check_delay=True):
# Note: contrary to native tqdm, msg='' does NOT clear bar
# goal is to keep all infos if error happens so user knows
# at which iteration the loop failed.
# Clear previous output (really necessary?)
# clear_output(wait=1)
if not msg and not close:
d = self.format_dict
# remove {bar}
d['bar_format'] = (d['bar_format'] or "{l_bar}<bar/>{r_bar}").replace(
"{bar}", "<bar/>")
msg = self.format_meter(**d)
ltext, pbar, rtext = self.container.children
pbar.value = self.n
if msg:
# html escape special characters (like '&')
if '<bar/>' in msg:
left, right = map(escape, re.split(r'\|?<bar/>\|?', msg, 1))
else:
left, right = '', escape(msg)
# Update description
ltext.value = left
# never clear the bar (signal: msg='')
if right:
rtext.value = right
# Change bar style
if bar_style:
# Hack-ish way to avoid the danger bar_style being overridden by
# success because the bar gets closed after the error...
if pbar.bar_style != 'danger' or bar_style != 'success':
pbar.bar_style = bar_style
# Special signal to close the bar
if close and pbar.bar_style != 'danger': # hide only if no error
try:
self.container.close()
except AttributeError:
self.container.visible = False
if check_delay and self.delay > 0 and not self.displayed:
display(self.container)
self.displayed = True
@property
def colour(self):
if hasattr(self, 'container'):
return self.container.children[-2].style.bar_color
@colour.setter
def colour(self, bar_color):
if hasattr(self, 'container'):
self.container.children[-2].style.bar_color = bar_color
def __init__(self, *args, **kwargs):
"""
Supports the usual `tqdm.tqdm` parameters as well as those listed below.
Parameters
----------
display : Whether to call `display(self.container)` immediately
[default: True].
"""
kwargs = kwargs.copy()
# Setup default output
file_kwarg = kwargs.get('file', sys.stderr)
if file_kwarg is sys.stderr or file_kwarg is None:
kwargs['file'] = sys.stdout # avoid the red block in IPython
# Initialize parent class + avoid printing by using gui=True
kwargs['gui'] = True
# convert disable = None to False
kwargs['disable'] = bool(kwargs.get('disable', False))
colour = kwargs.pop('colour', None)
display_here = kwargs.pop('display', True)
super(tqdm_notebook, self).__init__(*args, **kwargs)
if self.disable or not kwargs['gui']:
self.disp = lambda *_, **__: None
return
# Get bar width
self.ncols = '100%' if self.dynamic_ncols else kwargs.get("ncols", None)
# Replace with IPython progress bar display (with correct total)
unit_scale = 1 if self.unit_scale is True else self.unit_scale or 1
total = self.total * unit_scale if self.total else self.total
self.container = self.status_printer(self.fp, total, self.desc, self.ncols)
self.container.pbar = proxy(self)
self.displayed = False
if display_here and self.delay <= 0:
display(self.container)
self.displayed = True
self.disp = self.display
self.colour = colour
# Print initial bar state
if not self.disable:
self.display(check_delay=False)
def __iter__(self):
try:
for obj in super(tqdm_notebook, self).__iter__():
# return super(tqdm...) will not catch exception
yield obj
# NB: except ... [ as ...] breaks IPython async KeyboardInterrupt
except: # NOQA
self.disp(bar_style='danger')
raise
# NB: don't `finally: close()`
# since this could be a shared bar which the user will `reset()`
def update(self, n=1):
try:
return super(tqdm_notebook, self).update(n=n)
# NB: except ... [ as ...] breaks IPython async KeyboardInterrupt
except: # NOQA
# cannot catch KeyboardInterrupt when using manual tqdm
# as the interrupt will most likely happen on another statement
self.disp(bar_style='danger')
raise
# NB: don't `finally: close()`
# since this could be a shared bar which the user will `reset()`
def close(self):
if self.disable:
return
super(tqdm_notebook, self).close()
# Try to detect if there was an error or KeyboardInterrupt
# in manual mode: if n < total, things probably got wrong
if self.total and self.n < self.total:
self.disp(bar_style='danger', check_delay=False)
else:
if self.leave:
self.disp(bar_style='success', check_delay=False)
else:
self.disp(close=True, check_delay=False)
def clear(self, *_, **__):
pass
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
if self.disable:
return super(tqdm_notebook, self).reset(total=total)
_, pbar, _ = self.container.children
pbar.bar_style = ''
if total is not None:
pbar.max = total
if not self.total and self.ncols is None: # no longer unknown total
pbar.layout.width = None # reset width
return super(tqdm_notebook, self).reset(total=total)
def tnrange(*args, **kwargs):
"""
A shortcut for `tqdm.notebook.tqdm(xrange(*args), **kwargs)`.
On Python3+, `range` is used instead of `xrange`.
"""
return tqdm_notebook(_range(*args), **kwargs)
# Aliases
tqdm = tqdm_notebook
trange = tnrange
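# Illustrative sketch, not part of the tqdm source: manual use of the
# notebook bar defined above.  Requires a running IPython/Jupyter kernel
# with ipywidgets available; the totals, description and colour are
# arbitrary example values.
def _example_manual_notebook_bar():
    pbar = tqdm_notebook(total=3, desc="demo", colour="green")
    try:
        for _ in range(3):
            pbar.update(1)   # advances the FloatProgress widget
        pbar.reset(total=5)  # reuse the same widget with a new total
        pbar.update(5)
    finally:
        # n == total at this point, so close() leaves a 'success' style bar
        # (because leave defaults to True).
        pbar.close()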
|
|
from __future__ import unicode_literals
from boto.exception import BotoServerError
from moto.core import BaseBackend
from .utils import random_access_key, random_alphanumeric, random_resource_id
from datetime import datetime
import base64
class Role(object):
def __init__(self, role_id, name, assume_role_policy_document, path):
self.id = role_id
self.name = name
self.assume_role_policy_document = assume_role_policy_document
self.path = path
self.policies = {}
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
role = iam_backend.create_role(
role_name=resource_name,
assume_role_policy_document=properties['AssumeRolePolicyDocument'],
path=properties['Path'],
)
policies = properties.get('Policies', [])
for policy in policies:
policy_name = policy['PolicyName']
policy_json = policy['PolicyDocument']
role.put_policy(policy_name, policy_json)
return role
def put_policy(self, policy_name, policy_json):
self.policies[policy_name] = policy_json
@property
def physical_resource_id(self):
return self.id
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
class InstanceProfile(object):
def __init__(self, instance_profile_id, name, path, roles):
self.id = instance_profile_id
self.name = name
self.path = path
self.roles = roles if roles else []
@classmethod
def create_from_cloudformation_json(cls, resource_name, cloudformation_json, region_name):
properties = cloudformation_json['Properties']
role_ids = properties['Roles']
return iam_backend.create_instance_profile(
name=resource_name,
path=properties['Path'],
role_ids=role_ids,
)
@property
def physical_resource_id(self):
return self.name
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
class Certificate(object):
def __init__(self, cert_name, cert_body, private_key, cert_chain=None, path=None):
self.cert_name = cert_name
self.cert_body = cert_body
self.private_key = private_key
self.path = path
self.cert_chain = cert_chain
@property
def physical_resource_id(self):
return self.name
class AccessKey(object):
def __init__(self, user_name):
self.user_name = user_name
self.access_key_id = random_access_key()
self.secret_access_key = random_alphanumeric(32)
self.status = 'Active'
self.create_date = datetime.strftime(
datetime.utcnow(),
"%Y-%m-%d-%H-%M-%S"
)
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'SecretAccessKey':
return self.secret_access_key
raise UnformattedGetAttTemplateException()
class Group(object):
def __init__(self, name, path='/'):
self.name = name
self.id = random_resource_id()
self.path = path
self.created = datetime.strftime(
datetime.utcnow(),
"%Y-%m-%d-%H-%M-%S"
)
self.users = []
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
class User(object):
def __init__(self, name, path='/'):
self.name = name
self.id = random_resource_id()
self.path = path
self.created = datetime.strftime(
datetime.utcnow(),
"%Y-%m-%d-%H-%M-%S"
)
self.arn = 'arn:aws:iam::123456789012:user/{0}'.format(name)
self.policies = {}
self.access_keys = []
self.password = None
def get_policy(self, policy_name):
policy_json = None
try:
policy_json = self.policies[policy_name]
except KeyError:
raise BotoServerError(404, 'Not Found')
return {
'policy_name': policy_name,
'policy_document': policy_json,
'user_name': self.name,
}
def put_policy(self, policy_name, policy_json):
self.policies[policy_name] = policy_json
def delete_policy(self, policy_name):
if policy_name not in self.policies:
raise BotoServerError(404, 'Not Found')
del self.policies[policy_name]
def create_access_key(self):
access_key = AccessKey(self.name)
self.access_keys.append(access_key)
return access_key
def get_all_access_keys(self):
return self.access_keys
def delete_access_key(self, access_key_id):
for key in self.access_keys:
if key.access_key_id == access_key_id:
self.access_keys.remove(key)
break
else:
raise BotoServerError(404, 'Not Found')
def get_cfn_attribute(self, attribute_name):
from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
if attribute_name == 'Arn':
raise NotImplementedError('"Fn::GetAtt" : [ "{0}" , "Arn" ]"')
raise UnformattedGetAttTemplateException()
def to_csv(self):
date_format = '%Y-%m-%dT%H:%M:%S+00:00'
date_created = datetime.strptime(self.created, '%Y-%m-%d-%H-%M-%S')
# aagrawal,arn:aws:iam::509284790694:user/aagrawal,2014-09-01T22:28:48+00:00,true,2014-11-12T23:36:49+00:00,2014-09-03T18:59:00+00:00,N/A,false,true,2014-09-01T22:28:48+00:00,false,N/A,false,N/A,false,N/A
if not self.password:
password_enabled = 'false'
password_last_used = 'not_supported'
else:
password_enabled = 'true'
password_last_used = 'no_information'
if len(self.access_keys) == 0:
access_key_1_active = 'false'
access_key_1_last_rotated = 'N/A'
access_key_2_active = 'false'
access_key_2_last_rotated = 'N/A'
elif len(self.access_keys) == 1:
access_key_1_active = 'true'
access_key_1_last_rotated = date_created.strftime(date_format)
access_key_2_active = 'false'
access_key_2_last_rotated = 'N/A'
else:
access_key_1_active = 'true'
access_key_1_last_rotated = date_created.strftime(date_format)
access_key_2_active = 'true'
access_key_2_last_rotated = date_created.strftime(date_format)
return '{0},{1},{2},{3},{4},{5},not_supported,false,{6},{7},{8},{9},false,N/A,false,N/A'.format(self.name,
self.arn,
date_created.strftime(date_format),
password_enabled,
password_last_used,
date_created.strftime(date_format),
access_key_1_active,
access_key_1_last_rotated,
access_key_2_active,
access_key_2_last_rotated
)
class IAMBackend(BaseBackend):
def __init__(self):
self.instance_profiles = {}
self.roles = {}
self.certificates = {}
self.groups = {}
self.users = {}
self.credential_report = None
super(IAMBackend, self).__init__()
def create_role(self, role_name, assume_role_policy_document, path):
role_id = random_resource_id()
role = Role(role_id, role_name, assume_role_policy_document, path)
self.roles[role_id] = role
return role
def get_role_by_id(self, role_id):
return self.roles.get(role_id)
def get_role(self, role_name):
for role in self.get_roles():
if role.name == role_name:
return role
raise BotoServerError(404, 'Not Found')
def get_roles(self):
return self.roles.values()
def put_role_policy(self, role_name, policy_name, policy_json):
role = self.get_role(role_name)
role.put_policy(policy_name, policy_json)
def get_role_policy(self, role_name, policy_name):
role = self.get_role(role_name)
for p, d in role.policies.items():
if p == policy_name:
return p, d
def list_role_policies(self, role_name):
role = self.get_role(role_name)
return role.policies.keys()
def create_instance_profile(self, name, path, role_ids):
instance_profile_id = random_resource_id()
roles = [iam_backend.get_role_by_id(role_id) for role_id in role_ids]
instance_profile = InstanceProfile(instance_profile_id, name, path, roles)
self.instance_profiles[instance_profile_id] = instance_profile
return instance_profile
def get_instance_profile(self, profile_name):
for profile in self.get_instance_profiles():
if profile.name == profile_name:
return profile
def get_instance_profiles(self):
return self.instance_profiles.values()
def get_instance_profiles_for_role(self, role_name):
found_profiles = []
for profile in self.get_instance_profiles():
if len(profile.roles) > 0:
if profile.roles[0].name == role_name:
found_profiles.append(profile)
return found_profiles
def add_role_to_instance_profile(self, profile_name, role_name):
profile = self.get_instance_profile(profile_name)
role = self.get_role(role_name)
profile.roles.append(role)
def get_all_server_certs(self, marker=None):
return self.certificates.values()
def upload_server_cert(self, cert_name, cert_body, private_key, cert_chain=None, path=None):
certificate_id = random_resource_id()
cert = Certificate(cert_name, cert_body, private_key, cert_chain, path)
self.certificates[certificate_id] = cert
return cert
def get_server_certificate(self, name):
for key, cert in self.certificates.items():
if name == cert.cert_name:
return cert
def create_group(self, group_name, path='/'):
if group_name in self.groups:
raise BotoServerError(409, 'Conflict')
group = Group(group_name, path)
self.groups[group_name] = group
return group
def get_group(self, group_name, marker=None, max_items=None):
group = None
try:
group = self.groups[group_name]
except KeyError:
raise BotoServerError(404, 'Not Found')
return group
def list_groups(self):
return self.groups.values()
def get_groups_for_user(self, user_name):
user = self.get_user(user_name)
groups = []
for group in self.list_groups():
if user in group.users:
groups.append(group)
return groups
def create_user(self, user_name, path='/'):
if user_name in self.users:
raise BotoServerError(409, 'Conflict')
user = User(user_name, path)
self.users[user_name] = user
return user
def get_user(self, user_name):
user = None
try:
user = self.users[user_name]
except KeyError:
raise BotoServerError(404, 'Not Found')
return user
def create_login_profile(self, user_name, password):
if user_name not in self.users:
raise BotoServerError(404, 'Not Found')
# This does not currently deal with PasswordPolicyViolation.
user = self.users[user_name]
if user.password:
raise BotoServerError(409, 'Conflict')
user.password = password
def add_user_to_group(self, group_name, user_name):
group = None
user = None
try:
group = self.groups[group_name]
user = self.users[user_name]
except KeyError:
raise BotoServerError(404, 'Not Found')
group.users.append(user)
def remove_user_from_group(self, group_name, user_name):
group = None
user = None
try:
group = self.groups[group_name]
user = self.users[user_name]
group.users.remove(user)
except (KeyError, ValueError):
raise BotoServerError(404, 'Not Found')
def get_user_policy(self, user_name, policy_name):
policy = None
try:
user = self.users[user_name]
policy = user.get_policy(policy_name)
except KeyError:
raise BotoServerError(404, 'Not Found')
return policy
def put_user_policy(self, user_name, policy_name, policy_json):
try:
user = self.users[user_name]
user.put_policy(policy_name, policy_json)
except KeyError:
raise BotoServerError(404, 'Not Found')
def delete_user_policy(self, user_name, policy_name):
try:
user = self.users[user_name]
user.delete_policy(policy_name)
except KeyError:
raise BotoServerError(404, 'Not Found')
def create_access_key(self, user_name=None):
key = None
try:
user = self.users[user_name]
key = user.create_access_key()
except KeyError:
raise BotoServerError(404, 'Not Found')
return key
def get_all_access_keys(self, user_name, marker=None, max_items=None):
keys = None
try:
user = self.users[user_name]
keys = user.get_all_access_keys()
except KeyError:
raise BotoServerError(404, 'Not Found')
return keys
def delete_access_key(self, access_key_id, user_name):
try:
user = self.users[user_name]
user.delete_access_key(access_key_id)
except KeyError:
raise BotoServerError(404, 'Not Found')
def delete_user(self, user_name):
try:
del self.users[user_name]
except KeyError:
raise BotoServerError(404, 'Not Found')
def report_generated(self):
return self.credential_report
def generate_report(self):
self.credential_report = True
def get_credential_report(self):
if not self.credential_report:
raise BotoServerError(410, 'ReportNotPresent')
report = 'user,arn,user_creation_time,password_enabled,password_last_used,password_last_changed,password_next_rotation,mfa_active,access_key_1_active,access_key_1_last_rotated,access_key_2_active,access_key_2_last_rotated,cert_1_active,cert_1_last_rotated,cert_2_active,cert_2_last_rotated\n'
for user in self.users:
report += self.users[user].to_csv()
return base64.b64encode(report.encode('ascii')).decode('ascii')
iam_backend = IAMBackend()
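# ----- Illustrative usage sketch (added example, not part of moto) -----
# A minimal, hedged walk-through of the in-memory backend defined above: it
# creates a user, attaches an inline policy, issues an access key and pulls a
# credential report. The user and policy names are made up; nothing here runs
# on import.
def _example_iam_backend_usage():
    demo = IAMBackend()
    demo.create_user('demo-user')
    demo.put_user_policy('demo-user', 'demo-policy', '{"Version": "2012-10-17"}')
    key = demo.create_access_key('demo-user')
    # The credential report is returned base64-encoded, one CSV row per user.
    demo.generate_report()
    report_b64 = demo.get_credential_report()
    return key.access_key_id, base64.b64decode(report_b64.encode('ascii'))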
|
|
data = (
( 2013,1,6),
( 2013,1,11),
( 2013,1,11),
( 2013,1,11),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,12),
( 2013,1,13),
( 2013,1,14),
( 2013,1,14),
( 2013,1,14),
( 2013,1,14),
( 2013,1,14),
( 2013,1,14),
( 2013,1,15),
( 2013,1,18),
( 2013,1,18),
( 2013,1,18),
( 2013,1,19),
( 2013,1,20),
( 2013,1,20),
( 2013,1,20),
( 2013,1,21),
( 2013,1,22),
( 2013,1,22),
( 2013,1,22),
( 2013,1,22),
( 2013,1,22),
( 2013,1,23),
( 2013,1,23),
( 2013,1,23),
( 2013,1,23),
( 2013,1,23),
( 2013,1,24),
( 2013,1,24),
( 2013,1,24),
( 2013,1,25),
( 2013,1,25),
( 2013,1,25),
( 2013,1,26),
( 2013,1,26),
( 2013,1,26),
( 2013,1,27),
( 2013,1,27),
( 2013,1,27),
( 2013,1,27),
( 2013,1,27),
( 2013,1,29),
( 2013,1,30),
( 2013,1,30),
( 2013,1,30),
( 2013,2,1),
( 2013,2,1),
( 2013,2,1),
( 2013,2,2),
( 2013,2,2),
( 2013,2,2),
( 2013,2,2),
( 2013,2,2),
( 2013,2,2),
( 2013,2,2),
( 2013,2,3),
( 2013,2,3),
( 2013,2,4),
( 2013,2,4),
( 2013,2,4),
( 2013,2,4),
( 2013,2,4),
( 2013,2,4),
( 2013,2,4),
( 2013,2,4),
( 2013,2,4),
( 2013,2,5),
( 2013,2,5),
( 2013,2,5),
( 2013,2,7),
( 2013,2,8),
( 2013,2,8),
( 2013,2,8),
( 2013,2,8),
( 2013,2,9),
( 2013,2,9),
( 2013,2,9),
( 2013,2,10),
( 2013,2,13),
( 2013,2,13),
( 2013,2,14),
( 2013,2,14),
( 2013,2,14),
( 2013,2,15),
( 2013,2,16),
( 2013,2,16),
( 2013,2,17),
( 2013,2,17),
( 2013,2,18),
( 2013,2,18),
( 2013,2,18),
( 2013,2,18),
( 2013,2,18),
( 2013,2,18),
( 2013,2,18),
( 2013,2,18),
( 2013,2,18),
( 2013,2,18),
( 2013,2,19),
( 2013,2,19),
( 2013,2,19),
( 2013,2,19),
( 2013,2,19),
( 2013,2,19),
( 2013,2,19),
( 2013,2,19),
( 2013,2,19),
( 2013,2,19),
( 2013,2,19),
( 2013,2,20),
( 2013,2,20),
( 2013,2,20),
( 2013,2,20),
( 2013,2,21),
( 2013,2,21),
( 2013,2,22),
( 2013,2,24),
( 2013,2,24),
( 2013,2,25),
( 2013,2,25),
( 2013,2,26),
( 2013,2,26),
( 2013,2,26),
( 2013,2,27),
( 2013,2,27),
( 2013,2,27),
( 2013,2,28),
( 2013,3,1),
( 2013,3,3),
( 2013,3,3),
( 2013,3,3),
( 2013,3,3),
( 2013,3,4),
( 2013,3,4),
( 2013,3,4),
( 2013,3,5),
( 2013,3,5),
( 2013,3,7),
( 2013,3,12),
( 2013,3,13),
( 2013,3,13),
( 2013,3,15),
( 2013,3,16),
( 2013,3,16),
( 2013,3,18),
( 2013,3,18),
( 2013,3,19),
( 2013,3,19),
( 2013,3,19),
( 2013,3,20),
( 2013,3,21),
( 2013,3,21),
( 2013,3,21),
( 2013,3,26),
( 2013,3,26),
( 2013,3,30),
( 2013,3,31),
( 2013,3,31),
( 2013,4,3),
( 2013,4,3),
( 2013,4,4),
( 2013,4,4),
( 2013,4,6),
( 2013,4,6),
( 2013,4,6),
( 2013,4,9),
( 2013,4,9),
( 2013,4,10),
( 2013,4,12),
( 2013,4,13),
( 2013,4,18),
( 2013,4,20),
( 2013,4,21),
( 2013,4,21),
( 2013,4,25),
( 2013,4,25),
( 2013,4,26),
( 2013,4,27),
( 2013,4,30),
( 2013,4,30),
( 2013,5,1),
( 2013,5,2),
( 2013,5,2),
( 2013,5,2),
( 2013,5,2),
( 2013,5,2),
( 2013,5,2),
( 2013,5,3),
( 2013,5,4),
( 2013,5,4),
( 2013,5,5),
( 2013,5,8),
( 2013,5,8),
( 2013,5,9),
( 2013,5,9),
( 2013,5,12),
( 2013,5,12),
( 2013,5,15),
( 2013,5,15),
( 2013,5,16),
( 2013,5,16),
( 2013,5,20),
( 2013,5,21),
( 2013,5,22),
( 2013,5,25),
( 2013,5,26),
( 2013,5,28),
( 2013,5,28),
( 2013,5,29),
( 2013,5,30),
( 2013,6,3),
( 2013,6,5),
( 2013,6,5),
( 2013,6,5),
( 2013,6,7),
( 2013,6,7),
( 2013,6,7),
( 2013,6,8),
( 2013,6,9),
( 2013,6,9),
( 2013,6,11),
( 2013,6,11),
( 2013,6,13),
( 2013,6,15),
( 2013,6,18),
( 2013,6,20),
( 2013,6,20),
( 2013,6,22),
( 2013,6,23),
( 2013,6,23),
( 2013,6,29),
( 2013,7,2),
( 2013,7,4),
( 2013,7,6),
( 2013,7,8),
( 2013,7,11),
( 2013,7,11),
( 2013,7,12),
( 2013,7,12),
( 2013,7,12),
( 2013,7,12),
( 2013,7,12),
( 2013,7,12),
( 2013,7,13),
( 2013,7,14),
( 2013,7,14),
( 2013,7,16),
( 2013,7,17),
( 2013,7,18),
( 2013,7,18),
( 2013,7,22),
( 2013,7,22),
( 2013,7,22),
( 2013,7,22),
( 2013,7,24),
( 2013,7,24),
( 2013,7,24),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,25),
( 2013,7,26),
( 2013,7,26),
( 2013,7,26),
( 2013,7,26),
( 2013,7,26),
( 2013,7,26),
( 2013,7,26),
( 2013,7,26),
( 2013,7,26),
( 2013,7,26),
( 2013,7,26),
( 2013,7,27),
( 2013,7,27),
( 2013,7,27),
( 2013,7,27),
( 2013,7,27),
( 2013,7,27),
( 2013,7,28),
( 2013,7,28),
( 2013,7,29),
( 2013,7,30),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,7,31),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,1),
( 2013,8,2),
( 2013,8,2),
( 2013,8,2),
( 2013,8,2),
( 2013,8,2),
( 2013,8,4),
( 2013,8,4),
( 2013,8,5),
( 2013,8,5),
( 2013,8,5),
( 2013,8,5),
( 2013,8,5),
( 2013,8,5),
( 2013,8,5),
( 2013,8,6),
( 2013,8,6),
( 2013,8,7),
( 2013,8,8),
( 2013,8,8),
( 2013,8,8),
( 2013,8,8),
( 2013,8,8),
( 2013,8,8),
( 2013,8,8),
( 2013,8,8),
( 2013,8,8),
( 2013,8,9),
( 2013,8,9),
( 2013,8,9),
( 2013,8,9),
( 2013,8,11),
( 2013,8,12),
( 2013,8,13),
( 2013,8,13),
( 2013,8,13),
( 2013,8,13),
( 2013,8,14),
( 2013,8,14),
( 2013,8,15),
( 2013,8,15),
( 2013,8,15),
( 2013,8,15),
( 2013,8,15),
( 2013,8,15),
( 2013,8,15),
( 2013,8,16),
( 2013,8,16),
( 2013,8,17),
( 2013,8,18),
( 2013,8,19),
( 2013,8,21),
( 2013,8,22),
( 2013,8,22),
( 2013,8,22),
( 2013,8,22),
( 2013,8,23),
( 2013,8,23),
( 2013,8,24),
( 2013,8,24),
( 2013,8,24),
( 2013,8,27),
( 2013,8,29),
( 2013,8,30),
( 2013,8,30),
( 2013,8,31),
( 2013,9,2),
( 2013,9,2),
( 2013,9,3),
( 2013,9,3),
( 2013,9,6),
( 2013,9,6),
( 2013,9,7),
( 2013,9,7),
( 2013,9,8),
( 2013,9,9),
( 2013,9,10),
( 2013,9,12),
( 2013,9,14),
( 2013,9,15),
( 2013,9,15),
( 2013,9,15),
( 2013,9,15),
( 2013,9,15),
( 2013,9,16),
( 2013,9,17),
( 2013,9,19),
( 2013,9,19),
( 2013,9,21),
( 2013,9,22),
( 2013,9,23),
( 2013,9,24),
( 2013,9,24),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,9,30),
( 2013,10,1),
( 2013,10,1),
( 2013,10,2),
( 2013,10,3),
( 2013,10,3),
( 2013,10,4),
( 2013,10,4),
( 2013,10,4),
( 2013,10,4),
( 2013,10,5),
( 2013,10,6),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,7),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,8),
( 2013,10,9),
( 2013,10,9),
( 2013,10,9),
( 2013,10,9),
( 2013,10,9),
( 2013,10,9),
( 2013,10,9),
( 2013,10,9),
( 2013,10,9),
( 2013,10,9),
( 2013,10,9),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,10),
( 2013,10,11),
( 2013,10,11),
( 2013,10,11),
( 2013,10,11),
( 2013,10,11),
( 2013,10,11),
( 2013,10,11),
( 2013,10,11),
( 2013,10,11),
( 2013,10,11),
( 2013,10,11),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,12),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,13),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,14),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,15),
( 2013,10,16),
( 2013,10,16),
( 2013,10,16),
( 2013,10,16),
( 2013,10,16),
( 2013,10,16),
( 2013,10,16),
( 2013,10,16),
( 2013,10,16),
( 2013,10,16),
( 2013,10,16),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,17),
( 2013,10,18),
( 2013,10,18),
( 2013,10,18),
( 2013,10,18),
( 2013,10,18),
( 2013,10,18),
( 2013,10,18),
( 2013,10,19),
( 2013,10,19),
( 2013,10,19),
( 2013,10,19),
( 2013,10,20),
( 2013,10,20),
( 2013,10,20),
( 2013,10,20),
( 2013,10,20),
( 2013,10,20),
( 2013,10,20),
( 2013,10,21),
( 2013,10,21),
( 2013,10,21),
( 2013,10,21),
( 2013,10,21),
( 2013,10,21),
( 2013,10,21),
( 2013,10,21),
( 2013,10,21),
( 2013,10,21),
( 2013,10,21),
( 2013,10,22),
( 2013,10,22),
( 2013,10,22),
( 2013,10,22),
( 2013,10,22),
( 2013,10,22),
( 2013,10,22),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,23),
( 2013,10,24),
( 2013,10,24),
( 2013,10,24),
( 2013,10,24),
( 2013,10,24),
( 2013,10,24),
( 2013,10,24),
( 2013,10,24),
( 2013,10,25),
( 2013,10,25),
( 2013,10,25),
( 2013,10,25),
( 2013,10,25),
( 2013,10,26),
( 2013,10,26),
( 2013,10,26),
( 2013,10,26),
( 2013,10,26),
( 2013,10,26),
( 2013,10,27),
( 2013,10,27),
( 2013,10,27),
( 2013,10,27),
( 2013,10,27),
( 2013,10,27),
( 2013,10,27),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,28),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,29),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,30),
( 2013,10,31),
( 2013,10,31),
( 2013,10,31),
( 2013,10,31),
( 2013,10,31),
( 2013,10,31),
( 2013,11,1),
( 2013,11,1),
( 2013,11,1),
( 2013,11,1),
( 2013,11,1),
( 2013,11,1),
( 2013,11,1),
( 2013,11,2),
( 2013,11,2),
( 2013,11,2),
( 2013,11,2),
( 2013,11,2),
( 2013,11,3),
( 2013,11,3),
( 2013,11,3),
( 2013,11,3),
( 2013,11,3),
( 2013,11,3),
( 2013,11,3),
( 2013,11,3),
( 2013,11,3),
( 2013,11,4),
( 2013,11,4),
( 2013,11,4),
( 2013,11,4),
( 2013,11,4),
( 2013,11,4),
( 2013,11,4),
( 2013,11,5),
( 2013,11,5),
( 2013,11,5),
( 2013,11,5),
( 2013,11,5),
( 2013,11,5),
( 2013,11,5),
( 2013,11,5),
( 2013,11,5),
( 2013,11,5),
( 2013,11,6),
( 2013,11,6),
( 2013,11,6),
( 2013,11,6),
( 2013,11,6),
( 2013,11,6),
( 2013,11,7),
( 2013,11,7),
( 2013,11,7),
( 2013,11,7),
( 2013,11,7),
( 2013,11,7),
( 2013,11,7),
( 2013,11,7),
( 2013,11,7),
( 2013,11,7),
( 2013,11,7),
( 2013,11,8),
( 2013,11,8),
( 2013,11,8),
( 2013,11,8),
( 2013,11,8),
( 2013,11,8),
( 2013,11,8),
( 2013,11,8),
( 2013,11,9),
( 2013,11,9),
( 2013,11,9),
( 2013,11,9),
( 2013,11,9),
( 2013,11,9),
( 2013,11,9),
( 2013,11,9),
( 2013,11,9),
( 2013,11,10),
( 2013,11,10),
( 2013,11,10),
( 2013,11,12),
( 2013,11,16),
( 2013,11,16),
( 2013,11,16),
( 2013,11,17),
( 2013,11,18),
( 2013,11,20),
( 2013,11,22),
( 2013,11,23),
( 2013,11,23),
( 2013,11,23),
( 2013,11,24),
( 2013,11,25),
( 2013,11,26),
( 2013,11,27),
( 2013,12,2),
( 2013,12,5),
( 2013,12,7),
( 2013,12,9),
( 2013,12,10),
( 2013,12,10),
( 2013,12,11),
( 2013,12,11),
( 2013,12,11),
( 2013,12,13),
( 2013,12,14),
( 2013,12,15),
( 2013,12,16),
( 2013,12,16),
( 2013,12,17),
( 2013,12,17),
( 2013,12,17),
( 2013,12,17),
( 2013,12,17),
( 2013,12,17),
( 2013,12,18),
( 2013,12,21),
( 2013,12,21),
( 2013,12,25),
( 2013,12,26),
( 2013,12,26),
( 2013,12,26),
( 2013,12,29),
( 2013,12,29),
( 2013,12,29),
( 2013,12,30),
( 2013,12,30),
( 2013,12,31),
( 2013,12,31),
( 2013,12,31),
( 2013,12,31),
( 2013,12,31),
)
results = [[]]
for entry in data:
if len(results) < entry[1]:
results.append([])
if len(results[entry[1] - 1]) < entry[2]:
for _ in range(len(results[entry[1] - 1]), entry[2]):
results[entry[1] - 1].append(0)
results[entry[1] - 1][entry[2] - 1] += 1
print(results)
print([sum(month) for month in results])
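# An added, illustrative cross-check of the aggregation above: assuming the
# same (year, month, day) tuples in ``data``, collections.Counter yields the
# per-month totals directly, without the manual list padding.
from collections import Counter
per_month = Counter(month for _, month, _ in data)
assert [per_month.get(m, 0) for m in range(1, 13)] == [sum(days) for days in results]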
|
|
# -*- coding: utf-8 -*-
'''
Set up the version of Salt
'''
# Import python libs
from __future__ import absolute_import, print_function
import re
import sys
import platform
# pylint: disable=invalid-name,redefined-builtin
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import map
# Don't rely on external packages in this module since it's used at install time
if sys.version_info[0] == 3:
MAX_SIZE = sys.maxsize
string_types = (str,)
else:
MAX_SIZE = sys.maxint
string_types = (six.string_types,)
# pylint: enable=invalid-name,redefined-builtin
# ----- ATTENTION --------------------------------------------------------------------------------------------------->
#
# ALL major version bumps, new release codenames, MUST be defined in the SaltStackVersion.NAMES dictionary, i.e.:
#
# class SaltStackVersion(object):
#
# NAMES = {
# 'Hydrogen': (2014, 1), # <- This is the tuple to bump versions
# ( ... )
# }
#
#
# ONLY UPDATE CODENAMES AFTER BRANCHING
#
# As an example, the Helium codename must only be properly defined with "(2014, 7)" after Hydrogen, "(2014, 1)", has
# been branched out into its own branch.
#
# ALL OTHER VERSION INFORMATION IS EXTRACTED FROM THE GIT TAGS
#
# <---- ATTENTION ----------------------------------------------------------------------------------------------------
class SaltStackVersion(object):
'''
Class for handling SaltStack versions.
Knows how to parse ``git describe`` output, understands release candidates
and also supports version comparison.
'''
__slots__ = ('name', 'major', 'minor', 'bugfix', 'mbugfix', 'rc', 'noc', 'sha')
git_describe_regex = re.compile(
r'(?:[^\d]+)?(?P<major>[\d]{1,4})'
r'\.(?P<minor>[\d]{1,2})'
r'(?:\.(?P<bugfix>[\d]{0,2}))?'
r'(?:\.(?P<mbugfix>[\d]{0,2}))?'
r'(?:rc(?P<rc>[\d]{1}))?'
r'(?:(?:.*)-(?P<noc>(?:[\d]+|n/a))-(?P<sha>[a-z0-9]{8}))?'
)
git_sha_regex = re.compile(r'(?P<sha>[a-z0-9]{7})')
# Salt versions after 0.17.0 will be numbered like:
# <4-digit-year>.<month>.<bugfix>
#
# Since the actual version numbers will only be known on release dates, the
# periodic table element names are used to name and refer to unreleased
# versions.
NAMES = {
# Let's keep at least 3 version names uncommented counting from the
# latest release so we can map deprecation warnings to versions.
# pylint: disable=E8203
# ----- Please refrain from fixing PEP-8 E203 and E265 ----->
# The idea is to keep this readable.
# -----------------------------------------------------------
'Hydrogen' : (2014, 1),
'Helium' : (2014, 7),
'Lithium' : (2015, 5),
'Beryllium' : (2015, 8),
'Boron' : (2016, 3),
'Carbon' : (MAX_SIZE - 103, 0),
'Nitrogen' : (MAX_SIZE - 102, 0),
'Oxygen' : (MAX_SIZE - 101, 0),
# pylint: disable=E8265
#'Fluorine' : (MAX_SIZE - 100, 0),
#'Neon' : (MAX_SIZE - 99 , 0),
#'Sodium' : (MAX_SIZE - 98 , 0),
#'Magnesium' : (MAX_SIZE - 97 , 0),
#'Aluminium' : (MAX_SIZE - 96 , 0),
#'Silicon' : (MAX_SIZE - 95 , 0),
#'Phosphorus' : (MAX_SIZE - 94 , 0),
#'Sulfur' : (MAX_SIZE - 93 , 0),
#'Chlorine' : (MAX_SIZE - 92 , 0),
#'Argon' : (MAX_SIZE - 91 , 0),
#'Potassium' : (MAX_SIZE - 90 , 0),
#'Calcium' : (MAX_SIZE - 89 , 0),
#'Scandium' : (MAX_SIZE - 88 , 0),
#'Titanium' : (MAX_SIZE - 87 , 0),
#'Vanadium' : (MAX_SIZE - 86 , 0),
#'Chromium' : (MAX_SIZE - 85 , 0),
#'Manganese' : (MAX_SIZE - 84 , 0),
#'Iron' : (MAX_SIZE - 83 , 0),
#'Cobalt' : (MAX_SIZE - 82 , 0),
#'Nickel' : (MAX_SIZE - 81 , 0),
#'Copper' : (MAX_SIZE - 80 , 0),
#'Zinc' : (MAX_SIZE - 79 , 0),
#'Gallium' : (MAX_SIZE - 78 , 0),
#'Germanium' : (MAX_SIZE - 77 , 0),
#'Arsenic' : (MAX_SIZE - 76 , 0),
#'Selenium' : (MAX_SIZE - 75 , 0),
#'Bromine' : (MAX_SIZE - 74 , 0),
#'Krypton' : (MAX_SIZE - 73 , 0),
#'Rubidium' : (MAX_SIZE - 72 , 0),
#'Strontium' : (MAX_SIZE - 71 , 0),
#'Yttrium' : (MAX_SIZE - 70 , 0),
#'Zirconium' : (MAX_SIZE - 69 , 0),
#'Niobium' : (MAX_SIZE - 68 , 0),
#'Molybdenum' : (MAX_SIZE - 67 , 0),
#'Technetium' : (MAX_SIZE - 66 , 0),
#'Ruthenium' : (MAX_SIZE - 65 , 0),
#'Rhodium' : (MAX_SIZE - 64 , 0),
#'Palladium' : (MAX_SIZE - 63 , 0),
#'Silver' : (MAX_SIZE - 62 , 0),
#'Cadmium' : (MAX_SIZE - 61 , 0),
#'Indium' : (MAX_SIZE - 60 , 0),
#'Tin' : (MAX_SIZE - 59 , 0),
#'Antimony' : (MAX_SIZE - 58 , 0),
#'Tellurium' : (MAX_SIZE - 57 , 0),
#'Iodine' : (MAX_SIZE - 56 , 0),
#'Xenon' : (MAX_SIZE - 55 , 0),
#'Caesium' : (MAX_SIZE - 54 , 0),
#'Barium' : (MAX_SIZE - 53 , 0),
#'Lanthanum' : (MAX_SIZE - 52 , 0),
#'Cerium' : (MAX_SIZE - 51 , 0),
#'Praseodymium' : (MAX_SIZE - 50 , 0),
#'Neodymium' : (MAX_SIZE - 49 , 0),
#'Promethium' : (MAX_SIZE - 48 , 0),
#'Samarium' : (MAX_SIZE - 47 , 0),
#'Europium' : (MAX_SIZE - 46 , 0),
#'Gadolinium' : (MAX_SIZE - 45 , 0),
#'Terbium' : (MAX_SIZE - 44 , 0),
#'Dysprosium' : (MAX_SIZE - 43 , 0),
#'Holmium' : (MAX_SIZE - 42 , 0),
#'Erbium' : (MAX_SIZE - 41 , 0),
#'Thulium' : (MAX_SIZE - 40 , 0),
#'Ytterbium' : (MAX_SIZE - 39 , 0),
#'Lutetium' : (MAX_SIZE - 38 , 0),
#'Hafnium' : (MAX_SIZE - 37 , 0),
#'Tantalum' : (MAX_SIZE - 36 , 0),
#'Tungsten' : (MAX_SIZE - 35 , 0),
#'Rhenium' : (MAX_SIZE - 34 , 0),
#'Osmium' : (MAX_SIZE - 33 , 0),
#'Iridium' : (MAX_SIZE - 32 , 0),
#'Platinum' : (MAX_SIZE - 31 , 0),
#'Gold' : (MAX_SIZE - 30 , 0),
#'Mercury' : (MAX_SIZE - 29 , 0),
#'Thallium' : (MAX_SIZE - 28 , 0),
#'Lead' : (MAX_SIZE - 27 , 0),
#'Bismuth' : (MAX_SIZE - 26 , 0),
#'Polonium' : (MAX_SIZE - 25 , 0),
#'Astatine' : (MAX_SIZE - 24 , 0),
#'Radon' : (MAX_SIZE - 23 , 0),
#'Francium' : (MAX_SIZE - 22 , 0),
#'Radium' : (MAX_SIZE - 21 , 0),
#'Actinium' : (MAX_SIZE - 20 , 0),
#'Thorium' : (MAX_SIZE - 19 , 0),
#'Protactinium' : (MAX_SIZE - 18 , 0),
#'Uranium' : (MAX_SIZE - 17 , 0),
#'Neptunium' : (MAX_SIZE - 16 , 0),
#'Plutonium' : (MAX_SIZE - 15 , 0),
#'Americium' : (MAX_SIZE - 14 , 0),
#'Curium' : (MAX_SIZE - 13 , 0),
#'Berkelium' : (MAX_SIZE - 12 , 0),
#'Californium' : (MAX_SIZE - 11 , 0),
#'Einsteinium' : (MAX_SIZE - 10 , 0),
#'Fermium' : (MAX_SIZE - 9 , 0),
#'Mendelevium' : (MAX_SIZE - 8 , 0),
#'Nobelium' : (MAX_SIZE - 7 , 0),
#'Lawrencium' : (MAX_SIZE - 6 , 0),
#'Rutherfordium': (MAX_SIZE - 5 , 0),
#'Dubnium' : (MAX_SIZE - 4 , 0),
#'Seaborgium' : (MAX_SIZE - 3 , 0),
#'Bohrium' : (MAX_SIZE - 2 , 0),
#'Hassium' : (MAX_SIZE - 1 , 0),
#'Meitnerium' : (MAX_SIZE - 0 , 0),
# <---- Please refrain from fixing PEP-8 E203 and E265 ------
# pylint: enable=E8203,E8265
}
LNAMES = dict((k.lower(), v) for (k, v) in iter(NAMES.items()))
VNAMES = dict((v, k) for (k, v) in iter(NAMES.items()))
RMATCH = dict((v[:2], k) for (k, v) in iter(NAMES.items()))
def __init__(self, # pylint: disable=C0103
major,
minor,
bugfix=0,
mbugfix=0,
rc=0, # pylint: disable=C0103
noc=0,
sha=None):
if isinstance(major, string_types):
major = int(major)
if isinstance(minor, string_types):
minor = int(minor)
if bugfix is None:
bugfix = 0
elif isinstance(bugfix, string_types):
bugfix = int(bugfix)
if mbugfix is None:
mbugfix = 0
elif isinstance(mbugfix, string_types):
mbugfix = int(mbugfix)
if rc is None:
rc = 0
elif isinstance(rc, string_types):
rc = int(rc)
if noc is None:
noc = 0
elif isinstance(noc, string_types) and noc == 'n/a':
noc = -1
elif isinstance(noc, string_types):
noc = int(noc)
self.major = major
self.minor = minor
self.bugfix = bugfix
self.mbugfix = mbugfix
self.rc = rc # pylint: disable=C0103
self.name = self.VNAMES.get((major, minor), None)
self.noc = noc
self.sha = sha
@classmethod
def parse(cls, version_string):
if version_string.lower() in cls.LNAMES:
return cls.from_name(version_string)
s = version_string.decode() if isinstance(version_string, bytes) else version_string
match = cls.git_describe_regex.match(s)
if not match:
raise ValueError(
'Unable to parse version string: \'{0}\''.format(version_string)
)
return cls(*match.groups())
@classmethod
def from_name(cls, name):
if name.lower() not in cls.LNAMES:
raise ValueError(
'Named version \'{0}\' is not known'.format(name)
)
return cls(*cls.LNAMES[name.lower()])
@classmethod
def from_last_named_version(cls):
return cls.from_name(
cls.VNAMES[
max([version_info for version_info in
cls.VNAMES if
version_info[0] < (MAX_SIZE - 200)])
]
)
@property
def sse(self):
# Higher than 0.17, lower than first date based
return 0 < self.major < 2014
@property
def info(self):
return (
self.major,
self.minor,
self.bugfix,
self.mbugfix
)
@property
def rc_info(self):
return (
self.major,
self.minor,
self.bugfix,
self.mbugfix,
self.rc
)
@property
def noc_info(self):
return (
self.major,
self.minor,
self.bugfix,
self.mbugfix,
self.rc,
self.noc
)
@property
def full_info(self):
return (
self.major,
self.minor,
self.bugfix,
self.mbugfix,
self.rc,
self.noc,
self.sha
)
@property
def string(self):
version_string = '{0}.{1}.{2}'.format(
self.major,
self.minor,
self.bugfix
)
if self.mbugfix:
version_string += '.{0}'.format(self.mbugfix)
if self.rc:
version_string += 'rc{0}'.format(self.rc)
if self.noc and self.sha:
noc = self.noc
if noc < 0:
noc = 'n/a'
version_string += '-{0}-{1}'.format(noc, self.sha)
return version_string
@property
def formatted_version(self):
if self.name and self.major > 10000:
version_string = self.name
if self.sse:
version_string += ' Enterprise'
version_string += ' (Unreleased)'
return version_string
version_string = self.string
if self.sse:
version_string += ' Enterprise'
if (self.major, self.minor) in self.RMATCH:
version_string += ' ({0})'.format(self.RMATCH[(self.major, self.minor)])
return version_string
def __str__(self):
return self.string
def __compare__(self, other, method):
if not isinstance(other, SaltStackVersion):
if isinstance(other, string_types):
other = SaltStackVersion.parse(other)
elif isinstance(other, (list, tuple)):
other = SaltStackVersion(*other)
else:
raise ValueError(
'Cannot instantiate Version from type \'{0}\''.format(
type(other)
)
)
if (self.rc and other.rc) or (not self.rc and not other.rc):
# Both are release candidates or both are final releases, so a regular
# compare of the full version tuples is ok
return method(self.noc_info, other.noc_info)
# RCs always sort lower than the corresponding non-RC release
if self.rc > 0 and other.rc <= 0:
noc_info = list(self.noc_info)
# noc_info is (major, minor, bugfix, mbugfix, rc, noc); the rc field sits
# at index 4, so that is the one lowered to rank the RC below the release
noc_info[4] = -1
return method(tuple(noc_info), other.noc_info)
if self.rc <= 0 and other.rc > 0:
other_noc_info = list(other.noc_info)
other_noc_info[4] = -1
return method(self.noc_info, tuple(other_noc_info))
def __lt__(self, other):
return self.__compare__(other, lambda _self, _other: _self < _other)
def __le__(self, other):
return self.__compare__(other, lambda _self, _other: _self <= _other)
def __eq__(self, other):
return self.__compare__(other, lambda _self, _other: _self == _other)
def __ne__(self, other):
return self.__compare__(other, lambda _self, _other: _self != _other)
def __ge__(self, other):
return self.__compare__(other, lambda _self, _other: _self >= _other)
def __gt__(self, other):
return self.__compare__(other, lambda _self, _other: _self > _other)
def __repr__(self):
parts = []
if self.name:
parts.append('name=\'{0}\''.format(self.name))
parts.extend([
'major={0}'.format(self.major),
'minor={0}'.format(self.minor),
'bugfix={0}'.format(self.bugfix)
])
if self.mbugfix:
parts.append('minor-bugfix={0}'.format(self.mbugfix))
if self.rc:
parts.append('rc={0}'.format(self.rc))
noc = self.noc
if noc == -1:
noc = 'n/a'
if noc and self.sha:
parts.extend([
'noc={0}'.format(noc),
'sha={0}'.format(self.sha)
])
return '<{0} {1}>'.format(self.__class__.__name__, ' '.join(parts))
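# ----- Illustrative parsing/comparison sketch (added example, not Salt code) ---------------------------------------->
# A small, hedged demonstration of the class above; the tag strings are made
# up and only show how ``git describe`` output is parsed and how release
# candidates compare against final releases. Nothing here runs on import.
def _saltstack_version_example():
    described = SaltStackVersion.parse('v2015.8.3-21-g1a2b3c4')
    assert described.info == (2015, 8, 3, 0)
    # An rc sorts below the final release of the same version.
    assert SaltStackVersion.parse('2015.8.3rc1') < SaltStackVersion.parse('2015.8.3')
    return described.string
# <---- Illustrative parsing/comparison sketch -------------------------------------------------------------------------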
# ----- Hardcoded Salt Codename Version Information ----------------------------------------------------------------->
#
# There's no need to do anything here. The last released codename will be picked up
# --------------------------------------------------------------------------------------------------------------------
__saltstack_version__ = SaltStackVersion.from_last_named_version()
# <---- Hardcoded Salt Version Information ---------------------------------------------------------------------------
# ----- Dynamic/Runtime Salt Version Information -------------------------------------------------------------------->
def __discover_version(saltstack_version):
# This might be a 'python setup.py develop' installation type. Let's
# discover the version information at runtime.
import os
import subprocess
if 'SETUP_DIRNAME' in globals():
# This is from the exec() call in Salt's setup.py
cwd = SETUP_DIRNAME # pylint: disable=E0602
if not os.path.exists(os.path.join(cwd, '.git')):
# This is not a Salt git checkout!!! Don't even try to parse...
return saltstack_version
else:
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(os.path.dirname(cwd), '.git')):
# This is not a Salt git checkout!!! Don't even try to parse...
return saltstack_version
try:
kwargs = dict(
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd
)
if not sys.platform.startswith('win'):
# Let's not import `salt.utils` for the above check
kwargs['close_fds'] = True
process = subprocess.Popen(
['git', 'describe', '--tags', '--first-parent', '--match', 'v[0-9]*', '--always'], **kwargs)
out, err = process.communicate()
if process.returncode != 0:
# The git version running this might not support --first-parent
# Revert to old command
process = subprocess.Popen(
['git', 'describe', '--tags', '--match', 'v[0-9]*', '--always'], **kwargs)
out, err = process.communicate()
out = out.strip()
err = err.strip()
if not out or err:
return saltstack_version
try:
return SaltStackVersion.parse(out)
except ValueError:
if not SaltStackVersion.git_sha_regex.match(out):
raise
# We only define the parsed SHA and set NOC to -1 (unknown)
saltstack_version.sha = out.strip()
saltstack_version.noc = -1
except OSError as os_err:
if os_err.errno != 2:
# If the errno is not 2 (the system cannot find the file
# specified), re-raise the exception so it can be caught by the
# developers
raise
return saltstack_version
def __get_version(saltstack_version):
'''
If we can get a version provided at installation time or from Git, use
that instead, otherwise we carry on.
'''
try:
# Try to import the version information provided at install time
from salt._version import __saltstack_version__ # pylint: disable=E0611,F0401
return __saltstack_version__
except ImportError:
return __discover_version(saltstack_version)
# Get additional version information if available
__saltstack_version__ = __get_version(__saltstack_version__)
# This function has executed once, we're done with it. Delete it!
del __get_version
# <---- Dynamic/Runtime Salt Version Information ---------------------------------------------------------------------
# ----- Common version related attributes - NO NEED TO CHANGE ------------------------------------------------------->
__version_info__ = __saltstack_version__.info
__version__ = __saltstack_version__.string
# <---- Common version related attributes - NO NEED TO CHANGE --------------------------------------------------------
def salt_information():
'''
Report version of salt.
'''
yield 'Salt', __version__
def dependency_information(include_salt_cloud=False):
'''
Report versions of library dependencies.
'''
libs = [
('Python', None, sys.version.rsplit('\n')[0].strip()),
('Jinja2', 'jinja2', '__version__'),
('M2Crypto', 'M2Crypto', 'version'),
('msgpack-python', 'msgpack', 'version'),
('msgpack-pure', 'msgpack_pure', 'version'),
('pycrypto', 'Crypto', '__version__'),
('libnacl', 'libnacl', '__version__'),
('PyYAML', 'yaml', '__version__'),
('ioflo', 'ioflo', '__version__'),
('PyZMQ', 'zmq', '__version__'),
('RAET', 'raet', '__version__'),
('ZMQ', 'zmq', 'zmq_version'),
('Mako', 'mako', '__version__'),
('Tornado', 'tornado', 'version'),
('timelib', 'timelib', 'version'),
('dateutil', 'dateutil', '__version__'),
('pygit2', 'pygit2', '__version__'),
('libgit2', 'pygit2', 'LIBGIT2_VERSION'),
('smmap', 'smmap', '__version__'),
('cffi', 'cffi', '__version__'),
('pycparser', 'pycparser', '__version__'),
('gitdb', 'gitdb', '__version__'),
('gitpython', 'git', '__version__'),
('python-gnupg', 'gnupg', '__version__'),
('mysql-python', 'MySQLdb', '__version__'),
('cherrypy', 'cherrypy', '__version__'),
]
if include_salt_cloud:
libs.append(
('Apache Libcloud', 'libcloud', '__version__'),
)
for name, imp, attr in libs:
if imp is None:
yield name, attr
continue
try:
imp = __import__(imp)
version = getattr(imp, attr)
if callable(version):
version = version()
if isinstance(version, (tuple, list)):
version = '.'.join(map(str, version))
yield name, version
except Exception:
yield name, None
def system_information():
'''
Report system versions.
'''
def system_version():
'''
Return host system version.
'''
lin_ver = platform.linux_distribution()
mac_ver = platform.mac_ver()
win_ver = platform.win32_ver()
if lin_ver[0]:
return ' '.join(lin_ver)
elif mac_ver[0]:
if isinstance(mac_ver[1], (tuple, list)) and ''.join(mac_ver[1]):
return ' '.join([mac_ver[0], '.'.join(mac_ver[1]), mac_ver[2]])
else:
return ' '.join([mac_ver[0], mac_ver[2]])
elif win_ver[0]:
return ' '.join(win_ver)
else:
return ''
system = [
('system', platform.system()),
('dist', ' '.join(platform.dist())),
('release', platform.release()),
('machine', platform.machine()),
('version', system_version()),
]
for name, attr in system:
yield name, attr
def versions_information(include_salt_cloud=False):
'''
Report the versions of dependent software.
'''
salt_info = list(salt_information())
lib_info = list(dependency_information(include_salt_cloud))
sys_info = list(system_information())
return {'Salt Version': dict(salt_info),
'Dependency Versions': dict(lib_info),
'System Versions': dict(sys_info)}
def versions_report(include_salt_cloud=False):
'''
Yield each version properly formatted for console output.
'''
ver_info = versions_information(include_salt_cloud)
lib_pad = max(len(name) for name in ver_info['Dependency Versions'])
sys_pad = max(len(name) for name in ver_info['System Versions'])
padding = max(lib_pad, sys_pad) + 1
fmt = '{0:>{pad}}: {1}'
info = []
for ver_type in ('Salt Version', 'Dependency Versions', 'System Versions'):
info.append('{0}:'.format(ver_type))
# List dependencies in alphabetical, case insensitive order
for name in sorted(ver_info[ver_type], key=lambda x: x.lower()):
ver = fmt.format(name,
ver_info[ver_type][name] or 'Not Installed',
pad=padding)
info.append(ver)
info.append(' ')
for line in info:
yield line
if __name__ == '__main__':
print(__version__)
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides an interface for working with multiple event files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.summary import event_accumulator
from tensorflow.python.summary.impl import io_wrapper
class EventMultiplexer(object):
"""An `EventMultiplexer` manages access to multiple `EventAccumulator`s.
Each `EventAccumulator` is associated with a `run`, which is a self-contained
TensorFlow execution. The `EventMultiplexer` provides methods for extracting
information about events from multiple `run`s.
Example usage for loading specific runs from files:
```python
x = EventMultiplexer({'run1': 'path/to/run1', 'run2': 'path/to/run2'})
x.Reload()
```
Example usage for loading a directory where each subdirectory is a run
```python
(eg:) /parent/directory/path/
/parent/directory/path/run1/
/parent/directory/path/run1/events.out.tfevents.1001
/parent/directory/path/run1/events.out.tfevents.1002
/parent/directory/path/run2/
/parent/directory/path/run2/events.out.tfevents.9232
/parent/directory/path/run3/
/parent/directory/path/run3/events.out.tfevents.9232
x = EventMultiplexer().AddRunsFromDirectory('/parent/directory/path')
(which is equivalent to:)
x = EventMultiplexer({'run1': '/parent/directory/path/run1', 'run2':...}
```
If you would like to watch `/parent/directory/path`, wait for it to be created
(if necessary) and then periodically pick up new runs, use
`AutoloadingMultiplexer`
@@__init__
@@AddRun
@@AddRunsFromDirectory
@@Reload
@@Runs
@@Scalars
@@Graph
@@Histograms
@@CompressedHistograms
@@Images
@@Audio
"""
def __init__(self,
run_path_map=None,
size_guidance=event_accumulator.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True):
"""Constructor for the `EventMultiplexer`.
Args:
run_path_map: Dict `{run: path}` which specifies the
name of a run, and the path to find the associated events. If it is
None, then the EventMultiplexer initializes without any runs.
size_guidance: A dictionary mapping from `tagType` to the number of items
to store for each tag of that type. See
`event_accumulator.EventAccumulator` for details.
purge_orphaned_data: Whether to discard any events that were "orphaned" by
a TensorFlow restart.
"""
self._accumulators_mutex = threading.Lock()
self._accumulators = {}
self._paths = {}
self._reload_called = False
self._size_guidance = size_guidance
self.purge_orphaned_data = purge_orphaned_data
if run_path_map is not None:
for (run, path) in six.iteritems(run_path_map):
self.AddRun(path, run)
def AddRun(self, path, name=None):
"""Add a run to the multiplexer.
If the name is not specified, it is the same as the path.
If a run by that name exists, and we are already watching the right path,
do nothing. If we are watching a different path, replace the event
accumulator.
If `Reload` has been called, it will `Reload` the newly created
accumulators. This maintains the invariant that once the Multiplexer was
activated, all of its accumulators are active.
Args:
path: Path to the event files (or event directory) for given run.
name: Name of the run to add. If not provided, is set to path.
Returns:
The `EventMultiplexer`.
"""
if name is None or name == '':
name = path
accumulator = None
with self._accumulators_mutex:
if name not in self._accumulators or self._paths[name] != path:
if name in self._paths and self._paths[name] != path:
# TODO(danmane) - Make it impossible to overwrite an old path with
# a new path (just give the new path a distinct name)
logging.warning('Conflict for name %s: old path %s, new path %s',
name, self._paths[name], path)
logging.info('Constructing EventAccumulator for %s', path)
accumulator = event_accumulator.EventAccumulator(
path,
size_guidance=self._size_guidance,
purge_orphaned_data=self.purge_orphaned_data)
self._accumulators[name] = accumulator
self._paths[name] = path
if accumulator:
if self._reload_called:
accumulator.Reload()
return self
def AddRunsFromDirectory(self, path, name=None):
"""Load runs from a directory; recursively walks subdirectories.
If path doesn't exist, no-op. This ensures that it is safe to call
`AddRunsFromDirectory` multiple times, even before the directory is made.
If path is a directory, load event files in the directory (if any exist) and
recursively call AddRunsFromDirectory on any subdirectories. This means you
can call AddRunsFromDirectory at the root of a tree of event logs and
TensorBoard will load them all.
If the `EventMultiplexer` is already loaded this will cause
the newly created accumulators to `Reload()`.
Args:
path: A string path to a directory to load runs from.
name: Optionally, what name to apply to the runs. If name is provided
and the directory contains run subdirectories, the name of each subrun
is the concatenation of the parent name and the subdirectory name. If
name is provided and the directory contains event files, then a run
called `name` is added, containing the events from that path.
Raises:
ValueError: If the path exists and isn't a directory.
Returns:
The `EventMultiplexer`.
"""
if io_wrapper.Exists(path) and not io_wrapper.IsDirectory(path):
raise ValueError('AddRunsFromDirectory: path exists and is not a '
'directory, %s' % path)
# ListRecursively just yields nothing if the path doesn't exist.
subdirs = [
subdir
for (subdir, files) in io_wrapper.ListRecursively(path)
if list(filter(event_accumulator.IsTensorFlowEventsFile, files))
]
for subdir in subdirs:
logging.info('Adding events from directory %s', subdir)
rpath = os.path.relpath(subdir, path)
subname = os.path.join(name, rpath) if name else rpath
self.AddRun(subdir, name=subname)
return self
def Reload(self):
"""Call `Reload` on every `EventAccumulator`."""
self._reload_called = True
with self._accumulators_mutex:
loaders = list(self._accumulators.values())
for l in loaders:
l.Reload()
return self
def Scalars(self, run, tag):
"""Retrieve the scalar events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
RuntimeError: If the run's `EventAccumulator` has not been activated.
Returns:
An array of `event_accumulator.ScalarEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Scalars(tag)
def Graph(self, run):
"""Retrieve the graph associated with the provided run.
Args:
run: A string name of a run to load the graph for.
Raises:
KeyError: If the run is not found.
ValueError: If the run does not have an associated graph.
RuntimeError: If the run's EventAccumulator has not been activated.
Returns:
The `graph_def` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Graph()
def RunMetadata(self, run, tag):
"""Get the session.run() metadata associated with a TensorFlow run and tag.
Args:
run: A string name of a TensorFlow run.
tag: A string name of the tag associated with a particular session.run().
Raises:
KeyError: If the run is not found, or the tag is not available for the
given run.
RuntimeError: If the run's EventAccumulator has not been activated.
Returns:
The metadata in the form of `RunMetadata` protobuf data structure.
"""
accumulator = self._GetAccumulator(run)
return accumulator.RunMetadata(tag)
def Histograms(self, run, tag):
"""Retrieve the histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
RuntimeError: If the run's `EventAccumulator` has not been activated.
Returns:
An array of `event_accumulator.HistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Histograms(tag)
def CompressedHistograms(self, run, tag):
"""Retrieve the compressed histogram events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
RuntimeError: If the run's EventAccumulator has not been activated.
Returns:
An array of `event_accumulator.CompressedHistogramEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.CompressedHistograms(tag)
def Images(self, run, tag):
"""Retrieve the image events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
RuntimeError: If the run's `EventAccumulator` has not been activated.
Returns:
An array of `event_accumulator.ImageEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Images(tag)
def Audio(self, run, tag):
"""Retrieve the audio events associated with a run and tag.
Args:
run: A string name of the run for which values are retrieved.
tag: A string name of the tag for which values are retrieved.
Raises:
KeyError: If the run is not found, or the tag is not available for
the given run.
RuntimeError: If the run's `EventAccumulator` has not been activated.
Returns:
An array of `event_accumulator.AudioEvents`.
"""
accumulator = self._GetAccumulator(run)
return accumulator.Audio(tag)
def Runs(self):
"""Return all the run names in the `EventMultiplexer`.
Returns:
```
{runName: { images: [tag1, tag2, tag3],
scalarValues: [tagA, tagB, tagC],
histograms: [tagX, tagY, tagZ],
compressedHistograms: [tagX, tagY, tagZ],
graph: true}}
```
"""
with self._accumulators_mutex:
# To avoid nested locks, we construct a copy of the run-accumulator map
items = list(six.iteritems(self._accumulators))
return {run_name: accumulator.Tags() for run_name, accumulator in items}
def _GetAccumulator(self, run):
with self._accumulators_mutex:
return self._accumulators[run]
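# An added, hedged usage sketch (not part of TensorFlow): it mirrors the class
# docstring example above, loading every run found under a hypothetical log
# directory and listing the tags available per run. Nothing here is executed
# on import.
def _example_event_multiplexer_usage(logdir='/tmp/tensorboard_logs'):
  multiplexer = EventMultiplexer()
  multiplexer.AddRunsFromDirectory(logdir)
  multiplexer.Reload()
  # Maps each run name to the tags it provides (scalars, histograms, ...).
  return multiplexer.Runs()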
|
|
# Copyright 2011 Denali Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute import volumes as volumes_v21
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.volume import cinder
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
class SnapshotApiTestV21(test.NoDBTestCase):
controller = volumes_v21.SnapshotController()
validation_error = exception.ValidationError
def setUp(self):
super(SnapshotApiTestV21, self).setUp()
fakes.stub_out_networking(self)
self.stub_out("nova.volume.cinder.API.create_snapshot",
fakes.stub_snapshot_create)
self.stub_out("nova.volume.cinder.API.create_snapshot_force",
fakes.stub_snapshot_create)
self.stub_out("nova.volume.cinder.API.delete_snapshot",
fakes.stub_snapshot_delete)
self.stub_out("nova.volume.cinder.API.get_snapshot",
fakes.stub_snapshot_get)
self.stub_out("nova.volume.cinder.API.get_all_snapshots",
fakes.stub_snapshot_get_all)
self.stub_out("nova.volume.cinder.API.get", fakes.stub_volume_get)
self.req = fakes.HTTPRequest.blank('')
def _test_snapshot_create(self, force):
snapshot = {"volume_id": '12',
"force": force,
"display_name": "Snapshot Test Name",
"display_description": "Snapshot Test Desc"}
body = dict(snapshot=snapshot)
resp_dict = self.controller.create(self.req, body=body)
self.assertIn('snapshot', resp_dict)
self.assertEqual(snapshot['display_name'],
resp_dict['snapshot']['displayName'])
self.assertEqual(snapshot['display_description'],
resp_dict['snapshot']['displayDescription'])
self.assertEqual(snapshot['volume_id'],
resp_dict['snapshot']['volumeId'])
def test_snapshot_create(self):
self._test_snapshot_create(False)
def test_snapshot_create_force(self):
self._test_snapshot_create(True)
def test_snapshot_create_invalid_force_param(self):
body = {'snapshot': {'volume_id': '1',
'force': '**&&^^%%$$##@@'}}
self.assertRaises(self.validation_error,
self.controller.create, self.req, body=body)
def test_snapshot_delete(self):
snapshot_id = '123'
delete = self.controller.delete
result = delete(self.req, snapshot_id)
# NOTE: on v2.1, http status code is set as wsgi_code of API
# method instead of status_int in a response object.
if isinstance(self.controller, volumes_v21.SnapshotController):
status_int = delete.wsgi_code
else:
status_int = result.status_int
self.assertEqual(202, status_int)
@mock.patch.object(cinder.API, 'delete_snapshot',
side_effect=exception.SnapshotNotFound(snapshot_id=FAKE_UUID))
def test_delete_snapshot_not_exists(self, mock_mr):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
self.req, FAKE_UUID)
def test_snapshot_delete_invalid_id(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
self.req, '-1')
def test_snapshot_show(self):
snapshot_id = '123'
resp_dict = self.controller.show(self.req, snapshot_id)
self.assertIn('snapshot', resp_dict)
self.assertEqual(str(snapshot_id), resp_dict['snapshot']['id'])
def test_snapshot_show_invalid_id(self):
self.assertRaises(webob.exc.HTTPNotFound, self.controller.show,
self.req, '-1')
def test_snapshot_detail(self):
resp_dict = self.controller.detail(self.req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(3, len(resp_snapshots))
resp_snapshot = resp_snapshots.pop()
self.assertEqual(102, resp_snapshot['id'])
def test_snapshot_detail_offset_and_limit(self):
path = ('/v2/%s/os-snapshots/detail?offset=1&limit=1' %
fakes.FAKE_PROJECT_ID)
req = fakes.HTTPRequest.blank(path)
resp_dict = self.controller.detail(req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(1, len(resp_snapshots))
resp_snapshot = resp_snapshots.pop()
self.assertEqual(101, resp_snapshot['id'])
def test_snapshot_index(self):
resp_dict = self.controller.index(self.req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(3, len(resp_snapshots))
def test_snapshot_index_offset_and_limit(self):
path = ('/v2/%s/os-snapshots?offset=1&limit=1' %
fakes.FAKE_PROJECT_ID)
req = fakes.HTTPRequest.blank(path)
resp_dict = self.controller.index(req)
self.assertIn('snapshots', resp_dict)
resp_snapshots = resp_dict['snapshots']
self.assertEqual(1, len(resp_snapshots))
def _test_list_with_invalid_filter(self, url):
prefix = '/os-snapshots'
req = fakes.HTTPRequest.blank(prefix + url)
controller_list = self.controller.index
if 'detail' in url:
controller_list = self.controller.detail
self.assertRaises(exception.ValidationError,
controller_list, req)
def test_list_with_invalid_non_int_limit(self):
self._test_list_with_invalid_filter('?limit=-9')
def test_list_with_invalid_string_limit(self):
self._test_list_with_invalid_filter('?limit=abc')
def test_list_duplicate_query_with_invalid_string_limit(self):
self._test_list_with_invalid_filter(
'?limit=1&limit=abc')
def test_detail_list_with_invalid_non_int_limit(self):
self._test_list_with_invalid_filter('/detail?limit=-9')
def test_detail_list_with_invalid_string_limit(self):
self._test_list_with_invalid_filter('/detail?limit=abc')
def test_detail_list_duplicate_query_with_invalid_string_limit(self):
self._test_list_with_invalid_filter(
'/detail?limit=1&limit=abc')
def test_list_with_invalid_non_int_offset(self):
self._test_list_with_invalid_filter('?offset=-9')
def test_list_with_invalid_string_offset(self):
self._test_list_with_invalid_filter('?offset=abc')
def test_list_duplicate_query_with_invalid_string_offset(self):
self._test_list_with_invalid_filter(
'?offset=1&offset=abc')
def test_detail_list_with_invalid_non_int_offset(self):
self._test_list_with_invalid_filter('/detail?offset=-9')
def test_detail_list_with_invalid_string_offset(self):
self._test_list_with_invalid_filter('/detail?offset=abc')
def test_detail_list_duplicate_query_with_invalid_string_offset(self):
self._test_list_with_invalid_filter(
'/detail?offset=1&offset=abc')
def _test_list_duplicate_query_parameters_validation(self, url):
params = {
'limit': 1,
'offset': 1
}
controller_list = self.controller.index
if 'detail' in url:
controller_list = self.controller.detail
for param, value in params.items():
req = fakes.HTTPRequest.blank(
url + '?%s=%s&%s=%s' %
(param, value, param, value))
controller_list(req)
def test_list_duplicate_query_parameters_validation(self):
self._test_list_duplicate_query_parameters_validation('/os-snapshots')
def test_detail_list_duplicate_query_parameters_validation(self):
self._test_list_duplicate_query_parameters_validation(
'/os-snapshots/detail')
def test_list_with_additional_filter(self):
req = fakes.HTTPRequest.blank(
'/os-snapshots?limit=1&offset=1&additional=something')
self.controller.index(req)
def test_detail_list_with_additional_filter(self):
req = fakes.HTTPRequest.blank(
'/os-snapshots/detail?limit=1&offset=1&additional=something')
self.controller.detail(req)
class TestSnapshotAPIDeprecation(test.NoDBTestCase):
def setUp(self):
super(TestSnapshotAPIDeprecation, self).setUp()
self.controller = volumes_v21.SnapshotController()
self.req = fakes.HTTPRequest.blank('', version='2.36')
def test_all_apis_return_not_found(self):
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.show, self.req, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.delete, self.req, fakes.FAKE_UUID)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.index, self.req)
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.create, self.req, {})
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.detail, self.req)
|
|
"""Schemas used by insteon component."""
from __future__ import annotations
from binascii import Error as HexError, unhexlify
from pyinsteon.address import Address
from pyinsteon.constants import HC_LOOKUP
import voluptuous as vol
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_ENTITY_ID,
CONF_HOST,
CONF_PASSWORD,
CONF_PLATFORM,
CONF_PORT,
CONF_USERNAME,
ENTITY_MATCH_ALL,
)
import homeassistant.helpers.config_validation as cv
from .const import (
CONF_CAT,
CONF_DIM_STEPS,
CONF_FIRMWARE,
CONF_HOUSECODE,
CONF_HUB_PASSWORD,
CONF_HUB_USERNAME,
CONF_HUB_VERSION,
CONF_IP_PORT,
CONF_OVERRIDE,
CONF_PLM_HUB_MSG,
CONF_PRODUCT_KEY,
CONF_SUBCAT,
CONF_UNITCODE,
CONF_X10,
CONF_X10_ALL_LIGHTS_OFF,
CONF_X10_ALL_LIGHTS_ON,
CONF_X10_ALL_UNITS_OFF,
DOMAIN,
HOUSECODES,
INSTEON_ADDR_REGEX,
PORT_HUB_V1,
PORT_HUB_V2,
SRV_ALL_LINK_GROUP,
SRV_ALL_LINK_MODE,
SRV_CONTROLLER,
SRV_HOUSECODE,
SRV_LOAD_DB_RELOAD,
SRV_RESPONDER,
X10_PLATFORMS,
)
def set_default_port(schema: dict) -> dict:
"""Set the default port based on the Hub version."""
    # If the ip_port is found, do nothing.
    # If it is not found, set the default based on the hub version.
ip_port = schema.get(CONF_IP_PORT)
if not ip_port:
hub_version = schema.get(CONF_HUB_VERSION)
# Found hub_version but not ip_port
schema[CONF_IP_PORT] = PORT_HUB_V1 if hub_version == 1 else PORT_HUB_V2
return schema
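# Hedged usage sketch for set_default_port (the host address is a placeholder;
# the actual port numbers come from PORT_HUB_V1 / PORT_HUB_V2 in .const):
#
#     set_default_port({CONF_HOST: "192.168.1.2", CONF_HUB_VERSION: 1})[CONF_IP_PORT]
#     # -> PORT_HUB_V1
#     set_default_port({CONF_HOST: "192.168.1.2", CONF_HUB_VERSION: 2})[CONF_IP_PORT]
#     # -> PORT_HUB_V2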
def insteon_address(value: str) -> str:
"""Validate an Insteon address."""
if not INSTEON_ADDR_REGEX.match(value):
raise vol.Invalid("Invalid Insteon Address")
return str(value).replace(".", "").lower()
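# Rough sketch, assuming INSTEON_ADDR_REGEX accepts the usual dotted form:
#
#     insteon_address("1A.2B.3C")   # -> "1a2b3c"
#
# A value that does not match the regex raises vol.Invalid.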
CONF_DEVICE_OVERRIDE_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_ADDRESS): cv.string,
vol.Optional(CONF_CAT): cv.byte,
vol.Optional(CONF_SUBCAT): cv.byte,
vol.Optional(CONF_FIRMWARE): cv.byte,
vol.Optional(CONF_PRODUCT_KEY): cv.byte,
vol.Optional(CONF_PLATFORM): cv.string,
}
),
)
CONF_X10_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_HOUSECODE): cv.string,
vol.Required(CONF_UNITCODE): vol.Range(min=1, max=16),
vol.Required(CONF_PLATFORM): cv.string,
vol.Optional(CONF_DIM_STEPS): vol.Range(min=2, max=255),
}
)
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.deprecated(CONF_X10_ALL_UNITS_OFF),
cv.deprecated(CONF_X10_ALL_LIGHTS_ON),
cv.deprecated(CONF_X10_ALL_LIGHTS_OFF),
vol.Schema(
{
vol.Exclusive(
CONF_PORT, "plm_or_hub", msg=CONF_PLM_HUB_MSG
): cv.string,
vol.Exclusive(
CONF_HOST, "plm_or_hub", msg=CONF_PLM_HUB_MSG
): cv.string,
vol.Optional(CONF_IP_PORT): cv.port,
vol.Optional(CONF_HUB_USERNAME): cv.string,
vol.Optional(CONF_HUB_PASSWORD): cv.string,
vol.Optional(CONF_HUB_VERSION, default=2): vol.In([1, 2]),
vol.Optional(CONF_OVERRIDE): vol.All(
cv.ensure_list_csv, [CONF_DEVICE_OVERRIDE_SCHEMA]
),
vol.Optional(CONF_X10): vol.All(
cv.ensure_list_csv, [CONF_X10_SCHEMA]
),
},
extra=vol.ALLOW_EXTRA,
required=True,
),
cv.has_at_least_one_key(CONF_PORT, CONF_HOST),
set_default_port,
)
},
extra=vol.ALLOW_EXTRA,
)
ADD_ALL_LINK_SCHEMA = vol.Schema(
{
vol.Required(SRV_ALL_LINK_GROUP): vol.Range(min=0, max=255),
vol.Required(SRV_ALL_LINK_MODE): vol.In([SRV_CONTROLLER, SRV_RESPONDER]),
}
)
DEL_ALL_LINK_SCHEMA = vol.Schema(
{vol.Required(SRV_ALL_LINK_GROUP): vol.Range(min=0, max=255)}
)
LOAD_ALDB_SCHEMA = vol.Schema(
{
vol.Required(CONF_ENTITY_ID): vol.Any(cv.entity_id, ENTITY_MATCH_ALL),
vol.Optional(SRV_LOAD_DB_RELOAD, default=False): cv.boolean,
}
)
PRINT_ALDB_SCHEMA = vol.Schema({vol.Required(CONF_ENTITY_ID): cv.entity_id})
X10_HOUSECODE_SCHEMA = vol.Schema({vol.Required(SRV_HOUSECODE): vol.In(HOUSECODES)})
TRIGGER_SCENE_SCHEMA = vol.Schema(
{vol.Required(SRV_ALL_LINK_GROUP): vol.Range(min=0, max=255)}
)
ADD_DEFAULT_LINKS_SCHEMA = vol.Schema({vol.Required(CONF_ENTITY_ID): cv.entity_id})
def normalize_byte_entry_to_int(entry: int | bytes | str):
"""Format a hex entry value."""
if isinstance(entry, int):
if entry in range(0, 256):
return entry
raise ValueError("Must be single byte")
if isinstance(entry, str):
if entry[0:2].lower() == "0x":
entry = entry[2:]
if len(entry) != 2:
raise ValueError("Not a valid hex code")
try:
entry = unhexlify(entry)
except HexError as err:
raise ValueError("Not a valid hex code") from err
return int.from_bytes(entry, byteorder="big")
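# Minimal sketch of the entry formats normalize_byte_entry_to_int accepts
# (the values below are arbitrary examples):
#
#     normalize_byte_entry_to_int(0x10)    # -> 16
#     normalize_byte_entry_to_int("0x10")  # -> 16
#     normalize_byte_entry_to_int("10")    # -> 16 (treated as hex)
#
# An int outside 0-255 or a string that is not a two-digit hex code raises ValueError.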
def add_device_override(config_data, new_override):
"""Add a new device override."""
try:
address = str(Address(new_override[CONF_ADDRESS]))
cat = normalize_byte_entry_to_int(new_override[CONF_CAT])
subcat = normalize_byte_entry_to_int(new_override[CONF_SUBCAT])
except ValueError as err:
raise ValueError("Incorrect values") from err
overrides = []
for override in config_data.get(CONF_OVERRIDE, []):
if override[CONF_ADDRESS] != address:
overrides.append(override)
curr_override = {}
curr_override[CONF_ADDRESS] = address
curr_override[CONF_CAT] = cat
curr_override[CONF_SUBCAT] = subcat
overrides.append(curr_override)
new_config = {}
if config_data.get(CONF_X10):
new_config[CONF_X10] = config_data[CONF_X10]
new_config[CONF_OVERRIDE] = overrides
return new_config
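# Hedged sketch of the structure returned by add_device_override (the address
# and category values are made up; the address formatting is whatever
# str(Address(...)) produces):
#
#     add_device_override({}, {CONF_ADDRESS: "1a2b3c", CONF_CAT: "0x02", CONF_SUBCAT: "0x1a"})
#     # -> {CONF_OVERRIDE: [{CONF_ADDRESS: <normalized "1a2b3c">, CONF_CAT: 2, CONF_SUBCAT: 26}]}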
def add_x10_device(config_data, new_x10):
"""Add a new X10 device to X10 device list."""
x10_devices = []
for x10_device in config_data.get(CONF_X10, []):
if (
x10_device[CONF_HOUSECODE] != new_x10[CONF_HOUSECODE]
or x10_device[CONF_UNITCODE] != new_x10[CONF_UNITCODE]
):
x10_devices.append(x10_device)
curr_device = {}
curr_device[CONF_HOUSECODE] = new_x10[CONF_HOUSECODE]
curr_device[CONF_UNITCODE] = new_x10[CONF_UNITCODE]
curr_device[CONF_PLATFORM] = new_x10[CONF_PLATFORM]
curr_device[CONF_DIM_STEPS] = new_x10[CONF_DIM_STEPS]
x10_devices.append(curr_device)
new_config = {}
if config_data.get(CONF_OVERRIDE):
new_config[CONF_OVERRIDE] = config_data[CONF_OVERRIDE]
new_config[CONF_X10] = x10_devices
return new_config
def build_device_override_schema(
address=vol.UNDEFINED,
cat=vol.UNDEFINED,
subcat=vol.UNDEFINED,
firmware=vol.UNDEFINED,
):
"""Build the device override schema for config flow."""
return vol.Schema(
{
vol.Required(CONF_ADDRESS, default=address): str,
vol.Optional(CONF_CAT, default=cat): str,
vol.Optional(CONF_SUBCAT, default=subcat): str,
}
)
def build_x10_schema(
housecode=vol.UNDEFINED,
unitcode=vol.UNDEFINED,
platform=vol.UNDEFINED,
dim_steps=22,
):
"""Build the X10 schema for config flow."""
return vol.Schema(
{
vol.Required(CONF_HOUSECODE, default=housecode): vol.In(HC_LOOKUP.keys()),
vol.Required(CONF_UNITCODE, default=unitcode): vol.In(range(1, 17)),
vol.Required(CONF_PLATFORM, default=platform): vol.In(X10_PLATFORMS),
vol.Optional(CONF_DIM_STEPS, default=dim_steps): vol.In(range(1, 255)),
}
)
def build_plm_schema(device=vol.UNDEFINED):
"""Build the PLM schema for config flow."""
return vol.Schema({vol.Required(CONF_DEVICE, default=device): str})
def build_hub_schema(
hub_version,
host=vol.UNDEFINED,
port=vol.UNDEFINED,
username=vol.UNDEFINED,
password=vol.UNDEFINED,
):
"""Build the Hub schema for config flow."""
if port == vol.UNDEFINED:
port = PORT_HUB_V2 if hub_version == 2 else PORT_HUB_V1
schema = {
vol.Required(CONF_HOST, default=host): str,
vol.Required(CONF_PORT, default=port): int,
}
if hub_version == 2:
schema[vol.Required(CONF_USERNAME, default=username)] = str
schema[vol.Required(CONF_PASSWORD, default=password)] = str
return vol.Schema(schema)
def build_remove_override_schema(data):
"""Build the schema to remove device overrides in config flow options."""
selection = []
for override in data:
selection.append(override[CONF_ADDRESS])
return vol.Schema({vol.Required(CONF_ADDRESS): vol.In(selection)})
def build_remove_x10_schema(data):
"""Build the schema to remove an X10 device in config flow options."""
selection = []
for device in data:
housecode = device[CONF_HOUSECODE].upper()
unitcode = device[CONF_UNITCODE]
selection.append(f"Housecode: {housecode}, Unitcode: {unitcode}")
return vol.Schema({vol.Required(CONF_DEVICE): vol.In(selection)})
def convert_yaml_to_config_flow(yaml_config):
"""Convert the YAML based configuration to a config flow configuration."""
config = {}
if yaml_config.get(CONF_HOST):
hub_version = yaml_config.get(CONF_HUB_VERSION, 2)
default_port = PORT_HUB_V2 if hub_version == 2 else PORT_HUB_V1
config[CONF_HOST] = yaml_config.get(CONF_HOST)
config[CONF_PORT] = yaml_config.get(CONF_PORT, default_port)
config[CONF_HUB_VERSION] = hub_version
if hub_version == 2:
config[CONF_USERNAME] = yaml_config[CONF_USERNAME]
config[CONF_PASSWORD] = yaml_config[CONF_PASSWORD]
else:
config[CONF_DEVICE] = yaml_config[CONF_PORT]
options = {}
for old_override in yaml_config.get(CONF_OVERRIDE, []):
override = {}
override[CONF_ADDRESS] = str(Address(old_override[CONF_ADDRESS]))
override[CONF_CAT] = normalize_byte_entry_to_int(old_override[CONF_CAT])
override[CONF_SUBCAT] = normalize_byte_entry_to_int(old_override[CONF_SUBCAT])
options = add_device_override(options, override)
for x10_device in yaml_config.get(CONF_X10, []):
options = add_x10_device(options, x10_device)
return config, options
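# Rough sketch of the conversion for a Hub v2 YAML setup (host and credentials
# below are placeholders, not defaults):
#
#     yaml_config = {
#         CONF_HOST: "192.168.1.2",
#         CONF_HUB_VERSION: 2,
#         CONF_USERNAME: "user",
#         CONF_PASSWORD: "pass",
#     }
#     config, options = convert_yaml_to_config_flow(yaml_config)
#     # config  -> {CONF_HOST: "192.168.1.2", CONF_PORT: PORT_HUB_V2,
#     #             CONF_HUB_VERSION: 2, CONF_USERNAME: "user", CONF_PASSWORD: "pass"}
#     # options -> {}  (no overrides or X10 devices in the YAML)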
|
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import OrderNotFound
class bitflyer(Exchange):
def describe(self):
return self.deep_extend(super(bitflyer, self).describe(), {
'id': 'bitflyer',
'name': 'bitFlyer',
'countries': ['JP'],
'version': 'v1',
'rateLimit': 1000, # their nonce-timestamp is in seconds...
            'hostname': 'bitflyer.com',  # or bitflyer.jp
'has': {
'CORS': None,
'spot': True,
'margin': False,
'swap': None, # has but not fully implemented
'future': None, # has but not fully implemented
'option': False,
'cancelOrder': True,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': 'emulated',
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOpenOrders': 'emulated',
'fetchOrder': 'emulated',
'fetchOrderBook': True,
'fetchOrders': True,
'fetchPositions': True,
'fetchTicker': True,
'fetchTrades': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28051642-56154182-660e-11e7-9b0d-6042d1e6edd8.jpg',
'api': 'https://api.{hostname}',
'www': 'https://bitflyer.com',
'doc': 'https://lightning.bitflyer.com/docs?lang=en',
},
'api': {
'public': {
'get': [
'getmarkets/usa', # new(wip)
'getmarkets/eu', # new(wip)
'getmarkets', # or 'markets'
'getboard', # ...
'getticker',
'getexecutions',
'gethealth',
'getboardstate',
'getchats',
],
},
'private': {
'get': [
'getpermissions',
'getbalance',
'getbalancehistory',
'getcollateral',
'getcollateralhistory',
'getcollateralaccounts',
'getaddresses',
'getcoinins',
'getcoinouts',
'getbankaccounts',
'getdeposits',
'getwithdrawals',
'getchildorders',
'getparentorders',
'getparentorder',
'getexecutions',
'getpositions',
'gettradingcommission',
],
'post': [
'sendcoin',
'withdraw',
'sendchildorder',
'cancelchildorder',
'sendparentorder',
'cancelparentorder',
'cancelallchildorders',
],
},
},
'fees': {
'trading': {
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
},
},
})
def parse_expiry_date(self, expiry):
day = expiry[0:2]
monthName = expiry[2:5]
year = expiry[5:9]
months = {
'JAN': '01',
'FEB': '02',
'MAR': '03',
'APR': '04',
'MAY': '05',
'JUN': '06',
'JUL': '07',
'AUG': '08',
'SEP': '09',
'OCT': '10',
'NOV': '11',
'DEC': '12',
}
month = self.safe_string(months, monthName)
return self.parse8601(year + '-' + month + '-' + day + 'T00:00:00Z')
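    # Hedged example of the expiry format handled above, based on the futures
    # product codes parsed in fetch_markets (e.g. "BTCJPY11FEB2022"):
    #
    #     self.parse_expiry_date('11FEB2022')
    #     # -> self.parse8601('2022-02-11T00:00:00Z'), a millisecond timestamp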
def fetch_markets(self, params={}):
jp_markets = self.publicGetGetmarkets(params)
#
# [
# # spot
# {"product_code": "BTC_JPY", "market_type": "Spot"},
# {"product_code": "BCH_BTC", "market_type": "Spot"},
# # forex swap
# {"product_code": "FX_BTC_JPY", "market_type": "FX"},
# # future
# {
# "product_code": "BTCJPY11FEB2022",
# "alias": "BTCJPY_MAT1WK",
# "market_type": "Futures",
# },
# ]
#
us_markets = self.publicGetGetmarketsUsa(params)
#
# [
# {"product_code": "BTC_USD", "market_type": "Spot"},
# {"product_code": "BTC_JPY", "market_type": "Spot"},
# ]
#
eu_markets = self.publicGetGetmarketsEu(params)
#
# [
# {"product_code": "BTC_EUR", "market_type": "Spot"},
# {"product_code": "BTC_JPY", "market_type": "Spot"},
# ]
#
markets = self.array_concat(jp_markets, us_markets)
markets = self.array_concat(markets, eu_markets)
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string(market, 'product_code')
currencies = id.split('_')
marketType = self.safe_string(market, 'market_type')
swap = (marketType == 'FX')
future = (marketType == 'Futures')
spot = not swap and not future
type = 'spot'
settle = None
baseId = None
quoteId = None
expiry = None
if spot:
baseId = self.safe_string(currencies, 0)
quoteId = self.safe_string(currencies, 1)
elif swap:
type = 'swap'
baseId = self.safe_string(currencies, 1)
quoteId = self.safe_string(currencies, 2)
elif future:
alias = self.safe_string(market, 'alias')
splitAlias = alias.split('_')
currencyIds = self.safe_string(splitAlias, 0)
baseId = currencyIds[0:-3]
quoteId = currencyIds[-3:]
splitId = id.split(currencyIds)
expiryDate = self.safe_string(splitId, 1)
expiry = self.parse_expiry_date(expiryDate)
type = 'future'
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
taker = self.fees['trading']['taker']
maker = self.fees['trading']['maker']
contract = swap or future
if contract:
maker = 0.0
taker = 0.0
settle = 'JPY'
symbol = symbol + ':' + settle
if future:
symbol = symbol + '-' + self.yymmdd(expiry)
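                    # Illustrative example: product_code "BTCJPY11FEB2022"
                    # ends up as the unified symbol "BTC/JPY:JPY-220211".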
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': type,
'spot': spot,
'margin': False,
'swap': swap,
'future': future,
'option': False,
'active': True,
'contract': contract,
'linear': None if spot else True,
'inverse': None if spot else False,
'taker': taker,
'maker': maker,
'contractSize': None,
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': None,
'optionType': None,
'precision': {
'amount': None,
'price': None,
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
def parse_balance(self, response):
result = {'info': response}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency_code')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'amount')
account['free'] = self.safe_string(balance, 'available')
result[code] = account
return self.safe_balance(result)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privateGetGetbalance(params)
#
# [
# {
# "currency_code": "JPY",
# "amount": 1024078,
# "available": 508000
# },
# {
# "currency_code": "BTC",
# "amount": 10.24,
# "available": 4.12
# },
# {
# "currency_code": "ETH",
# "amount": 20.48,
# "available": 16.38
# }
# ]
#
return self.parse_balance(response)
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
request = {
'product_code': self.market_id(symbol),
}
orderbook = self.publicGetGetboard(self.extend(request, params))
return self.parse_order_book(orderbook, symbol, None, 'bids', 'asks', 'price', 'size')
def parse_ticker(self, ticker, market=None):
symbol = self.safe_symbol(None, market)
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
last = self.safe_string(ticker, 'ltp')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_string(ticker, 'best_bid'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'best_ask'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'volume_by_product'),
'quoteVolume': None,
'info': ticker,
}, market, False)
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'product_code': market['id'],
}
response = self.publicGetGetticker(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public) v1
#
# {
# "id":2278466664,
# "side":"SELL",
# "price":56810.7,
# "size":0.08798,
# "exec_date":"2021-11-19T11:46:39.323",
# "buy_child_order_acceptance_id":"JRF20211119-114209-236525",
# "sell_child_order_acceptance_id":"JRF20211119-114639-236919"
# }
#
# {
# "id":2278463423,
# "side":"BUY",
# "price":56757.83,
# "size":0.6003,"exec_date":"2021-11-19T11:28:00.523",
# "buy_child_order_acceptance_id":"JRF20211119-112800-236526",
# "sell_child_order_acceptance_id":"JRF20211119-112734-062017"
# }
#
#
#
side = self.safe_string_lower(trade, 'side')
if side is not None:
if len(side) < 1:
side = None
order = None
if side is not None:
id = side + '_child_order_acceptance_id'
if id in trade:
order = trade[id]
if order is None:
order = self.safe_string(trade, 'child_order_acceptance_id')
timestamp = self.parse8601(self.safe_string(trade, 'exec_date'))
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string(trade, 'size')
id = self.safe_string(trade, 'id')
market = self.safe_market(None, market)
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': market['symbol'],
'order': order,
'type': None,
'side': side,
'takerOrMaker': None,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': None,
}, market)
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'product_code': market['id'],
}
response = self.publicGetGetexecutions(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
request = {
'product_code': self.market_id(symbol),
'child_order_type': type.upper(),
'side': side.upper(),
'price': price,
'size': amount,
}
result = self.privatePostSendchildorder(self.extend(request, params))
# {"status": - 200, "error_message": "Insufficient funds", "data": null}
id = self.safe_string(result, 'child_order_acceptance_id')
return {
'info': result,
'id': id,
}
def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a `symbol` argument')
self.load_markets()
request = {
'product_code': self.market_id(symbol),
'child_order_acceptance_id': id,
}
return self.privatePostCancelchildorder(self.extend(request, params))
def parse_order_status(self, status):
statuses = {
'ACTIVE': 'open',
'COMPLETED': 'closed',
'CANCELED': 'canceled',
'EXPIRED': 'canceled',
'REJECTED': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
timestamp = self.parse8601(self.safe_string(order, 'child_order_date'))
price = self.safe_string(order, 'price')
amount = self.safe_string(order, 'size')
filled = self.safe_string(order, 'executed_size')
remaining = self.safe_string(order, 'outstanding_size')
status = self.parse_order_status(self.safe_string(order, 'child_order_state'))
type = self.safe_string_lower(order, 'child_order_type')
side = self.safe_string_lower(order, 'side')
marketId = self.safe_string(order, 'product_code')
symbol = self.safe_symbol(marketId, market)
fee = None
feeCost = self.safe_number(order, 'total_commission')
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': None,
'rate': None,
}
id = self.safe_string(order, 'child_order_acceptance_id')
return self.safe_order({
'id': id,
'clientOrderId': None,
'info': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': status,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'cost': None,
'amount': amount,
'filled': filled,
'remaining': remaining,
'fee': fee,
'average': None,
'trades': None,
}, market)
def fetch_orders(self, symbol=None, since=None, limit=100, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders() requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'product_code': market['id'],
'count': limit,
}
response = self.privateGetGetchildorders(self.extend(request, params))
orders = self.parse_orders(response, market, since, limit)
if symbol is not None:
orders = self.filter_by(orders, 'symbol', symbol)
return orders
def fetch_open_orders(self, symbol=None, since=None, limit=100, params={}):
request = {
'child_order_state': 'ACTIVE',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_closed_orders(self, symbol=None, since=None, limit=100, params={}):
request = {
'child_order_state': 'COMPLETED',
}
return self.fetch_orders(symbol, since, limit, self.extend(request, params))
def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a `symbol` argument')
orders = self.fetch_orders(symbol)
ordersById = self.index_by(orders, 'id')
if id in ordersById:
return ordersById[id]
raise OrderNotFound(self.id + ' No order found with id ' + id)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a `symbol` argument')
self.load_markets()
market = self.market(symbol)
request = {
'product_code': market['id'],
}
if limit is not None:
request['count'] = limit
response = self.privateGetGetexecutions(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_positions(self, symbols=None, params={}):
if symbols is None:
raise ArgumentsRequired(self.id + ' fetchPositions() requires a `symbols` argument, exactly one symbol in an array')
self.load_markets()
request = {
'product_code': self.market_ids(symbols),
}
        response = self.privateGetGetpositions(self.extend(request, params))
#
# [
# {
# "product_code": "FX_BTC_JPY",
# "side": "BUY",
# "price": 36000,
# "size": 10,
# "commission": 0,
# "swap_point_accumulate": -35,
# "require_collateral": 120000,
# "open_date": "2015-11-03T10:04:45.011",
# "leverage": 3,
# "pnl": 965,
# "sfd": -0.5
# }
# ]
#
# todo unify parsePosition/parsePositions
return response
def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
self.load_markets()
if code != 'JPY' and code != 'USD' and code != 'EUR':
raise ExchangeError(self.id + ' allows withdrawing JPY, USD, EUR only, ' + code + ' is not supported')
currency = self.currency(code)
request = {
'currency_code': currency['id'],
'amount': amount,
# 'bank_account_id': 1234,
}
response = self.privatePostWithdraw(self.extend(request, params))
id = self.safe_string(response, 'message_id')
return {
'info': response,
'id': id,
}
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
request = {}
if code is not None:
currency = self.currency(code)
if limit is not None:
request['count'] = limit # default 100
response = self.privateGetGetcoinins(self.extend(request, params))
# [
# {
# "id": 100,
# "order_id": "CDP20151227-024141-055555",
# "currency_code": "BTC",
# "amount": 0.00002,
# "address": "1WriteySQufKZ2pVuM1oMhPrTtTVFq35j",
# "tx_hash": "9f92ee65a176bb9545f7becb8706c50d07d4cee5ffca34d8be3ef11d411405ae",
# "status": "COMPLETED",
# "event_date": "2015-11-27T08:59:20.301"
# }
# ]
return self.parse_transactions(response, currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
self.load_markets()
currency = None
request = {}
if code is not None:
currency = self.currency(code)
if limit is not None:
request['count'] = limit # default 100
response = self.privateGetGetcoinouts(self.extend(request, params))
#
# [
# {
# "id": 500,
# "order_id": "CWD20151224-014040-077777",
# "currency_code": "BTC",
# "amount": 0.1234,
# "address": "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa",
# "tx_hash": "724c07dfd4044abcb390b0412c3e707dd5c4f373f0a52b3bd295ce32b478c60a",
# "fee": 0.0005,
# "additional_fee": 0.0001,
# "status": "COMPLETED",
# "event_date": "2015-12-24T01:40:40.397"
# }
# ]
#
return self.parse_transactions(response, currency, since, limit)
def parse_deposit_status(self, status):
statuses = {
'PENDING': 'pending',
'COMPLETED': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_withdrawal_status(self, status):
statuses = {
'PENDING': 'pending',
'COMPLETED': 'ok',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# "id": 100,
# "order_id": "CDP20151227-024141-055555",
# "currency_code": "BTC",
# "amount": 0.00002,
# "address": "1WriteySQufKZ2pVuM1oMhPrTtTVFq35j",
# "tx_hash": "9f92ee65a176bb9545f7becb8706c50d07d4cee5ffca34d8be3ef11d411405ae",
# "status": "COMPLETED",
# "event_date": "2015-11-27T08:59:20.301"
# }
#
# fetchWithdrawals
#
# {
# "id": 500,
# "order_id": "CWD20151224-014040-077777",
# "currency_code": "BTC",
# "amount": 0.1234,
# "address": "1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa",
# "tx_hash": "724c07dfd4044abcb390b0412c3e707dd5c4f373f0a52b3bd295ce32b478c60a",
# "fee": 0.0005,
# "additional_fee": 0.0001,
# "status": "COMPLETED",
# "event_date": "2015-12-24T01:40:40.397"
# }
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
currencyId = self.safe_string(transaction, 'currency_code')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.parse8601(self.safe_string(transaction, 'event_date'))
amount = self.safe_number(transaction, 'amount')
txId = self.safe_string(transaction, 'tx_hash')
rawStatus = self.safe_string(transaction, 'status')
type = None
status = None
fee = None
if 'fee' in transaction:
type = 'withdrawal'
status = self.parse_withdrawal_status(rawStatus)
feeCost = self.safe_number(transaction, 'fee')
additionalFee = self.safe_number(transaction, 'additional_fee')
fee = {'currency': code, 'cost': feeCost + additionalFee}
else:
type = 'deposit'
status = self.parse_deposit_status(rawStatus)
return {
'info': transaction,
'id': id,
'txid': txId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': None,
'address': address,
'addressTo': address,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'internal': None,
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
request = '/' + self.version + '/'
if api == 'private':
request += 'me/'
request += path
if method == 'GET':
if params:
request += '?' + self.urlencode(params)
baseUrl = self.implode_hostname(self.urls['api'])
url = baseUrl + request
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
auth = ''.join([nonce, method, request])
if params:
if method != 'GET':
body = self.json(params)
auth += body
headers = {
'ACCESS-KEY': self.apiKey,
'ACCESS-TIMESTAMP': nonce,
'ACCESS-SIGN': self.hmac(self.encode(auth), self.encode(self.secret)),
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
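    # Hedged sketch of the signing performed above (key, secret and nonce are
    # placeholders): for a private GET of /v1/me/getbalance with nonce
    # "1600000000000", the signed string is "1600000000000GET/v1/me/getbalance"
    # and the headers carry ACCESS-KEY (the API key), ACCESS-TIMESTAMP (the
    # nonce) and ACCESS-SIGN, the hex HMAC-SHA256 of that string with the secret.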
|
|
#!/usr/bin/python
# Copyright 2014 Nervana Systems Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ctypes
import libcudnn
import numpy as np
import pycuda.driver as drv
from pycuda.autoinit import context
from nervanagpu import NervanaGPU, GPUTensor
from math import sqrt
from time import sleep
print context.get_device().name()
# Set dtype to float32 or float16
dtype = np.float16
repeat = 20
start, end = (drv.Event(), drv.Event())
def start_bench():
start.record()
def end_bench(op):
end.record()
end.synchronize()
msecs = end.time_since(start) / repeat
gflops = conv.flops / (msecs * 1000000.0)
print "%7.3f msecs %8.3f gflops (%s: %s)" % (msecs, gflops, op, conv)
ng = NervanaGPU(stochastic_round=False, bench=True)
# Create a cuDNN context
cudnn = libcudnn.cudnnCreate()
C_desc = libcudnn.cudnnCreateConvolutionDescriptor()
I_desc = libcudnn.cudnnCreateTensorDescriptor()
O_desc = libcudnn.cudnnCreateTensorDescriptor()
E_desc = libcudnn.cudnnCreateTensorDescriptor()
B_desc = libcudnn.cudnnCreateTensorDescriptor()
F_desc = libcudnn.cudnnCreateFilterDescriptor()
U_desc = libcudnn.cudnnCreateFilterDescriptor()
# Set some options and tensor dimensions
NCHW_fmt = libcudnn.cudnnTensorFormat['CUDNN_TENSOR_NCHW']
cu_dtype = libcudnn.cudnnDataType['CUDNN_DATA_FLOAT']
conv_mode = libcudnn.cudnnConvolutionMode['CUDNN_CROSS_CORRELATION']
fwd_pref = libcudnn.cudnnConvolutionFwdPreference['CUDNN_CONVOLUTION_FWD_NO_WORKSPACE']
# CUDNN_CONVOLUTION_FWD_NO_WORKSPACE
# CUDNN_CONVOLUTION_FWD_PREFER_FASTEST
# N C K D H W T R S pad str
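# Reading guide for the tuples below (inferred from how conv.NCK, conv.DHW,
# conv.TRS, conv.padding and conv.strides are unpacked later):
# (N, C, K, D, H, W, T, R, S, pad_d, pad_h, pad_w, str_d, str_h, str_w)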
for dims in ( ( 64, 3, 64, 1, 224,224, 1, 3, 3, 0,1,1, 1,1,1), # VGG
( 64, 64, 64, 1, 224,224, 1, 3, 3, 0,1,1, 1,1,1),
( 64, 64,128, 1, 112,112, 1, 3, 3, 0,1,1, 1,1,1),
( 64,128,128, 1, 112,112, 1, 3, 3, 0,1,1, 1,1,1),
( 64,128,256, 1, 56, 56, 1, 3, 3, 0,1,1, 1,1,1),
( 64,256,256, 1, 56, 56, 1, 3, 3, 0,1,1, 1,1,1),
( 64,256,512, 1, 28, 28, 1, 3, 3, 0,1,1, 1,1,1),
( 64,512,512, 1, 28, 28, 1, 3, 3, 0,1,1, 1,1,1),
( 64,512,512, 1, 14, 14, 1, 3, 3, 0,1,1, 1,1,1),
(128, 3, 64, 1, 224,224, 1,11,11, 0,3,3, 1,4,4), #Alexnet
(128, 64,192, 1, 27, 27, 1, 5, 5, 0,2,2, 1,1,1),
(128,192,384, 1, 13, 13, 1, 3, 3, 0,1,1, 1,1,1),
(128,384,256, 1, 13, 13, 1, 3, 3, 0,1,1, 1,1,1),
(128,256,256, 1, 13, 13, 1, 3, 3, 0,1,1, 1,1,1),):
conv = ng.conv_layer(dtype, *dims)
N,C,K = conv.NCK
D,H,W = conv.DHW
T,R,S = conv.TRS
M,P,Q = conv.MPQ
pad_d, pad_h, pad_w = conv.padding
str_d, str_h, str_w = conv.strides
alpha, beta = (1.0, 0.0)
dimI = conv.dimI2
dimF = conv.dimF2
dimO = conv.dimO2
print "cudnn:"
cuI = ng.empty(dimI[::-1], dtype=np.float32)
cuF = ng.empty(dimF[::-1], dtype=np.float32)
cuE = ng.empty(dimO[::-1], dtype=np.float32)
cuB = ng.empty(dimI[::-1], dtype=np.float32)
cuU = ng.empty(dimF[::-1], dtype=np.float32)
cuO = ng.empty(dimO[::-1], dtype=np.float32)
cuI[:] = 2 * (.5 - ng.rand())
cuF[:] = 2 * (.5 - ng.rand())
cuE[:] = 2 * (.5 - ng.rand())
#print drv.mem_get_info()
I_data = ctypes.c_void_p(int(cuI.gpudata))
F_data = ctypes.c_void_p(int(cuF.gpudata))
O_data = ctypes.c_void_p(int(cuO.gpudata))
E_data = ctypes.c_void_p(int(cuE.gpudata))
B_data = ctypes.c_void_p(int(cuB.gpudata))
U_data = ctypes.c_void_p(int(cuU.gpudata))
libcudnn.cudnnSetConvolution2dDescriptor(C_desc, pad_h, pad_w, str_h, str_w, 1, 1, conv_mode)
libcudnn.cudnnSetTensor4dDescriptor(I_desc, NCHW_fmt, cu_dtype, N, C, H, W)
libcudnn.cudnnSetTensor4dDescriptor(B_desc, NCHW_fmt, cu_dtype, N, C, H, W)
libcudnn.cudnnSetTensor4dDescriptor(O_desc, NCHW_fmt, cu_dtype, N, K, P, Q)
libcudnn.cudnnSetTensor4dDescriptor(E_desc, NCHW_fmt, cu_dtype, N, K, P, Q)
libcudnn.cudnnSetFilter4dDescriptor(F_desc, cu_dtype, K, C, R, S)
libcudnn.cudnnSetFilter4dDescriptor(U_desc, cu_dtype, K, C, R, S)
algo = libcudnn.cudnnGetConvolutionForwardAlgorithm(cudnn, I_desc, F_desc, C_desc, O_desc, fwd_pref, 0)
ws_size = libcudnn.cudnnGetConvolutionForwardWorkspaceSize(cudnn, I_desc, F_desc, C_desc, O_desc, algo)
#print algo.value, ws_size.value
ws_ptr = drv.mem_alloc(ws_size.value) if ws_size.value > 0 else 0
ws_data = ctypes.c_void_p(int(ws_ptr))
start_bench()
for r in (range(repeat)):
libcudnn.cudnnConvolutionForward(cudnn, alpha, I_desc, I_data, F_desc, F_data, C_desc, algo, ws_data, ws_size.value, beta, O_desc, O_data)
end_bench("fprop")
ws_ptr = None
start_bench()
for r in (range(repeat)):
libcudnn.cudnnConvolutionBackwardData(cudnn, alpha, F_desc, F_data, E_desc, E_data, C_desc, beta, B_desc, B_data)
end_bench("bprop")
start_bench()
for r in (range(repeat)):
libcudnn.cudnnConvolutionBackwardFilter(cudnn, alpha, I_desc, I_data, E_desc, E_data, C_desc, beta, U_desc, U_data)
end_bench("updat")
print "\nnervana_lib:"
nlI = ng.empty(dimI, dtype=dtype)
nlI[:] = cuI.T
cuI = None
nlF = ng.empty(dimF, dtype=dtype)
nlF[:] = cuF.T
cuF = None
nlE = ng.empty(dimO, dtype=dtype)
nlE[:] = cuE.T
cuE = None
nlB = ng.empty(dimI, dtype=dtype)
nlU = ng.empty(dimF, dtype=dtype)
nlO = ng.empty(dimO, dtype=dtype)
#print drv.mem_get_info()
ng.fprop_conv (conv, nlI, nlF, nlO, alpha=alpha, repeat=repeat)
ng.bprop_conv (conv, nlF, nlE, nlB, alpha=alpha, repeat=repeat)
ng.update_conv(conv, nlI, nlE, nlU, alpha=alpha, repeat=repeat)
nlI = nlF = nlE = None
print "\ncudnn vs nervanaLib:"
parO = ng.empty((N,1), dtype=np.float32)
parB = ng.empty((N,1), dtype=np.float32)
parU = ng.empty((K,1), dtype=np.float32)
maxO = parO[0:1,0:1]
maxB = parB[0:1,0:1]
maxU = parU[0:1,0:1]
maxo = ng.max(abs(cuO - nlO.T), partial=parO, out=maxO).get()[0,0]
maxb = ng.max(abs(cuB - nlB.T), partial=parB, out=maxB).get()[0,0]
maxu = ng.max(abs(cuU - nlU.T), partial=parU, out=maxU).get()[0,0]
meano = ng.mean(abs(cuO), partial=parO, out=maxO).get()[0,0]
meanb = ng.mean(abs(cuB), partial=parB, out=maxB).get()[0,0]
meanu = ng.mean(abs(cuU), partial=parU, out=maxU).get()[0,0]
print " maxerr mean pct"
print "fprop: %7.5f %6.2f %5.3f" % (maxo, meano, 100*maxo/meano)
print "bprop: %7.5f %6.2f %5.3f" % (maxb, meanb, 100*maxb/meanb)
print "updat: %7.5f %6.2f %5.3f" % (maxu, meanu, 100*maxu/meanu)
# free up memory from this layer before proceeding
cuB = cuU = cuO = None
nlB = nlU = nlO = None
parO = parB = parU = maxO = maxB = maxU = None
libcudnn.cudnnDestroyTensorDescriptor(I_desc)
libcudnn.cudnnDestroyTensorDescriptor(O_desc)
libcudnn.cudnnDestroyFilterDescriptor(F_desc)
libcudnn.cudnnDestroyTensorDescriptor(E_desc)
libcudnn.cudnnDestroyTensorDescriptor(B_desc)
libcudnn.cudnnDestroyFilterDescriptor(U_desc)
libcudnn.cudnnDestroyConvolutionDescriptor(C_desc)
libcudnn.cudnnDestroy(cudnn)
|
|
from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.shortcuts import get_object_or_404
from django.utils.translation import ugettext as _
from django.views.generic import ListView, DetailView, CreateView,\
RedirectView, TemplateView
from braces.views import LoginRequiredMixin
from .models import Referendum
from ekratia.threads.models import Comment
from .forms import ReferendumForm, ReferendumCommentForm
import logging
import datetime
logger = logging.getLogger('ekratia')
class ReferendumListView(TemplateView):
"""
List of Referendums
"""
template_name = 'referendums/list.html'
def get_context_data(self, *args, **kwargs):
context = super(ReferendumListView, self).get_context_data(**kwargs)
context['referendums_created'] = Referendum.objects\
.created().order_by('-date')
context['referendums_open'] = Referendum.objects\
.open().order_by('open_time')
return context
class ReferendumArchivedListView(ListView):
"""
List of Referendums
"""
model = Referendum
template_name = 'referendums/archived.html'
def get_queryset(self):
return Referendum.objects.finished().order_by('open_time')
class ReferendumCreateView(LoginRequiredMixin, CreateView):
"""
Creates a Referendum
"""
model = Referendum
template_name = 'referendums/create.html'
form_class = ReferendumForm
def form_valid(self, form):
"""
If the form is valid, save the associated model.
"""
self.object = form.save(commit=False)
self.object.user = self.request.user
referendum_text = "%s \n %s " % (self.object.text_remove_rules,
self.object.text_add_rules)
root_comment = Comment.add_root(content=referendum_text,
user_id=self.request.user.id)
root_comment.save()
self.object.comment = root_comment
self.object = form.save(commit=True)
self.object.title = "Referendum %i " % self.object.id
self.object.save()
return super(ReferendumCreateView, self).form_valid(form)
class ReferendumDetailView(DetailView):
"""
Detail View for a Referendum
"""
model = Referendum
template_name = "referendums/detail.html"
def get_context_data(self, **kwargs):
"""
Insert the single object into the context dict.
"""
context = super(ReferendumDetailView, self).get_context_data(**kwargs)
context['form_comment'] = ReferendumCommentForm
if self.request.user.is_authenticated():
context['user_vote'] = self.request.user.\
get_vote_referendum(self.object)
# vote_count_for_referendum
context['user_vote_value'] = self.request.user.\
vote_count_for_referendum(self.object)
context['object'] = self.object.update_totals()
if settings.DEBUG:
logger.debug("Vote details for %s" % self.object.title)
for vote in self.object.get_votes_list():
logger.debug("User: %s Value: %s" % (vote.user, vote.value))
self.object.check_status()
self.object.count_comments()
return context
class ReferendumResultsView(DetailView):
"""
Referendum results
"""
model = Referendum
template_name = "referendums/results.html"
def get_context_data(self, **kwargs):
"""
Insert the single object into the context dict.
"""
context = super(ReferendumResultsView, self).get_context_data(**kwargs)
context['object'] = self.object.update_totals()
if settings.DEBUG:
logger.debug("Vote details for %s" % self.object.title)
for vote in self.object.get_votes_list():
logger.debug("User: %s Value: %s" % (vote.user, vote.value))
self.object.check_status()
context['votes'] = self.object.get_votes_list()
return context
class ReferendumOpenView(LoginRequiredMixin, RedirectView):
"""
Open Referendum and redirects back to Referendum
"""
permanent = False
pattern_name = 'referendums:detail'
def get_redirect_url(self, *args, **kwargs):
referendum = get_object_or_404(Referendum, slug=kwargs['slug'])
if referendum.user != self.request.user:
            raise PermissionDenied
if not referendum.is_open():
referendum.open_time = datetime.datetime.now()
referendum.save()
messages.success(self.request, _('Referendum Ready to Vote!'))
else:
messages.error(self.request, _('Referendum is already Open'))
return super(ReferendumOpenView, self).\
get_redirect_url(*args, **kwargs)
class ReferendumVoteView(LoginRequiredMixin, ReferendumDetailView):
"""
Detail View for a Referendum
"""
template_name = "referendums/vote.html"
def get_context_data(self, **kwargs):
"""
Insert the single object into the context dict.
"""
context = super(ReferendumVoteView, self).get_context_data(**kwargs)
return context
class ReferendumProcessVoteView(LoginRequiredMixin, RedirectView):
"""
    Process referendum vote and redirects back to Referendum
"""
permanent = False
pattern_name = 'referendums:detail'
def get_redirect_url(self, *args, **kwargs):
logger.debug("Procesing Vote")
referendum = get_object_or_404(Referendum, slug=kwargs['slug'])
# Accepts yes or no
vote_answer = kwargs['value']
if vote_answer != 'yes' and vote_answer != 'no':
logger.error("Invalid Vote Value")
raise Http404("Invalid Vote Value")
        logger.debug(
            "Processing Vote %s, Value %s" % (referendum.title, vote_answer))
if referendum.is_open():
logger.debug("Referendum is open")
# Positive or negative depending on answer
vote_value = 1 if vote_answer == 'yes' else -1
# Method receives (1 or -1).
# It already calculates the vote value depending on delegation
# and other referendum votes
referendum.vote_process(self.request.user, vote_value)
messages.success(self.request, _("We've got your Vote. Thanks!"))
else:
messages.error(self.request, _('Referendum is Closed'))
return reverse('referendums:detail', kwargs={'slug': referendum.slug})
|
|
#################################
# version 1.0
# Author S.Guieu
# History
# Todo:
#
# - Add a possibility of masked_array
#
from __future__ import print_function, division
import numpy as np
__all__ = ["islistofaxarray", "isaxarray", "axarrayconcat",
"size","size_of_shape", "axarray"
]
VERBOSE = 1
##
# list of keyword suffixes / functions for the apply method
# be careful, order matters a lot !!!
_apply_funclist = ["idx", "section", "reduce"]
def asarray(a):
return np.asarray(a)
def islistofaxarray(lst):
""" Return True if all items are axarray object with the same axes names """
first = True
for axa in lst:
if not isaxarray(axa): return False
if first:
axes = axa.axes
first = False
else:
if axes!=axa.axes: return False
return True
def isaxarray(a):
""" True is this is array is a axarray """
return isinstance( a, axarray)
def axarrayconcat(lst, axis=None, check=True):
""" Convert a flat list of axarray to axarray
Args:
        lst (iterable) : iterable of axarray items
        axis (axis label) : the axis label along the dimension of lst. If no
            label is given a plain array is returned
        check (Optional[bool]) : if True, check that all items are compatible axarrays
            (i.e. same axis names). Default is True. If they are not, the returned
            axarray will have only one labeled axis.
            Turn this off if you are sure of what is parsed, to save some time;
            if False it is assumed that all axarrays are the same and the axes
            are taken from the first item.
Returns:
axarray (or array if axis is None)
Examples:
# make a list of data
        >>> exposures = [axarray(np.random.random((4, 3)), ["y", "x"]) for i in range(5)]
>>> axa = axarrayconcat(exposures, "time")
>>> axa.axes
["time", "y", "x"]
>>> axa.shape
        (5, 4, 3)
"""
if axis is None:
return asarray(lst)
if not len(lst):
return axarray( asarray(lst), [axis])
if check:
if not islistofaxarray(lst):
return axarray( [ asarray(data) for data in lst] , [axis])
else:
return axarray( [ asarray(data) for data in lst] , [axis]+list(lst[0].axes))
return axarray( [ asarray(data) for data in lst] , [axis]+list(lst[0].axes))
def _broadcast_axis(sec, axis, axes1, array_axes, axes2, ia, i):
    # this causes a problem
# np.random.random( (100, 256, 320))[ np.array([1,2,3]), np.array([[1,2,3],[1,2,3]]) ].shape
#
if isinstance(sec, slice):
        # case of slice: keep the axis as it is
if array_axes is None:
return sec, axes1+[axis], array_axes, axes2, ia
else:
return sec, axes1, array_axes, axes2+[axis], ia
if isinstance(sec, (int,long)):
        # case of integer: the axis is dropped
return sec, axes1, array_axes, axes2, ia+1 if ia is not None else None
if isaxarray(sec):
# Take the axes of axarray has new axes (array_axes)
# If array_axes already exists check compatibility
# and return empty list
if array_axes is not None:
if array_axes[2] and sec.axes != array_axes[0]:
raise ValueError("axes mismatch: objects cannot be broadcast to a single axes name: %s!=%s"%(sec.axes,array_axes))
array_axes = (array_axes[0] , max( array_axes[1], len(sec.shape)), True)
if (i-ia)>1:
# array indexing in a not contigus order
return sec, [], array_axes, axes1+axes2, i
else:
return sec, axes1, array_axes, axes2, i
return sec, axes1, (sec.axes, len(sec.shape), True), axes2, i
else:
# Everything else should be a list or ndarray
#
sec = np.array( sec )
if array_axes is not None:
if array_axes[2]: #axes are given by a previous axarray
array_axes = (array_axes[0] , max( array_axes[1], len(sec.shape)), True)
else:
array_axes = (array_axes[0]+[axis], max( array_axes[1], len(sec.shape)), False)
if (i-ia)>1:
# array indexing in a not contigus order
return sec, [], array_axes, axes1+axes2, i
else:
return sec, axes1, array_axes, axes2, i
return sec, axes1, ([axis],len(sec.shape), False) , axes2, i
def _broadcast_axes( sections, axes):
""" new axes from a list of indexes (slice, integer, array, ...)
a slice wiil conserve axis: (slice(0,2),), ["x"] -> ["x"]
a integer will loose the axis : slice(0,2),3), ["x", "y"] -> ["x"]
a axarray index will rename axis :
a array like object will conserve axis if flat, or makes extra if multidimentional
"""
axis1 = []
axis2 = []
array_axes = None
ia = None
for i,(sec,axis) in enumerate(zip(sections, axes)):
sec, axis1, array_axes, axis2,ia = _broadcast_axis( sec,axis, axis1, array_axes, axis2, ia, i)
array_axes = _reform_array_axes (*array_axes[0:2]) if array_axes is not None else []
return axis1+array_axes+axis2
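# Hedged examples of the axis bookkeeping above (labels are illustrative):
#
#     _broadcast_axes((slice(0, 2), 3), ["x", "y"])            # -> ["x"]
#     _broadcast_axes((slice(0, 2), slice(None)), ["x", "y"])  # -> ["x", "y"]
#
# Array-like and axarray indexes contribute axes through _reform_array_axes.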
def _reform_array_axes( axes, N):
if N==len(axes): return axes
if N>len(axes):
if len(axes)>1:
return [ (tuple(axes), i) for i in range(N) ]
else:
return [ (axes[0], i) for i in range(N) ]
return [tuple(axes)]
def _decore_loose_axes(func):
""" a decorator for np function that make no sens with axes names
e.g ravel, reshape, flatten, ...., more ?
"""
def decored_loose_axes(axa, *args, **kwargs):
return func(asarray( axa), *args, **kwargs)
decored_loose_axes.__doc__ = "axarray : the call of this function will forget the axes labels\n\n"+func.__doc__
return decored_loose_axes
def _decore_reduce_func(reduce_func):
""" decorator for numpy function that reduce axis e.g. mean, std, min, max etc... """
def decorated_reduce_func( axa, *args, **kwargs):
# assume first of args is always axis, is that true ?
if len(args) and "axis" in kwargs:
raise TypeError("%s got multiple values for keyword argument 'axis'"%ufunc)
axis = args[0] if len(args) else kwargs.pop("axis", None)
return axa._reduce_axis( reduce_func, axis=axis, **kwargs)
decorated_reduce_func.__doc__ = "axarray: apply the equivalent numpy function on given axis name(s).\n\n"+reduce_func.__doc__
return decorated_reduce_func
def _prepare_op(left, right):
"""
    prepare two axarray objects for ufunc operations.
    If both left and right are axarray objects, the dimensions with the same
    axis label are used for the operation. The returned object in this case
    will have the axis labels of the object with the largest number of axes.
    This function prepares the left and right operands in this way.
"""
revers = False
if len(left.axes)< len(right.axes):
right, left = left, right
revers = True
if left.axes == right.axes[-len(left.axes):]:
return left, right, left.axes, left.axes
newrightaxes = [ a for a in left.axes if a in right.axes ]
newleftaxes = []
for n in left.axes:
if not n in newrightaxes:
newleftaxes.insert(0, n)
else:
newleftaxes.append(n)
if revers:
        return right.transpose(newrightaxes), left.transpose(newleftaxes), newrightaxes, right.axes
return left.transpose(newleftaxes), right.transpose( newrightaxes ), newleftaxes, left.axes
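# Hedged example of what the alignment above produces (see _run_op in axarray):
#
#     a = axarray(np.zeros((5, 4, 3)), ["time", "y", "x"])
#     b = axarray(np.ones((3, 4)), ["x", "y"])
#     (a + b).axes   # -> ["time", "y", "x"]
#
# b is transposed to ["y", "x"] so that the shared labels line up, and the
# result keeps the axes of the operand with more axes.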
def size(A, axis=None):
""" Same as numpy.size but axis can be a axis label and a list of axis label
Args:
A (array-like) : the array or axarray
axis (Optional) : axis label or list of axis label
Returns:
s(int) : array size or partial size if axis is given
"""
if axis is None:
return A.size
axes = A.axes if isaxarray(A) else range(len(A.shape))
if isinstance(axis, (tuple,list)):
return reduce( lambda x,y: x*np.size(A,axis=axes.index(y)), axis, 1)
return np.size(A, axis=axes.index(axis))
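# Hedged usage sketch (the array is made up):
#
#     a = axarray(np.zeros((10, 4, 5)), ["time", "y", "x"])
#     size(a)              # -> 200
#     size(a, "time")      # -> 10
#     size(a, ["y", "x"])  # -> 20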
def size_of_shape( shape, axes, axis=None):
""" return the axarray size from its shape tuple and a list of axes name
Args:
shape (tuple/list) : array shape
axes (list) : list of array axes name
        axis (optional[string]) : the axis label on which the size is returned
Returns:
s(int) : array size
Notes:
>>> size_of_shape( a.shape, a.axes)
        # is identical to
>>> size(a)
"""
if axis is None:
return reduce( lambda x,y: x*y, shape, 1)
if isinstance( axis, (tuple,list)):
return reduce( lambda x,y: x*shape[axes.index(y)], axis, 1)
return shape[axes.index(axis)]
def __lop__(op):
""" binary left operator decorator """
def tmpop(self,right):
return self._run_op(self, right, op)
return tmpop
def __rop__(op):
""" binary right operator decorator """
def tmpop(self,left):
return self._run_op(left, self, op)
return tmpop
def __uop__(op):
""" unary operator decorator """
def tmpop(self):
return self.__class__(op(asarray(self)), list(self.axes))
return tmpop
class axarray(np.ndarray):
""" axarray is a numpy array with labeled axes
    Often in science it is useful to name the array axes with an intelligible label.
    For instance, for 2d images taken at different times, the axis names of the
    resulting cube could be ["time", "y", "x"].
    axarray aims to do that. Basic operations can be done without knowing
    the structure of the array. For instance a.mean(axis="time") will execute
    the mean on the axis labeled "time" wherever it is.
    Given a1 and a2, two axarrays, binary operations like a1+a2 can be performed
    even if the two axarrays have different axis orders, as long as they have
    matching axis labels.
Args:
A (array like) :
        axes (iterable) : list of axis labels, can be any object, but strings are
            the most obvious.
        aliases (Optional[dict]) : An optional dictionary that defines axis aliases
            used *ONLY* in the apply method (this may change).
            For instance aliases = {"pix": ["y", "x"]}
            will replace pix_reduce = f by y_reduce = f, x_reduce = f
            and pix_idx = (iy, ix) by y_idx = iy, x_idx = ix
    Returns:
axarray instance
    Properties:
A : return the array as a regular numpy array
+ all other numpy array properties
Attributes:
axis_index : -> a.axes.index(label)
axis_len : -> a.shape[ a.axis_index(label) ]
idx : select array indexes on given axis
section : select array indexes one by one on given axis
transform : make a transpose and reshape in one command
reduce : reduce axis from a given reduce function (e.g. np.mean)
+ all other numpy array properties
Examples:
>>> a = axarray( np.random.random((10,4,5)), ["time", "y", "x"])
>>> b = a.transpose( ["x","time", "y"])
>>> b.axes
["x","time", "y"]
## can operate 2 transposed axarray as long as they
## match axis names
>>> (a+b).axes
["time", "y", "x"]
        ## use the numpy function with axis labels
>>> a.min(axis="time").shape
(4,5)
# similar to:
>>> np.min(a , axis="time")
        # axis can be a list of axis labels
>>> a.mean(axis=["x","y"]).shape
(10,)
        # one can use the convenient apply method. Useful in non-direct
        # calls, as in a plot function for instance
>>> a.apply(time_reduce=np.mean, y_idx=slice(0,2)).shape
(2,5)
# transpose, reshape rename axes in one call
>>> at = a.transform( [("pixel", "y","x"), "time"])
>>> at.shape
(20, 10) # (4*5, 10)
>>> at.axes
['pixel', 'time']
        ### e.g. extract a spectrum from an image using named indices
### make some indices
>>> iy, ix = axarray( np.indices( (3,4)), [0 ,"spatial", "freq"])
        >>> a[:,iy,ix].axes
['time', 'spatial', 'freq']
"""
_verbose = None # if None use the VERBOSE module default
apply_aliases = None
def __new__(subtype, data_array, axes=None, aliases=None):
# Create the ndarray instance of axarray type, given the usual
# ndarray input arguments. This will call the standard
# ndarray constructor, but return an object of axarray.
# It also triggers a call to InfoArray.__array_finalize__
obj = asarray(data_array).view(subtype)
if axes is None:
# default axes are [0,1,2,3, ...]
axes = range(len(obj.shape))
elif isinstance(axes, str):
axes = [axes,]
elif len(axes)>len(obj.shape):
raise KeyError("len of axes must be inferior or equal to the array shape, got %d < %d"%(len(axes), len(obj.shape)) )
if len(set(axes))!=len(axes):
raise KeyError("All axes labels must be unique")
obj.axes = axes
if aliases:
obj.apply_aliases = dict(aliases)
# Finally, we must return the newly created object:
return obj
def __array_finalize__(self, obj):
# ``self`` is a new object resulting from
# ndarray.__new__(axarray, ...), therefore it only has
# attributes that the ndarray.__new__ constructor gave it -
# i.e. those of a standard ndarray.
#
# We could have got to the ndarray.__new__ call in 3 ways:
# From an explicit constructor - e.g. axarray():
# obj is None
# (we're in the middle of the axarray.__new__
# constructor, and self.info will be set when we return to
# axarray.__new__)
if obj is None: return
# From view casting - e.g arr.view(axarray):
# obj is arr
# (type(obj) can be axarray)
# From new-from-template - e.g infoarr[:3]
# type(obj) is axarray
#
# Note that it is here, rather than in the __new__ method,
# that we set the default value for 'info', because this
# method sees all creation of default objects - with the
# axarray.__new__ constructor, but also with
# arr.view(InfoArray).
self.axes = getattr(obj, 'axes', range(len(obj.shape)))
# We do not need to return anything
@property
def A(self):
return asarray(self)
def axis_len(self, axis):
"""
a.axis_len(axis) -> len of the given axis
if axis is None return a.size
"""
if axis is None:
return self.size
return self.shape[self.axis_index(axis)]
def axis_index(self, axislabel):
""" A.axes.index(axis) """
return self.axes.index(axislabel)
def get_missing_axes(self, lst):
""" from a list of axis label return a lis tof missing axis in the axarray """
return [ axis for axis in self.axes if axis not in lst]
def _get_axes(self, lst=None, alternate=None):
""" """
if alternate is None:
alternate = self.axes
if lst is None: return list(self.axes)
lst = list(lst)
cNone = lst.count(None)
if cNone:
if cNone>1:
raise ValueError("Axes list allows only one None value")
iNone = lst.index(None)
lst.remove(None)
lst = lst[0:iNone]+self.get_missing_axes(lst)+lst[iNone:]
for axis in lst:
if not axis in self.axes:
return ValueError("wrong axis label %s"%axis)
return lst
def __array_wrap__(self, out_arr, context=None):
#print 'In __array_wrap__:'
#print ' self is %s' % repr(self)
#print ' arr is %s' % repr(out_arr)
# then just call the parent
return np.ndarray.__array_wrap__(self, out_arr, context)
def __array_prepare__(self, obj, context=None):
#if context:
# if isinstance(obj, axarray):
# left, right, axes, oaxes = _prepare_op(self, obj)
#print obj.shape
#print "context ", context
return obj
def _allsection(self):
return [ slice(0,None) for a in self.axes ]
def _section(self, allsection, axesname, section):
allsection[self.axis_index(axesname)] = section
if isinstance(section, int):
return False
return True
def idx(self, section, axis=None):
###
# To be optimized !!!!!
# For a axarray
# %timeit data[i]
# 10000 loops, best of 3: 38.5 us per loop
# For a regular array
# In [622]: %timeit data.A[ia]
# 100000 loops, best of 3: 4.36 us per loop
# c
if not isinstance(section,tuple):
raise ValueError("first argument, section, must be a tuple, got a %s"%type(section))
if axis is None:
return self[section]
N = len(section)
if len(axis) != N:
raise ValueError("axis keyword should have the same len than section tuple")
if not N:
return self
if len(set(axis))!=len(axis):
raise ValueError("All element of the axis list must be unique got %s"%axis)
axes_index = [self.axis_index(ax) for ax in axis]
N = max(axes_index)
# build an empty list of section according to the max index
allsection = [slice(0,None)]*(N+1)
for sec,i in zip(section, axes_index):
allsection[i] = sec
return self[tuple(allsection)]
def section(self, section, axis=None):
allsection = self._allsection()
allsection[self.axis_index(axis)] = section
axes = list( self.axes) # copy of axes
arout = asarray(self)[tuple(allsection)]
if len(arout.shape)<len(self.shape):
axes.remove(axis)
if len(arout.shape)>len(self.shape):
i = axes.index(axis)
axes.remove(axis)
# build a list of [(axis,num)] for axis that has been extended
axes = axes[0:i]+[(axis,i) for i in range(len(arout.shape)-len(self.shape)+1)]+axes[i+1:]
#axes = axes[0:i]+[axis]*( len(arout.shape)-len(self.shape))+axes[i:]
return self.__class__( asarray(self)[tuple(allsection)], axes )
def section_s(self, axesname_sec):
# axesname_sec is a list of tuple [(axesname1, sec1), (axesname2, sec2), ....]
if not len(axesname_sec): return self
if isinstance( axesname_sec, dict):
axesname_sec = axesname_sec.iteritems()
allsection = self._allsection()
axes = list( self.axes) # copy of axes
for axesname, section in axesname_sec:
if not self._section( allsection, axesname, section):
axes.remove(axesname)
return self.__class__( asarray(self)[tuple(allsection)], axes )
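    # Illustrative call of section_s above (added comment, not in the original
    # source), assuming an array with axes ["time", "y", "x"]:
    #   a.section_s([("time", 0), ("y", slice(0, 2))])
    # drops the "time" axis (integer index) and keeps a length-2 "y" axis,
    # returning an axarray with axes ["y", "x"].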
def __str__(self):
return "%s(\n%s,\naxes=%s)"%(self.__class__.__name__, str(asarray(self)), str(self.axes))
def __repr__(self):
return "%s(\n%s,\naxes=%s)"%(self.__class__.__name__, asarray(self).__repr__(), str(self.axes))
def __getitem__(self, items):
if not isinstance(items, tuple):
items = (items,)
Naxes = len(self.axes)
Nitem = len(items)
newaxis = _broadcast_axes(items, self.axes)+self.axes[Nitem:]
if len(newaxis):
return self.__class__(asarray(self)[items], newaxis)
return asarray(self)[items]
def _get_verbose(self):
"""
return self._verbose or VERBOSE if None
"""
return self._verbose if self._verbose is not None else VERBOSE
@classmethod
def _run_op(cls, left, right, op):
if isaxarray(right) and isaxarray(left):
left, right, axes, oaxes = _prepare_op(left, right)
return cls( op( asarray(left), asarray(right)), axes).transpose( oaxes)
if isaxarray(left):
return cls( op(asarray(left), right) , list(left.axes))
if isaxarray(right):
return cls( op(left, asarray( right)) , list(right.axes))
return cls( op( left, right) )
def _prepare_transform(self, reshapes, ignore_unknown):
"""
        From a list of axis names and formulae, return a tuple containing:
        a list to pass to transpose, a new shape to pass to reshape, and a list of new axis names.
"""
datashape = self.shape
newshape = []
transposes = []
newaxes = []
allkeys = []
for i,x in enumerate(list(reshapes)): #list make a copy
if isinstance(x, tuple):
if len(x)<2:
                    raise ValueError("If an axes definition is a tuple, it must have len > 1")
if ignore_unknown:
x2 = list(x[0:1])+[k for k in x[1:] if (k in self.axes) or (k is None)]
if len(x2)>1:
reshapes[i] = tuple(x2)
else:
reshapes.remove(x)
i -= 1
else:
x2 = x
allkeys.extend(x2[1:])
else:
if ignore_unknown and (not x in self.axes) and (x is not None):
reshapes.remove(x)
i -= 1
else:
allkeys.append(x)
if allkeys.count(None):
if allkeys.count(None)>1:
                raise ValueError("None appears more than once")
allkeys.remove(None)
if None in reshapes:
iNone = reshapes.index(None)
reshapes.remove(None)
reshapes = reshapes[0:iNone]+self.get_missing_axes(allkeys)+reshapes[iNone:]
for x in reshapes:
if isinstance(x, tuple):
if len(x)<2:
                    raise ValueError("If an axes definition is a tuple, it must have len > 1")
newname = x[0]
merged_axis = list(x[1:])
if None in merged_axis:
iNone = merged_axis.index(None)
merged_axis.remove(None)
merged_axis = merged_axis[0:iNone]+self.get_missing_axes(allkeys)+merged_axis[iNone:]
indexes = [ self.axis_index(s) for s in merged_axis ]
transposes.extend(merged_axis)
newshape.append(reduce(lambda x, y: x*y, [datashape[i] for i in indexes]) )
newaxes.append(newname)
else:
if x in self.axes:
i = self.axis_index(x)
transposes.append( x )
newaxes.append( x )
newshape.append( datashape[i] )
else:
transposes.append( x )
newaxes.append( x )
newshape.append( 1 )
return tuple(transposes), tuple(newshape), newaxes
    def apply(self, **kwargs):
        """ convenient function to apply indexing and reducing in one call
        The goal is to quickly apply indexing and reducing without knowing the structure of the
        array but only the axis names. This function is for convenience, so it is a bit dirty.
Args:
            **kwargs : keyword pairs / values can be
                {axes_name}_idx = valid index for the given axis (i.e., int, slice, array like)
                {axes_name}_section = valid index. The difference between _idx and _section is that:
                    apply(x_idx=ix, y_idx=iy) -> will call A[iy, ix] (if "y"/"x" is the name of axis 0/1)
                    apply(x_section=ix, y_section=iy) -> will call A[iy] and then index ix on the result.
                    All the *_idx indexes, if arrays, must have the same dimension; this is not required for *_section
                {axes_name}_reduce = f, a reduce function with signature f(A, axis=axes_name), with A the array
                    and axis the axis name
*_idx are called first then *_section then *_reduce
            Note: shorter versions exist: *_i for *_idx, *_s for *_section, *_r for *_reduce
            squeeze (Optional[bool]): squeeze the returned axarray (remove axes with len 1) if True. default False
aliases (Optional[dict]): A dictionary of aliases, for instance if aliases = {"pix": ("y", "x")} then
pix_idx = (4,5) keyword will be replaced by y_idx=4, x_idx=5
pix_reduce = np.mean will be replaced by y_reduce = np.mean, x_reduce = np.mean
the aliases keyword update the apply_aliases attribute of the axarray object (if any).
Returns:
A scalar or axarray
Warning:
            All keywords that do not match {axis_name}_idx/section/reduce will be
            silently ignored.
Examples:
>>> from axarray import axarray
            >>> a = axarray( np.random.random((5,10,20)), ["time", "y", "x"])
            >>> a.apply(time_reduce=np.mean, x_idx=np.s_[0:2])
>>> a.apply( time_r = np.mean) # time_r is a short version of time_reduce
>>> a.apply( x_idx=[0,1,2,3], y_idx=[0,1,2,3]).shape
(4,)
>>> a.apply( x_section=[0,1,2,3], y_section=[0,1,2,3]).shape
(4,4)
            >>> a = axarray( np.random.random((5,10,20)), ["time", "y", "x"], {"pix":["y", "x"]})
            # make a function that returns a portion of an image
            >>> def mybox(pos=(0,0), size=10):
                    return (slice( pos[0], pos[0]+size), slice( pos[1], pos[1]+size) )
>>> a.apply( pix_idx = mybox(size=5) ) # pix_idx is alias of y_idx, x_idx
"""
squeeze = kwargs.pop("squeeze", False)
aliases = self.apply_aliases or {}
aliases.update(kwargs.pop("aliases", {}))
##
# remove the None, they are not relevant for
# any of the methods
for k,v in kwargs.items():
if v is None: kwargs.pop(k)
verbose = self._get_verbose()
Nfunc = len(_apply_funclist)
ifunc = 0
#for funckey in self._apply_funclist:
while ifunc<Nfunc:
funckey = _apply_funclist[ifunc]
axes = list(self.axes)
f = funckey
args = {}
notargs = []
for ax in self.axes:
lazykwargs = "%s_%s"%(ax,funckey)
shortlazykwargs = "%s_%s"%(ax, funckey[0])
if lazykwargs in kwargs and shortlazykwargs in kwargs:
                    raise ValueError("'%s' and '%s' keywords are the same; use only one"%(lazykwargs, shortlazykwargs))
func_val = kwargs.pop(lazykwargs, kwargs.pop(shortlazykwargs, None))
if func_val is not None:
args[ax] = func_val
## handle the aliases
for alias, al_axes in aliases.iteritems():
lazykwargs = "%s_%s"%(alias, funckey)
shortlazykwargs = "%s_%s"%(alias, funckey[0])
if lazykwargs in kwargs and shortlazykwargs in kwargs:
                    raise ValueError("'%s' and '%s' keywords are the same; use only one"%(lazykwargs, shortlazykwargs))
func_val = kwargs.pop(lazykwargs, kwargs.pop(shortlazykwargs, None))
if func_val is not None:
if f in ["idx", "section"]:
###
# if pix alias of ["y", "x"]
# then pix_idx = (4,5) -> y_idx=4, x_idx=5
if not hasattr(func_val, "__iter__"):
func_val = [func_val]
for ax,v in zip(al_axes, func_val):
if ax in args and args[ax] != v:
raise TypeError("'%s' alias keyword in conflict with the '%s_%s' keyword"%(lazykwargs, ax, f))
args[ax] = v
else:
###
# if pix alias of ["y", "x"]
                        # then pix_reduce = mean -> y_reduce=mean, x_reduce=mean
for ax in al_axes:
                            if ax in args and args[ax] != func_val:
raise TypeError("'%s' alias keyword in conflict with the '%s_%s' keyword"%(lazykwargs, ax, f))
args[ax] = func_val
if not len(args):
# if there is no fancy keyword in kwargs go to the next func
# because the self.axes can have changed we need to continue
# until there is no fancy keyword available
ifunc += 1
continue
if funckey== "idx":
###
# for idx we need to collect them before
indexes = []
indexes_axis = []
for ax in axes:
if ax in args and ax in self.axes:
indexes.append(args[ax])
indexes_axis.append(ax)
self = self.idx(tuple(indexes), axis=indexes_axis)
########
# At this point self can be something else
# like a float for instance
if not isaxarray(self): return self
else:
for ax in axes:
if ax in args and ax in self.axes:
self = getattr(self, f)( args[ax], axis=ax )
########
# At this point self can be something else
# like a float for instance
if not isaxarray( self): return self
# for k in kwargs:
# for f in _apply_funclist:
# if k.endswith("_"+f):
# raise TypeError("got '%s' keyword but the axis is unknown")
if verbose>=2 and len(kwargs):
print ("NOTICE : keys %s had no effect on data"%kwargs.keys())
if squeeze:
return self.squeeze()
return self
def transform(self, reshapes, add_unknown=False, ignore_unknown=False,
squeeze_missing=True
):
""" make a transpose, reshape, rename of axes in one call
Transform the array axes according to the axes list of axes label or tuple.
transform can make a transpose, reshape and rename axes in the same call.
Args:
            reshapes (list): list of axis labels that define the transform
                If an item is a tuple, the tuple's first item should be the new axis label,
                the others the labels of existing axes
>>> a = axarray( np.zeros((10,4,5)), ["time", "y", "x"])
>>> a.transform( [("pix", "y", "x"), "time"])
is equivalent to do
>>> axarray( a.transpose( ["y","x","time"]).reshape( (4*5,10)), ["pix","time"] )
            add_unknown (Optional[bool]): if True, unknown axis labels will be added
                with a dimension of 1. Default is False
            ignore_unknown (Optional[bool]): if True, unknown axes will be ignored.
                Default is False. add_unknown and ignore_unknown cannot both be True
squeeze_missing (Optional[bool]): if True missing axis with dimension 1
will be dropped. Else raise ValueError. Default is True
            If ignore_unknown is True, unknown axes will be completely ignored
            in the process.
e.g.:
> d = axarray(np.random.rand( 8, 10, 12,13), ["time", "z", "y", "x"])
> d.transform( [ "z", ("pixel", "x", "y"), "time"] )
axarray(([[...,
[ 0.82653106, 0.99736293, 0.67030048, ..., 0.91404063,
0.71512099, 0.20758938]]]),('z', 'pixel', 'time'))
# Equivalent to :
> axarray ( d.transpose( [1,3,2,0] ).reshape( ( 10,13*12,8) ), ["z", "pixel", "time"])
"""
if add_unknown is True and ignore_unknown is True:
raise KeyError("add_unknown and ignore_unknown cannot be both True")
transposes, newshape, newaxes = self._prepare_transform(reshapes, ignore_unknown)
data = self.transpose(transposes, add_unknown=add_unknown,
squeeze_missing=squeeze_missing).reshape( newshape )
return self.__class__( data, newaxes )
@property
def T(self):
return self.transpose()
def transpose(self, tps=None, add_unknown=False,
squeeze_missing=True,
**kwargs):
if tps is None:
tps = list(self.axes)
tps[0], tps[-1] = tps[-1], tps[0] ## swap first and last axes
axes = self.axes
reshape_uk = False
if add_unknown:
newshape = []
newtps = []
for tp in tps:
if tp in axes:
newshape.append(self.axis_len(tp))
newtps.append(tp)
else:
newshape.append(1)
reshape_uk = True
else:
for a in tps:
if a not in axes:
raise TypeError("unknown axis '%s'"%a)
reshape_missing = False
if squeeze_missing:
newshape_missing = []
newaxes_missing = []
for x in axes:
if x not in tps:
if self.axis_len(x)>1:
                        raise ValueError("Cannot squeeze axis '%s': its length must be 1, got %d"%(x,self.axis_len(x)))
reshape_missing = True
else:
newshape_missing.append(self.axis_len(x))
newaxes_missing.append(x)
if reshape_missing:
self = self.__class__(self.reshape(newshape_missing),
newaxes_missing)
if reshape_uk:
return self.__class__( asarray(self).transpose( [self.axis_index(tp) for tp in newtps ]).reshape(newshape), list(tps))
return self.__class__( asarray(self).transpose( [self.axis_index(tp) for tp in tps ]), list(tps))
    transpose.__doc__ = """transpose for axarray acts the same way as for a numpy array but with axis labels
    However, two keywords are different:
        add_unknown (Optional[bool]): if True, add a dimension of 1 to the array for an unknown
            label
        squeeze_missing (Optional[bool]): drop an axis if its label is missing and its dimension
            is 1, else raise ValueError
Example:
>>> a = axarray( ones((10,4,5)), ["time", "y", "x"])
>>> a.transpose( ["x", "y", "time"])
numpy doc of transpose is copied below
--------------------------------------
"""+np.transpose.__doc__
reshape = _decore_loose_axes(np.reshape)
ravel = _decore_loose_axes(np.ravel)
flatten = _decore_loose_axes(np.ndarray.flatten)
def _reduce_axis(self, freduce, axis=None, **kwargs):
# Loop over axis
if isinstance(axis,list):
for ax in self._get_axes(axis):
#if not isinstance(self,axarray): return self
# initial is None after the first iteration
self = self._reduce_axis(freduce, axis=ax, **kwargs)
return self
##########################################################
if axis is None:
if kwargs.get("keepdims",False):
return self.__class__(freduce( asarray(self), axis = None, **kwargs ), self.axes)
else:
return freduce(asarray(self), axis = None, **kwargs )
iaxis = self.axis_index(axis)
ndata = freduce(asarray(self), axis = iaxis, **kwargs )
axes = list(self.axes)
if len(ndata.shape) < len(self.shape):
axes.remove( axis )
elif len(ndata.shape) > len(self.shape):
raise Exception("The freduce function cannot add dimension to the data")
if not len(ndata.shape): return ndata
return self.__class__(ndata, axes)
def _reduce_func(self, freduce, axis=None, initial=None):
#########################################################
# Loop over axis
if isinstance(axis,list):
for ax in self._get_axes(axis):
if not isaxarray(self): return self
# initial is None after the first iteration
self = self._reduce_func(freduce, axis=ax, initial= (None if axis.index(ax) else initial) )
return self
##########################################################
if axis is None:
if initial is None:
return reduce(freduce, self.flat)
else:
return reduce(freduce, self.flat, initial)
axes = list(self.axes)
if initial is None:
return reduce (freduce, self.transpose( [axis,None] ))
else:
return reduce (freduce, self.transpose( [axis,None] ), initial)
def reduce(self, freduce, axis=None, initial=None):
""" reduce the data along axis name(s) with the freduce func
        If the freduce method has signature f(A, axis=) (e.g. mean, std, max, etc...)
        f is called with the axis name as keyword argument.
        If the freduce method has signature f(A), transpose the array so the given axis label
        is first and then call f(A).
        If axis is iterable, freduce is executed for each axis item on the array resulting from the
        previous call.
        Args:
            freduce : function to apply
            axis : axis label or list of labels on which the freduce function will be called.
                If None (default) the array is flattened before executing freduce
            initial (Optional) : only used if freduce has signature f(A); this is the initial object
                of the python reduce function.
"""
#########################################################
# loop over axis
if isinstance(axis, list):
if initial is None:
try:
##
# To avoid confusion try first on the first axes
# if succed do the rest
tmp = self._reduce_axis(freduce, axis=axis[0])
self = tmp
return self._reduce_axis(freduce, axis=axis[1:])
except TypeError as e:
if "'axis'" in e.message:
return self._reduce_func(freduce, axis=axis)
else:
raise e
else:
return self._reduce_func(freduce, axis=axis, initial=initial)
##########################################################
if initial is None:
try:
return self._reduce_axis(freduce, axis=axis)
except TypeError as e:
if "'axis'" in e.message:
return self._reduce_func(freduce, axis=axis)
else:
raise e
return self._reduce_func(freduce, axis=axis, initial=initial)
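    # Illustrative calls of reduce above (added comment, not in the original
    # source), with a = axarray(np.ones((10, 4, 5)), ["time", "y", "x"]):
    #   a.reduce(np.mean, axis="time")         # -> axarray with axes ["y", "x"]
    #   a.reduce(np.mean, axis=["time", "y"])  # -> axarray with axes ["x"]
    # Both go through _reduce_axis because np.mean accepts an axis keyword.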
mean = _decore_reduce_func(np.mean)
var = _decore_reduce_func(np.var)
std = _decore_reduce_func(np.std)
min = _decore_reduce_func(np.min)
max = _decore_reduce_func(np.max)
sum = _decore_reduce_func(np.sum)
prod = _decore_reduce_func(np.prod)
argmax = _decore_reduce_func(np.argmax)
argmin = _decore_reduce_func(np.argmin)
cumsum = _decore_reduce_func(np.cumsum)
cumprod = _decore_reduce_func(np.cumprod)
def squeeze(self, axis=None):
"""remove single-dimensional entries from the shape of an array.
Args:
a : array_like
Input data.
axis : None or axis labels
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns:
squeezed : axarray
"""
if axis is None:
shape = self.shape
axes = [ax for i,ax in enumerate(self.axes) if shape[i]>1]
elif hasattr(axis, "__iter__"):
axes = [ax for ax in self.axes if ax not in axis]
else:
axes = [ax for ax in self.axes if ax != axis]
return axarray( self.A.squeeze(), axes)
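    # Illustrative call of squeeze above (added comment, not in the original
    # source): squeezing drops the labels of the removed length-1 axes, e.g.
    #   axarray(np.ones((1, 4, 5)), ["time", "y", "x"]).squeeze().axes
    # gives ["y", "x"].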
@__lop__
def __add__(x, y):
return x+y
@__lop__
def __sub__(x, y):
return x-y
@__lop__
def __mul__(x, y):
return x*y
@__lop__
def __floordiv__(x, y):
return x//y
@__lop__
def __mod__(x, y):
return x%y
@__lop__
def __divmod__(x, y):
return divmod(x,y)
@__lop__
def __pow__(x, y ):
return pow(x,y)
@__lop__
def __lshift__(x, y):
return x<<y
@__lop__
def __rshift__(x, y):
return x>>y
@__lop__
def __and__(x, y):
return x&y
@__lop__
def __xor__(x, y):
return x^y
@__lop__
def __or__(x, y):
return x|y
@__rop__
def __radd__(x, y):
return x+y
@__rop__
def __rsub__(x, y):
return x-y
@__rop__
def __rmul__(x, y):
return x*y
@__rop__
def __rdiv__(x, y):
return x/y
@__rop__
def __rtruediv__(x, y):
return x/y
@__rop__
def __rfloordiv__(x, y):
return x/y
@__rop__
def __rmod__(x, y):
return x%y
@__rop__
def __rdivmod__(x, y):
return divmod(x,y)
@__rop__
def __rpow__(x, y ):
return pow(x,y)
@__rop__
def __rlshift__(x, y):
return x<<y
@__rop__
def __rrshift__(x, y):
return x>>y
@__rop__
def __rand__(x, y):
return x&y
@__rop__
def __rxor__(x, y):
return x^y
@__rop__
def __ror__(x, y):
return x|y
@__uop__
def __neg__(x):
return -x
@__uop__
def __pos__(x):
return +x
@__uop__
def __abs__(x):
return abs(x)
@__uop__
def __invert__(x):
return ~x
@__uop__
def __complex__(x):
return complex(x)
@__uop__
def __int__(x):
return int(x)
@__uop__
def __long__(x):
return long(x)
@__uop__
def __float__(x):
return float(x)
@__uop__
def __index__(x):
return x.__index__()
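# Minimal smoke-test sketch (added; not part of the original module). It is
# guarded by __name__ so it never runs on import, and it only relies on the
# constructor and methods documented above (axarray(data, axes), axis_len,
# label-based transpose and the .A view).
if __name__ == "__main__":
    demo = axarray(np.zeros((10, 4, 5)), ["time", "y", "x"])
    print(demo.axis_len("y"))                        # expected: 4
    print(demo.transpose(["x", "y", "time"]).shape)  # expected: (5, 4, 10)
    print(demo.A.__class__)                          # plain numpy ndarray view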
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, print_function
import os
import re
import json
import warnings
from io import open
from enum import Enum
from pymatgen.core.units import Mass, Length, unitized, FloatWithUnit, Unit, \
SUPPORTED_UNIT_NAMES
from pymatgen.util.string import formula_double_format
from monty.json import MSONable
"""
Module contains classes representing Element and Specie (Element + oxidation
state) and PeriodicTable.
"""
__author__ = "Shyue Ping Ong, Michael Kocher"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "2.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
# Loads element data from json file
with open(os.path.join(os.path.dirname(__file__),
"periodic_table.json"), "rt") as f:
_pt_data = json.load(f)
_pt_row_sizes = (2, 8, 8, 18, 18, 32, 32)
class Element(Enum):
"""
Basic immutable element object with all relevant properties.
Only one instance of Element for each symbol is stored after creation,
ensuring that a particular element behaves like a singleton. For all
    attributes, missing data (i.e., data which is not available) is
represented by a None unless otherwise stated.
Args:
symbol (str): Element symbol, e.g., "H", "Fe"
.. attribute:: Z
Atomic number
.. attribute:: symbol
Element symbol
.. attribute:: X
Pauling electronegativity. Elements without an electronegativity
number are assigned a value of zero by default.
.. attribute:: number
Alternative attribute for atomic number
.. attribute:: max_oxidation_state
Maximum oxidation state for element
.. attribute:: min_oxidation_state
Minimum oxidation state for element
.. attribute:: oxidation_states
Tuple of all known oxidation states
.. attribute:: common_oxidation_states
Tuple of all common oxidation states
.. attribute:: full_electronic_structure
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
.. attribute:: row
Returns the periodic table row of the element.
.. attribute:: group
Returns the periodic table group of the element.
.. attribute:: block
Return the block character "s,p,d,f"
.. attribute:: is_noble_gas
True if element is noble gas.
.. attribute:: is_transition_metal
True if element is a transition metal.
.. attribute:: is_rare_earth_metal
True if element is a rare earth metal.
.. attribute:: is_metalloid
True if element is a metalloid.
.. attribute:: is_alkali
True if element is an alkali metal.
.. attribute:: is_alkaline
True if element is an alkaline earth metal (group II).
.. attribute:: is_halogen
True if element is a halogen.
.. attribute:: is_lanthanoid
True if element is a lanthanoid.
.. attribute:: is_actinoid
        True if element is an actinoid.
.. attribute:: long_name
Long name for element. E.g., "Hydrogen".
.. attribute:: atomic_mass
Atomic mass for the element.
.. attribute:: atomic_radius
Atomic radius for the element. This is the empirical value. Data is
obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: atomic_radius_calculated
Calculated atomic radius for the element. This is the empirical value.
Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: van_der_waals_radius
Van der Waals radius for the element. This is the empirical
value. Data is obtained from
http://en.wikipedia.org/wiki/Atomic_radii_of_the_elements_(data_page).
.. attribute:: mendeleev_no
Mendeleev number
.. attribute:: electrical_resistivity
Electrical resistivity
.. attribute:: velocity_of_sound
Velocity of sound
.. attribute:: reflectivity
Reflectivity
.. attribute:: refractive_index
        Refractive index
.. attribute:: poissons_ratio
Poisson's ratio
.. attribute:: molar_volume
Molar volume
.. attribute:: electronic_structure
Electronic structure. Simplified form with HTML formatting.
E.g., The electronic structure for Fe is represented as
[Ar].3d<sup>6</sup>.4s<sup>2</sup>
.. attribute:: atomic_orbitals
Atomic Orbitals. Energy of the atomic orbitals as a dict.
E.g., The orbitals energies in eV are represented as
{'1s': -1.0, '2s': -0.1}
Data is obtained from
https://www.nist.gov/pml/data/atomic-reference-data-electronic-structure-calculations
The LDA values for neutral atoms are used
.. attribute:: thermal_conductivity
Thermal conductivity
.. attribute:: boiling_point
Boiling point
.. attribute:: melting_point
Melting point
.. attribute:: critical_temperature
Critical temperature
.. attribute:: superconduction_temperature
Superconduction temperature
.. attribute:: liquid_range
Liquid range
.. attribute:: bulk_modulus
Bulk modulus
.. attribute:: youngs_modulus
Young's modulus
.. attribute:: brinell_hardness
Brinell hardness
.. attribute:: rigidity_modulus
Rigidity modulus
.. attribute:: mineral_hardness
Mineral hardness
.. attribute:: vickers_hardness
Vicker's hardness
.. attribute:: density_of_solid
Density of solid phase
.. attribute:: coefficient_of_linear_thermal_expansion
Coefficient of linear thermal expansion
.. attribute:: average_ionic_radius
Average ionic radius for element in ang. The average is taken over all
oxidation states of the element for which data is present.
.. attribute:: ionic_radii
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
# This name = value convention is redundant and dumb, but unfortunately is
    # necessary to preserve backwards compatibility with a time when Element
    # was a regular object that was constructed with Element(symbol).
H = "H"
He = "He"
Li = "Li"
Be = "Be"
B = "B"
C = "C"
N = "N"
O = "O"
F = "F"
Ne = "Ne"
Na = "Na"
Mg = "Mg"
Al = "Al"
Si = "Si"
P = "P"
S = "S"
Cl = "Cl"
Ar = "Ar"
K = "K"
Ca = "Ca"
Sc = "Sc"
Ti = "Ti"
V = "V"
Cr = "Cr"
Mn = "Mn"
Fe = "Fe"
Co = "Co"
Ni = "Ni"
Cu = "Cu"
Zn = "Zn"
Ga = "Ga"
Ge = "Ge"
As = "As"
Se = "Se"
Br = "Br"
Kr = "Kr"
Rb = "Rb"
Sr = "Sr"
Y = "Y"
Zr = "Zr"
Nb = "Nb"
Mo = "Mo"
Tc = "Tc"
Ru = "Ru"
Rh = "Rh"
Pd = "Pd"
Ag = "Ag"
Cd = "Cd"
In = "In"
Sn = "Sn"
Sb = "Sb"
Te = "Te"
I = "I"
Xe = "Xe"
Cs = "Cs"
Ba = "Ba"
La = "La"
Ce = "Ce"
Pr = "Pr"
Nd = "Nd"
Pm = "Pm"
Sm = "Sm"
Eu = "Eu"
Gd = "Gd"
Tb = "Tb"
Dy = "Dy"
Ho = "Ho"
Er = "Er"
Tm = "Tm"
Yb = "Yb"
Lu = "Lu"
Hf = "Hf"
Ta = "Ta"
W = "W"
Re = "Re"
Os = "Os"
Ir = "Ir"
Pt = "Pt"
Au = "Au"
Hg = "Hg"
Tl = "Tl"
Pb = "Pb"
Bi = "Bi"
Po = "Po"
At = "At"
Rn = "Rn"
Fr = "Fr"
Ra = "Ra"
Ac = "Ac"
Th = "Th"
Pa = "Pa"
U = "U"
Np = "Np"
Pu = "Pu"
Am = "Am"
Cm = "Cm"
Bk = "Bk"
Cf = "Cf"
Es = "Es"
Fm = "Fm"
Md = "Md"
No = "No"
Lr = "Lr"
def __init__(self, symbol):
self.symbol = "%s" % symbol
d = _pt_data[symbol]
# Store key variables for quick access
self.Z = d["Atomic no"]
at_r = d.get("Atomic radius", "no data")
if str(at_r).startswith("no data"):
self.atomic_radius = None
else:
self.atomic_radius = Length(at_r, "ang")
self.atomic_mass = Mass(d["Atomic mass"], "amu")
self.long_name = d["Name"]
self._data = d
@property
def X(self):
if "X" in self._data:
return self._data["X"]
else:
warnings.warn("No electronegativity for %s. Setting to infinity. "
"This has no physical meaning, and is mainly done to "
"avoid errors caused by the code expecting a float."
% self.symbol)
return float("inf")
def __getattr__(self, item):
if item in ["mendeleev_no", "electrical_resistivity",
"velocity_of_sound", "reflectivity",
"refractive_index", "poissons_ratio", "molar_volume",
"electronic_structure", "thermal_conductivity",
"boiling_point", "melting_point",
"critical_temperature", "superconduction_temperature",
"liquid_range", "bulk_modulus", "youngs_modulus",
"brinell_hardness", "rigidity_modulus",
"mineral_hardness", "vickers_hardness",
"density_of_solid", "atomic_radius_calculated",
"van_der_waals_radius", "atomic_orbitals",
"coefficient_of_linear_thermal_expansion"]:
kstr = item.capitalize().replace("_", " ")
val = self._data.get(kstr, None)
if str(val).startswith("no data"):
val = None
elif type(val) == dict:
pass
else:
try:
val = float(val)
except ValueError:
nobracket = re.sub(r'\(.*\)', "", val)
toks = nobracket.replace("about", "").strip().split(" ", 1)
if len(toks) == 2:
try:
if "10<sup>" in toks[1]:
base_power = re.findall(r'([+-]?\d+)', toks[1])
factor = "e" + base_power[1]
if toks[0] in [">", "high"]:
toks[0] = "1" # return the border value
toks[0] += factor
if item == "electrical_resistivity":
unit = "ohm m"
elif (
item ==
"coefficient_of_linear_thermal_expansion"
):
unit = "K^-1"
else:
unit = toks[1]
val = FloatWithUnit(toks[0], unit)
else:
unit = toks[1].replace("<sup>", "^").replace(
"</sup>", "").replace("Ω",
"ohm")
units = Unit(unit)
if set(units.keys()).issubset(
SUPPORTED_UNIT_NAMES):
val = FloatWithUnit(toks[0], unit)
except ValueError as ex:
# Ignore error. val will just remain a string.
pass
return val
raise AttributeError
@property
def data(self):
"""
Returns dict of data for element.
"""
return self._data.copy()
@property
@unitized("ang")
def average_ionic_radius(self):
"""
Average ionic radius for element (with units). The average is taken
over all oxidation states of the element for which data is present.
"""
if "Ionic radii" in self._data:
radii = self._data["Ionic radii"]
return sum(radii.values()) / len(radii)
else:
return 0
@property
@unitized("ang")
def ionic_radii(self):
"""
All ionic radii of the element as a dict of
{oxidation state: ionic radii}. Radii are given in ang.
"""
if "Ionic radii" in self._data:
return {int(k): v for k, v in self._data["Ionic radii"].items()}
else:
return {}
@property
def number(self):
"""Alternative attribute for atomic number"""
return self.Z
@property
def max_oxidation_state(self):
"""Maximum oxidation state for element"""
if "Oxidation states" in self._data:
return max(self._data["Oxidation states"])
return 0
@property
def min_oxidation_state(self):
"""Minimum oxidation state for element"""
if "Oxidation states" in self._data:
return min(self._data["Oxidation states"])
return 0
@property
def oxidation_states(self):
"""Tuple of all known oxidation states"""
return tuple(self._data.get("Oxidation states", list()))
@property
    def common_oxidation_states(self):
        """Tuple of all common oxidation states"""
return tuple(self._data.get("Common oxidation states", list()))
@property
def icsd_oxidation_states(self):
"""Tuple of all oxidation states with at least 10 instances in
ICSD database AND at least 1% of entries for that element"""
return tuple(self._data.get("ICSD oxidation states", list()))
@property
def full_electronic_structure(self):
"""
Full electronic structure as tuple.
E.g., The electronic structure for Fe is represented as:
[(1, "s", 2), (2, "s", 2), (2, "p", 6), (3, "s", 2), (3, "p", 6),
(3, "d", 6), (4, "s", 2)]
"""
estr = self._data["Electronic structure"]
def parse_orbital(orbstr):
m = re.match(r"(\d+)([spdfg]+)<sup>(\d+)</sup>", orbstr)
if m:
return int(m.group(1)), m.group(2), int(m.group(3))
return orbstr
data = [parse_orbital(s) for s in estr.split(".")]
if data[0][0] == "[":
sym = data[0].replace("[", "").replace("]", "")
data = Element(sym).full_electronic_structure + data[1:]
return data
def __eq__(self, other):
return isinstance(other, Element) and self.Z == other.Z
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return self.Z
def __repr__(self):
return "Element " + self.symbol
def __str__(self):
return self.symbol
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity. Very
useful for getting correct formulas. For example, FeO4PLi is
automatically sorted into LiFePO4.
"""
if self.X != other.X:
return self.X < other.X
else:
            # There are cases where the electronegativities are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
@staticmethod
def from_Z(z):
"""
Get an element from an atomic number.
Args:
z (int): Atomic number
Returns:
Element with atomic number z.
"""
for sym, data in _pt_data.items():
if data["Atomic no"] == z:
return Element(sym)
raise ValueError("No element with this atomic number %s" % z)
@staticmethod
def from_row_and_group(row, group):
"""
Returns an element from a row and group number.
Args:
row (int): Row number
group (int): Group number
.. note::
The 18 group number system is used, i.e., Noble gases are group 18.
"""
for sym in _pt_data.keys():
el = Element(sym)
if el.row == row and el.group == group:
return el
raise ValueError("No element with this row and group!")
@staticmethod
def is_valid_symbol(symbol):
"""
Returns true if symbol is a valid element symbol.
Args:
symbol (str): Element symbol
Returns:
True if symbol is a valid element (e.g., "H"). False otherwise
(e.g., "Zebra").
"""
try:
Element(symbol)
return True
except:
return False
@property
def row(self):
"""
Returns the periodic table row of the element.
"""
z = self.Z
total = 0
if 57 <= z <= 71:
return 8
elif 89 <= z <= 103:
return 9
for i in range(len(_pt_row_sizes)):
total += _pt_row_sizes[i]
if total >= z:
return i + 1
return 8
@property
def group(self):
"""
Returns the periodic table group of the element.
"""
z = self.Z
if z == 1:
return 1
if z == 2:
return 18
if 3 <= z <= 18:
if (z - 2) % 8 == 0:
return 18
elif (z - 2) % 8 <= 2:
return (z - 2) % 8
else:
return 10 + (z - 2) % 8
if 19 <= z <= 54:
if (z - 18) % 18 == 0:
return 18
else:
return (z - 18) % 18
if (z - 54) % 32 == 0:
return 18
elif (z - 54) % 32 >= 18:
return (z - 54) % 32 - 14
else:
return (z - 54) % 32
@property
def block(self):
"""
Return the block character "s,p,d,f"
"""
block = ""
if (self.is_actinoid or self.is_lanthanoid) and \
self.Z not in [71, 103]:
block = "f"
elif self.is_actinoid or self.is_lanthanoid:
block = "d"
elif self.group in [1, 2]:
block = "s"
elif self.group in range(13, 19):
block = "p"
elif self.group in range(3, 13):
block = "d"
else:
raise ValueError("unable to determine block")
return block
@property
def is_noble_gas(self):
"""
True if element is noble gas.
"""
return self.Z in (2, 10, 18, 36, 54, 86, 118)
@property
def is_transition_metal(self):
"""
True if element is a transition metal.
"""
ns = list(range(21, 31))
ns.extend(list(range(39, 49)))
ns.append(57)
ns.extend(list(range(72, 81)))
ns.append(89)
ns.extend(list(range(104, 113)))
return self.Z in ns
@property
def is_rare_earth_metal(self):
"""
True if element is a rare earth metal.
"""
return self.is_lanthanoid or self.is_actinoid
@property
def is_metalloid(self):
"""
True if element is a metalloid.
"""
return self.symbol in ("B", "Si", "Ge", "As", "Sb", "Te", "Po")
@property
def is_alkali(self):
"""
True if element is an alkali metal.
"""
return self.Z in (3, 11, 19, 37, 55, 87)
@property
def is_alkaline(self):
"""
True if element is an alkaline earth metal (group II).
"""
return self.Z in (4, 12, 20, 38, 56, 88)
@property
def is_halogen(self):
"""
True if element is a halogen.
"""
return self.Z in (9, 17, 35, 53, 85)
@property
def is_chalcogen(self):
"""
True if element is a chalcogen.
"""
return self.Z in (8, 16, 34, 52, 84)
@property
def is_lanthanoid(self):
"""
True if element is a lanthanoid.
"""
return 56 < self.Z < 72
@property
def is_actinoid(self):
"""
        True if element is an actinoid.
"""
return 88 < self.Z < 104
def __deepcopy__(self, memo):
return Element(self.symbol)
@staticmethod
def from_dict(d):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return Element(d["element"])
def as_dict(self):
"""
Makes Element obey the general json interface used in pymatgen for
easier serialization.
"""
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol}
@staticmethod
def print_periodic_table(filter_function=None):
"""
A pretty ASCII printer for the periodic table, based on some
filter_function.
Args:
filter_function: A filtering function taking an Element as input
and returning a boolean. For example, setting
filter_function = lambda el: el.X > 2 will print a periodic
table containing only elements with electronegativity > 2.
"""
for row in range(1, 10):
rowstr = []
for group in range(1, 19):
try:
el = Element.from_row_and_group(row, group)
except ValueError:
el = None
if el and ((not filter_function) or filter_function(el)):
rowstr.append("{:3s}".format(el.symbol))
else:
rowstr.append(" ")
print(" ".join(rowstr))
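# Added usage sketch (not part of the original module): Element is an Enum
# keyed by symbol, so lookup by value and attribute access give the same
# singleton, and the data-driven properties documented above are available:
#
#   >>> Element("Fe") is Element.Fe
#   True
#   >>> Element.Fe.Z
#   26
#   >>> Element.Fe.is_transition_metal
#   True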
class Specie(MSONable):
"""
An extension of Element with an oxidation state and other optional
properties. Properties associated with Specie should be "idealized"
values, not calculated values. For example, high-spin Fe2+ may be
assigned an idealized spin of +5, but an actual Fe2+ site may be
calculated to have a magmom of +4.5. Calculated properties should be
assigned to Site objects, and not Specie.
Args:
symbol (str): Element symbol, e.g., Fe
oxidation_state (float): Oxidation state of element, e.g., 2 or -2
properties: Properties associated with the Specie, e.g.,
{"spin": 5}. Defaults to None. Properties must be one of the
Specie supported_properties.
.. attribute:: oxi_state
Oxidation state associated with Specie
.. attribute:: ionic_radius
Ionic radius of Specie (with specific oxidation state).
.. versionchanged:: 2.6.7
Properties are now checked when comparing two Species for equality.
"""
cache = {}
def __new__(cls, *args, **kwargs):
key = (cls,) + args + tuple(kwargs.items())
try:
inst = Specie.cache.get(key, None)
except TypeError:
# Can't cache this set of arguments
inst = key = None
if inst is None:
inst = object.__new__(cls)
if key is not None:
Specie.cache[key] = inst
return inst
supported_properties = ("spin",)
def __init__(self, symbol, oxidation_state=None, properties=None):
self._el = Element(symbol)
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
# overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
try:
return getattr(self._el, a)
except:
raise AttributeError(a)
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
return isinstance(other, Specie) and self.symbol == other.symbol \
and self.oxi_state == other.oxi_state \
and self._properties == other._properties
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
"""
Equal Specie should have the same str representation, hence
        should hash equally. Unequal Specie will have different str
representations.
"""
return self.__str__().__hash__()
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state, followed by spin.
"""
if self.X != other.X:
return self.X < other.X
elif self.symbol != other.symbol:
            # There are cases where the electronegativities are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
elif self.oxi_state:
other_oxi = 0 if (isinstance(other, Element)
or other.oxi_state is None) else other.oxi_state
return self.oxi_state < other_oxi
elif getattr(self, "spin", False):
other_spin = getattr(other, "spin", 0)
            return self.spin < other_spin
else:
return False
@property
def element(self):
"""
Underlying element object
"""
return self._el
@property
def ionic_radius(self):
"""
Ionic radius of specie. Returns None if data is not present.
"""
if self._oxi_state in self.ionic_radii:
return self.ionic_radii[self._oxi_state]
d = self._el.data
oxstr = str(int(self._oxi_state))
if oxstr in d.get("Ionic radii hs", {}):
warnings.warn("No default ionic radius for %s. Using hs data." %
self)
return d["Ionic radii hs"][oxstr]
elif oxstr in d.get("Ionic radii ls", {}):
warnings.warn("No default ionic radius for %s. Using ls data." %
self)
return d["Ionic radii ls"][oxstr]
warnings.warn("No ionic radius for {}!".format(self))
return None
@property
def oxi_state(self):
"""
Oxidation state of Specie.
"""
return self._oxi_state
@staticmethod
def from_string(species_string):
"""
Returns a Specie from a string representation.
Args:
species_string (str): A typical string representation of a
species, e.g., "Mn2+", "Fe3+", "O2-".
Returns:
A Specie object.
Raises:
            ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9\.]*)([\+\-])(.*)", species_string)
if m:
sym = m.group(1)
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).replace(",","").split("=")
properties = {toks[0]: float(toks[1])}
return Specie(sym, oxi, properties)
else:
raise ValueError("Invalid Species String")
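    # Illustrative examples of the string format parsed above (added comment):
    # symbol, optional magnitude, sign, then optional ",prop=value" pairs.
    #   Specie.from_string("Fe3+")        -> Specie("Fe", 3.0)
    #   Specie.from_string("O2-")         -> Specie("O", -2.0)
    #   Specie.from_string("Mn2+,spin=5") -> Specie("Mn", 2.0, {"spin": 5.0})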
def __repr__(self):
return "Specie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
def get_shannon_radius(self, cn, spin="", radius_type="ionic"):
"""
Get the local environment specific ionic radius for species.
Args:
cn (str): Coordination using roman letters. Supported values are
I-IX, as well as IIIPY, IVPY and IVSQ.
spin (str): Some species have different radii for different
spins. You can get specific values using "High Spin" or
"Low Spin". Leave it as "" if not available. If only one spin
data is available, it is returned and this spin parameter is
ignored.
radius_type (str): Either "crystal" or "ionic" (default).
Returns:
Shannon radius for specie in the specified environment.
"""
radii = self._el.data["Shannon radii"]
# if cn == 1:
# cn_str = "I"
# elif cn == 2:
# cn_str = "II"
# elif cn == 3:
# cn_str = "III"
# elif cn == 4:
# cn_str = "IV"
# elif cn == 5:
# cn_str = "V"
# elif cn == 6:
# cn_str = "VI"
# elif cn == 7:
# cn_str = "VII"
# elif cn == 8:
# cn_str = "VIII"
# elif cn == 9:
# cn_str = "IX"
# else:
# raise ValueError("Invalid coordination number")
if len(radii[str(self._oxi_state)][cn]) == 1:
k, data = list(radii[str(self._oxi_state)][cn].items())[0]
if k != spin:
warnings.warn(
"Specified spin state of %s not consistent with database "
"spin of %s. Because there is only one spin data available, "
"that value is returned." % (spin, k)
)
else:
data = radii[str(self._oxi_state)][cn][spin]
return data["%s_radius" % radius_type]
def get_crystal_field_spin(self, coordination="oct", spin_config="high"):
"""
Calculate the crystal field spin based on coordination and spin
configuration. Only works for transition metal species.
Args:
coordination (str): Only oct and tet are supported at the moment.
spin_config (str): Supported keywords are "high" or "low".
Returns:
Crystal field spin in Bohr magneton.
Raises:
AttributeError if species is not a valid transition metal or has
an invalid oxidation state.
ValueError if invalid coordination or spin_config.
"""
if coordination not in ("oct", "tet") or \
spin_config not in ("high", "low"):
raise ValueError("Invalid coordination or spin config.")
elec = self.full_electronic_structure
if len(elec) < 4 or elec[-1][1] != "s" or elec[-2][1] != "d":
raise AttributeError(
"Invalid element {} for crystal field calculation.".format(
self.symbol))
nelectrons = elec[-1][2] + elec[-2][2] - self.oxi_state
if nelectrons < 0 or nelectrons > 10:
raise AttributeError(
"Invalid oxidation state {} for element {}"
.format(self.oxi_state, self.symbol))
if spin_config == "high":
return nelectrons if nelectrons <= 5 else 10 - nelectrons
elif spin_config == "low":
if coordination == "oct":
if nelectrons <= 3:
return nelectrons
elif nelectrons <= 6:
return 6 - nelectrons
elif nelectrons <= 8:
return nelectrons - 6
else:
return 10 - nelectrons
elif coordination == "tet":
if nelectrons <= 2:
return nelectrons
elif nelectrons <= 4:
return 4 - nelectrons
elif nelectrons <= 7:
return nelectrons - 4
else:
return 10 - nelectrons
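    # Worked example for get_crystal_field_spin above (added comment): Fe2+
    # has a full electronic structure ending in (3, "d", 6), (4, "s", 2), so
    # nelectrons = 2 + 6 - 2 = 6; the default octahedral high-spin case then
    # returns 10 - 6 = 4 unpaired electrons, i.e.
    #   Specie("Fe", 2).get_crystal_field_spin() -> 4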
def __deepcopy__(self, memo):
return Specie(self.symbol, self.oxi_state, self._properties)
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
class DummySpecie(Specie):
"""
A special specie for representing non-traditional elements or species. For
example, representation of vacancies (charged or otherwise), or special
sites, etc.
Args:
symbol (str): An assigned symbol for the dummy specie. Strict
rules are applied to the choice of the symbol. The dummy
symbol cannot have any part of first two letters that will
constitute an Element symbol. Otherwise, a composition may
be parsed wrongly. E.g., "X" is fine, but "Vac" is not
because Vac contains V, a valid Element.
oxidation_state (float): Oxidation state for dummy specie.
Defaults to zero.
.. attribute:: symbol
Symbol for the DummySpecie.
.. attribute:: oxi_state
Oxidation state associated with Specie.
.. attribute:: Z
DummySpecie is always assigned an atomic number equal to the hash
number of the symbol. Obviously, it makes no sense whatsoever to use
the atomic number of a Dummy specie for anything scientific. The purpose
of this is to ensure that for most use cases, a DummySpecie behaves no
differently from an Element or Specie.
.. attribute:: X
DummySpecie is always assigned an electronegativity of 0.
"""
def __init__(self, symbol="X", oxidation_state=0, properties=None):
for i in range(1, min(2, len(symbol)) + 1):
if Element.is_valid_symbol(symbol[:i]):
raise ValueError("{} contains {}, which is a valid element "
"symbol.".format(symbol, symbol[:i]))
# Set required attributes for DummySpecie to function like a Specie in
# most instances.
self._symbol = symbol
self._oxi_state = oxidation_state
self._properties = properties if properties else {}
for k in self._properties.keys():
if k not in Specie.supported_properties:
raise ValueError("{} is not a supported property".format(k))
def __getattr__(self, a):
        # overriding getattr doesn't play nice with pickle, so we
# can't use self._properties
p = object.__getattribute__(self, '_properties')
if a in p:
return p[a]
raise AttributeError(a)
def __hash__(self):
return self.symbol.__hash__()
def __eq__(self, other):
"""
Specie is equal to other only if element and oxidation states are
exactly the same.
"""
if not isinstance(other, DummySpecie):
return False
return isinstance(other, Specie) and self.symbol == other.symbol \
and self.oxi_state == other.oxi_state \
and self._properties == other._properties
def __ne__(self, other):
return not self.__eq__(other)
def __lt__(self, other):
"""
Sets a default sort order for atomic species by electronegativity,
followed by oxidation state.
"""
if self.X != other.X:
return self.X < other.X
elif self.symbol != other.symbol:
            # There are cases where the electronegativities are exactly equal.
# We then sort by symbol.
return self.symbol < other.symbol
else:
other_oxi = 0 if isinstance(other, Element) else other.oxi_state
return self.oxi_state < other_oxi
@property
def Z(self):
"""
DummySpecie is always assigned an atomic number equal to the hash of
the symbol. The expectation is that someone would be an actual dummy
to use atomic numbers for a Dummy specie.
"""
return self.symbol.__hash__()
@property
def oxi_state(self):
"""
Oxidation state associated with DummySpecie
"""
return self._oxi_state
@property
def X(self):
"""
DummySpecie is always assigned an electronegativity of 0. The effect of
this is that DummySpecie are always sorted in front of actual Specie.
"""
return 0
@property
def symbol(self):
return self._symbol
def __deepcopy__(self, memo):
return DummySpecie(self.symbol, self._oxi_state)
@staticmethod
def from_string(species_string):
"""
Returns a Dummy from a string representation.
Args:
species_string (str): A string representation of a dummy
species, e.g., "X2+", "X3+".
Returns:
A DummySpecie object.
Raises:
            ValueError if species_string cannot be interpreted.
"""
m = re.search(r"([A-Z][a-z]*)([0-9.]*)([+\-]*)(.*)", species_string)
if m:
sym = m.group(1)
if m.group(2) == "" and m.group(3) == "":
oxi = 0
else:
oxi = 1 if m.group(2) == "" else float(m.group(2))
oxi = -oxi if m.group(3) == "-" else oxi
properties = None
if m.group(4):
toks = m.group(4).split("=")
properties = {toks[0]: float(toks[1])}
return DummySpecie(sym, oxi, properties)
raise ValueError("Invalid DummySpecies String")
@classmethod
def safe_from_composition(cls, comp, oxidation_state=0):
"""
Returns a DummySpecie object that can be safely used
with (i.e. not present in) a given composition
"""
# We don't want to add a DummySpecie with the same
# symbol as anything in the composition, even if the
# oxidation state is different
els = comp.element_composition.elements
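        # Note (added comment): the letter "e" is presumably skipped in the
        # candidate suffixes below because "Xe" is a real element symbol and
        # DummySpecie("Xe") would be rejected by the check in __init__.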
for c in 'abcdfghijklmnopqrstuvwxyz':
if DummySpecie('X' + c) not in els:
return DummySpecie('X' + c, oxidation_state)
raise ValueError("All attempted DummySpecies already "
"present in {}".format(comp))
def as_dict(self):
d = {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"element": self.symbol,
"oxidation_state": self._oxi_state}
if self._properties:
d["properties"] = self._properties
return d
@classmethod
def from_dict(cls, d):
return cls(d["element"], d["oxidation_state"],
d.get("properties", None))
def __repr__(self):
return "DummySpecie " + self.__str__()
def __str__(self):
output = self.symbol
if self.oxi_state is not None:
if self.oxi_state >= 0:
output += formula_double_format(self.oxi_state) + "+"
else:
output += formula_double_format(-self.oxi_state) + "-"
for p, v in self._properties.items():
output += ",%s=%s" % (p, v)
return output
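# Added usage sketch (not part of the original module): dummy symbols must not
# start with a real element symbol in their first one or two letters.
#
#   >>> DummySpecie("X", 2).oxi_state
#   2
#   >>> DummySpecie.from_string("X2+").oxi_state
#   2.0
#   >>> DummySpecie("Vac")   # raises ValueError, because "V" is an element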
def get_el_sp(obj):
"""
Utility method to get an Element or Specie from an input obj.
If obj is in itself an element or a specie, it is returned automatically.
If obj is an int or a string representing an integer, the Element
with the atomic number obj is returned.
If obj is a string, Specie parsing will be attempted (e.g., Mn2+), failing
which Element parsing will be attempted (e.g., Mn), failing which
    DummySpecie parsing will be attempted.
Args:
obj (Element/Specie/str/int): An arbitrary object. Supported objects
are actual Element/Specie objects, integers (representing atomic
numbers) or strings (element symbols or species strings).
Returns:
Specie or Element, with a bias for the maximum number of properties
that can be determined.
Raises:
ValueError if obj cannot be converted into an Element or Specie.
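    Example (added for illustration):
        get_el_sp(26)      -> Element Fe      (atomic number)
        get_el_sp("Fe2+")  -> Specie Fe2+     (species string)
        get_el_sp("Fe")    -> Element Fe      (plain symbol)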
"""
if isinstance(obj, (Element, Specie, DummySpecie)):
return obj
if isinstance(obj, (list, tuple)):
return [get_el_sp(o) for o in obj]
try:
c = float(obj)
i = int(c)
i = i if i == c else None
except (ValueError, TypeError):
i = None
if i is not None:
return Element.from_Z(i)
try:
return Specie.from_string(obj)
except (ValueError, KeyError):
try:
return Element(obj)
except (ValueError, KeyError):
try:
return DummySpecie.from_string(obj)
except:
raise ValueError("Can't parse Element or String from type"
" %s: %s." % (type(obj), obj))
|
|
from __future__ import absolute_import
from collections import deque
import contextlib
import errno
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import re
import os
import posixpath
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from pip.exceptions import InstallationError
from pip.compat import console_to_str, expanduser, stdlib_pkgs
from pip.locations import (
site_packages, user_site, running_under_virtualenv, virtualenv_no_global,
write_delete_marker_file,
)
from pip._vendor import pkg_resources
from pip._vendor.six.moves import input
from pip._vendor.six import PY2
from pip._vendor.retrying import retry
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'normalize_path',
'renames', 'get_terminal_size', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'remove_tracebacks', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
'get_installed_version']
logger = std_logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = (
ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug('lzma module is not available')
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
try:
return __import__(pkg_or_module_string)
except ImportError:
raise ExceptionType(*args, **kwargs)
def ensure_dir(path):
    """os.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
try:
if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
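# Example outputs (added comment; thresholds follow the code above):
#   format_size(2500000) -> '2.5MB'
#   format_size(20000)   -> '20kB'
#   format_size(1500)    -> '1.5kB'
#   format_size(500)     -> '500bytes'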
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def read_chunks(file, size=4096):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def split_leading_dir(path):
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
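# Example (added comment): archive member lists with and without a single
# leading directory.
#   has_leading_dir(['pkg/setup.py', 'pkg/pkg/__init__.py'])  -> True
#   has_leading_dir(['pkg/setup.py', 'README.txt'])           -> False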
def normalize_path(path, resolve_symlinks=True):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
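# Example (added comment), using hypothetical file names:
#   splitext('dist/pip-9.0.1.tar.gz') -> ('dist/pip-9.0.1', '.tar.gz')
#   splitext('pip-9.0.1.zip')         -> ('pip-9.0.1', '.zip')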
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in
distutils.sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
    If ``include_editables`` is False, don't report editables.
    If ``editables_only`` is True, only report editables.
    If ``user_only`` is True, only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
return [d for d in pkg_resources.working_set
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
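# Illustrative calls (not part of the original module); the arguments shown
# are examples only:
#   get_installed_distributions()                     # local, non-stdlib dists
#   get_installed_distributions(editables_only=True)  # only develop installs
#   get_installed_distributions(user_only=True)       # only user-site installs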
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
    There are 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def get_terminal_size():
"""Returns a tuple (x, y) representing the width(x) and the height(x)
in characters of the terminal window."""
def ioctl_GWINSZ(fd):
try:
import fcntl
import termios
import struct
cr = struct.unpack(
'hh',
fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
)
except:
return None
if cr == (0, 0):
return None
return cr
cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
if not cr:
try:
fd = os.open(os.ctermid(), os.O_RDONLY)
cr = ioctl_GWINSZ(fd)
os.close(fd)
except:
pass
if not cr:
cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))
return int(cr[1]), int(cr[0])
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
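# Illustrative note (not part of the original module): unzip_file() and
# untar_file() below combine current_umask() with a bitwise OR to grant
# execute bits while still honouring the umask. With sample umask values:
#   (0o777 - 0o022) | 0o111  ==  0o755
#   (0o777 - 0o077) | 0o111  ==  0o711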
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = 'r:xz'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
tar._extract_member(member, path)
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
with open(path, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
tar.utime(member, path)
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(
TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
def remove_tracebacks(output):
pattern = (r'(?:\W+File "(?:.*)", line (?:.*)\W+(?:.*)\W+\^\W+)?'
r'Syntax(?:Error|Warning): (?:.*)')
output = re.sub(pattern, '', output)
if PY2:
return output
# compileall.compile_dir() prints different messages to stdout
# in Python 3
return re.sub(r"\*\*\* Error compiling (?:.*)", '', output)
def call_subprocess(cmd, show_stdout=True, cwd=None,
on_returncode='raise',
command_level=std_logging.DEBUG, command_desc=None,
extra_environ=None, spinner=None):
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
logger.log(command_level, "Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=None, stdout=subprocess.PIPE,
cwd=cwd, env=env)
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if show_stdout:
logger.debug(line)
if spinner is not None:
spinner.spin()
proc.wait()
if spinner is not None:
if proc.returncode:
spinner.finish("error")
else:
spinner.finish("done")
if proc.returncode:
if on_returncode == 'raise':
if all_output:
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
elif on_returncode == 'warn':
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
elif on_returncode == 'ignore':
pass
else:
raise ValueError('Invalid value: on_returncode=%s' %
repr(on_returncode))
if not show_stdout:
return remove_tracebacks(''.join(all_output))
def read_text_file(filename):
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
data = data.decode(enc)
except UnicodeDecodeError:
continue
break
assert type(data) != bytes # Latin1 should have worked.
return data
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
def get_installed_version(dist_name):
"""Get the installed version of dist_name avoiding pkg_resources cache"""
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
working_set = pkg_resources.WorkingSet()
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not, if we did
# we want to return it's version.
return dist.version if dist else None
def consume(iterator):
"""Consume an iterable at C speed."""
deque(iterator, maxlen=0)
|
|
import os
import subprocess
import sys
import traceback
from array import array
from collections import deque
from shutil import get_terminal_size
from threading import Thread
import asyncio
import audioop
from enum import Enum
from musicbot.config import static_config
from musicbot.entry import RadioSongEntry, StreamEntry, TimestampEntry
from musicbot.exceptions import FFmpegError, FFmpegWarning
from musicbot.lib.event_emitter import EventEmitter
from musicbot.queue import Queue
from musicbot.utils import create_cmd_params, format_time_ffmpeg
from musicbot.web_socket_server import GieselaServer
class PatchedBuff:
"""
    PatchedBuff monkey patches a readable object, allowing the volume to be varied while the song is playing.
"""
def __init__(self, buff, *, draw=False):
self.buff = buff
self.frame_count = 0
self._volume = 1.0
self.draw = draw
self.use_audioop = True
self.frame_skip = 2
self.rmss = deque([2048], maxlen=90)
def __del__(self):
if self.draw:
print(" " * (get_terminal_size().columns - 1), end="\r")
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, v):
        # apply the configured volume power curve before storing the value
        self._volume = v ** static_config.volume_power
def read(self, frame_size):
self.frame_count += 1
frame = self.buff.read(frame_size)
if self.volume != 1:
frame = self._frame_vol(frame, self.volume, maxv=2)
if self.draw and not self.frame_count % self.frame_skip:
# these should be processed for every frame, but "overhead"
rms = audioop.rms(frame, 2)
self.rmss.append(rms)
max_rms = sorted(self.rmss)[-1]
meter_text = "avg rms: {:.2f}, max rms: {:.2f} ".format(
self._avg(self.rmss), max_rms)
self._pprint_meter(rms / max(1, max_rms),
text=meter_text, shift=True)
return frame
def _frame_vol(self, frame, mult, *, maxv=2, use_audioop=True):
if use_audioop:
return audioop.mul(frame, 2, min(mult, maxv))
else:
# ffmpeg returns s16le pcm frames.
frame_array = array("h", frame)
for i in range(len(frame_array)):
frame_array[i] = int(frame_array[i] * min(mult, min(1, maxv)))
return frame_array.tobytes()
def _avg(self, i):
return sum(i) / len(i)
def _pprint_meter(self, perc, *, char="#", text="", shift=True):
tx, ty = get_terminal_size()
if shift:
outstr = text + \
"{}".format(char * (int((tx - len(text)) * perc) - 1))
else:
outstr = text + \
"{}".format(char * (int(tx * perc) - 1))[len(text):]
print(outstr.ljust(tx - 1), end="\r")
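# Illustrative sketch (not part of the original module): PatchedBuff wraps the
# ffmpeg player's readable buffer so the volume can change mid-song (this is
# what MusicPlayer._monkeypatch_player() below does). Values are examples only.
#   patched = PatchedBuff(player.buff)
#   patched.volume = 0.5        # scale the amplitude of subsequent frames
#   frame = patched.read(3840)  # frame size chosen arbitrarily for the example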
class MusicPlayerState(Enum):
STOPPED = 0 # When the player isn't playing anything
PLAYING = 1 # The player is actively playing music.
PAUSED = 2 # The player is paused on a song.
WAITING = 3 # The player has finished its song but is still downloading the next one
DEAD = 4 # The player has been killed.
def __str__(self):
return self.name
class MusicPlayerRepeatState(Enum):
NONE = 0 # queue plays as normal
ALL = 1 # Entire queue repeats
SINGLE = 2 # Currently playing song repeats forever
def __str__(self):
return self.name
class MusicPlayer(EventEmitter):
def __init__(self, bot, voice_client):
super().__init__()
self.bot = bot
self.loop = bot.loop
self.voice_client = voice_client
self.queue = Queue(bot, self)
self.queue.on("entry-added", self.on_entry_added)
self._play_lock = asyncio.Lock()
self._current_player = None
self._current_entry = None
self.state = MusicPlayerState.STOPPED
self.repeatState = MusicPlayerRepeatState.NONE
self.skipRepeat = False
self.loop.create_task(self.websocket_check())
self.handle_manually = False
self.volume = bot.config.default_volume
self.chapter_updater = None
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
self._volume = value
if self._current_player:
self._current_player.buff.volume = value
GieselaServer.send_small_update(self.voice_client.server.id, volume=value)
def on_entry_added(self, queue, entry):
if self.is_stopped:
self.loop.call_later(2, self.play)
def skip(self):
self.skipRepeat = True
self._kill_current_player()
self.update_chapter_updater()
def repeat(self):
if self.is_repeatNone:
self.repeatState = MusicPlayerRepeatState.ALL
elif self.is_repeatAll:
self.repeatState = MusicPlayerRepeatState.SINGLE
elif self.is_repeatSingle:
self.repeatState = MusicPlayerRepeatState.NONE
else:
# no idea how that should happen but eh...
return False
GieselaServer.send_small_update(self.voice_client.server.id, repeat_state=self.repeatState.value, repeat_state_name=str(self.repeatState))
return True
def stop(self):
self.state = MusicPlayerState.STOPPED
self._kill_current_player()
self.emit("stop", player=self)
def resume(self):
if self.is_paused and self._current_player:
self._current_player.resume()
self.update_chapter_updater()
self.state = MusicPlayerState.PLAYING
self.emit("resume", player=self, entry=self.current_entry)
return
if self.is_paused and not self._current_player:
self.state = MusicPlayerState.PLAYING
self._kill_current_player()
return
raise ValueError("Cannot resume playback from state %s" % self.state)
def seek(self, secs):
if (not self.current_entry) or secs >= self.current_entry.end_seconds:
print("[PLAYER] Seek target out of bounds, skipping!")
self.skip()
return True
secs = max(0, secs)
entry = self.current_entry
if not entry.seek(secs):
print("[PLAYER] Couldn't set start of entry")
return False
self.handle_manually = True
self.play_entry(entry)
self.emit("play", player=self, entry=entry)
return True
def set_filters(self, filters):
if not self.current_entry:
return False
entry = self.current_entry
entry.set_start(self.progress)
if not filters:
entry.meta.pop("filters", None)
else:
entry.meta["filters"] = filters
self.handle_manually = True
self.play_entry(entry)
self.emit("play", player=self, entry=entry)
return True
def pause(self):
if isinstance(self.current_entry, StreamEntry):
print("Won't pause because I'm playing a stream")
self.stop()
return
if self.is_playing:
self.state = MusicPlayerState.PAUSED
if self._current_player:
self._current_player.pause()
self.update_chapter_updater(pause=True)
self.emit("pause", player=self, entry=self.current_entry)
return
elif self.is_paused:
return
raise ValueError("Cannot pause a MusicPlayer in state %s" % self.state)
def kill(self):
self.state = MusicPlayerState.DEAD
self.queue.clear()
self._events.clear()
self._kill_current_player()
def _playback_finished(self):
if self.handle_manually:
self.handle_manually = False
return
entry = self._current_entry
self.queue.push_history(entry)
if self.is_repeatAll or (self.is_repeatSingle and not self.skipRepeat):
self.queue._add_entry(entry, placement=0)
self.skipRepeat = False
if self._current_player:
self._current_player.after = None
self._kill_current_player()
self._current_entry = None
if not self.is_stopped and not self.is_dead:
self.play(_continue=True)
if not self.bot.config.save_videos and entry:
if any([entry.filename == e.filename for e in self.queue.entries]):
print("[Config:SaveVideos] Skipping deletion, found song in queue")
else:
asyncio.ensure_future(self._delete_file(entry.filename))
self.emit("finished-playing", player=self, entry=entry)
def _kill_current_player(self):
if self._current_player:
if self.is_paused:
self.resume()
try:
self._current_player.stop()
except OSError:
pass
self._current_player = None
return True
return False
async def _delete_file(self, filename):
for x in range(30):
try:
os.unlink(filename)
break
except PermissionError as e:
if e.winerror == 32: # File is in use
await asyncio.sleep(0.25)
except Exception as e:
traceback.print_exc()
print("Error trying to delete " + filename)
break
else:
print("[Config:SaveVideos] Could not delete file {}, giving up and moving on".format(
os.path.relpath(filename)))
def play(self, _continue=False):
self.loop.create_task(self._play(_continue=_continue))
async def _play(self, _continue=False):
"""
Plays the next entry from the Queue, or resumes playback of the current entry if paused.
"""
if self.is_paused:
return self.resume()
if self.is_dead:
return
if self.is_stopped or _continue:
try:
entry = await self.queue.get_next_entry()
except Exception as e:
print("Failed to get entry.")
traceback.print_exc()
# Retry playing the next entry in a sec.
self.loop.call_later(0.1, self.play)
return
# If nothing left to play, transition to the stopped state.
if not entry:
self.stop()
return
await self._play_entry(entry)
self.emit("play", player=self, entry=entry)
def play_entry(self, entry):
self.loop.create_task(self._play_entry(entry))
async def _play_entry(self, entry):
"""
Play the entry
"""
if self.is_dead:
return
with await self._play_lock:
# In-case there was a player, kill it. RIP.
self._kill_current_player()
before_options = {
"nostdin": None
}
options = {
"vn": None,
"b:a": "128k"
}
if not isinstance(entry, StreamEntry):
start_seconds = int(entry.start_seconds)
before_options["ss"] = format_time_ffmpeg(start_seconds)
options["to"] = format_time_ffmpeg(int(entry.end_seconds) - start_seconds)
if "filters" in entry.meta:
options.update({
"filter:a": "\"" + ",".join(entry.meta["filters"]) + "\""
})
self._current_player = self._monkeypatch_player(self.voice_client.create_ffmpeg_player(
entry.filename,
before_options=create_cmd_params(before_options),
options=create_cmd_params(options),
stderr=subprocess.PIPE,
after=lambda: self.loop.call_soon_threadsafe(
self._playback_finished)
))
self._current_player.setDaemon(True)
self._current_player.buff.volume = self._volume
self.state = MusicPlayerState.PLAYING
self._current_entry = entry
self._stderr_future = asyncio.Future()
stderr_thread = Thread(
target=filter_stderr,
args=(self._current_player.process, self._stderr_future),
name="{} stderr reader".format(self._current_player.name)
)
stderr_thread.start()
self._current_player.start()
self.update_chapter_updater()
def update_chapter_updater(self, pause=False):
if self.chapter_updater:
print("[CHAPTER-UPDATER] Cancelling old updater")
self.chapter_updater.cancel()
if not pause and isinstance(self.current_entry, (RadioSongEntry, TimestampEntry)):
print("[CHAPTER-UPDATER] Creating new updater")
self.chapter_updater = asyncio.ensure_future(self.update_chapter(), loop=self.loop)
async def update_chapter(self):
while True:
if self.current_entry:
if isinstance(self.current_entry, TimestampEntry):
sub_entry = self.current_entry.current_sub_entry
# just to be sure, add an extra 2 seconds
delay = (sub_entry["duration"] - sub_entry["progress"]) + 2
elif isinstance(self.current_entry, RadioSongEntry):
if self.current_entry.poll_time:
print("[CHAPTER-UPDATER] this radio stations enforces a custom wait time")
delay = self.current_entry.poll_time
elif self.current_entry.song_duration > 5:
delay = self.current_entry.song_duration - self.current_entry.song_progress + self.current_entry.uncertainty
if delay <= 0:
delay = 40
else:
delay = 40
else:
return # this is not the kind of entry that requires an update
else:
print("[CHAPTER-UPDATER] There's nothing playing")
return
print("[CHAPTER-UPDATER] Waiting " + str(round(delay, 1)) +
" seconds before emitting now playing event")
before_title = self.current_entry.title
await asyncio.sleep(delay)
if not self.current_entry:
# print("[CHAPTER-UPDATER] Waited for nothing. There's nothing playing anymore")
return
if self.current_entry.title == before_title:
print(
"[CHAPTER-UPDATER] The same thing is still playing. Back to sleep!")
continue
print("[CHAPTER-UPDATER] Emitting next now playing event")
self.emit("play", player=self, entry=self.current_entry)
def _monkeypatch_player(self, player):
original_buff = player.buff
player.buff = PatchedBuff(original_buff)
return player
def reload_voice(self, voice_client):
self.voice_client = voice_client
if self._current_player:
self._current_player.player = voice_client.play_audio
self._current_player._resumed.clear()
self._current_player._connected.set()
async def websocket_check(self):
if self.bot.config.debug_mode:
print("[Debug] Creating websocket check loop")
while not self.is_dead:
try:
self.voice_client.ws.ensure_open()
assert self.voice_client.ws.open
except:
if self.bot.config.debug_mode:
print("[Debug] Voice websocket is %s, reconnecting" %
self.voice_client.ws.state_name)
try:
await self.voice_client.disconnect()
except:
print("Error disconnecting during reconnect")
traceback.print_exc()
await asyncio.sleep(0.1)
new_vc = await self.bot.join_voice_channel(self.voice_client.channel)
self.reload_voice(new_vc)
if self.is_paused:
self.resume()
await asyncio.sleep(4)
finally:
await asyncio.sleep(1)
@property
def current_entry(self):
return self._current_entry
@property
def is_repeatNone(self):
return self.repeatState == MusicPlayerRepeatState.NONE
@property
def is_repeatAll(self):
return self.repeatState == MusicPlayerRepeatState.ALL
@property
def is_repeatSingle(self):
return self.repeatState == MusicPlayerRepeatState.SINGLE
@property
def is_playing(self):
return self.state == MusicPlayerState.PLAYING
@property
def is_paused(self):
return self.state == MusicPlayerState.PAUSED
@property
def is_stopped(self):
return self.state == MusicPlayerState.STOPPED
@property
def is_dead(self):
return self.state == MusicPlayerState.DEAD
@property
def progress(self):
secs = 0
if self._current_player:
secs = round(self._current_player.buff.frame_count * 0.02)
if self.current_entry.start_seconds:
secs += self.current_entry.start_seconds
return secs
def filter_stderr(popen: subprocess.Popen, future: asyncio.Future):
last_ex = None
while True:
data = popen.stderr.readline()
if data:
print("Data from ffmpeg: {}".format(data))
try:
if check_stderr(data):
sys.stderr.buffer.write(data)
sys.stderr.buffer.flush()
except FFmpegError as e:
print("Error from ffmpeg: %s", str(e).strip())
last_ex = e
except FFmpegWarning:
pass # useless message
else:
break
if last_ex:
future.set_exception(last_ex)
else:
future.set_result(True)
def check_stderr(data: bytes):
try:
data = data.decode("utf8")
except:
print("Unknown error decoding message from ffmpeg", exc_info=True)
return True # duck it
warnings = [
"Header missing",
"Estimating duration from birate, this may be inaccurate",
"Using AVStream.codec to pass codec parameters to muxers is deprecated, use AVStream.codecpar instead.",
"Application provided invalid, non monotonically increasing dts to muxer in stream",
"Last message repeated",
"Failed to send close message",
"decode_band_types: Input buffer exhausted before END element found"
]
errors = [
        # need to regex this properly, it's both a warning and an error
"Invalid data found when processing input",
]
if any(msg in data for msg in warnings):
raise FFmpegWarning(data)
if any(msg in data for msg in errors):
raise FFmpegError(data)
return True
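# Illustrative helper (not part of the original module) showing how
# check_stderr() classifies a single ffmpeg stderr line; the helper name is
# hypothetical and exists only as a usage sketch.
def _example_classify_ffmpeg_line(line: bytes) -> str:
    """Classify one ffmpeg stderr line as 'ok', 'warning' or 'error'."""
    try:
        check_stderr(line)
        return "ok"
    except FFmpegWarning:
        return "warning"
    except FFmpegError:
        return "error"
# e.g. _example_classify_ffmpeg_line(b"Header missing") would return "warning".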
|
|
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy.ext.mutable import MutableDict
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy_utils import JSONType
from sqlalchemy.orm.exc import NoResultFound
from flask_dance.utils import FakeCache, first, getattrd
from flask_dance.consumer.backend import BaseBackend
try:
from flask_login import AnonymousUserMixin
except ImportError:
AnonymousUserMixin = None
class OAuthConsumerMixin(object):
"""
A :ref:`SQLAlchemy declarative mixin <sqlalchemy:declarative_mixins>` with
some suggested columns for a model to store OAuth tokens:
``id``
an integer primary key
``provider``
a short name to indicate which OAuth provider issued
this token
``created_at``
an automatically generated datetime that indicates when
the OAuth provider issued this token
``token``
a :class:`JSON <sqlalchemy_utils.types.json.JSONType>` field to store
the actual token received from the OAuth provider
"""
@declared_attr
def __tablename__(cls):
return "flask_dance_{}".format(cls.__name__.lower())
id = Column(Integer, primary_key=True)
provider = Column(String(50))
created_at = Column(DateTime, default=datetime.utcnow)
token = Column(MutableDict.as_mutable(JSONType))
def __repr__(self):
parts = []
parts.append(self.__class__.__name__)
if self.id:
parts.append("id={}".format(self.id))
if self.provider:
parts.append('provider="{}"'.format(self.provider))
return "<{}>".format(" ".join(parts))
class SQLAlchemyBackend(BaseBackend):
"""
Stores and retrieves OAuth tokens using a relational database through
the `SQLAlchemy`_ ORM.
.. _SQLAlchemy: http://www.sqlalchemy.org/
"""
def __init__(self, model, session,
user=None, user_id=None, anon_user=None, cache=None):
"""
Args:
model: The SQLAlchemy model class that represents the OAuth token
table in the database. At a minimum, it must have a
``provider`` column and a ``token`` column. If tokens are to be
associated with individual users in the application, it must
also have a ``user`` relationship to your User model.
It is recommended, though not required, that your model class
inherit from
:class:`~flask_dance.consumer.storage.sqla.OAuthConsumerMixin`.
session:
The :class:`SQLAlchemy session <sqlalchemy.orm.session.Session>`
for the database. If you're using `Flask-SQLAlchemy`_, this is
``db.session``.
user:
If you want OAuth tokens to be associated with individual users
in your application, this is a reference to the user that you
want to use for the current request. It can be an actual User
object, a function that returns a User object, or a proxy to the
User object. If you're using `Flask-Login`_, this is
:attr:`~flask.ext.login.current_user`.
user_id:
If you want to pass an identifier for a user instead of an actual
User object, use this argument instead. Sometimes it can save
a database query or two. If both ``user`` and ``user_id`` are
                provided, ``user_id`` will take precedence.
anon_user:
If anonymous users are represented by a class in your application,
provide that class here. If you are using `Flask-Login`_,
anonymous users are represented by the
:class:`flask_login.AnonymousUserMixin` class, but you don't have
to provide that -- Flask-Dance treats it as the default.
cache:
An instance of `Flask-Cache`_. Providing a caching system is
highly recommended, but not required.
.. _Flask-SQLAlchemy: http://pythonhosted.org/Flask-SQLAlchemy/
.. _Flask-Login: https://flask-login.readthedocs.org/
.. _Flask-Cache: http://pythonhosted.org/Flask-Cache/
"""
self.model = model
self.session = session
self.user = user
self.user_id = user_id
self.anon_user = anon_user or AnonymousUserMixin
self.cache = cache or FakeCache()
def make_cache_key(self, blueprint, user=None, user_id=None):
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
if not uid:
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
uid = getattr(u, "id", u)
return "flask_dance_token|{name}|{user_id}".format(
name=blueprint.name, user_id=uid,
)
def get(self, blueprint, user=None, user_id=None):
# check cache
cache_key = self.make_cache_key(blueprint=blueprint, user=user, user_id=user_id)
token = self.cache.get(cache_key)
if token:
return token
# if not cached, make database queries
query = (
self.session.query(self.model)
.filter_by(provider=blueprint.name)
)
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
# check for user ID
if hasattr(self.model, "user_id") and uid:
query = query.filter_by(user_id=uid)
# check for user (relationship property)
elif hasattr(self.model, "user") and u:
query = query.filter_by(user=u)
# if we have the property, but not value, filter by None
elif hasattr(self.model, "user_id"):
query = query.filter_by(user_id=None)
# run query
try:
token = query.one().token
except NoResultFound:
token = None
# cache the result
self.cache.set(cache_key, token)
return token
def set(self, blueprint, token, user=None, user_id=None):
# if there was an existing model, delete it
existing_query = (
self.session.query(self.model)
.filter_by(provider=blueprint.name)
)
# check for user ID
has_user_id = hasattr(self.model, "user_id")
if has_user_id:
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
if uid:
existing_query = existing_query.filter_by(user_id=uid)
# check for user (relationship property)
has_user = hasattr(self.model, "user")
if has_user:
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
if u:
existing_query = existing_query.filter_by(user=u)
# queue up delete query -- won't be run until commit()
existing_query.delete()
# create a new model for this token
kwargs = {
"provider": blueprint.name,
"token": token,
}
if has_user_id and uid:
kwargs["user_id"] = uid
if has_user and u:
kwargs["user"] = u
self.session.add(self.model(**kwargs))
# commit to delete and add simultaneously
self.session.commit()
# invalidate cache
self.cache.delete(self.make_cache_key(
blueprint=blueprint, user=user, user_id=user_id
))
def delete(self, blueprint, user=None, user_id=None):
query = (
self.session.query(self.model)
.filter_by(provider=blueprint.name)
)
uid = first([user_id, self.user_id, blueprint.config.get("user_id")])
u = first(_get_real_user(ref, self.anon_user)
for ref in (user, self.user, blueprint.config.get("user")))
# check for user ID
if hasattr(self.model, "user_id") and uid:
query = query.filter_by(user_id=uid)
# check for user (relationship property)
elif hasattr(self.model, "user") and u:
query = query.filter_by(user=u)
# if we have the property, but not value, filter by None
elif hasattr(self.model, "user_id"):
query = query.filter_by(user_id=None)
# run query
query.delete()
self.session.commit()
# invalidate cache
self.cache.delete(self.make_cache_key(
blueprint=blueprint, user=user, user_id=user_id,
))
def _get_real_user(user, anon_user=None):
"""
Given a "user" that could be:
* a real user object
* a function that returns a real user object
* a LocalProxy to a real user object (like Flask-Login's ``current_user``)
This function returns the real user object, regardless of which we have.
"""
if hasattr(user, "_get_current_object"):
# this is a proxy
user = user._get_current_object()
if callable(user):
# this is a function
user = user()
if anon_user and isinstance(user, anon_user):
return None
return user
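# Illustrative sketch (not part of this module): wiring SQLAlchemyBackend into
# a Flask-Dance blueprint. The OAuth model, db.session and current_user names
# are assumptions borrowed from a typical Flask-SQLAlchemy / Flask-Login app.
#
#   from flask_dance.contrib.github import make_github_blueprint
#   blueprint = make_github_blueprint(client_id="...", client_secret="...")
#   blueprint.backend = SQLAlchemyBackend(OAuth, db.session, user=current_user)
#
# When a cache instance is passed via ``cache=...``, get() is answered from the
# cache after the first database lookup for a given (blueprint, user) pair.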
|
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import json
from telemetry.internal.browser import browser_options
from telemetry.internal.platform import android_device
from telemetry.internal.util import binary_manager
from telemetry.testing import browser_test_context
from telemetry.testing import serially_executed_browser_test_case
from py_utils import discover
import typ
from typ import arg_parser
TEST_SUFFIXES = ['*_test.py', '*_tests.py', '*_unittest.py', '*_unittests.py']
def PrintTelemetryHelp():
options = browser_options.BrowserFinderOptions()
options.browser_type = 'any'
parser = options.CreateParser()
print '\n\nCommand line arguments handled by Telemetry:'
parser.print_help()
def ProcessCommandLineOptions(test_class, typ_options, args):
options = browser_options.BrowserFinderOptions()
options.browser_type = 'any'
parser = options.CreateParser(test_class.__doc__)
test_class.AddCommandlineArgs(parser)
# Set the default chrome root variable. This is required for the
# Android browser finder to function properly.
if typ_options.default_chrome_root:
parser.set_defaults(chrome_root=typ_options.default_chrome_root)
finder_options, positional_args = parser.parse_args(args)
finder_options.positional_args = positional_args
# Typ parses the "verbose", or "-v", command line arguments which
# are supposed to control logging verbosity. Carry them over.
finder_options.verbosity = typ_options.verbose
return finder_options
def _ValidateDistinctNames(browser_test_classes):
names_to_test_classes = {}
for cl in browser_test_classes:
name = cl.Name()
if name in names_to_test_classes:
raise Exception('Test name %s is duplicated between %s and %s' % (
name, repr(cl), repr(names_to_test_classes[name])))
names_to_test_classes[name] = cl
def _TestIndicesForShard(total_shards, shard_index, num_tests):
"""Returns indices of tests to run for a given shard.
  This method returns every Nth index, where N is the number of shards. We
intentionally avoid picking sequential runs of N tests, since that will pick
groups of related tests, which can skew runtimes. See
https://crbug.com/1028298.
"""
return range(shard_index, num_tests, total_shards)
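# Illustrative example (not part of the original module): with total_shards=4
# and num_tests=10, shard 1 runs the tests at indices range(1, 10, 4), i.e.
# [1, 5, 9] -- every 4th test starting from its own shard index.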
def _MedianTestTime(test_times):
times = test_times.values()
times.sort()
if len(times) == 0:
return 0
halfLen = len(times) / 2
if len(times) % 2:
return times[halfLen]
else:
return 0.5 * (times[halfLen - 1] + times[halfLen])
def _TestTime(test, test_times, default_test_time):
return test_times.get(test.shortName()) or default_test_time
def _DebugShardDistributions(shards, test_times):
for i, s in enumerate(shards):
num_tests = len(s)
if test_times:
median = _MedianTestTime(test_times)
shard_time = 0.0
for t in s:
shard_time += _TestTime(t, test_times, median)
print 'shard %d: %d seconds (%d tests)' % (i, shard_time, num_tests)
else:
print 'shard %d: %d tests (unknown duration)' % (i, num_tests)
def _SplitShardsByTime(test_cases, total_shards, test_times,
debug_shard_distributions):
median = _MedianTestTime(test_times)
shards = []
for i in xrange(total_shards):
shards.append({'total_time': 0.0, 'tests': []})
test_cases.sort(key=lambda t: _TestTime(t, test_times, median),
reverse=True)
# The greedy algorithm has been empirically tested on the WebGL 2.0
# conformance tests' times, and results in an essentially perfect
# shard distribution of 530 seconds per shard. In the same scenario,
# round-robin scheduling resulted in shard times spread between 502
# and 592 seconds, and the current alphabetical sharding resulted in
# shard times spread between 44 and 1591 seconds.
# Greedy scheduling. O(m*n), where m is the number of shards and n
# is the number of test cases.
for t in test_cases:
min_shard_index = 0
min_shard_time = None
for i in xrange(total_shards):
if min_shard_time is None or shards[i]['total_time'] < min_shard_time:
min_shard_index = i
min_shard_time = shards[i]['total_time']
shards[min_shard_index]['tests'].append(t)
shards[min_shard_index]['total_time'] += _TestTime(t, test_times, median)
res = [s['tests'] for s in shards]
if debug_shard_distributions:
_DebugShardDistributions(res, test_times)
return res
def LoadTestCasesToBeRun(
test_class, finder_options, filter_tests_after_sharding,
total_shards, shard_index, test_times, debug_shard_distributions,
typ_runner):
test_cases = []
match_everything = lambda _: True
test_filter_matcher_func = typ_runner.matches_filter
if filter_tests_after_sharding:
test_filter_matcher = match_everything
post_test_filter_matcher = test_filter_matcher_func
else:
test_filter_matcher = test_filter_matcher_func
post_test_filter_matcher = match_everything
for t in serially_executed_browser_test_case.GenerateTestCases(
test_class, finder_options):
if test_filter_matcher(t):
test_cases.append(t)
if test_times:
# Assign tests to shards.
shards = _SplitShardsByTime(test_cases, total_shards, test_times,
debug_shard_distributions)
return [t for t in shards[shard_index]
if post_test_filter_matcher(t)]
else:
test_cases.sort(key=lambda t: t.shortName())
test_cases = filter(post_test_filter_matcher, test_cases)
test_indices = _TestIndicesForShard(
total_shards, shard_index, len(test_cases))
if debug_shard_distributions:
tmp_shards = []
for i in xrange(total_shards):
tmp_indices = _TestIndicesForShard(
total_shards, i, len(test_cases))
tmp_tests = [test_cases[index] for index in tmp_indices]
tmp_shards.append(tmp_tests)
# Can edit the code to get 'test_times' passed in here for
# debugging and comparison purposes.
_DebugShardDistributions(tmp_shards, None)
return [test_cases[index] for index in test_indices]
def _CreateTestArgParsers():
parser = typ.ArgumentParser(discovery=True, reporting=True, running=True)
parser.add_argument('test', type=str, help='Name of the test suite to run')
parser.add_argument(
'--filter-tests-after-sharding', default=False, action='store_true',
help=('Apply the test filter after tests are split for sharding. Useful '
'for reproducing bugs related to the order in which tests run.'))
parser.add_argument(
'--read-abbreviated-json-results-from',
metavar='FILENAME',
action='store',
help=(
'If specified, reads abbreviated results from that path in json '
'form. This information is used to more evenly distribute tests '
'among shards.'))
parser.add_argument(
'--debug-shard-distributions',
action='store_true', default=False,
help='Print debugging information about the shards\' test distributions')
parser.add_argument('--default-chrome-root', type=str, default=None)
parser.add_argument(
'--client-config', dest='client_configs', action='append', default=[])
parser.add_argument(
'--start-dir', dest='start_dirs', action='append', default=[])
return parser
def _GetClassifier(typ_runner):
def _SeriallyExecutedBrowserTestCaseClassifer(test_set, test):
# Do not pick up tests that do not inherit from
# serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase
# class.
if not isinstance(
test,
serially_executed_browser_test_case.SeriallyExecutedBrowserTestCase):
return
if typ_runner.should_skip(test):
test_set.add_test_to_skip(test, 'skipped because matched --skip')
return
# For now, only support running these tests serially.
test_set.add_test_to_run_isolated(test)
return _SeriallyExecutedBrowserTestCaseClassifer
def RunTests(args):
parser = _CreateTestArgParsers()
try:
options, extra_args = parser.parse_known_args(args)
except arg_parser._Bailout:
PrintTelemetryHelp()
return parser.exit_status
binary_manager.InitDependencyManager(options.client_configs)
for start_dir in options.start_dirs:
modules_to_classes = discover.DiscoverClasses(
start_dir,
options.top_level_dir,
base_class=serially_executed_browser_test_case.
SeriallyExecutedBrowserTestCase)
browser_test_classes = modules_to_classes.values()
_ValidateDistinctNames(browser_test_classes)
test_class = None
for cl in browser_test_classes:
if cl.Name() == options.test:
test_class = cl
break
if not test_class:
print 'Cannot find test class with name matching %s' % options.test
print 'Available tests: %s' % '\n'.join(
cl.Name() for cl in browser_test_classes)
return 1
test_class._typ_runner = typ_runner = typ.Runner()
# Create test context.
typ_runner.context = browser_test_context.TypTestContext()
for c in options.client_configs:
typ_runner.context.client_configs.append(c)
typ_runner.context.finder_options = ProcessCommandLineOptions(
test_class, options, extra_args)
typ_runner.context.test_class = test_class
typ_runner.context.expectations_files = options.expectations_files
test_times = None
if options.read_abbreviated_json_results_from:
with open(options.read_abbreviated_json_results_from, 'r') as f:
abbr_results = json.load(f)
test_times = abbr_results.get('times')
# Setup typ.Runner instance.
typ_runner.args.all = options.all
typ_runner.args.expectations_files = options.expectations_files
typ_runner.args.jobs = options.jobs
typ_runner.args.list_only = options.list_only
typ_runner.args.metadata = options.metadata
typ_runner.args.passthrough = options.passthrough
typ_runner.args.path = options.path
typ_runner.args.quiet = options.quiet
typ_runner.args.repeat = options.repeat
typ_runner.args.repository_absolute_path = options.repository_absolute_path
typ_runner.args.retry_limit = options.retry_limit
typ_runner.args.retry_only_retry_on_failure_tests = (
options.retry_only_retry_on_failure_tests)
typ_runner.args.skip = options.skip
typ_runner.args.suffixes = TEST_SUFFIXES
typ_runner.args.tags = options.tags
typ_runner.args.test_name_prefix = options.test_name_prefix
typ_runner.args.test_filter = options.test_filter
typ_runner.args.test_results_server = options.test_results_server
typ_runner.args.test_type = options.test_type
typ_runner.args.top_level_dir = options.top_level_dir
typ_runner.args.write_full_results_to = options.write_full_results_to
typ_runner.args.write_trace_to = options.write_trace_to
typ_runner.setup_fn = _SetUpProcess
typ_runner.teardown_fn = _TearDownProcess
typ_runner.classifier = _GetClassifier(typ_runner)
typ_runner.path_delimiter = test_class.GetJSONResultsDelimiter()
tests_to_run = LoadTestCasesToBeRun(
test_class=test_class, finder_options=typ_runner.context.finder_options,
filter_tests_after_sharding=options.filter_tests_after_sharding,
total_shards=options.total_shards, shard_index=options.shard_index,
test_times=test_times,
debug_shard_distributions=options.debug_shard_distributions,
typ_runner=typ_runner)
for t in tests_to_run:
typ_runner.context.test_case_ids_to_run.add(t.id())
typ_runner.context.Freeze()
browser_test_context._global_test_context = typ_runner.context
# several class level variables are set for GPU tests when
  # LoadTestCasesToBeRun is called. Functions like ExpectationsFiles and
# GenerateTags which use these variables should be called after
# LoadTestCasesToBeRun
test_class_expectations_files = test_class.ExpectationsFiles()
  # all file paths in test_class_expectations_files must be absolute
assert all(os.path.isabs(path) for path in test_class_expectations_files)
typ_runner.args.expectations_files.extend(
test_class_expectations_files)
# Since sharding logic is handled by browser_test_runner harness by passing
# browser_test_context.test_case_ids_to_run to subprocess to indicate test
# cases to be run, we explicitly disable sharding logic in typ.
typ_runner.args.total_shards = 1
typ_runner.args.shard_index = 0
typ_runner.args.timing = True
typ_runner.args.verbose = options.verbose
typ_runner.win_multiprocessing = typ.WinMultiprocessing.importable
try:
ret, _, _ = typ_runner.run()
except KeyboardInterrupt:
print >> sys.stderr, "interrupted, exiting"
ret = 130
return ret
def _SetUpProcess(child, context):
args = context.finder_options
if binary_manager.NeedsInit():
# On windows, typ doesn't keep the DependencyManager initialization in the
# child processes.
binary_manager.InitDependencyManager(context.client_configs)
if args.remote_platform_options.device == 'android':
android_devices = android_device.FindAllAvailableDevices(args)
if not android_devices:
raise RuntimeError("No Android device found")
android_devices.sort(key=lambda device: device.name)
args.remote_platform_options.device = (
android_devices[child.worker_num-1].guid)
browser_test_context._global_test_context = context
context.test_class.SetUpProcess()
if child.has_expectations:
child.expectations.set_tags(
context.test_class._typ_runner.expectations.tags)
def _TearDownProcess(child, context):
del child, context # Unused.
browser_test_context._global_test_context.test_class.TearDownProcess()
browser_test_context._global_test_context = None
if __name__ == '__main__':
ret_code = RunTests(sys.argv[1:])
sys.exit(ret_code)
|
|
#
# General:
# This file is part of .NET Bridge
#
# Copyright:
# 2010 Jonathan Shore
# 2017 Jonathan Shore and Contributors
#
# License:
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from socket import socket
import struct
import numpy
class BufferedReader:
"""
Buffered stream with various convenient operations to read fundamental
types:
- strings (UTF-8)
- integers
- floating point
- bytes
- arrays
"""
def __init__ (self, fp):
self._fp = fp
self._buffer = bytearray()
self._basis = 0
self._pos = 0
self._len = 0
self._eof = False
self._Sint16 = struct.Struct("@h")
self._Sint32 = struct.Struct("@i")
self._Sint64 = struct.Struct("@l")
self._Sint16 = struct.Struct("@H")
self._Sfloat64 = struct.Struct("@d")
@property
def position (self):
"""
Current position in the file
"""
return self._fp.tell()
def isEOF (self):
"""
        Determine whether the stream is at EOF
"""
if self._eof:
return True
elif self._pos < self._len:
return False
else:
self._replenish(1)
self._eof = self._len == 0
return self._eof
def readByte (self) -> int:
"""
read a byte
"""
if (self._pos + 1) > self._len:
self._replenish(1)
v = self._buffer[self._pos]
self._pos += 1
return v
def readString (self) -> str:
"""
read a string
"""
len = self.readInt32()
if (self._pos + len) > self._len:
self._replenish(len)
Istart = self._pos
Iend = Istart + len
s = self._buffer[Istart:Iend].decode("UTF-8")
self._pos = Iend
return s
def readInt16 (self) -> int:
"""
read an int16
"""
if (self._pos + 2) > self._len:
self._replenish(2)
(v,) = self._Sint16.unpack_from(self._buffer, self._pos)
self._pos += 2
return v
def readInt32 (self) -> int:
"""
read an int
"""
if (self._pos + 4) > self._len:
self._replenish(4)
(v,) = self._Sint32.unpack_from(self._buffer, self._pos)
self._pos += 4
return v
def readInt64 (self) -> int:
"""
read an int64
"""
if (self._pos + 8) > self._len:
self._replenish(8)
(v,) = self._Sint64.unpack_from(self._buffer, self._pos)
self._pos += 8
return v
def readFloat64 (self) -> float:
"""
read a float
"""
if (self._pos + 8) > self._len:
self._replenish(8)
(v,) = self._Sfloat64.unpack_from(self._buffer, self._pos)
self._pos += 8
return v
def readByteArray (self, len = None) -> bytes:
"""
read a byte array
"""
if len is None:
len = self.readInt32()
if (self._pos + len) > self._len:
self._replenish(len)
Istart = self._pos
Iend = Istart + len
vec = self._buffer[Istart:Iend]
self._pos = Iend
return vec
def read (self, len) -> bytes:
"""
read a buffer
"""
if (self._pos + len) > self._len:
self._replenish(len)
Istart = self._pos
Iend = Istart + len
vec = self._buffer[Istart:Iend]
self._pos = Iend
return vec
def seek (self, pos: int):
"""
Seek to position in file
"""
self._fp.seek (pos)
self._pos = 0
self._len = 0
def find (self, seq: bytes) -> int:
"""
Locate start of sequence
"""
tolen = len(seq)
while not self.isEOF():
if (self._pos + tolen) > self._len:
self._replenish(tolen)
try:
idx = self._buffer.index(seq, self._pos, self._len)
self._pos = idx
return self._basis + idx
except ValueError:
self._pos = self._len - tolen + 1
return -1
def close(self):
"""
Close socket stream
"""
try:
self._fp.close()
finally:
self._pos = 0
def _replenish (self, n: int):
"""
        Replenish the buffer to the minimum required capacity (or more)
"""
## copy out remainder
remains = self._buffer[self._pos:self._len]
self._buffer.clear()
self._buffer.extend(remains)
self._pos = 0
self._basis = len(remains) + self._fp.tell()
## read until we have enough
read = 1
while len(self._buffer) < n and read > 0:
amount = max(n - len(self._buffer), 1024)
piece = self._fp.read(amount)
read = len(piece)
self._buffer.extend (piece)
self._len = len(self._buffer)
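# Illustrative helper (not part of the original module): reading a simple
# length-prefixed vector with BufferedReader. The framing (an int32 count
# followed by that many float64 values) is an assumption for this sketch only.
def _example_read_float_vector(fp):
    """Read an int32 count followed by that many float64 values from ``fp``."""
    reader = BufferedReader(fp)
    count = reader.readInt32()
    return [reader.readFloat64() for _ in range(count)]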
|
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An abstract device model.
Concrete implementations should be placed alongside this.
Concrete subclasses must implement all methods that have NotImplementedError
exceptions raised in this abstract interface. Methods Lock and Unlock are
optional, so clients of the device classes should expect that a
NotSupportedError will potentially be raised.
"""
import time
#import gflags
from absl import flags as gflags
import push_exceptions as exceptions
import logging
FLAGS = gflags.FLAGS
gflags.DEFINE_float('host_down_sinbin_time',
180.0,
'Seconds that down hosts are placed in the sin-bin for.')
# Define the default timeout values for each vendor.
# Each vendor also provides the same flags (s/base/$VENDOR_NAME/),
# with None as the default value. See BaseDevice._SetupTimeouts.
gflags.DEFINE_float('base_timeout_response',
300.0,
'Default device response timeout in seconds.')
gflags.DEFINE_float('base_timeout_connect',
10.0,
'Default device connect timeout in seconds.')
gflags.DEFINE_float('base_timeout_idle',
600.0,
'Default device idle timeout in seconds.')
gflags.DEFINE_float('base_timeout_disconnect',
10.0,
'Default device disconnect timeout in seconds.')
gflags.DEFINE_float('base_timeout_act_user',
10.0,
'Default device user activation timeout in seconds.')
# The default for this is set to 180 seconds, so that it is the same as
# host_down_sinbin_time's default. This effectively disables the faster retries
# by default - the flag must be used to enable them.
gflags.DEFINE_float('base_device_initial_failure_delay', 180.0,
'If a device fails to connect, retry after '
'this many seconds at first, doubling each time '
'for frequent errors (only applies to whitelisted devices).')
gflags.DEFINE_float('base_device_failure_forgiveness_delay', 10 * 60,
'Forget connect failures that happened more than this many '
'seconds ago (only on whitelisted devices).')
class BaseDevice(object):
"""A skeleton base device referring to a specific device in the network.
Notes:
All methods other than Connect and Nop raise NotImplementedError as
they are pure virtual methods.
Methods that have arguments perform argument type testing prior to
calling private implementations of their method. Replace the private
method in your implementation.
Attributes:
host: A string, the host name.
loopback_ipv4: A string representation of the IPv4 address used for
device management inside device modules.
vendor: A string, the vendor, e.g., 'JUNIPER'.
connected: A bool, whether we are connected to the device or not.
active: A bool, whether we're active or not.
rollout: A list of strings, active rollout tags for the device.
"""
# A dict to map from vendor string to vendor class, e.g.,
# {'FORCE10': ftos.FtosDevice}
# This dict is updated by each concrete subclass at class load time (by
# factory.py).
vendor_classes = {}
# Standardized strings defining types of configurations.
CONFIG_RUNNING = 'running-config'
CONFIG_STARTUP = 'startup-config'
CONFIG_PATCH = 'patch-config'
NON_FILE_DESTINATIONS = (CONFIG_RUNNING, CONFIG_STARTUP, CONFIG_PATCH)
def __init__(self, **kwargs):
# Use kwargs so that subclasses can extend this state via the factory.
self.host = kwargs.get('host', None)
self.loopback_ipv4 = kwargs.get('loopback_ipv4', None)
self.accessproxy = kwargs.get('accessproxy', None)
self.accessproxy_device_dict = {}
self.role = kwargs.get('role', None)
self.realm = kwargs.get('realm', None)
self.notes = self.__class__.__name__
# Default to true for active.
self.active = kwargs.get('active', True)
self.vendor = kwargs.get('vendor', None)
self.rollout = kwargs.get('rollout', [])
self._subclass = kwargs.get('subclass', False)
# Connection details.
self._username = kwargs.get('username', None)
self._password = None
self._ssh_keys = None
self._enable_password = None
self._ssl_cert_set = None
# Boolean attribute containing the considered state of the device. (True=up)
self._host_status = True
# The time the host's up/down status changed. If None, ignore this value.
self._host_last_status_change = None
# Connected boolean, accessed via property connected.
self._connected = False
# Our last-raised exception if not None.
self.__exc = None
# If we have been initialised directly, set our vendor name.
if not hasattr(self, 'vendor_name'):
self.vendor_name = 'base'
# Some sub-classes override this.
if not hasattr(self, 'unsupported_non_file_destinations'):
self.unsupported_non_file_destinations = (self.CONFIG_PATCH,)
# Setup timeouts.
self._InitialiseTimeouts()
def __del__(self):
"""Special delete method called on object garbage collection.
Holders of device objects should call Disconnect() explicitly,
rather than relying on disconnection by this method.
A global Exception handler must ensure deletion of references to
instances of this class. Garbage collection will close device
connections when it runs this method, but there are no guarantees it
will be run for all classes at program exit.
"""
if self.connected:
logging.debug('Garbage collection disconnecting %r' % self.host)
self.Disconnect()
def __str__(self):
return '%s(host=%s, vendor=%s, role=%s)' % (
self.__class__.__name__,
repr(self.host),
repr(self.vendor),
repr(self.role))
def _InitialiseTimeouts(self):
"""Sets up timeouts by scanning module flags.
Subclasses may override the _SetupTimeouts method, which is called at the
end of initialization.
"""
for var in ('connect', 'response', 'idle', 'disconnect', 'act_user'):
flag_name = '%s_timeout_%s' % (self.vendor_name, var)
default_flag_name = 'base_timeout_%s' % var
if getattr(FLAGS, flag_name) is not None:
value = getattr(FLAGS, flag_name)
setattr(self, 'timeout_%s' % var, value)
else:
default_value = getattr(FLAGS, default_flag_name)
setattr(self, 'timeout_%s' % var, default_value)
# Allow devices to optionally override timeouts.
self._SetupTimeouts()
def _SetupTimeouts(self):
"""Optionally setup device specific timeout value logic.
If more than a global and device module specific timeout value are
required (e.g., to set a minima), implement this method in the
concrete device module. It need not be provided otherwise.
"""
pass
def _HostDownPrepareConnect(self):
"""Works out if it's safe to retry a connection attempt.
Raises an exception if we're not prepared to retry the connection attempt.
See also Connect, and HandleConnectFailure.
Raises:
The last exception class recorded in self.__exc.
"""
now = time.time()
time_left = self._dampen_end_time - now
logging.debug('BaseDevice.Connect is waiting because of previous '
'connection errors, host is %s, time_left is %s',
self.host, time_left)
if time_left > 0:
# pylint: disable=g-doc-exception
raise self.__exc.__class__(
'Connection to %s(%s) failed. Will not retry for %.1fs.'
% (self.host, self.loopback_ipv4, time_left),
dampen_connect=True)
# pylint: enable=g-doc-exception
else:
# Next time, we'll try to connect.
self._host_status = True
self.connected = False
def Connect(self, username, password=None, ssh_keys=None,
enable_password=None, ssl_cert_set=None):
"""Sets up a connection to the device.
Concrete classes must implement _Connect() instead, with the same arguments.
Concrete classes are expected not to disconnect the connection until it
is cleaned up by Disconnect(). A generic exception handler at the
top level should ensure sessions have an opportunity to be cleaned up
upon abnormal program termination.
Args:
username: A string, the username (role account) to use.
password: A string, the password to use (optional; may be None).
ssh_keys: A tuple of strings, SSH private keys (optional; may be None).
enable_password: A string, an optional enable password (may be None).
ssl_cert_set: An optional SSLCertificateSet protobuf (may be None).
Raises:
exceptions.ConnectError: the connection could not be established.
exceptions.AuthenticationError: A device authentication error occurred, or
neither a password nor an SSH private key was supplied.
"""
# Either an SSH key or password must be supplied for authentication.
if (password is None and not
ssh_keys and not
ssl_cert_set and not
FLAGS.use_ssh_agent):
raise exceptions.AuthenticationError(
'Cannot connect. No authentication information provided to device '
'Connect method.')
self._username = username
self._password = password
self._ssh_keys = ssh_keys or ()
self._enable_password = enable_password
self._ssl_cert_set = ssl_cert_set
if not self.loopback_ipv4 and not self.accessproxy_device_dict:
raise exceptions.ConnectError(
'Device %r, or any access proxies, need to have an IPv4 '
'management address.'
% self.host)
logging.debug('In BaseDevice.Connect, host is %s, _connected is %s',
self.host, self._connected)
while not self.connected:
try:
if self._host_status:
logging.debug('CONNECTING %s(%s)',
self.host, self.loopback_ipv4)
self._Connect(username, password=password, ssh_keys=self._ssh_keys,
enable_password=enable_password,
ssl_cert_set=ssl_cert_set)
self.connected = True
logging.debug('CONNECTED %s(%s)',
self.host, self.loopback_ipv4)
self._last_failure_time = None
else:
self._HostDownPrepareConnect()
except (exceptions.ConnectError,
exceptions.AuthenticationError), e:
logging.error('CONNECT FAILURE %s(%s)',
self.host, self.loopback_ipv4)
self._host_status = False
self.__exc = e
raise
logging.debug('Leaving BaseDevice.Connect, host is %s, _connected is %s',
self.host, self._connected)
return None
def Nop(self, name):
"""No-operation.
Args:
name: A string, the (no) operation's name.
Returns:
A string, some output (can be ignored by the client).
"""
msg = 'No-operation request named `%s` received.' % name
logging.debug('ActionRequest: NOP %s %s', str(self.__class__), repr(msg))
return msg
def Cmd(self, command, mode=None):
"""Executes a command.
Concrete classes must define _Cmd with the same arguments.
Args:
command: A string, the command to execute.
mode: A string, the CLI mode to use for this command (e.g., 'shell'
on Netscaler). The empty string or None will use the device's
default mode.
Returns:
A string, the response.
Raises:
exceptions.CmdError: An error occurred inside the call to _Cmd.
"""
if not command:
raise exceptions.CmdError('No command supplied for Cmd() method.')
else:
if not mode:
mode = None
return self._Cmd(command, mode=mode)
def GetConfig(self, source):
"""Returns a configuration file from the device.
Concrete classes must define _GetConfig with the same arguments.
Args:
source: A string, representing either a path to a configuration file or a
string to be interpreted by the device module. For readability,
consider using CONFIG_RUNNING and CONFIG_STARTUP to represent the
generic concepts of the running and startup configurations.
Returns:
A string, the configuration file. (This may be large).
Raises:
GetConfigError: the GetConfig operation failed.
EmptyConfigError: the operation produced an empty configuration.
"""
return self._GetConfig(source)
def SetConfig(self, destination_file, data, canary,
juniper_skip_show_compare=False,
juniper_skip_commit_check=False,
juniper_get_rollback_patch=False):
"""Updates a devices' configuration.
Concrete classes must define _SetConfig with the same arguments.
Args:
destination_file: A string. A path to a file on the device.
data: A string, the configuration data to set.
canary: A boolean, whether to canary, rather than set, the configuration.
juniper_skip_show_compare: A boolean, temporary flag to skip
'show | compare' on Junipers due to a bug.
juniper_skip_commit_check: A boolean, flag to skip 'commit check' on
Junipers when doing a canary.
juniper_get_rollback_patch: A boolean, optionally try to retrieve a
patch to rollback the config change.
Returns:
A SetConfigResult. Transcript of any device interaction that occurred
during the operation, plus any optional extras.
Raises:
exceptions.SetConfigError: the SetConfig operation failed.
exceptions.SetConfigSyntaxError: the configuration data had a syntax
error.
"""
if destination_file in self.unsupported_non_file_destinations:
raise exceptions.SetConfigError(
'%s devices do not support %s as a destination.' %
(self.vendor_name, destination_file))
if ((juniper_skip_show_compare or
juniper_skip_commit_check or
juniper_get_rollback_patch) and
self.__class__.__name__ == 'JunosDevice'):
return self._SetConfig(destination_file, data, canary,
skip_show_compare=juniper_skip_show_compare,
skip_commit_check=juniper_skip_commit_check,
get_rollback_patch=juniper_get_rollback_patch)
else:
return self._SetConfig(destination_file, data, canary)
def Disconnect(self):
"""Disconnects from the device.
Concrete classes must define _Disconnect.
This method is called by the class __del__ method, and should also be
called by any global Exception handler (as __del__() is not guaranteed to
be called when the Python interpreter exits).
Disconnect is also called by the Device Manager during garbage collection.
Raises:
exceptions.DisconnectError if the disconnect operation failed.
"""
self._Disconnect()
self.connected = False
logging.debug('DISCONNECTED %s(%s)',
self.host, self.loopback_ipv4)
def _GetConnected(self):
return self._connected
def _SetConnected(self, c):
logging.debug('Setting connected property on host %s to %s',
self.host, c)
self._connected = c
# Property for the connection status.
connected = property(_GetConnected, _SetConnected)
class SetConfigResult(object):
"""Results of one SetConfig, including transcript and any optional extras.
Attributes:
transcript: A string, the chatter from the router and/or any error text.
rollback_patch: None or a string, the optional rollback patch, if supported.
"""
def __init__(self):
self.transcript = ''
self.rollback_patch = None
def __len__(self):
return len(self.transcript) + len(self.rollback_patch or '')
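# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original module): a minimal hypothetical
# vendor subclass plus the Connect/Cmd/Disconnect life cycle described in the
# docstrings above. ExampleDevice, its canned outputs and the credentials are
# illustrative placeholders only; real vendor modules additionally define
# their own '<vendor>_timeout_*' flags and register themselves in
# BaseDevice.vendor_classes via factory.py.
class ExampleDevice(BaseDevice):
  """Toy device module used only to illustrate the extension points."""

  def _SetupTimeouts(self):
    # Enforce a floor on the response timeout, as suggested by the
    # _SetupTimeouts docstring.
    self.timeout_response = max(self.timeout_response, 60.0)

  def _Connect(self, username, password=None, ssh_keys=None,
               enable_password=None, ssl_cert_set=None):
    pass  # A real vendor module would open the transport here.

  def _Cmd(self, command, mode=None):
    return 'example output for %r' % command

  def _GetConfig(self, source):
    return '! placeholder configuration for %s' % source

  def _Disconnect(self):
    pass


def _example_session(device):
  """Connects, runs one command, fetches the running config, disconnects."""
  device.Connect(username='netops', password='example-password')
  try:
    return device.Cmd('show version'), device.GetConfig(
        BaseDevice.CONFIG_RUNNING)
  finally:
    # Holders of device objects should call Disconnect() explicitly rather
    # than rely on garbage collection (see BaseDevice.__del__).
    device.Disconnect()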
|
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from collections import defaultdict
from telemetry.timeline import model as model_module
from telemetry.timeline import tracing_category_filter
from telemetry.timeline import tracing_options
from telemetry.value import trace
from telemetry.web_perf.metrics import timeline_based_metric
from telemetry.web_perf.metrics import blob_timeline
from telemetry.web_perf.metrics import gpu_timeline
from telemetry.web_perf.metrics import layout
from telemetry.web_perf.metrics import memory_timeline
from telemetry.web_perf.metrics import responsiveness_metric
from telemetry.web_perf.metrics import smoothness
from telemetry.web_perf.metrics import text_selection
from telemetry.web_perf import smooth_gesture_util
from telemetry.web_perf import story_test
from telemetry.web_perf import timeline_interaction_record as tir_module
# TimelineBasedMeasurement considers all instrumentation as producing a single
# timeline. But, depending on the amount of instrumentation that is enabled,
# overhead increases. The user of the measurement must therefore choose between
# a few levels of instrumentation.
NO_OVERHEAD_LEVEL = 'no-overhead'
MINIMAL_OVERHEAD_LEVEL = 'minimal-overhead'
DEBUG_OVERHEAD_LEVEL = 'debug-overhead'
ALL_OVERHEAD_LEVELS = [
NO_OVERHEAD_LEVEL,
MINIMAL_OVERHEAD_LEVEL,
DEBUG_OVERHEAD_LEVEL
]
def _GetAllTimelineBasedMetrics():
# TODO(nednguyen): use discovery pattern to return all the instances of
# all TimelineBasedMetrics class in web_perf/metrics/ folder.
# This cannot be done until crbug.com/460208 is fixed.
return (smoothness.SmoothnessMetric(),
responsiveness_metric.ResponsivenessMetric(),
layout.LayoutMetric(),
gpu_timeline.GPUTimelineMetric(),
blob_timeline.BlobTimelineMetric(),
memory_timeline.MemoryTimelineMetric(),
text_selection.TextSelectionMetric())
class InvalidInteractions(Exception):
pass
# TODO(nednguyen): Get rid of this results wrapper hack after we add interaction
# record to telemetry value system (crbug.com/453109)
class ResultsWrapperInterface(object):
def __init__(self):
self._tir_label = None
self._results = None
def SetResults(self, results):
self._results = results
def SetTirLabel(self, tir_label):
self._tir_label = tir_label
@property
def current_page(self):
return self._results.current_page
def AddValue(self, value):
raise NotImplementedError
class _TBMResultWrapper(ResultsWrapperInterface):
def AddValue(self, value):
assert self._tir_label
value.name = '%s-%s' % (self._tir_label, value.name)
self._results.AddValue(value)
def _GetRendererThreadsToInteractionRecordsMap(model):
threads_to_records_map = defaultdict(list)
interaction_labels_of_previous_threads = set()
for curr_thread in model.GetAllThreads():
for event in curr_thread.async_slices:
# TODO(nduca): Add support for page-load interaction record.
if tir_module.IsTimelineInteractionRecord(event.name):
interaction = tir_module.TimelineInteractionRecord.FromAsyncEvent(event)
# Adjust the interaction record to match the synthetic gesture
# controller if needed.
interaction = (
smooth_gesture_util.GetAdjustedInteractionIfContainGesture(
model, interaction))
threads_to_records_map[curr_thread].append(interaction)
if interaction.label in interaction_labels_of_previous_threads:
raise InvalidInteractions(
'Interaction record label %s is duplicated on different '
'threads' % interaction.label)
if curr_thread in threads_to_records_map:
interaction_labels_of_previous_threads.update(
r.label for r in threads_to_records_map[curr_thread])
return threads_to_records_map
class _TimelineBasedMetrics(object):
def __init__(self, model, renderer_thread, interaction_records,
results_wrapper, metrics):
self._model = model
self._renderer_thread = renderer_thread
self._interaction_records = interaction_records
self._results_wrapper = results_wrapper
self._all_metrics = metrics
def AddResults(self, results):
interactions_by_label = defaultdict(list)
for i in self._interaction_records:
interactions_by_label[i.label].append(i)
for label, interactions in interactions_by_label.iteritems():
are_repeatable = [i.repeatable for i in interactions]
if not all(are_repeatable) and len(interactions) > 1:
raise InvalidInteractions('Duplicate unrepeatable interaction records '
'on the page')
self._results_wrapper.SetResults(results)
self._results_wrapper.SetTirLabel(label)
self.UpdateResultsByMetric(interactions, self._results_wrapper)
def UpdateResultsByMetric(self, interactions, wrapped_results):
if not interactions:
return
for metric in self._all_metrics:
metric.AddResults(self._model, self._renderer_thread,
interactions, wrapped_results)
class Options(object):
"""A class to be used to configure TimelineBasedMeasurement.
This is created and returned by
Benchmark.CreateTimelineBasedMeasurementOptions.
By default, all the timeline based metrics in telemetry/web_perf/metrics are
used (see _GetAllTimelineBasedMetrics above).
To customize your metric needs, use SetTimelineBasedMetrics().
"""
def __init__(self, overhead_level=NO_OVERHEAD_LEVEL):
"""As the amount of instrumentation increases, so does the overhead.
The user of the measurement chooses the overhead level that is appropriate,
and the tracing is filtered accordingly.
overhead_level: Can either be a custom TracingCategoryFilter object or
one of NO_OVERHEAD_LEVEL, MINIMAL_OVERHEAD_LEVEL or
DEBUG_OVERHEAD_LEVEL.
"""
self._category_filter = None
if isinstance(overhead_level,
tracing_category_filter.TracingCategoryFilter):
self._category_filter = overhead_level
elif overhead_level in ALL_OVERHEAD_LEVELS:
if overhead_level == NO_OVERHEAD_LEVEL:
self._category_filter = tracing_category_filter.CreateNoOverheadFilter()
elif overhead_level == MINIMAL_OVERHEAD_LEVEL:
self._category_filter = (
tracing_category_filter.CreateMinimalOverheadFilter())
else:
self._category_filter = (
tracing_category_filter.CreateDebugOverheadFilter())
else:
raise Exception("Overhead level must be a TracingCategoryFilter object"
" or valid overhead level string."
" Given overhead level: %s" % overhead_level)
self._tracing_options = tracing_options.TracingOptions()
self._tracing_options.enable_chrome_trace = True
self._tracing_options.enable_platform_display_trace = True
self._timeline_based_metrics = _GetAllTimelineBasedMetrics()
def ExtendTraceCategoryFilter(self, filters):
for new_category_filter in filters:
self._category_filter.AddIncludedCategory(new_category_filter)
@property
def category_filter(self):
return self._category_filter
@property
def tracing_options(self):
return self._tracing_options
@tracing_options.setter
def tracing_options(self, value):
self._tracing_options = value
def SetTimelineBasedMetrics(self, metrics):
assert isinstance(metrics, collections.Iterable)
for m in metrics:
assert isinstance(m, timeline_based_metric.TimelineBasedMetric)
self._timeline_based_metrics = metrics
def GetTimelineBasedMetrics(self):
return self._timeline_based_metrics
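# Hedged usage sketch (not part of the original file): building an Options
# instance with a reduced metric set, per the class docstring above. The
# trace category and the particular metrics chosen here are illustrative
# placeholders, not recommendations.
def _ExampleTimelineBasedMeasurementOptions():
  options = Options(overhead_level=MINIMAL_OVERHEAD_LEVEL)
  # Record one extra trace category on top of the minimal-overhead filter.
  options.ExtendTraceCategoryFilter(['blink.console'])
  # Restrict reporting to two metrics instead of the full default set.
  options.SetTimelineBasedMetrics(
      [smoothness.SmoothnessMetric(), layout.LayoutMetric()])
  return options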
class TimelineBasedMeasurement(story_test.StoryTest):
"""Collects multiple metrics based on their interaction records.
A timeline based measurement shifts the burden of what metrics to collect onto
the story under test. Instead of the measurement
having a fixed set of values it collects, the story being tested
issues (via JavaScript) an Interaction record into the user timing API
describing what is happening at that time, as well as a standardized set
of flags describing the semantics of the work being done. The
TimelineBasedMeasurement object collects a trace that includes both these
interaction records, and a user-chosen amount of performance data using
Telemetry's various timeline-producing APIs, tracing especially.
It then passes the recorded timeline to different TimelineBasedMetrics based
on those flags. As an example, this allows a single story run to produce
load timing data, smoothness data, critical jank information and overall cpu
usage information.
For information on how to mark up a page to work with
TimelineBasedMeasurement, refer to the
perf.metrics.timeline_interaction_record module.
Args:
options: an instance of timeline_based_measurement.Options.
results_wrapper: A class whose __init__ method takes in
the page_test_results object and the interaction record label. This
class follows the ResultsWrapperInterface. Note: this class is not
supported long term and will be removed when crbug.com/453109 is resolved.
"""
def __init__(self, options, results_wrapper=None):
self._tbm_options = options
self._results_wrapper = results_wrapper or _TBMResultWrapper()
def WillRunStory(self, platform):
"""Set up test according to the tbm options."""
pass
def Measure(self, platform, results):
"""Collect all possible metrics and added them to results."""
pass
def DidRunStory(self, platform):
"""Clean up test according to the tbm options."""
pass
def WillRunStoryForPageTest(self, tracing_controller,
synthetic_delay_categories=None):
"""Configure and start tracing.
Args:
tracing_controller: a tracing controller instance used to configure and
start tracing.
synthetic_delay_categories: iterable of delays. For example:
['DELAY(cc.BeginMainFrame;0.014;alternating)']
where 'cc.BeginMainFrame' is a timeline event, 0.014 is the delay,
and 'alternating' is the mode.
"""
if not tracing_controller.IsChromeTracingSupported():
raise Exception('Not supported')
category_filter = self._tbm_options.category_filter
# TODO(slamm): Move synthetic_delay_categories to the TBM options.
for delay in synthetic_delay_categories or []:
category_filter.AddSyntheticDelay(delay)
tracing_controller.Start(self._tbm_options.tracing_options, category_filter)
def MeasureForPageTest(self, tracing_controller, results):
"""Collect all possible metrics and added them to results."""
trace_result = tracing_controller.Stop()
results.AddValue(trace.TraceValue(results.current_page, trace_result))
model = model_module.TimelineModel(trace_result)
threads_to_records_map = _GetRendererThreadsToInteractionRecordsMap(model)
for renderer_thread, interaction_records in (
threads_to_records_map.iteritems()):
meta_metrics = _TimelineBasedMetrics(
model, renderer_thread, interaction_records,
self._results_wrapper, self._tbm_options.GetTimelineBasedMetrics())
meta_metrics.AddResults(results)
def DidRunStoryForPageTest(self, tracing_controller):
if tracing_controller.is_tracing_running:
tracing_controller.Stop()
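# Hedged sketch (not part of the original file): the order in which a page
# test harness is expected to drive the *ForPageTest hooks above. The
# tracing_controller and results arguments stand in for objects supplied by
# Telemetry at run time; the synthetic delay string is taken from the
# WillRunStoryForPageTest docstring.
def _ExamplePageTestFlow(tbm, tracing_controller, results):
  tbm.WillRunStoryForPageTest(
      tracing_controller,
      synthetic_delay_categories=[
          'DELAY(cc.BeginMainFrame;0.014;alternating)'])
  try:
    tbm.MeasureForPageTest(tracing_controller, results)
  finally:
    tbm.DidRunStoryForPageTest(tracing_controller)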
|
|
##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for txdav.caldav.datastore.postgres, mostly based on
L{txdav.caldav.datastore.test.common}.
"""
from twisted.internet.defer import inlineCallbacks, returnValue
from twistedcaldav.memcacher import Memcacher
from txdav.common.datastore.test.util import buildStore, StubNotifierFactory
from txdav.base.propertystore.test.base import (
PropertyStoreTest, propertyName, propertyValue)
from twisted.internet.defer import gatherResults
from twext.enterprise.ienterprise import AlreadyFinishedError
try:
from txdav.base.propertystore.sql import PropertyStore
except ImportError, e:
# XXX: when could this ever fail?
PropertyStore = None
importErrorMessage = str(e)
class PropertyStoreTest(PropertyStoreTest):
@inlineCallbacks
def setUp(self):
self.notifierFactory = StubNotifierFactory()
self.store = yield buildStore(self, self.notifierFactory)
self.addCleanup(self.maybeCommitLast)
self._txn = self.store.newTransaction()
self.propertyStore = \
self.propertyStore1 = yield PropertyStore.load("user01", None, None, self._txn, 1)
self.propertyStore2 = yield PropertyStore.load("user01", "user02", None, self._txn, 1)
self.propertyStore3 = yield PropertyStore.load("user01", None, "user03", self._txn, 1)
self.propertyStore4 = yield PropertyStore.load("user01", "user02", "user04", self._txn, 1)
@inlineCallbacks
def maybeCommitLast(self):
if hasattr(self, "_txn"):
result = yield self._txn.commit()
delattr(self, "_txn")
else:
result = None
self.propertyStore = \
self.propertyStore1 = \
self.propertyStore2 = \
self.propertyStore3 = \
self.propertyStore4 = None
returnValue(result)
@inlineCallbacks
def _changed(self, store):
if hasattr(self, "_txn"):
yield self._txn.commit()
delattr(self, "_txn")
self._txn = self.store.newTransaction()
store = self.propertyStore1
self.propertyStore = \
self.propertyStore1 = yield PropertyStore.load("user01", None, None, self._txn, 1)
self.propertyStore1._shadowableKeys = store._shadowableKeys
self.propertyStore1._proxyOverrideKeys = store._proxyOverrideKeys
self.propertyStore1._globalKeys = store._globalKeys
store = self.propertyStore2
self.propertyStore2 = yield PropertyStore.load("user01", "user02", None, self._txn, 1)
self.propertyStore2._shadowableKeys = store._shadowableKeys
self.propertyStore2._proxyOverrideKeys = store._proxyOverrideKeys
self.propertyStore2._globalKeys = store._globalKeys
store = self.propertyStore3
self.propertyStore3 = yield PropertyStore.load("user01", None, "user03", self._txn, 1)
self.propertyStore3._shadowableKeys = store._shadowableKeys
self.propertyStore3._proxyOverrideKeys = store._proxyOverrideKeys
self.propertyStore3._globalKeys = store._globalKeys
store = self.propertyStore4
self.propertyStore4 = yield PropertyStore.load("user01", "user02", "user04", self._txn, 1)
self.propertyStore4._shadowableKeys = store._shadowableKeys
self.propertyStore4._proxyOverrideKeys = store._proxyOverrideKeys
self.propertyStore4._globalKeys = store._globalKeys
@inlineCallbacks
def _abort(self, store):
if hasattr(self, "_txn"):
yield self._txn.abort()
delattr(self, "_txn")
self._txn = self.store.newTransaction()
store = self.propertyStore1
self.propertyStore = \
self.propertyStore1 = yield PropertyStore.load("user01", None, None, self._txn, 1)
self.propertyStore1._shadowableKeys = store._shadowableKeys
self.propertyStore1._proxyOverrideKeys = store._proxyOverrideKeys
self.propertyStore1._globalKeys = store._globalKeys
store = self.propertyStore2
self.propertyStore2 = yield PropertyStore.load("user01", "user02", None, self._txn, 1)
self.propertyStore2._shadowableKeys = store._shadowableKeys
self.propertyStore2._proxyOverrideKeys = store._proxyOverrideKeys
self.propertyStore2._globalKeys = store._globalKeys
store = self.propertyStore3
self.propertyStore3 = yield PropertyStore.load("user01", None, "user03", self._txn, 1)
self.propertyStore3._shadowableKeys = store._shadowableKeys
self.propertyStore3._proxyOverrideKeys = store._proxyOverrideKeys
self.propertyStore3._globalKeys = store._globalKeys
store = self.propertyStore4
self.propertyStore4 = yield PropertyStore.load("user01", "user02", "user04", self._txn, 1)
self.propertyStore4._shadowableKeys = store._shadowableKeys
self.propertyStore4._proxyOverrideKeys = store._proxyOverrideKeys
self.propertyStore4._globalKeys = store._globalKeys
@inlineCallbacks
def test_concurrentInsertion(self):
"""
When two property stores set the same property concurrently, both should
succeed and update the cache. Whoever wins the race (i.e. updates last)
determines the final property value.
"""
pname = propertyName("concurrent")
pval1 = propertyValue("alpha")
pval2 = propertyValue("beta")
concurrentTxn = self.store.newTransaction()
@inlineCallbacks
def maybeAbortIt():
try:
yield concurrentTxn.abort()
except AlreadyFinishedError:
pass
self.addCleanup(maybeAbortIt)
concurrentPropertyStore = yield PropertyStore.load(
"user01", None, None, concurrentTxn, 1
)
concurrentPropertyStore[pname] = pval1
race = []
def tiebreaker(label):
# Let's not get into the business of figuring out which writer the
# database concurrency rules are supposed to pick; it might differ. We just
# take the answer we're given for who gets to be the final writer,
# and make sure that matches the property read in the next
# transaction.
def breaktie(result):
race.append(label)
return result
return breaktie
a = concurrentTxn.commit().addCallback(tiebreaker('a'))
self.propertyStore[pname] = pval2
b = self._txn.commit().addCallback(tiebreaker('b'))
del self._txn
self.assertEquals((yield gatherResults([a, b])), [None, None])
yield self._abort(self.propertyStore)
winner = {'a': pval1,
'b': pval2}[race[-1]]
self.assertEquals(self.propertyStore[pname], winner)
@inlineCallbacks
def test_copy(self):
# Existing store
store1_user1 = yield PropertyStore.load("user01", None, None, self._txn, 2)
store1_user2 = yield PropertyStore.load("user01", "user02", None, self._txn, 2)
# Populate current store with data
props_user1 = (
(propertyName("dummy1"), propertyValue("value1-user1")),
(propertyName("dummy2"), propertyValue("value2-user1")),
)
props_user2 = (
(propertyName("dummy1"), propertyValue("value1-user2")),
(propertyName("dummy3"), propertyValue("value3-user2")),
)
for name, value in props_user1:
store1_user1[name] = value
for name, value in props_user2:
store1_user2[name] = value
yield self._txn.commit()
self._txn = self.store.newTransaction()
# Existing store
store1_user1 = yield PropertyStore.load("user01", None, None, self._txn, 2)
# New store
store2_user1 = yield PropertyStore.load("user01", None, None, self._txn, 3)
# Do copy and check results
yield store2_user1.copyAllProperties(store1_user1)
self.assertEqual(store1_user1.keys(), store2_user1.keys())
store1_user2 = yield PropertyStore.load("user01", "user02", None, self._txn, 2)
store2_user2 = yield PropertyStore.load("user01", "user02", None, self._txn, 3)
self.assertEqual(store1_user2.keys(), store2_user2.keys())
@inlineCallbacks
def test_insert_delete(self):
# Existing store
store1_user1 = yield PropertyStore.load("user01", None, None, self._txn, 2)
pname = propertyName("dummy1")
pvalue = propertyValue("value1-user1")
yield store1_user1.__setitem__(pname, pvalue)
self.assertEqual(store1_user1[pname], pvalue)
yield store1_user1.__delitem__(pname)
self.assertTrue(pname not in store1_user1)
yield store1_user1.__setitem__(pname, pvalue)
self.assertEqual(store1_user1[pname], pvalue)
@inlineCallbacks
def test_cacher_failure(self):
"""
Test that properties can still be read and written even when they are too large
for the cacher to handle.
"""
# Existing store - add a normal property
self.assertFalse("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
store1_user1 = yield PropertyStore.load("user01", None, None, self._txn, 10)
self.assertTrue("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
pname1 = propertyName("dummy1")
pvalue1 = propertyValue("*")
yield store1_user1.__setitem__(pname1, pvalue1)
self.assertEqual(store1_user1[pname1], pvalue1)
self.assertEqual(len(store1_user1._cached), 1)
yield self._txn.commit()
# Existing store - add a large property
self._txn = self.store.newTransaction()
self.assertFalse("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
store1_user1 = yield PropertyStore.load("user01", None, None, self._txn, 10)
self.assertTrue("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
pname2 = propertyName("dummy2")
pvalue2 = propertyValue("*" * (Memcacher.MEMCACHE_VALUE_LIMIT + 10))
yield store1_user1.__setitem__(pname2, pvalue2)
self.assertEqual(store1_user1[pname2], pvalue2)
self.assertEqual(len(store1_user1._cached), 2)
yield self._txn.commit()
# Try again - the cacher will fail large values
self._txn = self.store.newTransaction()
self.assertFalse("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
store1_user1 = yield PropertyStore.load("user01", None, None, self._txn, 10)
self.assertFalse("SQL.props:10/user01" in store1_user1._cacher._memcacheProtocol._cache)
self.assertEqual(store1_user1[pname1], pvalue1)
self.assertEqual(store1_user1[pname2], pvalue2)
self.assertEqual(len(store1_user1._cached), 2)
yield store1_user1.__delitem__(pname1)
self.assertTrue(pname1 not in store1_user1)
yield store1_user1.__delitem__(pname2)
self.assertTrue(pname2 not in store1_user1)
self.assertEqual(len(store1_user1._cached), 0)
self.assertFalse("SQL.props:10/user01" in store1_user1._cacher._memcacheProtocol._cache)
@inlineCallbacks
def test_cacher_off(self):
"""
Test that properties can still be read and written when the cacher is disabled.
"""
self.patch(self.store, "queryCacher", None)
# Existing store - add a normal property
self.assertFalse("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
store1_user1 = yield PropertyStore.load("user01", None, None, self._txn, 10)
self.assertFalse("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
pname1 = propertyName("dummy1")
pvalue1 = propertyValue("*")
yield store1_user1.__setitem__(pname1, pvalue1)
self.assertEqual(store1_user1[pname1], pvalue1)
self.assertEqual(len(store1_user1._cached), 1)
self.assertFalse("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
yield self._txn.commit()
self._txn = self.store.newTransaction()
# Existing store - check a normal property
self.assertFalse("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
store1_user1 = yield PropertyStore.load("user01", None, None, self._txn, 10)
self.assertFalse("SQL.props:10/user01" in PropertyStore._cacher._memcacheProtocol._cache)
self.assertEqual(store1_user1[pname1], pvalue1)
if PropertyStore is None:
PropertyStoreTest.skip = importErrorMessage
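# Hedged illustration (not part of the original tests): the "tiebreaker"
# pattern from test_concurrentInsertion above, reduced to plain Deferreds.
# Callbacks append their label in completion order, so race[-1] identifies
# whichever commit finished last.
from twisted.internet.defer import succeed as _succeed

def _exampleTiebreakerOrder():
    race = []

    def tiebreaker(label):
        def breaktie(result):
            race.append(label)
            return result
        return breaktie

    _succeed(None).addCallback(tiebreaker('a'))
    _succeed(None).addCallback(tiebreaker('b'))
    # ['a', 'b'], because already-fired Deferreds run callbacks immediately.
    return race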
|
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# IMPORTANT NOTE: If you make local mods to this file, you must run:
# % pnacl/build.sh driver
# in order for them to take effect in the scons build. This command
# updates the copy in the toolchain/ tree.
#
from driver_tools import *
from driver_env import env
from driver_log import Log, DriverOpen, TempFiles
import platform
EXTRA_ENV = {
'ALLOW_NATIVE': '0', # Allow LD args which will change the behavior
# of native linking. This must be accompanied by
# -arch to produce a .nexe.
'USE_IRT': '1', # Use stable IRT interfaces.
'INPUTS' : '',
'OUTPUT' : '',
'SHARED' : '0',
'STATIC' : '0',
'PIC' : '0',
'STDLIB' : '1',
'RELOCATABLE': '0',
'SONAME' : '',
'STRIP_MODE' : 'none',
'STRIP_FLAGS' : '--do-not-wrap ${STRIP_FLAGS_%STRIP_MODE%}',
'STRIP_FLAGS_all' : '-s',
'STRIP_FLAGS_debug': '-S',
'TRANSLATE_FLAGS': '${PIC ? -fPIC} ${!STDLIB ? -nostdlib} ' +
'${STATIC ? -static} ' +
'${SHARED ? -shared} ' +
'${#SONAME ? -Wl,--soname=${SONAME}} ' +
'${TRANSLATE_FLAGS_USER}',
# Extra pnacl-translate flags specified by the user using -Wt
'TRANSLATE_FLAGS_USER': '',
'OPT_FLAGS': '-O${OPT_LEVEL} ${OPT_STRIP_%STRIP_MODE%} ' +
'-inline-threshold=${OPT_INLINE_THRESHOLD} ' +
'--do-not-wrap',
'OPT_INLINE_THRESHOLD': '100',
'OPT_LEVEL': '0',
'OPT_STRIP_none': '',
'OPT_STRIP_all': '-disable-opt --strip',
'OPT_STRIP_debug': '-disable-opt --strip-debug',
'GOLD_PLUGIN_ARGS': '-plugin=${GOLD_PLUGIN_SO} ' +
'-plugin-opt=emit-llvm',
'LD_FLAGS' : '-nostdlib ${@AddPrefix:-L:SEARCH_DIRS} ' +
'${SHARED ? -shared} ${STATIC ? -static} ' +
'${RELOCATABLE ? -relocatable} ' +
'${#SONAME ? --soname=${SONAME}}',
# Flags for native linking.
# Only allowed if ALLOW_NATIVE is true.
'LD_FLAGS_NATIVE': '',
'SEARCH_DIRS' : '${SEARCH_DIRS_USER} ${SEARCH_DIRS_BUILTIN}',
'SEARCH_DIRS_USER' : '',
# Standard Library Directories
'SEARCH_DIRS_BUILTIN': '${STDLIB ? ' +
' ${BASE_USR}/lib/ ' +
' ${BASE_SDK}/lib/ ' +
' ${BASE_LIB}/ ' +
' ${SEARCH_DIRS_NATIVE} ' +
'}',
# HACK-BEGIN
# Add glibc/lib-<arch>/ to the search path.
# These are here to let the bitcode link find native objects
# needed by the GLibC toolchain.
# TODO(pdox): Remove these when we have bitcode .pso stubs for GlibC.
'SEARCH_DIRS_NATIVE': '${LIBMODE_GLIBC ? ${LIBS_ARCH}/}',
'LIBS_ARCH' : '${LIBS_%ARCH%}',
'LIBS_ARM' : '${BASE_GLIBC}/lib-arm',
'LIBS_X8632' : '${BASE_GLIBC}/lib-x86-32',
'LIBS_X8664' : '${BASE_GLIBC}/lib-x86-64',
# HACK-END
'LD_GOLD_OFORMAT' : '${LD_GOLD_OFORMAT_%ARCH%}',
'LD_GOLD_OFORMAT_ARM' : 'elf32-littlearm',
'LD_GOLD_OFORMAT_X8632' : 'elf32-nacl',
'LD_GOLD_OFORMAT_X8664' : 'elf64-nacl',
'BCLD' : '${LD_GOLD}',
'BCLD_FLAGS': '--native-client --oformat ${LD_GOLD_OFORMAT} -Ttext=0x20000 ' +
'${!SHARED && !RELOCATABLE ? --undef-sym-check} ' +
'${GOLD_PLUGIN_ARGS} ${LD_FLAGS}',
'RUN_BCLD': ('${BCLD} ${BCLD_FLAGS} ${inputs} -o ${output}'),
}
def AddToBCLinkFlags(*args):
env.append('LD_FLAGS', *args)
def AddToNativeFlags(*args):
env.append('LD_FLAGS_NATIVE', *args)
def AddToBothFlags(*args):
env.append('LD_FLAGS', *args)
env.append('LD_FLAGS_NATIVE', *args)
LDPatterns = [
( '--pnacl-allow-native', "env.set('ALLOW_NATIVE', '1')"),
( '--noirt', "env.set('USE_IRT', '0')"),
( '-o(.+)', "env.set('OUTPUT', pathtools.normalize($0))"),
( ('-o', '(.+)'), "env.set('OUTPUT', pathtools.normalize($0))"),
( '-shared', "env.set('SHARED', '1')"),
( '-static', "env.set('STATIC', '1')"),
( '-nostdlib', "env.set('STDLIB', '0')"),
( '-r', "env.set('RELOCATABLE', '1')"),
( '-relocatable', "env.set('RELOCATABLE', '1')"),
( ('-L', '(.+)'),
"env.append('SEARCH_DIRS_USER', pathtools.normalize($0))\n"),
( '-L(.+)',
"env.append('SEARCH_DIRS_USER', pathtools.normalize($0))\n"),
# We just ignore undefined symbols in shared objects, so
# -rpath-link should not be necessary.
#
# However, libsrpc.so still needs to be linked in directly (in non-IRT mode)
# or it malfunctions. This is the only way that -rpath-link is still used.
# There's a corresponding hack in pnacl-translate to recognize libsrpc.so
# and link it in directly.
# TODO(pdox): Investigate this situation.
( ('(-rpath)','(.*)'), ""),
( ('(-rpath)=(.*)'), ""),
( ('(-rpath-link)','(.*)'),
"env.append('TRANSLATE_FLAGS', $0+'='+pathtools.normalize($1))"),
( ('(-rpath-link)=(.*)'),
"env.append('TRANSLATE_FLAGS', $0+'='+pathtools.normalize($1))"),
( ('(-Ttext)','(.*)'), AddToNativeFlags),
( ('(-Ttext=.*)'), AddToNativeFlags),
# This overrides the builtin linker script.
( ('(-T)', '(.*)'), AddToNativeFlags),
# TODO(pdox): Allow setting an alternative _start symbol in bitcode
( ('(-e)','(.*)'), AddToBothFlags),
# TODO(pdox): Support GNU versioning.
( '(--version-script=.*)', ""),
# Flags to pass to the native linker.
( '-Wn,(.*)', "env.append('LD_FLAGS_NATIVE', *($0.split(',')))"),
# Flags to pass to translate
( '-Wt,(.*)', "env.append('TRANSLATE_FLAGS_USER', *($0.split(',')))"),
( ('(--section-start)','(.*)'), AddToNativeFlags),
# NOTE: -export-dynamic doesn't actually do anything to the bitcode link
# right now. This is just in case we do want to record that in metadata
# eventually, and have that influence the native linker flags.
( '(-export-dynamic)', AddToBCLinkFlags),
( '-?-soname=(.*)', "env.set('SONAME', $0)"),
( ('-?-soname', '(.*)'), "env.set('SONAME', $0)"),
( '(-M)', AddToBCLinkFlags),
( '(-t)', AddToBCLinkFlags),
( ('(-y)','(.*)'), AddToBCLinkFlags),
( ('(-defsym)','(.*)'), AddToBCLinkFlags),
( '-melf_nacl', "env.set('ARCH', 'X8632')"),
( ('-m','elf_nacl'), "env.set('ARCH', 'X8632')"),
( '-melf64_nacl', "env.set('ARCH', 'X8664')"),
( ('-m','elf64_nacl'), "env.set('ARCH', 'X8664')"),
( '-marmelf_nacl', "env.set('ARCH', 'ARM')"),
( ('-m','armelf_nacl'), "env.set('ARCH', 'ARM')"),
( ('(-?-wrap)', '(.+)'), AddToBCLinkFlags),
( ('(-?-wrap=.+)'), AddToBCLinkFlags),
# NOTE: For scons tests, the code generation fPIC flag is used with pnacl-ld.
( '-fPIC', "env.set('PIC', '1')"),
# This controls LTO optimization.
# opt does not support -Os but internally it is identical to -O2
# opt also does not support -O4 but -O4 is how you ask clang for LTO, so we
# can support it as well
( '-Os', "env.set('OPT_LEVEL', '2')"),
( '-O([0-3])', "env.set('OPT_LEVEL', $0)"),
( '-O([0-9]+)', "env.set('OPT_LEVEL', '3')"),
( '(-translate-fast)', "env.set('TRANSLATE_FLAGS', $0)"),
( '-s', "env.set('STRIP_MODE', 'all')"),
( '--strip-all', "env.set('STRIP_MODE', 'all')"),
( '-S', "env.set('STRIP_MODE', 'debug')"),
( '--strip-debug', "env.set('STRIP_MODE', 'debug')"),
# Inputs and options that need to be kept in order
( '(-l.*)', "env.append('INPUTS', $0)"),
( ('(-l)','(.*)'), "env.append('INPUTS', $0+$1)"),
( '(--no-as-needed)', "env.append('INPUTS', $0)"),
( '(--as-needed)', "env.append('INPUTS', $0)"),
( '(--start-group)', "env.append('INPUTS', $0)"),
( '(--end-group)', "env.append('INPUTS', $0)"),
( '(-Bstatic)', "env.append('INPUTS', $0)"),
( '(-Bdynamic)', "env.append('INPUTS', $0)"),
( '(--(no-)?whole-archive)', "env.append('INPUTS', $0)"),
( '(--undefined=.*)', "env.append('INPUTS', $0)"),
( '(-.*)', UnrecognizedOption),
( '(.*)', "env.append('INPUTS', pathtools.normalize($0))"),
]
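# Hedged illustration (not part of the driver): each LDPatterns entry pairs a
# regex (or a tuple of regexes, for flags that consume a following argument)
# with either an action string that driver_tools.ParseArgs evaluates with
# $0/$1 bound to the captured groups, or a callable such as AddToNativeFlags.
# The toy helper below only demonstrates the single-string case; the real
# matching and argument consumption live in driver_tools.ParseArgs.
import re as _re

def _ExampleMatchSinglePattern(arg, patterns=LDPatterns):
  """Returns (pattern, action, groups) for the first single-regex match."""
  for pattern, action in patterns:
    if isinstance(pattern, str):
      match = _re.match(pattern + '$', arg)
      if match:
        return pattern, action, match.groups()
  return None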
def main(argv):
env.update(EXTRA_ENV)
ParseArgs(argv, LDPatterns)
# If the user passed -arch, then they want native output.
arch_flag_given = GetArch() is not None
# Both LD_FLAGS_NATIVE and TRANSLATE_FLAGS_USER affect
# the translation process. If they are non-empty,
# then --pnacl-allow-native must be given.
allow_native = env.getbool('ALLOW_NATIVE')
native_flags = env.get('LD_FLAGS_NATIVE') + env.get('TRANSLATE_FLAGS_USER')
if len(native_flags) > 0:
if not allow_native:
flagstr = ' '.join(native_flags)
Log.Fatal('"%s" affects translation. '
'To allow, specify --pnacl-allow-native' % flagstr)
if env.getbool('ALLOW_NATIVE') and not arch_flag_given:
Log.Fatal("--pnacl-allow-native given, but translation "
"is not happening (missing -arch?)")
if env.getbool('RELOCATABLE'):
if env.getbool('SHARED'):
Log.Fatal("-r and -shared may not be used together")
env.set('STATIC', '0')
inputs = env.get('INPUTS')
output = env.getone('OUTPUT')
if output == '':
output = pathtools.normalize('a.out')
if not arch_flag_given:
# If -arch is not given, assume X86-32.
# This is because gold requires an arch (even for bitcode linking).
SetArch('X8632')
assert(GetArch() is not None)
# Expand all parameters
# This resolves -lfoo into actual filenames,
# and expands linker scripts into command-line arguments.
inputs = ldtools.ExpandInputs(inputs,
env.get('SEARCH_DIRS'),
env.getbool('STATIC'),
# Once the glibc bitcode link is purely
# bitcode (e.g., even libc_nonshared.a),
# we may be able to restrict this more.
# This is also currently used by
# pnacl_generate_pexe=0 with glibc,
# for user libraries.
ldtools.LibraryTypes.ANY)
# Make sure the inputs have matching arch.
CheckInputsArch(inputs)
regular_inputs, native_objects = SplitLinkLine(inputs)
if not env.getbool('USE_IRT'):
inputs = UsePrivateLibraries(inputs)
# Filter out object files which are currently used in the bitcode link.
# These don't actually need to be treated separately, since the
# translator includes them automatically. Eventually, these will
# be compiled to bitcode or replaced by bitcode stubs, and this list
# can go away.
if env.getbool('STDLIB'):
native_objects = RemoveNativeStdLibs(native_objects)
if env.getbool('SHARED'):
bitcode_type = 'pso'
native_type = 'so'
elif env.getbool('RELOCATABLE'):
bitcode_type = 'po'
native_type = 'o'
else:
bitcode_type = 'pexe'
native_type = 'nexe'
if native_objects and not allow_native:
argstr = ' '.join(native_objects)
Log.Fatal("Native objects '%s' detected in the link. "
"To allow, specify --pnacl-allow-native" % argstr)
tng = TempNameGen([], output)
# Do the bitcode link.
if HasBitcodeInputs(inputs):
chain = DriverChain(inputs, output, tng)
chain.add(LinkBC, 'pre_opt.' + bitcode_type)
if env.getbool('STATIC') and len(native_objects) == 0:
chain.add(DoExpandCtors, 'expand_ctors.' + bitcode_type)
if env.getone('OPT_LEVEL') != '0':
chain.add(DoOPT, 'opt.' + bitcode_type)
elif env.getone('STRIP_MODE') != 'none':
chain.add(DoStrip, 'stripped.' + bitcode_type)
else:
chain = DriverChain('', output, tng)
# If -arch is also specified, invoke pnacl-translate afterwards.
if arch_flag_given:
env.set('NATIVE_OBJECTS', *native_objects)
chain.add(DoTranslate, native_type)
chain.run()
if bitcode_type == 'pexe' and not arch_flag_given:
# Add bitcode wrapper header
WrapBitcode(output)
# Mark .pexe files as executable.
# Some versions of 'configure' expect this.
SetExecutableMode(output)
return 0
def RemoveNativeStdLibs(objs):
# For newlib, all standard libraries are already bitcode.
if env.getbool('LIBMODE_NEWLIB'):
return objs
# GLibC standard libraries
defaultlibs = ['libc_nonshared.a', 'libpthread_nonshared.a',
'libc.a', 'libstdc++.a', 'libgcc.a', 'libgcc_eh.a',
'libm.a']
return [f for f in objs if pathtools.split(f)[1] not in defaultlibs]
def UsePrivateLibraries(libs):
""" Place libnacl_sys_private.a before libnacl.a
Replace libpthread.a with libpthread_private.a
Replace libnacl_dyncode.a with libnacl_dyncode_private.a
This assumes that the private libs can be found at the same directory
as the public libs.
"""
result_libs = []
for l in libs:
base = pathtools.basename(l)
dname = pathtools.dirname(l)
if base == 'libnacl.a':
Log.Info('Not using IRT -- injecting libnacl_sys_private.a to link line')
result_libs.append(pathtools.join(dname, 'libnacl_sys_private.a'))
result_libs.append(l)
elif base == 'libpthread.a':
Log.Info('Not using IRT -- swapping private lib for libpthread')
result_libs.append(pathtools.join(dname, 'libpthread_private.a'))
elif base == 'libnacl_dyncode.a':
Log.Info('Not using IRT -- swapping private lib for libnacl_dyncode')
result_libs.append(pathtools.join(dname, 'libnacl_dyncode_private.a'))
else:
result_libs.append(l)
return result_libs
def SplitLinkLine(inputs):
""" Pull native objects (.o, .a) out of the input list.
These need special handling since they cannot be
encoded in the bitcode file.
(NOTE: native .so files do not need special handling,
because they can be encoded as dependencies in the bitcode)
"""
normal = []
native = []
for f in inputs:
if ldtools.IsFlag(f):
normal.append(f)
elif IsNativeArchive(f) or IsNativeObject(f):
native.append(f)
else:
normal.append(f)
return (normal, native)
def HasBitcodeInputs(inputs):
for f in inputs:
if ldtools.IsFlag(f):
continue
elif IsBitcodeObject(f) or IsBitcodeArchive(f):
return True
return False
def CheckInputsArch(inputs):
count = 0
for f in inputs:
if ldtools.IsFlag(f):
continue
elif IsBitcodeObject(f) or IsBitcodeDSO(f) or IsBitcodeArchive(f):
pass
elif IsNative(f):
ArchMerge(f, True)
else:
Log.Fatal("%s: Unexpected type of file for linking (%s)",
pathtools.touser(f), FileType(f))
count += 1
if count == 0:
Log.Fatal("no input files")
def DoExpandCtors(infile, outfile):
RunDriver('opt', ['-nacl-expand-ctors', infile, '-o', outfile])
def DoOPT(infile, outfile):
opt_flags = env.get('OPT_FLAGS')
RunDriver('opt', opt_flags + [ infile, '-o', outfile ])
def DoStrip(infile, outfile):
strip_flags = env.get('STRIP_FLAGS')
RunDriver('strip', strip_flags + [ infile, '-o', outfile ])
def DoTranslate(infile, outfile):
args = env.get('TRANSLATE_FLAGS')
args += ['-Wl,'+s for s in env.get('LD_FLAGS_NATIVE')]
if infile:
args += [infile]
args += [s for s in env.get('NATIVE_OBJECTS')]
args += ['-o', outfile]
RunDriver('translate', args)
def LinkBC(inputs, output):
'''Input: a bunch of bc/o/lib input files
Output: a combined & optimized bitcode file
'''
# Produce combined bitcode file
RunWithEnv('${RUN_BCLD}',
inputs=inputs,
output=output)
|
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Eager-graph unified check numerics callback."""
import collections
import threading
import numpy as np
from tensorflow.core.protobuf import debug_event_pb2
from tensorflow.python.debug.lib import op_callbacks_common
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import op_callbacks
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_debug_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Many ops have benign NaN outputs, and running them with check_numerics
# on will create unwanted errors.
# TODO(b/142497024): Replace this allowlist with function decorators in the ops
IGNORE_OP_OUTPUTS = (
# For FusedBatchNorm, if the input tensor is empty then batch_mean and
# batch_variance will be NaN. reserve_space holds intermediate values
# derived from batch_mean and batch_variance used for gradient calculation
(b"FusedBatchNorm", 1), # batch_mean
(b"FusedBatchNorm", 2), # batch_variance
(b"FusedBatchNorm", 3), # reserve_space_1
(b"FusedBatchNorm", 4), # reserve_space_2
# Same as above
(b"FusedBatchNormV2", 1), # batch_mean
(b"FusedBatchNormV2", 2), # batch_variance
(b"FusedBatchNormV2", 3), # reserve_space_1
(b"FusedBatchNormV2", 4), # reserve_space_2
# Same as above, but reserve_space_3 holds additional intermediate values
(b"FusedBatchNormV3", 1), # batch_mean
(b"FusedBatchNormV3", 2), # batch_variance
(b"FusedBatchNormV3", 3), # reserve_space_1
(b"FusedBatchNormV3", 4), # reserve_space_2
(b"FusedBatchNormV3", 5), # reserve_space_3
)
# Some frequently used ops are generally safe and we can skip them to reduce
# overhead. NOTE: This list is compiled by observing operations called by
# models in practice and is not a comprehensive list of safe operations.
SAFE_OPS = (
b"Concat",
b"ConcatV2",
b"ExpandDims",
b"Fill",
b"Gather",
b"Maximum",
b"Minimum",
b"Reshape",
b"Slice",
b"Squeeze",
b"Stack",
b"StridedSlice",
b"StridedSliceGrad",
b"TensorListConcatV2",
b"TensorListGather",
b"TensorListGetItem",
b"TensorListPopBack",
b"TensorListStack",
b"Transpose",
b"Unpack",
)
_state = threading.local()
_check_numerics_callback_create_counter = monitoring.Counter(
"/tensorflow/api/python/debugging/check_numerics_callback_create_counter",
"Counter for number of times the check_numerics op callback is created.")
def limit_string_length(string, max_len=50):
"""Limit the length of input string.
Args:
string: Input string.
max_len: (int or None) If int, the length limit. If None, no limit.
Returns:
Possibly length-limited string.
"""
if max_len is None or len(string) <= max_len:
return string
else:
return "..." + string[len(string) - max_len:]
# A dictionary that supports looking up the original input tensor names.
_CHECK_NUMERICS_INPUT_LOOKUP = collections.defaultdict(dict)
def _maybe_lookup_original_input_tensor(graph, tensor):
if (graph and
graph in _CHECK_NUMERICS_INPUT_LOOKUP and
tensor.name in _CHECK_NUMERICS_INPUT_LOOKUP[graph]):
return _CHECK_NUMERICS_INPUT_LOOKUP[graph][tensor.name]
else:
return tensor
def get_check_numerics_error_message(slot,
num_outputs,
op_type,
tensor,
inputs,
graph=None,
traceback=None,
stack_height_limit=30,
path_length_limit=50):
"""Create a meaningful and user-friendly error message about offending tensor.
The error message reveals the following info about the op that outputs
NaN/Infinity: dtype, shape (to the extent known at graph-construction time),
input tensors, stack trace for op creation (if is graph mode).
Args:
slot: (int) slot index of the tensor output.
num_outputs: (int) total number of outputs of the op.
op_type: (str) Type of the op that generates `tensor`.
tensor: (Tensor) the offending tensor, i.e., the tensor that contains
Infinities or NaNs.
inputs: (array of Tensor) inputs to the op that generates `tensor`.
graph: (tf.Graph) the graph object that `tensor` belongs to. Available only
under graph mode.
traceback: (list of trace frames) the stack trace of the op's creation.
Available only under graph mode.
stack_height_limit: (int or None) If int, limit to the height of the stack
trace printed in the error message. If None, no limit to the height.
path_length_limit: (int or None) Length limit for file paths included in the
formatted stack trace.
Returns:
(str) A formatted error message.
"""
eager_vs_graph_qualifier = "graph" if graph else "eagerly-executing"
message = "\n"
message += (
"\n!!! Detected Infinity or NaN in output %d of "
"%s op \"%s\" (# of outputs: %d) !!!\n" %
(slot, eager_vs_graph_qualifier, op_type, num_outputs))
message += " dtype: %s\n" % tensor.dtype
message += " shape: %s\n" % (tensor.shape,)
if not graph:
# This is an eager tensor. We can get its numpy value and count
# NaNs and Infs.
is_inf = np.isinf(tensor)
num_neg_inf = np.sum(np.logical_and(np.less(tensor, 0.), is_inf))
num_pos_inf = np.sum(np.logical_and(np.greater(tensor, 0.), is_inf))
num_nan = np.sum(np.isnan(tensor))
if num_neg_inf > 0:
message += " # of -Inf elements: %s\n" % num_neg_inf
if num_pos_inf > 0:
message += " # of +Inf elements: %s\n" % num_pos_inf
if num_nan:
message += " # of +NaN elements: %s\n" % num_nan
if len(inputs) > 1:
message += "\n Input tensors (%d):\n" % len(inputs)
for slot, input_tensor in enumerate(inputs):
message += " %d: %s\n" % (
slot, _maybe_lookup_original_input_tensor(graph, input_tensor))
elif len(inputs) == 1:
message += "\n Input tensor: %s\n" % (
_maybe_lookup_original_input_tensor(graph, inputs[0]))
if graph and hasattr(graph, "name") and graph.name:
message += " Graph name: \"%s\"\n" % graph.name
# Format the stack trace for the op's creation. We omit files that
# belong to tensorflow itself.
if graph and traceback:
message += (
"\n Stack trace of op's creation (\"->\": inferred user code):\n")
if stack_height_limit is not None and len(traceback) > stack_height_limit:
num_omitted_frames = len(traceback) - stack_height_limit
message += " + ... (Omitted %d frames)\n" % num_omitted_frames
for filepath, lineno, function_name, source_line in traceback[
-stack_height_limit:]:
user_code_indicator = " "
if not source_utils.guess_is_tensorflow_py_library(filepath):
user_code_indicator = " -> "
message += " + %s (L%d) %s\n" % (
limit_string_length(filepath, path_length_limit), lineno,
function_name)
if source_line is not None:
message += "%s| %s\n" % (user_code_indicator, source_line)
message += "\n"
return message
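# Hedged illustration (not part of the original module): when `graph` is None
# the message formatter above only relies on `.dtype`, `.shape` and
# numpy-compatible comparisons, so a plain ndarray is enough to preview the
# formatting. The op_type string here is illustrative.
def _example_error_message():
  bad = np.array([1.0, np.inf, -np.inf, np.nan], dtype=np.float32)
  return get_check_numerics_error_message(
      slot=0, num_outputs=1, op_type="Log", tensor=bad, inputs=[bad])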
def _debug_summary(x):
return gen_debug_ops.debug_numeric_summary_v2(
x,
tensor_debug_mode=(
debug_event_pb2.TensorDebugMode.REDUCE_INF_NAN_THREE_SLOTS))
class CheckNumericsCallback(object):
"""Wrapper for the numerics-checking callback for thread locality."""
def __init__(self, stack_height_limit, path_length_limit):
self._stack_height_limit = stack_height_limit
self._path_length_limit = path_length_limit
# A dict mapping Placeholder tensors to their instrumenting debug tensors.
# Used only under V1 graph mode, where we can't rely on auto control
# dependency to execute the debug tensors and hence need to attach the debug
# tensors as control dependencies of the ops that consume the Placeholder.
self._placeholder_to_debug_tensor = dict()
def callback(self,
op_type,
inputs,
attrs,
outputs,
op_name=None,
graph=None):
"""Eager-function unified callback for checking numerics."""
del attrs, op_name # Unused
op_type_bytes = compat.as_bytes(op_type)
is_v1_graph_mode = not ops.executing_eagerly_outside_functions()
if (op_type_bytes in op_callbacks_common.OP_CALLBACK_SKIP_OPS or
op_type_bytes in SAFE_OPS):
return None
if graph:
# Under graph mode. Insert check_numerics op.
instrumented_outputs = []
if is_v1_graph_mode:
for input_tensor in inputs:
if input_tensor in self._placeholder_to_debug_tensor and outputs:
outputs[0].op._add_control_input( # pylint: disable=protected-access
self._placeholder_to_debug_tensor[input_tensor].op)
for slot, output in enumerate(outputs):
if (output.dtype.is_floating and
(op_type_bytes, slot) not in IGNORE_OP_OUTPUTS):
checked_output = array_ops.check_numerics_v2(
# TF v2 has automatic control dependencies added to stateful async
# ops, which allows us to run check_numerics asynchronously.
# In the above case we use debug_summary to reduce all output
# tensors asynchronously from the op being checked and then
# process the tensor summary with check_numerics.
output if is_v1_graph_mode else _debug_summary(output),
get_check_numerics_error_message(
slot,
len(outputs),
op_type,
output,
inputs,
graph=graph,
traceback=output.op.traceback,
stack_height_limit=self._stack_height_limit,
path_length_limit=self._path_length_limit))
_CHECK_NUMERICS_INPUT_LOOKUP[graph][checked_output.name] = output
instrumented_outputs.append(self._get_output_tensor(
op_type_bytes, output, checked_output, is_v1_graph_mode))
else:
instrumented_outputs.append(output)
return instrumented_outputs
else:
if op_type_bytes == b"CheckNumericsV2":
# TODO(b/140334369): Remove this special-casing logic once op_callback
# automatically prevents infinite recursion in eager mode.
return None
# Under eager mode. Eagerly execute check_numerics op.
for slot, output in enumerate(outputs):
if (output.dtype.is_floating and
(op_type_bytes, slot) not in IGNORE_OP_OUTPUTS):
array_ops.check_numerics_v2(
output,
get_check_numerics_error_message(
slot, len(outputs), op_type, output, inputs,
stack_height_limit=self._stack_height_limit,
path_length_limit=self._path_length_limit))
def _get_output_tensor(self,
op_type,
tensor,
checked_tensor,
is_v1_graph_mode):
"""Determine what tensor to output from callback.
Args:
op_type: Type of the op that outputs the original symbolic tensor, as
`bytes`.
tensor: The original output symbolic tensor.
checked_tensor: The debugger-instrumented, numerics-checking tensor.
is_v1_graph_mode: Whether the debugged program is running under V1 graph
mode.
Returns:
A symbolic tensor to be returned by the dumping op_callback.
"""
if is_v1_graph_mode:
# Placeholders need special treatment under V1 graph mode. The
# callback can't simply override the Placeholder tensor to the debug
# tensor, as that would cause the Placeholder op to lack a value.
# The debug tensor is remembered and will be attached as control
# inputs to ops that consume the Placeholders later.
if op_type == b"Placeholder":
self._placeholder_to_debug_tensor[tensor] = checked_tensor
return tensor
else:
return checked_tensor
else:
# Under non-v1 graph mode, rely on auto control dependency to run the
# checked tensor.
return tensor
@tf_export("debugging.enable_check_numerics")
def enable_check_numerics(stack_height_limit=30,
path_length_limit=50):
r"""Enable tensor numerics checking in an eager/graph unified fashion.
The numerics checking mechanism will cause any TensorFlow eager execution or
graph execution to error out as soon as an op's output tensor contains
infinity or NaN.
This method is idempotent. Calling it multiple times has the same effect
as calling it once.
This method takes effect only on the thread in which it is called.
When an op's float-type output tensor contains any Infinity or NaN, a
`tf.errors.InvalidArgumentError` will be thrown, with an error message that
reveals the following information:
- The type of the op that generated the tensor with bad numerics.
- Data type (dtype) of the tensor.
- Shape of the tensor (to the extent known at the time of eager execution
or graph construction).
- Name of the containing graph (if available).
- (Graph mode only): The stack trace of the intra-graph op's creation,
with a stack-height limit and a path-length limit for visual clarity.
The stack frames that belong to the user's code (as opposed to
tensorflow's internal code) are highlighted with a text arrow ("->").
- (Eager mode only): How many of the offending tensor's elements are
`Infinity` and `NaN`, respectively.
Once enabled, the check-numerics mechanism can be disabled by using
`tf.debugging.disable_check_numerics()`.
Example usage:
1. Catching infinity during the execution of a `tf.function` graph:
```py
import tensorflow as tf
tf.debugging.enable_check_numerics()
@tf.function
def square_log_x_plus_1(x):
v = tf.math.log(x + 1)
return tf.math.square(v)
x = -1.0
# When the following line runs, a function graph will be compiled
# from the Python function `square_log_x_plus_1()`. Due to the
# `enable_check_numerics()` call above, the graph will contain
# numerics checking ops that will run during the function graph's
# execution. The function call generates an -infinity when the Log
# (logarithm) op operates on the output tensor of the Add op.
# The program errors out at this line, printing an error message.
y = square_log_x_plus_1(x)
z = -y
```
2. Catching NaN during eager execution:
```py
import numpy as np
import tensorflow as tf
tf.debugging.enable_check_numerics()
x = np.array([[0.0, -1.0], [4.0, 3.0]])
# The following line executes the Sqrt op eagerly. Due to the negative
# element in the input array, a NaN is generated. Due to the
# `enable_check_numerics()` call above, the program errors immediately
# at this line, printing an error message.
y = tf.math.sqrt(x)
z = tf.matmul(y, y)
```
NOTE: If your code is running on TPUs, be sure to call
`tf.config.set_soft_device_placement(True)` before calling
`tf.debugging.enable_check_numerics()` as this API uses automatic outside
compilation on TPUs. For example:
```py
tf.config.set_soft_device_placement(True)
tf.debugging.enable_check_numerics()
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='')
strategy = tf.distribute.TPUStrategy(resolver)
with strategy.scope():
# ...
```
Args:
stack_height_limit: Limit to the height of the printed stack trace.
Applicable only to ops in `tf.function`s (graphs).
path_length_limit: Limit to the file path included in the printed stack
trace. Applicable only to ops in `tf.function`s (graphs).
"""
if not hasattr(_state, "check_numerics_callback"):
_state.check_numerics_callback = CheckNumericsCallback(
stack_height_limit, path_length_limit)
op_callbacks.add_op_callback(_state.check_numerics_callback.callback)
logging.info(
"Enabled check-numerics callback in thread %s",
threading.current_thread().name)
_check_numerics_callback_create_counter.get_cell().increase_by(1)
@tf_export("debugging.disable_check_numerics")
def disable_check_numerics():
"""Disable the eager/graph unified numerics checking mechanism.
This method can be used after a call to `tf.debugging.enable_check_numerics()`
to disable the numerics-checking mechanism that catches infinity and NaN
values output by ops executed eagerly or in tf.function-compiled graphs.
This method is idempotent. Calling it multiple times has the same effect
as calling it once.
This method takes effect only on the thread in which it is called.
"""
if not hasattr(_state, "check_numerics_callback"):
return
try:
op_callbacks.remove_op_callback(_state.check_numerics_callback.callback)
delattr(_state, "check_numerics_callback")
logging.info(
"Disabled check-numerics callback in thread %s",
threading.current_thread().name)
except KeyError:
# Tolerate disabling the check numerics callback without
# enable_check_numerics() being called first.
pass
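# A minimal usage sketch (not part of this module): it assumes a working
# TensorFlow 2.x installation and simply toggles the checker around an op that
# produces a NaN, catching the resulting InvalidArgumentError.
def _example_toggle_check_numerics():
  import numpy as np
  import tensorflow as tf
  tf.debugging.enable_check_numerics()
  try:
    # sqrt(-1.0) yields NaN, so the check-numerics callback raises here.
    tf.math.sqrt(np.array([-1.0]))
  except tf.errors.InvalidArgumentError as e:
    print("check-numerics caught: %s" % type(e).__name__)
  finally:
    # Restore the default behavior for the current thread.
    tf.debugging.disable_check_numerics()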
|
|
from nose.tools import * # noqa
from faker import Factory
fake = Factory.create()
import uuid
from scripts.dropbox import migrate_to_external_accounts as migration
from framework.mongo import database
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.models import User, Node
from website.oauth.models import ExternalAccount
from addons.dropbox.model import (
DropboxUserSettings,
DropboxNodeSettings
)
def fake_user_settings_document(user, deleted=False):
return {
"_id": fake.credit_card_number(),
"_version": 1,
"access_token": fake.sha1(),
"deleted": deleted,
"dropbox_id": fake.ean8(),
"dropbox_info": {
"display_name": fake.name()
},
"owner": user._id,
}
def fake_node_settings_document(user_settings_document=None, node=None, deleted=False):
return {
"_id": fake.credit_card_number(),
"_version": 1,
"deleted": deleted,
"folder": uuid.uuid4(),
"owner": node._id if node else 'null',
"user_settings": user_settings_document['_id'] if user_settings_document else 'null'
}
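# Illustrative helper (not used by the tests below): builds one linked
# user-settings/node-settings document pair the same way setUp() does, assuming
# the factories imported above are available.
def fake_linked_documents_example():
    user = UserFactory()
    node = ProjectFactory()
    user_settings_document = fake_user_settings_document(user)
    node_settings_document = fake_node_settings_document(user_settings_document, node)
    return user_settings_document, node_settings_document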
class TestDropboxMigration(OsfTestCase):
def setUp(self):
super(TestDropboxMigration, self).setUp()
self.unlinked_user_settings = []
self.linked_user_settings = []
self.deleted_user_settings = []
self.node_settings_documents = []
self.unauthorized_node_settings_documents = []
for i in range(3):
user = UserFactory()
self.unlinked_user_settings.append(fake_user_settings_document(user))
database['dropboxusersettings'].insert(self.unlinked_user_settings)
for i in range(3):
user = UserFactory()
self.linked_user_settings.append(fake_user_settings_document(user))
node = ProjectFactory()
self.node_settings_documents.append(
fake_node_settings_document(self.linked_user_settings[-1], node)
)
database['dropboxusersettings'].insert(self.linked_user_settings)
database['dropboxnodesettings'].insert(self.node_settings_documents)
for i in range(3):
user = UserFactory()
self.deleted_user_settings.append(fake_user_settings_document(user, deleted=True))
database['dropboxusersettings'].insert(self.deleted_user_settings)
for i in range(3):
node = ProjectFactory()
self.unauthorized_node_settings_documents.append(
fake_node_settings_document(None, node)
)
database['dropboxnodesettings'].insert(self.unauthorized_node_settings_documents)
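        # Fixture summary: 3 unlinked, 3 linked (each with a node settings
        # document), and 3 deleted user settings documents, plus 3 node
        # settings documents with no user settings ("unauthorized").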
def tearDown(self):
super(TestDropboxMigration, self).tearDown()
database['dropboxnodesettings'].remove()
database['dropboxusersettings'].remove()
database['externalaccount'].remove()
def test_migrate_to_external_account(self):
assert_equal(ExternalAccount.find().count(), 0)
user_settings_document = self.unlinked_user_settings[0]
external_account, user, new = migration.migrate_to_external_account(user_settings_document)
assert_true(new)
assert_equal(ExternalAccount.find().count(), 1)
assert_is_not_none(external_account)
assert_equal(user_settings_document['owner'], user._id)
assert_equal(external_account.provider, 'dropbox')
assert_equal(external_account.provider_name, 'Dropbox')
assert_equal(
external_account.oauth_key,
user_settings_document['access_token']
)
assert_equal(
external_account.display_name,
user_settings_document['dropbox_info']['display_name']
)
def test_make_new_user_settings(self):
user_settings_document = self.unlinked_user_settings[0]
user = User.load(user_settings_document['owner'])
user_settings = migration.make_new_user_settings(user)
user.reload()
assert_equal(
len(user._backrefs['addons']['dropboxusersettings']['owner']),
1
)
assert_equal(
user._backrefs['addons']['dropboxusersettings']['owner'][0],
user_settings._id
)
assert_false(hasattr(user_settings, 'access_token'))
def test_make_new_node_settings(self):
node_settings_document = self.node_settings_documents[0]
node = Node.load(node_settings_document['owner'])
user_settings_document = database['dropboxusersettings'].find_one({
'_id': node_settings_document['user_settings']
})
external_account, user, new = migration.migrate_to_external_account(
user_settings_document
)
user_settings = migration.make_new_user_settings(user)
node_settings = migration.make_new_node_settings(
node,
node_settings_document,
external_account,
user_settings
)
assert_equal(
len(node._backrefs['addons']['dropboxnodesettings']['owner']),
1
)
assert_equal(
node._backrefs['addons']['dropboxnodesettings']['owner'][0],
node_settings._id
)
def test_remove_old_documents(self):
user_settings_collection = database['dropboxusersettings']
old_user_settings = list(user_settings_collection.find())
old_user_settings_count = user_settings_collection.count()
node_settings_collection = database['dropboxnodesettings']
old_node_settings = list(node_settings_collection.find())
        old_node_settings_count = node_settings_collection.count()
migration.migrate(dry_run=False, remove_old=False)
assert_equal(
database['dropboxusersettings'].count(),
15
) # 3 + 3 + 3 + 6 (non-deleted)
assert_equal(
database['dropboxnodesettings'].count(),
9
) # 3 + 3 + 3
migration.remove_old_documents(
old_user_settings, old_user_settings_count,
old_node_settings, old_node_settings_count,
dry_run=False
)
assert_equal(
database['dropboxusersettings'].count(),
6
)
assert_equal(
database['dropboxnodesettings'].count(),
3
)
def test_migrate(self):
migration.migrate(dry_run=False)
assert_equal(
DropboxUserSettings.find().count(),
6
)
assert_equal(
DropboxNodeSettings.find().count(),
3
)
for user_settings in DropboxUserSettings.find():
assert_is_not_none(user_settings.owner)
assert_false(hasattr(user_settings, 'access_token'))
for node_settings in DropboxNodeSettings.find():
assert_is_not_none(node_settings.owner)
if (
not node_settings.user_settings or
not node_settings.external_account
):
assert_in(
node_settings.folder,
map(
lambda d: d['folder'],
self.unauthorized_node_settings_documents
)
)
def test_migrate_two_users_one_account(self):
self.linked_user_settings[1]["_id"] = self.linked_user_settings[0]["_id"]
self.linked_user_settings[1]["_version"] = self.linked_user_settings[0]["_version"]
self.linked_user_settings[1]["access_token"] = self.linked_user_settings[0]["access_token"]
self.linked_user_settings[1]["deleted"] = self.linked_user_settings[0]["deleted"]
self.linked_user_settings[1]["dropbox_id"] = self.linked_user_settings[0]["dropbox_id"]
self.linked_user_settings[1]["dropbox_info"] = self.linked_user_settings[0]["dropbox_info"]
external_account_1, user_1, new_1 = migration.migrate_to_external_account(self.linked_user_settings[0])
external_account_2, user_2, new_2 = migration.migrate_to_external_account(self.linked_user_settings[1])
assert_equal(external_account_1._id, external_account_2._id)
assert_not_equal(user_1, user_2)
assert_true(new_1)
assert_false(new_2)
|
|
# Copyright (C) 2005-2019 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
"""
from . import util as orm_util
from .attributes import QueryableAttribute
from .base import _class_to_mapper
from .base import _is_aliased_class
from .base import _is_mapped_class
from .base import InspectionAttr
from .interfaces import MapperOption
from .interfaces import PropComparator
from .path_registry import _DEFAULT_TOKEN
from .path_registry import _WILDCARD_TOKEN
from .path_registry import PathRegistry
from .path_registry import TokenRegistry
from .util import _orm_full_deannotate
from .. import exc as sa_exc
from .. import inspect
from .. import util
from ..sql import coercions
from ..sql import roles
from ..sql.base import _generative
from ..sql.base import Generative
class Load(Generative, MapperOption):
"""Represents loader options which modify the state of a
:class:`.Query` in order to affect how various mapped attributes are
loaded.
The :class:`.Load` object is in most cases used implicitly behind the
scenes when one makes use of a query option like :func:`.joinedload`,
:func:`.defer`, or similar. However, the :class:`.Load` object
can also be used directly, and in some cases can be useful.
To use :class:`.Load` directly, instantiate it with the target mapped
class as the argument. This style of usage is
useful when dealing with a :class:`.Query` that has multiple entities::
myopt = Load(MyClass).joinedload("widgets")
The above ``myopt`` can now be used with :meth:`.Query.options`, where it
will only take effect for the ``MyClass`` entity::
session.query(MyClass, MyOtherClass).options(myopt)
One case where :class:`.Load` is useful as public API is when specifying
"wildcard" options that only take effect for a certain class::
session.query(Order).options(Load(Order).lazyload('*'))
Above, all relationships on ``Order`` will be lazy-loaded, but other
attributes on those descendant objects will load using their normal
loader strategy.
.. seealso::
:ref:`deferred_options`
:ref:`deferred_loading_w_multiple`
:ref:`relationship_loader_options`
"""
def __init__(self, entity):
insp = inspect(entity)
self.path = insp._path_registry
# note that this .context is shared among all descendant
# Load objects
self.context = util.OrderedDict()
self.local_opts = {}
self._of_type = None
self.is_class_strategy = False
@classmethod
def for_existing_path(cls, path):
load = cls.__new__(cls)
load.path = path
load.context = {}
load.local_opts = {}
load._of_type = None
return load
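    # _generate_cache_key builds a hashable key describing this option relative
    # to the given path.  It returns False when an aliased class is involved
    # (meaning the option is not safe to cache) and None when none of the
    # loader entries in self.context apply to the path.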
def _generate_cache_key(self, path):
if path.path[0].is_aliased_class:
return False
serialized = []
for (key, loader_path), obj in self.context.items():
if key != "loader":
continue
for local_elem, obj_elem in zip(self.path.path, loader_path):
if local_elem is not obj_elem:
break
else:
endpoint = obj._of_type or obj.path.path[-1]
chopped = self._chop_path(loader_path, path)
if (
# means loader_path and path are unrelated,
# this does not need to be part of a cache key
chopped
is None
) or (
# means no additional path with loader_path + path
# and the endpoint isn't using of_type so isn't modified
# into an alias or other unsafe entity
not chopped
and not obj._of_type
):
continue
serialized_path = []
for token in chopped:
if isinstance(token, util.string_types):
serialized_path.append(token)
elif token.is_aliased_class:
return False
elif token.is_property:
serialized_path.append(token.key)
else:
assert token.is_mapper
serialized_path.append(token.class_)
if not serialized_path or endpoint != serialized_path[-1]:
if endpoint.is_mapper:
serialized_path.append(endpoint.class_)
elif endpoint.is_aliased_class:
return False
serialized.append(
(
tuple(serialized_path)
+ (obj.strategy or ())
+ (
tuple(
[
(key, obj.local_opts[key])
for key in sorted(obj.local_opts)
]
)
if obj.local_opts
else ()
)
)
)
if not serialized:
return None
else:
return tuple(serialized)
def _generate(self):
cloned = super(Load, self)._generate()
cloned.local_opts = {}
return cloned
is_opts_only = False
is_class_strategy = False
strategy = None
propagate_to_loaders = False
def process_query(self, query):
self._process(query, True)
def process_query_conditionally(self, query):
self._process(query, False)
def _process(self, query, raiseerr):
current_path = query._current_path
if current_path:
for (token, start_path), loader in self.context.items():
chopped_start_path = self._chop_path(start_path, current_path)
if chopped_start_path is not None:
query._attributes[(token, chopped_start_path)] = loader
else:
query._attributes.update(self.context)
def _generate_path(
self, path, attr, for_strategy, wildcard_key, raiseerr=True
):
existing_of_type = self._of_type
self._of_type = None
if raiseerr and not path.has_entity:
if isinstance(path, TokenRegistry):
raise sa_exc.ArgumentError(
"Wildcard token cannot be followed by another entity"
)
else:
raise sa_exc.ArgumentError(
"Mapped attribute '%s' does not "
"refer to a mapped entity" % (path.prop,)
)
if isinstance(attr, util.string_types):
default_token = attr.endswith(_DEFAULT_TOKEN)
if attr.endswith(_WILDCARD_TOKEN) or default_token:
if default_token:
self.propagate_to_loaders = False
if wildcard_key:
attr = "%s:%s" % (wildcard_key, attr)
# TODO: AliasedInsp inside the path for of_type is not
# working for a with_polymorphic entity because the
# relationship loaders don't render the with_poly into the
# path. See #4469 which will try to improve this
if existing_of_type and not existing_of_type.is_aliased_class:
path = path.parent[existing_of_type]
path = path.token(attr)
self.path = path
return path
if existing_of_type:
ent = inspect(existing_of_type)
else:
ent = path.entity
try:
# use getattr on the class to work around
# synonyms, hybrids, etc.
attr = getattr(ent.class_, attr)
except AttributeError:
if raiseerr:
raise sa_exc.ArgumentError(
'Can\'t find property named "%s" on '
"%s in this Query." % (attr, ent)
)
else:
return None
else:
attr = found_property = attr.property
path = path[attr]
elif _is_mapped_class(attr):
# TODO: this does not appear to be a valid codepath. "attr"
# would never be a mapper. This block is present in 1.2
# as well however does not seem to be accessed in any tests.
if not orm_util._entity_corresponds_to_use_path_impl(
attr.parent, path[-1]
):
if raiseerr:
raise sa_exc.ArgumentError(
"Attribute '%s' does not "
"link from element '%s'" % (attr, path.entity)
)
else:
return None
else:
prop = found_property = attr.property
if not orm_util._entity_corresponds_to_use_path_impl(
attr.parent, path[-1]
):
if raiseerr:
raise sa_exc.ArgumentError(
'Attribute "%s" does not '
'link from element "%s".%s'
% (
attr,
path.entity,
(
" Did you mean to use "
"%s.of_type(%s)?"
% (path[-2], attr.class_.__name__)
if len(path) > 1
and path.entity.is_mapper
and attr.parent.is_aliased_class
else ""
),
)
)
else:
return None
if getattr(attr, "_of_type", None):
ac = attr._of_type
ext_info = of_type_info = inspect(ac)
existing = path.entity_path[prop].get(
self.context, "path_with_polymorphic"
)
if not ext_info.is_aliased_class:
ac = orm_util.with_polymorphic(
ext_info.mapper.base_mapper,
ext_info.mapper,
aliased=True,
_use_mapper_path=True,
_existing_alias=existing,
)
ext_info = inspect(ac)
path.entity_path[prop].set(
self.context, "path_with_polymorphic", ext_info
)
path = path[prop][ext_info]
self._of_type = of_type_info
else:
path = path[prop]
if for_strategy is not None:
found_property._get_strategy(for_strategy)
if path.has_entity:
path = path.entity_path
self.path = path
return path
def __str__(self):
return "Load(strategy=%r)" % (self.strategy,)
def _coerce_strat(self, strategy):
if strategy is not None:
strategy = tuple(sorted(strategy.items()))
return strategy
def _apply_to_parent(self, parent, applied, bound):
raise NotImplementedError(
"Only 'unbound' loader options may be used with the "
"Load.options() method"
)
@_generative
def options(self, *opts):
r"""Apply a series of options as sub-options to this :class:`.Load`
object.
E.g.::
query = session.query(Author)
query = query.options(
joinedload(Author.book).options(
load_only("summary", "excerpt"),
joinedload(Book.citations).options(
joinedload(Citation.author)
)
)
)
:param \*opts: A series of loader option objects (ultimately
:class:`.Load` objects) which should be applied to the path
specified by this :class:`.Load` object.
.. versionadded:: 1.3.6
.. seealso::
:func:`.defaultload`
:ref:`relationship_loader_options`
:ref:`deferred_loading_w_multiple`
"""
apply_cache = {}
bound = not isinstance(self, _UnboundLoad)
if bound:
raise NotImplementedError(
"The options() method is currently only supported "
"for 'unbound' loader options"
)
for opt in opts:
opt._apply_to_parent(self, apply_cache, bound)
@_generative
def set_relationship_strategy(
self, attr, strategy, propagate_to_loaders=True
):
strategy = self._coerce_strat(strategy)
self.is_class_strategy = False
self.propagate_to_loaders = propagate_to_loaders
# if the path is a wildcard, this will set propagate_to_loaders=False
self._generate_path(self.path, attr, strategy, "relationship")
self.strategy = strategy
if strategy is not None:
self._set_path_strategy()
@_generative
def set_column_strategy(self, attrs, strategy, opts=None, opts_only=False):
strategy = self._coerce_strat(strategy)
self.is_class_strategy = False
for attr in attrs:
cloned = self._generate()
cloned.strategy = strategy
cloned._generate_path(self.path, attr, strategy, "column")
cloned.propagate_to_loaders = True
if opts:
cloned.local_opts.update(opts)
if opts_only:
cloned.is_opts_only = True
cloned._set_path_strategy()
self.is_class_strategy = False
@_generative
def set_generic_strategy(self, attrs, strategy):
strategy = self._coerce_strat(strategy)
for attr in attrs:
path = self._generate_path(self.path, attr, strategy, None)
cloned = self._generate()
cloned.strategy = strategy
cloned.path = path
cloned.propagate_to_loaders = True
cloned._set_path_strategy()
@_generative
def set_class_strategy(self, strategy, opts):
strategy = self._coerce_strat(strategy)
cloned = self._generate()
cloned.is_class_strategy = True
path = cloned._generate_path(self.path, None, strategy, None)
cloned.strategy = strategy
cloned.path = path
cloned.propagate_to_loaders = True
cloned._set_path_strategy()
cloned.local_opts.update(opts)
def _set_for_path(self, context, path, replace=True, merge_opts=False):
if merge_opts or not replace:
existing = path.get(self.context, "loader")
if existing:
if merge_opts:
existing.local_opts.update(self.local_opts)
else:
path.set(context, "loader", self)
else:
existing = path.get(self.context, "loader")
path.set(context, "loader", self)
if existing and existing.is_opts_only:
self.local_opts.update(existing.local_opts)
def _set_path_strategy(self):
if not self.is_class_strategy and self.path.has_entity:
effective_path = self.path.parent
else:
effective_path = self.path
if effective_path.is_token:
for path in effective_path.generate_for_superclasses():
self._set_for_path(
self.context,
path,
replace=True,
merge_opts=self.is_opts_only,
)
else:
self._set_for_path(
self.context,
effective_path,
replace=True,
merge_opts=self.is_opts_only,
)
def __getstate__(self):
d = self.__dict__.copy()
d["path"] = self.path.serialize()
return d
def __setstate__(self, state):
self.__dict__.update(state)
self.path = PathRegistry.deserialize(self.path)
def _chop_path(self, to_chop, path):
i = -1
for i, (c_token, p_token) in enumerate(zip(to_chop, path.path)):
if isinstance(c_token, util.string_types):
# TODO: this is approximated from the _UnboundLoad
# version and probably has issues, not fully covered.
if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
return to_chop
elif (
c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
and c_token != p_token.key
):
return None
if c_token is p_token:
continue
elif (
isinstance(c_token, InspectionAttr)
and c_token.is_mapper
and p_token.is_mapper
and c_token.isa(p_token)
):
continue
else:
return None
return to_chop[i + 1 :]
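# A small, self-contained usage sketch (not part of the SQLAlchemy source): it
# exercises the public Load() entry point described in the class docstring
# above against a throwaway in-memory model.  The `Widget` class and its column
# names are illustrative only.
def _example_load_option():
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import Session
    Base = declarative_base()
    class Widget(Base):
        __tablename__ = "widget"
        id = Column(Integer, primary_key=True)
        name = Column(String)
        notes = Column(String)
    engine = create_engine("sqlite://")
    Base.metadata.create_all(engine)
    session = Session(bind=engine)
    # Defer the "notes" column only for the Widget entity.
    opt = Load(Widget).defer("notes")
    return session.query(Widget).options(opt).all()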
class _UnboundLoad(Load):
"""Represent a loader option that isn't tied to a root entity.
The loader option will produce an entity-linked :class:`.Load`
object when it is passed :meth:`.Query.options`.
This provides compatibility with the traditional system
of freestanding options, e.g. ``joinedload('x.y.z')``.
"""
def __init__(self):
self.path = ()
self._to_bind = []
self.local_opts = {}
_is_chain_link = False
def _generate_cache_key(self, path):
serialized = ()
for val in self._to_bind:
for local_elem, val_elem in zip(self.path, val.path):
if local_elem is not val_elem:
break
else:
opt = val._bind_loader([path.path[0]], None, None, False)
if opt:
c_key = opt._generate_cache_key(path)
if c_key is False:
return False
elif c_key:
serialized += c_key
if not serialized:
return None
else:
return serialized
def _set_path_strategy(self):
self._to_bind.append(self)
def _apply_to_parent(self, parent, applied, bound):
if self in applied:
return applied[self]
cloned = self._generate()
applied[self] = cloned
cloned.strategy = self.strategy
if self.path:
attr = self.path[-1]
if isinstance(attr, util.string_types) and attr.endswith(
_DEFAULT_TOKEN
):
attr = attr.split(":")[0] + ":" + _WILDCARD_TOKEN
cloned._generate_path(
parent.path + self.path[0:-1], attr, self.strategy, None
)
# these assertions can go away once the "sub options" API is
# mature
assert cloned.propagate_to_loaders == self.propagate_to_loaders
assert cloned.is_class_strategy == self.is_class_strategy
assert cloned.is_opts_only == self.is_opts_only
new_to_bind = {
elem._apply_to_parent(parent, applied, bound)
for elem in self._to_bind
}
cloned._to_bind = parent._to_bind
cloned._to_bind.extend(new_to_bind)
cloned.local_opts.update(self.local_opts)
return cloned
def _generate_path(self, path, attr, for_strategy, wildcard_key):
if (
wildcard_key
and isinstance(attr, util.string_types)
and attr in (_WILDCARD_TOKEN, _DEFAULT_TOKEN)
):
if attr == _DEFAULT_TOKEN:
self.propagate_to_loaders = False
attr = "%s:%s" % (wildcard_key, attr)
if path and _is_mapped_class(path[-1]) and not self.is_class_strategy:
path = path[0:-1]
if attr:
path = path + (attr,)
self.path = path
return path
def __getstate__(self):
d = self.__dict__.copy()
d["path"] = self._serialize_path(self.path, filter_aliased_class=True)
return d
def __setstate__(self, state):
ret = []
for key in state["path"]:
if isinstance(key, tuple):
if len(key) == 2:
# support legacy
cls, propkey = key
of_type = None
else:
cls, propkey, of_type = key
prop = getattr(cls, propkey)
if of_type:
prop = prop.of_type(of_type)
ret.append(prop)
else:
ret.append(key)
state["path"] = tuple(ret)
self.__dict__ = state
def _process(self, query, raiseerr):
dedupes = query._attributes["_unbound_load_dedupes"]
for val in self._to_bind:
if val not in dedupes:
dedupes.add(val)
val._bind_loader(
[ent.entity_zero for ent in query._mapper_entities],
query._current_path,
query._attributes,
raiseerr,
)
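    # _from_keys turns dotted string/attribute paths into a chain of options,
    # e.g. joinedload("a.b.c") becomes roughly
    # defaultload("a").defaultload("b").joinedload("c"), while the chained
    # (*_all) variants apply the strategy to every link in the chain.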
@classmethod
def _from_keys(cls, meth, keys, chained, kw):
opt = _UnboundLoad()
def _split_key(key):
if isinstance(key, util.string_types):
# coerce fooload('*') into "default loader strategy"
if key == _WILDCARD_TOKEN:
return (_DEFAULT_TOKEN,)
# coerce fooload(".*") into "wildcard on default entity"
elif key.startswith("." + _WILDCARD_TOKEN):
key = key[1:]
return key.split(".")
else:
return (key,)
all_tokens = [token for key in keys for token in _split_key(key)]
for token in all_tokens[0:-1]:
if chained:
opt = meth(opt, token, **kw)
else:
opt = opt.defaultload(token)
opt._is_chain_link = True
opt = meth(opt, all_tokens[-1], **kw)
opt._is_chain_link = False
return opt
def _chop_path(self, to_chop, path):
i = -1
for i, (c_token, (p_entity, p_prop)) in enumerate(
zip(to_chop, path.pairs())
):
if isinstance(c_token, util.string_types):
if i == 0 and c_token.endswith(":" + _DEFAULT_TOKEN):
return to_chop
elif (
c_token != "relationship:%s" % (_WILDCARD_TOKEN,)
and c_token != p_prop.key
):
return None
elif isinstance(c_token, PropComparator):
if c_token.property is not p_prop or (
c_token._parententity is not p_entity
and (
not c_token._parententity.is_mapper
or not c_token._parententity.isa(p_entity)
)
):
return None
else:
i += 1
return to_chop[i:]
def _serialize_path(self, path, filter_aliased_class=False):
ret = []
for token in path:
if isinstance(token, QueryableAttribute):
if (
filter_aliased_class
and token._of_type
and inspect(token._of_type).is_aliased_class
):
ret.append((token._parentmapper.class_, token.key, None))
else:
ret.append(
(token._parentmapper.class_, token.key, token._of_type)
)
elif isinstance(token, PropComparator):
ret.append((token._parentmapper.class_, token.key, None))
else:
ret.append(token)
return ret
def _bind_loader(self, entities, current_path, context, raiseerr):
"""Convert from an _UnboundLoad() object into a Load() object.
The _UnboundLoad() uses an informal "path" and does not necessarily
refer to a lead entity as it may use string tokens. The Load()
OTOH refers to a complete path. This method reconciles from a
given Query into a Load.
Example::
query = session.query(User).options(
joinedload("orders").joinedload("items"))
The above options will be an _UnboundLoad object along the lines
of (note this is not the exact API of _UnboundLoad)::
_UnboundLoad(
_to_bind=[
_UnboundLoad(["orders"], {"lazy": "joined"}),
_UnboundLoad(["orders", "items"], {"lazy": "joined"}),
]
)
After this method, we get something more like this (again this is
not exact API)::
Load(
User,
(User, User.orders.property))
Load(
User,
(User, User.orders.property, Order, Order.items.property))
"""
start_path = self.path
if self.is_class_strategy and current_path:
start_path += (entities[0],)
# _current_path implies we're in a
# secondary load with an existing path
if current_path:
start_path = self._chop_path(start_path, current_path)
if not start_path:
return None
# look at the first token and try to locate within the Query
# what entity we are referring towards.
token = start_path[0]
if isinstance(token, util.string_types):
entity = self._find_entity_basestring(entities, token, raiseerr)
elif isinstance(token, PropComparator):
prop = token.property
entity = self._find_entity_prop_comparator(
entities, prop, token._parententity, raiseerr
)
elif self.is_class_strategy and _is_mapped_class(token):
entity = inspect(token)
if entity not in entities:
entity = None
else:
raise sa_exc.ArgumentError(
"mapper option expects " "string key or list of attributes"
)
if not entity:
return
path_element = entity
# transfer our entity-less state into a Load() object
# with a real entity path. Start with the lead entity
# we just located, then go through the rest of our path
# tokens and populate into the Load().
loader = Load(path_element)
if context is not None:
loader.context = context
else:
context = loader.context
loader.strategy = self.strategy
loader.is_opts_only = self.is_opts_only
loader.is_class_strategy = self.is_class_strategy
path = loader.path
if not loader.is_class_strategy:
for idx, token in enumerate(start_path):
if not loader._generate_path(
loader.path,
token,
self.strategy if idx == len(start_path) - 1 else None,
None,
raiseerr,
):
return
loader.local_opts.update(self.local_opts)
if not loader.is_class_strategy and loader.path.has_entity:
effective_path = loader.path.parent
else:
effective_path = loader.path
# prioritize "first class" options over those
# that were "links in the chain", e.g. "x" and "y" in
# someload("x.y.z") versus someload("x") / someload("x.y")
if effective_path.is_token:
for path in effective_path.generate_for_superclasses():
loader._set_for_path(
context,
path,
replace=not self._is_chain_link,
merge_opts=self.is_opts_only,
)
else:
loader._set_for_path(
context,
effective_path,
replace=not self._is_chain_link,
merge_opts=self.is_opts_only,
)
return loader
def _find_entity_prop_comparator(self, entities, prop, mapper, raiseerr):
if _is_aliased_class(mapper):
searchfor = mapper
else:
searchfor = _class_to_mapper(mapper)
for ent in entities:
if orm_util._entity_corresponds_to(ent, searchfor):
return ent
else:
if raiseerr:
if not list(entities):
raise sa_exc.ArgumentError(
"Query has only expression-based entities, "
'which do not apply to %s "%s"'
% (util.clsname_as_plain_name(type(prop)), prop)
)
else:
raise sa_exc.ArgumentError(
'Mapped attribute "%s" does not apply to any of the '
"root entities in this query, e.g. %s. Please "
"specify the full path "
"from one of the root entities to the target "
"attribute. "
% (prop, ", ".join(str(x) for x in entities))
)
else:
return None
def _find_entity_basestring(self, entities, token, raiseerr):
if token.endswith(":" + _WILDCARD_TOKEN):
if len(list(entities)) != 1:
if raiseerr:
raise sa_exc.ArgumentError(
"Can't apply wildcard ('*') or load_only() "
"loader option to multiple entities %s. Specify "
"loader options for each entity individually, such "
"as %s."
% (
", ".join(str(ent) for ent in entities),
", ".join(
"Load(%s).some_option('*')" % ent
for ent in entities
),
)
)
elif token.endswith(_DEFAULT_TOKEN):
raiseerr = False
for ent in entities:
# return only the first _MapperEntity when searching
# based on string prop name. Ideally object
# attributes are used to specify more exactly.
return ent
else:
if raiseerr:
raise sa_exc.ArgumentError(
"Query has only expression-based entities - "
'can\'t find property named "%s".' % (token,)
)
else:
return None
class loader_option(object):
def __init__(self):
pass
def __call__(self, fn):
self.name = name = fn.__name__
self.fn = fn
if hasattr(Load, name):
raise TypeError("Load class already has a %s method." % (name))
setattr(Load, name, fn)
return self
def _add_unbound_fn(self, fn):
self._unbound_fn = fn
fn_doc = self.fn.__doc__
self.fn.__doc__ = """Produce a new :class:`.Load` object with the
:func:`.orm.%(name)s` option applied.
See :func:`.orm.%(name)s` for usage examples.
""" % {
"name": self.name
}
fn.__doc__ = fn_doc
return self
def _add_unbound_all_fn(self, fn):
fn.__doc__ = """Produce a standalone "all" option for :func:`.orm.%(name)s`.
.. deprecated:: 0.9
The :func:`.%(name)s_all` function is deprecated, and will be removed
in a future release. Please use method chaining with :func:`.%(name)s`
instead, as in::
session.query(MyClass).options(
%(name)s("someattribute").%(name)s("anotherattribute")
)
""" % {
"name": self.name
}
fn = util.deprecated(
"0.9",
"The :func:`.%(name)s_all` function is deprecated, and will be "
"removed in a future release. Please use method chaining with "
":func:`.%(name)s` instead" % {"name": self.name},
add_deprecation_to_docstring=False,
)(fn)
self._unbound_all_fn = fn
return self
@loader_option()
def contains_eager(loadopt, attr, alias=None):
r"""Indicate that the given attribute should be eagerly loaded from
columns stated manually in the query.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
The option is used in conjunction with an explicit join that loads
the desired rows, i.e.::
sess.query(Order).\
join(Order.user).\
options(contains_eager(Order.user))
The above query would join from the ``Order`` entity to its related
``User`` entity, and the returned ``Order`` objects would have the
``Order.user`` attribute pre-populated.
:func:`.contains_eager` also accepts an `alias` argument, which is the
string name of an alias, an :func:`~sqlalchemy.sql.expression.alias`
construct, or an :func:`~sqlalchemy.orm.aliased` construct. Use this when
the eagerly-loaded rows are to come from an aliased table::
user_alias = aliased(User)
sess.query(Order).\
join((user_alias, Order.user)).\
options(contains_eager(Order.user, alias=user_alias))
When using :func:`.contains_eager` in conjunction with inherited
subclasses, the :meth:`.RelationshipProperty.of_type` modifier should
also be used in order to set up the pathing properly::
sess.query(Company).\
outerjoin(Company.employees.of_type(Manager)).\
options(
contains_eager(
Company.employees.of_type(Manager),
alias=Manager)
)
.. seealso::
:ref:`loading_toplevel`
:ref:`contains_eager`
"""
if alias is not None:
if not isinstance(alias, str):
info = inspect(alias)
alias = info.selectable
elif getattr(attr, "_of_type", None):
ot = inspect(attr._of_type)
alias = ot.selectable
cloned = loadopt.set_relationship_strategy(
attr, {"lazy": "joined"}, propagate_to_loaders=False
)
cloned.local_opts["eager_from_alias"] = alias
return cloned
@contains_eager._add_unbound_fn
def contains_eager(*keys, **kw):
return _UnboundLoad()._from_keys(
_UnboundLoad.contains_eager, keys, True, kw
)
@loader_option()
def load_only(loadopt, *attrs):
"""Indicate that for a particular entity, only the given list
of column-based attribute names should be loaded; all others will be
deferred.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
Example - given a class ``User``, load only the ``name`` and ``fullname``
attributes::
session.query(User).options(load_only("name", "fullname"))
Example - given a relationship ``User.addresses -> Address``, specify
subquery loading for the ``User.addresses`` collection, but on each
``Address`` object load only the ``email_address`` attribute::
session.query(User).options(
subqueryload("addresses").load_only("email_address")
)
For a :class:`.Query` that has multiple entities, the lead entity can be
specifically referred to using the :class:`.Load` constructor::
session.query(User, Address).join(User.addresses).options(
Load(User).load_only("name", "fullname"),
            Load(Address).load_only("email_address")
)
.. versionadded:: 0.9.0
"""
cloned = loadopt.set_column_strategy(
attrs, {"deferred": False, "instrument": True}
)
cloned.set_column_strategy(
"*", {"deferred": True, "instrument": True}, {"undefer_pks": True}
)
return cloned
@load_only._add_unbound_fn
def load_only(*attrs):
return _UnboundLoad().load_only(*attrs)
@loader_option()
def joinedload(loadopt, attr, innerjoin=None):
"""Indicate that the given attribute should be loaded using joined
eager loading.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
examples::
# joined-load the "orders" collection on "User"
query(User).options(joinedload(User.orders))
# joined-load Order.items and then Item.keywords
query(Order).options(
joinedload(Order.items).joinedload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# joined-load the keywords collection
query(Order).options(
lazyload(Order.items).joinedload(Item.keywords))
:param innerjoin: if ``True``, indicates that the joined eager load should
use an inner join instead of the default of left outer join::
query(Order).options(joinedload(Order.user, innerjoin=True))
In order to chain multiple eager joins together where some may be
OUTER and others INNER, right-nested joins are used to link them::
query(A).options(
joinedload(A.bs, innerjoin=False).
joinedload(B.cs, innerjoin=True)
)
The above query, linking A.bs via "outer" join and B.cs via "inner" join
would render the joins as "a LEFT OUTER JOIN (b JOIN c)". When using
older versions of SQLite (< 3.7.16), this form of JOIN is translated to
use full subqueries as this syntax is otherwise not directly supported.
The ``innerjoin`` flag can also be stated with the term ``"unnested"``.
This indicates that an INNER JOIN should be used, *unless* the join
is linked to a LEFT OUTER JOIN to the left, in which case it
will render as LEFT OUTER JOIN. For example, supposing ``A.bs``
is an outerjoin::
query(A).options(
joinedload(A.bs).
joinedload(B.cs, innerjoin="unnested")
)
The above join will render as "a LEFT OUTER JOIN b LEFT OUTER JOIN c",
rather than as "a LEFT OUTER JOIN (b JOIN c)".
.. note:: The "unnested" flag does **not** affect the JOIN rendered
from a many-to-many association table, e.g. a table configured
as :paramref:`.relationship.secondary`, to the target table; for
correctness of results, these joins are always INNER and are
therefore right-nested if linked to an OUTER join.
.. versionchanged:: 1.0.0 ``innerjoin=True`` now implies
``innerjoin="nested"``, whereas in 0.9 it implied
``innerjoin="unnested"``. In order to achieve the pre-1.0 "unnested"
inner join behavior, use the value ``innerjoin="unnested"``.
See :ref:`migration_3008`.
.. note::
The joins produced by :func:`.orm.joinedload` are **anonymously
aliased**. The criteria by which the join proceeds cannot be
modified, nor can the :class:`.Query` refer to these joins in any way,
including ordering. See :ref:`zen_of_eager_loading` for further
detail.
To produce a specific SQL JOIN which is explicitly available, use
:meth:`.Query.join`. To combine explicit JOINs with eager loading
of collections, use :func:`.orm.contains_eager`; see
:ref:`contains_eager`.
.. seealso::
:ref:`loading_toplevel`
:ref:`joined_eager_loading`
"""
loader = loadopt.set_relationship_strategy(attr, {"lazy": "joined"})
if innerjoin is not None:
loader.local_opts["innerjoin"] = innerjoin
return loader
@joinedload._add_unbound_fn
def joinedload(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.joinedload, keys, False, kw)
@joinedload._add_unbound_all_fn
def joinedload_all(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.joinedload, keys, True, kw)
@loader_option()
def subqueryload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
subquery eager loading.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
examples::
# subquery-load the "orders" collection on "User"
query(User).options(subqueryload(User.orders))
# subquery-load Order.items and then Item.keywords
query(Order).options(
subqueryload(Order.items).subqueryload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# subquery-load the keywords collection
query(Order).options(
lazyload(Order.items).subqueryload(Item.keywords))
.. seealso::
:ref:`loading_toplevel`
:ref:`subquery_eager_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "subquery"})
@subqueryload._add_unbound_fn
def subqueryload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, False, {})
@subqueryload._add_unbound_all_fn
def subqueryload_all(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.subqueryload, keys, True, {})
@loader_option()
def selectinload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
SELECT IN eager loading.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
examples::
# selectin-load the "orders" collection on "User"
query(User).options(selectinload(User.orders))
# selectin-load Order.items and then Item.keywords
query(Order).options(
selectinload(Order.items).selectinload(Item.keywords))
# lazily load Order.items, but when Items are loaded,
# selectin-load the keywords collection
query(Order).options(
lazyload(Order.items).selectinload(Item.keywords))
.. versionadded:: 1.2
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "selectin"})
@selectinload._add_unbound_fn
def selectinload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.selectinload, keys, False, {})
@selectinload._add_unbound_all_fn
def selectinload_all(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.selectinload, keys, True, {})
@loader_option()
def lazyload(loadopt, attr):
"""Indicate that the given attribute should be loaded using "lazy"
loading.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
.. seealso::
:ref:`loading_toplevel`
:ref:`lazy_loading`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "select"})
@lazyload._add_unbound_fn
def lazyload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, False, {})
@lazyload._add_unbound_all_fn
def lazyload_all(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.lazyload, keys, True, {})
@loader_option()
def immediateload(loadopt, attr):
"""Indicate that the given attribute should be loaded using
an immediate load with a per-attribute SELECT statement.
The :func:`.immediateload` option is superseded in general
by the :func:`.selectinload` option, which performs the same task
more efficiently by emitting a SELECT for all loaded objects.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
.. seealso::
:ref:`loading_toplevel`
:ref:`selectin_eager_loading`
"""
loader = loadopt.set_relationship_strategy(attr, {"lazy": "immediate"})
return loader
@immediateload._add_unbound_fn
def immediateload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.immediateload, keys, False, {})
@loader_option()
def noload(loadopt, attr):
"""Indicate that the given relationship attribute should remain unloaded.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
:func:`.orm.noload` applies to :func:`.relationship` attributes; for
column-based attributes, see :func:`.orm.defer`.
.. seealso::
:ref:`loading_toplevel`
"""
return loadopt.set_relationship_strategy(attr, {"lazy": "noload"})
@noload._add_unbound_fn
def noload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.noload, keys, False, {})
@loader_option()
def raiseload(loadopt, attr, sql_only=False):
"""Indicate that the given relationship attribute should disallow lazy loads.
A relationship attribute configured with :func:`.orm.raiseload` will
raise an :exc:`~sqlalchemy.exc.InvalidRequestError` upon access. The
typical way this is useful is when an application is attempting to ensure
that all relationship attributes that are accessed in a particular context
would have been already loaded via eager loading. Instead of having
to read through SQL logs to ensure lazy loads aren't occurring, this
strategy will cause them to raise immediately.
:param sql_only: if True, raise only if the lazy load would emit SQL,
but not if it is only checking the identity map, or determining that
the related value should just be None due to missing keys. When False,
the strategy will raise for all varieties of lazyload.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
:func:`.orm.raiseload` applies to :func:`.relationship` attributes only.
.. versionadded:: 1.1
.. seealso::
:ref:`loading_toplevel`
:ref:`prevent_lazy_with_raiseload`
"""
return loadopt.set_relationship_strategy(
attr, {"lazy": "raise_on_sql" if sql_only else "raise"}
)
@raiseload._add_unbound_fn
def raiseload(*keys, **kw):
return _UnboundLoad._from_keys(_UnboundLoad.raiseload, keys, False, kw)
@loader_option()
def defaultload(loadopt, attr):
"""Indicate an attribute should load using its default loader style.
This method is used to link to other loader options further into
a chain of attributes without altering the loader style of the links
along the chain. For example, to set joined eager loading for an
element of an element::
session.query(MyClass).options(
defaultload(MyClass.someattribute).
joinedload(MyOtherClass.someotherattribute)
)
:func:`.defaultload` is also useful for setting column-level options
on a related class, namely that of :func:`.defer` and :func:`.undefer`::
session.query(MyClass).options(
defaultload(MyClass.someattribute).
defer("some_column").
undefer("some_other_column")
)
.. seealso::
:meth:`.Load.options` - allows for complex hierarchical
loader option structures with less verbosity than with individual
:func:`.defaultload` directives.
:ref:`relationship_loader_options`
:ref:`deferred_loading_w_multiple`
"""
return loadopt.set_relationship_strategy(attr, None)
@defaultload._add_unbound_fn
def defaultload(*keys):
return _UnboundLoad._from_keys(_UnboundLoad.defaultload, keys, False, {})
@loader_option()
def defer(loadopt, key):
r"""Indicate that the given column-oriented attribute should be deferred,
e.g. not loaded until accessed.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
e.g.::
from sqlalchemy.orm import defer
session.query(MyClass).options(
defer("attribute_one"),
defer("attribute_two"))
session.query(MyClass).options(
defer(MyClass.attribute_one),
defer(MyClass.attribute_two))
To specify a deferred load of an attribute on a related class,
the path can be specified one token at a time, specifying the loading
style for each link along the chain. To leave the loading style
for a link unchanged, use :func:`.orm.defaultload`::
session.query(MyClass).options(defaultload("someattr").defer("some_column"))
A :class:`.Load` object that is present on a certain path can have
:meth:`.Load.defer` called multiple times, each will operate on the same
parent entity::
session.query(MyClass).options(
defaultload("someattr").
defer("some_column").
defer("some_other_column").
defer("another_column")
)
:param key: Attribute to be deferred.
:param \*addl_attrs: This option supports the old 0.8 style
of specifying a path as a series of attributes, which is now superseded
by the method-chained style.
.. deprecated:: 0.9 The \*addl_attrs on :func:`.orm.defer` is
deprecated and will be removed in a future release. Please
use method chaining in conjunction with defaultload() to
indicate a path.
.. seealso::
:ref:`deferred`
:func:`.orm.undefer`
"""
return loadopt.set_column_strategy(
(key,), {"deferred": True, "instrument": True}
)
@defer._add_unbound_fn
def defer(key, *addl_attrs):
if addl_attrs:
util.warn_deprecated(
"The *addl_attrs on orm.defer is deprecated. Please use "
"method chaining in conjunction with defaultload() to "
"indicate a path."
)
return _UnboundLoad._from_keys(
_UnboundLoad.defer, (key,) + addl_attrs, False, {}
)
@loader_option()
def undefer(loadopt, key):
r"""Indicate that the given column-oriented attribute should be undeferred,
e.g. specified within the SELECT statement of the entity as a whole.
The column being undeferred is typically set up on the mapping as a
:func:`.deferred` attribute.
This function is part of the :class:`.Load` interface and supports
both method-chained and standalone operation.
Examples::
# undefer two columns
session.query(MyClass).options(undefer("col1"), undefer("col2"))
# undefer all columns specific to a single class using Load + *
session.query(MyClass, MyOtherClass).options(
Load(MyClass).undefer("*"))
# undefer a column on a related object
session.query(MyClass).options(
defaultload(MyClass.items).undefer('text'))
:param key: Attribute to be undeferred.
:param \*addl_attrs: This option supports the old 0.8 style
of specifying a path as a series of attributes, which is now superseded
by the method-chained style.
.. deprecated:: 0.9 The \*addl_attrs on :func:`.orm.undefer` is
deprecated and will be removed in a future release. Please
use method chaining in conjunction with defaultload() to
indicate a path.
.. seealso::
:ref:`deferred`
:func:`.orm.defer`
:func:`.orm.undefer_group`
"""
return loadopt.set_column_strategy(
(key,), {"deferred": False, "instrument": True}
)
@undefer._add_unbound_fn
def undefer(key, *addl_attrs):
if addl_attrs:
util.warn_deprecated(
"The *addl_attrs on orm.undefer is deprecated. Please use "
"method chaining in conjunction with defaultload() to "
"indicate a path."
)
return _UnboundLoad._from_keys(
_UnboundLoad.undefer, (key,) + addl_attrs, False, {}
)
@loader_option()
def undefer_group(loadopt, name):
"""Indicate that columns within the given deferred group name should be
undeferred.
The columns being undeferred are set up on the mapping as
:func:`.deferred` attributes and include a "group" name.
E.g::
session.query(MyClass).options(undefer_group("large_attrs"))
To undefer a group of attributes on a related entity, the path can be
spelled out using relationship loader options, such as
:func:`.orm.defaultload`::
session.query(MyClass).options(
defaultload("someattr").undefer_group("large_attrs"))
.. versionchanged:: 0.9.0 :func:`.orm.undefer_group` is now specific to a
particular entity load path.
.. seealso::
:ref:`deferred`
:func:`.orm.defer`
:func:`.orm.undefer`
"""
return loadopt.set_column_strategy(
"*", None, {"undefer_group_%s" % name: True}, opts_only=True
)
@undefer_group._add_unbound_fn
def undefer_group(name):
return _UnboundLoad().undefer_group(name)
@loader_option()
def with_expression(loadopt, key, expression):
r"""Apply an ad-hoc SQL expression to a "deferred expression" attribute.
This option is used in conjunction with the :func:`.orm.query_expression`
mapper-level construct that indicates an attribute which should be the
target of an ad-hoc SQL expression.
E.g.::
sess.query(SomeClass).options(
with_expression(SomeClass.x_y_expr, SomeClass.x + SomeClass.y)
)
.. versionadded:: 1.2
:param key: Attribute to be undeferred.
    :param expression: SQL expression to be applied to the attribute.
.. seealso::
:ref:`mapper_query_expression`
"""
expression = coercions.expect(
roles.LabeledColumnExprRole, _orm_full_deannotate(expression)
)
return loadopt.set_column_strategy(
(key,), {"query_expression": True}, opts={"expression": expression}
)
@with_expression._add_unbound_fn
def with_expression(key, expression):
return _UnboundLoad._from_keys(
_UnboundLoad.with_expression, (key,), False, {"expression": expression}
)
@loader_option()
def selectin_polymorphic(loadopt, classes):
"""Indicate an eager load should take place for all attributes
specific to a subclass.
This uses an additional SELECT with IN against all matched primary
key values, and is the per-query analogue to the ``"selectin"``
setting on the :paramref:`.mapper.polymorphic_load` parameter.
.. versionadded:: 1.2
.. seealso::
:ref:`inheritance_polymorphic_load`
"""
loadopt.set_class_strategy(
{"selectinload_polymorphic": True},
opts={
"entities": tuple(
sorted((inspect(cls) for cls in classes), key=id)
)
},
)
return loadopt
@selectin_polymorphic._add_unbound_fn
def selectin_polymorphic(base_cls, classes):
ul = _UnboundLoad()
ul.is_class_strategy = True
ul.path = (inspect(base_cls),)
ul.selectin_polymorphic(classes)
return ul
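# Illustrative usage sketch (not in the original source), assuming an Employee
# base class mapped with Manager and Engineer subclasses:
#
#     session.query(Employee).options(
#         selectin_polymorphic(Employee, [Manager, Engineer])
#     )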
|
|
#!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
Module containing the logic of the game
"""
from numpy.core import vstack
from numpy.core.numeric import array, zeros, fromiter
from numpy.core.numerictypes import int8, int16
from matplotlib.pylab import flatten
from itertools import ifilter, product
import copy as cp
import os
import pickle
from blokus3d.utils import emptyIter, fold, unik
from blokus3d.block import nbBlocks, adjacentCoords, containsCube, blocksVar,\
blockVarWithOrigin, argsortBlocks, blocks, includesCube
class GameSettings(object):
def __init__(self, nbPlayers):
self.nbPlayers = nbPlayers
self.boardSize = (5, 4, 2*nbPlayers if nbPlayers < 4 else 8)
self.xycoords = list(product(xrange(self.boardSize[0]),xrange(self.boardSize[1])))
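# Illustrative sketch (not part of the original module): a 2-player game uses a
# 5x4 board that is 4 cubes high, a 4-player game one that is 8 cubes high.
def _exampleSettings():
    settings = GameSettings(2)
    assert settings.boardSize == (5, 4, 4)
    return settings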
class GameState(object):
def __init__(self, settings, playerBlocks, board, nextPlayer=0, firstToPass=None):
self.nbPlayers = settings.nbPlayers
self.boardSize = settings.boardSize
self.xycoords = settings.xycoords
assert len(playerBlocks)==self.nbPlayers
self.settings = settings
self.playerBlocks = playerBlocks
self.board = board
self.nextPlayer = nextPlayer # next player to play
self.firstToPass = firstToPass
@classmethod
def initState(cls, settings):
"""Returns a new GameState that corresponds to
the beginning of the game"""
return GameState( \
settings,
list(range(nbBlocks) for _ in xrange(settings.nbPlayers)), \
cls.initBoard(settings.boardSize))
@classmethod
def initBoard(cls, boardSize):
return [[[] for _ in xrange(boardSize[1])] for _ in xrange(boardSize[0])]
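    # __uniqueid__ (below) encodes the remaining blocks and the board contents
    # relative to the player to move; legalMoves() uses it as the key of the
    # legalMovesDic cache.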
def __uniqueid__(self):
# Create an order of players such that
# the nextPlayer is 0
order = fromiter((player % self.nbPlayers \
for player in xrange(self.nextPlayer, self.nextPlayer+4)),\
dtype=int8)
remainingBlocks = sum( \
(p*(2**nbBlocks) \
+ sum(2**b for b in self.playerBlocks[p])) \
for p in order)
board = tuple(tuple(tuple(order[h] for h in c)\
for c in l) for l in self.board)
return (remainingBlocks, board)
def height(self,xy):
assert len(xy)==2
assert xy[0]>=0 and xy[0] < self.boardSize[0]
assert xy[1]>=0 and xy[1] < self.boardSize[1]
return len(self.board[xy[0]][xy[1]])
def heightMap(self):
return tuple(tuple(self.height([x,y]) \
for y in xrange(self.boardSize[1])) \
for x in xrange(self.boardSize[0]))
def emptyCoords(self,coords):
"""Returns True iff the coordinates are within the board
and empty of any cube"""
assert len(coords)==3
return coords[0] >= 0 and coords[0] < self.boardSize[0] and coords[1] >= 0 \
and coords[1] < self.boardSize[1] and coords[2] < self.boardSize[2] \
and coords[2] >= self.height(coords[:2])
def adjToPlayers(self,players):
"""Returns the coordinates of grounded (not floating)
empty cubes adjacent to given players' cubes"""
# Get liberty cubes
L = self.libertyCubes(players)
if L==[]:
return L
# Remove all the "floating" cubes
# XXX maybe there's a cleaner way, using an iterator ?
L.reverse()
x, y, z = L[-1][0], L[-1][1], L[-1][2]
newL = [ L.pop() ]
while len(L) > 0:
e = L.pop()
if x==e[0] and y==e[1]:
assert e[2] > z
else:
newL.append(e)
x, y, z = e[0], e[1], e[2]
return newL
def libertyCubes(self,players):
"""Return the coordinates of empty cubes
adjacent to given players' cubes"""
L = []
for (x,y) in self.xycoords:
for z in xrange(self.height([x,y])):
if self.board[x][y][z] in players:
L.extend(filter(lambda coords : \
self.emptyCoords(coords), \
adjacentCoords+array([x,y,z])))
# remove duplicates
L = unik(L)
return L
def doesFit(self,blk,xyz):
"""Returns whether a block fits at a particular position
        on the board, checking that it is not over some empty space,
        but not that it is adjacent to another block."""
assert blk.shape[1] == 3
assert len(xyz) == 3
blkWithOrigin = vstack([blk,array([0,0,0])])
for cube in blkWithOrigin:
if not (self.emptyCoords(xyz+cube) \
and (self.height(xyz[:2]+cube[:2])==xyz[2]+cube[2] \
or containsCube(blkWithOrigin, \
cube-array([0,0,1])))):
return False
return True
def blockVarsThatFit(self,blkId,coords):
return filter(lambda i: self.doesFit(blocksVar[blkId][:,:,i],coords),\
xrange(blocksVar[blkId].shape[2]))
def legalCubes(self):
alreadyPlayed = not emptyIter( \
ifilter(lambda x : x == self.nextPlayer, \
flatten(self.board)))
# Has the next player already played?
if alreadyPlayed:
return self.adjToPlayers([self.nextPlayer])
# Has someone else already played?
elif not emptyIter(ifilter(lambda x: x != [],flatten(self.board))):
# Get the cubes adjacent to any player
return self.adjToPlayers(range(self.nbPlayers))
# Else, all floor cubes are legal
else:
return vstack([array([x,y,0]) for (x,y) in self.xycoords])
def legalMoves(self):
uid = self.__uniqueid__()
if uid in legalMovesDic:
return legalMovesDic[uid]
lc = self.legalCubes()
L = []
#duplicates = 0
for blkId in self.playerBlocks[self.nextPlayer]:
# Find all the variations that fit
# and make coords-blkVarId couple
coordsBlkVarIds = fold(list.__add__, [[(coords,blkVarId) \
for blkVarId in self.blockVarsThatFit(blkId,coords)]
for coords in lc])
# If there are some
if coordsBlkVarIds:
# We will eliminate duplicates
# For each coords-blkVarId pair, get the cube
# coordinates on the board
M = map(lambda (coords,blkVarId): \
coords+blockVarWithOrigin(blkId,blkVarId), coordsBlkVarIds)
N = iter(argsortBlocks(M))
lastOne = next(N)
# Add the first coords-blkVarId variation to the big list
L.append((coordsBlkVarIds[lastOne][0],\
blkId,coordsBlkVarIds[lastOne][1]))
# Go through all the coords-blkVarId variations
try:
while True:
nextToLastOne = next(N)
# If next one is not the same
if not (M[lastOne]==M[nextToLastOne]).all():
# Add the next one to the big list
L.append((coordsBlkVarIds[nextToLastOne][0],\
blkId,coordsBlkVarIds[nextToLastOne][1]))
#else:
# duplicates += 1
lastOne = nextToLastOne
except StopIteration:
pass
#print "%d duplicates" % duplicates
if L == []:
L = [None]
# Add it to the dictionary
legalMovesDic[uid] = L
return L
def legalMovesAsTuple(self):
"""For using UCT"""
return [(coords[0],coords[1],coords[2],blkId,blkVarId) \
for coords,blkId,blkVarId in self.legalMoves()]
def baseScores(self):
s = zeros(self.nbPlayers,dtype=int16)
for (x,y) in self.xycoords:
if self.height([x,y])>0:
s[self.board[x][y][-1]] += 1
return s
def penalty(self):
return list(sum(map(lambda x : blocks[x].shape[0], \
self.playerBlocks[player])) \
for player in xrange(self.nbPlayers))
def finalScores(self):
return self.baseScores() - self.penalty()
def isOver(self):
return self.firstToPass == self.nextPlayer
def assertValidMove(self,move):
assert len(move)==3
coords,blkId,blkVarId = move
assert len(coords)==3
assert blkId in self.playerBlocks[self.nextPlayer]
assert self.doesFit(blocksVar[blkId][:,:,blkVarId],coords)
return True
def playMove(self,move):
assert move == None or (len(move) == 3 and move[0].shape == (3,))
if self.firstToPass == self.nextPlayer:
# Game is over !
return self
if move == None:
if self.firstToPass == None:
self.firstToPass = self.nextPlayer
else:
assert self.assertValidMove(move)
coords,blkId,blkVarId = move
# Remove the block from the player's stock
self.playerBlocks[self.nextPlayer].remove(blkId)
blkWithOrigin = blockVarWithOrigin(blkId,blkVarId)
# Place the block on the board
for cube in blkWithOrigin:
assert cube[2]+coords[2]==self.height([cube[0]+coords[0],cube[1]+coords[1]])
self.board[cube[0]+coords[0]][cube[1]+coords[1]].append(self.nextPlayer)
# Break the "passing chain", if necessary
self.firstToPass = None
# Update the next player
self.nextPlayer = (self.nextPlayer+1) % self.nbPlayers
return self
def clone(self):
return GameState(self.settings,\
list(map(cp.copy,self.playerBlocks)),\
cp.deepcopy(self.board),\
nextPlayer=self.nextPlayer,\
firstToPass=self.firstToPass)
def boardToASCII(self, markedCubes=None, zRange=None):
if zRange == None:
zRange = xrange(self.boardSize[2])
if markedCubes == None:
markedCubes = []
s = ""
for z in zRange:
for y in xrange(self.boardSize[1]):
for x in xrange(self.boardSize[0]):
s += "x" if includesCube(markedCubes,array([x,y,z]))\
else ("." if self.height([x,y]) <= z \
else chr(self.board[x][y][z]+65))
s += "\n"
s += "\n"
return s
def __str__(self):
s = "Next player is %s\n" % chr(self.nextPlayer+65)
for player,blocksId in enumerate(self.playerBlocks):
s += "%s's blocks : %s\n" \
% (chr(player+65),\
','.join(map(lambda x : str(x+1),blocksId)))
s += "\n"+self.boardToASCII()
return s
def toASCII(self):
s = str(self.nextPlayer)+"\n"
for blocksId in self.playerBlocks:
s += str(blocksId).replace('[','').replace(']','')+"\n"
s += "\n"+self.boardToASCII()
return s
def showScores(self):
print "base scores ", self.baseScores()
print "penalties - ", self.penalty()
print "-------------------------------"
print "final scores = ", self.finalScores()
@classmethod
def fromASCII(cls, string):
# FIXME guess settings from the text !!
settings = GameSettings(2)
lines = string.split('\n')[::-1]
nextPlayer = int(lines.pop().rstrip())
playerBlocks = []
while True:
l = lines.pop().rstrip()
if l=="":
break
playerBlocks.append(map(int,l.split(',')))
assert settings.nbPlayers == len(playerBlocks)
board=[[[] for _ in xrange(settings.boardSize[1])] for _ in xrange(settings.boardSize[0])]
y = z = 0
while len(lines) > 0:
l = lines.pop().rstrip()
if l=="":
y = 0
z += 1
if z < settings.boardSize[2]:
continue
else:
break
x = 0
for c in l:
if c != '.':
p = ord(c)-65
assert len(board[x][y])==z
board[x][y].append(p)
x += 1
y += 1
return GameState(settings,playerBlocks,board,nextPlayer=nextPlayer)
def save(self,filename):
with open(filename,'w') as f:
return f.write(self.toASCII())
@classmethod
def load(cls,filename):
with open(filename,'r') as f:
return cls.fromASCII(f.read())
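# --- Usage sketch (not part of the original module) ---------------------------
# A minimal random playout built only on the GameState API defined above
# (initState, legalMoves, playMove, isOver, finalScores). GameSettings(2) is
# used the same way as in GameState.fromASCII; the function is illustrative
# and is never called here.
def randomPlayoutSketch(nbPlayers=2):
    """Play one game with uniformly random legal moves and return the final scores."""
    import random
    state = GameState.initState(GameSettings(nbPlayers))
    while not state.isOver():
        moves = state.legalMoves()  # [(coords, blkId, blkVarId), ...] or [None]
        state = state.playMove(random.choice(moves))
    return state.finalScores()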
def loadLegalMovesDic():
""" Save/load the legal moves cache """
with open('legalMovesDic.dat','r') as f:
return pickle.load(f)
if 'legalMovesDic.dat' in os.listdir('.'):
legalMovesDic = loadLegalMovesDic()
else:
legalMovesDic = {}
def saveLegalMovesDic():
print "Saving moves cache..."
with open('legalMovesDic.dat','w') as f:
pickle.dump(legalMovesDic,f)
print "...done"
|
|
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.test.utils import override_settings
import cinderclient as cinder_client
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class CinderApiTests(test.APITestCase):
def test_volume_list(self):
search_opts = {'all_tenants': 1}
volumes = self.cinder_volumes.list()
cinderclient = self.stub_cinderclient()
cinderclient.volumes = self.mox.CreateMockAnything()
cinderclient.volumes.list(search_opts=search_opts,).AndReturn(volumes)
self.mox.ReplayAll()
# No assertions are necessary. Verification is handled by mox.
api.cinder.volume_list(self.request, search_opts=search_opts)
def test_volume_snapshot_list(self):
search_opts = {'all_tenants': 1}
volume_snapshots = self.cinder_volume_snapshots.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_snapshots = self.mox.CreateMockAnything()
cinderclient.volume_snapshots.list(search_opts=search_opts).\
AndReturn(volume_snapshots)
self.mox.ReplayAll()
api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)
def test_volume_snapshot_list_no_volume_configured(self):
# remove volume from service catalog
catalog = self.service_catalog
for service in catalog[:]:  # iterate over a copy; the catalog is mutated below
if service["type"] == "volume":
self.service_catalog.remove(service)
search_opts = {'all_tenants': 1}
volume_snapshots = self.cinder_volume_snapshots.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_snapshots = self.mox.CreateMockAnything()
cinderclient.volume_snapshots.list(search_opts=search_opts).\
AndReturn(volume_snapshots)
self.mox.ReplayAll()
api.cinder.volume_snapshot_list(self.request, search_opts=search_opts)
def test_volume_type_list_with_qos_associations(self):
volume_types = self.cinder_volume_types.list()
# Due to test data limitations, we can only run this test using
# one qos spec, which is associated with one volume type.
# If we use multiple qos specs, the test data will always
# return the same associated volume type, which is invalid
# and prevented by the UI.
qos_specs_full = self.cinder_qos_specs.list()
qos_specs_only_one = [qos_specs_full[0]]
associations = self.cinder_qos_spec_associations.list()
cinderclient = self.stub_cinderclient()
cinderclient.volume_types = self.mox.CreateMockAnything()
cinderclient.volume_types.list().AndReturn(volume_types)
cinderclient.qos_specs = self.mox.CreateMockAnything()
cinderclient.qos_specs.list().AndReturn(qos_specs_only_one)
cinderclient.qos_specs.get_associations = self.mox.CreateMockAnything()
cinderclient.qos_specs.get_associations(qos_specs_only_one[0].id).\
AndReturn(associations)
self.mox.ReplayAll()
assoc_vol_types = \
api.cinder.volume_type_list_with_qos_associations(self.request)
associate_spec = assoc_vol_types[0].associated_qos_spec
self.assertEqual(associate_spec, qos_specs_only_one[0].name)
class CinderApiVersionTests(test.TestCase):
def setUp(self):
super(CinderApiVersionTests, self).setUp()
# The version is set when the module is loaded. Reset the
# active version each time so that we can test with different
# versions.
api.cinder.VERSIONS._active = None
def test_default_client_is_v1(self):
client = api.cinder.cinderclient(self.request)
self.assertIsInstance(client, cinder_client.v1.client.Client)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 1})
def test_v1_setting_returns_v1_client(self):
client = api.cinder.cinderclient(self.request)
self.assertIsInstance(client, cinder_client.v1.client.Client)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_v2_setting_returns_v2_client(self):
client = api.cinder.cinderclient(self.request)
self.assertIsInstance(client, cinder_client.v2.client.Client)
def test_get_v1_volume_attributes(self):
# Get a v1 volume
volume = self.cinder_volumes.first()
self.assertTrue(hasattr(volume._apiresource, 'display_name'))
self.assertFalse(hasattr(volume._apiresource, 'name'))
name = "A test volume name"
description = "A volume description"
setattr(volume._apiresource, 'display_name', name)
setattr(volume._apiresource, 'display_description', description)
self.assertEqual(name, volume.name)
self.assertEqual(description, volume.description)
def test_get_v2_volume_attributes(self):
# Get a v2 volume
volume = self.cinder_volumes.get(name="v2_volume")
self.assertTrue(hasattr(volume._apiresource, 'name'))
self.assertFalse(hasattr(volume._apiresource, 'display_name'))
name = "A v2 test volume name"
description = "A v2 volume description"
setattr(volume._apiresource, 'name', name)
setattr(volume._apiresource, 'description', description)
self.assertEqual(name, volume.name)
self.assertEqual(description, volume.description)
def test_get_v1_snapshot_attributes(self):
# Get a v1 snapshot
snapshot = self.cinder_volume_snapshots.first()
self.assertFalse(hasattr(snapshot._apiresource, 'name'))
name = "A test snapshot name"
description = "A snapshot description"
setattr(snapshot._apiresource, 'display_name', name)
setattr(snapshot._apiresource, 'display_description', description)
self.assertEqual(name, snapshot.name)
self.assertEqual(description, snapshot.description)
def test_get_v2_snapshot_attributes(self):
# Get a v2 snapshot
snapshot = self.cinder_volume_snapshots.get(
description="v2 volume snapshot description")
self.assertFalse(hasattr(snapshot._apiresource, 'display_name'))
name = "A v2 test snapshot name"
description = "A v2 snapshot description"
setattr(snapshot._apiresource, 'name', name)
setattr(snapshot._apiresource, 'description', description)
self.assertEqual(name, snapshot.name)
self.assertEqual(description, snapshot.description)
def test_get_id_for_nameless_volume(self):
volume = self.cinder_volumes.first()
setattr(volume._apiresource, 'display_name', "")
self.assertEqual(volume.id, volume.name)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 1})
def test_adapt_dictionary_to_v1(self):
volume = self.cinder_volumes.first()
data = {'name': volume.name,
'description': volume.description,
'size': volume.size}
ret_data = api.cinder._replace_v2_parameters(data)
self.assertIn('display_name', ret_data.keys())
self.assertIn('display_description', ret_data.keys())
self.assertNotIn('name', ret_data.keys())
self.assertNotIn('description', ret_data.keys())
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_adapt_dictionary_to_v2(self):
volume = self.cinder_volumes.first()
data = {'name': volume.name,
'description': volume.description,
'size': volume.size}
ret_data = api.cinder._replace_v2_parameters(data)
self.assertIn('name', ret_data.keys())
self.assertIn('description', ret_data.keys())
self.assertNotIn('display_name', ret_data.keys())
self.assertNotIn('display_description', ret_data.keys())
@override_settings(OPENSTACK_API_VERSIONS={'volume': 1})
def test_version_get_1(self):
version = api.cinder.version_get()
self.assertEqual(version, 1)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 2})
def test_version_get_2(self):
version = api.cinder.version_get()
self.assertEqual(version, 2)
@override_settings(OPENSTACK_API_VERSIONS={'volume': 1})
def test_retype_not_supported(self):
retype_supported = api.cinder.retype_supported()
self.assertFalse(retype_supported)
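# Hedged sketch of the parameter adaptation the two tests above exercise
# (illustrative data; exact behaviour depends on the configured volume API
# version):
#
#   data = {'name': 'vol', 'description': 'demo', 'size': 1}
#   api.cinder._replace_v2_parameters(data)
#   # with OPENSTACK_API_VERSIONS={'volume': 1}:
#   #   {'display_name': 'vol', 'display_description': 'demo', 'size': 1}
#   # with OPENSTACK_API_VERSIONS={'volume': 2}: the dict is returned unchanged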
|
|
#!/usr/bin/env python
import os, time, tempfile, shutil, re, string, pickle, codecs
try: from hashlib import md5
except ImportError: from md5 import new as md5
from plasTeX.Logging import getLogger
from StringIO import StringIO
from plasTeX.Filenames import Filenames
from plasTeX.dictutils import ordereddict
log = getLogger()
depthlog = getLogger('render.images.depth')
status = getLogger('status')
try:
import Image as PILImage
import ImageChops as PILImageChops
except ImportError:
PILImage = PILImageChops = None
def autoCrop(im, bgcolor=None, margin=0):
"""
Automatically crop image down to non-background portion
Required Argument:
im -- image object
Optional Arguments:
bgcolor -- value or tuple containing the color to use for the
background color when cropping
margin -- leave this many pixels around the content. If there
aren't that many pixels to leave, leave as many as possible.
Returns: cropped image object and tuple containing the number
of pixels removed from each side (left, top, right, bottom)
"""
if im.mode != "RGB":
im = im.convert("RGB")
origbbox = im.getbbox()
if origbbox is None:
origbbox = (0,0,im.size[0],im.size[1])
# Figure out the background color from the corners, if needed
if bgcolor is None:
topleft = im.getpixel((origbbox[0],origbbox[1]))
topright = im.getpixel((origbbox[2]-1,origbbox[1]))
bottomleft = im.getpixel((origbbox[0],origbbox[3]-1))
bottomright = im.getpixel((origbbox[2]-1,origbbox[3]-1))
corners = [topleft, topright, bottomleft, bottomright]
matches = []
matches.append(len([x for x in corners if x == topleft]))
matches.append(len([x for x in corners if x == topright]))
matches.append(len([x for x in corners if x == bottomleft]))
matches.append(len([x for x in corners if x == bottomright]))
try: bgcolor = corners[matches.index(1)]
except ValueError: pass
try: bgcolor = corners[matches.index(2)]
except ValueError: pass
try: bgcolor = corners[matches.index(3)]
except ValueError: pass
try: bgcolor = corners[matches.index(4)]
except ValueError: pass
# Create image with only the background color
bg = PILImage.new("RGB", im.size, bgcolor)
# Get bounding box of non-background content
diff = PILImageChops.difference(im, bg)
bbox = diff.getbbox()
if bbox:
if margin:
bbox = list(bbox)
bbox[0] -= margin
bbox[1] -= margin
bbox[2] += margin
bbox[3] += margin
bbox = tuple([max(0,x) for x in bbox])
return im.crop(bbox), tuple([abs(x-y) for x,y in zip(origbbox,bbox)])
return PILImage.new("RGB", (1,1), bgcolor), (0,0,0,0)
return None, None # no contents
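# Usage sketch (assumes Pillow/PIL imported above as PILImage; 'figure.png' is
# an illustrative filename). autoCrop() returns the cropped image together with
# the number of pixels trimmed from each side, which _stripBaseline() below
# uses to recover the baseline depth.
def autoCropExample(path='figure.png', margin=2):
    if PILImage is None:
        return None
    im = PILImage.open(path)
    cropped, (left, top, right, bottom) = autoCrop(im, margin=margin)
    return cropped.size, (left, top, right, bottom)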
class Box(object):
pass
class Dimension(float):
"""
Dimension object used for width, height, and depth of images
This object is simply a float value. The value of the float
is in pixels. All other units can be gotten using their
corresponding property.
"""
fontSize = 15
@property
def ex(self):
return '%sex' % self.format(self / (self.fontSize * 0.6))
@property
def em(self):
return '%sem' % self.format(self / self.fontSize)
@property
def pt(self):
return '%spt' % self.format(self)
@property
def px(self):
return '%spx' % self.format(self)
@property
def mm(self):
# 1pt = 1/72in, 1in = 2.54cm = 25.4mm, 1pc = 12pt; convert from the numeric value.
return '%smm' % self.format(self / 72.0 * 25.4)
@property
def inch(self):
return '%sin' % self.format(self / 72.0)
@property
def cm(self):
return '%scm' % self.format(self / 72.0 * 2.54)
@property
def pc(self):
return '%spc' % self.format(self / 12.0)
def __getattribute__(self, name):
if name in ['in']:
return self.inch
return float.__getattribute__(self, name)
def format(self, value):
if abs(int(value) - value) < 0.0001:
return '%s' % int(value)
return '%0.3f' % value
def __str__(self):
return self.format(self)
def __repr__(self):
return self.format(self)
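# Illustrative sketch (not in the original source): the conversions above
# follow the class's own pt -> in -> cm formulas, treating the stored float
# as points.
def dimensionExample():
    d = Dimension(72)
    return d.pt, d.inch, d.pc  # ('72pt', '1in', '6pc')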
class DimensionPlaceholder(str):
"""
Placeholder for dimensions
Dimensions for an image aren't generally known until the end of
the rendering process. This object generates a placeholder
for the dimension.
"""
imageUnits = ''
def __getattribute__(self, name):
if name in ['in','ex','em','pt','px','mm','cm','pc']:
if not self:
return self
vars = {'units':name}
return self + string.Template(self.imageUnits).substitute(vars)
return str.__getattribute__(self, name)
def __setattr__(self, name, value):
# Keep the unit attributes read-only; everything else behaves normally.
if name in ['in','ex','em','pt','px','mm','cm','pc']:
return
return str.__setattr__(self, name, value)
class Image(object):
""" Generic image object """
def __init__(self, filename, config, width=None, height=None, alt=None,
depth=None, longdesc=None):
self.filename = filename
self.path = os.path.join(os.getcwd(), self.filename)
self.width = width
self.height = height
self.alt = alt
self.depth = depth
self.depthRatio = 0
self.longdesc = longdesc
self.config = config
self._cropped = False
self.bitmap = self
self.checksum = None
def height():
def fget(self):
return getattr(self.bitmap, '_height', None)
def fset(self, value):
if value is None:
self._height = value
elif isinstance(value, DimensionPlaceholder):
self._height = value
else:
self._height = Dimension(value)
return locals()
height = property(**height())
def width():
def fget(self):
return getattr(self.bitmap, '_width', None)
def fset(self, value):
if value is None:
self._width = value
elif isinstance(value, DimensionPlaceholder):
self._width = value
else:
self._width = Dimension(value)
return locals()
width = property(**width())
def depth():
def fget(self):
return getattr(self, '_depth', None)
def fset(self, value):
if value is None:
self._depth = value
elif isinstance(value, DimensionPlaceholder):
self._depth = value
else:
self._depth = Dimension(value)
return locals()
depth = property(**depth())
@property
def url(self):
base = self.config['base-url']
if base and base.endswith('/'):
base = base[:-1]
if base:
return '%s/%s' % (base, self.filename)
return self.filename
def crop(self):
""" Do the actual cropping """
if self._cropped:
return
# Crop an SVG image
if os.path.splitext(self.path)[-1] in ['.svg']:
svg = open(self.path,'r').read()
self.width = 0
width = re.search(r'width=(?:\'|")([\d\.]+)\w*(?:\'|")', svg)
if width:
self.width = float(width.group(1))
self.height = 0
height = re.search(r'height=(?:\'|")([\d\.]+)\w*(?:\'|")', svg)
if height:
self.height = float(height.group(1))
self.depth = 0
if self.bitmap and self.height:
depth = (self.height / self.bitmap.height) * self.bitmap.depth
if abs(depth - int(depth)) > 0.1:
self.depth = depth - 1
else:
self.depth = depth
self._cropped = True
return
padbaseline = self.config['baseline-padding']
try:
im, self.depth = self._stripBaseline(PILImage.open(self.path),
padbaseline)
self.width, self.height = im.size
except IOError, msg:
# import traceback
# traceback.print_exc()
self._cropped = True
log.warning(msg)
return
if padbaseline and self.depth > padbaseline:
log.warning('depth of image %s (%d) is greater than the baseline padding (%s). This may cause the image to be misaligned with surrounding text.', self.filename, self.depth, padbaseline)
if self.config['transparent']:
im = im.convert("P")
lut = im.resize((256,1))
lut.putdata(range(256))
index = list(lut.convert("RGB").getdata()).index((255,255,255))
im.save(self.path, transparency=index)
else:
im.save(self.path)
self._cropped = True
def __str__(self):
return self.filename
def __repr__(self):
return self.filename
def _autoCrop(self, im, bgcolor=None, margin=0):
return autoCrop(im, bgcolor, margin)
def _stripBaseline(self, im, padbaseline=0):
"""
Find the baseline register mark and crop it out
The image has to have a particular layout. The top left corner
must be the background color of the image. There should be a
square registration mark which has the bottom edge at the baseline
of the image (see \\plasTeXregister in LaTeX code at the top
of this file). This registration mark should be the leftmost
content of the image. If the registration mark is at the top
of the image, the baseline is ignored.
Required Arguments:
im -- image to be cropped
Keyword Arguments:
padbaseline -- amount to pad the bottom of all cropped images.
This allows you to use one margin-bottom for all images;
however, you need to make sure that this padding is large
enough to handle the largest descender in the document.
Returns:
(cropped image, distance from baseline to bottom of image)
"""
if im.mode != "RGB":
im = im.convert("RGB")
depth = 0
background = im.getpixel((0,0))
# Crop the image so that the registration mark is on the left edge
im = self._autoCrop(im)[0]
width, height = im.size
# Determine if registration mark is at top or left
top = False
# Found mark at top
if im.getpixel((0,0)) != background:
top = True
i = 1
# Parse past the registration mark
# We're fudging the vertical position by 1px to catch
# things sitting right under the baseline.
while i < width and im.getpixel((i,1)) != background:
i += 1
# Look for additional content after mark
if i < width:
while i < width and im.getpixel((i,1)) == background:
i += 1
# If there is non-background content after mark,
# consider the mark to be on the left
if i < width:
top = False
# Registration mark at the top
blank = False
if top:
pos = height - 1
while pos and im.getpixel((0,pos)) == background:
pos -= 1
depth = pos - height + 1
# Get the height of the registration mark so it can be cropped out
rheight = 0
while rheight < height and im.getpixel((0,rheight)) != background:
rheight += 1
# If the depth is the entire height, just make depth = 0
if -depth == (height-rheight):
depth = 0
# Handle empty images
bbox = im.getbbox()
if bbox is None or rheight == (height-1):
blank = True
else:
bbox = list(bbox)
bbox[1] = rheight
# Registration mark on left side
if blank or not(top) or im.getbbox()[1] == 0:
pos = height - 1
while pos and im.getpixel((0,pos)) == background:
pos -= 1
depth = pos - height + 1
# Get the width of the registration mark so it can be cropped out
rwidth = 0
while rwidth < width and im.getpixel((rwidth,pos)) != background:
rwidth += 1
# Handle empty images
bbox = im.getbbox()
if bbox is None or rwidth == (width-1):
return PILImage.new("RGB", (1,1), background), 0
bbox = list(bbox)
bbox[0] = rwidth
# Crop out register mark, and autoCrop result
im, cropped = self._autoCrop(im.crop(bbox), background)
# If the content was entirely above the baseline,
# we need to keep that whitespace
depth += cropped[3]
depthlog.debug('Depth of image %s is %s', self.filename, depth)
# Pad all images with the given amount. This allows you to
# set one margin-bottom for all images.
if padbaseline:
width, height = im.size
newim = PILImage.new("RGB", (width,height+(padbaseline+depth)), background)
newim.paste(im, im.getbbox())
im = newim
return im, depth
class Imager(object):
""" Generic Imager """
# The command to run on the LaTeX output file to generate images.
# This should be overridden by the subclass.
command = ''
# The compiler command used to compile the LaTeX document
compiler = 'latex'
# Verification command to determine if the imager is available
verification = ''
fileExtension = '.png'
imageAttrs = ''
imageUnits = ''
def __init__(self, document, imageTypes=None):
self.config = document.config
self.ownerDocument = document
if imageTypes is None:
self.imageTypes = [self.fileExtension]
else:
self.imageTypes = imageTypes[:]
# Dictionary that makes sure each image is only generated once.
# The key is the LaTeX source and the value is the image instance.
self._cache = {}
usednames = {}
self._filecache = os.path.abspath(os.path.join('.cache',
self.__class__.__name__+'.images'))
if self.config['images']['cache'] and os.path.isfile(self._filecache):
try:
self._cache = pickle.load(open(self._filecache, 'r'))
for key, value in self._cache.items():
if not os.path.isfile(value.filename):
del self._cache[key]
continue
usednames[value.filename] = None
except ImportError:
os.remove(self._filecache)
# List of images in the order that they appear in the LaTeX file
self.images = ordereddict()
# Images that are simply copied from the source directory
self.staticimages = ordereddict()
# Filename generator
self.newFilename = Filenames(self.config['images'].get('filenames', raw=True),
vars={'jobname':document.userdata.get('jobname','')},
extension=self.fileExtension, invalid=usednames)
# Start the document with a preamble
self.source = StringIO()
self.source.write('\\scrollmode\n')
self.writePreamble(document)
self.source.write('\\begin{document}\n')
# Set up additional options
self._configOptions = self.formatConfigOptions(self.config['images'])
def formatConfigOptions(self, config):
"""
Format configuration options as command line options
Required Arguments:
config -- the images section of the configuration object
Returns: a list of two-element tuples contain option value pairs
Example::
output = []
if config['resolution']:
output.append(('-D', config['resolution']))
return output
"""
return []
def writePreamble(self, document):
""" Write any necessary code to the preamble of the document """
self.source.write(document.preamble.source)
self.source.write('\\makeatletter\\oddsidemargin -0.25in\\evensidemargin -0.25in\n')
# self.source.write('\\tracingoutput=1\n')
# self.source.write('\\tracingonline=1\n')
# self.source.write('\\showboxbreadth=\maxdimen\n')
# self.source.write('\\showboxdepth=\maxdimen\n')
# self.source.write('\\newenvironment{plasTeXimage}[1]{\\def\\@current@file{#1}\\thispagestyle{empty}\\def\\@eqnnum{}\\setbox0=\\vbox\\bgroup}{\\egroup\\typeout{imagebox:\\@current@file(\\the\\ht0+\\the\\dp0)}\\box0\\newpage}')
self.source.write('\\@ifundefined{plasTeXimage}{'
'\\newenvironment{plasTeXimage}[1]{' +
'\\vfil\\break\\plasTeXregister' +
'\\thispagestyle{empty}\\def\\@eqnnum{}' +
'\\ignorespaces}{}}{}\n')
self.source.write('\\@ifundefined{plasTeXregister}{' +
'\\def\\plasTeXregister{\\parindent=-0.5in\\ifhmode\\hrule' +
'\\else\\vrule\\fi height 2pt depth 0pt ' +
'width 2pt\\hskip2pt}}{}\n')
def verify(self):
""" Verify that this commmand works on this machine """
if self.verification:
proc = os.popen(self.verification)
proc.read()
if not proc.close():
return True
return False
if not self.command.strip():
return False
cmd = self.command.split()[0]
if not cmd:
return False
proc = os.popen('%s --help' % cmd)
proc.read()
if not proc.close():
return True
return False
@property
def enabled(self):
if self.config['images']['enabled'] and \
(self.command or (type(self) is not Imager and type(self) is not VectorImager)):
return True
return False
def close(self):
""" Invoke the rendering code """
# Finish the document
self.source.write('\n\\end{document}\\endinput')
for value in self._cache.values():
if value.checksum and os.path.isfile(value.path):
d = md5(open(value.path,'r').read()).digest()
if value.checksum != d:
log.warning('The image data for "%s" on the disk has changed. You may want to clear the image cache.' % value.filename)
# Bail out if there are no images
if not self.images:
return
if not self.enabled:
return
# Compile LaTeX source, then convert the output
self.source.seek(0)
output = self.compileLatex(self.source.read())
if output is None:
log.error('Compilation of the document containing the images failed. No output file was found.')
return
self.convert(output)
for value in self._cache.values():
if value.checksum is None and os.path.isfile(value.path):
value.checksum = md5(open(value.path,'r').read()).digest()
if not os.path.isdir(os.path.dirname(self._filecache)):
os.makedirs(os.path.dirname(self._filecache))
pickle.dump(self._cache, open(self._filecache,'w'))
def compileLatex(self, source):
"""
Compile the LaTeX source
Arguments:
source -- the LaTeX source to compile
Returns:
file object corresponding to the output from LaTeX
"""
cwd = os.getcwd()
# Make a temporary directory to work in
tempdir = tempfile.mkdtemp()
os.chdir(tempdir)
filename = 'images.tex'
# Write LaTeX source file
if self.config['images']['save-file']:
self.source.seek(0)
codecs.open(os.path.join(cwd,filename), 'w', self.config['files']['input-encoding']).write(self.source.read())
self.source.seek(0)
codecs.open(filename, 'w', self.config['files']['input-encoding']).write(self.source.read())
# Run LaTeX
os.environ['SHELL'] = '/bin/sh'
program = self.config['images']['compiler']
if not program:
program = self.compiler
os.system(r"%s %s" % (program, filename))
output = None
for ext in ['.dvi','.pdf','.ps']:
if os.path.isfile('images'+ext):
output = WorkingFile('images'+ext, 'rb', tempdir=tempdir)
break
# Change back to original working directory
os.chdir(cwd)
return output
def executeConverter(self, output):
"""
Execute the actual image converter
Arguments:
output -- file object pointing to the rendered LaTeX output
Returns:
two-element tuple. The first element is the return code of the
command. The second element is the list of filenames generated.
If the default filenames (i.e. img001.png, img002.png, ...) are
used, you can simply return None.
"""
open('images.out', 'wb').write(output.read())
options = ''
if self._configOptions:
for opt, value in self._configOptions:
opt, value = str(opt), str(value)
if ' ' in value:
value = '"%s"' % value
options += '%s %s ' % (opt, value)
return os.system('%s %s%s' % (self.command, options, 'images.out')), None
def convert(self, output):
"""
Convert the output from LaTeX into images
Arguments:
output -- output file object
"""
if not self.command and self.executeConverter is Imager.executeConverter:
log.warning('No imager command is configured. ' +
'No images will be created.')
return
cwd = os.getcwd()
# Make a temporary directory to work in
tempdir = tempfile.mkdtemp()
os.chdir(tempdir)
# Execute converter
rc, images = self.executeConverter(output)
if rc:
log.warning('Image converter did not exit properly. ' +
'Images may be corrupted or missing.')
# Get a list of all of the image files
if images is None:
images = [f for f in os.listdir('.')
if re.match(r'^img\d+\.\w+$', f)]
if len(images) != len(self.images):
log.warning('The number of images generated (%d) and the number of images requested (%d) is not the same.' % (len(images), len(self.images)))
# Sort by creation date
#images.sort(lambda a,b: cmp(os.stat(a)[9], os.stat(b)[9]))
images.sort(lambda a,b: cmp(int(re.search(r'^img(\d+)',a).group(1)),
int(re.search(r'^img(\d+)',b).group(1))))
os.chdir(cwd)
if PILImage is None:
log.warning('PIL (Python Imaging Library) is not installed. ' +
'Images will not be cropped.')
# Move images to their final location
for src, dest in zip(images, self.images.values()):
# Move the image
directory = os.path.dirname(dest.path)
if directory and not os.path.isdir(directory):
os.makedirs(directory)
shutil.copy2(os.path.join(tempdir,src), dest.path)
# Crop the image
try:
dest.crop()
status.dot()
except Exception, msg:
import traceback
traceback.print_exc()
log.warning('failed to crop %s (%s)', dest.path, msg)
# Remove temporary directory
shutil.rmtree(tempdir, True)
def writeImage(self, filename, code, context):
"""
Write LaTeX source for the image
Arguments:
filename -- the name of the file that will be generated
code -- the LaTeX code of the image
context -- the LaTeX code of the context of the image
"""
self.source.write('%s\n\\begin{plasTeXimage}{%s}\n%s\n\\end{plasTeXimage}\n' % (context, filename, code))
def newImage(self, text, context='', filename=None):
"""
Invoke a new image
Required Arguments:
text -- the LaTeX source to be rendered in an image
Keyword Arguments:
context -- LaTeX source to be executed before the image
is created. This generally consists of setting counters,
lengths, etc. that will be used by the code that
generates the image.
filename -- filename to force the image to. This filename
should not include the file extension.
"""
# Convert ligatures back to original string
for dest, src in self.ownerDocument.charsubs:
text = text.replace(src, dest)
key = text
# See if this image has been cached
if key in self._cache:
return self._cache[key]
# Generate a filename
if not filename:
filename = self.newFilename()
# Add the image to the current document and cache
#log.debug('Creating %s from %s', filename, text)
self.writeImage(filename, text, context)
img = Image(filename, self.config['images'])
# Populate image attrs that will be bound later
if self.imageAttrs:
tmpl = string.Template(self.imageAttrs)
vars = {'filename':filename}
for name in ['height','width','depth']:
if getattr(img, name) is None:
vars['attr'] = name
value = DimensionPlaceholder(tmpl.substitute(vars))
value.imageUnits = self.imageUnits
setattr(img, name, value)
self.images[filename] = self._cache[key] = img
return img
def getImage(self, node):
"""
Get an image from the given node whatever way possible
This method attempts to find an existing image using the
`imageoverride' attribute. If it finds it, the image is
converted to the appropriate output format and copied to the
images directory. If no image is available, or there was
a problem in getting the image, an image is generated.
Arguments:
node -- the node to create the image from
Returns:
Image instance
"""
name = getattr(node, 'imageoverride', None)
if name is None:
return self.newImage(node.source)
if name in self.staticimages:
return self.staticimages[name]
# Copy or convert the image as needed
path = self.newFilename()
newext = os.path.splitext(path)[-1]
oldext = os.path.splitext(name)[-1]
try:
directory = os.path.dirname(path)
if directory and not os.path.isdir(directory):
os.makedirs(directory)
# If PIL isn't available or no conversion is necessary,
# just copy the image to the new location
if newext == oldext or oldext in self.imageTypes:
path = os.path.splitext(path)[0] + os.path.splitext(name)[-1]
if PILImage is None:
shutil.copyfile(name, path)
tmpl = string.Template(self.imageAttrs)
width = DimensionPlaceholder(tmpl.substitute({'filename':path, 'attr':'width'}))
height = DimensionPlaceholder(tmpl.substitute({'filename':path, 'attr':'height'}))
height.imageUnits = width.imageUnits = self.imageUnits
else:
img = PILImage.open(name)
width, height = img.size
scale = self.config['images']['scale-factor']
if scale != 1:
width = int(width * scale)
height = int(height * scale)
img = img.resize((width,height))  # resize() returns a new image
img.save(path)
else:
shutil.copyfile(name, path)
# If PIL is available, convert the image to the appropriate type
else:
img = PILImage.open(name)
width, height = img.size
scale = self.config['images']['scale-factor']
if scale != 1:
width = int(width * scale)
height = int(height * scale)
img = img.resize((width,height))  # resize() returns a new image
img.save(path)
img = Image(path, self.ownerDocument.config['images'], width=width, height=height)
self.staticimages[name] = img
return img
# If anything fails, just let the imager handle it...
except Exception, msg:
#log.warning('%s in image "%s". Reverting to LaTeX to generate the image.' % (msg, name))
pass
return self.newImage(node.source)
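# Sketch of a minimal concrete Imager (hypothetical option values; the real
# drivers live in plasTeX.Imagers.*). A subclass typically only overrides
# `command`, `verification`, and `fileExtension`; Imager.convert() and
# executeConverter() handle running the command on the rendered DVI/PDF output.
class ExampleDvipngImager(Imager):
    command = 'dvipng -o img%d.png -D 120'
    verification = 'dvipng --help'
    fileExtension = '.png'
    compiler = 'latex'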
class VectorImager(Imager):
fileExtension = '.svg'
def writePreamble(self, document):
Imager.writePreamble(self, document)
# self.source.write('\\usepackage{type1ec}\n')
self.source.write('\\def\\plasTeXregister{}\n')
class WorkingFile(file):
"""
File used for processing in a temporary directory
When the file is closed or the object is deleted, the temporary
directory associated with the file is deleted as well.
"""
def __init__(self, *args, **kwargs):
if 'tempdir' in kwargs:
self.tempdir = kwargs['tempdir']
del kwargs['tempdir']
file.__init__(self, *args, **kwargs)
def close(self):
if self.tempdir and os.path.isdir(self.tempdir):
shutil.rmtree(self.tempdir, True)
file.close(self)
def __del__(self):
self.close()
|
|
import datetime
import threading
from django.utils.html import escape as html_escape
from mongoengine import EmbeddedDocument
try:
from mongoengine.base import ValidationError
except ImportError:
from mongoengine.errors import ValidationError
from mongoengine.base.datastructures import BaseList
from mongoengine.queryset import Q
from cripts.core.class_mapper import class_from_id
from cripts.core.form_consts import NotificationType
from cripts.core.user import CRIPTsUser
from cripts.core.user_tools import user_sources, get_subscribed_users
from cripts.notifications.notification import Notification
from cripts.notifications.processor import ChangeParser, MappedMongoFields
from cripts.notifications.processor import NotificationHeaderManager
def create_notification(obj, username, message, source_filter=None,
notification_type=NotificationType.ALERT):
"""
Generate a notification -- based on mongo obj.
:param obj: The object.
:type obj: class which inherits from
:class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param username: The user creating the notification.
:type username: str
:param message: The notification message.
:type message: str
:param source_filter: Filter on who can see this notification.
:type source_filter: list(str)
:param notification_type: The notification type (e.g. alert, error).
:type notification_type: str
"""
n = Notification()
n.analyst = username
obj_type = obj._meta['cripts_type']
users = set()
if notification_type not in NotificationType.ALL:
notification_type = NotificationType.ALERT
n.notification_type = notification_type
if obj_type == 'Comment':
n.obj_id = obj.obj_id
n.obj_type = obj.obj_type
n.notification = "%s added a comment: %s" % (username, obj.comment)
users.update(obj.users) # notify mentioned users
# for comments, use the sources from the object that it is linked to
# instead of the comment's sources
obj = class_from_id(n.obj_type, n.obj_id)
else:
n.notification = message
n.obj_id = obj.id
n.obj_type = obj_type
if hasattr(obj, 'source'):
sources = [s.name for s in obj.source]
subscribed_users = get_subscribed_users(n.obj_type, n.obj_id, sources)
# Filter on users that have access to the source of the object
for subscribed_user in subscribed_users:
allowed_sources = user_sources(subscribed_user)
for allowed_source in allowed_sources:
if allowed_source in sources:
if source_filter is None or allowed_source in source_filter:
users.add(subscribed_user)
break
else:
users.update(get_subscribed_users(n.obj_type, n.obj_id, []))
users.discard(username) # don't notify the user creating this notification
n.users = list(users)
if not len(n.users):
return
try:
n.save()
except ValidationError:
pass
# Signal potentially waiting threads that notification information is available
for user in n.users:
notification_lock = NotificationLockManager.get_notification_lock(user)
notification_lock.acquire()
try:
notification_lock.notifyAll()
finally:
notification_lock.release()
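# Usage sketch (object and user names are illustrative): alert every subscribed
# user who can see the "Public" source after an object changes.
# create_notification() then wakes any pollers blocked in
# get_notification_details() via the per-user Condition objects.
def notify_update_sketch(obj, username):
    create_notification(obj, username,
                        "%s updated %s" % (username, obj.id),
                        source_filter=["Public"],
                        notification_type=NotificationType.ALERT)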
def create_general_notification(username, target_users, header, link_url, message,
notification_type=NotificationType.ALERT):
"""
Generate a general notification -- not based on mongo obj.
:param username: The user creating the notification.
:type username: str
:param target_users: The list of users who will get the notification.
:type target_users: list(str)
:param header: The notification header message.
:type header: list(str)
:param link_url: A link URL for the header, specify None if there is no link.
:type link_url: str
:param message: The notification message.
:type message: str
:param notification_type: The notification type (e.g. alert, error).
:type notification_type: str
"""
if notification_type not in NotificationType.ALL:
notification_type = NotificationType.ALERT
n = Notification()
n.analyst = username
n.notification_type = notification_type
n.notification = message
n.header = header
n.link_url = link_url
for target_user in target_users:
# Check to make sure the user actually exists
user = CRIPTsUser.objects(username=target_user).first()
if user is not None:
n.users.append(target_user)
# don't notify the user creating this notification
n.users = [u for u in n.users if u != username]
if not len(n.users):
return
try:
n.save()
except ValidationError:
pass
# Signal potentially waiting threads that notification information is available
for user in n.users:
notification_lock = NotificationLockManager.get_notification_lock(user)
notification_lock.acquire()
try:
notification_lock.notifyAll()
finally:
notification_lock.release()
def generate_audit_notification(username, operation_type, obj, changed_fields,
what_changed, is_new_doc=False):
"""
Generate an audit notification on the specific change, if applicable.
This is called during an audit of the object, before the actual save
to the database occurs.
:param username: The user creating the notification.
:type username: str
:param operation_type: The type of operation (i.e. save or delete).
:type operation_type: str
:param obj: The object.
:type obj: class which inherits from
:class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:param changed_fields: A list of field names that were changed.
:type changed_fields: list of str
:param what_changed: A message summarizing what changed.
:type what_changed: str
:param is_new_doc: Indicates if the input obj is newly created.
:type is_new_doc: bool
"""
obj_type = obj._meta['cripts_type']
supported_notification = __supported_notification_types__.get(obj_type)
# Check if the obj is supported for notifications
if supported_notification is None:
return
if operation_type == "save":
message = "%s updated the following attributes: %s" % (username,
what_changed)
elif operation_type == "delete":
header_description = generate_notification_header(obj)
message = "%s deleted the following: %s" % (username,
header_description)
if is_new_doc:
sources = []
if hasattr(obj, 'source'):
sources = [s.name for s in obj.source]
message = None
target_users = get_subscribed_users(obj_type, obj.id, sources)
header = generate_notification_header(obj)
link_url = None
if hasattr(obj, 'get_details_url'):
link_url = obj.get_details_url()
if header is not None:
header = "New " + header
create_general_notification(username,
target_users,
header,
link_url,
message)
process_result = process_changed_fields(message, changed_fields, obj)
message = process_result.get('message')
source_filter = process_result.get('source_filter')
if message is not None:
message = html_escape(message)
create_notification(obj, username, message, source_filter, NotificationType.ALERT)
def combine_source_filters(current_source_filters, new_source_filters):
"""
Combines sources together in a restrictive way, e.g. combines sources
like a boolean AND operation, e.g. the source must exist in both lists.
The only exception is if current_source_filters == None, in which case the
new_source_filters will act as the new baseline.
:type current_source_filters: list of source names
:param current_source_filters: list(str).
:type new_source_filters: list of source names
:param new_source_filters: list(str).
:returns: list(str) -- the combined source names.
"""
combined_source_filters = []
if current_source_filters is None:
return new_source_filters
else:
for new_source_filter in new_source_filters:
if new_source_filter in current_source_filters:
combined_source_filters.append(new_source_filter)
return combined_source_filters
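# Worked example of the intersection semantics described above (source names
# are illustrative; the function is pure, so these hold by inspection):
def combine_source_filters_example():
    assert combine_source_filters(None, ['SrcA']) == ['SrcA']
    assert combine_source_filters(['SrcA', 'SrcB'], ['SrcB']) == ['SrcB']
    assert combine_source_filters(['SrcA'], ['SrcB']) == []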
def process_changed_fields(initial_message, changed_fields, obj):
"""
Processes the changed fields to determine what actually changed.
:param initial_message: An initial message to include.
:type initial_message: str
:param changed_fields: A list of field names that were changed.
:type changed_fields: list of str
:param obj: The object.
:type obj: class which inherits from
:class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`
:returns: dict with keys "message" (str) and "source_filter" (list(str) or None).
"""
obj_type = obj._meta['cripts_type']
message = initial_message
if message is None:
message = ''
source_filter = None
for changed_field in changed_fields:
# Fields may be fully qualified, e.g. source.1.instances.0.reference
# So, split on the '.' character and get the root of the changed field
base_changed_field = MappedMongoFields.get_mapped_mongo_field(obj_type, changed_field.split('.')[0])
new_value = getattr(obj, base_changed_field, '')
old_obj = class_from_id(obj_type, obj.id)
old_value = getattr(old_obj, base_changed_field, '')
change_handler = ChangeParser.get_changed_field_handler(obj_type, base_changed_field)
if change_handler is not None:
change_message = change_handler(old_value, new_value, base_changed_field)
if isinstance(change_message, dict):
if change_message.get('source_filter') is not None:
new_source_filter = change_message.get('source_filter')
source_filter = combine_source_filters(source_filter, new_source_filter)
change_message = change_message.get('message')
if change_message is not None:
message += "\n" + change_message[:1].capitalize() + change_message[1:]
else:
change_field_handler = ChangeParser.generic_single_field_change_handler
if isinstance(old_value, BaseList):
list_value = None
if len(old_value) > 0:
list_value = old_value[0]
elif len(new_value) > 0:
list_value = new_value[0]
if isinstance(list_value, basestring):
change_field_handler = ChangeParser.generic_list_change_handler
elif isinstance(list_value, EmbeddedDocument):
change_field_handler = ChangeParser.generic_list_json_change_handler
change_message = change_field_handler(old_value, new_value, base_changed_field)
if isinstance(change_message, dict):
if change_message.get('source_filter') is not None:
new_source_filter = change_message.get('source_filter')
source_filter = combine_source_filters(source_filter, new_source_filter)
change_message = change_message.get('message')
if change_message is not None:
message += "\n" + change_message[:1].capitalize() + change_message[1:]
return {'message': message, 'source_filter': source_filter}
def get_notification_details(request, newer_than):
"""
Generate the data to render the notification dialogs.
:param request: The Django request.
:type request: :class:`django.http.HttpRequest`
:param newer_than: A filter that specifies that only notifications
newer than this time should be returned.
:type newer_than: str in ISODate format.
:returns: arguments (dict)
"""
username = request.user.username
notifications_list = []
notifications = None
latest_notification_time = None
lock = NotificationLockManager.get_notification_lock(username)
timeout = 0
# Critical section, check if there are notifications to be consumed.
lock.acquire()
try:
notifications = get_user_notifications(username, newer_than=newer_than)
if len(notifications) > 0:
latest_notification_time = str(notifications[0].created)
else:
# no new notifications -- block until time expiration or lock release
lock.wait(60)
# lock was released, check if there is any new information yet
notifications = get_user_notifications(username, newer_than=newer_than)
if len(notifications) > 0:
latest_notification_time = str(notifications[0].created)
finally:
lock.release()
if latest_notification_time is not None:
acknowledgement_type = request.user.get_preference('toast_notifications', 'acknowledgement_type', 'sticky')
if acknowledgement_type == 'timeout':
timeout = request.user.get_preference('toast_notifications', 'timeout', 30) * 1000
for notification in notifications:
obj = class_from_id(notification.obj_type, notification.obj_id)
if obj is not None:
link_url = obj.get_details_url()
header = generate_notification_header(obj)
else:
if notification.header is not None:
header = notification.header
else:
header = "%s %s" % (notification.obj_type, notification.obj_id)
if notification.link_url is not None:
link_url = notification.link_url
else:
link_url = None
notification_type = notification.notification_type
if notification_type is None or notification_type not in NotificationType.ALL:
notification_type = NotificationType.ALERT
notification_data = {
"header": header,
"message": notification.notification,
"date_modified": str(notification.created.strftime("%Y/%m/%d %H:%M:%S")),
"link": link_url,
"modified_by": notification.analyst,
"id": str(notification.id),
"type": notification_type,
}
notifications_list.append(notification_data)
return {
'notifications': notifications_list,
'newest_notification': latest_notification_time,
'server_time': str(datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")),
'timeout': timeout,
}
def get_notifications_for_id(username, obj_id, obj_type):
"""
Get notifications for a specific top-level object and user.
:param username: The user to search for.
:param obj_id: The ObjectId to search for.
:type obj_id: str
:param obj_type: The top-level object type.
:type obj_type: str
:returns: :class:`cripts.core.cripts_mongoengine.CriptsQuerySet`
"""
return Notification.objects(users=username,
obj_id=obj_id,
obj_type=obj_type)
def remove_notification(obj_id):
"""
Remove an existing notification.
:param obj_id: The top-level ObjectId to find the notification to remove.
:type obj_id: str
:returns: dict with keys "success" (boolean) and "message" (str).
"""
notification = Notification.objects(id=obj_id).first()
if not notification:
message = "Could not find notification to remove!"
result = {'success': False, 'message': message}
else:
notification.delete()
message = "Notification removed successfully!"
result = {'success': True, 'message': message}
return result
def get_new_notifications():
"""
Get any new notifications.
"""
return Notification.objects(status="new")
def remove_user_from_notification(username, obj_id, obj_type):
"""
Remove a user from the list of users for a notification.
:param username: The user to remove.
:type username: str
:param obj_id: The ObjectId of the top-level object for this notification.
:type obj_id: str
:param obj_type: The top-level object type.
:type obj_type: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
Notification.objects(obj_id=obj_id,
obj_type=obj_type).update(pull__users=username)
return {'success': True}
def remove_user_from_notification_id(username, id):
"""
Remove a user from the list of users for a notification.
:param username: The user to remove.
:type username: str
:param id: The ObjectId of the notification to update.
:type id: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
Notification.objects(id=id).update(pull__users=username)
return {'success': True}
def remove_user_notifications(username):
"""
Remove a user from all notifications.
:param username: The user to remove.
:type username: str
:returns: dict with keys "success" (boolean) and "message" (str) if failed.
"""
Notification.objects(users=username).update(pull__users=username)
def get_user_notifications(username, count=False, newer_than=None):
"""
Get the notifications for a user.
:param username: The user to get notifications for.
:type username: str
:param count: Only return the count.
:type count: bool
:param newer_than: Only return notifications created after this time.
:type newer_than: str in ISODate format or None
:returns: int, :class:`cripts.core.cripts_mongoengine.CriptsQuerySet`
"""
n = None
if newer_than is None:
n = Notification.objects(users=username).order_by('-created')
else:
n = Notification.objects(Q(users=username) & Q(created__gt=newer_than)).order_by('-created')
if count:
return len(n)
else:
return n
__supported_notification_types__ = {
'Comment': 'object_id',
'Dataset': 'id',
'EmailAddress': 'id',
'Event': 'id',
'Hash': 'id',
'Target': 'id',
'UserName': 'id',
}
class NotificationLockManager(object):
"""
Manager class to handle locks for notifications.
"""
__notification_mutex__ = threading.Lock()
__notification_locks__ = {}
@classmethod
def get_notification_lock(cls, username):
"""
@threadsafe
Gets a notification lock for the specified user, if it doesn't exist
then one is created.
"""
if username not in cls.__notification_locks__:
# notification lock doesn't exist for user, create new lock
cls.__notification_mutex__.acquire()
try:
# safe double checked locking
if username not in cls.__notification_locks__:
cls.__notification_locks__[username] = threading.Condition()
finally:
cls.__notification_mutex__.release()
return cls.__notification_locks__.get(username)
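# Consumer-side sketch of the lock protocol (mirrors get_notification_details
# above): take the per-user Condition, check for data, and wait() with a
# timeout so that notifyAll() in create_notification() can wake the poller
# early. Purely illustrative; not called anywhere.
def wait_for_notifications_sketch(username, timeout=60):
    lock = NotificationLockManager.get_notification_lock(username)
    lock.acquire()
    try:
        notifications = get_user_notifications(username)
        if not notifications:
            lock.wait(timeout)
            notifications = get_user_notifications(username)
    finally:
        lock.release()
    return notifications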
def generate_notification_header(obj):
"""
Generates notification header information based upon the object -- this is
used to preface the notification's context.
Could possibly be used for "Favorites" descriptions as well.
:param obj: The top-level object instantiated class.
:type obj: class which inherits from
:class:`cripts.core.cripts_mongoengine.CriptsBaseAttributes`.
:returns: str with a human readable identification of the object
"""
generate_notification_header_handler = NotificationHeaderManager.get_header_handler(obj._meta['cripts_type'])
if generate_notification_header_handler is not None:
return generate_notification_header_handler(obj)
else:
return "%s: %s" % (obj._meta['cripts_type'], str(obj.id))
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/ImagingStudy) on 2019-05-07.
# 2019, SMART Health IT.
from . import domainresource
class ImagingStudy(domainresource.DomainResource):
""" A set of images produced in single study (one or more series of references
images).
Representation of the content produced in a DICOM imaging study. A study
comprises a set of series, each of which includes a set of Service-Object
Pair Instances (SOP Instances - images or other data) acquired or produced
in a common context. A series is of only one modality (e.g. X-ray, CT, MR,
ultrasound), but a study may have multiple series of different modalities.
"""
resource_type = "ImagingStudy"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.basedOn = None
""" Request fulfilled.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.description = None
""" Institution-generated description.
Type `str`. """
self.encounter = None
""" Encounter with which this imaging study is associated.
Type `FHIRReference` (represented as `dict` in JSON). """
self.endpoint = None
""" Study access endpoint.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.identifier = None
""" Identifiers for the whole study.
List of `Identifier` items (represented as `dict` in JSON). """
self.interpreter = None
""" Who interpreted images.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.location = None
""" Where ImagingStudy occurred.
Type `FHIRReference` (represented as `dict` in JSON). """
self.modality = None
""" All series modality if actual acquisition modalities.
List of `Coding` items (represented as `dict` in JSON). """
self.note = None
""" User-defined comments.
List of `Annotation` items (represented as `dict` in JSON). """
self.numberOfInstances = None
""" Number of Study Related Instances.
Type `int`. """
self.numberOfSeries = None
""" Number of Study Related Series.
Type `int`. """
self.procedureCode = None
""" The performed procedure code.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.procedureReference = None
""" The performed Procedure reference.
Type `FHIRReference` (represented as `dict` in JSON). """
self.reasonCode = None
""" Why the study was requested.
List of `CodeableConcept` items (represented as `dict` in JSON). """
self.reasonReference = None
""" Why was study performed.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.referrer = None
""" Referring physician.
Type `FHIRReference` (represented as `dict` in JSON). """
self.series = None
""" Each study has one or more series of instances.
List of `ImagingStudySeries` items (represented as `dict` in JSON). """
self.started = None
""" When the study was started.
Type `FHIRDate` (represented as `str` in JSON). """
self.status = None
""" registered | available | cancelled | entered-in-error | unknown.
Type `str`. """
self.subject = None
""" Who or what is the subject of the study.
Type `FHIRReference` (represented as `dict` in JSON). """
super(ImagingStudy, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImagingStudy, self).elementProperties()
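        # Each tuple is (name, json_name, type, is_list, "of_many" group, not_optional).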
js.extend([
("basedOn", "basedOn", fhirreference.FHIRReference, True, None, False),
("description", "description", str, False, None, False),
("encounter", "encounter", fhirreference.FHIRReference, False, None, False),
("endpoint", "endpoint", fhirreference.FHIRReference, True, None, False),
("identifier", "identifier", identifier.Identifier, True, None, False),
("interpreter", "interpreter", fhirreference.FHIRReference, True, None, False),
("location", "location", fhirreference.FHIRReference, False, None, False),
("modality", "modality", coding.Coding, True, None, False),
("note", "note", annotation.Annotation, True, None, False),
("numberOfInstances", "numberOfInstances", int, False, None, False),
("numberOfSeries", "numberOfSeries", int, False, None, False),
("procedureCode", "procedureCode", codeableconcept.CodeableConcept, True, None, False),
("procedureReference", "procedureReference", fhirreference.FHIRReference, False, None, False),
("reasonCode", "reasonCode", codeableconcept.CodeableConcept, True, None, False),
("reasonReference", "reasonReference", fhirreference.FHIRReference, True, None, False),
("referrer", "referrer", fhirreference.FHIRReference, False, None, False),
("series", "series", ImagingStudySeries, True, None, False),
("started", "started", fhirdate.FHIRDate, False, None, False),
("status", "status", str, False, None, True),
("subject", "subject", fhirreference.FHIRReference, False, None, True),
])
return js
from . import backboneelement
class ImagingStudySeries(backboneelement.BackboneElement):
""" Each study has one or more series of instances.
Each study has one or more series of images or other content.
"""
resource_type = "ImagingStudySeries"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.bodySite = None
""" Body part examined.
Type `Coding` (represented as `dict` in JSON). """
self.description = None
""" A short human readable summary of the series.
Type `str`. """
self.endpoint = None
""" Series access endpoint.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.instance = None
""" A single SOP instance from the series.
List of `ImagingStudySeriesInstance` items (represented as `dict` in JSON). """
self.laterality = None
""" Body part laterality.
Type `Coding` (represented as `dict` in JSON). """
self.modality = None
""" The modality of the instances in the series.
Type `Coding` (represented as `dict` in JSON). """
self.number = None
""" Numeric identifier of this series.
Type `int`. """
self.numberOfInstances = None
""" Number of Series Related Instances.
Type `int`. """
self.performer = None
""" Who performed the series.
List of `ImagingStudySeriesPerformer` items (represented as `dict` in JSON). """
self.specimen = None
""" Specimen imaged.
List of `FHIRReference` items (represented as `dict` in JSON). """
self.started = None
""" When the series started.
Type `FHIRDate` (represented as `str` in JSON). """
self.uid = None
""" DICOM Series Instance UID for the series.
Type `str`. """
super(ImagingStudySeries, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImagingStudySeries, self).elementProperties()
js.extend([
("bodySite", "bodySite", coding.Coding, False, None, False),
("description", "description", str, False, None, False),
("endpoint", "endpoint", fhirreference.FHIRReference, True, None, False),
("instance", "instance", ImagingStudySeriesInstance, True, None, False),
("laterality", "laterality", coding.Coding, False, None, False),
("modality", "modality", coding.Coding, False, None, True),
("number", "number", int, False, None, False),
("numberOfInstances", "numberOfInstances", int, False, None, False),
("performer", "performer", ImagingStudySeriesPerformer, True, None, False),
("specimen", "specimen", fhirreference.FHIRReference, True, None, False),
("started", "started", fhirdate.FHIRDate, False, None, False),
("uid", "uid", str, False, None, True),
])
return js
class ImagingStudySeriesInstance(backboneelement.BackboneElement):
""" A single SOP instance from the series.
A single SOP instance within the series, e.g. an image, or presentation
state.
"""
resource_type = "ImagingStudySeriesInstance"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.number = None
""" The number of this instance in the series.
Type `int`. """
self.sopClass = None
""" DICOM class type.
Type `Coding` (represented as `dict` in JSON). """
self.title = None
""" Description of instance.
Type `str`. """
self.uid = None
""" DICOM SOP Instance UID.
Type `str`. """
super(ImagingStudySeriesInstance, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImagingStudySeriesInstance, self).elementProperties()
js.extend([
("number", "number", int, False, None, False),
("sopClass", "sopClass", coding.Coding, False, None, True),
("title", "title", str, False, None, False),
("uid", "uid", str, False, None, True),
])
return js
class ImagingStudySeriesPerformer(backboneelement.BackboneElement):
""" Who performed the series.
Indicates who or what performed the series and how they were involved.
"""
resource_type = "ImagingStudySeriesPerformer"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.actor = None
""" Who performed the series.
Type `FHIRReference` (represented as `dict` in JSON). """
self.function = None
""" Type of performance.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(ImagingStudySeriesPerformer, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(ImagingStudySeriesPerformer, self).elementProperties()
js.extend([
("actor", "actor", fhirreference.FHIRReference, False, None, True),
("function", "function", codeableconcept.CodeableConcept, False, None, False),
])
return js
import sys
try:
from . import annotation
except ImportError:
annotation = sys.modules[__package__ + '.annotation']
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + '.codeableconcept']
try:
from . import coding
except ImportError:
coding = sys.modules[__package__ + '.coding']
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + '.fhirdate']
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + '.fhirreference']
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + '.identifier']
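# Illustrative helper (not part of the generated file): builds an ImagingStudy
# from a minimal JSON dict. Field names follow the R4 ImagingStudy definition
# above; the UIDs and codes are made-up example values.
def _example_imaging_study():
    jsondict = {
        "resourceType": "ImagingStudy",
        "status": "available",
        "subject": {"reference": "Patient/example"},
        "series": [{
            "uid": "1.2.3.4.5.6.7.8.9",
            "modality": {"system": "http://dicom.nema.org/resources/ontology/DCM", "code": "CT"},
            "instance": [{
                "uid": "1.2.3.4.5.6.7.8.9.1",
                "sopClass": {"system": "urn:ietf:rfc:3986",
                             "code": "urn:oid:1.2.840.10008.5.1.4.1.1.2"},
            }],
        }],
    }
    return ImagingStudy(jsondict)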
|
|
# -*- coding: utf-8 -*-
### RDH - I'm taking this from Madrona and tweaking it to use extra fields (marked with #[RDH])
### I'm porting this over to use osgeo ogr instead of the libgdal version 'cause I can't get that to work
### I'm going to use ### to comment out the lines that I'm changing. The replacement will be below.
import os
import zipfile
import tempfile
import datetime
import cStringIO
from django.http import HttpResponse
from django.utils.encoding import smart_str
from django.contrib.gis.db.models.fields import GeometryField
from django.contrib.gis.db import models #[RDH]
###from django.contrib.gis.gdal.libgdal import lgdal as ogr
from osgeo import ogr,osr
from django.contrib.gis.gdal import check_err, OGRGeomType # Driver, OGRGeometry, OGRGeomType, SpatialReference, check_err, CoordTransform
class ShpResponder(object):
def __init__(self, queryset, readme=None, geo_field=None, proj_transform=None, mimetype='application/zip',file_name='shp_download'):
"""
"""
self.queryset = queryset
self.readme = readme
self.geo_field = geo_field
self.proj_transform = proj_transform
self.mimetype = mimetype
self.file_name = smart_str(file_name)
# def __call__(self, *args, **kwargs):
def __call__(self, addl_cols=None, *args, **kwargs): #[RDH]
#[RDH] Adding optional list of additional columns for adding arbitrary float columns to shapefile
"""
"""
fields = self.queryset.model._meta.fields[:]#[RDH] - make a copy, don't alter original
        for col in addl_cols or []: #[RDH] tolerate the default of None
float_field = models.FloatField(name=col) #[RDH]
fields.append(float_field) #[RDH]
geo_fields = [f for f in fields if isinstance(f, GeometryField)]
geo_fields_names = ', '.join([f.name for f in geo_fields])
attributes = [f for f in fields if not isinstance(f, GeometryField)]
if len(geo_fields) > 1:
if not self.geo_field:
raise ValueError("More than one geodjango geometry field found, please specify which to use by name using the 'geo_field' keyword. Available fields are: '%s'" % geo_fields_names)
else:
geo_field_by_name = [fld for fld in geo_fields if fld.name == self.geo_field]
if not geo_field_by_name:
raise ValueError("Geodjango geometry field not found with the name '%s', fields available are: '%s'" % (self.geo_field,geo_fields_names))
else:
geo_field = geo_field_by_name[0]
elif geo_fields:
geo_field = geo_fields[0]
else:
raise ValueError('No geodjango geometry fields found in this model queryset')
# Get the shapefile driver
###dr = Driver('ESRI Shapefile')
dr = ogr.GetDriverByName('ESRI Shapefile')
# create a temporary file to write the shapefile to
# since we are ultimately going to zip it up
tmp = tempfile.NamedTemporaryFile(suffix='.shp', mode='w+b')
# we must close the file for GDAL to be able to open and write to it
tmp.close()
# Creating the datasource
###ds = ogr.OGR_Dr_CreateDataSource(dr._ptr, tmp.name, None)
ds = dr.CreateDataSource(tmp.name)
if ds is None:
raise Exception('Could not create file!')
# Get the right geometry type number for ogr
if hasattr(geo_field,'geom_type'):
###ogr_type = OGRGeomType(geo_field.geom_type).num
ogr_type = OGRGeomType(geo_field.geom_type).num
else:
###ogr_type = OGRGeomType(geo_field._geom).num
ogr_type = OGRGeomType(geo_field._geom).num
# Set up the native spatial reference of the geometry field using the srid
native_srs = osr.SpatialReference()
if hasattr(geo_field,'srid'):
###native_srs = SpatialReference(geo_field.srid)
native_srs.ImportFromEPSG(geo_field.srid)
else:
###native_srs = SpatialReference(geo_field._srid)
native_srs.ImportFromEPSG(geo_field._srid)
###if self.proj_transform:
### output_srs = SpatialReference(self.proj_transform)
### ct = CoordTransform(native_srs, output_srs)
###else:
### output_srs = native_srs
output_srs = native_srs
# create the layer
# print 'about to try to create data layer'
# print 'ds: %s, path: %s' % (ds, tmp.name)
###layer = ogr.OGR_DS_CreateLayer(ds, tmp.name, output_srs._ptr, ogr_type, None)
layer = ds.CreateLayer('lyr',srs=output_srs,geom_type=ogr_type)
# Create the fields
# Todo: control field order as param
for field in attributes:
###fld = ogr.OGR_Fld_Create(str(field.name), 4)
###added = ogr.OGR_L_CreateField(layer, fld, 0)
###check_err(added)
if field.__class__.__name__ == 'FloatField':
field_defn = ogr.FieldDefn(str(field.name),ogr.OFTReal)
elif field.__class__.__name__ == 'IntegerField':
field_defn = ogr.FieldDefn(str(field.name),ogr.OFTInteger)
else:
field_defn = ogr.FieldDefn(str(field.name),ogr.OFTString)
field_defn.SetWidth(255)
if layer.CreateField(field_defn) != 0:
                raise Exception('Failed to create field')
# Getting the Layer feature definition.
###feature_def = ogr.OGR_L_GetLayerDefn(layer)
feature_def = layer.GetLayerDefn()
# Loop through queryset creating features
for item in self.queryset:
###feat = ogr.OGR_F_Create(feature_def)
feat = ogr.Feature(feature_def)
# For now, set all fields as strings
# TODO: catch model types and convert to ogr fields
# http://www.gdal.org/ogr/classOGRFeature.html
# OGR_F_SetFieldDouble
#OFTReal => FloatField DecimalField
# OGR_F_SetFieldInteger
#OFTInteger => IntegerField
            #OGR_F_SetFieldString
            #OFTString => CharField
            # OGR_F_SetFieldDateTime()
            #OFTDateTime => DateTimeField
            #OFTTime => TimeField
            #OFTDate => DateField
idx = 0
for field in attributes:
value = getattr(item,field.name)
if field.__class__.__name__ == 'FloatField':
try: #[RDH]
value = float(value)
except TypeError, E: #[RDH] a 'None' value breaks this.
if value == None: #[RDH]
pass #[RDH]
else: #[RDH]
value = 0.0 #[RDH] since all vals are assumed float, set to 0.0
elif field.__class__.__name__ == 'IntegerField':
value = int(value)
else:
try:
value = str(value)
except UnicodeEncodeError, E:
# http://trac.osgeo.org/gdal/ticket/882
value = ''
###ogr.OGR_F_SetFieldString(feat, idx, string_value)
#changing the following SetField command from accessing field by name to index
#this change solves an issue that arose sometime after gdal 1.6.3
#in which the field names became truncated to 10 chars in CreateField
#feat.SetField(str(field.name),string_value)
feat.SetField(idx, value)
idx += 1
# Transforming & setting the geometry
geom = getattr(item,geo_field.name)
# if requested we transform the input geometry
            # to match the shapefile's target projection
if geom:
###ogr_geom = OGRGeometry(geom.wkt,output_srs)
ogr_geom = ogr.CreateGeometryFromWkt(geom.wkt)
###if self.proj_transform:
### ogr_geom.transform(ct)
# create the geometry
###check_err(ogr.OGR_F_SetGeometry(feat, ogr_geom._ptr))
check_err(feat.SetGeometry(ogr_geom))
else:
# Case where geometry object is not found because of null value for field
                # effectively loses the whole record in the shapefile if the geometry does not exist
pass
            # create the feature in the layer.
###check_err(ogr.OGR_L_SetFeature(layer, feat))
check_err(layer.CreateFeature(feat))
# Cleaning up
###check_err(ogr.OGR_L_SyncToDisk(layer))
###ogr.OGR_DS_Destroy(ds)
###ogr.OGRCleanupAll()
ds.Destroy()
if 'return_file_not_response' in args:
return tmp.name
else:
# Read resulting shapefile into a zipfile buffer
buffer = cStringIO.StringIO()
zip = zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED)
files = ['shp','shx','prj','dbf']
for item in files:
filename = '%s.%s' % (tmp.name.replace('.shp',''), item)
zip.write(filename, arcname='%s.%s' % (self.file_name.replace('.shp',''), item))
if self.readme:
zip.writestr('README.txt',self.readme)
zip.close()
buffer.flush()
zip_stream = buffer.getvalue()
buffer.close()
# Stick it all in a django HttpResponse
response = HttpResponse()
response['Content-Disposition'] = 'attachment; filename=%s.zip' % self.file_name.replace('.shp','')
response['Content-length'] = str(len(zip_stream))
response['Content-Type'] = self.mimetype
response.write(zip_stream)
return response
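# Illustrative usage sketch (hypothetical, not part of the original module):
# returning a zipped shapefile from a Django view. 'myapp.models.Parcel' and
# its 'score' float attribute are made-up names; addl_cols shows the extra
# float-column hook added above. Passing the string 'return_file_not_response'
# as a positional argument returns the temporary .shp path instead of an
# HttpResponse.
def example_shp_download(request):
    from myapp.models import Parcel  # hypothetical GeoDjango model
    qs = Parcel.objects.all()
    responder = ShpResponder(qs, file_name='parcels')
    return responder(addl_cols=['score'])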
|
|
"""
Retrosheet Data Notice:
Recipients of Retrosheet data are free to make any desired use of
the information, including (but not limited to) selling it,
giving it away, or producing a commercial product based upon the
data. Retrosheet has one requirement for any such transfer of
data or product development, which is that the following
statement must appear prominently:
The information used here was obtained free of
charge from and is copyrighted by Retrosheet. Interested
parties may contact Retrosheet at "www.retrosheet.org".
Retrosheet makes no guarantees of accuracy for the information
that is supplied. Much effort is expended to make our website
as correct as possible, but Retrosheet shall not be held
responsible for any consequences arising from the use the
material presented here. All information is subject to corrections
as additional data are received. We are grateful to anyone who
discovers discrepancies and we appreciate learning of the details.
"""
import pandas as pd
from pybaseball.utils import get_text_file
from datetime import datetime
from io import StringIO
from github import Github
import os
from getpass import getuser, getpass
from github.GithubException import RateLimitExceededException
import warnings
gamelog_columns = [
'date', 'game_num', 'day_of_week', 'visiting_team',
'visiting_team_league', 'visiting_team_game_num', 'home_team',
'home_team_league', 'home_team_game_num', 'visiting_score',
'home_score', 'num_outs', 'day_night', 'completion_info',
'forfeit_info', 'protest_info', 'park_id', 'attendance',
'time_of_game_minutes', 'visiting_line_score',
'home_line_score', 'visiting_abs', 'visiting_hits',
'visiting_doubles', 'visiting_triples', 'visiting_homeruns',
'visiting_rbi', 'visiting_sac_hits', 'visiting_sac_flies',
'visiting_hbp', 'visiting_bb', 'visiting_iw', 'visiting_k',
'visiting_sb', 'visiting_cs', 'visiting_gdp', 'visiting_ci',
'visiting_lob', 'visiting_pitchers_used',
'visiting_individual_er', 'visiting_er', 'visiting_wp',
'visiting_balks', 'visiting_po', 'visiting_assists',
'visiting_errors', 'visiting_pb', 'visiting_dp',
'visiting_tp', 'home_abs', 'home_hits', 'home_doubles',
'home_triples', 'home_homeruns', 'home_rbi',
'home_sac_hits', 'home_sac_flies', 'home_hbp', 'home_bb',
'home_iw', 'home_k', 'home_sb', 'home_cs', 'home_gdp',
'home_ci', 'home_lob', 'home_pitchers_used',
'home_individual_er', 'home_er', 'home_wp', 'home_balks',
'home_po', 'home_assists', 'home_errors', 'home_pb',
'home_dp', 'home_tp', 'ump_home_id', 'ump_home_name',
'ump_first_id', 'ump_first_name', 'ump_second_id',
'ump_second_name', 'ump_third_id', 'ump_third_name',
'ump_lf_id', 'ump_lf_name', 'ump_rf_id', 'ump_rf_name',
'visiting_manager_id', 'visiting_manager_name',
'home_manager_id', 'home_manager_name',
'winning_pitcher_id', 'winning_pitcher_name',
'losing_pitcher_id', 'losing_pitcher_name',
'save_pitcher_id', 'save_pitcher_name',
'game_winning_rbi_id', 'game_winning_rbi_name',
'visiting_starting_pitcher_id',
'visiting_starting_pitcher_name',
'home_starting_pitcher_id', 'home_starting_pitcher_name',
'visiting_1_id', 'visiting_1_name', 'visiting_1_pos',
'visiting_2_id', 'visiting_2_name', 'visiting_2_pos',
    'visiting_3_id', 'visiting_3_name', 'visiting_3_pos',
'visiting_4_id', 'visiting_4_name', 'visiting_4_pos',
'visiting_5_id', 'visiting_5_name', 'visiting_5_pos',
'visiting_6_id', 'visiting_6_name', 'visiting_6_pos',
'visiting_7_id', 'visiting_7_name', 'visiting_7_pos',
'visiting_8_id', 'visiting_8_name', 'visiting_8_pos',
'visiting_9_id', 'visiting_9_name', 'visiting_9_pos',
'home_1_id', 'home_1_name', 'home_1_pos', 'home_2_id',
'home_2_name', 'home_2_pos', 'home_3_id', 'home_3_name',
'home_3_pos', 'home_4_id', 'home_4_name', 'home_4_pos',
'home_5_id', 'home_5_name', 'home_5_pos', 'home_6_id',
'home_6_name', 'home_6_pos', 'home_7_id', 'home_7_name',
'home_7_pos', 'home_8_id', 'home_8_name', 'home_8_pos',
'home_9_id', 'home_9_name', 'home_9_pos', 'misc',
'acquisition_info'
]
schedule_columns = [
'date', 'game_num', 'day_of_week', 'visiting_team', 'visiting_team_league',
'visiting_team_game_num', 'home_team', 'home_team_league',
'home_team_game_num', 'day_night', 'postponement_cancellation',
'date_of_makeup'
]
parkcode_columns = [
'park_id', 'name', 'nickname', 'city', 'state', 'open', 'close', 'league', 'notes'
]
roster_columns = [
'player_id', 'last_name', 'first_name', 'bats', 'throws', 'team', 'position'
]
gamelog_url = 'https://raw.githubusercontent.com/chadwickbureau/retrosheet/master/gamelog/GL{}.TXT'
schedule_url = 'https://raw.githubusercontent.com/chadwickbureau/retrosheet/master/schedule/{}SKED.TXT'
parkid_url = 'https://raw.githubusercontent.com/chadwickbureau/retrosheet/master/misc/parkcode.txt'
roster_url = 'https://raw.githubusercontent.com/chadwickbureau/retrosheet/master/rosters/{}{}.ROS'
event_url = 'https://raw.githubusercontent.com/chadwickbureau/retrosheet/master/event/{}/{}'
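# How the templates above expand (illustrative values only):
#   gamelog_url.format(1998)                    -> .../gamelog/GL1998.TXT
#   schedule_url.format(1998)                   -> .../schedule/1998SKED.TXT
#   roster_url.format('NYA', 1998)              -> .../rosters/NYA1998.ROS
#   event_url.format('regular', '1998NYA.EVA')  -> .../event/regular/1998NYA.EVA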
def events(season, type='regular', export_dir='.'):
"""
Pulls retrosheet event files for an entire season. The `type` argument
specifies whether to pull regular season, postseason or asg files. Valid
arguments are 'regular', 'post', and 'asg'.
Right now, pybaseball does not parse the retrosheet files but downloads and
saves them.
"""
GH_TOKEN=os.getenv('GH_TOKEN', '')
if not os.path.exists(export_dir):
os.mkdir(export_dir)
    event_files = []  # filled from the repo tree below; stays empty if the listing fails
    try:
g = Github(GH_TOKEN)
repo = g.get_repo('chadwickbureau/retrosheet')
tree = repo.get_git_tree('master')
for t in tree.tree:
if t.path == 'event':
subtree = t
subtree = repo.get_git_tree(subtree.sha)
for t in subtree.tree:
if t.path == type:
subsubtree = t
event_files = [t.path for t in repo.get_git_tree(subsubtree.sha).tree if str(season) in t.path]
if len(event_files) == 0:
raise ValueError(f'Event files not available for {season}')
except RateLimitExceededException:
warnings.warn(
'Github rate limit exceeded. Cannot check if the file you want exists.',
UserWarning
)
for filename in event_files:
print(f'Downloading {filename}')
s = get_text_file(event_url.format(type, filename))
with open(os.path.join(export_dir, filename), 'w') as f:
f.write(s)
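# Illustrative call (network access assumed; a GH_TOKEN environment variable is
# optional but avoids anonymous GitHub rate limits when listing the repo tree):
#   events(1975, type='post', export_dir='./retrosheet_1975_post')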
def rosters(season):
"""
Pulls retrosheet roster files for an entire season
"""
GH_TOKEN=os.getenv('GH_TOKEN', '')
    rosters = []  # filled from the repo tree below; stays empty if the listing fails
    try:
g = Github(GH_TOKEN)
repo = g.get_repo('chadwickbureau/retrosheet')
tree = repo.get_git_tree('master')
for t in tree.tree:
if t.path == 'rosters':
subtree = t
rosters = [t.path for t in repo.get_git_tree(subtree.sha).tree if str(season) in t.path]
if len(rosters) == 0:
raise ValueError(f'Rosters not available for {season}')
except RateLimitExceededException:
warnings.warn(
'Github rate limit exceeded. Cannot check if the file you want exists.',
UserWarning
)
df_list = [_roster(team = r[:3], season = season, checked=False) for r in rosters]
return pd.concat(df_list)
def _roster(team, season, checked = False):
"""
Pulls retrosheet roster files
"""
GH_TOKEN=os.getenv('GH_TOKEN', '')
if not checked:
g = Github(GH_TOKEN)
try:
repo = g.get_repo('chadwickbureau/retrosheet')
tree = repo.get_git_tree('master')
for t in tree.tree:
if t.path == 'rosters':
subtree = t
rosters = [t.path for t in repo.get_git_tree(subtree.sha).tree]
file_name = f'{team}{season}.ROS'
if file_name not in rosters:
raise ValueError(f'Roster not available for {team} in {season}')
except RateLimitExceededException:
warnings.warn(
'Github rate limit exceeded. Cannot check if the file you want exists.',
UserWarning
)
s = get_text_file(roster_url.format(team, season))
data = pd.read_csv(StringIO(s), header=None, sep=',', quotechar='"')
data.columns = roster_columns
return data
def park_codes():
"""
Pulls retrosheet Park IDs
"""
s = get_text_file(parkid_url)
data = pd.read_csv(StringIO(s), sep=',', quotechar='"')
data.columns = parkcode_columns
return data
def schedules(season):
"""
Pull retrosheet schedule for a given season
"""
GH_TOKEN=os.getenv('GH_TOKEN', '')
# validate input
g = Github(GH_TOKEN)
repo = g.get_repo('chadwickbureau/retrosheet')
schedules = [f.path[f.path.rfind('/')+1:] for f in repo.get_contents('schedule')]
file_name = f'{season}SKED.TXT'
if file_name not in schedules:
raise ValueError(f'Schedule not available for {season}')
s = get_text_file(schedule_url.format(season))
data = pd.read_csv(StringIO(s), header=None, sep=',', quotechar='"')
data.columns = schedule_columns
return data
def season_game_logs(season):
"""
Pull Retrosheet game logs for a given season
"""
GH_TOKEN=os.getenv('GH_TOKEN', '')
# validate input
g = Github(GH_TOKEN)
repo = g.get_repo('chadwickbureau/retrosheet')
gamelogs = [f.path[f.path.rfind('/')+1:] for f in repo.get_contents('gamelog')]
file_name = f'GL{season}.TXT'
if file_name not in gamelogs:
raise ValueError(f'Season game logs not available for {season}')
s = get_text_file(gamelog_url.format(season))
data = pd.read_csv(StringIO(s), header=None, sep=',', quotechar='"')
data.columns = gamelog_columns
return data
def world_series_logs():
"""
Pull Retrosheet World Series Game Logs
"""
s = get_text_file(gamelog_url.format('WS'))
data = pd.read_csv(StringIO(s), header=None, sep=',', quotechar='"')
data.columns = gamelog_columns
return data
def all_star_game_logs():
"""
Pull Retrosheet All Star Game Logs
"""
s = get_text_file(gamelog_url.format('AS'))
data = pd.read_csv(StringIO(s), header=None, sep=',', quotechar='"')
data.columns = gamelog_columns
return data
def wild_card_logs():
"""
Pull Retrosheet Wild Card Game Logs
"""
s = get_text_file(gamelog_url.format('WC'))
data = pd.read_csv(StringIO(s), header=None, sep=',', quotechar='"')
data.columns = gamelog_columns
return data
def division_series_logs():
"""
Pull Retrosheet Division Series Game Logs
"""
s = get_text_file(gamelog_url.format('DV'))
data = pd.read_csv(StringIO(s), header=None, sep=',', quotechar='"')
data.columns = gamelog_columns
return data
def lcs_logs():
"""
Pull Retrosheet LCS Game Logs
"""
s = get_text_file(gamelog_url.format('LC'))
data = pd.read_csv(StringIO(s), header=None, sep=',', quotechar='"')
data.columns = gamelog_columns
return data
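# Illustrative sketch (not part of the module): every helper above returns a
# pandas DataFrame except events(), which writes raw event files to disk.
# Network access is assumed; 1998 is an arbitrary example season.
if __name__ == '__main__':
    parks = park_codes()
    sked = schedules(1998)
    logs = season_game_logs(1998)
    print(parks.shape, sked.shape, logs.shape)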
|