 if 'std'==format:
 out.write("ChanMngmt:\n")
 if 'MessageID' in params: out.write(" MessageID: "+str(params['MessageID'])+"\n")
 if 'RepeatIndicator' in params: out.write(" RepeatIndicator: "+str(params['RepeatIndicator'])+"\n")
 if 'UserID' in params: out.write(" UserID: "+str(params['UserID'])+"\n")
 if 'Spare' in params: out.write(" Spare: "+str(params['Spare'])+"\n")
 if 'ChanA' in params: out.write(" ChanA: "+str(params['ChanA'])+"\n")
 if 'ChanB' in params: out.write(" ChanB: "+str(params['ChanB'])+"\n")
 if 'TxRxMode' in params: out.write(" TxRxMode: "+str(params['TxRxMode'])+"\n")
 if 'power' in params: out.write(" power: "+str(params['power'])+"\n")
 if 'corner1_lon' in params: out.write(" corner1_lon: "+str(params['corner1_lon'])+"\n")
 if 'corner1_lat' in params: out.write(" corner1_lat: "+str(params['corner1_lat'])+"\n")
 if 'corner2_lon' in params: out.write(" corner2_lon: "+str(params['corner2_lon'])+"\n")
 if 'corner2_lat' in params: out.write(" corner2_lat: "+str(params['corner2_lat'])+"\n")
 if 'IndicatorType' in params: out.write(" IndicatorType: "+str(params['IndicatorType'])+"\n")
 if 'ChanABandwidth' in params: out.write(" ChanABandwidth: "+str(params['ChanABandwidth'])+"\n")
 if 'ChanBBandwidth' in params: out.write(" ChanBBandwidth: "+str(params['ChanBBandwidth'])+"\n")
 if 'TransZoneSize' in params: out.write(" TransZoneSize: "+str(params['TransZoneSize'])+"\n")
 if 'Spare2' in params: out.write(" Spare2: "+str(params['Spare2'])+"\n")
 elif 'csv'==format:
 if None == options.fieldList:
 options.fieldList = fieldList
 needComma = False;
 for field in fieldList:
 if needComma: out.write(',')
 needComma = True
 if field in params:
 out.write(str(params[field]))
 # else: leave it empty
 out.write("\n")
 elif 'html'==format:
 printHtml(params,out)
 elif 'sql'==format:
 sqlInsertStr(params,out,dbType=dbType)
 else:
 print "ERROR: unknown format:",format
 assert False
 return # Nothing to return
RepeatIndicatorEncodeLut = {
 'default':'0',
 'do not repeat any more':'3',
 } #RepeatIndicatorEncodeLut
RepeatIndicatorDecodeLut = {
 '0':'default',
 '3':'do not repeat any more',
 } # RepeatIndicatorDecodeLut
TxRxModeEncodeLut = {
 'Tx A/Tx B, Rx A/RX B':'0',
 'Tx A, Rx A/Rx B':'1',
 'Tx B, Rx A/Rx B':'2',
 } #TxRxModeEncodeLut
TxRxModeDecodeLut = {
 '0':'Tx A/Tx B, Rx A/RX B',
 '1':'Tx A, Rx A/Rx B',
 '2':'Tx B, Rx A/Rx B',
 } # TxRxModeDecodeLut
powerEncodeLut = {
 'high':'0',
 'low':'1',
 } #powerEncodeLut
powerDecodeLut = {
 '0':'high',
 '1':'low',
 } # powerDecodeLut
IndicatorTypeEncodeLut = {
 'broadcast':'0',
 'addressed':'1',
 } #IndicatorTypeEncodeLut
IndicatorTypeDecodeLut = {
 '0':'broadcast',
 '1':'addressed',
 } # IndicatorTypeDecodeLut
ChanABandwidthEncodeLut = {
 'specified by channel number':'0',
 '12.5kHz':'1',
 } #ChanABandwidthEncodeLut
ChanABandwidthDecodeLut = {
 '0':'specified by channel number',
 '1':'12.5kHz',
 } # ChanABandwidthDecodeLut
ChanBBandwidthEncodeLut = {
 'specified by channel number':'0',
 '12.5kHz':'1',
 } #ChanBBandwidthEncodeLut
ChanBBandwidthDecodeLut = {
 '0':'specified by channel number',
 '1':'12.5kHz',
 } # ChanBBandwidthDecodeLut
TransZoneSizeEncodeLut = {
 '1':'0',
 '2':'1',
 '3':'2',
 '4':'3',
 '5':'4',
 '6':'5',
 '7':'6',
 '8':'7',
 } #TransZoneSizeEncodeLut
TransZoneSizeDecodeLut = {
 '0':'1',
 '1':'2',
 '2':'3',
 '3':'4',
 '4':'5',
 '5':'6',
 '6':'7',
 '7':'8',
 } # TransZoneSizeDecodeLut
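# The *EncodeLut / *DecodeLut pairs above map between human-readable labels and the
# raw field values carried in the message; both keys and values are strings. A
# minimal sketch of the intended use:
#
#     powerEncodeLut['low']         # -> '1'
#     powerDecodeLut['1']           # -> 'low'
#     TransZoneSizeDecodeLut['0']   # -> '1' (the stored value is the size minus one)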
######################################################################
# SQL SUPPORT
######################################################################
dbTableName='ChanMngmt'
'Database table name'
def sqlCreateStr(outfile=sys.stdout, fields=None, extraFields=None
 ,addCoastGuardFields=True
 ,dbType='postgres'
 ):
 """
 Return the SQL CREATE command for this message type
 @param outfile: file like object to print to.
 @param fields: which fields to put in the create. Defaults to all.
 @param extraFields: A sequence of tuples containing (name,sql type) for additional fields
 @param addCoastGuardFields: Add the extra fields that come after the NMEA checksum from the USCG N-AIS format
 @param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres')
 @type addCoastGuardFields: bool
 @return: sql create string
 @rtype: str
 @see: sqlCreate
 """
 # FIX: should this sqlCreate be the same as in LaTeX (createFuncName) rather than hard coded?
 outfile.write(str(sqlCreate(fields,extraFields,addCoastGuardFields,dbType=dbType)))
def sqlCreate(fields=None, extraFields=None, addCoastGuardFields=True, dbType='postgres'):
 """Return the sqlhelp object to create the table.
 @param fields: which fields to put in the create. Defaults to all.
 @param extraFields: A sequence of tuples containing (name,sql type) for additional fields
 @param addCoastGuardFields: Add the extra fields that come after the NMEA checksum from the USCG N-AIS format
 @type addCoastGuardFields: bool
 @param dbType: Which flavor of database we are using so that the create is tailored ('sqlite' or 'postgres')
 @return: An object that can be used to generate the SQL CREATE command
 @rtype: sqlhelp.create
 """
 if fields is None:
 fields = fieldList
 c = sqlhelp.create('ChanMngmt',dbType=dbType)
 c.addPrimaryKey()
 if 'MessageID' in fields: c.addInt ('MessageID')
 if 'RepeatIndicator' in fields: c.addInt ('RepeatIndicator')
 if 'UserID' in fields: c.addInt ('UserID')
 if 'Spare' in fields: c.addInt ('Spare')
 if 'ChanA' in fields: c.addInt ('ChanA')
 if 'ChanB' in fields: c.addInt ('ChanB')
 if 'TxRxMode' in fields: c.addInt ('TxRxMode')
 if 'power' in fields: c.addInt ('power')
 if dbType != 'postgres':
 if 'corner1_lon' in fields: c.addDecimal('corner1_lon',5,2)
 if dbType != 'postgres':
 if 'corner1_lat' in fields: c.addDecimal('corner1_lat',5,2)
 if dbType != 'postgres':
 if 'corner2_lon' in fields: c.addDecimal('corner2_lon',5,2)
 if dbType != 'postgres':
 if 'corner2_lat' in fields: c.addDecimal('corner2_lat',5,2)
 if 'IndicatorType' in fields: c.addInt ('IndicatorType')
 if 'ChanABandwidth' in fields: c.addInt ('ChanABandwidth')
 if 'ChanBBandwidth' in fields: c.addInt ('ChanBBandwidth')
 if 'TransZoneSize' in fields: c.addInt ('TransZoneSize')
 if 'Spare2' in fields: c.addInt ('Spare2')
 if addCoastGuardFields:
 # c.addInt('cg_s_rssi') # Relative signal strength indicator
 # c.addInt('cg_d_strength') # dBm receive strength
 # c.addVarChar('cg_x',10) # Idonno
 c.addInt('cg_t_arrival') # Receive timestamp from the AIS equipment 'T'
 c.addInt('cg_s_slotnum') # Slot received in
 c.addVarChar('cg_r',15) # Receiver station ID - should usually be an MMSI, but sometimes is a string
 c.addInt('cg_sec') # UTC seconds since the epoch
 c.addTimestamp('cg_timestamp') # UTC decoded cg_sec - not actually in the data stream
 if dbType == 'postgres':
 #--- EPSG 4326 : WGS 84
 #INSERT INTO "spatial_ref_sys" ("srid","auth_name","auth_srid","srtext","proj4text") VALUES (4326,'EPSG',4326,'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]','+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ');
 c.addPostGIS('corner1','POINT',2,SRID=4326);
 #--- EPSG 4326 : WGS 84
 #INSERT INTO "spatial_ref_sys" ("srid","auth_name","auth_srid","srtext","proj4text") VALUES (4326,'EPSG',4326,'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.01745329251994328,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]','+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs ');
 c.addPostGIS('corner2','POINT',2,SRID=4326);
 return c
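# A minimal usage sketch for the create helpers above, assuming only what is shown
# here: sqlCreate() returns a sqlhelp.create object whose str() is the CREATE
# statement, and sqlCreateStr() writes that string to a file-like object.
#
#     import sys
#     # Emit the full CREATE TABLE for SQLite to stdout:
#     sqlCreateStr(sys.stdout, dbType='sqlite')
#     # Or build the statement for a subset of fields yourself:
#     create_obj = sqlCreate(fields=['MessageID', 'UserID', 'ChanA', 'ChanB'], dbType='postgres')
#     sql_text = str(create_obj)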
def sqlInsertStr(params, outfile=sys.stdout, extraParams=None, dbType='postgres'):
 """
 Return the SQL INSERT command for this message type
 @param params: dictionary of values keyed by field name
 @param outfile: file like object to print to.
 @param extraParams: A sequence of tuples containing (name,sql type) for additional fields
 @return: None; the SQL INSERT string is written to outfile
 @rtype: str
 @see: sqlCreate
 """
 outfile.write(str(sqlInsert(params,extraParams,dbType=dbType)))
def sqlInsert(params,extraParams=None,dbType='postgres'):
 """
 Give the SQL INSERT statement
 @param params: dict keyed by field name of values
 @param extraParams: any extra fields that you have created beyond the normal ais message fields
 @rtype: sqlhelp.insert
 @return: insert class instance
 TODO(schwehr):allow optional type checking of params?
 @warning: this will take invalid keys happily and do what???
 """
 i = sqlhelp.insert('ChanMngmt',dbType=dbType)
 if dbType=='postgres':
 finished = []
 for key in params:
 if key in finished:
 continue
 if key not in toPgFields and key not in fromPgFields:
 if type(params[key])==Decimal: i.add(key,float(params[key]))
 else: i.add(key,params[key])
 else:
 if key in fromPgFields:
 val = params[key]
 # Had better be a WKT type like POINT(-88.1 30.321)
 i.addPostGIS(key,val)
 finished.append(key)
 else:
 # Need to construct the type.
 pgName = toPgFields[key]
 #valStr='GeomFromText(\''+pgTypes[pgName]+'('
 valStr=pgTypes[pgName]+'('
 vals = []
 for nonPgKey in fromPgFields[pgName]:
 vals.append(str(params[nonPgKey]))
 finished.append(nonPgKey)
 valStr+=' '.join(vals)+')'
 i.addPostGIS(pgName,valStr)
 else:
 for key in params:
 if type(params[key])==Decimal: i.add(key,float(params[key]))
 else: i.add(key,params[key])
 if None != extraParams:
 for key in extraParams:
 i.add(key,extraParams[key])
 return i
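# A minimal usage sketch for the insert helpers above, assuming a params dict keyed
# by the field names used throughout this module (values below are illustrative only):
#
#     import sys
#     params = {'MessageID': 22, 'RepeatIndicator': 0, 'UserID': 366123456,
#               'ChanA': 2087, 'ChanB': 2088, 'power': 1}
#     # Write an INSERT statement for SQLite (no PostGIS handling on this path):
#     sqlInsertStr(params, sys.stdout, dbType='sqlite')
#     # Or build it yourself and stringify it:
#     stmt = str(sqlInsert(params, dbType='sqlite'))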
######################################################################
# LATEX SUPPORT
######################################################################
def latexDefinitionTable(outfile=sys.stdout
 ):
 """
 Return the LaTeX definition table for this message type
 @param outfile: file like object to print to.
 @type outfile: file obj
 @return: LaTeX table string via the outfile
 @rtype: str
 """
 o = outfile
 o.write("""
\\begin{table}%[htb]
\\centering
\\begin{tabular}{|l|c|l|}
\\hline
Parameter & Number of bits & Description
\\\\ \\hline\\hline
MessageID & 6 & AIS message number. Must be 22 \\\\ \hline
RepeatIndicator & 2 & Indicates how many times a message has been repeated \\\\ \hline
UserID & 30 & Unique ship identification number (MMSI) \\\\ \hline
Spare & 2 & Not used. Should be set to zero. \\\\ \hline
ChanA & 12 & Channel number from ITU-R M.1084 Annex 4 \\\\ \hline
ChanB & 12 & Channel number from ITU-R M.1084 Annex 4 \\\\ \hline
TxRxMode & 4 & FIX: find the description \\\\ \hline
power & 1 & FIX: put in a description \\\\ \hline
corner1\_lon & 18 & longitude of the north-east corner of the assignment area \\\\ \hline
corner1\_lat & 17 & latitude of the north-east corner of the assignment area \\\\ \hline
corner2\_lon & 18 & longitude of the south-west corner of the assignment area \\\\ \hline
corner2\_lat & 17 & latitude of the south-west corner of the assignment area \\\\ \hline
IndicatorType & 1 & FIX: put in a description \\\\ \hline
ChanABandwidth & 1 & FIX: put in a description \\\\ \hline
ChanBBandwidth & 1 & FIX: put in a description \\\\ \hline
TransZoneSize & 3 & FIX: put in a description \\\\ \hline
Spare2 & 23 & Not used. Should be set to zero.\\\\ \\hline \\hline
Total bits & 168 & Appears to take 1 slot \\\\ \\hline
\\end{tabular}
\\caption{AIS message number 22: Channel management - F}
\\label{tab:ChanMngmt}
\\end{table}
""")
######################################################################
# Text Definition
######################################################################
def textDefinitionTable(outfile=sys.stdout ,delim=' '):
 """Return the text definition table for this message type
 @param outfile: file like object to print to.
 @type outfile: file obj
 @return: text table string via the outfile
 @rtype: str
 """
 o = outfile
 o.write('Parameter'+delim+'Number of bits'+delim+"""Description
MessageID"""+delim+'6'+delim+"""AIS message number. Must be | |
| 
	# coding: utf-8
"""
 Rumble API
 Rumble Network Discovery API # noqa: E501
 OpenAPI spec version: 2.11.0
 Contact: <EMAIL>
 Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Event(object):
 """NOTE: This class is auto generated by the swagger code generator program.
 Do not edit the class manually.
 """
 """
 Attributes:
 swagger_types (dict): The key is attribute name
 and the value is attribute type.
 attribute_map (dict): The key is attribute name
 and the value is json key in definition.
 """
 swagger_types = {
 'id': 'str',
 'created_at': 'int',
 'client_id': 'str',
 'organization_id': 'str',
 'site_id': 'str',
 'action': 'str',
 'source_id': 'str',
 'source_name': 'str',
 'source_type': 'str',
 'target_id': 'str',
 'target_name': 'str',
 'target_type': 'str',
 'success': 'bool',
 'details': 'dict(str, object)',
 'state': 'str',
 'processor_id': 'str',
 'processed_at': 'int'
 }
 attribute_map = {
 'id': 'id',
 'created_at': 'created_at',
 'client_id': 'client_id',
 'organization_id': 'organization_id',
 'site_id': 'site_id',
 'action': 'action',
 'source_id': 'source_id',
 'source_name': 'source_name',
 'source_type': 'source_type',
 'target_id': 'target_id',
 'target_name': 'target_name',
 'target_type': 'target_type',
 'success': 'success',
 'details': 'details',
 'state': 'state',
 'processor_id': 'processor_id',
 'processed_at': 'processed_at'
 }
 def __init__(self, id:str=None, created_at:int=None, client_id:str=None, organization_id:str=None, site_id:str=None, action:str=None, source_id:str=None, source_name:str=None, source_type:str=None, target_id:str=None, target_name:str=None, target_type:str=None, success:bool=None, details:dict=None, state:str=None, processor_id:str=None, processed_at:int=None): # noqa: E501
 """Event - a model defined in Swagger""" # noqa: E501
 self._id = None
 self._created_at = None
 self._client_id = None
 self._organization_id = None
 self._site_id = None
 self._action = None
 self._source_id = None
 self._source_name = None
 self._source_type = None
 self._target_id = None
 self._target_name = None
 self._target_type = None
 self._success = None
 self._details = None
 self._state = None
 self._processor_id = None
 self._processed_at = None
 self.discriminator = None
 if id is not None:
 self.id = id
 if created_at is not None:
 self.created_at = created_at
 if client_id is not None:
 self.client_id = client_id
 if organization_id is not None:
 self.organization_id = organization_id
 if site_id is not None:
 self.site_id = site_id
 if action is not None:
 self.action = action
 if source_id is not None:
 self.source_id = source_id
 if source_name is not None:
 self.source_name = source_name
 if source_type is not None:
 self.source_type = source_type
 if target_id is not None:
 self.target_id = target_id
 if target_name is not None:
 self.target_name = target_name
 if target_type is not None:
 self.target_type = target_type
 if success is not None:
 self.success = success
 if details is not None:
 self.details = details
 if state is not None:
 self.state = state
 if processor_id is not None:
 self.processor_id = processor_id
 if processed_at is not None:
 self.processed_at = processed_at
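 # A minimal usage sketch for this generated model; only the constructor above and
 # the plain property getters/setters below are assumed. Field values are illustrative.
 #
 #     evt = Event(action='scan-completed', success=True, details={'assets': 12})
 #     evt.state = 'processed'         # via the property setter below
 #     print(evt.action, evt.success)  # via the property getters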
 @property
 def id(self):
 """Gets the id of this Event. # noqa: E501
 :return: The id of this Event. # noqa: E501
 :rtype: str
 """
 return self._id
 @id.setter
 def id(self, id):
 """Sets the id of this Event.
 :param id: The id of this Event. # noqa: E501
 :type: str
 """
 self._id = id
 @property
 def created_at(self):
 """Gets the created_at of this Event. # noqa: E501
 :return: The created_at of this Event. # noqa: E501
 :rtype: int
 """
 return self._created_at
 @created_at.setter
 def created_at(self, created_at):
 """Sets the created_at of this Event.
 :param created_at: The created_at of this Event. # noqa: E501
 :type: int
 """
 self._created_at = created_at
 @property
 def client_id(self):
 """Gets the client_id of this Event. # noqa: E501
 :return: The client_id of this Event. # noqa: E501
 :rtype: str
 """
 return self._client_id
 @client_id.setter
 def client_id(self, client_id):
 """Sets the client_id of this Event.
 :param client_id: The client_id of this Event. # noqa: E501
 :type: str
 """
 self._client_id = client_id
 @property
 def organization_id(self):
 """Gets the organization_id of this Event. # noqa: E501
 :return: The organization_id of this Event. # noqa: E501
 :rtype: str
 """
 return self._organization_id
 @organization_id.setter
 def organization_id(self, organization_id):
 """Sets the organization_id of this Event.
 :param organization_id: The organization_id of this Event. # noqa: E501
 :type: str
 """
 self._organization_id = organization_id
 @property
 def site_id(self):
 """Gets the site_id of this Event. # noqa: E501
 :return: The site_id of this Event. # noqa: E501
 :rtype: str
 """
 return self._site_id
 @site_id.setter
 def site_id(self, site_id):
 """Sets the site_id of this Event.
 :param site_id: The site_id of this Event. # noqa: E501
 :type: str
 """
 self._site_id = site_id
 @property
 def action(self):
 """Gets the action of this Event. # noqa: E501
 :return: The action of this Event. # noqa: E501
 :rtype: str
 """
 return self._action
 @action.setter
 def action(self, action):
 """Sets the action of this Event.
 :param action: The action of this Event. # noqa: E501
 :type: str
 """
 self._action = action
 @property
 def source_id(self):
 """Gets the source_id of this Event. # noqa: E501
 :return: The source_id of this Event. # noqa: E501
 :rtype: str
 """
 return self._source_id
 @source_id.setter
 def source_id(self, source_id):
 """Sets the source_id of this Event.
 :param source_id: The source_id of this Event. # noqa: E501
 :type: str
 """
 self._source_id = source_id
 @property
 def source_name(self):
 """Gets the source_name of this Event. # noqa: E501
 :return: The source_name of this Event. # noqa: E501
 :rtype: str
 """
 return self._source_name
 @source_name.setter
 def source_name(self, source_name):
 """Sets the source_name of this Event.
 :param source_name: The source_name of this Event. # noqa: E501
 :type: str
 """
 self._source_name = source_name
 @property
 def source_type(self):
 """Gets the source_type of this Event. # noqa: E501
 :return: The source_type of this Event. # noqa: E501
 :rtype: str
 """
 return self._source_type
 @source_type.setter
 def source_type(self, source_type):
 """Sets the source_type of this Event.
 :param source_type: The source_type of this Event. # noqa: E501
 :type: str
 """
 self._source_type = source_type
 @property
 def target_id(self):
 """Gets the target_id of this Event. # noqa: E501
 :return: The target_id of this Event. # noqa: E501
 :rtype: str
 """
 return self._target_id
 @target_id.setter
 def target_id(self, target_id):
 """Sets the target_id of this Event.
 :param target_id: The target_id of this Event. # noqa: E501
 :type: str
 """
 self._target_id = target_id
 @property
 def target_name(self):
 """Gets the target_name of this Event. # noqa: E501
 :return: The target_name of this Event. # noqa: E501
 :rtype: str
 """
 return self._target_name
 @target_name.setter
 def target_name(self, target_name):
 """Sets the target_name of this Event.
 :param target_name: The target_name of this Event. # noqa: E501
 :type: str
 """
 self._target_name = target_name
 @property
 def target_type(self):
 """Gets the target_type of this Event. # noqa: E501
 :return: The target_type of this Event. # noqa: E501
 :rtype: str
 """
 return self._target_type
 @target_type.setter
 def target_type(self, target_type):
 """Sets the target_type of this Event.
 :param target_type: The target_type of this Event. # noqa: E501
 :type: str
 """
 self._target_type = target_type
 @property
 def success(self):
 """Gets the success of this Event. # noqa: E501
 :return: The success of this Event. # noqa: E501
 :rtype: bool
 """
 return self._success
 @success.setter
 def success(self, success):
 """Sets the success of this Event.
 :param success: The success of this Event. # noqa: E501
 :type: bool
 """
 self._success = success
 @property
 def details(self):
 """Gets the details of this Event. # noqa: E501
 :return: The details of this Event. # noqa: E501
 :rtype: dict
 """
 return self._details
 @details.setter
 def details(self, details):
 """Sets the details of this Event.
 :param details: The details of this Event. # noqa: E501
 :type: dict
 """
 self._details = details
 @property
 def state(self):
 """Gets the state of this Event. # noqa: E501
 :return: The state of this Event. # noqa: E501
 :rtype: str
 """
 return self._state
 @state.setter
 def state(self, state):
 """Sets the state of this Event.
 :param state: The state of this Event. # noqa: E501
 :type: str
 """
 self._state = state
 @property
 def processor_id(self):
 """Gets the processor_id of this Event. # noqa: E501
 :return: The processor_id of this Event. # noqa: E501
 :rtype: str
 """
 return self._processor_id
 @processor_id.setter
 def processor_id(self, processor_id):
 """Sets the processor_id of this Event.
 :param processor_id: The processor_id of this Event. # noqa: E501
 :type: str
 """
 self._processor_id = processor_id
 @property
 def processed_at(self):
 """Gets the processed_at of this Event. # noqa: E501
 :return: The processed_at of this Event. # noqa: E501
 :rtype: int
 """
 return self._processed_at
 @processed_at.setter
 def processed_at(self, processed_at):
 """Sets the processed_at of this Event.
 :param processed_at: The processed_at of this Event. # noqa: E501
 :type: int
 """
 self._processed_at = processed_at
 def to_dict(self):
 """Returns the model properties as a dict"""
 result = {}
 for attr, _ in six.iteritems(self.swagger_types):
 value =

 other[cut2:]
 offspring.append(hijo2)
 return offspring
 def mutacion(self, individuo):
 # mutation probability
 p = self.diversityIndex*self.pm #*self.N;
 size = len(individuo)
 # swap 1 bit
 # for i in range(0, size):
 if(r.uniform(0, 1) < p):
 i = r.randrange(0, size);
 j = r.randrange(0, size);
 prev = individuo[i]
 individuo[i] = individuo[j]
 individuo[j] = prev;
 return individuo;
 def print_line(self, n):
 print('\t\t|',end='')
 for i in range(0,n):
 print('---|', end='')
 print('')
 def print_board(self, board):
 n = self.N
 self.print_line(n)
 for i in range(0,n):
 print('\t\t|',end='')
 for j in range(0,n):
 try:
 test = list(filter(lambda pos: pos==(i,j), board))[0]
 print(' R |',end='')
 except:
 print(' |',end='')
 print('')
 self.print_line(n)
 def check_exit(self, individuo):
 fitness = self.getFitness(individuo);
 if(fitness == 1 and individuo not in self.solutions):
 self.solutions.append(individuo);
 # STATS
 print('''
 [N-Queens GA ordered]
 N={}
 {}
 {}
 Cycles: {}
 Solutions: {}
 '''.format(self.N, individuo, self.evaluaciones, self.ciclos, len(self.solutions)))
 # BOARD
 # board = [(y, x) for (y, x) in enumerate(individuo)]
 # self.print_board(board);
 
 self.criterioDeParada = True;
class Queens_binary(Queens_ordered):
 def __init__(self, N, iter, pSize, K, L, pm, pc):
 if(N<4):
 print('''
 The problem has no solution for n=2 or n=3.
 > <NAME> et al., "Construction for the Solutions of the m Queens Problem". Mathematics Magazine, Vol. XX (1969), pp. 66–72.
 http://penguin.ewu.edu/~trolfe/QueenLasVegas/Hoffman.pdf
 ''')
 exit()
 elif(K>pSize):
 print('The tournament sample size (', K, ') must be smaller than the population size (', pSize, ')')
 exit()
 # Basic parameters
 self.N = N;
 self.iter = iter;
 
 self.K = K;
 self.L = L;
 self.pm = pm;
 self.pc = pc;
 
 # Internal parameters
 self.fitnesses = {}; # evaluation cache
 self.evaluaciones = 0; # number of evaluations
 self.ciclos = 0; # number of evaluation cycles
 self.criterioDeParada = False;
 self.solutions = [];
 # diversity index
 self.bestValue = 9999;
 self.diversityIndex = 1
 # INIT
 poblacion = [];
 size = N*N;
 for p in range(0, pSize):
 individuo = [0 for i in range(0,size)]
 for queens in range(0, N):
 individuo[r.randrange(0,size)] = 1
 poblacion.append(individuo);
 self.poblacion = poblacion;
 def main(self, findAll):
 # bar = progressbar.ProgressBar(redirect_stdout=False)
 poblacion = self.poblacion;
 if(findAll and self.N == 8):
 criterioDeParada = len(self.solutions) >= 92
 else:
 criterioDeParada = False # len(self.solutions < 92) # self.ciclos < self.iter
 while(not criterioDeParada):
 try:
 # bar.update((self.ciclos*100)/self.iter)
 # selection
 padres = [Queens_ordered.seleccion(self, poblacion, i%2==0, False) for i in range(0,2*self.L)]
 # crossover + replacement
 nuevaPoblacion = self.cruce_SP(padres);
 poblacion = poblacion[self.L*2:]
 # mutation
 nuevaPoblacion = list(map(lambda ind: self.mutacion(ind), nuevaPoblacion));
 poblacion += nuevaPoblacion;
 self.ciclos += 1;
 for individuo in poblacion:
 self.check_exit(individuo)
 bestValue = min(self.fitnesses.values())
 # print('Best individual fitness: ', bestValue, 'worst individual fitness: ', max(self.fitnesses.values()), 'diversityIndex: ', int(self.diversityIndex))
 if(bestValue == self.bestValue):
 self.diversityIndex += 0.01
 if(self.diversityIndex>10):
 self.diversityIndex = 1;
 else:
 self.diversityIndex = 1;
 self.bestValue = bestValue;
 
 if((not findAll and self.criterioDeParada) or criterioDeParada):
 break;
 except KeyboardInterrupt:
 print('CTRL+C. Stopping.')
 break
 '''
 if(len(self.solutions) == 0):
 print('\nNo solution found. ', self.evaluaciones, 'evaluations', 'max fitness: ', self.bestValue, 'in ', self.ciclos, 'iterations')
 else:
 print(len(self.solutions), 'solutions found')
 '''
 return (self.evaluaciones, self.ciclos)
 def getFitnessWithURL(self, individuo):
 key = ''.join(str(n) for n in individuo)
 try:
 fitness = self.fitnesses[key];
 return fitness;
 except:
 cadena = ft.reduce(lambda s,c: s+str(c), individuo, '')
 self.evaluaciones += 1;
 try:
 r = requests.get('http://memento.evannai.inf.uc3m.es/age/test?c='+cadena)
 fitness = float(r.text)
 self.fitnesses[key] = fitness
 except:
 fitness = 9999
 return fitness;
 def getFitness(self, individuo):
 key = ''.join(str(n) for n in individuo)
 try:
 fitness = self.fitnesses[key];
 return fitness;
 except:
 # print('Calculating fitness...')
 evaluable = individuo[:]
 self.evaluaciones += 1;
 bad = 0;
 queens = 0;
 for (i, value) in enumerate(individuo): # posiciones de reinas
 if(value==0):
 continue;
 else:
 queens += 1;
 (ind_x) = i%self.N
 (ind_y) = int(i/self.N)
 for j in range(i+1, len(individuo)):
 value = individuo[j]
 if(value==0):
 continue;
 else: 
 (r_x) = j%self.N
 (r_y) = int(j/self.N)
 if(r_x==ind_x or
 r_y==ind_y or
 r_x-r_y == ind_x-ind_y or
 r_x+r_y == ind_x+ind_y):
 bad += 1;
 # return (n/N - bad/n)
 fitness = (abs(self.N-queens) + bad)
 self.fitnesses[key] = fitness
 return fitness;
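 # Worked example (sketch) of the fitness above for N=4: a board encoded as a flat
 # list of N*N bits with queens at indices 0 (row 0, col 0) and 5 (row 1, col 1).
 # The two queens share a diagonal, so bad=1, and only 2 of the 4 required queens
 # are placed, so fitness = abs(4-2) + 1 = 3. A fitness of 0 means a full,
 # conflict-free placement (see check_exit below).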
 def cruce_SP(self, padres):
 offspring = []
 for i in range(0, int(len(padres)/2)):
 # for each pair of parents:
 p1 = padres[i*2]
 p2 = padres[(i*2)+1]
 size = len(p1)
 if(size != len(p2) or size != self.N*self.N):
 raise ValueError('Parent lengths differ.')
 cut = r.randrange(0,size)
 hijo1 = p1[:cut] + p2[cut:]
 offspring.append(hijo1)
 hijo2 = p2[:cut] + p1[cut:]
 offspring.append(hijo2)
 return offspring
 def mutacion(self, individuo):
 p = self.diversityIndex*self.pm #*self.N; # mutation probability
 size = len(individuo)
 # flip a single bit
 # for i in range(0, size):
 if(r.uniform(0, 1) < p):
 i = r.randrange(0, size);
 flip = list(filter(lambda n: n!= individuo[i], [0,1]))[0]
 individuo[i] = flip
 return individuo;
 def check_exit(self, individuo):
 fitness = self.getFitness(individuo);
 if(fitness == 0 and individuo not in self.solutions):
 self.solutions.append(individuo);
 
 # STATS
 print('''
 [N-Queens GA binary]
 N={}
 {}
 {}
 Cycles: {}
 '''.format(self.N, individuo, self.evaluaciones, self.ciclos))
 '''
 # BOARD
 board = []
 for (i, valor) in enumerate(individuo):
 if(valor==1):
 board.append((int(i/self.N), i%self.N))
 # print(board)
 self.print_board(board);
 '''
 self.criterioDeParada = True;
class Queens_bruteforce(Queens_ordered):
 def __init__(self, N):
 if(N<4):
 print('''
 The problem has no solution for n=2 or n=3.
 > <NAME> et al., "Construction for the Solutions of the m Queens Problem". Mathematics Magazine, Vol. XX (1969), pp. 66–72.
 http://penguin.ewu.edu/~trolfe/QueenLasVegas/Hoffman.pdf
 ''')
 exit()
 self.N = N
 self.reinas = 0
 self.coords = [(None, None) for i in range(0,self.N)]
 # Extension: find all solutions
 self.solutions = 0
 def main(self, findAll):
 solutions = []
 ciclos = 0
 (x, y) = (0, 0)
 # Extension
 criterioDeParada = True;
 # criterioDeParada = (x<self.N);
 while(criterioDeParada):
 ciclos += 1
 if(self.validate(self.coords,x,y,)):
 self.coords[self.reinas] = (x,y)
 self.reinas += 1
 x, y = x+1, 0
 if(self.reinas == self.N):
 if(self.coords not in solutions):
 solutions.append(self.coords[:])
 self.solutions += 1
 print('''
 [N-Queens bruteforce]
 N={}
 Solution: {}
 Cycles: {}
 '''.format(self.N, self.coords, ciclos))
 
 # Queens_ordered.print_board(self, self.coords);
 # Extension: break after 1 solution
 if(not findAll): 
 break;
 elif(y == self.N-1):
 # Backtracking
 (x, y) = self.coords[self.reinas-1]
 y += 1
 # Remove the current queen
 self.coords[self.reinas-1] = (None,None)
 self.reinas -= 1
 if(y >= self.N):
 # Double backtracking jump
 (x, y) = self.coords[self.reinas-1]
 if(y == None): break
 y += 1
 self.coords[self.reinas-1] = (None,None)
 self.reinas = self.reinas-1
 else:
 y += 1
 if(self.solutions == 0):
 print('\nNo solution found in', ciclos, 'iterations')
 else:
 print(self.solutions, 'solutions found for N =', self.N, 'in', ciclos, 'iterations')
 return ciclos
 def validate(self, reinas, x, y):
 size = len(list(filter(lambda r: r != (None, None), reinas)))
 for i in range(0, size):
 (r_x, r_y) = reinas[i]
 if(r_x==x or
 r_y==y or
 r_x-r_y == x-y or
 r_x+r_y == x+y):
 return False
 return True
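 # Worked example (sketch) of the conflict test above: with one queen already at
 # (0, 0), self.validate([(0, 0)], 1, 1) is False (shared diagonal, r_x-r_y == x-y),
 # self.validate([(0, 0)], 1, 0) is False (same second coordinate), and
 # self.validate([(0, 0)], 1, 2) is True, so (1, 2) is a legal square for the next queen.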
def meta_test():
 for i in range(8, 12):
 N = i*2
 queens = Queens_ordered(N, i, P, K, L, pm, pc)
 queens.main()
def csv_writer():
 with open('detail.csv', 'w', newline='') as detail, open('mini.csv', 'w', newline='') as mini:
 detailWriter = csv.writer(detail)
 miniWriter = csv.writer(mini)
 for i in range(4,12):
 N = i
 evaluaciones_sum = [];
 ciclos_sum = []
 for j in range(0,10):
 queens = Queens_binary(N, i, P, K, L, pm, pc)
 (evaluaciones, ciclos) = queens.main(False)
 print('N=', N, 'in', evaluaciones, 'evaluations and', ciclos, 'cycles')
 evaluaciones_sum.append(evaluaciones)
 ciclos_sum.append(ciclos)
 row = [N, evaluaciones, ciclos]
 detailWriter.writerow(row)
 meaneval = ft.reduce(lambda a,b: a+b, evaluaciones_sum)/10
 meanciclos = ft.reduce(lambda a,b: a+b, ciclos_sum)/10
 miniWriter.writerow([N, meaneval, meanciclos])
def csv_writer_bf():
 with open('detail.csv', 'w', newline='') as detail, open('mini.csv', 'w', newline='') as mini:
 detailWriter = csv.writer(detail)
 miniWriter = csv.writer(mini)
 for i in range(21,25):
 N = i
 ciclos_sum = []
 queens = Queens_bruteforce(N)
 ciclos = queens.main(False)
 print('N=', N, 'in', ciclos, 'cycles')
 miniWriter.writerow([N, ciclos])
'''
 _ __ __ _ _ _ ___ ___ _ __ ___
 | '_ \ / _` | | | |/ _ \/ _ \ '_ \/ __|
 | | | | | (_| | |_| | __/ __/ | | \__ \
 |_| |_| \__, |\__,_|\___|\___|_| |_|___/
 | |
 |_|
Possible parameters:
N: number of queens, board dimension
i: GA iterations
P: population size
K: selection sample size
L: number of tournament winners (parents)
pm: mutation probability
pc: crossover probability
Example:
before: python3 AG_N_reinas.py 8 20000 100 20 10 0.1 0.9
after:  python3 AG_N_reinas.py 8 30000 100 4 30 0.04 0.9
'''
def_N = int(sys.argv[1])
# For 10:  P=15,  K=3,  L=12
# For 25:  P=15,  K=3,  L=12
# For 50:  P=20,  K=4,  L=15
# For 100: P=200, K=10, L=99
i = 30000 # int(sys.argv[2])
P = int(sys.argv[2])
K = int(sys.argv[3])
L = int(sys.argv[4])
pm = 0.3 # float(sys.argv[6])
pc = 0.9 # float(sys.argv[7])
print('''
N={}, P={},

\
 0.6 if board_type == "rainbow" and flop[0] - flop[1] >= 5 else \
 0.8 if board_type != "rainbow" and flop[0] - flop[1] <= 4 else \
 0.7
 # Determine my opponent's strategy this hand
 #### Identify which of the 8 flops this is to decide the opponent's strategy they will use
 flop_type_number = get_flop_type_number()
 opponents_hands_cat1_0b = opponents_hands_cat1_level_x_and_above(opponent_strategy[get_opponent_situation(0)]["cat1"][flop_type_number])
 opponents_hands_cat2_0b = opponents_hands_cat2_level_x_and_above(min(7, opponent_strategy[get_opponent_situation(0)]["cat2"][flop_type_number] + 1 if not my_position_ip else 0), opponents_hands_cat1_0b) # they are wider IP, but not wider than 7
 opponents_hands_cat3_0b = opponents_hands_cat3_level_x_and_above(opponent_strategy[get_opponent_situation(0)]["cat3"][flop_type_number] + 1 if not my_position_ip else 0, opponents_hands_cat1_0b, opponents_hands_cat2_0b) # they are wider IP
 opponents_hands_cat1_1b = opponents_hands_cat1_level_x_and_above(opponent_strategy[get_opponent_situation(1)]["cat1"][flop_type_number])
 opponents_hands_cat2_1b = opponents_hands_cat2_level_x_and_above(min(7, opponent_strategy[get_opponent_situation(1)]["cat2"][flop_type_number] + 1 if not my_position_ip else 0), opponents_hands_cat1_1b) # they are wider IP, but not wider than 7
 opponents_hands_cat3_1b = opponents_hands_cat3_level_x_and_above(opponent_strategy[get_opponent_situation(1)]["cat3"][flop_type_number] + 1 if not my_position_ip else 0, opponents_hands_cat1_1b, opponents_hands_cat2_1b) # they are wider IP
 opponents_hands_cat1_2b = opponents_hands_cat1_level_x_and_above(opponent_strategy[get_opponent_situation(2)]["cat1"][flop_type_number])
 opponents_hands_cat2_2b = opponents_hands_cat2_level_x_and_above(min(7, opponent_strategy[get_opponent_situation(2)]["cat2"][flop_type_number] + 1 if not my_position_ip else 0), opponents_hands_cat1_2b) # they are wider IP, but not wider than 7
 opponents_hands_cat3_2b = opponents_hands_cat3_level_x_and_above(opponent_strategy[get_opponent_situation(2)]["cat3"][flop_type_number] + 1 if not my_position_ip else 0, opponents_hands_cat1_2b, opponents_hands_cat2_2b) # they are wider IP
 # Determine opponent's bet size:
 opponents_bet_size = 0.60
 opponents_hands_with_combos = [[], [], []]
 opponents_hands_with_combos[0] = [(x, 6) if x[0] not in flop else (x, 3) for x in opponents_hands[0]]
 opponents_hands_with_combos[1] = [(x, 4) if x[0] not in flop and x[1] not in flop else (x, 2) if x[0] in flop and x[1] in flop else (x,3) for x in opponents_hands[1]]
 opponents_hands_with_combos[2] = [(x, 12) if x[0] not in flop and x[1] not in flop else (x, 7) if x[0] in flop and x[1] in flop else (x,9) for x in opponents_hands[2]]
 def count_hand_combos(hands):
 return sum([y for (x,y) in hands[0]])+sum([y for (x,y) in hands[1]])+sum([y for (x,y) in hands[2]])
 def get_check_hands(all_hands_before_action_w_combos, cat1_hands_for_action, cat3_hands_for_action):
 hands = [[], [], []]
 temp_cat3_hands = deepcopy(cat3_hands_for_action)
 # Flip sign (for subtraction)
 for i in range(3):
 temp_cat3_hands[i] = [(x, -1*y) for (x, y) in temp_cat3_hands[i]]
 # Combine (for subtraction)
 result = combine_hands(all_hands_before_action_w_combos, temp_cat3_hands)
 # Subtraction
 for i in range(3):
 groupby_dict = defaultdict(int)
 for val in result[i]:
 groupby_dict[tuple(val[0])] += val[1]
 result[i] = [(sorted(list(x), reverse=True), max(0, min(y, 6 if i == 0 else 4 if i == 1 else 12))) for (x,y) in groupby_dict.items()]
 result[i] = [(x,y) for (x,y) in result[i] if y != 0 and x not in [x for (x,y) in cat1_hands_for_action[i]]]
 return result
 def get_fold_hands(all_hands_before_action_w_combos, cat1_hands_for_action, cat2_hands_for_action, cat3_hands_for_action):
 hands = get_check_hands(all_hands_before_action_w_combos, cat1_hands_for_action, cat3_hands_for_action)
 for i in range(3):
 hands[i] = [(x,y) for (x,y) in hands[i] if x not in [x for (x,y) in cat2_hands_for_action[i]]]
 return hands
 def get_call_hands(all_hands_before_action_w_combos, cat2_hands_for_action):
 hands = deepcopy(cat2_hands_for_action)
 for i in range(3):
 hands[i] = [(x,y) for (x,y) in hands[i] if x in [x for (x,y) in all_hands_before_action_w_combos[i]]]
 return hands
 def get_raise_hands(all_hands_before_action_w_combos, cat1_hands_for_action, cat3_hands_for_action):
 hands = combine_hands(cat1_hands_for_action, cat3_hands_for_action)
 for i in range(3):
 hands[i] = [(x,y) for (x,y) in hands[i] if x in [x for (x,y) in all_hands_before_action_w_combos[i]]]
 return hands
 def combine_hands(hands1, hands2):
 hands = [[], [], []]
 for i in range(3):
 hands[i] = hands1[i] + hands2[i]
 return hands
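 # Small sketch of the combo bookkeeping used by the helpers above: each entry is a
 # (ranks, combo_count) pair and the three sub-lists are the pair / 4-combo / 12-combo
 # buckets used elsewhere in this file. Values below are illustrative only.
 #
 #     toy = [[([14, 14], 6)], [([14, 13], 4)], [([9, 8], 12)]]
 #     count_hand_combos(toy)        # -> 6 + 4 + 12 = 22
 #     combine_hands(toy, toy)[0]    # -> [([14, 14], 6), ([14, 14], 6)]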
 my_hands_with_combos = [[], [], []]
 my_hands_with_combos[0] = [(x, 6) if x[0] not in flop else (x, 3) for x in my_hands[0]]
 my_hands_with_combos[1] = [(x, 4) if x[0] not in flop and x[1] not in flop else (x, 2) if x[0] in flop and x[1] in flop else (x,3) for x in my_hands[1]]
 my_hands_with_combos[2] = [(x, 12) if x[0] not in flop and x[1] not in flop else (x, 7) if x[0] in flop and x[1] in flop else (x,9) for x in my_hands[2]]
 # Determine:
 #### 0) Hands in each situation
 if my_position_ip:
 pass
 else:
 # Hands
 my_hands_c = get_check_hands(my_hands_with_combos, my_hands_cat1_0b, my_hands_cat3_0b)
 my_hands_b = combine_hands(my_hands_cat1_0b, my_hands_cat3_0b)
 opponents_hands_cc = get_check_hands(opponents_hands_with_combos, opponents_hands_cat1_0b, opponents_hands_cat3_0b)
 opponents_hands_cb = combine_hands(opponents_hands_cat1_0b, opponents_hands_cat3_0b)
 opponents_hands_bf = get_fold_hands(opponents_hands_with_combos, opponents_hands_cat1_1b, opponents_hands_cat2_1b, opponents_hands_cat3_1b)
 opponents_hands_bc = opponents_hands_cat2_1b
 opponents_hands_bb = combine_hands(opponents_hands_cat1_1b, opponents_hands_cat3_1b)
 my_hands_cbf = get_fold_hands(my_hands_c, my_hands_cat1_1b, my_hands_cat2_1b, my_hands_cat3_1b)
 my_hands_cbc = get_call_hands(my_hands_c, my_hands_cat2_1b)
 my_hands_cbb = get_raise_hands(my_hands_c, my_hands_cat1_1b, my_hands_cat3_1b)
 my_hands_bbf = get_fold_hands(my_hands_b, my_hands_cat1_2b, my_hands_cat2_2b, my_hands_cat3_2b)
 my_hands_bbc = get_call_hands(my_hands_b, combine_hands(my_hands_cat2_2b, my_hands_cat3_2b)) # cat3 is a call
 opponents_hands_cbbf = get_fold_hands(opponents_hands_cb, opponents_hands_cat1_2b, opponents_hands_cat2_2b, opponents_hands_cat3_2b)
 opponents_hands_cbbc = get_call_hands(opponents_hands_cb, combine_hands(opponents_hands_cat2_2b, opponents_hands_cat3_2b)) # cat3 is a call
 # Combos
 combos_c = count_hand_combos(my_hands_c)
 combos_b = count_hand_combos(my_hands_b)
 combos_cc = count_hand_combos(opponents_hands_cc)
 combos_cb = count_hand_combos(opponents_hands_cb)
 combos_bf = count_hand_combos(opponents_hands_bf)
 combos_bc = count_hand_combos(opponents_hands_bc)
 combos_bb = count_hand_combos(opponents_hands_bb)
 combos_cbf = count_hand_combos(my_hands_cbf)
 combos_cbc = count_hand_combos(my_hands_cbc)
 combos_cbb = count_hand_combos(my_hands_cbb)
 combos_bbf = count_hand_combos(my_hands_bbf)
 combos_bbc = count_hand_combos(my_hands_bbc)
 combos_cbbf = count_hand_combos(opponents_hands_cbbf)
 combos_cbbc = count_hand_combos(opponents_hands_cbbc)
 # Cat3 pct_makeup
 my_cat3_pct_cc = 0
 my_cat3_pct_cbc = 0
 my_cat3_pct_cbbc = 0 if combos_cbb == 0 else count_hand_combos(get_raise_hands(my_hands_c, [[],[],[]], my_hands_cat3_1b)) / float(combos_cbb)
 my_cat3_pct_bc = 0 if combos_b == 0 else count_hand_combos(my_hands_cat3_0b) / float(combos_b)
 my_cat3_pct_bbc = 0 if combos_bbc == 0 else count_hand_combos(get_raise_hands(my_hands_b, [[],[],[]], my_hands_cat3_2b)) / float(combos_bbc)
 opponents_cat3_pct_cc = 0
 opponents_cat3_pct_cbc = 0 if combos_cb == 0 else count_hand_combos(opponents_hands_cat3_0b) / float(combos_cb)
 opponents_cat3_pct_cbbc = 0 if combos_cbbc == 0 else count_hand_combos(get_raise_hands(opponents_hands_cb, [[],[],[]], opponents_hands_cat3_2b)) / float(combos_cbbc)
 opponents_cat3_pct_bc = 0
 opponents_cat3_pct_bbc = 0 if combos_bb == 0 else count_hand_combos(opponents_hands_cat3_1b) / float(combos_bb)
 #### 1) the % chance of each bet sequence
 if my_position_ip:
 pass
 else:
 chance_c = float(combos_c)/(combos_c + combos_b)
 chance_b = float(combos_b)/(combos_c + combos_b)
 chance_cc = chance_c*(float(combos_cc)/(combos_cc + combos_cb))
 chance_cb = chance_c*(float(combos_cb)/(combos_cc + combos_cb))
 chance_bf = chance_b*(float(combos_bf)/(combos_bf + combos_bc + combos_bb))
 chance_bc = chance_b*(float(combos_bc)/(combos_bf + combos_bc + combos_bb))
 chance_bb = chance_b*(float(combos_bb)/(combos_bf + combos_bc + combos_bb))
 chance_cbf = chance_cb*(float(combos_cbf)/(combos_cbf + combos_cbc + combos_cbb))
 chance_cbc = chance_cb*(float(combos_cbc)/(combos_cbf + combos_cbc + combos_cbb))
 chance_cbb = chance_cb*(float(combos_cbb)/(combos_cbf + combos_cbc + combos_cbb))
 chance_bbf = chance_bb*(float(combos_bbf)/(combos_bbf + combos_bbc))
 chance_bbc = chance_bb*(float(combos_bbc)/(combos_bbf + combos_bbc))
 chance_cbbf = chance_cbb*(float(combos_cbbf)/(combos_cbbf + combos_cbbc))
 chance_cbbc = chance_cbb*(float(combos_cbbc)/(combos_cbbf + combos_cbbc))
 # print("Test that all add to 1.0")
 chance_c+chance_b, chance_cc+chance_cb+chance_bf+chance_bc+chance_bb, \
 chance_cc+chance_cbf+chance_cbc+chance_cbb+chance_bf+chance_bc+chance_bbf+chance_bbc, \
 chance_cc+chance_cbf+chance_cbc+chance_cbbf+chance_cbbc+chance_bf+chance_bc+chance_bbf+chance_bbc
 m2 = {14: 'A', 13: 'K', 12: 'Q', 11: 'J', 10: 'T',
 9: '9', 8: '8', 7: '7', 6: '6', 5: '5',
 4: '4', 3: '3', 2: '2'}
 def convert_hands_to_range(hands, flop):
 result = []
 for hand in hands[0]:
 rank = hand[0][0]
 if hand[1] != 6 and rank not in flop:
 result.append(m2[rank] + "s" + m2[rank] + "h")
 result.append(m2[rank] + "s" + m2[rank] + "c")
 result.append(m2[rank] + "s" + m2[rank] + "d")
 else:
 result.append(m2[rank]*2)
 for hand in hands[1]:
 rank1 = hand[0][0]
 rank2 = hand[0][1]
 if hand[1] == 4:
 result.append(m2[rank1] + m2[rank2])
 elif hand[1] == 3 and (rank1 in flop or rank2 in flop):
 result.append(m2[rank1] + m2[rank2])
 elif hand[1] == 3:
 result.append(m2[rank1] + "s" + m2[rank2] + "s")
 result.append(m2[rank1] + "h" + m2[rank2] + "h")
 result.append(m2[rank1] + "c" + m2[rank2] + "c")
 elif hand[1] == 2 and ((rank1 in flop and rank2 in flop) or (rank1 == paired_value or rank2 == paired_value)):
 result.append(m2[rank1] + m2[rank2])
 elif hand[1] == 2:
 result.append(m2[rank1] + "s" + m2[rank2] + "s")
 result.append(m2[rank1] + "h" + m2[rank2] + "h")
 elif hand[1] == 1:
 result.append(m2[rank1] + "s" + m2[rank2] + "s")
 for hand in hands[2]:
 rank1 = hand[0][0]
 rank2 = hand[0][1]
 if hand[1] == 12:
 result.append(m2[rank1] + m2[rank2])
 elif rank1 in flop or rank2 in flop:
 result.append(m2[rank1] + m2[rank2])
 elif hand[1] == 6:
 # one spade
 result.append(m2[rank1] + "s" + m2[rank2] + "h")
 result.append(m2[rank1] + "s" + m2[rank2] + "c")
 result.append(m2[rank1] + "s" + m2[rank2] + "d")
 result.append(m2[rank1] + "h" + m2[rank2] + "s")
 result.append(m2[rank1] + "c" + m2[rank2] + "s")
 result.append(m2[rank1] + "d" + m2[rank2] + "s")
 else:
 raise Exception # Should never occur, investigate if this occurs
 return ",".join(result)
 #### 2) the ranges that go against each other (or who won pot)
 if my_position_ip:
 pass
 else:
 # Hands version
 final_my_hands_cc = my_hands_c
 final_opponents_hands_cc = opponents_hands_cc
 final_my_hands_cbc = my_hands_cbc
 final_opponents_hands_cbc = opponents_hands_cb
 final_my_hands_cbbc = my_hands_cbb
 final_opponents_hands_cbbc = opponents_hands_cbbc
 final_my_hands_bc = my_hands_b
 final_opponents_hands_bc = opponents_hands_bc
 final_my_hands_bbc = my_hands_bbc
 final_opponents_hands_bbc = opponents_hands_bb
 # String version for Equilab
 final_my_hands_cc_string = convert_hands_to_range(my_hands_c, flop)
 final_opponents_hands_cc_string = convert_hands_to_range(opponents_hands_cc, flop)
 final_my_hands_cbc_string = convert_hands_to_range(my_hands_cbc, flop)
 final_opponents_hands_cbc_string = convert_hands_to_range(opponents_hands_cb, flop)
 final_my_hands_cbbc_string = convert_hands_to_range(my_hands_cbb, flop)
 final_opponents_hands_cbbc_string = convert_hands_to_range(opponents_hands_cbbc, flop)
 final_my_hands_bc_string = convert_hands_to_range(my_hands_b, flop)
 final_opponents_hands_bc_string = convert_hands_to_range(opponents_hands_bc, flop)
 final_my_hands_bbc_string = convert_hands_to_range(my_hands_bbc, flop)
 final_opponents_hands_bbc_string = convert_hands_to_range(opponents_hands_bb, flop)
 # Flop as string (won't

 m1[n]
def edit_distance_backpointer(
 seq1, seq2, action_function=lowest_cost_action, test=operator.eq
):
 """
 Similar to :py:func:`~edit_distance.edit_distance` except that this
 function keeps backpointers during the search. This allows us to return
 the opcodes (i.e. the specific edits that were used to change from one
 string to another). This function constructs the full 2d array for the
 backpointers only.
 """
 
 m: int = len(seq1)
 n: int = len(seq2)
 # backpointer array:
 bp = [[None for x in range(n + 1)] for y in range(m + 1)]
 tp = [[None for x in range(n + 1)] for y in range(m + 1)]
 
 # Two columns of the distance and match arrays
 d0 = [0] * (n + 1) # The two 'distance' columns
 d1 = [0] * (n + 1)
 m0 = [0] * (n + 1) # The two 'match' columns
 m1 = [0] * (n + 1)
 # Fill in the first column
 for i in range(1, n + 1):
 d0[i] = i
 bp[0][i] = INSERT
 for i in range(1, m + 1):
 d1[0] = i
 bp[i][0] = DELETE
 for j in range(1, n + 1):
 cost = 0 if test(seq1[i - 1], seq2[j - 1]) else 1
 
 # The costs of each action...
 ins_cost = d1[j - 1] + 1 # insertion
 del_cost = d0[j] + 1 # deletion
 sub_cost = d0[j - 1] + cost # substitution/match
 # The match scores of each action
 ins_match = m1[j - 1]
 del_match = m0[j]
 sub_match = m0[j - 1] + int(not cost)
 action = action_function(
 ins_cost, del_cost, sub_cost, ins_match, del_match, sub_match, cost
 )
 if action == EQUAL:
 d1[j] = sub_cost
 m1[j] = sub_match
 bp[i][j] = EQUAL
 tp[i][j] = seq1[i - 1]+"$"+seq2[j - 1]
 elif action == REPLACE:
 d1[j] = sub_cost
 m1[j] = sub_match
 bp[i][j] = REPLACE
 tp[i][j] = seq1[i - 1]+"$"+seq2[j - 1]
 
 elif action == INSERT:
 d1[j] = ins_cost
 m1[j] = ins_match
 bp[i][j] = INSERT
 tp[i][j] = ""+"$"+seq2[j - 1]
 elif action == DELETE:
 d1[j] = del_cost
 m1[j] = del_match
 bp[i][j] = DELETE
 tp[i][j] = seq1[i - 1]+"$"+" "
 else:
 raise Exception("Invalid dynamic programming action returned!")
 # copy over the columns
 for k in range(n + 1):
 d0[k] = d1[k]
 m0[k] = m1[k]
 opcodes = get_opcodes_from_bp_table(bp,tp)
# print(opcodes)
 return d1[n], m1[n], opcodes
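# A minimal usage sketch of the backpointer variant above. It assumes the EQUAL /
# REPLACE / INSERT / DELETE constants and lowest_cost_action defined elsewhere in
# this module; the tokens are illustrative.
#
#     ref = "the quick brown fox".split()
#     hyp = "the quikc brown fox jumps".split()
#     distance, matches, opcodes = edit_distance_backpointer(ref, hyp)
#     # distance == 2 (one substitution, one insertion), matches == 3, and each
#     # opcode has the form [action, i1, i2, j1, j2, ref_token, hyp_token].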
def get_opcodes_from_bp_table(bp,tp):
 """Given a 2d list structure, create opcodes from the best path."""
 x = len(bp) - 1
 y = len(bp[0]) - 1
 opcodes = []
 while x != 0 or y != 0:
 this_bp = bp[x][y]
 if tp[x][y]:
 tt = tp[x][y].split("$")
 
 if this_bp in [EQUAL, REPLACE]:
 opcodes.append([this_bp, max(x - 1, 0), x, max(y - 1, 0), y,tt[0],tt[1]])
 x = x - 1
 y = y - 1
 elif this_bp == INSERT:
 opcodes.append([INSERT, x, x, max(y - 1, 0), y,tt[0],tt[1]])
 y = y - 1
 elif this_bp == DELETE:
 opcodes.append([DELETE, max(x - 1, 0), x, max(y - 1, 0), max(y - 1, 0),tt[0],tt[1]])
 x = x - 1
 else:
 raise Exception("Invalid dynamic programming action in BP table!")
 opcodes.reverse()
 return opcodes
# TODO - rename this function. Move some of it into evaluate.py?
def main(args):
 """Main method - this reads the hyp and ref files, and creates
 editdistance.SequenceMatcher objects to compute the edit distance.
 All the necessary statistics are collected, and results are
 printed as specified by the command line options.
 This function does not check that the reference and
 hypothesis files have the same number of lines. It will stop after the
 shortest one runs out of lines. This should be easy to fix...
 """
 global counter
 global ignored_count
 global total_errors
 global total_sub
 global total_ins
 global total_del
 set_global_variables(args)
 filename = ""
 counter = 0
 calc_table = []
 # Loop through each line of the reference and hyp file
 for ref_line, hyp_line in zip(args.ref, args.hyp):
 if "_norm.txt" in ref_line:
 filename = ref_line.replace("_norm.txt\n","")
 if not confusions:
 print('\n')
 print(filename)
 continue
 processed_p,calc = process_line_pair(ref_line, hyp_line, filename, case_insensitive=args.case_insensitive,
 remove_empty_refs=args.remove_empty_refs)
 
 if calc is not None:
 calc_table.append({
 'File':filename,
 'Total words':calc[0],
 'Correct':calc[1],
 'Error':calc[2],
 'WER':calc[3],
 'WRR':calc[4]})
 
 if processed_p:
 counter += 1
 if confusions:
 print_confusions()
 if wer_vs_length_p:
 print_wer_vs_length()
 # Compute WER and WRR
 if ref_token_count > 0:
 wrr = match_count / ref_token_count
 wer = error_count / ref_token_count
 else:
 wrr = 0.0
 wer = 0.0
 # Compute SER
 ser = sent_error_count / counter if counter > 0 else 0.0
 print('Sentence count: {}'.format(counter))
 print('WER: {:10.3%} ({:10d} / {:10d})'.format(wer, error_count, error_count+match_count))
 print('WRR: {:10.3%} ({:10d} / {:10d})'.format(wrr, match_count, error_count+match_count))
 print('SER: {:10.3%} ({:10d} / {:10d})'.format(ser, sent_error_count, counter))
 if not confusions:
 print('TOTAL_ERRORS: {:10d}'.format(total_errors//2))
 print('SUBSTITUTIONS: {:10d} {:10.3%}'.format(total_sub//2, (total_sub//2)/error_count))
 print('DELETIONS: {:10d} {:10.3%}'.format(total_del//2, (total_del//2)/error_count))
 print('INSERTIONS: {:10d} {:10.3%}'.format(total_ins//2, (total_ins//2)/error_count))
 pd.DataFrame(calc_table).to_csv("detailed_results.csv",index=None)
 elif confusions:
 print('TOTAL_ERRORS: {:10d}'.format(total_errors))
 print('SUBSTITUTIONS: {:10d} {:10.3%}'.format(total_sub, total_sub/error_count))
 print('DELETIONS: {:10d} {:10.3%}'.format(total_del, total_del/error_count))
 print('INSERTIONS: {:10d} {:10.3%}'.format(total_ins, total_ins/error_count))
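 # Worked example (sketch) of the scoring above: with 1000 reference tokens,
 # 900 matches and 120 errors (substitutions + deletions + insertions),
 # WER = 120/1000 = 12.0% and WRR = 900/1000 = 90.0%. SER is the fraction of
 # sentences that contain at least one error.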
 
 
def process_line_pair(ref_line, hyp_line, filename, case_insensitive=False, remove_empty_refs=False):
 """Given a pair of strings corresponding to a reference and hypothesis,
 compute the edit distance, print if desired, and keep track of results
 in global variables.
 Return true if the pair was counted, false if the pair was not counted due
 to an empty reference string."""
 # I don't believe these all need to be global. In any case, they shouldn't be.
 global error_count
 global match_count
 global ref_token_count
 global sent_error_count
 global ignored_count
 # Split into tokens by whitespace
 ref = ref_line.split()
 hyp = hyp_line.split()
 id_ = None
 # If the files have IDs, then split the ID off from the text
 if files_head_ids:
 id_ = ref[0]
 ref, hyp = remove_head_id(ref, hyp)
 elif files_tail_ids:
 id_ = ref[-1]
 ref, hyp = remove_tail_id(ref, hyp)
 if case_insensitive:
 ref = list(map(str.lower, ref))
 hyp = list(map(str.lower, hyp))
 if remove_empty_refs and len(ref) == 0:
 return False, None
 # Create an object to get the edit distance, and then retrieve the
 # relevant counts that we need.
 sm = SequenceMatcher(a=ref, b=hyp)
 errors = get_error_count(sm,PRINT=False)
 
 matches = get_match_count(sm)
 ref_length = len(ref)
 # Increment the total counts we're tracking
 error_count += errors
 match_count += matches
 ref_token_count += ref_length
 if errors != 0:
 sent_error_count += 1
 # If we're keeping track of which words get mixed up with which others, call track_confusions
 if confusions:
 track_confusions(sm, ref, hyp,filename)
 # If we're printing instances, do it here (in roughly the align.c format)
 if print_instances_p or (print_errors_p and errors != 0):
 calc = print_instances(ref, hyp, sm, id_=id_)
 # Keep track of the individual error rates, and reference lengths, so we
 # can compute average WERs by sentence length
 lengths.append(ref_length)
 error_rate = errors * 1.0 / len(ref) if len(ref) > 0 else float("inf")
 error_rates.append(error_rate)
 wer_bins[len(ref)].append(error_rate)
 if print_instances_p or (print_errors_p and errors != 0):
 return True, calc
 else:
 return True, None
def set_global_variables(args):
 """Copy argparse args into global variables."""
 global print_instances_p
 global print_errors_p
 global files_head_ids
 global files_tail_ids
 global confusions
 global min_count
 global wer_vs_length_p
 # Put the command line options into global variables.
 print_instances_p = args.print_instances
 print_errors_p = args.print_errors
 files_head_ids = args.head_ids
 files_tail_ids = args.tail_ids
 confusions = args.confusions
 min_count = args.min_word_count
 wer_vs_length_p = args.print_wer_vs_length
def remove_head_id(ref, hyp):
 """Assumes that the ID is the begin token of the string which is common
 in Kaldi but not in Sphinx."""
 ref_id = ref[0]
 hyp_id = hyp[0]
 if ref_id != hyp_id:
 print('Reference and hypothesis IDs do not match! '
 'ref="{}" hyp="{}"\n'
 'File lines in hyp file should match those in the ref file.'.format(ref_id, hyp_id))
 exit(-1)
 ref = ref[1:]
 hyp = hyp[1:]
 return ref, hyp
def remove_tail_id(ref, hyp):
 """Assumes that the ID is the final token of the string which is common
 in Sphinx but not in Kaldi."""
 ref_id = ref[-1]
 hyp_id = hyp[-1]
 if ref_id != hyp_id:
 print('Reference and hypothesis IDs do not match! '
 'ref="{}" hyp="{}"\n'
 'File lines in hyp file should match those in the ref file.'.format(ref_id, hyp_id))
 exit(-1)
 ref = ref[:-1]
 hyp = hyp[:-1]
 return ref, hyp
def print_instances(ref, hyp, sm, id_=None):
 """Print a single instance of a ref/hyp pair."""
 global ignored_count
 global total_errors
 print_diff(sm, ref, hyp)
 if id_:
 print(('SENTENCE {0:d} {1!s}'.format(counter + 1, id_)))
 else:
 print('SENTENCE {0:d}'.format(counter + 1))
 
 errors = get_error_count(sm,PRINT=False)
 matches = get_match_count(sm)

"""Provides access to application-level structures.
This module is the starting point for getting access to windows and other application-global data.
"""
import iterm2.broadcast
import iterm2.connection
import iterm2.notifications
import iterm2.rpc
import iterm2.session
import iterm2.tab
import iterm2.window
import json
import typing
async def async_get_app(connection: iterm2.connection.Connection, create_if_needed: bool=True) -> typing.Union[None,'App']:
 """Returns the app singleton, creating it if needed.
 :param connection: The connection to iTerm2.
 :param create_if_needed: If `True`, create the global :class:`App` instance if one does not already exist. If `False`, do not create it.
 :returns: The global :class:`App` instance. If `create_if_needed` is False this may return `None` if no such instance exists."""
 if App.instance is None:
 if create_if_needed:
 App.instance = await App.async_construct(connection)
 else:
 await App.instance.async_refresh()
 return App.instance
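# A minimal usage sketch for the accessor above, assuming the standard
# iterm2.run_until_complete entry point from the same package (not shown in this
# module):
#
#     import iterm2
#
#     async def main(connection):
#         app = await iterm2.app.async_get_app(connection)
#         print(app.pretty_str())
#
#     iterm2.run_until_complete(main)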
class CreateWindowException(Exception):
 """A problem was encountered while creating a window."""
 pass
class App:
 """Represents the application.
 Stores and provides access to app-global state. Holds a collection of
 terminal windows and provides utilities for them.
 This object keeps itself up to date by getting notifications when sessions,
 tabs, or windows change.
 """
 instance:typing.Union[None,'App'] = None
 @staticmethod
 async def async_construct(connection: iterm2.connection.Connection) -> 'App':
 """Don't use this directly. Use :func:`async_get_app()`.
 Use this to construct a new hierarchy instead of __init__.
 This exists only because __init__ can't be async.
 """
 response = await iterm2.rpc.async_list_sessions(connection)
 list_sessions_response = response.list_sessions_response
 windows = App._windows_from_list_sessions_response(connection, list_sessions_response)
 buried_sessions = App._buried_sessions_from_list_sessions_response(connection, list_sessions_response)
 app = App(connection, windows, buried_sessions)
 await app._async_listen()
 await app.async_refresh_focus()
 await app.async_refresh_broadcast_domains()
 return app
 def __init__(self, connection, windows, buried_sessions):
 """Do not call this directly. Use App.construct() instead."""
 self.connection = connection
 self.__terminal_windows = windows
 self.__buried_sessions = buried_sessions
 self.tokens = []
 self.__broadcast_domains = []
 # None in these fields means unknown. Notifications will update them.
 self.app_active = None
 self.current_terminal_window_id = None
 async def async_activate(self, raise_all_windows: bool=True, ignoring_other_apps: bool=False) -> None:
 """Activate the app, giving it keyboard focus.
 :param raise_all_windows: Raise all windows if True, or only the key
 window. Defaults to True.
 :param ignoring_other_apps: If True, activate even if the user
 interacts with another app after the call.
 """
 opts = []
 if raise_all_windows:
 opts.append(iterm2.rpc.ACTIVATE_RAISE_ALL_WINDOWS)
 if ignoring_other_apps:
 opts.append(iterm2.rpc.ACTIVATE_IGNORING_OTHER_APPS)
 await iterm2.rpc.async_activate(
 self.connection,
 False,
 False,
 False,
 activate_app_opts=opts)
 @staticmethod
 def _windows_from_list_sessions_response(connection, response):
 return list(
 filter(
 lambda x: x,
 map(lambda window: iterm2.window.Window.create_from_proto(connection, window),
 response.windows)))
 @staticmethod
 def _buried_sessions_from_list_sessions_response(connection, response):
 mf = map(lambda summary: iterm2.session.Session(connection, None, summary),
 response.buried_sessions)
 return list(mf)
 def pretty_str(self) -> str:
 """Returns the hierarchy as a human-readable string"""
 session = ""
 for window in self.terminal_windows:
 if session:
 session += "\n"
 session += window.pretty_str(indent="")
 return session
 def _search_for_session_id(self, session_id):
 if session_id == "active":
 return iterm2.session.Session.active_proxy(self.connection)
 if session_id == "all":
 return iterm2.session.Session.all_proxy(self.connection)
 for window in self.terminal_windows:
 for tab in window.tabs:
 sessions = tab.sessions
 for session in sessions:
 if session.session_id == session_id:
 return session
 return None
 def _search_for_tab_id(self, tab_id):
 for window in self.terminal_windows:
 for tab in window.tabs:
 if tab_id == tab.tab_id:
 return tab
 return None
 def _search_for_window_id(self, window_id):
 for window in self.terminal_windows:
 if window_id == window.window_id:
 return window
 return None
 async def async_refresh_focus(self) -> None:
 """Updates state about which objects have focus."""
 focus_info = await iterm2.rpc.async_get_focus_info(self.connection)
 for notif in focus_info.focus_response.notifications:
 await self._async_focus_change(self.connection, notif)
 async def async_refresh_broadcast_domains(self) -> None:
 response = await iterm2.rpc.async_get_broadcast_domains(self.connection)
 self._set_broadcast_domains(response.get_broadcast_domains_response.broadcast_domains)
 def get_session_by_id(self, session_id: str) -> typing.Union[None,iterm2.session.Session]:
 """Finds a session exactly matching the passed-in id.
 :param session_id: The session ID to search for.
 :returns: A :class:`Session` or `None`.
 """
 assert session_id
 return self._search_for_session_id(session_id)
 def get_tab_by_id(self, tab_id: str) -> typing.Union[iterm2.tab.Tab, None]:
 """Finds a tab exactly matching the passed-in id.
 :param tab_id: The tab ID to search for.
 :returns: A :class:`Tab` or `None`.
 """
 return self._search_for_tab_id(tab_id)
 def get_window_by_id(self, window_id: str) -> typing.Union[iterm2.window.Window, None]:
 """Finds a window exactly matching the passed-in id.
 :param window_id: The window ID to search for.
 :returns: A :class:`Window` or `None`.
 """
 return self._search_for_window_id(window_id)
 def get_window_for_tab(self, tab_id: str) -> typing.Union[iterm2.window.Window, None]:
 """Finds the window that contains the passed-in tab id.
 :param tab_id: The tab ID to search for.
 :returns: A :class:`Window` or `None`.
 """
 return self._search_for_window_with_tab(tab_id)
 def _search_for_window_with_tab(self, tab_id):
 for window in self.terminal_windows:
 for tab in window.tabs:
 if tab.tab_id == tab_id:
 return window
 return None
 async def async_refresh(self, connection: iterm2.connection.Connection=None, _sub_notif: typing.Any=None) -> None:
 """Reloads the hierarchy.
 Note that this calls :meth:`async_refresh_focus`.
 You generally don't need to call this explicitly because App keeps its state fresh by
 receiving notifications. One exception is if you need the REPL to pick up changes to the
 state, since it doesn't receive notifications at the Python prompt.
 """
 layout = await iterm2.rpc.async_list_sessions(self.connection)
 return await self._async_handle_layout_change(connection, layout)
 async def _async_handle_layout_change(self, _connection: iterm2.connection.Connection=None, layout: typing.Any=None) -> None:
 """Layout change notification handler. Also called by async_refresh."""
 list_sessions_response = layout.list_sessions_response
 new_windows = App._windows_from_list_sessions_response(
 self.connection,
 list_sessions_response)
 def all_sessions(windows):
 for w in windows:
 for t in w.tabs:
 for s in t.sessions:
 yield s
 old_sessions = list(all_sessions(self.terminal_windows))
 windows = []
 new_ids: typing.List[str] = []
 for new_window in new_windows:
 for new_tab in new_window.tabs:
 for new_session in new_tab.sessions:
 old = self.get_session_by_id(new_session.session_id)
 if old is not None:
 # Upgrade the old session's state
 old.update_from(new_session)
 # Replace references to the new session in the new tab with the old session
 new_tab.update_session(old)
 old_tab = self.get_tab_by_id(new_tab.tab_id)
 if old_tab is not None:
 # Upgrade the old tab's state. This copies the root over. The new tab
 # has references to old sessions, so it's ok. The only problem is that
 # splitters are left in the old state.
 old_tab.update_from(new_tab)
 # Replace the reference in the new window to the old tab.
 new_window.update_tab(old_tab)
 if new_window.window_id not in new_ids:
 new_ids.append(new_window.window_id)
 old_window = self.get_window_by_id(new_window.window_id)
 if old_window is not None:
 old_window.update_from(new_window)
 windows.append(old_window)
 else:
 windows.append(new_window)
 new_sessions = list(all_sessions(self.terminal_windows))
 def find_session(id_wanted, sessions):
 """Finds a session by ID."""
 for session in sessions:
 if session.session_id == id_wanted:
 return session
 return None
 def get_buried_session(session_summary):
 """Takes a session summary and returns an existing Session if one exists, or else creates a new one."""
 s = find_session(session_summary.unique_identifier, new_sessions)
 if s is None:
 s = find_session(session_summary.unique_identifier, old_sessions)
 if s is None:
 s = iterm2.session.Session(self.connection, None, session_summary)
 return s
 self.__buried_sessions = list(map(get_buried_session, list_sessions_response.buried_sessions))
 self.__terminal_windows = windows
 await self.async_refresh_focus()
 async def _async_focus_change(self, _connection, sub_notif):
 """Updates the record of what is in focus."""
 if sub_notif.HasField("application_active"):
 self.app_active = sub_notif.application_active
 elif sub_notif.HasField("window"):
 # Ignore window resigned key notifications because we track the
 # current terminal.
 if sub_notif.window.window_status != iterm2.api_pb2.FocusChangedNotification.Window.WindowStatus.Value("TERMINAL_WINDOW_RESIGNED_KEY"):
 self.current_terminal_window_id = sub_notif.window.window_id
 elif sub_notif.HasField("selected_tab"):
 window = self.get_window_for_tab(sub_notif.selected_tab)
 if window is None:
 await self.async_refresh()
 else:
 window.selected_tab_id = sub_notif.selected_tab
 elif sub_notif.HasField("session"):
 session = self.get_session_by_id(sub_notif.session)
 window, tab = self.get_tab_and_window_for_session(session)
 if tab is None:
 await self.async_refresh()
 else:
 tab.active_session_id = sub_notif.session
 async def _async_broadcast_domains_change(self, _connection, sub_notif):
 """Updates the current set of broadcast domains."""
 self._set_broadcast_domains(sub_notif.broadcast_domains_changed.broadcast_domains)
 def _set_broadcast_domains(self, broadcast_domains):
 self.__broadcast_domains = self.parse_broadcast_domains(broadcast_domains)
 def parse_broadcast_domains(self, list_of_broadcast_domain_protos: typing.List[iterm2.api_pb2.BroadcastDomain]) -> typing.List[iterm2.broadcast.BroadcastDomain]:
 """Converts a list of broadcast domain protobufs into a list of :class:`BroadcastDomain` objects.
 :param list_of_broadcast_domain_protos: A list of `BroadcastDomain` protos.
 :returns: A list of :class:`BroadcastDomain` objects.
 """
 domain_list = []
 for broadcast_domain_proto in list_of_broadcast_domain_protos:
 domain = iterm2.broadcast.BroadcastDomain()
 for sid in broadcast_domain_proto.session_ids:
 session = self.get_session_by_id(sid)
 if session:
 domain.add_session(session)
 else:
 domain.add_unresolved(lambda sid=sid: self.get_session_by_id(sid))  # bind sid now to avoid late binding in the loop
 domain_list.append(domain)
 return domain_list
 @property
 def current_terminal_window(self) -> typing.Union[iterm2.window.Window, None]:
 """Gets the topmost terminal window.
 The current terminal window is the window that receives keyboard input
 when iTerm2 is the active application.
 :returns: A :class:`Window` or `None`.
 """
 return self.get_window_by_id(self.current_terminal_window_id)
 @property
 def terminal_windows(self) -> typing.List[iterm2.window.Window]:
 """Returns a list of all terminal windows.
 :returns: A list of :class:`Window`
 """
 return self.__terminal_windows
 @property
 def buried_sessions(self) -> typing.List[iterm2.session.Session]:
 """Returns a list of buried sessions.
 :returns: A list of buried :class:`Session` objects.
 """
 return self.__buried_sessions
 @property
 def broadcast_domains(self) -> typing.List[iterm2.broadcast.BroadcastDomain]:
 """Returns the current broadcast domains.
 .. seealso::
 * Example ":ref:`targeted_input_example`"
 * Example ":ref:`enable_broadcasting_example`"
 """
 return self.__broadcast_domains
 def get_tab_and_window_for_session(self,
 session: iterm2.session.Session) -> typing.Union[typing.Tuple[None, None], typing.Tuple[iterm2.window.Window, iterm2.tab.Tab]]:
 """Deprecated because the name is wrong for the order of return arguments"""
 return self.get_window_and_tab_for_session(session)
 def get_window_and_tab_for_session(self,
 session: iterm2.session.Session) -> typing.Union[typing.Tuple[None, None], typing.Tuple[iterm2.window.Window, iterm2.tab.Tab]]:
 """Finds the tab and window that own a session.
 :param session: The session whose tab and window you wish to find.
 :returns: A tuple of (:class:`Window`, :class:`Tab`), or (`None`, `None`) if the session was not found.
 """
 for window in self.terminal_windows:
 for tab in window.tabs:
 if session in tab.sessions:
 return window, tab
 return None, None
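# Example sketch (assumes a live connection and at least one open window;
# current_tab/current_session are the usual accessors on Window and Tab):
#
#   async def main(connection):
#       app = await async_get_app(connection)
#       window = app.current_terminal_window
#       if window is not None:
#           session = window.current_tab.current_session
#           owner_window, owner_tab = app.get_window_and_tab_for_session(session)
#           print(owner_window.window_id, owner_tab.tab_id)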
 | |
| 
	Move axis b of Array a to position c. Also see `numpy.moveaxis`.
 """
 re=ch.functions.moveaxis(a.re,b,c)
 im=ch.functions.moveaxis(a.im,b,c)
 return Array(re,im)
 
def apply_action(action, reg):
 """
 Applies the action to the register reg, thereby changing the register's state (reg.psi).
 Parameters
 ----------
 action : qem.Action
 reg : qem.Reg
 Examples
 --------
 >>> import qem
 >>> reg=qem.Reg(2)
 >>> action=qem.Action((0),qem.H())
 >>> qem.apply_action(action,reg)
 >>> action=qem.Action((0,1),qem.CNOT())
 >>> qem.apply_action(action,reg)
 >>> print(reg.psi)
 variable([[0.70710678 0. ]
 [0. 0.70710678]])
 + i* 
 variable([[0. 0.]
 [0. 0.]])
 """
 # qem.Heisenberg_exp gets a special treatment. This way it is faster. Note e^(-i angle/4) * e^(-i angle (XX+YY+ZZ)/4)=cos(angle/2) Id - i sin(angle/2) SWAP.
 if type(action.gate)==Heisenberg_exp:
 angle=action.gate.angle
 reg_id=EmptyReg(reg.n)
 reg_SWAP=EmptyReg(reg.n)
 reg_id.psi.re=ch.functions.cos(angle/2.)*reg.psi.re
 reg_id.psi.im=ch.functions.cos(angle/2.)*reg.psi.im
 # Multiply SWAP term with sin
 reg_SWAP.psi.re=ch.functions.sin(angle/2.)*reg.psi.re
 reg_SWAP.psi.im=ch.functions.sin(angle/2.)*reg.psi.im
 # Multiply SWAP term with -i
 c=reg_SWAP.psi.re 
 reg_SWAP.psi.re=reg_SWAP.psi.im
 reg_SWAP.psi.im=-c
 # Do the SWAP
 reg_SWAP.psi.re=ch.functions.swapaxes(reg_SWAP.psi.re,*action.qubits)
 reg_SWAP.psi.im=ch.functions.swapaxes(reg_SWAP.psi.im,*action.qubits)
 # Add the SWAP term to the identity term
 reg.psi=reg_id.psi+reg_SWAP.psi
 # Also the gate Heisenberg() gets special treatment, very much like Heisenberg_exp. Note (XX+YY+ZZ)/4=SWAP/2-Id/4.
 elif type(action.gate)==Heisenberg:
 reg_id=EmptyReg(reg.n)
 reg_SWAP=EmptyReg(reg.n)
 reg_id.psi.re=-reg.psi.re/4
 reg_id.psi.im=-reg.psi.im/4
 reg_SWAP.psi.re=reg.psi.re/2
 reg_SWAP.psi.im=reg.psi.im/2
 reg_SWAP.psi.re=ch.functions.swapaxes(reg_SWAP.psi.re,*action.qubits)
 reg_SWAP.psi.im=ch.functions.swapaxes(reg_SWAP.psi.im,*action.qubits)
 # Add the SWAP term to the identity term
 reg.psi=reg_id.psi+reg_SWAP.psi
 
 else:
 n_legs=len(action.gate.array.shape)
 lower_legs=range(n_legs//2,n_legs)
 reg.psi=tensordot(action.gate.array,reg.psi,(lower_legs,action.qubits))
 reg.psi=moveaxis(reg.psi,range(n_legs//2),action.qubits)
def run(cir,reg):
 """
 Run the circuit cir on the register reg, thereby changing the quantum state of the register. 
 Parameters
 ----------
 cir : qem.Circuit
 reg : qem.Reg
 Examples
 --------
 Create a GHZ state on 8 qubits.
 >>> import qem
 >>> reg=qem.Reg(8)
 >>> cir=qem.Cir()
 >>> cir.append_action(qem.Action((0),qem.H()))
 >>> for i in range(7):
 ... cir.append_layer()
 ... cir.append_action(qem.Action((i,i+1),qem.CNOT()))
 >>> qem.run(cir,reg)
 >>> reg.print_ket_state()
 psi = (0.707107+0j)|00000000> + (0.707107+0j)|11111111>
 """ 
 for layer in cir.layers:
 for action in layer.actions:
 apply_action(action,reg)
 
def ground_state(g,k,return_state=False):
 """
 Compute the k lowest energies of the Heisenberg model defined on the graph g. (If k=1 only the ground state energy is computed.) The nodes of the graph need not be integers or coordinates (as is the case for test_graph_input.edges_fig() and related functions). If return_state=True, also the whole state vector is returned. 
 Optionally, a 'weight' attribute can be set for edges, which we will call w_e here. Then the Hamiltonian will read \sum_e w_e (X_e1 X_e2 + Y_e1 Y_e2 + Z_e1 Z_e2)/4. w_e defaults to 1 for the edges where no weight is given.
 
 Parameters
 ----------
 g : list
 The graph on which the Heisenberg model is defined as a list of edges, where every edge is of the form (int,int).
 k : int
 The energy is computed of the k states with the lowest energy. For k=1 only the ground state energy is computed.
 return_state : Bool (optional)
 If true, also the whole state vector is returned.
 Returns
 -------
 w : numpy.ndarray (dtype=numpy.float64)
 Array containing the k lowest eigenvalues in increasing order.
 If return_state==True, the state vectors are also returned. The output is then equal to that of scipy.sparse.linalg.eigsh, i.e. a pair (w, v) with w the array containing the k lowest eigenvalues and v an array holding the k eigenvectors; the column v[:, i] is the eigenvector corresponding to the eigenvalue w[i]. Note the ground state is returned as a flat array (as opposed to the shape of e.g. qem.Reg.psi and functions such as qem.basis_state()).
 
 Notes
 -----
 This function uses a Lanczos algorithm (ARPACK via scipy.sparse.linalg.eigsh) to compute the energy memory-efficiently. Storing the complete Hamiltonian, even as a sparse matrix, can be very costly. Therefore, the Hamiltonian is supplied to scipy.sparse.linalg.eigsh as a callable, i.e. a function that receives a vector r and returns H.r (the Hamiltonian applied to r).
 
 """
 heisenberg_tensor_real=xp.array([[1,0,0,0],[0,-1,2,0],[0,2,-1,0],[0,0,0,1]],dtype=xp.float64)/4
 heisenberg_tensor_real=heisenberg_tensor_real.reshape((2,2,2,2)) # Note the Heisenberg tensor is real, so the Hamiltonian and every vector appearing during the Lanczos algorithm are real as well.
 nodes=[node for edge in g for node in edge]
 nodes=set(nodes)
 n=len(nodes)
 del nodes
 
 def Hv(v):
 v=xp.array(v,dtype=xp.float64)
 v=v.reshape((2,)*n)
 vp=xp.zeros((2,)*n,dtype=xp.float64)
 for edge in g:
 new_term=xp.tensordot(heisenberg_tensor_real,v,((2,3),edge))
 new_term=xp.moveaxis(new_term,(0,1),edge)
 vp+=new_term
 vp=vp.flatten()
 if GPU==True:
 vp=xp.asnumpy(vp)
 return vp
 H=scipy.sparse.linalg.LinearOperator((2**n,2**n),matvec=Hv)
 output=scipy.sparse.linalg.eigsh(H,k,which='SA',maxiter=numpy.iinfo(numpy.int32).max)
 if return_state==False:
 return output[0]
 else:
 return output
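# Usage sketch (square graph; compare the Heisenberg_energy docstring below):
#
#   edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
#   w = ground_state(edges, 1)
#   # w[0] is approximately -2.0, the ground-state energy of the 4-site ring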
# Gates as functions
def apply_prepare_singlet(qubits,reg):
 action=Action(qubits, prepare_singlet())
 apply_action(action,reg)
def apply_H(qubits,reg):
 """
 Apply the Hadamard gate to the given qubit of the register.
 Parameters
 ----------
 qubits : tuple containing one int
 The number of the qubit the gate is to be applied to
 
 reg : qem.reg
 The register the gate H is to be applied to.
 
 Examples
 --------
 >>> import qem
 >>> reg=qem.Reg(2)
 >>> qem.apply_H(0,reg)
 >>> print(reg.psi)
 variable([[0.70710678 0. ]
 [0.70710678 0. ]])
 + i* 
 variable([[0. 0.]
 [0. 0.]])
 """
 action=Action(qubits, H())
 apply_action(action,reg)
def apply_X(qubits,reg):
 """
 Apply the X gate to reg. See `qem.apply_H`.
 """
 action=Action(qubits, X())
 apply_action(action,reg)
def apply_Y(qubits,reg):
 """
 Apply the Y gate to reg. See `qem.apply_H`.
 """
 action=Action(qubits, Y())
 apply_action(action,reg)
def apply_Z(qubits,reg):
 """
 Apply the Z gate to reg. See `qem.apply_H`.
 """
 action=Action(qubits, Z())
 apply_action(action,reg)
def apply_CNOT(qubits,reg):
 """
 Apply the CNOT gate to reg. Qubits is a tuple of the form (int,int), containing the control and target qubit number (in that order).
 Parameters
 ----------
 qubits : tuple (int,int)
 Tuple containing the control and target qubit number (in that order).
 reg : qem.reg
 The register the CNOT is to be applied to.
 Examples
 --------
 >>> import qem
 >>> reg=qem.Reg(2)
 >>> qem.apply_H((0),reg)
 >>> qem.apply_CNOT((0,1),reg)
 >>> print(reg.psi)
 variable([[0.70710678 0. ]
 [0. 0.70710678]])
 + i* 
 variable([[0. 0.]
 [0. 0.]])
 """
 action=Action(qubits, CNOT())
 apply_action(action,reg)
def Heisenberg_energy(g,reg,reg_psi_list):
 """
 Compute < reg.psi | H | reg.psi >, with H the Heisenberg Hamiltonian defined on the graph g.
 Parameters
 ----------
 g : list of edges or networkx.Graph 
 List of edges of the form (int,int) that define the graph. If it is a networkx.Graph object, the graph should already be mapped to ints (optionally keeping the 'old' node attribute for completeness, but this is not required). In that case, the edges can additionally specify an edge attribute called 'weight'. This means that for this edge, the Hamiltonian term is weight*(XX+YY+ZZ)/4.
 reg : qem.reg
 The register containing the state for which the expectation value of the Hamiltonian is to be computed.
 reg_psi_list : list
 States to which the register's state is penalized for having overlap: for every state in the list a term 500*|<state|reg.psi>|^2 is added to the returned energy. Pass an empty list for a plain energy evaluation.
 Returns
 -------
 energy : chainer.Variable
 Example
 -------
 Compute the expectation value of the energy of the Neel state |0101> on a square. 
 >>> import qem
 >>> import numpy as np
 >>> edges=[(0,1),(1,2),(2,3),(3,0)]
 >>> reg=qem.Reg(4)
 >>> qem.apply_X(1,reg)
 >>> qem.apply_X(3,reg)
 >>> print(qem.Heisenberg_energy(edges,reg))
 variable(-1.)
 Compare this to the ground state energy of the Heisenberg model on the square.
 >>> print(qem.ground_state(edges,1)[0].round())
 -2.0
 """
 large_para = 500
 # global reg_psi_list
 global reg_psi
 E=0.
 reg_prime=EmptyReg(reg.n)
 gate=Heisenberg()
 for edge in g:
 reg_prime.psi=reg.psi 
 action=Action(edge,gate)
 apply_action(action,reg_prime)
 reg_prime.psi.do_dagger()
 E_term=tensordot(reg_prime.psi,reg.psi, (range(reg.n),range(reg.n)))
 E+=E_term.re
 if len(reg_psi_list) != 0:
 for i in range(len(reg_psi_list)):
 innerproduct = tensordot(reg_psi_list[i],reg.psi,(range(reg.n),range(reg.n)))
 E = E + large_para*(innerproduct.re*innerproduct.re+innerproduct.im*innerproduct.im)
 reg_psi = reg.psi
 return E,reg_psi
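# Usage sketch (no orthogonality penalty, so an empty reg_psi_list is passed;
# the values follow the docstring example above):
#
#   edges = [(0, 1), (1, 2), (2, 3), (3, 0)]
#   reg = Reg(4)
#   apply_X(1, reg)
#   apply_X(3, reg)
#   energy, psi = Heisenberg_energy(edges, reg, [])
#   # energy is a chainer.Variable equal to -1.0 for this Neel state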
def compute_s(reg):
 """
 Checks whether the state of the register is an eigenstate of the total spin operator S^2. If it is not an eigenstate, it returns None. If it is an eigenstate, it returns the quantum number s, defined by S^2|psi>=s(s+1)|psi>, with |psi> the eigenstate.
 """
 #Check that the norm of the state of the register is unity
 norm=tensordot(reg.psi.dagger().flatten(),reg.psi.flatten(),((0),(0)))
 norm=xp.sqrt(norm.re.array**2+norm.im.array**2)
 assert xp.around(norm,5)==1., 'State of the register is not normalized.'
 reg_prime=Reg(reg.n)
 reg_prime.psi=Array(xp.zeros(reg.psi.shape), xp.zeros(reg.psi.shape))
 for i in range(reg.n):
 for j in range(reg.n):
 reg_prime_prime=deepcopy(reg)
 apply_X(j,reg_prime_prime)
 apply_X(i,reg_prime_prime)
 reg_prime.psi=reg_prime.psi + reg_prime_prime.psi
 for i in range(reg.n): 
 for j in range(reg.n):
 reg_prime_prime=deepcopy(reg)
 apply_Y(j,reg_prime_prime)
 apply_Y(i,reg_prime_prime)
 reg_prime.psi=reg_prime.psi + reg_prime_prime.psi
 for i in range(reg.n):
 for j in range(reg.n):
 reg_prime_prime=deepcopy(reg)
 apply_Z(j,reg_prime_prime)
 apply_Z(i,reg_prime_prime)
 reg_prime.psi=reg_prime.psi + reg_prime_prime.psi
 inner=tensordot(reg.psi.dagger().flatten(),reg_prime.psi.flatten(),((0),(0)))
 norm=tensordot(reg_prime.psi.dagger().flatten(),reg_prime.psi.flatten(),((0),(0)))
 norm=xp.sqrt(norm.re.array**2+norm.im.array**2)
 if xp.around(norm,5)==0.:
 print('State of register is eigenstate of the total spin operator, with s=0')
 return 0.
 elif xp.around(xp.sqrt(inner.re.array**2+inner.im.array**2)/norm,5)!=1.:
 print('State of register is not an eigenstate of the total spin operator')
 return None
 elif xp.around(xp.sqrt(inner.re.array**2+inner.im.array**2)/norm,5)==1.:
 print('State of register is eigenstate of the total spin operator, with')
 s=-1/2+1/2*xp.sqrt(1+4*norm)
 print('s=',s)
 return s
 else:
 raise ValueError()
def expectation(cir,reg):
 """
 Returns the expectation value <psi|U|psi>, where psi is the state of the register (=reg.psi) and U is the unitary induced by the circuit.
 
 Parameters
 ----------
 cir : qem.Circuit
 
 reg : qem.Reg
 Returns
 -------
 ex : qem.Array (with ex.re a chainer.Variable with shape (), likewise for ex.im)
 
 Examples
 --------
 Compute the expectation value <psi|Z_0 Z_1|psi> with |psi>=|0000>:
 >>> | |
| 
# test.py
# coding=utf-8
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cld
import unittest
import sys
from testData import *
VERBOSE = False
# MKM: ported from FullTests in compact_lang_det_unittest_small.cc
class TestCLD(unittest.TestCase):
 langsSeen = set()
 detLangsSeen = set()
 def runOne(self, expectedLangName, s, shouldBeReliable=True):
 if VERBOSE:
 print
 print 'Test: %s [%d bytes]' % (expectedLangName, len(s))
 detectedLangName, detectedLangCode, isReliable, textBytesFound, details = cld.detect(s, pickSummaryLanguage=True, removeWeakMatches=False)
 if VERBOSE:
 print ' detected: %s' % detectedLangName
 print ' reliable: %s' % (isReliable != 0)
 print ' textBytes: %s' % textBytesFound
 print ' details: %s' % str(details)
 self.langsSeen.add(expectedLangName)
 for tup in details:
 self.detLangsSeen.add(tup[0])
 print ' %d langs; %d ever detected' % (len(self.langsSeen), len(self.detLangsSeen))
 if False:
 if expectedLangName == 'YIDDISH':
 l = list(self.detLangsSeen)
 l.sort()
 for i, name in enumerate(l):
 print ' PyTuple_SET_ITEM(pyDetLangs, %d, PyString_FromString("%s"));' % (i, name)
 
 self.assertEquals(expectedLangName, detectedLangName, '%s != %s; details: %s' % (detectedLangName, expectedLangName, str(details)))
 self.assertTrue(not shouldBeReliable or isReliable)
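 # Illustrative sketch (not one of the ported tests): cld.detect(), as used by
 # runOne() above, returns a 5-tuple.
 #
 # name, code, isReliable, textBytesFound, details = cld.detect(
 # kTeststr_en_Latn, pickSummaryLanguage=True, removeWeakMatches=False)
 # assert name == 'ENGLISH'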
 def testAFRIKAANS(self):
 self.runOne('AFRIKAANS', kTeststr_af_Latn)
 
 # def testAFAR(self):
 # self.runOne('AFAR', kTeststr_aa_Latn)
 
 # def testABKHAZIAN(self):
 # self.runOne('ABKHAZIAN', kTeststr_ab_Cyrl)
 
 def testAMHARIC(self):
 self.runOne('AMHARIC', kTeststr_am_Ethi)
 
 def testARABIC(self):
 self.runOne('ARABIC', kTeststr_ar_Arab)
 
 # def testASSAMESE(self):
 # self.runOne('ASSAMESE', kTeststr_as_Beng)
 
 # def testAYMARA(self):
 # self.runOne('AYMARA', kTeststr_ay_Latn)
 
 # AZERBAIJANI Arab & Cyrl removed 2008.05.27. Just AZERBAIJANI Latn left
 # def testAZERBAIJANI(self):
 # self.runOne('AZERBAIJANI', kTeststr_az_Arab)
 
 # Missing data: az-Cyrl
 def testAZERBAIJANI2(self):
 self.runOne('AZERBAIJANI', kTeststr_az_Latn)
 
 # def testBASHKIR(self):
 # self.runOne('BASHKIR', kTeststr_ba_Cyrl)
 
 def testBELARUSIAN(self):
 self.runOne('BELARUSIAN', kTeststr_be_Cyrl)
 
 def testBULGARIAN(self):
 self.runOne('BULGARIAN', kTeststr_bg_Cyrl)
 
 # def testBIHARI(self):
 # self.runOne('BIHARI', kTeststr_bh_Deva)
 
 # def testBISLAMA(self):
 # self.runOne('BISLAMA', kTeststr_bi_Latn)
 
 def testBENGALI(self):
 self.runOne('BENGALI', kTeststr_bn_Beng)
 
 def testTIBETAN(self):
 self.runOne('TIBETAN', kTeststr_bo_Tibt)
 
 # def testBRETON(self):
 # self.runOne('BRETON', kTeststr_br_Latn)
 
 def testSERBIAN(self):
 self.runOne('SERBIAN', kTeststr_bs_Cyrl) # NOTE: Not BOSNIAN
 
 def testCROATIAN(self):
 self.runOne('CROATIAN', kTeststr_bs_Latn) # NOTE: Not BOSNIAN
 
 def testCATALAN(self):
 self.runOne('CATALAN', kTeststr_ca_Latn)
 
 def testCHEROKEE(self):
 self.runOne('CHEROKEE', kTeststr_chr_Cher)
 
 # def testCORSICAN(self):
 # self.runOne('CORSICAN', kTeststr_co_Latn)
 
 # No CREOLES_AND_PIDGINS_ENGLISH_BASED
 # No CREOLES_AND_PIDGINS_FRENCH_BASED
 # No CREOLES_AND_PIDGINS_OTHER
 # No CREOLES_AND_PIDGINS_PORTUGUESE_BASED
 def testCZECH(self):
 self.runOne('CZECH', kTeststr_cs_Latn)
 
 def testWELSH(self):
 self.runOne('WELSH', kTeststr_cy_Latn)
 def testDANISH(self):
 self.runOne('DANISH', kTeststr_da_Latn)
 
 def testGERMAN(self):
 self.runOne('GERMAN', kTeststr_de_Latn)
 
 def testDHIVEHI(self):
 self.runOne('DHIVEHI', kTeststr_dv_Thaa)
 
 # def testDZONGKHA(self):
 # self.runOne('DZONGKHA', kTeststr_dz_Tibt)
 def testGREEK(self):
 self.runOne('GREEK', kTeststr_el_Grek)
 
 def testENGLISH(self):
 self.runOne('ENGLISH', kTeststr_en_Latn)
 
 def testENGLISH2(self):
 self.runOne('ENGLISH', kTeststr_en)
 # def testESPERANTO(self):
 # self.runOne('ESPERANTO', kTeststr_eo_Latn)
 
 def testSPANISH(self):
 self.runOne('SPANISH', kTeststr_es_Latn)
 
 def testESTONIAN(self):
 self.runOne('ESTONIAN', kTeststr_et_Latn)
 
 def testBASQUE(self):
 self.runOne('BASQUE', kTeststr_eu_Latn)
 def testPERSIAN(self):
 self.runOne('PERSIAN', kTeststr_fa_Arab)
 
 def testFINNISH(self):
 self.runOne('FINNISH', kTeststr_fi_Latn)
 
 # def testFIJIAN(self):
 # self.runOne('FIJIAN', kTeststr_fj_Latn)
 
 # def testFAROESE(self):
 # self.runOne('FAROESE', kTeststr_fo_Latn)
 
 def testFRENCH(self):
 self.runOne('FRENCH', kTeststr_fr_Latn)
 
 # def testFRISIAN(self):
 # self.runOne('FRISIAN', kTeststr_fy_Latn)
 def testIRISH(self):
 self.runOne('IRISH', kTeststr_ga_Latn)
 
 # def testSCOTS_GAELIC(self):
 # self.runOne('SCOTS_GAELIC', kTeststr_gd_Latn)
 
 # def testGALICIAN(self):
 # self.runOne('GALICIAN', kTeststr_gl_Latn)
 def testGALICIAN2(self):
 self.runOne('GALICIAN', kTeststr_gl_Latn2)
 
 # def testGUARANI(self):
 # self.runOne('GUARANI', kTeststr_gn_Latn)
 
 def testGUJARATI(self):
 self.runOne('GUJARATI', kTeststr_gu_Gujr)
 
 # def testMANX(self):
 # self.runOne('MANX', kTeststr_gv_Latn)
 # def testHAUSA(self):
 # self.runOne('HAUSA', kTeststr_ha_Latn)
 
 def testHINDI(self):
 self.runOne('HINDI', kTeststr_hi_Deva)
 
 def testHINDI2(self):
 self.runOne('HINDI', kTeststr_ks)
 
 def testCROATIAN2(self):
 self.runOne('CROATIAN', kTeststr_hr_Latn, shouldBeReliable=False) # NOTE: now CROATIAN
 
 def testHAITIAN_CREOLE(self):
 self.runOne('HAITIAN_CREOLE', kTeststr_ht_Latn)
 
 def testHUNGARIAN(self):
 self.runOne('HUNGARIAN', kTeststr_hu_Latn)
 
 def testARMENIAN(self):
 self.runOne('ARMENIAN', kTeststr_hy_Armn)
 # def testINTERLINGUA(self):
 # self.runOne('INTERLINGUA', kTeststr_ia_Latn)
 
 def testMALAY(self):
 self.runOne('MALAY', kTeststr_id_Latn)
 
 # def testINTERLINGUE(self):
 # self.runOne('INTERLINGUE', kTeststr_ie_Latn)
 
 # def testINUPIAK(self):
 # self.runOne('INUPIAK', kTeststr_ik_Latn)
 
 def testICELANDIC(self):
 self.runOne('ICELANDIC', kTeststr_is_Latn)
 
 def testITALIAN(self):
 self.runOne('ITALIAN', kTeststr_it_Latn)
 
 def testINUKTITUT(self):
 self.runOne('INUKTITUT', kTeststr_iu_Cans)
 
 def testHEBREW(self):
 self.runOne('HEBREW', kTeststr_iw_Hebr)
 def testJAPANESE(self):
 self.runOne('Japanese', kTeststr_ja_Hani)
 
 # def testJAVANESE(self):
 # self.runOne('JAVANESE', kTeststr_jw_Latn)
 def testGEORGIAN(self):
 self.runOne('GEORGIAN', kTeststr_ka_Geor)
 
 # def testKHASI(self):
 # self.runOne('KHASI', kTeststr_kha_Latn)
 
 # def testKAZAKH(self):
 # self.runOne('KAZAKH', kTeststr_kk_Arab)
 
 # def testKAZAKH2(self):
 # self.runOne('KAZAKH', kTeststr_kk_Cyrl)
 
 # def testKAZAKH3(self):
 # self.runOne('KAZAKH', kTeststr_kk_Latn)
 
 # def testGREENLANDIC(self):
 # self.runOne('GREENLANDIC', kTeststr_kl_Latn)
 
 def testKHMER(self):
 self.runOne('KHMER', kTeststr_km_Khmr)
 
 def testKANNADA(self):
 self.runOne('KANNADA', kTeststr_kn_Knda)
 
 def testKOREAN(self):
 self.runOne('Korean', kTeststr_ko_Hani)
 
 # def testKASHMIRI(self):
 # self.runOne('KASHMIRI', kTeststr_ks_Deva)
 
 # KURDISH Latn removed 2008.05.27. Just KURDISH Arab left
 # def testKURDISH(self):
 # self.runOne('KURDISH', kTeststr_ku_Arab)
 
 # def testKURDISH2(self):
 # self.runOne('KURDISH', kTeststr_ku_Latn)
 
 # def testKYRGYZ(self):
 # self.runOne('KYRGYZ', kTeststr_ky_Arab)
 
 # def testKYRGYZ2(self):
 # self.runOne('KYRGYZ', kTeststr_ky_Cyrl)
 
 # def testLATIN(self):
 # self.runOne('LATIN', kTeststr_la_Latn)
 
 # def testLUXEMBOURGISH(self):
 # self.runOne('LUXEMBOURGISH', kTeststr_lb_Latn)
 
 # def testGANDA(self):
 # self.runOne('GANDA', kTeststr_lg_Latn)
 
 # def testLINGALA(self):
 # self.runOne('LINGALA', kTeststr_ln_Latn)
 
 def testLAOTHIAN(self):
 self.runOne('LAOTHIAN', kTeststr_lo_Laoo)
 
 def testLITHUANIAN(self):
 self.runOne('LITHUANIAN', kTeststr_lt_Latn)
 
 def testLATVIAN(self):
 self.runOne('LATVIAN', kTeststr_lv_Latn)
 
 # def testMALAGASY(self):
 # self.runOne('MALAGASY', kTeststr_mg_Latn)
 
 # def testMAORI(self):
 # self.runOne('MAORI', kTeststr_mi_Latn)
 
 def testMACEDONIAN(self):
 self.runOne('MACEDONIAN', kTeststr_mk_Cyrl)
 
 def testMALAYALAM(self):
 self.runOne('MALAYALAM', kTeststr_ml_Mlym)
 
 # def testMONGOLIAN(self):
 # self.runOne('MONGOLIAN', kTeststr_mn_Cyrl)
 
 # def testMOLDAVIAN(self):
 # self.runOne('MOLDAVIAN', kTeststr_mo_Cyrl)
 
 # def testMARATHI(self):
 # self.runOne('MARATHI', kTeststr_mr_Deva)
 
 def testMALAY2(self):
 self.runOne('MALAY', kTeststr_ms_Latn)
 
 def testMALAY3(self):
 self.runOne('MALAY', kTeststr_ms_Latn2)
 
 def testMALAY4(self):
 self.runOne('MALAY', kTeststr_ms_Latn3)
 
 def testMALTESE(self):
 self.runOne('MALTESE', kTeststr_mt_Latn)
 
 # def testBURMESE(self):
 # self.runOne('BURMESE', kTeststr_my_Latn)
 
 def testBURMESE2(self):
 self.runOne('BURMESE', kTeststr_my_Mymr)
 # def testNAURU(self):
 # self.runOne('NAURU', kTeststr_na_Latn)
 
 # def testNEPALI(self):
 # self.runOne('NEPALI', kTeststr_ne_Deva)
 
 def testDUTCH(self):
 self.runOne('DUTCH', kTeststr_nl_Latn)
 
 #def testNORWEGIAN_N(self):
 # self.runOne('NORWEGIAN_N', kTeststr_nn_Latn)
 
 def testNORWEGIAN(self):
 self.runOne('NORWEGIAN', kTeststr_no_Latn)
 
 # def testOCCITAN(self):
 # self.runOne('OCCITAN', kTeststr_oc_Latn)
 
 # def testOROMO(self):
 # self.runOne('OROMO', kTeststr_om_Latn)
 
 def testORIYA(self):
 self.runOne('ORIYA', kTeststr_or_Orya)
 
 def testPUNJABI(self):
 self.runOne('PUNJABI', kTeststr_pa_Guru)
 
 def testPOLISH(self):
 self.runOne('POLISH', kTeststr_pl_Latn)
 
 # def testPASHTO(self):
 # self.runOne('PASHTO', kTeststr_ps_Arab)
 
 def testPORTUGUESE(self):
 self.runOne('PORTUGUESE', kTeststr_pt_BR) # NOTE: not PORTUGUESE_B
 # nor PORTUGUESE_P
 # def testQUECHUA(self):
 # self.runOne('QUECHUA', kTeststr_qu_Latn)
 
 # def testRHAETO_ROMANCE(self):
 # self.runOne('RHAETO_ROMANCE', kTeststr_rm_Latn)
 
 # def testRUNDI(self):
 # self.runOne('RUNDI', kTeststr_rn_Latn)
 
 def testROMANIAN(self):
 self.runOne('ROMANIAN', kTeststr_ro_Latn)
 
 def testRUSSIAN(self):
 self.runOne('RUSSIAN', kTeststr_ru_Cyrl)
 
 # def testKINYARWANDA(self):
 # self.runOne('KINYARWANDA', kTeststr_rw_Latn)
 # def testSANSKRIT(self):
 # self.runOne('SANSKRIT', kTeststr_sa_Deva)
 
 # def testSANSKRIT2(self):
 # self.runOne('SANSKRIT', kTeststr_sa_Latn)
 
 # def testSCOTS(self):
 # self.runOne('SCOTS', kTeststr_sco_Latn)
 
 # def testSINDHI(self):
 # self.runOne('SINDHI', kTeststr_sd_Arab)
 
 # def testSANGO(self):
 # self.runOne('SANGO', kTeststr_sg_Latn)
 
 # No SERBO_CROATIAN (sh)
 def testSINHALESE(self):
 self.runOne('SINHALESE', kTeststr_si_Sinh)
 
 # def testLIMBU(self):
 # self.runOne('LIMBU', kTeststr_sit_NP)
 
 def testSLOVAK(self):
 self.runOne('SLOVAK', kTeststr_sk_Latn)
 
 def testSLOVENIAN(self):
 self.runOne('SLOVENIAN', kTeststr_sl_Latn)
 
 # def testSAMOAN(self):
 # self.runOne('SAMOAN', kTeststr_sm_Latn)
 
 # def testSHONA(self):
 # self.runOne('SHONA', kTeststr_sn_Latn)
 
 # def testSOMALI(self):
 # self.runOne('SOMALI', kTeststr_so_Latn)
 
 def testALBANIAN(self):
 self.runOne('ALBANIAN', kTeststr_sq_Latn)
 
 def testSERBIAN2(self):
 self.runOne('SERBIAN', kTeststr_sr_Cyrl) # NOTE: now SERBIAN
 
 def testCROATIAN3(self):
 self.runOne('CROATIAN', kTeststr_sr_Latn) # NOTE: Not SERBIAN
 
 def testCROATIAN4(self):
 self.runOne('CROATIAN', kTeststr_sr_ME_Latn) # NOTE: not SERBIAN nor MONTENEGRIN
 
 # def testSISWANT(self):
 # self.runOne('SISWANT', kTeststr_ss_Latn)
 
 # def testSESOTHO(self):
 # self.runOne('SESOTHO', kTeststr_st_Latn)
 
 # def testSUNDANESE(self):
 # self.runOne('SUNDANESE', kTeststr_su_Latn)
 
 def testSWEDISH(self):
 self.runOne('SWEDISH', kTeststr_sv_Latn)
 
 def testSWAHILI(self):
 self.runOne('SWAHILI', kTeststr_sw_Latn)
 
 def testSYRIAC(self):
 self.runOne('SYRIAC', kTeststr_syr_Syrc)
 
 def testTAMIL(self):
 self.runOne('TAMIL', kTeststr_ta_Taml)
 
 def testTELUGU(self):
 self.runOne('TELUGU', kTeststr_te_Telu)
 
 # Tajik Arab removed 2008.05.27. Just Tajik Cyrl left
 # def testTAJIK(self):
 # self.runOne('TAJIK', kTeststr_tg_Arab)
 
 # def testTAJIK2(self):
 # self.runOne('TAJIK', kTeststr_tg_Cyrl)
 
 def testTHAI(self):
 self.runOne('THAI', kTeststr_th_Thai)
 
 # def testTIGRINYA(self):
 # self.runOne('TIGRINYA', kTeststr_ti_Ethi)
 
 # def testTURKMEN(self):
 # self.runOne('TURKMEN', kTeststr_tk_Cyrl)
 
 # def testTURKMEN2(self):
 # self.runOne('TURKMEN', kTeststr_tk_Latn)
 
 def testTAGALOG(self):
 self.runOne('TAGALOG', kTeststr_tl_Latn)
 
 # def testTSWANA(self):
 # self.runOne('TSWANA', kTeststr_tn_Latn)
 
 # def testTONGA(self):
 # self.runOne('TONGA', kTeststr_to_Latn)
 
 def testTURKISH(self):
 self.runOne('TURKISH', kTeststr_tr_Latn)
 
 # def testTSONGA(self):
 # self.runOne('TSONGA', kTeststr_ts_Latn)
 
 # def testTATAR(self):
 # self.runOne('TATAR', kTeststr_tt_Cyrl)
 
 # def testTATAR2(self):
 # self.runOne('TATAR', kTeststr_tt_Latn)
 
 # def testTWI(self):
 # self.runOne('TWI', kTeststr_tw_Latn)
 
 # def testUIGHUR(self):
 # self.runOne('UIGHUR', kTeststr_ug_Arab)
 
 # def testUIGHUR2(self):
 # self.runOne('UIGHUR', kTeststr_ug_Cyrl)
 
 # def testUIGHUR3(self):
 # self.runOne('UIGHUR', kTeststr_ug_Latn)
 
 def testUKRAINIAN(self):
 self.runOne('UKRAINIAN', kTeststr_uk_Cyrl)
 
 def testURDU(self):
 self.runOne('URDU', kTeststr_ur_Arab)
 
 # def testUZBEK(self):
 # self.runOne('UZBEK', kTeststr_uz_Arab)
 
 # def testUZBEK2(self):
 # self.runOne('UZBEK', kTeststr_uz_Cyrl)
 
 # def testUZBEK3(self):
 # self.runOne('UZBEK', kTeststr_uz_Latn)
 
 def testVIETNAMESE(self):
 self.runOne('VIETNAMESE', kTeststr_vi_Latn)
 
 # def testVOLAPUK(self):
 # self.runOne('VOLAPUK', kTeststr_vo_Latn)
 
 # def testWOLOF(self):
 # self.runOne('WOLOF', kTeststr_wo_Latn)
 # def testXHOSA(self):
 # self.runOne('XHOSA', kTeststr_xh_Latn)
 
 def testYIDDISH(self):
 self.runOne('YIDDISH', kTeststr_yi_Hebr)
 
 # def testYORUBA(self):
 # self.runOne('YORUBA', kTeststr_yo_Latn)
 # Zhuang Hani removed 2008.05.13. Just Zhuang Latn left
 # def testZHUANG(self):
 # self.runOne('ZHUANG', kTeststr_za_Hani)
 
 # def testZHUANG2(self):
 # self.runOne('ZHUANG', kTeststr_za_Latn)
 
 def testCHINESE(self):
 self.runOne('Chinese', kTeststr_zh_Hani)
 
 def testCHINESE_T(self):
 self.runOne('ChineseT', kTeststr_zh_TW)
 def testINDONESIAN(self):
 self.runOne('INDONESIAN', kTeststr_id)
 | |
| 
	</footer>\n",
 " </div>\n",
 "</div>\n",
 "</div>\n",
 "\n",
 " </div>\n",
 " </div>\n",
 " </section>\n",
 " </body>\n",
 "</html>\n",
 "\n"
 ]
 }
 ],
 "source": [
 "import requests\n",
 "\n",
 "URL = \"https://realpython.github.io/fake-jobs/\"\n",
 "page = requests.get(URL)\n",
 "\n",
 "print(page.text)"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 2,
 "id": "eec9708f",
 "metadata": {},
 "outputs": [],
 "source": [
 "import requests\n",
 "from bs4 import BeautifulSoup\n",
 "\n",
 "URL = \"https://realpython.github.io/fake-jobs/\"\n",
 "page = requests.get(URL)\n",
 "\n",
 "soup = BeautifulSoup(page.content, \"html.parser\")"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 3,
 "id": "eb4a70c8",
 "metadata": {},
 "outputs": [],
 "source": [
 "results = soup.find(id=\"ResultsContainer\")"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 4,
 "id": "59cc8fee",
 "metadata": {},
 "outputs": [],
 "source": [
 "job_elements = results.find_all(\"div\", class_=\"card-content\")"
 ]
 },
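 {
 "cell_type": "code",
 "execution_count": null,
 "id": "added-cell-0",
 "metadata": {},
 "outputs": [],
 "source": [
 "# Added sketch (not in the original notebook; the cell id above is made up):\n",
 "# pull the job title, company and location out of each card, using the class\n",
 "# names visible in the HTML printed by the next cell.\n",
 "for job_element in job_elements:\n",
 "    title_element = job_element.find(\"h2\", class_=\"title\")\n",
 "    company_element = job_element.find(\"h3\", class_=\"company\")\n",
 "    location_element = job_element.find(\"p\", class_=\"location\")\n",
 "    print(title_element.text.strip())\n",
 "    print(company_element.text.strip())\n",
 "    print(location_element.text.strip())\n",
 "    print()"
 ]
 },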
 {
 "cell_type": "code",
 "execution_count": 5,
 "id": "3f26ae7a",
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Senior Python Developer</h2>\n",
 "<h3 class=\"subtitle is-6 company\"><NAME> and Davis</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Stewartbury, AA\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/senior-python-developer-0.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Energy engineer</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Vasquez-Davidson</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Christopherville, AA\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/energy-engineer-1.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Legal executive</h2>\n",
 "<h3 class=\"subtitle is-6 company\"><NAME> and Levy</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Port Ericaburgh, AA\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/legal-executive-2.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Fitness centre manager</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Savage-Bradley</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " East Seanview, AP\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/fitness-centre-manager-3.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Product manager</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Ramirez Inc</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " North Jamieview, AP\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/product-manager-4.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Medical technical officer</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Rogers-Yates</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Davidville, AP\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/medical-technical-officer-5.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Physiological scientist</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Kramer-Klein</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " South Christopher, AE\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/physiological-scientist-6.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Textile designer</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Meyers-Johnson</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Port Jonathan, AE\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/textile-designer-7.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Television floor manager</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Hughes-Williams</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Osbornetown, AE\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/television-floor-manager-8.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Waste management officer</h2>\n",
 "<h3 class=\"subtitle is-6 company\"><NAME> and Villa</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Scotttown, AP\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/waste-management-officer-9.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Software Engineer (Python)</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Garcia PLC</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Ericberg, AE\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/software-engineer-python-10.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Interpreter</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Gregory and Sons</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Ramireztown, AE\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/interpreter-11.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Architect</h2>\n",
 "<h3 class=\"subtitle is-6 company\"><NAME> and Sosa</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Figueroaview, AA\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/architect-12.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Meteorologist</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Bush PLC</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Kelseystad, AA\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/meteorologist-13.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Audiological scientist</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Salazar-Meyers</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Williamsburgh, AE\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/audiological-scientist-14.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">English as a second language teacher</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Parker, Murphy and Brooks</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Mitchellburgh, AE\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/english-as-a-second-language-teacher-15.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Surgeon</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Cruz-Brown</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " West Jessicabury, AA\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/surgeon-16.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Equities trader</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Macdonald-Ferguson</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Maloneshire, AE\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/equities-trader-17.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Newspaper journalist</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Williams, Peterson and Rojas</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " Johnsonton, AA\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time datetime=\"2021-04-08\">2021-04-08</time>\n",
 "</p>\n",
 "</div>\n",
 "<footer class=\"card-footer\">\n",
 "<a class=\"card-footer-item\" href=\"https://www.realpython.com\" target=\"_blank\">Learn</a>\n",
 "<a class=\"card-footer-item\" href=\"https://realpython.github.io/fake-jobs/jobs/newspaper-journalist-18.html\" target=\"_blank\">Apply</a>\n",
 "</footer>\n",
 "</div>\n",
 "\n",
 "<div class=\"card-content\">\n",
 "<div class=\"media\">\n",
 "<div class=\"media-left\">\n",
 "<figure class=\"image is-48x48\">\n",
 "<img alt=\"Real Python Logo\" src=\"https://files.realpython.com/media/real-python-logo-thumbnail.7f0db70c2ed2.jpg?__no_cf_polish=1\"/>\n",
 "</figure>\n",
 "</div>\n",
 "<div class=\"media-content\">\n",
 "<h2 class=\"title is-5\">Materials engineer</h2>\n",
 "<h3 class=\"subtitle is-6 company\">Smith and Sons</h3>\n",
 "</div>\n",
 "</div>\n",
 "<div class=\"content\">\n",
 "<p class=\"location\">\n",
 " South Davidtown, AP\n",
 " </p>\n",
 "<p class=\"is-small has-text-grey\">\n",
 "<time | |
| 
	# -*- coding: utf-8 -*-
"""
*Module* ``project.generator``
This module provides classes to generate and handle different types of
data while the application is running. It can also be used outside the
application, as it works independently.
"""
from statistics import stdev
from collections import Counter
from random import shuffle, choice
from itertools import product, chain
class BaseGenerator(object):
	"""
	This class contains a base function :meth:`get_frequency` to count
	occurrences of each item across all tuples.
	Args:
		tuples (list(tuple or set or list)): list of tuples/lists/sets of items
	
	Attributes:
		frequencies (collections.Counter): dictionary-like container that
				stores each item and its frequency across the given tuples
	Examples:
		>>> tuples = [('A','B','C'), ('B','C','D'),('D','A','C')]
		>>> base = BaseGenerator(tuples=tuples)
		>>> base.frequencies
		Counter({'C': 3, 'A': 2, 'B': 2, 'D': 2})
	"""
	def __init__(self, tuples):
		self.frequencies = self.get_frequency(tuples)
	def get_frequency(self, tuples):
		"""
		Count the number of tuples each item is in. 
		Args:
			tuples (list(tuple or set or list)): set of tuples
		Returns: 
			collections.Counter: frequencies of items in all tuples
		"""
		return Counter(chain(*tuples))		
		
class DataGenerator(BaseGenerator):
	"""
	Extend :class:`BaseGenerator`. Create an object of input data for the
	survey based on input file(s).
	Args:
		num_iter (int, optional): number of necessary iterations to generate 
					tuples, *default:* ``100``
		batch_size (int, optional): size of a normal batch, *default:* ``20``
		minimum (int, optional): minimum size of a batch to be formed if the
					remaining tuples do not fill a normal batch, *default:* ``5``
	Attributes:
		items (set): the unique given items
		tuples (list): list of all unique generated tuples with the best
				results after all iterations
		batches (dict): all batches prepared for questionnaire
		num_iter (int): number of necessary iterations to generate tuples,
				*default:* ``100``
		batch_size (int): size of a normal batch, *default:* ``20``
		minimum (int): minimum size of a batch to be formed if the remaining
				tuples do not fill the normal :attr:`batch_size`, *default:* ``5``
		factor (int or float): to decide the number of tuples to be 
				generated - `n_tuples =` :attr:`factor` `* len(` :attr:`items` `)`,
				*default:* ``2`` if fewer than 10000 items 
		tuple_size (int): size of each tuple, *default:* ``4`` if fewer than 1000
				items else ``5``
	Examples:
		>>> example = open('../examples/movie_reviews_examples.txt','rb')
		>>> data = DataGenerator()
		>>> data.generate_items(example)
		>>> data.generate_data()
		>>> data.items # items read from input example
		{'interesting', 'excited', 'annoyed', 'boring', 'aggressive', 'joyful', 'fantastic', 'indifferent'}
		>>> data.tuples # tuples generated from the items (change each time calling this function)
		[['interesting', 'indifferent', 'excited', 'joyful'], ['indifferent', 'boring', 'aggressive', 'joyful'], ['interesting', 'fantastic', 'annoyed', 'indifferent'], ['joyful', 'fantastic', 'annoyed', 'indifferent'], ['fantastic', 'annoyed', 'aggressive', 'indifferent'], ['fantastic', 'boring', 'indifferent', 'joyful'], ['excited', 'boring', 'aggressive', 'interesting'], ['interesting', 'aggressive', 'annoyed', 'joyful'], ['interesting', 'fantastic', 'boring', 'aggressive'], ['excited', 'fantastic', 'indifferent', 'joyful'], ['excited', 'boring', 'annoyed', 'joyful'], ['interesting', 'fantastic', 'excited', 'indifferent'], ['excited', 'aggressive', 'annoyed', 'interesting'], ['fantastic', 'boring', 'aggressive', 'annoyed'], ['interesting', 'fantastic', 'aggressive', 'joyful'], ['excited', 'boring', 'annoyed', 'indifferent']]
		>>> data.batches # batches generated from the tuples (change each time calling this function)
		{1: [['interesting', 'indifferent', 'excited', 'joyful'], ['indifferent', 'boring', 'aggressive', 'joyful'], ['interesting', 'fantastic', 'annoyed', 'indifferent'], ['joyful', 'fantastic', 'annoyed', 'indifferent'], ['fantastic', 'annoyed', 'aggressive', 'indifferent']], 2: [['fantastic', 'boring', 'indifferent', 'joyful'], ['excited', 'boring', 'aggressive', 'interesting'], ['interesting', 'aggressive', 'annoyed', 'joyful'], ['interesting', 'fantastic', 'boring', 'aggressive'], ['excited', 'fantastic', 'indifferent', 'joyful']], 3: [['excited', 'boring', 'annoyed', 'joyful'], ['interesting', 'fantastic', 'excited', 'indifferent'], ['excited', 'aggressive', 'annoyed', 'interesting'], ['fantastic', 'boring', 'aggressive', 'annoyed'], ['interesting', 'fantastic', 'aggressive', 'joyful'], ['excited', 'boring', 'annoyed', 'indifferent']]}
		>>> data.get_frequency(data.tuples) # get frequency of each item in all generated tuples
		Counter({'indifferent': 9, 'fantastic': 9, 'interesting': 8, 'joyful': 8, 'aggressive': 8, 'annoyed': 8, 'excited': 7, 'boring': 7})
	
	"""
	def __init__(self, num_iter=100, batch_size=20, minimum=5):
		# initialize all data: items, tuples, batches
		self.items = set()
		self.tuples = []
		self.batches = {}
		# extra information
		self.num_iter = num_iter 
		self.batch_size = batch_size 
		self.minimum = minimum
		# set factor
		self.factor = 1.5 if len(self.items) > 10000 and len(self.items) %4 == 0 else 2
		# set tuple size
		self.tuple_size = 5 if len(self.items) > 1000 else 4
	def generate_tuples(self):
		"""
		Generate tuples. This is a reimplementation of `generate-BWS-tuples.pl`
		from the :bws:`source code <Best-Worst-Scaling-Scripts.zip>`.
		The tuples are generated by random sampling and satisfy the following
		criteria:
			1. no two items within a tuple are identical; 
			2. each item in the item list appears approximately in the same number of tuples; 
			3. each pair of items appears approximately in the same number of tuples.
		Returns:
			list: updated list of all unique generated tuples with the best result
			after all iterations (attribute :attr:`tuples`).
		Raises:
			ValueError: if the number of :attr:`items` is less than :attr:`tuple_size`.
		"""
		create_key = lambda i1, i2: "%s-%s"%(i1, i2)
		# sort the items
		items = sorted(list(self.items))
		num_items = len(items)
		# check if the number of unique items is not less than 
		# the number of items requested per tuple
		if num_items < self.tuple_size:
			raise ValueError('''The number of unique items is less than the number 
										of items requested per tuple''')
		# generate tuples
		number_tuples = int(0.5 + self.factor * num_items)
		# try many iterations of different randomizations
		best_score = 0
		best_tuples = []
		iter_ = 0
		for i_iter in range(self.num_iter):
			# print('Iteration %d'%(i_iter+1))
			# generate tuples by randomly sampling without replacement
			tuples = []
			count = 1
			# make a random list of items
			random_items = items[:]
			shuffle(random_items)
			freq_pair = {}
			# set index of current item in the random list
			curr_ind = 0
			while count <= number_tuples:
				# new tuple
				new_tuple = set()
				# check if there are enough remaining items in the random list for a new tuple
				if (curr_ind + self.tuple_size) <= len(random_items):
					# set a new tuple with tuple_size items in the random list 
					# starting at index curr_ind
					new_tuple = set(random_items[curr_ind:curr_ind+self.tuple_size])
					curr_ind += self.tuple_size
				# get the rest of the list
				else:
					# the number of items that we will need to get from a new random list
					need_more = self.tuple_size - len(random_items) + curr_ind
					while curr_ind < len(random_items):
						new_tuple.add(random_items[curr_ind])
						curr_ind += 1
					# generate a new random list of items
					random_items = items[:]
					shuffle(random_items)
					for curr_ind in range(need_more):
						# if there is a duplicate item, move it to the end of the list
						while random_items[curr_ind] in new_tuple:
							dup = random_items.pop(curr_ind)
							random_items.append(dup)
						new_tuple.add(random_items[curr_ind])
				# check whether this new_tuple is already in the list of tuples
				if new_tuple not in tuples:
					tuples.append(new_tuple)
					count += 1
				else:
					continue
				
				# add frequencies of pairs of items
				for (item1, item2) in product(new_tuple, new_tuple):
					# skip self-pairs so they do not distort the pair-frequency balance
					if item1 == item2:
						continue
					if item1 < item2:
						key = create_key(item1, item2)
					else:
						key = create_key(item2, item1)
					if key in freq_pair:
						freq_pair[key] += 1
					else:
						freq_pair[key] = 1
			# calculate the two-way balance of the set of tuples
			freq_pair_values = freq_pair.values()
			# calculate the score for the set and keep the best score and the best set
			score = stdev(freq_pair_values)
			if i_iter == 0 or score < best_score:
				best_score = score
				best_tuples = tuples[:]
				# iter_ = i_iter
		# print('Choose from iteration {} with score {}'.format(iter_+1, best_score))
		self.tuples = [list(best_tuple) for best_tuple in best_tuples]
	def generate_batches(self):
		"""
		Split the whole set of tuples into batches.
		Returns:
			dict(int = list): update all batches prepared for questionnaire
					(attribute :attr:`batches`).
		
		Raises:
			ValueError: if there is no attribute :attr:`tuples`.
		"""
		if not self.tuples:
			raise ValueError('No tuples generated')
		n_tuples = len(self.tuples)
		# in case there are too few generated tuples and 
		# there is only one batch for all due to large batch size,
		# the batch size will be set to a smaller size (5)
		if n_tuples <= self.batch_size:
			self.batch_size = 5
			self.minimum = 3
		remained = n_tuples % self.batch_size
		shuffle(self.tuples)
		# divide the tuples into batches
		for count, i in enumerate(range(0, n_tuples-remained, self.batch_size)):
			self.batches[count+1] = self.tuples[i:i+self.batch_size]
		# if the remainder cannot fill a normal batch but reaches the minimum size, form it as its own batch
		if remained >= self.minimum:
			self.batches[count+2] = self.tuples[i+self.batch_size:]
		# if the number of remaining tuples does not reach the minimum size,
		# randomly distribute each of the remaining tuples into the batches already formed
		else:
			chosen_batches = []
			remained_tuples = self.tuples[i+self.batch_size:]
			while len(remained_tuples) > 0:
				tuple_ = remained_tuples.pop()
				# choose randomly a batch to add tuple
				i_batch = choice(list(self.batches.keys()))
				# check if all batches have already received a randomly added tuple
				# while there are still remaining tuples
				while i_batch in chosen_batches:
					if len(chosen_batches) < len(self.batches):
						i_batch = choice(list(self.batches.keys()))
					# set the check to null again
					else:
						chosen_batches = []
				self.batches[i_batch].append(tuple_)
				chosen_batches.append(i_batch)
	def generate_items(self, file_name):
		"""
		Read an uploaded *txt* file. Accepts only one file at a time.
		Args:
			file_name (:dat-struct:`FileStorage <werkzeug.datastructures.FileStorage>` or :reader:`io.BufferedReader <io.BufferedReader>`): uploaded file
		Returns:
			list: update list of items with this file (attribute :attr:`items`).
 
		"""
		input_file = file_name.read().decode(encoding='utf-8', errors='ignore').strip()
		data = input_file.split('\n') if input_file != '' else []
		self.items = self.items.union(set(data))
	def generate_data(self):
		"""
		Generate data including tuples and batches. This method calls
		:meth:`generate_tuples` and :meth:`generate_batches`.
		"""
		self.generate_tuples()
		self.generate_batches()
class ScoreGenerator(BaseGenerator):
	"""
	Create an object to calculate the scores of given items based on annotations.
	Args:
		tuples (list): list of tuples
		best (list): list of items annotated as '**best**'
		worst (list): list of items annotated as '**worst**'
	Attributes:
		frequencies (dict or collections.Counter): frequency of each item in all tuples
		best (dict or collections.Counter): frequency of each item annotated as '**best**'
		worst (dict or collections.Counter): frequency of each item annotated as '**worst**'
	Examples:
		>>> tuples = [('A','B','C'), ('B','C','D'),('D','A','C')]
		>>> best = ['A','B','A']
		>>> worst = ['B','D','C']
		>>> generator = ScoreGenerator(tuples, best, worst)
		>>> generator.scoring()
		[('A', 1.0), ('B', 0.0), ('C', -0.3333333333333333), ('D', -0.5)]
	"""
	def __init__(self, tuples, best, worst):
		super().__init__(tuples)
		self.best = Counter(best)
		self.worst = Counter(worst)
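	# Clarifying note: per the Orme (2009) counting method referenced in
	# :meth:`scoring`, each item's score is typically
	#     (times chosen as best - times chosen as worst) / (times the item appears in tuples)
	# e.g. in the class example 'A' appears in 2 tuples and is chosen best twice,
	# giving (2 - 0) / 2 = 1.0.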
	
	def scoring(self):
		"""
		Calculate scores of the items using formula of :orme09:`Orme 2009 <indivmaxdiff.pdf>`.
		Returns:
			list(tuple(str, float)): | |
| 
	cfl_safety_factor
 self._no_dimensions = no_dimensions
 super().__init__()
 @property
 def cfl_safety_factor(self) -> float:
 return self._cfl_safety_factor
 
 @property
 def no_dimensions(self) -> bool:
 return self._no_dimensions
 def get_time_step(self, grid):
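 # CFL-limited time step: dt = CFL * dx / (c_inf * (M_inf + 1)), where
 # c_inf * (M_inf + 1) bounds the fastest acoustic wave speed |u| + c.
 # In the non-dimensional case dt is additionally rescaled by the
 # characteristic time grid.length_y / characteristic_velocity.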
 dt = self._cfl_safety_factor * grid.step / SPEED_OF_SOUND_INFTY / (MACH_INFTY + 1.)
 if self._no_dimensions:
 dt /= grid.length_y / self.characteristic_velocity
 return dt
 def average_eigensystem(
 self, grid, w_avg
 ):
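 # Build the right (R) and left (L) eigenvector matrices of the 2D Euler flux
 # Jacobians at edge-averaged states; w_avg holds the averaged conserved
 # variables per axis (index 0 -> x-edges, index 1 -> y-edges). R and L are
 # typically used to transform fluxes to and from characteristic variables.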
 x_velocity_edge_x = w_avg['x_momentum'][0] / w_avg['density'][0]
 x_velocity_edge_y = w_avg['x_momentum'][1] / w_avg['density'][1]
 y_velocity_edge_x = w_avg['y_momentum'][0] / w_avg['density'][0]
 y_velocity_edge_y = w_avg['y_momentum'][1] / w_avg['density'][1]
 e_edge_x = 0.5 * (x_velocity_edge_x ** 2 + y_velocity_edge_x ** 2)
 e_edge_y = 0.5 * (x_velocity_edge_y ** 2 + y_velocity_edge_y ** 2)
 pressure_edge_x = \
 (GAMMA - 1) * (w_avg['energy'][0] - w_avg['density'][0] * e_edge_x )
 speed_of_sound_edge_x = _sqrt(GAMMA * pressure_edge_x / w_avg['density'][0])
 pressure_edge_y = \
 (GAMMA - 1) * (w_avg['energy'][1] - w_avg['density'][1] * e_edge_y )
 speed_of_sound_edge_y = _sqrt(GAMMA * pressure_edge_y / w_avg['density'][1])
 R = {}
 R['density'] = \
 [ tf.stack(
 [tf.ones(grid.shape),
 tf.ones(grid.shape),
 tf.ones(grid.shape),
 tf.zeros(grid.shape)]
 ),
 tf.stack(
 [tf.ones(grid.shape),
 tf.ones(grid.shape),
 tf.ones(grid.shape),
 tf.zeros(grid.shape)]
 )
 ]
 
 R['x_momentum'] = \
 [ tf.stack(
 [x_velocity_edge_x - speed_of_sound_edge_x,
 x_velocity_edge_x,
 x_velocity_edge_x + speed_of_sound_edge_x,
 tf.zeros(grid.shape)]
 ),
 tf.stack(
 [x_velocity_edge_y,
 x_velocity_edge_y,
 x_velocity_edge_y,
 tf.ones(grid.shape)]
 )
 ]
 
 R['y_momentum'] = \
 [ tf.stack(
 [y_velocity_edge_x,
 y_velocity_edge_x,
 y_velocity_edge_x,
 - tf.ones(grid.shape)]
 ),
 tf.stack(
 [y_velocity_edge_y - speed_of_sound_edge_y,
 y_velocity_edge_y,
 y_velocity_edge_y + speed_of_sound_edge_y,
 tf.zeros(grid.shape)]
 )
 ]
 R['energy'] = \
 [ tf.stack(
 [e_edge_x + speed_of_sound_edge_x ** 2 / (GAMMA - 1) \
 - speed_of_sound_edge_x * x_velocity_edge_x,
 e_edge_x,
 e_edge_x + speed_of_sound_edge_x ** 2 / (GAMMA - 1) \
 + speed_of_sound_edge_x * x_velocity_edge_x,
 - y_velocity_edge_x]
 ),
 tf.stack(
 [e_edge_y + speed_of_sound_edge_y ** 2 / (GAMMA - 1) \
 - speed_of_sound_edge_y * y_velocity_edge_y,
 e_edge_y,
 e_edge_y + speed_of_sound_edge_y ** 2 / (GAMMA - 1) \
 + speed_of_sound_edge_y * y_velocity_edge_y,
 x_velocity_edge_y]
 )
 ]
 
 L = {}
 L['density'] = \
 [ tf.stack(
 [((GAMMA - 1)*e_edge_x + speed_of_sound_edge_x * x_velocity_edge_x) \
 / 2 / speed_of_sound_edge_x ** 2,
 ((1 - GAMMA) * x_velocity_edge_x - speed_of_sound_edge_x) \
 / 2 / speed_of_sound_edge_x ** 2,
 ((1 - GAMMA) * y_velocity_edge_x) \
 / 2 / speed_of_sound_edge_x ** 2,
 (GAMMA - 1) / 2 / speed_of_sound_edge_x ** 2]
 ),
 tf.stack(
 [((GAMMA - 1)*e_edge_y + speed_of_sound_edge_y * y_velocity_edge_y) \
 / 2 / speed_of_sound_edge_y ** 2,
 ((1 - GAMMA) * x_velocity_edge_y) \
 / 2 / speed_of_sound_edge_y ** 2,
 ((1 - GAMMA) * y_velocity_edge_y - speed_of_sound_edge_y) \
 / 2 / speed_of_sound_edge_y ** 2,
 (GAMMA - 1) / 2 / speed_of_sound_edge_y ** 2]
 )
 ]
 
 L['x_momentum'] = \
 [ tf.stack(
 [tf.ones(grid.shape) - (GAMMA-1)*e_edge_x / speed_of_sound_edge_x**2,
 (GAMMA - 1) * x_velocity_edge_x / speed_of_sound_edge_x ** 2,
 (GAMMA - 1) * y_velocity_edge_x / speed_of_sound_edge_x ** 2,
 (1 - GAMMA) / speed_of_sound_edge_x ** 2]
 ),
 tf.stack(
 [tf.ones(grid.shape) - (GAMMA-1)*e_edge_y / speed_of_sound_edge_y**2,
 (GAMMA - 1) * x_velocity_edge_y / speed_of_sound_edge_y ** 2,
 (GAMMA - 1) * y_velocity_edge_y / speed_of_sound_edge_y ** 2,
 (1 - GAMMA) / speed_of_sound_edge_y ** 2]
 )
 ]
 
 L['y_momentum'] = \
 [ tf.stack(
 [((GAMMA - 1)*e_edge_x - speed_of_sound_edge_x * x_velocity_edge_x) \
 / 2 / speed_of_sound_edge_x ** 2,
 ((1 - GAMMA) * x_velocity_edge_x + speed_of_sound_edge_x) \
 / 2 / speed_of_sound_edge_x ** 2,
 ((1 - GAMMA) * y_velocity_edge_x) \
 / 2 / speed_of_sound_edge_x ** 2,
 (GAMMA - 1) / 2 / speed_of_sound_edge_x ** 2]
 ),
 tf.stack(
 [((GAMMA - 1)*e_edge_y - speed_of_sound_edge_y * y_velocity_edge_y) \
 / 2 / speed_of_sound_edge_y ** 2,
 ((1 - GAMMA) * x_velocity_edge_y) \
 / 2 / speed_of_sound_edge_y ** 2,
 ((1 - GAMMA) * y_velocity_edge_y + speed_of_sound_edge_y) \
 / 2 / speed_of_sound_edge_y ** 2,
 (GAMMA - 1) / 2 / speed_of_sound_edge_y ** 2]
 )
 ]
 L['energy'] = \
 [ tf.stack(
 [y_velocity_edge_x,
 tf.zeros(grid.shape),
 - tf.ones(grid.shape),
 tf.zeros(grid.shape)]
 ),
 tf.stack(
 [- x_velocity_edge_y,
 tf.ones(grid.shape),
 tf.zeros(grid.shape),
 tf.zeros(grid.shape)]
 )
 ]
 return R, L
 def flux_splitting(
 self, grid, f, eigenvalues, w
 ):
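 # Global Lax-Friedrichs flux splitting per axis:
 #     f_left = 0.5 * (f + a * w),   f_right = 0.5 * (f - a * w)
 # with a = max|eigenvalue| over the grid, so the split fluxes carry
 # purely non-negative / non-positive characteristic speeds.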
 f_left = {}
 f_right = {}
 for i_axis, axis in enumerate([X_AXIS, Y_AXIS]):
 for key in self.evolving_keys:
 a = _amax(_abs(eigenvalues[key][i_axis]))
 f_left.setdefault(key, [])
 f_right.setdefault(key, [])
 f_left[key].append(
 0.5 * (f[key][i_axis] + a * w[key][i_axis])
 )
 f_right[key].append(
 0.5 * (f[key][i_axis] - a * w[key][i_axis])
 )
 return f_left, f_right
 def neighbors(self, f):
 f_m2 = {}
 f_m1 = {}
 f_p1 = {}
 f_p2 = {}
 f_p3 = {}
 for i_axis, axis in enumerate([X_AXIS, Y_AXIS]):
 for key in self.evolving_keys:
 f_m2.setdefault(key, [])
 f_m1.setdefault(key, [])
 f_p1.setdefault(key, [])
 f_p2.setdefault(key, [])
 f_p3.setdefault(key, [])
 f_ = f[key][i_axis]
 f_m2[key].append(_roll_minus_two(f_,axis))
 f_m1[key].append(_roll_minus_one(f_,axis))
 f_p1[key].append(_roll_plus_one(f_,axis))
 f_p2[key].append(_roll_plus_two(f_,axis))
 f_p3[key].append(_roll_plus_three(f_,axis))
 
 return f_m2, f_m1, f_p1, f_p2, f_p3
 def smoothness_indicators_tau(self, f, f_m2, f_m1, f_p1, f_p2):
 """ Compute WENO5 smoothness indicators and \tau parameter.
 
 Returns:
 dict of 
 [ tf.stack([BETA_1_X, BETA_2_X, BETA_3_X]), 
 tf.stack([BETA_1_Y, BETA_2_Y, BETA_3_Y]) ]
 lists;
 
 dict of [TAU_X, TAU_Y] lists.
 """
 beta = {}
 tau = {}
 for i_axis in range(2):
 for key in self.evolving_keys:
 f_j = f[key][i_axis]
 f_jm2 = f_m2[key][i_axis]
 f_jm1 = f_m1[key][i_axis]
 f_jp1 = f_p1[key][i_axis]
 f_jp2 = f_p2[key][i_axis]
 beta_1 = (13/12) * (f_jm2 - 2 * f_jm1 + f_j) ** 2 \
 + (1/4) * (f_jm2 - 4 * f_jm1 + 3 * f_j) ** 2 
 beta_2 = (13/12) * (f_jm1 - 2 * f_j + f_jp1) ** 2 \
 + (1/4) * (f_jm1 - f_jp1) ** 2
 beta_3 = (13/12) * (f_j - 2 * f_jp1 + f_jp2) ** 2 \
 + (1/4) * (3 * f_j - 4 * f_jp1 + f_jp2) ** 2
 beta.setdefault(key, [])
 beta[key].append( tf.stack([beta_1, beta_2, beta_3]) )
 tau.setdefault(key, [])
 tau[key].append( 
 (f_jm2 - 4 * f_jm1 + 6 * f_j - 4 * f_jp1 + f_jp2) ** 2
 ) 
 return beta, tau
 def weights(self, grid, beta, tau, epsilon, c, p):
 """ Compute WENO5 weights.
 
 Returns:
 dicts of 
 [ [WEIGHT_1_X, WEIGHT_2_X, WEIGHT_3_X], 
 [WEIGHT_1_Y, WEIGHT_2_Y, WEIGHT_3_Y] ]
 lists.
 """
 # Convert c to a tf Tensor with shape (len(c),grid.size_x,grid.size_y)
 c_1 = tf.constant( c[0], shape=grid.shape )
 c_2 = tf.constant( c[1], shape=grid.shape )
 c_3 = tf.constant( c[2], shape=grid.shape )
 c = tf.stack( [c_1, c_2, c_3] )
 weights = {}
 for i_axis in range(2):
 for key in self.evolving_keys:
 alpha = c \
 * ( 1 + ( tau[key][i_axis] / ( epsilon + beta[key][i_axis] ) ) ** p )
 weights.setdefault(key, [])
 weights[key].append( alpha / _sum(alpha, axis=0) )
 return weights
 
 def reconstruction(
 self,
 grid, 
 f_left, f_right,
 f_m2_left, f_m2_right, 
 f_m1_left, f_m1_right,
 f_p1_left, f_p1_right,
 f_p2_left, f_p2_right,
 f_p3_left, f_p3_right
 ):
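 # WENO5 reconstruction of the split fluxes at the cell interface: the
 # upwind-biased ("left") part uses the stencil I_{i-2}..I_{i+2}, the
 # downwind-biased ("right") part uses the mirrored stencil I_{i-1}..I_{i+3};
 # each is a weighted combination of three 3-point candidate stencils.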
 # Left reconstruction (Stencil I_{i-2} to I_{i+2})
 beta, tau = self.smoothness_indicators_tau(
 f_left, f_m2_left, f_m1_left, f_p1_left, f_p2_left)
 epsilon = 1e-6
 c = [0.1, 0.6, 0.3]
 p = 2
 weights = self.weights(grid, beta, tau, epsilon, c, p)
 flux_left = {}
 for i_axis in range(2):
 for key in self.evolving_keys:
 f_j = f_left[key][i_axis]
 f_jm2 = f_m2_left[key][i_axis]
 f_jm1 = f_m1_left[key][i_axis]
 f_jp1 = f_p1_left[key][i_axis]
 f_jp2 = f_p2_left[key][i_axis]
 [omega_1, omega_2, omega_3] = \
 [ weight for weight in weights[key][i_axis] ]
 flux_biased = \
 (1/3) * omega_1 * f_jm2 \
 - (1/6) * (7 * omega_1 + omega_2) * f_jm1 \
 + (1/6) * (11*omega_1 + 5*omega_2 + 2*omega_3) * f_j \
 + (1/6) * (2 * omega_2 + 5 * omega_3) * f_jp1 \
 - (1/6) * omega_3 * f_jp2
 flux_left.setdefault(key, [])
 flux_left[key].append( flux_biased )
 
 # Right reconstruction (Stencil I_{i-1} to I_{i+3})
 beta, tau = self.smoothness_indicators_tau(
 f_p1_right, f_m1_right, f_right, f_p2_right, f_p3_right)
 c = c[::-1] # reverse order
 weights = self.weights(grid, beta, tau, epsilon, c, p)
 flux_right = {}
 for i_axis in range(2):
 for key in self.evolving_keys:
 f_j = f_p1_right[key][i_axis]
 f_jm2 = f_m1_right[key][i_axis]
 f_jm1 = f_right[key][i_axis]
 f_jp1 = f_p2_right[key][i_axis]
 f_jp2 = f_p3_right[key][i_axis]
 [omega_1, omega_2, omega_3] = \
 [ weight for weight in weights[key][i_axis] ]
 flux_biased = \
 - (1/6) * omega_1 * f_jm2 \
 + (1/6) * (5 * omega_1 + 2 * omega_2) * f_jm1 \
 + (1/6) * (2*omega_1 + 5*omega_2 + 11*omega_3) * f_j \
 - (1/6) * (omega_2 + 7 * omega_3) * f_jp1 \
 + (1/3) * omega_3 * f_jp2
 flux_right.setdefault(key, [])
 flux_right[key].append( flux_biased )
 return flux_left, flux_right
 def average_state(
 self, pressure, w, roe=True
 ):
 w_avg = {}
 if roe:
 for i_axis, axis in enumerate([X_AXIS, Y_AXIS]):
 enthalpy = \
 (w['energy'][i_axis] + pressure) / w['density'][i_axis]
 sqrt_density = _sqrt( w['density'][i_axis] )
 sqrt_density_p1 = _roll_plus_one(sqrt_density, axis)
 x_velocity = w['x_momentum'][i_axis] / w['density'][i_axis]
 | |
| 
	<filename>database/dataio.py
#pulled this out of api.py
import inspect
from database.main import printD,tdb
from sqlalchemy.orm.exc import NoResultFound #needed for the 'except NoResultFound' in BaseDataIO.__init__
#tdb.off()
class BaseDataIO:
 """ 
 Base class for all experiment steps
 This should be extended for each type of step
 Step types should then be extended once more
 To define individual records/steps
 :attr::class:`.MappedClass` should appear in local namespace via
 `from database.models import ModelName as MappedClass`. These
 classes are things that usually will not need to be queried
 within the scope of data collection.
 :attr::string:`.ctrl_name`
 :attr::list:`.prereqList`
 :param: Controller, a class instance w/ function that can return floats,
 Controller.__class__.__name__ must match ctrl_name
 :param: session, a sqlalchemy database session that
 (hopefully) has tables matching your mapped classes
 :meth:`.Persist`
 :meth:`.do` returns self.value
 """
 #FIXME could make a factory function that takes the class variables and returns the class...
 #the only issue is writeTarget can't be checked beforehand :/
 MappedClass=None #from database.models import thing as MappedClass
 mappedClassPropertiesDict={} #things required by the database, eg datasource units
 ctrl_name=None
 @property
 def name(self):
 #FIXME add a way to explicitly name classes if you want?
 return self.__class__.__name__[4:]
 def __init__(self,Controller,session): #FIXME controller could also be a MappedInstance?
 if Controller.__class__.__name__==self.ctrl_name:
 self.controller_version=Controller.version #FIXME hash the file or something for external stuff
 #BIGGER FIXME documenting which version of the controller was used is now VITAL
 if not self.controller_version:
 raise AttributeError('What are you doing not keeping track of'
 ' what software you used! BAD SCIENTIST')
 self.ctrl=Controller
 else:
 raise TypeError('Wrong controller for this step!')
 self.session=session
 try:
 self.MappedInstance=self.session.query(self.MappedClass).filter_by(name=self.name).one()
 except NoResultFound:
 self.Persist()
 def checkVersion(self,thing,strict=False): #validate that the code has not changed
 #TODO this should be handled at the level of the experiment
 #hash the code of the thing #FIXME should this all be here or should it be tracked globally on startup?
 if strict:
 #hash the file that it came from and compare it to the previous hash
 pass
 def Persist(self):
 """
 Returns None
 Creates an instance of :class:`.MappedClass` according to other defined
 params, assigns it to :instance:`.MappedInstance` and commits it to the database.
 
 """
 raise NotImplementedError('You MUST implement this at the subclass level')
 def do(self):
 raise NotImplementedError('You MUST implement this at the subclass level')
class DataIO(BaseDataIO): #IXCK ugly ugly might be nice for a factory :/ but is poorly constrained @do, so slow
 #NOTE TO SELF: this interface needs to be here unless we go with STI for dataio objects in order to implement persistence, and EVEN THAT misses the point which is that there are live functions that we want to run and have documented, I suppose using only names it would be possible to init everything and save it BUT we would still need something to deal with actually tying it all together at run time which is what THIS is supposed to do
 #doing it this way we keep the all the relevant information in one place that can all be seen at the same time and debugged more easily
 #the alternative is generating DataIO objects directly from database entries but that still leaves tying them to actual live code objects which seems like it could go very wrong and would still require an input interface and we would essentially be persisting a class that looks like this anyway
 #probably do want a way to recreate classes straight from the database though... but that is a lot of work and we will have to do it in the future NOT RIGHT NOW
 MappedClass=None #from database.models import thing as MappedClass
 mcKwargs={} # MappedClass(**kwargs) things for the database, eg datasource units
 ctrl_name=None #FIXME why do we need this again??? ANSWER: because we need live functions and I'm not sure the best way to unambiguously name a 'dead' function of a class and make it live (the way in rigcont is iffy)
 setter_name=None #FIXME the name of the setting function
 getter_name=None #name of the function used to get stuff
 writer_name=None #eg getattr(writeTarget,self.writer_name)
 collection_name=None #eg metadata_ or something mapped collection name
 check_function=None #FIXME checks are ONLY going to be written to experiments, so we can pull them out to steps? or even make them their own step akin to analysis? yeah, because checks often need to occur across multiple steps and longer stretches of time
 analysis_function=None #FIXME probably should be a from xyz import thing as function
 def __init__(self,Controller,session):
 super().__init__(Controller,session)
 if self.getter_name:
 self.getter=getattr(self.ctrl,self.getter_name) #FIXME allow override
 if self.setter_name:
 self.setter=getattr(self.ctrl,self.setter_name)
 #TODO version checks
 def Persist(self):
 #self.MappedInstance=MappedClass(name=self.name,prefix=self.prefix,unit=self.unit,mantissa=self.mantissa,hardware_id=hardware_id)
 self.MappedInstance=self.MappedClass(**self.mcKwargs)
 self.session.add(self.MappedInstance)
 self.session.commit()
 def setValue(self,set_value,error=0): #both value and expected value will be recorded somehow...
 self.expected_value=set_value
 self.ev_error=error #allowed error
 self.setter(self.expected_value)
 def getValue(self,analysis_value=None):
 self.value=analysis_value #FIXME how do we link this to the output...
 if not self.value:
 self.value=self.getter()
 def checkValue(self): #FIXME making check steps similar to analysis simplifies saving results
 self.check_function()
 def analysis(self):
 #FIXME need version control here... :/ so it is possible to track down errors
 self.value=self.analysis_function(self.value)
 def writeValue(self,writeTarget,autocommit=False):
 collection=getattr(writeTarget,self.collection_name)
 writer=getattr(writeTarget,self.writer_name)
 collection.append(writer(self.MappedInstance,self.value)) #FIXME this gives some insight into array formats
 if autocommit:
 self.session.commit()
 def do(self,writeTarget=None,set_value=None,set_error=0,analysis_value=None,autocommit=False):
 if set_value: #FIXME handle lack of setter_name?
 self.setValue(set_value,set_error) #TODO make sure that this will block properly
 if analysis_value:
 self.getValue(analysis_value)
 else:
 self.getValue()
 if self.analysis_function:
 self.analysis() #FIXME how to check these...
 if writeTarget:
 self.writeValue(writeTarget,autocommit)
 if self.check_function:
 self.checkValue() #check post write and THEN raise so that the bad value is recorded
 return self.value
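#Illustrative sketch (not part of the original file): a concrete DataIO subclass
#would normally only fill in the class-level configuration, for example
# class ioTemperature(DataIO):
#     MappedClass=TemperatureSource        # hypothetical mapped model
#     mcKwargs=dict(name='temperature',unit='C')
#     ctrl_name='ThermometerController'    # hypothetical controller class name
#     getter_name='read_celsius'           # hypothetical controller method
# value=ioTemperature(thermometer,session).do(writeTarget=experiment_record)
#All names above are hypothetical; they only show how the class variables map onto
#the getattr lookups performed in __init__ and the calls made in do().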
#pairings:
#MetaDataSource - clx,mcc,esp,key
#need failover if a function is not found?
#XXX NOTE: readTarget and WriteTarget are really the only INPUT that is dynamic for dataios
class baseio:
 #TODO inspect.getsourcelines for versioning? or... what? doesn't work for wrapped classes
 dependencies=[] #these propagate up to iodeps, we want to use the same dataios for many steps
 #so we can't use dataio names for step names DUH that was why we split stuff up in the first place!
 #and they should be the expected keywords for anything downstream
 dynamic_inputs=False
 @property
 def name(self):
 return self.__class__.__name__#[4:]
 def __init__(self,session,controller_class=None,ctrlDict=None):
 if not self.__doc__:
 raise NotImplementedError('PLEASE DOCUMENT YOUR SCIENCE! <3 U FOREVER! (add a docstring to %s)'%self.__class__)
 if ctrlDict:
 self.ctrlDict=ctrlDict
 if getattr(self,'ctrl_name',None): #FIXME not quite correct
 if ctrlDict:
 self.ctrl=ctrlDict[self.ctrl_name]
 #self.ctrlDict=ctrlDict #XXX this is a really hacky way to call dependent dataios
 #TODO yeah, now I'm seeing why keeping the live dataio dict might be a good idea...
 elif controller_class:
 self.ctrl=controller_class
 else:
 raise ValueError('ctrl_name defined but no controller passed in during init!')
 self.validate()
 try:
 self.MappedInstance=session.query(self.MappedClass).filter_by(name=self.name).order_by(self.MappedClass.id.desc()).first() #FIXME versioning?
 if not self.MappedInstance:
 raise ValueError('MappedInstance of %s did not init!'%self.MappedClass)
 #printD(self.MappedInstance.name) #FIXME somehow this line fixes everything!?
 #assert self.MappedInstance, 'self.MappedInstance is None'
 except ValueError:
 #raise AttributeError('MappedInstance not in the database')
 self.persist(session)
 #printD('debugging to see what the issue here is with calling persist in super')
 self.session=session
 assert self.MappedInstance, 'MappedInstance did not init in %s'%self.name
 def validate(self):
 raise NotImplementedError('You MUST implement this at the subclass level')
 #TODO check the version and increment??!
 def persist(self,session):
 #will raise an error, this is just here for super() calls
 printD('2 should be called AFTER in: %s'%self.name)
 self.MappedInstance.docstring=self.__doc__
 session.add(self.MappedInstance)
 session.commit()
 def _rec_do_kwargs(self,kwargs):
 #FIXME this is so that we can record the input values in the step record
 #ideally we shouldn't need this for write and stuff like that
 #and we really shouldn't need it at all because it obfuscates everything >_<
 #XXX we don't want internal representations ending up in the db
 #really we just want the value(s) we set to be recorded iff the dataio itself sets/expects
 #dynamic inputs that won't be recorded elsewhere, eg validating that we are on the correct channel
 #but that could be handled as a check steps? and should be dealt with as part of the looping stuff :/
 if self.dynamic_inputs:
 self.full_kwargs=kwargs
 
class ctrlio(baseio):
 mcKwargs={}
 ctrl_name=''
 func_kwargs={}
 function_name=''
 hardware=''
 def __init__(self,session,controller_class=None,ctrlDict=None):
 from database.models import Hardware
 if type(controller_class)==dict:
 raise TypeError('you passed the dict in the class spot!')
 try:
 self.hardware_id=session.query(Hardware).filter_by(name=self.hardware).first().id #use the passed-in session; self.session is only set in super().__init__
 except: #TODO
 printD('no hardware by that name found! TODO')
 self.hardware_id=1
 super().__init__(session,controller_class,ctrlDict)
 #self.persist(session) #FIXME
 def validate(self):
 printD('validating...')
 if self.ctrl_name:
 print(self.ctrl_name,self.ctrl.__class__.__name__)
 if self.ctrl_name == self.ctrl.__class__.__name__:
 #TODO check the controller version!
 #TODO check the function exists
 if not hasattr(self.ctrl,self.function_name):
 raise TypeError('%s has no function by that name!'%self.ctrl)
 else:
 raise TypeError('Wrong controller for this step!')
 | |
| 
	<filename>server/www/packages/packages-windows/x86/ldap3/abstract/entry.py
"""
"""
# Created on 2016.08.19
#
# Author: <NAME>
#
# Copyright 2016 - 2020 <NAME>
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
import json
try:
 from collections import OrderedDict
except ImportError:
 from ..utils.ordDict import OrderedDict # for Python 2.6
from os import linesep
from .. import STRING_TYPES, SEQUENCE_TYPES, MODIFY_ADD, MODIFY_REPLACE
from .attribute import WritableAttribute
from .objectDef import ObjectDef
from .attrDef import AttrDef
from ..core.exceptions import LDAPKeyError, LDAPCursorError, LDAPCursorAttributeError
from ..utils.conv import check_json_dict, format_json, prepare_for_stream
from ..protocol.rfc2849 import operation_to_ldif, add_ldif_header
from ..utils.dn import safe_dn, safe_rdn, to_dn
from ..utils.repr import to_stdout_encoding
from ..utils.ciDict import CaseInsensitiveWithAliasDict
from ..utils.config import get_config_parameter
from . import STATUS_VIRTUAL, STATUS_WRITABLE, STATUS_PENDING_CHANGES, STATUS_COMMITTED, STATUS_DELETED,\
 STATUS_INIT, STATUS_READY_FOR_DELETION, STATUS_READY_FOR_MOVING, STATUS_READY_FOR_RENAMING, STATUS_MANDATORY_MISSING, STATUSES, INITIAL_STATUSES
from ..core.results import RESULT_SUCCESS
from ..utils.log import log, log_enabled, ERROR, BASIC, PROTOCOL, EXTENDED
class EntryState(object):
 """Contains data on the status of the entry. Does not pollute the Entry __dict__.
 """
 def __init__(self, dn, cursor):
 self.dn = dn
 self._initial_status = None
 self._to = None # used for move and rename
 self.status = STATUS_INIT
 self.attributes = CaseInsensitiveWithAliasDict()
 self.raw_attributes = CaseInsensitiveWithAliasDict()
 self.response = None
 self.cursor = cursor
 self.origin = None # reference to the original read-only entry (set when made writable). Needed to update attributes in read-only when modified (only if both refer the same server)
 self.read_time = None
 self.changes = OrderedDict() # includes changes to commit in a writable entry
 if cursor.definition:
 self.definition = cursor.definition
 else:
 self.definition = None
 def __repr__(self):
 if self.__dict__ and self.dn is not None:
 r = 'DN: ' + to_stdout_encoding(self.dn) + ' - STATUS: ' + ((self._initial_status + ', ') if self._initial_status != self.status else '') + self.status + ' - READ TIME: ' + (self.read_time.isoformat() if self.read_time else '<never>') + linesep
 r += 'attributes: ' + ', '.join(sorted(self.attributes.keys())) + linesep
 r += 'object def: ' + (', '.join(sorted(self.definition._object_class)) if self.definition._object_class else '<None>') + linesep
 r += 'attr defs: ' + ', '.join(sorted(self.definition._attributes.keys())) + linesep
 r += 'response: ' + ('present' if self.response else '<None>') + linesep
 r += 'cursor: ' + (self.cursor.__class__.__name__ if self.cursor else '<None>') + linesep
 return r
 else:
 return object.__repr__(self)
 def __str__(self):
 return self.__repr__()
 def __getstate__(self):
 cpy = dict(self.__dict__)
 cpy['cursor'] = None
 return cpy
 def set_status(self, status):
 conf_ignored_mandatory_attributes_in_object_def = [v.lower() for v in get_config_parameter('IGNORED_MANDATORY_ATTRIBUTES_IN_OBJECT_DEF')]
 if status not in STATUSES:
 error_message = 'invalid entry status ' + str(status)
 if log_enabled(ERROR):
 log(ERROR, '%s for <%s>', error_message, self)
 raise LDAPCursorError(error_message)
 if status in INITIAL_STATUSES:
 self._initial_status = status
 self.status = status
 if status == STATUS_DELETED:
 self._initial_status = STATUS_VIRTUAL
 if status == STATUS_COMMITTED:
 self._initial_status = STATUS_WRITABLE
 if self.status == STATUS_VIRTUAL or (self.status == STATUS_PENDING_CHANGES and self._initial_status == STATUS_VIRTUAL): # checks if all mandatory attributes are present in new entries
 for attr in self.definition._attributes:
 if self.definition._attributes[attr].mandatory and attr.lower() not in conf_ignored_mandatory_attributes_in_object_def:
 if (attr not in self.attributes or self.attributes[attr].virtual) and attr not in self.changes:
 self.status = STATUS_MANDATORY_MISSING
 break
 @property
 def entry_raw_attributes(self):
 return self.raw_attributes
class EntryBase(object):
 """The Entry object contains a single LDAP entry.
 Attributes can be accessed either by sequence, by assignment
 or as dictionary keys. Keys are not case sensitive.
 The Entry object is read only
 - The DN is retrieved by entry_dn
 - The cursor reference is in _cursor
 - Raw attributes values are retrieved with _raw_attributes and the _raw_attribute() methods
 """
 def __init__(self, dn, cursor):
 self._state = EntryState(dn, cursor)
 def __repr__(self):
 if self.__dict__ and self.entry_dn is not None:
 r = 'DN: ' + to_stdout_encoding(self.entry_dn) + ' - STATUS: ' + ((self._state._initial_status + ', ') if self._state._initial_status != self.entry_status else '') + self.entry_status + ' - READ TIME: ' + (self.entry_read_time.isoformat() if self.entry_read_time else '<never>') + linesep
 if self._state.attributes:
 for attr in sorted(self._state.attributes):
 if self._state.attributes[attr] or (hasattr(self._state.attributes[attr], 'changes') and self._state.attributes[attr].changes):
 r += ' ' + repr(self._state.attributes[attr]) + linesep
 return r
 else:
 return object.__repr__(self)
 def __str__(self):
 return self.__repr__()
 def __iter__(self):
 for attribute in self._state.attributes:
 yield self._state.attributes[attribute]
 # raise StopIteration # deprecated in PEP 479
 return
 def __contains__(self, item):
 try:
 self.__getitem__(item)
 return True
 except LDAPKeyError:
 return False
 def __getattr__(self, item):
 if isinstance(item, STRING_TYPES):
 if item == '_state':
 return object.__getattr__(self, item)
 item = ''.join(item.split()).lower()
 attr_found = None
 for attr in self._state.attributes.keys():
 if item == attr.lower():
 attr_found = attr
 break
 if not attr_found:
 for attr in self._state.attributes.aliases():
 if item == attr.lower():
 attr_found = attr
 break
 if not attr_found:
 for attr in self._state.attributes.keys():
 if item + ';binary' == attr.lower():
 attr_found = attr
 break
 if not attr_found:
 for attr in self._state.attributes.aliases():
 if item + ';binary' == attr.lower():
 attr_found = attr
 break
 if not attr_found:
 for attr in self._state.attributes.keys():
 if item + ';range' in attr.lower():
 attr_found = attr
 break
 if not attr_found:
 for attr in self._state.attributes.aliases():
 if item + ';range' in attr.lower():
 attr_found = attr
 break
 if not attr_found:
 error_message = 'attribute \'%s\' not found' % item
 if log_enabled(ERROR):
 log(ERROR, '%s for <%s>', error_message, self)
 raise LDAPCursorAttributeError(error_message)
 return self._state.attributes[attr]
 error_message = 'attribute name must be a string'
 if log_enabled(ERROR):
 log(ERROR, '%s for <%s>', error_message, self)
 raise LDAPCursorAttributeError(error_message)
 def __setattr__(self, item, value):
 if item == '_state':
 object.__setattr__(self, item, value)
 elif item in self._state.attributes:
 error_message = 'attribute \'%s\' is read only' % item
 if log_enabled(ERROR):
 log(ERROR, '%s for <%s>', error_message, self)
 raise LDAPCursorAttributeError(error_message)
 else:
 error_message = 'entry is read only, cannot add \'%s\'' % item
 if log_enabled(ERROR):
 log(ERROR, '%s for <%s>', error_message, self)
 raise LDAPCursorAttributeError(error_message)
 def __getitem__(self, item):
 if isinstance(item, STRING_TYPES):
 item = ''.join(item.split()).lower()
 attr_found = None
 for attr in self._state.attributes.keys():
 if item == attr.lower():
 attr_found = attr
 break
 if not attr_found:
 for attr in self._state.attributes.aliases():
 if item == attr.lower():
 attr_found = attr
 break
 if not attr_found:
 for attr in self._state.attributes.keys():
 if item + ';binary' == attr.lower():
 attr_found = attr
 break
 if not attr_found:
 for attr in self._state.attributes.aliases():
 if item + ';binary' == attr.lower():
 attr_found = attr
 break
 if not attr_found:
 error_message = 'key \'%s\' not found' % item
 if log_enabled(ERROR):
 log(ERROR, '%s for <%s>', error_message, self)
 raise LDAPKeyError(error_message)
 return self._state.attributes[attr]
 error_message = 'key must be a string'
 if log_enabled(ERROR):
 log(ERROR, '%s for <%s>', error_message, self)
 raise LDAPKeyError(error_message)
 def __eq__(self, other):
 if isinstance(other, EntryBase):
 return self.entry_dn == other.entry_dn
 return False
 def __lt__(self, other):
 if isinstance(other, EntryBase):
 return self.entry_dn <= other.entry_dn
 return False
 @property
 def entry_dn(self):
 return self._state.dn
 @property
 def entry_cursor(self):
 return self._state.cursor
 @property
 def entry_status(self):
 return self._state.status
 @property
 def entry_definition(self):
 return self._state.definition
 @property
 def entry_raw_attributes(self):
 return self._state.raw_attributes
 def entry_raw_attribute(self, name):
 """
 :param name: name of the attribute
 :return: raw (unencoded) value of the attribute, None if attribute is not found
 """
 return self._state.raw_attributes[name] if name in self._state.raw_attributes else None
 @property
 def entry_mandatory_attributes(self):
 return [attribute for attribute in self.entry_definition._attributes if self.entry_definition._attributes[attribute].mandatory]
 @property
 def entry_attributes(self):
 return list(self._state.attributes.keys())
 @property
 def entry_attributes_as_dict(self):
 return dict((attribute_key, attribute_value.values) for (attribute_key, attribute_value) in self._state.attributes.items())
 @property
 def entry_read_time(self):
 return self._state.read_time
 @property
 def _changes(self):
 return self._state.changes
 def entry_to_json(self, raw=False, indent=4, sort=True, stream=None, checked_attributes=True, include_empty=True):
 json_entry = dict()
 json_entry['dn'] = self.entry_dn
 if checked_attributes:
 if not include_empty:
 # needed for python 2.6 compatibility
 json_entry['attributes'] = dict((key, self.entry_attributes_as_dict[key]) for key in self.entry_attributes_as_dict if self.entry_attributes_as_dict[key])
 else:
 json_entry['attributes'] = self.entry_attributes_as_dict
 if raw:
 if not include_empty:
 # needed for python 2.6 compatibility
 json_entry['raw'] = dict((key, self.entry_raw_attributes[key]) for key in self.entry_raw_attributes if self.entry_raw_attributes[key])
 else:
 json_entry['raw'] = dict(self.entry_raw_attributes)
 if str is bytes: # Python 2
 check_json_dict(json_entry)
 json_output = json.dumps(json_entry,
 ensure_ascii=True,
 sort_keys=sort,
 indent=indent,
 check_circular=True,
 default=format_json,
 separators=(',', ': '))
 if stream:
 stream.write(json_output)
 return json_output
 def entry_to_ldif(self, all_base64=False, line_separator=None, sort_order=None, stream=None):
 ldif_lines = operation_to_ldif('searchResponse', [self._state.response], all_base64, sort_order=sort_order)
 ldif_lines = add_ldif_header(ldif_lines)
 line_separator = line_separator or linesep
 ldif_output = line_separator.join(ldif_lines)
 if stream:
 if stream.tell() == 0:
 header = add_ldif_header(['-'])[0]
 | |
| 
	#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Provide a unix-like grep function for Python.
Author: <NAME>.
Since: 2017.
"""
import re
# get python base string for either Python 2.x or 3.x
try:
 _basestring = basestring
except NameError:
 _basestring = str
def __fix_args(kwargs):
 """
 Set all named arguments shortcuts and flags.
 """
 kwargs.setdefault('fixed_strings', kwargs.get('F'))
 kwargs.setdefault('basic_regexp', kwargs.get('G'))
 kwargs.setdefault('extended_regexp', kwargs.get('E'))
 kwargs.setdefault('ignore_case', kwargs.get('i'))
 kwargs.setdefault('invert', kwargs.get('v'))
 kwargs.setdefault('words', kwargs.get('w'))
 kwargs.setdefault('line', kwargs.get('x'))
 kwargs.setdefault('count', kwargs.get('c'))
 kwargs.setdefault('max_count', kwargs.get('m'))
 kwargs.setdefault('after_context', kwargs.get('A'))
 kwargs.setdefault('before_context', kwargs.get('B'))
 kwargs.setdefault('quiet', kwargs.get('q'))
 kwargs.setdefault('byte_offset', kwargs.get('b'))
 kwargs.setdefault('only_matching', kwargs.get('o'))
 kwargs.setdefault('line_number', kwargs.get('n'))
 kwargs.setdefault('regex_flags', kwargs.get('r'))
 kwargs.setdefault('keep_eol', kwargs.get('k'))
 kwargs.setdefault('trim', kwargs.get('t'))
def _is_part_of_word(c):
 """
 Return whether a given character is part of a word, i.e. not a word-breaker character.
 """
 return c.isalpha() or c == '_'
def grep(target, pattern, **kwargs):
 """
 Main grep function.
 :param target: Target to apply grep on. Can be a single string, an iterable, a function, or an opened file handler.
 :param pattern: Grep pattern to search.
 :param kwargs: Optional flags (note: the docs below talk about matching 'lines', but this function also accepts lists
 and other iterables - in those cases, a 'line' means a single value from the iterable).
 The available flags are:
 - F, fixed_strings: Interpret 'pattern' as a string or a list of strings, any of which is to be matched.
 If not set, will interpret 'pattern' as a python regular expression.
 - i, ignore_case: Ignore case.
 - v, invert: Invert (eg return non-matching lines / values).
 - w, words: Select only those lines containing matches that form whole words.
 - x, line: Select only matches that exactly match the whole line.
 - c, count: Instead of the normal output, print a count of matching lines.
 - m NUM, max_count: Stop reading after NUM matching values.
 - A NUM, after_context: Return NUM lines of trailing context after matching lines. This will replace the string
 part of the reply with a list of strings. Note that in some input types this might skip
 following matches. For example, if the input is a file or a custom iterator.
 - B NUM, before_context: Return NUM lines of leading context before matching lines. This will replace the string
 part of the reply with a list of strings.
 - q, quiet: Instead of returning string / list of strings return just a single True / False if
 found matches.
 - b, byte_offset: Instead of a list of strings will return a list of (offset, string), where offset is
 the offset of the matched 'pattern' in line.
 - n, line_number: Instead of a list of strings will return a list of (index, string), where index is the
 line number.
 - o, only_matching: Return only the part of a matching line that matches 'pattern'.
 - r, regex_flags: Any additional regex flags you want to add when using regex (see python re flags).
 - k, keep_eol When iterating file, if this option is set will keep the end-of-line at the end of every
 line. If not (default) will trim the end of line character.
 - t, trim Trim all whitespace characters from every line processed.
 :return: A list with matching lines (even if provided target is a single string), unless flags state otherwise.
 """
 # unify flags (convert shortcuts to full name)
 __fix_args(kwargs)
 # parse the params that are relevant to this function
 f_count = kwargs.get('count')
 f_max_count = kwargs.get('max_count')
 f_quiet = kwargs.get('quiet')
 # use the grep_iter to build the return list
 ret = []
 for value in grep_iter(target, pattern, **kwargs):
 # if quiet mode no need to continue, just return True because we got a value
 if f_quiet:
 return True
 # add current value to return list
 ret.append(value)
 # if have max limit and exceeded that limit, break:
 if f_max_count and len(ret) >= f_max_count:
 break
 # if quiet mode and got here it means we didn't find a match
 if f_quiet:
 return False
 # if requested count return results count
 if f_count:
 return len(ret)
 # return results list
 return ret
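# Example usage (illustrative, based on the flags documented above):
#   grep(["hello world", "goodbye"], "hello")           -> ["hello world"]
#   grep(["Hello", "hello"], "HELLO", i=True, c=True)   -> 2
#   grep(open("some.log"), "error", n=True)             -> [(line_index, line), ...]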
def grep_iter(target, pattern, **kwargs):
 """
 Main grep function, as a memory efficient iterator.
 Note: this function does not support the 'quiet' or 'count' flags.
 :param target: Target to apply grep on. Can be a single string, an iterable, a function, or an opened file handler.
 :param pattern: Grep pattern to search.
 :param kwargs: See grep() help for more info.
 :return: Next match.
 """
 # unify flags (convert shortcuts to full name)
 __fix_args(kwargs)
 # parse the params that are relevant to this function
 f_offset = kwargs.get('byte_offset')
 f_line_number = kwargs.get('line_number')
 f_trim = kwargs.get('trim')
 f_after_context = kwargs.get('after_context')
 f_before_context = kwargs.get('before_context')
 f_only_matching = kwargs.get('only_matching')
 # if target is a callable function, call it first to get value
 if callable(target):
 target = target()
 # if we got a single string convert it to a list
 if isinstance(target, _basestring):
 target = [target]
 # calculate if need to trim end of lines
 need_to_trim_eol = not kwargs.get('keep_eol') and hasattr(target, 'readline')
 # list of previous lines, used only when f_before_context is set
 prev_lines = []
 # iterate target and grep
 for line_index, line in enumerate(target):
 # fix current line
 line = __process_line(line, need_to_trim_eol, f_trim)
 # do grep
 match, offset, endpos = __do_grep(line, pattern, **kwargs)
 # nullify return value
 value = None
 # if matched
 if match:
 # the textual part we return in response
 ret_str = line
 # if only return matching
 if f_only_matching:
 ret_str = ret_str[offset:endpos]
 # if 'before_context' is set
 if f_before_context:
 # make ret_str be a list with previous lines
 ret_str = prev_lines + [ret_str]
 # if need to return X lines after trailing context
 if f_after_context:
 # convert return string to list (unless f_before_context is set, in which case its already a list)
 if not f_before_context:
 ret_str = [ret_str]
 # iterate X lines to read after
 for i in range(f_after_context):
 # if target got next or readline, use next()
 # note: unfortunately due to python files next() implementation we can't use tell and seek to
 # restore position and not skip next matches.
 if hasattr(target, '__next__') or hasattr(target, 'readline'):
 try:
 val = next(target)
 except StopIteration:
 break
 # if not, try to access next item based on index (for lists)
 else:
 try:
 val = target[line_index+i+1]
 except IndexError:
 break
 # add value to return string
 ret_str.append(__process_line(val, need_to_trim_eol, f_trim))
 # if requested offset, add offset + line to return list
 if f_offset:
 value = (offset, ret_str)
 # if requested line number, add line number + line to return list
 elif f_line_number:
 value = (line_index, ret_str)
 # default: add line to return list
 else:
 value = ret_str
 # maintain a list of previous lines, if the before-context option is provided
 if f_before_context:
 prev_lines.append(line)
 if len(prev_lines) > f_before_context:
 prev_lines.pop(0)
 # if we had a match return current value
 if value is not None:
 yield value
 # done iterating; return instead of raising StopIteration
 # (raising StopIteration inside a generator is a RuntimeError under PEP 479)
 return
def __process_line(line, strip_eol, strip):
 """
 process a single line value.
 """
 if strip:
 line = line.strip()
 elif strip_eol and line.endswith('\n'):
 line = line[:-1]
 return line
def __do_grep(curr_line, pattern, **kwargs):
 """
 Do grep on a single string.
 See 'grep' docs for info about kwargs.
 :param curr_line: a single line to test.
 :param pattern: pattern to search.
 :return: (matched, position, end_position).
 """
 # currently found position
 position = -1
 end_pos = -1
 # check if fixed strings mode
 if kwargs.get('fixed_strings'):
 # if case insensitive fix case
 if kwargs.get('ignore_case'):
 pattern = pattern.lower()
 curr_line = curr_line.lower()
 # if pattern is a single string, match it:
 pattern_len = 0
 if isinstance(pattern, _basestring):
 position = curr_line.find(pattern)
 pattern_len = len(pattern)
 # if not, treat it as a list of strings and match any
 else:
 for p in pattern:
 position = curr_line.find(p)
 pattern_len = len(p)
 if position != -1:
 break
 # calc end position
 end_pos = position + pattern_len
 # check if need to match whole words
 if kwargs.get('words') and position != -1:
 foundpart = (' ' + curr_line + ' ')[position:position+len(pattern)+2]
 if _is_part_of_word(foundpart[0]):
 position = -1
 elif _is_part_of_word(foundpart[-1]):
 position = -1
 # if not fixed | |
| 
	= set()
 for (chain_id,) in cursor.fetchall():
 chain_ids.add(chain_id)
 for chain_id in sorted(chain_ids):
 chain = CommentChain.fromId(db, chain_id, user, review=review)
 chain.loadComments(db, user)
 result.append(chain)
 return result
def createCommentChain(db, user, review, chain_type, commit_id=None, origin=None, file_id=None, parent_id=None, child_id=None, old_sha1=None, new_sha1=None, offset=None, count=None):
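 # Creates a new comment chain for a review: inserts a commentchains row and, for
 # file comments, a commentchainlines row anchoring the commented line range to a
 # specific file version (sha1). When parent and child commits differ, the code
 # below walks older/newer versions of the file through the review's changesets
 # to locate the origin of the commented lines.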
 if chain_type == "issue" and review.state != "open":
 raise Exception, "review not open; can't raise issue"
 cursor = db.cursor()
 if file_id is not None and (parent_id == child_id or parent_id is None):
 cursor.execute("""SELECT 1
 FROM reviewchangesets
 JOIN fileversions USING (changeset)
 WHERE reviewchangesets.review=%s
 AND fileversions.file=%s
 AND fileversions.old_sha1!='0000000000000000000000000000000000000000'
 AND fileversions.new_sha1!='0000000000000000000000000000000000000000'""",
 (review.id, file_id))
 if cursor.fetchone(): raise Exception, "file changed in review"
 cursor.execute("INSERT INTO commentchains (review, uid, type, file, first_commit, last_commit) VALUES (%s, %s, %s, %s, %s, %s) RETURNING id", [review.id, user.id, chain_type, file_id, child_id, child_id])
 chain_id = cursor.fetchone()[0]
 cursor.execute("INSERT INTO commentchainlines (chain, uid, commit, sha1, first_line, last_line) VALUES (%s, %s, %s, %s, %s, %s)", (chain_id, user.id, child_id, new_sha1, offset, offset + count - 1))
 elif file_id is not None:
 parents_returned = set()
 def getFileParent(new_sha1):
 cursor.execute("""SELECT changesets.id, fileversions.old_sha1
 FROM changesets, reviewchangesets, fileversions
 WHERE reviewchangesets.review=%s
 AND reviewchangesets.changeset=changesets.id
 AND fileversions.changeset=changesets.id
 AND fileversions.file=%s
 AND fileversions.new_sha1=%s""",
 [review.id, file_id, new_sha1])
 try:
 changeset_id, old_sha1 = cursor.fetchone()
 if old_sha1 in parents_returned: return None, None
 parents_returned.add(old_sha1)
 return changeset_id, old_sha1
 except:
 return None, None
 children_returned = set()
 def getFileChild(old_sha1):
 cursor.execute("""SELECT changesets.id, fileversions.new_sha1
 FROM changesets, reviewchangesets, fileversions
 WHERE reviewchangesets.review=%s
 AND reviewchangesets.changeset=changesets.id
 AND fileversions.changeset=changesets.id
 AND fileversions.file=%s
 AND fileversions.old_sha1=%s""",
 [review.id, file_id, old_sha1])
 try:
 changeset_id, new_sha1 = cursor.fetchone()
 if new_sha1 in children_returned: return None, None
 children_returned.add(new_sha1)
 return changeset_id, new_sha1
 except:
 return None, None
 cursor.execute("""SELECT changesets.id
 FROM changesets, reviewchangesets, fileversions
 WHERE reviewchangesets.review=%s
 AND reviewchangesets.changeset=changesets.id
 AND changesets.child=%s
 AND fileversions.changeset=changesets.id
 AND fileversions.file=%s
 AND fileversions.old_sha1=%s
 AND fileversions.new_sha1=%s""",
 [review.id, child_id, file_id, old_sha1, new_sha1])
 row = cursor.fetchone()
 if not row:
 if origin == "old":
 cursor.execute("""SELECT changesets.id
 FROM changesets, reviewchangesets, fileversions
 WHERE reviewchangesets.review=%s
 AND reviewchangesets.changeset=changesets.id
 AND fileversions.changeset=changesets.id
 AND fileversions.file=%s
 AND fileversions.old_sha1=%s""",
 [review.id, file_id, old_sha1])
 else:
 cursor.execute("""SELECT changesets.id
 FROM changesets, reviewchangesets, fileversions
 WHERE reviewchangesets.review=%s
 AND reviewchangesets.changeset=changesets.id
 AND fileversions.changeset=changesets.id
 AND fileversions.file=%s
 AND fileversions.new_sha1=%s""",
 [review.id, file_id, new_sha1])
 row = cursor.fetchone()
 primary_changeset_id = row[0]
 sha1s_older = { }
 sha1s_newer = { old_sha1: (primary_changeset_id, new_sha1) }
 sha1 = new_sha1
 while True:
 changeset_id, next_sha1 = getFileParent(sha1)
 if changeset_id:
 sha1s_older[sha1] = changeset_id, next_sha1
 sha1s_newer[next_sha1] = changeset_id, sha1
 sha1 = next_sha1
 else:
 break
 sha1 = new_sha1
 while True:
 changeset_id, next_sha1 = getFileChild(sha1)
 if changeset_id:
 sha1s_newer[sha1] = changeset_id, next_sha1
 sha1 = next_sha1
 else:
 break
 commentchainlines_values = []
 processed = set()
 def searchOrigin(changeset_id, sha1, search_space, first_line, last_line):
 try:
 while sha1 not in processed:
 processed.add(sha1)
 changeset_id, next_sha1 = search_space[sha1]
 changeset = changeset_load.loadChangeset(db, review.repository, changeset_id, filtered_file_ids=set([file_id]))
 if len(changeset.child.parents) > 1: break
 verdict, next_first_line, next_last_line = updateCommentChain(first_line, last_line, changeset.files[0].chunks, forward)
 if verdict == "modified": break
 sha1 = next_sha1
 first_line = next_first_line
 last_line = next_last_line
 except:
 pass
 return changeset_id, sha1, first_line, last_line
 first_line = offset
 last_line = offset + count - 1
 if origin == 'old':
 changeset_id, sha1, first_line, last_line = searchOrigin(primary_changeset_id, old_sha1, sha1s_older, first_line, last_line)
 commit_id = diff.Changeset.fromId(db, review.repository, changeset_id).parent.id
 else:
 changeset_id, sha1, first_line, last_line = searchOrigin(primary_changeset_id, new_sha1, sha1s_older, first_line, last_line)
 commit_id = diff.Changeset.fromId(db, review.repository, changeset_id).child.id
 commentchainlines_values.append((user.id, commit_id, sha1, first_line, last_line))
 processed = set()
 processed.add(sha1)
 while sha1 in sha1s_newer:
 changeset_id, sha1 = sha1s_newer[sha1]
 if sha1 in processed: break
 else: processed.add(sha1)
 changeset = changeset_load.loadChangeset(db, review.repository, changeset_id, filtered_file_ids=set([file_id]))
 if len(changeset.child.parents) != 1:
 chunks = diff.parse.parseDifferences(review.repository, from_commit=changeset.parent, to_commit=changeset.child, selected_path=dbutils.describe_file(db, file_id)).chunks
 else:
 chunks = changeset.files[0].chunks
 verdict, first_line, last_line = updateCommentChain(first_line, last_line, chunks)
 if verdict == "transfer":
 commentchainlines_values.append((user.id, changeset.child.getId(db), sha1, first_line, last_line))
 else:
 break
 cursor.execute("INSERT INTO commentchains (review, uid, type, origin, file, first_commit, last_commit) VALUES (%s, %s, %s, %s, %s, %s, %s) RETURNING id", [review.id, user.id, chain_type, origin, file_id, parent_id, child_id])
 chain_id = cursor.fetchone()[0]
 try: cursor.executemany("INSERT INTO commentchainlines (chain, uid, commit, sha1, first_line, last_line) VALUES (%s, %s, %s, %s, %s, %s)", [(chain_id,) + values for values in commentchainlines_values])
 except: raise Exception, repr(commentchainlines_values)
 elif commit_id is not None:
 commit = gitutils.Commit.fromId(db, review.repository, commit_id)
 cursor.execute("INSERT INTO commentchains (review, uid, type, first_commit, last_commit) VALUES (%s, %s, %s, %s, %s) RETURNING id", [review.id, user.id, chain_type, commit_id, commit_id])
 chain_id = cursor.fetchone()[0]
 cursor.execute("INSERT INTO commentchainlines (chain, uid, commit, sha1, first_line, last_line) VALUES (%s, %s, %s, %s, %s, %s)", (chain_id, user.id, commit_id, commit.sha1, offset, offset + count - 1))
 else:
 cursor.execute("INSERT INTO commentchains (review, uid, type) VALUES (%s, %s, %s) RETURNING id", [review.id, user.id, chain_type])
 chain_id = cursor.fetchone()[0]
 commentchainusers = set([user.id] + map(int, review.owners))
 if file_id is not None:
 filters = Filters()
 filters.load(db, review=review)
 for user_id in filters.listUsers(db, file_id):
 commentchainusers.add(user_id)
 cursor.executemany("INSERT INTO commentchainusers (chain, uid) VALUES (%s, %s)", [(chain_id, user_id) for user_id in commentchainusers])
 return chain_id
def createComment(db, user, chain_id, comment, first=False):
 cursor = db.cursor()
 cursor.execute("INSERT INTO comments (chain, uid, time, state, comment) VALUES (%s, %s, now(), 'draft', %s) RETURNING id", (chain_id, user.id, comment))
 comment_id = cursor.fetchone()[0]
 if first:
 cursor.execute("UPDATE commentchains SET first_comment=%s WHERE id=%s", (comment_id, chain_id))
 return comment_id
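# --- Illustrative usage sketch (not part of the original module) ---
# Shows how a chain and its first comment are typically created together. The
# db/user/review objects are assumed to come from the surrounding application,
# and the "note" chain type is an assumption based on the "issue" check in
# createCommentChain above.
def _example_create_note(db, user, review, text):
    # No commit/file arguments: this takes the final branch of createCommentChain
    # and attaches the chain to the review as a whole.
    chain_id = createCommentChain(db, user, review, "note")
    # Record the opening comment and mark it as the chain's first_comment.
    comment_id = createComment(db, user, chain_id, text, first=True)
    return chain_id, comment_id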
def updateCommentChain(first_line, last_line, chunks, forward=True):
 delta = 0
 for chunk in chunks:
 if forward:
 if chunk.delete_offset + chunk.delete_count <= first_line:
 # Chunk is before (and does not overlap) the comment chain.
 delta += chunk.insert_count - chunk.delete_count
 elif chunk.delete_offset <= last_line:
 # Chunk overlaps the comment chain.
 return ("modified", None, None)
 else:
 # Chunk is after comment chain, which thus was not overlapped by
 # any chunk. Copy the comment chain over to the new version of
 # the file with 'delta' added to its 'first_line'/'last_line'.
 return ("transfer", first_line + delta, last_line + delta)
 else:
 if chunk.insert_offset + chunk.insert_count <= first_line:
 # Chunk is before (and does not overlap) the comment chain.
 delta += chunk.delete_count - chunk.insert_count
 elif chunk.insert_offset <= last_line:
 # Chunk overlaps the comment chain.
 return ("modified", None, None)
 else:
 # Chunk is after comment chain, which thus was not overlapped by
 # any chunk. Copy the comment chain over to the new version of
 # the file with 'delta' added to its 'first_line'/'last_line'.
 return ("transfer", first_line + delta, last_line + delta)
 else:
 # Comment chain was after all the chunks. Copy it over to the new
 # version of the file with 'delta' added to its 'first_line' and
 # 'last_line'.
 return ("transfer", first_line + delta, last_line + delta)
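# --- Illustrative sketch of updateCommentChain's verdicts (not original code) ---
# Chunk is a minimal stand-in for the diff chunk objects used above; only the
# four offset/count attributes that updateCommentChain reads are modelled, and
# the line numbers are made up for illustration.
def _example_update_comment_chain():
    from collections import namedtuple
    Chunk = namedtuple("Chunk", "delete_offset delete_count insert_offset insert_count")
    # A chunk replacing 2 lines with 5 lines well before the commented range:
    # the chain survives, shifted down by the net 3 inserted lines ("transfer").
    chunks = [Chunk(delete_offset=10, delete_count=2, insert_offset=10, insert_count=5)]
    assert updateCommentChain(20, 22, chunks) == ("transfer", 23, 25)
    # A chunk overlapping the commented range invalidates it ("modified").
    chunks = [Chunk(delete_offset=21, delete_count=1, insert_offset=21, insert_count=1)]
    assert updateCommentChain(20, 22, chunks) == ("modified", None, None)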
def updateCommentChains(db, user, review, changeset):
 cursor = db.cursor()
 commentchainlines_values = []
 addressed = set()
 for file in changeset.files:
 cursor.execute("""SELECT id, commentchains.uid, type, commentchains.state, first_line, last_line
 FROM commentchains
 INNER JOIN commentchainlines ON (id=chain)
 WHERE commentchains.review=%s
 AND commentchains.state in ('draft', 'open')
 AND commentchains.file=%s
 AND commentchainlines.sha1=%s
 ORDER BY commentchainlines.first_line""",
 [review.id, file.id, file.old_sha1])
 rows = cursor.fetchall()
 if not rows: continue
 if len(changeset.child.parents) != 1:
 full_file = diff.parse.parseDifferences(review.repository, from_commit=changeset.parent, to_commit=changeset.child, selected_path=file.path)
 if not full_file: continue
 chunks = full_file.chunks
 else:
 chunks = file.chunks
 for chain_id, chain_user_id, chain_type, chain_state, first_line, last_line in rows:
 verdict, new_first_line, new_last_line = updateCommentChain(first_line, last_line, chunks)
 if verdict == "modified" and chain_type == "issue": addressed.add(chain_id)
 elif verdict == "transfer":
 cursor.execute("SELECT 1 FROM commentchainlines WHERE chain=%s AND sha1=%s", (chain_id, file.new_sha1))
 if not cursor.fetchone():
 if chain_state == 'open':
 lines_state = 'current'
 lines_user_id = user.id
 else:
 lines_state = 'draft'
 lines_user_id = chain_user_id
 commentchainlines_values.append([chain_id, lines_user_id, lines_state, changeset.child.getId(db), file.new_sha1, new_first_line, new_last_line])
 cursor.executemany("""INSERT INTO commentchainlines (chain, uid, state, commit, sha1, first_line, last_line)
 VALUES (%s, %s, %s, %s, %s, %s, %s)""",
 commentchainlines_values)
 if addressed:
 cursor.executemany("UPDATE commentchains SET state='addressed', addressed_by=%s WHERE id=%s AND state='open'", [[changeset.child.id, chain_id] for chain_id in addressed])
 cursor.executemany("UPDATE commentchains SET addressed_by=%s WHERE id=%s AND state='draft'", [[changeset.child.id, chain_id] for chain_id in addressed])
 print "Addressed issues:"
 for chain_id in addressed:
 chain = CommentChain.fromId(db, chain_id, user, review=review)
 if chain.state == 'addressed':
 chain.loadComments(db, user)
 title = " %s: " % chain.title(False)
 print "%s%s" % (title, chain.leader(max_length=80 - len(title), text=True))
def validateCommentChain(db, review, file_id, sha1, offset, count):
 """Check whether the commented lines are changed by later commits in the review.
If they are, a diff.Changeset object representing the first changeset that
modifies those lines is returned. If they are not, None is returned."""
 cursor = db.cursor()
 cursor.execute("""SELECT old_sha1, new_sha1, reviewchangesets.changeset
 FROM reviewchangesets, fileversions
 WHERE reviewchangesets.review=%s
 AND fileversions.changeset=reviewchangesets.changeset
 AND fileversions.file=%s""",
 [review.id, file_id])
 sha1s = {}
 for old_sha1, new_sha1, changeset_id in cursor.fetchall():
 sha1s[old_sha1] = (new_sha1, changeset_id)
 commit_count = 0
 processed = set()
 while sha1 in sha1s | |
| 
	<filename>uipath/sequence.py
from bs4 import BeautifulSoup
class Sequence:
	def __init__(self, file):
		self.file = file
		self.init_vars() # load all the vars from the xaml file
	#Initializes all variables for the class file
	def init_vars(self):
		self.xaml = self.read_xaml() # Read xaml file
		self.name = self.get_class_name() # get class name from XAML file
		self.arguments = self.build_arguments(self.get_arguments())
		self.sequences = self.create_sequences(self.get_sequences())
	#Gets the existing XAML
	def get_xaml(self):
		return self.xaml
	#Reads an XAML file
	def read_xaml(self):
		infile = open(self.file, "r")
		data = infile.read()
		infile.close()
		data = data[data.find("<"):] # remove garbage from start of file
		xaml = BeautifulSoup(data, "xml")
		return xaml
	#Updates a node of the XAML stored in memory
	def update_xaml_node(self, node, new_xaml):
		self.xaml = new_xaml
		self.save()
	#Saves the raw XAML in memory to the XAML file
	def save(self):
		#First we need to strip the <XML> tag that our xaml parser adds
		xml_string = '<?xml version="1.0" encoding="utf-8"?>\n' # The XML encoding tag we need to remove
		data = str(self.xaml).replace(xml_string, "", 1)
		file = open(self.file,"w+")
		file.write(str(data))
		file.close()
		#Re-initialize variables from the newly saved XAML file
		self.init_vars()
		return file
	#Get classname from raw XAML
	def get_class_name(self):
		return self.xaml.find_all("Activity")[0]["x:Class"]
	#Sets a class name for the file
	def set_class_name(self, new_name):
		self.xaml.find("Activity")["x:Class"] = new_name
		self.save()
		return new_name
	#Returns the first sequence/block/whatever in the class. The outermost layer
	def get_first_block(self):
		return self.xaml.find("TextExpression.ReferencesForImplementation").next_sibling.next_sibling
	#Get annotation for node
	def get_annotation(self, node):
		return node["sap2010:Annotation.AnnotationText"]
	#Set annotation for node
	def set_annotation(self, node, text):
		node["sap2010:Annotation.AnnotationText"] = text
		self.save()
	#Gets node name
	def get_node_name(self, node):
		return node["DisplayName"]
	#Sets node name
	def set_node_name(self, node, new_name):
		node["DisplayName"] = new_name
		self.save()
		return new_name
	#Returns an argument by name
	def get_argument_by_name(self, name):
		if not self.arguments is None:
			for item in self.arguments:
				if item.name == name:
					return item
	#Gets arguments of sequence
	def get_arguments(self):
		return self.xaml.find_all("x:Property")
	#Builds the argument objects for the main class
	def build_arguments(self, argument_list):
		if not argument_list is None and len(argument_list) > 0:
			args = []
			for item in argument_list:
				args.append(Sequence.Argument(self, item))
			return args
	#Creates and adds a new argument for the sequence
	def add_argument(self, name, string_direction, datatype):
		new_arg = self.xaml.new_tag("x:Property")
		new_arg["Name"] = name
		#build XAML friendly variables for the passed in arguments
		new_arg_type_end = ")"
		if string_direction == "in":
			new_arg_type_start = "InArgument("
		elif string_direction == "out":
			new_arg_type_start = "OutArgument("
		elif string_direction == "io":
			new_arg_type_start = "InOutArgument("
		else:
			new_arg_type_end = ""
			new_arg_type_start = ""
		# Type="InArgument(scg:Dictionary(x:String, x:Object))"
		new_arg["Type"] = new_arg_type_start + datatype + new_arg_type_end
		#Add to array in XAML
		self.xaml.find("x:Members").append(new_arg)
		#Rebuild all arguments from the XAML
		self.arguments = self.build_arguments(self.get_arguments())
		self.save()
	#Gets a block by its Displayname
	def get_node_by_display_name(self, display_name):
		return self.xaml.find(DisplayName=display_name)
	#Gets nested sequences
	def get_sequences(self):
		return self.xaml.find_all("Sequence")
	#Builds the nested sequence objects for the main class
	def create_sequences(self, sequence_list):
		if not sequence_list is None and len(sequence_list) > 0:
			sequences = []
			for item in sequence_list:
				sequences.append(Sequence.Inner_Sequence(self, item))
			return sequences
	#To String.
	def __str__(self):
		to_return = "{" + "\n"
		for key in self.__dict__.keys():
			if key != "xaml":
				if isinstance(self.__dict__[key], (list,)):
					to_return = to_return + "\t" + key + ": [" + "\n"
					for item in self.__dict__[key]:
						to_return = to_return + "\t\t" + str(item) + "\n"
					to_return = to_return + "\t]" + "\n"
				else:
					to_return = to_return + "\t" + key + ":" + str(self.__dict__[key]) + "\n"
		to_return = to_return + "}"
		return to_return
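	#Illustrative usage sketch (not part of the original class); the file name and
	#argument values below are hypothetical:
	#	seq = Sequence("Main.xaml")                  # parse an existing workflow file
	#	print(seq.name)                              # x:Class name from the Activity node
	#	seq.add_argument("in_Config", "in", "scg:Dictionary(x:String, x:Object)")
	#	arg = seq.get_argument_by_name("in_Config")
	#	arg.set_direction("InOutArgument")           # rewrites the Type attribute and saves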
	#-----------------------------------------------------------------------------------------------------------------------
	# Subclass: Variable
	# Description: Stores the data about a variable in a sequence
	#-----------------------------------------------------------------------------------------------------------------------
	class Variable():
		def __init__(self, parent=None, xaml = None, name=None, type_arg=None, default=None):
			self.parent = parent #Store the calling sequence
			#If xaml was passed in just use that
			self.xaml = xaml
			if not self.xaml is None:
				self.name = self.xaml["Name"]
				self.type = self.xaml["x:TypeArguments"]
			#Else build new xaml based on the name and type passed in
			else:
				self.name = name
				self.type = type_arg
				self.default = default
				self.xaml = self.build_xaml()
		#Builds XAML for a name and type
		def build_xaml(self):
			new_node = self.parent.xaml.new_tag("Variable") # have the beautiful soup instance from the parent create a new tag
			new_node["Name"] = self.name
			new_node["x:TypeArguments"] = self.type
			if not self.default is None:
				new_node["Default"] = self.default
			return new_node
	#-----------------------------------------------------------------------------------------------------------------------
	# Subclass: Argument
	# Description: Stores the arguments and their default values from the xaml file
	#-----------------------------------------------------------------------------------------------------------------------
	#Define subclass for the arguments
	class Argument():
		def __init__(self, outer_sequence, xaml, default_value=None):
			self.outer_sequence = outer_sequence #the containing sequence
			
			self.xaml = xaml
			self.init_vars()
			self.default_value = self.get_default_value_for_attr()
			
		#Creates a new argument
		#Get default value for attribute
		def get_default_value_for_attr(self):
			values_string = self.outer_sequence.xaml.find_all("Activity")[0]
			key = "this:" + self.outer_sequence.name + "." + self.name
			if key in values_string.attrs: # check the tag's attributes, not its child nodes
				return values_string[key]
			
		#Decodes XAML into python friendly strings
		def init_vars(self):
			self.name = self.xaml["Name"]
			self.direction = self.get_direction_from_xaml()
			self.type = self.get_datatype_from_xaml()
		#Converts string direction to xaml direction
		def convert_string_direction_to_xaml(self, string_direction):
			if string_direction == "in":
				return "InArgument"
			elif string_direction == "out":
				return "OutArgument"
			elif string_direction == "io":
				return "InOutArgument"
			else:
				return "Property"
		#Gets direction of argument
		def get_direction_from_xaml(self):
				temp = self.xaml["Type"]
				if "InArgument" in temp:
					return "InArgument"
				elif "OutArgument" in temp:
					return "OutArgument"
				elif "InOutArgument" in temp:
					return "InOutArgument"
				else:
					return "Property"
		#Gets the datatype of the argument
		def get_datatype_from_xaml(self):
			return self.xaml["Type"].replace("(", "").replace(")", "").replace(self.direction, "")
		#Sets a new name value for this Class instance, as well as updating the parent raw XAML.
		def set_name(self, new_name):
			self.update_default_value_name(self.name, new_name)
			self.xaml["Name"] = new_name
			self.name = new_name
			self.update_outer_sequence()
			
		#Sets a new direction value for this Class instance, as well as updating the parent raw XAML.
		def set_direction(self, direction_string):
			self.direction = direction_string #update class' variable
			new_direction_xaml = ""
			if self.direction == "InArgument":
				new_direction_xaml = "InArgument(" + self.type + ")"
			elif self.direction == "InOutArgument":
				self.delete_default_value() #delete default value as it is not supported by this direction type
				new_direction_xaml = "InOutArgument(" + self.type + ")"
			elif self.direction == "OutArgument":
				self.delete_default_value() #delete default value as it is not supported by this direction type
				new_direction_xaml = "OutArgument(" + self.type + ")"
			else:
				new_direction_xaml = self.type
			self.xaml["Type"] = new_direction_xaml
			self.update_outer_sequence()
		#Creates a default value when one does not yet exist
		#STUB
		#Sets a new default value for this Class instance, as well as updating the parent raw XAML.
		def update_default_value(self, new_value):
			self.default_value = new_value
			values_string = self.outer_sequence.xaml.find_all("Activity")[0]
			values_string["this:" + self.outer_sequence.name + "." + self.name] = new_value
			self.update_outer_sequence()
		#Changes the name of default values to match the argument name when it is changed
		def update_default_value_name(self, old_name, new_name):
			values_string = self.outer_sequence.xaml.find_all("Activity")[0]
			key = "this:" + self.outer_sequence.name + "." + self.name
			
			if key in values_string.attrs: # check the tag's attributes, not its child nodes
				self.delete_default_value()
				values_string["this:" + self.outer_sequence.name + "." + new_name] = self.default_value
				#values_string["this:" + self.outer_sequence.name + "." + self.name] = new_value
		#Deletes the default value
		def delete_default_value(self):
			values_string = self.outer_sequence.xaml.find_all("Activity")[0]
			key = "this:" + self.outer_sequence.name + "." + self.name
			
			if key in values_string.attrs: # check the tag's attributes, not its child nodes
				del values_string[key]
			if not self.default_value is None:
				del self.default_value
		#Update parent Sequence XAML. Do this when using any setter method defined above
		def update_outer_sequence(self):
			#if not self.outer_sequence.xaml.find(attrs={"Name":self.name})["Name"] is None:
			self.outer_sequence.xaml.find(attrs={"Name":self.name}).replace_with(self.xaml)
			self.outer_sequence.save()
		#override to string method
		def __str__(self):
			if self.default_value is None: # check if self has default_value
				return "{name: \"" + self.name + "\", direction: \"" + self.direction + "\", type: \"" + self.type + "\"}"
			else:
				return "{name: \"" + self.name + "\", direction: \"" + self.direction + "\", type: \"" + self.type + "\", default_value: \"" + self.default_value + "\"}"
	#-----------------------------------------------------------------------------------------------------------------------
	# Subclass: Inner_Sequence
	# Description: Stores the nested sequences (and any invoked workflows) found in the xaml file
	#-----------------------------------------------------------------------------------------------------------------------
	#Define subclass for nested sequences
	class Inner_Sequence():
		def __init__(self, outer_sequence, xaml):
			self.outer_sequence = outer_sequence
			self.xaml = xaml
			self.id = xaml["sap2010:WorkflowViewState.IdRef"]
			self.get_invoked_workflow()
			self.variables = self.get_sequence_variables()
		#Get the invoked workflow for the sequences, if applicable
		def get_invoked_workflow(self):
			self.invoked_workflow = self.xaml.find("ui:InvokeWorkflowFile")
			if not self.invoked_workflow is None:
				#Get path to invoked workflow
				self.invoked_workflow_path = self.invoked_workflow["WorkflowFileName"]
				
				#get arguments of invoked workflow
				self.invoked_workflow_arguments_xaml = self.invoked_workflow.find("ui:InvokeWorkflowFile.Arguments")
				
				if len(self.invoked_workflow_arguments_xaml.findChildren()) > 0:
					self.invoked_workflow_arguments = []
					for index,child in enumerate(self.invoked_workflow_arguments_xaml.findChildren()):
						self.invoked_workflow_arguments.append(Sequence.Inner_Sequence.Invoked_Workflow_Argument(self, child, index))
		#Get variables in this sequence
		def get_sequence_variables(self):
			vars_from_xaml = self.xaml.find_all("Variable")
			
			#If there are any variables, we will build an array of variable objects and return them
			if len(vars_from_xaml) > 0:
				all_vars = []
				for item in self.xaml.find_all("Variable"):
					all_vars.append(self.outer_sequence.Variable(self.outer_sequence, xaml=item))
				return all_vars
		#Add variable to sequence
		def create_variable(self, xaml=None, name=None, type=None, default=None):
			#If xaml is None, build new BS node
			if xaml is None:
				xaml = self.outer_sequence.xaml.new_tag("Variable")
				xaml["Name"] = name
				xaml["x:TypeArguments"] = type
				if not default is None:
					xaml["Default"] = default
			self.xaml.find("Sequence.Variables").append(xaml) #Add new variable to xaml file
			self.variables = self.get_sequence_variables() #Regenerate vars from xaml
			self.update_outer_sequence()
			return xaml #return the new variable
		#Deletes a variable by name
		def delete_variable(self, var_name):
			self.xaml.find_all("Variable", Name=var_name)[0].decompose() #Deletes the variable from the XAML
			self.variables = self.get_sequence_variables() #Regenerate vars from xaml
			self.update_outer_sequence()
		#Imports arguments from invoked function. Just like the button in UiPath
		def import_arguments(self):
			#Clear the array of invoked workflow argument objects
			self.invoked_workflow_arguments = []
			self.invoked_workflow_arguments_xaml.clear()
			#clear the array of 
			invoked_sequence = Sequence(self.invoked_workflow_path) #Load the invoked sequence
			#Create new nodes for each of the imported arguments
			for index,item in enumerate(invoked_sequence.arguments):
				#Determine new node type
				new_node_type = item.direction
				#build a new Invoked_Workflow_Argument object
				new_node = self.outer_sequence.xaml.new_tag(new_node_type) # have the beautiful soup instance from the parent create a new tag
				new_node["x:Key"] = item.name
				new_node["x:TypeArguments"] = item.type
				new_node.string = "" #Add this so BS adds a closing tag
				#Add it to this sequence's invoked workflow arguments
				self.invoked_workflow_arguments.append(Sequence.Inner_Sequence.Invoked_Workflow_Argument(self, new_node, index))
				#Add new node to the invoked_arguments_xaml
				self.invoked_workflow_arguments_xaml.append(new_node)
			self.update_outer_sequence()
			return self.invoked_workflow_arguments
		#Update parent Sequence XAML. Do this when using any setter method defined above
		def update_outer_sequence(self):
			#if not self.outer_sequence.xaml.find(attrs={"Name":self.name})["Name"] is None:
			self.outer_sequence.xaml.find(attrs={"sap2010:WorkflowViewState.IdRef":self.id}).replace_with(self.xaml)
			self.outer_sequence.save()
		#To String
		def __str__(self):
			return str(self.xaml.prettify())
		#Define inner class for invoked_argument
		class Invoked_Workflow_Argument():
			def __init__(self, parent, xaml, index):
				self.parent = parent
				self.xaml = xaml
				self.name = xaml.name
				self.index = index
				self.key = self.xaml["x:Key"]
				self.type = self.xaml["x:TypeArguments"]
				self.value = self.xaml.getText()
				self.value_type = "value" # if the value is a pre-entered value and not a variable
				#Check if the value is a variable (denoted by square braces)
				if "[" in self.value:
					self.value = self.value[1:(len(self.value) - 1)]
					self.value_type = "variable"
			#Change the value of the argument (passing a value, not a variable)
			def set_value(self, new_value):
				self.value = new_value
				self.value_type = "value"
				self.xaml.string = new_value
				self.update_parent()
			#Change the value of the argument (the variable that is entered)
			def set_value_to_variable(self, variable):
				self.value = variable
				self.value_type = "variable"
				self.xaml.string = "[" + variable + "]"
				self.update_parent()
			#Change the key that the argument | |
| 
	<filename>music.py
"""
MicroPython on the BBC micro:bit comes with a powerful music and
sound module. It’s very easy to generate bleeps and bloops from
the device if you attach a speaker. Use crocodile clips to attach
pin 0 and GND to the positive and negative inputs on the speaker -
it doesn’t matter which way round they are connected to the speaker.
Musical Notation
 An individual note is specified thus:
 NOTE[octave][:duration]
 For example, A1:4 refers to the note “A” in octave 1 that lasts for four ticks
 (a tick is an arbitrary length of time defined by a tempo setting function -
 see below). If the note name R is used then it is treated as a rest (silence).
 Accidentals (flats and sharps) are denoted by the b
 (flat - a lower case b) and # (sharp - a hash symbol).
 For example, Ab is A-flat and C# is C-sharp.
 Note names are case-insensitive.
 The octave and duration parameters are states that carry over to
 subsequent notes until re-specified. The default states are octave = 4
 (containing middle C) and duration = 4 (a crotchet, given the default
 tempo settings - see below).
 For example, if 4 ticks is a crotchet, the following list is crotchet,
 quaver, quaver, crotchet based arpeggio:
 ['c1:4', 'e:2', 'g', 'c2:4']
 The opening of Beethoven’s 5th Symphony would be encoded thus:
 ['r4:2', 'g', 'g', 'g', 'eb:8', 'r:2', 'f', 'f', 'f', 'd:8']
 The definition and scope of an octave conforms to the table
 listed on this page about scientific pitch notation. For example, middle
 “C” is 'c4' and concert “A” (440) is 'a4'. Octaves start on the note “C”.
 Built in Melodies
 For the purposes of education and entertainment, the module contains
 several example tunes that are expressed as Python lists. They can be used like this:
 import music
 music.play(music.NYAN)
 All the tunes are either out of copyright, composed by
 <NAME> and
 released to the public domain or have an unknown composer and are
 covered by a fair (educational) use provision.
 They are:
 DADADADUM - the opening to Beethoven’s 5th Symphony in C minor.
 ENTERTAINER - the opening fragment of Scott Joplin’s
 Ragtime classic “The Entertainer”.
 PRELUDE - the opening of the first Prelude in C Major of
 J.S.Bach’s 48 Preludes and Fugues.
 ODE - the “Ode to Joy” theme from Beethoven’s 9th Symphony in D minor.
 NYAN - the Nyan Cat theme (http://www.nyan.cat/). The composer is unknown.
 This is fair use for educational porpoises (as they say in New York).
 RINGTONE - something that sounds like a mobile phone ringtone. To be used
 to indicate an incoming message.
 FUNK - a funky bass line for secret agents and criminal masterminds.
 BLUES - a boogie-woogie 12-bar blues walking bass.
 BIRTHDAY - “Happy Birthday to You...” for copyright status see:
 http://www.bbc.co.uk/news/world-us-canada-34332853
 WEDDING - the bridal chorus from Wagner’s opera “Lohengrin”.
 FUNERAL - the “funeral march” otherwise known as Frédéric Chopin’s
 Piano Sonata No. 2 in B♭ minor, Op. 35.
 PUNCHLINE - a fun fragment that signifies a joke has been made.
 PYTHON - <NAME> Sousa’s march “Liberty Bell” aka, the theme for
 “Monty Python’s Flying Circus” (after which the Python
 programming language is named).
 BADDY - silent movie era entrance of a baddy.
 CHASE - silent movie era chase scene.
 BA_DING - a short signal to indicate something has happened.
 WAWAWAWAA - a very sad trombone.
 JUMP_UP - for use in a game, indicating upward movement.
 JUMP_DOWN - for use in a game, indicating downward movement.
 POWER_UP - a fanfare to indicate an achievement unlocked.
 POWER_DOWN - a sad fanfare to indicate an achievement lost.
 Example
 Plays a simple tune using the Micropython music module.
 This example requires a speaker/buzzer/headphones connected to P0 and GND.
 from microbit import *
 import music
 # play Prelude in C.
 notes = [
 'c4:1', 'e', 'g', 'c5', 'e5', 'g4', 'c5', 'e5', 'c4', 'e', 'g', 'c5', 'e5', 'g4', 'c5', 'e5',
 'c4', 'd', 'g', 'd5', 'f5', 'g4', 'd5', 'f5', 'c4', 'd', 'g', 'd5', 'f5', 'g4', 'd5', 'f5',
 'b3', 'd4', 'g', 'd5', 'f5', 'g4', 'd5', 'f5', 'b3', 'd4', 'g', 'd5', 'f5', 'g4', 'd5', 'f5',
 'c4', 'e', 'g', 'c5', 'e5', 'g4', 'c5', 'e5', 'c4', 'e', 'g', 'c5', 'e5', 'g4', 'c5', 'e5',
 'c4', 'e', 'a', 'e5', 'a5', 'a4', 'e5', 'a5', 'c4', 'e', 'a', 'e5', 'a5', 'a4', 'e5', 'a5',
 'c4', 'd', 'f#', 'a', 'd5', 'f#4', 'a', 'd5', 'c4', 'd', 'f#', 'a', 'd5', 'f#4', 'a', 'd5',
 'b3', 'd4', 'g', 'd5', 'g5', 'g4', 'd5', 'g5', 'b3', 'd4', 'g', 'd5', 'g5', 'g4', 'd5', 'g5',
 'b3', 'c4', 'e', 'g', 'c5', 'e4', 'g', 'c5', 'b3', 'c4', 'e', 'g', 'c5', 'e4', 'g', 'c5',
 'b3', 'c4', 'e', 'g', 'c5', 'e4', 'g', 'c5', 'b3', 'c4', 'e', 'g', 'c5', 'e4', 'g', 'c5',
 'a3', 'c4', 'e', 'g', 'c5', 'e4', 'g', 'c5', 'a3', 'c4', 'e', 'g', 'c5', 'e4', 'g', 'c5',
 'd3', 'a', 'd4', 'f#', 'c5', 'd4', 'f#', 'c5', 'd3', 'a', 'd4', 'f#', 'c5', 'd4', 'f#', 'c5',
 'g3', 'b', 'd4', 'g', 'b', 'd', 'g', 'b', 'g3', 'b3', 'd4', 'g', 'b', 'd', 'g', 'b'
 ]
 music.play(notes)
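 play() can also run in the background and be looped; for example (illustrative):
 music.play(music.RINGTONE, wait=False, loop=True)
 # ... do other work while the ringtone repeats ...
 music.stop() # silence pin0 again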
 """
from typing import Tuple, Union, List
from microbit import pin0, MicroBitAnalogDigitalPin
DADADADUM = 0
ENTERTAINER = 1
PRELUDE = 2
ODE = 3
NYAN = 4
RINGTONE = 5
FUNK = 6
BLUES = 7
BIRTHDAY = 8
WEDDING = 9
FUNERAL = 10
PUNCHLINE = 11
PYTHON = 12
BADDY = 13
CHASE = 14
BA_DING = 15
WAWAWAWAA = 16
JUMP_UP = 17
JUMP_DOWN = 18
POWER_UP = 19
POWER_DOWN = 20
def set_tempo(ticks: int = 4, bpm: int = 120) -> None:
 """
 Sets the approximate tempo for playback.
 A number of ticks (expressed as an integer) constitute a beat.
 Each beat is to be played at a certain frequency per minute
 (expressed as the more familiar BPM - beats per minute -
 also as an integer).
 Suggested default values allow the following useful behaviour:
 music.set_tempo() - reset the tempo to default of ticks = 4, bpm = 120
 music.set_tempo(ticks=8) - change the “definition” of a beat
 music.set_tempo(bpm=180) - just change the tempo
 To work out the length of a tick in milliseconds is very simple arithmetic:
 60000/bpm/ticks_per_beat . For the default values that’s
 60000/120/4 = 125 milliseconds or 1 beat = 500 milliseconds.
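 Another worked example: music.set_tempo(ticks=8, bpm=240) gives
 60000/240/8 = 31.25 milliseconds per tick (250 milliseconds per beat).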
 """
def get_tempo() -> Tuple[int, int]:
 """
 Gets the current tempo as a tuple of integers: (ticks, bpm).
 """
def play(music: Union[str, List[str]],
 pin: MicroBitAnalogDigitalPin = pin0, wait: bool=True, loop: bool=False) -> None:
 """
 Plays music containing the musical DSL defined above.
 If music is a string it is expected to be a single note such as 'c1:4'.
 If music is specified as a list of notes (as defined in the section on
 the musical DSL above) then they are played one after the other to
 perform a melody.
 In both cases the duration and octave values are reset to their defaults
 before the music is played.
 An optional pin argument can be used to override the default output of
 microbit.pin0.
 If wait is set to True, this function is blocking.
 If loop is set to True, the tune repeats until stop is called (see below)
 or the blocking call is interrupted.
 """
def pitch(frequency: int, len=-1, pin: MicroBitAnalogDigitalPin = pin0,
 wait: bool=True) -> None:
 """
 Plays a pitch at the integer frequency given for the specified
 number of milliseconds. For example, if the frequency is set to 440 and
 the length to 1000 then we hear a standard concert A for one second.
 If wait is set to True, this function is blocking.
 If len is negative the pitch is played continuously until
 either the blocking call is interrupted or, in the case of a background call,
 a new frequency is set or stop is called (see below).
 """
def stop(pin: MicroBitAnalogDigitalPin = pin0) -> None:
 """
 Stops all music playback | |
| 
	# functions needed to produce 1 page TLS Validation Reports
def plot_odd_even_transits(LC_df, TLSbestfit_df, TLSTCs_df, TLSmodel_df, ax,fig):
 ax.set_title('All Odd / Even Events')
 
 markersize=5
 fontsize=12
 #T_C_x_position and T_C_y_position control where the text appears for time stamps of "transit events"
 T_C_x_position = -0.55
 T_C_y_position =0.002
 
 time = np.array(LC_df['Time'])
 flux = np.array(LC_df['Detrended Flux'])
 error = np.array(LC_df['Detrended Error'])
 
 P = TLSbestfit_df['TLS Period [d]'].item()
 T0 = TLSbestfit_df['TLS TC [BTJD]'].item()
 Dur= TLSbestfit_df['TLS Dur [hrs]'].item()
 Depth = (TLSbestfit_df['TLS depths [ppt]'].item())/1000 #converted from ppt to fractional depth
 
 spacing = 4* (Depth)
 
 XLIM=1.5*Dur
 
 T_C_array = np.array(TLSTCs_df['TLS TCs [BTJD]'])
 Depths_array = np.array(TLSTCs_df['TLS Depths'])
 Depths_err_array = np.array(TLSTCs_df['TLS Depths Error'])
 
 Modeltime = np.array(TLSmodel_df['Time'])
 Modelflux = np.array(TLSmodel_df['Model'])
 pf_model,ff_model = phasefold(T0,Modeltime,P,Modelflux)
 
 # cut one day's worth of data around each individual odd/even transit
 # and append it to the odd/even arrays for comparison
 
 
 window_size = 1 #day
 EvenDepths=[]
 OddDepths=[]
 Even=[]
 Evenflux=[]
 Odd=[]
 Oddflux=[]
 for x in range(len(T_C_array)):
 if x %2 ==0: #even
 EvenDepths=np.append(EvenDepths,Depths_array[x])
 cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]
 cut_t = time[cut]
 cut_f = flux[cut]
 cut_fe = error[cut]
 if len(cut_f)<1: #in case window size is too small to cut data around
 window_size=1.5
 cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]
 cut_t = time[cut]
 cut_f = flux[cut]
 cut_fe = error[cut] 
 ###
 phasefolded,foldedflux = phasefold(T_C_array[x],cut_t,P,cut_f)
 Even=np.append(Even,phasefolded)
 Evenflux=np.append(Evenflux,foldedflux)
 
 
 else: #odd
 OddDepths=np.append(OddDepths,Depths_array[x])
 cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]
 cut_t = time[cut]
 cut_f = flux[cut]
 cut_fe = error[cut]
 if len(cut_f)<1: #in case window size is too small to cut data around
 window_size=1.5
 cut = np.where( ((T_C_array[x]-window_size) < time) & ((T_C_array[x]+window_size) > time) )[0]
 cut_t = time[cut]
 cut_f = flux[cut]
 cut_fe = error[cut]
 phasefolded,foldedflux = phasefold(T_C_array[x],cut_t,P,cut_f)
 Odd=np.append(Odd,phasefolded)
 Oddflux=np.append(Oddflux,foldedflux)
 
 ax.plot(24*Odd,np.array(Oddflux)+spacing,color='lightblue',marker='.',linestyle='none',markersize=markersize+1, rasterized=True,label='Odd') 
 ax.plot(24*Even,np.array(Evenflux),color='dimgrey',marker='.',linestyle='none',markersize=markersize+1, rasterized=True,label='Even')
 ax.plot(24*pf_model,ff_model,'r.-',linewidth=1,markersize=2,label='TLS Model')
 ax.plot(24*pf_model,ff_model+spacing,'r.-',linewidth=1,markersize=2)
 ###
 ymax = np.nanmax(np.nanmean(ff_model)+2*spacing)
 ymin = np.nanmin(np.nanmean(ff_model)-spacing) 
 
 ax.set_xlim(-XLIM,XLIM)
 ax.set_ylim(ymin,ymax)
 ax.set_xlabel('Phase [Hours since '+str(np.round(T0,3))+' BTJD]')
 ax.set_ylabel('Normalized Flux + Offset')
 
 
 #get odd/even metrics from TLS
 odd_even_mismatch = (TLSbestfit_df['TLS Odd Even Mismatch'].item()) #in standard deviations
 
 tx=0.085
 ty=0.915
 ax.text(tx,ty,'N Transits: '+str(len(T_C_array))+' O/E mismatch '+str(np.round(odd_even_mismatch,3))+r' ${\sigma}$', transform=fig.transFigure, size=fontsize-2)
 
 ax.axhline(y=1-np.nanmean(EvenDepths)/1000,color='green',linestyle='-')
 ax.axhline(y=1+spacing-np.nanmean(OddDepths)/1000,color='green',linestyle='-',label='Odd/Even Mismatch')
 
 handles, labels = ax.get_legend_handles_labels()
 by_label = dict(zip(labels, handles))
# ax.legend(by_label.values(), by_label.keys(),ncol=2,fontsize=fs-1)
 ax.annotate("Odd", xy=( -1, np.nanmean(ff_model)+1.5*spacing ), va='top',xycoords='data', fontsize=fontsize+4,weight="bold")
 ax.annotate("Even", xy=(-1, np.nanmean(ff_model)-0.5*spacing ), va='top',xycoords='data', fontsize=fontsize+4,weight="bold")
def plot_power_spectra(TLS_df,TLSbestfit_df, ax):
 #power spectra
 TLS_periods= TLS_df['TLS Periods']
 TLS_Power = TLS_df['TLS Power']
 
 #best fit params
 P = TLSbestfit_df['TLS Period [d]'].item()
 RP = TLSbestfit_df['Planet Radius [RE]'].item()
 Depth = TLSbestfit_df['TLS depths [ppt]'].item()
 mdumps=TLSbestfit_df['Momentum Dump Rate [d]'].item()
 
 ax.axvline(x=P,color='r')
 if 0.5*P> np.nanmin(TLS_periods):
 ax.axvline(x=0.5*P,color='r',linestyle='--')
 ###
 if 2.0*P < np.nanmax(TLS_periods):
 ax.axvline(x=2.0*P,color='r',linestyle='--')
 
 ax.plot(TLS_periods,TLS_Power, color='black', rasterized=True)
 ax.axvline(x=mdumps,color='grey',linestyle='--')
 ax.set_xlabel('Period [days]')
 ax.set_ylabel('TLS Power')
 ax.set_xticks(np.arange(np.nanmin(TLS_periods), np.nanmax(TLS_periods)+1, 1))
 if np.nanmax(TLS_Power)> 12:
 ax.set_yticks(np.arange(int(np.nanmin(TLS_Power)), int(np.nanmax(TLS_Power)+5), 5))
 if (np.nanmax(TLS_Power)>= 7) & (np.nanmax(TLS_Power)< 12):
 ax.set_yticks(np.arange(int(np.nanmin(TLS_Power)), int(np.nanmax(TLS_Power)+2), 2)) 
 if np.nanmax(TLS_Power)< 7:
 ax.set_yticks(np.arange(int(np.nanmin(TLS_Power)), int(np.nanmax(TLS_Power)+1), 1))
 ax.set_title('TLS Power Spectrum: '+'Period '+str(np.round(P,3))+' d'+' Depth '+str(np.round(Depth,3))+' ppt'+' Planet Radius: '+str(np.round(RP,3))+' RE') 
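# --- Illustrative sketch (not part of the original report code) ---
# Shows the minimal inputs plot_power_spectra expects: a periodogram dataframe
# and a single-row best-fit dataframe. Column names are taken from the code
# above; the numbers are made up, and numpy/pandas/matplotlib are assumed to
# be available.
def _example_plot_power_spectra():
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    periods = np.linspace(0.5, 15.0, 500)
    power = 10.0 * np.exp(-0.5 * ((periods - 3.2) / 0.05) ** 2)  # fake peak at 3.2 d
    TLS_df = pd.DataFrame({'TLS Periods': periods, 'TLS Power': power})
    TLSbestfit_df = pd.DataFrame({'TLS Period [d]': [3.2],
                                  'Planet Radius [RE]': [2.1],
                                  'TLS depths [ppt]': [1.4],
                                  'Momentum Dump Rate [d]': [2.5]})
    fig, ax = plt.subplots()
    plot_power_spectra(TLS_df, TLSbestfit_df, ax)
    return fig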
 
def fullphasefold(time,T0,period,flux,offset):
 phase= (time - T0 + offset*period) / (period) - np.floor((time - T0 + offset*period) / period)
 ind=np.argsort(phase, axis=0)
 return phase[ind],flux[ind]
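# --- Illustrative sketch (not part of the original pipeline) ---
# Demonstrates fullphasefold on a synthetic light curve: times spanning several
# periods are folded onto a single 0-1 phase, shifted so mid-transit lands at
# the chosen offset. All values below are made up for illustration only.
def _example_fullphasefold():
    import numpy as np
    time = np.linspace(2000.0, 2030.0, 3000)   # days (BTJD-like), hypothetical
    flux = np.ones_like(time)
    period, T0, offset = 3.5, 2001.2, 0.25
    phase, folded_flux = fullphasefold(time, T0, period, flux, offset)
    # phase is sorted and confined to [0, 1); mid-transit maps to ~0.25
    return phase, folded_flux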
 
def plot_phasefold_LCs(ID,Sector,LC_df,TLS_df,TLSbestfit_df,TLSTCs_df,TLSmodel_df, axa,axb,axc,axd,axe):
 #fontsize
 fs=12
 
 #plots LC, PFLC, 0.5*P PFLC, 2*P PFLC and full PFLC 
 time=np.array(LC_df['Time'])
 flux=np.array(LC_df['Detrended Flux'])
 error=np.array(LC_df['Detrended Error'])
 sap_flux=np.array(LC_df['SAP Flux'])
 sap_error=np.array(LC_df['SAP Error'])
 
 
 modeltime = np.array(TLSmodel_df['Time'])
 modelflux = np.array(TLSmodel_df['Model'])
 
 T0 = TLSbestfit_df['TLS TC [BTJD]'].item()
 P = TLSbestfit_df['TLS Period [d]'].item()
 Dur=TLSbestfit_df['TLS Dur [hrs]'].item()
 Depth=TLSbestfit_df['TLS depths [ppt]'].item()/1000
 
 XLIM=3.5*Dur
 YLIM=2*Depth
 
 T_C_array = np.array(TLSTCs_df['TLS TCs [BTJD]'])
 
 #calculate full phase 0 to 1 + an offset to shift midtransit from 0 to offset
 offset=0.25
 fullphase, fullphaseflux = fullphasefold(time,T0,P,flux,offset) 
 
 #calculate phase in hours since T0 for 0.5x, 1x, 2x Period
 phasefolda, phasefoldfluxa = phasefold(time,T0,P,flux)
 phasefoldb, phasefoldfluxb = phasefold(time,T0,P*0.5,flux)
 phasefoldc, phasefoldfluxc = phasefold(time,T0,P*2.0,flux)
 
 #do same for transit models
 fullphase_model, fullphaseflux_model = fullphasefold(modeltime,T0,P,modelflux,offset)
 phasefold_modela, phasefoldflux_modela = phasefold(modeltime,T0,P,modelflux)
 phasefold_modelb, phasefoldflux_modelb = phasefold(modeltime,T0,0.5*P,modelflux)
 phasefold_modelc, phasefoldflux_modelc = phasefold(modeltime,T0,2.0*P,modelflux)
 #power spectra limits for PFLCs
 TLSPmin,TLSPmax = np.nanmin(np.array(TLS_df['TLS Periods'])) , np.nanmax(np.array(TLS_df['TLS Periods']))
 
 # plot LC
 cdpp_sap = CDPP(time,sap_flux,sap_error,'median','ppm',binsize=(1.0/24.0))
 cdpp_det = CDPP(time,flux,error,'median','ppm',binsize=(1.0/24.0))
 
 axa.set_title(r'Light Curve CDPPs: SAP CDPP = '+str(np.round(cdpp_sap,1))+' $\sigma _{ppm}$ ''hr$^{-1/2}$, Detrended CDPP ='+str(np.round(cdpp_det,1))+' $\sigma _{ppm}$ ''hr$^{-1/2}$') 
 
 mdumps,t_0,t_1 = momentumdump_check(Sector)
 t_0=np.nanmin(time) #sometimes data near beginning gets chopped based on TESS DRNs
 if Sector==31:
 t_0end = 2157.45371
 t_1end = 2169.94398
 time_mdump1 = t_0+ (t_0end - t_0)/2
 time_mdump2 = t_1+ (t_1end - t_1)/2
 axa.axvline(x=time_mdump1,zorder=-2)
 axa.axvline(x=time_mdump2,zorder=-2) 
 else:
 Num_mdumps = int(np.round((np.nanmax(time)-np.nanmin(time))/mdumps,2))+1
 for N in range(Num_mdumps):
 time_mdump1 = t_0+(N)*mdumps
 time_mdump2 = t_1+(N+0.5)*mdumps 
 if time_mdump1 < t_1:
 axa.axvline(x=time_mdump1,zorder=-2) 
 if time_mdump2 < np.nanmax(time):
 axa.axvline(x=time_mdump2,zorder=-2)
 
 axa.plot(time,flux,'k.',markersize=3,zorder=1, rasterized=True)
 axa.plot(np.array(TLSmodel_df['Time'].to_list()),np.array(TLSmodel_df['Model'].to_list())\
 ,'r.',markersize=1, rasterized=True)
 for x in range(len(T_C_array)):
 ### plotting 3 slightly overlapping to make it more obvious in tiny subplot window
 axa.plot(T_C_array[x], 1.0+1.5*Depth, marker=r'$\downarrow$',color='cyan', rasterized=True)
 axa.plot(T_C_array[x], 1.0+1.6*Depth, marker=r'$\downarrow$',color='cyan', rasterized=True)
 axa.plot(T_C_array[x], 1.0+1.7*Depth, marker=r'$\downarrow$',color='cyan', rasterized=True) 
 ###
# tx=0.39
# ty=0.8
# axa.text(tx,ty,'Momentum Dump Rate: '+str(mdumps)+' days', transform=fig.transFigure, size=fs-2)
 axa.set_xlabel('Time [BTJD]')
 axa.set_ylabel('Norm. Flux')
 
 
 # plot PFLC
 axb.set_title('Phase Folded Light Curve',fontsize=fs-1) 
 axb.plot(24*phasefolda, phasefoldfluxa,'k.',markersize=3,zorder=0, rasterized=True)
 axb.plot(24*phasefold_modela, phasefoldflux_modela,'r.-',markersize=2,zorder=1, rasterized=True)
 axb.set_xlabel(r'Phase [Hours since '+str(np.round(T0,3))+' BTJD]')
 axb.set_ylabel('Norm. Flux')
 
 # plot full PFLC
 axc.set_title("Full Phase Folded Light Curve",fontsize = fs)
 axc.plot(fullphase, fullphaseflux,'k.',markersize=3,zorder=0, rasterized=True) 
 axc.plot(fullphase_model, fullphaseflux_model,'r.-',markersize=2,zorder=1, rasterized=True) 
 axc.set_xlabel('Phase + 0.25')
 axc.set_ylabel('Norm. Flux')
 
 # plot PFLC with 0.5x P
 axd.set_title('0.5x Period = '+(str(np.round(0.5*P,3)))+' days')
 axd.plot(24*phasefoldb, phasefoldfluxb,'k.',markersize=3,zorder=0, rasterized=True)
 #model never looks good at 1/2x period, so it is left commented out
 # axd.plot(24*phasefold_modelb, phasefoldflux_modelb,'r.-',markersize=2,zorder=1, rasterized=True)
 axd.set_xlabel(r'Phase [Hours since '+str(np.round(T0,3))+' BTJD]')
 axd.set_ylabel('Norm. Flux')
 
 # plot PFLC with 2x P
 axe.set_title('2x Period = '+(str(np.round(2*P,3)))+' days')
 axe.plot(24*phasefoldc, phasefoldfluxc,'k.',markersize=3,zorder=0, rasterized=True)
 axe.plot(24*phasefold_modelc, phasefoldflux_modelc,'r.-',markersize=2,zorder=1, rasterized=True)
 axe.set_xlabel(r'Phase [Hours since '+str(np.round(T0,3))+' BTJD]')
 axe.set_ylabel('Norm. Flux')
 
 
 axc.set_xticks(np.arange(0.0, 1+0.25, 0.25)) 
 if XLIM < 8:
 axb.set_xticks(np.arange(int(-XLIM), int(XLIM)+1, 1.0)) 
 axd.set_xticks(np.arange(int(-XLIM), int(XLIM)+1, 1.0))
 axe.set_xticks(np.arange(int(-XLIM), int(XLIM)+1, 1.0))
 if XLIM > 8:
 axb.set_xticks(np.arange(int(-XLIM), int(XLIM)+2, 2.0))
 axd.set_xticks(np.arange(int(-XLIM), int(XLIM)+2, 2.0))
 axe.set_xticks(np.arange(int(-XLIM), int(XLIM)+2, 2.0))
 
 
 axb.set_xlim(-XLIM,XLIM)
 axc.set_xlim(-0.01,1.01)
 axd.set_xlim(-XLIM,XLIM)
 axe.set_xlim(-XLIM,XLIM)
 
 axa.set_ylim(1-YLIM,1+YLIM)
 axb.set_ylim(1-YLIM,1+YLIM)
 axc.set_ylim(1-YLIM,1+YLIM)
 axd.set_ylim(1-YLIM,1+YLIM)
 axe.set_ylim(1-YLIM,1+YLIM)
 #turn off exponential notation in the axes
 axa.ticklabel_format(useOffset=False)
 axb.ticklabel_format(useOffset=False)
 axc.ticklabel_format(useOffset=False)
 axd.ticklabel_format(useOffset=False)
 axe.ticklabel_format(useOffset=False)
 
def Get_FFI(ID,Sector,cadence,path,use_SPOC_aperture='no',for_injections=False):
 #Step 0: Creating directories to save figures and data
 import pandas as pd 
 verbose=False
 if for_injections==False:
 Path=path+'Sector_'+str(Sector)+'/'
 if for_injections==True:
 Path=path
 if cadence=='long':
 saveReportpath = Path+'FFI_TLS_Report/'
 savelcpath= Path+'FFI_PLD_LCs/'
 downloadpath = Path+'cache/'
 if cadence=='short': 
 saveReportpath = Path+'TPF_TLS_Report/'
 savelcpath= Path+'TPF_PLD_LCs/'
 downloadpath = Path+'cache/'
 try:
 bkg_mask = readNDarr(savelcpath,"TIC_"+str(ID)+"_Sector_"+str(Sector)+"_bkg_mask")
 pix_mask = readNDarr(savelcpath,"TIC_"+str(ID)+"_Sector_"+str(Sector)+"_pix_mask")
 images = readNDarr(savelcpath,"TIC_"+str(ID)+"_Sector_"+str(Sector)+"_image_fluxes")
 median_image = np.nanmedian(images, axis=0)
 
 try:
 hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize=11,cadence=cadence,\
 minimum_photon_counts=1,verbose=True,\
 downloadpath=downloadpath)
 except TypeError as TE:
 print(TE)
 import time as clock
 os.system('rm -r ~/.astropy/cache/download/py3/lock') #clear any locks that might be in cache
 clock.sleep(10) #wait 10 seconds and try again
 hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize=11,cadence=cadence,\
 minimum_photon_counts=1,verbose=True,\
 downloadpath=downloadpath) 
 except FileNotFoundError as FNFE:
 print('')
 print(FNFE)
 print('recreating cutouts, aperture and background masks with default settings')
 print(' ')
 #Step 1: Download FFI Cutout from MAST
 # sometimes MAST/Astropy has issues, if it fails try again
 # if it got to this point, the FFI definitely exists!
 try:
 hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize=11,cadence=cadence,\
 minimum_photon_counts=1,verbose=True,\
 downloadpath=downloadpath)
 except TypeError as TE:
 print(TE)
 import time as clock
 os.system('rm -r ~/.astropy/cache/download/py3/lock') #clear any locks that might be in cache
 clock.sleep(10) #wait 10 seconds and try again
 hdu,CCD,Camera,quality_mask,reference_pixel = gethdu(ID,Sector,cutoutsize=11,cadence=cadence,\
 minimum_photon_counts=1,verbose=True,\
 downloadpath=downloadpath)
 print('') 
 #step 2: get aperture and background masks
 bkg_mask, pix_mask ,flux, median_image, SAP_LC = SAP(ID=ID,Sector=Sector,cutoutsize=11,hdu=hdu,\
 quality_mask=quality_mask,threshold=7.5,cadence=cadence,\
 reference_pixel=reference_pixel,verbose=False,\
 savelcpath=savelcpath,use_SPOC_aperture='no') 
 #resave pkl data
 saveNDarr(pix_mask,savelcpath,"TIC_"+str(ID)+"_Sector_"+str(Sector)+"_pix_mask")
 saveNDarr(bkg_mask,savelcpath,"TIC_"+str(ID)+"_Sector_"+str(Sector)+"_bkg_mask")
 saveNDarr(flux,savelcpath,"TIC_"+str(ID)+"_Sector_"+str(Sector)+"_image_fluxes") 
 ###
 #Step 3: Get information on target star and apply some basic selection cuts
 try:
 qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID)
 except (requests.exceptions.ConnectionError,requests.exceptions.HTTPError) as E:
 import time as clock #make sure clock exists even when the FileNotFoundError branch above did not run
 clock.sleep(5) #pause 5 seconds then try again
 qld, M_star, M_star_min, M_star_max, R_star, R_star_min, R_star_max = catalog_info(TIC_ID=ID) 
 ###
 ###
 #Get more stellar params
 ###
 Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist = Get_stellar_params(ID,downloadpath)
 ###
 CCD=hdu[0].header['CCD']
 Camera=hdu[0].header['Camera'] 
 wcs = WCS(hdu[2].header)
 return median_image, hdu, wcs, pix_mask, bkg_mask, Vmag,Tmag,Gmag,rmag,imag,zmag,Jmag,Hmag,Kmag,Teff,ra,dec,logg,rho,dist,CCD,Camera 
 
 
def plot_image(ID,Sector,cadence,path,ax_placement,fig,fs,for_injections=False):
 if for_injections==False:
 Path=path+'Sector_'+str(Sector)+'/'
 if for_injections==True:
 Path=path
 if cadence=='long':
 saveReportpath = Path+'FFI_TLS_Report/'
 savelcpath= Path+'FFI_PLD_LCs/'
 downloadpath = Path+'cache/'
 if cadence=='short': 
 saveReportpath = Path+'TPF_TLS_Report/'
 savelcpath= Path+'TPF_PLD_LCs/'
 downloadpath = Path+'cache/'
 
 #get image data and stellar params
 median_image, | |
| 
	<gh_stars>1-10
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
"""
A plugin to navigate HDUs in a FITS file or planes in a 3D cube or
higher dimension dataset.
**Plugin Type: Local**
``MultiDim`` is a local plugin, which means it is associated with a
channel. An instance can be opened for each channel.
**Usage**
``MultiDim`` is a plugin designed to handle data cubes and multi-HDU FITS
files. If you have opened such an image in Ginga, starting this plugin
will enable you to browse to other slices of the cube or view other
HDUs.
For a data cube, you can save a slice as an image using the "Save Slice"
button or create a movie using the "Save Movie" button by entering the
"Start" and "End" slice indices. This feature requires ``mencoder`` to be
installed.
For a FITS table, its data are read in using Astropy table.
Column units are displayed right under the main header ("None" if no unit).
For masked columns, masked values are replaced with pre-defined fill values.
**Browsing HDUs**
Use the HDU drop down list in the upper part of the UI to browse and
select an HDU to open in the channel.
**Navigating Cubes**
Use the controls in the lower part of the UI to select the axis and
to step through the planes in that axis.
**User Configuration**
"""
import time
import re
import os
from distutils import spawn
from contextlib import contextmanager
from ginga.gw import Widgets
from ginga.misc import Future
from ginga import GingaPlugin
from ginga.util.iohelper import get_hdu_suffix
from ginga.util.videosink import VideoSink
import numpy as np
have_mencoder = False
if spawn.find_executable("mencoder"):
 have_mencoder = True
__all__ = ['MultiDim']
class MultiDim(GingaPlugin.LocalPlugin):
 def __init__(self, fv, fitsimage):
 # superclass defines some variables for us, like logger
 super(MultiDim, self).__init__(fv, fitsimage)
 self.curhdu = 0
 self.naxispath = []
 self.name_pfx = 'NONAME'
 self.img_path = None
 self.img_name = None
 self.file_obj = None
 self.orientation = 'vertical'
 # For animation feature
 self.play_image = None
 self.play_axis = 2
 self.play_idx = 0
 self.play_max = 0
 self.play_int_sec = 0.1
 self.play_min_sec = 1.0 / 30
 self.play_last_time = 0.0
 self.play_fps = 0
 self.timer = fv.get_timer()
 self.timer.set_callback('expired', self._play_next_cb)
 # Load plugin preferences
 prefs = self.fv.get_preferences()
 self.settings = prefs.create_category('plugin_MultiDim')
 self.settings.set_defaults(auto_start_naxis=False)
 self.settings.load(onError='silent')
 self.gui_up = False
 def build_gui(self, container):
 top = Widgets.VBox()
 top.set_border_width(4)
 vbox, sw, orientation = Widgets.get_oriented_box(container,
 scrolled=True)
 self.orientation = orientation
 vbox.set_border_width(4)
 vbox.set_spacing(2)
 fr = Widgets.Frame("HDU")
 vb1 = Widgets.VBox()
 captions = [("Num HDUs:", 'label', "Num HDUs", 'llabel'),
 ]
 w, b = Widgets.build_info(captions, orientation=orientation)
 self.w.numhdu = b.num_hdus
 self.w.update(b)
 vb1.add_widget(w)
 captions = [("Choose HDU", 'combobox'),
 ]
 w, b = Widgets.build_info(captions, orientation=orientation)
 vb1.add_widget(w)
 self.w.hdu = b.choose_hdu
 self.w.hdu.set_tooltip("Choose which HDU to view")
 self.w.hdu.add_callback('activated', self.set_hdu_cb)
 fr.set_widget(vb1)
 vbox.add_widget(fr, stretch=0)
 fr = Widgets.Frame("NAXIS (data cubes)")
 self.naxisfr = fr
 vbox.add_widget(fr, stretch=0)
 tbar = Widgets.Toolbar(orientation='horizontal')
 for name, actn, cb in (
 ('first', 'first', lambda w: self.first_slice()),
 ('last', 'last', lambda w: self.last_slice()),
 ('reverse', 'prev', lambda w: self.prev_slice()),
 ('forward', 'next', lambda w: self.next_slice()),
 ('play', 'play', lambda w: self.play_start()),
 ('stop', 'stop', lambda w: self.play_stop()), ):
 iconpath = os.path.join(self.fv.iconpath, "%s_48.png" % name)
 btn = tbar.add_action(None, iconpath=iconpath)
 self.w[actn] = btn
 btn.set_enabled(False)
 btn.set_tooltip(actn)
 btn.add_callback('activated', cb)
 vbox.add_widget(tbar, stretch=0)
 captions = [("Interval:", 'label', "Interval", 'spinfloat',
 "fps", 'llabel'),
 ]
 w, b = Widgets.build_info(captions, orientation=orientation)
 self.w.update(b)
 lower, upper = self.play_min_sec, 8.0
 b.interval.set_limits(lower, upper, incr_value=0.01)
 b.interval.set_value(self.play_int_sec)
 b.interval.set_decimals(2)
 b.interval.add_callback('value-changed', self.play_int_cb)
 b.interval.set_enabled(False)
 vbox.add_widget(w, stretch=0)
 captions = [("Slice:", 'label', "Slice", 'llabel'),
 # ("Value:", 'label', "Value", 'llabel'),
 ("Save Slice", 'button'),
 ]
 w, b = Widgets.build_info(captions, orientation=orientation)
 self.w.update(b)
 b.save_slice.add_callback('activated', lambda w: self.save_slice_cb())
 b.save_slice.set_enabled(False)
 b.save_slice.set_tooltip("Save current slice as RGB image")
 vbox.add_widget(w, stretch=0)
 fr = Widgets.Frame("Movie")
 if have_mencoder:
 captions = [("Start:", 'label', "Start Slice", 'entry',
 "End:", 'label', "End Slice", 'entry',
 'Save Movie', 'button')]
 w, b = Widgets.build_info(captions, orientation=orientation)
 self.w.update(b)
 b.start_slice.set_tooltip("Starting slice")
 b.end_slice.set_tooltip("Ending slice")
 b.start_slice.set_length(6)
 b.end_slice.set_length(6)
 b.save_movie.add_callback(
 'activated', lambda w: self.save_movie_cb())
 b.save_movie.set_enabled(False)
 fr.set_widget(w)
 else:
 infolbl = Widgets.Label()
 infolbl.set_text("Please install 'mencoder' to save as movie")
 fr.set_widget(infolbl)
 vbox.add_widget(fr, stretch=0)
 # spacer = Widgets.Label('')
 # vbox.add_widget(spacer, stretch=1)
 top.add_widget(sw, stretch=1)
 btns = Widgets.HBox()
 btns.set_spacing(4)
 btn = Widgets.Button("Close")
 btn.add_callback('activated', lambda w: self.close())
 btns.add_widget(btn)
 btn = Widgets.Button("Help")
 btn.add_callback('activated', lambda w: self.help())
 btns.add_widget(btn, stretch=0)
 btns.add_widget(Widgets.Label(''), stretch=1)
 top.add_widget(btns, stretch=0)
 container.add_widget(top, stretch=1)
 self.gui_up = True
 def set_hdu_cb(self, w, val):
 # idx = int(val)
 idx = w.get_index()
 idx = max(0, idx)
 try:
 self.set_hdu(idx)
 except Exception as e:
 self.logger.error("Error loading HDU #%d: %s" % (
 idx + 1, str(e)))
 def set_naxis_cb(self, widget, idx, n):
 play_idx = int(idx) - 1
 self.set_naxis(play_idx, n)
 def build_naxis(self, dims, image):
 self.naxispath = list(image.naxispath)
 # build a vbox of NAXIS controls
 captions = [("NAXIS1:", 'label', 'NAXIS1', 'llabel'),
 ("NAXIS2:", 'label', 'NAXIS2', 'llabel')]
 for n in range(2, len(dims)):
 key = 'naxis%d' % (n + 1)
 title = key.upper()
 maxn = int(dims[n])
 self.logger.debug("NAXIS%d=%d" % (n + 1, maxn))
 if maxn <= 1:
 captions.append((title + ':', 'label', title, 'llabel'))
 else:
 captions.append((title + ':', 'label', title, 'llabel',
 "Choose %s" % (title), 'hscale'))
 # Remove old naxis widgets
 for key in self.w:
 if key.startswith('choose_'):
 self.w[key] = None
 hbox = Widgets.HBox()
 if len(dims) > 3: # only add radiobuttons if we have more than 3 dim
 group = None
 for i in range(2, len(dims)):
 title = 'AXIS%d' % (i + 1)
 btn = Widgets.RadioButton(title, group=group)
 if group is None:
 group = btn
 hbox.add_widget(btn)
 self.w[title.lower()] = btn
 w, b = Widgets.build_info(captions, orientation=self.orientation)
 self.w.update(b)
 vbox = Widgets.VBox()
 vbox.add_widget(w)
 vbox.add_widget(hbox)
 for n in range(0, len(dims)):
 key = 'naxis%d' % (n + 1)
 lbl = b[key]
 maxn = int(dims[n])
 lbl.set_text("%d" % maxn)
 slkey = 'choose_' + key
 if slkey in b:
 slider = b[slkey]
 lower = 1
 upper = maxn
 slider.set_limits(lower, upper, incr_value=1)
 text = self.naxispath[n - 2] + 1
 if np.isscalar(text):
 slider.set_value(text)
 else:
 slider.set_value(text[n - 2])
 slider.set_tracking(True)
 # slider.set_digits(0)
 # slider.set_wrap(True)
 slider.add_callback('value-changed', self.set_naxis_cb, n)
 # Disable playback if there is only 1 slice in the higher dimension
 if n > 2 and dims[n] == 1:
 radiobutton = self.w['axis%d' % (n + 1)]
 radiobutton.set_enabled(False)
 # Add vbox of naxis controls to gui
 self.naxisfr.set_widget(vbox)
 # Store play_idx for each extra dimension of the image, so we can return
 # to the index where we left off.
 self.play_indices = ([0 for i in range(len(dims) - 2)] if len(dims) > 3
 else None)
 if len(dims) > 3:
 # dims is only available in this scope, hence the closure factory is defined here
 def play_axis_change_func_creator(n):
 # widget callable needs (widget, value) args
 def play_axis_change():
 self.play_indices[self.play_axis - 2] = self.play_idx % dims[self.play_axis] # noqa
 self.play_axis = n
 self.logger.debug("play_axis changed to %d" % n)
 if self.play_axis < len(dims):
 self.play_max = dims[self.play_axis] - 1
 self.play_idx = self.play_indices[n - 2]
 self.fv.gui_do(self.set_naxis, self.play_idx,
 self.play_axis)
 def check_if_we_need_change(w, v):
 if self.play_axis != n:
 play_axis_change()
 return check_if_we_need_change
 for n in range(2, len(dims)):
 key = 'axis%d' % (n + 1)
 self.w[key].add_callback(
 'activated', play_axis_change_func_creator(n))
 if n == 2:
 self.w[key].set_state(True)
 is_dc = len(dims) > 2
 self.play_axis = 2
 if self.play_axis < len(dims):
 self.play_max = dims[self.play_axis] - 1
 if is_dc:
 self.play_idx = self.naxispath[self.play_axis - 2]
 else:
 self.play_idx = 0
 if self.play_indices:
 text = [i + 1 for i in self.naxispath]
 else:
 text = self.play_idx + 1
 self.w.slice.set_text(str(text))
 # Enable or disable NAXIS animation controls
 self.w.next.set_enabled(is_dc)
 self.w.prev.set_enabled(is_dc)
 self.w.first.set_enabled(is_dc)
 self.w.last.set_enabled(is_dc)
 self.w.play.set_enabled(is_dc)
 self.w.stop.set_enabled(is_dc)
 self.w.interval.set_enabled(is_dc)
 self.w.save_slice.set_enabled(is_dc)
 if have_mencoder:
 self.w.save_movie.set_enabled(is_dc)
 def close(self):
 self.fv.stop_local_plugin(self.chname, str(self))
 return True
 def start(self):
 self.resume()
 def pause(self):
 self.play_stop()
 def resume(self):
 self.redo()
 def stop(self):
 self.gui_up = False
 self.play_stop()
 if self.file_obj is not None:
 try:
 self.file_obj.close()
 except Exception:
 pass
 self.file_obj = None
 self.img_path = None
 self.img_name = None
 self.fv.show_status("")
 def set_hdu(self, idx):
 self.logger.debug("Loading fits hdu #%d" % (idx))
 # determine canonical index of this HDU
 info = self.file_obj.hdu_info[idx]
 aidx = (info.name, info.extver)
 if aidx not in self.file_obj.hdu_db:
 aidx = idx
 sfx = get_hdu_suffix(aidx)
 # See if this HDU is still in the channel's datasrc
 imname = self.name_pfx + sfx
 chname = self.chname
 chinfo = self.channel
 if imname in chinfo.datasrc:
 self.curhdu = idx
 image = chinfo.datasrc[imname]
 self.fv.switch_name(chname, imname)
 return
 # Nope, we'll have to load it
 self.logger.debug("HDU %d not in memory; refreshing from file" % (idx))
 try:
 self.curhdu = idx
 info = self.file_obj.hdu_info[idx]
 image = self.file_obj.get_hdu(idx)
 # create a future for reconstituting this HDU
 future = Future.Future()
 future.freeze(self.fv.load_image, self.img_path, idx=aidx)
 image.set(path=self.img_path, idx=aidx, name=imname,
 image_future=future)
 self.fv.add_image(imname, image, chname=chname)
 self.logger.debug("HDU #%d loaded." % (idx))
 except Exception as e:
 errmsg = "Error loading FITS HDU #%d: %s" | |
| 
	from DejaVu import Viewer
from DejaVu.Spheres import Spheres
from DejaVu.IndexedPolylines import IndexedPolylines
from DejaVu.Materials import propertyNum
from time import sleep
import unittest
import sys
#declare the 'showwarning' variable that is used in the code returned by maa.getSourceCode()
showwarning = False
class CustomAnimations_Tests(unittest.TestCase):
 def setUp(self):
 """Create DejaVu Viewer
 """
 #if not hasattr(self, "vi"):
 self.vi = Viewer()
 def tearDown(self):
 """
 clean-up
 """
 try:
 self.vi.Exit()
 except:
 pass
 
 def test_flyin(self):
 """Tests:
 - creation of FlyInObjectMAA with different options (number
 of keyframes, direction)
 - playing different frames of maa . """
 vi = self.vi
 sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
 materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
 inheritMaterial=False) 
 vi.AddObject(sph)
 from DejaVu.scenarioInterface.animations import FlyInObjectMAA
 # fly in from left
 maa1 = FlyInObjectMAA(sph, objectName=None, direction='left', kfpos=[0, 30])
 actors = maa1.actors
 self.assertEqual(len(actors), 3)
 vi.OneRedraw()
 sph.Set(translation=[0,0,0])
 # check that the position (translation) of the object changes from left to center
 # of the viewer at frames 0 - 15 - 30
 maa1.setValuesAt(0)
 t1 = sph.translation[0]
 vi.OneRedraw()
 self.assertEqual(t1 < 0, True)
 maa1.setValuesAt(15)
 t2 = sph.translation[0] 
 self.assertEqual( int(t1/2), int(t2))
 vi.OneRedraw()
 maa1.setValuesAt(30)
 t3 = sph.translation[0] 
 self.assertEqual(t3, 0)
 vi.OneRedraw()
 # fly in from right
 maa2 = FlyInObjectMAA(sph, objectName=None, direction='right', kfpos=[0, 60])
 actors = maa2.actors
 self.assertEqual(len(actors), 3)
 sph.Set(translation=[0,0,0])
 # check that the position (translation) of the object changes from right to center
 # of the viewer at frames 0 - 30- 60
 maa2.setValuesAt(0)
 vi.OneRedraw()
 t1 = sph.translation[0]
 self.assertEqual(t1 > 0, True)
 maa2.setValuesAt(30)
 vi.OneRedraw()
 t2 = sph.translation[0] 
 self.assertEqual(int(t1/2), int(t2))
 maa2.setValuesAt(60)
 vi.OneRedraw()
 t3 = sph.translation[0] 
 self.assertEqual(t3, 0)
 # fly in from top
 maa3 = FlyInObjectMAA(sph, objectName=None, direction='top', kfpos=[0, 30])
 actors = maa3.actors
 self.assertEqual(len(actors), 3)
 sph.Set(translation=[0,0,0])
 # check that the position (translation) of the object changes from top to center
 # of the viewer at frames 0 - 15 - 30
 maa3.setValuesAt(0)
 vi.OneRedraw()
 t1 = sph.translation[1]
 self.assertEqual(t1 > 0, True)
 maa3.setValuesAt(15)
 vi.OneRedraw()
 t2 = sph.translation[1] 
 self.assertEqual(int(t1/2), int(t2))
 maa3.setValuesAt(30)
 vi.OneRedraw()
 t3 = sph.translation[1] 
 self.assertEqual(t3, 0)
 # fly in from bottom
 
 maa4 = FlyInObjectMAA(sph, objectName=None, direction='bottom', kfpos=[0, 60])
 actors = maa4.actors
 self.assertEqual(len(actors),3)
 sph.Set(translation=[0,0,0])
 sph.Set(visible = 0)
 # check that the position (translation) of the object changes from bottom to center
 # of the viewer at frames 0 - 30 - 60
 maa4.setValuesAt(0)
 vi.OneRedraw()
 # check that the "visible" maa's actor sets the sph.visible attribute to 1
 self.assertEqual(sph.visible, 1)
 t1 = sph.translation[1]
 self.assertEqual( t1 < 0, True)
 maa4.setValuesAt(30)
 vi.OneRedraw()
 t2 = sph.translation[1] 
 self.assertEqual( int(t1/2), int(t2))
 maa4.setValuesAt(60)
 vi.OneRedraw()
 t3 = sph.translation[1] 
 self.assertEqual(t3, 0)
 #run maa 
 maa1.run()
 maa2.run()
 maa3.run()
 self.assertEqual(sph.visible, 1)
 maa4.run()
 #check we can reproduce the maa from its source code:
 maa5 = None
 maasrc = maa4.getSourceCode("maa5")
 viewer = vi
 exec(maasrc)
 assert maa5 != None
 self.assertEqual(len(maa5.actors),3)
 sph.Set(translation=[0,0,0])
 # check that the position (translation) of the object changes from bottom to center
 # of the viewer at frames 0 - 30 - 60
 maa5.setValuesAt(0)
 vi.OneRedraw()
 # check that the "visible" maa's actor sets the sph.visible attribute to 1
 self.assertEqual(sph.visible, 1)
 t1 = sph.translation[1]
 self.assertEqual( t1 < 0, True)
 maa5.setValuesAt(30)
 vi.OneRedraw()
 t2 = sph.translation[1] 
 self.assertEqual( int(t1/2), int(t2))
 maa5.setValuesAt(60)
 vi.OneRedraw()
 t3 = sph.translation[1] 
 self.assertEqual(t3, 0)
 
 def test_flyout(self):
 """Test creation of FlyOutObjectMAA with different options (number of keyframes, direction); playing different frames of maa ."""
 vi = self.vi
 sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
 materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
 inheritMaterial=False) 
 vi.AddObject(sph)
 from DejaVu.scenarioInterface.animations import FlyOutObjectMAA
 # direction: left
 sph.Set(translation=[0,0,0])
 maa1 = FlyOutObjectMAA(sph, objectName=None, direction='left', kfpos=[0, 30])
 actors = maa1.actors
 self.assertEqual (len(actors), 3)
 vi.OneRedraw()
 sph.Set(translation=[5,-5,5])
 # check that the position (translation) of the object changes from center to left side
 # of the viewer at frames 0 - 15 - 30
 maa1.setValuesAt(0)
 t1 = sph.translation
 vi.OneRedraw()
 self.assertEqual ([t1[0], t1[1], t1[2]] , [0, 0, 0])
 maa1.setValuesAt(15)
 t2 = sph.translation[0]
 self.assertEqual(t2 < 0, True)
 vi.OneRedraw()
 maa1.setValuesAt(30)
 t3 = sph.translation[0]
 self.assertEqual(int(t3/2), int(t2))
 vi.OneRedraw()
 # direction: right
 sph.Set(translation=[0,0,0])
 maa2 = FlyOutObjectMAA(sph, objectName=None, direction='right', kfpos=[0, 60])
 actors = maa2.actors
 self.assertEqual(len(actors), 3)
 vi.OneRedraw()
 sph.Set(translation=[5,5,5])
 # check that the position (translation) of the object changes from center to right side
 # of the viewer at frames 0 - 30 - 60
 maa2.setValuesAt(0)
 t1 = sph.translation
 vi.OneRedraw()
 self.assertEqual([t1[0], t1[1], t1[2]] , [0, 0, 0])
 
 maa2.setValuesAt(30)
 t2 = sph.translation[0]
 self.assertEqual(t2 > 0, True)
 vi.OneRedraw()
 maa2.setValuesAt(60)
 t3 = sph.translation[0]
 self.assertEqual(int(t3/2), int(t2))
 vi.OneRedraw()
 # direction: top
 sph.Set(translation=[0,0,0])
 maa3 = FlyOutObjectMAA(sph, objectName=None, direction='top', kfpos=[0, 30])
 actors = maa3.actors
 self.assertEqual (len(actors), 3)
 vi.OneRedraw()
 sph.Set(translation=[-5,5,5]) 
 # check that the position (translation) of the object changes from center to top side
 # of the viewer at frames 0 - 15 - 30
 maa3.setValuesAt(0)
 t1 = sph.translation
 vi.OneRedraw()
 self.assertEqual([t1[0], t1[1], t1[2]] , [0, 0, 0])
 maa3.setValuesAt(15)
 t2 = sph.translation[1]
 self.assertEqual(t2 > 0, True)
 vi.OneRedraw()
 maa3.setValuesAt(30)
 t3 = sph.translation[1]
 self.assertEqual(int(t3/2), int(t2))
 vi.OneRedraw()
 # direction: bottom
 sph.Set(translation=[0,0,0])
 maa4 = FlyOutObjectMAA(sph, objectName=None, direction='bottom', kfpos=[0, 60])
 actors = maa4.actors
 self.assertEqual (len(actors), 3)
 sph.Set(visible = 0)
 vi.OneRedraw()
 sph.Set(translation=[5,5,5])
 # check that the position (translation) of the object changes from center to bottom side
 # of the viewer at frames 0 - 30 - 60
 maa4.setValuesAt(0)
 t1 = sph.translation
 vi.OneRedraw()
 self.assertEqual([t1[0], t1[1], t1[2]] , [0, 0, 0])
 # check that the "visible" maa's actor sets the sph.visible attribute to 1
 self.assertEqual(sph.visible, 1)
 maa4.setValuesAt(30)
 t2 = sph.translation[1]
 self.assertEqual(t2 < 0, True)
 vi.OneRedraw()
 maa4.setValuesAt(60)
 t3 = sph.translation[1]
 self.assertEqual(int(t3/2), int(t2))
 vi.OneRedraw()
 #run maas
 maa1.run()
 maa2.run()
 maa3.run()
 self.assertEqual(sph.visible, 1)
 maa4.run()
 #check we can reproduce the maa from its source code:
 maa5 = None
 maasrc = maa4.getSourceCode("maa5")
 viewer = vi
 exec(maasrc)
 assert maa5 != None
 self.assertEqual (len(maa5.actors), 3)
 sph.Set(translation=[5,5,5])
 vi.OneRedraw()
 # check that the position (translation) of the object changes from center to bottom side
 # of the viewer at frames 0 - 30 - 60
## maa5.setValuesAt(0)
## t1 = sph.translation
## vi.OneRedraw()
## self.assertEqual([t1[0], t1[1], t1[2]] , [0, 0, 0])
## # check that the "visible" maa's actor sets the sph.visible attribute to 1
## self.assertEqual(sph.visible, 1)
## maa5.setValuesAt(30)
## t2 = sph.translation[1]
## self.assertEqual(t2 < 0, True)
## vi.OneRedraw()
## maa5.setValuesAt(60)
## t3 = sph.translation[1]
## self.assertEqual(int(t3/2), int(t2))
 maa5.run()
 
 def check_fadevals(self, maa, obj, vi):
 # check that the opacity of the object changes from 0 to 1 
 # at frames 0 - 15 - 30
 maa.setValuesAt(0)
 val1 = obj.materials[1028].prop[propertyNum['opacity']]
 self.assertEqual(len(val1), 1)
 self.assertEqual (val1[0] , 0)
 self.assertEqual(obj.visible, 1)
 vi.OneRedraw()
 maa.setValuesAt(15)
 val2 = obj.materials[1028].prop[propertyNum['opacity']]
 self.assertEqual(len(val1), 1)
 self.assertEqual (val2[0] , 0.5)
 vi.OneRedraw()
 maa.setValuesAt(30)
 val3 = obj.materials[1028].prop[propertyNum['opacity']]
 self.assertEqual(len(val1), 1)
 self.assertEqual(val3[0], 1)
 vi.OneRedraw()
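 # A minimal usage sketch (illustrative): for maas built with kfpos=[0, 30],
 # the helper above can replace the inlined opacity checks in test_fadein
 # below, e.g. self.check_fadevals(maa, sph, vi)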
 def test_fadein(self):
 """Test creation of FadeInObjectMAA and playing different frames of maa ."""
 vi = viewer = self.vi
 sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
 materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
 inheritMaterial=False) 
 viewer.AddObject(sph)
 from DejaVu.scenarioInterface.animations import FadeInObjectMAA
 maa1 = FadeInObjectMAA(sph, objectName=None, kfpos=[0, 30])
 #check we can reproduce the maa from its source code:
 maa2 = None
 maasrc = maa1.getSourceCode("maa2")
 #viewer = vi
 exec(maasrc)
 assert maa2 != None
 sph.Set(visible = 0)
 for maa in [maa1, maa2]:
 actors = maa.actors
 self.assertEqual (len(actors), 3)
 viewer.OneRedraw()
 # check that the opacity of the object changes from 0 to 1 
 # at frames 0 - 15 - 30
 maa.setValuesAt(0)
 val1 = sph.materials[1028].prop[propertyNum['opacity']]
 self.assertEqual(len(val1), 1)
 self.assertEqual (val1[0] , 0)
 self.assertEqual(sph.visible, 1)
 vi.OneRedraw()
 maa.setValuesAt(15)
 val2 = sph.materials[1028].prop[propertyNum['opacity']]
 self.assertEqual(len(val1), 1)
 self.assertEqual (val2[0] , 0.5)
 vi.OneRedraw()
 maa.setValuesAt(30)
 val3 = sph.materials[1028].prop[propertyNum['opacity']]
 self.assertEqual(len(val1), 1)
 self.assertEqual(val3[0], 1)
 vi.OneRedraw()
 # run maa
 maa.run()
 
 def test_fadeout(self):
 """Test creation of FadeInObjectMAA and playing different frames of maa ."""
 vi = self.vi
 sph = Spheres( 'sph', centers=[ (0,0,0), (5, 0,0), (0,5,0), (0, 0,5) ],
 materials = [ (1,1,1), (1,0,0), (0,1,0), (0,0,1) ],
 inheritMaterial=False) 
 vi.AddObject(sph)
 sph.Set(opacity = 0.8)
 from DejaVu.scenarioInterface.animations import FadeOutObjectMAA
 #from DejaVu.Materials import propertyNum
 # create an instance of FadeOutObjectMAA object 
 maa1 = FadeOutObjectMAA(sph, objectName=None, kfpos=[0, 60])
 #check we can reproduce the maa from its source code:
 maa2 = None
 maasrc = maa1.getSourceCode("maa2")
 viewer = vi
 print(maasrc)
 exec(maasrc)
 
 assert maa2 != None
 # check the maas
 for maa in [maa1, maa2]:
 actors = | |
| 
	# -*- coding: utf-8 -*-
import base64
import decimal
from decimal import Context, Decimal, Inexact
from .asset import Asset
from .stellarxdr import Xdr
from .utils import (account_xdr_object, best_rational_approximation as best_r,
 division, encode_check, signer_key_xdr_object,
 is_valid_address, convert_hex_to_bytes)
from .exceptions import StellarAddressInvalidError, NotValidParamError
ONE = Decimal(10 ** 5)
class Operation(object):
 """The :class:`Operation` object, which represents an operation on
 Stellar's network.
 An operation is an individual command that mutates Stellar's ledger. It is
 typically rolled up into a transaction (a transaction is a list of
 operations with additional metadata).
 Operations are executed on behalf of the source account specified in the
 transaction, unless there is an override defined for the operation.
 For more on operations, see `Stellar's documentation on operations
 <https://www.stellar.org/developers/guides/concepts/operations.html>`_ as
 well as `Stellar's List of Operations
 <https://www.stellar.org/developers/guides/concepts/list-of-operations.html>`_,
 which includes information such as the security necessary for a given
 operation, as well as information about when validity checks occur on the
 network.
 The :class:`Operation` class is typically not used, but rather one of its
 subclasses is typically included in transactions.
 :param str source: The source account for the payment. Defaults to the
 transaction's source account.
 """
 def __init__(self, source=None):
 self.source = source
 self.body = Xdr.nullclass()
 def __eq__(self, other):
 return self.xdr() == other.xdr()
 def to_xdr_object(self):
 """Creates an XDR Operation object that represents this
 :class:`Operation`.
 """
 try:
 source_account = [account_xdr_object(self.source)]
 except TypeError:
 source_account = []
 return Xdr.types.Operation(source_account, self.body)
 def xdr(self):
 """Packs and base64 encodes this :class:`Operation` as an XDR string.
 """
 op = Xdr.StellarXDRPacker()
 op.pack_Operation(self.to_xdr_object())
 return base64.b64encode(op.get_buffer())
 @staticmethod
 def to_xdr_amount(value):
 """Converts an amount to the appropriate value to send over the network
 as a part of an XDR object.
 Each asset amount is encoded as a signed 64-bit integer in the XDR
 structures. An asset amount unit (that which is seen by end users) is
 multiplied by a factor of one hundred thousand (100,000) to arrive at the
 native 64-bit integer representation. For example, the integer amount
 value 2,512,345 equals 25.12345 units of the asset. This scaling
 allows for five decimal places of precision in human-friendly amount
 units.
 This static method correctly multiplies the value by the scaling factor
 in order to come to the integer value used in XDR structures.
 See `Stellar's documentation on Asset Precision
 <https://www.stellar.org/developers/guides/concepts/assets.html#amount-precision-and-representation>`_
 for more information.
 :param str value: The amount to convert to an integer for XDR
 serialization.
 """
 if not isinstance(value, str):
 raise NotValidParamError("Value of type '{}' must be of type String, but got {}".format(value, type(value)))
 # throw exception if value * ONE has decimal places (it can't be
 # represented as int64)
 try:
 amount = int((Decimal(value) * ONE).to_integral_exact(context=Context(traps=[Inexact])))
 except decimal.Inexact:
 raise NotValidParamError("Value of '{}' must have at most 5 digits after the decimal.".format(value))
 except decimal.InvalidOperation:
 raise NotValidParamError("Value of '{}' must represent a positive number.".format(value))
 return amount
 @staticmethod
 def to_xdr_price(price):
 if isinstance(price, dict):
 if not ('n' in price and 'd' in price):
 raise NotValidParamError(
 "You need pass `price` params as `str` or `{'n': numerator, 'd': denominator}`"
 )
 else:
 price = best_r(price)
 return price
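 # A minimal sketch (illustrative values; assumes best_r returns an
 # {'n': ..., 'd': ...} mapping, mirroring the dict form accepted here):
 #   Operation.to_xdr_price({'n': 1, 'd': 4})  # returned unchanged
 #   Operation.to_xdr_price('0.25')            # reduced via best_r('0.25')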
 @staticmethod
 def from_xdr_amount(value):
 """Converts an amount from an XDR object into its appropriate integer
 representation.
 Each asset amount is encoded as a signed 64-bit integer in the XDR
 structures. An asset amount unit (that which is seen by end users) is
 multiplied by a factor of one hundred thousand (100,000) to arrive at the
 native 64-bit integer representation. For example, the integer amount
 value 2,512,345 equals 25.12345 units of the asset. This scaling
 allows for five decimal places of precision in human-friendly amount
 units.
 This static method correctly divides the value by the scaling factor in
 order to get the proper units of the asset.
 See `Stellar's documentation on Asset Precision
 <https://www.stellar.org/developers/guides/concepts/assets.html#amount-precision-and-representation>`_
 for more information.
 :param int value: The amount to convert to a string from an XDR int64
 amount.
 """
 return str(Decimal(value) / ONE)
 @classmethod
 def type_code(cls):
 pass
 @classmethod
 def from_xdr_object(cls, operation):
 for sub_cls in cls.__subclasses__():
 if sub_cls.type_code() == operation.type:
 return sub_cls.from_xdr_object(operation)
 raise NotImplementedError("Operation of type={} is not implemented"
 ".".format(operation.type))
 @classmethod
 def from_xdr(cls, xdr):
 """Create the appropriate :class:`Operation` subclass from the XDR
 structure.
 Decode an XDR base64 encoded string and create the appropriate
 :class:`Operation` object.
 :param str xdr: The XDR object to create an :class:`Operation` (or
 subclass) instance from.
 """
 xdr_decode = base64.b64decode(xdr)
 op = Xdr.StellarXDRUnpacker(xdr_decode)
 op = op.unpack_Operation()
 return cls.from_xdr_object(op)
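# A minimal sketch of the fixed-point conversion above (illustrative values
# only). With ONE = Decimal(10 ** 5), user-facing amounts carry at most five
# decimal places:
#   Operation.to_xdr_amount('25.12345')   # -> 2512345
#   Operation.from_xdr_amount(2512345)    # -> '25.12345'
#   Operation.to_xdr_amount('25.123456')  # raises NotValidParamError (Inexact)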
class CreateAccount(Operation):
 """The :class:`CreateAccount` object, which represents a Create Account
 operation on Stellar's network.
 This operation creates and funds a new account with the specified starting
 balance.
 Threshold: Medium
 :param str destination: Destination account ID to create an account for.
 :param str starting_balance: Amount in KIN the account should be
 funded for. Must be greater than the [reserve balance amount]
 (https://www.stellar.org/developers/learn/concepts/fees.html).
 :param str source: The source account for the payment. Defaults to the
 transaction's source account.
 """
 @classmethod
 def type_code(cls):
 return Xdr.const.CREATE_ACCOUNT
 def __init__(self, destination, starting_balance, source=None):
 super(CreateAccount, self).__init__(source)
 self.destination = destination
 self.starting_balance = starting_balance
 def to_xdr_object(self):
 """Creates an XDR Operation object that represents this
 :class:`CreateAccount`.
 """
 destination = account_xdr_object(self.destination)
 create_account_op = Xdr.types.CreateAccountOp(
 destination, Operation.to_xdr_amount(self.starting_balance))
 self.body.type = Xdr.const.CREATE_ACCOUNT
 self.body.createAccountOp = create_account_op
 return super(CreateAccount, self).to_xdr_object()
 @classmethod
 def from_xdr_object(cls, op_xdr_object):
 """Creates a :class:`CreateAccount` object from an XDR Operation
 object.
 """
 if not op_xdr_object.sourceAccount:
 source = None
 else:
 source = encode_check(
 'account', op_xdr_object.sourceAccount[0].ed25519).decode()
 destination = encode_check(
 'account',
 op_xdr_object.body.createAccountOp.destination.ed25519).decode()
 starting_balance = Operation.from_xdr_amount(
 op_xdr_object.body.createAccountOp.startingBalance)
 return cls(
 source=source,
 destination=destination,
 starting_balance=starting_balance,
 )
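# A minimal usage sketch (hypothetical, truncated destination address):
#   op = CreateAccount(destination='GA7Q...', starting_balance='100')
#   op.to_xdr_object()  # packs a CREATE_ACCOUNT body with the scaled balance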
class Payment(Operation):
 """The :class:`Payment` object, which represents a Payment operation on
 Stellar's network.
 Sends an amount in a specific asset to a destination account.
 Threshold: Medium
 :param str destination: The destination account ID.
 :param Asset asset: The asset to send.
 :param str amount: The amount to send.
 :param str source: The source account for the payment. Defaults to the
 transaction's source account.
 """
 @classmethod
 def type_code(cls):
 return Xdr.const.PAYMENT
 def __init__(self, destination, asset, amount, source=None):
 super(Payment, self).__init__(source)
 self.destination = destination
 self.asset = asset
 self.amount = amount
 def to_xdr_object(self):
 """Creates an XDR Operation object that represents this
 :class:`Payment`.
 """
 asset = self.asset.to_xdr_object()
 destination = account_xdr_object(self.destination)
 amount = Operation.to_xdr_amount(self.amount)
 payment_op = Xdr.types.PaymentOp(destination, asset, amount)
 self.body.type = Xdr.const.PAYMENT
 self.body.paymentOp = payment_op
 return super(Payment, self).to_xdr_object()
 @classmethod
 def from_xdr_object(cls, op_xdr_object):
 """Creates a :class:`Payment` object from an XDR Operation
 object.
 """
 if not op_xdr_object.sourceAccount:
 source = None
 else:
 source = encode_check(
 'account', op_xdr_object.sourceAccount[0].ed25519).decode()
 destination = encode_check(
 'account',
 op_xdr_object.body.paymentOp.destination.ed25519).decode()
 asset = Asset.from_xdr_object(op_xdr_object.body.paymentOp.asset)
 amount = Operation.from_xdr_amount(op_xdr_object.body.paymentOp.amount)
 return cls(
 source=source,
 destination=destination,
 asset=asset,
 amount=amount,
 )
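# A minimal round-trip sketch (hypothetical address; assumes Asset.native()
# is available on this module's Asset class, as in the upstream SDK):
#   op = Payment(destination='GB6N...', asset=Asset.native(), amount='10')
#   xdr_b64 = op.xdr()                      # base64-encoded XDR string
#   same_op = Operation.from_xdr(xdr_b64)   # dispatches to Payment.from_xdr_object
#   assert op == same_op                    # __eq__ compares the packed XDR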
class PathPayment(Operation):
 """The :class:`PathPayment` object, which represents a PathPayment
 operation on Stellar's network.
 Sends an amount in a specific asset to a destination account through a path
 of offers. This allows the asset sent (e.g., 450 KIN) to be different from
 the asset received (e.g., 6 BTC).
 Threshold: Medium
 :param str destination: The destination account to send to.
 :param Asset send_asset: The asset to pay with.
 :param str send_max: The maximum amount of send_asset to send.
 :param Asset dest_asset: The asset the destination will receive.
 :param str dest_amount: The amount the destination receives.
 :param list path: A list of Asset objects to use as the path.
 :param str source: The source account for the payment. Defaults to the
 transaction's source account.
 """
 @classmethod
 def type_code(cls):
 return Xdr.const.PATH_PAYMENT
 def __init__(self,
 destination,
 send_asset,
 send_max,
 dest_asset,
 dest_amount,
 path,
 source=None):
 super(PathPayment, self).__init__(source)
 self.destination = destination
 self.send_asset = send_asset
 self.send_max = send_max
 self.dest_asset = dest_asset
 self.dest_amount = dest_amount
 self.path = path # a list of paths/assets
 def to_xdr_object(self):
 """Creates an XDR Operation object that represents this
 :class:`PathPayment`.
 """
 destination = account_xdr_object(self.destination)
 send_asset = self.send_asset.to_xdr_object()
 dest_asset = self.dest_asset.to_xdr_object()
 path = [asset.to_xdr_object() for asset in self.path]
 path_payment = Xdr.types.PathPaymentOp(
 send_asset, Operation.to_xdr_amount(self.send_max), destination,
 dest_asset, Operation.to_xdr_amount(self.dest_amount), path)
 self.body.type = Xdr.const.PATH_PAYMENT
 self.body.pathPaymentOp = path_payment
 return super(PathPayment, self).to_xdr_object()
 @classmethod
 def from_xdr_object(cls, op_xdr_object):
 """Creates a :class:`PathPayment` object from an XDR Operation
 object.
 """
 if not op_xdr_object.sourceAccount:
 source = None
 else:
 source = encode_check(
 'account', op_xdr_object.sourceAccount[0].ed25519).decode()
 destination = encode_check(
 'account',
 op_xdr_object.body.pathPaymentOp.destination.ed25519).decode()
 send_asset = Asset.from_xdr_object(
 op_xdr_object.body.pathPaymentOp.sendAsset)
 dest_asset = Asset.from_xdr_object(
 op_xdr_object.body.pathPaymentOp.destAsset)
 send_max = Operation.from_xdr_amount(
 op_xdr_object.body.pathPaymentOp.sendMax)
 dest_amount = Operation.from_xdr_amount(
 op_xdr_object.body.pathPaymentOp.destAmount)
 path = []
 if op_xdr_object.body.pathPaymentOp.path:
 for x in op_xdr_object.body.pathPaymentOp.path:
 path.append(Asset.from_xdr_object(x))
 return cls(
 | |
| 
	self.icdf.conf_set(conf['ICDF'])
 if self.FIELD is not None:
 self.FIELD.conf_set(conf['FIELD'])
 if self.VEL is not None:
 self.VEL.conf_set(conf['VEL'])
# ====================
class DrawingConfig():
# ====================
 def __init__(self):
 # ========================
 self.FILECONF = '%s' % COSMO_CONF + 'drawing.conf'
 self.VERSION = __version__
 self.OUTPUT_FIGURE = tk.BooleanVar()
 self.OUTPUT_LEAFLET = tk.BooleanVar()
 self.GEOMAP = tk.BooleanVar()
 self.WITH_AXIS = tk.BooleanVar()
 #EG Cartopy projection and parameters
 self.MAP_PROJECTION = tk.StringVar()
 self.MAP_PROJ_LAT_0		= tk.DoubleVar()
 self.MAP_PROJ_LON_0		= tk.DoubleVar()
 self.MAP_PROJ_MIN_LAT	= tk.DoubleVar()
 self.MAP_PROJ_MAX_LAT	= tk.DoubleVar()
 self.MAP_PROJ_F_NORTH	= tk.DoubleVar()
 self.MAP_PROJ_F_EAST	= tk.DoubleVar()
 self.MAP_PROJ_LAT_T_SCA	= tk.DoubleVar()
 self.MAP_PROJ_T_SCA_LAT	= tk.DoubleVar()
 self.MAP_PROJ_SCA_FAC	= tk.DoubleVar()
 self.MAP_PROJ_SATELLITE_HEIGHT	= tk.DoubleVar()
 self.MAP_PROJ_SWEEP_AXIS = tk.StringVar()
 
 self.MAP_RESOLUTION = tk.StringVar()
 self.EPSG = tk.IntVar()
 self.SOUTH = tk.DoubleVar()
 self.NORTH = tk.DoubleVar()
 self.WEST = tk.DoubleVar()
 self.EAST = tk.DoubleVar()
 self.WIDTH = tk.DoubleVar()
 self.HEIGHT = tk.DoubleVar()
 self.LAT_0 = tk.DoubleVar() #
 self.LON_0 = tk.DoubleVar()
 self.SATELLITE_HEIGHT = tk.DoubleVar()
 self.COASTLINE_SHOW = tk.BooleanVar()
 # EG 1:Natural-Earth 2: EMODNET
 self.COASTLINE_SOURCE = tk.IntVar()
 self.COASTLINE_WIDTH = tk.DoubleVar()
 self.COASTLINE_COLOR = tk.StringVar()
 self.COASTLINE_ZORDER = tk.IntVar()
 self.COUNTRYLINE_SHOW = tk.BooleanVar()
 self.COUNTRYLINE_WIDTH = tk.DoubleVar()
 self.COUNTRYLINE_COLOR = tk.StringVar()
 self.LAND_COLOR = tk.StringVar()
 self.LAND_ZORDER = tk.IntVar()
 self.WATER_COLOR = tk.StringVar()
 self.WATER_ZORDER = tk.IntVar()
 self.TITLE = tk.StringVar()
 self.TITLEFONT = FontProperties().copy()
 self.TITLE_PAD = tk.DoubleVar()
 self.XLABEL = tk.StringVar()
 self.YLABEL = tk.StringVar()
 self.LABEL_SIZE = tk.IntVar()
 self.XLABEL_PAD = tk.DoubleVar()
 self.YLABEL_PAD = tk.DoubleVar()
 self.ZLABEL = tk.StringVar()
 self.TLABEL = tk.StringVar()
 self.DPI = tk.IntVar()
 self.OUT_FILENAME = None
 self.FIGURE_COLOR = tk.StringVar()
 self.TEXT_COLOR = tk.StringVar()
 self.GRID_SHOW = tk.BooleanVar()
 self.GRID_LINEWIDTH = tk.DoubleVar()
 self.MERIDIAN_INI = tk.DoubleVar()
 self.MERIDIAN_FIN = tk.DoubleVar()
 self.MERIDIAN_INT = tk.DoubleVar()
 self.PARALLEL_INI = tk.DoubleVar()
 self.PARALLEL_FIN = tk.DoubleVar()
 self.PARALLEL_INT = tk.DoubleVar()
 self.GRID_COLOR = tk.StringVar()
 self.GRID_FONTCOLOR = tk.StringVar()
 self.GRID_SIZE = tk.IntVar()
 self.GRID_NORTH = tk.BooleanVar()
 self.GRID_SOUTH = tk.BooleanVar()
 self.GRID_WEST = tk.BooleanVar()
 self.GRID_EAST = tk.BooleanVar()
 self.GRID_LINESTYLE = tk.StringVar()
 self.GRID_ALPHA = tk.DoubleVar()
 self.GRID_ZORDER = tk.IntVar()
 self.SCALE_SHOW = tk.BooleanVar()
 self.SCALE_X = tk.DoubleVar()
 self.SCALE_Y = tk.DoubleVar()
 self.SCALE_XO = tk.DoubleVar()
 self.SCALE_YO = tk.DoubleVar()
 self.SCALE_LENGTH = tk.DoubleVar()
 self.SCALE_UNITS = tk.StringVar()
 self.SCALE_STYLE = tk.StringVar()
 self.SCALE_FONTSIZE = tk.IntVar()
 self.SCALE_FONTCOLOR = tk.StringVar()
 self.SCALE_LABELSTYLE = tk.StringVar()
 self.SCALE_FORMAT = tk.StringVar()
 self.SCALE_YOFFSET = tk.DoubleVar()
 self.SCALE_FILLCOLOR1 = tk.StringVar()
 self.SCALE_FILLCOLOR2 = tk.StringVar()
 self.SCALE_LINECOLOR = tk.StringVar()
 self.SCALE_LINEWIDTH = tk.IntVar()
 self.SCALE_ZORDER = tk.IntVar()
 self.cons = None
 #self.X = None
 #self.Y = None
 #EG RELIEF=1 GEBCO, RELIEF=2 EMODNET
 self.RELIEF_SHOW = tk.BooleanVar()
 self.RELIEF = tk.IntVar()
 #EG self.BLUEMARBLE = tk.BooleanVar()
 #EG self.ETOPO = tk.BooleanVar()
 self.BACKGROUND_SCALE = tk.DoubleVar()
 self.RIVERS_SHOW = tk.BooleanVar()
 self.RIVERS_WIDTH = tk.DoubleVar()
 self.RIVERS_COLOR = tk.StringVar()
 #EG ARCGIS changed by EMODNET
 self.EMODNET_ISO = tk.BooleanVar()
 #EG self.ARCGISIMAGE = tk.IntVar()
 #EG self.ARCGISSERVICE = tk.StringVar()
 #EG self.ARCGISSERVICE_LIST = ['ESRI_Imagery_World_2D', \
 #EG 'ESRI_StreetMap_World_2D', \
 #EG 'NatGEo_World_Map', \
 #EG 'Ocean_Basemap', \
 #EG 'World_Imagery', \
 #EG 'World_Physical_Map', \
 #EG 'World_Shaded_Relief', \
 #EG 'World_Street_Map', \
 #EG 'World_Terrain_Base', \
 #EG 'World_Topo_Map']
 #EG self.ARCGISPIXELS = tk.IntVar()
 #EG self.ARCGISDPI = tk.IntVar()
 #EG self.ARCGISVERBOSE = tk.BooleanVar()
 self.LOGO_FILE = tk.StringVar()
 self.LOGO_ZOOM = tk.DoubleVar()
 self.LOGO_LOCATION = tk.StringVar()
 self.LOGO_X = tk.DoubleVar()
 self.LOGO_Y = tk.DoubleVar()
 self.LOGO_DISPLAY = tk.BooleanVar()
 self.TIMESTAMP_SHOW = tk.BooleanVar()
 self.TIMESTAMP_BOLD = tk.BooleanVar()
 self.TIMESTAMP_X = tk.DoubleVar()
 self.TIMESTAMP_Y = tk.DoubleVar()
 self.TIMESTAMP_SIZE = tk.IntVar()
 self.TIMESTAMP_COLOR = tk.StringVar()
 self.VIDEO_NAME = tk.StringVar()
 self.VIDEO_TITLE = tk.StringVar()
 self.VIDEO_AUTHOR = tk.StringVar()
 self.VIDEO_COMMENT = tk.StringVar()
 self.VIDEO_FPS = tk.IntVar()
 self.VIDEO_DPI = tk.IntVar()
 self.VIDEO_L1 = tk.IntVar()
 self.VIDEO_L2 = tk.IntVar()
 self.WINDOW_FONT_TYPE = tk.StringVar()
 self.WINDOW_FONT_SIZE = tk.IntVar()
 self.MAP_FONT_TYPE = tk.StringVar()
 self.LEGEND = legend.LegendConfig()
 self.LEGEND.SHOW.set(False)
 self.CROP_PAD = tk.DoubleVar()
 self.CROP_PAD.set(0.0)
 # Parameters for Saving frames
 self.SFRAME_PREFIX = tk.StringVar()
 self.SFRAME_POSTFIX_MODE = tk.IntVar()
 self.SFRAME_L1 = tk.IntVar()
 self.SFRAME_L2 = tk.IntVar()
 self.SFRAME_LSTEP = tk.IntVar()
 
 self.SIZE = [9,6]
 self.OUTPUT_FIGURE.set(True)
 self.OUTPUT_LEAFLET.set(False)
 self.GEOMAP.set(True)
 self.WITH_AXIS.set(False)
 #EG Default Cartopy PlateCarree and parameters
 self.MAP_PROJECTION.set('PlateCarree')
 self.MAP_PROJ_LAT_0.set(0.0)
 self.MAP_PROJ_LON_0.set(0.0)
 self.MAP_PROJ_MIN_LAT.set(-80.0)
 self.MAP_PROJ_MAX_LAT.set(84.0)
 self.MAP_PROJ_F_NORTH.set(0.0)
 self.MAP_PROJ_F_EAST.set(0.0)
 self.MAP_PROJ_LAT_T_SCA.set(0.0)
 self.MAP_PROJ_T_SCA_LAT.set(-1)
 self.MAP_PROJ_SCA_FAC.set(-1)
 self.MAP_PROJ_SATELLITE_HEIGHT.set(35785831)
 self.MAP_PROJ_SWEEP_AXIS.set('y')
 
 self.MAP_RESOLUTION.set('50m')
 self.EPSG.set(4326)
 
 #EG self.MAP_PROJECTION.set('cyl')
 #EG self.MAP_RESOLUTION.set('l')
 
 self.SOUTH.set(-90)
 self.NORTH.set(90)
 self.WEST.set(-180)
 self.EAST.set(180)
 self.WIDTH.set(0)
 self.HEIGHT.set(0)
 self.LAT_0.set(0)
 self.LON_0.set(0)
 self.SATELLITE_HEIGHT.set(35786000)
 self.COASTLINE_SHOW.set(False)
 self.COASTLINE_SOURCE.set(1)
 self.COASTLINE_WIDTH.set(1)
 self.COASTLINE_COLOR.set('black')
 self.COASTLINE_ZORDER.set(1)
 self.COUNTRYLINE_SHOW.set(False)
 self.COUNTRYLINE_WIDTH.set(2)
 self.COUNTRYLINE_COLOR.set('grey')
 self.LAND_COLOR.set('coral')
 self.LAND_ZORDER.set(0)
 self.WATER_COLOR.set('white')
 self.WATER_ZORDER.set(0)
 self.TITLE.set('')
 self.TITLEFONT.set_size(22)
 self.TITLEFONT.set_weight('bold')
 self.TITLE_PAD.set(0)
 self.XLABEL.set('Longitude')
 self.YLABEL.set('Latitude')
 self.LABEL_SIZE.set(16)
 self.XLABEL_PAD.set(0.12)
 self.YLABEL_PAD.set(0.05)
 self.ZLABEL.set('')
 self.TLABEL.set('')
 self.DPI.set(72)
 self.FIGURE_COLOR.set('white')
 self.TEXT_COLOR.set('black')
 self.GRID_SHOW.set(True)
 self.GRID_LINEWIDTH.set(1)
 self.MERIDIAN_INI.set(-180)
 self.MERIDIAN_FIN.set(210)
 self.MERIDIAN_INT.set(60)
 self.PARALLEL_INI.set(-90)
 self.PARALLEL_FIN.set(120)
 self.PARALLEL_INT.set(30)
 self.GRID_COLOR.set('black')
 self.GRID_FONTCOLOR.set('black')
 self.GRID_SIZE.set(12)
 self.GRID_NORTH.set(False)
 self.GRID_SOUTH.set(True)
 self.GRID_WEST.set(True)
 self.GRID_EAST.set(False)
 self.GRID_LINESTYLE.set(':')
 self.GRID_ALPHA.set(1.0)
 self.GRID_ZORDER.set(2)
 self.SCALE_SHOW.set(False)
 self.SCALE_X.set(0)
 self.SCALE_Y.set(0)
 self.SCALE_XO.set(0.5)
 self.SCALE_YO.set(0.05)
 self.SCALE_LENGTH.set(400)
 self.SCALE_UNITS.set('km')
 self.SCALE_STYLE.set('fancy')
 self.SCALE_FONTSIZE.set(14)
 self.SCALE_FONTCOLOR.set('k')
 self.SCALE_LABELSTYLE.set('simple')
 self.SCALE_FORMAT.set('%d')
 self.SCALE_YOFFSET.set(None)
 self.SCALE_FILLCOLOR1.set('w')
 self.SCALE_FILLCOLOR2.set('k')
 self.SCALE_LINECOLOR.set('k')
 self.SCALE_LINEWIDTH.set(3)
 self.SCALE_ZORDER.set(10)
 #EG RELIEF refers to GEBCO tile vms
 self.RELIEF_SHOW.set(False)
 self.RELIEF.set(1)
 self.BACKGROUND_SCALE.set(1.0)
 self.RIVERS_SHOW.set(False)
 self.RIVERS_WIDTH.set(0.2)
 self.RIVERS_COLOR.set('blue')
 #EG EMODNET
 #self.EMODNET_COAST.set(False)
 self.EMODNET_ISO.set(False)
 #EG self.ARCGISIMAGE.set(0)
 #EG self.ARCGISSERVICE.set('ESRI_Imagery_world_2D')
 #EG self.ARCGISPIXELS.set(400)
 #EG self.ARCGISDPI.set(96)
 #EG self.ARCGISVERBOSE.set(True)
 self.LOGO_FILE.set(COSMO_CONF_PATH+'MEDOSMOSIS.png')
 self.LOGO_IMAGE = image.imread(self.LOGO_FILE.get())
 self.LOGO_ZOOM.set(0.20)
 self.LOGO_LOCATION.set('SW')
 self.LOGO_DISPLAY.set(False)
 self.ISOBAT_PATH = tk.StringVar()
 self.ISOBAT_PATH.set(COSMO_ROOT+'/data/isobaths/')
 # self.ISOBAT_Z = [ 0, 100, 200, 400, 
 # 600, 800, 1000, 1200, 1400,
 # 1600, 1800, 2000, 2500, 3000,
 # ]
 #
 # self.ISOBAT_LABEL = ['coastline', '100 m', '200 m', '400 m',
 # '600 m', '800 m','1000 m','1200 m','1400 m',
 # '1600 m','1800 m','2000 m','2500 m','3000 m',
 # ]
 #
 self.ISOBAT_Z = [ 0, 50, 100, 200, 250, 400, 500,
 600, 750, 800, 1000, 1250, 1500, 1750,
 2000, 2500, 3000, 3500, 4000, 4500, 5000]
 
 self.ISOBAT_LABEL = ['coastline', '50 m', '100 m', '200 m', 
 '250 m', '400 m', '500 m', '600 m', '750 m',
 '800 m','1000 m','1250 m','1500 m','1750 m',
 '2000 m','2500 m','3000 m','3500 m','4000 m',
 '4500 m','5000 m' ]
 self.nisobat = len(self.ISOBAT_Z)
 self.ISOBAT_SELEC = []
 self.ISOBAT_COLOR = []
 self.ISOBAT_STYLE = []
 self.ISOBAT_WIDTH = []
 self.ISOBAT_SHOW = []
 self.ISOBAT_DATA = []
 for i in range(self.nisobat):
 self.ISOBAT_SELEC.append(tk.BooleanVar(value=False))
 self.ISOBAT_COLOR.append(tk.StringVar(value='black'))
 self.ISOBAT_STYLE.append(tk.StringVar(value='-'))
 self.ISOBAT_WIDTH.append(tk.DoubleVar(value=1))
 self.ISOBAT_SHOW.append(False)
 self.ISOBAT_DATA.append(None)
 self.ISOBAT_LABEL_SHOW = tk.BooleanVar()
 self.ISOBAT_LABEL_SHOW.set(False)
 self.ISOBAT_NPLOT = sum(self.ISOBAT_SHOW)
 self.ISOBAT_ZPOINTER = tk.StringVar()
 self.ISOBAT_ZPOINTER.set(self.ISOBAT_LABEL[0])
 self.ISOBAT_selected = False
 self.ISOBAT_loaded = False
 self.ISOBAT_cropped = False
 self.ISOBAT_LEGEND = legend.LegendConfig()
 self.ISOBAT_LEGEND.TITLE.set('Isobaths')
 self.ISOBAT_LEGEND.LOC.set(2)
 self.TIMESTAMP_SHOW.set(False)
 self.TIMESTAMP_BOLD.set(False)
 self.TIMESTAMP_X.set(0.12)
 self.TIMESTAMP_Y.set(0.12)
 self.TIMESTAMP_COLOR.set('black')
 self.TIMESTAMP_SIZE.set(15)
 self.VIDEO_NAME.set('movie.mp4')
 self.VIDEO_TITLE.set('COSMO-VIEW Movie')
 self.VIDEO_AUTHOR.set('Matplotlib')
 self.VIDEO_COMMENT.set('Ocean currents movie')
 self.VIDEO_FPS.set(2)
 self.VIDEO_DPI.set(100)
 self.VIDEO_L1.set(0)
 self.SFRAME_PREFIX.set('Frame')
 self.SFRAME_POSTFIX_MODE.set(0)
 self.SFRAME_L1.set(0)
 self.SFRAME_LSTEP.set(1)
 self.WINDOW_FONT_TYPE.set('Helvetica')
 self.WINDOW_FONT_SIZE.set(14)
 font_type = matplotlib.rcParams['font.family'][0]
 self.MAP_FONT_TYPE.set(font_type)
 self.MESSAGE = "\n"+self.LEGEND.MESSAGE+"\n"+self.ISOBAT_LEGEND.MESSAGE
 if exists(self.FILECONF):
 self.MESSAGE += "\nReading conf. file: "+self.FILECONF
 try:
 conf = self.conf_load(self.FILECONF)
 self.conf_set(conf)
 except:
 self.MESSAGE += '\n\tError reading, using default parameters'
 conf = self.conf_get()
 self.conf_save(conf,self.FILECONF)
 else:
 self.MESSAGE += '\n\tSaving configuration file ...'
 conf = self.conf_get()
 self.conf_save(conf,self.FILECONF)
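 # A minimal usage sketch (assumption: conf_load/conf_save persist the plain
 # dict produced by conf_get, e.g. as JSON on disk):
 #   cfg = DrawingConfig()
 #   cfg.DPI.set(150)
 #   cfg.conf_save(cfg.conf_get(), cfg.FILECONF)   # write current state
 #   cfg.conf_set(cfg.conf_load(cfg.FILECONF))     # restore it later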
 def conf_get(self):
 # ===========================
 '''Get the conf dictionary from program variables'''
 conf = {}
 conf['_VERSION_'] = self.VERSION
 conf['OUTPUT_FIGURE'] = self.OUTPUT_FIGURE.get()
 conf['OUTPUT_LEAFLET'] = self.OUTPUT_LEAFLET.get()
 conf['SIZE'] = [self.SIZE[0],self.SIZE[1]]
 conf['DPI'] = self.DPI.get()
 conf['FIGURE_COLOR'] = self.FIGURE_COLOR.get()
 conf['TEXT_COLOR'] = self.TEXT_COLOR.get()
 conf['GEOMAP'] = self.GEOMAP.get()
 conf['WITH_AXIS'] = self.WITH_AXIS.get()
 
 #EG Default Cartopy PlateCarree and parameters
 conf['MAP_PROJECTION'] = self.MAP_PROJECTION.get()
 conf['MAP_PROJ_LAT_0'] = self.MAP_PROJ_LAT_0.get()
 conf['MAP_PROJ_LON_0'] = self.MAP_PROJ_LON_0.get()
 conf['MAP_PROJ_MIN_LAT'] = self.MAP_PROJ_MIN_LAT.get()
 conf['MAP_PROJ_MAX_LAT'] = self.MAP_PROJ_MAX_LAT.get()
 conf['MAP_PROJ_F_NORTH'] = self.MAP_PROJ_F_NORTH.get()
 conf['MAP_PROJ_F_EAST'] = self.MAP_PROJ_F_EAST.get()
 conf['MAP_PROJ_LAT_T_SCA'] = self.MAP_PROJ_LAT_T_SCA.get()
 conf['MAP_PROJ_T_SCA_LAT'] = self.MAP_PROJ_T_SCA_LAT.get()
 conf['MAP_PROJ_SCA_FAC'] = self.MAP_PROJ_SCA_FAC.get()
 conf['MAP_PROJ_SATELLITE_HEIGHT'] = self.MAP_PROJ_SATELLITE_HEIGHT.get()
 conf['MAP_PROJ_SWEEP_AXIS'] = self.MAP_PROJ_SWEEP_AXIS.get()
 
 conf['MAP_RESOLUTION'] = self.MAP_RESOLUTION.get()
 conf['EPSG'] = self.EPSG.get()
 conf['SOUTH'] = self.SOUTH.get()
 conf['NORTH'] = self.NORTH.get()
 conf['WEST'] = self.WEST.get()
 conf['EAST'] = self.EAST.get()
 conf['WIDTH'] = self.WIDTH.get()
 conf['HEIGHT'] = self.HEIGHT.get()
 conf['LAT_0'] = self.LAT_0.get()
 conf['LON_0'] = self.LON_0.get()
 conf['SATELLITE_HEIGHT'] = self.SATELLITE_HEIGHT.get()
 conf['COASTLINE_SHOW'] = self.COASTLINE_SHOW.get()
 conf['COASTLINE_SOURCE'] = self.COASTLINE_SOURCE.get()
 conf['COASTLINE_WIDTH'] = self.COASTLINE_WIDTH.get()
 conf['COASTLINE_COLOR'] = self.COASTLINE_COLOR.get()
 conf['COASTLINE_ZORDER'] = self.COASTLINE_ZORDER.get()
 conf['COUNTRYLINE_SHOW'] = self.COUNTRYLINE_SHOW.get()
 conf['COUNTRYLINE_WIDTH'] = self.COUNTRYLINE_WIDTH.get()
 conf['COUNTRYLINE_COLOR'] = self.COUNTRYLINE_COLOR.get()
 conf['LAND_COLOR'] = self.LAND_COLOR.get()
 conf['LAND_ZORDER'] = self.LAND_ZORDER.get()
 conf['WATER_COLOR'] = self.WATER_COLOR.get()
 conf['WATER_ZORDER'] = self.WATER_ZORDER.get()
 conf['TITLE'] = self.TITLE.get()
 conf['TITLEFONT'] = self.TITLEFONT.__dict__
 conf['TITLE_PAD'] = self.TITLE_PAD.get()
 conf['XLABEL'] = self.XLABEL.get()
 conf['YLABEL'] = self.YLABEL.get()
 conf['LABEL_SIZE'] = self.LABEL_SIZE.get()
 conf['XLABEL_PAD'] = self.XLABEL_PAD.get()
 conf['YLABEL_PAD'] = self.YLABEL_PAD.get()
 conf['GRID_SHOW'] = self.GRID_SHOW.get()
 conf['GRID_LINEWIDTH'] = self.GRID_LINEWIDTH.get()
 conf['MERIDIAN_INI'] = self.MERIDIAN_INI.get()
 conf['MERIDIAN_FIN'] = self.MERIDIAN_FIN.get()
 conf['MERIDIAN_INT'] = self.MERIDIAN_INT.get()
 conf['PARALLEL_INI'] = self.PARALLEL_INI.get()
 conf['PARALLEL_FIN'] = self.PARALLEL_FIN.get()
 conf['PARALLEL_INT'] = self.PARALLEL_INT.get()
 conf['GRID_COLOR'] = self.GRID_COLOR.get()
 conf['GRID_FONTCOLOR'] = self.GRID_FONTCOLOR.get()
 conf['GRID_SIZE'] = self.GRID_SIZE.get()
 conf['GRID_NORTH'] = self.GRID_NORTH.get()
 conf['GRID_SOUTH'] = self.GRID_SOUTH.get()
 conf['GRID_WEST'] = self.GRID_WEST.get()
 conf['GRID_EAST'] = self.GRID_EAST.get()
 conf['GRID_LINESTYLE'] = self.GRID_LINESTYLE.get()
 conf['GRID_ALPHA'] = self.GRID_ALPHA.get()
 conf['GRID_ZORDER'] = self.GRID_ZORDER.get()
 conf['SCALE_SHOW'] = self.SCALE_SHOW.get()
 conf['SCALE_X'] = self.SCALE_X.get()
 conf['SCALE_Y'] = self.SCALE_Y.get()
 conf['SCALE_XO'] = self.SCALE_XO.get()
 conf['SCALE_YO'] = self.SCALE_YO.get()
 conf['SCALE_LENGTH'] = self.SCALE_LENGTH.get()
 conf['SCALE_UNITS'] = self.SCALE_UNITS.get()
 conf['SCALE_STYLE'] = self.SCALE_STYLE.get()
 conf['SCALE_FONTSIZE'] = self.SCALE_FONTSIZE.get()
 conf['SCALE_FONTCOLOR'] = self.SCALE_FONTCOLOR.get()
 conf['SCALE_LABELSTYLE'] = self.SCALE_LABELSTYLE.get()
 conf['SCALE_FORMAT'] = self.SCALE_FORMAT.get()
 try:
 conf['SCALE_YOFFSET'] = self.SCALE_YOFFSET.get()
 except:
 conf['SCALE_YOFFSET'] = None
 conf['SCALE_FILLCOLOR1'] = self.SCALE_FILLCOLOR1.get()
 conf['SCALE_FILLCOLOR2'] = self.SCALE_FILLCOLOR2.get()
 conf['SCALE_LINECOLOR'] = self.SCALE_LINECOLOR.get()
 try:
 conf['SCALE_LINEWIDTH'] = self.SCALE_LINEWIDTH.get()
 except:
 conf['SCALE_LINEWIDTH'] = None
 conf['SCALE_ZORDER'] = self.SCALE_ZORDER.get()
 #EG RELIEF refers to GEBCO
 conf['RELIEF_SHOW'] = self.RELIEF_SHOW.get()
 conf['RELIEF'] = self.RELIEF.get()
 #EGconf['BLUEMARBLE'] = self.BLUEMARBLE.get()
 #EGconf['ETOPO'] = self.ETOPO.get()
 conf['BACKGROUND_SCALE'] = self.BACKGROUND_SCALE.get()
 conf['RIVERS_SHOW'] = self.RIVERS_SHOW.get()
 conf['RIVERS_WIDTH'] = self.RIVERS_WIDTH.get()
 conf['RIVERS_COLOR'] = self.RIVERS_COLOR.get()
 #EG EMODNET
 #conf['EMODNET_COAST'] = self.EMODNET_COAST.get()
 conf['EMODNET_ISO'] = self.EMODNET_ISO.get()
 #EG conf['ARCGISIMAGE'] = self.ARCGISIMAGE.get()
 #EG conf['ARCGISSERVICE'] = self.ARCGISSERVICE.get()
 #EG conf['ARCGISPIXELS'] = self.ARCGISPIXELS.get()
 #EG conf['ARCGISDPI'] = self.ARCGISDPI.get()
 #EG conf['ARCGISVERBOSE'] = self.ARCGISVERBOSE.get()
 conf['LOGO_FILE'] = self.LOGO_FILE.get()
 conf['LOGO_ZOOM'] = self.LOGO_ZOOM.get()
 conf['LOGO_LOCATION'] = self.LOGO_LOCATION.get()
 conf['LOGO_X'] = self.LOGO_X.get()
 conf['LOGO_Y'] = self.LOGO_Y.get()
 conf['LOGO_DISPLAY'] = self.LOGO_DISPLAY.get()
 conf['ISOBAT_PATH'] = self.ISOBAT_PATH.get()
 conf['ISOBAT_Z'] = self.ISOBAT_Z
 conf['ISOBAT_LABEL'] = self.ISOBAT_LABEL
 WIDTH = []
 COLOR = []
 STYLE = []
 SELEC = []
 for i in range(self.nisobat):
 WIDTH.append(self.ISOBAT_WIDTH[i].get())
 COLOR.append(self.ISOBAT_COLOR[i].get())
 STYLE.append(self.ISOBAT_STYLE[i].get())
 SELEC.append(self.ISOBAT_SELEC[i].get())
 conf['ISOBAT_WIDTH'] = WIDTH
 conf['ISOBAT_COLOR'] = | |
| 
 def test_in_use_with_available_volume(self):
 volume = self._create_volume_dict()
 self.assertIsNone(self._driver._in_use(volume))
 @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=True)
 def test_retype_with_in_use_volume(self, in_use):
 context = mock.sentinel.context
 volume = self._create_volume_dict(
 status='retyping', attachment=[mock.sentinel.attachment_1])
 new_type = mock.sentinel.new_type
 diff = mock.sentinel.diff
 host = mock.sentinel.host
 self.assertFalse(self._driver.retype(context, volume, new_type, diff,
 host))
 in_use.assert_called_once_with(volume)
 @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 def test_retype_with_no_volume_backing(self, vops, in_use):
 vops.get_backing.return_value = None
 context = mock.sentinel.context
 volume = self._create_volume_dict(status='retyping')
 new_type = mock.sentinel.new_type
 diff = mock.sentinel.diff
 host = mock.sentinel.host
 self.assertTrue(self._driver.retype(context, volume, new_type, diff,
 host))
 @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
 '_get_disk_type')
 @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
 '_get_extra_spec_disk_type')
 @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
 @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
 @mock.patch.object(VMDK_DRIVER, 'ds_sel')
 @mock.patch.object(VMDK_DRIVER, '_select_datastore')
 @mock.patch.object(
 VMDK_DRIVER, '_get_adapter_type', return_value='lsiLogic')
 @mock.patch.object(
 VMDK_DRIVER, '_get_extra_spec_adapter_type', return_value='lsiLogic')
 def test_retype_with_diff_profile_and_ds_compliance(
 self,
 _get_extra_spec_adapter_type,
 _get_adapter_type,
 select_datastore,
 ds_sel,
 get_extra_spec_storage_profile,
 get_storage_profile,
 get_extra_spec_disk_type,
 get_disk_type,
 vops,
 in_use):
 backing = mock.sentinel.backing
 vops.get_backing.return_value = backing
 datastore = vmware_fake.ManagedObjectReference(value='ds1')
 vops.get_datastore.return_value = datastore
 disk_type = mock.sentinel.disk_type
 get_disk_type.return_value = disk_type
 get_extra_spec_disk_type.return_value = disk_type
 self._driver._storage_policy_enabled = True
 profile = 'gold'
 get_storage_profile.return_value = profile
 new_profile = 'silver'
 get_extra_spec_storage_profile.return_value = new_profile
 ds_sel.is_datastore_compliant.return_value = True
 new_profile_id = mock.sentinel.new_profile_id
 ds_sel.get_profile_id.return_value = new_profile_id
 context = mock.sentinel.context
 volume = self._create_volume_dict(status='retyping')
 new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'}
 diff = mock.sentinel.diff
 host = mock.sentinel.host
 self.assertTrue(self._driver.retype(context, volume, new_type, diff,
 host))
 ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
 new_profile)
 select_datastore.assert_not_called()
 vops.change_backing_profile.assert_called_once_with(backing,
 new_profile_id)
 @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
 '_get_disk_type')
 @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
 '_get_extra_spec_disk_type')
 @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
 @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
 @mock.patch.object(VMDK_DRIVER, 'ds_sel')
 @mock.patch.object(VMDK_DRIVER, '_select_datastore')
 def test_retype_with_diff_profile_and_ds_sel_no_candidate(
 self, select_datastore, ds_sel, get_extra_spec_storage_profile,
 get_storage_profile, get_extra_spec_disk_type, get_disk_type,
 vops, in_use):
 backing = mock.sentinel.backing
 vops.get_backing.return_value = backing
 datastore = vmware_fake.ManagedObjectReference(value='ds1')
 vops.get_datastore.return_value = datastore
 disk_type = mock.sentinel.disk_type
 get_disk_type.return_value = disk_type
 get_extra_spec_disk_type.return_value = disk_type
 vops.snapshot_exists.return_value = False
 self._driver._storage_policy_enabled = True
 profile = 'gold'
 get_storage_profile.return_value = profile
 new_profile = 'silver'
 get_extra_spec_storage_profile.return_value = new_profile
 ds_sel.is_datastore_compliant.return_value = False
 select_datastore.side_effect = (
 vmdk_exceptions.NoValidDatastoreException)
 context = mock.sentinel.context
 volume = self._create_volume_dict(status='retyping')
 new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'}
 diff = mock.sentinel.diff
 host = mock.sentinel.host
 self.assertFalse(self._driver.retype(context, volume, new_type, diff,
 host))
 ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
 new_profile)
 select_datastore.assert_called_once_with(
 {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
 hub.DatastoreSelector.PROFILE_NAME: new_profile})
 @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
 '_get_disk_type')
 @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
 '_get_extra_spec_disk_type')
 @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
 @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
 @mock.patch.object(VMDK_DRIVER, 'ds_sel')
 @mock.patch.object(VMDK_DRIVER, '_select_datastore')
 @mock.patch.object(VMDK_DRIVER, '_get_dc')
 @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
 @mock.patch.object(
 VMDK_DRIVER, '_get_adapter_type', return_value='lsiLogic')
 @mock.patch.object(
 VMDK_DRIVER, '_get_extra_spec_adapter_type', return_value='lsiLogic')
 def test_retype_with_diff_extra_spec_and_vol_snapshot(
 self,
 get_extra_spec_adapter_type,
 get_adapter_type,
 get_volume_group_folder,
 get_dc,
 select_datastore,
 ds_sel, get_extra_spec_storage_profile,
 get_storage_profile,
 get_extra_spec_disk_type,
 get_disk_type,
 vops,
 in_use):
 backing = mock.sentinel.backing
 vops.get_backing.return_value = backing
 datastore = vmware_fake.ManagedObjectReference(value='ds1')
 vops.get_datastore.return_value = datastore
 get_disk_type.return_value = 'thin'
 new_disk_type = 'thick'
 get_extra_spec_disk_type.return_value = new_disk_type
 vops.snapshot_exists.return_value = True
 self._driver._storage_policy_enabled = True
 profile = 'gold'
 get_storage_profile.return_value = profile
 new_profile = 'silver'
 get_extra_spec_storage_profile.return_value = new_profile
 ds_sel.is_datastore_compliant.return_value = False
 host = mock.sentinel.host
 rp = mock.sentinel.rp
 new_datastore = mock.Mock(value='ds2')
 summary = mock.Mock(datastore=new_datastore)
 select_datastore.return_value = (host, rp, summary)
 dc = mock.sentinel.dc
 get_dc.return_value = dc
 folder = mock.sentinel.folder
 get_volume_group_folder.return_value = folder
 new_profile_id = mock.sentinel.new_profile_id
 ds_sel.get_profile_id.return_value = new_profile_id
 context = mock.sentinel.context
 volume = self._create_volume_dict(status='retyping')
 new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'}
 diff = mock.sentinel.diff
 host = mock.sentinel.host
 self.assertTrue(self._driver.retype(context, volume, new_type, diff,
 host))
 ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
 new_profile)
 select_datastore.assert_called_once_with(
 {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
 hub.DatastoreSelector.HARD_ANTI_AFFINITY_DS: ['ds1'],
 hub.DatastoreSelector.PROFILE_NAME: new_profile})
 get_dc.assert_called_once_with(rp)
 get_volume_group_folder.assert_called_once_with(dc,
 volume['project_id'])
 vops.relocate_backing.assert_called_once_with(
 backing, new_datastore, rp, host, new_disk_type)
 vops.move_backing_to_folder.assert_called_once_with(backing, folder)
 vops.change_backing_profile.assert_called_once_with(backing,
 new_profile_id)
 @mock.patch.object(VMDK_DRIVER, '_in_use', return_value=False)
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
 '_get_disk_type')
 @mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
 '_get_extra_spec_disk_type')
 @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
 @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_storage_profile')
 @mock.patch.object(VMDK_DRIVER, 'ds_sel')
 @mock.patch.object(VMDK_DRIVER, '_select_datastore')
 @mock.patch.object(VMDK_DRIVER, '_get_dc')
 @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
 @mock.patch('oslo_utils.uuidutils.generate_uuid')
 @mock.patch.object(VMDK_DRIVER, '_delete_temp_backing')
 @mock.patch.object(VMDK_DRIVER, '_get_adapter_type')
 @mock.patch.object(VMDK_DRIVER, '_get_extra_spec_adapter_type')
 def _test_retype_with_diff_extra_spec_and_ds_compliance(
 self,
 get_extra_spec_adapter_type,
 get_adapter_type,
 delete_temp_backing,
 generate_uuid,
 get_volume_group_folder,
 get_dc,
 select_datastore,
 ds_sel,
 get_extra_spec_storage_profile,
 get_storage_profile,
 get_extra_spec_disk_type,
 get_disk_type,
 vops,
 in_use,
 clone_error=False):
 backing = mock.sentinel.backing
 vops.get_backing.return_value = backing
 datastore = vmware_fake.ManagedObjectReference(value='ds1')
 vops.get_datastore.return_value = datastore
 get_disk_type.return_value = 'thin'
 new_disk_type = 'thick'
 get_extra_spec_disk_type.return_value = new_disk_type
 vops.snapshot_exists.return_value = False
 self._driver._storage_policy_enabled = True
 profile = 'gold'
 get_storage_profile.return_value = profile
 new_profile = 'silver'
 get_extra_spec_storage_profile.return_value = new_profile
 ds_sel.is_datastore_compliant.return_value = True
 host = mock.sentinel.host
 rp = mock.sentinel.rp
 summary = mock.Mock(datastore=datastore)
 select_datastore.return_value = (host, rp, summary)
 dc = mock.sentinel.dc
 get_dc.return_value = dc
 folder = mock.sentinel.folder
 get_volume_group_folder.return_value = folder
 new_profile_id = mock.sentinel.new_profile_id
 ds_sel.get_profile_id.return_value = new_profile_id
 uuid = '025b654b-d4ed-47f9-8014-b71a7744eafc'
 generate_uuid.return_value = uuid
 if clone_error:
 vops.clone_backing.side_effect = exceptions.VimException
 else:
 new_backing = mock.sentinel.new_backing
 vops.clone_backing.return_value = new_backing
 adapter_type = 'lsiLogic'
 get_adapter_type.return_value = adapter_type
 new_adapter_type = 'paraVirtual'
 get_extra_spec_adapter_type.return_value = new_adapter_type
 capacity = self.VOL_SIZE * units.Mi
 filename = mock.sentinel.filename
 disk_backing = mock.Mock(filename=filename)
 disk_device = mock.Mock(capacityInKB=capacity, backing=disk_backing)
 vops._get_disk_device.return_value = disk_device
 context = mock.sentinel.context
 volume = self._create_volume_dict(status='retyping')
 new_type = {'id': 'f04a65e0-d10c-4db7-b4a5-f933d57aa2b5'}
 diff = mock.sentinel.diff
 host = mock.sentinel.host
 if clone_error:
 self.assertRaises(exceptions.VimException, self._driver.retype,
 context, volume, new_type, diff, host)
 else:
 self.assertTrue(self._driver.retype(context, volume, new_type,
 diff, host))
 ds_sel.is_datastore_compliant.assert_called_once_with(datastore,
 new_profile)
 select_datastore.assert_called_once_with(
 {hub.DatastoreSelector.SIZE_BYTES: volume['size'] * units.Gi,
 hub.DatastoreSelector.PROFILE_NAME: new_profile})
 get_dc.assert_called_once_with(rp)
 get_volume_group_folder.assert_called_once_with(dc,
 volume['project_id'])
 vops.clone_backing.assert_called_once_with(
 volume['name'], backing, None, volumeops.FULL_CLONE_TYPE,
 datastore, disk_type=new_disk_type, host=host, resource_pool=rp,
 folder=folder)
 if clone_error:
 exp_rename_calls = [mock.call(backing, uuid),
 mock.call(backing, volume['name'])]
 self.assertEqual(exp_rename_calls,
 vops.rename_backing.call_args_list)
 else:
 vops.rename_backing.assert_called_once_with(backing, uuid)
 vops.update_backing_uuid.assert_called_once_with(
 new_backing, volume['id'])
 vops.update_backing_disk_uuid.assert_called_once_with(
 new_backing, volume['id'])
 delete_temp_backing.assert_called_once_with(backing)
 vops.detach_disk_from_backing.assert_called_once_with(
 new_backing, disk_device)
 vops.attach_disk_to_backing.assert_called_once_with(
 new_backing, disk_device.capacityInKB, new_disk_type,
 new_adapter_type, None, disk_device.backing.fileName)
 vops.change_backing_profile.assert_called_once_with(new_backing,
 new_profile_id)
 def test_retype_with_diff_extra_spec_and_ds_compliance(self):
 self._test_retype_with_diff_extra_spec_and_ds_compliance()
 def test_retype_with_diff_extra_spec_ds_compliance_and_clone_error(self):
 self._test_retype_with_diff_extra_spec_and_ds_compliance(
 clone_error=True)
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 def test_extend_backing(self, vops):
 vmdk_path = mock.sentinel.vmdk_path
 vops.get_vmdk_path.return_value = vmdk_path
 dc = mock.sentinel.datacenter
 vops.get_dc.return_value = dc
 disk_type = mock.sentinel.disk_type
 eager_zero = (disk_type == "eagerZeroedThick")
 backing = mock.sentinel.backing
 new_size = 1
 self._driver._extend_backing(backing, new_size, disk_type)
 vops.get_vmdk_path.assert_called_once_with(backing)
 vops.get_dc.assert_called_once_with(backing)
 vops.extend_virtual_disk.assert_called_once_with(new_size,
 vmdk_path,
 dc,
 eager_zero)
 @mock.patch.object(VMDK_DRIVER, 'session')
 @mock.patch('oslo_vmware.vim_util.get_vc_version')
 def test_get_vc_version(self, get_vc_version, session):
 self._driver.configuration.vmware_host_version = None
 version_str = '6.0.0'
 get_vc_version.return_value = version_str
 version = self._driver._get_vc_version()
 self.assertEqual(version_str, version)
 get_vc_version.assert_called_once_with(session)
 @mock.patch('oslo_vmware.vim_util.get_vc_version')
 def test_get_vc_version_override(self, get_vc_version):
 version = self._driver._get_vc_version()
 self.assertEqual(
 self._driver.configuration.vmware_host_version,
 version)
 get_vc_version.assert_not_called()
 @mock.patch('cinder.volume.drivers.vmware.vmdk.LOG')
 @ddt.data('5.5', '6.0')
 def test_validate_vcenter_version(self, version, log):
 # vCenter versions 5.5 and above should pass validation.
 self._driver._validate_vcenter_version(version)
 # Deprecation warning should be logged for vCenter versions which are
 # incompatible with next minimum supported version.
 if not versionutils.is_compatible(
 self._driver.NEXT_MIN_SUPPORTED_VC_VERSION, version,
 same_major=False):
 log.warning.assert_called_once()
 else:
 log.warning.assert_not_called()
 def test_validate_vcenter_version_with_less_than_min_supported_version(
 self):
 # Validation should fail for vCenter versions below 5.5.
 self.assertRaises(exceptions.VMwareDriverException,
 self._driver._validate_vcenter_version,
 '5.1')
 @mock.patch('oslo_vmware.vim_util.find_extension')
 @mock.patch('oslo_vmware.vim_util.register_extension')
 @mock.patch.object(VMDK_DRIVER, 'session')
 def _test_register_extension(
 self, session, register_extension, find_extension,
 ext_exists=False):
 if not ext_exists:
 find_extension.return_value = None
 self._driver._register_extension()
 find_extension.assert_called_once_with(session.vim, vmdk.EXTENSION_KEY)
 if not ext_exists:
 register_extension.assert_called_once_with(
 session.vim, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE,
 label='OpenStack Cinder')
 def test_register_extension(self):
 self._test_register_extension()
 def test_register_extension_with_existing_extension(self):
 self._test_register_extension(ext_exists=True)
 @mock.patch('oslo_vmware.vim_util.find_extension', return_value=None)
 @mock.patch('oslo_vmware.vim_util.register_extension')
 @mock.patch.object(VMDK_DRIVER, 'session')
 def test_concurrent_register_extension(
 self, session, register_extension, find_extension):
 register_extension.side_effect = exceptions.VimFaultException(
 ['InvalidArgument'], 'error')
 self._driver._register_extension()
 find_extension.assert_called_once_with(session.vim, vmdk.EXTENSION_KEY)
 register_extension.assert_called_once_with(
 session.vim, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE,
 label='OpenStack Cinder')
 @mock.patch('oslo_vmware.vim_util.find_extension', return_value=None)
 @mock.patch('oslo_vmware.vim_util.register_extension')
 @mock.patch.object(VMDK_DRIVER, 'session')
 def test_register_extension_failure(
 self, session, register_extension, find_extension):
 register_extension.side_effect = exceptions.VimFaultException(
 ['RuntimeFault'], 'error')
 self.assertRaises(exceptions.VimFaultException,
 self._driver._register_extension)
 find_extension.assert_called_once_with(session.vim, vmdk.EXTENSION_KEY)
 register_extension.assert_called_once_with(
 session.vim, vmdk.EXTENSION_KEY, vmdk.EXTENSION_TYPE,
 label='OpenStack Cinder')
 @mock.patch.object(VMDK_DRIVER, '_validate_params')
 @mock.patch('re.compile')
 @mock.patch.object(VMDK_DRIVER, '_create_session')
 @mock.patch.object(VMDK_DRIVER, '_get_vc_version')
 @mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version')
 @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
 @mock.patch.object(VMDK_DRIVER, '_register_extension')
 @mock.patch('cinder.volume.drivers.vmware.volumeops.VMwareVolumeOps')
 @mock.patch('cinder.volume.drivers.vmware.datastore.DatastoreSelector')
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 @mock.patch.object(VMDK_DRIVER, 'session')
 def _test_do_setup(
 self, session, vops, ds_sel_cls, vops_cls, register_extension,
 get_pbm_wsdl_loc, validate_vc_version, get_vc_version,
 create_session, re_compile, validate_params, enable_pbm=True,
 ds_regex_pat=None, invalid_regex=False):
 mock_session = mock.Mock()
 create_session.return_value = mock_session
 if enable_pbm:
 ver_str = '5.5'
 pbm_wsdl = mock.sentinel.pbm_wsdl
 get_pbm_wsdl_loc.return_value = pbm_wsdl
 else:
 ver_str = '5.1'
 get_vc_version.return_value = ver_str
 cls_1 = mock.sentinel.cls_1
 cls_2 = mock.sentinel.cls_2
 cluster_refs = {'cls-1': cls_1, 'cls-2': cls_2}
 vops.get_cluster_refs.return_value = cluster_refs
 self._driver.configuration.vmware_datastore_regex = ds_regex_pat
 ds_regex = None
 if ds_regex_pat:
 if invalid_regex:
 re_compile.side_effect = re.error("error")
 else:
 ds_regex = mock.sentinel.ds_regex
 re_compile.return_value = ds_regex
 if ds_regex_pat and invalid_regex:
 self.assertRaises(cinder_exceptions.InvalidInput,
 self._driver.do_setup,
 mock.ANY)
 validate_params.assert_called_once_with()
 else:
 self._driver.do_setup(mock.ANY)
 validate_params.assert_called_once_with()
 create_session.assert_called_once_with()
 get_vc_version.assert_called_once_with()
 validate_vc_version.assert_called_once_with(ver_str)
 if enable_pbm:
 get_pbm_wsdl_loc.assert_called_once_with(ver_str)
 mock_session.pbm_wsdl_loc_set.assert_called_once_with(pbm_wsdl)
 self.assertEqual(enable_pbm, self._driver._storage_policy_enabled)
 register_extension.assert_called_once()
 vops_cls.assert_called_once_with(
 session,
 self._driver.configuration.vmware_max_objects_retrieval,
 vmdk.EXTENSION_KEY,
 vmdk.EXTENSION_TYPE)
 self.assertEqual(vops_cls.return_value, self._driver._volumeops)
 ds_sel_cls.assert_called_once_with(
 vops,
 session,
 self._driver.configuration.vmware_max_objects_retrieval,
 ds_regex=ds_regex)
 self.assertEqual(ds_sel_cls.return_value, self._driver._ds_sel)
 vops.get_cluster_refs.assert_called_once_with(
 self._driver.configuration.vmware_cluster_name)
 vops.build_backing_ref_cache.assert_called_once_with()
 self.assertEqual(list(cluster_refs.values()),
 list(self._driver._clusters))
 if ds_regex_pat:
 re_compile.assert_called_once_with(ds_regex_pat)
 def test_do_setup(self):
 self._test_do_setup()
 def test_do_setup_with_pbm_disabled(self):
 self._test_do_setup(enable_pbm=False)
 @mock.patch.object(VMDK_DRIVER, '_validate_params')
 @mock.patch.object(VMDK_DRIVER, '_create_session')
 @mock.patch.object(VMDK_DRIVER, '_get_vc_version')
 @mock.patch.object(VMDK_DRIVER, '_validate_vcenter_version')
 @mock.patch('oslo_vmware.pbm.get_pbm_wsdl_location')
 def test_do_setup_with_invalid_pbm_wsdl(
 self, get_pbm_wsdl_loc, validate_vc_version, get_vc_version,
 create_session, validate_params):
 ver_str = '5.5'
 get_vc_version.return_value = ver_str
 get_pbm_wsdl_loc.return_value = None
 self.assertRaises(exceptions.VMwareDriverException,
 self._driver.do_setup,
 mock.ANY)
 validate_params.assert_called_once_with()
 create_session.assert_called_once_with()
 get_vc_version.assert_called_once_with()
 validate_vc_version.assert_called_once_with(ver_str)
 get_pbm_wsdl_loc.assert_called_once_with(ver_str)
 def test_do_setup_with_ds_regex(self):
 self._test_do_setup(ds_regex_pat='foo')
 def test_do_setup_with_invalid_ds_regex(self):
 self._test_do_setup(ds_regex_pat='(foo', invalid_regex=True)
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 def test_get_dc(self, vops):
 dc_1 = mock.sentinel.dc_1
 dc_2 = mock.sentinel.dc_2
 vops.get_dc.side_effect = [dc_1, dc_2]
 # cache miss
 rp_1 = vmware_fake.ManagedObjectReference(value='rp-1')
 rp_2 = vmware_fake.ManagedObjectReference(value='rp-2')
 self.assertEqual(dc_1, self._driver._get_dc(rp_1))
 self.assertEqual(dc_2, self._driver._get_dc(rp_2))
 self.assertDictEqual({'rp-1': dc_1, 'rp-2': dc_2},
 self._driver._dc_cache)
 # cache hit
 self.assertEqual(dc_1, self._driver._get_dc(rp_1))
 self.assertEqual(dc_2, self._driver._get_dc(rp_2))
 vops.get_dc.assert_has_calls([mock.call(rp_1), mock.call(rp_2)])
 @mock.patch.object(VMDK_DRIVER, '_get_storage_profile')
 @mock.patch.object(VMDK_DRIVER, '_select_datastore')
 @mock.patch.object(VMDK_DRIVER, '_get_dc')
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 @mock.patch.object(VMDK_DRIVER, '_get_volume_group_folder')
 @ddt.data(None, {vmdk.CREATE_PARAM_DISK_SIZE: 2 * VOL_SIZE})
 def test_select_ds_for_volume(
 self, create_params, get_volume_group_folder, vops, get_dc,
 select_datastore, get_storage_profile):
 profile = mock.sentinel.profile
 get_storage_profile.return_value = profile
 host = mock.sentinel.host
 rp = mock.sentinel.rp
 summary = mock.sentinel.summary
 select_datastore.return_value = (host, rp, summary)
 dc = mock.sentinel.dc
 get_dc.return_value = dc
 folder = mock.sentinel.folder
 get_volume_group_folder.return_value = folder
 vol = self._create_volume_dict()
 ret = self._driver._select_ds_for_volume(
 vol, host=host, create_params=create_params)
 self.assertEqual((host, rp, folder, summary), ret)
 if create_params:
 exp_size = create_params[vmdk.CREATE_PARAM_DISK_SIZE] * units.Gi
 else:
 exp_size = vol['size'] * units.Gi
 exp_req = {hub.DatastoreSelector.SIZE_BYTES: exp_size,
 hub.DatastoreSelector.PROFILE_NAME: profile}
 select_datastore.assert_called_once_with(exp_req, host)
 get_dc.assert_called_once_with(rp)
 get_volume_group_folder.assert_called_once_with(dc, vol['project_id'])
 @mock.patch.object(VMDK_DRIVER, 'volumeops')
 @mock.patch.object(VMDK_DRIVER, '_get_storage_profile_id')
 def _test_get_connection_info(
 self, get_storage_profile_id, vops, vmdk_connector=False):
 volume = self._create_volume_obj()
 backing = vmware_fake.ManagedObjectReference(value='ref-1')
 profile_id = mock.sentinel.profile_id
 get_storage_profile_id.return_value = profile_id
 if vmdk_connector:
 vmdk_path = mock.sentinel.vmdk_path
 vops.get_vmdk_path.return_value = vmdk_path
 datastore =
	#!/usr/bin/env python
# $Id: Compiler.py,v 1.148 2006/06/22 00:18:22 tavis_rudd Exp $
"""Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
Meta-Data
================================================================================
Author: <NAME> <<EMAIL>>
Version: $Revision: 1.148 $
Start Date: 2001/09/19
Last Revision Date: $Date: 2006/06/22 00:18:22 $
"""
__author__ = "<NAME> <<EMAIL>>"
__revision__ = "$Revision: 1.148 $"[11:-2]
import sys
import os
import os.path
from os.path import getmtime, exists
import re
import types
import time
import random
import warnings
import __builtin__
import copy
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
 STATIC_CACHE, REFRESH_CACHE, SET_LOCAL, SET_GLOBAL,SET_MODULE
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import ErrorCatchers
from Cheetah import NameMapper
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
class Error(Exception): pass
DEFAULT_COMPILER_SETTINGS = {
 ## controlling the handling of Cheetah $placeholders
 'useNameMapper': True, # Unified dotted notation and the searchList
 'useSearchList': True, # if false, assume the first
 # portion of the $variable (before the first dot) is a global,
 # builtin, or local var that doesn't need
 # looking up in the searchlist BUT use
 # namemapper on the rest of the lookup
 'allowSearchListAsMethArg': True,
 'useAutocalling': True, # detect and call callable()'s, requires NameMapper
 'useStackFrames': True, # use NameMapper.valueFromFrameOrSearchList
 # rather than NameMapper.valueFromSearchList
 'useErrorCatcher':False,
 'alwaysFilterNone':True, # filter out None, before the filter is called
 'useFilters':True, # use str instead if =False
 'includeRawExprInFilterArgs':True,
 
 #'lookForTransactionAttr':False,
 'autoAssignDummyTransactionToSelf':False,
 'useKWsDictArgForPassingTrans':True,
 
 ## controlling the aesthetic appearance / behaviour of generated code
 'commentOffset': 1,
 # should shorter str constant chunks be printed using repr rather than ''' quotes
 'reprShortStrConstants': True, 
 'reprNewlineThreshold':3,
 'outputRowColComments':True,
 # should #block's be wrapped in a comment in the template's output
 'includeBlockMarkers': False, 
 'blockMarkerStart':('\n<!-- START BLOCK: ',' -->\n'),
 'blockMarkerEnd':('\n<!-- END BLOCK: ',' -->\n'), 
 'defDocStrMsg':'Autogenerated by CHEETAH: The Python-Powered Template Engine',
 'setup__str__method': False, 
 'mainMethodName':'respond',
 'mainMethodNameForSubclasses':'writeBody',
 'indentationStep': ' '*4,
 'initialMethIndentLevel': 2,
 'monitorSrcFile':False,
 'outputMethodsBeforeAttributes': True,
 ## customizing the #extends directive
 'autoImportForExtendsDirective':True,
 'handlerForExtendsDirective':None, # baseClassName = handler(compiler, baseClassName)
 # a callback hook for customizing the
 # #extends directive. It can manipulate
 # the compiler's state if needed.
 # also see allowExpressionsInExtendsDirective
 
 # input filtering/restriction
 # use lower case keys here!!
 'disabledDirectives':[], # list of directive keys, without the start token
 'enabledDirectives':[], # list of directive keys, without the start token
 'disabledDirectiveHooks':[], # callable(parser, directiveKey)
 'preparseDirectiveHooks':[], # callable(parser, directiveKey)
 'postparseDirectiveHooks':[], # callable(parser, directiveKey)
 'preparsePlaceholderHooks':[], # callable(parser)
 'postparsePlaceholderHooks':[], # callable(parser)
 # the above hooks don't need to return anything
 'expressionFilterHooks':[], # callable(parser, expr, exprType, rawExpr=None, startPos=None)
 # exprType is the name of the directive, 'psp', or 'placeholder'. all
 # lowercase. The filters *must* return the expr or raise an exception.
 # They can modify the expr if needed.
 'templateMetaclass':None, # strictly optional. Only works with new-style baseclasses
 'i18NFunctionName':'self.i18n',
 
 ## These are used in the parser, but I've put them here for the time being to
 ## facilitate separating the parser and compiler: 
 'cheetahVarStartToken':'$',
 'commentStartToken':'##',
 'multiLineCommentStartToken':'#*',
 'multiLineCommentEndToken':'*#',
 'gobbleWhitespaceAroundMultiLineComments':True,
 'directiveStartToken':'#',
 'directiveEndToken':'#',
 'allowWhitespaceAfterDirectiveStartToken':False, 
 'PSPStartToken':'<%',
 'PSPEndToken':'%>',
 'EOLSlurpToken':'#',
 'gettextTokens': ["_", "N_", "ngettext"],
 'allowExpressionsInExtendsDirective': False, # the default restricts it to
 # accepting dotted names 
 'allowEmptySingleLineMethods': False,
 'allowNestedDefScopes': True,
 'allowPlaceholderFilterArgs': True,
 ## See Parser.initDirectives() for the use of the next 3
 #'directiveNamesAndParsers':{}
 #'endDirectiveNamesAndHandlers':{}
 #'macroDirectives':{}
 }
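# Illustrative note (not part of the original module): with the default tokens
# above, a template such as
#   #if $user ## directiveStartToken/EndToken are both '#', '##' starts a comment
#   Hello $user.name ## cheetahVarStartToken is '$'
#   #end if
# is what these settings describe; PSP code blocks are delimited by '<%' and '%>',
# and multi-line comments by '#*' ... '*#'.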
class GenUtils:
 """An abstract baseclass for the Compiler classes that provides methods that
 perform generic utility functions or generate pieces of output code from
 information passed in by the Parser baseclass. These methods don't do any
 parsing themselves.
 """
 def genTimeInterval(self, timeString):
 ##@@ TR: need to add some error handling here
 if timeString[-1] == 's':
 interval = float(timeString[:-1])
 elif timeString[-1] == 'm':
 interval = float(timeString[:-1])*60
 elif timeString[-1] == 'h':
 interval = float(timeString[:-1])*60*60
 elif timeString[-1] == 'd':
 interval = float(timeString[:-1])*60*60*24
 elif timeString[-1] == 'w':
 interval = float(timeString[:-1])*60*60*24*7
 else: # default to minutes
 interval = float(timeString)*60
 return interval
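 # Illustrative examples (added for clarity): genTimeInterval('90s') -> 90.0,
 # '2m' -> 120.0, '1.5h' -> 5400.0, '3d' -> 259200.0; a bare number such as
 # '10' defaults to minutes -> 600.0.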
 def genCacheInfo(self, cacheTokenParts):
 """Decipher a placeholder cachetoken
 """
 cacheInfo = {}
 if cacheTokenParts['REFRESH_CACHE']:
 cacheInfo['type'] = REFRESH_CACHE
 cacheInfo['interval'] = self.genTimeInterval(cacheTokenParts['interval'])
 elif cacheTokenParts['STATIC_CACHE']:
 cacheInfo['type'] = STATIC_CACHE
 return cacheInfo # is empty if no cache
 def genCacheInfoFromArgList(self, argList):
 cacheInfo = {'type':REFRESH_CACHE}
 for key, val in argList:
 if val[0] in '"\'':
 val = val[1:-1]
 if key == 'timer':
 key = 'interval'
 val = self.genTimeInterval(val)
 
 cacheInfo[key] = val
 return cacheInfo
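 # Illustrative example (added for clarity):
 # genCacheInfoFromArgList([('timer', "'30s'"), ('id', "'foo'")])
 # -> {'type': REFRESH_CACHE, 'interval': 30.0, 'id': 'foo'}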
 
 def genCheetahVar(self, nameChunks, plain=False):
 if nameChunks[0][0] in self.setting('gettextTokens'):
 self.addGetTextVar(nameChunks) 
 if self.setting('useNameMapper') and not plain:
 return self.genNameMapperVar(nameChunks)
 else:
 return self.genPlainVar(nameChunks)
 def addGetTextVar(self, nameChunks):
 """Output something that gettext can recognize.
 
 This is a harmless side effect necessary to make gettext work when it
 is scanning compiled templates for strings marked for translation.
 @@TR: another marginally more efficient approach would be to put the
 output in a dummy method that is never called.
 """
 # @@TR: this should be in the compiler not here
 self.addChunk("if False:")
 self.indent()
 self.addChunk(self.genPlainVar(nameChunks[:]))
 self.dedent()
 def genPlainVar(self, nameChunks): 
 """Generate Python code for a Cheetah $var without using NameMapper
 (Unified Dotted Notation with the SearchList).
 """
 nameChunks.reverse()
 chunk = nameChunks.pop()
 pythonCode = chunk[0] + chunk[2]
 while nameChunks:
 chunk = nameChunks.pop()
 pythonCode = (pythonCode + '.' + chunk[0] + chunk[2])
 return pythonCode
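 # Illustrative example (added for clarity):
 # genPlainVar([('a.b.c', True, '[1]'), ('d', False, '()')]) -> 'a.b.c[1].d()'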
 def genNameMapperVar(self, nameChunks):
 """Generate valid Python code for a Cheetah $var, using NameMapper
 (Unified Dotted Notation with the SearchList).
 nameChunks = list of var subcomponents represented as tuples
 [ (name,useAC,remainderOfExpr),
 ]
 where:
 name = the dotted name base
 useAC = whether NameMapper should use autocalling on namemapperPart
 remainderOfExpr = any arglist, index, or slice
 If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC
 is False, otherwise it defaults to True. It is overridden by the global
 setting 'useAutocalling' if this setting is False.
 EXAMPLE
 ------------------------------------------------------------------------
 if the raw Cheetah Var is
 $a.b.c[1].d().x.y.z
 
 nameChunks is the list
 [ ('a.b.c',True,'[1]'), # A
 ('d',False,'()'), # B
 ('x.y.z',True,''), # C
 ]
 
 When this method is fed the list above it returns
 VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True)
 which can be represented as
 VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2]
 where:
 VFN = NameMapper.valueForName
 VFFSL = NameMapper.valueFromFrameOrSearchList
 VFSL = NameMapper.valueFromSearchList # optionally used instead of VFFSL
 SL = self.searchList()
 useAC = self.setting('useAutocalling') # True in this example
 
 A = ('a.b.c',True,'[1]')
 B = ('d',False,'()')
 C = ('x.y.z',True,'')
 C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1],
 'd',False)(),
 'x.y.z',True)
 = VFN(B`, name='x.y.z', executeCallables=True)
 
 B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2]
 A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2]
 Note, if the compiler setting useStackFrames=False (default is true)
 then
 A` = VFSL([locals()]+SL+[globals(), __builtin__], name=A[0], executeCallables=(useAC and A[1]))A[2]
 This option allows Cheetah to be used with Psyco, which doesn't support
 stack frame introspection.
 """
 defaultUseAC = self.setting('useAutocalling')
 useSearchList = self.setting('useSearchList')
 nameChunks.reverse()
 name, useAC, remainder = nameChunks.pop()
 if not useSearchList:
 firstDotIdx = name.find('.')
 if firstDotIdx != -1 and firstDotIdx < len(name):
 beforeFirstDot, afterDot = name[:firstDotIdx], name[firstDotIdx+1:]
 pythonCode = ('VFN(' + beforeFirstDot +
 ',"' + afterDot +
 '",' + repr(defaultUseAC and useAC) + ')'
 + remainder)
 else:
 pythonCode = name+remainder
 elif self.setting('useStackFrames'):
 pythonCode = ('VFFSL(SL,'
 '"'+ name + '",'
 + repr(defaultUseAC and useAC) + ')'
 + remainder)
 else:
 pythonCode = ('VFSL([locals()]+SL+[globals(), __builtin__],'
 '"'+ name + '",'
 + repr(defaultUseAC and useAC) + ')'
 + remainder)
 ## 
 while nameChunks:
 name, useAC, remainder = nameChunks.pop()
 pythonCode = ('VFN(' + pythonCode +
 ',"' + name +
 '",' + repr(defaultUseAC and useAC) + ')'
 + remainder)
 return pythonCode
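 # Illustrative example (added for clarity), assuming the default settings
 # useSearchList=True and useStackFrames=True:
 # genNameMapperVar([('a.b.c', True, '[1]'), ('d', False, '()'), ('x.y.z', True, '')])
 # -> 'VFN(VFN(VFFSL(SL,"a.b.c",True)[1],"d",False)(),"x.y.z",True)'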
 
##################################################
## METHOD COMPILERS
class MethodCompiler(GenUtils):
 def __init__(self, methodName, classCompiler,
 initialMethodComment=None,
 decorator=None):
 self._settingsManager = classCompiler
 self._classCompiler = classCompiler
 self._moduleCompiler = classCompiler._moduleCompiler
 self._methodName = methodName
 self._initialMethodComment = initialMethodComment
 self._setupState()
 self._decorator = decorator
 def setting(self, key):
 return self._settingsManager.setting(key)
 def _setupState(self):
 self._indent = self.setting('indentationStep')
 self._indentLev = self.setting('initialMethIndentLevel')
 self._pendingStrConstChunks = []
 self._methodSignature = None
 self._methodDef = None
 self._docStringLines = []
 self._methodBodyChunks = []
 self._cacheRegionsStack = []
 self._callRegionsStack = []
 self._captureRegionsStack = []
 self._filterRegionsStack = []
 self._isErrorCatcherOn = False
 self._hasReturnStatement = False
 self._isGenerator = False
 
 
 def cleanupState(self):
 """Called by the containing class compiler instance
 """
 pass
 def methodName(self):
 return self._methodName
 def setMethodName(self, name):
 self._methodName = name
 
 ## methods for managing indentation
 
 def indentation(self):
 return self._indent * self._indentLev
 
 def indent(self):
 self._indentLev +=1
 
 def dedent(self):
 if self._indentLev:
 self._indentLev -=1
 else:
 raise Error('Attempt to dedent when the indentLev is 0')
	import torch
import torch.nn as nn
from collections import OrderedDict
import os
import numpy as np
import utils as utils
import torch.nn.functional as F
def get_batch_params(x):
 batch_size = x.shape[0]
 bessel = (batch_size - 1) / batch_size
 mean = torch.mean(x, 0)
 std = torch.sqrt(torch.var(x, 0) * bessel + 1e-05)
 return mean, std
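# Note on get_batch_params (added for clarity): for x of shape (N, C, H, W) it
# returns the per-feature mean and standard deviation of shape (C, H, W); the
# Bessel factor (N-1)/N converts torch.var's unbiased estimate back to the
# biased (batch-norm style) variance before the sqrt.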
def downplay(x, factor):
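 # Note (added for clarity): positions whose sum over the channel dimension is
 # zero (e.g. empty/background pixels) appear to be divided by `factor` so they
 # contribute less; the boolean mask is broadcast to every channel via repeat().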
 idxs = (torch.sum(x, dim=1, keepdim=True) == 0).repeat(1,x.shape[1],1,1)
 x[idxs] = x[idxs] / factor
 return x
class Dropout(nn.Module):
 def __init__(self, p=0.5):
 super(Dropout, self).__init__()
 self.p = p
 def forward(self, x):
 return F.dropout2d(x, self.p, True, False)
 def set_p(self, new_p):
 self.p = 1-new_p
class EncoderDecoder(nn.Module):
 def __init__(self, layer_dims, index, position, noise_std, arglist):
 super(EncoderDecoder, self).__init__()
 # this module will hold the variables it needs to in a dictionary
 # it will also have a set of functions
 self.index = index
 self.layer_dims = layer_dims
 self.position = position
 self.noise_std = noise_std
 self.use_bn = True
 # encoding modules
 if self.position == 'first':
 en_indim = self.layer_dims[self.index]
 en_outdim = self.layer_dims[self.index]
 else:
 en_indim = layer_dims[self.index-1]
 en_outdim = layer_dims[self.index]
 self.en_conv = nn.Conv2d(en_indim, en_outdim, bias=False, **arglist[self.index-1])
 self.en_bn_clean = nn.BatchNorm2d(en_outdim, affine=False)
 self.en_bn_noisy = nn.BatchNorm2d(en_outdim, affine=False)
 self.en_gamma = nn.Parameter(torch.rand(en_outdim, 1, 1))
 self.en_beta = nn.Parameter(torch.rand(en_outdim, 1, 1))
 self.en_nonlin = nn.ReLU()
 # decoding modules
 if self.position == 'last':
 de_indim = self.layer_dims[self.index]
 de_outdim = self.layer_dims[self.index]
 else:
 de_indim = self.layer_dims[self.index+1]
 de_outdim = self.layer_dims[self.index]
 self.de_conv = nn.ConvTranspose2d(de_indim, de_outdim, bias=False, **arglist[self.index])
 self.de_bn = nn.BatchNorm2d(de_outdim, affine=False)
 self.de_gamma = nn.Parameter(torch.rand(de_outdim, 1, 1))
 self.de_beta = nn.Parameter(torch.rand(de_outdim, 1, 1))
 self.ver_dropout = Dropout(0.5)
 self.lat_dropout = Dropout(0.5)
 self.parsig1 = ParamSigmoid()
 self.parsig2 = ParamSigmoid()
 def set_ver_dropout(self, p):
 self.ver_dropout.p = 1-p
 def set_lat_dropout(self, p):
 self.lat_dropout.p = 1-p
 def deconvout(self, in_size):
 if self.position == 'last':
 return in_size
 else:
 # Note, we're making an assumption of squareness
 ker = self.de_conv.kernel_size[0]
 stride = self.de_conv.stride[0]
 pad = self.de_conv.padding[0]
 dil = self.de_conv.dilation[0]
 out_size = stride * (in_size - 1) - 2 * pad + dil * (ker - 1) + 1
 return out_size
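 # Illustrative check of the ConvTranspose2d output-size formula above:
 # kernel_size=3, stride=2, padding=1, dilation=1, in_size=16
 # -> out_size = 2*(16-1) - 2*1 + 1*(3-1) + 1 = 31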
 def forward(self, input):
 raise Exception('You should use either the encode or decode functions')
 # This function performs the clean encoding pass of one layer of the ladder network
 def encode_clean(self, variables):
 # print('Clean encoder:', self.index)
 varx = variables[self.index]
 # if first layer (index=0), z_pre_(i) = x
 if self.position == 'first':
 z_pre = variables[self.index]['x']
 else:
 z_pre = self.en_conv(variables[self.index-1]['h'])
 # collect batch statistics
 varx['mean'], varx['std'] = get_batch_params(z_pre)
 if self.use_bn:
 varx['z'] = self.en_bn_clean(z_pre)
 # if first layer (index=0), h_(i) = z_(i)
 if self.position == 'first':
 varx['h'] = varx['z']
 else:
 # varx['h'] = self.en_nonlin(self.en_gamma * (varx['z'] + self.en_beta)) # original formulation
 varx['h'] = self.en_nonlin(self.en_gamma * varx['z'] + self.en_beta) # I think this makes more sense
 # This function performs the noisy encoding pass of one layer of the ladder network
 def encode_noisy(self, variables):
 # print('Noisy encoder:', self.index)
 varx = variables[self.index]
 # if first layer (index=0), z_pre_tilda_(i) = x
 if self.position == 'first':
 z_pre_tilda = variables[self.index]['x']
 else:
 z_pre_tilda = self.en_conv(variables[self.index - 1]['h_tilda'])
 # we don't record the mean and std here
 if self.use_bn:
 varx['z_tilda'] = self.en_bn_noisy(z_pre_tilda) + (self.noise_std * torch.randn_like(z_pre_tilda))
 else:
 varx['z_tilda'] = z_pre_tilda + (self.noise_std * torch.randn_like(z_pre_tilda))
 # if first layer (index=0), h_tilda_(i) = z_tilda_(i)
 if self.position == 'first':
 varx['h_tilda'] = varx['z_tilda']
 else:
 # varx['h_tilda'] = self.en_nonlin(self.en_gamma * (varx['z_tilda'] + self.en_beta)) # original formulation
 varx['h_tilda'] = self.en_nonlin(self.en_gamma * varx['z_tilda'] + self.en_beta) # ditto
 def decode(self, variables):
 # print('Decoder:', self.index)
 varx = variables[self.index]
 # if last layer (index=L), u_(i) = de_batchnorm( h_tilda_(i) )
 if self.position == 'last':
 if self.use_bn:
 u = self.de_bn(variables[self.index]['h_tilda'])
 else:
 u = variables[self.index]['h_tilda']
 else:
 # calculate output padding
 in_shape = variables[self.index + 1]['z_hat'].shape
 w_pad = varx['z_tilda'].shape[2] - self.deconvout(in_shape[2])
 h_pad = varx['z_tilda'].shape[3] - self.deconvout(in_shape[3])
 self.de_conv.output_padding = (w_pad, h_pad)
 if self.use_bn:
 u = self.ver_dropout(self.de_bn(self.de_conv(variables[self.index + 1]['z_hat'])))
 else:
 u = self.ver_dropout(self.de_conv(variables[self.index + 1]['z_hat']))
 psig1u = self.parsig1(u)
 psig2u = self.parsig2(u)
 varx['z_hat'] = (self.lat_dropout(varx['z_tilda']) - psig1u) * psig2u + psig1u
 if self.use_bn:
 if self.training:
 varx['z_hat_bn'] = (varx['z_hat'] - varx['mean']) / varx['std']
 else:
 assert not self.en_bn_clean.training
 varx['z_hat_bn'] = self.en_bn_clean(varx['z_hat'])
 else:
 varx['z_hat_bn'] = varx['z_hat']
 # special addition to keep the decoder from sucking needlessly
 # this has less effect than I thought it would
 # perhaps we should try and use a unique batchnorm?
 varx['z_hat_bn'] = self.de_gamma * varx['z_hat_bn'] + self.de_beta
class ParamSigmoid(nn.Module):
 def __init__(self, **kwargs):
 super(ParamSigmoid, self).__init__()
 self.a1 = nn.Parameter(torch.randn(1))
 self.a2 = nn.Parameter(torch.randn(1))
 self.a3 = nn.Parameter(torch.randn(1))
 self.a4 = nn.Parameter(torch.randn(1))
 self.a5 = nn.Parameter(torch.randn(1))
 self.sigmoid = nn.Sigmoid()
 def forward(self, x):
 return self.a1 * self.sigmoid(self.a2 * x + self.a3) + self.a4 * x + self.a5
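# Note (added for clarity): ParamSigmoid computes a1*sigmoid(a2*x + a3) + a4*x + a5,
# which appears to serve as the learned combinator g(z_tilda, u) from the
# ladder-network literature; EncoderDecoder.decode uses two instances so that
# z_hat = (z_tilda - parsig1(u)) * parsig2(u) + parsig1(u).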
class LadderNetwork(nn.Module):
 def __init__(self, **kwargs):
 super(LadderNetwork, self).__init__()
 self.num_layers, self.layer_dims, self.arglist = LadderNetwork.gen_layer_args(**kwargs)
 self.config = kwargs # stored so save_model() can serialize the constructor args
 self.variables = list()
 self.encoder_decoder_layers = list()
 for lidx in range(self.num_layers):
 self.variables.append(dict())
 if lidx == 0: # the first layer
 layer = EncoderDecoder(self.layer_dims, lidx, 'first', kwargs['noise_std'], self.arglist)
 elif lidx == self.num_layers-1: # the last layer
 layer = EncoderDecoder(self.layer_dims, lidx, 'last', kwargs['noise_std'], self.arglist)
 else: # middle layers
 layer = EncoderDecoder(self.layer_dims, lidx, 'middle', kwargs['noise_std'], self.arglist)
 if 'batchnorm' in kwargs:
 layer.use_bn = kwargs['batchnorm']
 self.encoder_decoder_layers.append(layer)
 self.layers = nn.ModuleList(self.encoder_decoder_layers)
 self.start_epoch = 0
 @staticmethod
 def gen_layer_args(**kwargs):
 if 'layer_dims' in kwargs:
 num_layers = len(kwargs['layer_dims'])
 layer_dims = kwargs['layer_dims']
 elif 'num_layers' in kwargs:
 assert 'in_dim' in kwargs, 'Must include \'in_dim\' when specifying \'num_layers\''
 assert 'code_dim' in kwargs, 'Must include \'code_dim\' when specifying \'num_layers\''
 num_layers = kwargs['num_layers']
 in_dim = kwargs['in_dim']
 code_dim = kwargs['code_dim']
 layer_dims = list(np.linspace(in_dim, code_dim, num_layers).round().astype(int))
 else:
 raise Exception('Must specify \'layer_dims\' or \'num_layers\'')
 arglist = list()
 for lidx in range(num_layers):
 args = {
 'kernel_size': utils.get_arg_index('kernel_size', lidx, **kwargs),
 'stride': utils.get_arg_index('stride', lidx, **kwargs),
 'padding': utils.get_arg_index('padding', lidx, **kwargs),
 'dilation': utils.get_arg_index('dilation', lidx, **kwargs)
 }
 arglist.append(args)
 return num_layers, layer_dims, arglist
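 # Illustrative example (added for clarity): gen_layer_args(num_layers=4,
 # in_dim=3, code_dim=32, ...) interpolates layer_dims with np.linspace,
 # giving [3, 13, 22, 32]; per-layer conv kwargs (kernel_size, stride,
 # padding, dilation) are looked up through utils.get_arg_index.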
 def set_lateral_weights(self, new_weight):
 for layer in self.layers:
 layer.set_lat_dropout(new_weight)
 def set_vertical_weights(self, new_weight):
 for layer in self.layers:
 layer.set_ver_dropout(new_weight)
 def set_weight(self, kind, layer_index, new_weight):
 layer = self.encoder_decoder_layers[layer_index]
 if kind == 'vertical':
 layer.set_ver_dropout(new_weight)
 elif kind == 'lateral':
 layer.set_lat_dropout(new_weight)
 else:
 raise Exception('That\'s not an option')
 def suggested_in_size(self, out_size):
 in_size = out_size
 for module in self.encoder_decoder_layers:
 in_size = module.deconvout(in_size)
 return in_size
 def set_noise_std(self, new_std):
 for module in self.layers:
 module.noise_std = new_std
 def forward(self, **netinput):
 # setup input for network
 if 'cpu' in netinput:
 del netinput['cpu']
 self.variables[0]['x'] = netinput['x'].cpu()
 else:
 self.variables[0]['x'] = netinput['x']
 for lidx in range(self.num_layers):
 # clean pass to collect ground truth and batch statistics
 self.encoder_decoder_layers[lidx].encode_clean(self.variables)
 # noisy pass to make the architecture work for it
 self.encoder_decoder_layers[lidx].encode_noisy(self.variables)
 for lidx in reversed(range(self.num_layers)):
 # decoding pass to reconstruct input
 self.encoder_decoder_layers[lidx].decode(self.variables)
 output = self.make_output(netinput)
 return output
 def empty_vars(self):
 self.variables = list()
 for lidx in range(self.num_layers):
 self.variables.append(dict())
 def make_output(self, input):
 clean = list()
 recon = list()
 for i in range(len(self.variables)):
 layer = self.variables[i]
 if i == 0:
 clean.append(layer['z'])
 recon.append(downplay(layer['z_hat'], 5))
 else:
 clean.append(layer['z'])
 recon.append(layer['z_hat_bn'])
 output = {'clean': clean, 'recon': recon, **input}
 return output
 def save(self, apath, file='model_latest.pt'):
 save_dirs = [os.path.join(apath, file)]
 for s in save_dirs:
 torch.save(self.state_dict(), s)
 def save_model(self, path, filename):
 model = {
 'model': LadderNetwork,
 'config': self.config,
 'state_dict': self.state_dict(),
 }
 torch.save(model, path + filename)
 def load(self, apath, file='model_latest.pt', resume=-1):
 load_from = None
 kwargs = {}
 if resume == -1:
 load_from = torch.load(os.path.join(apath, file), **kwargs)
 if load_from:
 self.load_state_dict(load_from, strict=False)
 @staticmethod
 def load_model(path, filename):
 checkpoint = torch.load(path + filename)
 model = checkpoint['model'](**checkpoint['config'])
 model.load_state_dict(checkpoint['state_dict'])
 return model
class OwlNet(nn.Module):
 def __init__(self, **kwargs):
 super(OwlNet, self).__init__()
 self.config = kwargs
 self.ladder = LadderNetwork(**kwargs)
 self.avgpool = Global_Avg_Pool()
 kwargs['code_dim'] = self.ladder.layer_dims[-1]
 self.classifier = Classifier(**kwargs)
 self.start_epoch = 0
 def set_noise_std(self, new_std):
 self.ladder.set_noise_std(new_std)
 def forward(self, **kwargs):
 owl_out = self.ladder.forward(**kwargs)
 avg = self.avgpool(owl_out['recon'][-1])
 _c_ = self.classifier(avg)
 owl_out['_c_'] = _c_
 return owl_out
 def predict(self, **kwargs):
 c = kwargs['c']
 for_out = self.forward(**kwargs)
 _c_ = for_out['_c_']
 values, guesses = torch.max(_c_, 1)
 accuracy = torch.mean(1-torch.abs(guesses-c).float())
 output = {
 'guess': guesses,
 'right': accuracy
 }
 return output
 def save(self, apath, file='model_latest.pt'):
 save_dirs = [os.path.join(apath, file)]
 for s in save_dirs:
 torch.save(self.state_dict(), s)
 def save_model(self, path, filename):
 model = {
 'model': OwlNet,
 'config': self.config,
 'state_dict': self.state_dict(),
 }
 torch.save(model, path + filename)
 @staticmethod
 def load_model(path, filename):
 checkpoint = torch.load(path + filename)
 model = checkpoint['model'](**checkpoint['config'])
 model.load_state_dict(checkpoint['state_dict'])
 return model
 def load(self, apath, file='model_latest.pt', resume=-1):
 load_from = None
 kwargs = {}
 if resume == -1:
 load_from = torch.load(os.path.join(apath, file), **kwargs)
 if load_from:
 self.load_state_dict(load_from, strict=False)
class Global_Avg_Pool(nn.Module):
 def __init__(self):
 super(Global_Avg_Pool, self).__init__()
 def forward(self, x):
 y = torch.mean(x, dim=[2, 3])
 return y
class Classifier(nn.Module):
 def __init__(self, **kwargs):
 super(Classifier, self).__init__()
 code_dim = kwargs['code_dim']
 class_dim
	. . . . . . set/get chapter in current item
# | chapter_n . . . . . . . . . . . . . . next chapter in current item
# | chapter_p . . . . . . . . . . . . previous chapter in current item
# |
# | seek X . . . . . . . . . . . seek in seconds, for instance `seek 12'
 def _rc_seek(self, time: int):
 self._rc_send('seek %i' % int(time))
 def _http_seek(self, time: int):
 self._http_request('seek&val=%i' % int(time))
 def seek(self, time: int):
 """Seek in seconds (jump to position)."""
 self._select_interface(self._rc_seek, self._http_seek, time)
# | pause . . . . . . . . . . . . . . . . . . . . . . . . toggle pause
 def _rc_pause(self, id=None):
 if id is None:
 self._rc_send('pause')
 else:
 self._rc_send('pause %i' % int(id))
 def _http_pause(self, id=None):
 if id is None:
 self._http_request('pl_pause')
 else:
 self._http_request('pl_pause&id=%i' % int(id))
 def pause(self, id=None):
 """
 Pause and jump to title with playlist id <id>.
 If <id> is omitted or <id> is None, pause last active item.
 """
 self._select_interface(self._rc_pause, self._http_pause, id)
# | fastforward . . . . . . . . . . . . . . . . . . set to maximum rate
# | rewind . . . . . . . . . . . . . . . . . . . . . set to minimum rate
# | faster . . . . . . . . . . . . . . . . . . faster playing of stream
# | slower . . . . . . . . . . . . . . . . . . slower playing of stream
# | normal . . . . . . . . . . . . . . . . . . normal playing of stream
# | frame . . . . . . . . . . . . . . . . . . . . . play frame by frame
# | fullscreen, f, F [on|off] . . . . . . . . . . . . toggle fullscreen
# | info . . . . . . . . . . . . . information about the current stream
# | stats . . . . . . . . . . . . . . . . show statistical information
# | rate [playback rate] . . . . . . . . . . set playback rate to value
# | get_time . . . . . . . . . seconds elapsed since stream's beginning
 def _rc_get_time(self) -> int:
 return int(self._rc_get('get_time', buffersize=128))
 def _http_get_time(self) -> int:
 status = self._http_status()
 title_length = int(status['length'])
 position = float(status['position'])
 return int(title_length * position)
 def get_time(self) -> int:
 """Get seconds elapsed since stream's beginning."""
 return self._select_interface(self._rc_get_time, self._http_get_time)
 time = get_time
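 # Illustrative example (added for clarity): if the HTTP status reports
 # length=200 and position=0.25, _http_get_time() returns int(200 * 0.25) == 50.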
 def _rc_get_position(self) -> float:
 raise NotImplementedError("_rc_get_position is not implemented. " +
 "Use _rc_get_time or http instead.")
 # TODO: implement
 def _http_get_position(self) -> float:
 """Get position in current stream (between 0..1)."""
 return float(self._http_status()['position'])
 def get_position(self) -> float:
 """Get position in current stream (between 0..1)."""
 return self._select_interface(self._rc_get_position,
 self._http_get_position)
 position = get_position
 # | is_playing . . . . . . . . . . . . 1 if a stream plays, 0 otherwise
 def _rc_is_playing(self) -> bool:
 return (int(self._rc_get('is_playing', buffersize=255)) > 0)
 def _http_is_playing(self) -> bool:
 return (self._http_status()['state'] == 'playing')
 def is_playing(self) -> bool:
 """Get playing status."""
 return self._select_interface(self._rc_is_playing,
 self._http_is_playing)
 def _rc_is_stopped(self) -> bool:
 raise NotImplementedError("_rc_is_stopped is not implemented. " +
 "Use http instead.")
 # TODO: implement
 return False
 def _http_is_stopped(self) -> bool:
 return (self._http_status()['state'] == 'stopped')
 def is_stopped(self) -> bool:
 """Get playing status."""
 return self._select_interface(self._rc_is_stopped,
 self._http_is_stopped)
 def _rc_is_paused(self) -> bool:
 raise NotImplementedError("_rc_is_paused is not implemented. " +
 "Use http instead.")
 # TODO: implement
 return False
 def _http_is_paused(self) -> bool:
 return (self._http_status()['state'] == 'paused')
 def is_paused(self) -> bool:
 """Get playing status."""
 return self._select_interface(self._rc_is_paused, self._http_is_paused)
 # | get_title . . . . . . . . . . . . . the title of the current stream
 def _rc_get_title(self):
 return self._rc_get('get_title', buffersize=1024)
 def _http_get_title_by_id(self, id) -> dict:
 """Search playlist for <id> and return corresponding title."""
 if int(id) == -1:
 # there is no title
 return None
 playl = self._http_playlist()
 return [title for title in playl if int(title['id']) == int(id)][0]
 def _http_get_current_id(self):
 """Get the it of currently playing title."""
 return self._http_request('').json()['currentplid']
 def _http_get_title(self) -> dict:
 return self._http_get_title_by_id(self._http_get_current_id())
 def get_title(self):
 """Return currently playing title."""
 return self._select_interface(self._rc_get_title, self._http_get_title)
# | get_length . . . . . . . . . . . . the length of the current stream
 def _rc_get_length(self):
 return int(self._rc_get('get_length', buffersize=1024))
 def _http_get_length(self):
 return self._http_request('').json()['length']
 def get_length(self):
 """Get the length of playing title in seconds."""
 return self._select_interface(self._rc_get_length,
 self._http_get_length)
 length = get_length
 # | volume [X] . . . . . . . . . . . . . . . . . . set/get audio volume
 def _rc_get_volume(self) -> int:
 return int(self._rc_get('volume', buffersize=4096))
 def _http_get_volume(self) -> int:
 return int(self._http_request('').json()['volume'])
 def get_volume(self) -> int:
 """Get the volume."""
 return self._select_interface(self._rc_get_volume,
 self._http_get_volume)
 def _rc_set_volume(self, volume) -> int:
 return int(self._rc_get('volume %i' % int(volume), buffersize=4096))
 def _http_set_volume(self, volume) -> int:
 self._http_request('volume&val=%i' % int(volume))
 # need to redo the request, because first request returns volume
 # before change
 return self._http_get_volume()
 def set_volume(self, volume) -> int:
 """Set the volume."""
 return self._select_interface(self._rc_set_volume,
 self._http_set_volume, volume)
# | volup [X] . . . . . . . . . . . . . . . raise audio volume X steps
 def _rc_volup(self, x) -> int:
 return int(self._rc_get('volup %i' % (x), buffersize=1024))
 def _http_volup(self, x) -> int:
 self._http_request('volume&val=+%i' % int(x))
 return self._http_get_volume()
 def volup(self, x) -> int:
 """Increase the volume by x."""
 return self._select_interface(self._rc_volup, self._http_volup, x)
# | voldown [X] . . . . . . . . . . . . . . lower audio volume X steps
 def _rc_voldown(self, x) -> int:
 return int(self._rc_get('voldown %i' % (x), buffersize=1024))
 def _http_voldown(self, x) -> int:
 self._http_request('volume&val=-%i' % int(x))
 return self._http_get_volume()
 def voldown(self, x) -> int:
 """Decrease the volume by x."""
 return self._select_interface(self._rc_voldown, self._http_voldown, x)
# | achan [X] . . . . . . . . . . . . set/get stereo audio output mode
# | atrack [X] . . . . . . . . . . . . . . . . . . . set/get audio track
# | vtrack [X] . . . . . . . . . . . . . . . . . . . set/get video track
# | vratio [X] . . . . . . . . . . . . . . . set/get video aspect ratio
# | vcrop, crop [X] . . . . . . . . . . . . . . . . set/get video crop
# | vzoom, zoom [X] . . . . . . . . . . . . . . . . set/get video zoom
# | vdeinterlace [X] . . . . . . . . . . . . . set/get video deinterlace
# | vdeinterlace_mode [X] . . . . . . . set/get video deinterlace mode
# | snapshot . . . . . . . . . . . . . . . . . . . . take video snapshot
# | strack [X] . . . . . . . . . . . . . . . . . set/get subtitle track
# |
# | vlm . . . . . . . . . . . . . . . . . . . . . . . . . load the VLM
# | description . . . . . . . . . . . . . . . . . describe this module
# | help, ? [pattern] . . . . . . . . . . . . . . . . . a help
	# MIT License
# 
# Copyright (c) 2019 <NAME>, <NAME>
# 
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# 
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# 
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Equations for 9-spin Ising model.
# Written on 2019/03/12.
from numpy import zeros, exp, array, prod, isnan
from ..enumerate import fast_logsumexp
def calc_observables(params):
 """
 Give all parameters concatenated into one array from lowest to highest order.
 Returns all correlations.
 """
 Cout = zeros((45))
 H = params[0:9]
 J = params[9:45]
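 # Note (added for clarity): params[0:9] are the 9 fields H[i]; params[9:45]
 # are the 36 = C(9,2) pairwise couplings J, indexed lexicographically over
 # pairs (i, j) with i < j, as used in energyTerms below.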
 energyTerms = array([ +0, +H[8]+0, +H[7]+0, +H[7]+H[8]+J[35], +H[6]+0, +H[6]+H[8]+J[34], +H[6]+H[7]+J[33], +H[6]+H[7]+H[8]+
 J[33]+J[34]+J[35], +H[5]+0, +H[5]+H[8]+J[32], +H[5]+H[7]+J[31], +H[5]+H[7]+H[8]+J[31]+J[32]+J[35], +
 H[5]+H[6]+J[30], +H[5]+H[6]+H[8]+J[30]+J[32]+J[34], +H[5]+H[6]+H[7]+J[30]+J[31]+J[33], +H[5]+H[6]+H[7]+
 H[8]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[4]+0, +H[4]+H[8]+J[29], +H[4]+H[7]+J[28], +H[4]+H[7]+H[8]+
 J[28]+J[29]+J[35], +H[4]+H[6]+J[27], +H[4]+H[6]+H[8]+J[27]+J[29]+J[34], +H[4]+H[6]+H[7]+J[27]+J[28]+
 J[33], +H[4]+H[6]+H[7]+H[8]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[4]+H[5]+J[26], +H[4]+H[5]+H[8]+J[26]+
 J[29]+J[32], +H[4]+H[5]+H[7]+J[26]+J[28]+J[31], +H[4]+H[5]+H[7]+H[8]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +
 H[4]+H[5]+H[6]+J[26]+J[27]+J[30], +H[4]+H[5]+H[6]+H[8]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[4]+H[5]+
 H[6]+H[7]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[4]+H[5]+H[6]+H[7]+H[8]+J[26]+J[27]+J[28]+J[29]+J[30]+
 J[31]+J[32]+J[33]+J[34]+J[35], +H[3]+0, +H[3]+H[8]+J[25], +H[3]+H[7]+J[24], +H[3]+H[7]+H[8]+J[24]+J[25]+
 J[35], +H[3]+H[6]+J[23], +H[3]+H[6]+H[8]+J[23]+J[25]+J[34], +H[3]+H[6]+H[7]+J[23]+J[24]+J[33], +H[3]+
 H[6]+H[7]+H[8]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[3]+H[5]+J[22], +H[3]+H[5]+H[8]+J[22]+J[25]+J[32], +
 H[3]+H[5]+H[7]+J[22]+J[24]+J[31], +H[3]+H[5]+H[7]+H[8]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[3]+H[5]+
 H[6]+J[22]+J[23]+J[30], +H[3]+H[5]+H[6]+H[8]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[3]+H[5]+H[6]+H[7]+
 J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[3]+H[5]+H[6]+H[7]+H[8]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+
 J[33]+J[34]+J[35], +H[3]+H[4]+J[21], +H[3]+H[4]+H[8]+J[21]+J[25]+J[29], +H[3]+H[4]+H[7]+J[21]+J[24]+
 J[28], +H[3]+H[4]+H[7]+H[8]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[3]+H[4]+H[6]+J[21]+J[23]+J[27], +
 H[3]+H[4]+H[6]+H[8]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[3]+H[4]+H[6]+H[7]+J[21]+J[23]+J[24]+J[27]+
 J[28]+J[33], +H[3]+H[4]+H[6]+H[7]+H[8]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +
 H[3]+H[4]+H[5]+J[21]+J[22]+J[26], +H[3]+H[4]+H[5]+H[8]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[3]+H[4]+
 H[5]+H[7]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[3]+H[4]+H[5]+H[7]+H[8]+J[21]+J[22]+J[24]+J[25]+J[26]+
 J[28]+J[29]+J[31]+J[32]+J[35], +H[3]+H[4]+H[5]+H[6]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[3]+H[4]+
 H[5]+H[6]+H[8]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[3]+H[4]+H[5]+H[6]+H[7]+
 J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[21]+J[22]+
 J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[2]+0, +H[2]+H[8]+J[20], +
 H[2]+H[7]+J[19], +H[2]+H[7]+H[8]+J[19]+J[20]+J[35], +H[2]+H[6]+J[18], +H[2]+H[6]+H[8]+J[18]+J[20]+J[34], +
 H[2]+H[6]+H[7]+J[18]+J[19]+J[33], +H[2]+H[6]+H[7]+H[8]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[2]+H[5]+
 J[17], +H[2]+H[5]+H[8]+J[17]+J[20]+J[32], +H[2]+H[5]+H[7]+J[17]+J[19]+J[31], +H[2]+H[5]+H[7]+H[8]+J[17]+
 J[19]+J[20]+J[31]+J[32]+J[35], +H[2]+H[5]+H[6]+J[17]+J[18]+J[30], +H[2]+H[5]+H[6]+H[8]+J[17]+J[18]+J[20]+
 J[30]+J[32]+J[34], +H[2]+H[5]+H[6]+H[7]+J[17]+J[18]+J[19]+J[30]+J[31]+J[33], +H[2]+H[5]+H[6]+H[7]+H[8]+
 J[17]+J[18]+J[19]+J[20]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[2]+H[4]+J[16], +H[2]+H[4]+H[8]+J[16]+
 J[20]+J[29], +H[2]+H[4]+H[7]+J[16]+J[19]+J[28], +H[2]+H[4]+H[7]+H[8]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +
 H[2]+H[4]+H[6]+J[16]+J[18]+J[27], +H[2]+H[4]+H[6]+H[8]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +H[2]+H[4]+
 H[6]+H[7]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +H[2]+H[4]+H[6]+H[7]+H[8]+J[16]+J[18]+J[19]+J[20]+J[27]+
 J[28]+J[29]+J[33]+J[34]+J[35], +H[2]+H[4]+H[5]+J[16]+J[17]+J[26], +H[2]+H[4]+H[5]+H[8]+J[16]+J[17]+J[20]+
 J[26]+J[29]+J[32], +H[2]+H[4]+H[5]+H[7]+J[16]+J[17]+J[19]+J[26]+J[28]+J[31], +H[2]+H[4]+H[5]+H[7]+H[8]+
 J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[2]+H[4]+H[5]+H[6]+J[16]+J[17]+J[18]+
 J[26]+J[27]+J[30], +H[2]+H[4]+H[5]+H[6]+H[8]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +
 H[2]+H[4]+H[5]+H[6]+H[7]+J[16]+J[17]+J[18]+J[19]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[2]+H[4]+H[5]+
 H[6]+H[7]+H[8]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +
 H[2]+H[3]+J[15], +H[2]+H[3]+H[8]+J[15]+J[20]+J[25], +H[2]+H[3]+H[7]+J[15]+J[19]+J[24], +H[2]+H[3]+H[7]+
 H[8]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +H[2]+H[3]+H[6]+J[15]+J[18]+J[23], +H[2]+H[3]+H[6]+H[8]+J[15]+
 J[18]+J[20]+J[23]+J[25]+J[34], +H[2]+H[3]+H[6]+H[7]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33], +H[2]+H[3]+
 H[6]+H[7]+H[8]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[2]+H[3]+H[5]+J[15]+J[17]+
 J[22], +H[2]+H[3]+H[5]+H[8]+J[15]+J[17]+J[20]+J[22]+J[25]+J[32], +H[2]+H[3]+H[5]+H[7]+J[15]+J[17]+J[19]+
 J[22]+J[24]+J[31], +H[2]+H[3]+H[5]+H[7]+H[8]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +
 H[2]+H[3]+H[5]+H[6]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +H[2]+H[3]+H[5]+H[6]+H[8]+J[15]+J[17]+J[18]+
 J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[2]+H[3]+H[5]+H[6]+H[7]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+
 J[24]+J[30]+J[31]+J[33], +H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+
 J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[2]+H[3]+H[4]+J[15]+J[16]+J[21], +H[2]+H[3]+H[4]+H[8]+J[15]+
 J[16]+J[20]+J[21]+J[25]+J[29], +H[2]+H[3]+H[4]+H[7]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28], +H[2]+H[3]+
 H[4]+H[7]+H[8]+J[15]+J[16]+J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[2]+H[3]+H[4]+H[6]+J[15]+
 J[16]+J[18]+J[21]+J[23]+J[27], +H[2]+H[3]+H[4]+H[6]+H[8]+J[15]+J[16]+J[18]+J[20]+J[21]+J[23]+J[25]+J[27]+
 J[29]+J[34], +H[2]+H[3]+H[4]+H[6]+H[7]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +
 H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+
 J[33]+J[34]+J[35], +H[2]+H[3]+H[4]+H[5]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +H[2]+H[3]+H[4]+H[5]+H[8]+
 J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[2]+H[3]+H[4]+H[5]+H[7]+J[15]+J[16]+J[17]+
 J[19]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[15]+J[16]+J[17]+J[19]+J[20]+
 J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[2]+H[3]+H[4]+H[5]+H[6]+J[15]+J[16]+J[17]+
 J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[15]+J[16]+J[17]+J[18]+J[20]+
 J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[15]+J[16]+
 J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[2]+H[3]+H[4]+H[5]+
 H[6]+H[7]+H[8]+J[15]+J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+
 J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+0, +H[1]+H[8]+J[14], +H[1]+H[7]+J[13], +H[1]+H[7]+H[8]+J[13]+
 J[14]+J[35], +H[1]+H[6]+J[12], +H[1]+H[6]+H[8]+J[12]+J[14]+J[34], +H[1]+H[6]+H[7]+J[12]+J[13]+J[33], +
 H[1]+H[6]+H[7]+H[8]+J[12]+J[13]+J[14]+J[33]+J[34]+J[35], +H[1]+H[5]+J[11], +H[1]+H[5]+H[8]+J[11]+J[14]+
 J[32], +H[1]+H[5]+H[7]+J[11]+J[13]+J[31], +H[1]+H[5]+H[7]+H[8]+J[11]+J[13]+J[14]+J[31]+J[32]+J[35], +
 H[1]+H[5]+H[6]+J[11]+J[12]+J[30], +H[1]+H[5]+H[6]+H[8]+J[11]+J[12]+J[14]+J[30]+J[32]+J[34], +H[1]+H[5]+
 H[6]+H[7]+J[11]+J[12]+J[13]+J[30]+J[31]+J[33], +H[1]+H[5]+H[6]+H[7]+H[8]+J[11]+J[12]+J[13]+J[14]+J[30]+
 J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+H[4]+J[10], +H[1]+H[4]+H[8]+J[10]+J[14]+J[29], +H[1]+H[4]+H[7]+
 J[10]+J[13]+J[28], +H[1]+H[4]+H[7]+H[8]+J[10]+J[13]+J[14]+J[28]+J[29]+J[35], +H[1]+H[4]+H[6]+J[10]+J[12]+
 J[27], +H[1]+H[4]+H[6]+H[8]+J[10]+J[12]+J[14]+J[27]+J[29]+J[34], +H[1]+H[4]+H[6]+H[7]+J[10]+J[12]+J[13]+
 J[27]+J[28]+J[33], +H[1]+H[4]+H[6]+H[7]+H[8]+J[10]+J[12]+J[13]+J[14]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +
 H[1]+H[4]+H[5]+J[10]+J[11]+J[26], +H[1]+H[4]+H[5]+H[8]+J[10]+J[11]+J[14]+J[26]+J[29]+J[32], +H[1]+H[4]+
 H[5]+H[7]+J[10]+J[11]+J[13]+J[26]+J[28]+J[31], +H[1]+H[4]+H[5]+H[7]+H[8]+J[10]+J[11]+J[13]+J[14]+J[26]+
 J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[4]+H[5]+H[6]+J[10]+J[11]+J[12]+J[26]+J[27]+J[30], +H[1]+H[4]+
 H[5]+H[6]+H[8]+J[10]+J[11]+J[12]+J[14]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[4]+H[5]+H[6]+H[7]+
 J[10]+J[11]+J[12]+J[13]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[1]+H[4]+H[5]+H[6]+H[7]+H[8]+J[10]+J[11]+
 J[12]+J[13]+J[14]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+H[3]+J[9], +H[1]+
 H[3]+H[8]+J[9]+J[14]+J[25], +H[1]+H[3]+H[7]+J[9]+J[13]+J[24], +H[1]+H[3]+H[7]+H[8]+J[9]+J[13]+J[14]+
 J[24]+J[25]+J[35], +H[1]+H[3]+H[6]+J[9]+J[12]+J[23], +H[1]+H[3]+H[6]+H[8]+J[9]+J[12]+J[14]+J[23]+J[25]+
 J[34], +H[1]+H[3]+H[6]+H[7]+J[9]+J[12]+J[13]+J[23]+J[24]+J[33], +H[1]+H[3]+H[6]+H[7]+H[8]+J[9]+J[12]+
 J[13]+J[14]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[1]+H[3]+H[5]+J[9]+J[11]+J[22], +H[1]+H[3]+H[5]+H[8]+
 J[9]+J[11]+J[14]+J[22]+J[25]+J[32], +H[1]+H[3]+H[5]+H[7]+J[9]+J[11]+J[13]+J[22]+J[24]+J[31], +H[1]+H[3]+
 H[5]+H[7]+H[8]+J[9]+J[11]+J[13]+J[14]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[1]+H[3]+H[5]+H[6]+J[9]+
 J[11]+J[12]+J[22]+J[23]+J[30], +H[1]+H[3]+H[5]+H[6]+H[8]+J[9]+J[11]+J[12]+J[14]+J[22]+J[23]+J[25]+J[30]+
 J[32]+J[34], +H[1]+H[3]+H[5]+H[6]+H[7]+J[9]+J[11]+J[12]+J[13]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +
 H[1]+H[3]+H[5]+H[6]+H[7]+H[8]+J[9]+J[11]+J[12]+J[13]+J[14]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+
 J[33]+J[34]+J[35], +H[1]+H[3]+H[4]+J[9]+J[10]+J[21], +H[1]+H[3]+H[4]+H[8]+J[9]+J[10]+J[14]+J[21]+J[25]+
 J[29], +H[1]+H[3]+H[4]+H[7]+J[9]+J[10]+J[13]+J[21]+J[24]+J[28], +H[1]+H[3]+H[4]+H[7]+H[8]+J[9]+J[10]+
 J[13]+J[14]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[1]+H[3]+H[4]+H[6]+J[9]+J[10]+J[12]+J[21]+J[23]+J[27], +
 H[1]+H[3]+H[4]+H[6]+H[8]+J[9]+J[10]+J[12]+J[14]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[1]+H[3]+H[4]+
 H[6]+H[7]+J[9]+J[10]+J[12]+J[13]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[1]+H[3]+H[4]+H[6]+H[7]+H[8]+
 J[9]+J[10]+J[12]+J[13]+J[14]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[1]+H[3]+
 H[4]+H[5]+J[9]+J[10]+J[11]+J[21]+J[22]+J[26], +H[1]+H[3]+H[4]+H[5]+H[8]+J[9]+J[10]+J[11]+J[14]+J[21]+
 J[22]+J[25]+J[26]+J[29]+J[32], +H[1]+H[3]+H[4]+H[5]+H[7]+J[9]+J[10]+J[11]+J[13]+J[21]+J[22]+J[24]+J[26]+
 J[28]+J[31], +H[1]+H[3]+H[4]+H[5]+H[7]+H[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[21]+J[22]+J[24]+J[25]+J[26]+
 J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[3]+H[4]+H[5]+H[6]+J[9]+J[10]+J[11]+J[12]+J[21]+J[22]+J[23]+J[26]+
 J[27]+J[30], +H[1]+H[3]+H[4]+H[5]+H[6]+H[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[21]+J[22]+J[23]+J[25]+J[26]+
 J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+J[9]+J[10]+J[11]+J[12]+J[13]+J[21]+J[22]+
 J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[9]+J[10]+J[11]+
 J[12]+J[13]+J[14]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+
 J[35], +H[1]+H[2]+J[8], +H[1]+H[2]+H[8]+J[8]+J[14]+J[20], +H[1]+H[2]+H[7]+J[8]+J[13]+J[19], +H[1]+H[2]+
 H[7]+H[8]+J[8]+J[13]+J[14]+J[19]+J[20]+J[35], +H[1]+H[2]+H[6]+J[8]+J[12]+J[18], +H[1]+H[2]+H[6]+H[8]+
 J[8]+J[12]+J[14]+J[18]+J[20]+J[34], +H[1]+H[2]+H[6]+H[7]+J[8]+J[12]+J[13]+J[18]+J[19]+J[33], +H[1]+H[2]+
 H[6]+H[7]+H[8]+J[8]+J[12]+J[13]+J[14]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[1]+H[2]+H[5]+J[8]+J[11]+
 J[17], +H[1]+H[2]+H[5]+H[8]+J[8]+J[11]+J[14]+J[17]+J[20]+J[32], +H[1]+H[2]+H[5]+H[7]+J[8]+J[11]+J[13]+
 J[17]+J[19]+J[31], +H[1]+H[2]+H[5]+H[7]+H[8]+J[8]+J[11]+J[13]+J[14]+J[17]+J[19]+J[20]+J[31]+J[32]+J[35], +
 H[1]+H[2]+H[5]+H[6]+J[8]+J[11]+J[12]+J[17]+J[18]+J[30], +H[1]+H[2]+H[5]+H[6]+H[8]+J[8]+J[11]+J[12]+J[14]+
 J[17]+J[18]+J[20]+J[30]+J[32]+J[34], +H[1]+H[2]+H[5]+H[6]+H[7]+J[8]+J[11]+J[12]+J[13]+J[17]+J[18]+J[19]+
 J[30]+J[31]+J[33], +H[1]+H[2]+H[5]+H[6]+H[7]+H[8]+J[8]+J[11]+J[12]+J[13]+J[14]+J[17]+J[18]+J[19]+J[20]+
 J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+H[2]+H[4]+J[8]+J[10]+J[16], +H[1]+H[2]+H[4]+H[8]+J[8]+J[10]+
 J[14]+J[16]+J[20]+J[29], +H[1]+H[2]+H[4]+H[7]+J[8]+J[10]+J[13]+J[16]+J[19]+J[28], +H[1]+H[2]+H[4]+H[7]+
 H[8]+J[8]+J[10]+J[13]+J[14]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +H[1]+H[2]+H[4]+H[6]+J[8]+J[10]+J[12]+
 J[16]+J[18]+J[27], +H[1]+H[2]+H[4]+H[6]+H[8]+J[8]+J[10]+J[12]+J[14]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +
 H[1]+H[2]+H[4]+H[6]+H[7]+J[8]+J[10]+J[12]+J[13]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +H[1]+H[2]+H[4]+
 H[6]+H[7]+H[8]+J[8]+J[10]+J[12]+J[13]+J[14]+J[16]+J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +
 H[1]+H[2]+H[4]+H[5]+J[8]+J[10]+J[11]+J[16]+J[17]+J[26], +H[1]+H[2]+H[4]+H[5]+H[8]+J[8]+J[10]+J[11]+J[14]+
 J[16]+J[17]+J[20]+J[26]+J[29]+J[32], +H[1]+H[2]+H[4]+H[5]+H[7]+J[8]+J[10]+J[11]+J[13]+J[16]+J[17]+J[19]+
 J[26]+J[28]+J[31], +H[1]+H[2]+H[4]+H[5]+H[7]+H[8]+J[8]+J[10]+J[11]+J[13]+J[14]+J[16]+J[17]+J[19]+J[20]+
 J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[2]+H[4]+H[5]+H[6]+J[8]+J[10]+J[11]+J[12]+J[16]+J[17]+J[18]+
 J[26]+J[27]+J[30], +H[1]+H[2]+H[4]+H[5]+H[6]+H[8]+J[8]+J[10]+J[11]+J[12]+J[14]+J[16]+J[17]+J[18]+J[20]+
 J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+J[8]+J[10]+J[11]+J[12]+J[13]+J[16]+
 J[17]+J[18]+J[19]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[8]+J[10]+
 J[11]+J[12]+J[13]+J[14]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+
 J[34]+J[35], +H[1]+H[2]+H[3]+J[8]+J[9]+J[15], +H[1]+H[2]+H[3]+H[8]+J[8]+J[9]+J[14]+J[15]+J[20]+J[25], +
 H[1]+H[2]+H[3]+H[7]+J[8]+J[9]+J[13]+J[15]+J[19]+J[24], +H[1]+H[2]+H[3]+H[7]+H[8]+J[8]+J[9]+J[13]+J[14]+
 J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +H[1]+H[2]+H[3]+H[6]+J[8]+J[9]+J[12]+J[15]+J[18]+J[23], +H[1]+H[2]+
 H[3]+H[6]+H[8]+J[8]+J[9]+J[12]+J[14]+J[15]+J[18]+J[20]+J[23]+J[25]+J[34], +H[1]+H[2]+H[3]+H[6]+H[7]+
 J[8]+J[9]+J[12]+J[13]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33], +H[1]+H[2]+H[3]+H[6]+H[7]+H[8]+J[8]+J[9]+
 J[12]+J[13]+J[14]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[1]+H[2]+H[3]+H[5]+
 J[8]+J[9]+J[11]+J[15]+J[17]+J[22], +H[1]+H[2]+H[3]+H[5]+H[8]+J[8]+J[9]+J[11]+J[14]+J[15]+J[17]+J[20]+
 J[22]+J[25]+J[32], +H[1]+H[2]+H[3]+H[5]+H[7]+J[8]+J[9]+J[11]+J[13]+J[15]+J[17]+J[19]+J[22]+J[24]+J[31], +
 H[1]+H[2]+H[3]+H[5]+H[7]+H[8]+J[8]+J[9]+J[11]+J[13]+J[14]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+
 J[31]+J[32]+J[35], +H[1]+H[2]+H[3]+H[5]+H[6]+J[8]+J[9]+J[11]+J[12]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +
 H[1]+H[2]+H[3]+H[5]+H[6]+H[8]+J[8]+J[9]+J[11]+J[12]+J[14]+J[15]+J[17]+J[18]+J[20]+J[22]+J[23]+J[25]+
 J[30]+J[32]+J[34], +H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+J[8]+J[9]+J[11]+J[12]+J[13]+J[15]+J[17]+J[18]+J[19]+
 J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[8]+J[9]+J[11]+J[12]+J[13]+
 J[14]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[1]+
 H[2]+H[3]+H[4]+J[8]+J[9]+J[10]+J[15]+J[16]+J[21], +H[1]+H[2]+H[3]+H[4]+H[8]+J[8]+J[9]+J[10]+J[14]+J[15]+
 J[16]+J[20]+J[21]+J[25]+J[29], +H[1]+H[2]+H[3]+H[4]+H[7]+J[8]+J[9]+J[10]+J[13]+J[15]+J[16]+J[19]+J[21]+
 J[24]+J[28], +H[1]+H[2]+H[3]+H[4]+H[7]+H[8]+J[8]+J[9]+J[10]+J[13]+J[14]+J[15]+J[16]+J[19]+J[20]+J[21]+
 J[24]+J[25]+J[28]+J[29]+J[35], +H[1]+H[2]+H[3]+H[4]+H[6]+J[8]+J[9]+J[10]+J[12]+J[15]+J[16]+J[18]+J[21]+
 J[23]+J[27], +H[1]+H[2]+H[3]+H[4]+H[6]+H[8]+J[8]+J[9]+J[10]+J[12]+J[14]+J[15]+J[16]+J[18]+J[20]+J[21]+
 J[23]+J[25]+J[27]+J[29]+J[34], +H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+J[8]+J[9]+J[10]+J[12]+J[13]+J[15]+J[16]+
 J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+J[8]+J[9]+J[10]+
 J[12]+J[13]+J[14]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+
 J[35], +H[1]+H[2]+H[3]+H[4]+H[5]+J[8]+J[9]+J[10]+J[11]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +H[1]+H[2]+
 H[3]+H[4]+H[5]+H[8]+J[8]+J[9]+J[10]+J[11]+J[14]+J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+J[29]+
 J[32], +H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+J[8]+J[9]+J[10]+J[11]+J[13]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+
 J[24]+J[26]+J[28]+J[31], +H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[15]+
 J[16]+J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[1]+H[2]+H[3]+
 H[4]+H[5]+H[6]+J[8]+J[9]+J[10]+J[11]+J[12]+J[15]+J[16]+J[17]+J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +
 H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+
 J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[8]+J[9]+
 J[10]+J[11]+J[12]+J[13]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+
 J[31]+J[33], +H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[15]+
 J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+
 J[33]+J[34]+J[35], +H[0]+0, +H[0]+H[8]+J[7], +H[0]+H[7]+J[6], +H[0]+H[7]+H[8]+J[6]+J[7]+J[35], +H[0]+
 H[6]+J[5], +H[0]+H[6]+H[8]+J[5]+J[7]+J[34], +H[0]+H[6]+H[7]+J[5]+J[6]+J[33], +H[0]+H[6]+H[7]+H[8]+J[5]+
 J[6]+J[7]+J[33]+J[34]+J[35], +H[0]+H[5]+J[4], +H[0]+H[5]+H[8]+J[4]+J[7]+J[32], +H[0]+H[5]+H[7]+J[4]+
 J[6]+J[31], +H[0]+H[5]+H[7]+H[8]+J[4]+J[6]+J[7]+J[31]+J[32]+J[35], +H[0]+H[5]+H[6]+J[4]+J[5]+J[30], +
 H[0]+H[5]+H[6]+H[8]+J[4]+J[5]+J[7]+J[30]+J[32]+J[34], +H[0]+H[5]+H[6]+H[7]+J[4]+J[5]+J[6]+J[30]+J[31]+
 J[33], +H[0]+H[5]+H[6]+H[7]+H[8]+J[4]+J[5]+J[6]+J[7]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[4]+
 J[3], +H[0]+H[4]+H[8]+J[3]+J[7]+J[29], +H[0]+H[4]+H[7]+J[3]+J[6]+J[28], +H[0]+H[4]+H[7]+H[8]+J[3]+J[6]+
 J[7]+J[28]+J[29]+J[35], +H[0]+H[4]+H[6]+J[3]+J[5]+J[27], +H[0]+H[4]+H[6]+H[8]+J[3]+J[5]+J[7]+J[27]+J[29]+
 J[34], +H[0]+H[4]+H[6]+H[7]+J[3]+J[5]+J[6]+J[27]+J[28]+J[33], +H[0]+H[4]+H[6]+H[7]+H[8]+J[3]+J[5]+J[6]+
 J[7]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[4]+H[5]+J[3]+J[4]+J[26], +H[0]+H[4]+H[5]+H[8]+J[3]+
 J[4]+J[7]+J[26]+J[29]+J[32], +H[0]+H[4]+H[5]+H[7]+J[3]+J[4]+J[6]+J[26]+J[28]+J[31], +H[0]+H[4]+H[5]+
 H[7]+H[8]+J[3]+J[4]+J[6]+J[7]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[4]+H[5]+H[6]+J[3]+J[4]+J[5]+
 J[26]+J[27]+J[30], +H[0]+H[4]+H[5]+H[6]+H[8]+J[3]+J[4]+J[5]+J[7]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +
 H[0]+H[4]+H[5]+H[6]+H[7]+J[3]+J[4]+J[5]+J[6]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[4]+H[5]+H[6]+
 H[7]+H[8]+J[3]+J[4]+J[5]+J[6]+J[7]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+
 H[3]+J[2], +H[0]+H[3]+H[8]+J[2]+J[7]+J[25], +H[0]+H[3]+H[7]+J[2]+J[6]+J[24], +H[0]+H[3]+H[7]+H[8]+J[2]+
 J[6]+J[7]+J[24]+J[25]+J[35], +H[0]+H[3]+H[6]+J[2]+J[5]+J[23], +H[0]+H[3]+H[6]+H[8]+J[2]+J[5]+J[7]+J[23]+
 J[25]+J[34], +H[0]+H[3]+H[6]+H[7]+J[2]+J[5]+J[6]+J[23]+J[24]+J[33], +H[0]+H[3]+H[6]+H[7]+H[8]+J[2]+J[5]+
 J[6]+J[7]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[0]+H[3]+H[5]+J[2]+J[4]+J[22], +H[0]+H[3]+H[5]+H[8]+
 J[2]+J[4]+J[7]+J[22]+J[25]+J[32], +H[0]+H[3]+H[5]+H[7]+J[2]+J[4]+J[6]+J[22]+J[24]+J[31], +H[0]+H[3]+
 H[5]+H[7]+H[8]+J[2]+J[4]+J[6]+J[7]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[0]+H[3]+H[5]+H[6]+J[2]+J[4]+
 J[5]+J[22]+J[23]+J[30], +H[0]+H[3]+H[5]+H[6]+H[8]+J[2]+J[4]+J[5]+J[7]+J[22]+J[23]+J[25]+J[30]+J[32]+
 J[34], +H[0]+H[3]+H[5]+H[6]+H[7]+J[2]+J[4]+J[5]+J[6]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[0]+H[3]+
 H[5]+H[6]+H[7]+H[8]+J[2]+J[4]+J[5]+J[6]+J[7]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +
 H[0]+H[3]+H[4]+J[2]+J[3]+J[21], +H[0]+H[3]+H[4]+H[8]+J[2]+J[3]+J[7]+J[21]+J[25]+J[29], +H[0]+H[3]+H[4]+
 H[7]+J[2]+J[3]+J[6]+J[21]+J[24]+J[28], +H[0]+H[3]+H[4]+H[7]+H[8]+J[2]+J[3]+J[6]+J[7]+J[21]+J[24]+J[25]+
 J[28]+J[29]+J[35], +H[0]+H[3]+H[4]+H[6]+J[2]+J[3]+J[5]+J[21]+J[23]+J[27], +H[0]+H[3]+H[4]+H[6]+H[8]+
 J[2]+J[3]+J[5]+J[7]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[0]+H[3]+H[4]+H[6]+H[7]+J[2]+J[3]+J[5]+J[6]+
 J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[0]+H[3]+H[4]+H[6]+H[7]+H[8]+J[2]+J[3]+J[5]+J[6]+J[7]+J[21]+J[23]+
 J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[3]+H[4]+H[5]+J[2]+J[3]+J[4]+J[21]+J[22]+J[26], +
 H[0]+H[3]+H[4]+H[5]+H[8]+J[2]+J[3]+J[4]+J[7]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[0]+H[3]+H[4]+H[5]+
 H[7]+J[2]+J[3]+J[4]+J[6]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[0]+H[3]+H[4]+H[5]+H[7]+H[8]+J[2]+J[3]+
 J[4]+J[6]+J[7]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[3]+H[4]+H[5]+H[6]+
 J[2]+J[3]+J[4]+J[5]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+H[3]+H[4]+H[5]+H[6]+H[8]+J[2]+J[3]+J[4]+
 J[5]+J[7]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[3]+H[4]+H[5]+H[6]+H[7]+
 J[2]+J[3]+J[4]+J[5]+J[6]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[3]+H[4]+
 H[5]+H[6]+H[7]+H[8]+J[2]+J[3]+J[4]+J[5]+J[6]+J[7]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+
 J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+J[1], +H[0]+H[2]+H[8]+J[1]+J[7]+J[20], +H[0]+H[2]+H[7]+
 J[1]+J[6]+J[19], +H[0]+H[2]+H[7]+H[8]+J[1]+J[6]+J[7]+J[19]+J[20]+J[35], +H[0]+H[2]+H[6]+J[1]+J[5]+J[18], +
 H[0]+H[2]+H[6]+H[8]+J[1]+J[5]+J[7]+J[18]+J[20]+J[34], +H[0]+H[2]+H[6]+H[7]+J[1]+J[5]+J[6]+J[18]+J[19]+
 J[33], +H[0]+H[2]+H[6]+H[7]+H[8]+J[1]+J[5]+J[6]+J[7]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[0]+H[2]+
 H[5]+J[1]+J[4]+J[17], +H[0]+H[2]+H[5]+H[8]+J[1]+J[4]+J[7]+J[17]+J[20]+J[32], +H[0]+H[2]+H[5]+H[7]+J[1]+
 J[4]+J[6]+J[17]+J[19]+J[31], +H[0]+H[2]+H[5]+H[7]+H[8]+J[1]+J[4]+J[6]+J[7]+J[17]+J[19]+J[20]+J[31]+J[32]+
 J[35], +H[0]+H[2]+H[5]+H[6]+J[1]+J[4]+J[5]+J[17]+J[18]+J[30], +H[0]+H[2]+H[5]+H[6]+H[8]+J[1]+J[4]+J[5]+
 J[7]+J[17]+J[18]+J[20]+J[30]+J[32]+J[34], +H[0]+H[2]+H[5]+H[6]+H[7]+J[1]+J[4]+J[5]+J[6]+J[17]+J[18]+
 J[19]+J[30]+J[31]+J[33], +H[0]+H[2]+H[5]+H[6]+H[7]+H[8]+J[1]+J[4]+J[5]+J[6]+J[7]+J[17]+J[18]+J[19]+J[20]+
 J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+H[4]+J[1]+J[3]+J[16], +H[0]+H[2]+H[4]+H[8]+J[1]+J[3]+
 J[7]+J[16]+J[20]+J[29], +H[0]+H[2]+H[4]+H[7]+J[1]+J[3]+J[6]+J[16]+J[19]+J[28], +H[0]+H[2]+H[4]+H[7]+
 H[8]+J[1]+J[3]+J[6]+J[7]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +H[0]+H[2]+H[4]+H[6]+J[1]+J[3]+J[5]+J[16]+
 J[18]+J[27], +H[0]+H[2]+H[4]+H[6]+H[8]+J[1]+J[3]+J[5]+J[7]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +H[0]+
 H[2]+H[4]+H[6]+H[7]+J[1]+J[3]+J[5]+J[6]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +H[0]+H[2]+H[4]+H[6]+H[7]+
 H[8]+J[1]+J[3]+J[5]+J[6]+J[7]+J[16]+J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[2]+
 H[4]+H[5]+J[1]+J[3]+J[4]+J[16]+J[17]+J[26], +H[0]+H[2]+H[4]+H[5]+H[8]+J[1]+J[3]+J[4]+J[7]+J[16]+J[17]+
 J[20]+J[26]+J[29]+J[32], +H[0]+H[2]+H[4]+H[5]+H[7]+J[1]+J[3]+J[4]+J[6]+J[16]+J[17]+J[19]+J[26]+J[28]+
 J[31], +H[0]+H[2]+H[4]+H[5]+H[7]+H[8]+J[1]+J[3]+J[4]+J[6]+J[7]+J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+
 J[31]+J[32]+J[35], +H[0]+H[2]+H[4]+H[5]+H[6]+J[1]+J[3]+J[4]+J[5]+J[16]+J[17]+J[18]+J[26]+J[27]+J[30], +
 H[0]+H[2]+H[4]+H[5]+H[6]+H[8]+J[1]+J[3]+J[4]+J[5]+J[7]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+
 J[32]+J[34], +H[0]+H[2]+H[4]+H[5]+H[6]+H[7]+J[1]+J[3]+J[4]+J[5]+J[6]+J[16]+J[17]+J[18]+J[19]+J[26]+J[27]+
 J[28]+J[30]+J[31]+J[33], +H[0]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[1]+J[3]+J[4]+J[5]+J[6]+J[7]+J[16]+J[17]+
 J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+J[1]+
 J[2]+J[15], +H[0]+H[2]+H[3]+H[8]+J[1]+J[2]+J[7]+J[15]+J[20]+J[25], +H[0]+H[2]+H[3]+H[7]+J[1]+J[2]+J[6]+
 J[15]+J[19]+J[24], +H[0]+H[2]+H[3]+H[7]+H[8]+J[1]+J[2]+J[6]+J[7]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +
 H[0]+H[2]+H[3]+H[6]+J[1]+J[2]+J[5]+J[15]+J[18]+J[23], +H[0]+H[2]+H[3]+H[6]+H[8]+J[1]+J[2]+J[5]+J[7]+
 J[15]+J[18]+J[20]+J[23]+J[25]+J[34], +H[0]+H[2]+H[3]+H[6]+H[7]+J[1]+J[2]+J[5]+J[6]+J[15]+J[18]+J[19]+
 J[23]+J[24]+J[33], +H[0]+H[2]+H[3]+H[6]+H[7]+H[8]+J[1]+J[2]+J[5]+J[6]+J[7]+J[15]+J[18]+J[19]+J[20]+J[23]+
 J[24]+J[25]+J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+H[5]+J[1]+J[2]+J[4]+J[15]+J[17]+J[22], +H[0]+H[2]+H[3]+
 H[5]+H[8]+J[1]+J[2]+J[4]+J[7]+J[15]+J[17]+J[20]+J[22]+J[25]+J[32], +H[0]+H[2]+H[3]+H[5]+H[7]+J[1]+J[2]+
 J[4]+J[6]+J[15]+J[17]+J[19]+J[22]+J[24]+J[31], +H[0]+H[2]+H[3]+H[5]+H[7]+H[8]+J[1]+J[2]+J[4]+J[6]+J[7]+
 J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[0]+H[2]+H[3]+H[5]+H[6]+J[1]+J[2]+J[4]+
 J[5]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +H[0]+H[2]+H[3]+H[5]+H[6]+H[8]+J[1]+J[2]+J[4]+J[5]+J[7]+J[15]+
 J[17]+J[18]+J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[0]+H[2]+H[3]+H[5]+H[6]+H[7]+J[1]+J[2]+J[4]+
 J[5]+J[6]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +H[0]+H[2]+H[3]+H[5]+H[6]+H[7]+
 H[8]+J[1]+J[2]+J[4]+J[5]+J[6]+J[7]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+
 J[32]+J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+H[4]+J[1]+J[2]+J[3]+J[15]+J[16]+J[21], +H[0]+H[2]+H[3]+H[4]+
 H[8]+J[1]+J[2]+J[3]+J[7]+J[15]+J[16]+J[20]+J[21]+J[25]+J[29], +H[0]+H[2]+H[3]+H[4]+H[7]+J[1]+J[2]+J[3]+
 J[6]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28], +H[0]+H[2]+H[3]+H[4]+H[7]+H[8]+J[1]+J[2]+J[3]+J[6]+J[7]+J[15]+
 J[16]+J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[0]+H[2]+H[3]+H[4]+H[6]+J[1]+J[2]+J[3]+J[5]+
 J[15]+J[16]+J[18]+J[21]+J[23]+J[27], +H[0]+H[2]+H[3]+H[4]+H[6]+H[8]+J[1]+J[2]+J[3]+J[5]+J[7]+J[15]+J[16]+
 J[18]+J[20]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[0]+H[2]+H[3]+H[4]+H[6]+H[7]+J[1]+J[2]+J[3]+J[5]+
 J[6]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[0]+H[2]+H[3]+H[4]+H[6]+H[7]+H[8]+
 J[1]+J[2]+J[3]+J[5]+J[6]+J[7]+J[15]+J[16]+J[18]+J[19]+J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+
 J[33]+J[34]+J[35], +H[0]+H[2]+H[3]+H[4]+H[5]+J[1]+J[2]+J[3]+J[4]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +
 H[0]+H[2]+H[3]+H[4]+H[5]+H[8]+J[1]+J[2]+J[3]+J[4]+J[7]+J[15]+J[16]+J[17]+J[20]+J[21]+J[22]+J[25]+J[26]+
 J[29]+J[32], +H[0]+H[2]+H[3]+H[4]+H[5]+H[7]+J[1]+J[2]+J[3]+J[4]+J[6]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+
 J[24]+J[26]+J[28]+J[31], +H[0]+H[2]+H[3]+H[4]+H[5]+H[7]+H[8]+J[1]+J[2]+J[3]+J[4]+J[6]+J[7]+J[15]+J[16]+
 J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[2]+H[3]+H[4]+
 H[5]+H[6]+J[1]+J[2]+J[3]+J[4]+J[5]+J[15]+J[16]+J[17]+J[18]+J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+
 H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[1]+J[2]+J[3]+J[4]+J[5]+J[7]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+J[22]+
 J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[1]+J[2]+J[3]+
 J[4]+J[5]+J[6]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +
 H[0]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[1]+J[2]+J[3]+J[4]+J[5]+J[6]+J[7]+J[15]+J[16]+J[17]+J[18]+J[19]+
 J[20]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+
 H[1]+J[0], +H[0]+H[1]+H[8]+J[0]+J[7]+J[14], +H[0]+H[1]+H[7]+J[0]+J[6]+J[13], +H[0]+H[1]+H[7]+H[8]+J[0]+
 J[6]+J[7]+J[13]+J[14]+J[35], +H[0]+H[1]+H[6]+J[0]+J[5]+J[12], +H[0]+H[1]+H[6]+H[8]+J[0]+J[5]+J[7]+J[12]+
 J[14]+J[34], +H[0]+H[1]+H[6]+H[7]+J[0]+J[5]+J[6]+J[12]+J[13]+J[33], +H[0]+H[1]+H[6]+H[7]+H[8]+J[0]+J[5]+
 J[6]+J[7]+J[12]+J[13]+J[14]+J[33]+J[34]+J[35], +H[0]+H[1]+H[5]+J[0]+J[4]+J[11], +H[0]+H[1]+H[5]+H[8]+
 J[0]+J[4]+J[7]+J[11]+J[14]+J[32], +H[0]+H[1]+H[5]+H[7]+J[0]+J[4]+J[6]+J[11]+J[13]+J[31], +H[0]+H[1]+
 H[5]+H[7]+H[8]+J[0]+J[4]+J[6]+J[7]+J[11]+J[13]+J[14]+J[31]+J[32]+J[35], +H[0]+H[1]+H[5]+H[6]+J[0]+J[4]+
 J[5]+J[11]+J[12]+J[30], +H[0]+H[1]+H[5]+H[6]+H[8]+J[0]+J[4]+J[5]+J[7]+J[11]+J[12]+J[14]+J[30]+J[32]+
 J[34], +H[0]+H[1]+H[5]+H[6]+H[7]+J[0]+J[4]+J[5]+J[6]+J[11]+J[12]+J[13]+J[30]+J[31]+J[33], +H[0]+H[1]+
 H[5]+H[6]+H[7]+H[8]+J[0]+J[4]+J[5]+J[6]+J[7]+J[11]+J[12]+J[13]+J[14]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +
 H[0]+H[1]+H[4]+J[0]+J[3]+J[10], +H[0]+H[1]+H[4]+H[8]+J[0]+J[3]+J[7]+J[10]+J[14]+J[29], +H[0]+H[1]+H[4]+
 H[7]+J[0]+J[3]+J[6]+J[10]+J[13]+J[28], +H[0]+H[1]+H[4]+H[7]+H[8]+J[0]+J[3]+J[6]+J[7]+J[10]+J[13]+J[14]+
 J[28]+J[29]+J[35], +H[0]+H[1]+H[4]+H[6]+J[0]+J[3]+J[5]+J[10]+J[12]+J[27], +H[0]+H[1]+H[4]+H[6]+H[8]+
 J[0]+J[3]+J[5]+J[7]+J[10]+J[12]+J[14]+J[27]+J[29]+J[34], +H[0]+H[1]+H[4]+H[6]+H[7]+J[0]+J[3]+J[5]+J[6]+
 J[10]+J[12]+J[13]+J[27]+J[28]+J[33], +H[0]+H[1]+H[4]+H[6]+H[7]+H[8]+J[0]+J[3]+J[5]+J[6]+J[7]+J[10]+J[12]+
 J[13]+J[14]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[4]+H[5]+J[0]+J[3]+J[4]+J[10]+J[11]+J[26], +
 H[0]+H[1]+H[4]+H[5]+H[8]+J[0]+J[3]+J[4]+J[7]+J[10]+J[11]+J[14]+J[26]+J[29]+J[32], +H[0]+H[1]+H[4]+H[5]+
 H[7]+J[0]+J[3]+J[4]+J[6]+J[10]+J[11]+J[13]+J[26]+J[28]+J[31], +H[0]+H[1]+H[4]+H[5]+H[7]+H[8]+J[0]+J[3]+
 J[4]+J[6]+J[7]+J[10]+J[11]+J[13]+J[14]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[1]+H[4]+H[5]+H[6]+
 J[0]+J[3]+J[4]+J[5]+J[10]+J[11]+J[12]+J[26]+J[27]+J[30], +H[0]+H[1]+H[4]+H[5]+H[6]+H[8]+J[0]+J[3]+J[4]+
 J[5]+J[7]+J[10]+J[11]+J[12]+J[14]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[4]+H[5]+H[6]+H[7]+
 J[0]+J[3]+J[4]+J[5]+J[6]+J[10]+J[11]+J[12]+J[13]+J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[4]+
 H[5]+H[6]+H[7]+H[8]+J[0]+J[3]+J[4]+J[5]+J[6]+J[7]+J[10]+J[11]+J[12]+J[13]+J[14]+J[26]+J[27]+J[28]+J[29]+
 J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+J[0]+J[2]+J[9], +H[0]+H[1]+H[3]+H[8]+J[0]+J[2]+
 J[7]+J[9]+J[14]+J[25], +H[0]+H[1]+H[3]+H[7]+J[0]+J[2]+J[6]+J[9]+J[13]+J[24], +H[0]+H[1]+H[3]+H[7]+H[8]+
 J[0]+J[2]+J[6]+J[7]+J[9]+J[13]+J[14]+J[24]+J[25]+J[35], +H[0]+H[1]+H[3]+H[6]+J[0]+J[2]+J[5]+J[9]+J[12]+
 J[23], +H[0]+H[1]+H[3]+H[6]+H[8]+J[0]+J[2]+J[5]+J[7]+J[9]+J[12]+J[14]+J[23]+J[25]+J[34], +H[0]+H[1]+
 H[3]+H[6]+H[7]+J[0]+J[2]+J[5]+J[6]+J[9]+J[12]+J[13]+J[23]+J[24]+J[33], +H[0]+H[1]+H[3]+H[6]+H[7]+H[8]+
 J[0]+J[2]+J[5]+J[6]+J[7]+J[9]+J[12]+J[13]+J[14]+J[23]+J[24]+J[25]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+
 H[5]+J[0]+J[2]+J[4]+J[9]+J[11]+J[22], +H[0]+H[1]+H[3]+H[5]+H[8]+J[0]+J[2]+J[4]+J[7]+J[9]+J[11]+J[14]+
 J[22]+J[25]+J[32], +H[0]+H[1]+H[3]+H[5]+H[7]+J[0]+J[2]+J[4]+J[6]+J[9]+J[11]+J[13]+J[22]+J[24]+J[31], +
 H[0]+H[1]+H[3]+H[5]+H[7]+H[8]+J[0]+J[2]+J[4]+J[6]+J[7]+J[9]+J[11]+J[13]+J[14]+J[22]+J[24]+J[25]+J[31]+
 J[32]+J[35], +H[0]+H[1]+H[3]+H[5]+H[6]+J[0]+J[2]+J[4]+J[5]+J[9]+J[11]+J[12]+J[22]+J[23]+J[30], +H[0]+
 H[1]+H[3]+H[5]+H[6]+H[8]+J[0]+J[2]+J[4]+J[5]+J[7]+J[9]+J[11]+J[12]+J[14]+J[22]+J[23]+J[25]+J[30]+J[32]+
 J[34], +H[0]+H[1]+H[3]+H[5]+H[6]+H[7]+J[0]+J[2]+J[4]+J[5]+J[6]+J[9]+J[11]+J[12]+J[13]+J[22]+J[23]+J[24]+
 J[30]+J[31]+J[33], +H[0]+H[1]+H[3]+H[5]+H[6]+H[7]+H[8]+J[0]+J[2]+J[4]+J[5]+J[6]+J[7]+J[9]+J[11]+J[12]+
 J[13]+J[14]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+H[4]+J[0]+J[2]+
 J[3]+J[9]+J[10]+J[21], +H[0]+H[1]+H[3]+H[4]+H[8]+J[0]+J[2]+J[3]+J[7]+J[9]+J[10]+J[14]+J[21]+J[25]+J[29], +
 H[0]+H[1]+H[3]+H[4]+H[7]+J[0]+J[2]+J[3]+J[6]+J[9]+J[10]+J[13]+J[21]+J[24]+J[28], +H[0]+H[1]+H[3]+H[4]+
 H[7]+H[8]+J[0]+J[2]+J[3]+J[6]+J[7]+J[9]+J[10]+J[13]+J[14]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[0]+
 H[1]+H[3]+H[4]+H[6]+J[0]+J[2]+J[3]+J[5]+J[9]+J[10]+J[12]+J[21]+J[23]+J[27], +H[0]+H[1]+H[3]+H[4]+H[6]+
 H[8]+J[0]+J[2]+J[3]+J[5]+J[7]+J[9]+J[10]+J[12]+J[14]+J[21]+J[23]+J[25]+J[27]+J[29]+J[34], +H[0]+H[1]+
 H[3]+H[4]+H[6]+H[7]+J[0]+J[2]+J[3]+J[5]+J[6]+J[9]+J[10]+J[12]+J[13]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +
 H[0]+H[1]+H[3]+H[4]+H[6]+H[7]+H[8]+J[0]+J[2]+J[3]+J[5]+J[6]+J[7]+J[9]+J[10]+J[12]+J[13]+J[14]+J[21]+
 J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[3]+H[4]+H[5]+J[0]+J[2]+J[3]+J[4]+
 J[9]+J[10]+J[11]+J[21]+J[22]+J[26], +H[0]+H[1]+H[3]+H[4]+H[5]+H[8]+J[0]+J[2]+J[3]+J[4]+J[7]+J[9]+J[10]+
 J[11]+J[14]+J[21]+J[22]+J[25]+J[26]+J[29]+J[32], +H[0]+H[1]+H[3]+H[4]+H[5]+H[7]+J[0]+J[2]+J[3]+J[4]+
 J[6]+J[9]+J[10]+J[11]+J[13]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[0]+H[1]+H[3]+H[4]+H[5]+H[7]+H[8]+
 J[0]+J[2]+J[3]+J[4]+J[6]+J[7]+J[9]+J[10]+J[11]+J[13]+J[14]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+
 J[31]+J[32]+J[35], +H[0]+H[1]+H[3]+H[4]+H[5]+H[6]+J[0]+J[2]+J[3]+J[4]+J[5]+J[9]+J[10]+J[11]+J[12]+J[21]+
 J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+H[1]+H[3]+H[4]+H[5]+H[6]+H[8]+J[0]+J[2]+J[3]+J[4]+J[5]+J[7]+J[9]+
 J[10]+J[11]+J[12]+J[14]+J[21]+J[22]+J[23]+J[25]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[3]+
 H[4]+H[5]+H[6]+H[7]+J[0]+J[2]+J[3]+J[4]+J[5]+J[6]+J[9]+J[10]+J[11]+J[12]+J[13]+J[21]+J[22]+J[23]+J[24]+
 J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[2]+J[3]+J[4]+J[5]+
 J[6]+J[7]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[21]+J[22]+J[23]+J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+
 J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+J[0]+J[1]+J[8], +H[0]+H[1]+H[2]+H[8]+J[0]+J[1]+J[7]+J[8]+
 J[14]+J[20], +H[0]+H[1]+H[2]+H[7]+J[0]+J[1]+J[6]+J[8]+J[13]+J[19], +H[0]+H[1]+H[2]+H[7]+H[8]+J[0]+J[1]+
 J[6]+J[7]+J[8]+J[13]+J[14]+J[19]+J[20]+J[35], +H[0]+H[1]+H[2]+H[6]+J[0]+J[1]+J[5]+J[8]+J[12]+J[18], +
 H[0]+H[1]+H[2]+H[6]+H[8]+J[0]+J[1]+J[5]+J[7]+J[8]+J[12]+J[14]+J[18]+J[20]+J[34], +H[0]+H[1]+H[2]+H[6]+
 H[7]+J[0]+J[1]+J[5]+J[6]+J[8]+J[12]+J[13]+J[18]+J[19]+J[33], +H[0]+H[1]+H[2]+H[6]+H[7]+H[8]+J[0]+J[1]+
 J[5]+J[6]+J[7]+J[8]+J[12]+J[13]+J[14]+J[18]+J[19]+J[20]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[5]+J[0]+
 J[1]+J[4]+J[8]+J[11]+J[17], +H[0]+H[1]+H[2]+H[5]+H[8]+J[0]+J[1]+J[4]+J[7]+J[8]+J[11]+J[14]+J[17]+J[20]+
 J[32], +H[0]+H[1]+H[2]+H[5]+H[7]+J[0]+J[1]+J[4]+J[6]+J[8]+J[11]+J[13]+J[17]+J[19]+J[31], +H[0]+H[1]+
 H[2]+H[5]+H[7]+H[8]+J[0]+J[1]+J[4]+J[6]+J[7]+J[8]+J[11]+J[13]+J[14]+J[17]+J[19]+J[20]+J[31]+J[32]+J[35], +
 H[0]+H[1]+H[2]+H[5]+H[6]+J[0]+J[1]+J[4]+J[5]+J[8]+J[11]+J[12]+J[17]+J[18]+J[30], +H[0]+H[1]+H[2]+H[5]+
 H[6]+H[8]+J[0]+J[1]+J[4]+J[5]+J[7]+J[8]+J[11]+J[12]+J[14]+J[17]+J[18]+J[20]+J[30]+J[32]+J[34], +H[0]+
 H[1]+H[2]+H[5]+H[6]+H[7]+J[0]+J[1]+J[4]+J[5]+J[6]+J[8]+J[11]+J[12]+J[13]+J[17]+J[18]+J[19]+J[30]+J[31]+
 J[33], +H[0]+H[1]+H[2]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[4]+J[5]+J[6]+J[7]+J[8]+J[11]+J[12]+J[13]+J[14]+
 J[17]+J[18]+J[19]+J[20]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[4]+J[0]+J[1]+J[3]+J[8]+
 J[10]+J[16], +H[0]+H[1]+H[2]+H[4]+H[8]+J[0]+J[1]+J[3]+J[7]+J[8]+J[10]+J[14]+J[16]+J[20]+J[29], +H[0]+
 H[1]+H[2]+H[4]+H[7]+J[0]+J[1]+J[3]+J[6]+J[8]+J[10]+J[13]+J[16]+J[19]+J[28], +H[0]+H[1]+H[2]+H[4]+H[7]+
 H[8]+J[0]+J[1]+J[3]+J[6]+J[7]+J[8]+J[10]+J[13]+J[14]+J[16]+J[19]+J[20]+J[28]+J[29]+J[35], +H[0]+H[1]+
 H[2]+H[4]+H[6]+J[0]+J[1]+J[3]+J[5]+J[8]+J[10]+J[12]+J[16]+J[18]+J[27], +H[0]+H[1]+H[2]+H[4]+H[6]+H[8]+
 J[0]+J[1]+J[3]+J[5]+J[7]+J[8]+J[10]+J[12]+J[14]+J[16]+J[18]+J[20]+J[27]+J[29]+J[34], +H[0]+H[1]+H[2]+
 H[4]+H[6]+H[7]+J[0]+J[1]+J[3]+J[5]+J[6]+J[8]+J[10]+J[12]+J[13]+J[16]+J[18]+J[19]+J[27]+J[28]+J[33], +
 H[0]+H[1]+H[2]+H[4]+H[6]+H[7]+H[8]+J[0]+J[1]+J[3]+J[5]+J[6]+J[7]+J[8]+J[10]+J[12]+J[13]+J[14]+J[16]+
 J[18]+J[19]+J[20]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[4]+H[5]+J[0]+J[1]+J[3]+J[4]+
 J[8]+J[10]+J[11]+J[16]+J[17]+J[26], +H[0]+H[1]+H[2]+H[4]+H[5]+H[8]+J[0]+J[1]+J[3]+J[4]+J[7]+J[8]+J[10]+
 J[11]+J[14]+J[16]+J[17]+J[20]+J[26]+J[29]+J[32], +H[0]+H[1]+H[2]+H[4]+H[5]+H[7]+J[0]+J[1]+J[3]+J[4]+
 J[6]+J[8]+J[10]+J[11]+J[13]+J[16]+J[17]+J[19]+J[26]+J[28]+J[31], +H[0]+H[1]+H[2]+H[4]+H[5]+H[7]+H[8]+
 J[0]+J[1]+J[3]+J[4]+J[6]+J[7]+J[8]+J[10]+J[11]+J[13]+J[14]+J[16]+J[17]+J[19]+J[20]+J[26]+J[28]+J[29]+
 J[31]+J[32]+J[35], +H[0]+H[1]+H[2]+H[4]+H[5]+H[6]+J[0]+J[1]+J[3]+J[4]+J[5]+J[8]+J[10]+J[11]+J[12]+J[16]+
 J[17]+J[18]+J[26]+J[27]+J[30], +H[0]+H[1]+H[2]+H[4]+H[5]+H[6]+H[8]+J[0]+J[1]+J[3]+J[4]+J[5]+J[7]+J[8]+
 J[10]+J[11]+J[12]+J[14]+J[16]+J[17]+J[18]+J[20]+J[26]+J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[2]+
 H[4]+H[5]+H[6]+H[7]+J[0]+J[1]+J[3]+J[4]+J[5]+J[6]+J[8]+J[10]+J[11]+J[12]+J[13]+J[16]+J[17]+J[18]+J[19]+
 J[26]+J[27]+J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[2]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[3]+J[4]+J[5]+
 J[6]+J[7]+J[8]+J[10]+J[11]+J[12]+J[13]+J[14]+J[16]+J[17]+J[18]+J[19]+J[20]+J[26]+J[27]+J[28]+J[29]+J[30]+
 J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[3]+J[0]+J[1]+J[2]+J[8]+J[9]+J[15], +H[0]+H[1]+H[2]+
 H[3]+H[8]+J[0]+J[1]+J[2]+J[7]+J[8]+J[9]+J[14]+J[15]+J[20]+J[25], +H[0]+H[1]+H[2]+H[3]+H[7]+J[0]+J[1]+
 J[2]+J[6]+J[8]+J[9]+J[13]+J[15]+J[19]+J[24], +H[0]+H[1]+H[2]+H[3]+H[7]+H[8]+J[0]+J[1]+J[2]+J[6]+J[7]+
 J[8]+J[9]+J[13]+J[14]+J[15]+J[19]+J[20]+J[24]+J[25]+J[35], +H[0]+H[1]+H[2]+H[3]+H[6]+J[0]+J[1]+J[2]+
 J[5]+J[8]+J[9]+J[12]+J[15]+J[18]+J[23], +H[0]+H[1]+H[2]+H[3]+H[6]+H[8]+J[0]+J[1]+J[2]+J[5]+J[7]+J[8]+
 J[9]+J[12]+J[14]+J[15]+J[18]+J[20]+J[23]+J[25]+J[34], +H[0]+H[1]+H[2]+H[3]+H[6]+H[7]+J[0]+J[1]+J[2]+
 J[5]+J[6]+J[8]+J[9]+J[12]+J[13]+J[15]+J[18]+J[19]+J[23]+J[24]+J[33], +H[0]+H[1]+H[2]+H[3]+H[6]+H[7]+
 H[8]+J[0]+J[1]+J[2]+J[5]+J[6]+J[7]+J[8]+J[9]+J[12]+J[13]+J[14]+J[15]+J[18]+J[19]+J[20]+J[23]+J[24]+J[25]+
 J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[3]+H[5]+J[0]+J[1]+J[2]+J[4]+J[8]+J[9]+J[11]+J[15]+J[17]+J[22], +
 H[0]+H[1]+H[2]+H[3]+H[5]+H[8]+J[0]+J[1]+J[2]+J[4]+J[7]+J[8]+J[9]+J[11]+J[14]+J[15]+J[17]+J[20]+J[22]+
 J[25]+J[32], +H[0]+H[1]+H[2]+H[3]+H[5]+H[7]+J[0]+J[1]+J[2]+J[4]+J[6]+J[8]+J[9]+J[11]+J[13]+J[15]+J[17]+
 J[19]+J[22]+J[24]+J[31], +H[0]+H[1]+H[2]+H[3]+H[5]+H[7]+H[8]+J[0]+J[1]+J[2]+J[4]+J[6]+J[7]+J[8]+J[9]+
 J[11]+J[13]+J[14]+J[15]+J[17]+J[19]+J[20]+J[22]+J[24]+J[25]+J[31]+J[32]+J[35], +H[0]+H[1]+H[2]+H[3]+
 H[5]+H[6]+J[0]+J[1]+J[2]+J[4]+J[5]+J[8]+J[9]+J[11]+J[12]+J[15]+J[17]+J[18]+J[22]+J[23]+J[30], +H[0]+
 H[1]+H[2]+H[3]+H[5]+H[6]+H[8]+J[0]+J[1]+J[2]+J[4]+J[5]+J[7]+J[8]+J[9]+J[11]+J[12]+J[14]+J[15]+J[17]+
 J[18]+J[20]+J[22]+J[23]+J[25]+J[30]+J[32]+J[34], +H[0]+H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+J[0]+J[1]+J[2]+
 J[4]+J[5]+J[6]+J[8]+J[9]+J[11]+J[12]+J[13]+J[15]+J[17]+J[18]+J[19]+J[22]+J[23]+J[24]+J[30]+J[31]+J[33], +
 H[0]+H[1]+H[2]+H[3]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[2]+J[4]+J[5]+J[6]+J[7]+J[8]+J[9]+J[11]+J[12]+J[13]+
 J[14]+J[15]+J[17]+J[18]+J[19]+J[20]+J[22]+J[23]+J[24]+J[25]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35], +H[0]+
 H[1]+H[2]+H[3]+H[4]+J[0]+J[1]+J[2]+J[3]+J[8]+J[9]+J[10]+J[15]+J[16]+J[21], +H[0]+H[1]+H[2]+H[3]+H[4]+
 H[8]+J[0]+J[1]+J[2]+J[3]+J[7]+J[8]+J[9]+J[10]+J[14]+J[15]+J[16]+J[20]+J[21]+J[25]+J[29], +H[0]+H[1]+
 H[2]+H[3]+H[4]+H[7]+J[0]+J[1]+J[2]+J[3]+J[6]+J[8]+J[9]+J[10]+J[13]+J[15]+J[16]+J[19]+J[21]+J[24]+J[28], +
 H[0]+H[1]+H[2]+H[3]+H[4]+H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[6]+J[7]+J[8]+J[9]+J[10]+J[13]+J[14]+J[15]+J[16]+
 J[19]+J[20]+J[21]+J[24]+J[25]+J[28]+J[29]+J[35], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+J[0]+J[1]+J[2]+J[3]+
 J[5]+J[8]+J[9]+J[10]+J[12]+J[15]+J[16]+J[18]+J[21]+J[23]+J[27], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+H[8]+
 J[0]+J[1]+J[2]+J[3]+J[5]+J[7]+J[8]+J[9]+J[10]+J[12]+J[14]+J[15]+J[16]+J[18]+J[20]+J[21]+J[23]+J[25]+
 J[27]+J[29]+J[34], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+H[7]+J[0]+J[1]+J[2]+J[3]+J[5]+J[6]+J[8]+J[9]+J[10]+
 J[12]+J[13]+J[15]+J[16]+J[18]+J[19]+J[21]+J[23]+J[24]+J[27]+J[28]+J[33], +H[0]+H[1]+H[2]+H[3]+H[4]+H[6]+
 H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[5]+J[6]+J[7]+J[8]+J[9]+J[10]+J[12]+J[13]+J[14]+J[15]+J[16]+J[18]+J[19]+
 J[20]+J[21]+J[23]+J[24]+J[25]+J[27]+J[28]+J[29]+J[33]+J[34]+J[35], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+J[0]+
 J[1]+J[2]+J[3]+J[4]+J[8]+J[9]+J[10]+J[11]+J[15]+J[16]+J[17]+J[21]+J[22]+J[26], +H[0]+H[1]+H[2]+H[3]+
 H[4]+H[5]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+J[7]+J[8]+J[9]+J[10]+J[11]+J[14]+J[15]+J[16]+J[17]+J[20]+J[21]+
 J[22]+J[25]+J[26]+J[29]+J[32], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[7]+J[0]+J[1]+J[2]+J[3]+J[4]+J[6]+J[8]+
 J[9]+J[10]+J[11]+J[13]+J[15]+J[16]+J[17]+J[19]+J[21]+J[22]+J[24]+J[26]+J[28]+J[31], +H[0]+H[1]+H[2]+
 H[3]+H[4]+H[5]+H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+J[6]+J[7]+J[8]+J[9]+J[10]+J[11]+J[13]+J[14]+J[15]+
 J[16]+J[17]+J[19]+J[20]+J[21]+J[22]+J[24]+J[25]+J[26]+J[28]+J[29]+J[31]+J[32]+J[35], +H[0]+H[1]+H[2]+
 H[3]+H[4]+H[5]+H[6]+J[0]+J[1]+J[2]+J[3]+J[4]+J[5]+J[8]+J[9]+J[10]+J[11]+J[12]+J[15]+J[16]+J[17]+J[18]+
 J[21]+J[22]+J[23]+J[26]+J[27]+J[30], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+
 J[5]+J[7]+J[8]+J[9]+J[10]+J[11]+J[12]+J[14]+J[15]+J[16]+J[17]+J[18]+J[20]+J[21]+J[22]+J[23]+J[25]+J[26]+
 J[27]+J[29]+J[30]+J[32]+J[34], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+J[0]+J[1]+J[2]+J[3]+J[4]+J[5]+
 J[6]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[15]+J[16]+J[17]+J[18]+J[19]+J[21]+J[22]+J[23]+J[24]+J[26]+J[27]+
 J[28]+J[30]+J[31]+J[33], +H[0]+H[1]+H[2]+H[3]+H[4]+H[5]+H[6]+H[7]+H[8]+J[0]+J[1]+J[2]+J[3]+J[4]+J[5]+
 J[6]+J[7]+J[8]+J[9]+J[10]+J[11]+J[12]+J[13]+J[14]+J[15]+J[16]+J[17]+J[18]+J[19]+J[20]+J[21]+J[22]+J[23]+
 J[24]+J[25]+J[26]+J[27]+J[28]+J[29]+J[30]+J[31]+J[32]+J[33]+J[34]+J[35],])
 logZ = fast_logsumexp(energyTerms)[0]
 num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
 Cout[0] = exp( num[0] - logZ ) * num[1]
 num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
 Cout[1] = exp( num[0] - logZ ) * num[1]
 num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
 Cout[2] = exp( num[0] - logZ ) * num[1]
 num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
 Cout[3] = exp( num[0] - logZ ) * num[1]
 num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
 0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
 0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,
 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,
 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,
 1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,
 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
 Cout[4] = exp( num[0] - logZ ) * num[1]
 num = fast_logsumexp(energyTerms, [0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,
 0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,
 1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,
 1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,
 0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,
 1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,
 1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,
 0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,
 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,
 1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,
 0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,
 0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,
 1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,
 0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1])
 Cout[5] = exp( num[0] - logZ ) * num[1]
 num = fast_logsumexp(energyTerms, [0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,
 1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,
 0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,
 1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,
 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,0,
 | |
| 
	IncUnempRet : float
 Transitory income received while "unemployed" when retired.
 T_cycle : int
 Total number of non-terminal periods in the consumer's sequence of periods.
 Returns
 -------
 IncomeDstn : [[np.array]]
 A list with T_cycle elements, each of which is a list of three arrays
 representing a discrete approximation to the income process in a period.
 Order: probabilities, permanent shocks, transitory shocks.
 PermShkDstn : [[np.array]]
 A list with T_cycle elements, each of which is a list of two arrays
 representing a discrete approximation to the permanent income shocks.
 TranShkDstn : [[np.array]]
 A list with T_cycle elements, each of which is a list of two arrays
 representing a discrete approximation to the transitory income shocks.
 '''
 # Unpack the parameters from the input
 PermShkStd = parameters.PermShkStd
 PermShkCount = parameters.PermShkCount
 TranShkStd = parameters.TranShkStd
 TranShkCount = parameters.TranShkCount
 T_cycle = parameters.T_cycle
 T_retire = parameters.T_retire
 UnempPrb = parameters.UnempPrb
 IncUnemp = parameters.IncUnemp
 UnempPrbRet = parameters.UnempPrbRet
 IncUnempRet = parameters.IncUnempRet
 IncomeDstn = [] # Discrete approximations to income process in each period
 PermShkDstn = [] # Discrete approximations to permanent income shocks
 TranShkDstn = [] # Discrete approximations to transitory income shocks
 # Fill out a simple discrete RV for retirement, with value 1.0 (mean of shocks)
 # in normal times; value IncUnempRet in "unemployment" times with small prob.
 if T_retire > 0:
 if UnempPrbRet > 0:
 PermShkValsRet = np.array([1.0, 1.0]) # Permanent income is deterministic in retirement (2 states for temp income shocks)
 TranShkValsRet = np.array([IncUnempRet,
 (1.0-UnempPrbRet*IncUnempRet)/(1.0-UnempPrbRet)])
 ShkPrbsRet = np.array([UnempPrbRet, 1.0-UnempPrbRet])
 else:
 PermShkValsRet = np.array([1.0])
 TranShkValsRet = np.array([1.0])
 ShkPrbsRet = np.array([1.0])
 IncomeDstnRet = [ShkPrbsRet,PermShkValsRet,TranShkValsRet]
 # Loop to fill in the list of IncomeDstn random variables.
 for t in range(T_cycle): # Iterate over all periods, counting forward
 if T_retire > 0 and t >= T_retire:
 # Then we are in the "retirement period" and add a retirement income object.
 IncomeDstn.append(deepcopy(IncomeDstnRet))
 PermShkDstn.append([np.array([1.0]),np.array([1.0])])
 TranShkDstn.append([ShkPrbsRet,TranShkValsRet])
 else:
 # We are in the "working life" periods.
 TranShkDstn_t = approxMeanOneLognormal(N=TranShkCount, sigma=TranShkStd[t], tail_N=0)
 if UnempPrb > 0:
 TranShkDstn_t = addDiscreteOutcomeConstantMean(TranShkDstn_t, p=UnempPrb, x=IncUnemp)
 PermShkDstn_t = approxMeanOneLognormal(N=PermShkCount, sigma=PermShkStd[t], tail_N=0)
 IncomeDstn.append(combineIndepDstns(PermShkDstn_t,TranShkDstn_t)) # mix the independent distributions
 PermShkDstn.append(PermShkDstn_t)
 TranShkDstn.append(TranShkDstn_t)
 return IncomeDstn, PermShkDstn, TranShkDstn
def applyFlatIncomeTax(IncomeDstn,tax_rate,T_retire,unemployed_indices=[],transitory_index=2):
 '''
 Applies a flat income tax rate to all employed income states during the working
 period of life (those before T_retire). Time runs forward in this function.
 Parameters
 ----------
 IncomeDstn : [income distributions]
 The discrete approximation to the income distribution in each time period.
 tax_rate : float
 A flat income tax rate to be applied to all employed income.
 T_retire : int
 The time index after which the agent retires.
 unemployed_indices : [int]
 Indices of transitory shocks that represent unemployment states (no tax).
 transitory_index : int
 The index of each element of IncomeDstn representing transitory shocks.
 Returns
 -------
 IncomeDstn_new : [income distributions]
 The updated income distributions, after applying the tax.
 '''
 IncomeDstn_new = deepcopy(IncomeDstn)
 i = transitory_index
 for t in range(len(IncomeDstn)):
 if t < T_retire:
 for j in range((IncomeDstn[t][i]).size):
 if j not in unemployed_indices:
 IncomeDstn_new[t][i][j] = IncomeDstn[t][i][j]*(1-tax_rate)
 return IncomeDstn_new
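# A minimal usage sketch (not part of the original module): assuming an IncomeDstn list built by
# constructLognormalIncomeProcessUnemployment above, and assuming the unemployment outcome is the first
# element of each transitory shock vector (check the ordering produced by addDiscreteOutcomeConstantMean
# before relying on this), a flat 10% tax on employed income during working life could be applied as follows.
def _example_applyFlatIncomeTax(IncomeDstn, T_retire):
    '''Hypothetical helper illustrating a call to applyFlatIncomeTax.'''
    # Index 0 of the transitory shock values is assumed to be the unemployment state, so it goes untaxed
    return applyFlatIncomeTax(IncomeDstn, tax_rate=0.10, T_retire=T_retire,
                              unemployed_indices=[0], transitory_index=2)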
# =======================================================
# ================ Other useful functions ===============
# =======================================================
def constructAssetsGrid(parameters):
 '''
 Constructs the base grid of post-decision states, representing end-of-period
 assets above the absolute minimum.
 All parameters are passed as attributes of the single input argument, parameters.
 The input can be an instance of a ConsumerType or a custom Parameters class.
 Parameters
 ----------
 aXtraMin: float
 Minimum value for the a-grid
 aXtraMax: float
 Maximum value for the a-grid
 aXtraCount: int
 Size of the a-grid
 aXtraExtra: [float]
 Extra values for the a-grid.
 exp_nest: int
 Level of nesting for the exponentially spaced grid
 Returns
 -------
 aXtraGrid: np.ndarray
 Base array of values for the post-decision-state grid.
 '''
 # Unpack the parameters
 aXtraMin = parameters.aXtraMin
 aXtraMax = parameters.aXtraMax
 aXtraCount = parameters.aXtraCount
 aXtraExtra = parameters.aXtraExtra
 grid_type = 'exp_mult'
 exp_nest = parameters.aXtraNestFac
 # Set up post decision state grid:
 aXtraGrid = None
 if grid_type == "linear":
 aXtraGrid = np.linspace(aXtraMin, aXtraMax, aXtraCount)
 elif grid_type == "exp_mult":
 aXtraGrid = makeGridExpMult(ming=aXtraMin, maxg=aXtraMax, ng=aXtraCount, timestonest=exp_nest)
 else:
 raise Exception("grid_type not recognized in __init__. " + \
 "Please ensure grid_type is 'linear' or 'exp_mult'")
 # Add in additional points for the grid:
 for a in aXtraExtra:
 if (a is not None):
 if a not in aXtraGrid:
 j = aXtraGrid.searchsorted(a)
 aXtraGrid = np.insert(aXtraGrid, j, a)
 return aXtraGrid
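# A minimal usage sketch (not part of the original module): any object exposing the attributes named in the
# docstring can serve as the parameters input. The SimpleNamespace and the numeric values below are purely
# illustrative assumptions, not the toolkit's defaults.
def _example_constructAssetsGrid():
    '''Hypothetical helper illustrating a call to constructAssetsGrid.'''
    from types import SimpleNamespace
    params = SimpleNamespace(aXtraMin=0.001, aXtraMax=20.0, aXtraCount=48,
                             aXtraExtra=[None], aXtraNestFac=3)
    return constructAssetsGrid(params)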
####################################################################################################
# %% [markdown]
# ## Convergence of the Consumption Rules
#
# [The paper's first figure](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/#Convergence-of-the-Consumption-Rules) depicts the successive consumption rules that apply in the last period of life $(c_{T}(m))$, the second-to-last period, and earlier periods under the baseline parameter values given above.
# %% {"code_folding": [0]}
# Create a buffer stock consumer instance by passing the dictionary to the class.
baseEx = IndShockConsumerType(**base_params)
baseEx.cycles = 100 # Make this type have a finite horizon (Set T = 100)
baseEx.solve() # Solve the model
baseEx.unpackcFunc() # Make the consumption function easily accessible
# %% {"code_folding": [0]}
# Plot the different periods' consumption rules.
m1 = np.linspace(0,9.5,1000) # Set the plot range of m
m2 = np.linspace(0,6.5,500)
 c_m = baseEx.cFunc[0](m1) # c_m approximates the limiting infinite-horizon consumption rule c(m)
c_t1 = baseEx.cFunc[-2](m1) # c_t1 defines the second-to-last period consumption rule
c_t5 = baseEx.cFunc[-6](m1) # c_t5 defines the T-5 period consumption rule
c_t10 = baseEx.cFunc[-11](m1) # c_t10 defines the T-10 period consumption rule
c_t0 = m2 # c_t0 defines the last period consumption rule
plt.figure(figsize = (12,9))
plt.plot(m1,c_m,color="black")
plt.plot(m1,c_t1,color="black")
plt.plot(m1,c_t5,color="black")
plt.plot(m1,c_t10,color="black")
plt.plot(m2,c_t0,color="black")
plt.xlim(0,11)
plt.ylim(0,7)
plt.text(7,6,r'$c_{T}(m) = 45$ degree line',fontsize = 22,fontweight='bold')
plt.text(9.6,5.3,r'$c_{T-1}(m)$',fontsize = 22,fontweight='bold')
plt.text(9.6,2.6,r'$c_{T-5}(m)$',fontsize = 22,fontweight='bold')
plt.text(9.6,2.1,r'$c_{T-10}(m)$',fontsize = 22,fontweight='bold')
plt.text(9.6,1.7,r'$c(m)$',fontsize = 22,fontweight='bold')
 plt.arrow(6.9,6.05,-0.6,0,head_width=0.1,width=0.001,facecolor='black',length_includes_head=True)
plt.tick_params(labelbottom=False, labelleft=False,left='off',right='off',bottom='off',top='off')
plt.text(0,7.05,"$c$",fontsize = 26)
plt.text(11.1,0,"$m$",fontsize = 26)
# Save the figures in several formats
if Generator:
 plt.savefig(os.path.join(Figures_HARK_dir, 'cFuncsConverge.png'))
 plt.savefig(os.path.join(Figures_HARK_dir, 'cFuncsConverge.jpg'))
 plt.savefig(os.path.join(Figures_HARK_dir, 'cFuncsConverge.pdf'))
 plt.savefig(os.path.join(Figures_HARK_dir, 'cFuncsConverge.svg'))
if not in_ipynb:
 plt.show(block=False) 
else:
 plt.show(block=True) # Change to False if you want to run uninterrupted
# %% [markdown]
# ## Factors and Conditions
#
# ### [The Finite Human Wealth Condition](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/#Human-Wealth)
#
# Human wealth for a perfect foresight consumer is defined as the present discounted value of future income:
#
# \begin{eqnarray}
 # H_{t} & = & \mathbb{E}[P_{t} + R^{-1} P_{t+1} + R^{-2} P_{t+2} + ... ] \\ 
 # & = & P_{t}\left[1 + (\Gamma/R) + (\Gamma/R)^{2} + ... \right]
# \end{eqnarray}
# which is an infinite number if $\Gamma/R \geq 1$. We say that the 'Finite Human Wealth Condition' (FHWC) holds if 
 # $0 \leq (\Gamma/R) < 1$.
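# %% [markdown]
# A quick numerical check of the FHWC (an added sketch, not part of the original notebook): the ratio
# $\Gamma/R$ can be computed directly from the baseline parameter dictionary. The keys used below
# (`PermGroFac`, `Rfree`) follow HARK's naming convention and are assumed to be present in `base_params`.
# %% {"code_folding": [0]}
# Illustrative check of the Finite Human Wealth Condition under the baseline parameters
PermGro = base_params['PermGroFac'][0] if isinstance(base_params['PermGroFac'], list) else base_params['PermGroFac']
GammaOverR = PermGro/base_params['Rfree']
print('Gamma/R = {:.4f}; FHWC (0 <= Gamma/R < 1) holds: {}'.format(GammaOverR, 0 <= GammaOverR < 1))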
# %% [markdown]
# ### [Absolute Patience and the AIC](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/#AIC)
#
# The paper defines an object which it calls the Absolute Patience Factor, equal to the ratio of $C_{t+1}/C_{t}$ for a perfect foresight consumer. The Old English character <span style="font-size:larger;">"Þ"</span> is used for this object in the paper, but <span style="font-size:larger;">"Þ"</span> cannot currently be rendered conveniently in Jupyter notebooks, so we will substitute $\Phi$ here:
#
# \begin{equation}
# \Phi = (R \beta)^{1/\rho} 
# \end{equation}
#
# If $\Phi = 1$, a perfect foresight consumer will spend exactly the amount that can be sustained perpetually (given their current and future resources). If $\Phi < 1$ (the consumer is 'absolutely impatient'; or, 'the absolute impatience condition holds'), the consumer is consuming more than the sustainable amount, so consumption will fall, and if the consumer is 'absolutely patient' with $\Phi > 1$ consumption will grow over time.
#
#
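# %% [markdown]
# An illustrative computation of the Absolute Patience Factor (an added sketch, not part of the original
# notebook). The keys `Rfree`, `DiscFac`, and `CRRA` are HARK's standard names and are assumed to be
# present in `base_params`.
# %% {"code_folding": [0]}
# Compute Phi = (R*beta)^(1/rho) and check the Absolute Impatience Condition (Phi < 1)
APF = (base_params['Rfree']*base_params['DiscFac'])**(1.0/base_params['CRRA'])
print('Absolute Patience Factor Phi = {:.4f}; AIC (Phi < 1) holds: {}'.format(APF, APF < 1))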
# %% [markdown]
# ### [Growth Patience and the GIC](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/#GIC)
#
# For a [perfect foresight consumer](http://econ.jhu.edu/people/ccarroll/public/lecturenotes/consumption/PerfForesightCRRA), whether the ratio of consumption to the permanent component of income $P$ is rising, constant, or falling depends on the relative growth rates of consumption and permanent income, which is measured by the "Perfect Foresight Growth Patience Factor":
#
# \begin{eqnarray}
# \Phi_{\Gamma} & = & \Phi/\Gamma
# \end{eqnarray}
# and whether the ratio is falling or rising over time depends on whether $\Phi_{\Gamma}$ is below or above 1.
#
# An analogous condition can be defined when there is uncertainty about permanent income. Defining $\tilde{\Gamma} = (\mathbb{E}[\psi^{-1}])^{-1}\Gamma$, the 'Growth Impatience Condition' (GIC) is that 
# \begin{eqnarray}
# \Phi/\tilde{\Gamma} & < & 1
# \end{eqnarray}
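# %% [markdown]
# A sketch of the Growth Impatience check (also not part of the original notebook). It reuses `APF` and
# `PermGro` from the two added cells above and approximates $\mathbb{E}[\psi^{-1}]$ with the discrete
# mean-one lognormal approximation used elsewhere in this file; the helper `approxMeanOneLognormal` and
# the keys `PermShkStd` (assumed to be a one-element list) and `PermShkCount` are assumptions about
# `base_params`.
# %% {"code_folding": [0]}
# Check the GIC: Phi/GammaTilde < 1, where GammaTilde = (E[psi^{-1}])^{-1} * Gamma
PermShkApprox = approxMeanOneLognormal(N=base_params['PermShkCount'], sigma=base_params['PermShkStd'][0])
EpsiInv = np.dot(PermShkApprox[0], 1.0/PermShkApprox[1])  # E[psi^{-1}] from the discrete approximation
GammaTilde = PermGro/EpsiInv
print('Phi/GammaTilde = {:.4f}; GIC holds: {}'.format(APF/GammaTilde, APF/GammaTilde < 1))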
# %% [markdown]
# ### [The Finite Value of Autarky Condition (FVAC)](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/#Autarky-Value)
# %% [markdown]
# The paper [shows](http://econ.jhu.edu/people/ccarroll/papers/BufferStockTheory/#Autarky-Value) that a consumer who planned to spend his permanent income $\{ p_{t}, p_{t+1}, ...\} $ in every period would have value defined by
#
# \begin{equation}
# v_{t}^{\text{autarky}} = u(p_{t})\left(\frac{1}{1-\beta \Gamma^{1-\rho} \mathbb{E}[\psi^{1-\rho}]}\right)
# \end{equation}
#
 # and defines the 'Finite Value of Autarky Condition' as the requirement that the denominator of this expression be a positive number, i.e. that $\beta \Gamma^{1-\rho} \mathbb{E}[\psi^{1-\rho}] < 1$. | |
| 
	#!/usr/bin/env python
from glob import glob
from accessoryFunctions.accessoryFunctions import *
import shutil
__author__ = 'adamkoziol'
def relativesymlink(src_file, dest_file):
 """
 https://stackoverflow.com/questions/9793631/creating-a-relative-symlink-in-python-without-using-os-chdir
 :param src_file: the file to be linked
 :param dest_file: the path and filename to which the file is to be linked
 """
 # Perform relative symlinking
 try:
 os.symlink(
 # Find the relative path for the source file and the destination file
 os.path.relpath(src_file),
 os.path.relpath(dest_file)
 )
 # Except os errors
 except OSError as exception:
 # If the os error is anything but directory exists, then raise
 if exception.errno != errno.EEXIST:
 raise
class Merger(object):
 def idseek(self):
 import pandas
 nesteddictionary = dict()
 # Create a list of all the lines in the file: open(self.idfile).readlines()
 # Create a lambda function
 # Map the list to the lambda function and split the list based on the delimiter: x.split(self.delimiter)
 # List comprehension of individual seq IDs without whitespace: [x.rstrip() for x in ...]
 # self.seqids = map(lambda x: [x.rstrip() for x in x.split(self.delimiter)], open(self.idfile).readlines())
 dictionary = pandas.read_excel(self.idfile).to_dict()
 # Iterate through the dictionary - each header from the excel file
 for header in dictionary:
 # Sample is the primary key, and value is the value of the cell for that primary key + header combination
 for sample, value in dictionary[header].items():
 # Update the dictionary with the new data
 try:
 nesteddictionary[sample].update({header: value})
 # Create the nested dictionary if it hasn't been created yet
 except KeyError:
 nesteddictionary[sample] = dict()
 nesteddictionary[sample].update({header: value})
 # Create objects for each of the samples, rather than using a nested dictionary. It may have been possible to
 # skip the creation of the nested dictionary, and create the objects from the original dictionary, but there
 # seemed to be too many possible places for something to go wrong
 for line in nesteddictionary:
 # Create an object for each sample
 metadata = MetadataObject()
 # Set the name of the metadata to be the primary key for the sample from the excel file
 metadata.name = line
 # Find the headers and values for every sample
 for header, value in nesteddictionary[line].items():
 # Try/except for value.encode() - some of the value are type int, so they cannot be encoded
 try:
 # Create each attribute - use the header (in lowercase, and spaces removed) as the attribute name,
 # and the value as the attribute value
 setattr(metadata, header.replace(' ', '').lower(), str(value))
 except AttributeError:
 setattr(metadata, header.replace(' ', '').lower(), value)
 # Append the object to the list of objects
 self.metadata.append(metadata)
 for sample in self.metadata:
 # Sort the seqIDs
 sample.merge = sorted(sample.merge.split(self.delimiter))
 def idfind(self):
 """Find the fastq files associated with the seq IDs pulled from the seq ID file. Populate a MetadataObject
 with the name of the merged files as well as the fastq file names and paths"""
 for sample in self.metadata:
 # Create the general category for the MetadataObject
 sample.general = GenObject()
 sample.general.fastqfiles = list()
 for ids in sample.merge:
 # Ensure that the id exists. Due to the way the ids were pulled from the file, newline characters
 # will be entered into the list. Skip them
 if ids:
 # Glob for files in the path with the seq ID and 'fastq'
 idfile = glob('{}{}*fastq*'.format(self.path, ids)) # glob was imported directly via 'from glob import glob'
 # Assertion to ensure that all the files specified in :self.idfile are present in the path
 assert idfile, 'Cannot find files for seq ID: {}. Please check that the seqIDs ' \
 'provided in the seq ID file match the files present in the path'.format(ids)
 # Append the fastq file and path and the seq ID to the appropriate list
 sample.general.fastqfiles.append(idfile)
 def idmerge(self):
 """Merge the files together"""
 from threading import Thread
 #
 for i in range(self.cpus):
 # Send the threads to the merge method. :args is empty as the merge method pulls its work from the queue
 threads = Thread(target=self.merge, args=())
 # Set the daemon to true - something to do with thread management
 threads.setDaemon(True)
 # Start the threading
 threads.start()
 for sample in self.metadata:
 # Initialise strings to hold the forward and reverse fastq files
 forwardfiles = list()
 reversefiles = list()
 # Create the output directory
 sample.general.outputdir = '{}{}'.format(self.path, sample.name)
 make_path(sample.general.outputdir)
 # Iterate through the samples
 for files in sample.general.fastqfiles:
 # Find the forward and reverse files (forward files must have _R1_, _1_, or _1. in the name)
 for fastq in files:
 if '_R1_' in fastq or '_1_' in fastq or '_1.' in fastq:
 forwardfiles.append(fastq)
 elif '_R2_' in fastq or '_2_' in fastq or '_2.' in fastq:
 reversefiles.append(fastq)
 # Add the files to the processing queue
 sample.general.outputforward = '{}/{}_S1_L001_R1_001.fastq.gz'.format(sample.general.outputdir, sample.name)
 sample.general.outputreverse = '{}/{}_S1_L001_R2_001.fastq.gz'.format(sample.general.outputdir, sample.name)
 # Add the command object to self.data
 sample.commands = GenObject()
 sample.commands.forwardmerge = 'cat {} > {}'.format(' '.join(forwardfiles), sample.general.outputforward)
 sample.commands.reversemerge = 'cat {} > {}'.format(' '.join(reversefiles), sample.general.outputreverse)
 # Add the commands to the queue
 self.mergequeue.put((sample.commands.forwardmerge, sample.general.outputforward))
 self.mergequeue.put((sample.commands.reversemerge, sample.general.outputreverse))
 # Join the threads
 self.mergequeue.join()
 def merge(self):
 while True: # while daemon
 # Unpack the merge command and the output file from the queue
 (mergecommand, outputfile) = self.mergequeue.get()
 # Don't run the command if the output file exists
 if not os.path.isfile(outputfile):
 try:
 self.execute(mergecommand)
 except KeyboardInterrupt:
 printtime(u'Keyboard interrupt! The system call will not stop until it is finished.', self.start)
 self.mergequeue.empty()
 try:
 os.remove(outputfile)
 except IOError:
 pass
 sys.exit()
 # Signal to mergequeue that job is done
 self.mergequeue.task_done()
 def filelink(self):
 # If the creation of a sample sheet is necessary
 if self.samplesheet:
 # Extract the path of the current script from the full path + file name
 samplesheet = open('{}/SampleSheet.csv'.format(os.path.split(os.path.abspath(__file__))[0])).readlines()
 # Iterate through each merged file
 for sample in self.data:
 # Append enough information to the list to allow the pipeline to work
 samplesheet.append('{},{},,,NA,NA,NA,NA,NA,NA\n'.format(sample.name, sample.name))
 # Initialise the name and path of the output sample sheet
 outsamplesheet = '{}/SampleSheet.csv'.format(self.assemblypath)
 # Don't overwrite a sample sheet already present in the directory
 if not os.path.isfile(outsamplesheet):
 # Open the file to write and write to it
 with open(outsamplesheet, 'w') as writesheet:
 writesheet.write(''.join(samplesheet))
 # Optionally copy
 if self.copy:
 make_path('{}/BestAssemblies'.format(self.assemblypath))
 # Link the files to the assembly path
 for sample in self.metadata:
 try:
 if self.copy:
 shutil.copyfile(sample.general.outputforward, '{}/{}'.format(self.assemblypath,
 os.path.basename(sample.general.outputforward)))
 shutil.copyfile(sample.general.outputreverse, '{}/{}'.format(self.assemblypath,
 os.path.basename(sample.general.outputreverse)))
 else:
 if self.relativepaths:
 relativesymlink(sample.general.outputforward, '{}/{}'.format(self.assemblypath,
 os.path.basename(sample.general.outputforward)))
 relativesymlink(sample.general.outputreverse, '{}/{}'.format(self.assemblypath,
 os.path.basename(sample.general.outputreverse)))
 else:
 os.symlink(sample.general.outputforward, '{}/{}'.format(self.assemblypath,
 os.path.basename(sample.general.outputforward)))
 os.symlink(sample.general.outputreverse, '{}/{}'.format(self.assemblypath,
 os.path.basename(sample.general.outputreverse)))
 # Except os errors
 except OSError as exception:
 # If the os error is anything but directory exists, then raise
 if exception.errno != errno.EEXIST:
 raise
 # Remove the BestAssemblies directory if necessary
 if self.copy:
 os.removedirs('{}/BestAssemblies'.format(self.assemblypath))
 def execute(self, command, outfile=""):
 """
 Allows for dots to be printed to the terminal while waiting for a long system call to run
 :param command: the command to be executed
 :param outfile: optional string of an output file
 from https://stackoverflow.com/questions/4417546/constantly-print-subprocess-output-while-process-is-running
 """
 import time
 from subprocess import Popen, PIPE, STDOUT
 # Initialise the starting time
 start = int(time.time())
 maxtime = 0
 # Run the commands - direct stdout to PIPE and stderr to stdout
 process = Popen(command, shell=True, stdout=PIPE, stderr=STDOUT)
 # Create the output file - if not provided, then nothing should happen
 writeout = open(outfile, "ab+") if outfile else ""
 # Poll process for new output until finished
 while True:
 # If an output file name is provided
 if outfile:
 # Get stdout into a variable
 nextline = process.stdout.readline()
 # Print stdout to the file
 writeout.write(nextline)
 # Break from the loop if the command is finished
 if process.poll() is not None:
 break
 # Adding sleep calls slowed this method down when there was a lot of output. Instead, compare the
 # current time to the start time of the analysis and act only once enough time has elapsed
 currenttime = int(time.time())
 # As each thread will print a dot at the same time, often the dots printed to the terminal do not look
 # even. Instead of 80 per line, there are sometimes around 78-82, or just one. Having this random number
 # seems to fix this
 from random import randint
 # Set the number to be a random integer between 0 and 999
 number = randint(0, 999)
 if currenttime - start > maxtime + number:
 # Set the max time for | |
| 
	# =============
# INTERNET
# =============
SUBREDDITS = [
 "/r/AskReddit",
 "/r/IAmA",
 "/r/bestof",
 "/r/fatpeoplestories",
 "/r/pettyrevenge",
 "/r/TalesFromRetail",
 "/r/DoesAnybodyElse",
 "/r/CrazyIdeas",
 "/r/WTF",
 "/r/aww",
 "/r/cringepics",
 "/r/cringe",
 "/r/JusticePorn",
 "/r/MorbidReality",
 "/r/rage",
 "/r/mildlyinfuriating",
 "/r/creepy",
 "/r/creepyPMs",
 "/r/nosleep",
 "/r/nostalgia",
 "/r/gaming",
 "/r/leagueoflegends",
 "/r/pokemon",
 "/r/Minecraft",
 "/r/starcraft",
 "/r/Games",
 "/r/DotA2",
 "/r/skyrim",
 "/r/tf2",
 "/r/magicTCG",
 "/r/wow",
 "/r/KerbalSpaceProgram",
 "/r/mindcrack",
 "/r/Fallout",
 "/r/roosterteeth",
 "/r/Planetside",
 "/r/gamegrumps",
 "/r/battlefield3",
 "/r/zelda",
 "/r/darksouls",
 "/r/masseffect",
 "/r/arresteddevelopment",
 "/r/gameofthrones",
 "/r/doctorwho",
 "/r/mylittlepony",
 "/r/community",
 "/r/breakingbad",
 "/r/adventuretime",
 "/r/startrek",
 "/r/TheSimpsons",
 "/r/futurama",
 "/r/HIMYM",
 "/r/DunderMifflin",
 "/r/thewalkingdead",
 "/r/Music",
 "/r/movies",
 "/r/harrypotter",
 "/r/StarWars",
 "/r/DaftPunk",
 "/r/hiphopheads",
 "/r/anime",
 "/r/comicbooks",
 "/r/geek",
 "/r/batman",
 "/r/TheLastAirbender",
 "/r/Naruto",
 "/r/FanTheories",
 "/r/funny",
 "/r/AdviceAnimals",
 "/r/fffffffuuuuuuuuuuuu",
 "/r/4chan",
 "/r/ImGoingToHellForThis",
 "/r/firstworldanarchists",
 "/r/circlejerk",
 "/r/MURICA",
 "/r/facepalm",
 "/r/Jokes",
 "/r/wheredidthesodago",
 "/r/polandball",
 "/r/TrollXChromosomes",
 "/r/comics",
 "/r/nottheonion",
 "/r/britishproblems",
 "/r/TumblrInAction",
 "/r/onetruegod",
 "/r/pics",
 "/r/videos",
 "/r/gifs",
 "/r/reactiongifs",
 "/r/mildlyinteresting",
 "/r/woahdude",
 "/r/FiftyFifty",
 "/r/FoodPorn",
 "/r/HistoryPorn",
 "/r/wallpapers",
 "/r/youtubehaiku",
 "/r/Unexpected",
 "/r/photoshopbattles",
 "/r/AnimalsBeingJerks",
 "/r/cosplay",
 "/r/EarthPorn",
 "/r/QuotesPorn",
 "/r/awwnime",
 "/r/AbandonedPorn",
 "/r/carporn",
 "/r/PerfectTiming",
 "/r/OldSchoolCool",
 "/r/RoomPorn",
 "/r/Pareidolia",
 "/r/MapPorn",
 "/r/tumblr",
 "/r/techsupportgore",
 "/r/PrettyGirls",
 "/r/itookapicture",
 "/r/todayilearned",
 "/r/science",
 "/r/askscience",
 "/r/space",
 "/r/AskHistorians",
 "/r/YouShouldKnow",
 "/r/explainlikeimfive",
 "/r/trees",
 "/r/MakeupAddiction",
 "/r/cats",
 "/r/LifeProTips",
 "/r/RedditLaqueristas",
 "/r/Random_Acts_Of_Amazon",
 "/r/food",
 "/r/guns",
 "/r/tattoos",
 "/r/corgi",
 "/r/teenagers",
 "/r/GetMotivated",
 "/r/motorcycles",
 "/r/sex",
 "/r/progresspics",
 "/r/DIY",
 "/r/bicycling",
 "/r/Fitness",
 "/r/lifehacks",
 "/r/longboarding",
 "/r/Frugal",
 "/r/drunk",
 "/r/Art",
 "/r/loseit"
]
SUBREDDITS_NSFW = [
 "/r/nsfw",
 "/r/OnOff",
 "/r/nsfwhardcore",
 "/r/Hotchickswithtattoos",
 "/r/randomsexiness",
 "/r/HappyEmbarrassedGirls",
 "/r/suicidegirls",
 "/r/nsfw2",
 "/r/nsfwcosplay",
 "/r/nsfwoutfits",
 "/r/unashamed",
 "/r/Camwhores",
 "/r/TipOfMyPenis",
 "/r/bonermaterial",
 "/r/Shemales",
 "/r/volleyballgirls",
 "/r/passionx",
 "/r/voluptuous",
 "/r/porn",
 "/r/fitgirls",
 "/r/SheLikesItRough",
 "/r/sexyfrex",
 "/r/trashyboners",
 "/r/GirlswithNeonHair",
 "/r/nsfw411",
 "/r/iWantToFuckHer",
 "/r/freeuse",
 "/r/exxxtras",
 "/r/distension",
 "/r/twingirls",
 "/r/ChangingRooms",
 "/r/realgirls",
 "/r/amateur",
 "/r/homemadexxx",
 "/r/AmateurArchives",
 "/r/dirtypenpals",
 "/r/wifesharing",
 "/r/FestivalSluts",
 "/r/hotwife",
 "/r/NSFW_Snapchat",
 "/r/CollegeAmateurs",
 "/r/amateurcumsluts",
 "/r/GoneWild",
 "/r/gonewildcurvy",
 "/r/AsiansGoneWild",
 "/r/GWCouples",
 "/r/GoneWildplus",
 "/r/PetiteGoneWild",
 "/r/gonewildstories",
 "/r/GoneWildTube",
 "/r/treesgonewild",
 "/r/gonewildaudio",
 "/r/workgonewild",
 "/r/GWNerdy",
 "/r/BigBoobsGW",
 "/r/gonemild",
 "/r/altgonewild",
 "/r/gwcumsluts",
 "/r/gonewildcolor",
 "/r/gifsgonewild",
 "/r/analgw",
 "/r/rule34",
 "/r/hentai",
 "/r/rule34_comics",
 "/r/AsianHotties",
 "/r/AsianHotties",
 "/r/AsiansGoneWild",
 "/r/realasians",
 "/r/juicyasians",
 "/r/IndianBabes",
 "/r/NSFW_Japan",
 "/r/ass",
 "/r/girlsinyogapants",
 "/r/asstastic",
 "/r/facedownassup",
 "/r/anal",
 "/r/assinthong",
 "/r/analgw",
 "/r/BDSM",
 "/r/Bondage",
 "/r/blowjobs",
 "/r/lipsthatgrip",
 "/r/boobies",
 "/r/BustyPetite",
 "/r/tinytits",
 "/r/TittyDrop",
 "/r/burstingout",
 "/r/hugeboobs",
 "/r/stacked",
 "/r/boltedontits",
 "/r/BigBoobsGW",
 "/r/boobbounce",
 "/r/boobs",
 "/r/downblouse",
 "/r/celebnsfw",
 "/r/WatchItForThePlot",
 "/r/girlsinyogapants",
 "/r/girlswithglasses",
 "/r/lingerie",
 "/r/stockings",
 "/r/candidfashionpolice",
 "/r/WtSSTaDaMiT",
 "/r/tightdresses",
 "/r/upskirt",
 "/r/cumsluts",
 "/r/GirlsFinishingTheJob",
 "/r/cumfetish",
 "/r/creampies",
 "/r/amateurcumsluts",
 "/r/gonewildcurvy",
 "/r/curvy",
 "/r/gonewildplus",
 "/r/thick",
 "/r/juicyasians",
 "/r/NSFW_GIF",
 "/r/nsfw_gifs",
 "/r/porn_gifs",
 "/r/porninfifteenseconds",
 "/r/CuteModeSlutMode",
 "/r/ginger",
 "/r/redheads",
 "/r/60fpsporn",
 "/r/highresNSFW",
 "/r/NSFW_HTML5",
 "/r/datgap",
 "/r/girlsinyogapants",
 "/r/stockings",
 "/r/lesbians",
 "/r/StraightGirlsPlaying",
 "/r/ladybonersgw",
 "/r/milf",
 "/r/BustyPetite",
 "/r/dirtysmall",
 "/r/petitegonewild",
 "/r/pussy",
 "/r/rearpussy",
 "/r/innie",
 "/r/legalteens",
 "/r/collegesluts",
 "/r/pornvids",
 "/r/nsfw_videos",
 "/r/palegirls",
 "/r/pawg",
 "/r/holdthemoan",
 "/r/O_faces",
 "/r/grool",
 "/r/jilling",
 "/r/gettingherselfoff",
 "/r/quiver"
]
DOMAINS = [
 ".ac",
 ".ad",
 ".biz",
 ".by",
 ".com",
 ".edu",
 ".gov",
 ".in",
 ".info",
 ".int",
 ".io",
 ".is",
 ".mil",
 ".name",
 ".net",
 ".org",
 ".place",
 ".pw",
 ".ru",
 ".ua",
 ".uk",
 ".us",
 ".uk"
]
EMAIL_DOMAINS = (
 "@gmail.com",
 "@yandex.com",
 "@yahoo.com",
 "@live.com",
 "@outlook.com"
)
EMOJI = (
 ":bowtie:",
 ":smile:",
 ":laughing:",
 ":blush:",
 ":smiley:",
 ":relaxed:",
 ":smirk:",
 ":heart_eyes:",
 ":kissing_heart:",
 ":kissing_closed_eyes:",
 ":flushed:",
 ":relieved:",
 ":satisfied:",
 ":grin:",
 ":wink:",
 ":stuck_out_tongue_winking_eye:",
 ":stuck_out_tongue_closed_eyes:",
 ":grinning:",
 ":kissing:",
 ":kissing_smiling_eyes:",
 ":stuck_out_tongue:",
 ":sleeping:",
 ":worried:",
 ":frowning:",
 ":anguished:",
 ":open_mouth:",
 ":grimacing:",
 ":confused:",
 ":hushed:",
 ":expressionless:",
 ":unamused:",
 ":sweat_smile:",
 ":sweat:",
 ":disappointed_relieved:",
 ":weary:",
 ":pensive:",
 ":disappointed:",
 ":confounded:",
 ":fearful:",
 ":cold_sweat:",
 ":persevere:",
 ":cry:",
 ":sob:",
 ":joy:",
 ":astonished:",
 ":scream:",
 ":neckbeard:",
 ":tired_face:",
 ":angry:",
 ":rage:",
 ":triumph:",
 ":sleepy:",
 ":yum:",
 ":mask:",
 ":sunglasses:",
 ":dizzy_face:",
 ":imp:",
 ":smiling_imp:",
 ":neutral_face:",
 ":no_mouth:",
 ":innocent:",
 ":alien:",
 ":yellow_heart:",
 ":blue_heart:",
 ":purple_heart:",
 ":heart:",
 ":green_heart:",
 ":broken_heart:",
 ":heartbeat:",
 ":heartpulse:",
 ":two_hearts:",
 ":revolving_hearts:",
 ":cupid:",
 ":sparkling_heart:",
 ":sparkles:",
 ":star:",
 ":star2:",
 ":dizzy:",
 ":boom:",
 ":collision:",
 ":anger:",
 ":exclamation:",
 ":question:",
 ":grey_exclamation:",
 ":grey_question:",
 ":zzz:",
 ":dash:",
 ":sweat_drops:",
 ":notes:",
 ":musical_note:",
 ":fire:",
 ":hankey:",
 ":poop:",
 ":shit:",
 ":+1:",
 ":thumbsup:",
 ":-1:",
 ":thumbsdown:",
 ":ok_hand:",
 ":punch:",
 ":facepunch:",
 ":fist:",
 ":v:",
 ":wave:",
 ":hand:",
 ":raised_hand:",
 ":open_hands:",
 ":point_up:",
 ":point_down:",
 ":point_left:",
 ":point_right:",
 ":raised_hands:",
 ":pray:",
 ":point_up_2:",
 ":clap:",
 ":muscle:",
 ":metal:",
 ":fu:",
 ":runner:",
 ":running:",
 ":couple:",
 ":family:",
 ":two_men_holding_hands:",
 ":two_women_holding_hands:",
 ":dancer:",
 ":dancers:",
 ":ok_woman:",
 ":no_good:",
 ":information_desk_person:",
 ":raising_hand:",
 ":bride_with_veil:",
 ":person_with_pouting_face:",
 ":person_frowning:",
 ":bow:",
 ":couplekiss:",
 ":couple_with_heart:",
 ":massage:",
 ":haircut:",
 ":nail_care:",
 ":boy:",
 ":girl:",
 ":woman:",
 ":man:",
 ":baby:",
 ":older_woman:",
 ":older_man:",
 ":person_with_blond_hair:",
 ":man_with_gua_pi_mao:",
 ":man_with_turban:",
 ":construction_worker:",
 ":cop:",
 ":angel:",
 ":princess:",
 ":smiley_cat:",
 ":smile_cat:",
 ":heart_eyes_cat:",
 ":kissing_cat:",
 ":smirk_cat:",
 ":scream_cat:",
 ":crying_cat_face:",
 ":joy_cat:",
 ":pouting_cat:",
 ":japanese_ogre:",
 ":japanese_goblin:",
 ":see_no_evil:",
 ":hear_no_evil:",
 ":speak_no_evil:",
 ":guardsman:",
 ":skull:",
 ":feet:",
 ":lips:",
 ":kiss:",
 ":droplet:",
 ":ear:",
 ":eyes:",
 ":nose:",
 ":tongue:",
 ":love_letter:",
 ":bust_in_silhouette:",
 ":busts_in_silhouette:",
 ":speech_balloon:",
 ":thought_balloon:",
 ":feelsgood:",
 ":finnadie:",
 ":goberserk:",
 ":godmode:",
 ":hurtrealbad:",
 ":rage1:",
 ":rage2:",
 ":rage3:",
 ":rage4:",
 ":suspect:",
 ":trollface:",
 ":sunny:",
 ":umbrella:",
 ":cloud:",
 ":snowflake:",
 ":snowman:",
 ":zap:",
 ":cyclone:",
 ":foggy:",
 ":ocean:",
 ":cat:",
 ":dog:",
 ":mouse:",
 ":hamster:",
 ":rabbit:",
 ":wolf:",
 ":frog:",
 ":tiger:",
 ":koala:",
 ":bear:",
 ":pig:",
 ":pig_nose:",
 ":cow:",
 ":boar:",
 ":monkey_face:",
 ":monkey:",
 ":horse:",
 ":racehorse:",
 ":camel:",
 ":sheep:",
 ":elephant:",
 ":panda_face:",
 ":snake:",
 ":bird:",
 ":baby_chick:",
 ":hatched_chick:",
 ":hatching_chick:",
 ":chicken:",
 ":penguin:",
 ":turtle:",
 ":bug:",
 ":honeybee:",
 ":ant:",
 ":beetle:",
 ":snail:",
 ":octopus:",
 ":tropical_fish:",
 ":fish:",
 ":whale:",
 ":whale2:",
 ":dolphin:",
 ":cow2:",
 ":ram:",
 ":rat:",
 ":water_buffalo:",
 ":tiger2:",
 ":rabbit2:",
 ":dragon:",
 ":goat:",
 ":rooster:",
 ":dog2:",
 ":pig2:",
 ":mouse2:",
 ":ox:",
 ":dragon_face:",
 ":blowfish:",
 ":crocodile:",
 ":dromedary_camel:",
 ":leopard:",
 ":cat2:",
 ":poodle:",
 ":paw_prints:",
 ":bouquet:",
 ":cherry_blossom:",
 ":tulip:",
 ":four_leaf_clover:",
 ":rose:",
 ":sunflower:",
 ":hibiscus:",
 ":maple_leaf:",
 ":leaves:",
 ":fallen_leaf:",
 ":herb:",
 ":mushroom:",
 ":cactus:",
 ":palm_tree:",
 ":evergreen_tree:",
 ":deciduous_tree:",
 ":chestnut:",
 ":seedling:",
 ":blossom:",
 ":ear_of_rice:",
 ":shell:",
 ":globe_with_meridians:",
 ":sun_with_face:",
 ":full_moon_with_face:",
 ":new_moon_with_face:",
 ":new_moon:",
 ":waxing_crescent_moon:",
 ":first_quarter_moon:",
 ":waxing_gibbous_moon:",
 ":full_moon:",
 ":waning_gibbous_moon:",
 ":last_quarter_moon:",
 ":waning_crescent_moon:",
 ":last_quarter_moon_with_face:",
 ":first_quarter_moon_with_face:",
 ":crescent_moon:",
 ":earth_africa:",
 ":earth_americas:",
 ":earth_asia:",
 ":volcano:",
 ":milky_way:",
 ":partly_sunny:",
 ":octocat:",
 ":squirrel:",
 ":bamboo:",
 ":gift_heart:",
 ":dolls:",
 ":school_satchel:",
 ":mortar_board:",
 ":flags:",
 ":fireworks:",
 ":sparkler:",
 ":wind_chime:",
 ":rice_scene:",
 ":jack_o_lantern:",
 ":ghost:",
 ":santa:",
 ":christmas_tree:",
 ":gift:",
 ":bell:",
 ":no_bell:",
 ":tanabata_tree:",
 ":tada:",
 ":confetti_ball:",
 ":balloon:",
 ":crystal_ball:",
 ":cd:",
 ":dvd:",
 ":floppy_disk:",
 ":camera:",
 ":video_camera:",
 ":movie_camera:",
 ":computer:",
 ":tv:",
 ":iphone:",
 ":phone:",
 ":telephone:",
 ":telephone_receiver:",
 ":pager:",
 ":fax:",
 ":minidisc:",
 ":vhs:",
 ":sound:",
 ":speaker:",
 ":mute:",
 ":loudspeaker:",
 ":mega:",
 ":hourglass:",
 ":hourglass_flowing_sand:",
 ":alarm_clock:",
 ":watch:",
 ":radio:",
 ":satellite:",
 ":loop:",
 ":mag:",
 ":mag_right:",
 ":unlock:",
 ":lock:",
 ":lock_with_ink_pen:",
 ":closed_lock_with_key:",
 ":key:",
 ":bulb:",
 ":flashlight:",
 ":high_brightness:",
 ":low_brightness:",
 ":electric_plug:",
 ":battery:",
 ":calling:",
 ":email:",
 ":mailbox:",
 ":postbox:",
 ":bath:",
 ":bathtub:",
 ":shower:",
 ":toilet:",
 ":wrench:",
 ":nut_and_bolt:",
 ":hammer:",
 ":seat:",
 ":moneybag:",
 ":yen:",
 ":dollar:",
 ":pound:",
 ":euro:",
 ":credit_card:",
 ":money_with_wings:",
 ":e-mail:",
 ":inbox_tray:",
 ":outbox_tray:",
 ":envelope:",
 ":incoming_envelope:",
 ":postal_horn:",
 ":mailbox_closed:",
 ":mailbox_with_mail:",
 ":mailbox_with_no_mail:",
 ":package:",
 ":door:",
 ":smoking:",
 ":bomb:",
 ":gun:",
 ":hocho:",
 ":pill:",
 ":syringe:",
 ":page_facing_up:",
 ":page_with_curl:",
 ":bookmark_tabs:",
 ":bar_chart:",
 ":chart_with_upwards_trend:",
 ":chart_with_downwards_trend:",
 ":scroll:",
 ":clipboard:",
 ":calendar:",
 ":date:",
 ":card_index:",
 ":file_folder:",
 ":open_file_folder:",
 ":scissors:",
 ":pushpin:",
 ":paperclip:",
 ":black_nib:",
 ":pencil2:",
 ":straight_ruler:",
 ":triangular_ruler:",
 ":closed_book:",
 ":green_book:",
 ":blue_book:",
 ":orange_book:",
 ":notebook:",
 ":notebook_with_decorative_cover:",
 ":ledger:",
 ":books:",
 ":bookmark:",
 ":name_badge:",
 ":microscope:",
 ":telescope:",
 ":newspaper:",
 ":football:",
 ":basketball:",
 ":soccer:",
 ":baseball:",
 ":tennis:",
 ":8ball:",
 ":rugby_football:",
 ":bowling:",
 ":golf:",
 ":mountain_bicyclist:",
 ":bicyclist:",
 ":horse_racing:",
 ":snowboarder:",
 ":swimmer:",
 ":surfer:",
 ":ski:",
 ":spades:",
 ":hearts:",
 ":clubs:",
 ":diamonds:",
 ":gem:",
 ":ring:",
 ":trophy:",
 ":musical_score:",
 ":musical_keyboard:",
 ":violin:",
 ":space_invader:",
 ":video_game:",
 ":black_joker:",
 ":flower_playing_cards:",
 ":game_die:",
 ":dart:",
 ":mahjong:",
 ":clapper:",
 ":memo:",
 ":pencil:",
 ":book:",
 ":art:",
 ":microphone:",
 ":headphones:",
 ":trumpet:",
 ":saxophone:",
 ":guitar:",
 ":shoe:",
 ":sandal:",
 ":high_heel:",
 ":lipstick:",
 ":boot:",
 ":shirt:",
 ":tshirt:",
 ":necktie:",
 ":womans_clothes:",
 ":dress:",
 ":running_shirt_with_sash:",
 ":jeans:",
 ":kimono:",
 ":bikini:",
 ":ribbon:",
 ":tophat:",
 ":crown:",
 ":womans_hat:",
 ":mans_shoe:",
 ":closed_umbrella:",
 ":briefcase:",
 ":handbag:",
 ":pouch:",
 ":purse:",
 ":eyeglasses:",
 ":fishing_pole_and_fish:",
 ":coffee:",
 ":tea:",
 ":sake:",
 ":baby_bottle:",
 ":beer:",
 ":beers:",
 ":cocktail:",
 ":tropical_drink:",
 ":wine_glass:",
 ":fork_and_knife:",
 ":pizza:",
 ":hamburger:",
 ":fries:",
 ":poultry_leg:",
 ":meat_on_bone:",
 ":spaghetti:",
 ":curry:",
 ":fried_shrimp:",
 ":bento:",
 ":sushi:",
 ":fish_cake:",
 ":rice_ball:",
 ":rice_cracker:",
 ":rice:",
 ":ramen:",
 ":stew:",
 ":oden:",
 ":dango:",
 ":egg:",
 ":bread:",
 ":doughnut:",
 ":custard:",
 ":icecream:",
 ":ice_cream:",
 ":shaved_ice:",
 ":birthday:",
 ":cake:",
 ":cookie:",
 ":chocolate_bar:",
 ":candy:",
 ":lollipop:",
 ":honey_pot:",
 ":apple:",
 ":green_apple:",
 ":tangerine:",
 ":lemon:",
 ":cherries:",
 ":grapes:",
 ":watermelon:",
 ":strawberry:",
 ":peach:",
 ":melon:",
 ":banana:",
 ":pear:",
 ":pineapple:",
 ":sweet_potato:",
 ":eggplant:",
 ":tomato:",
 ":corn:",
 ":house:",
 ":house_with_garden:",
 ":school:",
 ":office:",
 ":post_office:",
 ":hospital:",
 ":bank:",
 ":convenience_store:",
 ":love_hotel:",
 ":hotel:",
 ":wedding:",
 ":elizabeth:",
 ":department_store:",
 ":european_post_office:",
 ":city_sunrise:",
 ":city_sunset:",
 ":japanese_castle:",
 ":european_castle:",
 ":tent:",
 ":factory:",
 ":tokyo_tower:",
 ":japan:",
 ":mount_fuji:",
 ":sunrise_over_mountains:",
 ":sunrise:",
 ":stars:",
 ":statue_of_liberty:",
 ":bridge_at_night:",
 ":carousel_horse:",
 ":rainbow:",
 ":ferris_wheel:",
 ":fountain:",
 ":roller_coaster:",
 ":ship:",
 ":speedboat:",
 ":boat:",
 ":sailboat:",
 ":rowboat:",
 ":anchor:",
 ":rocket:",
 ":airplane:",
 ":helicopter:",
 ":steam_locomotive:",
 ":tram:",
 ":mountain_railway:",
 ":bike:",
 ":aerial_tramway:",
 ":suspension_railway:",
 ":mountain_cableway:",
 ":tractor:",
 ":blue_car:",
 ":oncoming_automobile:",
 ":car:",
 ":red_car:",
 ":taxi:",
 ":oncoming_taxi:",
 ":articulated_lorry:",
 ":bus:",
 ":oncoming_bus:",
 ":rotating_light:",
 ":police_car:",
 ":oncoming_police_car:",
 ":fire_engine:",
 ":ambulance:",
 ":minibus:",
 ":truck:",
 ":train:",
 ":station:",
 ":train2:",
 ":bullettrain_front:",
 ":bullettrain_side:",
 ":light_rail:",
 ":monorail:",
 ":railway_car:",
 ":trolleybus:",
 ":ticket:",
 ":fuelpump:",
 ":vertical_traffic_light:",
 ":traffic_light:",
 ":warning:",
 ":construction:",
 ":beginner:",
 ":atm:",
 ":slot_machine:",
 ":busstop:",
 ":barber:",
 ":hotsprings:",
 ":checkered_flag:",
 ":crossed_flags:",
 ":izakaya_lantern:",
 ":moyai:",
 ":circus_tent:",
 ":performing_arts:",
 ":round_pushpin:",
 ":triangular_flag_on_post:",
 ":jp:",
 ":kr:",
 ":cn:",
 ":us:",
 ":fr:",
 ":es:",
 ":it:",
 ":ru:",
 ":gb:",
 ":uk:",
 ":de:",
 ":one:",
 ":two:",
 ":three:",
 ":four:",
 ":five:",
 ":six:",
 ":seven:",
 ":eight:",
 ":nine:",
 ":keycap_ten:",
 ":1234:",
 ":zero:",
 ":hash:",
 ":symbols:",
 ":arrow_backward:",
 ":arrow_down:",
 ":arrow_forward:",
 ":arrow_left:",
 ":capital_abcd:",
 ":abcd:",
 ":abc:",
 ":arrow_lower_left:",
 ":arrow_lower_right:",
 ":arrow_right:",
 ":arrow_up:",
 ":arrow_upper_left:",
 ":arrow_upper_right:",
 ":arrow_double_down:",
 ":arrow_double_up:",
 ":arrow_down_small:",
 ":arrow_heading_down:",
 ":arrow_heading_up:",
 ":leftwards_arrow_with_hook:",
 ":arrow_right_hook:",
 ":left_right_arrow:",
 ":arrow_up_down:",
 ":arrow_up_small:",
 ":arrows_clockwise:",
 ":arrows_counterclockwise:",
 ":rewind:",
 ":fast_forward:",
 ":information_source:",
 ":ok:",
 ":twisted_rightwards_arrows:",
 ":repeat:",
 ":repeat_one:",
 ":new:",
 ":top:",
 ":up:",
 ":cool:",
 ":free:",
 ":ng:",
 ":cinema:",
 ":koko:",
 ":signal_strength:",
 ":u5272:",
 ":u5408:",
 ":u55b6:",
 ":u6307:",
 ":u6708:",
 ":u6709:",
 ":u6e80:",
 ":u7121:",
 ":u7533:",
 ":u7a7a:",
 ":u7981:",
 ":sa:",
 ":restroom:",
 ":mens:",
 ":womens:",
 ":baby_symbol:",
 ":no_smoking:",
 ":parking:",
 ":wheelchair:",
 ":metro:",
 ":baggage_claim:",
 ":accept:",
 ":wc:",
 ":potable_water:",
 ":put_litter_in_its_place:",
 ":secret:",
 ":congratulations:",
 ":m:",
 ":passport_control:",
 ":left_luggage:",
 ":customs:",
 ":ideograph_advantage:",
 ":cl:",
 ":sos:",
 ":id:",
 ":no_entry_sign:",
 ":underage:",
 ":no_mobile_phones:",
 ":do_not_litter:",
 ":non-potable_water:",
 ":no_bicycles:",
 ":no_pedestrians:",
 ":children_crossing:",
 ":no_entry:",
 ":eight_spoked_asterisk:",
 ":sparkle:",
 ":eight_pointed_black_star:",
 ":heart_decoration:",
 ":vs:",
 ":vibration_mode:",
 ":mobile_phone_off:",
 ":chart:",
 ":currency_exchange:",
 ":aries:",
 ":taurus:",
 ":gemini:",
 ":cancer:",
 ":leo:",
 ":virgo:",
 ":libra:",
 ":scorpius:",
 ":sagittarius:",
 ":capricorn:",
 ":aquarius:",
 ":pisces:",
 ":ophiuchus:",
 ":six_pointed_star:",
 ":negative_squared_cross_mark:",
 ":a:",
 ":b:",
 ":ab:",
 ":o2:",
 ":diamond_shape_with_a_dot_inside:",
 ":recycle:",
 ":end:",
 ":back:",
 ":on:",
 ":soon:",
 ":clock1:",
 ":clock130:",
 ":clock10:",
 ":clock1030:",
 ":clock11:",
 ":clock1130:",
 ":clock12:",
 ":clock1230:",
 ":clock2:",
 ":clock230:",
 ":clock3:",
 ":clock330:",
 ":clock4:",
 ":clock430:",
 ":clock5:",
 ":clock530:",
 ":clock6:",
 ":clock630:",
 ":clock7:",
 ":clock730:",
 ":clock8:",
 ":clock830:",
 ":clock9:",
 ":clock930:",
 ":heavy_dollar_sign:",
 ":copyright:",
 ":registered:",
 ":tm:",
 ":x:",
 ":heavy_exclamation_mark:",
 ":bangbang:",
 ":interrobang:",
 ":o:",
 ":heavy_multiplication_x:",
 ":heavy_plus_sign:",
 ":heavy_minus_sign:",
 ":heavy_division_sign:",
 ":white_flower:",
 ":100:",
 ":heavy_check_mark:",
 ":ballot_box_with_check:",
 ":radio_button:",
 ":link:",
 ":curly_loop:",
 ":wavy_dash:",
 ":part_alternation_mark:",
 ":trident:",
 ":black_small_square:",
 ":white_small_square:",
 ":black_medium_small_square:",
 ":white_medium_small_square:",
 ":black_medium_square:",
 ":white_medium_square:",
 ":black_large_square:",
 ":white_large_square:",
 ":white_check_mark:",
 ":black_square_button:",
 ":white_square_button:",
 ":black_circle:",
 ":white_circle:",
 ":red_circle:",
 ":large_blue_circle:",
 ":large_blue_diamond:",
 ":large_orange_diamond:",
 ":small_blue_diamond:",
 ":small_orange_diamond:",
 ":small_red_triangle:",
 ":small_red_triangle_down:",
 ":shipit:"
)
USER_AGENTS = [
 "Mozilla/5.0 (Linux; Android 6.0.1; SM-G920V Build/MMB29K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.98 Mobile Safari/537.36",
 "Mozilla/5.0 (Linux; Android 5.1.1; SM-G928X Build/LMY47X) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36",
 "Mozilla/5.0 (Windows Phone 10.0; Android 4.2.1; Microsoft; Lumia 950) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2486.0 Mobile Safari/537.36 Edge/13.10586",
 "Mozilla/5.0 (Linux; Android 6.0.1; Nexus 6P Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.83 Mobile Safari/537.36",
 "Mozilla/5.0 (Linux; Android 6.0.1; E6653 Build/32.2.A.0.253) AppleWebKit/537.36 (KHTML, like | |
| 
	
if __name__ == "__main__":
 #%%
 import sys
 import time
 from sklearn.model_selection import StratifiedKFold, train_test_split
 from tqdm import trange
 sys.path.append('..')
 import os
 import torch
 import pandas as pd
 import numpy as np
 from torch.nn import CrossEntropyLoss, BCEWithLogitsLoss
 from lens.models.relu_nn import XReluNN
 from lens.models.psi_nn import PsiNetwork
 from lens.models.tree import XDecisionTreeClassifier
 from lens.models.brl import XBRLClassifier
 from lens.models.deep_red import XDeepRedClassifier
 from lens.utils.base import set_seed, ClassifierNotTrainedError, IncompatibleClassifierError
 from lens.utils.metrics import Accuracy, F1Score
 from lens.models.general_nn import XGeneralNN
 from lens.utils.datasets import StructuredDataset
 from lens.logic.base import test_explanation
 from lens.logic.metrics import complexity, fidelity, formula_consistency
 from data import VDEM
 from data.load_structured_datasets import load_vDem
 # n_sample = 100
 results_dir = f'results/vDem'
 if not os.path.isdir(results_dir):
 os.makedirs(results_dir)
 #%% md
 ## Loading VDEM data
 #%%
 dataset_root = "../data/"
 dataset_name = VDEM
 print(dataset_root)
 print(results_dir)
 x, c, y, feature_names, concept_names, class_names = load_vDem(dataset_root)
 y = y.argmax(dim=1)
 n_features = x.shape[1]
 n_concepts = c.shape[1]
 n_classes = len(class_names)
 dataset_low = StructuredDataset(x, c, dataset_name=dataset_name, feature_names=feature_names, class_names=concept_names)
 print("Number of features", n_features)
 print("Number of concepts", n_concepts)
 print("Feature names", feature_names)
 print("Concept names", concept_names)
 print("Class names", class_names)
 #%% md
 ## Define loss, metrics and methods
 #%%
 loss_low = BCEWithLogitsLoss()
 loss_high = CrossEntropyLoss()
 metric = Accuracy()
 expl_metric = F1Score()
 method_list = ['DTree', 'BRL', 'Psi', 'Relu', 'General'] # 'DeepRed']
 print("Methods", method_list)
 #%% md
 ## Training
 #%%
 epochs = 1000
 n_processes = 4
 timeout = 60 * 60 # 1 h timeout
 l_r = 1e-3
 lr_scheduler = False
 top_k_explanations = None
 simplify = True
 seeds = [*range(5)]
 print("Seeds", seeds)
 device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
 print("Device", device)
 for method in method_list:
 methods = []
 splits = []
 model_explanations = []
 model_accuracies = []
 explanation_accuracies = []
 elapsed_times = []
 explanation_fidelities = []
 explanation_complexities = []
 skf = StratifiedKFold(n_splits=len(seeds), shuffle=True, random_state=0)
 for seed, (trainval_index, test_index) in enumerate(skf.split(x.numpy(), y.numpy())):
 set_seed(seed)
 x_trainval, c_trainval, y_trainval = x[trainval_index], c[trainval_index], y[trainval_index]
 x_test, c_test, y_test = x[test_index], c[test_index], y[test_index]
 x_train, x_val, c_train, c_val, y_train, y_val = train_test_split(x_trainval, c_trainval, y_trainval,
 test_size=0.3, random_state=0)
 train_data_low = StructuredDataset(x_train, c_train, dataset_name, feature_names, concept_names)
 val_data_low = StructuredDataset(x_val, c_val, dataset_name, feature_names, concept_names)
 test_data_low = StructuredDataset(x_test, c_test, dataset_name, feature_names, concept_names)
 data_low = StructuredDataset(x, c, dataset_name, feature_names, concept_names)
 name_low = os.path.join(results_dir, f"{method}_{seed}_low")
 name_high = os.path.join(results_dir, f"{method}_{seed}_high")
 # Setting device
 print(f"Training {name_low} classifier...")
 start_time = time.time()
 if method == 'DTree':
 model_low = XDecisionTreeClassifier(name=name_low, n_classes=n_concepts,
 n_features=n_features, max_depth=5)
 try:
 model_low.load(device)
 print(f"Model {name_low} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_low.fit(train_data_low, val_data_low, metric=metric, save=True)
 c_predicted_train, _ = model_low.predict(train_data_low, device=device)
 c_predicted_val, _ = model_low.predict(val_data_low, device=device)
 c_predicted_test, _ = model_low.predict(test_data_low, device=device)
 accuracy_low = model_low.evaluate(test_data_low, metric=metric)
 train_data_high = StructuredDataset(c_predicted_train, y_train, dataset_name, feature_names, concept_names)
 val_data_high = StructuredDataset(c_predicted_val, y_val, dataset_name, feature_names, concept_names)
 test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
 model_high = XDecisionTreeClassifier(name=name_high, n_classes=n_classes, n_features=n_concepts, max_depth=5)
 try:
 model_high.load(device)
 print(f"Model {name_high} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_high.fit(train_data_high, val_data_high, metric=metric, save=True)
 outputs, labels = model_high.predict(test_data_high, device=device)
 accuracy = model_high.evaluate(test_data_high, metric=metric, outputs=outputs, labels=labels)
 explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
 for i in trange(n_classes):
 explanation = model_high.get_global_explanation(i, concept_names)
 class_output = torch.as_tensor((outputs > 0.5) == i)
 class_label = torch.as_tensor(labels == i)
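 # Note (interpretation): fidelity is fixed at 100 because the rules are read directly off the fitted tree, so they reproduce the model's own predictions by construction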
 exp_fidelity = 100
 exp_accuracy = expl_metric(class_output, class_label)
 explanation_complexity = complexity(explanation)
 explanations.append(explanation), exp_accuracies.append(exp_accuracy)
 exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
 elif method == 'BRL':
 train_sample_rate = 1.0
 model_low = XBRLClassifier(name=name_low, n_classes=n_concepts, n_features=n_features,
 n_processes=n_processes, feature_names=feature_names, class_names=concept_names)
 try:
 model_low.load(device)
 print(f"Model {name_low} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_low.fit(train_data_low, train_sample_rate=train_sample_rate,
 verbose=True, eval=False)
 c_predicted, _ = model_low.predict(data_low, device=device)
 c_predicted_train, c_predicted_test = c_predicted[trainval_index], c_predicted[test_index]
 accuracy_low = model_low.evaluate(test_data_low, metric=metric, outputs=c_predicted_test, labels=c_test)
 train_data_high = StructuredDataset(c_predicted_train, y_trainval, dataset_name, feature_names, concept_names)
 test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
 model_high = XBRLClassifier(name=name_high, n_classes=n_classes, n_features=n_concepts,
 n_processes=n_processes, feature_names=concept_names, class_names=class_names)
 try:
 model_high.load(device)
 print(f"Model {name_high} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_high.fit(train_data_high, train_sample_rate=train_sample_rate, verbose=True,
 eval=False)
 outputs, labels = model_high.predict(test_data_high, device=device)
 accuracy = model_high.evaluate(test_data_high, metric=metric, outputs=outputs, labels=labels)
 explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
 for i in trange(n_classes):
 explanation = model_high.get_global_explanation(i, concept_names)
 exp_accuracy, exp_predictions = test_explanation(explanation, i, c_predicted_test, y_test, metric=expl_metric,
 concept_names=concept_names)
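 # Note (interpretation): as with the decision tree, fidelity is presumably fixed at 100 because the rule list itself is the model, so its predictions match by construction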
 exp_fidelity = 100
 explanation_complexity = complexity(explanation, to_dnf=True)
 explanations.append(explanation), exp_accuracies.append(exp_accuracy)
 exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
 elif method == 'DeepRed':
 train_sample_rate = 0.1
 model_low = XDeepRedClassifier(name=name_low, n_classes=n_concepts, n_features=n_features)
 model_low.prepare_data(dataset_low, dataset_name + "low", seed, trainval_index, test_index, train_sample_rate)
 try:
 model_low.load(device)
 print(f"Model {name_low} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_low.fit(epochs, train_sample_rate=train_sample_rate, verbose=True, eval=False)
 c_predicted_train, _ = model_low.predict(train=True, device=device)
 c_predicted_test, _ = model_low.predict(train=False, device=device)
 accuracy_low = model_low.evaluate(train=False, outputs=c_predicted_test, labels=c_test, metric=metric)
 model_low.finish()
 c_predicted = torch.vstack((c_predicted_train, c_predicted_test))
 y = torch.vstack((y_train, y_test))
 dataset_high = StructuredDataset(c_predicted, y, dataset_name, feature_names, concept_names)
 model_high = XDeepRedClassifier(n_classes, n_features, name=name_high)
 model_high.prepare_data(dataset_high, dataset_name + "high", seed, trainval_index, test_index, train_sample_rate)
 try:
 model_high.load(device)
 print(f"Model {name_high} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_high.fit(epochs, train_sample_rate=train_sample_rate, verbose=True, eval=False)
 outputs, labels = model_high.predict(train=False, device=device)
 accuracy = model_high.evaluate(train=False, metric=metric, outputs=outputs, labels=labels)
 explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
 print("Extracting rules...")
 t = time.time()
 for i in trange(n_classes):
 explanation = model_high.get_global_explanation(i, concept_names, simplify=simplify)
 exp_accuracy, exp_predictions = test_explanation(explanation, i, c_predicted_test, y_test,
 metric=expl_metric,
 concept_names=concept_names, inequalities=True)
 exp_predictions = torch.as_tensor(exp_predictions)
 class_output = torch.as_tensor(outputs.argmax(dim=1) == i)
 exp_fidelity = fidelity(exp_predictions, class_output, expl_metric)
 explanation_complexity = complexity(explanation)
 explanations.append(explanation), exp_accuracies.append(exp_accuracy)
 exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
 print(f"{i + 1}/{n_classes} Rules extracted. Time {time.time() - t}")
 elif method == 'Psi':
 # Network structures
 l1_weight = 1e-4
 hidden_neurons = [10, 5]
 fan_in = 3
 lr_psi = 1e-2
 print("L1 weight", l1_weight)
 print("Hidden neurons", hidden_neurons)
 print("Fan in", fan_in)
 print("Learning rate", lr_psi)
 name_low = os.path.join(results_dir, f"{method}_{seed}_{l1_weight}_{hidden_neurons}_{fan_in}_{lr_psi}_low")
 name_high = os.path.join(results_dir, f"{method}_{seed}_{l1_weight}_{hidden_neurons}_{fan_in}_{lr_psi}_high")
 model_low = PsiNetwork(n_concepts, n_features, hidden_neurons, loss_low, l1_weight, name=name_low,
 fan_in=fan_in)
 try:
 model_low.load(device)
 print(f"Model {name_low} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_low.fit(train_data_low, val_data_low, epochs=epochs, l_r=lr_psi,
 metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
 c_predicted_train = model_low.predict(train_data_low, device=device)[0].detach().cpu()
 c_predicted_val = model_low.predict(val_data_low, device=device)[0].detach().cpu()
 c_predicted_test = model_low.predict(test_data_low, device=device)[0].detach().cpu()
 accuracy_low = model_low.evaluate(test_data_low, outputs=c_predicted_test, labels=c_test, metric=metric)
 train_data_high = StructuredDataset(c_predicted_train, y_train, dataset_name, feature_names, concept_names)
 val_data_high = StructuredDataset(c_predicted_val, y_val, dataset_name, feature_names, concept_names)
 test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
 model_high = PsiNetwork(n_classes, n_concepts, hidden_neurons, loss_high, l1_weight,
 name=name_high, fan_in=fan_in)
 try:
 model_high.load(device)
 print(f"Model {name_high} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_high.fit(train_data_high, val_data_high, epochs=epochs, l_r=lr_psi,
 metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
 outputs, labels = model_high.predict(test_data_high, device=device)
 accuracy = model_high.evaluate(test_data_high, metric=metric, outputs=outputs, labels=labels)
 explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
 for i in trange(n_classes):
 explanation = model_high.get_global_explanation(i, concept_names, simplify=simplify, x_train=c_predicted_train)
 exp_accuracy, exp_predictions = test_explanation(explanation, i, c_predicted_test, y_test,
 metric=expl_metric, concept_names=concept_names)
 exp_predictions = torch.as_tensor(exp_predictions)
 class_output = torch.as_tensor(outputs.argmax(dim=1) == i)
 exp_fidelity = fidelity(exp_predictions, class_output, expl_metric)
 explanation_complexity = complexity(explanation, to_dnf=True)
 explanations.append(explanation), exp_accuracies.append(exp_accuracy)
 exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
 elif method == 'General':
 # Network structures
 l1_weight = 1e-3
 hidden_neurons = [100, 30, 10]
 fan_in = 5
 top_k_explanations = None
 name_low = os.path.join(results_dir, f"{method}_{seed}_{l1_weight}_{hidden_neurons}_{fan_in}_low")
 name_high = os.path.join(results_dir, f"{method}_{seed}_{l1_weight}_{hidden_neurons}_{fan_in}_high")
 model_low = XGeneralNN(n_concepts, n_features, hidden_neurons, fan_in=n_features,
 loss=loss_low, name=name_low, l1_weight=l1_weight)
 try:
 model_low.load(device)
 print(f"Model {name_low} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_low.fit(train_data_low, val_data_low, epochs=epochs, l_r=l_r,
 metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
 c_predicted_train = model_low.predict(train_data_low, device=device)[0].detach().cpu()
 c_predicted_val = model_low.predict(val_data_low, device=device)[0].detach().cpu()
 c_predicted_test = model_low.predict(test_data_low, device=device)[0].detach().cpu()
 accuracy_low = model_low.evaluate(test_data_low, outputs=c_predicted_test, labels=c_test, metric=metric)
 train_data_high = StructuredDataset(c_predicted_train, y_train, dataset_name, feature_names, concept_names)
 val_data_high = StructuredDataset(c_predicted_val, y_val, dataset_name, feature_names, concept_names)
 test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
 model_high = XGeneralNN(n_classes, n_concepts, hidden_neurons, fan_in=fan_in,
 loss=loss_high, name=name_high, l1_weight=l1_weight)
 try:
 model_high.load(device)
 print(f"Model {name_high} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_high.fit(train_data_high, val_data_high, epochs=epochs, l_r=l_r*1e-1,
 metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
 outputs, labels = model_high.predict(test_data_high, device=device)
 accuracy = model_high.evaluate(test_data_high, metric=metric, outputs=outputs, labels=labels)
 explanations, exp_accuracies, exp_fidelities, exp_complexities = [], [], [], []
 for i in trange(n_classes):
 explanation = model_high.get_global_explanation(c_predicted_train, y_train, i,
 top_k_explanations=top_k_explanations,
 concept_names=concept_names, simplify=simplify,
 metric=expl_metric, x_val=c_predicted_val,
 y_val=y_val)
 exp_accuracy, exp_predictions = test_explanation(explanation, i, c_predicted_test, y_test,
 metric=expl_metric, concept_names=concept_names)
 exp_predictions = torch.as_tensor(exp_predictions)
 class_output = torch.as_tensor(outputs.argmax(dim=1) == i)
 exp_fidelity = fidelity(exp_predictions, class_output, expl_metric)
 explanation_complexity = complexity(explanation, to_dnf=True)
 explanations.append(explanation), exp_accuracies.append(exp_accuracy)
 exp_fidelities.append(exp_fidelity), exp_complexities.append(explanation_complexity)
 elif method == 'Relu':
 # Network structures
 l1_weight = 1e-4
 hidden_neurons = [100, 50, 30, 10]
 dropout_rate = 0.01
 print("l1 weight", l1_weight)
 print("hidden neurons", hidden_neurons)
 model_low = XReluNN(n_classes=n_concepts, n_features=n_features, name=name_low, dropout_rate=dropout_rate,
 hidden_neurons=hidden_neurons, loss=loss_low, l1_weight=l1_weight*1e-2)
 try:
 model_low.load(device)
 print(f"Model {name_low} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_low.fit(train_data_low, val_data_low, epochs=epochs, l_r=l_r,
 metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
 c_predicted_train = model_low.predict(train_data_low, device=device)[0].detach().cpu()
 c_predicted_val = model_low.predict(val_data_low, device=device)[0].detach().cpu()
 c_predicted_test = model_low.predict(test_data_low, device=device)[0].detach().cpu()
 accuracy_low = model_low.evaluate(test_data_low, outputs=c_predicted_test, labels=c_test, metric=metric)
 train_data_high = StructuredDataset(c_predicted_train, y_train, dataset_name, feature_names, concept_names)
 val_data_high = StructuredDataset(c_predicted_val, y_val, dataset_name, feature_names, concept_names)
 test_data_high = StructuredDataset(c_predicted_test, y_test, dataset_name, feature_names, concept_names)
 model_high = XReluNN(n_classes=n_classes, n_features=n_concepts, name=name_high, dropout_rate=dropout_rate,
 hidden_neurons=hidden_neurons, loss=loss_high, l1_weight=l1_weight)
 try:
 model_high.load(device)
 print(f"Model {name_high} already trained")
 except (ClassifierNotTrainedError, IncompatibleClassifierError):
 model_high.fit(train_data_high, val_data_high, epochs=epochs, l_r=l_r * 1e-1,
 metric=metric, lr_scheduler=lr_scheduler, device=device, verbose=True)
 outputs, labels = model_high.predict(test_data_high, device=device)
 accuracy | |
| 
	<filename>src/pretix/presale/checkoutflow.py
#
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 <NAME> and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: <NAME>
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import inspect
from collections import defaultdict
from decimal import Decimal
from django.conf import settings
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.core.validators import EmailValidator
from django.http import HttpResponseNotAllowed, JsonResponse
from django.shortcuts import redirect
from django.utils import translation
from django.utils.functional import cached_property
from django.utils.translation import (
 get_language, gettext_lazy as _, pgettext_lazy,
)
from django.views.generic.base import TemplateResponseMixin
from django_scopes import scopes_disabled
from pretix.base.models import Order
from pretix.base.models.orders import InvoiceAddress, OrderPayment
from pretix.base.models.tax import TaxedPrice, TaxRule
from pretix.base.services.cart import (
 CartError, error_messages, get_fees, set_cart_addons, update_tax_rates,
)
from pretix.base.services.orders import perform_order
from pretix.base.signals import validate_cart_addons
from pretix.base.templatetags.rich_text import rich_text_snippet
from pretix.base.views.tasks import AsyncAction
from pretix.multidomain.urlreverse import eventreverse
from pretix.presale.forms.checkout import (
 ContactForm, InvoiceAddressForm, InvoiceNameForm,
)
from pretix.presale.signals import (
 checkout_all_optional, checkout_confirm_messages, checkout_flow_steps,
 contact_form_fields, contact_form_fields_overrides,
 order_meta_from_request, question_form_fields,
 question_form_fields_overrides,
)
from pretix.presale.views import (
 CartMixin, get_cart, get_cart_is_free, get_cart_total,
)
from pretix.presale.views.cart import (
 cart_session, create_empty_cart_id, get_or_create_cart_id,
)
from pretix.presale.views.event import get_grouped_items
from pretix.presale.views.questions import QuestionsViewMixin
class BaseCheckoutFlowStep:
 requires_valid_cart = True
 icon = 'pencil'
 def __init__(self, event):
 self.event = event
 self.request = None
 @property
 def identifier(self):
 raise NotImplementedError()
 @property
 def label(self):
 return pgettext_lazy('checkoutflow', 'Step')
 @property
 def priority(self):
 return 100
 def is_applicable(self, request):
 return True
 def is_completed(self, request, warn=False):
 raise NotImplementedError()
 def get_next_applicable(self, request):
 if hasattr(self, '_next') and self._next:
 if not self._next.is_applicable(request):
 return self._next.get_next_applicable(request)
 return self._next
 def get_prev_applicable(self, request):
 if hasattr(self, '_previous') and self._previous:
 if not self._previous.is_applicable(request):
 return self._previous.get_prev_applicable(request)
 return self._previous
 def get(self, request):
 return HttpResponseNotAllowed([])
 def post(self, request):
 return HttpResponseNotAllowed([])
 def get_step_url(self, request):
 kwargs = {'step': self.identifier}
 if request.resolver_match and 'cart_namespace' in request.resolver_match.kwargs:
 kwargs['cart_namespace'] = request.resolver_match.kwargs['cart_namespace']
 return eventreverse(self.event, 'presale:event.checkout', kwargs=kwargs)
 def get_prev_url(self, request):
 prev = self.get_prev_applicable(request)
 if not prev:
 kwargs = {}
 if request.resolver_match and 'cart_namespace' in request.resolver_match.kwargs:
 kwargs['cart_namespace'] = request.resolver_match.kwargs['cart_namespace']
 return eventreverse(self.request.event, 'presale:event.index', kwargs=kwargs)
 else:
 return prev.get_step_url(request)
 def get_next_url(self, request):
 n = self.get_next_applicable(request)
 if n:
 return n.get_step_url(request)
 @cached_property
 def cart_session(self):
 return cart_session(self.request)
 @cached_property
 def invoice_address(self):
 if not hasattr(self.request, '_checkout_flow_invoice_address'):
 iapk = self.cart_session.get('invoice_address')
 if not iapk:
 self.request._checkout_flow_invoice_address = InvoiceAddress()
 else:
 try:
 with scopes_disabled():
 self.request._checkout_flow_invoice_address = InvoiceAddress.objects.get(
 pk=iapk, order__isnull=True
 )
 except InvoiceAddress.DoesNotExist:
 self.request._checkout_flow_invoice_address = InvoiceAddress()
 return self.request._checkout_flow_invoice_address
def get_checkout_flow(event):
 flow = [step(event) for step in DEFAULT_FLOW]
 for receiver, response in checkout_flow_steps.send(event):
 step = response(event=event)
 if step.priority > 1000:
 raise ValueError('Plugins are not allowed to define a priority greater than 1000')
 flow.append(step)
 # Sort by priority
 flow.sort(key=lambda p: p.priority)
 # Create a double-linked-list for easy forwards/backwards traversal
 last = None
 for step in flow:
 step._previous = last
 if last:
 last._next = step
 last = step
 return flow
class TemplateFlowStep(TemplateResponseMixin, BaseCheckoutFlowStep):
 template_name = ""
 def get_context_data(self, **kwargs):
 kwargs.setdefault('step', self)
 kwargs.setdefault('event', self.event)
 kwargs.setdefault('has_prev', self.get_prev_applicable(self.request) is not None)
 kwargs.setdefault('prev_url', self.get_prev_url(self.request))
 kwargs.setdefault('checkout_flow', [
 step
 for step in self.request._checkout_flow
 if step.is_applicable(self.request)
 ])
 return kwargs
 def render(self, **kwargs):
 context = self.get_context_data(**kwargs)
 return self.render_to_response(context)
 def get(self, request):
 self.request = request
 return self.render()
 def post(self, request):
 self.request = request
 return self.render()
 def is_completed(self, request, warn=False):
 raise NotImplementedError()
 @property
 def identifier(self):
 raise NotImplementedError()
class AddOnsStep(CartMixin, AsyncAction, TemplateFlowStep):
 priority = 40
 identifier = "addons"
 template_name = "pretixpresale/event/checkout_addons.html"
 task = set_cart_addons
 known_errortypes = ['CartError']
 requires_valid_cart = False
 label = pgettext_lazy('checkoutflow', 'Add-on products')
 icon = 'puzzle-piece'
 def is_applicable(self, request):
 if not hasattr(request, '_checkoutflow_addons_applicable'):
 request._checkoutflow_addons_applicable = get_cart(request).filter(item__addons__isnull=False).exists()
 return request._checkoutflow_addons_applicable
 def is_completed(self, request, warn=False):
 if getattr(self, '_completed', None) is not None:
 return self._completed
 for cartpos in get_cart(request).filter(addon_to__isnull=True).prefetch_related(
 'item__addons', 'item__addons__addon_category', 'addons', 'addons__item'
 ):
 a = cartpos.addons.all()
 for iao in cartpos.item.addons.all():
 found = len([1 for p in a if p.item.category_id == iao.addon_category_id and not p.is_bundled])
 if found < iao.min_count or found > iao.max_count:
 self._completed = False
 return False
 self._completed = True
 return True
 @cached_property
 def forms(self):
 """
 A list of forms with one form for each cart position that can have add-ons.
 All forms have a custom prefix, so that they can all be submitted at once.
 """
 formset = []
 quota_cache = {}
 item_cache = {}
 for cartpos in get_cart(self.request).filter(addon_to__isnull=True).prefetch_related(
 'item__addons', 'item__addons__addon_category', 'addons', 'addons__variation',
 ).order_by('pk'):
 formsetentry = {
 'cartpos': cartpos,
 'item': cartpos.item,
 'variation': cartpos.variation,
 'categories': []
 }
 formset.append(formsetentry)
 current_addon_products = defaultdict(list)
 for a in cartpos.addons.all():
 if not a.is_bundled:
 current_addon_products[a.item_id, a.variation_id].append(a)
 for iao in cartpos.item.addons.all():
 ckey = '{}-{}'.format(cartpos.subevent.pk if cartpos.subevent else 0, iao.addon_category.pk)
 if ckey not in item_cache:
 # Get all items to possibly show
 items, _btn = get_grouped_items(
 self.request.event,
 subevent=cartpos.subevent,
 voucher=None,
 channel=self.request.sales_channel.identifier,
 base_qs=iao.addon_category.items,
 allow_addons=True,
 quota_cache=quota_cache
 )
 item_cache[ckey] = items
 else:
 items = item_cache[ckey]
 for i in items:
 i.allow_waitinglist = False
 if i.has_variations:
 for v in i.available_variations:
 v.initial = len(current_addon_products[i.pk, v.pk])
 if v.initial and i.free_price:
 a = current_addon_products[i.pk, v.pk][0]
 v.initial_price = TaxedPrice(
 net=a.price - a.tax_value,
 gross=a.price,
 tax=a.tax_value,
 name=a.item.tax_rule.name if a.item.tax_rule else "",
 rate=a.tax_rate,
 )
 else:
 v.initial_price = v.display_price
 i.expand = any(v.initial for v in i.available_variations)
 else:
 i.initial = len(current_addon_products[i.pk, None])
 if i.initial and i.free_price:
 a = current_addon_products[i.pk, None][0]
 i.initial_price = TaxedPrice(
 net=a.price - a.tax_value,
 gross=a.price,
 tax=a.tax_value,
 name=a.item.tax_rule.name if a.item.tax_rule else "",
 rate=a.tax_rate,
 )
 else:
 i.initial_price = i.display_price
 if items:
 formsetentry['categories'].append({
 'category': iao.addon_category,
 'price_included': iao.price_included,
 'multi_allowed': iao.multi_allowed,
 'min_count': iao.min_count,
 'max_count': iao.max_count,
 'iao': iao,
 'items': items
 })
 return formset
 def get_context_data(self, **kwargs):
 ctx = super().get_context_data(**kwargs)
 ctx['forms'] = self.forms
 ctx['cart'] = self.get_cart()
 return ctx
 def get_success_message(self, value):
 return None
 def get_success_url(self, value):
 return self.get_next_url(self.request)
 def get_error_url(self):
 return self.get_step_url(self.request)
 def get(self, request, **kwargs):
 self.request = request
 if 'async_id' in request.GET and settings.HAS_CELERY:
 return self.get_result(request)
 return TemplateFlowStep.get(self, request)
 def _clean_category(self, form, category):
 selected = {}
 for i in category['items']:
 if i.has_variations:
 for v in i.available_variations:
 val = int(self.request.POST.get(f'cp_{form["cartpos"].pk}_variation_{i.pk}_{v.pk}') or '0')
 price = self.request.POST.get(f'cp_{form["cartpos"].pk}_variation_{i.pk}_{v.pk}_price') or '0'
 if val:
 selected[i, v] = val, price
 else:
 val = int(self.request.POST.get(f'cp_{form["cartpos"].pk}_item_{i.pk}') or '0')
 price = self.request.POST.get(f'cp_{form["cartpos"].pk}_item_{i.pk}_price') or '0'
 if val:
 selected[i, None] = val, price
 if sum(a[0] for a in selected.values()) > category['max_count']:
 # TODO: Proper pluralization
 raise ValidationError(
 _(error_messages['addon_max_count']),
 'addon_max_count',
 {
 'base': str(form['item'].name),
 'max': category['max_count'],
 'cat': str(category['category'].name),
 }
 )
 elif sum(a[0] for a in selected.values()) < category['min_count']:
 # TODO: Proper pluralization
 raise ValidationError(
 _(error_messages['addon_min_count']),
 'addon_min_count',
 {
 'base': str(form['item'].name),
 'min': category['min_count'],
 'cat': str(category['category'].name),
 }
 )
 elif any(sum(v[0] for k, v in selected.items() if k[0] == i) > 1 for i in category['items']) and not category['multi_allowed']:
 raise ValidationError(
 _(error_messages['addon_no_multi']),
 'addon_no_multi',
 {
 'base': str(form['item'].name),
 'cat': str(category['category'].name),
 }
 )
 try:
 validate_cart_addons.send(
 sender=self.event,
 addons={k: v[0] for k, v in selected.items()},
 base_position=form["cartpos"],
 iao=category['iao']
 )
 except CartError as e:
 raise ValidationError(str(e))
 return selected
 def post(self, request, *args, **kwargs):
 self.request = request
 data = []
 for f in self.forms:
 for c in f['categories']:
 try:
 selected = self._clean_category(f, c)
 except ValidationError as e:
 messages.error(request, e.message % e.params | |
| 
	<filename>src/celpy/celtypes.py
# SPDX-Copyright: Copyright (c) Capital One Services, LLC
# SPDX-License-Identifier: Apache-2.0
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
"""
CEL Types: wrappers on Python types to provide CEL semantics.
This can be used by a Python module to work with CEL-friendly values and CEL results.
Examples of distinctions between CEL and Python:
 - Unlike Python ``bool``, CEL :py:class:`BoolType` refuses arithmetic that Python ``bool`` would allow.
- CEL has ``int64`` and ``uint64`` subclasses of integer. These have specific ranges and
 raise :exc:`ValueError` errors on overflow.
CEL types will raise :exc:`ValueError` for out-of-range values and :exc:`TypeError`
for operations they refuse.
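 For example, a quick sketch of the intended behaviour based on the rules above (the exact
 exception messages are implementation details)::
  IntType(2 ** 63)   # exceeds the signed 64-bit range, expected to raise ValueError
  UintType(-1)       # below the unsigned 64-bit range, expected to raise ValueError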
The :py:mod:`evaluation` module can capture these exceptions and turn them into result values.
This can permit the logic operators to quietly silence them via "short-circuiting".
 In the normal course of events, CEL's evaluator may attempt operations between a
 CEL exception result and an instance of one of the CEL types.
 We rely on this leading to an ordinary Python :exc:`TypeError` being raised to propagate
 the error. Alternatively, a logic operator may discard the error object.
 The :py:mod:`evaluation` module extends these types with its own :exc:`CELEvalError` exception.
We try to keep that as a separate concern from the core operator implementations here.
We leverage Python features, which means raising exceptions when there is a problem.
Types
=============
See https://github.com/google/cel-go/tree/master/common/types
These are the Go type definitions that are used by CEL:
- BoolType
- BytesType
- DoubleType
- DurationType
- IntType
- ListType
- MapType
- NullType
- StringType
- TimestampType
- TypeType
- UintType
 The above types are handled directly by CEL syntax.
e.g., ``42`` vs. ``42u`` vs. ``"42"`` vs. ``b"42"`` vs. ``42.``.
We provide matching Python class names for each of these types. The Python type names
are subclasses of Python native types, allowing a client to transparently work with
CEL results. A Python host should be able to provide values to CEL that will be tolerated.
A type hint of ``Value`` unifies these into a common hint.
The CEL Go implementation also supports protobuf types:
- dpb.Duration
- tpb.Timestamp
- structpb.ListValue
- structpb.NullValue
- structpb.Struct
- structpb.Value
- wrapperspb.BoolValue
- wrapperspb.BytesValue
- wrapperspb.DoubleValue
- wrapperspb.FloatValue
- wrapperspb.Int32Value
- wrapperspb.Int64Value
- wrapperspb.StringValue
- wrapperspb.UInt32Value
- wrapperspb.UInt64Value
These types involve expressions like the following::
 google.protobuf.UInt32Value{value: 123u}
In this case, the well-known protobuf name is directly visible as CEL syntax.
There's a ``google`` package with the needed definitions.
Type Provider
==============================
A type provider can be bound to the environment, this will support additional types.
This appears to be a factory to map names of types to type classes.
Run-time type binding is shown by a CEL expression like the following::
 TestAllTypes{single_uint32_wrapper: 432u}
The ``TestAllTypes`` is a protobuf type added to the CEL run-time. The syntax
is defined by this syntax rule::
 member_object : member "{" [fieldinits] "}"
The ``member`` is part of a type provider library,
either a standard protobuf definition or an extension. The field inits build
values for the protobuf object.
See https://github.com/google/cel-go/blob/master/test/proto3pb/test_all_types.proto
for the ``TestAllTypes`` protobuf definition that is registered as a type provider.
 This expression describes a Protobuf ``uint32`` object.
Type Adapter
=============
So far, it appears that a type adapter wraps existing Go or C++ types
with CEL-required methods. This seems like it does not need to be implemented
in Python.
Numeric Details
===============
Integer division truncates toward zero.
The Go definition of modulus::
 // Mod returns the floating-point remainder of x/y.
 // The magnitude of the result is less than y and its
 // sign agrees with that of x.
https://golang.org/ref/spec#Arithmetic_operators
"Go has the nice property that -a/b == -(a/b)."
::
  x    y   x / y   x % y
  5    3     1       2
 -5    3    -1      -2
  5   -3    -1       2
 -5   -3     1      -2
Python definition::
 The modulo operator always yields a result
 with the same sign as its second operand (or zero);
 the absolute value of the result is strictly smaller than
 the absolute value of the second operand.
Here's the essential rule::
 x//y * y + x%y == x
However, Python ``//`` truncates toward negative infinity, while Go ``/`` truncates toward zero.
To get Go-like behavior, we need to use absolute values and restore the signs later.
::
 x_sign = -1 if x < 0 else +1
 go_mod = x_sign * (abs(x) % abs(y))
 return go_mod
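A matching sketch for Go-style integer division (an illustration here, not part of
this module's API)::
 x_sign = -1 if x < 0 else +1
 y_sign = -1 if y < 0 else +1
 go_div = x_sign * y_sign * (abs(x) // abs(y))
 return go_div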
Timezone Details
================
An implementation may have additional timezone names that must be injected into
the dateutil ``gettz()`` processing.
For example, there may be the following sequence (a sketch follows the list):
1. A lowercase match for an alias or an existing dateutil timezone.
2. A titlecase match for an existing dateutil timezone.
3. The fallback, which is a +/-HH:MM string.
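A minimal sketch of that lookup order (illustrative only; ``TZ_ALIASES`` is a
hypothetical extension mapping, not something this module defines)::
 TZ_ALIASES = {}  # hypothetical: extra lowercase names -> IANA names
 def tz_lookup(name):
     # 1. lowercase alias or existing dateutil timezone
     tz = dateutil.tz.gettz(TZ_ALIASES.get(name.lower(), name.lower()))
     if tz is None:
         # 2. titlecase match for an existing dateutil timezone
         tz = dateutil.tz.gettz(name.title())
     if tz is None:
         # 3. fallback: a +/-HH:MM offset string
         sign = -1 if name.startswith("-") else 1
         hh, _, mm = name.lstrip("+-").partition(":")
         tz = datetime.timezone(sign * datetime.timedelta(hours=int(hh), minutes=int(mm or 0)))
     return tz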
.. TODO: Permit an extension into the timezone lookup.
"""
import datetime
import logging
import re
from functools import reduce, wraps
from math import fsum
from typing import (Any, Callable, Dict, Iterable, List, Mapping, NoReturn,
 Optional, Sequence, Tuple, Type, TypeVar, Union, cast,
 overload)
import dateutil.parser
import dateutil.tz
logger = logging.getLogger("celtypes")
Value = Union[
 'BoolType',
 'BytesType',
 'DoubleType',
 'DurationType',
 'IntType',
 'ListType',
 'MapType',
 None, # Used instead of NullType
 'StringType',
 'TimestampType',
 'UintType',
]
# The domain of types used to build Annotations.
CELType = Union[
 Type['BoolType'],
 Type['BytesType'],
 Type['DoubleType'],
 Type['DurationType'],
 Type['IntType'],
 Type['ListType'],
 Type['MapType'],
 Callable[..., None], # Used instead of NullType
 Type['StringType'],
 Type['TimestampType'],
 Type['TypeType'], # Used to mark Protobuf Type values
 Type['UintType'],
 Type['PackageType'],
 Type['MessageType'],
]
def type_matched(method: Callable[[Any, Any], Any]) -> Callable[[Any, Any], Any]:
 """Decorates a method to assure the "other" value has the same type."""
 @wraps(method)
 def type_matching_method(self: Any, other: Any) -> Any:
 if not(issubclass(type(other), type(self)) or issubclass(type(self), type(other))):
 raise TypeError(f"no such overload: {self!r} {type(self)} != {other!r} {type(other)}")
 return method(self, other)
 return type_matching_method
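# Illustrative sketch only (not this module's actual definition): ``type_matched`` is
# meant to decorate binary dunder methods on the CEL types defined below, e.g.
#
#     class StringType(str):
#         @type_matched
#         def __eq__(self, other):
#             return BoolType(super().__eq__(other))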
def logical_condition(e: Value, x: Value, y: Value) -> Value:
 """
 CEL e ? x : y operator.
 Choose one of x or y. Exceptions in the unchosen expression are ignored.
 Example::
 2 / 0 > 4 ? 'baz' : 'quux'
 is a "division by zero" error.
 ::
 >>> logical_condition(
 ... BoolType(True), StringType("this"), StringType("Not That"))
 StringType('this')
 >>> logical_condition(
 ... BoolType(False), StringType("Not This"), StringType("that"))
 StringType('that')
 """
 if not isinstance(e, BoolType):
 raise TypeError(f"Unexpected {type(e)} ? {type(x)} : {type(y)}")
 result = x if e else y
 logger.debug(f"logical_condition({e!r}, {x!r}, {y!r}) = {result!r}")
 return result
def logical_and(x: Value, y: Value) -> Value:
 """
 Native Python has a left-to-right rule: (False and y) is False, (True and y) is y.
 CEL && is commutative with non-Boolean values, including error objects:
 ``(x && false)`` is ``false``, and ``(true && y)`` is ``y``.
 """
 if not isinstance(x, BoolType) and not isinstance(y, BoolType):
 raise TypeError(f"{type(x)} {x!r} and {type(y)} {y!r}")
 elif not isinstance(x, BoolType) and isinstance(y, BoolType):
 if y:
 return x # whatever && true == whatever
 else:
 return y # whatever && false == false
 elif isinstance(x, BoolType) and not isinstance(y, BoolType):
 if x:
 return y # true && whatever == whatever
 else:
 return x # false && whatever == false
 else:
 return BoolType(cast(BoolType, x) and cast(BoolType, y))
def logical_not(x: Value) -> Value:
 """
 Native Python ``not`` accepts any value; CEL's logical not is restricted to :py:class:`BoolType`.
 """
 if isinstance(x, BoolType):
 result = BoolType(not x)
 else:
 raise TypeError(f"not {type(x)}")
 logger.debug(f"logical_not({x!r}) = {result!r}")
 return result
def logical_or(x: Value, y: Value) -> Value:
 """
 Native Python has a left-to-right rule: (True or y) is True, (False or y) is y.
 CEL || is commutative with non-Boolean values, including errors.
 ``(x || false)`` is ``x``, and ``(false || y)`` is ``y``.
 Example 1::
 false || 1/0 != 0
 is a "no matching overload" error.
 Example 2::
 (2 / 0 > 3 ? false : true) || true
 is a "True"
 If the operand(s) are not BoolType, we'll raise a TypeError that will become a CELEvalError.
 """
 if not isinstance(x, BoolType) and not isinstance(y, BoolType):
 raise TypeError(f"{type(x)} {x!r} or {type(y)} {y!r}")
 elif not isinstance(x, BoolType) and isinstance(y, BoolType):
 if y:
 return y # whatever || true == true
 else:
 return x # whatever || false == whatever
 elif isinstance(x, BoolType) and not isinstance(y, BoolType):
 if x:
 return x # true || whatever == true
 else:
 return y # false || whatever == whatever
 else:
 return BoolType(cast(BoolType, x) or cast(BoolType, y))
class BoolType(int):
 """
 Native Python permits unary operators on Booleans.
 For CEL, We need to prevent -false from | |
| 
	import os
import glob
import scipy
import pickle
import numpy as np
from source.offline_ds_evaluation.metrics_manager import MetricsManager
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
# Turn interactive plotting off
plt.ioff()
import seaborn as sns
sns.set()
matplotlib.rc('xtick', labelsize=10)
matplotlib.rc('ytick', labelsize=10)
run = 4
folder = ["baselines", "offpolicy", "offline", "all", "presentation"][run]
image_type = "png"
figsize = (12, 6)
figsize_legend = (12, 1)
figsize_half = (12, 3.5)
figsize_half_half = (8, 4)
figsize_small = (16, 3)
figsize_comp = (12, 6)
figsize_envs = (12, 7.2)
figsize_theplot = (12, 12)
# metric manager
experiments = ["ex4", "ex5", "ex6"]
mm = MetricsManager(0)
for ex in experiments:
 paths = glob.glob(os.path.join("..", "..", "data", ex, "metrics*.pkl"))
 for path in paths:
 with open(path, "rb") as f:
 m = pickle.load(f)
 mm.data.update(m.data)
# static stuff
envs = {'CartPole-v1': 0, 'MountainCar-v0': 1, "MiniGrid-LavaGapS7-v0": 2, "MiniGrid-Dynamic-Obstacles-8x8-v0": 3,
 'Breakout-MinAtar-v0': 4, "Space_invaders-MinAtar-v0": 5}
algolist = [["BC", "BVE", "MCE"],
 ["DQN", "QRDQN", "REM"],
 ["BCQ", "CQL", "CRR"],
 ["BC", "BVE", "MCE", "DQN", "QRDQN", "REM", "BCQ", "CQL", "CRR"],
 ["BC", "BVE", "DQN", "BCQ"]]
algos = algolist[run]
buffer = {"random": "Random Policy", "mixed": "Mixed Policy", "er": "Experience Replay",
 "noisy": "Noisy Policy", "fully": "Final Policy"}
y_bounds = {'CartPole-v1': (-15, 15), "MiniGrid-LavaGapS7-v0":(-0.5, 1.3), 'MountainCar-v0': (-50, 100),
 "MiniGrid-Dynamic-Obstacles-8x8-v0":(-1, 1), 'Breakout-MinAtar-v0': (-5, 25), "Space_invaders-MinAtar-v0": (-5, 25)}
metrics = {(0,0):"Return (dataset)", (0,1):"Return (std)",
 1:"Unique States", 2:"Unique State-Action Pairs",
 (3,0):"Entropy", (3,1):"Entropy (std)",
 (4,0):"Sparsity", (4,1): "Sparsity (std)",
 (5,0):"Episode Length", (5,1):"Episode Length (std)",
 }
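# Keys of `metrics`: a tuple (i, j) is read as mm.get_data(env, mode)[i][j] (value/std pairs),
# while a plain int k is read as mm.get_data(env, mode)[k] (scalar metrics).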
annotations = ["(R)", "(M)", "(E)", "(N)", "(F)"]
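# plt_csv: plot the mean over runs (columns of `csv`) against update steps spaced every
# 100 steps, with a shaded band of +/- one standard deviation across runs.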
def plt_csv(ax, csv, algo, mode, ylims=None, set_title=True, color=None, set_label=True):
 est = np.mean(csv, axis=1)
 sd = np.std(csv, axis=1)
 cis = (est - sd, est + sd)
 ax.fill_between(np.arange(0, len(est) * 100, 100), cis[0], cis[1], alpha=0.2, color=color)
 ax.plot(np.arange(0, len(est) * 100, 100), est, label=(algo if set_label else None), color=color)
 ax.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))
 if set_title:
 ax.set_title(buffer[mode])
 if ylims != None:
 ax.set_ylim(bottom=ylims[0], top=ylims[1])
####################################
# Usual Return plots #
####################################
mark = "return"
# titles
y_label = "Moving Average Return"
x_label = "Update Steps"
indir = os.path.join("..", "..", "results", "csv", mark)
outdir = os.path.join("..", "..", "results", folder, mark)
os.makedirs(outdir, exist_ok=True)
files = []
for file in glob.glob(os.path.join(indir, "*.csv")):
 files.append(file)
data = dict()
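# CSV files are expected to be named <env>_<mode>_<algo>.csv; everything before the last
# two underscore-separated fields is treated as the environment name.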
for file in files:
 name = file.split("/")[-1]
 env = "_".join(name.split("_")[:-2])
 mode = name.split("_")[-2]
 algo = name.split("_")[-1].split(".")[0]
 try:
 csv = np.loadtxt(file, delimiter=";")
 except Exception:
 print("Error in ", env, mode, algo)
 continue  # skip this file; `csv` would otherwise be stale or undefined
 if len(csv.shape) == 1:
 csv = csv.reshape(-1, 1)
 if env not in data:
 data[env] = dict()
 if mode not in data[env]:
 data[env][mode] = dict()
 data[env][mode][algo] = csv
for e, env in enumerate(data.keys()):
 f, axs = plt.subplots(1, 5, figsize=figsize_small, sharex=True, sharey=True)
 #axs = [item for sublist in axs for item in sublist]
 for m, mode in enumerate(data[env].keys()):
 if mode == "online":
 continue
 ids = list(buffer.keys())
 ax = axs[ids.index(mode)]
 norm = mm.get_data(env, mode)[0][0]
 ax.axhline(y=norm, color="black", linestyle="dotted",
 linewidth=2, label=("Behav." if m==0 else None))
 
 csv = data[env]["online"]["DQN"]
 ax.axhline(y=csv.max(), color="black", linewidth=2)
 plt_csv(ax, csv, "Online", mode, color="black", set_label=m==0)
 for a, algo in enumerate(algos):
 csv = data[env][mode][algo]
 plt_csv(ax, csv, algo, mode, color=f"C{(a + run * 3 if run < 3 else a)}", set_label=m==0)
 for ax in axs[m:]:
 f.delaxes(ax)
 f.text(0.52, 0.92, "-".join(env.split("-")[:-1]), ha='center', fontsize="x-large")
 #f.legend(loc="upper center", ncol=len(algos) + 2, fontsize="small")
 f.tight_layout(rect=(0.008, 0.022, 1, 0.92))
 f.text(0.52, 0.02, x_label, ha='center', fontsize="large")
 f.text(0.005, 0.5, y_label, va='center', rotation='vertical', fontsize="large")
 plt.savefig(os.path.join(outdir, env + "." + "png"))
 if e == 0:
 for ax in axs:
 ax.set_visible(False)
 for text in f.texts:
 text.set_visible(False)
 f.set_size_inches(figsize_small[0] - 4, 0.4, forward=True)
 f.legend(loc="center", ncol=len(algos) + 2, fontsize="small")
 f.tight_layout()
 plt.savefig(os.path.join(outdir, "legend." + image_type))
 plt.close()
###############
# plot metrics for policies
###############
modes = list(buffer.keys())
outdir = os.path.join("..", "..", "results", folder, "metrics")
os.makedirs(outdir, exist_ok=True)
# titles
x_label = "Dataset"
# plot for discussion
f, axs = plt.subplots(2, 3, figsize=figsize, sharex=True)
axs = [item for sublist in zip(axs[:, 0], axs[:, 1], axs[:,2]) for item in sublist]
for m, metric in enumerate([(0, 0), 2, (3, 0), 1, (5, 0), (4, 0)]):
 for env in envs:
 x = []
 random_return = mm.get_data(env, "random")[0][0]
 for mode in modes:
 if m == 1 or m == 3:
 x.append(mm.get_data(env, mode)[metric])
 else:
 x.append(mm.get_data(env, mode)[metric[0]][metric[1]])
 if m == 0:
 csv = data[env]["online"]["DQN"]
 x = [ (x_ - random_return) / (np.max(csv) - random_return) for x_ in x]
 axs[m].axhline(y=1, color="silver")
 axs[m].plot(range(len(x)), x, "-o", label = "-".join(env.split("-")[:-1]) if m == 0 else None, zorder=20)
 if m == 1 or m == 3 or m == 4:
 axs[m].set_yscale('log')
 if m == 5:
 axs[m].set_ylim(0.74, 1.01)
 if m == 0:
 axs[m].set_ylabel("Normalized Return")
 else:
 axs[m].set_ylabel(metrics[metric])
 axs[m].set_xticks(range(len(modes)))
 axs[m].set_xticklabels([buffer[m] for m in modes], fontsize="x-small", rotation=15, rotation_mode="anchor")
f.legend(loc="upper center", ncol=len(envs), fontsize="small")
f.tight_layout(rect=(0, 0.022, 1, 0.95))
f.text(0.52, 0.01, x_label, ha='center', fontsize="large")
plt.savefig(os.path.join(outdir, "overview_6." + image_type))
plt.close()
# plot for thesis
f, axs = plt.subplots(1, 3, figsize=figsize_half, sharex=True)
for m, metric in enumerate([(0, 0), 2, (3, 0)]):
 for env in envs:
 x = []
 random_return = mm.get_data(env, "random")[0][0]
 online_usap = mm.get_data(env, "er")[2]
 for mode in modes:
 if m == 1 or m == 3:
 x.append(mm.get_data(env, mode)[metric])
 else:
 x.append(mm.get_data(env, mode)[metric[0]][metric[1]])
 if m == 0:
 csv = data[env]["online"]["DQN"]
 x = [(x_ - random_return) / (np.max(csv) - random_return) for x_ in x]
 axs[m].axhline(y=1, color="silver")
 if m == 1:
 x = [x_ / online_usap for x_ in x]
 axs[m].axhline(y=1, color="silver")
 axs[m].plot(range(len(x)), x, "-o", label = "-".join(env.split("-")[:-1]) if m == 0 else None, zorder=20)
 if m == 0:
 axs[m].set_ylabel("Relative Trajectory Quality")
 elif m == 1:
 axs[m].set_ylabel("Relative State-Action Coverage")
 else:
 axs[m].set_ylabel(metrics[metric])
 axs[m].set_xticks(range(len(modes)))
 axs[m].set_xticklabels([buffer[m] for m in modes], fontsize="x-small", rotation=15, rotation_mode="anchor")
f.legend(loc="upper center", ncol=len(envs), fontsize="small")
f.tight_layout(rect=(0, 0.022, 1, 0.92))
f.text(0.52, 0.01, x_label, ha='center', fontsize="large")
plt.savefig(os.path.join(outdir, "overview_3." + image_type))
plt.close()
# plot for presentation
f, axs = plt.subplots(1, 2, figsize=figsize_half_half, sharex=True)
for m, metric in enumerate([(0, 0), 2]):
 for env in envs:
 x = []
 random_return = mm.get_data(env, "random")[0][0]
 online_usap = mm.get_data(env, "er")[2]
 for mode in modes:
 if m == 1 or m == 3:
 x.append(mm.get_data(env, mode)[metric])
 else:
 x.append(mm.get_data(env, mode)[metric[0]][metric[1]])
 if m == 0:
 csv = data[env]["online"]["DQN"]
 x = [(x_ - random_return) / (np.max(csv) - random_return) for x_ in x]
 axs[m].axhline(y=1, color="silver")
 if m == 1:
 x = [x_ / online_usap for x_ in x]
 axs[m].axhline(y=1, color="silver")
 axs[m].plot(range(len(x)), x, "-o", label = "-".join(env.split("-")[:-1]) if m == 0 else None, zorder=20)
 if m == 0:
 axs[m].set_ylabel("Relative Trajectory Quality")
 elif m == 1:
 axs[m].set_ylabel("Relative State-Action Coverage")
 axs[m].set_xticks(range(len(modes)))
 axs[m].set_xticklabels([buffer[m] for m in modes], fontsize="x-small", rotation=15, rotation_mode="anchor")
f.legend(loc="upper center", ncol=(len(envs) // 2), fontsize="x-small")
f.tight_layout(rect=(0, 0.022, 1, 0.88))
f.text(0.52, 0.01, x_label, ha='center', fontsize="large")
plt.savefig(os.path.join(outdir, "overview_2." + image_type))
plt.close()
##################################
# Action-Value Deviations #
##################################
mark = "avd"
# titles
y_label = "Action-Value Deviation"
x_label = "Update Steps"
indir = os.path.join("..", "..", "results", "csv", mark)
outdir = os.path.join("..", "..", "results", folder, mark)
os.makedirs(outdir, exist_ok=True)
files = []
for file in glob.glob(os.path.join(indir, "*.csv")):
 files.append(file)
data_avd = dict()
for file in files:
 name = file.split("/")[-1]
 env = "_".join(name.split("_")[:-2])
 mode = name.split("_")[-2]
 algo = name.split("_")[-1].split(".")[0]
 try:
 csv = np.loadtxt(file, delimiter=";")
 except Exception:
 print("Error in ", env, mode, algo)
 continue  # skip this file; `csv` would otherwise be stale or undefined
 if len(csv.shape) == 1:
 csv = csv.reshape(-1, 1)
 if env not in data_avd:
 data_avd[env] = dict()
 if mode not in data_avd[env]:
 data_avd[env][mode] = dict()
 data_avd[env][mode][algo] = csv
algos_ = algos.copy()
try:
 algos_.remove("BC")
except ValueError:
 pass
for e, env in enumerate(data_avd.keys()):
 f, axs = plt.subplots(1, 5, figsize=figsize_small, sharex=True, sharey=True)
 #axs = [item for sublist in axs for item in sublist]
 for m, mode in enumerate(data_avd[env].keys()):
 if mode == "online":
 continue
 ids = list(buffer.keys())
 ax = axs[ids.index(mode)]
 ax.axhline(y=0, color="black", linewidth=2, linestyle="dotted", label=("Optimal" if m==0 else None))
 csv = data_avd[env]["online"]["DQN"]
 bottom, top = list(), list()
 plt_csv(ax, csv, "Online", mode, color="black", set_label=m==0)
 for a, algo in enumerate(algos_):
 csv = data_avd[env][mode][algo]
 plt_csv(ax, csv, algo, mode, color=f"C{((a + 1 if len(algos_) < 3 else a) + run * 3 if run < 3 else a + 1)}", set_label=m==0)
 ax.set_ylim(bottom=y_bounds[env][0], top=y_bounds[env][1])
 for ax in axs[m:]:
 f.delaxes(ax)
 #axs[2].xaxis.set_tick_params(labelbottom=True)
 f.text(0.52, 0.92, "-".join(env.split("-")[:-1]), ha='center', fontsize="x-large")
 f.tight_layout(rect=(0.008, 0.022, 1, 0.92))
 f.text(0.52, 0.02, x_label, ha='center', fontsize="large")
 f.text(0.005, 0.5, y_label, va='center', rotation='vertical', fontsize="large")
 plt.savefig(os.path.join(outdir, env + ".png"))
 if e == 0:
 for ax in axs:
 ax.set_visible(False)
 for text in f.texts:
 text.set_visible(False)
 f.set_size_inches(figsize_small[0] - 4, 0.4, forward=True)
 f.legend(loc="center", ncol=len(algos_) + 2, fontsize="small")
 f.tight_layout()
 plt.savefig(os.path.join(outdir, "legend." + image_type))
 plt.close()
#############################
# Comparisons #
#############################
##################################
# load action-value deviation data
##################################
indir = os.path.join("..", "..", "results", "csv", "avd")
outdir = os.path.join("..", "..", "results", folder, "comp_avd")
os.makedirs(outdir, exist_ok=True)
files = []
for file in glob.glob(os.path.join(indir, "*.csv")):
 files.append(file)
data_avd = dict()
for file in files:
 name = file.split("/")[-1]
 env = "_".join(name.split("_")[:-2])
 mode = name.split("_")[-2]
 algo = name.split("_")[-1].split(".")[0]
 try:
 csv = np.loadtxt(file, delimiter=";")
 except Exception:
 print("Error in ", env, mode, algo)
 continue  # skip this file; `csv` would otherwise be stale or undefined
 if len(csv.shape) == 1:
 csv = csv.reshape(-1, 1)
 # the first hundred entries are invalid, as they are not the correct SMA (simple moving average)!
 csv = csv[100:]
 if not data_avd.keys() or env | |
| 
	""" Inline separated list of species and their weights of an observable
 Attributes:
 separator (:obj:`str`): list separator
 """
 def __init__(self, related_class, separator=' + ', related_name='', verbose_name='', verbose_related_name='', help=''):
 """
 Args:
 related_class (:obj:`class`): related class
 separator (:obj:`str`, optional): list separator
 related_name (:obj:`str`, optional): name of related attribute on `related_class`
 verbose_name (:obj:`str`, optional): verbose name
 verbose_related_name (:obj:`str`, optional): verbose related name
 help (:obj:`str`, optional): help message
 """
 super(ObservableSpeciesParticipantAttribute, self).__init__(related_class, related_name=related_name,
 verbose_name=verbose_name,
 verbose_related_name=verbose_related_name,
 help=help)
 self.separator = separator
 def serialize(self, spec_coeffs, encoded=None):
 """ Serialize related object
 Args:
 spec_coeffs (:obj:`list` of :obj:`Model`): Python representation of species and their coefficients
 encoded (:obj:`dict`, optional): dictionary of objects that have already been encoded
 Returns:
 :obj:`str`: simple Python representation
 """
 if not spec_coeffs:
 return ''
 spec_coeff_strs = []
 for spec_coeff_obj in spec_coeffs:
 spec_coeff_str = spec_coeff_obj.serialize(
 show_compartment=True, show_coefficient_sign=True)
 spec_coeff_strs.append(spec_coeff_str)
 return self.separator.join(spec_coeff_strs)
 def deserialize(self, value, objects, decoded=None):
 """ Deserialize value
 Args:
 value (:obj:`str`): String representation
 objects (:obj:`dict`): dictionary of objects, grouped by model
 decoded (:obj:`dict`, optional): dictionary of objects that have already been decoded
 Returns:
 :obj:`tuple` of `list` of `related_class`, `InvalidAttribute` or `None`: tuple of cleaned value
 and cleaning error
 """
 if not value:
 return ([], None)
 pat_id = r'([a-z][a-z0-9_]*)'
 pat_coeff = r'\(((\d*\.?\d+|\d+\.)(e[\-\+]?\d+)?)\)'
 pat_spec_coeff = r'({} )*({}\[{}\])'.format(pat_coeff, pat_id, pat_id)
 pat_observable = r'^{}( \+ {})*$'.format(pat_spec_coeff, pat_spec_coeff)
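 # e.g. matches "(2) h2o[c] + atp[c]" or "glc[e]" (coefficients are optional;
 # the species/compartment ids here are illustrative)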
 if not re.match(pat_observable, value, flags=re.I):
 return (None, InvalidAttribute(self, ['Incorrectly formatted observable: {}'.format(value)]))
 spec_coeff_objs = []
 errors = []
 for spec_coeff_match in re.findall(pat_spec_coeff, value, flags=re.I):
 spec_type_errors = []
 
 spec_type_id = spec_coeff_match[5]
 
 spec_type = None
 for species_type_cls in get_subclasses(SpeciesType):
 if species_type_cls in objects and spec_type_id in objects[species_type_cls]:
 spec_type = objects[species_type_cls][spec_type_id]
 break
 if not spec_type:
 spec_type_errors.append(
 'Undefined species type "{}"'.format(spec_type_id)) 
 
 compartment_id = spec_coeff_match[6]
 if compartment_id in objects[Compartment]:
 compartment = objects[Compartment][compartment_id]
 else:
 spec_type_errors.append(
 'Undefined compartment "{}"'.format(compartment_id))
 coefficient = float(spec_coeff_match[1] or 1.)
 if spec_type_errors:
 errors += spec_type_errors
 elif coefficient != 0:
 spec_id = Species.gen_id(
 spec_type.get_primary_attribute(), compartment.get_primary_attribute())
 obj, error = Species.deserialize(self, spec_id, objects)
 if error:
 raise ValueError('Invalid object "{}"'.format(spec_id)
 ) # pragma: no cover # unreachable due to error checking above
 if self.related_class not in objects:
 objects[self.related_class] = {}
 serialized_value = self.related_class._serialize(
 obj, coefficient)
 if serialized_value in objects[self.related_class]:
 spec_coeff_obj = objects[self.related_class][serialized_value]
 else:
 spec_coeff_obj = self.related_class(
 species=obj, coefficient=coefficient)
 objects[self.related_class][serialized_value] = spec_coeff_obj
 spec_coeff_objs.append(spec_coeff_obj)
 if errors:
 return (None, InvalidAttribute(self, errors))
 return (spec_coeff_objs, None)
class ObservableObservableParticipantAttribute(ManyToManyAttribute):
 """ Inline separated list of observables and their weights of an observable
 Attributes:
 separator (:obj:`str`): list separator
 """
 def __init__(self, related_class, separator=' + ', related_name='', verbose_name='', verbose_related_name='', help=''):
 """
 Args:
 related_class (:obj:`class`): related class
 separator (:obj:`str`, optional): list separator
 related_name (:obj:`str`, optional): name of related attribute on `related_class`
 verbose_name (:obj:`str`, optional): verbose name
 verbose_related_name (:obj:`str`, optional): verbose related name
 help (:obj:`str`, optional): help message
 """
 super(ObservableObservableParticipantAttribute, self).__init__(related_class, related_name=related_name,
 verbose_name=verbose_name,
 verbose_related_name=verbose_related_name,
 help=help)
 self.separator = separator
 def serialize(self, obs_coeffs, encoded=None):
 """ Serialize related object
 Args:
 obs_coeffs (:obj:`list` of :obj:`Model`): Python representation of observables and their coefficients
 encoded (:obj:`dict`, optional): dictionary of objects that have already been encoded
 Returns:
 :obj:`str`: simple Python representation
 """
 if not obs_coeffs:
 return ''
 obs_coeff_strs = []
 for obs_coeff_obj in obs_coeffs:
 obs_coeff_str = obs_coeff_obj.serialize()
 obs_coeff_strs.append(obs_coeff_str)
 return self.separator.join(obs_coeff_strs)
 def deserialize(self, value, objects, decoded=None):
 """ Deserialize value
 Args:
 value (:obj:`str`): String representation
 objects (:obj:`dict`): dictionary of objects, grouped by model
 decoded (:obj:`dict`, optional): dictionary of objects that have already been decoded
 Returns:
 :obj:`tuple` of `list` of `related_class`, `InvalidAttribute` or `None`: tuple of cleaned value
 and cleaning error
 """
 if not value:
 return ([], None)
 pat_id = r'([a-z][a-z0-9_]*)'
 pat_coeff = r'\(((\d*\.?\d+|\d+\.)(e[\-\+]?\d+)?)\)'
 pat_obs_coeff = r'({} )*({})'.format(pat_coeff, pat_id)
 pat_observable = r'^{}( \+ {})*$'.format(pat_obs_coeff, pat_obs_coeff)
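 # e.g. matches "(0.5) obs_1 + obs_2" (coefficients are optional;
 # the observable ids here are illustrative)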
 if not re.match(pat_observable, value, flags=re.I):
 return (None, InvalidAttribute(self, ['Incorrectly formatted observable: {}'.format(value)]))
 obs_coeff_objs = []
 errors = []
 for obs_coeff_match in re.findall(pat_obs_coeff, value, flags=re.I):
 obs_errors = []
 obs_id = obs_coeff_match[5]
 if obs_id in objects[Observable]:
 obs = objects[Observable][obs_id]
 else:
 obs_errors.append('Undefined observable "{}"'.format(obs_id))
 coefficient = float(obs_coeff_match[1] or 1.)
 if obs_errors:
 errors += obs_errors
 elif coefficient != 0:
 if self.related_class not in objects:
 objects[self.related_class] = {}
 serialized_value = self.related_class._serialize(
 obs, coefficient)
 if serialized_value in objects[self.related_class]:
 obs_coeff_obj = objects[self.related_class][serialized_value]
 else:
 obs_coeff_obj = self.related_class(
 observable=obs, coefficient=coefficient)
 objects[self.related_class][serialized_value] = obs_coeff_obj
 obs_coeff_objs.append(obs_coeff_obj)
 if errors:
 return (None, InvalidAttribute(self, errors))
 return (obs_coeff_objs, None)
#####################
#####################
# Base classes
class DatabaseReference(obj_model.Model):
 """ Reference to an entity in an external database
 Attributes:
 database (:obj:`str`): name of the external database
 id (:obj:`str`): identifier within the database
 Related attributes:
 compartments (:obj:`list` of :obj:`Compartment`): compartments
 species_types (:obj:`list` of :obj:`SpeciesType`): species_types
 concentrations (:obj:`list` of :obj:`Concentration`): concentrations
 loci (:obj:`list` of :obj:`PolymerLocus`): loci
 properties (:obj:`list` of :obj:`Property`): properties
 reactions (:obj:`list` of :obj:`Reaction`): reactions
 rate_laws (:obj:`list` of :obj:`RateLaw`): rate_laws
 observables (:obj:`list` of :obj:`Observable`): observables
 """
 database = obj_model.StringAttribute()
 id = obj_model.StringAttribute()
 class Meta(obj_model.Model.Meta):
 attribute_order = ('database', 'id')
 tabular_orientation = TabularOrientation.inline
 unique_together = (('database', 'id'), )
 ordering = ('database', 'id')
 def serialize(self):
 """ Generate string representation
 Returns:
 :obj:`str`: value of primary attribute
 """
 return '{}:{}'.format(self.database, self.id)
class KnowledgeBaseObject(obj_model.Model):
 """ Knowledge of a biological entity
 Attributes:
 id (:obj:`str`): identifier
 name (:obj:`str`): name
 comments (:obj:`str`): comments
 """
 id = obj_model.SlugAttribute(primary=True, unique=True)
 name = obj_model.StringAttribute()
 comments = obj_model.LongStringAttribute()
class KnowledgeBase(KnowledgeBaseObject):
 """ A knowledge base
 Attributes:
 translation_table (:obj:`int`): translation table
 version (:obj:`str`): version of the KB
 url (:obj:`str`): url of the KB Git repository
 branch (:obj:`str`): branch of the KB Git repository
 revision (:obj:`str`): revision of the KB Git repository
 wc_kb_version (:obj:`str`): version of ``wc_kb``
 Related attributes:
 cell (:obj:`Cell`): cell
 """
 translation_table = obj_model.IntegerAttribute()
 version = RegexAttribute(
 min_length=1, pattern=r'^[0-9]+\.[0-9]+\.[0-9]+', flags=re.I)
 url = obj_model.StringAttribute(verbose_name='URL')
 branch = obj_model.StringAttribute()
 revision = obj_model.StringAttribute()
 wc_kb_version = RegexAttribute(min_length=1, pattern=r'^[0-9]+\.[0-9]+\.[0-9]+', flags=re.I,
 default=wc_kb_version, verbose_name='wc_kb version')
 class Meta(obj_model.Model.Meta):
 attribute_order = ('id', 'name', 'translation_table', 'version',
 'url', 'branch', 'revision', 'wc_kb_version', 'comments')
 tabular_orientation = obj_model.TabularOrientation.column
class Cell(KnowledgeBaseObject):
 """ Knowledge of a cell
 Attributes:
 knowledge_base (:obj:`KnowledgeBase`): knowledge base
 taxon (:obj:`int`): NCBI taxon identifier
 Related attributes:
 references (:obj:`list` of :obj:`Reference`): references
 compartments (:obj:`list` of :obj:`Compartment`): compartments
 species_types (:obj:`list` of :obj:`SpeciesType`): species types
 concentrations (:obj:`list` of :obj:`Concentration`): concentrations
 observables (:obj:`list` of :obj:`Observable`): observables
 loci (:obj:`list` of :obj:`PolymerLocus`): loci
 reactions (:obj:`list` of :obj:`Reaction`): reactions
 """
 knowledge_base = obj_model.OneToOneAttribute(
 KnowledgeBase, related_name='cell')
 taxon = obj_model.IntegerAttribute()
 class Meta(obj_model.Model.Meta):
 attribute_order = ('id', 'name', 'taxon', 'comments')
 tabular_orientation = obj_model.TabularOrientation.column
class Reference(obj_model.Model):
 """ Reference to the literature
 Attributes:
 id (:obj:`str`): identifier 
 standard_id (:obj:`str`): standard identifier such as DOI or PubMed ID
 cell (:obj:`Cell`): cell
 Related attributes:
 compartments (:obj:`list` of :obj:`Compartment`): compartments
 species_types (:obj:`list` of :obj:`SpeciesType`): species_types
 concentrations (:obj:`list` of :obj:`Concentration`): concentrations
 loci (:obj:`list` of :obj:`PolymerLocus`): loci
 properties (:obj:`list` of :obj:`Property`): properties
 reactions (:obj:`list` of :obj:`Reaction`): reactions
 rate_laws (:obj:`list` of :obj:`RateLaw`): rate_laws
 observables (:obj:`list` of :obj:`Observable`): observables
 """
 id = obj_model.SlugAttribute(primary=True, unique=True) 
 standard_id = obj_model.StringAttribute()
 cell = obj_model.ManyToOneAttribute(Cell, related_name='references')
 class Meta(obj_model.Model.Meta):
 attribute_order = ('id', 'standard_id') 
class Compartment(KnowledgeBaseObject):
 """ Knowledge of a subcellular compartment
 Attributes:
 cell (:obj:`Cell`): cell
 volumetric_fraction (:obj:`float`): average volumetric fraction relative to the cell volume
 references (:obj:`list` of :obj:`Reference`): references
 database_references (:obj:`list` of :obj:`DatabaseReference`): database references
 Related attributes:
 reaction_participants (:obj:`list` of :obj:`ReactionParticipant`): reaction participants
 """
 cell = obj_model.ManyToOneAttribute(Cell, related_name='compartments')
 volumetric_fraction = obj_model.FloatAttribute(min=0., max=1.)
 references = obj_model.ManyToManyAttribute(Reference, related_name='compartments')
 database_references = DatabaseReferenceAttribute(related_name='compartments')
 class Meta(obj_model.Model.Meta):
 attribute_order = ('id', 'name', 'volumetric_fraction', 'comments', 'references', 'database_references')
class SpeciesType(six.with_metaclass(obj_model.abstract.AbstractModelMeta, KnowledgeBaseObject)):
 """ Knowledge of a molecular species
 Attributes:
 cell (:obj:`Cell`): cell
 half_life (:obj:`float`): half life (s)
 references (:obj:`list` of :obj:`Reference`): references
 database_references (:obj:`list` of :obj:`DatabaseReference`): database references
 Related attributes:
 reaction_participants (:obj:`list` of :obj:`ReactionParticipant`): reaction participants
 """
 cell = obj_model.ManyToOneAttribute(Cell, related_name='species_types')
 half_life = obj_model.FloatAttribute(min=0)
 references = obj_model.ManyToManyAttribute(Reference, related_name='species_types')
 database_references = DatabaseReferenceAttribute(related_name='species_types')
 class Meta(obj_model.Model.Meta):
 attribute_order = ('id', 'name', 'half_life', 'comments', 'references', 'database_references')
 @abc.abstractmethod
 def get_empirical_formula(self):
 """ Get the empirical formula
 Returns:
 :obj:`chem.EmpiricalFormula`: empirical formula
 """
 pass # pragma: no cover
 @abc.abstractmethod
 def get_charge(self):
 """ Get the charge
 Returns:
 :obj:`int`: charge
 """
 pass # pragma: no cover
 @abc.abstractmethod
 def get_mol_wt(self):
 """ Get the molecular weight
 Returns:
 :obj:`float`: molecular weight
 """
 pass # pragma: no cover
class Species(obj_model.Model):
 """ Species (tuple of species type, compartment)
 Attributes:
 species_type (:obj:`SpeciesType`): species type
 compartment (:obj:`Compartment`): compartment
 Related attributes:
 concentration (:obj:`Concentration`): concentration
 species_coefficients (:obj:`list` of :obj:`SpeciesCoefficient`): participations in reactions and observables
 """
 species_type = ManyToOneAttribute(
 SpeciesType, related_name='species', min_related=1)
 compartment = ManyToOneAttribute(
 Compartment, related_name='species', min_related=1)
 class Meta(obj_model.Model.Meta):
 attribute_order = ('species_type', | |
| 
	< self._assets["A-s"]:
 result.update({"entrant": self.ENTRANT_CHOICES["indifferent"]})
 result.update({"incumbent": self.INCUMBENT_CHOICES["copy"]})
 result.update({"development": self.DEVELOPMENT_OUTCOME["failure"]})
 else:
 result.update({"entrant": self.ENTRANT_CHOICES["substitute"]})
 result.update({"development": self.DEVELOPMENT_OUTCOME["success"]})
 if F <= self._copying_fixed_costs["F(YY)s"]:
 result.update({"incumbent": self.INCUMBENT_CHOICES["copy"]})
 else:
 result.update({"incumbent": self.INCUMBENT_CHOICES["refrain"]})
 return result
 def _plot(self, coordinates: List[List[Tuple[float, float]]], labels: List[str],
 axis: matplotlib.axes.Axes = None, **kwargs) -> matplotlib.axes.Axes:
 """
 Plots the areas containing the optimal choices and answers into a coordinate system.
 Parameters
 ----------
 coordinates : List[List[Tuple[float, float]]]
 List of all polygons (list of coordinates) to plot.
 labels: List[str]
 List containing all the labels for the areas.
 axis : matplotlib.axes.Axes
 Axis to draw the plot on. (optional)
 **kwargs
 Optional key word arguments for the plots.<br>
 - title: title of the plot.<br>
 - xlabel: label for the x - axis.<br>
 - ylabel: label for the y - axis.<br>
 - options_legend: If true, an additional legend, explaining the options of the entrant and the incumbent, will be added to the plot.<br>
 - asset_legend: If true, an additional legend explaining the thresholds of the assets of the entrant will be added to the plot.<br>
 - costs_legend: If true, an additional legend explaining the thresholds of the fixed costs of copying for the incumbent will be added to the plot.<br>
 - legend_width : Maximum number of characters in one line in the legend (for adjustments to figure width).<br>
 - x_max : Maximum number plotted on the x - axis.<br>
 - y_max : Maximum number plotted on the y - axis.<br>
 Returns
 -------
 Axis containing the plot.
 """
 if axis is None:
 plot_fig, axis = plt.subplots()
 self._draw_thresholds(axis, x_horizontal=kwargs.get("x_max", 0), y_vertical=kwargs.get("y_max", 0))
 for i, polygon_coordinates in enumerate(coordinates):
 poly = plt.Polygon(polygon_coordinates, linewidth=0, color=self._get_color(i), label=labels[i])
 axis.add_patch(poly)
 if kwargs.get("legend", True):
 axis.legend(bbox_to_anchor=(1.3, 1), loc="upper left")
 additional_legend: str = self._create_additional_legend(options_legend=kwargs.get('options_legend', False),
 assets_thresholds_legend=kwargs.get('asset_legend', False),
 costs_thresholds_legend=kwargs.get('costs_legend', False),
 width=kwargs.get('legend_width', 60))
 if additional_legend != "":
 axis.text(-0.1, -0.6, additional_legend, verticalalignment='top', linespacing=1, wrap=True)
 BaseModel._set_axis_labels(axis, title=kwargs.get('title', ''),
 x_label=kwargs.get('xlabel', 'Assets of the entrant'),
 y_label=kwargs.get('ylabel', 'Fixed costs of copying for the incumbent'))
 BaseModel._set_axis(axis)
 return axis
 def plot_incumbent_best_answers(self, axis: matplotlib.axes.Axes = None, **kwargs) -> matplotlib.axes.Axes:
 poly_coordinates: List[List[Tuple[float, float]]] = self._get_incumbent_best_answer_coordinates(
 kwargs.get("x_max", 0),
 kwargs.get("y_max", 0))
 poly_labels: List[str] = self._get_incumbent_best_answer_labels()
 kwargs.update({'title': kwargs.get('title', "Best Answers of the incumbent to the choices of the entrant")})
 return self._plot(coordinates=poly_coordinates, labels=poly_labels, axis=axis, **kwargs)
 def _create_choice_answer_label(self, entrant: Literal["complement", "substitute", "indifferent"],
 incumbent: Literal["copy", "refrain"],
 development: Literal["success", "failure"],
 kill_zone: bool = False, acquisition: str = "") -> str:
 """
 Creates a label for the legend based on the choice of the entrant, the incumbent, the development outcome and additionally on possible acquisition.
 Parameters
 ----------
 entrant: Literal["complement", "substitute", "indifferent"]
 choice of the entrant.
 incumbent: Literal["copy", "refrain"]
 choice of the incumbent.
 development: Literal["success", "failure"]
 outcome of the development.
 kill_zone: bool
 If true, the label adds a "(Kill Zone)" tag.
 acquisition: str
 The entity, which develops the additional product chosen by the entrant.
 Returns
 -------
 str
 label based on the parameters mentioned above.
 """
 if acquisition != "":
 acquisition = "_" + acquisition
 return self.ENTRANT_CHOICES[entrant] + " $\\rightarrow$ " + self.INCUMBENT_CHOICES[
 incumbent] + " $\\rightarrow " + self.DEVELOPMENT_OUTCOME[development] + acquisition + "$" + (
 "\n(Kill Zone)" if kill_zone else "")
 def _get_incumbent_best_answer_labels(self) -> List[str]:
 """
 Returns a list containing the labels for the squares in the plot of the best answers of the incumbent to the choice of the entrant.
 For the order of the labels refer to the file resources/dev_notes.md.
 Returns
 -------
 List containing the labels for the squares in the plot of the best answers of the incumbent to the choice of the entrant.
 """
 return [
 # Area 1
 self._create_choice_answer_label(entrant="substitute", incumbent="copy", development="failure") + " \n" +
 self._create_choice_answer_label(entrant="complement", incumbent="copy", development="failure"),
 # Area 2
 self._create_choice_answer_label(entrant="substitute", incumbent="copy", development="success") + " \n" +
 self._create_choice_answer_label(entrant="complement", incumbent="copy", development="failure"),
 # Area 3
 self._create_choice_answer_label(entrant="substitute", incumbent="copy", development="success") + " \n" +
 self._create_choice_answer_label(entrant="complement", incumbent="copy", development="success"),
 # Area 4
 self._create_choice_answer_label(entrant="substitute", incumbent="copy", development="failure") + " \n" +
 self._create_choice_answer_label(entrant="complement", incumbent="refrain", development="success"),
 # Area 5
 self._create_choice_answer_label(entrant="substitute", incumbent="refrain", development="success") + " \n" +
 self._create_choice_answer_label(entrant="complement", incumbent="copy", development="success"),
 # Area 6
 self._create_choice_answer_label(entrant="substitute", incumbent="refrain", development="success") + " \n" +
 self._create_choice_answer_label(entrant="complement", incumbent="refrain", development="success"),
 ]
 def _get_incumbent_best_answer_coordinates(self, x_max: float, y_max: float) -> List[List[Tuple[float, float]]]:
 """
 Returns a list containing the coordinates for the areas in the plot of the best answers of the incumbent to the choice of the entrant.
 For the order of the areas refer to the file resources/dev_notes.md.
 Returns
 -------
 List[List[Tuple[float, float]]]
 List containing the coordinates for the areas in the plot of the best answers of the incumbent to the choice of the entrant.
 """
 y_max = self._get_y_max(y_max)
 x_max = self._get_x_max(x_max)
 return [
 # Area 1
 [(0, 0), (self._assets['A-s'], 0), (self._assets['A-s'], max(self._copying_fixed_costs['F(YN)c'], 0)),
 (0, max(self._copying_fixed_costs['F(YN)c'], 0))],
 # Area 2
 [(self._assets['A-s'], 0), (self._assets['A-c'], 0),
 (self._assets['A-c'], self._copying_fixed_costs['F(YY)s']),
 (self._assets['A-s'], self._copying_fixed_costs['F(YY)s'])],
 # Area 3
 [(self._assets['A-c'], 0), (x_max, 0), (x_max, self._copying_fixed_costs['F(YY)s']),
 (self._assets['A-c'], self._copying_fixed_costs['F(YY)s'])],
 # Area 4
 [(0, max(self._copying_fixed_costs['F(YN)c'], 0)),
 (self._assets['A-s'], max(self._copying_fixed_costs['F(YN)c'], 0)),
 (self._assets['A-s'], self._copying_fixed_costs['F(YN)s']), (0, self._copying_fixed_costs['F(YN)s'])],
 # Area 5
 [(self._assets['A-c'], self._copying_fixed_costs['F(YY)s']), (x_max, self._copying_fixed_costs['F(YY)s']),
 (x_max, self._copying_fixed_costs['F(YY)c']), (self._assets['A-c'], self._copying_fixed_costs['F(YY)c'])],
 # Area 6
 [(self._assets['A-s'], self._copying_fixed_costs['F(YY)s']),
 (self._assets['A-c'], self._copying_fixed_costs['F(YY)s']),
 (self._assets['A-c'], self._copying_fixed_costs['F(YY)c']), (x_max, self._copying_fixed_costs['F(YY)c']),
 (x_max, y_max), (0, y_max),
 (0, self._copying_fixed_costs['F(YN)s']), (self._assets['A-s'], self._copying_fixed_costs['F(YN)s'])]]
 def plot_equilibrium(self, axis: matplotlib.axes.Axes = None, **kwargs) -> matplotlib.axes.Axes:
 poly_coordinates: List[List[Tuple[float, float]]] = self._get_equilibrium_coordinates(kwargs.get("x_max", 0),
 kwargs.get("y_max", 0))
 poly_labels: List[str] = self._get_equilibrium_labels()
 kwargs.update({'title': kwargs.get('title', 'Equilibrium Path')})
 return self._plot(coordinates=poly_coordinates, labels=poly_labels, axis=axis, **kwargs)
 def _get_equilibrium_labels(self) -> List[str]:
 """
 Returns a list containing the labels for the squares in the plot of the equilibrium path.
 For the order of the squares refer to the file resources/dev_notes.md.
 Returns
 -------
 List[str]
 List containing the labels for the squares in the plot of the best answers of the equilibrium path.
 """
 return [
 # Area 1
 self._create_choice_answer_label(entrant="indifferent", incumbent="copy", development="failure"),
 # Area 2
 self._create_choice_answer_label(entrant="substitute", incumbent="copy", development="success"),
 # Area 3
 self._create_choice_answer_label(entrant="complement", incumbent="refrain", development="success",
 kill_zone=True),
 # Area 4
 self._create_choice_answer_label(entrant="substitute", incumbent="refrain", development="success")
 ]
 def _get_equilibrium_coordinates(self, x_max: float, y_max: float) -> List[List[Tuple[float, float]]]:
 """
 Returns a list containing the coordinates for the areas in the plot of the equilibrium path.
 For the order of the areas refer to the file resources/dev_notes.md.
 Returns
 -------
 List[List[Tuple[float, float]]]
 List containing the coordinates for the areas in the plot of the best answers of the equilibrium path.
 """
 y_max = self._get_y_max(y_max)
 x_max = self._get_x_max(x_max)
 return [
 # Area 1
 [(0, 0), (self._assets['A-s'], 0), (self._assets['A-s'], max(self._copying_fixed_costs['F(YN)c'], 0)),
 (0, max(self._copying_fixed_costs['F(YN)c'], 0))],
 # Area 2
 [(self._assets['A-s'], 0), (x_max, 0), (x_max, self._copying_fixed_costs['F(YY)s']),
 (self._assets['A-s'], self._copying_fixed_costs['F(YY)s'])],
 # Area 3
 [(0, max(self._copying_fixed_costs['F(YN)c'], 0)),
 (self._assets['A-s'], max(self._copying_fixed_costs['F(YN)c'], 0)),
 (self._assets['A-s'], self._copying_fixed_costs['F(YN)s']), (0, self._copying_fixed_costs['F(YN)s'])],
 # Area 4
 [(self._assets['A-s'], self._copying_fixed_costs['F(YY)s']), (x_max, self._copying_fixed_costs['F(YY)s']),
 (x_max, y_max), (0, y_max), (0, self._copying_fixed_costs['F(YN)s']),
 (self._assets['A-s'], self._copying_fixed_costs['F(YN)s'])]]
 def plot_payoffs(self, axis: matplotlib.axes.Axes = None, **kwargs) -> matplotlib.axes.Axes:
 if axis is None:
 plot_fig, axis = plt.subplots()
 index = arange(0, len(self._payoffs) * 2, 2)
 bar_width = 0.35
 spacing = 0.05
 self._plot_payoffs_bars(axis, bar_width, index, spacing, **kwargs)
 axis.set_xlabel('Market Configurations')
 axis.set_title('Payoffs for different Market Configurations')
 self._set_payoffs_ticks(axis, bar_width, index, spacing)
 if kwargs.get("legend", True):
 self._set_payoff_legend(axis, kwargs.get("products_legend", False))
 self._set_payoffs_figure(axis)
 return axis
 def _plot_payoffs_bars(self, axis: matplotlib.axes.Axes, bar_width: float, index: array, spacing: float,
 **kwargs) -> None:
 """
 Plots the bars representing the payoffs for different market configurations of different stakeholders on the specified axis.
 Parameters
 ----------
 axis matplotlib.axes.Axes
 To plot the bars on.
 bar_width: float
 Width of a bar in the plot.
 index: np.array
 Index of the different market configurations in the plot.
 spacing: float
 Spacing between the bars on the plot.
 **kwargs
 Optional key word arguments for the payoff plot.<br>
 - opacity : Opacity of the not optimal payoffs.<br>
 """
 for counter, utility_type in enumerate(self._payoffs[list(self._payoffs.keys())[0]].keys()):
 utility_values: List[float] = []
 for market_configuration in self._payoffs:
 utility_values.append(self._payoffs[market_configuration][utility_type])
 bars = axis.bar(index + counter * (bar_width + spacing), utility_values, bar_width,
 alpha=kwargs.get("opacity", 0.2),
 color=self._get_color(counter),
 edgecolor=None,
 label=self._convert_payoffs_label(utility_type))
 max_indices: List[int] = list(
 filter(lambda x: utility_values[x] == max(utility_values), range(len(utility_values))))
 for max_index in max_indices:
 bars[max_index].set_alpha(1)
 def _set_payoff_legend(self, axis: matplotlib.axes.Axes, products_legend: bool = False) -> None:
 """
 Creates the legend and an additional legend for the products of the entrant and the incumbent.
 Parameters
 ----------
 axis: matplotlib.axes.Axes
 To set the legends for.
 products_legend: bool
 If true, an additional legend, containing all possible products of the entrant and the incumbent, will be created.
 """
 axis.legend(bbox_to_anchor=(1.02, 1), loc='upper left', ncol=1)
 if products_legend:
 axis.text(-0.7, -0.8, | |
| 
	<reponame>forgeservicelab/ansible.account-cleanup<gh_stars>0
#!/usr/bin/env python
# (c) 2012, <NAME> <<EMAIL>>
# modified by <NAME> <<EMAIL>>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import os
import argparse
import subprocess
import yaml
import time
import md5
import itertools
import novaclient.client
import ansible.module_utils.openstack
try:
 import json
except ImportError:
 import simplejson as json
# This is a script getting dynamic inventory from Nova. Features:
# - you can refer to instances by their nova name in ansible{-playbook} calls
# - you can refer to single tenants, regions and openstack environments in
# ansible{-playbook} calls
# - you can refer to a hostgroup when you pass the arbitrary --meta group=
# in "nova boot"
# - it caches the state of the cloud
# - it tries to guess ansible_ssh_user based on name of image
# (image name containing 'ubuntu' -> 'ubuntu', 'centos' -> 'cloud-user', ...)
# - allows to access machines by their private ip *
# - it will work with no additional configuration, just handling single tenant
# from set OS_* environment variables (just like python-novaclient).
# - you can choose to heavy-configure it for multiple environments
# - it's configured from simple YAML (I dislike ConfigParser). See nova.yml
# - Nodes can be listed in inventory either by DNS name or IP address based
# on setting.
#
# * I took few ideas and some code from other pull requests
# - https://github.com/ansible/ansible/pull/8657 by <NAME>
# - https://github.com/ansible/ansible/pull/7444 by <NAME>
#
# If Ansible fails to parse JSON, please run this with --list and observe.
#
# HOW CACHING WORKS:
# Cache of list of servers is kept per combination of (auth_url, region_name,
# project_id). Default max age is 300 seconds. You can set the age per section
# (openstack environment) in config.
#
# If you want to build the cache from cron, consider:
# */5 * * * * . /home/tomk/os/openrc.sh && \
# ANSIBLE_NOVA_CONFIG=/home/tomk/.nova.yml \
# /home/tomk/ansible/plugins/inventory/nova.py --refresh-cache
#
# HOW IS NOVA INVENTORY CONFIGURED:
# (Note: if you have env vars set from openrc.sh, you can run this without
# writing the config file. Defaults are sane. The values in the config file
# will rewrite the defaults.)
#
# To load configuration from a file, you must have the config file path in
# environment variable ANSIBLE_NOVA_CONFIG.
#
# IN THE CONFIG FILE:
# The keys in the top level dict are names for different OS environments.
# The keys in a dict for OS environment can be:
# - auth_url
# - region_name (can be a list)
# - project_id (can be a list)
# - username
# - api_key
# - service_type
# - auth_system
# - prefer_private (connect using private IPs)
# - cache_max_age (how long to consider cached data. In seconds)
# - resolve_ips (translate IP addresses to domain names)
#
# If you have a list in region and/or project, all the combinations will be
# listed.
#
# If you don't have a config file, a single cloud section called 'openstack'
# will be created.
#
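# EXAMPLE CONFIG FILE (illustrative only -- the environment names, credentials,
# tenants and regions below are made up):
#
#   openstack:
#     auth_url: https://keystone.example.com:5000/v2.0
#     username: deployer
#     api_key: secret
#     project_id: [tenant1, tenant2]
#     region_name: [regionOne, regionTwo]
#     prefer_private: false
#     cache_max_age: 600
#     resolve_ips: true
#   openstack_dev:
#     auth_url: https://keystone-dev.example.com:5000/v2.0
#     project_id: tenant3
#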
# WHAT IS AVAILABLE AS A GROUP FOR ANSIBLE CALLS (how are nodes grouped):
# tenants, regions, clouds (top config section), groups by metadata key (nova
# boot --meta group=<name>).
CONFIG_ENV_VAR_NAME = 'ANSIBLE_NOVA_CONFIG'
NOVA_DEFAULTS = {
 'auth_system': os.environ.get('OS_AUTH_SYSTEM'),
 'service_type': 'compute',
 'username': os.environ.get('OS_USERNAME'),
 'api_key': os.environ.get('OS_PASSWORD'),
 'auth_url': os.environ.get('OS_AUTH_URL'),
 'project_id': os.environ.get('OS_TENANT_NAME'),
 'region_name': os.environ.get('OS_REGION_NAME'),
 'prefer_private': False,
 'version': '2',
 'cache_max_age': 300,
 'resolve_ips': True,
}
DEFAULT_CONFIG_KEY = 'openstack'
CACHE_DIR = '~/.ansible/tmp'
CONFIG = {}
def load_config():
 global CONFIG
 _config_file = os.environ.get(CONFIG_ENV_VAR_NAME)
 if _config_file:
 with open(_config_file) as f:
 CONFIG = yaml.load(f.read())
 if not CONFIG:
 CONFIG = {DEFAULT_CONFIG_KEY: {}}
 for section in CONFIG.values():
 for key in NOVA_DEFAULTS:
 if (key not in section):
 section[key] = NOVA_DEFAULTS[key]
def push(data, key, element):
 ''' Append an element to a dictionary of lists, creating the list if needed '''
 if (not element) or (not key):
 return
 if key in data:
 data[key].append(element)
 else:
 data[key] = [element]
def to_safe(word):
 '''
 Converts 'bad' characters in a string to underscores so they can
 be used as Ansible groups
 '''
 return re.sub(r"[^A-Za-z0-9\-]", "_", word)
def get_access_ip(server, prefer_private):
 ''' Find an IP for Ansible SSH for a host. '''
 private = ansible.module_utils.openstack.openstack_find_nova_addresses(
 getattr(server, 'addresses'), 'fixed', 'private')
 public = ansible.module_utils.openstack.openstack_find_nova_addresses(
 getattr(server, 'addresses'), 'floating', 'public')
 if prefer_private:
 return private[0]
 if server.accessIPv4:
 return server.accessIPv4
 if public:
 return public[0]
 else:
 return private[0]
def get_metadata(server):
 ''' Returns dictionary of all host metadata '''
 results = {}
 for key in vars(server):
 # Extract value
 value = getattr(server, key)
 # Generate sanitized key
 key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()
 # Add value to instance result (exclude manager class)
 #TODO: maybe use value.__class__ or similar inside of key_name
 if key != 'os_manager':
 results[key] = value
 return results
def get_ssh_user(server, nova_client):
 ''' Try to guess ansible_ssh_user based on image name. '''
 try:
 image_name = nova_client.images.get(server.image['id']).name
 if 'ubuntu' in image_name.lower():
 return 'ubuntu'
 if 'centos' in image_name.lower():
 return 'cloud-user'
 if 'debian' in image_name.lower():
 return 'debian'
 if 'coreos' in image_name.lower():
 return 'coreos'
 except:
 pass
def get_nova_client(combination):
 '''
 There is a bit more info in the combination than we need for nova client,
 so we need to create a copy and delete keys that are not relevant.
 '''
 kwargs = dict(combination)
 del kwargs['name']
 del kwargs['prefer_private']
 del kwargs['cache_max_age']
 del kwargs['resolve_ips']
 return novaclient.client.Client(**kwargs)
def merge_update_to_result(result, update):
 '''
 This will merge data from a nova servers.list call (in update) into
 aggregating dict (in result)
 '''
 for host, specs in update['_meta']['hostvars'].items():
 # Can the same host be in two different listings? I hope not.
 result['_meta']['hostvars'][host] = dict(specs)
 # groups must be copied if not present, otherwise merged
 for group in update:
 if group == '_meta':
 continue
 if group not in result:
 # copy the list over
 result[group] = update[group][:]
 else:
 result[group] = list(set(update[group]) | set(result[group]))
def get_name(ip):
 ''' Gets the shortest domain name for IP address'''
 # I first did this with gethostbyaddr but that did not return all the names
 # Also, this won't work on Windows. But it can be turned off by setting
 # resolve_ips to false
 command = "host %s" % ip
 p = subprocess.Popen(command.split(), stdout=subprocess.PIPE,
 stderr=subprocess.PIPE)
 stdout, _ = p.communicate()
 if p.returncode != 0:
 return None
 names = []
 for l in stdout.split('\n'):
 if 'domain name pointer' not in l:
 continue
 names.append(l.split()[-1])
 return min(names, key=len)
def get_update(call_params):
 '''
 Fetch host dicts and groups from single nova_client.servers.list call.
 This is called for each element in the "cartesian product" of openstack
 environments, tenants and regions.
 '''
 update = {'_meta': {'hostvars': {}}}
 # Cycle on servers
 nova_client = get_nova_client(call_params)
 for server in nova_client.servers.list():
 access_ip = get_access_ip(server, call_params['prefer_private'])
 access_identifier = access_ip
 if call_params['resolve_ips']:
 dns_name = get_name(access_ip)
 if dns_name:
 access_identifier = dns_name
 # Push to a group for its name. This way we can use the nova name as
 # a target for ansible{-playbook}
 push(update, server.name, access_identifier)
 # Run through each metadata item and add instance to it
 for key, value in server.metadata.iteritems():
 composed_key = to_safe('tag_{0}_{1}'.format(key, value))
 push(update, composed_key, access_identifier)
 # Do special handling of group for backwards compat
 # inventory update
 group = 'undefined'
 if 'group' in server.metadata:
 group = server.metadata['group']
 push(update, group, access_identifier)
 # Add vars to _meta key for performance optimization in
 # Ansible 1.3+
 update['_meta']['hostvars'][access_identifier] = get_metadata(server)
 # guess username based on image name
 ssh_user = get_ssh_user(server, nova_client)
 if ssh_user:
 host_record = update['_meta']['hostvars'][access_identifier]
 host_record['ansible_ssh_user'] = ssh_user
 push(update, call_params['name'], access_identifier)
 push(update, call_params['project_id'], access_identifier)
 if call_params['region_name']:
 push(update, call_params['region_name'], access_identifier)
 return update
def expand_to_product(d):
 '''
 this will transform
 {1: [2, 3, 4], 5: [6, 7]}
 to
 [{1: 2, 5: 6}, {1: 2, 5: 7}, {1: 3, 5: 6}, {1: 3, 5: 7}, {1: 4, 5: 6},
 {1: 4, 5: 7}]
 '''
 return (dict(itertools.izip(d, x)) for x in
 itertools.product(*d.itervalues()))
def get_list_of_kwarg_combinations():
 '''
 This will transform
 CONFIG = {'openstack': {version: '2', project_id: ['tenant1', 'tenant2'], ...},
 'openstack_dev':{version:'2', project_id:'tenant3',...},
 into
 [{'name':'openstack', version:'2', project_id: 'tenant1', ...},
 {'name':'openstack', version:'2', project_id: 'tenant2', ...},
 {'name':'openstack_dev', version:'2', project_id: 'tenant3', ...}]
 The | |
| 
	import numpy as _np
class Full:
 def __init__(self):
 """
 Full fitting, no omissions to rate equations
 """
 pass
 def C(self, t, k1, k2, k3, k4, k5, k6, k7, rho1, rho2, C0, CO0, O0, OH0, O20, HCO0):
 output = C0 * (_np.exp((((-k2) - k1) * (rho2 * t))))
 return output
 def CO(self, t, k1, k2, k3, k4, k5, k6, k7, rho1, rho2, C0, CO0, O0, OH0, O20, HCO0):
 aux0 = (_np.exp(((k1 + k2) * (rho2 * t)))) * (
 ((C0 * (k1 * rho2)) + (CO0 * (((k1 + k2) - k4) * rho2))) - (CO0 * (k3 * rho1)))
 aux1 = (_np.exp(((((-(k1 + (k2 + k4)) * rho2)) - (k3 * rho1)) * t))) * (
 (C0 * ((_np.exp(((k3 * (rho1 * t)) + (k4 * (rho2 * t))))) * (k1 * rho2))) - aux0)
 output = aux1 / ((k3 * rho1) - (((k1 + k2) - k4) * rho2))
 return output
 def HCO(self, t, k1, k2, k3, k4, k5, k6, k7, rho1, rho2, C0, CO0, O0, OH0, O20, HCO0):
 aux0 = (((_np.exp(((k1 + k2) * (rho2 * t)))) * ((k1 + k2) - k4)) + k4) - (
 (_np.exp((((((k1 + k2) - k4) * rho2) - (k3 * rho1)) * t))) * (k1 + k2))
 aux1 = C0 * (k1 * (k3 * (rho1 * (((1. - (_np.exp(((k1 + k2) * (rho2 * t))))) * (k3 * rho1)) + (aux0 * rho2)))))
 aux2 = ((_np.exp(((k1 + k2) * (rho2 * t)))) * (((CO0 + HCO0) * (k3 * rho1)) + (HCO0 * (k4 * rho2)))) - (
 CO0 * ((_np.exp((((((k1 + k2) - k4) * rho2) - (k3 * rho1)) * t))) * (k3 * rho1)))
 aux3 = (_np.exp((((-k2) - k1) * (rho2 * t)))) * (
 aux1 + ((k1 + k2) * (((((k1 + k2) - k4) * rho2) - (k3 * rho1)) * aux2)))
 output = ((aux3 / ((k3 * rho1) + (k4 * rho2))) / ((((k1 + k2) - k4) * rho2) - (k3 * rho1))) / (k1 + k2)
 return output
 def O(self, t, k1, k2, k3, k4, k5, k6, k7, rho1, rho2, C0, CO0, O0, OH0, O20, HCO0):
 aux0 = (_np.exp(((k1 + k2) * (rho2 * t)))) * ((k5 * (O0 * rho1)) - (((C0 * k2) + (((k1 + k2) - k6) * O0)) * rho2))
 aux1 = (_np.exp(((((-(k1 + (k2 + k6)) * rho2)) - (k5 * rho1)) * t))) * (
 (C0 * ((_np.exp(((k5 * (rho1 * t)) + (k6 * (rho2 * t))))) * (k2 * rho2))) + aux0)
 output = aux1 / ((k5 * rho1) - (((k1 + k2) - k6) * rho2))
 return output
 def O2(self, t, k1, k2, k3, k4, k5, k6, k7, rho1, rho2, C0, CO0, O0, OH0, O20, HCO0):
 aux0 = (k5 * (rho1 - ((_np.exp(((-k7 * (rho2 * t))))) * rho1))) + (
 (1. - (_np.exp((((-k6 * rho2) - (k5 * rho1)) * t)))) * ((k6 - k7) * rho2))
 aux1 = (((_np.exp((((-k4 * rho2) - (k3 * rho1)) * t))) * (k1 * (k4 * (rho2 ** 2)))) / ((k3 * rho1) + (k4 * rho2))) / (
 (k3 * rho1) - (((k1 + k2) - k4) * rho2))
 aux2 = ((_np.exp((((-k6 * rho2) - (k5 * rho1)) * t))) * (k2 * ((k6 - k7) * (rho2 ** 2)))) / (
 (k5 * rho1) + ((k6 - k7) * rho2))
 aux3 = (k2 * (k3 * (k5 * (k7 * (rho1 ** 2))))) + (
 (k1 + k2) * (((k1 * k4) + ((k2 - k4) * k6)) * (((k1 + k2) - k7) * (rho2 ** 2))))
 aux4 = ((k1 + k2) * ((k1 * (k4 * k5)) + (k2 * (k3 * k6)))) + (
 (((k1 + k2) * ((k2 - k4) * k5)) - (k2 * (k3 * k6))) * k7)
 aux5 = ((_np.exp((((-k2) - k1) * (rho2 * t)))) * (aux3 - (aux4 * (rho1 * rho2)))) / (
 (((k1 + k2) - k6) * rho2) - (k5 * rho1))
 aux6 = (aux2 / ((k5 * rho1) - (((k1 + k2) - k6) * rho2))) + (
 ((aux5 / ((((k1 + k2) - k4) * rho2) - (k3 * rho1))) / ((k1 + k2) - k7)) / (k1 + k2))
 aux7 = (((_np.exp(((-k7 * (rho2 * t))))) * (k2 * (k5 * rho1))) / ((k5 * rho1) + ((k6 - k7) * rho2))) / ((k1 + k2) - k7)
 aux8 = C0 * (((1. + (aux1 + aux6)) - aux7) - (((k1 * (k3 * rho1)) / ((k3 * rho1) + (k4 * rho2))) / (k1 + k2)))
 aux9 = (CO0 * ((-1. + (_np.exp((((-k4 * rho2) - (k3 * rho1)) * t)))) * (k4 * rho2))) / ((k3 * rho1) + (k4 * rho2))
 output = ((O20 + (OH0 + (((O0 * aux0) / ((k5 * rho1) + ((k6 - k7) * rho2))) + aux8))) - aux9) - (
 (_np.exp(((-k7 * (rho2 * t))))) * OH0)
 return output
 def OH(self, t, k1, k2, k3, k4, k5, k6, k7, rho1, rho2, C0, CO0, O0, OH0, O20, HCO0):
 aux0 = ((_np.exp(((k1 + k2) * (rho2 * t)))) * ((k5 * rho1) - (((k1 + k2) - k6) * rho2))) + (
 (_np.exp((k7 * (rho2 * t)))) * (((k7 * rho2) - (k6 * rho2)) - (k5 * rho1)))
 aux1 = ((_np.exp((((((k1 + (k2 + k7)) - k6) * rho2) - (k5 * rho1)) * t))) * (((k1 + k2) - k7) * rho2)) + aux0
 aux2 = ((_np.exp((((((k1 + (k2 + k7)) - k6) * rho2) - (k5 * rho1)) * t))) * (k5 * (O0 * rho1))) - (
 (_np.exp(((k1 + k2) * (rho2 * t)))) * ((k5 * ((O0 + OH0) * rho1)) + ((k6 - k7) * (OH0 * rho2))))
 aux3 = (C0 * (k2 * (k5 * (rho1 * aux1)))) + (((k1 + k2) - k7) * (((((k1 + k2) - k6) * rho2) - (k5 * rho1)) * aux2))
 aux4 = (((_np.exp(((((-k7) - k2) - k1) * (rho2 * t)))) * aux3) / (((k7 - k6) * rho2) - (k5 * rho1))) / (
 (((k1 + k2) - k6) * rho2) - (k5 * rho1))
 output = aux4 / ((k1 + k2) - k7)
 
 return output
class FixK:
 def __init__(self, k3=7.5e-10, k4=1.2e-10, k5=1.7e-9, k6=1.9e-10, k7=5.9e-10, rho1=1, rho2=1):
 """
 Fixes k3-7 and pressures
 :param k3: 
 :param k4: 
 :param k5: 
 :param k6: 
 :param k7: 
 :param rho1: 
 :param rho2: 
 """
 self.k3 = k3
 self.k4 = k4
 self.k5 = k5
 self.k6 = k6
 self.k7 = k7
 self.rho1 = rho1
 self.rho2 = rho2
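 # Hedged usage sketch (rate constants, densities and initial guesses below are
 # illustrative only): with k3-k7 and the densities fixed, each method has the
 # signature f(t, k1, k2, C0, CO0, O0, OH0, O20, HCO0) and can be passed
 # directly to scipy.optimize.curve_fit, e.g.
 #   model = FixK(rho1=1e4, rho2=1e4)
 #   popt, pcov = curve_fit(model.CO, t_data, co_data,
 #                          p0=[1e-10, 1e-10, 1.0, 0., 0., 0., 0., 0.])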
 def C(self, t, k1, k2, C0, CO0, O0, OH0, O20, HCO0):
 output = C0 * (_np.exp((((-k2) - k1) * (self.rho2 * t))))
 return output
 def CO(self, t, k1, k2, C0, CO0, O0, OH0, O20, HCO0):
 aux0 = (_np.exp(((k1 + k2) * (self.rho2 * t)))) * (
 ((C0 * (k1 * self.rho2)) + (CO0 * (((k1 + k2) - self.k4) * self.rho2))) - (CO0 * (self.k3 * self.rho1)))
 aux1 = (_np.exp(((((-(k1 + (k2 + self.k4)) * self.rho2)) - (self.k3 * self.rho1)) * t))) * (
 (C0 * ((_np.exp(((self.k3 * (self.rho1 * t)) + (self.k4 * (self.rho2 * t))))) * (k1 * self.rho2))) - aux0)
 output = aux1 / ((self.k3 * self.rho1) - (((k1 + k2) - self.k4) * self.rho2))
 return output
 def HCO(self, t, k1, k2, C0, CO0, O0, OH0, O20, HCO0):
 aux0 = (((_np.exp(((k1 + k2) * (self.rho2 * t)))) * ((k1 + k2) - self.k4)) + self.k4) - (
 (_np.exp((((((k1 + k2) - self.k4) * self.rho2) - (self.k3 * self.rho1)) * t))) * (k1 + k2))
 aux1 = C0 * (k1 * (self.k3 * (self.rho1 * (((1. - (_np.exp(((k1 + k2) * (self.rho2 * t))))) * (self.k3 * self.rho1)) | |
| 
	<reponame>neilferg/matlab2cpp
import logging
import re
import os
from os.path import sep
import matlab2cpp
from . import m2cpp
import matlab2cpp.pyplot
from . import reference
def flatten(node, ordered=False, reverse=False, inverse=False):
 """
Backend for the :py:func:`~matlab2cpp.Node.flatten` function.
Args:
 node (Node): Root node to start from
 ordered (bool): If True, make sure the nodes are hierarchically ordered.
 reverse (bool): If True, children are iterated in reverse order.
 inverse (bool): If True, the tree is iterated in reverse order.
See also:
 :py:func:`~matlab2cpp.Node.flatten`
 """
 o = bool(ordered)
 r = bool(reverse)
 i = bool(inverse)
 out = []
 if o:
 nodes = [node]
 for node in nodes:
 nodes.extend(node.children[::1-2*(r ^ i)])
 out.extend(nodes[::1-2*i])
 else:
 if i:
 def foo(node):
 for child in node[::1-2*r]:
 foo(child)
 out.append(node)
 else:
 def foo(node):
 out.append(node)
 for child in node[::1-2*r]:
 foo(child)
 foo(node)
 return out
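# Hedged illustration (not from the source): with the default flags the
# traversal is a pre-order depth-first walk; inverse=True gives a post-order
# walk; reverse=True visits children last-to-first, e.g.
#   nodes = flatten(root)                 # root first, then its subtrees
#   nodes = flatten(root, inverse=True)   # children before parents, root last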
def summary(node, opt):
 """
Backend for creating summary of the node tree.
See :py:func:`~matlab2cpp.qtree` for behavior.
Args:
 node (Node): Relative root of the tree
Returns:
 str: string representation of the node
See also:
 :py:func:`~matlab2cpp.qtree`
 """
 
 nodes = flatten(node, False, False, False)
 if not (opt is None) and opt.disp:
 print("iterating over %d nodes" % len(nodes))
 
 if not (opt is None) and not (opt.line is None):
 for node in nodes:
 if node.cls != "Block" and node.line == opt.line:
 nodes = flatten(node, False, False, False)
 break
 indent = []
 outl = []
 nl = len(str(nodes[-1].line))+1
 nc = len(str(nodes[-1].cur+1))+1
 for node in nodes:
 out = ""
 if node.line:
 nl_ = len(str(node.line))
 out += " "*(nl-nl_) + str(node.line) + " "
 nc_ = len(str(node.cur+1))
 out += " "*(nc-nc_) + str(node.cur+1)
 else:
 out += " "*(nl+nc+1)
 # indentation
 while indent and not (node.parent is indent[-1]):
 indent.pop()
 out += "| "*(len(indent))
 indent.append(node)
 out += node.cls.ljust(11)
 out += node.backend.ljust(13)
 
 # define type
 if node.type == "TYPE":
 type = node.declare.prop.get("suggest", "TYPE")
 if type != "TYPE":
 type = "(" + type + ")"
 else:
 type = node.type
 out += type.ljust(8)
 out += node.name
 outl.append(out)
 out = "\n".join(outl)
 out = re.sub(r"(\\n){2,}", "", out)
 return out
def auxillary(node, type, convert):
 """
Backend for the :py:func:`~matlab2cpp.Node.auxillary` function.
Args:
 node (Node):
 Root of the tree where the split into a new line will occur.
 type (str, None):
 If provided, auxiliary variable type will be converted
 convert (bool):
 If true, add an extra function call ``conv_to`` to convert datatype in
 Armadillo.
See also:
 :py:func:`~matlab2cpp.Node.auxiliary`
 """
 assert node.parent.cls != "Assign",\
 ".auxiliary() must be triggered mid expression."
 type = type or node.type
 if not isinstance(type, str):
 if isinstance(type[0], int):
 type = matlab2cpp.datatype.get_name(*type)
 else:
 type = matlab2cpp.datatype.common_strict(type)
 matrix_mode = False
 if node.cls == "Matrix":
 matrix_mode = True
 if matrix_mode and type == "int" and node.group.cls in ("Get", "Set"):
 type = "uword"
 line = node
 while line.parent.cls != "Block":
 line = line.parent
 block = line.parent
 # Create new var
 i = 1
 declares = node.func[0]
 while "_aux_" + type + "_" + str(i) in declares:
 i += 1
 var = "_aux_" + type + "_" + str(i)
 # Create Assign
 assign = matlab2cpp.collection.Assign(block, code=node.code)
 assign.type = type
 if matrix_mode:
 assign.backend = "matrix"
 # Return value
 aux_var = matlab2cpp.collection.Var(assign, var)
 aux_var.type = type
 aux_var.backend = type
 aux_var.create_declare()
 if convert:
 rhs = matlab2cpp.collection.Get(assign, "_conv_to")
 rhs.type = type
 else:
 rhs = assign
 swap_var = matlab2cpp.collection.Var(rhs, var)
 swap_var.declare.type = type
 # Place Assign correctly in Block
 i = block.children.index(line)
 block.children = block[:i] + block[-1:] + block[i:-1]
 # Swap node and Var
 index = node.parent.children.index(node)
 node.parent.children[index] = swap_var
 rhs.children[-1] = node
 swap_var.parent, node.parent = node.parent, swap_var.parent
 # generate code
 node.translate()
 swap_var.translate(only=True)
 aux_var.translate(only=True)
 if convert:
 rhs.translate(only=True)
 assign.translate(only=True)
 if convert:
 assert node.type != swap_var.type
 return swap_var
def resize(node):
 """
Backend for the :py:func:`~matlab2cpp.Node.resize` function.
Args:
 node (Node): node to be resized
See also:
 :py:func:`~matlab2cpp.Node.resize`
 """
 if "_resize" in node.prop:
 return
 node["_resize"] = True
 type = node.type
 node.dim = 3
 line = node
 while line.parent.cls != "Block":
 line = line.parent
 resize = matlab2cpp.collection.Resize(line.parent, name=node.name)
 resize.type = type
 i = line.parent.children.index(line)
 ps = line.parent.children
 line.parent.children = ps[:i] + ps[-1:] + ps[i:-1]
 resize.translate(False, only=True)
def error(node, msg, onlyw=False):
 """
Add an error or warning to the log subtree.
Args:
 node (Node): node where the error occurred
 msg (str): error message content
 onlyw (bool): if true, use warning instead of error
See also:
 :py:func:`~matlab2cpp.Node.error`
 :py:func:`~matlab2cpp.Node.warning`
 """
 msg = msg % node.properties()
 code = node.program.code
 cur = node.cur
 end = cur+len(node.code)
 start = cur
 while code[start] != "\n" and start != 0:
 start -= 1
 if end >= len(code):
 end = len(code)-1
 finish = end
 while code[finish] != "\n" and finish != len(code)-1:
 finish += 1
 code = code[start:finish]
 pos = cur-start
 name = node.cls + ":" + str(cur)
 errors = node.program[5]
 if name in errors.names:
 return
 if onlyw:
 err = matlab2cpp.collection.Warning(errors, name=name, line=node.line,
 cur=pos, value=msg, code=code)
 else:
 err = matlab2cpp.collection.Error(errors, name=name, line=node.line,
 cur=pos, value=msg, code=code)
 err.backend="program"
def create_declare(node):
 """
Backend for the :py:func:`~matlab2cpp.Node.create_declare` function.
Args:
 node (Node): Node to create declare from
Returns:
 Node : the (newly) declared node
 """
 if not (node is node.declare):
 return node
 if node.cls in reference.structvars:
 if node.cls in ("Nget", "Nset"):
 if node[0].cls == "String":
 return None
 value = node[0].value
 else:
 value = node.value
 structs = node.program[3]
 assert structs.cls == "Structs"
 if node not in structs:
 struct = matlab2cpp.collection.Struct(structs, name=node.name)
 else:
 struct = structs[node]
 if value in struct.names:
 return struct[struct.names.index(value)]
 declares = node.func[0]
 if node.cls in ("Sset", "Sget"):
 sname = "_size"
 if sname not in struct.names:
 matlab2cpp.collection.Counter(struct, sname, value="100")
 if node.name not in declares.names:
 var = matlab2cpp.collection.Var(declares, name=node.name, value=value)
 var.type="structs"
 else:
 if node.name not in declares.names:
 var = matlab2cpp.collection.Var(declares, name=node.name, value=value)
 var.type="struct"
 return matlab2cpp.collection.Var(struct, name=value)
 parent = struct
 else:
 parent = node.func[0]
 if node in parent:
 declare = parent[node]
 declare.type = node.type
 declare.pointer = node.pointer
 return declare
 out = matlab2cpp.collection.Var(parent, name=node.name,
 pointer=node.pointer, value=node.value)
 out.type = node.type
 return out
def suggest_datatype(node):
 """
Backend for the :py:func:`~matlab2cpp.Node.suggest_datatype` function.
Args:
 node (Node): Node to suggest datatype for.
Returns:
 (tuple): Suggestion on the form ``(dim, mem)``
See also:
 :py:func:`~matlab2cpp.Node.suggest_datatype`
 """
 if node.group.cls in ("Transpose", "Ctranspose"):
 dim, mem = suggest_datatype(node.group)
 if dim == 1:
 dim = 2
 elif dim == 2:
 dim = 2
 return dim, mem
 elif node.group.cls == "Assign":
 if node.group[0].num:
 return node.group[0].dim, node.group[0].mem
 elif node.group.cls == "Matrix":
 mems = set([])
 if node.group.value: # decomposed
 ax0, ax1 = len(node.group), len(node.group[0])
 if ax0 > 1:
 if ax1 > 1:
 dim = 3
 else:
 dim = 1
 else:
 if ax1 > 1:
 dim = 2
 else:
 dim = 0
 for vec in node.group:
 for elem in vec:
 if elem.num:
 mems.add(elem.mem)
 # rowvec definition
 elif len(node.group) == 1:
 if len(node.group[0]) == 1:
 return None, None
 for elem in node.group[0]:
 if elem.num:
 mems.add(elem.mem)
 dim = 3
 
 # colvec definition
 elif len(node.group[0]) == 1:
 for vec in node.group:
 if vec[0].num:
 mems.add(vec[0].mem)
 dim = 3
 else:
 for vec in node.group:
 for elem in vec:
 if elem.num:
 mems.add(elem.mem)
 dim = 3
 if len(mems) == 1:
 return dim, mems.pop()
 elif len(mems) > 1:
 return dim, max(*mems)
 else:
 return None, None
 return None, None
# small hack to ensure that log isn't cleaned mid translation
mid_translation = [0]
def translate(node, opt=None):
 """
Backend for performing translation of subtree
Args:
 node (Node): Root of the translation
 opt (argparse.Namespace, optional): optional arguments from frontend
See also:
 :py:func:`~matlab2cpp.Node.translate`
 """
 # translate for every program
 if node.cls == "Project":
 map(translate, node)
 return node
 if mid_translation[0] == 0:
 log = node.program[5]
 log.children = []
 mid_translation[0] += 1
 nodes = flatten(node, False, True, False)
 if not (opt is None) and opt.disp:
 print("iterating %d nodes" % len(nodes))
 for node in nodes[::-1]:
 translate_one(node, opt)
 mid_translation[0] -= 1
 if not mid_translation[0]:
 logs = flatten(log, False, True, False)
 for node in logs[::-1]:
 translate_one(node, opt)
 
 return node
def translate_one(node, opt):
 """
Backend for performing translation of single node
Args:
 node (Node): Node to perform translation on
 opt (argparse.Namespace, optional): optional arguments from frontend
See also:
 :py:func:`~matlab2cpp.Node.translate`
 """
 logger = logging.getLogger(__name__)
 # e.g. Get_a from user
 value = node.program.parent.kws.get(node.cls+"_"+node.name, None)
 # e.g. Get from user
 if value is None:
 value = node.program.parent.kws.get(node.cls, None)
 if value is None:
 backend = node.backend
 if backend == "TYPE":
 backend = "unknown"
 assert "_"+backend in matlab2cpp.rules.__dict__, (
 "No rule {}; ensure your .py file is properly set up.".format(backend))
 try:
 target = matlab2cpp.rules.__dict__["_"+backend]
 except KeyError as err:
 | |
| 
	left: 30%;
 }
 .uk-push-7-10 {
 left: 70%;
 }
 .uk-push-9-10 {
 left: 90%;
 }
 /*
 * Pull
 */
 /* Halves */
 .uk-pull-1-2,
 .uk-pull-2-4,
 .uk-pull-3-6,
 .uk-pull-5-10 {
 left: -50%;
 }
 /* Thirds */
 .uk-pull-1-3,
 .uk-pull-2-6 {
 left: -33.333%;
 }
 .uk-pull-2-3,
 .uk-pull-4-6 {
 left: -66.666%;
 }
 /* Quarters */
 .uk-pull-1-4 {
 left: -25%;
 }
 .uk-pull-3-4 {
 left: -75%;
 }
 /* Fifths */
 .uk-pull-1-5,
 .uk-pull-2-10 {
 left: -20%;
 }
 .uk-pull-2-5,
 .uk-pull-4-10 {
 left: -40%;
 }
 .uk-pull-3-5,
 .uk-pull-6-10 {
 left: -60%;
 }
 .uk-pull-4-5,
 .uk-pull-8-10 {
 left: -80%;
 }
 /* Sixths */
 .uk-pull-1-6 {
 left: -16.666%;
 }
 .uk-pull-5-6 {
 left: -83.333%;
 }
 /* Tenths */
 .uk-pull-1-10 {
 left: -10%;
 }
 .uk-pull-3-10 {
 left: -30%;
 }
 .uk-pull-7-10 {
 left: -70%;
 }
 .uk-pull-9-10 {
 left: -90%;
 }
}
/* ========================================================================
 Component: Panel
 ========================================================================== */
/*
 * 1. Needed for `a` elements
 * 2. Create position context for badges
 */
.uk-panel {
 /* 1 */
 display: block;
 /* 2 */
 position: relative;
}
/*
 * Allow panels to be anchors
 */
.uk-panel,
.uk-panel:hover {
 text-decoration: none;
}
/*
 * Micro clearfix to make panels more robust
 */
.uk-panel:before,
.uk-panel:after {
 content: "";
 display: table;
}
.uk-panel:after {
 clear: both;
}
/*
 * Remove margin from the last-child if not `uk-widget-title`
 */
.uk-panel > :not(.uk-panel-title):last-child {
 margin-bottom: 0;
}
/* Sub-object: `uk-panel-title`
 ========================================================================== */
.uk-panel-title {
 margin-top: 0;
 margin-bottom: 15px;
 font-size: 18px;
 line-height: 24px;
 font-weight: normal;
 text-transform: none;
 color: #444;
}
/* Sub-object: `uk-panel-badge`
 ========================================================================== */
.uk-panel-badge {
 position: absolute;
 top: 0;
 right: 0;
 z-index: 1;
}
/* Sub-object: `uk-panel-teaser`
 ========================================================================== */
.uk-panel-teaser {
 margin-bottom: 15px;
}
/* Sub-object: `uk-panel-body`
 ========================================================================== */
.uk-panel-body {
 padding: 15px;
}
/* Modifier: `uk-panel-box`
 ========================================================================== */
.uk-panel-box {
 padding: 15px;
 background: #fafafa;
 color: #444;
 border: 1px solid #ddd;
 border-radius: 4px;
}
.uk-panel-box-hover:hover {
 color: #444;
}
.uk-panel-box .uk-panel-title {
 color: #444;
}
.uk-panel-box .uk-panel-badge {
 top: 10px;
 right: 10px;
}
.uk-panel-box > .uk-panel-teaser {
 margin-top: -16px;
 margin-left: -16px;
 margin-right: -16px;
}
/*
 * Nav in panel
 */
.uk-panel-box > .uk-nav-side {
 margin: 0 -15px;
}
/*
 * Sub-modifier: `uk-panel-box-primary`
 */
.uk-panel-box-primary {
 background-color: #ebf7fd;
 color: #2d7091;
 border-color: rgba(45, 112, 145, 0.3);
}
.uk-panel-box-primary-hover:hover {
 color: #2d7091;
}
.uk-panel-box-primary .uk-panel-title {
 color: #2d7091;
}
/*
 * Sub-modifier: `uk-panel-box-secondary`
 */
.uk-panel-box-secondary {
 background-color: #fff;
 color: #444;
}
.uk-panel-box-secondary-hover:hover {
 color: #444;
}
.uk-panel-box-secondary .uk-panel-title {
 color: #444;
}
/* Modifier: `uk-panel-hover`
 ========================================================================== */
.uk-panel-hover {
 padding: 15px;
 color: #444;
 border: 1px solid transparent;
 border-radius: 4px;
}
.uk-panel-hover:hover {
 background: #fafafa;
 color: #444;
 border-color: #ddd;
}
.uk-panel-hover .uk-panel-badge {
 top: 10px;
 right: 10px;
}
.uk-panel-hover > .uk-panel-teaser {
 margin-top: -16px;
 margin-left: -16px;
 margin-right: -16px;
}
/* Modifier: `uk-panel-header`
 ========================================================================== */
.uk-panel-header .uk-panel-title {
 padding-bottom: 10px;
 border-bottom: 1px solid #ddd;
 color: #444;
}
/* Modifier: `uk-panel-space`
 ========================================================================== */
.uk-panel-space {
 padding: 30px;
}
.uk-panel-space .uk-panel-badge {
 top: 30px;
 right: 30px;
}
/* Modifier: `uk-panel-divider`
 ========================================================================== */
.uk-panel + .uk-panel-divider {
 margin-top: 50px !important;
}
.uk-panel + .uk-panel-divider:before {
 content: "";
 display: block;
 position: absolute;
 top: -25px;
 left: 0;
 right: 0;
 border-top: 1px solid #ddd;
}
/* Large screen and bigger */
@media (min-width: 1220px) {
 .uk-panel + .uk-panel-divider {
 margin-top: 70px !important;
 }
 .uk-panel + .uk-panel-divider:before {
 top: -35px;
 }
}
.uk-panel-box .uk-panel-teaser {
 border-top-left-radius: 4px;
 border-top-right-radius: 4px;
 overflow: hidden;
 -webkit-transform: translateZ(0);
}
/* ========================================================================
 Component: Block
 ========================================================================== */
.uk-block {
 position: relative;
 box-sizing: border-box;
 padding-top: 20px;
 padding-bottom: 20px;
}
/* Phone landscape and bigger */
@media (min-width: 768px) {
 .uk-block {
 padding-top: 50px;
 padding-bottom: 50px;
 }
}
/*
 * Micro clearfix to make blocks more robust
 */
.uk-block:before,
.uk-block:after {
 content: "";
 display: table;
}
.uk-block:after {
 clear: both;
}
/*
 * Remove margin from the last-child
 */
.uk-block > :last-child {
 margin-bottom: 0;
}
/* Padding Modifier
 ========================================================================== */
/*
 * Large padding
 */
.uk-block-large {
 padding-top: 20px;
 padding-bottom: 20px;
}
/* Tablets and bigger */
@media (min-width: 768px) {
 .uk-block-large {
 padding-top: 50px;
 padding-bottom: 50px;
 }
}
/* Desktop and bigger */
@media (min-width: 960px) {
 .uk-block-large {
 padding-top: 100px;
 padding-bottom: 100px;
 }
}
/* Color Modifier
 ========================================================================== */
/*
 * Default
 */
.uk-block-default {
 background: #fff;
}
/*
 * Muted
 */
.uk-block-muted {
 background: #f9f9f9;
}
/*
 * Primary
 */
.uk-block-primary {
 background: #00a8e6;
}
/*
 * Secondary
 */
.uk-block-secondary {
 background: #222;
}
/*
 * Adjust padding between equal colored blocks
 */
.uk-block-default + .uk-block-default,
.uk-block-muted + .uk-block-muted,
.uk-block-primary + .uk-block-primary,
.uk-block-secondary + .uk-block-secondary {
 padding-top: 0;
}
/* ========================================================================
 Component: Article
 ========================================================================== */
/*
 * Micro clearfix to make articles more robust
 */
.uk-article:before,
.uk-article:after {
 content: "";
 display: table;
}
.uk-article:after {
 clear: both;
}
/*
 * Remove margin from the last-child
 */
.uk-article > :last-child {
 margin-bottom: 0;
}
/*
 * Vertical gutter for articles
 */
.uk-article + .uk-article {
 margin-top: 25px;
}
/* Sub-object `uk-article-title`
 ========================================================================== */
.uk-article-title {
 font-size: 36px;
 line-height: 42px;
 font-weight: normal;
 text-transform: none;
}
.uk-article-title a {
 color: inherit;
 text-decoration: none;
}
/* Sub-object `uk-article-meta`
 ========================================================================== */
.uk-article-meta {
 font-size: 12px;
 line-height: 18px;
 color: #999;
}
/* Sub-object `uk-article-lead`
 ========================================================================== */
.uk-article-lead {
 color: #444;
 font-size: 18px;
 line-height: 24px;
 font-weight: normal;
}
/* Sub-object `uk-article-divider`
 ========================================================================== */
.uk-article-divider {
 margin-bottom: 25px;
 border-color: #ddd;
}
* + .uk-article-divider {
 margin-top: 25px;
}
.uk-article + .uk-article {
 padding-top: 25px;
 border-top: 1px solid #ddd;
}
/* ========================================================================
 Component: Comment
 ========================================================================== */
/* Sub-object `uk-comment-header`
 ========================================================================== */
.uk-comment-header {
 margin-bottom: 15px;
 padding: 10px;
 border: 1px solid #ddd;
 border-radius: 4px;
 background: #fafafa;
}
/*
 * Micro clearfix
 */
.uk-comment-header:before,
.uk-comment-header:after {
 content: "";
 display: table;
}
.uk-comment-header:after {
 clear: both;
}
/* Sub-object `uk-comment-avatar`
 ========================================================================== */
.uk-comment-avatar {
 margin-right: 15px;
 float: left;
}
/* Sub-object `uk-comment-title`
 ========================================================================== */
.uk-comment-title {
 margin: 5px 0 0 0;
 font-size: 16px;
 line-height: 22px;
}
/* Sub-object `uk-comment-meta`
 ========================================================================== */
.uk-comment-meta {
 margin: 2px 0 0 0;
 font-size: 11px;
 line-height: 16px;
 color: #999;
}
/* Sub-object `uk-comment-body`
 ========================================================================== */
.uk-comment-body {
 padding-left: 10px;
 padding-right: 10px;
}
/*
 * Remove margin from the last-child
 */
.uk-comment-body > :last-child {
 margin-bottom: 0;
}
/* Sub-object `uk-comment-list`
 ========================================================================== */
.uk-comment-list {
 padding: 0;
 list-style: none;
}
.uk-comment-list .uk-comment + ul {
 margin: 25px 0 0 0;
 list-style: none;
}
.uk-comment-list > li:nth-child(n+2),
.uk-comment-list .uk-comment + ul > li:nth-child(n+2) {
 margin-top: 25px;
}
/* Tablet and bigger */
@media (min-width: 768px) {
 .uk-comment-list .uk-comment + ul {
 padding-left: 100px;
 }
}
/* Modifier `uk-comment-primary`
 ========================================================================== */
.uk-comment-primary .uk-comment-header {
 border-color: rgba(45, 112, 145, 0.3);
 background-color: #ebf7fd;
 color: #2d7091;
 text-shadow: 0 1px 0 #fff;
}
/* ========================================================================
 Component: Cover
 ========================================================================== */
/*
 * Background image always covers and centers its element
 */
.uk-cover-background {
 background-position: 50% 50%;
 background-size: cover;
 background-repeat: no-repeat;
}
/*
 * Emulates image cover, works with video and image elements
 * 1. Parent container which clips resized object
 * 2. Resizes the object to always covers its container
 * 3. Reset the responsive image CSS
 * 4. Center object
 */
/* 1 */
.uk-cover {
 overflow: hidden;
}
.uk-cover-object {
 /* 2 */
 width: auto;
 height: auto;
 min-width: 100%;
 min-height: 100%;
 /* 3 */
 max-width: none;
 /* 4 */
 position: relative;
 left: 50%;
 top: 50%;
 -webkit-transform: translate(-50%, -50%);
 transform: translate(-50%, -50%);
}
/*
 * To center iframes use `data-uk-cover` JavaScript
 */
[data-uk-cover] {
 position: relative;
 left: 50%;
 top: 50%;
 -webkit-transform: translate(-50%, -50%);
 transform: translate(-50%, -50%);
}
/* ========================================================================
 Component: Nav
 ========================================================================== */
.uk-nav,
.uk-nav ul {
 margin: 0;
 padding: 0;
 list-style: none;
}
/*
 * Items
 */
.uk-nav li > a {
 display: block;
 text-decoration: none;
}
.uk-nav > li > a {
 padding: 5px 15px;
}
/*
 * Nested items
 */
.uk-nav ul {
 padding-left: 15px;
}
.uk-nav ul a {
 padding: 2px 0;
}
/*
 * Item subtitle
 */
.uk-nav li > a > div {
 font-size: 12px;
 line-height: 18px;
}
/* Sub-object: `uk-nav-header`
 ========================================================================== */
.uk-nav-header {
 padding: 5px 15px;
 text-transform: uppercase;
 font-weight: bold;
 font-size: 12px;
}
.uk-nav-header:not(:first-child) {
 margin-top: 15px;
}
/* Sub-object: `uk-nav-divider`
 ========================================================================== */
.uk-nav-divider {
 margin: 9px 15px;
}
/* Sub-object: `uk-nav-sub`
 ========================================================================== */
/*
 * `ul` needed for higher specificity to override padding
 */
ul.uk-nav-sub {
 padding: 5px 0 5px 15px;
}
/* Modifier: `uk-nav-parent-icon`
 ========================================================================== */
.uk-nav-parent-icon > .uk-parent > a:after {
 content: "\f104";
 width: 20px;
 margin-right: -10px;
 float: right;
 font-family: FontAwesome;
 text-align: center;
}
.uk-nav-parent-icon > .uk-parent.uk-open > a:after {
 content: "\f107";
}
/* Modifier `uk-nav-side`
 ========================================================================== */
/*
 * Items
 */
.uk-nav-side > li > a {
 color: #444;
}
/*
 * Hover
 * 1. Apply hover style also to focus state
 * 2. Remove default focus style
 */
.uk-nav-side > li > a:hover,
.uk-nav-side > li > a:focus {
 background: rgba(0, 0, 0, 0.03);
 color: #444;
 /* 2 */
 outline: none;
 box-shadow: inset 0 0 1px rgba(0, 0, 0, 0.1);
 text-shadow: 0 -1px 0 #fff;
}
/* Active */
.uk-nav-side > li.uk-active > a {
 background: #009dd8;
 color: #fff;
 box-shadow: inset 0 2px 4px rgba(0, 0, 0, 0.2);
 text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);
}
/*
 * Sub-object: `uk-nav-header`
 */
.uk-nav-side .uk-nav-header {
 color: #444;
}
/*
 * Sub-object: `uk-nav-divider`
 */
.uk-nav-side .uk-nav-divider {
 border-top: 1px solid #ddd;
 box-shadow: 0 1px 0 #fff;
}
/*
 * Nested items
 */
.uk-nav-side ul a {
 color: #07D;
}
.uk-nav-side ul a:hover {
 color: #059;
}
/* Modifier `uk-nav-dropdown`
 ========================================================================== */
/*
 * Items
 */
.uk-nav-dropdown > li > a {
 color: #444;
}
/*
 * Hover
 * 1. Apply hover style also to focus state
 * 2. Remove | |
| 
	: Python dictionary
 parameters from the force plate calibration file
 keys: 'fp', 'scale', 'size', 'cal_matrix', 'origin', 'center', 'orientation'
 """
 fp, scale, size, cal_matrix, origin, center, orientation = [], [], [], [], [], [], []
 with open(file=fname, mode='rt', encoding='utf-8', newline='') as f:
 if show_msg:
 print('Opening file "{}" ... '.format(fname), end='')
 reader = csv.reader(f, delimiter=' ')
 for row in reader:
 # force plate number
 fp.append(int(row[0][0]))
 # number of rows for Kistler or AMTI/Bertec force plate
 n = 8 if row[0][-1] == 'K' else 6
 # scale (inverse of the gain)
 scale_size = np.array(next(reader)).astype(np.float)
 scale.append(scale_size[0])
 # force plate length (cm) and width (cm)
 size.append(scale_size[1:])
 # calibration matrix (the inverse sensitivity matrix)
 matrix = [next(reader) for x in range(n)]
 cal_matrix.append(np.array(matrix).astype(np.float))
 # true origin in relation to the geometric center (cm)
 origin.append(np.array(next(reader)).astype(np.float))
 # geometric center in relation to LCS origin (cm)
 center.append(np.array(next(reader)).astype(np.float))
 # 3 x 3 orientation matrix
 orienta = [next(reader) for x in range(3)]
 orientation.append(np.array(orienta).astype(np.float))
 forcepla = {'fp': fp, 'scale': scale, 'size': size, 'cal_matrix': cal_matrix,
 'origin': origin, 'center': center, 'orientation': orientation}
 if show_msg:
 print('done.')
 return forcepla
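# Hedged note (not from the source): every key of the returned dictionary holds
# one list entry per force plate, e.g. forcepla['cal_matrix'][0] is the
# calibration matrix of the first plate listed in the .cal file, with 8 rows
# for Kistler plates and 6 rows for AMTI/Bertec plates.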
def read_forces(fname, time=True, forcepla=[], mm2m=True, show_msg=True):
 """Read .forces file format from Cortex MAC.
 The .forces file in ASCII contains force plate data. The data is saved
 based on the forcepla.cal file of the trial and converts the raw force
 plate data into calibrated forces. The units used are Newtons and
 Newton-meters and each line in the file equates to one analog sample.
 Example of .forces file structure:
 [Force Data]
 NumberOfForcePlates=7
 SampleRate=150.000000
 NumberOfSamples=150
 #Sample FX1 FY1 FZ1 X1 Y1 Z1 MZ1 FX2 ...
 ...
 Parameters
 ----------
 fname : string
 full file name of the .forces file to be opened
 time : bool (default = True)
 Whether the data index is in units of time (True) or not (False).
 forcepla : list of integers (default = [])
 List of force plates to read. An empty list reads all force plates.
 Enter a list of force plate numbers to read.
 mm2m : bool (default = True)
 Whether to change the COP units from mm to m (True) or not (False).
 show_msg : bool (default = True)
 Whether to print messages about the execution of the intermediary steps
 (True) or not (False).
 Returns
 -------
 h : Python dictionary
 .forces header information
 keys: name, nforceplates, data_rate, nsamples, ch_names
 df : pandas dataframe
 force plate data with shape (nsamples, 7*nforceplates)
 """
 with open(file=fname, mode='rt', encoding='utf-8', newline='') as f:
 if show_msg:
 print('Opening file "{}" ... '.format(fname), end='')
 # get header information
 read = csv.reader(f, delimiter='\t')
 header = [next(read) for x in range(5)]
 h = {'name': header[0][0],
 'NumberOfForcePlates': int(header[1][0].split('=')[1]),
 'SampleRate': float(header[2][0].split('=')[1]),
 'NumberOfSamples': int(header[3][0].split('=')[1]),
 'ch_names': header[4][1:]
 }
 if forcepla:
 if not isinstance(forcepla, list):
 forcepla = [forcepla]
 h['NumberOfForcePlates'] = len(forcepla)
 usecols = []
 for fp in forcepla:
 usecols.extend([i+1 for i, s in enumerate(h['ch_names']) if str(fp) in s])
 h['ch_names'] = [h['ch_names'][col-1] for col in usecols]
 else:
 usecols = np.arange(1, 1+7*h['NumberOfForcePlates'])
 # force plate data
 df = pd.read_csv(f, sep='\t', names=h['ch_names'], index_col=False,
 usecols=usecols, engine='c')
 if mm2m:
 cols = [[3+c, 4+c, 5+c, 6+c] for c in range(0, int(df.shape[1]), 7)]
 cols = [item for sublist in cols for item in sublist] # flat list
 df.iloc[:, cols] = df.iloc[:, cols]/1000
 if time:
 df.index = df.index/h['SampleRate']
 df.index.name = 'Time'
 if show_msg:
 print('done.')
 return h, df
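# Hedged usage sketch (file name and channel label are illustrative):
#   h, grf = read_forces('trial01.forces', forcepla=[1])
#   fz1 = grf['FZ1']              # vertical force of plate 1, in N
#   print(h['SampleRate'], grf.shape)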
def read_mot(fname, show_msg=True):
 """Read .mot file format from OpenSim.
 The .mot file in ASCII contains force plate data in the dataframe df.
 Example of .mot file structure:
 name /Users/data.mot
 datacolumns 19
 datarows 1260
 range 0 2.797778e+00
 endheader
 time R_ground_force_vx R_ground_force_vy R_ground_force_vz R_ground_force_px ...
 ...
 Parameters
 ----------
 fname : string
 full file name of the .mot file to be opened
 show_msg : bool (default = True)
 Whether to print messages about the execution of the intermediary steps
 (True) or not (False).
 Returns
 -------
 h : Python dictionary
 .mot header information
 keys: name, datacolumns, datarows, range
 df : pandas dataframe
 force plate data with shape (datarows, datacolumns)
 """
 # column names of the .mot dataframe
 cols = ['time',
 'R_ground_force_vx', 'R_ground_force_vy', 'R_ground_force_vz',
 'R_ground_force_px', 'R_ground_force_py', 'R_ground_force_pz',
 'L_ground_force_vx', 'L_ground_force_vy', 'L_ground_force_vz',
 'L_ground_force_px', 'L_ground_force_py', 'L_ground_force_pz',
 'R_ground_torque_x', 'R_ground_torque_y', 'R_ground_torque_z',
 'L_ground_torque_x', 'L_ground_torque_y', 'L_ground_torque_z']
 with open(file=fname, mode='rt', encoding='utf-8', newline='') as f:
 if show_msg:
 print('Opening file "{}" ... '.format(fname), end='')
 # get header information
 read = csv.reader(f, delimiter='\t')
 header = [next(read) for x in range(4)]
 h = {'name': header[0][0],
 'datacolumns': int(header[1][0].split('=')[1]),
 'datarows': int(header[2][0].split('=')[1]),
 'range': float(header[3][0].split('=')[1]),
 }
 # force plate data
 df = pd.read_csv(f, sep='\t', names=cols, index_col=0, engine='c')
 if show_msg:
 print('done.')
 return h, df
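# Hedged usage sketch (path and column choice are illustrative):
#   h, mot = read_mot('subject01_grf.mot')
#   mot[['R_ground_force_vy', 'L_ground_force_vy']].plot()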
def read_delsys(fname, fname2='', sensors=None, freq_trc=150, emg=True,
 imu=False, resample=[1200, 150], freqs=[20, 20, 450],
 show_msg=True, show=False, ax=None, suptitle=''):
 """Read Delsys csv file from Cortex MAC (Asynchronous device data file).
 Parameters
 ----------
 fname : string
 Full file name of the Delsys csv file from Cortex file to be opened.
 fname2 : string, optional (default = '')
 Full file name of the text file to be saved with data if desired.
 If both parameters `emg` and `imu` are True, you must input a list with
 the two full file names (EMG and IMU).
 If fname2 is '', no file is saved.
 If fname2 is '=', the original file name will be used but its extension
 will be .emg and .imu for the files with EMG data and with IMU data (if
 parameters `emg` and `imu` are True).
 sensors : list of strings, optional
 Names of sensors to be used as column names for the EMG and IMU data.
 freq_trc : number, optional (default = 150)
 Sampling frequency of the markers data
 emg : bool, optional (default = True)
 Read and save EMG data
 imu : bool, optional (default = False)
 Read and save IMU data
 resample : list with two numbers, optional (default = [1200, 150])
 Whether to resample the data to have the given frequencies.
 The list order is [freq_emg, freq_imu]. Enter 0 (zero) to not resample.
 The scipy function signal.resample_poly is used for the resampling.
 For the EMG signal, if the requested frequency is lower than 1000 Hz,
 the linear envelope is calculated with a low-pass cutoff given by
 freqs[0] (the EMG data is first band-pass filtered with the cutoff
 frequencies given by freqs[1] and freqs[2]).
 freqs : list of three numbers, optional (default = [20, 20, 450])
 Frequencies to be used at the linear envelope calculation if desired.
 See the parameter `resample`.
 show_msg : bool, optional (default = True)
 Whether to print messages about the execution of the intermediary steps
 (True) or not (False).
 show : bool, optional (default = False)
 if True (1), plot data in matplotlib figure.
 ax : a matplotlib.axes.Axes instance, optional (default = None).
 suptitle : string, optional (default = '')
 If string, shows string as suptitle. If empty, doesn't show suptitle.
 Returns
 -------
 data : 1 or 2 pandas dataframe
 df_emg and df_imu, depending on the parameters `emg` and `imu`.
 The units of df_emg will be mV (the raw signal is multiplied by 1000).
 The units of the IMU data are according to Delsys specification.
 """
 with open(file=fname, mode='rt', newline=None) as f:
 if show_msg:
 print('Opening file "{}" ... '.format(fname), end='')
 file = f.read().splitlines()
 if file[0] != 'Cortex generated Asynchronous device data file (.add)':
 print('\n"{}" is not a valid Delsys from Cortex file.'.format(fname))
 if emg and imu:
 return None, None
 elif emg:
 return None
 elif imu:
 return None
 # find start and final lines of data in file
 idx = file.index('[Devices]') + 2
 count = int(file[idx].split('=')[1])
 devices = [name.split(', ')[-1] for name in file[idx+1:idx+1+count]]
 if sensors is None:
 sensors = devices
 idx = idx + 3 + count
 count2 = int(file[idx].split('=')[1])
 channels = [name for name in file[idx+1:idx+1+count2]]
 n_im = int((count2-count)/count)
 # indexes for ini_emg, end_emg, ini_im, end_im
 idxs = np.zeros((count, 4), dtype=int)
 for i, device in enumerate(devices):
 idxs[i, 0] = file.index(device) + 3
 idxs[i, 1] = file[idxs[i, 0]:].index('') + idxs[i, 0] - 1
 idxs[:, 2] = idxs[:, 1] + 3
 idxs[:, 3] = np.r_[idxs[1:, 0] - 6,
 np.array(len(file) - | |
| 
	(1, time//iks[1]), strides=(1, s), use_bias=False, padding='same', name='input_over_' + str(iks[1]))(inputs)
 w2 = layers.BatchNormalization(name='input_over_' + str(iks[1]) + '_bn')(w2)
 w2 = layers.Activation('relu', name='input_over_' + str(iks[1]) + '_relu')(w2)
 
 w3 = layers.Conv2D(2, (1, time//iks[2]), strides=(1, s), use_bias=False, padding='same', name='input_over_' + str(iks[2]))(inputs)
 w3 = layers.BatchNormalization(name='input_over_' + str(iks[2]) + '_bn')(w3)
 w3 = layers.Activation('relu', name='input_over_' + str(iks[2]) + '_relu')(w3)
 
 w4 = layers.Conv2D(2, (1, time//iks[3]), strides=(1, s), use_bias=False, padding='same', name='input_over_' + str(iks[3]))(inputs)
 w4 = layers.BatchNormalization(name='input_over_' + str(iks[3]) + '_bn')(w4)
 w4 = layers.Activation('relu', name='input_over_' + str(iks[3]) + '_relu')(w4)
 w5 = layers.Conv2D(2, (1, time//iks[4]), strides=(1, s), use_bias=False, padding='same', name='input_over_' + str(iks[4]))(inputs)
 w5 = layers.BatchNormalization(name='input_over_' + str(iks[4]) + '_bn')(w5)
 w5 = layers.Activation('relu', name='input_over_' + str(iks[4]) + '_relu')(w5)
 w6 = layers.Conv2D(2, (1, time//iks[5]), strides=(1, s), use_bias=False, padding='same', name='input_over_' + str(iks[5]))(inputs)
 w6 = layers.BatchNormalization(name='input_over_' + str(iks[5]) + '_bn')(w6)
 w6 = layers.Activation('relu', name='input_over_' + str(iks[5]) + '_relu')(w6)
 
 x = layers.concatenate([w1, w2, w3, w4, w5, w6], axis=3, name='inputs')
 ############## HIDDEN LAYER 1 ##############
 x = layers.SeparableConv2D(16, (1, t), use_bias=False, padding=pad, name='block1')(x)
 x = layers.BatchNormalization(name='block1_bn')(x)
 if features in ['chroma', 'mfcc']:
 x = layers.MaxPooling2D((1, 2), strides=(1, 2), padding='same', name='block1_mp_freq')(x)
 else:
 x = layers.MaxPooling2D((2, 1), strides=(2, 1), padding='same', name='block1_mp_freq')(x)
 x = layers.Activation('relu', name='block1_relu')(x)
 ############## HIDDEN LAYER 2 ##############
 x = layers.SeparableConv2D(32, (1, t), use_bias=False, padding=pad, name='block2')(x)
 x = layers.BatchNormalization(name='block2_bn')(x)
 if features in ['chroma', 'mfcc']:
 x = layers.MaxPooling2D((1, 2), strides=(1, 2), padding='same', name='block2_mp_freq')(x)
 else:
 x = layers.MaxPooling2D((2, 1), strides=(2, 1), padding='same', name='block2_mp_freq')(x)
 x = layers.Activation('relu', name='block2_relu')(x)
 ############## HIDDEN LAYER 3 ##############
 x = layers.SeparableConv2D(64, (1, t), use_bias=False, padding=pad, name='block3')(x)
 x = layers.BatchNormalization(name='block3_bn')(x)
 if features in ['chroma', 'mfcc']:
 x = layers.MaxPooling2D((1, 2), strides=(1, 2), padding='same', name='block3_mp_freq')(x)
 else:
 x = layers.MaxPooling2D((2, 1), strides=(2, 1), padding='same', name='block3_mp_freq')(x)
 x = layers.Activation('relu', name='block3_relu')(x)
 
 ############## OUTPUT LAYER ##############
 x = layers.SeparableConv2D(128, (1, t), use_bias=False, padding=pad, name='preflat')(x)
 x = layers.BatchNormalization(name='preflat_bn')(x)
 x = layers.Activation('relu', name='preflat_relu')(x)
 x = layers.SeparableConv2D(256, (int(x.shape[1]), 1), use_bias=False, name='freq_flat')(x)
 x = layers.BatchNormalization(name='freq_flat_bn')(x)
 x = layers.Activation('relu', name='freq_flat_relu')(x) 
 
 x = layers.GlobalAveragePooling2D(name='GAP')(x)
 x = layers.Dense(num_classes, name='logits')(x)
 
 if test_type == 'sgc':
 output_activation = 'softmax'
 elif test_type == 'mgc':
 output_activation = 'sigmoid'
 elif test_type in ['cos', 'mse']:
 output_activation = 'linear'
 pred = layers.Activation(output_activation, name=output_activation)(x)
 return Model(inputs=inputs, outputs=pred)
def Freq(features, test_type, iks, input_shape, num_classes):
 freq = input_shape[0]
 
 inputs = layers.Input(shape=input_shape)
 if features in ['chroma', 'mfcc']:
 s, f, pad = 1, 3, 'same'
 elif features in ['cifar100']:
 s, f, pad = 1, 9, 'same'
 else:
 s, f, pad = 2, 9, 'valid'
 ############## INPUT LAYER ##############
 h1 = layers.Conv2D(2, (freq//iks[0], 1), strides=(s, 1), use_bias=False, padding='same', name='input_over_' + str(iks[0]))(inputs)
 h1 = layers.BatchNormalization(name='input_over_' + str(iks[0]) + '_bn')(h1)
 h1 = layers.Activation('relu', name='input_over_' + str(iks[0]) + '_relu')(h1)
 
 h2 = layers.Conv2D(2, (freq//iks[1], 1), strides=(s, 1), use_bias=False, padding='same', name='input_over_' + str(iks[1]))(inputs)
 h2 = layers.BatchNormalization(name='input_over_' + str(iks[1]) + '_bn')(h2)
 h2 = layers.Activation('relu', name='input_over_' + str(iks[1]) + '_relu')(h2)
 
 h3 = layers.Conv2D(2, (freq//iks[2], 1), strides=(s, 1), use_bias=False, padding='same', name='input_over_' + str(iks[2]))(inputs)
 h3 = layers.BatchNormalization(name='input_over_' + str(iks[2]) + '_bn')(h3)
 h3 = layers.Activation('relu', name='input_over_' + str(iks[2]) + '_relu')(h3)
 
 h4 = layers.Conv2D(2, (freq//iks[3], 1), strides=(s, 1), use_bias=False, padding='same', name='input_over_' + str(iks[3]))(inputs)
 h4 = layers.BatchNormalization(name='input_over_' + str(iks[3]) + '_bn')(h4)
 h4 = layers.Activation('relu', name='input_over_' + str(iks[3]) + '_relu')(h4)
 h5 = layers.Conv2D(2, (freq//iks[4], 1), strides=(s, 1), use_bias=False, padding='same', name='input_over_' + str(iks[4]))(inputs)
 h5 = layers.BatchNormalization(name='input_over_' + str(iks[4]) + '_bn')(h5)
 h5 = layers.Activation('relu', name='input_over_' + str(iks[4]) + '_relu')(h5)
 h6 = layers.Conv2D(2, (freq//iks[5], 1), strides=(s, 1), use_bias=False, padding='same', name='input_over_' + str(iks[5]))(inputs)
 h6 = layers.BatchNormalization(name='input_over_' + str(iks[5]) + '_bn')(h6)
 h6 = layers.Activation('relu', name='input_over_' + str(iks[5]) + '_relu')(h6)
 
 x = layers.concatenate([h1, h2, h3, h4, h5, h6], axis=3, name='inputs')
 ############## HIDDEN LAYER 1 ##############
 x = layers.SeparableConv2D(16, (f, 1), use_bias=False, padding=pad, name='block1')(x)
 x = layers.BatchNormalization(name='block1_bn')(x)
 x = layers.MaxPooling2D((1, 2), strides=(1, 2), padding='same', name='block1_mp_time')(x)
 x = layers.Activation('relu', name='block1_relu')(x)
 ############## HIDDEN LAYER 2 ##############
 x = layers.SeparableConv2D(32, (f, 1), use_bias=False, padding=pad, name='block2')(x)
 x = layers.BatchNormalization(name='block2_bn')(x)
 x = layers.MaxPooling2D((1, 2), strides=(1, 2), padding='same', name='block2_mp_time')(x)
 x = layers.Activation('relu', name='block2_relu')(x)
 ############## HIDDEN LAYER 3 ##############
 x = layers.SeparableConv2D(64, (f, 1), use_bias=False, padding=pad, name='block3')(x)
 x = layers.BatchNormalization(name='block3_bn')(x)
 x = layers.MaxPooling2D((1, 2), strides=(1, 2), padding='same', name='block3_mp_time')(x)
 x = layers.Activation('relu', name='block3_relu')(x)
 ############## OUTPUT LAYER ##############
 x = layers.SeparableConv2D(128, (f, 1), use_bias=False, padding=pad, name='preflat')(x)
 x = layers.BatchNormalization(name='preflat_bn')(x)
 x = layers.Activation('relu', name='preflat_relu')(x)
 x = layers.SeparableConv2D(256, (1, int(x.shape[2])), use_bias=False, name='time_flat')(x)
 x = layers.BatchNormalization(name='time_flat_bn')(x)
 x = layers.Activation('relu', name='time_flat_relu')(x)
 x = layers.GlobalAveragePooling2D(name='GAP')(x)
 x = layers.Dense(num_classes, name='logits')(x)
 if test_type == 'sgc':
 output_activation = 'softmax'
 elif test_type == 'mgc':
 output_activation = 'sigmoid'
 elif test_type in ['cos', 'mse']:
 output_activation = 'linear'
 pred = layers.Activation(output_activation, name=output_activation)(x)
 return Model(inputs=inputs, outputs=pred)
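# Hedged usage sketch (hyper-parameters mirror the mel_scaled_stft branch of the
# __main__ block below; num_classes is illustrative only):
#   model = Freq(features='mel_scaled_stft', test_type='sgc',
#                iks=[6, 8, 12, 24, 32, 64], input_shape=(256, 643, 1),
#                num_classes=16)
#   model.compile(optimizer='adam', loss='categorical_crossentropy')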
def TimeFreq(features, test_type, dataset, input_shape, num_classes, quick):
 if quick:
 time_path = './Models/cnn/sgc/' + 'DELETE.' + dataset + '.' + features + '.' + 'Time.hdf5'
 freq_path = './Models/cnn/sgc/' + 'DELETE.' + dataset + '.' + features + '.' + 'Freq.hdf5'
 else:
 time_path = './Models/cnn/sgc/' + dataset + '.' + features + '.' + 'Time.hdf5'
 freq_path = './Models/cnn/sgc/' + dataset + '.' + features + '.' + 'Freq.hdf5'
 time_model = load_model(time_path)
 freq_model = load_model(freq_path)
 time_model = Model(inputs=time_model.input, outputs=time_model.get_layer('logits').output)
 freq_model = Model(inputs=freq_model.input, outputs=freq_model.get_layer('logits').output)
 
 for layer in time_model.layers:
 layer.trainable = False
 for layer in freq_model.layers:
 layer.trainable = False
 inputs = layers.Input(shape=input_shape)
 
 t = time_model(inputs)
 f = freq_model(inputs)
 
 x = layers.concatenate([t,f], name='Time_Freq')
 
 x = layers.Dense(256, kernel_regularizer=layers.regularizers.l2(0.002), name='fc_1')(x)
 x = layers.Activation('relu', name='fc_1_relu')(x)
 x = layers.Dropout(0.5, name='fc_1_dropout')(x)
 
 x = layers.Dense(128, kernel_regularizer=layers.regularizers.l2(0.002), name='fc_2')(x)
 x = layers.Activation('relu', name='fc_2_relu')(x)
 x = layers.Dropout(0.5, name='fc_2_dropout')(x)
 
 x = layers.Dense(num_classes, name='logits')(x)
 
 pred = layers.Activation('softmax', name='softmax')(x)
 
 return Model(inputs=inputs, outputs=pred)
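# Note (summary of the function above): TimeFreq is a late-fusion head; the two
# pre-trained Time and Freq models are loaded, truncated at their 'logits'
# layers and frozen, so only the fc_1/fc_2/logits dense layers are trained.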
if __name__ == '__main__':
 if args.dataset in ['fma_med', 'fma_large', 'spotify']:
 if args.dataset == 'fma_med':
 FMA = FMA.FreeMusicArchive('medium', 22050)
 num_classes = FMA.NUM_CLASSES
 elif args.dataset == 'fma_large':
 FMA = FMA.FreeMusicArchive('large', 22050)
 num_classes = FMA.NUM_CLASSES
 elif args.dataset == 'spotify':
 SPOTIFY = SPOTIFY.SPOTIFY()
 num_classes = SPOTIFY.EMB_DIM
 
 if args.features == 'stft':
 freq, time = 2049, 643 
 dim = (freq, time, 1)
 fiks = [6, 12, 32, 64, 128, 256] # 341, 170, 64, 32, 16, 8
 tiks = [4, 8, 16, 32, 64, 96] # 160, 80, 40, 20, 10, 6
 elif args.features == 'stft_halved':
 freq, time = 2049//2, 643
 dim = (freq, time, 1)
 fiks = [6, 12, 32, 64, 128, 256] # 170, 85, 32, 16, 8, 4
 tiks = [4, 8, 16, 32, 64, 96] # 160, 80, 40, 20, 10, 6
 elif args.features == 'mel_scaled_stft':
 freq, time = 256, 643
 dim = (freq, time, 1)
 fiks = [6, 8, 12, 24, 32, 64] # 42, 32, 21, 10, 8, 4
 tiks = [4, 8, 16, 32, 64, 96] # 160, 80, 40, 20, 10, 6
 elif args.features == 'cqt':
 freq, time = 168, 643
 dim = (freq, time, 1)
 fiks = [4, 5, 6, 12, 24, 48] # 42, 33, 28, 14, 7, 3
 tiks = [4, 8, 16, 32, 64, 96] # 160, 80, 40, 20, 10, 6
 elif args.features in ['chroma', 'mfcc']:
 freq, time = 12, 643
 dim = (freq, time, 1)
 fiks = [1, 2, 3, 4, 6, 12] # 12, 6, 4, 3, 2, 1
 tiks = [4, 8, 16, 32, 64, 96] # 160, 80, 40, 20, 10, 6
 else:
 raise Exception('Wrong dataset/feature combination!')
 elif args.dataset == 'cifar100':
 args.features = 'cifar100'
 num_classes = 100
 freq, time = 32, 32
 dim = (freq, time, 3)
 fiks = [3, 4, 5, 6, 7, 10] # 10, 8, 6, 5, 4, 3
 tiks = [3, 4, 5, 6, 7, 10] # 10, 8, 6, 5, 4, 3
 
 else:
 raise Exception('Wrong dataset!')
 ################# Freq ################
 K.clear_session()
 model = Freq(features=args.features, test_type=args.test, iks=fiks, input_shape=dim, num_classes=num_classes)
 model.summary()
 train_model(model=model, model_name='Freq', dim=dim, features=args.features, dataset=args.dataset, test_type=args.test, quick=args.quick) 
 ################ Time ################
 K.clear_session()
 model = Time(features=args.features, test_type=args.test, iks=tiks, input_shape=dim, num_classes=num_classes)
 model.summary()
 train_model(model=model, model_name='Time', dim=dim, features=args.features, dataset=args.dataset, test_type=args.test, quick=args.quick) 
 ################ Simple ################
 K.clear_session()
 model = Simple(features=args.features, test_type=args.test, input_shape=dim, num_classes=num_classes)
 model.summary()
 train_model(model=model, model_name='Simple', dim=dim, features=args.features, dataset=args.dataset, test_type=args.test, quick=args.quick) 
 if args.dataset == 'fma_med' and args.test == 'sgc':
 ############### TimeFreq ################
 K.clear_session()
 model = TimeFreq(features=args.features, test_type=args.test, dataset=args.dataset, input_shape=dim, num_classes=num_classes, quick=args.quick)
 model.summary()
 train_model(model=model, model_name='TimeFreq', dim=dim, features=args.features, dataset=args.dataset, | |
| 
	seconds." % (num_kmers, time_spent)
 # return reversed slice
 return kmer_arr[0:count][::-1]
 def clear_kmer(self, kmer):
 if isinstance(kmer, str):
 kmer = kmer_to_intval(kmer)
 ck = self.get_central_kmer(kmer)
 node = self.get_node_for_central_kmer(ck)
 return self.remotemultiset[node].delete(kmer)
 def prune_kmer_extensions(self, min_ratio_non_error):
 deletion_list = []
 for kmer_val, count in self.multiset.items():
 if count == 0:
 continue
 candidates = self.get_forward_kmer_candidates(kmer_val)
 dominant_count = 0
 for i in range(len(candidates)):
 if candidates[i][1]:
 candidate_count = candidates[i][1]
 if dominant_count == 0:
 dominant_count = candidate_count
 elif dominant_count > 0 and float(candidate_count)/float(dominant_count) < min_ratio_non_error:
 kmer_candidate = self.find_kmer( candidates[i][0] )
 deletion_list.append(kmer_candidate)
 # kmer_candidate->second = 0; // disable when encountered in further iterations.
 if len(deletion_list) > 0:
 for kmer in deletion_list:
 self.prune_kmer(kmer)
 return True
 else:
 return False
 def dump(self):
 self.multiset.dump()
def get_central_kmer(kmer, kmer_length):
 # given ABCDE, want BCD
 kmer = kmer >> 2 # remove last nucleotide
 kmer_mask = long(math.pow(2,2*( (kmer_length-1) -1) ) -1) # remove first nucleotide of the resulting (kmer-1) to get the core seq
 central_kmer = kmer & kmer_mask
 return central_kmer
def get_central_right_kmer(kmer, kmer_length):
 # given ABCDE, want CDE
 kmer_mask = long(math.pow(2,2*(kmer_length-2))-1) # remove first two nucleotides of kmer
 central_kmer = kmer & kmer_mask
 return central_kmer
def get_central_left_kmer(kmer, kmer_length):
 # given ABCDE, want ABC
 return kmer >> 4 # shift out the last two nucleotides.
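# Hedged worked example (assumes the usual 2-bit packing A=0, C=1, G=2, T=3 used
# by kmer_to_intval/decode_kmer_from_intval, which is not shown in this excerpt):
# for the 5-mer "ACGTA" with kmer_length=5,
#   get_central_kmer(...)       -> the value encoding "CGT" (drop first and last base)
#   get_central_right_kmer(...) -> the value encoding "GTA" (drop the first two bases)
#   get_central_left_kmer(...)  -> the value encoding "ACG" (drop the last two bases)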
def get_node_for_central_kmer(central_kmer, kmer_length):
 canonical_central_kmer = central_kmer
 rev_central_kmer = revcomp_val(central_kmer, kmer_length)
 
 if rev_central_kmer < canonical_central_kmer:
 canonical_central_kmer = rev_central_kmer
 node_for_kmer = canonical_central_kmer % ACP.procs()
 
 if False: ##IRKE_COMMON.MONITOR >= 4:
 print "Kmer: " + decode_kmer_from_intval(central_kmer, kmer_length) + " or " \
 + decode_kmer_from_intval(canonical_central_kmer, kmer_length) + " assigned to node: " + str(node_for_kmer)
 # all nodes are kmer servers
 return node_for_kmer
def is_good_seed_kmer(kcounter, kmer, kmer_count, kmer_length, min_connectivity):
 print "kmer:" + str(kmer) + " kmer_count:" + str(kmer_count) + " kmer_length:" + str(kmer_length)
 if kmer_count == 0: return False
 if kmer == revcomp_val(kmer, kmer_length):
 # palindromic kmer, avoid palindromes as seeds
 if IRKE_COMMON.MONITOR >= 2:
 print "SEED kmer: " + kcounter.get_kmer_string(kmer) + " is palidnromic. Skipping. " + "\n";
 
 return False
 if kmer_count < MIN_SEED_COVERAGE:
 if IRKE_COMMON.MONITOR >= 2:
 print "-seed has insufficient coverage, skipping"
 return False
 entropy = compute_entropy_val(kmer, kmer_length)
 
 
 if entropy < MIN_SEED_ENTROPY :
 if IRKE_COMMON.MONITOR >= 2:
 print "-skipping seed due to low entropy: " + str(entropy)
 
 return False
 # got this far, so kmer is fine as a seed
 return True
def build_inchworm_contig_from_seed(kmer, kcounter, min_connectivity): #, PARALLEL_IWORM):
 kmer_length = kcounter.get_kmer_length()
 # track those kmers included in growing path.
 visitor = Kmer_visitor(kmer_length, DOUBLE_STRANDED_MODE)
 forward_path = inchworm(kcounter, 'F', kmer, visitor, min_connectivity)
 ## visitor.clear()
 # add selected path to visitor
 if IRKE_COMMON.MONITOR >= 2:
 print "Forward path contains: " + str(len(forward_path)) + " kmers. "
 for kmer_ in forward_path:
 #visitor.add(kmer_)
 print "\tForward path kmer: " + kcounter.get_kmer_string(kmer_)
 
 ### Extend to the left ###
 # visitor.erase(kmer) # reset the seed
 
 reverse_path = inchworm(kcounter, 'R', kmer, visitor, min_connectivity)
 if IRKE_COMMON.MONITOR >= 2:
 print "Reverse path contains: " + str(len(reverse_path)) + " kmers. "
 for p in reverse_path:
 print "\tReverse path kmer: " + kcounter.get_kmer_string(p)
 joined_path = join_forward_n_reverse_paths(reverse_path, kmer, forward_path);
 return joined_path
def join_forward_n_reverse_paths(reverse_path, seed_kmer_val, forward_path):
 joined_path = []
 
 # want reverse path in reverse order
 for path in reversed(reverse_path):
 joined_path.append( path )
 
 # add seed kmer
 joined_path.append(seed_kmer_val)
 
 # tack on the entire forward path.
 for path in forward_path:
 joined_path.append( path )
 return joined_path
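# Hedged illustration: with reverse_path = [r1, r2], seed s and
# forward_path = [f1, f2], the joined path is [r2, r1, s, f1, f2], i.e. the
# kmers in left-to-right order along the growing contig.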
def inchworm(kcounter, direction, kmer, visitor, min_connectivity):
 growing_path = []
 kmer_length = kcounter.get_kmer_length()
 while True:
 if direction == 'F':
 # forward search
 kmer_candidates = kcounter.get_forward_kmer_candidates(kmer)
 else:
 # reverse search
 kmer_candidates = kcounter.get_reverse_kmer_candidates(kmer)
 print "kmer_candidates", kmer_candidates
 if len(kmer_candidates):
 best_extension = kmer_candidates[0][0]
 else:
 best_extension = 0
 print "best_extension", best_extension
 if best_extension == 0:
 break
 elif visitor.exists(best_extension):
 break
 else:
 visitor.add(best_extension)
 growing_path.append(best_extension)
 kmer = best_extension
 return growing_path
def reconstruct_path_sequence(kcounter, path):
 if len(path) == 0: return ""
 
 seq = kcounter.get_kmer_string(path[0])
 #cov_counter.append( kcounter.get_kmer_count(path[0]) )
 
 for kmer in path:
 kmer_str = kcounter.get_kmer_string(kmer)
 seq = seq + kmer_str[len(kmer_str) - 1:len(kmer_str)]
 #cov_counter.append(kcounter.get_kmer_count(kmer))
 return seq
def zap_kmers(kcounter, kmer_path):
 kmer_length = kcounter.get_kmer_length()
 # exclude kmers built into current contig.
 for kmer in kmer_path:
 kcounter.clear_kmer(kmer)
def run_MPI_master_all_completion_check(phase_val):
 raise NotImplementedError("run_MPI_master_all_completion_check")
def test_MPI(kcounter):
 raise NotImplementedError("test_MPI is not implemented")
def add_fasta_seq_line_breaks(sequence, interval):
 fasta_seq = ""
 counter = 0
 for c in sequence:
 counter = counter + 1
 fasta_seq = fasta_seq + c
 if counter % interval == 0 and counter != len(sequence):
 fasta_seq = fasta_seq + '\n'
 return fasta_seq
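# Usage example (hypothetical sequence): wrapping a 10-base sequence at
# 4 columns inserts breaks after columns 4 and 8 but no trailing newline.
def _demo_fasta_line_breaks():
    assert add_fasta_seq_line_breaks("ACGTACGTAC", 4) == "ACGT\nACGT\nAC"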
def get_ACP_proc_filename(node_id):
 return "tmp." + TOKEN + ".iworm_acp_proc_" + str(node_id) + ".contigs.txt";
def extract_best_seed(kmer_vec, kcounter, min_connectivity):
 kmer_length = kcounter.get_kmer_length()
 best_kmer_count = 0
 best_seed = 0
 for kmer in kmer_vec:
 count = kcounter.get_kmer_count(kmer)
 if count > best_kmer_count and is_good_seed_kmer(kcounter, kmer, count, kmer_length, min_connectivity):
 best_kmer_count = count
 best_seed = kmer
 if IRKE_COMMON.MONITOR >= 2:
 print "Parallel method found better seed: " + kcounter.get_kmer_string(best_seed) + " with count: " + str(best_kmer_count)
 return best_seed
def read_file(fasta_filename, kcounter, kmer_length):
 if False: #ACP.rank() == 1:
 print "sleeping..."
 time.sleep(3600)
 else:
 print "reading file..."
 # everyone participates in reading a part of the kmer file
 
 # figure out which section of the kmer fasta file we're supposed to use:
 file_length = 0 # init, set below
 this_mpi_section_start = 0
 this_mpi_section_end = -1
 
 if ACP.procs() > 1:
 fasta_file_reader = open(fasta_filename, "r")
 fasta_file_reader.seek(0, 2)
 file_length = fasta_file_reader.tell()
 fasta_file_reader.seek(0, 2)
 fasta_file_reader.close()
 mpi_section_length = file_length / ACP.procs()
 this_mpi_section_start = ACP.rank() * mpi_section_length;
 this_mpi_section_end = this_mpi_section_start + mpi_section_length
 
 #---------------------------------------------
 # Kmer partitioning among the nodes: 
 # Populate kmer hashtable on each node
 # where each node gets a subset of the kmers
 #---------------------------------------------
 
 fasta_reader = Fasta_reader(fasta_filename, this_mpi_section_start, this_mpi_section_end)
 
 if WRITE_KMER_FILES:
 filewriter = open("tmp." + TOKEN + ".kmers.tid_" + str(ACP.rank()), "w")
 if MONITOR_MPI_COMMUNICATION:
 kmerReaderLog = open("mpi_kmer_file_reader.mpi_" + str(ACP.rank()) + ".log", "w")
 
 kmer_counter = 0
 
 while True:
 if not fasta_reader.hasNext():
 break
 fe = fasta_reader.getNext()
 seq = fe.sequence
 
 if seq == "":
 continue
 
 if len(seq) < kmer_length:
 continue
 
 kmer_counter = kmer_counter + 1
 print "kmer_counter = %d\n" % (kmer_counter,)
 count = 1
 
 if False: #READ_TYPE == KMER:
 count = int(fe.get_header())
 sys.stdout.write("input kmer: " + seq + "\n")
 
 for i in range(len(seq) - kmer_length + 1):
 #print i
 kmer_s = seq[i:i+kmer_length] # seq.substr(i, kmer_length); 
 if contains_non_gatc(kmer_s):
 continue
 kmer = kcounter.get_kmer_intval(kmer_s)
 kcounter.add_kmer(kmer, count)
 
 central_kmer = get_central_kmer(kmer, kmer_length)
 
 if IRKE_COMMON.MONITOR >= 4:
 pass
 # central_kmer_string = decode_kmer_from_intval(central_kmer, kmer_length-2);
 # right_central_kmer = decode_kmer_from_intval(get_central_right_kmer(kmer, kmer_length), kmer_length-2)
 # left_central_kmer = decode_kmer_from_intval(get_central_left_kmer(kmer, kmer_length), kmer_length-2)
 # sys.stdout.write( "central: kmer " + str(central_kmer_string) )
 # sys.stdout.write( " left central kmer: " + left_central_kmer )
 # sys.stdout.write( " right central kmer: " + right_central_kmer )
 # partition kmers according to central kmer value and thread number
 # so all kmers with common core sequence end up on the same thread.
 # note, by virtue of this, all extensions for a given kmer should be
 # accessible via the same node. (idea of <NAME> @ Cray)
 print "Node[" + str(ACP.rank()) + "] is Done populating kmers."
 if MONITOR_MPI_COMMUNICATION:
 kmerReaderLog.write("Node[" + str(ACP.rank()) + "] is Done populating kmers.\n")
 
 THIS_NODE_DONE = True
 if ACP.rank() == 0:
 if ACP.procs() == 1:
 # no reason to run kmer server
 print "** Phase 1: Only 1 MPI node, no reason to do MPI communication. Skipping run_MPI_master_all_completion_check())"
 else:
 print "Phase 1: Master node running MPI_completion check."
 # on hold: unclear why a plain barrier does not work here
 # run_MPI_master_all_completion_check(1)
def do_prune_error_kmers(kcounter, min_ratio_non_error):
 if ACP.rank() == 0:
 print "Kmer db size before pruning: " + str( kcounter.size() )
 kcounter.prune_kmer_extensions(min_ratio_non_error)
 
 if ACP.rank() == 0:
 print "Kmer db size after pruning: " + str( kcounter.size() )
 ACP.sync()
def do_assembly(kcounter, kmer_length):
 contig_outfilename = get_ACP_proc_filename(ACP.rank())
 contig_writer = open(contig_outfilename, "w")
 print "Writing contigs to: " + contig_outfilename
 kmers = kcounter.get_kmers_sort_descending_counts()
 for j in range(len(kmers)):
 kmer = long(kmers[j][0])
 kmer_count = kcounter.get_kmer_count(kmer)
 print "kmer=%s, count=%d" % (decode_kmer_from_intval(kmer,kmer_length), kmer_count)
 if not is_good_seed_kmer(kcounter, kmer, kmer_count, kmer_length, MIN_CONNECTIVITY_RATIO):
 continue
 # build draft contig.
 joined_path = build_inchworm_contig_from_seed(kmer, kcounter, MIN_CONNECTIVITY_RATIO)
 # now use this draft contig to select a new seed:
 new_seed = extract_best_seed(joined_path, kcounter, MIN_CONNECTIVITY_RATIO)
 
 if new_seed == 0:
 continue # must have been zapped by another thread
 
 # nicely polished new inchworm | |
| 
	This is used when changing the model.
 """
 self._remove_labels()
 reset_minus1 = True
 # new geometry
 if reset_minus1:
 self.label_actors = {-1 : []}
 else:
 for idi in self.label_actors:
 if idi == -1:
 continue
 self.label_actors[idi] = []
 self.label_ids = {}
 #self.case_keys = [
 #(1, 'ElementID', 1, 'centroid', '%.0f'),
 #(1, 'Region', 1, 'centroid', '%.0f')
 #]
 for icase in self.case_keys:
 #result_name = self.get_result_name(icase)
 self.label_actors[icase] = []
 self.label_ids[icase] = set([])
 #print(self.label_actors)
 #print(self.label_ids)
 def _remove_labels(self):
 """
 Remove all labels from the current result case.
 This happens when the user explicitly selects the clear label button.
 """
 if len(self.label_actors) == 0:
 self.log.warning('No actors to remove')
 return
 # existing geometry
 for icase, actors in iteritems(self.label_actors):
 if icase == -1:
 continue
 for actor in actors:
 self.rend.RemoveActor(actor)
 del actor
 self.label_actors[icase] = []
 self.label_ids[icase] = set([])
 def clear_labels(self):
 """
 This clears out all labels from all result cases.
 """
 if len(self.label_actors) == 0:
 self.log.warning('No actors to clear')
 return
 # existing geometry
 #icase = self.case_keys[self.icase]
 icase = self.icase
 result_name = self.result_name
 actors = self.label_actors[icase]
 for actor in actors:
 self.rend.RemoveActor(actor)
 del actor
 self.label_actors[icase] = []
 self.label_ids[icase] = set([])
 def resize_labels(self, case_keys=None, show_msg=True):
 """
 This resizes labels for all result cases.
 TODO: not done...
 """
 if case_keys is None:
 names = 'None) # None -> all'
 case_keys = sorted(self.label_actors.keys())
 else:
 mid = '%s,' * len(case_keys)
 names = '[' + mid[:-1] % tuple(case_keys) + '])'
 count = 0
 for icase in case_keys:
 actors = self.label_actors[icase]
 for actor in actors:
 actor.VisibilityOff()
 count += 1
 if count and show_msg:
 self.log_command('resize_labels(%s)' % names)
 def hide_labels(self, case_keys=None, show_msg=True):
 if case_keys is None:
 names = 'None) # None -> all'
 case_keys = sorted(self.label_actors.keys())
 else:
 mid = '%s,' * len(case_keys)
 names = '[' + mid[:-1] % tuple(case_keys) + '])'
 count = 0
 for icase in case_keys:
 actors = self.label_actors[icase]
 for actor in actors:
 actor.VisibilityOff()
 #prop = actor.GetProperty()
 count += 1
 if count and show_msg:
 self.log_command('hide_labels(%s)' % names)
 def show_labels(self, case_keys=None, show_msg=True):
 if case_keys is None:
 names = 'None) # None -> all'
 case_keys = sorted(self.label_actors.keys())
 else:
 mid = '%s,' * len(case_keys)
 names = mid[:-1] % tuple(case_keys) + ')'
 count = 0
 for icase in case_keys:
 try:
 actors = self.label_actors[icase]
 except KeyError:
 msg = 'Cant find label_actors for icase=%r; keys=%s' % (
 icase, self.label_actors.keys())
 self.log.error(msg)
 continue
 for actor in actors:
 actor.VisibilityOn()
 count += 1
 if count and show_msg:
 # yes the ) is intentionally left off because it's already been added
 self.log_command('show_labels(%s)' % names)
 def update_scalar_bar(self, title, min_value, max_value, norm_value,
 data_format,
 nlabels=None, labelsize=None,
 ncolors=None, colormap='jet',
 is_low_to_high=True, is_horizontal=True,
 is_shown=True):
 """
 Updates the Scalar Bar
 Parameters
 ----------
 title : str
 the scalar bar title
 min_value : float
 the blue value
 max_value : float
 the red value
 data_format : str
 '%g','%f','%i', etc.
 nlabels : int (default=None -> auto)
 the number of labels
 labelsize : int (default=None -> auto)
 the label size
 ncolors : int (default=None -> auto)
 the number of colors
 colormap : varies
 str :
 the name
 ndarray : (N, 3) float ndarray
 red-green-blue array
 is_low_to_high : bool; default=True
 flips the order of the RGB points
 is_horizontal : bool; default=True
 makes the scalar bar horizontal
 is_shown : bool
 show the scalar bar
 """
 #print("update_scalar_bar min=%s max=%s norm=%s" % (min_value, max_value, norm_value))
 self.scalar_bar.update(title, min_value, max_value, norm_value, data_format,
 nlabels=nlabels, labelsize=labelsize,
 ncolors=ncolors, colormap=colormap,
 is_low_to_high=is_low_to_high, is_horizontal=is_horizontal,
 is_shown=is_shown)
 #---------------------------------------------------------------------------------------
 # CAMERA MENU
 def view_camera(self):
 set_camera_menu(self)
 #def _apply_camera(self, data):
 #name = data['name']
 #self.cameras = deepcopy(data['cameras'])
 #self.on_set_camera(name)
 def on_set_camera(self, name, show_log=True):
 camera_data = self.cameras[name]
 #position, clip_range, focal_point, view_up, distance = camera_data
 self.on_set_camera_data(camera_data, show_log=show_log)
 def get_camera_data(self):
 camera = self.rend.GetActiveCamera()
 position = camera.GetPosition()
 focal_point = camera.GetFocalPoint()
 view_angle = camera.GetViewAngle()
 view_up = camera.GetViewUp()
 clip_range = camera.GetClippingRange() # TODO: do I need this???
 parallel_scale = camera.GetParallelScale() # TODO: do I need this???
 #parallel_proj = camera.GetParallelProjection()
 parallel_proj = 32.
 distance = camera.GetDistance()
 # clip_range, view_up, distance
 camera_data = [
 position, focal_point, view_angle, view_up, clip_range,
 parallel_scale, parallel_proj, distance
 ]
 return camera_data
 def on_set_camera_data(self, camera_data, show_log=True):
 """
 Sets the current camera
 Parameters
 ----------
 position : (float, float, float)
 where I am in xyz space
 focal_point : (float, float, float)
 where am I looking
 view_angle : float
 field of view (angle); perspective only?
 view_up : (float, float, float)
 up on the screen vector
 clip_range : (float, float)
 start/end distance from camera where clipping starts
 parallel_scale : float
 ???
 parallel_projection : bool (0/1)
 flag?
 TODO: not used
 distance : float
 distance to the camera
 i_vector = focal_point - position
 j'_vector = view_up
 use:
 i x j' -> k
 k x i -> j
 or it's like k'
 """
 #position, clip_range, focal_point, view_up, distance = camera_data
 (position, focal_point, view_angle, view_up, clip_range,
 parallel_scale, parallel_proj, distance) = camera_data
 camera = self.rend.GetActiveCamera()
 camera.SetPosition(position)
 camera.SetFocalPoint(focal_point)
 camera.SetViewAngle(view_angle)
 camera.SetViewUp(view_up)
 camera.SetClippingRange(clip_range)
 camera.SetParallelScale(parallel_scale)
 #parallel_proj
 camera.SetDistance(distance)
 camera.Modified()
 self.vtk_interactor.Render()
 if show_log:
 self.log_command(
 'on_set_camera_data([%s, %s, %s, %s, %s, %s, %s, %s])'
 % (position, focal_point, view_angle, view_up,
 clip_range, parallel_scale, parallel_proj, distance))
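 # Sketch only: get_camera_data() and on_set_camera_data() are intended to be
 # symmetric, so a view can be captured and restored later. `gui` stands for
 # any object exposing both methods (e.g. this window) and is illustrative.
 def _demo_save_and_restore_view(gui):
     saved = gui.get_camera_data()  # capture the current camera state
     # ... the user rotates/zooms the model here ...
     gui.on_set_camera_data(saved, show_log=False)  # return to the saved view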
 #---------------------------------------------------------------------------------------
 # PICKER
 @property
 def node_picker_size(self):
 """Gets the node picker size"""
 return self.node_picker.GetTolerance()
 @node_picker_size.setter
 def node_picker_size(self, size):
 """Sets the node picker size"""
 assert size >= 0., size
 self.node_picker.SetTolerance(size)
 @property
 def element_picker_size(self):
 """Gets the element picker size"""
 return self.cell_picker.GetTolerance()
 @element_picker_size.setter
 def element_picker_size(self, size):
 """Sets the element picker size"""
 assert size >= 0., size
 self.cell_picker.SetTolerance(size)
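 # Usage note (hypothetical values): the two properties above simply wrap the
 # VTK picker tolerances, so adjusting pick sensitivity is a plain assignment,
 # e.g. self.node_picker_size = 0.025 or self.element_picker_size = 0.01.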
 #---------------------------------------------------------------------------------------
 def set_preferences_menu(self):
 """
 Opens a dialog box to set:
 +--------+----------+
 | Min | Float |
 +--------+----------+
 """
 set_preferences_menu(self)
 #---------------------------------------------------------------------------------------
 # CLIPPING MENU
 def set_clipping(self):
 """
 Opens a dialog box to set:
 +--------+----------+
 | Min | Float |
 +--------+----------+
 | Max | Float |
 +--------+----------+
 """
 set_clipping_menu(self)
 def _apply_clipping(self, data):
 min_clip = data['clipping_min']
 max_clip = data['clipping_max']
 self.on_update_clipping(min_clip, max_clip)
 def on_update_clipping(self, min_clip=None, max_clip=None):
 camera = self.GetCamera()
 _min_clip, _max_clip = camera.GetClippingRange()
 if min_clip is None:
 min_clip = _min_clip
 if max_clip is None:
 max_clip = _max_clip
 camera.SetClippingRange(min_clip, max_clip)
 self.log_command('self.on_update_clipping(min_clip=%s, max_clip=%s)'
 % (min_clip, max_clip))
 #---------------------------------------------------------------------------------------
 def on_set_anti_aliasing(self, scale=0):
 assert isinstance(scale, int), 'scale=%r; type=%r' % (scale, type(scale))
 renwin = self.render_window
 renwin.LineSmoothingOn()
 renwin.PolygonSmoothingOn()
 renwin.PointSmoothingOn()
 renwin.SetMultiSamples(scale)
 self.vtk_interactor.Render()
 self.log_command('on_set_anti_aliasing(%r)' % (scale))
 #---------------------------------------------------------------------------------------
 # LEGEND MENU
 def set_legend(self):
 """
 Opens a dialog box to set:
 +--------+----------+
 | Name | String |
 +--------+----------+
 | Min | Float |
 +--------+----------+
 | Max | Float |
 +--------+----------+
 | Format | pyString |
 +--------+----------+
 """
 set_legend_menu(self)
 def update_legend(self, icase, name, min_value, max_value, data_format, scale, phase,
 nlabels, labelsize, ncolors, colormap,
 is_low_to_high, is_horizontal_scalar_bar):
 if not self._legend_window_shown:
 return
 self._legend_window._updated_legend = True
 key = self.case_keys[icase]
 assert isinstance(key, integer_types), key
 (obj, (i, name)) = self.result_cases[key]
 #subcase_id = obj.subcase_id
 #case = obj.get_result(i, name)
 #result_type = obj.get_title(i, name)
 #vector_size = obj.get_vector_size(i, name)
 #location = obj.get_location(i, name)
 #data_format = obj.get_data_format(i, name)
 #scale = obj.get_scale(i, name)
 #label2 = obj.get_header(i, name)
 default_data_format = obj.get_default_data_format(i, name)
 default_min, default_max = obj.get_default_min_max(i, name)
 default_scale = obj.get_default_scale(i, name)
 default_title = obj.get_default_title(i, name)
 default_phase = obj.get_default_phase(i, name)
 out_labels = obj.get_default_nlabels_labelsize_ncolors_colormap(i, name)
 default_nlabels, default_labelsize, default_ncolors, default_colormap = out_labels
 is_normals = obj.is_normal_result(i, name)
 assert isinstance(scale, float), 'scale=%s' % scale
 self._legend_window.update_legend(
 icase,
 name, min_value, max_value, data_format, scale, phase,
 nlabels, labelsize,
 ncolors, colormap,
 default_title, default_min, default_max, default_data_format,
 default_scale, default_phase,
 default_nlabels, default_labelsize,
 default_ncolors, default_colormap,
 is_low_to_high, is_horizontal_scalar_bar, is_normals, font_size=self.font_size)
 #self.scalar_bar.set_visibility(self._legend_shown)
 #self.vtk_interactor.Render()
 def _apply_legend(self, data):
 title = data['name']
 min_value = data['min']
 max_value = data['max']
 scale = data['scale']
 phase = data['phase']
 data_format = data['format']
 is_low_to_high = data['is_low_to_high']
 is_discrete = data['is_discrete']
 is_horizontal = data['is_horizontal']
 is_shown = data['is_shown']
 nlabels = data['nlabels']
 labelsize = data['labelsize']
 ncolors = data['ncolors']
 colormap = data['colormap']
 #print('is_shown1 =', is_shown)
 self.on_update_legend(title=title, min_value=min_value, max_value=max_value,
 scale=scale, phase=phase, data_format=data_format,
 is_low_to_high=is_low_to_high,
 is_discrete=is_discrete, is_horizontal=is_horizontal,
 nlabels=nlabels, labelsize=labelsize,
 ncolors=ncolors, colormap=colormap,
 is_shown=is_shown)
 def on_update_legend(self, title='Title', min_value=0., max_value=1., scale=0.0,
 phase=0.0,
 data_format='%.0f',
 is_low_to_high=True, is_discrete=True, is_horizontal=True,
 nlabels=None, labelsize=None, ncolors=None, colormap='jet',
 is_shown=True):
 """
 Updates the legend/model
 Parameters
 ----------
 scale : float
 displacement scale factor; true scale
 """
 #print('is_shown2 =', is_shown)
 #assert is_shown == False, is_shown
 key = self.case_keys[self.icase]
 name_vector = None
 plot_value = self.result_cases[key] # scalar
 vector_size1 = 1
 update_3d = False
 assert isinstance(key, integer_types), key
 (obj, (i, res_name)) = self.result_cases[key]
 subcase_id = obj.subcase_id
 #print('plot_value =', plot_value)
 result_type = obj.get_title(i, res_name)
 vector_size = obj.get_vector_size(i, res_name)
 if vector_size == 3:
 plot_value = obj.get_plot_value(i, res_name) # vector
 update_3d = True
 #print('setting scale=%s' % scale)
 assert isinstance(scale, float), scale
 obj.set_scale(i, res_name, scale)
 obj.set_phase(i, res_name, | |
| 
	self.button_deletar_registro_10.setGeometry(QtCore.QRect(200, 2, 221, 27))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(9)
 font.setBold(True)
 font.setWeight(75)
 self.button_deletar_registro_10.setFont(font)
 self.button_deletar_registro_10.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_deletar_registro_10.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
 self.button_deletar_registro_10.setObjectName("button_deletar_registro_10")
 self.qual_site_10 = QtWidgets.QLabel(self.registro_10)
 self.qual_site_10.setGeometry(QtCore.QRect(522, 29, 32, 32))
 self.qual_site_10.setText("")
 self.qual_site_10.setObjectName("qual_site_10")
 self.verticalLayout_3.addWidget(self.registro_10)
 self.registro_11 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
 self.registro_11.setMinimumSize(QtCore.QSize(621, 121))
 self.registro_11.setMaximumSize(QtCore.QSize(100000, 10000))
 self.registro_11.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
 self.registro_11.setFrameShape(QtWidgets.QFrame.NoFrame)
 self.registro_11.setFrameShadow(QtWidgets.QFrame.Raised)
 self.registro_11.setObjectName("registro_11")
 self.title_site_11 = QtWidgets.QLabel(self.registro_11)
 self.title_site_11.setGeometry(QtCore.QRect(0, 30, 80, 30))
 self.title_site_11.setMinimumSize(QtCore.QSize(0, 30))
 self.title_site_11.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_site_11.setFont(font)
 self.title_site_11.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_site_11.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_site_11.setObjectName("title_site_11")
 self.title_senha_11 = QtWidgets.QLabel(self.registro_11)
 self.title_senha_11.setGeometry(QtCore.QRect(0, 60, 80, 30))
 self.title_senha_11.setMinimumSize(QtCore.QSize(0, 30))
 self.title_senha_11.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_senha_11.setFont(font)
 self.title_senha_11.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_senha_11.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_senha_11.setObjectName("title_senha_11")
 self.title_email_11 = QtWidgets.QLabel(self.registro_11)
 self.title_email_11.setGeometry(QtCore.QRect(0, 90, 80, 30))
 self.title_email_11.setMinimumSize(QtCore.QSize(0, 30))
 self.title_email_11.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_email_11.setFont(font)
 self.title_email_11.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_email_11.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_email_11.setObjectName("title_email_11")
 self.text_site_11 = QtWidgets.QLabel(self.registro_11)
 self.text_site_11.setGeometry(QtCore.QRect(80, 30, 441, 30))
 self.text_site_11.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_site_11.setText("")
 self.text_site_11.setAlignment(QtCore.Qt.AlignCenter)
 self.text_site_11.setObjectName("text_site_11")
 self.text_senha_11 = QtWidgets.QLabel(self.registro_11)
 self.text_senha_11.setGeometry(QtCore.QRect(80, 60, 441, 30))
 self.text_senha_11.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_senha_11.setText("")
 self.text_senha_11.setAlignment(QtCore.Qt.AlignCenter)
 self.text_senha_11.setObjectName("text_senha_11")
 self.text_email_11 = QtWidgets.QLabel(self.registro_11)
 self.text_email_11.setGeometry(QtCore.QRect(80, 90, 441, 30))
 self.text_email_11.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_email_11.setText("")
 self.text_email_11.setAlignment(QtCore.Qt.AlignCenter)
 self.text_email_11.setObjectName("text_email_11")
 self.button_copiar_site_11 = QtWidgets.QPushButton(self.registro_11)
 self.button_copiar_site_11.setGeometry(QtCore.QRect(580, 60, 32, 32))
 self.button_copiar_site_11.setMinimumSize(QtCore.QSize(0, 32))
 self.button_copiar_site_11.setMaximumSize(QtCore.QSize(32, 32))
 self.button_copiar_site_11.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_copiar_site_11.setStyleSheet("border-radius:1px")
 self.button_copiar_site_11.setText("")
 self.button_copiar_site_11.setObjectName("button_copiar_site_11")
 self.button_deletar_registro_11 = QtWidgets.QPushButton(self.registro_11)
 self.button_deletar_registro_11.setGeometry(QtCore.QRect(200, 2, 221, 27))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(9)
 font.setBold(True)
 font.setWeight(75)
 self.button_deletar_registro_11.setFont(font)
 self.button_deletar_registro_11.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_deletar_registro_11.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
 self.button_deletar_registro_11.setObjectName("button_deletar_registro_11")
 self.qual_site_11 = QtWidgets.QLabel(self.registro_11)
 self.qual_site_11.setGeometry(QtCore.QRect(522, 29, 32, 32))
 self.qual_site_11.setText("")
 self.qual_site_11.setObjectName("qual_site_11")
 self.verticalLayout_3.addWidget(self.registro_11)
 self.registro_12 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
 self.registro_12.setMinimumSize(QtCore.QSize(621, 121))
 self.registro_12.setMaximumSize(QtCore.QSize(100000, 10000))
 self.registro_12.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
 self.registro_12.setFrameShape(QtWidgets.QFrame.NoFrame)
 self.registro_12.setFrameShadow(QtWidgets.QFrame.Raised)
 self.registro_12.setObjectName("registro_12")
 self.title_site_12 = QtWidgets.QLabel(self.registro_12)
 self.title_site_12.setGeometry(QtCore.QRect(0, 30, 80, 30))
 self.title_site_12.setMinimumSize(QtCore.QSize(0, 30))
 self.title_site_12.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_site_12.setFont(font)
 self.title_site_12.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_site_12.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_site_12.setObjectName("title_site_12")
 self.title_senha_12 = QtWidgets.QLabel(self.registro_12)
 self.title_senha_12.setGeometry(QtCore.QRect(0, 60, 80, 30))
 self.title_senha_12.setMinimumSize(QtCore.QSize(0, 30))
 self.title_senha_12.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_senha_12.setFont(font)
 self.title_senha_12.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_senha_12.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_senha_12.setObjectName("title_senha_12")
 self.title_email_12 = QtWidgets.QLabel(self.registro_12)
 self.title_email_12.setGeometry(QtCore.QRect(0, 90, 80, 30))
 self.title_email_12.setMinimumSize(QtCore.QSize(0, 30))
 self.title_email_12.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_email_12.setFont(font)
 self.title_email_12.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_email_12.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_email_12.setObjectName("title_email_12")
 self.text_site_12 = QtWidgets.QLabel(self.registro_12)
 self.text_site_12.setGeometry(QtCore.QRect(80, 30, 441, 30))
 self.text_site_12.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_site_12.setText("")
 self.text_site_12.setAlignment(QtCore.Qt.AlignCenter)
 self.text_site_12.setObjectName("text_site_12")
 self.text_senha_12 = QtWidgets.QLabel(self.registro_12)
 self.text_senha_12.setGeometry(QtCore.QRect(80, 60, 441, 30))
 self.text_senha_12.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_senha_12.setText("")
 self.text_senha_12.setAlignment(QtCore.Qt.AlignCenter)
 self.text_senha_12.setObjectName("text_senha_12")
 self.text_email_12 = QtWidgets.QLabel(self.registro_12)
 self.text_email_12.setGeometry(QtCore.QRect(80, 90, 441, 30))
 self.text_email_12.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_email_12.setText("")
 self.text_email_12.setAlignment(QtCore.Qt.AlignCenter)
 self.text_email_12.setObjectName("text_email_12")
 self.button_copiar_site_12 = QtWidgets.QPushButton(self.registro_12)
 self.button_copiar_site_12.setGeometry(QtCore.QRect(580, 60, 32, 32))
 self.button_copiar_site_12.setMinimumSize(QtCore.QSize(0, 32))
 self.button_copiar_site_12.setMaximumSize(QtCore.QSize(32, 32))
 self.button_copiar_site_12.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_copiar_site_12.setStyleSheet("border-radius:1px")
 self.button_copiar_site_12.setText("")
 self.button_copiar_site_12.setObjectName("button_copiar_site_12")
 self.button_deletar_registro_12 = QtWidgets.QPushButton(self.registro_12)
 self.button_deletar_registro_12.setGeometry(QtCore.QRect(200, 2, 221, 27))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(9)
 font.setBold(True)
 font.setWeight(75)
 self.button_deletar_registro_12.setFont(font)
 self.button_deletar_registro_12.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_deletar_registro_12.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
 self.button_deletar_registro_12.setObjectName("button_deletar_registro_12")
 self.qual_site_12 = QtWidgets.QLabel(self.registro_12)
 self.qual_site_12.setGeometry(QtCore.QRect(522, 29, 32, 32))
 self.qual_site_12.setText("")
 self.qual_site_12.setObjectName("qual_site_12")
 self.verticalLayout_3.addWidget(self.registro_12)
 self.registro_13 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
 self.registro_13.setMinimumSize(QtCore.QSize(621, 121))
 self.registro_13.setMaximumSize(QtCore.QSize(100000, 10000))
 self.registro_13.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
 self.registro_13.setFrameShape(QtWidgets.QFrame.NoFrame)
 self.registro_13.setFrameShadow(QtWidgets.QFrame.Raised)
 self.registro_13.setObjectName("registro_13")
 self.title_site_13 = QtWidgets.QLabel(self.registro_13)
 self.title_site_13.setGeometry(QtCore.QRect(0, 30, 80, 30))
 self.title_site_13.setMinimumSize(QtCore.QSize(0, 30))
 self.title_site_13.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_site_13.setFont(font)
 self.title_site_13.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_site_13.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_site_13.setObjectName("title_site_13")
 self.title_senha_13 = QtWidgets.QLabel(self.registro_13)
 self.title_senha_13.setGeometry(QtCore.QRect(0, 60, 80, 30))
 self.title_senha_13.setMinimumSize(QtCore.QSize(0, 30))
 self.title_senha_13.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_senha_13.setFont(font)
 self.title_senha_13.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_senha_13.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_senha_13.setObjectName("title_senha_13")
 self.title_email_13 = QtWidgets.QLabel(self.registro_13)
 self.title_email_13.setGeometry(QtCore.QRect(0, 90, 80, 30))
 self.title_email_13.setMinimumSize(QtCore.QSize(0, 30))
 self.title_email_13.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_email_13.setFont(font)
 self.title_email_13.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_email_13.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_email_13.setObjectName("title_email_13")
 self.text_site_13 = QtWidgets.QLabel(self.registro_13)
 self.text_site_13.setGeometry(QtCore.QRect(80, 30, 441, 30))
 self.text_site_13.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_site_13.setText("")
 self.text_site_13.setAlignment(QtCore.Qt.AlignCenter)
 self.text_site_13.setObjectName("text_site_13")
 self.text_senha_13 = QtWidgets.QLabel(self.registro_13)
 self.text_senha_13.setGeometry(QtCore.QRect(80, 60, 441, 30))
 self.text_senha_13.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_senha_13.setText("")
 self.text_senha_13.setAlignment(QtCore.Qt.AlignCenter)
 self.text_senha_13.setObjectName("text_senha_13")
 self.text_email_13 = QtWidgets.QLabel(self.registro_13)
 self.text_email_13.setGeometry(QtCore.QRect(80, 90, 441, 30))
 self.text_email_13.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_email_13.setText("")
 self.text_email_13.setAlignment(QtCore.Qt.AlignCenter)
 self.text_email_13.setObjectName("text_email_13")
 self.button_copiar_site_13 = QtWidgets.QPushButton(self.registro_13)
 self.button_copiar_site_13.setGeometry(QtCore.QRect(580, 60, 32, 32))
 self.button_copiar_site_13.setMinimumSize(QtCore.QSize(0, 32))
 self.button_copiar_site_13.setMaximumSize(QtCore.QSize(32, 32))
 self.button_copiar_site_13.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_copiar_site_13.setStyleSheet("border-radius:1px")
 self.button_copiar_site_13.setText("")
 self.button_copiar_site_13.setObjectName("button_copiar_site_13")
 self.button_deletar_registro_13 = QtWidgets.QPushButton(self.registro_13)
 self.button_deletar_registro_13.setGeometry(QtCore.QRect(200, 2, 221, 27))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(9)
 font.setBold(True)
 font.setWeight(75)
 self.button_deletar_registro_13.setFont(font)
 self.button_deletar_registro_13.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_deletar_registro_13.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
 self.button_deletar_registro_13.setObjectName("button_deletar_registro_13")
 self.qual_site_13 = QtWidgets.QLabel(self.registro_13)
 self.qual_site_13.setGeometry(QtCore.QRect(522, 29, 32, 32))
 self.qual_site_13.setText("")
 self.qual_site_13.setObjectName("qual_site_13")
 self.verticalLayout_3.addWidget(self.registro_13)
 self.registro_14 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
 self.registro_14.setMinimumSize(QtCore.QSize(621, 121))
 self.registro_14.setMaximumSize(QtCore.QSize(100000, 10000))
 self.registro_14.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
 self.registro_14.setFrameShape(QtWidgets.QFrame.NoFrame)
 self.registro_14.setFrameShadow(QtWidgets.QFrame.Raised)
 self.registro_14.setObjectName("registro_14")
 self.title_site_14 = QtWidgets.QLabel(self.registro_14)
 self.title_site_14.setGeometry(QtCore.QRect(0, 30, 80, 30))
 self.title_site_14.setMinimumSize(QtCore.QSize(0, 30))
 self.title_site_14.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_site_14.setFont(font)
 self.title_site_14.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_site_14.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_site_14.setObjectName("title_site_14")
 self.title_senha_14 = QtWidgets.QLabel(self.registro_14)
 self.title_senha_14.setGeometry(QtCore.QRect(0, 60, 80, 30))
 self.title_senha_14.setMinimumSize(QtCore.QSize(0, 30))
 self.title_senha_14.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_senha_14.setFont(font)
 self.title_senha_14.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_senha_14.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_senha_14.setObjectName("title_senha_14")
 self.title_email_14 = QtWidgets.QLabel(self.registro_14)
 self.title_email_14.setGeometry(QtCore.QRect(0, 90, 80, 30))
 self.title_email_14.setMinimumSize(QtCore.QSize(0, 30))
 self.title_email_14.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_email_14.setFont(font)
 self.title_email_14.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_email_14.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_email_14.setObjectName("title_email_14")
 self.text_site_14 = QtWidgets.QLabel(self.registro_14)
 self.text_site_14.setGeometry(QtCore.QRect(80, 30, 441, 30))
 self.text_site_14.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_site_14.setText("")
 self.text_site_14.setAlignment(QtCore.Qt.AlignCenter)
 self.text_site_14.setObjectName("text_site_14")
 self.text_senha_14 = QtWidgets.QLabel(self.registro_14)
 self.text_senha_14.setGeometry(QtCore.QRect(80, 60, 441, 30))
 self.text_senha_14.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_senha_14.setText("")
 self.text_senha_14.setAlignment(QtCore.Qt.AlignCenter)
 self.text_senha_14.setObjectName("text_senha_14")
 self.text_email_14 = QtWidgets.QLabel(self.registro_14)
 self.text_email_14.setGeometry(QtCore.QRect(80, 90, 441, 30))
 self.text_email_14.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_email_14.setText("")
 self.text_email_14.setAlignment(QtCore.Qt.AlignCenter)
 self.text_email_14.setObjectName("text_email_14")
 self.button_copiar_site_14 = QtWidgets.QPushButton(self.registro_14)
 self.button_copiar_site_14.setGeometry(QtCore.QRect(580, 60, 32, 32))
 self.button_copiar_site_14.setMinimumSize(QtCore.QSize(0, 32))
 self.button_copiar_site_14.setMaximumSize(QtCore.QSize(32, 32))
 self.button_copiar_site_14.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_copiar_site_14.setStyleSheet("border-radius:1px")
 self.button_copiar_site_14.setText("")
 self.button_copiar_site_14.setObjectName("button_copiar_site_14")
 self.button_deletar_registro_14 = QtWidgets.QPushButton(self.registro_14)
 self.button_deletar_registro_14.setGeometry(QtCore.QRect(200, 2, 221, 27))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(9)
 font.setBold(True)
 font.setWeight(75)
 self.button_deletar_registro_14.setFont(font)
 self.button_deletar_registro_14.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_deletar_registro_14.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
 self.button_deletar_registro_14.setObjectName("button_deletar_registro_14")
 self.qual_site_14 = QtWidgets.QLabel(self.registro_14)
 self.qual_site_14.setGeometry(QtCore.QRect(522, 29, 32, 32))
 self.qual_site_14.setText("")
 self.qual_site_14.setObjectName("qual_site_14")
 self.verticalLayout_3.addWidget(self.registro_14)
 self.registro_15 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
 self.registro_15.setMinimumSize(QtCore.QSize(621, 121))
 self.registro_15.setMaximumSize(QtCore.QSize(100000, 10000))
 self.registro_15.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
 self.registro_15.setFrameShape(QtWidgets.QFrame.NoFrame)
 self.registro_15.setFrameShadow(QtWidgets.QFrame.Raised)
 self.registro_15.setObjectName("registro_15")
 self.title_site_15 = QtWidgets.QLabel(self.registro_15)
 self.title_site_15.setGeometry(QtCore.QRect(0, 30, 80, 30))
 self.title_site_15.setMinimumSize(QtCore.QSize(0, 30))
 self.title_site_15.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_site_15.setFont(font)
 self.title_site_15.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_site_15.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_site_15.setObjectName("title_site_15")
 self.title_senha_15 = QtWidgets.QLabel(self.registro_15)
 self.title_senha_15.setGeometry(QtCore.QRect(0, 60, 80, 30))
 self.title_senha_15.setMinimumSize(QtCore.QSize(0, 30))
 self.title_senha_15.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_senha_15.setFont(font)
 self.title_senha_15.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_senha_15.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_senha_15.setObjectName("title_senha_15")
 self.title_email_15 = QtWidgets.QLabel(self.registro_15)
 self.title_email_15.setGeometry(QtCore.QRect(0, 90, 80, 30))
 self.title_email_15.setMinimumSize(QtCore.QSize(0, 30))
 self.title_email_15.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_email_15.setFont(font)
 self.title_email_15.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_email_15.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_email_15.setObjectName("title_email_15")
 self.text_site_15 = QtWidgets.QLabel(self.registro_15)
 self.text_site_15.setGeometry(QtCore.QRect(80, 30, 441, 30))
 self.text_site_15.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_site_15.setText("")
 self.text_site_15.setAlignment(QtCore.Qt.AlignCenter)
 self.text_site_15.setObjectName("text_site_15")
 self.text_senha_15 = QtWidgets.QLabel(self.registro_15)
 self.text_senha_15.setGeometry(QtCore.QRect(80, 60, 441, 30))
 self.text_senha_15.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_senha_15.setText("")
 self.text_senha_15.setAlignment(QtCore.Qt.AlignCenter)
 self.text_senha_15.setObjectName("text_senha_15")
 self.text_email_15 = QtWidgets.QLabel(self.registro_15)
 self.text_email_15.setGeometry(QtCore.QRect(80, 90, 441, 30))
 self.text_email_15.setStyleSheet("font: 87 12pt \"Segoe UI Black\";\n"
"color: rgb(240, 240, 240);")
 self.text_email_15.setText("")
 self.text_email_15.setAlignment(QtCore.Qt.AlignCenter)
 self.text_email_15.setObjectName("text_email_15")
 self.button_copiar_site_15 = QtWidgets.QPushButton(self.registro_15)
 self.button_copiar_site_15.setGeometry(QtCore.QRect(580, 60, 32, 32))
 self.button_copiar_site_15.setMinimumSize(QtCore.QSize(0, 32))
 self.button_copiar_site_15.setMaximumSize(QtCore.QSize(32, 32))
 self.button_copiar_site_15.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_copiar_site_15.setStyleSheet("border-radius:1px")
 self.button_copiar_site_15.setText("")
 self.button_copiar_site_15.setObjectName("button_copiar_site_15")
 self.button_deletar_registro_15 = QtWidgets.QPushButton(self.registro_15)
 self.button_deletar_registro_15.setGeometry(QtCore.QRect(200, 2, 221, 27))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(9)
 font.setBold(True)
 font.setWeight(75)
 self.button_deletar_registro_15.setFont(font)
 self.button_deletar_registro_15.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
 self.button_deletar_registro_15.setStyleSheet("QPushButton{\n"
" \n"
" color:rgb(255, 56, 56);\n"
" border-radius:10px;\n"
" background-color: rgb(72, 72, 72);\n"
"}\n"
"\n"
"QPushButton:hover{\n"
" color:rgb(255, 152, 152);\n"
" background-color: rgb(255, 56, 56);\n"
"}")
 self.button_deletar_registro_15.setObjectName("button_deletar_registro_15")
 self.qual_site_15 = QtWidgets.QLabel(self.registro_15)
 self.qual_site_15.setGeometry(QtCore.QRect(522, 29, 32, 32))
 self.qual_site_15.setText("")
 self.qual_site_15.setObjectName("qual_site_15")
 self.verticalLayout_3.addWidget(self.registro_15)
 self.registro_16 = QtWidgets.QFrame(self.scrollAreaWidgetContents)
 self.registro_16.setMinimumSize(QtCore.QSize(621, 121))
 self.registro_16.setMaximumSize(QtCore.QSize(100000, 10000))
 self.registro_16.setStyleSheet(" background-color: rgb(31, 31, 31);\n"
"")
 self.registro_16.setFrameShape(QtWidgets.QFrame.NoFrame)
 self.registro_16.setFrameShadow(QtWidgets.QFrame.Raised)
 self.registro_16.setObjectName("registro_16")
 self.title_site_16 = QtWidgets.QLabel(self.registro_16)
 self.title_site_16.setGeometry(QtCore.QRect(0, 30, 80, 30))
 self.title_site_16.setMinimumSize(QtCore.QSize(0, 30))
 self.title_site_16.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_site_16.setFont(font)
 self.title_site_16.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, 255, 255);")
 self.title_site_16.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
 self.title_site_16.setObjectName("title_site_16")
 self.title_senha_16 = QtWidgets.QLabel(self.registro_16)
 self.title_senha_16.setGeometry(QtCore.QRect(0, 60, 80, 30))
 self.title_senha_16.setMinimumSize(QtCore.QSize(0, 30))
 self.title_senha_16.setMaximumSize(QtCore.QSize(10000, 30))
 font = QtGui.QFont()
 font.setFamily("Segoe UI Black")
 font.setPointSize(14)
 font.setBold(False)
 font.setItalic(False)
 font.setWeight(10)
 self.title_senha_16.setFont(font)
 self.title_senha_16.setStyleSheet("font: 87 14pt \"Segoe UI Black\";\n"
"color: rgb(255, | |
| 
	# -*- coding: utf-8 -*-
"""
Created on Sat Sep 29 20:55:53 2018
Image dataset loader for a .txt file with a sample per line in the format
'path of image start_frame verb_id noun_id'
@author: Γιώργος
"""
import os
import pickle
import cv2
import numpy as np
from scipy.spatial.distance import pdist, squareform
from torch.utils.data import Dataset as torchDataset
from utils.video_sampler import RandomSampling, SequentialSampling, MiddleSampling, DoubleFullSampling, FullSampling
def get_class_weights(list_file, num_classes, use_mapping):
 samples_list = parse_samples_list(list_file, DataLine)
 counts = np.zeros(num_classes)
 mapping = None
 if use_mapping:
 mapping = make_class_mapping(samples_list)
 for s in samples_list:
 counts[mapping[s.label_verb]] += 1
 else:
 for s in samples_list:
 counts[s.label_verb] += 1
 weights = 1 / counts
 weights = weights / np.sum(weights)
 return weights.astype(np.float32)
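# Worked example of the inverse-frequency weighting above (hypothetical
# per-class counts): rare classes receive proportionally larger weights.
def _demo_class_weights():
    counts = np.array([10., 30., 60.])
    weights = 1 / counts  # [0.1, 0.0333, 0.0167]
    weights = weights / np.sum(weights)  # ~[0.667, 0.222, 0.111]
    return weights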
def make_class_mapping(samples_list):
 classes = []
 for sample in samples_list:
 if sample.label_verb not in classes:
 classes.append(sample.label_verb)
 classes = np.sort(classes)
 mapping_dict = {}
 for i, c in enumerate(classes):
 mapping_dict[c] = i
 return mapping_dict
def make_class_mapping_generic(samples_list, attribute):
 classes = []
 for sample in samples_list:
 label = getattr(sample, attribute)
 if label not in classes:
 classes.append(label)
 classes = np.sort(classes)
 mapping_dict = {}
 for i, c in enumerate(classes):
 mapping_dict[c] = i
 return mapping_dict
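# Small illustration (hypothetical labels): the generic mapping compresses a
# sparse label set such as {2, 5, 9} to contiguous ids {2: 0, 5: 1, 9: 2}.
class _FakeSample(object):
    def __init__(self, label_verb):
        self.label_verb = label_verb
# make_class_mapping_generic([_FakeSample(9), _FakeSample(2), _FakeSample(5)],
#                            'label_verb') -> {2: 0, 5: 1, 9: 2}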
def load_pickle(tracks_path):
 with open(tracks_path, 'rb') as f:
 tracks = pickle.load(f)
 return tracks
def substitute_prefix(tracks_path, secondary_prefix):
 obj_path = secondary_prefix
 for p in tracks_path.split('\\')[1:]:
 obj_path = os.path.join(obj_path, p)
 return obj_path
def load_two_pickle(tracks_path, secondary_prefix):
 obj_path = substitute_prefix(tracks_path, secondary_prefix)
 return load_pickle(tracks_path), load_pickle(obj_path)
def load_point_samples(samples_list, bpv_prefix=None):
 if bpv_prefix:
 data_arr = [load_two_pickle(samples_list[index].data_path, bpv_prefix) for index in range(len(samples_list))]
 else:
 data_arr = [load_pickle(samples_list[index].data_path) for index in range(len(samples_list))]
 return data_arr
# from PIL import Image
def load_images(data_path, frame_indices, image_tmpl):
 images = []
 # images = np.zeros((len(frame_indices), 640, 480, 3))
 for f_ind in frame_indices:
 im_name = os.path.join(data_path, image_tmpl.format(f_ind))
 # next_image = np.array(Image.open(im_name).convert('RGB'))
 next_image = cv2.imread(im_name, cv2.IMREAD_COLOR)
 next_image = cv2.cvtColor(next_image, cv2.COLOR_BGR2RGB)
 images.append(next_image)
 # images[i] = next_image
 return images
def prepare_sampler(sampler_type, clip_length, frame_interval):
 if sampler_type == "train":
 train_sampler = RandomSampling(num=clip_length,
 interval=frame_interval,
 speed=[0.5, 1.5], seed=None)
 out_sampler = train_sampler
 else:
 val_sampler = SequentialSampling(num=clip_length,
 interval=frame_interval,
 fix_cursor=True,
 shuffle=True, seed=None)
 out_sampler = val_sampler
 return out_sampler
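# Usage sketch of the sampler factory above (argument values are illustrative):
# train_sampler = prepare_sampler("train", clip_length=16, frame_interval=2)
# val_sampler = prepare_sampler("val", clip_length=16, frame_interval=2)
# The "train" type samples clips randomly with speed jitter; any other type
# falls back to the sequential sampler used for validation/testing.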
def object_list_to_bpv(detections, num_noun_classes, max_seq_length):
 sampled_detections = np.array(detections)
 if max_seq_length != 0:
 sampled_detections = sampled_detections[
 np.linspace(0, len(detections), max_seq_length, endpoint=False, dtype=int)].tolist()
 seq_length = max_seq_length
 else:
 seq_length = len(detections)
 bpv = np.zeros((seq_length, num_noun_classes), dtype=np.float32)
 for i, dets in enumerate(sampled_detections):
 for obj in dets:
 bpv[i, obj] = 1
 return bpv
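# Illustrative example (hypothetical detections): two frames with detected
# noun ids [3] and [1] over 5 noun classes yield one binary presence vector
# per frame.
def _demo_bpv():
    bpv = object_list_to_bpv([[3], [1]], num_noun_classes=5, max_seq_length=0)
    # bpv -> [[0, 0, 0, 1, 0],
    #         [0, 1, 0, 0, 0]]
    return bpv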
def load_left_right_tracks(hand_tracks, max_seq_length):
 left_track = np.array(hand_tracks['left'], dtype=np.float32)
 right_track = np.array(hand_tracks['right'], dtype=np.float32)
 if max_seq_length != 0:
 left_track = left_track[np.linspace(0, len(left_track), max_seq_length, endpoint=False, dtype=int)]
 right_track = right_track[np.linspace(0, len(right_track), max_seq_length, endpoint=False, dtype=int)]
 return left_track, right_track
def calc_distance_differences(track):
 x2 = track[:, 0]
 x1 = np.roll(x2, 1)
 x1[0] = x1[1]
 y2 = track[:, 1]
 y1 = np.roll(y2, 1)
 y1[0] = y1[1]
 xdifs = x2 - x1
 ydifs = y2 - y1
 return np.concatenate((xdifs[:, np.newaxis], ydifs[:, np.newaxis]), -1)
def calc_angles(track):
 x2 = track[:, 0]
 x1 = np.roll(x2, 1)
 x1[0] = x1[1]
 y2 = track[:, 1]
 y1 = np.roll(y2, 1)
 y1[0] = y1[1]
 angles = np.arctan2(y2 * x1 - y1 * x2, x2 * x1 + y2 * y1, dtype=np.float32)
 return angles
def calc_polar_distance_from_prev(track):
 return np.concatenate((np.array([0]),
 np.diagonal(squareform(pdist(track)), offset=-1)))
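# Worked example (hypothetical 2D track): for the points (0,0) -> (3,4) -> (3,4)
# the per-step displacements are (0,0), (3,4), (0,0) and the per-step Euclidean
# distances from the previous point are [0, 5, 0] (the first entry is padding).
def _demo_track_features():
    track = np.array([[0., 0.], [3., 4.], [3., 4.]], dtype=np.float32)
    difs = calc_distance_differences(track)  # [[0, 0], [3, 4], [0, 0]]
    dists = calc_polar_distance_from_prev(track)  # [0., 5., 0.]
    return difs, dists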
class DataLine(object):
 def __init__(self, row):
 self.data = row
 @property
 def data_path(self):
 return self.data[0]
 @property
 def num_frames(self): # in the old format this field is the start_frame
 return int(self.data[1])
 @property
 def label_verb(self):
 return int(self.data[2])
 @property
 def label_noun(self):
 return int(self.data[3])
 @property
 def uid(self):
 return int(self.data[4] if len(self.data) > 4 else -1)
 @property
 def start_frame(self):
 return int(self.data[5] if len(self.data) > 5 else -1)
 @property
 def label_action(self):
 return int(self.data[6] if len(self.data) > 6 else -1)
class GTEADataLine(object):
 def __init__(self, row):
 self.data = row
 self.data_len = len(row)
 def get_video_path(self, prefix): # only used for FromVideoDatasetLoader and is deprecated
 return os.path.join(prefix, self.id_recipe, self.data_path + '.mp4')
 @property
 def data_path(self):
 return self.data[0]
 @property
 def frames_path(self):
 path_parts = os.path.normpath(self.data[0]).split(os.sep)
 session_parts = path_parts[1].split('-')
 session = session_parts[0] + '-' + session_parts[1] + '-' + session_parts[2]
 return os.path.join(path_parts[0], session, path_parts[1])
 @property
 def instance_name(self):
 return os.path.normpath(self.data[0]).split(os.sep)[1]
 @property
 def id_recipe(self):
 name_parts = self.data[0].split('-')
 id_recipe = name_parts[0] + '-' + name_parts[1] + '-' + name_parts[2]
 return id_recipe
 @property
 def label_action(self): # to zero based labels
 return int(self.data[1]) - 1 
 @property
 def label_verb(self):
 return int(self.data[2]) - 1 
 @property
 def label_noun(self):
 return int(self.data[3]) - 1
 @property
 def extra_nouns(self):
 extra_nouns = list()
 if self.data_len > 4:
 for noun in self.data[4:]:
 extra_nouns.append(int(noun) - 1)
 return extra_nouns
def parse_samples_list(list_file, datatype):
 return [datatype(x.strip().split(' ')) for x in open(list_file)]
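# Example of the expected list-file format (hypothetical path and labels),
# matching the module docstring 'path of image start_frame verb_id noun_id':
# a line such as "data/clip_0001 120 7 42" parses into a DataLine with
# data_path='data/clip_0001', num_frames/start_frame=120, label_verb=7 and
# label_noun=42.
def _demo_parse_line():
    line = DataLine("data/clip_0001 120 7 42".split(' '))
    return line.data_path, line.num_frames, line.label_verb, line.label_noun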
class ImageDatasetLoader(torchDataset):
 def __init__(self, list_file, num_classes=120,
 batch_transform=None, channels='RGB', validation=False):
 self.samples_list = parse_samples_list(list_file, DataLine)
 if num_classes != 120:
 self.mapping = make_class_mapping(self.samples_list)
 else:
 self.mapping = None
 self.transform = batch_transform
 self.channels = channels
 self.validation = validation
 self.image_read_type = cv2.IMREAD_COLOR if channels == 'RGB' else cv2.IMREAD_GRAYSCALE
 def __len__(self):
 return len(self.samples_list)
 def __getitem__(self, index):
 img = cv2.imread(self.samples_list[index].data_path, self.image_read_type).astype(np.float32)
 if self.channels == 'RGB':
 img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
 if self.transform is not None:
 img = self.transform(img)
 if self.mapping:
 class_id = self.mapping[self.samples_list[index].label_verb]
 else:
 class_id = self.samples_list[index].label_verb
 if not self.validation:
 return img, class_id
 else:
 name_parts = self.samples_list[index].data_path.split("\\")
 return img, class_id, name_parts[-2] + "\\" + name_parts[-1]
class Video(object):
 # adapted from https://github.com/cypw/PyTorch-MFNet/blob/master/data/video_iterator.py
 """basic Video class"""
 def __init__(self, vid_path):
 self.open(vid_path)
 def __del__(self):
 self.close()
 def __enter__(self):
 return self
 def __exit__(self, exc_type, exc_value, traceback):
 self.__del__()
 def reset(self):
 self.close()
 self.vid_path = None
 self.frame_count = -1
 self.faulty_frame = None
 return self
 def open(self, vid_path):
 assert os.path.exists(vid_path), "VideoIter:: cannot locate: `{}'".format(vid_path)
 # close previous video & reset variables
 self.reset()
 # try to open video
 cap = cv2.VideoCapture(vid_path)
 if cap.isOpened():
 self.cap = cap
 self.vid_path = vid_path
 else:
 raise IOError("VideoIter:: failed to open video: `{}'".format(vid_path))
 return self
 def count_frames(self, check_validity=False):
 offset = 0
 if self.vid_path.endswith('.flv'):
 offset = -1
 unverified_frame_count = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT)) + offset
 if check_validity:
 verified_frame_count = 0
 for i in range(unverified_frame_count):
 self.cap.set(cv2.CAP_PROP_POS_FRAMES, i)
 if not self.cap.grab():
 print("VideoIter:: >> frame (start from 0) {} corrupted in {}".format(i, self.vid_path))
 break
 verified_frame_count = i + 1
 self.frame_count = verified_frame_count
 else:
 self.frame_count = unverified_frame_count
 assert self.frame_count > 0, "VideoIter:: Video: `{}' has no frames".format(self.vid_path)
 return self.frame_count
 def extract_frames(self, idxs, force_color=True):
 frames = self.extract_frames_fast(idxs, force_color)
 if frames is None:
 # try slow method:
 frames = self.extract_frames_slow(idxs, force_color)
 return frames
 def extract_frames_fast(self, idxs, force_color=True):
 assert self.cap is not None, "No opened video."
 if len(idxs) < 1:
 return []
 frames = []
 pre_idx = max(idxs)
 for idx in idxs:
 assert (self.frame_count < 0) or (idx < self.frame_count), \
 "idxs: {} > total valid frames({})".format(idxs, self.frame_count)
 if pre_idx != (idx - 1):
 self.cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
 res, frame = self.cap.read() # in BGR/GRAY format
 pre_idx = idx
 if not res:
 self.faulty_frame = idx
 return None
 if len(frame.shape) < 3:
 if force_color:
 # Convert Gray to RGB
 frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
 else:
 # Convert BGR to RGB
 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 frames.append(frame)
 return frames
 def extract_frames_slow(self, idxs, force_color=True):
 assert self.cap is not None, "No opened video."
 if len(idxs) < 1:
 return []
 frames = [None] * len(idxs)
 idx = min(idxs)
 self.cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
 while idx <= max(idxs):
 res, frame = self.cap.read() # in BGR/GRAY format
 if not res:
 # end of the video
 self.faulty_frame = idx
 return None
 if idx in idxs:
 # found a frame
 if len(frame.shape) < 3:
 if force_color:
 # Convert Gray to RGB
 frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
 else:
 # Convert BGR to RGB
 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
 pos = [k for k, i in enumerate(idxs) if i == idx]
 for k in pos:
 frames[k] = frame
 idx += 1
 return frames
 def close(self):
 if hasattr(self, 'cap') and self.cap is not None:
 self.cap.release()
 self.cap = None
 return self
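# Usage sketch (hypothetical file name): the Video wrapper is a context
# manager, so frames can be extracted and the capture released automatically.
# with Video("some_clip.mp4") as vid:
#     n_frames = vid.count_frames()
#     frames = vid.extract_frames([0, 10, 20])  # RGB frames, or None on error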
class VideoFromImagesDatasetLoader(torchDataset): # loads GTEA dataset from frames
 OBJECTIVE_NAMES = ['label_action', 'label_verb', 'label_noun']
 def __init__(self, sampler, split_file, line_type, num_classes, max_num_classes, img_tmpl='img_{:05d}.jpg',
 batch_transform=None, extra_nouns=False, use_gaze=False, gaze_list_prefix=None, use_hands=False,
 hand_list_prefix=None, validation=False, gaze_evaluation=False, vis_data=False):
 self.sampler = sampler
 self.video_list = parse_samples_list(split_file, GTEADataLine) # if line_type=='GTEA' else DataLine)
 self.extra_nouns = extra_nouns
 self.usable_objectives = list()
 self.mappings = list()
 for i, (objective, objective_name) in enumerate(zip(num_classes, VideoFromImagesDatasetLoader.OBJECTIVE_NAMES)):
 self.usable_objectives.append(objective > 0)
 if objective != max_num_classes[i] and self.usable_objectives[-1]:
 self.mappings.append(make_class_mapping_generic(self.video_list, objective_name))
 else:
 self.mappings.append(None)
 assert any(obj is True for obj in self.usable_objectives)
 self.transform = batch_transform
 self.validation = validation
 self.vis_data = vis_data
 self.use_gaze = use_gaze
 self.gaze_list_prefix = gaze_list_prefix
 self.use_hands = use_hands
 self.hand_list_prefix = hand_list_prefix
 self.norm_val = [640., 480., 640., 480.]
 self.image_tmpl = img_tmpl
 self.gaze_evaluation = gaze_evaluation
 def __len__(self):
 return len(self.video_list)
 def __getitem__(self, index):
 path = self.video_list[index].frames_path
 instance_name = self.video_list[index].instance_name
 frame_count = len(os.listdir(path))
 assert frame_count > 0
| 
# -*- coding: utf-8 -*-
import glob
import json
import os
import pickle
import shutil
import sys
import tarfile
import tempfile
import time
import zipfile
import cloudpickle
import pytest
import six
import verta
from verta.tracking.entities._deployable_entity import _CACHE_DIR
from verta._internal_utils import (
 _artifact_utils,
 _utils,
)
from verta.endpoint.update import DirectUpdateStrategy
from verta.environment import Python
pytestmark = pytest.mark.not_oss
@pytest.fixture
def model_packaging():
 """Additional items added to model API in log_model()."""
 return {
 'python_version': _utils.get_python_version(),
 'type': "sklearn",
 'deserialization': "cloudpickle",
 }
class TestLogModel:
 def test_model(self, deployable_entity, model_for_deployment):
 deployable_entity.log_model(model_for_deployment['model'])
 assert model_for_deployment['model'].get_params() == deployable_entity.get_model().get_params()
 def test_custom_modules(self, deployable_entity, model_for_deployment):
 custom_modules_dir = "."
 deployable_entity.log_model(
 model_for_deployment['model'],
 custom_modules=["."],
 )
 custom_module_filenames = {"__init__.py", "_verta_config.py"}
 for parent_dir, dirnames, filenames in os.walk(custom_modules_dir):
 # skip venvs
 # This logic is from _utils.find_filepaths().
 exec_path_glob = os.path.join(parent_dir, "{}", "bin", "python*")
 dirnames[:] = [dirname for dirname in dirnames if not glob.glob(exec_path_glob.format(dirname))]
 custom_module_filenames.update(map(os.path.basename, filenames))
 custom_modules = deployable_entity.get_artifact(_artifact_utils.CUSTOM_MODULES_KEY)
 with zipfile.ZipFile(custom_modules, 'r') as zipf:
 assert custom_module_filenames == set(map(os.path.basename, zipf.namelist()))
 def test_no_custom_modules(self, deployable_entity, model_for_deployment):
 deployable_entity.log_model(model_for_deployment['model'])
 custom_module_filenames = {"__init__.py", "_verta_config.py"}
 for path in sys.path:
 # skip std libs and venvs
 # This logic is from verta.client._log_modules().
 lib_python_str = os.path.join(os.sep, "lib", "python")
 i = path.find(lib_python_str)
 if i != -1 and glob.glob(os.path.join(path[:i], "bin", "python*")):
 continue
 for parent_dir, dirnames, filenames in os.walk(path):
 # only Python files
 filenames[:] = [filename for filename in filenames if filename.endswith(('.py', '.pyc', '.pyo'))]
 if not _utils.is_in_venv(path) and _utils.is_in_venv(parent_dir):
 continue
 custom_module_filenames.update(map(os.path.basename, filenames))
 custom_modules = deployable_entity.get_artifact(_artifact_utils.CUSTOM_MODULES_KEY)
 with zipfile.ZipFile(custom_modules, 'r') as zipf:
 assert custom_module_filenames == set(map(os.path.basename, zipf.namelist()))
 def test_model_api(self, deployable_entity, model_for_deployment, model_packaging):
 deployable_entity.log_model(
 model_for_deployment['model'],
 model_api=model_for_deployment['model_api'],
 )
 model_api = model_for_deployment['model_api'].to_dict()
 model_api.update({
 'model_packaging': model_packaging,
 })
 assert model_api == json.loads(six.ensure_str(
 deployable_entity.get_artifact(_artifact_utils.MODEL_API_KEY).read()))
 def test_no_model_api(self, deployable_entity, model_for_deployment, model_packaging):
 deployable_entity.log_model(model_for_deployment['model'])
 model_api = {
 'version': "v1",
 'model_packaging': model_packaging,
 }
 assert model_api == json.loads(six.ensure_str(
 deployable_entity.get_artifact(_artifact_utils.MODEL_API_KEY).read()))
 def test_model_class(self, deployable_entity, model_for_deployment):
 deployable_entity.log_model(model_for_deployment['model'].__class__)
 assert model_for_deployment['model'].__class__ == deployable_entity.get_model()
 retrieved_model_api = verta.utils.ModelAPI.from_file(
 deployable_entity.get_artifact(_artifact_utils.MODEL_API_KEY))
 assert retrieved_model_api.to_dict()['model_packaging']['type'] == "class"
 def test_artifacts(self, deployable_entity, model_for_deployment, strs, flat_dicts):
 for key, artifact in zip(strs, flat_dicts):
 deployable_entity.log_artifact(key, artifact)
 deployable_entity.log_model(
 model_for_deployment['model'].__class__,
 artifacts=strs,
 )
 assert deployable_entity.get_attribute("verta_model_artifacts") == strs
 def test_no_artifacts(self, deployable_entity, model_for_deployment):
 deployable_entity.log_model(model_for_deployment['model'].__class__)
 with pytest.raises(KeyError):
 deployable_entity.get_attribute("verta_model_artifacts")
 def test_wrong_type_artifacts_error(self, deployable_entity, model_for_deployment, all_values):
 # remove Nones, because they're equivalent to unprovided
 all_values = [val for val in all_values
 if val is not None]
 # remove lists of strings and empty lists, because they're valid arguments
 all_values = [val for val in all_values
 if not (isinstance(val, list) and all(isinstance(el, six.string_types) for el in val))]
 for val in all_values:
 with pytest.raises(TypeError):
 deployable_entity.log_model(
 model_for_deployment['model'].__class__,
 artifacts=val,
 )
 def test_not_class_model_artifacts_error(self, deployable_entity, model_for_deployment, strs, flat_dicts):
 for key, artifact in zip(strs, flat_dicts):
 deployable_entity.log_artifact(key, artifact)
 with pytest.raises(ValueError):
 deployable_entity.log_model(
 model_for_deployment['model'],
 artifacts=strs,
 )
 def test_unlogged_keys_artifacts_error(self, deployable_entity, model_for_deployment, strs, flat_dicts):
 with pytest.raises(ValueError):
 deployable_entity.log_model(
 model_for_deployment['model'],
 artifacts=[strs[0]],
 )
 deployable_entity.log_artifact(strs[0], flat_dicts[0])
 with pytest.raises(ValueError):
 deployable_entity.log_model(
 model_for_deployment['model'],
 artifacts=[strs[1]],
 )
 with pytest.raises(ValueError):
 deployable_entity.log_model(
 model_for_deployment['model'],
 artifacts=strs[1:],
 )
 def test_overwrite_artifacts(self, deployable_entity, endpoint, in_tempdir):
 key = "foo"
 val = {'a': 1}
 class ModelWithDependency(object):
 def __init__(self, artifacts):
 with open(artifacts[key], 'rb') as f: # should not KeyError
 if cloudpickle.load(f) != val:
 raise ValueError # should not ValueError
 def predict(self, x):
 return x
 # first log junk artifact, to test `overwrite`
 bad_key = "bar"
 bad_val = {'b': 2}
 deployable_entity.log_artifact(bad_key, bad_val)
 deployable_entity.log_model(ModelWithDependency, custom_modules=[], artifacts=[bad_key])
 # log real artifact using `overwrite`
 deployable_entity.log_artifact(key, val)
 deployable_entity.log_model(ModelWithDependency, custom_modules=[], artifacts=[key], overwrite=True)
 deployable_entity.log_environment(Python([]))
 endpoint.update(deployable_entity, DirectUpdateStrategy(), wait=True)
 assert val == endpoint.get_deployed_model().predict(val)
class TestFetchArtifacts:
 def test_fetch_artifacts(self, deployable_entity, strs, flat_dicts):
 strs, flat_dicts = strs[:3], flat_dicts[:3] # all 12 is excessive for a test
 for key, artifact in zip(strs, flat_dicts):
 deployable_entity.log_artifact(key, artifact)
 try:
 artifacts = deployable_entity.fetch_artifacts(strs)
 assert set(six.viewkeys(artifacts)) == set(strs)
 assert all(
 filepath.startswith(_CACHE_DIR)
 for filepath in six.viewvalues(artifacts)
 )
 for key, filepath in six.viewitems(artifacts):
 artifact_contents = deployable_entity._get_artifact(key)
 if type(artifact_contents) is tuple:
 # ER returns (contents, path_only)
 # TODO: ER & RMV _get_artifact() should return the same thing
 artifact_contents, _ = artifact_contents
 with open(filepath, 'rb') as f:
 file_contents = f.read()
 assert file_contents == artifact_contents
 finally:
 shutil.rmtree(_CACHE_DIR, ignore_errors=True)
 def test_cached_fetch_artifacts(self, deployable_entity, strs, flat_dicts):
 key = strs[0]
 deployable_entity.log_artifact(key, flat_dicts[0])
 try:
 filepath = deployable_entity.fetch_artifacts([key])[key]
 last_modified = os.path.getmtime(filepath)
 time.sleep(3)
 assert deployable_entity.fetch_artifacts([key])[key] == filepath
 assert os.path.getmtime(filepath) == last_modified
 finally:
 shutil.rmtree(_CACHE_DIR, ignore_errors=True)
 def test_fetch_zip(self, deployable_entity, strs, dir_and_files):
 dirpath, filepaths = dir_and_files
 key = strs[0]
 deployable_entity.log_artifact(key, dirpath)
 try:
 dirpath = deployable_entity.fetch_artifacts([key])[key]
 assert dirpath.startswith(_CACHE_DIR)
 retrieved_filepaths = set()
 for root, _, files in os.walk(dirpath):
 for filename in files:
 filepath = os.path.join(root, filename)
 filepath = os.path.relpath(filepath, dirpath)
 retrieved_filepaths.add(filepath)
 assert filepaths == retrieved_filepaths
 finally:
 shutil.rmtree(_CACHE_DIR, ignore_errors=True)
 def test_cached_fetch_zip(self, deployable_entity, strs, dir_and_files):
 dirpath, _ = dir_and_files
 key = strs[0]
 deployable_entity.log_artifact(key, dirpath)
 try:
 dirpath = deployable_entity.fetch_artifacts([key])[key]
 last_modified = os.path.getmtime(dirpath)
 time.sleep(3)
 assert deployable_entity.fetch_artifacts([key])[key] == dirpath
 assert os.path.getmtime(dirpath) == last_modified
 finally:
 shutil.rmtree(_CACHE_DIR, ignore_errors=True)
 def test_fetch_tgz(self, deployable_entity, strs, dir_and_files):
 dirpath, filepaths = dir_and_files
 key = strs[0]
 with tempfile.NamedTemporaryFile(suffix='.tgz') as tempf:
 # make archive
 with tarfile.open(tempf.name, 'w:gz') as tarf:
 tarf.add(dirpath, "")
 tempf.flush() # flush object buffer
 os.fsync(tempf.fileno()) # flush OS buffer
 tempf.seek(0)
 deployable_entity.log_artifact(key, tempf.name)
 try:
 dirpath = deployable_entity.fetch_artifacts([key])[key]
 assert dirpath.startswith(_CACHE_DIR)
 retrieved_filepaths = set()
 for root, _, files in os.walk(dirpath):
 for filename in files:
 filepath = os.path.join(root, filename)
 filepath = os.path.relpath(filepath, dirpath)
 retrieved_filepaths.add(filepath)
 assert filepaths == retrieved_filepaths
 finally:
 shutil.rmtree(_CACHE_DIR, ignore_errors=True)
 def test_fetch_tar(self, deployable_entity, strs, dir_and_files):
 dirpath, filepaths = dir_and_files
 key = strs[0]
 with tempfile.NamedTemporaryFile(suffix='.tar') as tempf:
 # make archive
 with tarfile.open(tempf.name, 'w') as tarf:
 tarf.add(dirpath, "")
 tempf.flush() # flush object buffer
 os.fsync(tempf.fileno()) # flush OS buffer
 tempf.seek(0)
 deployable_entity.log_artifact(key, tempf.name)
 try:
 dirpath = deployable_entity.fetch_artifacts([key])[key]
 assert dirpath.startswith(_CACHE_DIR)
 retrieved_filepaths = set()
 for root, _, files in os.walk(dirpath):
 for filename in files:
 filepath = os.path.join(root, filename)
 filepath = os.path.relpath(filepath, dirpath)
 retrieved_filepaths.add(filepath)
 assert filepaths == retrieved_filepaths
 finally:
 shutil.rmtree(_CACHE_DIR, ignore_errors=True)
 def test_fetch_tar_gz(self, deployable_entity, strs, dir_and_files):
 dirpath, filepaths = dir_and_files
 key = strs[0]
 with tempfile.NamedTemporaryFile(suffix='.tar.gz') as tempf:
 # make archive
 with tarfile.open(tempf.name, 'w:gz') as tarf:
 tarf.add(dirpath, "")
 tempf.flush() # flush object buffer
 os.fsync(tempf.fileno()) # flush OS buffer
 tempf.seek(0)
 deployable_entity.log_artifact(key, tempf.name)
 try:
 dirpath = deployable_entity.fetch_artifacts([key])[key]
 assert dirpath.startswith(_CACHE_DIR)
 retrieved_filepaths = set()
 for root, _, files in os.walk(dirpath):
 for filename in files:
 filepath = os.path.join(root, filename)
 filepath = os.path.relpath(filepath, dirpath)
 retrieved_filepaths.add(filepath)
 assert filepaths == retrieved_filepaths
 finally:
 shutil.rmtree(_CACHE_DIR, ignore_errors=True)
 def test_wrong_type_artifacts_error(self, deployable_entity, all_values):
 # remove lists of strings and empty lists, because they're valid arguments
 all_values = [val for val in all_values
 if not (isinstance(val, list) and all(isinstance(el, six.string_types) for el in val))]
 for val in all_values:
 with pytest.raises(TypeError):
 deployable_entity.fetch_artifacts(val)
 def test_unlogged_keys_artifacts_error(self, deployable_entity, strs, flat_dicts):
 with pytest.raises(ValueError):
 deployable_entity.fetch_artifacts([strs[0]])
 deployable_entity.log_artifact(strs[0], flat_dicts[0])
 with pytest.raises(ValueError):
 deployable_entity.fetch_artifacts([strs[1]])
 with pytest.raises(ValueError):
 deployable_entity.fetch_artifacts(strs[1:])
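# Usage sketch (added for illustration; not one of the original tests). It
# assumes a `deployable_entity` like the fixture used by the tests above and
# shows the log -> fetch -> clean-up pattern that TestFetchArtifacts
# exercises; the artifact key and value are hypothetical.
def _fetch_artifacts_usage_sketch(deployable_entity):
    deployable_entity.log_artifact("config", {"lr": 0.01})  # hypothetical artifact
    try:
        # fetch_artifacts() downloads into the client cache and returns a
        # mapping of {key: local filepath}
        paths = deployable_entity.fetch_artifacts(["config"])
        with open(paths["config"], "rb") as f:
            return f.read()
    finally:
        shutil.rmtree(_CACHE_DIR, ignore_errors=True)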
class TestDeployability:
 """Deployment-related functionality"""
 def test_log_environment(self, registered_model):
 deployable_entity = registered_model.get_or_create_version(name="my version")
 reqs = Python.read_pip_environment()
 env = Python(requirements=reqs)
 deployable_entity.log_environment(env)
 deployable_entity = registered_model.get_version(id=deployable_entity.id)
 assert str(env) == str(deployable_entity.get_environment())
 with pytest.raises(ValueError):
 deployable_entity.log_environment(env)
 deployable_entity.log_environment(env, overwrite=True)
 assert str(env) == str(deployable_entity.get_environment())
 def test_del_environment(self, registered_model):
 deployable_entity = registered_model.get_or_create_version(name="my version")
 reqs = Python.read_pip_environment()
 env = Python(requirements=reqs)
 deployable_entity.log_environment(env)
 deployable_entity.del_environment()
 deployable_entity = registered_model.get_version(id=deployable_entity.id)
 assert not deployable_entity.has_environment
 with pytest.raises(RuntimeError) as excinfo:
 deployable_entity.get_environment()
 assert "environment was not previously set" in str(excinfo.value)
 def test_log_model(self, deployable_entity):
 np = pytest.importorskip("numpy")
 sklearn = pytest.importorskip("sklearn")
 from sklearn.linear_model import LogisticRegression
 classifier = LogisticRegression()
 classifier.fit(np.random.random((36, 12)), np.random.random(36).round())
 original_coef = classifier.coef_
 deployable_entity.log_model(classifier)
 # retrieve the classifier:
 retrieved_classfier = deployable_entity.get_model()
 assert np.array_equal(retrieved_classfier.coef_, original_coef)
 # check model api:
 assert _artifact_utils.MODEL_API_KEY in deployable_entity.get_artifact_keys()
 for artifact in deployable_entity._msg.artifacts:
 if artifact.key == _artifact_utils.MODEL_API_KEY:
 assert artifact.filename_extension == "json"
 # overwrite should work:
 new_classifier = LogisticRegression()
 new_classifier.fit(np.random.random((36, 12)), np.random.random(36).round())
 deployable_entity.log_model(new_classifier, overwrite=True)
 retrieved_classfier = deployable_entity.get_model()
 assert np.array_equal(retrieved_classfier.coef_, new_classifier.coef_)
 # when overwrite = false, overwriting should fail
 with pytest.raises(ValueError) as excinfo:
 deployable_entity.log_model(new_classifier)
 assert "already exists" in str(excinfo.value)
 # Check custom modules:
 custom_module_filenames = {"__init__.py", "_verta_config.py"}
 for path in sys.path:
 # skip std libs and venvs
 # This logic is from verta.client._log_modules().
 lib_python_str = os.path.join(os.sep, "lib", "python")
 i = path.find(lib_python_str)
 if i != -1 and glob.glob(os.path.join(path[:i], "bin", "python*")):
 continue
 for parent_dir, dirnames, filenames in os.walk(path):
 # only Python files
 filenames[:] = [
 filename
 for filename in filenames
 if filename.endswith((".py", ".pyc", ".pyo"))
 ]
 if not _utils.is_in_venv(path) and _utils.is_in_venv(parent_dir):
 continue
 custom_module_filenames.update(map(os.path.basename, filenames))
 custom_modules = deployable_entity.get_artifact(_artifact_utils.CUSTOM_MODULES_KEY)
 with zipfile.ZipFile(custom_modules, "r") as zipf:
 assert custom_module_filenames == set(
 map(os.path.basename, zipf.namelist())
 )
 def test_download_sklearn(self, deployable_entity, in_tempdir):
 LogisticRegression = pytest.importorskip(
 "sklearn.linear_model"
 ).LogisticRegression
 upload_path = "model.pkl"
 download_path = "retrieved_model.pkl"
 model = LogisticRegression(C=0.67, max_iter=178) # set some non-default values
 with open(upload_path, "wb") as f:
 pickle.dump(model, f)
 deployable_entity.log_model(model, custom_modules=[])
 returned_path = deployable_entity.download_model(download_path)
 assert returned_path == os.path.abspath(download_path)
 with open(download_path, "rb") as f:
 downloaded_model = pickle.load(f)
 assert downloaded_model.get_params() == model.get_params()
 def test_log_model_with_custom_modules(self, deployable_entity, model_for_deployment):
 custom_modules_dir = "."
 deployable_entity.log_model(
 model_for_deployment["model"],
 custom_modules=["."],
 )
 custom_module_filenames = {"__init__.py", "_verta_config.py"}
 for parent_dir, dirnames,
| 
 # truth from h5py
 vfile.f.create_dataset(f'data{i}', data=data, fillvalue=-1, chunks=chunks,
 maxshape=(None, None, None))
 vfile.f[f'data{i}'].resize(newshape)
 new_data = vfile.f[f'data{i}'][()]
 # resize after creation
 with vfile.stage_version(f'version1_{i}') as group:
 group.create_dataset(f'dataset1_{i}', data=data, chunks=chunks,
 fillvalue=-1)
 group[f'dataset1_{i}'].resize(newshape)
 assert group[f'dataset1_{i}'].shape == newshape
 assert_equal(group[f'dataset1_{i}'][()], new_data)
 version1 = vfile[f'version1_{i}']
 assert version1[f'dataset1_{i}'].shape == newshape
 assert_equal(version1[f'dataset1_{i}'][()], new_data)
 # resize in a new version
 with vfile.stage_version(f'version2_1_{i}', '') as group:
 group.create_dataset(f'dataset2_{i}', data=data, chunks=chunks,
 fillvalue=-1)
 with vfile.stage_version(f'version2_2_{i}', f'version2_1_{i}') as group:
 group[f'dataset2_{i}'].resize(newshape)
 assert group[f'dataset2_{i}'].shape == newshape
 assert_equal(group[f'dataset2_{i}'][()], new_data, str((oldshape, newshape)))
 version2_2 = vfile[f'version2_2_{i}']
 assert version2_2[f'dataset2_{i}'].shape == newshape
 assert_equal(version2_2[f'dataset2_{i}'][()], new_data)
 # resize after some data is read in
 with vfile.stage_version(f'version3_1_{i}', '') as group:
 group.create_dataset(f'dataset3_{i}', data=data, chunks=chunks,
 fillvalue=-1)
 with vfile.stage_version(f'version3_2_{i}', f'version3_1_{i}') as group:
 # read in first and last chunks
 group[f'dataset3_{i}'][0, 0, 0]
 group[f'dataset3_{i}'][-1, -1, -1]
 group[f'dataset3_{i}'].resize(newshape)
 assert group[f'dataset3_{i}'].shape == newshape
 assert_equal(group[f'dataset3_{i}'][()], new_data)
 version3_2 = vfile[f'version3_2_{i}']
 assert version3_2[f'dataset3_{i}'].shape == newshape
 assert_equal(version3_2[f'dataset3_{i}'][()], new_data)
def test_getitem(vfile):
 data = np.arange(2*DEFAULT_CHUNK_SIZE)
 with vfile.stage_version('version1') as group:
 group.create_dataset('test_data', data=data)
 test_data = group['test_data']
 assert test_data.shape == (2*DEFAULT_CHUNK_SIZE,)
 assert_equal(test_data[0], 0)
 assert test_data[0].dtype == np.int64
 assert_equal(test_data[:], data)
 assert_equal(test_data[:DEFAULT_CHUNK_SIZE+1], data[:DEFAULT_CHUNK_SIZE+1])
 with vfile.stage_version('version2') as group:
 test_data = group['test_data']
 assert test_data.shape == (2*DEFAULT_CHUNK_SIZE,)
 assert_equal(test_data[0], 0)
 assert test_data[0].dtype == np.int64
 assert_equal(test_data[:], data)
 assert_equal(test_data[:DEFAULT_CHUNK_SIZE+1], data[:DEFAULT_CHUNK_SIZE+1])
def test_timestamp_auto(vfile):
 data = np.ones((2*DEFAULT_CHUNK_SIZE,))
 with vfile.stage_version('version1') as group:
 group.create_dataset('test_data', data=data)
 assert isinstance(vfile['version1'].attrs['timestamp'], str)
def test_timestamp_manual(vfile):
 data1 = np.ones((2*DEFAULT_CHUNK_SIZE,))
 data2 = np.ones((3*DEFAULT_CHUNK_SIZE))
 ts1 = datetime.datetime(2020, 6, 29, 20, 12, 56, tzinfo=datetime.timezone.utc)
 ts2 = datetime.datetime(2020, 6, 29, 22, 12, 56)
 with vfile.stage_version('version1', timestamp=ts1) as group:
 group['test_data_1'] = data1
 assert vfile['version1'].attrs['timestamp'] == ts1.strftime(TIMESTAMP_FMT)
 with raises(ValueError):
 with vfile.stage_version('version2', timestamp=ts2) as group:
 group['test_data_2'] = data2
 with raises(TypeError):
 with vfile.stage_version('version3', timestamp='2020-6-29') as group:
 group['test_data_3'] = data1
def test_timestamp_pytz(vfile):
 # pytz is not a dependency of versioned-hdf5, but it is supported if it is
 # used.
 import pytz
 data1 = np.ones((2*DEFAULT_CHUNK_SIZE,))
 data2 = np.ones((3*DEFAULT_CHUNK_SIZE))
 ts1 = datetime.datetime(2020, 6, 29, 20, 12, 56, tzinfo=pytz.utc)
 ts2 = datetime.datetime(2020, 6, 29, 22, 12, 56)
 with vfile.stage_version('version1', timestamp=ts1) as group:
 group['test_data_1'] = data1
 assert vfile['version1'].attrs['timestamp'] == ts1.strftime(TIMESTAMP_FMT)
 with raises(ValueError):
 with vfile.stage_version('version2', timestamp=ts2) as group:
 group['test_data_2'] = data2
 with raises(TypeError):
 with vfile.stage_version('version3', timestamp='2020-6-29') as group:
 group['test_data_3'] = data1
def test_timestamp_manual_datetime64(vfile):
 data = np.ones((2*DEFAULT_CHUNK_SIZE,))
 # Also tests that it works correctly for 0 fractional part (issue #190).
 ts = datetime.datetime(2020, 6, 29, 20, 12, 56, tzinfo=datetime.timezone.utc)
 npts = np.datetime64(ts.replace(tzinfo=None))
 with vfile.stage_version('version1', timestamp=npts) as group:
 group['test_data'] = data
 v1 = vfile['version1']
 assert v1.attrs['timestamp'] == ts.strftime(TIMESTAMP_FMT)
 assert vfile[npts] == v1
 assert vfile[ts] == v1
 assert vfile.get_version_by_timestamp(npts, exact=True) == v1
 assert vfile.get_version_by_timestamp(ts, exact=True) == v1
def test_getitem_by_timestamp(vfile):
 data = np.arange(2*DEFAULT_CHUNK_SIZE)
 with vfile.stage_version('version1') as group:
 group.create_dataset('test_data', data=data)
 v1 = vfile['version1']
 ts1 = datetime.datetime.strptime(v1.attrs['timestamp'], TIMESTAMP_FMT)
 assert vfile[ts1] == v1
 assert vfile.get_version_by_timestamp(ts1) == v1
 assert vfile.get_version_by_timestamp(ts1, exact=True) == v1
 dt1 = np.datetime64(ts1.replace(tzinfo=None))
 assert vfile[dt1] == v1
 assert vfile.get_version_by_timestamp(dt1) == v1
 assert vfile.get_version_by_timestamp(dt1, exact=True) == v1
 minute = datetime.timedelta(minutes=1)
 second = datetime.timedelta(seconds=1)
 ts2 = ts1 + minute
 dt2 = np.datetime64(ts2.replace(tzinfo=None))
 with vfile.stage_version('version2', timestamp=ts2) as group:
 group['test_data'][0] += 1
 v2 = vfile['version2']
 assert vfile[ts2] == v2
 assert vfile.get_version_by_timestamp(ts2) == v2
 assert vfile.get_version_by_timestamp(ts2, exact=True) == v2
 assert vfile[dt2] == v2
 assert vfile.get_version_by_timestamp(dt2) == v2
 assert vfile.get_version_by_timestamp(dt2, exact=True) == v2
 ts2_1 = ts2 + second
 dt2_1 = np.datetime64(ts2_1.replace(tzinfo=None))
 assert vfile[ts2_1] == v2
 assert vfile.get_version_by_timestamp(ts2_1) == v2
 raises(KeyError, lambda: vfile.get_version_by_timestamp(ts2_1, exact=True))
 assert vfile[dt2_1] == v2
 assert vfile.get_version_by_timestamp(dt2_1) == v2
 raises(KeyError, lambda: vfile.get_version_by_timestamp(dt2_1, exact=True))
 ts1_1 = ts1 + second
 dt1_1 = np.datetime64(ts1_1.replace(tzinfo=None))
 assert vfile[ts1_1] == v1
 assert vfile.get_version_by_timestamp(ts1_1) == v1
 raises(KeyError, lambda: vfile.get_version_by_timestamp(ts1_1, exact=True))
 assert vfile[dt1_1] == v1
 assert vfile.get_version_by_timestamp(dt1_1) == v1
 raises(KeyError, lambda: vfile.get_version_by_timestamp(dt1_1, exact=True))
 ts0 = ts1 - second
 dt0 = np.datetime64(ts0.replace(tzinfo=None))
 raises(KeyError, lambda: vfile[ts0] == v1)
 raises(KeyError, lambda: vfile.get_version_by_timestamp(ts0) == v1)
 raises(KeyError, lambda: vfile.get_version_by_timestamp(ts0, exact=True))
 raises(KeyError, lambda: vfile[dt0] == v1)
 raises(KeyError, lambda: vfile.get_version_by_timestamp(dt0) == v1)
 raises(KeyError, lambda: vfile.get_version_by_timestamp(dt0, exact=True))
def test_nonroot(vfile):
 g = vfile.f.create_group('subgroup')
 file = VersionedHDF5File(g)
 test_data = np.concatenate((np.ones((2*DEFAULT_CHUNK_SIZE,)),
 2*np.ones((DEFAULT_CHUNK_SIZE,)),
 3*np.ones((DEFAULT_CHUNK_SIZE,))))
 with file.stage_version('version1', '') as group:
 group['test_data'] = test_data
 version1 = file['version1']
 assert version1.attrs['prev_version'] == '__first_version__'
 assert_equal(version1['test_data'], test_data)
 ds = vfile.f['/subgroup/_version_data/test_data/raw_data']
 assert ds.shape == (3*DEFAULT_CHUNK_SIZE,)
 assert_equal(ds[0:1*DEFAULT_CHUNK_SIZE], 1.0)
 assert_equal(ds[1*DEFAULT_CHUNK_SIZE:2*DEFAULT_CHUNK_SIZE], 2.0)
 assert_equal(ds[2*DEFAULT_CHUNK_SIZE:3*DEFAULT_CHUNK_SIZE], 3.0)
def test_attrs(vfile):
 data = np.arange(2*DEFAULT_CHUNK_SIZE)
 with vfile.stage_version('version1') as group:
 group.create_dataset('test_data', data=data)
 test_data = group['test_data']
 assert 'test_attr' not in test_data.attrs
 test_data.attrs['test_attr'] = 0
 assert vfile['version1']['test_data'].attrs['test_attr'] == \
 vfile.f['_version_data']['versions']['version1']['test_data'].attrs['test_attr'] == 0
 with vfile.stage_version('version2') as group:
 test_data = group['test_data']
 assert test_data.attrs['test_attr'] == 0
 test_data.attrs['test_attr'] = 1
 assert vfile['version1']['test_data'].attrs['test_attr'] == \
 vfile.f['_version_data']['versions']['version1']['test_data'].attrs['test_attr'] == 0
 assert vfile['version2']['test_data'].attrs['test_attr'] == \
 vfile.f['_version_data']['versions']['version2']['test_data'].attrs['test_attr'] == 1
def test_auto_delete(vfile):
 try:
 with vfile.stage_version('version1') as group:
 raise RuntimeError
 except RuntimeError:
 pass
 else:
 raise AssertionError("did not raise")
 # Make sure the version got deleted so that we can make it again
 data = np.arange(2*DEFAULT_CHUNK_SIZE)
 with vfile.stage_version('version1') as group:
 group.create_dataset('test_data', data=data)
 assert_equal(vfile['version1']['test_data'], data)
def test_delitem(vfile):
 data = np.arange(2*DEFAULT_CHUNK_SIZE)
 with vfile.stage_version('version1') as group:
 group.create_dataset('test_data', data=data)
 with vfile.stage_version('version2') as group:
 group.create_dataset('test_data2', data=data)
 del vfile['version2']
 assert list(vfile) == ['version1']
 assert vfile.current_version == 'version1'
 with raises(KeyError):
 del vfile['version2']
 del vfile['version1']
 assert list(vfile) == []
 assert vfile.current_version == '__first_version__'
def test_groups(vfile):
 data = np.ones(2*DEFAULT_CHUNK_SIZE)
 with vfile.stage_version('version1') as group:
 group.create_group('group1')
 group.create_dataset('group1/test_data', data=data)
 assert_equal(group['group1']['test_data'], data)
 assert_equal(group['group1/test_data'], data)
 version = vfile['version1']
 assert_equal(version['group1']['test_data'], data)
 assert_equal(version['group1/test_data'], data)
 with vfile.stage_version('version2', '') as group:
 group.create_dataset('group1/test_data', data=data)
 assert_equal(group['group1']['test_data'], data)
 assert_equal(group['group1/test_data'], data)
 version = vfile['version2']
 assert_equal(version['group1']['test_data'], data)
 assert_equal(version['group1/test_data'], data)
 with vfile.stage_version('version3', 'version1') as group:
 group['group1']['test_data'][0] = 0
 group['group1/test_data'][1] = 0
 assert_equal(group['group1']['test_data'][:2], 0)
 assert_equal(group['group1']['test_data'][2:], 1)
 assert_equal(group['group1/test_data'][:2], 0)
 assert_equal(group['group1/test_data'][2:], 1)
 version = vfile['version3']
 assert_equal(version['group1']['test_data'][:2], 0)
 assert_equal(version['group1']['test_data'][2:], 1)
 assert_equal(version['group1/test_data'][:2], 0)
 assert_equal(version['group1/test_data'][2:], 1)
 assert list(version) == ['group1']
 assert list(version['group1']) == ['test_data']
 with vfile.stage_version('version4', 'version3') as group:
 group.create_dataset('group2/test_data', data=2*data)
 assert_equal(group['group1']['test_data'][:2], 0)
 assert_equal(group['group1']['test_data'][2:], 1)
 assert_equal(group['group2']['test_data'][:], 2)
 assert_equal(group['group1/test_data'][:2], 0)
 assert_equal(group['group1/test_data'][2:], 1)
 assert_equal(group['group2/test_data'][:], 2)
 version = vfile['version4']
 assert_equal(version['group1']['test_data'][:2], 0)
 assert_equal(version['group1']['test_data'][2:], 1)
 assert_equal(group['group2']['test_data'][:], 2)
 assert_equal(version['group1/test_data'][:2], 0)
 assert_equal(version['group1/test_data'][2:], 1)
 assert_equal(group['group2/test_data'][:], 2)
 assert list(version) == ['group1', 'group2']
 assert list(version['group1']) == ['test_data']
 assert list(version['group2']) == ['test_data']
 with vfile.stage_version('version5', '') as group:
 group.create_dataset('group1/group2/test_data', data=data)
 assert_equal(group['group1']['group2']['test_data'], data)
 assert_equal(group['group1/group2']['test_data'], data)
 assert_equal(group['group1']['group2/test_data'], data)
 assert_equal(group['group1/group2/test_data'], data)
 version = vfile['version5']
 assert_equal(version['group1']['group2']['test_data'], data)
 assert_equal(version['group1/group2']['test_data'], data)
 assert_equal(version['group1']['group2/test_data'], data)
 assert_equal(version['group1/group2/test_data'], data)
 with vfile.stage_version('version6', '') as group:
 group.create_dataset('group1/test_data1', data=data)
 group.create_dataset('group1/group2/test_data2', data=2*data)
 group.create_dataset('group1/group2/group3/test_data3', data=3*data)
 group.create_dataset('group1/group2/test_data4', data=4*data)
 assert_equal(group['group1']['test_data1'], data)
 assert_equal(group['group1/test_data1'], data)
 assert_equal(group['group1']['group2']['test_data2'], 2*data)
 assert_equal(group['group1/group2']['test_data2'], 2*data)
 assert_equal(group['group1']['group2/test_data2'], 2*data)
 assert_equal(group['group1/group2/test_data2'], 2*data)
 assert_equal(group['group1']['group2']['group3']['test_data3'], 3*data)
 assert_equal(group['group1/group2']['group3']['test_data3'], 3*data)
 assert_equal(group['group1/group2']['group3/test_data3'], 3*data)
 assert_equal(group['group1']['group2/group3/test_data3'], 3*data)
 assert_equal(group['group1/group2/group3/test_data3'], 3*data)
 assert_equal(group['group1']['group2']['test_data4'], 4*data)
 assert_equal(group['group1/group2']['test_data4'], 4*data)
 assert_equal(group['group1']['group2/test_data4'], 4*data)
 assert_equal(group['group1/group2/test_data4'], 4*data)
 assert list(group) == ['group1']
 assert set(group['group1']) == {'group2', 'test_data1'}
 assert set(group['group1']['group2']) == set(group['group1/group2']) == {'group3', 'test_data2', 'test_data4'}
 assert list(group['group1']['group2']['group3']) == list(group['group1/group2/group3']) == ['test_data3']
 version = vfile['version6']
 assert_equal(version['group1']['test_data1'], data)
 assert_equal(version['group1/test_data1'], data)
 assert_equal(version['group1']['group2']['test_data2'], 2*data)
 assert_equal(version['group1/group2']['test_data2'], 2*data)
 assert_equal(version['group1']['group2/test_data2'], 2*data)
 assert_equal(version['group1/group2/test_data2'], 2*data)
 assert_equal(version['group1']['group2']['group3']['test_data3'], 3*data)
 assert_equal(version['group1/group2']['group3']['test_data3'], 3*data)
 assert_equal(version['group1/group2']['group3/test_data3'], 3*data)
 assert_equal(version['group1']['group2/group3/test_data3'], 3*data)
 assert_equal(version['group1/group2/group3/test_data3'], 3*data)
 assert_equal(version['group1']['group2']['test_data4'], 4*data)
 assert_equal(version['group1/group2']['test_data4'], 4*data)
 assert_equal(version['group1']['group2/test_data4'], 4*data)
 assert_equal(version['group1/group2/test_data4'], 4*data)
 assert list(version) == ['group1']
 assert set(version['group1']) == {'group2', 'test_data1'}
 assert set(version['group1']['group2']) == set(version['group1/group2']) == {'group3', 'test_data2', 'test_data4'}
 assert list(version['group1']['group2']['group3']) == list(version['group1/group2/group3']) == ['test_data3']
 with vfile.stage_version('version-bad', '') as group:
 raises(ValueError, lambda: group.create_dataset('/group1/test_data', data=data))
 raises(ValueError, lambda: group.create_group('/group1'))
def test_group_contains(vfile):
 data = np.ones(2*DEFAULT_CHUNK_SIZE)
 with vfile.stage_version('version1') as group:
 group.create_dataset('group1/group2/test_data', data=data)
 assert 'group1' in group
 assert 'group2' in group['group1']
 assert 'test_data' in group['group1/group2']
 assert 'test_data' not in group
 assert 'test_data' not in group['group1']
 assert 'group1/group2' in group
 assert 'group1/group3' not in group
 assert 'group1/group2/test_data' in group
 assert 'group1/group3/test_data' not in group
 assert 'group1/group3/test_data2' not in group
 with vfile.stage_version('version2') as group:
 group.create_dataset('group1/group3/test_data2', data=data)
 assert 'group1' in group
 assert 'group2' in group['group1']
 assert 'group3' in group['group1']
 assert 'test_data' in group['group1/group2']
 assert 'test_data' not in group
 assert 'test_data' not in group['group1']
 assert 'test_data2' in group['group1/group3']
 assert 'test_data2' not in group['group1/group2']
 assert 'group1/group2' in group
 assert 'group1/group3' in group
 assert 'group1/group2/test_data' in group
 assert 'group1/group3/test_data' not in group
 assert 'group1/group3/test_data2' in group
 version1 = vfile['version1']
 version2 = vfile['version2']
 assert 'group1' in version1
 assert 'group1/' in version1
 assert 'group1' in version2
 assert 'group1/' in version2
 assert 'group2' in version1['group1']
 assert 'group2/' in version1['group1']
 assert 'group2' in version2['group1']
 assert 'group2/' in version2['group1']
 assert 'group3' not in version1['group1']
 assert 'group3/' not in version1['group1']
 assert 'group3' in version2['group1']
 assert 'group3/' in version2['group1']
 assert 'group1/group2' in version1
 assert 'group1/group2/' in version1
 assert 'group1/group2' in version2
 assert 'group1/group2/' in version2
 assert 'group1/group3' not in version1
 assert 'group1/group3/' not in version1
 assert 'group1/group3' in version2
 assert 'group1/group3/' in version2
 assert 'group1/group2/test_data' in version1
 assert 'group1/group2/test_data/' in version1
 assert 'group1/group2/test_data' in version2
 assert 'group1/group2/test_data/' in version2
 assert 'group1/group3/test_data' not in version1
 assert 'group1/group3/test_data/' not in version1
 assert
| 
 m.b2270 <= 0)
m.e2463 = Constraint(expr= m.x1723 - 1.10947836929589 * m.b2271 <= 0)
m.e2464 = Constraint(expr= m.x1724 - 1.10947836929589 * m.b2272 <= 0)
m.e2465 = Constraint(expr= m.x1725 + 1.10947836929589 * m.b2269
 <= 1.10947836929589)
m.e2466 = Constraint(expr= m.x1726 + 1.10947836929589 * m.b2270
 <= 1.10947836929589)
m.e2467 = Constraint(expr= m.x1727 + 1.10947836929589 * m.b2271
 <= 1.10947836929589)
m.e2468 = Constraint(expr= m.x1728 + 1.10947836929589 * m.b2272
 <= 1.10947836929589)
m.e2469 = Constraint(expr= -0.9 * m.x1665 + m.x1729 == 0)
m.e2470 = Constraint(expr= -0.9 * m.x1666 + m.x1730 == 0)
m.e2471 = Constraint(expr= -0.9 * m.x1667 + m.x1731 == 0)
m.e2472 = Constraint(expr= -0.9 * m.x1668 + m.x1732 == 0)
m.e2473 = Constraint(expr= m.x1669 == 0)
m.e2474 = Constraint(expr= m.x1670 == 0)
m.e2475 = Constraint(expr= m.x1671 == 0)
m.e2476 = Constraint(expr= m.x1672 == 0)
m.e2477 = Constraint(expr= m.x1733 == 0)
m.e2478 = Constraint(expr= m.x1734 == 0)
m.e2479 = Constraint(expr= m.x1735 == 0)
m.e2480 = Constraint(expr= m.x1736 == 0)
m.e2481 = Constraint(expr= m.x1261 - m.x1665 - m.x1669 == 0)
m.e2482 = Constraint(expr= m.x1262 - m.x1666 - m.x1670 == 0)
m.e2483 = Constraint(expr= m.x1263 - m.x1667 - m.x1671 == 0)
m.e2484 = Constraint(expr= m.x1264 - m.x1668 - m.x1672 == 0)
m.e2485 = Constraint(expr= m.x1285 - m.x1729 - m.x1733 == 0)
m.e2486 = Constraint(expr= m.x1286 - m.x1730 - m.x1734 == 0)
m.e2487 = Constraint(expr= m.x1287 - m.x1731 - m.x1735 == 0)
m.e2488 = Constraint(expr= m.x1288 - m.x1732 - m.x1736 == 0)
m.e2489 = Constraint(expr= m.x1665 - 3.5 * m.b2273 <= 0)
m.e2490 = Constraint(expr= m.x1666 - 3.5 * m.b2274 <= 0)
m.e2491 = Constraint(expr= m.x1667 - 3.5 * m.b2275 <= 0)
m.e2492 = Constraint(expr= m.x1668 - 3.5 * m.b2276 <= 0)
m.e2493 = Constraint(expr= m.x1669 + 3.5 * m.b2273 <= 3.5)
m.e2494 = Constraint(expr= m.x1670 + 3.5 * m.b2274 <= 3.5)
m.e2495 = Constraint(expr= m.x1671 + 3.5 * m.b2275 <= 3.5)
m.e2496 = Constraint(expr= m.x1672 + 3.5 * m.b2276 <= 3.5)
m.e2497 = Constraint(expr= m.x1729 - 3.15 * m.b2273 <= 0)
m.e2498 = Constraint(expr= m.x1730 - 3.15 * m.b2274 <= 0)
m.e2499 = Constraint(expr= m.x1731 - 3.15 * m.b2275 <= 0)
m.e2500 = Constraint(expr= m.x1732 - 3.15 * m.b2276 <= 0)
m.e2501 = Constraint(expr= m.x1733 + 3.15 * m.b2273 <= 3.15)
m.e2502 = Constraint(expr= m.x1734 + 3.15 * m.b2274 <= 3.15)
m.e2503 = Constraint(expr= m.x1735 + 3.15 * m.b2275 <= 3.15)
m.e2504 = Constraint(expr= m.x1736 + 3.15 * m.b2276 <= 3.15)
m.e2505 = Constraint(expr= -0.6 * m.x1673 + m.x1737 == 0)
m.e2506 = Constraint(expr= -0.6 * m.x1674 + m.x1738 == 0)
m.e2507 = Constraint(expr= -0.6 * m.x1675 + m.x1739 == 0)
m.e2508 = Constraint(expr= -0.6 * m.x1676 + m.x1740 == 0)
m.e2509 = Constraint(expr= m.x1677 == 0)
m.e2510 = Constraint(expr= m.x1678 == 0)
m.e2511 = Constraint(expr= m.x1679 == 0)
m.e2512 = Constraint(expr= m.x1680 == 0)
m.e2513 = Constraint(expr= m.x1741 == 0)
m.e2514 = Constraint(expr= m.x1742 == 0)
m.e2515 = Constraint(expr= m.x1743 == 0)
m.e2516 = Constraint(expr= m.x1744 == 0)
m.e2517 = Constraint(expr= m.x1265 - m.x1673 - m.x1677 == 0)
m.e2518 = Constraint(expr= m.x1266 - m.x1674 - m.x1678 == 0)
m.e2519 = Constraint(expr= m.x1267 - m.x1675 - m.x1679 == 0)
m.e2520 = Constraint(expr= m.x1268 - m.x1676 - m.x1680 == 0)
m.e2521 = Constraint(expr= m.x1289 - m.x1737 - m.x1741 == 0)
m.e2522 = Constraint(expr= m.x1290 - m.x1738 - m.x1742 == 0)
m.e2523 = Constraint(expr= m.x1291 - m.x1739 - m.x1743 == 0)
m.e2524 = Constraint(expr= m.x1292 - m.x1740 - m.x1744 == 0)
m.e2525 = Constraint(expr= m.x1673 - 3.5 * m.b2277 <= 0)
m.e2526 = Constraint(expr= m.x1674 - 3.5 * m.b2278 <= 0)
m.e2527 = Constraint(expr= m.x1675 - 3.5 * m.b2279 <= 0)
m.e2528 = Constraint(expr= m.x1676 - 3.5 * m.b2280 <= 0)
m.e2529 = Constraint(expr= m.x1677 + 3.5 * m.b2277 <= 3.5)
m.e2530 = Constraint(expr= m.x1678 + 3.5 * m.b2278 <= 3.5)
m.e2531 = Constraint(expr= m.x1679 + 3.5 * m.b2279 <= 3.5)
m.e2532 = Constraint(expr= m.x1680 + 3.5 * m.b2280 <= 3.5)
m.e2533 = Constraint(expr= m.x1737 - 2.1 * m.b2277 <= 0)
m.e2534 = Constraint(expr= m.x1738 - 2.1 * m.b2278 <= 0)
m.e2535 = Constraint(expr= m.x1739 - 2.1 * m.b2279 <= 0)
m.e2536 = Constraint(expr= m.x1740 - 2.1 * m.b2280 <= 0)
m.e2537 = Constraint(expr= m.x1741 + 2.1 * m.b2277 <= 2.1)
m.e2538 = Constraint(expr= m.x1742 + 2.1 * m.b2278 <= 2.1)
m.e2539 = Constraint(expr= m.x1743 + 2.1 * m.b2279 <= 2.1)
m.e2540 = Constraint(expr= m.x1744 + 2.1 * m.b2280 <= 2.1)
m.e2541 = Constraint(expr= (m.x1745 / (0.001 + 0.999 * m.b2281) - 1.1 * log(
 m.x1681 / (0.001 + 0.999 * m.b2281) + 1)) * (0.001 + 0.999 * m.b2281) <= 0)
m.e2542 = Constraint(expr= (m.x1746 / (0.001 + 0.999 * m.b2282) - 1.1 * log(
 m.x1682 / (0.001 + 0.999 * m.b2282) + 1)) * (0.001 + 0.999 * m.b2282) <= 0)
m.e2543 = Constraint(expr= (m.x1747 / (0.001 + 0.999 * m.b2283) - 1.1 * log(
 m.x1683 / (0.001 + 0.999 * m.b2283) + 1)) * (0.001 + 0.999 * m.b2283) <= 0)
m.e2544 = Constraint(expr= (m.x1748 / (0.001 + 0.999 * m.b2284) - 1.1 * log(
 m.x1684 / (0.001 + 0.999 * m.b2284) + 1)) * (0.001 + 0.999 * m.b2284) <= 0)
m.e2545 = Constraint(expr= m.x1685 == 0)
m.e2546 = Constraint(expr= m.x1686 == 0)
m.e2547 = Constraint(expr= m.x1687 == 0)
m.e2548 = Constraint(expr= m.x1688 == 0)
m.e2549 = Constraint(expr= m.x1749 == 0)
m.e2550 = Constraint(expr= m.x1750 == 0)
m.e2551 = Constraint(expr= m.x1751 == 0)
m.e2552 = Constraint(expr= m.x1752 == 0)
m.e2553 = Constraint(expr= m.x1269 - m.x1681 - m.x1685 == 0)
m.e2554 = Constraint(expr= m.x1270 - m.x1682 - m.x1686 == 0)
m.e2555 = Constraint(expr= m.x1271 - m.x1683 - m.x1687 == 0)
m.e2556 = Constraint(expr= m.x1272 - m.x1684 - m.x1688 == 0)
m.e2557 = Constraint(expr= m.x1293 - m.x1745 - m.x1749 == 0)
m.e2558 = Constraint(expr= m.x1294 - m.x1746 - m.x1750 == 0)
m.e2559 = Constraint(expr= m.x1295 - m.x1747 - m.x1751 == 0)
m.e2560 = Constraint(expr= m.x1296 - m.x1748 - m.x1752 == 0)
m.e2561 = Constraint(expr= m.x1681 - 3.5 * m.b2281 <= 0)
m.e2562 = Constraint(expr= m.x1682 - 3.5 * m.b2282 <= 0)
m.e2563 = Constraint(expr= m.x1683 - 3.5 * m.b2283 <= 0)
m.e2564 = Constraint(expr= m.x1684 - 3.5 * m.b2284 <= 0)
m.e2565 = Constraint(expr= m.x1685 + 3.5 * m.b2281 <= 3.5)
m.e2566 = Constraint(expr= m.x1686 + 3.5 * m.b2282 <= 3.5)
m.e2567 = Constraint(expr= m.x1687 + 3.5 * m.b2283 <= 3.5)
m.e2568 = Constraint(expr= m.x1688 + 3.5 * m.b2284 <= 3.5)
m.e2569 = Constraint(expr= m.x1745 - 1.6544851364539 * m.b2281 <= 0)
m.e2570 = Constraint(expr= m.x1746 - 1.6544851364539 * m.b2282 <= 0)
m.e2571 = Constraint(expr= m.x1747 - 1.6544851364539 * m.b2283 <= 0)
m.e2572 = Constraint(expr= m.x1748 - 1.6544851364539 * m.b2284 <= 0)
m.e2573 = Constraint(expr= m.x1749 + 1.6544851364539 * m.b2281
 <= 1.6544851364539)
m.e2574 = Constraint(expr= m.x1750 + 1.6544851364539 * m.b2282
 <= 1.6544851364539)
m.e2575 = Constraint(expr= m.x1751 + 1.6544851364539 * m.b2283
 <= 1.6544851364539)
m.e2576 = Constraint(expr= m.x1752 + 1.6544851364539 * m.b2284
 <= 1.6544851364539)
m.e2577 = Constraint(expr= -0.9 * m.x1693 + m.x1825 == 0)
m.e2578 = Constraint(expr= -0.9 * m.x1694 + m.x1826 == 0)
m.e2579 = Constraint(expr= -0.9 * m.x1695 + m.x1827 == 0)
m.e2580 = Constraint(expr= -0.9 * m.x1696 + m.x1828 == 0)
m.e2581 = Constraint(expr= -m.x1769 + m.x1825 == 0)
m.e2582 = Constraint(expr= -m.x1770 + m.x1826 == 0)
m.e2583 = Constraint(expr= -m.x1771 + m.x1827 == 0)
m.e2584 = Constraint(expr= -m.x1772 + m.x1828 == 0)
m.e2585 = Constraint(expr= m.x1701 == 0)
m.e2586 = Constraint(expr= m.x1702 == 0)
m.e2587 = Constraint(expr= m.x1703 == 0)
m.e2588 = Constraint(expr= m.x1704 == 0)
m.e2589 = Constraint(expr= m.x1773 == 0)
m.e2590 = Constraint(expr= m.x1774 == 0)
m.e2591 = Constraint(expr= m.x1775 == 0)
m.e2592 = Constraint(expr= m.x1776 == 0)
m.e2593 = Constraint(expr= m.x1829 == 0)
m.e2594 = Constraint(expr= m.x1830 == 0)
m.e2595 = Constraint(expr= m.x1831 == 0)
m.e2596 = Constraint(expr= m.x1832 == 0)
m.e2597 = Constraint(expr= m.x1273 - m.x1693 - m.x1701 == 0)
m.e2598 = Constraint(expr= m.x1274 - m.x1694 - m.x1702 == 0)
m.e2599 = Constraint(expr= m.x1275 - m.x1695 - m.x1703 == 0)
m.e2600 = Constraint(expr= m.x1276 - m.x1696 - m.x1704 == 0)
m.e2601 = Constraint(expr= m.x1305 - m.x1769 - m.x1773 == 0)
m.e2602 = Constraint(expr= m.x1306 - m.x1770 - m.x1774 == 0)
m.e2603 = Constraint(expr= m.x1307 - m.x1771 - m.x1775 == 0)
m.e2604 = Constraint(expr= m.x1308 - m.x1772 - m.x1776 == 0)
m.e2605 = Constraint(expr= m.x1337 - m.x1825 - m.x1829 == 0)
m.e2606 = Constraint(expr= m.x1338 - m.x1826 - m.x1830 == 0)
m.e2607 = Constraint(expr= m.x1339 - m.x1827 - m.x1831 == 0)
m.e2608 = Constraint(expr= m.x1340 - m.x1828 - m.x1832 == 0)
m.e2609 = Constraint(expr= m.x1693 - 1.43746550029693 * m.b2285 <= 0)
m.e2610 = Constraint(expr= m.x1694 - 1.43746550029693 * m.b2286 <= 0)
m.e2611 = Constraint(expr= m.x1695 - 1.43746550029693 * m.b2287 <= 0)
m.e2612 = Constraint(expr= m.x1696 - 1.43746550029693 * m.b2288 <= 0)
| 
 # will find the scale automatically by the minimum and maximum
 # values in the field
 # contour_lim: A two-element array or tuple containing the lower and upper bounds for the contour lines
 # levels: The levels to draw contours for, if an int then they will be evenly spaced between clim[0] and clim[1].
 # Can also be a list/array of levels in increasing order
 # colors: Matplotlib colour string or a list of colours to give each contour their own colour, only used if use_cmap is False
 # linestyles: The linestyles to draw the contours with, a string if all contours should use the same linestyle or a list of strings for different linestyles
 # use_scalar: True if it should plot the scalar field
 # use_contour: True if it should plot the contours
 def make_video(self, Name, FPS = 30, figsize = np.array([10., 10.]), dpi = 100, extent = [0, 1, 0, 1], scale = default_scale, cmap = "coolwarm", clim = None, contour_lim = None, levels = 10, colors = "black", linestyles = "solid", use_scalar = True, use_contour = False):
 # Save data
 self.extent = extent
 self.scale = scale
 self.cmap = cmap
 self.clim = clim
 self.contour_lim = contour_lim
 self.levels = levels
 self.linestyles = linestyles
 self.colors = colors
 self.use_scalar = use_scalar
 self.use_contour = use_contour
 # Make the video
 super().make_video(Name, FPS = FPS, figsize = figsize, dpi = dpi)
 
 # Creates the first frame of a video
 #
 # t: The timestamp of the frame
 # Data: The data for the frame
 def start_video(self, t, Data):
 # Plot the scalar
 if self.use_scalar is True:
 self.video.plot_scalar(Data, extent = self.extent, scale = self.scale, cmap = self.cmap, clim = self.clim)
 
 # Plot the contours
 if self.use_contour is True:
 self.video.plot_contour(Data, extent = self.extent, levels = self.levels, scale = self.scale, colors = self.colors, use_cmap = False, linestyles = self.linestyles, clim = self.contour_lim)
 
 # Create the next frame of the video
 #
 # t: The timestamp of the frame
 # Data: The data for the frame
 def update_video(self, t, Data):
 # Update the scalar
 if self.use_scalar is True:
 self.video.update_scalar(Data)
 
 # Plot the contours
 if self.use_contour is True:
 self.video.update_contour(Data)
 
 # Plots the scalar field at some time
 #
 # t: The time from which to take the data, it will find the data closest to this time
 # extent: Used to label the axes; must be given as [x_min, x_max, y_min, y_max]
 # scale: Function to scale the values of the field
 # ax: The axes to draw the plot inside
 # fig: The figure to draw in, if given then ax must also be given
 # figsize: The size of the figure if ax is not given
 # dpi: The resolution of the figure if ax is not given
 # cmap: The colour map to plot the scalar field with
 # clim: Array containing the (min, max) values in the colour map, these are the raw values of the field,
 # not the scaled values, if None then it will find the scale automatically by the minimum and maximum
 # values in the field
 # contour_lim: A two-element array or tuple containing the lower and upper bounds for the contour lines
 # levels: The levels to draw contours for, if an int then they will be evenly spaced between clim[0] and clim[1].
 # Can also be a list/array of levels in increasing order
 # colors: Matplotlib colour string or a list of colours to give each contour their own colour, only used if use_cmap is False
 # linestyles: The linestyles to draw the contours with, a string if all contours should use the same linestyle or a list of strings for different linestyles
 # use_scalar: True if it should plot the scalar field
 # use_contour: True if it should plot the contours
 def plot(self, t, extent = [0, 1, 0, 1], scale = default_scale, fig = None, ax = None, figsize = np.array([10., 10.]), dpi = 100, cmap = "coolwarm", clim = None, contour_lim = None, levels = 10, colors = "black", linestyles = "solid", use_scalar = True, use_contour = False):
 # Find the correct data
 Dist = np.abs(np.array(self.t) - t)
 Pos = np.argmin(Dist)
 
 Data = self.data[Pos]
 Plot1 = None
 Plot2 = None
 
 # Plot the data
 if use_scalar is True:
 fig, ax, Plot1 = plot_scalar(Data, extent = extent, scale = scale, fig = fig, ax = ax, figsize = figsize, dpi = dpi, cmap = cmap, clim = clim)
 if use_contour is True:
 fig, ax, Plot2 = plot_contour(Data, extent = extent, levels = levels, fig = fig, ax = ax, figsize = figsize, dpi = dpi, clim = contour_lim, colors = colors, use_cmap = False, linestyles = linestyles)
 return fig, ax, (Plot1, Plot2)
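# Illustrative call sequence for the scalar-field sampler above (added for
# clarity; not part of the original file). `sampler` stands for an instance of
# the class whose make_video() and plot() methods are defined above, and the
# file name and time below are hypothetical:
#
#   sampler.make_video("field.mp4", FPS=30, use_scalar=True,
#                      use_contour=True, levels=10)
#   fig, ax, plots = sampler.plot(t=0.5, extent=[0, 1, 0, 1], use_scalar=True)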
# A sampler which samples a vector field in 2D
#
# Sim: The simulation to sample from, it will automatically add this sampler to the sim
# Points: numpy array of all the points to sample from, the x,y,z-coordinates are in the first axis
# x_hat: The x direction, should have unit norm, it should have a shape of type 
# Points.shape + (3,) or (3,) for constant vectors.
# y_hat: The y direction, should have unit norm, it should have a shape of type 
# Points.shape + (3,) or (3,) for constant vectors, it should be the same shape as for x_hat
class sampler_field_vector(sampler_field):
 def __init__(self, Sim, Points, x_hat, y_hat):
 # Collect the hats
 hat = np.append(x_hat.reshape(x_hat.shape + (1,)), y_hat.reshape(y_hat.shape + (1,)), axis = -1)
 
 super().__init__(Sim, Points, hat = hat, single = False)
 # Creates a video using the data it has sampled
 #
 # Name: The name of the video file to be saved
 # FPS: How many frames per second the video should have
 # figsize: The size of the figure in inches
 # dpi: The resolution of the figure
 # extent: Used to label the axes; must be given as [x_min, x_max, y_min, y_max]
 # scale: Function to scale the values of the field
 # cmap: The colour map to plot the scalar field with
 # clim: Array containing the (min, max) values in the colour map, these are the raw values of the field,
 # not the scaled values, if None then it will find the scale automatically by the minimum and maximum
 # values in the field
 # cutoff: Vectors shorter than cutoff times the length of the longest vector are not shown
 # density: How many stream lines should be drawn
 # length: The minimum length of the stream lines (In some scaled coordinates)
 # use_vector: True if it should plot the vector field
 # use_streams: True if it should plot the streamlines
 def make_video(self, Name, FPS = 30, figsize = np.array([10., 10.]), dpi = 100, extent = [0, 1, 0, 1], scale = default_scale, cmap = "coolwarm", clim = None, cutoff = 0, density = 1, length = 1, use_vector = True, use_streams = False):
 # Save the data
 self.extent = extent
 self.scale = scale
 self.cmap = cmap
 self.clim = clim
 self.cutoff = cutoff
 self.density = density
 self.length = length
 self.use_vector = use_vector
 self.use_streams = use_streams
 # Make the video
 super().make_video(Name, FPS = FPS, figsize = figsize, dpi = dpi)
 
 # Creates the first frame of a video
 #
 # t: The timestamp of the frame
 # Data: The data for the frame
 def start_video(self, t, Data):
 # Plot the data
 if self.use_vector is True:
 self.video.plot_vector(Data[:, :, 0], Data[:, :, 1], extent = self.extent, scale = self.scale, cmap = self.cmap, clim = self.clim, cutoff = self.cutoff)
 
 if self.use_streams is True:
 self.video.plot_streams(Data[:, :, 0], Data[:, :, 1], extent = self.extent, scale = self.scale, cmap = self.cmap, clim = self.clim, density = self.density, length = self.length)
 
 # Create
| 
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import json
import re
import string
import textwrap
import traceback
from recipe_engine.types import freeze
RESULTS_URL = 'https://chromeperf.appspot.com'
class TestOptions(object):
 """Abstracts command line flags to be passed to the test."""
 def __init__(self, repeat_count=None, test_filter=None, run_disabled=False,
 retry_limit=None):
 """Construct a TestOptions object with immutable attributes.
 Args:
 repeat_count - how many times to run each test
 test_filter - a list of tests, e.g.
 ['suite11.test1',
 'suite12.test2']
 run_disabled - whether to run tests that have been disabled.
 retry_limit - how many times to retry a test until getting a pass.
 """
 self._test_filter = freeze(test_filter)
 self._repeat_count = repeat_count
 self._run_disabled = run_disabled
 self._retry_limit = retry_limit
 @property
 def repeat_count(self):
 return self._repeat_count
 @property
 def run_disabled(self):
 return self._run_disabled
 @property
 def retry_limit(self):
 return self._retry_limit
 @property
 def test_filter(self):
 return self._test_filter
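# Illustrative only (added for clarity; not part of the original recipe
# module). The values below are hypothetical and simply exercise the
# constructor documented above, then read the immutable attributes back
# through the properties.
def _example_test_options():
  options = TestOptions(
      repeat_count=3,
      test_filter=['suite11.test1', 'suite12.test2'],
      run_disabled=False,
      retry_limit=1)
  return options.repeat_count, options.test_filter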
class Test(object):
 """
 Base class for tests that can be retried after deapplying a previously
 applied patch.
 """
 def __init__(self, waterfall_mastername=None, waterfall_buildername=None):
 """
 Args:
 waterfall_mastername (str): Matching waterfall buildbot master name.
 This value would be different from trybot master name.
 waterfall_buildername (str): Matching waterfall buildbot builder name.
 This value would be different from trybot builder name.
 """
 super(Test, self).__init__()
 self._test_runs = {}
 self._waterfall_mastername = waterfall_mastername
 self._waterfall_buildername = waterfall_buildername
 self._test_options = None
 @property
 def test_options(self):
 return self._test_options or TestOptions()
 @test_options.setter
 def test_options(self, value): # pragma: no cover
 raise NotImplementedError(
 'This test %s does not support test options objects yet' % type(self))
 @property
 def abort_on_failure(self):
 """If True, abort build when test fails."""
 return False
 @property
 def name(self): # pragma: no cover
 """Name of the test."""
 raise NotImplementedError()
 def isolate_target(self, _api):
 """Returns isolate target name. Defaults to name.
 The _api is here in case classes want to use api information to alter the
 isolation target.
 """
 return self.name # pragma: no cover
 @staticmethod
 def compile_targets(api):
 """List of compile targets needed by this test."""
 raise NotImplementedError() # pragma: no cover
 def pre_run(self, api, suffix): # pragma: no cover
 """Steps to execute before running the test."""
 return []
 def run(self, api, suffix): # pragma: no cover
 """Run the test. suffix is 'with patch' or 'without patch'."""
 raise NotImplementedError()
 def post_run(self, api, suffix): # pragma: no cover
 """Steps to execute after running the test."""
 return []
 def has_valid_results(self, api, suffix): # pragma: no cover
 """
 Returns True if results (failures) are valid.
 This makes it possible to distinguish between the case of no failures
 and the test failing to even report its results in machine-readable
 format.
 """
 raise NotImplementedError()
 def failures(self, api, suffix): # pragma: no cover
 """Return list of failures (list of strings)."""
 raise NotImplementedError()
 @property
 def uses_swarming(self):
 """Returns true if the test uses swarming."""
 return False
 @property
 def uses_local_devices(self):
 return False # pragma: no cover
 def _step_name(self, suffix):
 """Helper to uniformly combine tests's name with a suffix."""
 if not suffix:
 return self.name
 return '%s (%s)' % (self.name, suffix)
class ArchiveBuildStep(Test):
 def __init__(self, gs_bucket, gs_acl=None):
 self.gs_bucket = gs_bucket
 self.gs_acl = gs_acl
 def run(self, api, suffix):
 return api.chromium.archive_build(
 'archive build',
 self.gs_bucket,
 gs_acl=self.gs_acl,
 )
 @staticmethod
 def compile_targets(_):
 return []
class SizesStep(Test):
 def __init__(self, results_url, perf_id):
 self.results_url = results_url
 self.perf_id = perf_id
 def run(self, api, suffix):
 return api.chromium.sizes(self.results_url, self.perf_id)
 @staticmethod
 def compile_targets(_):
 return ['chrome']
 @property
 def name(self):
 return 'sizes' # pragma: no cover
 def has_valid_results(self, api, suffix):
 # TODO(sebmarchand): implement this function as well as the
 # |failures| one.
 return True
 def failures(self, api, suffix):
 return []
class ScriptTest(Test): # pylint: disable=W0232
 """
 Test which uses logic from script inside chromium repo.
 This makes it possible to keep the logic src-side as opposed
 to the build repo most Chromium developers are unfamiliar with.
 Another advantage is being able to test changes to these scripts
 on trybots.
 All new tests are strongly encouraged to use this infrastructure.
 """
 def __init__(self, name, script, all_compile_targets, script_args=None,
 override_compile_targets=None,
 waterfall_mastername=None, waterfall_buildername=None):
 super(ScriptTest, self).__init__(
 waterfall_mastername=waterfall_mastername,
 waterfall_buildername=waterfall_buildername)
 self._name = name
 self._script = script
 self._all_compile_targets = all_compile_targets
 self._script_args = script_args
 self._override_compile_targets = override_compile_targets
 @property
 def name(self):
 return self._name
 def compile_targets(self, api):
 if self._override_compile_targets:
 return self._override_compile_targets
 try:
 substitutions = {'name': self._name}
 return [string.Template(s).safe_substitute(substitutions)
 for s in self._all_compile_targets[self._script]]
 except KeyError: # pragma: no cover
 # There are internal recipes that appear to configure
 # test script steps but do not provide test data for them.
 # We work around this by returning a default value in that case,
 # but those recipes should be updated to not do this.
 # We mark this as pragma: no cover since the public recipes
 # will not exercise this block.
 #
 # TODO(phajdan.jr): Revisit this when all script tests
 # lists move src-side. We should be able to provide
 # test data then.
 if api.chromium._test_data.enabled:
 return []
 raise
 def run(self, api, suffix):
 name = self.name
 if suffix:
 name += ' (%s)' % suffix
 run_args = []
 if suffix == 'without patch':
 run_args.extend([
 '--filter-file', api.json.input(self.failures(api, 'with patch'))
 ]) # pragma: no cover
 try:
 script_args = []
 if self._script_args:
 script_args = ['--args', api.json.input(self._script_args)]
 api.python(
 name,
 # Enforce that all scripts are in the specified directory
 # for consistency.
 api.path['checkout'].join(
 'testing', 'scripts', api.path.basename(self._script)),
 args=(api.chromium_tests.get_common_args_for_scripts() +
 script_args +
 ['run', '--output', api.json.output()] +
 run_args),
 step_test_data=lambda: api.json.test_api.output(
 {'valid': True, 'failures': []}))
 finally:
 self._test_runs[suffix] = api.step.active_result
 if self.has_valid_results(api, suffix):
 self._test_runs[suffix].presentation.step_text += (
 api.test_utils.format_step_text([
 ['failures:', self.failures(api, suffix)]
 ]))
 return self._test_runs[suffix]
 def has_valid_results(self, api, suffix):
 try:
 # Make sure the JSON includes all necessary data.
 self.failures(api, suffix)
 return self._test_runs[suffix].json.output['valid']
 except Exception: # pragma: no cover
 return False
 def failures(self, api, suffix):
 return self._test_runs[suffix].json.output['failures']
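# Sketch (assumption, not recipe API): the JSON document the src-side script is
# expected to emit on --output, and a tiny validator for that shape. The field
# names 'valid' and 'failures' come from ScriptTest above; the helper name is
# hypothetical.
def _is_valid_script_result(result):
  """Return True if result looks like {'valid': bool, 'failures': [str, ...]}."""
  return (isinstance(result, dict)
          and isinstance(result.get('valid'), bool)
          and isinstance(result.get('failures'), list))
# Example: _is_valid_script_result({'valid': True, 'failures': []}) -> True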
class LocalGTestTest(Test):
 def __init__(self, name, args=None, target_name=None, use_isolate=False,
 revision=None, webkit_revision=None,
 android_shard_timeout=None, android_tool=None,
 override_compile_targets=None, override_isolate_target=None,
 use_xvfb=True, waterfall_mastername=None,
 waterfall_buildername=None, **runtest_kwargs):
 """Constructs an instance of LocalGTestTest.
 Args:
 name: Displayed name of the test. May be modified by suffixes.
 args: Arguments to be passed to the test.
 target_name: Actual name of the test. Defaults to name.
 use_isolate: When set, uses api.isolate.runtest to invoke the test.
 Calling recipe should have isolate in their DEPS.
 revision: Revision of the Chrome checkout.
 webkit_revision: Revision of the WebKit checkout.
 override_compile_targets: List of compile targets for this test
 (for tests that don't follow target naming conventions).
 override_isolate_target: List of isolate targets for this test
 (for tests that don't follow target naming conventions).
 use_xvfb: whether to use the X virtual frame buffer. Only has an
 effect on Linux. Defaults to True. Mostly harmless to
 specify this, except on GPU bots.
 runtest_kwargs: Additional keyword args forwarded to the runtest.
 """
 super(LocalGTestTest, self).__init__(
 waterfall_mastername=waterfall_mastername,
 waterfall_buildername=waterfall_buildername)
 self._name = name
 self._args = args or []
 self._target_name = target_name
 self._use_isolate = use_isolate
 self._revision = revision
 self._webkit_revision = webkit_revision
 self._android_shard_timeout = android_shard_timeout
 self._android_tool = android_tool
 self._override_compile_targets = override_compile_targets
 self._override_isolate_target = override_isolate_target
 self._use_xvfb = use_xvfb
 self._runtest_kwargs = runtest_kwargs
 self._gtest_results = {}
 @Test.test_options.setter
 def test_options(self, value):
 self._test_options = value
 @property
 def name(self):
 return self._name
 @property
 def target_name(self):
 return self._target_name or self._name
 @property
 def uses_local_devices(self):
 return True # pragma: no cover
 def isolate_target(self, _api): # pragma: no cover
 if self._override_isolate_target:
 return self._override_isolate_target
 return self.target_name
 def compile_targets(self, api):
 # TODO(phajdan.jr): clean up override_compile_targets (remove or cover).
 if self._override_compile_targets: # pragma: no cover
 return self._override_compile_targets
 return [self.target_name]
 def run(self, api, suffix):
 # Copy the list because run can be invoked multiple times and we modify
 # the local copy.
 args = self._args[:]
 is_android = api.chromium.c.TARGET_PLATFORM == 'android'
 options = self.test_options
 test_filter = options.test_filter
 if suffix == 'without patch':
 test_filter = self.failures(api, 'with patch')
 kwargs = {}
 if test_filter and is_android: # pragma: no cover
 kwargs['gtest_filter'] = ':'.join(test_filter)
 test_filter = None
 # We pass a local test_filter variable to override the immutable
 # options.test_filter, in case test_filter was modified in the suffix ==
 # without patch clause above.
 args = GTestTest.args_from_options(api, args, self,
 override_test_filter=test_filter)
 gtest_results_file = api.test_utils.gtest_results(add_json_log=False)
 step_test_data = lambda: api.test_utils.test_api.canned_gtest_output(True)
 kwargs['name'] = self._step_name(suffix)
 kwargs['args'] = args
 kwargs['step_test_data'] = step_test_data
 if is_android:
 kwargs['json_results_file'] = gtest_results_file
 kwargs['shard_timeout'] = self._android_shard_timeout
 kwargs['tool'] = self._android_tool
 else:
 kwargs['xvfb'] = self._use_xvfb
 kwargs['test_type'] = self.name
 kwargs['annotate'] = 'gtest'
 kwargs['test_launcher_summary_output'] = gtest_results_file
 kwargs.update(self._runtest_kwargs)
 try:
 if is_android:
 api.chromium_android.run_test_suite(self.target_name,
import xml.etree.ElementTree as ET
import os
from PIL import Image
import pathlib
import shutil
import base64
from tkinter import filedialog
import pandas as pd
from tkinter import messagebox
import sqlite3
import xlsxwriter
import collections.abc as byteobj
from pandas import ExcelWriter
class XML_Interface():
 # table_index_dict contains data for ALL question types
 def __init__(self, DBI, table_dict, table_index_list, table_index_dict):
 self.DBI = DBI
 self.table_index_list = table_index_list
 self.table_index_dict = table_index_dict
 self.table_dict = table_dict
 self.DBI.subscribe(self.update_xml)
 # Forced Values
 # question_test / question_pool
 # = "question_pool"
 self.number_of_entrys = []
 self.pool_qpl_file_path_template = ""
 self.pool_qpl_file_path_output = ""
 self.qpl_file_path = ""
 #######
 print("\n")
 print("\n")
 print("\n")
 
 ############### Path declarations
 # Path of the project and the modules
 self.project_root_path = pathlib.Path().absolute()
 self.formelfrage_files_path = os.path.normpath(os.path.join(self.project_root_path, "ILIAS-Formelfrage"))
 self.singlechoice_files_path = os.path.normpath(os.path.join(self.project_root_path, "ILIAS-SingleChoice"))
 self.multiplechoice_files_path = os.path.normpath(os.path.join(self.project_root_path, "ILIAS-MultipleChoice"))
 self.zuordnungsfrage_files_path = os.path.normpath(os.path.join(self.project_root_path, "ILIAS-Zuordnungsfrage"))
 self.gemischte_fragentypen_files_path = os.path.normpath(os.path.join(self.project_root_path, "ILIAS-Gemischte_Fragentypen"))
 self.formelfrage_files_path_pool_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_pool_abgabe"))
 self.singlechoice_files_path_pool_output = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_ilias_pool_abgabe"))
 self.multiplechoice_files_path_pool_output = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_ilias_pool_abgabe"))
 self.zuordnungsfrage_files_path_pool_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_ilias_pool_abgabe"))
 self.gemischte_fragentypen_files_path_pool_output = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_ilias_pool_abgabe"))
 # Path for ILIAS pool files (for upload to ILIAS)
 # The paths for the qti.xml and qpl.xml are only determined at runtime.
 #### Check completed
 ##### Paths for Formelfrage #############################
 # Path for ILIAS test template
 self.formelfrage_test_qti_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__qti__.xml"))
 self.formelfrage_test_tst_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__tst__.xml"))
 # Path for ILIAS test files (for upload to ILIAS)
 self.formelfrage_test_qti_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__qti_2040314.xml"))
 self.formelfrage_test_tst_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__tst_2040314.xml"))
 self.formelfrage_test_img_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_test_abgabe", "1604407426__0__tst_2040314", "objects"))
 # Path for ILIAS pool template
 self.formelfrage_pool_qti_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qti__.xml"))
 self.formelfrage_pool_qpl_file_path_template = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qpl__.xml"))
 ##### Paths for singlechoice #############################
 # Path for ILIAS test template
 self.singlechoice_test_qti_file_path_template = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__qti__.xml"))
 self.singlechoice_test_tst_file_path_template = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__tst__.xml"))
 # Path for ILIAS test files (for upload to ILIAS)
 self.singlechoice_test_qti_file_path_output = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__qti_2040314.xml"))
 self.singlechoice_test_tst_file_path_output = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__tst_2040314.xml"))
 self.singlechoice_test_img_file_path_output = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_ilias_test_abgabe", "1604407426__0__tst_2040314", "objects"))
 # Path for ILIAS pool template
 self.singlechoice_pool_qti_file_path_template = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qti__.xml"))
 self.singlechoice_pool_qpl_file_path_template = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qpl__.xml"))
 
 ##### Paths for multiplechoice #############################
 # Path for ILIAS test template
 self.multiplechoice_test_qti_file_path_template = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__qti__.xml"))
 self.multiplechoice_test_tst_file_path_template = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__tst__.xml"))
 # Path for ILIAS test files (for upload to ILIAS)
 self.multiplechoice_test_qti_file_path_output = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__qti_2040314.xml"))
 self.multiplechoice_test_tst_file_path_output = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__tst_2040314.xml"))
 self.multiplechoice_test_img_file_path_output = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_ilias_test_abgabe", "1604407426__0__tst_2040314", "objects"))
 # Path for ILIAS pool template
 self.multiplechoice_pool_qti_file_path_template = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qti__.xml"))
 self.multiplechoice_pool_qpl_file_path_template = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qpl__.xml"))
 
 
 ##### Paths for zuordnungsfrage #############################
 # Path for ILIAS test template
 self.zuordnungsfrage_test_qti_file_path_template = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__qti__.xml"))
 self.zuordnungsfrage_test_tst_file_path_template = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__tst__.xml"))
 # Path for ILIAS test files (for upload to ILIAS)
 self.zuordnungsfrage_test_qti_file_path_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__qti_2040314.xml"))
 self.zuordnungsfrage_test_tst_file_path_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__tst_2040314.xml"))
 self.zuordnungsfrage_test_img_file_path_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_ilias_test_abgabe", "1604407426__0__tst_2040314", "objects"))
 # Path for ILIAS pool template
 self.zuordnungsfrage_pool_qti_file_path_template = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qti__.xml"))
 self.zuordnungsfrage_pool_qpl_file_path_template = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qpl__.xml"))
 
 
 
 ##### Paths for mixed question types ####################
 # Path for ILIAS test template
 self.gemischte_fragentypen_test_qti_file_path_template = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__qti__.xml"))
 self.gemischte_fragentypen_test_tst_file_path_template = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_test_qti_und_tst_dateien_vorlage", "ilias_test_vorlage__tst__.xml"))
 # Path for ILIAS test files (for upload to ILIAS)
 self.gemischte_fragentypen_test_qti_file_path_output = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__qti_2040314.xml"))
 self.gemischte_fragentypen_test_tst_file_path_output = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_ilias_test_abgabe", "1604407426__0__tst_2040314", "1604407426__0__tst_2040314.xml"))
 self.gemischte_fragentypen_test_img_file_path_output = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_ilias_test_abgabe", "1604407426__0__tst_2040314", "objects"))
 # Path for ILIAS pool template
 self.gemischte_fragentypen_pool_qti_file_path_template = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qti__.xml"))
 self.gemischte_fragentypen_pool_qpl_file_path_template = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_pool_qti_und_qpl_dateien_vorlage", "ilias_pool_vorlage__qpl__.xml"))
 
 ############### End of path declarations
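 # Note (illustrative, derived from the literals above): ILIAS export folders
 # follow the pattern "<unix_timestamp>__0__<type>_<id>", for example
 #   1604407426__0__tst_2040314/1604407426__0__qti_2040314.xml   (test export, fixed paths)
 #   1596569820__0__qpl_<max_id>/1596569820__0__qti_<max_id>.xml (pool export)
 # The pool id (<max_id>) is only determined at runtime in create_test_or_pool().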
 def update_xml(self, db_data):
 # always receives the current state of the database, so nothing has to be passed in explicitly; get_question, add_question etc. can also be used
 self.db_data = db_data
 print("xml class is subscribed")
 def on_closing(self):
 # must cancel the subscription, otherwise this leads to problems
 self.DBI.unsubscribe(self.update_xml)  # pass the callback itself, do not call it
 print("xml class is unsubscribed")
 def create_test_or_pool(self, Profil_name, ilias_test_or_pool):
 # Fetch the data from the DB
 self.test_data = self.DBI.get_dbtemp_data()
 self.create_ilias_test_or_pool = ilias_test_or_pool
 self.qpl_file_path = ""
 self.tst_file_path = ""
 ###### Checks whether the questions to be created are all of ONE question type
 self.all_ff_questions_flag = 0
 self.all_sc_questions_flag = 0
 self.all_mc_questions_flag = 0
 self.all_mq_questions_flag = 0
 self.mixed_questions_flag = 0
 self.test_data_question_types = []
 for t in range(len(self.test_data)):
 self.test_data_question_types.append(self.test_data[t][2])
 # Counts the number of "formelfrage" entries in the test data.
 # If the count equals the length of the list, all questions have the same type.
 # "and self.test_data_question_types" guards against an empty list:
 # with no questions (and hence no question types) the count check alone would give a wrong result.
 if self.test_data_question_types.count("formelfrage") == len(self.test_data_question_types) and self.test_data_question_types:
 self.all_ff_questions_flag = 1
 # read the highest ID from the folder -- needed to create pool folders with an ascending ID
 self.max_id = XML_Interface.find_max_id_in_dir(self, self.formelfrage_files_path_pool_output, "formelfrage")
 elif self.test_data_question_types.count("singlechoice") == len(self.test_data_question_types) and self.test_data_question_types:
 self.all_sc_questions_flag = 1
 # read the highest ID from the folder -- needed to create pool folders with an ascending ID
 self.max_id = XML_Interface.find_max_id_in_dir(self, self.singlechoice_files_path_pool_output, "singlechoice")
 elif self.test_data_question_types.count("multiplechoice") == len(self.test_data_question_types) and self.test_data_question_types:
 self.all_mc_questions_flag = 1
 # read the highest ID from the folder -- needed to create pool folders with an ascending ID
 self.max_id = XML_Interface.find_max_id_in_dir(self, self.multiplechoice_files_path_pool_output, "multiplechoice")
 elif self.test_data_question_types.count("zuordnungsfrage") == len(self.test_data_question_types) and self.test_data_question_types:
 self.all_mq_questions_flag = 1
 # read the highest ID from the folder -- needed to create pool folders with an ascending ID
 self.max_id = XML_Interface.find_max_id_in_dir(self, self.zuordnungsfrage_files_path_pool_output, "zuordnungsfrage")
 else:
 self.mixed_questions_flag = 1
 self.max_id = XML_Interface.find_max_id_in_dir(self, self.gemischte_fragentypen_files_path_pool_output, "gemischte_fragentypen")
 
 self.ilias_id_pool_qpl_dir = "1596569820__0__qpl_" + self.max_id
 self.ilias_id_pool_qpl_xml = "1596569820__0__qpl_" + self.max_id + ".xml"
 self.ilias_id_pool_qti_xml = "1596569820__0__qti_" + self.max_id + ".xml"
 self.formelfrage_pool_qti_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qti_xml))
 self.formelfrage_pool_qpl_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qpl_xml))
 self.formelfrage_pool_img_file_path_output = os.path.normpath(os.path.join(self.formelfrage_files_path, "ff_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), "objects"))
 self.singlechoice_pool_qti_file_path_output = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qti_xml))
 self.singlechoice_pool_qpl_file_path_output = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qpl_xml))
 self.singlechoice_pool_img_file_path_output = os.path.normpath(os.path.join(self.singlechoice_files_path, "sc_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), "objects"))
 
 self.multiplechoice_pool_qti_file_path_output = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qti_xml))
 self.multiplechoice_pool_qpl_file_path_output = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qpl_xml))
 self.multiplechoice_pool_img_file_path_output = os.path.normpath(os.path.join(self.multiplechoice_files_path, "mc_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), "objects"))
 
 self.zuordnungsfrage_pool_qti_file_path_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qti_xml))
 self.zuordnungsfrage_pool_qpl_file_path_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qpl_xml))
 self.zuordnungsfrage_pool_img_file_path_output = os.path.normpath(os.path.join(self.zuordnungsfrage_files_path, "mq_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), "objects"))
 self.gemischte_fragentypen_pool_qti_file_path_output = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qti_xml))
 self.gemischte_fragentypen_pool_qpl_file_path_output = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), self.ilias_id_pool_qpl_xml))
 self.gemischte_fragentypen_pool_img_file_path_output = os.path.normpath(os.path.join(self.gemischte_fragentypen_files_path, "mixed_ilias_pool_abgabe", "1596569820__0__qpl_" + str(self.max_id), "objects"))
 # When a test is created, the path is fixed.
 # For a pool, the ID is incremented.
 if self.create_ilias_test_or_pool == "ilias_test":
 if self.all_ff_questions_flag == 1:
 self.qti_file_path_output = self.formelfrage_test_qti_file_path_output
 self.img_file_path_output = self.formelfrage_test_img_file_path_output
 self.ff_mytree = ET.parse(self.formelfrage_test_qti_file_path_template)
 self.tst_file_path = self.formelfrage_test_tst_file_path_output
 
 elif self.all_sc_questions_flag == 1:
 self.qti_file_path_output = self.singlechoice_test_qti_file_path_output
 self.img_file_path_output = self.singlechoice_test_img_file_path_output
 self.ff_mytree = ET.parse(self.singlechoice_test_qti_file_path_template)
 self.tst_file_path = self.singlechoice_test_tst_file_path_output
 
 elif self.all_mc_questions_flag == 1:
 self.qti_file_path_output = self.multiplechoice_test_qti_file_path_output
 self.img_file_path_output = self.multiplechoice_test_img_file_path_output
 self.ff_mytree = ET.parse(self.multiplechoice_test_qti_file_path_template)
 self.tst_file_path = self.multiplechoice_test_tst_file_path_output
 elif self.all_mq_questions_flag == 1:
 self.qti_file_path_output = self.zuordnungsfrage_test_qti_file_path_output
 self.ff_mytree = ET.parse(self.zuordnungsfrage_test_qti_file_path_template)
 self.img_file_path_output = self.zuordnungsfrage_test_img_file_path_output
 self.tst_file_path = self.zuordnungsfrage_test_tst_file_path_output
 
 else:
 self.qti_file_path_output = self.gemischte_fragentypen_test_qti_file_path_output
 self.img_file_path_output = self.gemischte_fragentypen_test_img_file_path_output
 self.ff_mytree = ET.parse(self.gemischte_fragentypen_test_qti_file_path_template)
 self.tst_file_path = self.gemischte_fragentypen_test_tst_file_path_output
 elif self.create_ilias_test_or_pool == "ilias_pool":
 if self.all_ff_questions_flag == 1:
 print("-------------- FF ------------")
 self.qti_file_path_output = self.formelfrage_pool_qti_file_path_output
 XML_Interface.create_pool_dir_from_template(self, self.formelfrage_files_path_pool_output)
 self.ff_mytree = ET.parse(self.formelfrage_pool_qti_file_path_template)
 self.pool_qpl_file_path_template = self.formelfrage_pool_qpl_file_path_template
 self.pool_qpl_file_path_output = self.formelfrage_pool_qpl_file_path_output
 self.qpl_file_path = self.formelfrage_pool_qpl_file_path_output
 self.img_file_path_output = self.formelfrage_pool_img_file_path_output
 elif self.all_sc_questions_flag == 1:
 print("-------------- SC ------------")
 self.qti_file_path_output = self.singlechoice_pool_qti_file_path_output
 XML_Interface.create_pool_dir_from_template(self, self.singlechoice_files_path_pool_output)
 self.ff_mytree = ET.parse(self.singlechoice_pool_qti_file_path_template)
 self.pool_qpl_file_path_template = self.singlechoice_pool_qpl_file_path_template
 self.pool_qpl_file_path_output = self.singlechoice_pool_qpl_file_path_output
 self.qpl_file_path = self.singlechoice_pool_qpl_file_path_output
 self.img_file_path_output = self.singlechoice_pool_img_file_path_output
 
 elif self.all_mc_questions_flag == 1:
 print("-------------- MC ------------")
 self.qti_file_path_output = self.multiplechoice_pool_qti_file_path_output
 XML_Interface.create_pool_dir_from_template(self, self.multiplechoice_files_path_pool_output)
 self.ff_mytree = ET.parse(self.multiplechoice_pool_qti_file_path_template)
 self.pool_qpl_file_path_template = self.multiplechoice_pool_qpl_file_path_template
 self.pool_qpl_file_path_output = self.multiplechoice_pool_qpl_file_path_output
 self.qpl_file_path = self.multiplechoice_pool_qpl_file_path_output
 self.img_file_path_output = self.multiplechoice_pool_img_file_path_output
 
 elif self.all_mq_questions_flag == 1:
 print("-------------- MQ ------------")
 self.qti_file_path_output = self.zuordnungsfrage_pool_qti_file_path_output
 XML_Interface.create_pool_dir_from_template(self, self.zuordnungsfrage_files_path_pool_output)
 self.ff_mytree = ET.parse(self.zuordnungsfrage_pool_qti_file_path_template)
 self.pool_qpl_file_path_template = self.zuordnungsfrage_pool_qpl_file_path_template
 self.pool_qpl_file_path_output = self.zuordnungsfrage_pool_qpl_file_path_output
 self.qpl_file_path = self.zuordnungsfrage_pool_qpl_file_path_output
 self.img_file_path_output = self.zuordnungsfrage_pool_img_file_path_output
 else:
 print("-------------- MIXED ------------")
 self.qti_file_path_output = self.gemischte_fragentypen_pool_qti_file_path_output
 XML_Interface.create_pool_dir_from_template(self, self.gemischte_fragentypen_files_path_pool_output)
 self.ff_mytree = ET.parse(self.gemischte_fragentypen_pool_qti_file_path_template)
 self.pool_qpl_file_path_template = self.gemischte_fragentypen_pool_qpl_file_path_template
 self.pool_qpl_file_path_output = self.gemischte_fragentypen_pool_qpl_file_path_output
 self.qpl_file_path = self.gemischte_fragentypen_pool_qpl_file_path_output
 self.img_file_path_output = self.gemischte_fragentypen_pool_img_file_path_output
 self.ff_myroot = self.ff_mytree.getroot()
 self.id_nr = 0
 # Write the questions into the XML
 for i in range(len(self.test_data)):
 if self.test_data[i][2].lower() == "formelfrage":
 XML_Interface.ff_question_structure(self, self.test_data[i], self.table_index_dict, self.id_nr, self.pool_qpl_file_path_template, self.pool_qpl_file_path_output, self.img_file_path_output, False)
 if self.test_data[i][2].lower() == "singlechoice":
 XML_Interface.sc_question_structure(self, self.test_data[i], self.table_index_dict, self.id_nr, self.pool_qpl_file_path_template, self.pool_qpl_file_path_output, self.img_file_path_output)
 if self.test_data[i][2].lower() == "multiplechoice":
 XML_Interface.mc_question_structure(self, self.test_data[i], self.table_index_dict, self.id_nr, self.pool_qpl_file_path_template, self.pool_qpl_file_path_output, self.img_file_path_output)
 
 if self.test_data[i][2].lower() == "zuordnungsfrage":
 XML_Interface.mq_question_structure(self, self.test_data[i], self.table_index_dict, self.id_nr, self.pool_qpl_file_path_template, self.pool_qpl_file_path_output, self.img_file_path_output)
 if self.test_data[i][2].lower() == "formelfrage_permutation":
 XML_Interface.ff_question_structure(self, self.test_data[i], self.table_index_dict, self.id_nr, self.pool_qpl_file_path_template, self.pool_qpl_file_path_output, self.img_file_path_output, True)
 if self.create_ilias_test_or_pool == "ilias_pool":
 ###### Adjust the "qpl" file: update the file name
 self.mytree = ET.parse(self.qpl_file_path)
 self.myroot = self.mytree.getroot()
 for ident_id in self.myroot.iter('Identifier'):
 ident_id.set('Entry', "il_0_qpl_" + str(self.max_id+str(1)))
 self.mytree.write(self.qpl_file_path)
 self.id_nr += 1
 # Write the populated XML to the path "self.qti_file_path_output"
 print(self.qti_file_path_output)
 self.ff_mytree.write(self.qti_file_path_output)
 print("TEST DONE")
 XML_Interface.replace_amp_in_xml_file(self, self.qti_file_path_output)
 
 ###### FORMELFRAGE FUNCTIONS ################
 def ff_question_structure(self, test_data_list, table_index_dict, id_nr, pool_qpl_file_path_template , pool_qpl_file_path_output, img_file_path, activate_permutation):
 = [get_special(base_type).import_str]
 else:
 retval = []
 elif not msg_context.is_registered(full_msg_type):
 retval = []
 else:
 retval = ['import %s.msg'%pkg]
 iter_types = get_registered_ex(msg_context, full_msg_type).types
 for t in iter_types:
 assert t != full_msg_type, "msg [%s] has circular self-dependencies"%(full_msg_type)
 full_sub_type = "%s/%s"%(package, t)
 log("compute_import", full_msg_type, package, t)
 sub = compute_import(msg_context, package, t)
 retval.extend([x for x in sub if not x in retval])
 return retval
def compute_full_text_escaped(msg_context, spec):
 """
 Same as genmsg.compute_full_text, except that the
 resulting text is escaped to be safe for Python's triple-quote string
 quoting
 :param msg_context: context used to resolve embedded msg/srv types
 :param spec: parsed message spec, :class:`genmsg.MsgSpec`
 :returns: concatenated text for msg/srv file and embedded msg/srv types. Text will be escaped for triple-quote, ``str``
 """
 msg_definition = genmsg.compute_full_text(msg_context, spec)
 msg_definition = msg_definition.replace('"""', r'\"\"\"')
 return msg_definition
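# Minimal illustration (sketch, not part of the generator API): the escaping
# rule applied above turns every embedded '"""' into '\"\"\"' so the result
# can safely be placed inside a triple-quoted Python literal.
def _escaping_example():
    text = 'string data """embedded docs"""'
    return text.replace('"""', r'\"\"\"')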
################################################################################
# (De)serialization generators
_serial_context = ''
_context_stack = []
_counter = 0
def next_var():
 # we could optimize this by reusing vars once the context is popped
 global _counter
 _counter += 1
 return '_v%s'%_counter
def reset_var():
 global _counter
 _counter = 0
def push_context(context):
 """
 Push new variable context onto context stack. The context stack
 manages field-reference context for serialization, e.g. 'self.foo'
 vs. 'self.bar.foo' vs. 'var.foo'
 """
 global _serial_context, _context_stack
 _context_stack.append(_serial_context)
 _serial_context = context
def pop_context():
 """
 Pop variable context from context stack. The context stack manages
 field-reference context for serialization, e.g. 'self.foo'
 vs. 'self.bar.foo' vs. 'var.foo'
 """
 global _serial_context
 _serial_context = _context_stack.pop()
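# Sketch (illustrative only): how the context stack changes the prefix used
# for generated field references. A nested message pushes e.g. 'self.bar.'
# so emitted code reads 'self.bar.foo' instead of 'self.foo'.
def _context_stack_example():
    push_context('self.bar.')
    ref = _serial_context + 'foo'   # -> 'self.bar.foo'
    pop_context()
    return ref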
# These are the workhorses of the message generation. The generators
# are implemented as iterators, where each iteration value is a line
# of Python code. The generators will invoke underlying generators,
# using the context stack to manage any changes in variable-naming, so
# that code can be reused as much as possible.
def len_serializer_generator(var, is_string, serialize):
 """
 Generator for array-length serialization (32-bit, little-endian unsigned integer)
 :param var: variable name, ``str``
 :param is_string: if True, variable is a string type, ``bool``
 :param serialize: if True, generate code for
 serialization. Otherwise, generate code for deserialization, ``bool``
 """
 if serialize:
 yield "length = len(%s)"%var
 # NOTE: it's more difficult to save a call to struct.pack with
 # the array length as we are already using *array_val to pass
 # into struct.pack as *args. Although it's possible that
 # Python is not optimizing it, it is potentially worse for
 # performance to attempt to combine the two calls.
 if not is_string:
 yield int32_pack("length")
 else:
 yield "start = end"
 yield "end += 4"
 yield int32_unpack('length', 'str[start:end]') #4 = struct.calcsize('<i')
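# Sketch (illustrative only): collecting the source lines this generator yields
# for a variable-length field. int32_pack/int32_unpack are helpers defined
# elsewhere in this module; the field name is a placeholder.
def _len_serializer_example():
    serialize_lines = list(len_serializer_generator('self.data', False, True))
    deserialize_lines = list(len_serializer_generator('self.data', False, False))
    return serialize_lines, deserialize_lines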
def string_serializer_generator(package, type_, name, serialize):
 """
 Generator for string types. Similar to arrays, but with a more
 efficient call to struct.pack.
 :param name: spec field name, ``str``
 :param serialize: if ``True``, generate code for
 serialization. Otherwise, generate code for deserialization, ``bool``
 """
 # don't optimize in deserialization case as assignment doesn't
 # work
 if _serial_context and serialize:
 # optimize as string serialization accesses field twice
 yield "_x = %s%s"%(_serial_context, name)
 var = "_x"
 else:
 var = _serial_context+name
 # the length generator is a noop if serialize is True as we
 # optimize the serialization call.
 base_type, is_array, array_len = genmsg.msgs.parse_type(type_)
 # - don't serialize length for fixed-length arrays of bytes
 if base_type not in ['uint8', 'char'] or array_len is None:
 for y in len_serializer_generator(var, True, serialize):
 yield y #serialize string length
 if serialize:
 #serialize length and string together
 #check to see if it's a uint8/byte type, in which case we need to convert to string before serializing
 base_type, is_array, array_len = genmsg.msgs.parse_type(type_)
 if base_type in ['uint8', 'char']:
 yield "# - if encoded as a list instead, serialize as bytes instead of string"
 if array_len is None:
 yield "if type(%s) in [list, tuple]:"%var
 yield INDENT+pack2("'<I%sB'%length", "length, *%s"%var)
 yield "else:"
 yield INDENT+pack2("'<I%ss'%length", "length, %s"%var)
 else:
 yield "if type(%s) in [list, tuple]:"%var
 yield INDENT+pack('%sB'%array_len, "*%s"%var)
 yield "else:"
 yield INDENT+pack('%ss'%array_len, var)
 else:
 # FIXME: for py3k, this needs to be w/ encode(), but this interferes with actual byte data
 yield "if python3 or type(%s) == unicode:"%(var)
 yield INDENT+"%s = %s.encode('utf-8')"%(var,var) #For unicode-strings in Python2, encode using utf-8
 yield INDENT+"length = len(%s)"%(var) # Update the length after utf-8 conversion
 yield pack2("'<I%ss'%length", "length, %s"%var)
 else:
 yield "start = end"
 if array_len is not None:
 yield "end += %s" % array_len
 yield "%s = str[start:end]" % var
 else:
 yield "end += length"
 if base_type in ['uint8', 'char']:
 yield "%s = str[start:end]" % (var)
 else:
 yield "if python3:"
 yield INDENT+"%s = str[start:end].decode('utf-8')" % (var) #If messages are python3-decode back to unicode
 yield "else:"
 yield INDENT+"%s = str[start:end]" % (var)
def array_serializer_generator(msg_context, package, type_, name, serialize, is_numpy):
 """
 Generator for array types
 :raises: :exc:`MsgGenerationException` If array spec is invalid
 """
 base_type, is_array, array_len = genmsg.msgs.parse_type(type_)
 if not is_array:
 raise MsgGenerationException("Invalid array spec: %s"%type_)
 var_length = array_len is None
 # Handling fixed-size byte arrays could be slightly more efficient,
 # as we recalculate the length in the generated code.
 if base_type in ['char', 'uint8']: #treat unsigned int8 arrays as string type
 for y in string_serializer_generator(package, type_, name, serialize):
 yield y
 return
 var = _serial_context+name
 # yield length serialization, if necessary
 if var_length:
 for y in len_serializer_generator(var, False, serialize):
 yield y #serialize array length
 length = None
 else:
 length = array_len
 #optimization for simple arrays
 if is_simple(base_type):
 if var_length:
 pattern = compute_struct_pattern([base_type])
 yield "pattern = '<%%s%s'%%length"%pattern
 if serialize:
 if is_numpy:
 yield pack_numpy(var)
 else:
 yield pack2('pattern', "*"+var)
 else:
 yield "start = end"
 yield "end += struct.calcsize(pattern)"
 if is_numpy:
 dtype = NUMPY_DTYPE[base_type]
 yield unpack_numpy(var, 'length', dtype, 'str[start:end]')
 else:
 yield unpack2(var, 'pattern', 'str[start:end]')
 else:
 pattern = "%s%s"%(length, compute_struct_pattern([base_type]))
 if serialize:
 if is_numpy:
 yield pack_numpy(var)
 else:
 yield pack(pattern, "*"+var)
 else:
 yield "start = end"
 yield "end += %s"%struct.calcsize('<%s'%pattern)
 if is_numpy:
 dtype = NUMPY_DTYPE[base_type]
 yield unpack_numpy(var, length, dtype, 'str[start:end]')
 else:
 yield unpack(var, pattern, 'str[start:end]')
 if not serialize and base_type == 'bool':
 # convert uint8 to bool
 yield "%s = map(bool, %s)"%(var, var)
 else:
 #generic recursive serializer
 #NOTE: this is functionally equivalent to the is_registered branch of complex_serializer_generator
 # choose a unique temporary variable for iterating
 loop_var = 'val%s'%len(_context_stack)
 # compute the variable context and factory to use
 if base_type == 'string':
 push_context('')
 factory = string_serializer_generator(package, base_type, loop_var, serialize)
 else:
 push_context('%s.'%loop_var)
 factory = serializer_generator(msg_context, make_python_safe(get_registered_ex(msg_context, base_type)), serialize, is_numpy)
 if serialize:
 yield 'for %s in %s:'%(loop_var, var)
 else:
 yield '%s = []'%var
 if var_length:
 yield 'for i in range(0, length):'
 else:
 yield 'for i in range(0, %s):'%length
 if base_type != 'string':
 yield INDENT + '%s = %s'%(loop_var, compute_constructor(msg_context, package, base_type))
 for y in factory:
 yield INDENT + y
 if not serialize:
 yield INDENT + '%s.append(%s)'%(var, loop_var)
 pop_context()
def complex_serializer_generator(msg_context, package, type_, name, serialize, is_numpy):
 """
 Generator for serializing complex type
 :param serialize: if True, generate serialization
 code. Otherwise, deserialization code. ``bool``
 :param is_numpy: if True, generate serializer code for numpy
 datatypes instead of Python lists, ``bool``
 :raises: MsgGenerationException If type is not a valid message type
 """
 # ordering of these statements is important as we mutate the type
 # string we are checking throughout. parse_type strips array
 # brackets, then we check for the 'complex' builtin types (string,
 # time, duration, Header), then we canonicalize it to an embedded
 # message type.
 _, is_array, _ = genmsg.msgs.parse_type(type_)
 #Array
 if is_array:
 for y in array_serializer_generator(msg_context, package, type_, name, serialize, is_numpy):
 yield y
 #Embedded Message
 elif type_ == 'string':
 for y in string_serializer_generator(package, type_, name, serialize):
 yield y
 else:
 if not is_special(type_):
 # canonicalize type
 pkg, base_type = compute_pkg_type(package, type_)
 type_ = "%s/%s"%(pkg, base_type)
 if msg_context.is_registered(type_):
 # descend data structure ####################
 ctx_var = next_var()
 yield "%s = %s"%(ctx_var, _serial_context+name)
 push_context(ctx_var+'.')
 # unoptimized code
 #push_context(_serial_context+name+'.')
 for y in serializer_generator(msg_context, make_python_safe(get_registered_ex(msg_context, type_)), serialize, is_numpy):
 yield y # recurse on subtype
 pop_context()
 else:
 #Invalid
 raise MsgGenerationException("Unknown type: %s. Package context is %s"%(type_, package))
def simple_serializer_generator(msg_context, spec, start, end, serialize): #primitives that can be handled with struct
 """
 Generator (de)serialization code for multiple fields from spec
 :param spec: :class:`genmsg.MsgSpec`
 :param start: first field to serialize, ``int``
 :param end: last field to serialize, ``int``
 """
 # optimize member var access
 if end - start > 1 and _serial_context.endswith('.'):
 yield
 and won't be scanned.
 # Duplication note: When scanning multiple copies, the original
 # or multiple copy
 # Determine if the destination file is a multiple copy or not.
 # Remove the src_bfid from original_copy_list, so that we can tell
 # the difference between the cases listed below.
 #There are two conditions we want to get here. Those destination
 # bfids are surrounded by asterisks (*).
 #The M indicates an entry in the migration table.
 #The D indicates an entry in the file_copies_map table.
 #
 # 1) src_bfid -MD-> dst_bfid Duplication with multiple copies
 # | |
 # | D
 # | |
 # | v
 # |--MD--> *dst_bfid*
 #
 # 2) src_bfid -MD-> dst_bfid Duplication to one copy
 #
 # 3) src_bfid -M--> dst_bfid Migration with multiple copies
 # | |
 # | D
 # | |
 # | v
 # |--M--> *dst_bfid*
 #
 # 4) src_bfid -M--> dst_bfid Migration to one copy
 #
 #For all other dst_bfids we want to cleanup.
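 # Illustrative summary (derived from the code below, not new logic):
 # remove_mig_path ends up True when dst_bfid has no original-copy entries
 # other than src_bfid, or when intf.make_failed_copies / intf.make_copies
 # is set; otherwise the migration path is left in place.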
 original_copy_list = get_original_copy_bfid(dst_bfid, db)
 scrubbed_copy_list = []
 for i in range(len(original_copy_list)):
 if original_copy_list[i] != src_bfid:
 scrubbed_copy_list.append(original_copy_list[i])
 remove_mig_path = ((len(scrubbed_copy_list) == 0) or
 getattr(intf, 'make_failed_copies', None) or \
 getattr(intf, 'make_copies', None))
 checked_tstamp = is_checked(dst_bfid, fcc, db)
 if checked_tstamp:
 ok_log(my_task, dst_bfid, "was already checked at", checked_tstamp)
 if not remove_mig_path:
 return None
 # make sure the migration path has been removed
 # FIXME: using pnfs_name0 is not correct for files 'mv'ed in pnfs.
 likely_path = dst_file_record['pnfs_name0']
 mig_path = migration_path(likely_path, src_file_record)
 # cleanup_after_scan() reports its own errors.
 rc = cleanup_after_scan(my_task, mig_path, src_bfid, fcc, db)
 debug_log(my_task, 'cleanup_after_scan returned %s'%(rc,))
 return rc
 #
 # File was not checked before, check the file
 #
 # We need to tell get_filenames() if the file is a multiple copy or not.
 if is_multiple_copy_bfid(dst_bfid, db):
 is_multiple_copy = True
 else:
 is_multiple_copy = False
 if debug:
 log(my_task, "start checking dst src: %s %s dst_record %s mig_path %s"
 % (dst_bfid,src_bfid,dst_file_record,mig_path))
 try:
 (pnfs_path,use_path) = get_filenames(my_task,
 job,is_multiple_copy,fcc,db,intf)
 except (OSError, IOError), msg:
 if msg.args[0] == errno.EBADF and \
 msg.args[1].find("conflicting layer") != -1:
 #If we get here, we have a state where PNFS is returning
 # different values for the normal pathname and the
 # .(access)() pathname. Remounting the filesystem usually
 # clears this situation.
 error_log(my_task, msg.args[1])
 log(my_task, "HINT: remount the PNFS filesystem and/or " \
 "flush the PNFS file system buffer cache.")
 return 1
 exc_type, exc_value, exc_tb = sys.exc_info()
 Trace.handle_error(exc_type, exc_value, exc_tb)
 del exc_tb #avoid resource leaks
 error_log(my_task, str(exc_type),str(exc_value),
 " %s %s %s %s is not a valid pnfs file" \
 % (
 dst_volume_record['external_label'],
 dst_bfid,
 dst_file_record['location_cookie'],
 dst_file_record['pnfsid']))
 raise sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
 except:
 raise sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]
 debug_log(my_task, 'pnfs_path %s use_path %s'%(pnfs_path, use_path))
 #Make sure the destination volume is found as the volume mentioned
 # in layer 4. (Obviously the file must be active.)
 if dst_file_record['deleted'] == NO:
 if not is_expected_volume(
 my_task, dst_volume_record['external_label'], pnfs_path, fcc, db):
 #Error message reported from is_expected_volume().
 return 1
 # make sure the path is NOT a migration path
 if pnfs_path == None:
 error_log(my_task,
 'none swapped file %s' % \
 (pnfs_path))
 return 1
 #It has been found that write failures from previous migrations
 # leave files with "Migration' in the path. The scan should allow
 # these failures.
 if is_migration_path(pnfs_path):
 if not is_migration_path(src_file_record['pnfs_name0']) and \
 src_file_record['deleted'] == NO:
 error_log(my_task,'found Migration file %s' % (pnfs_path))
 return 1
 mig_path = migration_path(pnfs_path, src_file_record)
 #Replace src_path with the source path to use, which may not even
 # be a path in situations like scanning a deleted file.
 sf_job = (src_file_record, src_volume_record, use_path,
 dst_file_record, dst_volume_record, tmp_path, mig_path)
 #Read the destination file.
 if scan_file(my_task, sf_job, use_path, "/dev/null", intf, encp):
 return 1
 log_checked(src_bfid, dst_bfid, fcc, db)
 if remove_mig_path:
 # cleanup_after_scan() reports its own errors.
 return cleanup_after_scan(my_task, mig_path, src_bfid, fcc, db)
# Helper function for the final_scan_files() and final_scan_volume().
# It is merged code from final_scan_files() and final_scan_volume().
# All arguments are internal variables of functions above
# returns error count (int).
def _scan_dest(my_task,dst_file_record,encp,intf,fcc,vcc,db):
 dst_bfid = dst_file_record['bfid']
 dst_vol = dst_file_record['external_label']
 dst_volume_record = get_volume_info(my_task,dst_vol,vcc,db)
 dst_package_id = dst_file_record.get("package_id", None)
 dst_package_files_count = dst_file_record.get("package_files_count",0)
 dst_is_a_package = ((dst_package_id is not None)
 and (dst_bfid == dst_package_id))
 # Get the first (src_bfid,dst_bfid) record from migration table for dst_bfid
 # It will return (None,None) for the migrated package v1.5 as the package
 # file is created during migration and does not have src.
 (src_bfid, check_dst_bfid) = get_bfids(dst_bfid, fcc, db)
 if debug:
 ddd = dst_file_record['deleted']
 print ("_scan_dest(): dst %s pack %s is_pack %s count %s (%s,%s) del %s vol %s "
 % (dst_bfid,dst_package_id,dst_is_a_package,dst_package_files_count, src_bfid,check_dst_bfid,ddd,dst_vol))
 # dst_bfid is not in migration table or query error;
 # or newly created package during migration with packaging
 if src_bfid == None and check_dst_bfid == None:
 # Deleted and unknown files:
 if dst_file_record['deleted'] in [YES, UNKNOWN]:
 #The file is a failed migration file.
 message = ("found failed migration file %s (deleted or unknown), "
 "skipping" % (dst_bfid,))
 log(my_task, message)
 return 1
 # Active files:
 # 1) These could be new files written to the destination tape.
 # This can happen if the tape is being rescanned after being released to
 # users who write additional files onto it.
 if not dst_is_a_package: # new regular file
 message = "active file on destination tape without a source"
 warning_log(my_task, message)
 return 0
 # 2) if dst_is_a_package:
 # 2a) note: package files created during
 # migration without repackaging (phase 1.0) do have an entry
 # in the migration table and are processed below with the regular (src_bfid, dst_bfid) handling.
 # 2b) This is a package file created during migration with packaging v1.5
 # The destination file is a package and it is not in the migration
 # table, but constituent files are in migration table.
 # find and scan constituent files for destination package dst_bfid:
 err_count = 0
 d_children = fcc.get_children(dst_bfid)
 if not e_errors.is_ok(d_children['status']):
 message = ("Failed to get children for dst_bfid %s: %s"
 % (dst_bfid, d_children['status']))
 error_log(my_task, message)
 return 1
 for dc_rec in d_children['children']:
 dc_bfid = dc_rec['bfid']
 if dc_bfid == dst_bfid:
 continue # skip the original package record
 dc_file_record = get_file_info(my_task, dc_bfid, fcc, db)
 err_count += _scan_dest(my_task,dc_file_record,encp,intf,fcc,vcc,db)
 # scan package file itself by reading bfid.
 use_path = "--skip-pnfs --get-bfid %s" % (dst_bfid,)
 ret = _scan_bfid(my_task,dst_bfid,use_path,"/dev/null",intf,encp)
 err_count += ret
# hold on for now adding (None,dst_bfid) to migration table.
# if not is_closed(dst_bfid, fcc, db):
# # The file has been scaned OK but not closed yet - set it closed
# log_closed(src_bfid, dst_bfid, fcc, db)
# close_log('OK')
 return err_count
 # These files have normal (src_bfid,dst_bfid) entries in migration table
 # - Regular files,
 # - package file written with migration without repackaging
 # - constituent files
 if not is_swapped(src_bfid, fcc, db):
 error_log(my_task,"%s %s has not been swapped" % (src_bfid, dst_bfid))
 return 1
 src_file_record = get_file_info(my_task, src_bfid, fcc, db)
 if not e_errors.is_ok(src_file_record):
 error_log(my_task,"unable to get file information for %s" % (src_bfid,))
 return 1
 src_vol = src_file_record['external_label']
 src_volume_record = get_volume_info(my_task,src_vol,vcc,db,use_cache=True)
 job = (src_file_record, src_volume_record, None,
 dst_file_record, dst_volume_record, None, None)
 ## Scan the file by reading it with encp.
 ## Note: if we are using volume assert, then final_scan_file()
 ## uses --check with the encp to avoid redundant read
 if final_scan_file(my_task, job, fcc, encp, intf, db):
 return 1
 # File scan OK
 # set closed for regular files and migrated packages phase 1.0
 if not is_closed(dst_bfid, fcc, db):
 # The file has been scanned OK but not closed yet - set it closed
 log_closed(src_bfid, dst_bfid, fcc, db)
 close_log('OK')
 return 0
# Scan list of destination bfids.
def final_scan_files(dst_bfids, intf):
 my_task = "FINAL_SCAN"
 threading.currentThread().setName('FINAL_SCAN')
 encp = encp_wrapper.Encp(tid='FINAL_SCAN')
 (fcc,vcc) = get_clerks()
 err_count = 0
 with Pgdb() as db:
 try:
 for dst_bfid in dst_bfids:
 dst_file_record = get_file_info(my_task, dst_bfid, fcc, db)
 err_count += _scan_dest(my_task,dst_file_record,encp,intf,fcc,vcc,db)
 except:
 exc_type, exc_value, exc_tb = sys.exc_info()
 Trace.handle_error(exc_type, exc_value, exc_tb)
# error_log(my_task, str(exc_type), str(exc_value), str(dst_bfid))
 del exc_tb #avoid resource leaks
 return err_count
# final_scan() -- last part of migration, driven by scan_queue
 # read the file as a user to make sure everything is fine
def final_scan(thread_num, scan_list, intf, deleted_files = NO):
 if deleted_files == YES:
 my_task = "FINAL_SCAN_DELETED"
 else:
 my_task = "FINAL_SCAN"
 # get
from base64 import b64encode, b64decode
from datetime import datetime
from futile import uc
from futile.etree import ElementTree as ET, tostring
from futile.etree.ElementTree import QName
from futile.logging import DEBUG
from openmtc.model import EntityAttribute
from openmtc_etsi.exc import SCLValueError, SCLSyntaxError, SCLMissingValue
from openmtc_etsi.model import ListAttribute, Entity, \
 AnyURI, ContentInstance, MembersContent, Notify
from openmtc_etsi.response import get_status_message_safe, \
 ErrorResponseConfirmation
from openmtc_etsi.serializer import Serializer
from openmtc_etsi.serializer.xml.binding import AnyURIList, NamedReferenceCollection,\
 ReferenceToNamedResource, PermissionListType, PermissionType,\
 PermissionFlagListType, PermissionHolderType, CTD_ANON,\
 ContentInstanceCollection, SearchStrings, ContentTypes, \
 ContentInstancesFilterCriteriaType
from pyxb.binding.basis import complexTypeDefinition
from pyxb.binding.content import _PluralBinding
from pyxb.exceptions_ import UnrecognizedAttributeError, \
 UnrecognizedContentError, SimpleFacetValueError,\
 UnrecognizedDOMRootNodeError, NonElementValidationError,\
 IncompleteElementContentError, SimpleTypeValueError
from pyxb.namespace import ExpandedName
from pyxb.utils.domutils import BindingDOMSupport
import binding
XMLNS = "http://uri.etsi.org/m2m"
namespace_prefix = "tns"
namespace_url = "{" + XMLNS + "}"
ET.register_namespace(namespace_prefix, XMLNS)
class XMLSerializer(Serializer):
 def __init__(self, type_factory=None, *args, **kw):
 if type_factory is None:
 from openmtc_etsi.model import get_etsi_type as type_factory
 self.get_resource_type = type_factory
 def _convert_value(self, value, need_type, mapped_type=None):
 if need_type is not None:
 if mapped_type is None:
 mapped_type = self._get_mapper_class(need_type.get_typename())
 print "_convert_value:", need_type, "->", mapped_type, "from", value, " - ", value.values
 values = {}
 for k, v in value.values.items():
 a = getattr(need_type, k)
 if isinstance(a, ListAttribute):
 if a.content_type is AnyURI:
 if isinstance(v, (tuple, list)):
 l = AnyURIList()
 l.reference = v
 else:
 l = AnyURIList()
 l.reference = v["reference"]
 else:
 if issubclass(a.content_type, Entity) or k[-1] != "s":
 l = v
 else:
 print k, v, a.content_type
 wrappercls = self._get_wrapper_class(k)
 wrapper = wrappercls()
 if type(v) is list:
 setattr(wrapper, k, v)
 else:
 l = v.values()[0]
 setattr(wrapper, v.keys()[0], l)
 l = wrapper
 values[k] = l
 elif isinstance(a, EntityAttribute) and \
 issubclass(a.type, Entity):
 values[k] = self._convert_value(v, a.type)
 elif k == "all" and mapped_type is PermissionHolderType:
 values[k] = CTD_ANON()
 else:
 values[k] = v
 self.logger.debug("Creating mapper of type %s with %s",
 mapped_type, values)
 value = mapped_type(**values)
 else:
 try:
 value = value.isoformat()
 except AttributeError:
 pass
 return value
 __wrappers = {
 "Permissions": PermissionListType,
 "SelfPermissions": PermissionListType,
 "PermissionFlags": PermissionFlagListType,
 "HolderRefs": binding.HolderRefListType,
 "Domains": binding.DomainListType
 }
 __mappers = {
 "Permission": PermissionType,
 "SelfPermission": PermissionType,
 "PermissionHolder": PermissionHolderType,
 "FilterCriteria": ContentInstancesFilterCriteriaType,
 "M2mPoc": binding.M2MPoc,
 "M2mPocs": binding.M2MPocs
 }
 def _get_wrapper_class(self, name):
 valclsname = name[0].upper() + name[1:]
 try:
 return self.__wrappers[valclsname]
 except KeyError:
 return getattr(binding, valclsname)
 def _get_mapper_class(self, name):
 valclsname = name[0].upper() + name[1:]
 try:
 return self.__mappers[valclsname]
 except KeyError:
 return getattr(binding, valclsname)
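 # Sketch (illustrative only): how attribute names are resolved to pyxb binding
 # classes by the two helpers above. The first character is upper-cased, then
 # the private mapping tables are consulted before falling back to the
 # generated 'binding' module, e.g.
 #   _get_mapper_class("permission")     -> PermissionType       (via __mappers)
 #   _get_mapper_class("m2mPoc")         -> binding.M2MPoc       (via __mappers)
 #   _get_wrapper_class("searchStrings") -> binding.SearchStrings (getattr fallback)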
 def _encode_contentinstance(self, r, fields=None):
 instance = binding.ContentInstance()
 instance.href = r.href
 instance.id = r.id
 if fields is None or "searchStrings" in fields:
 instance.searchStrings = SearchStrings(searchString=r.searchStrings)
 if fields is None or "creationTime" in fields:
 instance.creationTime = r.creationTime
 if fields is None or "contentTypes" in fields:
 instance.contentTypes = ContentTypes(contentType=r.contentTypes)
 if fields is None or "lastModifiedTime" in fields:
 instance.lastModifiedTime = r.lastModifiedTime
 if fields is None or "content" in fields:
 content_elem = binding.Content(contentType=r.content.contentType)
 content = r.content
 if content.binaryContent:
 content_elem.binaryContent = b64decode(content.binaryContent)
 else:
 content_elem.textContent = content.textContent
 instance.content_ = content_elem
 if fields is None or "contentSize" in fields:
 instance.contentSize = r.contentSize
 return instance
 def _encode_memberscontent(self, resource, pretty, encoding):
 cls = binding.membersContentResponses.typeDefinition()
 instance = cls()
 statuscls = instance._ElementMap.values()[0].elementBinding().typeDefinition()
 stati = []
 for response in resource.membersContentResponses:
 status = statuscls()
 status.id = response.id
 status.lastModifiedTime = response.lastModifiedTime
 status.statusCode = response.status
 try:
 status.resourceURI = response.resourceURI
 except AttributeError:
 # Weirdly enough and contrary to what is stated in table 11.35 in TS102.921, resourceURI is not optional as per XSD
 status.resourceURI = response.id
 try:
 body = response.errorInfo
 except AttributeError:
 try:
 body = response.resource
 except AttributeError:
 pass
 else:
 status.resultBody = b64encode(self.encode(body, pretty,
 encoding=encoding))
 else:
 status.resultBody = b64encode(body)
 stati.append(status)
 instance.status = stati
 return instance
 def _encode_notify(self, resource, pretty, encoding):
 instance = binding.Notify(
 statusCode=resource.statusCode,
 subscriptionReference=resource.subscriptionReference,
 requestingEntity=resource.requestingEntity,
 contact=resource.contact
 )
 if resource.timeoutReason:
 instance.timeoutReason = resource.timeoutReason
 else:
 content_type = resource.representation.get("contentType") or "application/xml"
 payload = resource.representation["$t"]
 if isinstance(payload, ErrorResponseConfirmation):
 payload = self.encode_error(payload, pretty, encoding)
 else:
 payload = self.encode(payload, pretty, encoding)
 instance.representation = binding.base64Binary(
 payload,
 contentType=content_type
 )
 return instance
 def encode(self, resource, pretty=False, encoding="utf-8", fields=None,
 attributes_only=False):
 # representation = self.get_representation(resource, fields=fields)
 try:
 id_attribute = resource.id_attribute
 except AttributeError:
 xml = tostring(self._build_elementtree(resource),
 pretty_print=pretty,
 encoding=encoding)
 return '<?xml version="1.0" encoding="utf-8"?>' + xml
 representation = {}
 if isinstance(resource, ContentInstance):
 instance = self._encode_contentinstance(resource, fields)
 elif isinstance(resource, MembersContent):
 instance = self._encode_memberscontent(resource, pretty, encoding)
 elif isinstance(resource, Notify):
 instance = self._encode_notify(resource, pretty, encoding)
 else:
 for attr in resource.attributes:
 a_name = attr.name
 if (fields is None or a_name == id_attribute or a_name in fields) \
 and attr.accesstype is not None:
 val = getattr(resource, "_" + a_name, None)
 if val is None:
 continue
 if isinstance(attr, ListAttribute):
 if attr.content_type is AnyURI:
 representation[a_name] = l = AnyURIList()
 l.reference = val
 else:
 wrappercls = self._get_wrapper_class(a_name)
 if issubclass(attr.content_type, Entity):
 valcls = self._get_mapper_class(a_name[:-1])
 vals = [self._convert_value(v,
 attr.content_type,
 valcls)
 for v in val]
 else:
 vals = val
 wrapper = wrappercls()
 setattr(wrapper, wrappercls._ElementMap.keys()[0].localName(), vals)
 representation[a_name] = wrapper
 elif isinstance(attr, EntityAttribute):
 valcls = self._get_mapper_class(
 attr.type.get_typename()
 )
 val = self._convert_value(val, attr.type,
 valcls)
 representation[a_name] = val
 else:
 try:
 val = val.isoformat()
 except AttributeError:
 pass
 representation[a_name] = val
 if fields is None and not attributes_only:
 path = resource.path
 for sr in resource.subresources:
 representation[sr.name + "Reference"] = path + "/" + sr.name
 for collection_member in resource.collections:
 collection = getattr(resource, collection_member.name)
 if collection_member.name == "contentInstanceCollection":
 cr = ContentInstanceCollection()
 instances = map(self._encode_contentinstance, collection)
 """
 for r in collection:
 ci = binding.ContentInstance()
 ci.searchStrings = SearchStrings(searchString=r.searchStrings)
 ci.creationTime = r.creationTime
 ci.href = r.href
 ci.contentTypes = ContentTypes(contentType=r.contentTypes)
 ci.lastModifiedTime = r.lastModifiedTime
 ci.content_ = binding.Content(r.content["$t"],
 contentType=r.content["contentType"])
 ci.id = r.id
 ci.contentSize = r.contentSize
 instances.append(ci)
 """
 cr.contentInstance = instances
 else:
 cr = NamedReferenceCollection()
 references = []
 for item in collection:
 r = ReferenceToNamedResource(item.path)
 r.id = item.name
 references.append(r)
 cr.namedReference = references
 representation[collection_member.name] = cr
 try:
 latest = resource.latest
 oldest = resource.oldest
 except AttributeError:
 pass
 else:
 if latest is not None:
 representation["latest"] = ReferenceToNamedResource(latest.path, id=latest.name)
 representation["oldest"] = ReferenceToNamedResource(oldest.path, id=oldest.name)
 cls = self._get_mapper_class(type(resource).__name__)
 self.logger.debug("Creating instance of %s with %s", cls, representation)
 instance = cls()
 for k, v in representation.iteritems():
 setattr(instance, k, v)
 try:
 flex_values = resource.flex_values
 except AttributeError:
 pass
 else:
 # FIXME: find out how to set these
 for k, v in flex_values.items():
 self.logger.debug("Set flex: %s - %s", k, v)
 bds = BindingDOMSupport()
 bds.declareNamespace(binding.Namespace, namespace_prefix)
 return instance.toDOM(element_name=namespace_prefix + ":" + resource.typename, bds=bds) \
 .toxml(encoding=encoding)
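 # Summary comment (added for readability, describing the code above): encode()
 # special-cases ContentInstance, MembersContent and Notify resources, and for
 # everything else builds a flat "representation" dict from the resource's
 # attributes, sub-resource references and collections, instantiates the
 # matching PyXB mapper class, and serialises it to XML via toDOM()/toxml()
 # under the declared namespace prefix.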
 def encode_error(self, error, pretty=False, encoding="utf-8"):
 try:
 statuscode = error.statusCode
 except AttributeError:
 status = "STATUS_INTERNAL_SERVER_ERROR"
 else:
 _, status = get_status_message_safe(statuscode)
 return binding.ErrorInfo(
 statusCode=status, additionalInfo=uc(error)
 ).toDOM(element_name="ns1:errorInfo").toxml(encoding=encoding)
 def _build_elementtree(self, representation, id_attribute=()):  # avoid a mutable default argument
 resource_name, representation_values = representation.items()[0]
 self.logger.debug("building elementtree from: resource_name: %s and representation_value: %s",resource_name, representation_values)
 tagname = QName(XMLNS, resource_name)
 e = ET.Element(tagname)
 if isinstance(representation_values, dict):
 for k, v in representation_values.iteritems():
 if k in id_attribute:
 e.set(namespace_url+k, v)
 elif isinstance(v,list):
 for i in v:
 s = self._build_elementtree({k: i}, id_attribute=id_attribute)
 e.append(s)
 elif isinstance(v, dict):  # nested dict, e.g. searchStrings: {'searchString': [...]}
 #if k == "searchStrings":
 # v = {'searchString': ["XML serializer test searchstring", "this is just a test"]}
 s = self._build_elementtree({k: v}, id_attribute=id_attribute)  # build the subelement recursively
 e.append(s)  # and attach it to the parent
 elif k == "$t":
 self.logger.debug("hve: %s", v)
 e.text = v
 else:
 s = ET.SubElement(e, namespace_url+k)
 s.text = str(v)
 elif isinstance(representation_values, list):
 for i in representation_values: #we have a list
 if isinstance(i,str):
 e.text = str(i)
 else:
 s = self._build_elementtree({resource_name:i}, id_attribute=id_attribute)
 e.append(s)
 elif isinstance(representation_values, (basestring, int, float)):
 e.text = uc(representation_values)
 elif isinstance(representation_values, datetime):
 e.text = representation_values.isoformat()
 else:
 self.logger.debug("building elementtree: Unknown representation value, %s", type(representation_values))
 return e
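 # Illustration of the recursion above (a sketch, not taken from the original
 # tests): nested dicts become nested elements, "$t" becomes element text, and
 # lists repeat the enclosing tag once per item. For example
 #
 #   {"content": {"contentType": "text/plain", "$t": "hello"}}
 #
 # roughly serialises to
 #
 #   <content>hello<contentType>text/plain</contentType></content>
 #
 # (namespace prefixes omitted for brevity).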
 def iterencode(self, resource, pretty=False):
 return [self.encode(resource, pretty), ]
 def encode_values(self, typename, values, filter_none=False, pretty=False,
 encoding="utf-8"):
 if isinstance(values, str): # HACK retargeted update?
 return values
 # UGH, but works
 resource = self.parse_representation({typename: values})
 return self.encode(resource, pretty, encoding, attributes_only=True)
 """
 def encode_values(self, typename, values, filter_none=False):
 self.logger.debug("hve_encode_values")
 if filter_none:
 values = {k: v for k, v in values.items() if v is not None}
 try:
 status_code = values["statusCode"]
 except KeyError:
 pass
 except TypeError as e: # HACK retargeted update?
 if isinstance(values, str):
 self.logger.debug("error encoding values: %s", e)
 return values
 else:
 raise
 else:
 if not filter_none:
 values = values.copy()
 values["statusCode"] = get_status_message(status_code)
 # return self._build_elementtree({typename: values})
 return ET.tostring(self._build_elementtree({typename: values}))
 """
 if Serializer.get_class_logger().isEnabledFor(DEBUG):
 def decode(self, s):
 try:
 s = s.read()
 except AttributeError:
 pass
 return self.decode_string(s)
 else:
 def decode(self, s):
 entity = self.load(s)
 return self.decode_values(entity)
 decode_resource_values = decode
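 # Note on the conditional definitions above: decode() is chosen once, at class
 # definition time. With DEBUG logging enabled the input is first read into a
 # string so decode_string() can log the raw XML before parsing; otherwise the
 # input (string or file-like object) is passed straight to load(), skipping
 # the extra copy and the log call.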
 def decode_string(self, s):
 self.logger.debug("Reading XML input: %s", s)
 entity = self.load_string(s)
 return self.decode_values(entity)
 def _convert_entity(self, entity):
 if isinstance(entity, complexTypeDefinition):
 return self._convert_instance(entity)
 return entity
 def _convert_instance(self, instance):
 values = {}
 for elem in | |
| 
 minimum_value: Optional[int] = None
 maximum_value: Optional[int] = None
 none_of: Optional[Union[Union[dict, AnonymousTypeExpression], List[Union[dict, AnonymousTypeExpression]]]] = empty_list()
 exactly_one_of: Optional[Union[Union[dict, AnonymousTypeExpression], List[Union[dict, AnonymousTypeExpression]]]] = empty_list()
 any_of: Optional[Union[Union[dict, AnonymousTypeExpression], List[Union[dict, AnonymousTypeExpression]]]] = empty_list()
 all_of: Optional[Union[Union[dict, AnonymousTypeExpression], List[Union[dict, AnonymousTypeExpression]]]] = empty_list()
 def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
 if self._is_empty(self.name):
 self.MissingRequiredField("name")
 if not isinstance(self.name, TypeDefinitionName):
 self.name = TypeDefinitionName(self.name)
 if self.typeof is not None and not isinstance(self.typeof, TypeDefinitionName):
 self.typeof = TypeDefinitionName(self.typeof)
 if self.base is not None and not isinstance(self.base, str):
 self.base = str(self.base)
 if self.uri is not None and not isinstance(self.uri, URIorCURIE):
 self.uri = URIorCURIE(self.uri)
 if self.repr is not None and not isinstance(self.repr, str):
 self.repr = str(self.repr)
 if self.pattern is not None and not isinstance(self.pattern, str):
 self.pattern = str(self.pattern)
 if self.structured_pattern is not None and not isinstance(self.structured_pattern, PatternExpression):
 self.structured_pattern = PatternExpression(**as_dict(self.structured_pattern))
 if self.equals_string is not None and not isinstance(self.equals_string, str):
 self.equals_string = str(self.equals_string)
 if not isinstance(self.equals_string_in, list):
 self.equals_string_in = [self.equals_string_in] if self.equals_string_in is not None else []
 self.equals_string_in = [v if isinstance(v, str) else str(v) for v in self.equals_string_in]
 if self.equals_number is not None and not isinstance(self.equals_number, int):
 self.equals_number = int(self.equals_number)
 if self.minimum_value is not None and not isinstance(self.minimum_value, int):
 self.minimum_value = int(self.minimum_value)
 if self.maximum_value is not None and not isinstance(self.maximum_value, int):
 self.maximum_value = int(self.maximum_value)
 if not isinstance(self.none_of, list):
 self.none_of = [self.none_of] if self.none_of is not None else []
 self.none_of = [v if isinstance(v, AnonymousTypeExpression) else AnonymousTypeExpression(**as_dict(v)) for v in self.none_of]
 if not isinstance(self.exactly_one_of, list):
 self.exactly_one_of = [self.exactly_one_of] if self.exactly_one_of is not None else []
 self.exactly_one_of = [v if isinstance(v, AnonymousTypeExpression) else AnonymousTypeExpression(**as_dict(v)) for v in self.exactly_one_of]
 if not isinstance(self.any_of, list):
 self.any_of = [self.any_of] if self.any_of is not None else []
 self.any_of = [v if isinstance(v, AnonymousTypeExpression) else AnonymousTypeExpression(**as_dict(v)) for v in self.any_of]
 if not isinstance(self.all_of, list):
 self.all_of = [self.all_of] if self.all_of is not None else []
 self.all_of = [v if isinstance(v, AnonymousTypeExpression) else AnonymousTypeExpression(**as_dict(v)) for v in self.all_of]
 super().__post_init__(**kwargs)
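# The __post_init__ above follows the coercion pattern used throughout this
# generated model: required slots are checked, scalars are wrapped into
# single-element lists, and every element is coerced to its declared type.
# A minimal sketch of that pattern in isolation (names are illustrative, not
# part of the generated code):
#
#   def _coerce_list(value, cls):
#       if not isinstance(value, list):
#           value = [value] if value is not None else []
#       return [v if isinstance(v, cls) else cls(v) for v in value]
#
# so e.g. any_of may be passed as a single dict and still ends up as a list of
# AnonymousTypeExpression instances after initialisation.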
@dataclass
class SubsetDefinition(Element):
 """
 the name and description of a subset
 """
 _inherited_slots: ClassVar[List[str]] = []
 class_class_uri: ClassVar[URIRef] = LINKML.SubsetDefinition
 class_class_curie: ClassVar[str] = "linkml:SubsetDefinition"
 class_name: ClassVar[str] = "subset_definition"
 class_model_uri: ClassVar[URIRef] = LINKML.SubsetDefinition
 name: Union[str, SubsetDefinitionName] = None
 def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
 if self._is_empty(self.name):
 self.MissingRequiredField("name")
 if not isinstance(self.name, SubsetDefinitionName):
 self.name = SubsetDefinitionName(self.name)
 super().__post_init__(**kwargs)
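# Hypothetical usage sketch (not part of the generated module): since `name` is
# the only required slot, a subset can be built from a plain string and
# __post_init__ upgrades it to the typed name class:
#
#   s = SubsetDefinition(name="basic_subset")
#   assert isinstance(s.name, SubsetDefinitionName)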
@dataclass
class Definition(Element):
 """
 base class for definitions
 """
 _inherited_slots: ClassVar[List[str]] = []
 class_class_uri: ClassVar[URIRef] = LINKML.Definition
 class_class_curie: ClassVar[str] = "linkml:Definition"
 class_name: ClassVar[str] = "definition"
 class_model_uri: ClassVar[URIRef] = LINKML.Definition
 name: Union[str, DefinitionName] = None
 is_a: Optional[Union[str, DefinitionName]] = None
 abstract: Optional[Union[bool, Bool]] = None
 mixin: Optional[Union[bool, Bool]] = None
 mixins: Optional[Union[Union[str, DefinitionName], List[Union[str, DefinitionName]]]] = empty_list()
 apply_to: Optional[Union[Union[str, DefinitionName], List[Union[str, DefinitionName]]]] = empty_list()
 values_from: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
 created_by: Optional[Union[str, URIorCURIE]] = None
 created_on: Optional[Union[str, XSDDateTime]] = None
 last_updated_on: Optional[Union[str, XSDDateTime]] = None
 modified_by: Optional[Union[str, URIorCURIE]] = None
 status: Optional[Union[str, URIorCURIE]] = None
 string_serialization: Optional[str] = None
 def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
 if self.is_a is not None and not isinstance(self.is_a, DefinitionName):
 self.is_a = DefinitionName(self.is_a)
 if self.abstract is not None and not isinstance(self.abstract, Bool):
 self.abstract = Bool(self.abstract)
 if self.mixin is not None and not isinstance(self.mixin, Bool):
 self.mixin = Bool(self.mixin)
 if not isinstance(self.mixins, list):
 self.mixins = [self.mixins] if self.mixins is not None else []
 self.mixins = [v if isinstance(v, DefinitionName) else DefinitionName(v) for v in self.mixins]
 if not isinstance(self.apply_to, list):
 self.apply_to = [self.apply_to] if self.apply_to is not None else []
 self.apply_to = [v if isinstance(v, DefinitionName) else DefinitionName(v) for v in self.apply_to]
 if not isinstance(self.values_from, list):
 self.values_from = [self.values_from] if self.values_from is not None else []
 self.values_from = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.values_from]
 if self.created_by is not None and not isinstance(self.created_by, URIorCURIE):
 self.created_by = URIorCURIE(self.created_by)
 if self.created_on is not None and not isinstance(self.created_on, XSDDateTime):
 self.created_on = XSDDateTime(self.created_on)
 if self.last_updated_on is not None and not isinstance(self.last_updated_on, XSDDateTime):
 self.last_updated_on = XSDDateTime(self.last_updated_on)
 if self.modified_by is not None and not isinstance(self.modified_by, URIorCURIE):
 self.modified_by = URIorCURIE(self.modified_by)
 if self.status is not None and not isinstance(self.status, URIorCURIE):
 self.status = URIorCURIE(self.status)
 if self.string_serialization is not None and not isinstance(self.string_serialization, str):
 self.string_serialization = str(self.string_serialization)
 super().__post_init__(**kwargs)
@dataclass
class EnumDefinition(Element):
 """
 List of values that constrain the range of a slot
 """
 _inherited_slots: ClassVar[List[str]] = []
 class_class_uri: ClassVar[URIRef] = LINKML.EnumDefinition
 class_class_curie: ClassVar[str] = "linkml:EnumDefinition"
 class_name: ClassVar[str] = "enum_definition"
 class_model_uri: ClassVar[URIRef] = LINKML.EnumDefinition
 name: Union[str, EnumDefinitionName] = None
 code_set: Optional[Union[str, URIorCURIE]] = None
 code_set_tag: Optional[str] = None
 code_set_version: Optional[str] = None
 pv_formula: Optional[Union[str, "PvFormulaOptions"]] = None
 permissible_values: Optional[Union[Dict[Union[str, PermissibleValueText], Union[dict, "PermissibleValue"]], List[Union[dict, "PermissibleValue"]]]] = empty_dict()
 def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
 if self._is_empty(self.name):
 self.MissingRequiredField("name")
 if not isinstance(self.name, EnumDefinitionName):
 self.name = EnumDefinitionName(self.name)
 if self.code_set is not None and not isinstance(self.code_set, URIorCURIE):
 self.code_set = URIorCURIE(self.code_set)
 if self.code_set_tag is not None and not isinstance(self.code_set_tag, str):
 self.code_set_tag = str(self.code_set_tag)
 if self.code_set_version is not None and not isinstance(self.code_set_version, str):
 self.code_set_version = str(self.code_set_version)
 if self.pv_formula is not None and not isinstance(self.pv_formula, PvFormulaOptions):
 self.pv_formula = PvFormulaOptions(self.pv_formula)
 self._normalize_inlined_as_dict(slot_name="permissible_values", slot_type=PermissibleValue, key_name="text", keyed=True)
 super().__post_init__(**kwargs)
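# Hypothetical usage sketch (not part of the generated module): permissible
# values may be supplied either as a dict keyed by the value text or as a list
# of PermissibleValue-like dicts; _normalize_inlined_as_dict re-keys both forms
# by "text" during __post_init__:
#
#   e = EnumDefinition(name="status_enum",
#                      permissible_values={"OPEN": {}, "CLOSED": {}})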
@dataclass
class StructuredAlias(YAMLRoot):
 """
 object that contains meta data about a synonym or alias including where it came from (source) and its scope
 (narrow, broad, etc.)
 """
 _inherited_slots: ClassVar[List[str]] = []
 class_class_uri: ClassVar[URIRef] = SKOSXL.Label
 class_class_curie: ClassVar[str] = "skosxl:Label"
 class_name: ClassVar[str] = "structured_alias"
 class_model_uri: ClassVar[URIRef] = LINKML.StructuredAlias
 literal_form: str = None
 predicate: Optional[Union[str, "AliasPredicateEnum"]] = None
 categories: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
 extensions: Optional[Union[Dict[Union[str, ExtensionTag], Union[dict, Extension]], List[Union[dict, Extension]]]] = empty_dict()
 annotations: Optional[Union[Dict[Union[str, AnnotationTag], Union[dict, Annotation]], List[Union[dict, Annotation]]]] = empty_dict()
 description: Optional[str] = None
 alt_descriptions: Optional[Union[Dict[Union[str, AltDescriptionSource], Union[dict, "AltDescription"]], List[Union[dict, "AltDescription"]]]] = empty_dict()
 title: Optional[str] = None
 deprecated: Optional[str] = None
 todos: Optional[Union[str, List[str]]] = empty_list()
 notes: Optional[Union[str, List[str]]] = empty_list()
 comments: Optional[Union[str, List[str]]] = empty_list()
 examples: Optional[Union[Union[dict, "Example"], List[Union[dict, "Example"]]]] = empty_list()
 in_subset: Optional[Union[Union[str, SubsetDefinitionName], List[Union[str, SubsetDefinitionName]]]] = empty_list()
 from_schema: Optional[Union[str, URI]] = None
 imported_from: Optional[str] = None
 source: Optional[Union[str, URIorCURIE]] = None
 in_language: Optional[str] = None
 see_also: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
 deprecated_element_has_exact_replacement: Optional[Union[str, URIorCURIE]] = None
 deprecated_element_has_possible_replacement: Optional[Union[str, URIorCURIE]] = None
 aliases: Optional[Union[str, List[str]]] = empty_list()
 structured_aliases: Optional[Union[Union[dict, "StructuredAlias"], List[Union[dict, "StructuredAlias"]]]] = empty_list()
 mappings: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
 exact_mappings: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
 close_mappings: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
 related_mappings: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
 narrow_mappings: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
 broad_mappings: Optional[Union[Union[str, URIorCURIE], List[Union[str, URIorCURIE]]]] = empty_list()
 rank: Optional[int] = None
 def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
 if self._is_empty(self.literal_form):
 self.MissingRequiredField("literal_form")
 if not isinstance(self.literal_form, str):
 self.literal_form = str(self.literal_form)
 if self.predicate is not None and not isinstance(self.predicate, AliasPredicateEnum):
 self.predicate = AliasPredicateEnum(self.predicate)
 if not isinstance(self.categories, list):
 self.categories = [self.categories] if self.categories is not None else []
 self.categories = [v if isinstance(v, URIorCURIE) else URIorCURIE(v) for v in self.categories]
 self._normalize_inlined_as_dict(slot_name="extensions", slot_type=Extension, key_name="tag", keyed=True)
 self._normalize_inlined_as_dict(slot_name="annotations", slot_type=Annotation, key_name="tag", keyed=True)
 if self.description is not None and not isinstance(self.description, str):
 self.description = str(self.description)
 self._normalize_inlined_as_dict(slot_name="alt_descriptions", slot_type=AltDescription, key_name="source", keyed=True)
 if self.title is not None and not isinstance(self.title, str):
 self.title = str(self.title)
 if self.deprecated is not None and not isinstance(self.deprecated, str):
 self.deprecated = str(self.deprecated)
 if not isinstance(self.todos, list):
 self.todos = [self.todos] if self.todos is not None else []
 self.todos = [v if isinstance(v, str) else str(v) for v in self.todos]
 if not isinstance(self.notes, list):
 self.notes = [self.notes] if self.notes is not None else []
 self.notes = [v if isinstance(v, str) else str(v) for v in self.notes]
 if not isinstance(self.comments, list):
 self.comments = [self.comments] if self.comments is not None else []
 self.comments = [v if isinstance(v, str) else str(v) for v in self.comments]
 if not isinstance(self.examples, list):
 self.examples = [self.examples] if self.examples is not None else []
 self.examples = [v if isinstance(v, Example) else Example(**as_dict(v)) for v in self.examples]
 if not isinstance(self.in_subset, list):
 self.in_subset = [self.in_subset] if self.in_subset is not None else []
 self.in_subset = [v if isinstance(v, SubsetDefinitionName) else SubsetDefinitionName(v) for v in self.in_subset]
 if self.from_schema is not None and not isinstance(self.from_schema, URI):
 self.from_schema = URI(self.from_schema)
 if self.imported_from is not None and not isinstance(self.imported_from, str):
 self.imported_from = str(self.imported_from)
 if self.source is not None and not isinstance(self.source, URIorCURIE):
 self.source = URIorCURIE(self.source)
 if self.in_language is not None and not isinstance(self.in_language, str):
 | |
| 
	provided, the provider project is used.
 """
 return pulumi.get(self, "project")
 @project.setter
 def project(self, value: Optional[pulumi.Input[str]]):
 pulumi.set(self, "project", value)
 @property
 @pulumi.getter
 def region(self) -> Optional[pulumi.Input[str]]:
 """
 The region where the managed instance group resides. If not provided, the provider region is used.
 """
 return pulumi.get(self, "region")
 @region.setter
 def region(self, value: Optional[pulumi.Input[str]]):
 pulumi.set(self, "region", value)
 @property
 @pulumi.getter(name="selfLink")
 def self_link(self) -> Optional[pulumi.Input[str]]:
 """
 The URL of the created resource.
 """
 return pulumi.get(self, "self_link")
 @self_link.setter
 def self_link(self, value: Optional[pulumi.Input[str]]):
 pulumi.set(self, "self_link", value)
 @property
 @pulumi.getter(name="statefulDisks")
 def stateful_disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]:
 """
 Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
 """
 return pulumi.get(self, "stateful_disks")
 @stateful_disks.setter
 def stateful_disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatefulDiskArgs']]]]):
 pulumi.set(self, "stateful_disks", value)
 @property
 @pulumi.getter
 def statuses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]]:
 """
 The status of this managed instance group.
 """
 return pulumi.get(self, "statuses")
 @statuses.setter
 def statuses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerStatusArgs']]]]):
 pulumi.set(self, "statuses", value)
 @property
 @pulumi.getter(name="targetPools")
 def target_pools(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
 """
 The full URL of all target pools to which new
 instances in the group are added. Updating the target pools attribute does
 not affect existing instances.
 """
 return pulumi.get(self, "target_pools")
 @target_pools.setter
 def target_pools(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
 pulumi.set(self, "target_pools", value)
 @property
 @pulumi.getter(name="targetSize")
 def target_size(self) -> Optional[pulumi.Input[int]]:
 """
 The target number of running instances for this managed instance group. This value should always be explicitly set unless this resource is attached to an autoscaler, in which case it should never be set.
 """
 return pulumi.get(self, "target_size")
 @target_size.setter
 def target_size(self, value: Optional[pulumi.Input[int]]):
 pulumi.set(self, "target_size", value)
 @property
 @pulumi.getter(name="updatePolicy")
 def update_policy(self) -> Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]:
 """
 The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
 """
 return pulumi.get(self, "update_policy")
 @update_policy.setter
 def update_policy(self, value: Optional[pulumi.Input['RegionInstanceGroupManagerUpdatePolicyArgs']]):
 pulumi.set(self, "update_policy", value)
 @property
 @pulumi.getter
 def versions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]]:
 """
 Application versions managed by this instance group. Each
 version deals with a specific instance template, allowing canary release scenarios.
 Structure is documented below.
 """
 return pulumi.get(self, "versions")
 @versions.setter
 def versions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RegionInstanceGroupManagerVersionArgs']]]]):
 pulumi.set(self, "versions", value)
 @property
 @pulumi.getter(name="waitForInstances")
 def wait_for_instances(self) -> Optional[pulumi.Input[bool]]:
 """
 Whether to wait for all instances to be created/updated before
 returning. Note that if this is set to true and the operation does not succeed, the provider will
 continue trying until it times out.
 """
 return pulumi.get(self, "wait_for_instances")
 @wait_for_instances.setter
 def wait_for_instances(self, value: Optional[pulumi.Input[bool]]):
 pulumi.set(self, "wait_for_instances", value)
 @property
 @pulumi.getter(name="waitForInstancesStatus")
 def wait_for_instances_status(self) -> Optional[pulumi.Input[str]]:
 """
 When used with `wait_for_instances` it specifies the status to wait for.
 When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
 set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
 instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
 """
 return pulumi.get(self, "wait_for_instances_status")
 @wait_for_instances_status.setter
 def wait_for_instances_status(self, value: Optional[pulumi.Input[str]]):
 pulumi.set(self, "wait_for_instances_status", value)
class RegionInstanceGroupManager(pulumi.CustomResource):
 @overload
 def __init__(__self__,
 resource_name: str,
 opts: Optional[pulumi.ResourceOptions] = None,
 auto_healing_policies: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']]] = None,
 base_instance_name: Optional[pulumi.Input[str]] = None,
 description: Optional[pulumi.Input[str]] = None,
 distribution_policy_target_shape: Optional[pulumi.Input[str]] = None,
 distribution_policy_zones: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
 name: Optional[pulumi.Input[str]] = None,
 named_ports: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]]] = None,
 project: Optional[pulumi.Input[str]] = None,
 region: Optional[pulumi.Input[str]] = None,
 stateful_disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]]] = None,
 target_pools: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
 target_size: Optional[pulumi.Input[int]] = None,
 update_policy: Optional[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']]] = None,
 versions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]]] = None,
 wait_for_instances: Optional[pulumi.Input[bool]] = None,
 wait_for_instances_status: Optional[pulumi.Input[str]] = None,
 __props__=None):
 """
 The Google Compute Engine Regional Instance Group Manager API creates and manages pools
 of homogeneous Compute Engine virtual machine instances from a common instance
 template.
 To get more information about regionInstanceGroupManagers, see:
 * [API documentation](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers)
 * How-to Guides
 * [Regional Instance Groups Guide](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups)
 > **Note:** Use [compute.InstanceGroupManager](https://www.terraform.io/docs/providers/google/r/compute_instance_group_manager.html) to create a zonal instance group manager.
 ## Example Usage
 ### With Top Level Instance Template (`Google` Provider)
 ```python
 import pulumi
 import pulumi_gcp as gcp
 autohealing = gcp.compute.HealthCheck("autohealing",
 check_interval_sec=5,
 timeout_sec=5,
 healthy_threshold=2,
 unhealthy_threshold=10,
 http_health_check=gcp.compute.HealthCheckHttpHealthCheckArgs(
 request_path="/healthz",
 port=8080,
 ))
 appserver = gcp.compute.RegionInstanceGroupManager("appserver",
 base_instance_name="app",
 region="us-central1",
 distribution_policy_zones=[
 "us-central1-a",
 "us-central1-f",
 ],
 versions=[gcp.compute.RegionInstanceGroupManagerVersionArgs(
 instance_template=google_compute_instance_template["appserver"]["id"],
 )],
 target_pools=[google_compute_target_pool["appserver"]["id"]],
 target_size=2,
 named_ports=[gcp.compute.RegionInstanceGroupManagerNamedPortArgs(
 name="custom",
 port=8888,
 )],
 auto_healing_policies=gcp.compute.RegionInstanceGroupManagerAutoHealingPoliciesArgs(
 health_check=autohealing.id,
 initial_delay_sec=300,
 ))
 ```
 ### With Multiple Versions
 ```python
 import pulumi
 import pulumi_gcp as gcp
 appserver = gcp.compute.RegionInstanceGroupManager("appserver",
 base_instance_name="app",
 region="us-central1",
 target_size=5,
 versions=[
 gcp.compute.RegionInstanceGroupManagerVersionArgs(
 instance_template=google_compute_instance_template["appserver"]["id"],
 ),
 gcp.compute.RegionInstanceGroupManagerVersionArgs(
 instance_template=google_compute_instance_template["appserver-canary"]["id"],
 target_size=gcp.compute.RegionInstanceGroupManagerVersionTargetSizeArgs(
 fixed=1,
 ),
 ),
 ])
 ```
 ## Import
 Instance group managers can be imported using the `name`, e.g.
 ```sh
 $ pulumi import gcp:compute/regionInstanceGroupManager:RegionInstanceGroupManager appserver appserver-igm
 ```
 :param str resource_name: The name of the resource.
 :param pulumi.ResourceOptions opts: Options for the resource.
 :param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerAutoHealingPoliciesArgs']] auto_healing_policies: The autohealing policies for this managed instance
 group. You can specify only one value. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/creating-groups-of-managed-instances#monitoring_groups).
 :param pulumi.Input[str] base_instance_name: The base instance name to use for
 instances in this group. The value must be a valid
 [RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
 are lowercase letters, numbers, and hyphens (-). Instances are named by
 appending a hyphen and a random four-character string to the base instance
 name.
 :param pulumi.Input[str] description: An optional textual description of the instance
 group manager.
 :param pulumi.Input[str] distribution_policy_target_shape: The shape to which the group converges either proactively or on resize events (depending on the value set in update_policy.0.instance_redistribution_type). For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/regional-mig-distribution-shape).
 :param pulumi.Input[Sequence[pulumi.Input[str]]] distribution_policy_zones: The distribution policy for this managed instance
 group. You can specify one or more values. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups#selectingzones).
 :param pulumi.Input[str] name: The name of the instance group manager. Must be 1-63 characters long and comply with RFC1035.
 :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerNamedPortArgs']]]] named_ports: The named port configuration. See the section below
 for details on configuration.
 :param pulumi.Input[str] project: The ID of the project in which the resource belongs. If it
 is not provided, the provider project is used.
 :param pulumi.Input[str] region: The region where the managed instance group resides. If not provided, the provider region is used.
 :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerStatefulDiskArgs']]]] stateful_disks: Disks created on the instances that will be preserved on instance delete, update, etc. Structure is documented below. For more information see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/configuring-stateful-disks-in-migs). Proactive cross zone instance redistribution must be disabled before you can update stateful disks on existing instance group managers. This can be controlled via the `update_policy`.
 :param pulumi.Input[Sequence[pulumi.Input[str]]] target_pools: The full URL of all target pools to which new
 instances in the group are added. Updating the target pools attribute does
 not affect existing instances.
 :param pulumi.Input[int] target_size: The target number of running instances for this managed instance group. This value should always be explicitly set unless this resource is attached to an autoscaler, in which case it should never be set.
 :param pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerUpdatePolicyArgs']] update_policy: The update policy for this managed instance group. Structure is documented below. For more information, see the [official documentation](https://cloud.google.com/compute/docs/instance-groups/updating-managed-instance-groups) and [API](https://cloud.google.com/compute/docs/reference/rest/beta/regionInstanceGroupManagers/patch)
 :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RegionInstanceGroupManagerVersionArgs']]]] versions: Application versions managed by this instance group. Each
 version deals with a specific instance template, allowing canary release scenarios.
 Structure is documented below.
 :param pulumi.Input[bool] wait_for_instances: Whether to wait for all instances to be created/updated before
 returning. Note that if this is set to true and the operation does not succeed, the provider will
 continue trying until it times out.
 :param pulumi.Input[str] wait_for_instances_status: When used with `wait_for_instances` it specifies the status to wait for.
 When `STABLE` is specified this resource will wait until the instances are stable before returning. When `UPDATED` is
 set, it will wait for the version target to be reached and any per instance configs to be effective as well as all
 instances to be stable before returning. The possible values are `STABLE` and `UPDATED`
 """
 ...
 @overload
 def __init__(__self__,
 resource_name: str,
 args: RegionInstanceGroupManagerArgs,
 opts: Optional[pulumi.ResourceOptions] = None):
 """
 The Google Compute Engine Regional Instance Group Manager API creates and manages pools
 of homogeneous Compute Engine virtual machine instances from a common instance
 template.
 To get more information about regionInstanceGroupManagers, see:
 * [API documentation](https://cloud.google.com/compute/docs/reference/latest/regionInstanceGroupManagers)
 * How-to Guides
 * [Regional Instance Groups Guide](https://cloud.google.com/compute/docs/instance-groups/distributing-instances-with-regional-instance-groups)
 > **Note:** Use [compute.InstanceGroupManager](https://www.terraform.io/docs/providers/google/r/compute_instance_group_manager.html) to create a zonal instance group manager.
 ## Example Usage
 ### With Top Level Instance Template (`Google` Provider)
 ```python
 import pulumi
 import pulumi_gcp as gcp
 autohealing = | |
| 
	> 0
 def test_real_request_part_upload_drive_media(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.part_upload_drive_media(pylark.PartUploadDriveMediaReq())
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
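 # Comment added for readability (not part of the generated suite): every test
 # in this module follows the same shape - call one pylark wrapper with
 # placeholder arguments ("x"), expect the real request to fail, and assert
 # that the raised pylark.PyLarkError carries a non-zero error code.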
 def test_real_request_finish_upload_drive_media(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.finish_upload_drive_media(
 pylark.FinishUploadDriveMediaReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_drive_member_permission_old(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_drive_member_permission_old(
 pylark.CreateDriveMemberPermissionOldReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_transfer_drive_member_permission(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.transfer_drive_member_permission(
 pylark.TransferDriveMemberPermissionReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_drive_member_permission_list(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_drive_member_permission_list(
 pylark.GetDriveMemberPermissionListReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_drive_member_permission(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_drive_member_permission(
 pylark.CreateDriveMemberPermissionReq(
 token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_delete_drive_member_permission_old(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.delete_drive_member_permission_old(
 pylark.DeleteDriveMemberPermissionOldReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_delete_drive_member_permission(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.delete_drive_member_permission(
 pylark.DeleteDriveMemberPermissionReq(
 token="x",
 member_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_drive_member_permission_old(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_drive_member_permission_old(
 pylark.UpdateDriveMemberPermissionOldReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_drive_member_permission(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_drive_member_permission(
 pylark.UpdateDriveMemberPermissionReq(
 token="x",
 member_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_check_drive_member_permission(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.check_drive_member_permission(
 pylark.CheckDriveMemberPermissionReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_drive_public_permission_v1_old(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_drive_public_permission_v1_old(
 pylark.UpdateDrivePublicPermissionV1OldReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_drive_public_permission_v2_old(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_drive_public_permission_v2_old(
 pylark.UpdateDrivePublicPermissionV2OldReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_drive_public_permission_v2(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_drive_public_permission_v2(
 pylark.GetDrivePublicPermissionV2Req()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_drive_public_permission(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_drive_public_permission(
 pylark.UpdateDrivePublicPermissionReq(
 token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_batch_get_drive_media_tmp_download_url(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.batch_get_drive_media_tmp_download_url(
 pylark.BatchGetDriveMediaTmpDownloadURLReq()
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_drive_comment_list(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_drive_comment_list(
 pylark.GetDriveCommentListReq(
 file_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_drive_comment(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_drive_comment(
 pylark.GetDriveCommentReq(
 file_token="x",
 comment_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_drive_comment(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_drive_comment(
 pylark.CreateDriveCommentReq(
 file_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_drive_comment(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_drive_comment(
 pylark.UpdateDriveCommentReq(
 file_token="x",
 comment_id="x",
 reply_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_delete_drive_comment(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.delete_drive_comment(
 pylark.DeleteDriveCommentReq(
 file_token="x",
 comment_id="x",
 reply_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_drive_comment_patch(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_drive_comment_patch(
 pylark.UpdateDriveCommentPatchReq(
 file_token="x",
 comment_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_drive_doc(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_drive_doc(pylark.CreateDriveDocReq())
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_drive_doc_content(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_drive_doc_content(
 pylark.GetDriveDocContentReq(
 doc_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_drive_doc_raw_content(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_drive_doc_raw_content(
 pylark.GetDriveDocRawContentReq(
 doc_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_drive_doc_meta(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_drive_doc_meta(
 pylark.GetDriveDocMetaReq(
 doc_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_sheet(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_sheet(pylark.CreateSheetReq())
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_sheet_meta(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_sheet_meta(
 pylark.GetSheetMetaReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_sheet_property(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_sheet_property(
 pylark.UpdateSheetPropertyReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_batch_update_sheet(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.batch_update_sheet(
 pylark.BatchUpdateSheetReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_import_sheet(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.import_sheet(pylark.ImportSheetReq())
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_drive_import_task(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_drive_import_task(pylark.CreateDriveImportTaskReq())
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_drive_import_task(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_drive_import_task(
 pylark.GetDriveImportTaskReq(
 ticket="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_move_sheet_dimension(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.move_sheet_dimension(
 pylark.MoveSheetDimensionReq(
 spreadsheet_token="x",
 sheet_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_prepend_sheet_value(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.prepend_sheet_value(
 pylark.PrependSheetValueReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_append_sheet_value(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.append_sheet_value(
 pylark.AppendSheetValueReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_insert_sheet_dimension_range(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.insert_sheet_dimension_range(
 pylark.InsertSheetDimensionRangeReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_add_sheet_dimension_range(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.add_sheet_dimension_range(
 pylark.AddSheetDimensionRangeReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_sheet_dimension_range(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_sheet_dimension_range(
 pylark.UpdateSheetDimensionRangeReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_delete_sheet_dimension_range(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.delete_sheet_dimension_range(
 pylark.DeleteSheetDimensionRangeReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_sheet_value(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_sheet_value(
 pylark.GetSheetValueReq(
 spreadsheet_token="x",
 range_="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_batch_get_sheet_value(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.batch_get_sheet_value(
 pylark.BatchGetSheetValueReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_set_sheet_value(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.set_sheet_value(
 pylark.SetSheetValueReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_batch_set_sheet_value(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.batch_set_sheet_value(
 pylark.BatchSetSheetValueReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_set_sheet_style(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.set_sheet_style(
 pylark.SetSheetStyleReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_batch_set_sheet_style(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.batch_set_sheet_style(
 pylark.BatchSetSheetStyleReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_merge_sheet_cell(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.merge_sheet_cell(
 pylark.MergeSheetCellReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_unmerge_sheet_cell(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.unmerge_sheet_cell(
 pylark.UnmergeSheetCellReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_set_sheet_value_image(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.set_sheet_value_image(
 pylark.SetSheetValueImageReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_find_sheet(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.find_sheet(
 pylark.FindSheetReq(
 spreadsheet_token="x",
 sheet_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_replace_sheet(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.replace_sheet(
 pylark.ReplaceSheetReq(
 spreadsheet_token="x",
 sheet_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_sheet_condition_format(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_sheet_condition_format(
 pylark.CreateSheetConditionFormatReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_sheet_condition_format(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_sheet_condition_format(
 pylark.GetSheetConditionFormatReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_sheet_condition_format(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_sheet_condition_format(
 pylark.UpdateSheetConditionFormatReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_delete_sheet_condition_format(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.delete_sheet_condition_format(
 pylark.DeleteSheetConditionFormatReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_sheet_protected_dimension(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_sheet_protected_dimension(
 pylark.CreateSheetProtectedDimensionReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_sheet_protected_dimension(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_sheet_protected_dimension(
 pylark.GetSheetProtectedDimensionReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_sheet_protected_dimension(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_sheet_protected_dimension(
 pylark.UpdateSheetProtectedDimensionReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_delete_sheet_protected_dimension(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.delete_sheet_protected_dimension(
 pylark.DeleteSheetProtectedDimensionReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_sheet_data_validation_dropdown(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_sheet_data_validation_dropdown(
 pylark.CreateSheetDataValidationDropdownReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_delete_sheet_data_validation_dropdown(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.delete_sheet_data_validation_dropdown(
 pylark.DeleteSheetDataValidationDropdownReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_sheet_data_validation_dropdown(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_sheet_data_validation_dropdown(
 pylark.UpdateSheetDataValidationDropdownReq(
 spreadsheet_token="x",
 sheet_id="x",
 data_validation_id=1,
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_sheet_data_validation_dropdown(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_sheet_data_validation_dropdown(
 pylark.GetSheetDataValidationDropdownReq(
 spreadsheet_token="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_sheet_filter(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_sheet_filter(
 pylark.CreateSheetFilterReq(
 spreadsheet_token="x",
 sheet_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_delete_sheet_filter(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.delete_sheet_filter(
 pylark.DeleteSheetFilterReq(
 spreadsheet_token="x",
 sheet_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_update_sheet_filter(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.update_sheet_filter(
 pylark.UpdateSheetFilterReq(
 spreadsheet_token="x",
 sheet_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_get_sheet_filter(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.get_sheet_filter(
 pylark.GetSheetFilterReq(
 spreadsheet_token="x",
 sheet_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_create_sheet_filter_view(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.create_sheet_filter_view(
 pylark.CreateSheetFilterViewReq(
 spreadsheet_token="x",
 sheet_id="x",
 )
 )
 assert e.type is pylark.PyLarkError
 assert e.value.code > 0
 def test_real_request_delete_sheet_filter_view(self):
 with pytest.raises(pylark.PyLarkError) as e:
 self.module_cli.delete_sheet_filter_view(
 pylark.DeleteSheetFilterViewReq(
 spreadsheet_token="x",
 | |
| 
	"""
IBM PAIRS RESTful API wrapper: A Python module to access PAIRS's core API to
load data into Python compatible data formats.
Copyright 2019-2021 Physical Analytics, IBM Research All Rights Reserved.
SPDX-License-Identifier: BSD-3-Clause
"""
# fold: Import Python Standard Library {{{
# Python Standard Library:
import os
from typing import List, Any
from pathlib import Path
#}}}
# fold: Import ibmpairs Modules {{{
# ibmpairs Modules:
import ibmpairs.constants as constants
import ibmpairs.common as common
from ibmpairs.logger import logger
import ibmpairs.messages as messages
#}}}
# fold: Import Third Party Libraries {{{
# Third Party Libraries:
import ibm_boto3
from ibm_botocore.client import Config as IBMConfig
from ibm_botocore.client import ClientError as IBMClientError
import json
import requests
#}}}
class IBMCOSHMACKeys(object):
 #_access_key_id: str
 #_secret_access_key: str
 
 """
 An object to represent IBM Cloud Object Storage (COS) HMAC Keys.
 :param access_key_id: Access key ID
 :type access_key_id: str
 :param secret_access_key: Secret access key
 :type secret_access_key: str
 """
 
 #
 def __str__(self):
 
 """
 The method creates a string representation of the internal class structure.
 
 :returns: A string representation of the internal class structure.
 :rtype: str
 """
 
 return_dict = self.to_dict()
 
 if ("access_key_id" in return_dict):
 return_dict["access_key_id"] = "********"
 if ("secret_access_key" in return_dict):
 return_dict["secret_access_key"] = "********"
 
 return json.dumps(return_dict, 
 indent = constants.GLOBAL_JSON_REPR_INDENT, 
 sort_keys = constants.GLOBAL_JSON_REPR_SORT_KEYS)
 #
 def __repr__(self):
 
 """
 The method creates a JSON string representation of the internal class structure.
 
 :returns: A JSON string representation of the internal class structure.
 :rtype: str
 """
 
 return_dict = self.to_dict()
 
 if ("access_key_id" in return_dict):
 return_dict["access_key_id"] = "********"
 if ("secret_access_key" in return_dict):
 return_dict["secret_access_key"] = "********"
 
 return json.dumps(return_dict, 
 indent = constants.GLOBAL_JSON_REPR_INDENT, 
 sort_keys = constants.GLOBAL_JSON_REPR_SORT_KEYS)
 
 #
 def __init__(self,
 access_key_id: str = None,
 secret_access_key: str = None
 ) -> None:
 
 self._access_key_id = access_key_id
 self._secret_access_key = secret_access_key
 
 # 
 def get_access_key_id(self):
 return self._access_key_id
 #
 def set_access_key_id(self, access_key_id):
 self._access_key_id = common.check_str(access_key_id)
 
 # 
 def del_access_key_id(self): 
 del self._access_key_id
 # 
 access_key_id = property(get_access_key_id, set_access_key_id, del_access_key_id)
 
 # 
 def get_secret_access_key(self):
 return self._secret_access_key
 #
 def set_secret_access_key(self, secret_access_key):
 self._secret_access_key = common.check_str(secret_access_key)
 
 # 
 def del_secret_access_key(self): 
 del self._secret_access_key
 # 
 secret_access_key = property(get_secret_access_key, set_secret_access_key, del_secret_access_key)
 
 # 
 def from_dict(cos_hmac_keys_dict: Any):
 
 """
 Create an IBMCOSHMACKeys object from a dictionary.
 
 :param cos_hmac_keys_dict: A dictionary that contains the keys of an IBMCOSHMACKeys.
 :type cos_hmac_keys_dict: Any 
 :rtype: ibmpairs.external.ibm.IBMCOSHMACKeys
 :raises Exception: If not a dictionary.
 """
 
 access_key_id = None
 secret_access_key = None
 
 common.check_dict(cos_hmac_keys_dict)
 if "access_key_id" in cos_hmac_keys_dict:
 if cos_hmac_keys_dict.get("access_key_id") is not None:
 access_key_id = common.check_str(cos_hmac_keys_dict.get("access_key_id"))
 if "secret_access_key" in cos_hmac_keys_dict:
 if cos_hmac_keys_dict.get("secret_access_key") is not None:
 secret_access_key = common.check_str(cos_hmac_keys_dict.get("secret_access_key"))
 return IBMCOSHMACKeys(access_key_id = access_key_id,
 secret_access_key = secret_access_key
 )
 #
 def to_dict(self):
 
 """
 Create a dictionary from the object's structure. 
 
 :rtype: dict
 """
 
 cos_hmac_keys_dict: dict = {}
 if self._access_key_id is not None:
 cos_hmac_keys_dict["access_key_id"] = self._access_key_id
 if self._secret_access_key is not None:
 cos_hmac_keys_dict["secret_access_key"] = self._secret_access_key
 return cos_hmac_keys_dict
 #
 def from_json(cos_hmac_keys_json: Any):
 
 """
 Create an IBMCOSHMACKeys object from json (dictionary or str).
 
 :param cos_hmac_keys_json: A json dictionary that contains the keys of an IBMCOSHMACKeys or a string representation of a json dictionary.
 :type cos_hmac_keys_json: Any 
 :rtype: ibmpairs.external.ibm.IBMCOSHMACKeys
 :raises Exception: If not a dictionary or a string.
 """
 if isinstance(cos_hmac_keys_json, dict):
 cos_hmac_keys = IBMCOSHMACKeys.from_dict(cos_hmac_keys_json)
 elif isinstance(cos_hmac_keys_json, str):
 cos_hmac_keys_dict = json.loads(cos_hmac_keys_json)
 cos_hmac_keys = IBMCOSHMACKeys.from_dict(cos_hmac_keys_dict)
 else:
 msg = messages.ERROR_FROM_JSON_TYPE_NOT_RECOGNIZED.format(type(cos_hmac_keys_json), "cos_hmac_keys_json")
 logger.error(msg)
 raise common.PAWException(msg)
 return cos_hmac_keys
 #
 def to_json(self):
 
 """
 Create a string representation of a json dictionary from the object's structure. 
 
 :rtype: string
 """
 
 return json.dumps(self.to_dict())
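# Illustrative usage sketch for IBMCOSHMACKeys (placeholder values only, not
# real credentials):
#   keys = IBMCOSHMACKeys.from_dict({"access_key_id": "ACCESS",
#                                    "secret_access_key": "SECRET"})
#   keys.to_json()  # -> '{"access_key_id": "ACCESS", "secret_access_key": "SECRET"}'
#   str(keys)       # masks both values as "********"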
class IBMCOSServiceCredentials(object):
 #_api_key: str
 #_cos_hmac_keys: IBMCOSHMACKeys
 #_endpoints: str
 #_iam_api_key_description: str
 #_iam_api_key_name: str
 #_iam_role_crn: str
 #_iam_service_id_crn: str
 #_resource_instance_id: str
 
 """
 An object to represent IBM Cloud Object Storage (COS) Service Credentials.
 :param api_key: API Key
 :type api_key: str
 :param cos_hmac_keys: IBM COS HMAC Keys
 :type cos_hmac_keys: ibmpairs.external.ibm.IBMCOSHMACKeys
 :param endpoints: IBM Cloud Endpoints URL
 :type endpoints: str
 :param iam_api_key_description: IAM API Key Description
 :type iam_api_key_description: str
 :param iam_api_key_name: IAM API Key Name
 :type iam_api_key_name: str
 :param iam_role_crn: IAM Role CRN
 :type iam_role_crn: str
 :param iam_service_id_crn: IAM Service ID CRN
 :type iam_service_id_crn: str
 :param resource_instance_id: Resource Instance ID
 :type resource_instance_id: str
 :raises Exception: If IBMCOSHMACKeys type is unknown.
 """
 
 #
 def __str__(self):
 
 """
 The method creates a string representation of the internal class structure.
 
 :returns: A string representation of the internal class structure.
 :rtype: str
 """
 
 return_dict = self.to_dict()
 
 if ("api_key" in return_dict):
 return_dict["api_key"] = "********"
 elif ("apikey" in return_dict):
 return_dict["apikey"] = "********"
 
 return json.dumps(return_dict, 
 indent = constants.GLOBAL_JSON_REPR_INDENT, 
 sort_keys = constants.GLOBAL_JSON_REPR_SORT_KEYS)
 #
 def __repr__(self):
 
 """
 The method creates a JSON string representation of the internal class structure.
 
 :returns: A JSON string representation of the internal class structure.
 :rtype: str
 """
 
 return_dict = self.to_dict()
 
 if ("api_key" in return_dict):
 return_dict["api_key"] = "********"
 elif ("apikey" in return_dict):
 return_dict["apikey"] = "********"
 
 return json.dumps(return_dict, 
 indent = constants.GLOBAL_JSON_REPR_INDENT, 
 sort_keys = constants.GLOBAL_JSON_REPR_SORT_KEYS)
 
 #
 def __init__(self,
 api_key: str = None,
 cos_hmac_keys = None,
 endpoints: str = None,
 iam_api_key_description: str = None,
 iam_api_key_name: str = None,
 iam_role_crn: str = None,
 iam_service_id_crn: str = None,
 resource_instance_id: str = None
 ) -> None:
 
 self._api_key = api_key
 if isinstance(cos_hmac_keys, IBMCOSHMACKeys):
 self._cos_hmac_keys = cos_hmac_keys
 elif isinstance(cos_hmac_keys, dict):
 cos_hmac_keys_object = IBMCOSHMACKeys()
 self._cos_hmac_keys = cos_hmac_keys_object.from_dict(cos_hmac_keys)
 elif isinstance(cos_hmac_keys, str):
 cos_hmac_keys_object = IBMCOSHMACKeys()
 self._cos_hmac_keys = cos_hmac_keys_object.from_json(cos_hmac_keys)
 elif cos_hmac_keys is None:
 # No keys supplied: fall back to an empty IBMCOSHMACKeys object.
 self._cos_hmac_keys = IBMCOSHMACKeys()
 else:
 msg = messages.ERROR_AUTHENTICATION_IBM_COS_HMAC_KEYS_UNKNOWN_TYPE.format(str(type(cos_hmac_keys)))
 logger.error(msg)
 raise common.PAWException(msg)
 self._endpoints = endpoints
 self._iam_api_key_description = iam_api_key_description
 self._iam_api_key_name = iam_api_key_name
 self._iam_role_crn = iam_role_crn
 self._iam_service_id_crn = iam_service_id_crn
 self._resource_instance_id = resource_instance_id
 # 
 def get_api_key(self):
 return self._api_key
 #
 def set_api_key(self, api_key):
 self._api_key = common.check_str(api_key)
 
 # 
 def del_api_key(self): 
 del self._api_key
 # 
 api_key = property(get_api_key, set_api_key, del_api_key)
 
 # 
 def get_cos_hmac_keys(self):
 return self._cos_hmac_keys
 #
 def set_cos_hmac_keys(self, cos_hmac_keys):
 self._cos_hmac_keys = common.check_class(cos_hmac_keys, IBMCOSHMACKeys)
 
 # 
 def del_cos_hmac_keys(self): 
 del self._cos_hmac_keys
 # 
 cos_hmac_keys = property(get_cos_hmac_keys, set_cos_hmac_keys, del_cos_hmac_keys)
 # 
 def get_endpoints(self):
 return self._endpoints
 #
 def set_endpoints(self, endpoints):
 self._endpoints = common.check_str(endpoints)
 
 # 
 def del_endpoints(self): 
 del self._endpoints
 # 
 endpoints = property(get_endpoints, set_endpoints, del_endpoints)
 # 
 def get_iam_api_key_description(self):
 return self._iam_api_key_description
 #
 def set_iam_api_key_description(self, iam_api_key_description):
 self._iam_api_key_description = common.check_str(iam_api_key_description)
 
 # 
 def del_iam_api_key_description(self): 
 del self._iam_api_key_description
 # 
 iam_api_key_description = property(get_iam_api_key_description, set_iam_api_key_description, del_iam_api_key_description)
 # 
 def get_iam_api_key_name(self):
 return self._iam_api_key_name
 #
 def set_iam_api_key_name(self, iam_api_key_name):
 self._iam_api_key_name = common.check_str(iam_api_key_name)
 
 # 
 def del_iam_api_key_name(self): 
 del self._iam_api_key_name
 # 
 iam_api_key_name = property(get_iam_api_key_name, set_iam_api_key_name, del_iam_api_key_name)
 # 
 def get_iam_role_crn(self):
 return self._iam_role_crn
 #
 def set_iam_role_crn(self, iam_role_crn):
 self._iam_role_crn = common.check_str(iam_role_crn)
 
 # 
 def del_iam_role_crn(self): 
 del self._iam_role_crn
 # 
 iam_role_crn = property(get_iam_role_crn, set_iam_role_crn, del_iam_role_crn)
 # 
 def get_iam_service_id_crn(self):
 return self._iam_service_id_crn
 #
 def set_iam_service_id_crn(self, iam_service_id_crn):
 self._iam_service_id_crn = common.check_str(iam_service_id_crn)
 
 # 
 def del_iam_service_id_crn(self): 
 del self._iam_service_id_crn
 # 
 iam_service_id_crn = property(get_iam_service_id_crn, set_iam_service_id_crn, del_iam_service_id_crn)
 # 
 def get_resource_instance_id(self):
 return self._resource_instance_id
 #
 def set_resource_instance_id(self, resource_instance_id):
 self._resource_instance_id = common.check_str(resource_instance_id)
 # 
 def del_resource_instance_id(self): 
 del self._resource_instance_id
 # 
 resource_instance_id = property(get_resource_instance_id, set_resource_instance_id, del_resource_instance_id)
 
 # 
 def from_dict(cos_service_credentials_dict: Any):
 
 """
 Create an IBMCOSServiceCredentials object from a dictionary.
 
 :param cos_service_credentials_dict: A dictionary that contains the keys of an IBMCOSServiceCredentials.
 :type cos_service_credentials_dict: Any 
 :rtype: ibmpairs.external.ibm.IBMCOSServiceCredentials
 :raises Exception: If not a dictionary.
 """
 
 api_key = None
 cos_hmac_keys = None
 endpoints = None
 iam_api_key_description = None
 iam_api_key_name = None
 iam_role_crn = None
 iam_service_id_crn = None
 resource_instance_id = None
 
 common.check_dict(cos_service_credentials_dict)
 if "apikey" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("apikey") is not None:
 api_key = common.check_str(cos_service_credentials_dict.get("apikey"))
 elif "api_key" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("api_key") is not None:
 api_key = common.check_str(cos_service_credentials_dict.get("api_key"))
 if "cos_hmac_keys" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("cos_hmac_keys") is not None:
 cos_hmac_keys = IBMCOSHMACKeys.from_dict(cos_service_credentials_dict.get("cos_hmac_keys"))
 if "endpoints" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("endpoints") is not None:
 endpoints = common.check_str(cos_service_credentials_dict.get("endpoints"))
 if "iam_apikey_description" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("iam_apikey_description") is not None:
 iam_api_key_description = common.check_str(cos_service_credentials_dict.get("iam_apikey_description"))
 elif "iam_api_key_description" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("iam_api_key_description") is not None:
 iam_api_key_description = common.check_str(cos_service_credentials_dict.get("iam_api_key_description"))
 if "iam_apikey_name" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("iam_apikey_name") is not None:
 iam_api_key_name = common.check_str(cos_service_credentials_dict.get("iam_apikey_name"))
 elif "iam_api_key_name" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("iam_api_key_name") is not None:
 iam_api_key_name = common.check_str(cos_service_credentials_dict.get("iam_api_key_name"))
 if "iam_role_crn" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("iam_role_crn") is not None:
 iam_role_crn = common.check_str(cos_service_credentials_dict.get("iam_role_crn"))
 if "iam_serviceid_crn" in cos_service_credentials_dict:
 if cos_service_credentials_dict.get("iam_serviceid_crn") is not None:
 iam_service_id_crn = common.check_str(cos_service_credentials_dict.get("iam_serviceid_crn"))
 elif | |
| 
	<reponame>MHelena45/feup-iope
import math
from gurobipy import *
d={}
d={ ("Doce","Doce"): 0, ("Doce","Bom"):math.inf, ("Doce","Sky"):6000, ("Doce","Moon"):5000, ("Doce","Mars"):5500,
 ("Bom","Doce"):math.inf, ("Bom","Bom"):0, ("Bom","Sky"): 6000, ("Bom","Moon"): 5800, ("Bom","Mars"):4800,
 ("Sky","Doce"): 6000, ("Sky","Bom"): 6000, ("Sky","Sky"): 0, ("Sky","Moon"): 500 , ("Sky","Mars"): 2000,
 ("Moon","Doce"):5000, ("Moon","Bom"):5800, ("Moon","Sky"): 500, ("Moon","Moon"): 0, ("Moon","Mars"): 1000,
 ("Mars","Doce"):5500, ("Mars","Bom"):4800, ("Mars","Sky"): 2000, ("Mars","Moon"): 1000, ("Mars","Mars"): 0}
'''
ID  Route                       Vessel type  Product
1   Doce – Moon – Doce          1            Corn and Copper
2   Doce – Moon – Mars – Doce   1            Corn and Iron
3   Doce – Moon – Sky – Doce    1            Corn and Copper
4   Doce – Moon – Sky – Doce    1            Corn and Iron
5   Doce – Mars – Moon – Doce   1            Corn and Copper
6   Doce – Mars – Doce          1 and 2      Corn and Iron
7   Doce – Mars – Sky – Doce    1 and 2      Corn and Copper
8   Doce – Mars – Sky – Doce    1 and 2      Corn and Iron
9   Bom – Sky – Bom             1 and 2      Wheat and Iron
10  Bom – Mars – Bom            1 and 2      Wheat and Iron
11  Bom – Sky – Mars – Bom      1 and 2      Wheat and Iron
12  Bom – Mars – Sky – Bom      1 and 2      Wheat and Iron
'''
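# Worked example, assuming the divisors below are sailing speeds (25/30 for a
# type 1 vessel, 20/24 for a type 2 vessel) so that t[route, vessel_type] is
# measured in hours:
#   t[1, 1] = d["Doce","Moon"]/25 * 2 = 5000/25 * 2 = 400 hours for route 1.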
t={}
# Doce – Moon – Doce
t[1, 1] = d["Doce","Moon"]/25 * 2 # time needed to perform the trip
# Doce – Moon - Mars - Doce
t[2, 1] = d["Doce","Moon"]/25 + d["Moon","Mars"]/30 + d["Mars","Doce"]/25
# Doce – Moon - Sky - Doce
t[3, 1] = d["Doce","Moon"]/25 + d["Moon","Sky"]/30 + d["Sky","Doce"]/25
t[4, 1] = d["Doce","Moon"]/25 + d["Moon","Sky"]/30 + d["Sky","Doce"]/25
# Doce – Mars – Moon – Doce
t[5, 1] = d["Doce","Mars"]/25 + d["Mars","Moon"]/30 + d["Moon","Doce"]/25
# Doce – Mars – Doce
t[6, 1] = d["Doce","Mars"]/25 * 2
t[6, 2] = d["Doce","Mars"]/20 * 2
# Doce – Mars – Sky – Doce
t[7, 1] = d["Doce","Mars"]/25 + d["Mars","Sky"]/30 + d["Sky","Doce"]/25
t[7, 2] = d["Doce","Mars"]/20 + d["Mars","Sky"]/24 + d["Sky","Doce"]/20
t[8, 1] = d["Doce","Mars"]/25 + d["Mars","Sky"]/30 + d["Sky","Doce"]/25
t[8, 2] = d["Doce","Mars"]/20 + d["Mars","Sky"]/24 + d["Sky","Doce"]/20
# Bom – Sky – Bom
t[9, 1] = d["Bom","Sky"]/25 * 2
t[9, 2] = d["Bom","Sky"]/20 * 2
# Bom – Mars – Bom
t[10, 1] = d["Bom","Mars"]/25 * 2
t[10, 2] = d["Bom","Mars"]/20 * 2
# Bom – Sky - Mars – Bom
t[11, 1] = d["Bom","Sky"]/25 + d["Sky","Mars"]/30 + d["Mars","Bom"]/25
t[11, 2] = d["Bom","Sky"]/20 + d["Sky","Mars"]/24 + d["Mars","Bom"]/20
# Bom – Mars - Sky - Bom
t[12, 1] = d["Bom","Mars"]/25 + d["Mars","Sky"]/30 + d["Sky","Bom"]/25
t[12, 2] = d["Bom","Mars"]/20 + d["Mars","Sky"]/24 + d["Sky","Bom"]/20
type2Trips = [i for i in range(6, 13)] # trip types 6..12 can be served by type 2 vessels
type1Trips = [i for i in range(1, 13)] # trip types 1..12 can be served by type 1 vessels
M = 200 # safe upper bound on the number of vessels of each type
model = Model("P2")
# number of ships of type 1 needed
vessel1 = model.addVar(vtype="I", name="vessel1")
# number of ships of type 2 needed
vessel2 = model.addVar(vtype="I", name="vessel2")
x = {}
a = {} # assignment of trips
for vessel in range(1, M):
 x[vessel,1] = model.addVar(vtype="B", name="x(%s,%s)"% (vessel,1))
 x[vessel,2] = model.addVar(vtype="B", name="x(%s,%s)"% (vessel,2))
 for tripType in type1Trips: 
 a[vessel,tripType,1] = model.addVar(vtype="I", name="a(%s,%s,%s)" % (vessel,tripType,1))
 for type in type2Trips: 
 a[vessel,type,2] = model.addVar(vtype="I", name="a(%s,%s,%s)" % (vessel,type,2))
# distance traveled with the type 1 vessel in Loaded
dLoaded1 = model.addVar(vtype="I", name="dLoaded(%s)" % (1))
# distance traveled with the type 2 vessel in Loaded
dLoaded2 = model.addVar(vtype="I", name="dLoaded(%s)" % (2))
# distance traveled with the type 1 vessel empty
dEmpty1 = model.addVar(vtype="I", name="dEmpty(%s)" % (1))
# distance traveled with the type 2 vessel empty
dEmpty2 = model.addVar(vtype="I", name="dEmpty(%s)" % (2))
trips = {}
# number of trips made by ship type 1 of trip 1 to 12
for tripType in type1Trips: 
 trips[tripType,1] = model.addVar(vtype="I", name="trips(%s,%s)" % (tripType,1))
# number of trips made by ship type 1 of trip 6 to 12
for type in type2Trips: 
 trips[type,2] = model.addVar(vtype="I", name="trips(%s,%s)" % (type,2))
model.update()
# Wheat
model.addConstr(quicksum(a[vessel,trip,1] for trip in range(9,13) for vessel in range(1, M)) * 35 + 
 quicksum(a[vessel,trip,2] for trip in range(9,13) for vessel in range(1, M)) * 70 >= 50000, "c1")
# Corn
model.addConstr(quicksum(a[vessel,trip,1] for trip in range(1,9) for vessel in range(1, M)) * 35 + 
 quicksum(a[vessel,trip,2] for trip in range(6,9) for vessel in range(1, M)) * 70 >= 40000, "c2")
# Iron of BOM
model.addConstr(quicksum(a[vessel,trip,1] for trip in range(9,13) for vessel in range(1, M)) * 35 
 + ( quicksum(a[vessel,trip,2] for trip in range(9,13) for vessel in range(1, M)))* 70 >= 50000, "c3")
# Copper
model.addConstr(quicksum(a[vessel,1,1] + a[vessel,3,1] + a[vessel,5,1] + a[vessel,7,1] for vessel in range(1, M)) * 35 + 
 quicksum(a[vessel,7,2] for vessel in range(1, M))* 70 >= 20000, "c4")
# Iron
model.addConstr(quicksum(a[vessel,2,1] + a[vessel,4,1] + a[vessel,6,1] + a[vessel,8,1] for vessel in range(1, M)) * 35 + 
 quicksum(a[vessel,6,2] + a[vessel,8,2] for vessel in range(1, M))* 70 >= 20000, "c5")
# Iron - Mars
model.addConstr(quicksum(a[vessel,2,1] + a[vessel,6,1] + a[vessel,10,1] + a[vessel,11,1] for vessel in range(1, M)) * 35 +
 quicksum(a[vessel,6,2] + a[vessel,10,2] + a[vessel,11,2] for vessel in range(1, M))* 70 >= 30000, "c6")
# Wheat - Mars
model.addConstr(quicksum(a[vessel,10,1] + a[vessel,12,1]for vessel in range(1, M)) * 35 + 
 quicksum(a[vessel,10,2] + a[vessel,12,2] for vessel in range(1, M))* 70 >= 20000, "c7")
# Corn - Mars
model.addConstr(quicksum(a[vessel,trip,1] for trip in range(5,9) for vessel in range(1, M)) * 35 
 + ( quicksum(a[vessel,trip,2] for trip in range(6,9) for vessel in range(1, M)))* 70 >= 10000, "c8")
# Copper - Sky
model.addConstr(quicksum(a[vessel,3,1] + a[vessel,7,1] for vessel in range(1, M)) * 35 + 
 quicksum(a[vessel,7,2] for vessel in range(1, M))* 70 >= 10000, "c9")
# Iron - Sky
model.addConstr(quicksum(a[vessel,4,1] + a[vessel,8,1] + a[vessel,9,1] + a[vessel,12,1] for vessel in range(1, M)) * 35 + 
 quicksum(a[vessel,8,2] + a[vessel,9,2] + a[vessel,12,2] for vessel in range(1, M))* 70 >= 40000, "c10")
# Wheat - Sky
model.addConstr(quicksum(a[vessel,9,1] + a[vessel,11,1] for vessel in range(1, M)) * 35 + 
 quicksum(a[vessel,9,2] + a[vessel,11,2] for vessel in range(1, M))* 70 >= 30000, "c11")
# Copper - Moon
model.addConstr(quicksum(a[vessel,1,1] + a[vessel,5,1] for vessel in range(1, M)) * 35 >= 10000, "c12")
# Corn - Moon
model.addConstr(quicksum(a[vessel,trip,1] for trip in range(1,5) for vessel in range(1, M)) * 35 >= 30000, "c13")
# for each vehicle
for vessel in range(1, M):
 # make sure the trips assigned to this vessel fit within the operating time (345 days * 24 h)
 model.addConstr(quicksum(t[tripType,1] * a[vessel,tripType,1] for tripType in type1Trips) <= 345 * 24, "c14")
 model.addConstr(quicksum(t[tripType,2] * a[vessel,tripType,2] for tripType in type2Trips) <= 345 * 24, "c15")
# for each vehicle
for vessel in range(1, M):
 # if at least one trip is assigned to a boat, the boat counts as used
 model.addConstr(quicksum(a[vessel,tripType,1] for tripType in type1Trips) 
 >= x[vessel,1], "c20(%s,%s)" % (vessel,1)) # if nothing is assigned, the value is 0
 model.addConstr(x[vessel,1] * quicksum(a[vessel,tripType,1] for tripType in type1Trips) 
 >= quicksum(a[vessel,tripType,1] for tripType in type1Trips), "c21(%s,%s)" % (vessel,1)) # if something is assigned, the value is 1
 
 model.addConstr(quicksum(a[vessel,tripType,2] for tripType in type2Trips) 
 >= x[vessel,2], "c22(%s,%s)" % (vessel,2)) # if nothing is assigned, the value is 0
 model.addConstr(x[vessel,2] * quicksum(a[vessel,tripType,2] for tripType in type2Trips) 
 >= quicksum(a[vessel,tripType,2] for tripType in type2Trips), "c23(%s,%s)" % (vessel,2)) # if something is assigned, the value is 1
 # ensure that boat x can only be used if boat x-1 has been used
 if vessel >=2:
 model.addConstr(x[vessel,1] <= x[vessel-1,1], "c24(%s,%s)" % (vessel,1))
 model.addConstr(x[vessel,2] <= x[vessel-1,2], "c25(%s,%s)" % (vessel,2))
model.addConstr(dLoaded1 == quicksum(a[vessel,1, 1] * d["Doce","Moon"] * 2 + # Doce – Moon – Doce
a[vessel,2, 1] * (d["Doce","Moon"] + d["Mars","Doce"]) + # Doce – Moon - Mars - Doce
(a[vessel,3, 1] + a[vessel,4, 1]) * (d["Doce","Moon"] + d["Sky","Doce"]) + # Doce – Moon - Sky - Doce
a[vessel,5, 1] * (d["Doce","Mars"] + d["Moon","Doce"]) + # Doce – Mars – Moon – Doce
a[vessel,6, 1] * d["Doce","Mars"] * 2 + # Doce – Mars – Doce
(a[vessel,7, 1] + a[vessel,8, 1]) * (d["Doce","Mars"] + d["Sky","Doce"]) + # Doce – Mars – Sky – Doce
a[vessel,9, 1] * d["Bom","Sky"] * 2 + # Bom – Sky – Bom
a[vessel,10, 1] * d["Bom","Mars"] * 2 + # Bom – Mars – Bom
a[vessel,11, 1] * (d["Bom","Sky"] + d["Mars","Bom"]) + # Bom – Sky - Mars – Bom
a[vessel,12, 1] * (d["Bom","Mars"] + d["Sky","Bom"]) for vessel in range(1, M)),"c16") # Bom – Mars - Sky - Bom
model.addConstr(dLoaded2 == quicksum(a[vessel,6, 2] * d["Doce","Mars"] * 2 + # | |
| 
	<gh_stars>0
#!/usr/bin/env python2.7
import functools
import pydot
import tac
from symbol_table import *
node_counter = 0
def make_node(name, graph):
 global node_counter
 node = pydot.Node("{}".format(node_counter), label='"{}"'.format(name))
 graph.add_node(node)
 node_counter += 1
 return node
def add_edge(graph, node0, node1, label = ""):
 graph.add_edge(pydot.Edge(node0, node1, label='"{}"'.format(label)))
class SemaData(object):
 def __init__(self):
 self.ret_type = None
class SemaError(RuntimeError):
 def __init__(self, message):
 super(SemaError, self).__init__(message)
class SemaIdentifierUndefinedError(SemaError):
 pass
class SemaCallingNonFunctionError(SemaError):
 pass
class SemaParamMismatchError(SemaError):
 pass
class SemaFunctionUndefinedError(SemaError):
 pass
class SemaReturnValueFromVoidError(SemaError):
 pass
class SemaNoReturnValueError(SemaError):
 pass
class SemaMultipleDeclarationError(SemaError):
 pass
class SemaIncorrectReturnTypeError(SemaError):
 pass
class SemaTypeResolveError(SemaError):
 pass
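# semafunc wraps every sema() entry point so that, when semantic analysis
# raises, the offending AST node is attached to the exception (e.ast) for
# error reporting.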
def semafunc(function):
 @functools.wraps(function)
 def wrap(*args, **kwargs):
 try:
 retval = function(*args, **kwargs)
 except Exception as e:
 if not hasattr(e, "ast"):
 e.ast = args[0]
 raise
 return retval 
 return wrap
class AST(object):
 def __init__(self):
 self.symbol_table = None
 self.parent = None
 self.start_token = None
 self.end_token = None
 def __repr__(self):
 return "ast.{}".format(type(self).__name__)
 def output_graph(self, filename):
 graph = pydot.Dot(graph_type="digraph")
 self.make_graph(graph)
 graph.write_png(filename)
 def make_graph(self, graph):
 raise NotImplementedError(type(self).__name__)
 def make_tables(self, table = None):
 raise NotImplementedError(type(self).__name__)
 @semafunc
 def sema(self, data):
 raise NotImplementedError(type(self).__name__)
 def make_tac(self, state):
 raise NotImplementedError(type(self).__name__)
class Program(AST):
 def __init__(self, statements):
 super(Program, self).__init__()
 self.statements = statements
 def make_graph(self, graph):
 global node_counter
 node_counter = 0
 node0 = make_node("Top", graph)
 for s in self.statements:
 node1 = s.make_graph(graph)
 add_edge(graph, node0, node1)
 return node0
 def make_tac(self, state):
 out = []
 for s in self.statements:
 out += s.make_tac(state)
 return out
 def make_tables(self, table = None):
 self.symbol_table = SymbolTable()
 for s in self.statements:
 s.make_tables(self.symbol_table)
 @semafunc
 def sema(self, data = None):
 if not data:
 data = SemaData()
 for s in self.statements:
 s.sema(data)
class StatementList(AST):
 def __init__(self, *statements):
 super(StatementList, self).__init__()
 self.statements = statements
 def make_tables(self, table):
 self.symbol_table = table
 for s in self.statements:
 s.make_tables(self.symbol_table)
 def __iter__(self):
 return iter(self.statements)
 @semafunc
 def sema(self, data):
 for s in self:
 s.sema(data)
 def make_tac(self, state):
 out = []
 for s in self:
 out += s.make_tac(state)
 return out
class Function(AST):
 def __init__(self, name, params, ret_type, statements):
 super(Function, self).__init__()
 self.name = name
 self.params = params
 self.ret_type = ret_type
 self.statements = statements
 def make_graph(self, graph):
 node0 = make_node("function {}".format(str(self.name)), graph)
 for x in self.params:
 node1 = make_node("param", graph)
 node2 = x.type.make_graph(graph)
 node3 = x.var.make_graph(graph)
 add_edge(graph, node0, node1)
 add_edge(graph, node1, node2)
 add_edge(graph, node1, node3)
 if self.ret_type:
 node1 = self.ret_type.make_graph(graph)
 add_edge(graph, node0, node1, "returns")
 for s in self.statements:
 node1 = s.make_graph(graph)
 add_edge(graph, node0, node1)
 return node0
 def make_tables(self, table):
 table[self.name] = self
 self.symbol_table = ParamTable(table)
 self.name.make_tables(table)
 self.ret_type.make_tables(table)
 self.params.make_tables(self.symbol_table)
 self.statements.make_tables(SubTable(self.symbol_table))
 @semafunc
 def sema(self, data):
 temp = data.ret_type
 data.ret_type = self.ret_type
 self.statements.sema(data)
 data.ret_type = temp
 def make_tac(self, state):
 out = [tac.StartFunc(self.name, self.symbol_table)]
 with state.rename_table.scope():
 out += self.params.make_tac(state)
 state.decl_list = set()
 temp = [tac.EndDecls()]
 temp += self.statements.make_tac(state)
 temp.append(tac.EndFunc(self.name))
 return out + list(state.decl_list) + temp
 
class If(AST):
 def __init__(self, cond, success, failure):
 super(If, self).__init__()
 self.cond = cond
 self.success = success
 self.failure = failure
 def make_graph(self, graph):
 node0 = make_node("if", graph)
 node1 = self.cond.make_graph(graph)
 add_edge(graph, node0, node1, "cond")
 node2 = make_node("success", graph)
 node3 = make_node("fail", graph)
 add_edge(graph, node0, node2)
 add_edge(graph, node0, node3)
 for s in self.success:
 node1 = s.make_graph(graph)
 add_edge(graph, node2, node1)
 if self.failure:
 for s in self.failure:
 node1 = s.make_graph(graph)
 add_edge(graph, node3, node1)
 return node0
 def make_tables(self, table):
 self.symbol_table = table
 self.cond.make_tables(table)
 self.success.make_tables(SubTable(self.symbol_table))
 if self.failure:
 self.failure.make_tables(SubTable(self.symbol_table))
 @semafunc
 def sema(self, data):
 type0 = self.cond.sema(data)
 resolve_type(type0, Type("int"))
 self.success.sema(data)
 if self.failure:
 self.failure.sema(data)
 def make_tac(self, state):
 """
 CMP
 JZ L0
 S0
 ...
 SN
 JP L1
 L0:
 F0
 ...
 FN
 L1:
 """
 out = []
 l0 = state.make_label()
 out += self.cond.make_tac(state)
 out.append(tac.JZ(l0, state.last_var()))
 with state.rename_table.scope():
 out += self.success.make_tac(state)
 if self.failure:
 l1 = state.make_label()
 out.append(tac.JP(l1))
 out.append(l0)
 if self.failure:
 with state.rename_table.scope():
 out += self.failure.make_tac(state)
 out.append(l1)
 return out
class Return(AST):
 def __init__(self, statement): 
 super(Return, self).__init__()
 self.statement = statement
 def make_graph(self, graph):
 node0 = make_node("return", graph)
 if self.statement:
 node1 = self.statement.make_graph(graph)
 add_edge(graph, node0, node1)
 return node0
 def make_tables(self, table):
 self.symbol_table = table
 if self.statement:
 self.statement.make_tables(table)
 @semafunc
 def sema(self, data):
 if self.statement:
 # Check if we are returning from a void function
 if data.ret_type == Type("void"):
 msg = "Returning value from void function"
 raise SemaReturnValueFromVoidError(msg)
 # Check that the return type matched the data returned.
 type0 = self.statement.sema(data)
 try:
 resolve_type(type0, data.ret_type)
 except SemaTypeResolveError:
 raise SemaIncorrectReturnTypeError("{} {}".format(type0, data.ret_type))
 elif data.ret_type != Type("void"):
 #TODO: Improve the error message given.
 msg = "No return value given"
 raise SemaNoReturnValueError(msg)
 def make_tac(self, state):
 if self.statement:
 out = self.statement.make_tac(state)
 out.append(tac.Return(state.last_var()))
 return out
 return [tac.Return(None)]
def resolve_type(type0, type1, operation = None):
 if not type0 == type1:
 raise SemaTypeResolveError("{} != {}".format(type0, type1))
 return type0
class Binop(AST):
 depth = 0
 def __init__(self, optype, lhs, rhs):
 super(Binop, self).__init__()
 self.optype = optype
 self.lhs = lhs
 self.rhs = rhs
 def __str__(self):
 out = self.optype
 Binop.depth += 1
 out += "\n" + Binop.depth * "\t" + str(self.lhs)
 out += "\n" + Binop.depth * "\t" + str(self.rhs)
 Binop.depth -= 1
 return out
 def make_graph(self, graph):
 node0 = make_node(self.optype, graph)
 node1 = self.lhs.make_graph(graph)
 node2 = self.rhs.make_graph(graph)
 add_edge(graph, node0, node1, "lhs")
 add_edge(graph, node0, node2, "rhs")
 return node0
 def make_tables(self, table):
 self.symbol_table = table
 self.lhs.make_tables(table)
 self.rhs.make_tables(table)
 #Check if this should be a string
 #self.optype.make_tables(table)
 @semafunc
 def sema(self, data):
 type0 = self.lhs.sema(data)
 type1 = self.rhs.sema(data)
 return resolve_type(type0, type1, self.optype)
 def make_tac(self, state):
 out = self.lhs.make_tac(state)
 t0 = state.last_var()
 out += self.rhs.make_tac(state)
 t1 = state.last_var()
 t2 = state.make_temp()
 out.append(tac.Op(self.optype, t2, t0, t1))
 return out
class Op(Binop): pass
class Comp(Binop): pass
class Assign(Binop): 
 def make_tac(self, state):
 out = []
 if self.optype == ":=":
 out += self.rhs.make_tac(state)
 rhs_temp = state.last_var()
 out += self.lhs.make_tac(state)
 out.append(tac.Assign(state.last_var(), rhs_temp))
 else:
 mapping = {"-=" : "-", "+=" : "+"}
 op = mapping[self.optype]
 out += self.rhs.make_tac(state)
 t0 = state.last_var()
 out += self.lhs.make_tac(state)
 t1 = state.last_var()
 out.append(tac.Op(op,state.last_var(), t1, t0))
 out += self.lhs.make_tac(state)
 return out
class Import(AST):
 def __init__(self, identifier):
 self.identifier = identifier
 def make_graph(self, graph):
 node0 = make_node("import", graph)
 node1 = self.identifier.make_graph(graph)
 add_edge(graph, node0, node1)
 return node0
 @semafunc
 def sema(self, data):
 return None
class FuncCall(AST):
 def __init__(self, identifier, params):
 self.identifier = identifier
 self.params = params
 def make_graph(self, graph):
 node0 = make_node("funccall", graph)
 node1 = make_node(self.identifier, graph)
 add_edge(graph, node0, node1, "name")
 for param in self.params:
 node2 = param.make_graph(graph)
 add_edge(graph, node0, node2, "param")
 return node0
 def make_tables(self, table):
 self.symbol_table = table
 self.identifier.make_tables(table)
 for s in self.params:
 s.make_tables(table)
 @semafunc
 def sema(self, data):
 try:
 function = self.symbol_table[self.identifier]
 except KeyError:
 msg = "function {} cannot be found.".format(self.identifier.value),
 raise SemaFunctionUndefinedError(msg)
 if not isinstance(function, Function):
 msg = "identifier {} is not a function".format(function)
 raise SemaCallingNonFunctionError(msg)
 if len(function.params) != len(self.params):
 raise SemaParamMismatchError(
 "number of arguments to function does not match")
 for type0, statement in zip(function.params, self.params):
 type1 = statement.sema(data)
 resolve_type(type0.type, type1)
 return function.ret_type
 def make_tac(self, state):
 out = [] 
 #TODO: Add function names to rename table
 name = self.identifier
 for p in self.params[::-1]:
 out += p.make_tac(state)
 out.append(tac.Param(state.last_var()))
 out.append(tac.FuncCall(name, state.make_temp()))
 return out
class Type(AST):
 def __init__(self, identifier):
 super(Type, self).__init__()
 if isinstance(identifier, str):
 self.identifier = Identifier(identifier)
 elif isinstance(identifier, Identifier):
 self.identifier = identifier
 else:
 raise Exception("Type must be Identifier or str")
 def make_graph(self, graph):
 return self.identifier.make_graph(graph)
 def make_tables(self, table):
 self.symbol_table = table
 self.identifier.make_tables(table)
 def make_tac(self, state):
 #TODO: Add types to the rename table
 state.set_var(self)
 return []
 def __str__(self):
 return str(self.identifier)
 def __eq__(self, other):
 return self.identifier == other.identifier
class For(AST):
 def __init__(self, decl, invariant, post, statements):
 self.decl = decl
 self.invariant = invariant
 self.post = post
 self.statements = statements
 def make_graph(self, graph):
 node0 = make_node("for", graph)
 if self.decl:
 node1 = self.decl.make_graph(graph)
 add_edge(graph, node0, node1, "decl")
 if self.invariant:
 node1 = self.invariant.make_graph(graph)
 add_edge(graph, node0, node1, "invariant")
 if self.post:
 node1 = self.post.make_graph(graph)
 add_edge(graph, node0, node1, "post")
 for s in self.statements:
 node1 = s.make_graph(graph)
 add_edge(graph, node0, node1)
 return node0
 def make_tables(self, table):
 self.symbol_table = SubTable(table)
 if self.decl:
 self.decl.make_tables(self.symbol_table)
 if self.invariant:
 self.invariant.make_tables(self.symbol_table)
 if self.post:
 self.post.make_tables(self.symbol_table)
 self.statements.make_tables(self.symbol_table)
 @semafunc
 def sema(self, data):
 if self.decl:
 type0 = self.decl.sema(data)
 if self.invariant:
 type0 = self.invariant.sema(data)
 resolve_type(type0, Type("int"))
 if self.post:
 type0 = self.post.sema(data)
 self.statements.sema(data)
 def make_tac(self, state):
 out = []
 """
 INIT
 JP L1
 L0:
 S0
 ...
 SN
 POST
 L1:
 CMP
 JNZ L0
 """
 l0 = state.make_label()
 l1 = state.make_label()
 out = []
 with state.rename_table.scope():
 if self.decl:
 out += self.decl.make_tac(state)
 out.append(tac.JP(l1))
 out.append(l0)
 with state.rename_table.scope():
 out += self.statements.make_tac(state)
 if self.post:
 out += self.post.make_tac(state)
 out.append(l1)
 if self.invariant:
 out += self.invariant.make_tac(state)
 out.append(tac.JNZ(l0, state.last_var()))
 return out
class While(AST):
 def __init__(self, cond, statements):
 self.cond = cond
 self.statements = statements
 def make_graph(self, graph):
 node0 = make_node("while", graph)
 node1 = self.cond.make_graph(graph)
 add_edge(graph, node0, node1, "cond")
 for s in | |
| 
	import sublime
import sys
import threading
# Helper module
try:
 from .helper import H
except:
 from helper import H
# Settings variables
try:
 from . import settings as S
except:
 import settings as S
# DBGp protocol constants
try:
 from . import dbgp
except:
 import dbgp
# Config module
from .config import get_value
# Log module
from .log import debug, info
# Protocol module
from .protocol import ProtocolConnectionException
# Util module
from .util import get_real_path
# View module
from .view import DATA_CONTEXT, DATA_STACK, DATA_WATCH, TITLE_WINDOW_WATCH, generate_context_output, generate_stack_output, get_response_properties, has_debug_view, render_regions, show_content, show_file, show_panel_content
ACTION_EVALUATE = 'action_evaluate'
ACTION_EXECUTE = 'action_execute'
ACTION_INIT = 'action_init'
ACTION_REMOVE_BREAKPOINT = 'action_remove_breakpoint'
ACTION_SET_BREAKPOINT = 'action_set_breakpoint'
ACTION_STATUS = 'action_status'
ACTION_USER_EXECUTE = 'action_user_execute'
ACTION_WATCH = 'action_watch'
def is_connected(show_status=False):
 """
 Check if client is connected to debugger engine.
 Keyword arguments:
 show_status -- Show message why client is not connected in status bar.
 """
 if S.SESSION and S.SESSION.connected:
 return True
 elif S.SESSION and show_status:
 sublime.status_message('Xdebug: Waiting for response from debugger engine.')
 elif show_status:
 sublime.status_message('Xdebug: No Xdebug session running.')
 return False
def connection_error(message):
 """
 Template for showing error message on connection error/loss.
 Keyword arguments:
 message -- Exception/reason of connection error/loss.
 """
 sublime.error_message('Please restart Xdebug debugging session.\nDisconnected from Xdebug debugger engine.\n' + message)
 info('Connection lost with debugger engine.')
 debug(message)
 # Reset connection
 try:
 S.SESSION.clear()
 except:
 pass
 finally:
 S.SESSION = None
 S.SESSION_BUSY = False
 S.BREAKPOINT_EXCEPTION = None
 S.BREAKPOINT_ROW = None
 S.BREAKPOINT_RUN = None
 S.CONTEXT_DATA.clear()
 async_session = SocketHandler(ACTION_WATCH)
 async_session.start()
 # Reset layout
 sublime.active_window().run_command('xdebug_layout')
 # Render breakpoint markers
 render_regions()
class SocketHandler(threading.Thread):
 def __init__(self, action, **options):
 threading.Thread.__init__(self)
 self.action = action
 self.options = options
 def get_option(self, option, default_value=None):
 if option in self.options.keys():
 return self.options[option]
 return default_value
 def run_command(self, command, args=None):
 if not isinstance(args, dict):
 args = {}
 self.timeout(lambda: self._run_command(command, args))
 def _run_command(self, command, args=None):
 try:
 sublime.active_window().run_command(command, args)
 except:
 # In case active_window() is not available
 pass
 def run_view_command(self, command, args=None):
 if not isinstance(args, dict):
 args = {}
 self.timeout(lambda: self._run_view_command(command, args))
 def _run_view_command(self, command, args=None):
 try:
 sublime.active_window().active_view().run_command(command, args)
 except:
 # In case there is no active_view() available
 pass
 def status_message(self, message):
 sublime.set_timeout(lambda: sublime.status_message(message), 100)
 def timeout(self, function):
 sublime.set_timeout(function, 0)
 def run(self):
 # Make sure an action is defined
 if not self.action:
 return
 try:
 S.SESSION_BUSY = True
 # Evaluate
 if self.action == ACTION_EVALUATE:
 self.evaluate(self.get_option('expression'))
 # Execute
 elif self.action == ACTION_EXECUTE:
 self.execute(self.get_option('command'))
 # Init
 elif self.action == ACTION_INIT:
 self.init()
 # Remove breakpoint
 elif self.action == ACTION_REMOVE_BREAKPOINT:
 self.remove_breakpoint(self.get_option('breakpoint_id'))
 # Set breakpoint
 elif self.action == ACTION_SET_BREAKPOINT:
 self.set_breakpoint(self.get_option('filename'), self.get_option('lineno'), self.get_option('expression'))
 # Status
 elif self.action == ACTION_STATUS:
 self.status()
 # User defined execute
 elif self.action == ACTION_USER_EXECUTE:
 self.user_execute(self.get_option('command'), self.get_option('args'))
 # Watch expression
 elif self.action == ACTION_WATCH:
 self.watch_expression()
 # Show dialog on connection error
 except ProtocolConnectionException:
 e = sys.exc_info()[1]
 self.timeout(lambda: connection_error('%s' % e))
 finally:
 S.SESSION_BUSY = False
 def evaluate(self, expression):
 if not expression or not is_connected():
 return
 # Send 'eval' command to debugger engine with code to evaluate
 S.SESSION.send(dbgp.EVAL, expression=expression)
 if get_value(S.KEY_PRETTY_OUTPUT):
 response = S.SESSION.read()
 properties = get_response_properties(response, expression)
 response = generate_context_output(properties)
 else:
 response = S.SESSION.read(return_string=True)
 # Show response data in output panel
 self.timeout(lambda: show_panel_content(response))
 def execute(self, command):
 # Do not execute if no command is set
 if not command or not is_connected():
 return
 # Send command to debugger engine
 S.SESSION.send(command)
 response = S.SESSION.read()
 # Reset previous breakpoint values
 S.BREAKPOINT_EXCEPTION = None
 S.BREAKPOINT_ROW = None
 S.CONTEXT_DATA.clear()
 self.watch_expression()
 # Set debug layout
 self.run_command('xdebug_layout')
 # Handle breakpoint hit
 for child in response:
 if child.tag == dbgp.ELEMENT_BREAKPOINT or child.tag == dbgp.ELEMENT_PATH_BREAKPOINT:
 # Get breakpoint attribute values
 fileuri = child.get(dbgp.BREAKPOINT_FILENAME)
 lineno = child.get(dbgp.BREAKPOINT_LINENO)
 exception = child.get(dbgp.BREAKPOINT_EXCEPTION)
 filename = get_real_path(fileuri)
 if (exception):
 info(exception + ': ' + child.text)
 # Remember Exception name and first line of message
 S.BREAKPOINT_EXCEPTION = {'name': exception, 'message': child.text.split('\n')[0], 'filename': fileuri, 'lineno': lineno}
 # Check if temporary breakpoint is set and hit
 if S.BREAKPOINT_RUN is not None and S.BREAKPOINT_RUN['filename'] == filename and S.BREAKPOINT_RUN['lineno'] == lineno:
 # Remove temporary breakpoint
 if S.BREAKPOINT_RUN['filename'] in S.BREAKPOINT and S.BREAKPOINT_RUN['lineno'] in S.BREAKPOINT[S.BREAKPOINT_RUN['filename']]:
 self.run_view_command('xdebug_breakpoint', {'rows': [S.BREAKPOINT_RUN['lineno']], 'filename': S.BREAKPOINT_RUN['filename']})
 S.BREAKPOINT_RUN = None
 # Skip if temporary breakpoint was not hit
 if S.BREAKPOINT_RUN is not None and (S.BREAKPOINT_RUN['filename'] != filename or S.BREAKPOINT_RUN['lineno'] != lineno):
 self.run_command('xdebug_execute', {'command': 'run'})
 return
 # Show debug/status output
 self.status_message('Xdebug: Breakpoint')
 info('Break: ' + filename + ':' + lineno)
 # Store line number of breakpoint for displaying region marker
 S.BREAKPOINT_ROW = {'filename': filename, 'lineno': lineno}
 # Focus/Open file window view
 self.timeout(lambda: show_file(filename, lineno))
 # On breakpoint get context variables and stack history
 if response.get(dbgp.ATTRIBUTE_STATUS) == dbgp.STATUS_BREAK:
 # Context variables
 context = self.get_context_values()
 self.timeout(lambda: show_content(DATA_CONTEXT, context))
 # Stack history
 stack = self.get_stack_values()
 self.timeout(lambda: show_content(DATA_STACK, stack))
 # Watch expressions
 self.watch_expression()
 # Reload session when session stopped, by reaching end of file or interruption
 if response.get(dbgp.ATTRIBUTE_STATUS) == dbgp.STATUS_STOPPING or response.get(dbgp.ATTRIBUTE_STATUS) == dbgp.STATUS_STOPPED:
 self.run_command('xdebug_session_stop', {'restart': True})
 self.run_command('xdebug_session_start', {'restart': True})
 self.status_message('Xdebug: Finished executing file on server. Reload page to continue debugging.')
 # Render breakpoint markers
 self.timeout(lambda: render_regions())
 def get_context_values(self):
 """
 Get variables in current context.
 """
 if not is_connected():
 return
 context = H.new_dictionary()
 try:
 # Super global variables
 if get_value(S.KEY_SUPER_GLOBALS):
 S.SESSION.send(dbgp.CONTEXT_GET, c=dbgp.CONTEXT_ID_SUPERGLOBALS)
 response = S.SESSION.read()
 context.update(get_response_properties(response))
 # Local variables
 S.SESSION.send(dbgp.CONTEXT_GET)
 response = S.SESSION.read()
 context.update(get_response_properties(response))
 except ProtocolConnectionException:
 e = sys.exc_info()[1]
 self.timeout(lambda: connection_error('%s' % e))
 # Store context variables in session
 S.CONTEXT_DATA = context
 return generate_context_output(context)
 def get_stack_values(self):
 """
 Get stack information for current context.
 """
 response = None
 if is_connected():
 try:
 # Get stack information
 S.SESSION.send(dbgp.STACK_GET)
 response = S.SESSION.read()
 except ProtocolConnectionException:
 e = sys.exc_info()[1]
 self.timeout(lambda: connection_error('%s' % e))
 return generate_stack_output(response)
 def get_watch_values(self):
 """
 Evaluate all watch expressions in current context.
 """
 for index, item in enumerate(S.WATCH):
 # Reset value for watch expression
 S.WATCH[index]['value'] = None
 # Evaluate watch expression when connected to debugger engine
 if is_connected():
 if item['enabled']:
 watch_value = None
 try:
 S.SESSION.send(dbgp.EVAL, expression=item['expression'])
 response = S.SESSION.read()
 watch_value = get_response_properties(response, item['expression'])
 except ProtocolConnectionException:
 pass
 S.WATCH[index]['value'] = watch_value
 def init(self):
 if not is_connected():
 return
 # Connection initialization
 init = S.SESSION.read()
 # More detailed internal information on properties
 S.SESSION.send(dbgp.FEATURE_SET, n='show_hidden', v=1)
 S.SESSION.read()
 # Set max children limit
 max_children = get_value(S.KEY_MAX_CHILDREN)
 if max_children is not False and max_children is not True and (H.is_number(max_children) or H.is_digit(max_children)):
 S.SESSION.send(dbgp.FEATURE_SET, n=dbgp.FEATURE_NAME_MAX_CHILDREN, v=max_children)
 S.SESSION.read()
 # Set max data limit
 max_data = get_value(S.KEY_MAX_DATA)
 if max_data is not False and max_data is not True and (H.is_number(max_data) or H.is_digit(max_data)):
 S.SESSION.send(dbgp.FEATURE_SET, n=dbgp.FEATURE_NAME_MAX_DATA, v=max_data)
 S.SESSION.read()
 # Set max depth limit
 max_depth = get_value(S.KEY_MAX_DEPTH)
 if max_depth is not False and max_depth is not True and (H.is_number(max_depth) or H.is_digit(max_depth)):
 S.SESSION.send(dbgp.FEATURE_SET, n=dbgp.FEATURE_NAME_MAX_DEPTH, v=max_depth)
 S.SESSION.read()
 # Set breakpoints for files
 for filename, breakpoint_data in S.BREAKPOINT.items():
 if breakpoint_data:
 for lineno, bp in breakpoint_data.items():
 if bp['enabled']:
 self.set_breakpoint(filename, lineno, bp['expression'])
 debug('breakpoint_set: ' + filename + ':' + lineno)
 # Set breakpoints for exceptions
 break_on_exception = get_value(S.KEY_BREAK_ON_EXCEPTION)
 if isinstance(break_on_exception, list):
 for exception_name in break_on_exception:
 self.set_exception(exception_name)
 # Determine if client should break at first line on connect
 if get_value(S.KEY_BREAK_ON_START):
 # Get init attribute values
 fileuri = init.get(dbgp.INIT_FILEURI)
 filename = get_real_path(fileuri)
 # Show debug/status output
 self.status_message('Xdebug: Break on start')
 info('Break on start: ' + filename)
 # Store line number of breakpoint for displaying region marker
 S.BREAKPOINT_ROW = {'filename': filename, 'lineno': 1}
 # Focus/Open file window view
 self.timeout(lambda: show_file(filename, 1))
 # Context variables
 context = self.get_context_values()
 self.timeout(lambda: show_content(DATA_CONTEXT, context))
 # Stack history
 stack = self.get_stack_values()
 if not stack:
 stack = H.unicode_string('[{level}] {filename}.{where}:{lineno}\n'
 .format(level=0, where='{main}', lineno=1, filename=fileuri))
 self.timeout(lambda: show_content(DATA_STACK, stack))
 # Watch expressions
 self.watch_expression()
 else:
 # Tell script to run its process
 self.run_command('xdebug_execute', {'command': 'run'})
 def remove_breakpoint(self, breakpoint_id):
 if not breakpoint_id or not is_connected():
 return
 S.SESSION.send(dbgp.BREAKPOINT_REMOVE, d=breakpoint_id)
 S.SESSION.read()
 def set_breakpoint(self, filename, lineno, expression=None):
 if not filename or not lineno or not is_connected():
 return
 # Get path of file on server
 fileuri = get_real_path(filename, True)
 # Set breakpoint
 S.SESSION.send(dbgp.BREAKPOINT_SET, t='line', f=fileuri, n=lineno, expression=expression)
 response = S.SESSION.read()
 # Update breakpoint id
 breakpoint_id = response.get(dbgp.ATTRIBUTE_BREAKPOINT_ID)
 if breakpoint_id:
 S.BREAKPOINT[filename][lineno]['id'] = breakpoint_id
 def set_exception(self, exception):
 if not is_connected():
 return
 S.SESSION.send(dbgp.BREAKPOINT_SET, t='exception', x='"%s"' % exception)
 S.SESSION.read()
 def status(self):
 if not is_connected():
 return
 # Send 'status' command to debugger engine
 S.SESSION.send(dbgp.STATUS)
 response = S.SESSION.read()
 # Show response in status bar
 self.status_message('Xdebug status: ' + response.get(dbgp.ATTRIBUTE_REASON) + ' - ' + response.get(dbgp.ATTRIBUTE_STATUS))
 def user_execute(self, command, args=None):
 if not command or not is_connected():
 return
 # Send command to debugger engine
 S.SESSION.send(command, args)
 response = S.SESSION.read(return_string=True)
 # | |
| 
	<reponame>pwmarcz/madness
from random import choice, random, shuffle
from math import log
import tcod as T
from settings import *
from util import *
from item import Item
import ui
class Mob(object):
 x, y = None, None
 glyph = UNKNOWN_GLYPH
 map = None
 enters_walls = False
 sanity_dice = None
 real = True
 # -4 = rests every other turn
 # -1 = rests every 5th turn
 # +1 = extra move every 5th turn
 # +4 = extra move every other turn
 speed = 0
 # N = regens N/10 health points every turn
 regen = 1
 # damage reduction
 armor = 0
 def __init__(self):
 self.to_regen = 0
 @property
 def tile(self):
 return self.map.tiles[self.x][self.y]
 def put(self, m, x, y):
 tile = m.tiles[x][y]
 self.map = m
 self.x, self.y = x, y
 assert self.tile.mob is None
 self.tile.mob = self
 m.mobs.append(self)
 def remove(self):
 self.tile.mob = None
 self.map.mobs.remove(self)
 def move(self, x, y):
 self.tile.mob = None
 self.x, self.y = x, y
 assert self.tile.mob is None
 self.tile.mob = self
 def can_walk(self, dx, dy):
 destx, desty = self.x+dx, self.y+dy
 if not in_map(destx, desty):
 return False
 tile = self.map.tiles[destx][desty]
 return (tile.walkable or self.enters_walls) and \
 not tile.mob
 def walk(self, dx, dy):
 self.move(self.x+dx, self.y+dy)
 def is_besides(self, mob):
 return max(abs(self.x-mob.x),abs(self.y-mob.y)) == 1
 # Called every time a mob has an opportunity to act (depending on speed)
 def act(self):
 if self.hp < self.max_hp:
 self.to_regen += self.regen
 if self.to_regen > 10:
 self.hp = min(self.max_hp, self.to_regen//10 + self.hp)
 self.to_regen %= 10
 # Called every turn
 def heartbeat(self):
 pass
class Player(Mob):
 glyph = '@', T.white
 regen = 4
 name = 'you'
 def __init__(self, wizard):
 super(Player, self).__init__()
 self.level = 1
 self.sanity = MAX_SANITY
 # dict letter -> effect
 self.effects = {}
 self.max_hp = 28
 self.speed = 0
 self.hp = self.max_hp
 import item
 self.items = [item.Torch(), item.PotionSanity(), item.PotionSanity()]
 self.items.append(random_by_level(1, Item.ALL)())
 if wizard:
 self.items += [item.Torch(), item.EterniumSword()]
 self.equipment = dict((slot, None) for slot in INVENTORY_SLOTS)
 self.fov_range = 3
 self.light_range = 0
 self.action_turns = 1
 self.armor = 0
 self.exp = 0
 self.death = None
 self.won = False
 @property
 def dice(self):
 weapon = self.equipment['w']
 if weapon:
 a, b, c = weapon.dice
 else:
 a, b, c = 1, 3, 0
 c += self.level-1
 return a, b, c
 def add_exp(self, mob):
 self.exp += int(1.7 ** mob.level)
 new_level = min(int(log(self.exp/5+2, 2)), MAX_CLEVEL)
 while new_level > self.level:
 self.advance()
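 # Worked example (illustrative, not in the original source): since
 # new_level = int(log2(exp/5 + 2)), the thresholds are exp >= 5*(2**L - 2):
 # level 2 at 10 exp, level 3 at 30, level 4 at 70, level 5 at 150,
 # capped at MAX_CLEVEL.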
 def advance(self):
 self.level += 1
 hp_inc = roll(2,6,self.level+3)
 self.max_hp += hp_inc
 self.hp += hp_inc
 ui.message('Congratulations! You advance to level %d.' % self.level,
 T.yellow)
 def change_light_range(self, n):
 self.light_range += n
 self.fov_range += n
 self.map.recalc_fov()
 def has_equipped(self, item):
 return item.slot and self.equipment[item.slot] == item
 def put(self, m, x, y):
 super(Player, self).put(m, x, y)
 self.map.player = self
 self.map.recalc_fov()
 def move(self, x, y):
 super(Player, self).move(x, y)
 self.map.recalc_fov()
 self.tile.on_enter()
 if self.tile.items:
 if len(self.tile.items) == 1:
 ui.message('You see here %s.' % self.tile.items[0].a)
 else:
 ui.message('Several items are lying here.')
 self.use_energy()
 def walk(self, dx, dy, panic=True):
 destx, desty = self.x+dx, self.y+dy
 if not in_map(destx, desty):
 return False
 tile = self.map.tiles[destx][desty]
 if panic and 'f' in self.effects:
 neighbors = self.map.neighbor_tiles(self.x, self.y)
 n_monsters = sum(1 if tile.mob else 0 for tile in neighbors)
 if roll(1, 12) <= min(6, n_monsters+1):
 ui.message('You panic!', T.yellow)
 dx, dy = choice(ALL_DIRS)
 self.walk(dx, dy, False)
 return
 if tile.mob:
 self.attack(tile.mob)
 elif not tile.walkable:
 ui.message('You bump into a wall.')
 else:
 self.move(destx, desty)
 def use(self, item):
 if item.slot is None:
 item.on_use(self)
 self.use_energy()
 elif self.has_equipped(item):
 self.unequip(item)
 else:
 self.equip(item)
 def unequip(self, item):
 ui.message('You unequip the %s.' % item.descr)
 item.on_unequip(self)
 self.equipment[item.slot] = None
 self.use_energy()
 def equip(self, item):
 old_item = self.equipment[item.slot]
 if old_item:
 self.unequip(old_item)
 ui.message('You equip the %s.' % item.descr)
 item.on_equip(self)
 self.equipment[item.slot] = item
 self.use_energy()
 def attack(self, mon):
 dmg = roll(*self.dice)
 if roll(1, 20) < 20:
 ui.message('You hit the %s.' % mon.name)
 else:
 ui.message('You critically hit the %s!' % mon.name, T.yellow)
 dmg *= 2
 mon.damage(dmg)
 self.use_energy()
 def damage(self, dmg, mon):
 dmg -= self.armor
 if dmg < 0:
 ui.message('Your armor protects you.')
 return
 self.hp -= dmg
 if self.hp <= 0:
 if not self.death:
 ui.message('You die...', T.red)
 # everything has to look normal?
 mon.look_normal()
 self.death = 'killed by %s%s' % (
 ('imaginary ' if not mon.real else ''),
 mon.name)
 def pick_up(self, item):
 if len(self.items) == INVENTORY_SIZE:
 ui.message('You can\'t carry any more items.', T.red)
 return
 assert item in self.tile.items
 self.tile.items.remove(item)
 self.items.append(item)
 ui.message('You pick up the %s.' % item.descr)
 self.use_energy()
 def drop(self, item):
 if self.has_equipped(item):
 self.unequip(item)
 self.items.remove(item)
 self.tile.items.append(item)
 ui.message('You drop the %s.' % item.descr)
 self.use_energy()
 def act(self):
 if not self.death:
 super(Player, self).act()
 self.action_turns += 1
 def use_energy(self):
 self.action_turns -= 1
 def wait(self):
 self.use_energy()
 def extinguish(self, light):
 ui.message('Your %s is extinguished!' % light.descr)
 if 'd' in self.effects:
 ui.message('You are likely to be eaten by a grue.')
 light.on_unequip(self)
 self.equipment['l'] = None
 self.items.remove(light)
 def heartbeat(self):
 super(Player, self).heartbeat()
 light = self.equipment['l']
 if light:
 light.turns_left -= 1
 if light.turns_left <= 0:
 self.extinguish(light)
 if roll(1, 11) == 1:
 self.decrease_sanity(roll(1, max(2, self.map.level-4)))
 def decrease_sanity(self, n):
 if n <= 0:
 return
 from effect import add_insane_effects
 self.sanity -= n
 if self.sanity <= 0:
 ui.message('You feel reality slipping away...', T.red)
 self.death = 'insane'
 else:
 add_insane_effects(self)
 for eff in list(self.effects.values()):
 if roll(1, 80) > self.sanity:
 severity = roll(1, (8-self.sanity//10))
 eff.do_effect(severity)
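 # Worked example (illustrative, not in the original source; assumes roll(1, 80)
 # is a uniform 1-80 roll): at sanity 40, each active effect fires when
 # roll(1, 80) > 40, i.e. with probability 1/2, and its severity is
 # roll(1, 8 - 40//10) = roll(1, 4).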
 def restore_sanity(self):
 self.sanity = MAX_SANITY
 ui.message('You feel more awake.', T.yellow)
 for eff in list(self.effects.values()):
 eff.remove()
 def resurrect(self):
 self.death = None
 if self.hp <= 0:
 self.hp = self.max_hp
 if self.sanity <= 0:
 self.restore_sanity()
class Monster(Mob, metaclass=Register):
 ALL = []
 ABSTRACT = True
 real = True
 multi = 1
 common = 10
 summoner = False
 fov_range = 5
 # n/30 is probability of item drop
 drop_rate = 3
 fears_light = False
 def __init__(self):
 super(Monster, self).__init__()
 self.hp = self.max_hp
 #self.real = real
 def look_like(self, cls):
 self.name = cls.name
 self.glyph = cls.glyph
 def look_normal(self):
 try:
 del self.name
 del self.glyph
 except AttributeError:
 pass
 def disappear(self):
 ui.message('The %s disappears!' % self.name)
 self.remove()
 def damage(self, dmg):
 if not (self.real or ('r' in self.map.player.effects)):
 self.disappear()
 return
 dmg -= self.armor
 if dmg < 0:
 ui.message('The %s shrugs off the hit.' % self.name)
 return
 self.hp -= dmg
 if self.hp <= 0:
 if roll(1, 30) <= self.drop_rate:
 item = random_by_level(self.map.level, Item.ALL)()
 self.tile.items.append(item)
 self.die()
 else:
 ui.message('The %s is %s.' % (self.name, self.get_wounds()))
 def die(self):
 self.look_normal()
 if self.map.is_visible(self.x, self.y):
 ui.message('The %s dies!' % self.name)
 self.remove()
 self.map.player.add_exp(self)
 def get_wounds(self):
 p = 100*self.hp/self.max_hp
 if p < 10:
 return 'almost dead'
 elif p < 30:
 return 'severely wounded'
 elif p < 70:
 return 'moderately wounded'
 else:
 return 'lightly wounded'
 # return distance if monster can see player, None if not
 def see_player(self):
 player = self.map.player
 fov_range = self.fov_range + player.light_range/2
 if T.map_is_in_fov(self.map.fov_map, self.x, self.y):
 d = distance(self.x, self.y, player.x, player.y)
 if d <= fov_range:
 return d
 return None
 def walk_randomly(self):
 dirs = [dx_dy for dx_dy in ALL_DIRS if self.can_walk(dx_dy[0], dx_dy[1])]
 if dirs != []:
 self.walk(*choice(dirs))
 def summon_monsters(self):
 if self.map.is_visible(self.x, self.y):
 ui.message('The %s summons monsters!' % self.name)
 else:
 ui.message('You hear arcane mumbling.')
 n = roll(2, 3)
 mcls = random_by_level(self.map.level, Monster.ALL)
 dirs = [(-1, 0), (1, 0), (0, -1), (0, 1)]
 shuffle(dirs)
 for dx, dy in dirs:
 n = self.map.flood(self.x+dx, self.y+dy, mcls, n)
 def act(self):
 player = self.map.player
 d = self.see_player()
 if d:
 if self.summoner and roll(1, 6) == 1:
 self.summon_monsters()
 return
 dx, dy = dir_towards(self.x, self.y,
 player.x, player.y)
 if player.light_range > 0 and self.fears_light:
 if self.can_walk(-dx, -dy):
 self.walk(-dx, -dy)
 elif player.is_besides(self):
 self.attack_player()
 else:
 self.walk_randomly()
 else:
 if player.is_besides(self):
 self.attack_player()
 elif self.can_walk(dx, dy):
 self.walk(dx, dy)
 else:
 self.walk_randomly()
 else:
 self.walk_randomly()
 def attack_player(self):
 player = self.map.player
 dmg = roll(*self.dice)
 if roll(1, 20) < 20:
 ui.message('The %s hits you.' % self.name)
 else:
 ui.message('The %s critically hits you!' % self.name, T.yellow)
 dmg *= 2
 if self.real or ('r' in player.effects):
 player.damage(dmg, self)
 if self.sanity_dice and not player.death:
 d = roll(*self.sanity_dice)
 ui.message('You have trouble thinking straight!', T.yellow)
 player.decrease_sanity(d)
class UnrealMonster(Monster):
 ABSTRACT = True
 real = False
 drop_rate = 0
class HappyMonster(UnrealMonster):
 ABSTRACT = True
 ALL = []
class DarkMonster(UnrealMonster):
 ABSTRACT = True
 ALL = []
 fears_light = True
 enters_walls = True
##### MONSTERS
class Rat(Monster):
 name = 'rat'
 glyph = 'r', T.dark_orange
 max_hp = 4
 dice = 1, 3, 0
 drop_rate = 1
 multi = 4
 level = 1
class Bat(Monster):
 name = 'bat'
 glyph = 'B', | |
| 
	<filename>fairseq/models/nat/vatex_cmlm_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file implements:
Ghazvininejad, Marjan, et al.
"Constant-time machine translation with conditional masked language models."
arXiv preprint arXiv:1904.09324 (2019).
"""
from fairseq.models import register_model, register_model_architecture
from fairseq.models.nat import NATransformerModel
from fairseq.utils import new_arange
from fairseq.models import (
 FairseqEncoder)
from fairseq.models.transformer import (
 Embedding,
 TransformerDecoderLayer)
from fairseq import options, utils
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from fairseq.modules import (
 TransformerEncoderLayer,
 LayerNorm,
 PositionalEmbedding,
 SinusoidalPositionalEmbedding
)
from typing import Optional, Dict
from fairseq.models.fairseq_encoder import EncoderOut
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models.nat import (
 FairseqNATDecoder,
 ensemble_decoder
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params
def _skeptical_unmasking(output_scores, output_masks, p):
 sorted_index = output_scores.sort(-1)[1]
 boundary_len = (
 (output_masks.sum(1, keepdim=True).type_as(output_scores) - 2) * p
 ).long()
 skeptical_mask = new_arange(output_masks) < boundary_len
 return skeptical_mask.scatter(1, sorted_index, skeptical_mask)
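# Illustrative note (not in the original source): the returned mask marks the
# lowest-scoring positions for re-masking. With 10 non-pad tokens, max_step=10
# and p = 1 - (step + 1) / max_step, step 0 re-masks int((10 - 2) * 0.9) = 7
# tokens, step 4 re-masks 4, and the final step none, so the output is
# progressively committed across the refinement iterations.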
def _mean_pooling(enc_feats, src_masks):
 # enc_feats: T x B x C
 # src_masks: B x T or None
 if src_masks is None:
 enc_feats = enc_feats.mean(0)
 else:
 src_masks = (~src_masks).transpose(0, 1).type_as(enc_feats)
 enc_feats = (
 (enc_feats / src_masks.sum(0)[None, :, None]) * src_masks[:, :, None]
 ).sum(0)
 return enc_feats
def _uniform_assignment(src_lens, trg_lens):
 max_trg_len = trg_lens.max()
 steps = (src_lens.float() - 1) / (trg_lens.float() - 1) # step-size
 # max_trg_len
 index_t = utils.new_arange(trg_lens, max_trg_len).float()
 index_t = steps[:, None] * index_t[None, :] # batch_size X max_trg_len
 index_t = torch.round(index_t).long().detach()
 return index_t
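# Worked example (illustrative, not in the original source): for a source of
# length 7 and a target of length 4, steps = (7 - 1) / (4 - 1) = 2.0, so target
# positions 0..3 are mapped to source indices [0, 2, 4, 6].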
@register_model("vatex_cmlm_transformer")
class VatexCMLMNATransformerModel(NATransformerModel):
 def __init__(self, args, encoder, decoder):
 super(VatexCMLMNATransformerModel, self).__init__(args, encoder, decoder)
 self.args = args
 @staticmethod
 def add_args(parser):
 NATransformerModel.add_args(parser)
 @classmethod
 def build_encoder(cls, args, embed_dim, **kwargs):
 return VatexEncoder(args, embed_dim)
 @classmethod
 def build_decoder(cls, args, tgt_dict, embed_tokens):
 decoder = VatexNATransformerDecoder(args, tgt_dict, embed_tokens)
 if getattr(args, "apply_bert_init", False):
 decoder.apply(init_bert_params)
 return decoder
 @classmethod
 def build_model(cls, args, task):
 tgt_dict = task.target_dictionary
 assert args.decoder_embed_dim == args.encoder_embed_dim
 embed_dim = args.decoder_embed_dim
 def build_embedding(dictionary, embed_dim, path=None):
 num_embeddings = len(dictionary)
 padding_idx = dictionary.pad()
 emb = Embedding(num_embeddings, embed_dim, padding_idx)
 # if provided, load from preloaded dictionaries
 if path:
 embed_dict = utils.parse_embedding(path)
 utils.load_embedding(embed_dict, dictionary, emb)
 return emb
 decoder_embed_tokens = build_embedding(
 tgt_dict, args.decoder_embed_dim, args.encoder_embed_path
 )
 encoder = cls.build_encoder(args, embed_dim)
 decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
 return cls(args, encoder, decoder)
 def forward(
 self, src_tokens, src_lengths, prev_output_tokens, tgt_tokens, tgt_lengths,
 positions, langs, **kwargs
 ):
 assert not self.decoder.src_embedding_copy, "do not support embedding copy."
 # encoding
 encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs)
 # length prediction
 length_out = self.decoder.forward_length(normalize=False, encoder_out=encoder_out)
 length_tgt = self.decoder.forward_length_prediction(length_out, tgt_lengths)
 # decoding
 word_ins_out = self.decoder(
 normalize=False,
 prev_output_tokens=prev_output_tokens,
 encoder_out=encoder_out,
 positions=positions,
 langs=langs)
 word_ins_mask = prev_output_tokens.eq(self.unk)
 return {
 "word_ins": {
 "out": word_ins_out, "tgt": tgt_tokens,
 "mask": word_ins_mask, "ls": self.args.label_smoothing,
 "nll_loss": True
 },
 "length": {
 "out": length_out, "tgt": length_tgt,
 "factor": self.decoder.length_loss_factor
 }
 }
 
 def initialize_output_tokens(self, encoder_out, src_tokens, tgt_lang):
 # length prediction
 length_tgt = self.decoder.forward_length_prediction(
 self.decoder.forward_length(normalize=True, encoder_out=encoder_out),
 tgt_lengths=None
 )
 print("predict tgt_lengths: ", length_tgt)
 max_length = length_tgt.clamp_(min=2).max()
 idx_length = utils.new_arange(src_tokens, max_length)
 positions = torch.arange(1, max_length + 1)[None, :].repeat(src_tokens.size(0), 1).to(src_tokens.device)
 positions.masked_fill_(idx_length[None, :] + 1 > length_tgt[:, None], 0)
 
 initial_output_tokens = src_tokens.new_zeros(
 src_tokens.size(0), max_length
 ).long().fill_(self.pad)
 initial_output_tokens.masked_fill_(
 idx_length[None, :] < length_tgt[:, None], self.unk
 )
 initial_output_tokens[:, 0] = self.bos
 if tgt_lang == "en":
 initial_output_tokens[:, 1] = self.en_tag
 langs = src_tokens.new_zeros(src_tokens.size(0), max_length).long()
 elif tgt_lang == "ch":
 initial_output_tokens[:, 1] = self.ch_tag
 langs = src_tokens.new_ones(src_tokens.size(0), max_length).long()
 else:
 assert tgt_lang == ("en", "ch")
 pass
 initial_output_tokens.scatter_(1, length_tgt[:, None] - 1, self.eos)
 initial_output_scores = initial_output_tokens.new_zeros(
 *initial_output_tokens.size()
 ).type_as(encoder_out.encoder_out)
 
 
 
 return langs, positions, DecoderOut(
 output_tokens=initial_output_tokens,
 output_scores=initial_output_scores,
 attn=None,
 step=0,
 max_step=0,
 history=None
 )
 def forward_decoder(self, decoder_out, encoder_out, positions, langs, decoding_format=None, **kwargs):
 step = decoder_out.step
 max_step = decoder_out.max_step
 output_tokens = decoder_out.output_tokens
 output_scores = decoder_out.output_scores
 history = decoder_out.history
 # execute the decoder
 output_masks = output_tokens.eq(self.unk)
 _scores, _tokens = self.decoder(
 normalize=True,
 prev_output_tokens=output_tokens,
 encoder_out=encoder_out,
 positions=positions,
 langs=langs
 ).max(-1)
 output_tokens.masked_scatter_(output_masks, _tokens[output_masks])
 output_scores.masked_scatter_(output_masks, _scores[output_masks])
 if history is not None:
 history.append(output_tokens.clone())
 # skeptical decoding (depend on the maximum decoding steps.)
 if (step + 1) < max_step:
 skeptical_mask = _skeptical_unmasking(
 output_scores, output_tokens.ne(self.pad), 1 - (step + 1) / max_step
 )
 output_tokens.masked_fill_(skeptical_mask, self.unk)
 output_scores.masked_fill_(skeptical_mask, 0.0)
 if history is not None:
 history.append(output_tokens.clone())
 return decoder_out._replace(
 output_tokens=output_tokens,
 output_scores=output_scores,
 attn=None,
 history=history
 )
class VatexEncoder(FairseqEncoder):
 def __init__(self, args, embed_dim):
 super(VatexEncoder, self).__init__(dictionary=None)
 self.cnn = nn.Conv1d(1024, 512, kernel_size=3, stride=1, padding=1)
 self.padding_idx = 0 # note: this differs from the padding_idx of tgt_dict.
 self.embed_positions = (
 PositionalEmbedding(
 args.max_source_positions,
 embed_dim,
 self.padding_idx,
 learned=args.encoder_learned_pos,
 )
 if not args.no_token_positional_embeddings
 else None
 )
 def forward(self, src_videos, src_lengths=None, **kwargs):
 x = self.cnn(src_videos.transpose(1, 2).contiguous()) # B x C x T
 x = x.transpose(1, 2).contiguous().transpose(0, 1) # T X B X C
 return EncoderOut(
 encoder_out=x, # T x B x C
 encoder_padding_mask=None, # B x T
 encoder_embedding=None, # B x T x C
 encoder_states=None, # List[T x B x C]
 )
 
 @torch.jit.export
 def reorder_encoder_out(self, encoder_out: EncoderOut, new_order):
 """
 Reorder encoder output according to *new_order*.
 Args:
 encoder_out: output from the ``forward()`` method
 new_order (LongTensor): desired order
 Returns:
 *encoder_out* rearranged according to *new_order*
 """
 new_encoder_out: Dict[str, Tensor] = {}
 new_encoder_out["encoder_out"] = (
 encoder_out.encoder_out
 if encoder_out.encoder_out is None
 else encoder_out.encoder_out.index_select(1, new_order)
 )
 new_encoder_out["encoder_padding_mask"] = (
 encoder_out.encoder_padding_mask
 if encoder_out.encoder_padding_mask is None
 else encoder_out.encoder_padding_mask.index_select(0, new_order)
 )
 new_encoder_out["encoder_embedding"] = (
 encoder_out.encoder_embedding
 if encoder_out.encoder_embedding is None
 else encoder_out.encoder_embedding.index_select(0, new_order)
 )
 encoder_states = encoder_out.encoder_states
 if encoder_states is not None:
 for idx, state in enumerate(encoder_states):
 encoder_states[idx] = state.index_select(1, new_order)
 return EncoderOut(
 encoder_out=new_encoder_out["encoder_out"], # T x B x C
 encoder_padding_mask=new_encoder_out["encoder_padding_mask"], # B x T
 encoder_embedding=new_encoder_out["encoder_embedding"], # B x T x C
 encoder_states=encoder_states, # List[T x B x C]
 )
class VatexNATransformerDecoder(FairseqNATDecoder):
 def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
 super().__init__(
 args, dictionary, embed_tokens, no_encoder_attn=no_encoder_attn
 )
 self.dictionary = dictionary
 self.bos = dictionary.bos()
 self.unk = dictionary.unk()
 self.eos = dictionary.eos()
 self.encoder_embed_dim = args.encoder_embed_dim
 self.max_target_positions = args.max_target_positions
 self.sg_length_pred = getattr(args, "sg_length_pred", False)
 self.pred_length_offset = getattr(args, "pred_length_offset", False)
 self.length_loss_factor = getattr(args, "length_loss_factor", 0.1)
 self.src_embedding_copy = getattr(args, "src_embedding_copy", False)
 self.embed_length = Embedding(256, self.encoder_embed_dim, None)
 self.embed_positions = nn.Embedding(num_embeddings=args.max_target_positions,
 embedding_dim=self.encoder_embed_dim,
 padding_idx=self.padding_idx)
 self.embed_langs = nn.Embedding(num_embeddings=2,
 embedding_dim=self.encoder_embed_dim,
 padding_idx=None)
 nn.init.normal_(self.embed_positions.weight, mean=0, std=0.02)
 nn.init.normal_(self.embed_langs.weight, mean=0, std=0.02)
 @ensemble_decoder
 def forward(self, normalize, encoder_out, prev_output_tokens, positions, langs, step=0, **unused):
 features, _ = self.extract_features(
 prev_output_tokens, positions, langs,
 encoder_out=encoder_out,
 embedding_copy=(step == 0) & self.src_embedding_copy,
 )
 decoder_out = self.output_layer(features)
 return F.log_softmax(decoder_out, -1) if normalize else decoder_out
 @ensemble_decoder
 def forward_length(self, normalize, encoder_out):
 enc_feats = encoder_out.encoder_out # T x B x C
 src_masks = encoder_out.encoder_padding_mask # B x T or None
 enc_feats = _mean_pooling(enc_feats, src_masks)
 if self.sg_length_pred:
 enc_feats = enc_feats.detach()
 length_out = F.linear(enc_feats, self.embed_length.weight)
 return F.log_softmax(length_out, -1) if normalize else length_out
 def extract_features(
 self,
 prev_output_tokens,
 positions: Optional[Tensor]=None,
 langs: Optional[Tensor]=None,
 encoder_out=None,
 early_exit=None,
 embedding_copy=False,
 **unused
 ):
 """
 Similar to *forward* but only return features.
 Inputs:
 prev_output_tokens: Tensor(B, T)
 encoder_out: a dictionary of hidden states and masks
 Returns:
 tuple:
 - the decoder's features of shape `(batch, tgt_len, embed_dim)`
 - a dictionary with any model-specific outputs
 the LevenshteinTransformer decoder has full-attention to all generated tokens
 """
 # embedding
 x, decoder_padding_mask = self.forward_embedding(prev_output_tokens, positions, langs)
 # B x T x C -> T x B x C
 x = x.transpose(0, 1)
 attn = None
 inner_states = [x]
 # decoder layers
 for i, layer in enumerate(self.layers):
 # early exit from the decoder.
 if (early_exit is not None) and (i >= early_exit):
 break
 x, attn, _ = layer(
 x,
 encoder_out.encoder_out if encoder_out is not None else None,
 encoder_out.encoder_padding_mask if encoder_out is not None else None,
 self_attn_mask=None,
 self_attn_padding_mask=decoder_padding_mask,
 )
 inner_states.append(x)
 if self.layer_norm:
 x = self.layer_norm(x)
 # T x B x C -> B x T x C
 x = x.transpose(0, 1)
 if self.project_out_dim is not None:
 x = self.project_out_dim(x)
 return x, {"attn": attn, "inner_states": inner_states}
 def forward_embedding(self, x, positions, langs):
 # embed positions
 embed_positions = self.embed_positions(positions)
 # embed langs
 embed_langs = self.embed_langs(langs)
 # embed token
 x = self.embed_scale * self.embed_tokens(x)
 if positions is not None:
 x += (embed_positions + embed_langs)
 x = F.dropout(x, p=self.dropout, training=self.training)
 decoder_padding_mask = positions.eq(self.padding_idx)
 return x, decoder_padding_mask
 def forward_copying_source(self, src_embeds, src_masks, tgt_masks):
 length_sources = src_masks.sum(1)
 length_targets = tgt_masks.sum(1)
 mapped_inputs = _uniform_assignment(length_sources, length_targets).masked_fill(
 ~tgt_masks, 0
 )
 copied_embedding = torch.gather(
 src_embeds,
 1,
 mapped_inputs.unsqueeze(-1).expand(
 *mapped_inputs.size(), src_embeds.size(-1)
 ),
 )
 return copied_embedding
 def forward_length_prediction(self, length_out, tgt_lengths=None):
 if tgt_lengths is not None:
 # obtain the length target
 length_tgt = tgt_lengths.clamp(min=0, max=255)
 else:
 # predict the length target (greedy for now)
 # TODO: implementing length-beam
 pred_lengs = length_out.max(-1)[1]
 length_tgt = pred_lengs
 return length_tgt
 def max_positions(self):
 """Maximum output length supported by the decoder."""
 return self.max_target_positions
 # return min(self.max_target_positions, self.embed_positions.max_positions)
@register_model_architecture("vatex_cmlm_transformer", "vatex_cmlm_transformer")
def cmlm_base_architecture(args):
 args.encoder_embed_path = getattr(args, | |
| 
	"""
build layer
Programmer: <NAME>
Date: 2021.3
"""
import inspect
import platform
import torch.nn as nn
import torch.nn.functional as F
if platform.system() == 'Windows':
 import regex as re
else:
 import re
from dl_toolbox_cwm.model.utils import xavier_init
from dl_toolbox_cwm.model.utils.core.misc import is_tuple_of
from dl_toolbox_cwm.model.utils.core.registry import Registry, build_from_cfg
from dl_toolbox_cwm.model.utils.core.parrots_wrapper import SyncBatchNorm, _BatchNorm, _InstanceNorm
__all__ = [
 'build_conv_layer',
 'build_norm_layer',
 'build_activation_layer',
 'build_padding_layer',
 'build_upsample_layer',
 'build_plugin_layer'
]
CONV_LAYERS = Registry('conv layer')
CONV_LAYERS.register_module('Conv1d', module=nn.Conv1d)
CONV_LAYERS.register_module('Conv2d', module=nn.Conv2d)
CONV_LAYERS.register_module('Conv3d', module=nn.Conv3d)
CONV_LAYERS.register_module('Conv', module=nn.Conv2d)
def build_conv_layer(cfg, *args, **kwargs):
 """
 Build convolution layer.
 Args:
 cfg (None or dict): The conv layer config, which should contain:
 - type (str): Layer type.
 - layer args: Args needed to instantiate a conv layer.
 args (argument list): Arguments passed to the `__init__`
 method of the corresponding conv layer.
 kwargs (keyword arguments): Keyword arguments passed to the `__init__`
 method of the corresponding conv layer.
 Returns:
 nn.Module: Created conv layer.
 """
 if cfg is None:
 cfg_ = dict(type='Conv2d')
 else:
 if not isinstance(cfg, dict):
 raise TypeError('cfg must be a dict')
 if 'type' not in cfg:
 raise KeyError('the cfg dict must contain the key "type"')
 cfg_ = cfg.copy()
 layer_type = cfg_.pop('type')
 if layer_type not in CONV_LAYERS:
 raise KeyError(f'Unrecognized conv type {layer_type}')
 else:
 conv_layer = CONV_LAYERS.get(layer_type)
 layer = conv_layer(*args, **kwargs, **cfg_)
 return layer
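# Usage sketch (illustrative; values are arbitrary examples, not from the
# original file): the cfg dict selects the layer class, and the remaining cfg
# keys plus *args/**kwargs are forwarded to its constructor.
# >>> conv = build_conv_layer(dict(type='Conv2d'), 3, 16, kernel_size=3, padding=1)
# is equivalent to nn.Conv2d(3, 16, kernel_size=3, padding=1); passing cfg=None
# falls back to the default 'Conv2d' type.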
NORM_LAYERS = Registry('norm layer')
NORM_LAYERS.register_module('BN', module=nn.BatchNorm2d)
NORM_LAYERS.register_module('BN1d', module=nn.BatchNorm1d)
NORM_LAYERS.register_module('BN2d', module=nn.BatchNorm2d)
NORM_LAYERS.register_module('BN3d', module=nn.BatchNorm3d)
NORM_LAYERS.register_module('SyncBN', module=SyncBatchNorm)
NORM_LAYERS.register_module('GN', module=nn.GroupNorm)
NORM_LAYERS.register_module('LN', module=nn.LayerNorm)
NORM_LAYERS.register_module('IN', module=nn.InstanceNorm2d)
NORM_LAYERS.register_module('IN1d', module=nn.InstanceNorm1d)
NORM_LAYERS.register_module('IN2d', module=nn.InstanceNorm2d)
NORM_LAYERS.register_module('IN3d', module=nn.InstanceNorm3d)
def infer_norm_abbr(class_type):
 """Infer abbreviation from the class name.
 When we build a norm layer with `build_norm_layer()`, we want to preserve
 the norm type in variable names, e.g., self.bn1, self.gn. This method will
 infer the abbreviation to map class types to abbreviations.
 Rule 1: If the class has the property "_abbr_", return the property.
 Rule 2: If the parent class is _BatchNorm, GroupNorm, LayerNorm or
 InstanceNorm, the abbreviation of this layer will be "bn", "gn", "ln" and
 "in" respectively.
 Rule 3: If the class name contains "batch", "group", "layer" or "instance",
 the abbreviation of this layer will be "bn", "gn", "ln" and "in"
 respectively.
 Rule 4: Otherwise, the abbreviation falls back to "norm".
 Args:
 class_type (type): The norm layer type.
 Returns:
 str: The inferred abbreviation.
 """
 if not inspect.isclass(class_type):
 raise TypeError(
 f'class_type must be a type, but got {type(class_type)}')
 if hasattr(class_type, '_abbr_'):
 return class_type._abbr_
 if issubclass(class_type, _InstanceNorm): # IN is a subclass of BN
 return 'in'
 elif issubclass(class_type, _BatchNorm):
 return 'bn'
 elif issubclass(class_type, nn.GroupNorm):
 return 'gn'
 elif issubclass(class_type, nn.LayerNorm):
 return 'ln'
 else:
 class_name = class_type.__name__.lower()
 if 'batch' in class_name:
 return 'bn'
 elif 'group' in class_name:
 return 'gn'
 elif 'layer' in class_name:
 return 'ln'
 elif 'instance' in class_name:
 return 'in'
 else:
 return 'norm'
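# Examples (illustrative, not in the original file):
# >>> infer_norm_abbr(nn.BatchNorm2d)  # Rule 2 -> 'bn'
# >>> infer_norm_abbr(nn.GroupNorm)    # Rule 2 -> 'gn'
# A class defining a `_abbr_` attribute would return that value instead (Rule 1).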
def build_norm_layer(cfg, num_features, postfix=''):
 """Build normalization layer.
 Args:
 cfg (dict): The norm layer config, which should contain:
 - type (str): Layer type.
 - layer args: Args needed to instantiate a norm layer.
 - requires_grad (bool, optional): Whether to stop gradient updates.
 num_features (int): Number of input channels.
 postfix (int | str): The postfix to be appended into norm abbreviation
 to create named layer.
 Returns:
 (str, nn.Module): The first element is the layer name consisting of
 abbreviation and postfix, e.g., bn1, gn. The second element is the
 created norm layer.
 """
 if not isinstance(cfg, dict):
 raise TypeError('cfg must be a dict')
 if 'type' not in cfg:
 raise KeyError('the cfg dict must contain the key "type"')
 cfg_ = cfg.copy()
 layer_type = cfg_.pop('type')
 if layer_type not in NORM_LAYERS:
 raise KeyError(f'Unrecognized norm type {layer_type}')
 norm_layer = NORM_LAYERS.get(layer_type)
 abbr = infer_norm_abbr(norm_layer)
 assert isinstance(postfix, (int, str))
 name = abbr + str(postfix)
 requires_grad = cfg_.pop('requires_grad', True)
 cfg_.setdefault('eps', 1e-5)
 if layer_type != 'GN':
 layer = norm_layer(num_features, **cfg_)
 if layer_type == 'SyncBN':
 layer._specify_ddp_gpu_num(1)
 else:
 assert 'num_groups' in cfg_
 layer = norm_layer(num_channels=num_features, **cfg_)
 for param in layer.parameters():
 param.requires_grad = requires_grad
 return name, layer
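# Usage sketch (illustrative; channel counts are arbitrary examples):
# >>> name, layer = build_norm_layer(dict(type='BN', requires_grad=False), 64, postfix=1)
# returns ('bn1', nn.BatchNorm2d(64, eps=1e-05)) with all parameters frozen;
# >>> build_norm_layer(dict(type='GN', num_groups=8), 64)
# returns ('gn', nn.GroupNorm(8, 64, eps=1e-05)), since 'GN' is built with
# num_channels=num_features and requires num_groups in the cfg.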
def is_norm(layer, exclude=None):
 """Check if a layer is a normalization layer.
 Args:
 layer (nn.Module): The layer to be checked.
 exclude (type | tuple[type]): Types to be excluded.
 Returns:
 bool: Whether the layer is a norm layer.
 """
 if exclude is not None:
 if not isinstance(exclude, tuple):
 exclude = (exclude, )
 if not is_tuple_of(exclude, type):
 raise TypeError(
 f'"exclude" must be either None or type or a tuple of types, '
 f'but got {type(exclude)}: {exclude}')
 if exclude and isinstance(layer, exclude):
 return False
 all_norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm)
 return isinstance(layer, all_norm_bases)
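# Examples (illustrative, not in the original file):
# >>> is_norm(nn.BatchNorm2d(4))                      # True
# >>> is_norm(nn.Conv2d(3, 3, 1))                     # False
# >>> is_norm(nn.BatchNorm2d(4), exclude=_BatchNorm)  # False, type excluded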
ACTIVATION_LAYERS = Registry('activation layer')
for module in [
 nn.ReLU, nn.LeakyReLU, nn.PReLU, nn.RReLU, nn.ReLU6, nn.ELU,
 nn.Sigmoid, nn.Tanh
]:
 ACTIVATION_LAYERS.register_module(module=module)
def build_activation_layer(cfg):
 """Build activation layer.
 Args:
 cfg (dict): The activation layer config, which should contain:
 - type (str): Layer type.
 - layer args: Args needed to instantiate an activation layer.
 Returns:
 nn.Module: Created activation layer.
 """
 return build_from_cfg(cfg, ACTIVATION_LAYERS)
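# Usage sketch (illustrative; assumes build_from_cfg follows the same 'type'-key
# convention as the other builders in this file, with remaining cfg keys passed
# as constructor kwargs):
# >>> build_activation_layer(dict(type='ReLU', inplace=True))        # nn.ReLU(inplace=True)
# >>> build_activation_layer(dict(type='LeakyReLU', negative_slope=0.1))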
PADDING_LAYERS = Registry('padding layer')
PADDING_LAYERS.register_module('zero', module=nn.ZeroPad2d)
PADDING_LAYERS.register_module('reflect', module=nn.ReflectionPad2d)
PADDING_LAYERS.register_module('replicate', module=nn.ReplicationPad2d)
def build_padding_layer(cfg, *args, **kwargs):
 """Build padding layer.
 Args:
 cfg (None or dict): The padding layer config, which should contain:
 - type (str): Layer type.
 - layer args: Args needed to instantiate a padding layer.
 Returns:
 nn.Module: Created padding layer.
 """
 if not isinstance(cfg, dict):
 raise TypeError('cfg must be a dict')
 if 'type' not in cfg:
 raise KeyError('the cfg dict must contain the key "type"')
 cfg_ = cfg.copy()
 padding_type = cfg_.pop('type')
 if padding_type not in PADDING_LAYERS:
 raise KeyError(f'Unrecognized padding type {padding_type}.')
 else:
 padding_layer = PADDING_LAYERS.get(padding_type)
 layer = padding_layer(*args, **kwargs, **cfg_)
 return layer
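# Usage sketch (illustrative; the padding width is an arbitrary example):
# >>> pad = build_padding_layer(dict(type='reflect'), 2)
# is equivalent to nn.ReflectionPad2d(2).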
UPSAMPLE_LAYERS = Registry('upsample layer')
UPSAMPLE_LAYERS.register_module('nearest', module=nn.Upsample)
UPSAMPLE_LAYERS.register_module('bilinear', module=nn.Upsample)
@UPSAMPLE_LAYERS.register_module(name='pixel_shuffle')
class PixelShufflePack(nn.Module):
 """Pixel Shuffle upsample layer.
 This module packs `F.pixel_shuffle()` and a nn.Conv2d module together to
 achieve a simple upsampling with pixel shuffle.
 Args:
 in_channels (int): Number of input channels.
 out_channels (int): Number of output channels.
 scale_factor (int): Upsample ratio.
 upsample_kernel (int): Kernel size of the conv layer to expand the
 channels.
 """
 def __init__(self, in_channels, out_channels, scale_factor,
 upsample_kernel):
 super(PixelShufflePack, self).__init__()
 self.in_channels = in_channels
 self.out_channels = out_channels
 self.scale_factor = scale_factor
 self.upsample_kernel = upsample_kernel
 self.upsample_conv = nn.Conv2d(
 self.in_channels,
 self.out_channels * scale_factor * scale_factor,
 self.upsample_kernel,
 padding=(self.upsample_kernel - 1) // 2)
 self.init_weights()
 def init_weights(self):
 xavier_init(self.upsample_conv, distribution='uniform')
 def forward(self, x):
 x = self.upsample_conv(x)
 x = F.pixel_shuffle(x, self.scale_factor)
 return x
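# Shape sketch (illustrative; channel counts are arbitrary examples): with
# PixelShufflePack(64, 32, scale_factor=2, upsample_kernel=3), an input of
# shape (N, 64, H, W) is first convolved to (N, 32 * 2 * 2, H, W) and then
# pixel-shuffled to (N, 32, 2 * H, 2 * W).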
def build_upsample_layer(cfg, *args, **kwargs):
 """Build upsample layer.
 Args:
 cfg (dict): The upsample layer config, which should contain:
 - type (str): Layer type.
 - scale_factor (int): Upsample ratio, which is not applicable to
 deconv.
 - layer args: Args needed to instantiate an upsample layer.
 args (argument list): Arguments passed to the ``__init__``
 method of the corresponding conv layer.
 kwargs (keyword arguments): Keyword arguments passed to the
 ``__init__`` method of the corresponding conv layer.
 Returns:
 nn.Module: Created upsample layer.
 """
 if not isinstance(cfg, dict):
 raise TypeError(f'cfg must be a dict, but got {type(cfg)}')
 if 'type' not in cfg:
 raise KeyError(
 f'the cfg dict must contain the key "type", but got {cfg}')
 cfg_ = cfg.copy()
 layer_type = cfg_.pop('type')
 if layer_type not in UPSAMPLE_LAYERS:
 raise KeyError(f'Unrecognized upsample type {layer_type}')
 else:
 upsample = UPSAMPLE_LAYERS.get(layer_type)
 if upsample is nn.Upsample:
 cfg_['mode'] = layer_type
 layer = upsample(*args, **kwargs, **cfg_)
 return layer
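# Usage sketch (illustrative; values are arbitrary examples):
# >>> up = build_upsample_layer(dict(type='nearest', scale_factor=2))
# builds nn.Upsample(scale_factor=2, mode='nearest'), since the registered name
# is reused as the interpolation mode when the class is nn.Upsample;
# >>> build_upsample_layer(dict(type='pixel_shuffle', in_channels=64,
# ...                           out_channels=64, scale_factor=2, upsample_kernel=3))
# builds the PixelShufflePack defined above.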
PLUGIN_LAYERS = Registry('plugin layer')
def infer_plugin_abbr(class_type):
 """Infer abbreviation from the class name.
 This method will infer the abbreviation to map class types to
 abbreviations.
 Rule 1: If the class has the property "_abbr_", return the property.
 Rule 2: Otherwise, the abbreviation falls back to snake case of class
 name, e.g. the abbreviation of ``FancyBlock`` will be ``fancy_block``.
 Args:
 class_type (type): The norm layer type.
 Returns:
 str: The inferred abbreviation.
 """
 def camel2snack(word):
 """Convert camel case word into snack case.
 Modified from `inflection lib
 <https://inflection.readthedocs.io/en/latest/#inflection.underscore>`_.
 Example::
 >>> camel2snack("FancyBlock")
 'fancy_block'
 """
 word = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', word)
 word = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', word)
 word = word.replace('-', '_')
 return word.lower()
 if not inspect.isclass(class_type):
 raise TypeError(
 f'class_type must be a type, but got {type(class_type)}')
 if hasattr(class_type, '_abbr_'):
 return class_type._abbr_
 else:
 return camel2snack(class_type.__name__)
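# Examples (illustrative, not in the original file):
# >>> infer_plugin_abbr(PixelShufflePack)  # Rule 2 -> 'pixel_shuffle_pack'
# A plugin class defining `_abbr_` would return that value instead (Rule 1).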
def build_plugin_layer(cfg, postfix='', **kwargs):
 """Build plugin layer.
 Args:
 cfg (None or dict): cfg should contain:
 type (str): identify plugin layer type.
 layer args: args needed to instantiate a plugin layer.
 postfix (int, str): appended into norm abbreviation to
 create named layer. Default: ''.
 Returns:
 tuple[str, nn.Module]:
 name (str): abbreviation + postfix
 layer (nn.Module): created plugin layer
 """
 if not isinstance(cfg, dict):
 raise TypeError('cfg must be a dict')
 if 'type' not in cfg:
 raise KeyError('the cfg dict must contain the key "type"')
 cfg_ = cfg.copy()
 layer_type = cfg_.pop('type')
 if layer_type not in PLUGIN_LAYERS:
 raise KeyError(f'Unrecognized plugin | |
| 
	-np.inf,
 335: -np.inf,
 336: -np.inf,
 337: -np.inf,
 338: -np.inf,
 339: -np.inf,
 340: -np.inf,
 341: -np.inf,
 342: -np.inf,
 343: -np.inf,
 344: -np.inf,
 345: -np.inf,
 346: -np.inf,
 347: -np.inf,
 348: -np.inf,
 349: -np.inf,
 350: -np.inf,
 351: -np.inf,
 352: -np.inf,
 353: -np.inf,
 354: -np.inf,
 355: -np.inf,
 356: -np.inf,
 357: -np.inf,
 358: -np.inf,
 359: -np.inf,
 360: -np.inf,
 361: -np.inf,
 362: -np.inf,
 363: -np.inf,
 364: -np.inf,
 365: -np.inf,
 366: -np.inf,
 367: -np.inf,
 368: -np.inf,
 369: -np.inf,
 370: -np.inf,
 371: -np.inf,
 372: -np.inf,
 373: -np.inf,
 374: -np.inf,
 375: -np.inf,
 376: -np.inf,
 377: -np.inf,
 378: -np.inf,
 379: -np.inf,
 380: -np.inf,
 381: -np.inf,
 382: -np.inf,
 383: -np.inf,
 384: -np.inf,
 385: -np.inf,
 386: -np.inf,
 387: -np.inf,
 388: -np.inf,
 389: -np.inf,
 390: -np.inf,
 391: -np.inf,
 392: -np.inf,
 393: -np.inf,
 },
 "fcst_upper": {
 0: np.inf,
 1: np.inf,
 2: np.inf,
 3: np.inf,
 4: np.inf,
 5: np.inf,
 6: np.inf,
 7: np.inf,
 8: np.inf,
 9: np.inf,
 10: np.inf,
 11: np.inf,
 12: np.inf,
 13: np.inf,
 14: np.inf,
 15: np.inf,
 16: np.inf,
 17: np.inf,
 18: np.inf,
 19: np.inf,
 20: np.inf,
 21: np.inf,
 22: np.inf,
 23: np.inf,
 24: np.inf,
 25: np.inf,
 26: np.inf,
 27: np.inf,
 28: np.inf,
 29: np.inf,
 30: np.inf,
 31: np.inf,
 32: np.inf,
 33: np.inf,
 34: np.inf,
 35: np.inf,
 36: np.inf,
 37: np.inf,
 38: np.inf,
 39: np.inf,
 40: np.inf,
 41: np.inf,
 42: np.inf,
 43: np.inf,
 44: np.inf,
 45: np.inf,
 46: np.inf,
 47: np.inf,
 48: np.inf,
 49: np.inf,
 50: np.inf,
 51: np.inf,
 52: np.inf,
 53: np.inf,
 54: np.inf,
 55: np.inf,
 56: np.inf,
 57: np.inf,
 58: np.inf,
 59: np.inf,
 60: np.inf,
 61: np.inf,
 62: np.inf,
 63: np.inf,
 64: np.inf,
 65: np.inf,
 66: np.inf,
 67: np.inf,
 68: np.inf,
 69: np.inf,
 70: np.inf,
 71: np.inf,
 72: np.inf,
 73: np.inf,
 74: np.inf,
 75: np.inf,
 76: np.inf,
 77: np.inf,
 78: np.inf,
 79: np.inf,
 80: np.inf,
 81: np.inf,
 82: np.inf,
 83: np.inf,
 84: np.inf,
 85: np.inf,
 86: np.inf,
 87: np.inf,
 88: np.inf,
 89: np.inf,
 90: np.inf,
 91: np.inf,
 92: np.inf,
 93: np.inf,
 94: np.inf,
 95: np.inf,
 96: np.inf,
 97: np.inf,
 98: np.inf,
 99: np.inf,
 100: np.inf,
 101: np.inf,
 102: np.inf,
 103: np.inf,
 104: np.inf,
 105: np.inf,
 106: np.inf,
 107: np.inf,
 108: np.inf,
 109: np.inf,
 110: np.inf,
 111: np.inf,
 112: np.inf,
 113: np.inf,
 114: np.inf,
 115: np.inf,
 116: np.inf,
 117: np.inf,
 118: np.inf,
 119: np.inf,
 120: np.inf,
 121: np.inf,
 122: np.inf,
 123: np.inf,
 124: np.inf,
 125: np.inf,
 126: np.inf,
 127: np.inf,
 128: np.inf,
 129: np.inf,
 130: np.inf,
 131: np.inf,
 132: np.inf,
 133: np.inf,
 134: np.inf,
 135: np.inf,
 136: np.inf,
 137: np.inf,
 138: np.inf,
 139: np.inf,
 140: np.inf,
 141: np.inf,
 142: np.inf,
 143: np.inf,
 144: np.inf,
 145: np.inf,
 146: np.inf,
 147: np.inf,
 148: np.inf,
 149: np.inf,
 150: np.inf,
 151: np.inf,
 152: np.inf,
 153: np.inf,
 154: np.inf,
 155: np.inf,
 156: np.inf,
 157: np.inf,
 158: np.inf,
 159: np.inf,
 160: np.inf,
 161: np.inf,
 162: np.inf,
 163: np.inf,
 164: np.inf,
 165: np.inf,
 166: np.inf,
 167: np.inf,
 168: np.inf,
 169: np.inf,
 170: np.inf,
 171: np.inf,
 172: np.inf,
 173: np.inf,
 174: np.inf,
 175: np.inf,
 176: np.inf,
 177: np.inf,
 178: np.inf,
 179: np.inf,
 180: np.inf,
 181: np.inf,
 182: np.inf,
 183: np.inf,
 184: np.inf,
 185: np.inf,
 186: np.inf,
 187: np.inf,
 188: np.inf,
 189: np.inf,
 190: np.inf,
 191: np.inf,
 192: np.inf,
 193: np.inf,
 194: np.inf,
 195: np.inf,
 196: np.inf,
 197: np.inf,
 198: np.inf,
 199: np.inf,
 200: np.inf,
 201: np.inf,
 202: np.inf,
 203: np.inf,
 204: np.inf,
 205: np.inf,
 206: np.inf,
 207: np.inf,
 208: np.inf,
 209: np.inf,
 210: np.inf,
 211: np.inf,
 212: np.inf,
 213: np.inf,
 214: np.inf,
 215: np.inf,
 216: np.inf,
 217: np.inf,
 218: np.inf,
 219: np.inf,
 220: np.inf,
 221: np.inf,
 222: np.inf,
 223: np.inf,
 224: np.inf,
 225: np.inf,
 226: np.inf,
 227: np.inf,
 228: np.inf,
 229: np.inf,
 230: np.inf,
 231: np.inf,
 232: np.inf,
 233: np.inf,
 234: np.inf,
 235: np.inf,
 236: np.inf,
 237: np.inf,
 238: np.inf,
 239: np.inf,
 240: np.inf,
 241: np.inf,
 242: np.inf,
 243: np.inf,
 244: np.inf,
 245: np.inf,
 246: np.inf,
 247: np.inf,
 248: np.inf,
 249: np.inf,
 250: np.inf,
 251: np.inf,
 252: np.inf,
 253: np.inf,
 254: np.inf,
 255: np.inf,
 256: np.inf,
 257: np.inf,
 258: np.inf,
 259: np.inf,
 260: np.inf,
 261: np.inf,
 262: np.inf,
 263: np.inf,
 264: np.inf,
 265: np.inf,
 266: np.inf,
 267: np.inf,
 268: np.inf,
 269: np.inf,
 270: np.inf,
 271: np.inf,
 272: np.inf,
 273: np.inf,
 274: np.inf,
 275: np.inf,
 276: np.inf,
 277: np.inf,
 278: np.inf,
 279: np.inf,
 280: np.inf,
 281: np.inf,
 282: np.inf,
 283: np.inf,
 284: np.inf,
 285: np.inf,
 286: np.inf,
 287: np.inf,
 288: np.inf,
 289: np.inf,
 290: np.inf,
 291: np.inf,
 292: np.inf,
 293: np.inf,
 294: np.inf,
 295: np.inf,
 296: np.inf,
 297: np.inf,
 298: np.inf,
 299: np.inf,
 300: np.inf,
 301: np.inf,
 302: np.inf,
 303: np.inf,
 304: np.inf,
 305: np.inf,
 306: np.inf,
 307: np.inf,
 308: np.inf,
 309: np.inf,
 310: np.inf,
 311: np.inf,
 312: np.inf,
 313: np.inf,
 314: np.inf,
 315: np.inf,
 316: np.inf,
 317: np.inf,
 318: np.inf,
 319: np.inf,
 320: np.inf,
 321: np.inf,
 322: np.inf,
 323: np.inf,
 324: np.inf,
 325: np.inf,
 326: np.inf,
 327: np.inf,
 328: np.inf,
 329: np.inf,
 330: np.inf,
 331: np.inf,
 332: np.inf,
 333: np.inf,
 334: np.inf,
 335: np.inf,
 336: np.inf,
 337: np.inf,
 338: np.inf,
 339: np.inf,
 340: np.inf,
 341: np.inf,
 342: np.inf,
 343: np.inf,
 344: np.inf,
 345: np.inf,
 346: np.inf,
 347: np.inf,
 348: np.inf,
 349: np.inf,
 350: np.inf,
 351: np.inf,
 352: np.inf,
 353: np.inf,
 354: np.inf,
 355: np.inf,
 356: np.inf,
 357: np.inf,
 358: np.inf,
 359: np.inf,
 360: np.inf,
 361: np.inf,
 362: np.inf,
 363: np.inf,
 364: np.inf,
 365: np.inf,
 366: np.inf,
 367: np.inf,
 368: np.inf,
 369: np.inf,
 370: np.inf,
 371: np.inf,
 372: np.inf,
 373: np.inf,
 374: np.inf,
 375: np.inf,
 376: np.inf,
 377: np.inf,
 378: np.inf,
 379: np.inf,
 380: np.inf,
 381: np.inf,
 382: np.inf,
 383: np.inf,
 384: np.inf,
 385: np.inf,
 386: np.inf,
 387: np.inf,
 388: np.inf,
 389: np.inf,
 390: np.inf,
 391: np.inf,
 392: np.inf,
 393: np.inf,
 },
 }
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
 {
 "time": {
 0: pd.Timestamp("2012-05-02 00:00:00"),
 1: pd.Timestamp("2012-05-03 00:00:00"),
 2: pd.Timestamp("2012-05-04 00:00:00"),
 3: pd.Timestamp("2012-05-05 00:00:00"),
 4: pd.Timestamp("2012-05-06 00:00:00"),
 5: pd.Timestamp("2012-05-07 00:00:00"),
 6: pd.Timestamp("2012-05-08 00:00:00"),
 7: pd.Timestamp("2012-05-09 00:00:00"),
 8: pd.Timestamp("2012-05-10 00:00:00"),
 9: pd.Timestamp("2012-05-11 00:00:00"),
 10: pd.Timestamp("2012-05-12 00:00:00"),
 11: pd.Timestamp("2012-05-13 00:00:00"),
 12: pd.Timestamp("2012-05-14 00:00:00"),
 13: pd.Timestamp("2012-05-15 00:00:00"),
 14: pd.Timestamp("2012-05-16 00:00:00"),
 15: pd.Timestamp("2012-05-17 00:00:00"),
 16: pd.Timestamp("2012-05-18 00:00:00"),
 17: pd.Timestamp("2012-05-19 00:00:00"),
 18: pd.Timestamp("2012-05-20 00:00:00"),
 19: pd.Timestamp("2012-05-21 00:00:00"),
 20: pd.Timestamp("2012-05-22 00:00:00"),
 21: pd.Timestamp("2012-05-23 00:00:00"),
 22: pd.Timestamp("2012-05-24 00:00:00"),
 23: pd.Timestamp("2012-05-25 00:00:00"),
 24: pd.Timestamp("2012-05-26 00:00:00"),
 25: pd.Timestamp("2012-05-27 00:00:00"),
 26: pd.Timestamp("2012-05-28 00:00:00"),
 27: pd.Timestamp("2012-05-29 00:00:00"),
 28: pd.Timestamp("2012-05-30 00:00:00"),
 29: pd.Timestamp("2012-05-31 00:00:00"),
 30: pd.Timestamp("2012-06-01 00:00:00"),
 31: pd.Timestamp("2012-06-02 00:00:00"),
 32: pd.Timestamp("2012-06-03 00:00:00"),
 33: pd.Timestamp("2012-06-04 00:00:00"),
 34: pd.Timestamp("2012-06-05 00:00:00"),
 35: pd.Timestamp("2012-06-06 00:00:00"),
 36: pd.Timestamp("2012-06-07 00:00:00"),
 37: pd.Timestamp("2012-06-08 00:00:00"),
 38: pd.Timestamp("2012-06-09 00:00:00"),
 39: pd.Timestamp("2012-06-10 00:00:00"),
 40: pd.Timestamp("2012-06-11 00:00:00"),
 41: pd.Timestamp("2012-06-12 00:00:00"),
 42: pd.Timestamp("2012-06-13 00:00:00"),
 43: pd.Timestamp("2012-06-14 00:00:00"),
 44: pd.Timestamp("2012-06-15 00:00:00"),
 45: pd.Timestamp("2012-06-16 00:00:00"),
 46: pd.Timestamp("2012-06-17 00:00:00"),
 47: pd.Timestamp("2012-06-18 00:00:00"),
 48: pd.Timestamp("2012-06-19 00:00:00"),
 49: pd.Timestamp("2012-06-20 00:00:00"),
 50: pd.Timestamp("2012-06-21 00:00:00"),
 51: pd.Timestamp("2012-06-22 00:00:00"),
 52: pd.Timestamp("2012-06-23 00:00:00"),
 53: pd.Timestamp("2012-06-24 00:00:00"),
 54: pd.Timestamp("2012-06-25 00:00:00"),
 55: pd.Timestamp("2012-06-26 00:00:00"),
 56: pd.Timestamp("2012-06-27 00:00:00"),
 57: pd.Timestamp("2012-06-28 00:00:00"),
 58: pd.Timestamp("2012-06-29 00:00:00"),
 59: pd.Timestamp("2012-06-30 00:00:00"),
 60: pd.Timestamp("2012-07-01 00:00:00"),
 61: pd.Timestamp("2012-07-02 00:00:00"),
 62: pd.Timestamp("2012-07-03 00:00:00"),
 63: pd.Timestamp("2012-07-04 00:00:00"),
 64: pd.Timestamp("2012-07-05 00:00:00"),
 65: pd.Timestamp("2012-07-06 00:00:00"),
 66: pd.Timestamp("2012-07-07 00:00:00"),
 67: pd.Timestamp("2012-07-08 00:00:00"),
 68: pd.Timestamp("2012-07-09 00:00:00"),
 69: pd.Timestamp("2012-07-10 00:00:00"),
 70: pd.Timestamp("2012-07-11 00:00:00"),
 71: pd.Timestamp("2012-07-12 00:00:00"),
 72: pd.Timestamp("2012-07-13 00:00:00"),
 73: pd.Timestamp("2012-07-14 00:00:00"),
 74: pd.Timestamp("2012-07-15 00:00:00"),
 75: pd.Timestamp("2012-07-16 00:00:00"),
 76: pd.Timestamp("2012-07-17 00:00:00"),
 77: pd.Timestamp("2012-07-18 00:00:00"),
 78: pd.Timestamp("2012-07-19 00:00:00"),
 79: pd.Timestamp("2012-07-20 00:00:00"),
 80: pd.Timestamp("2012-07-21 00:00:00"),
 81: pd.Timestamp("2012-07-22 00:00:00"),
 82: pd.Timestamp("2012-07-23 00:00:00"),
 83: pd.Timestamp("2012-07-24 00:00:00"),
 84: pd.Timestamp("2012-07-25 00:00:00"),
 85: pd.Timestamp("2012-07-26 00:00:00"),
 86: pd.Timestamp("2012-07-27 00:00:00"),
 87: pd.Timestamp("2012-07-28 00:00:00"),
 88: pd.Timestamp("2012-07-29 00:00:00"),
 89: pd.Timestamp("2012-07-30 00:00:00"),
 90: pd.Timestamp("2012-07-31 00:00:00"),
 91: pd.Timestamp("2012-08-01 00:00:00"),
 92: pd.Timestamp("2012-08-02 00:00:00"),
 93: pd.Timestamp("2012-08-03 00:00:00"),
 94: pd.Timestamp("2012-08-04 00:00:00"),
 95: pd.Timestamp("2012-08-05 00:00:00"),
 96: pd.Timestamp("2012-08-06 00:00:00"),
 97: pd.Timestamp("2012-08-07 00:00:00"),
 98: pd.Timestamp("2012-08-08 00:00:00"),
 99: pd.Timestamp("2012-08-09 00:00:00"),
 100: pd.Timestamp("2012-08-10 00:00:00"),
 101: pd.Timestamp("2012-08-11 00:00:00"),
 102: pd.Timestamp("2012-08-12 00:00:00"),
 103: pd.Timestamp("2012-08-13 00:00:00"),
 104: pd.Timestamp("2012-08-14 00:00:00"),
 105: pd.Timestamp("2012-08-15 00:00:00"),
 106: pd.Timestamp("2012-08-16 00:00:00"),
 107: pd.Timestamp("2012-08-17 00:00:00"),
 108: pd.Timestamp("2012-08-18 00:00:00"),
 109: pd.Timestamp("2012-08-19 00:00:00"),
 110: pd.Timestamp("2012-08-20 00:00:00"),
 111: pd.Timestamp("2012-08-21 00:00:00"),
 112: pd.Timestamp("2012-08-22 00:00:00"),
 113: pd.Timestamp("2012-08-23 00:00:00"),
 114: pd.Timestamp("2012-08-24 00:00:00"),
 115: pd.Timestamp("2012-08-25 00:00:00"),
 116: pd.Timestamp("2012-08-26 00:00:00"),
 117: pd.Timestamp("2012-08-27 00:00:00"),
 118: pd.Timestamp("2012-08-28 00:00:00"),
 119: pd.Timestamp("2012-08-29 00:00:00"),
 120: pd.Timestamp("2012-08-30 00:00:00"),
 | |
| 
	eol_))
 else:
 outfile.write('/>%s' % (eol_, ))
 def exportAttributes(self, outfile, level, already_processed, namespaceprefix_='', name_='Contact'):
 pass
 def exportChildren(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='Contact', fromsubclass_=False, pretty_print=True):
 if pretty_print:
 eol_ = '\n'
 else:
 eol_ = ''
 if self.ContactId is not None:
 namespaceprefix_ = self.ContactId_nsprefix_ + ':' if (UseCapturedNS_ and self.ContactId_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sContactId>%s</%sContactId>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.ContactId), input_name='ContactId')), namespaceprefix_ , eol_))
 if self.PersonName is not None:
 namespaceprefix_ = self.PersonName_nsprefix_ + ':' if (UseCapturedNS_ and self.PersonName_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sPersonName>%s</%sPersonName>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.PersonName), input_name='PersonName')), namespaceprefix_ , eol_))
 if self.Title is not None:
 namespaceprefix_ = self.Title_nsprefix_ + ':' if (UseCapturedNS_ and self.Title_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sTitle>%s</%sTitle>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.Title), input_name='Title')), namespaceprefix_ , eol_))
 if self.CompanyName is not None:
 namespaceprefix_ = self.CompanyName_nsprefix_ + ':' if (UseCapturedNS_ and self.CompanyName_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sCompanyName>%s</%sCompanyName>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.CompanyName), input_name='CompanyName')), namespaceprefix_ , eol_))
 if self.PhoneNumber is not None:
 namespaceprefix_ = self.PhoneNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.PhoneNumber_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sPhoneNumber>%s</%sPhoneNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.PhoneNumber), input_name='PhoneNumber')), namespaceprefix_ , eol_))
 if self.PhoneExtension is not None:
 namespaceprefix_ = self.PhoneExtension_nsprefix_ + ':' if (UseCapturedNS_ and self.PhoneExtension_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sPhoneExtension>%s</%sPhoneExtension>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.PhoneExtension), input_name='PhoneExtension')), namespaceprefix_ , eol_))
 if self.TollFreePhoneNumber is not None:
 namespaceprefix_ = self.TollFreePhoneNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.TollFreePhoneNumber_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sTollFreePhoneNumber>%s</%sTollFreePhoneNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.TollFreePhoneNumber), input_name='TollFreePhoneNumber')), namespaceprefix_ , eol_))
 if self.PagerNumber is not None:
 namespaceprefix_ = self.PagerNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.PagerNumber_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sPagerNumber>%s</%sPagerNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.PagerNumber), input_name='PagerNumber')), namespaceprefix_ , eol_))
 if self.FaxNumber is not None:
 namespaceprefix_ = self.FaxNumber_nsprefix_ + ':' if (UseCapturedNS_ and self.FaxNumber_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sFaxNumber>%s</%sFaxNumber>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.FaxNumber), input_name='FaxNumber')), namespaceprefix_ , eol_))
 if self.EMailAddress is not None:
 namespaceprefix_ = self.EMailAddress_nsprefix_ + ':' if (UseCapturedNS_ and self.EMailAddress_nsprefix_) else ''
 showIndent(outfile, level, pretty_print)
 outfile.write('<%sEMailAddress>%s</%sEMailAddress>%s' % (namespaceprefix_ , self.gds_encode(self.gds_format_string(quote_xml(self.EMailAddress), input_name='EMailAddress')), namespaceprefix_ , eol_))
 def build(self, node, gds_collector_=None):
 self.gds_collector_ = gds_collector_
 if SaveElementTreeNode:
 self.gds_elementtree_node_ = node
 already_processed = set()
 self.ns_prefix_ = node.prefix
 self.buildAttributes(node, node.attrib, already_processed)
 for child in node:
 nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
 self.buildChildren(child, node, nodeName_, gds_collector_=gds_collector_)
 return self
 def buildAttributes(self, node, attrs, already_processed):
 pass
 def buildChildren(self, child_, node, nodeName_, fromsubclass_=False, gds_collector_=None):
 if nodeName_ == 'ContactId':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'ContactId')
 value_ = self.gds_validate_string(value_, node, 'ContactId')
 self.ContactId = value_
 self.ContactId_nsprefix_ = child_.prefix
 elif nodeName_ == 'PersonName':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'PersonName')
 value_ = self.gds_validate_string(value_, node, 'PersonName')
 self.PersonName = value_
 self.PersonName_nsprefix_ = child_.prefix
 elif nodeName_ == 'Title':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'Title')
 value_ = self.gds_validate_string(value_, node, 'Title')
 self.Title = value_
 self.Title_nsprefix_ = child_.prefix
 elif nodeName_ == 'CompanyName':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'CompanyName')
 value_ = self.gds_validate_string(value_, node, 'CompanyName')
 self.CompanyName = value_
 self.CompanyName_nsprefix_ = child_.prefix
 elif nodeName_ == 'PhoneNumber':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'PhoneNumber')
 value_ = self.gds_validate_string(value_, node, 'PhoneNumber')
 self.PhoneNumber = value_
 self.PhoneNumber_nsprefix_ = child_.prefix
 elif nodeName_ == 'PhoneExtension':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'PhoneExtension')
 value_ = self.gds_validate_string(value_, node, 'PhoneExtension')
 self.PhoneExtension = value_
 self.PhoneExtension_nsprefix_ = child_.prefix
 elif nodeName_ == 'TollFreePhoneNumber':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'TollFreePhoneNumber')
 value_ = self.gds_validate_string(value_, node, 'TollFreePhoneNumber')
 self.TollFreePhoneNumber = value_
 self.TollFreePhoneNumber_nsprefix_ = child_.prefix
 elif nodeName_ == 'PagerNumber':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'PagerNumber')
 value_ = self.gds_validate_string(value_, node, 'PagerNumber')
 self.PagerNumber = value_
 self.PagerNumber_nsprefix_ = child_.prefix
 elif nodeName_ == 'FaxNumber':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'FaxNumber')
 value_ = self.gds_validate_string(value_, node, 'FaxNumber')
 self.FaxNumber = value_
 self.FaxNumber_nsprefix_ = child_.prefix
 elif nodeName_ == 'EMailAddress':
 value_ = child_.text
 value_ = self.gds_parse_string(value_, node, 'EMailAddress')
 value_ = self.gds_validate_string(value_, node, 'EMailAddress')
 self.EMailAddress = value_
 self.EMailAddress_nsprefix_ = child_.prefix
# end class Contact
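# A hedged usage sketch (not part of the generated bindings): parse an XML fragment
# with lxml (assumed here, matching the node.prefix access in build()) and feed the
# element through build(), which dispatches to buildChildren for each child tag.
# The get_<Element>() getters follow the usual generateDS convention and are assumed
# to exist on Contact.
def _contact_build_demo():
    from lxml import etree
    xml = (b"<Contact><PersonName>Jane Doe</PersonName>"
           b"<EMailAddress>jane@example.com</EMailAddress></Contact>")
    contact = Contact()
    contact.build(etree.fromstring(xml))
    return contact.get_PersonName(), contact.get_EMailAddress()  # ('Jane Doe', 'jane@example.com')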
class DangerousGoodsHandlingUnitShippingDetail(GeneratedsSuper):
 """This provides the information needed for shipping, rating, validation,
 and label generation."""
 __hash__ = GeneratedsSuper.__hash__
 subclass = None
 superclass = None
 def __init__(self, TrackingNumberUnits=None, Description=None, AircraftCategoryType=None, DangerousGoodsDescriptors=None, Accessibility=None, Options=None, DryIceWeight=None, gds_collector_=None, **kwargs_):
 self.gds_collector_ = gds_collector_
 self.gds_elementtree_node_ = None
 self.original_tagname_ = None
 self.parent_object_ = kwargs_.get('parent_object_')
 self.ns_prefix_ = None
 if TrackingNumberUnits is None:
 self.TrackingNumberUnits = []
 else:
 self.TrackingNumberUnits = TrackingNumberUnits
 self.TrackingNumberUnits_nsprefix_ = None
 self.Description = Description
 self.Description_nsprefix_ = None
 self.AircraftCategoryType = AircraftCategoryType
 self.validate_DangerousGoodsAircraftCategoryType(self.AircraftCategoryType)
 self.AircraftCategoryType_nsprefix_ = None
 if DangerousGoodsDescriptors is None:
 self.DangerousGoodsDescriptors = []
 else:
 self.DangerousGoodsDescriptors = DangerousGoodsDescriptors
 self.DangerousGoodsDescriptors_nsprefix_ = None
 self.Accessibility = Accessibility
 self.validate_DangerousGoodsAccessibilityType(self.Accessibility)
 self.Accessibility_nsprefix_ = None
 if Options is None:
 self.Options = []
 else:
 self.Options = Options
 self.Options_nsprefix_ = None
 self.DryIceWeight = DryIceWeight
 self.DryIceWeight_nsprefix_ = None
 def factory(*args_, **kwargs_):
 if CurrentSubclassModule_ is not None:
 subclass = getSubclassFromModule_(
 CurrentSubclassModule_, DangerousGoodsHandlingUnitShippingDetail)
 if subclass is not None:
 return subclass(*args_, **kwargs_)
 if DangerousGoodsHandlingUnitShippingDetail.subclass:
 return DangerousGoodsHandlingUnitShippingDetail.subclass(*args_, **kwargs_)
 else:
 return DangerousGoodsHandlingUnitShippingDetail(*args_, **kwargs_)
 factory = staticmethod(factory)
 def get_ns_prefix_(self):
 return self.ns_prefix_
 def set_ns_prefix_(self, ns_prefix):
 self.ns_prefix_ = ns_prefix
 def get_TrackingNumberUnits(self):
 return self.TrackingNumberUnits
 def set_TrackingNumberUnits(self, TrackingNumberUnits):
 self.TrackingNumberUnits = TrackingNumberUnits
 def add_TrackingNumberUnits(self, value):
 self.TrackingNumberUnits.append(value)
 def insert_TrackingNumberUnits_at(self, index, value):
 self.TrackingNumberUnits.insert(index, value)
 def replace_TrackingNumberUnits_at(self, index, value):
 self.TrackingNumberUnits[index] = value
 def get_Description(self):
 return self.Description
 def set_Description(self, Description):
 self.Description = Description
 def get_AircraftCategoryType(self):
 return self.AircraftCategoryType
 def set_AircraftCategoryType(self, AircraftCategoryType):
 self.AircraftCategoryType = AircraftCategoryType
 def get_DangerousGoodsDescriptors(self):
 return self.DangerousGoodsDescriptors
 def set_DangerousGoodsDescriptors(self, DangerousGoodsDescriptors):
 self.DangerousGoodsDescriptors = DangerousGoodsDescriptors
 def add_DangerousGoodsDescriptors(self, value):
 self.DangerousGoodsDescriptors.append(value)
 def insert_DangerousGoodsDescriptors_at(self, index, value):
 self.DangerousGoodsDescriptors.insert(index, value)
 def replace_DangerousGoodsDescriptors_at(self, index, value):
 self.DangerousGoodsDescriptors[index] = value
 def get_Accessibility(self):
 return self.Accessibility
 def set_Accessibility(self, Accessibility):
 self.Accessibility = Accessibility
 def get_Options(self):
 return self.Options
 def set_Options(self, Options):
 self.Options = Options
 def add_Options(self, value):
 self.Options.append(value)
 def insert_Options_at(self, index, value):
 self.Options.insert(index, value)
 def replace_Options_at(self, index, value):
 self.Options[index] = value
 def get_DryIceWeight(self):
 return self.DryIceWeight
 def set_DryIceWeight(self, DryIceWeight):
 self.DryIceWeight = DryIceWeight
 def validate_DangerousGoodsAircraftCategoryType(self, value):
 result = True
 # Validate type DangerousGoodsAircraftCategoryType, a restriction on xs:string.
 if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
 if not isinstance(value, str):
 lineno = self.gds_get_node_lineno_()
 self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
 return False
 value = value
 enumerations = ['CARGO_AIRCRAFT_ONLY', 'PASSENGER_AND_CARGO_AIRCRAFT']
 if value not in enumerations:
 lineno = self.gds_get_node_lineno_()
 self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on DangerousGoodsAircraftCategoryType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
 result = False
 return result
 def validate_DangerousGoodsDescriptorType(self, value):
 result = True
 # Validate type DangerousGoodsDescriptorType, a restriction on xs:string.
 if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
 if not isinstance(value, str):
 lineno = self.gds_get_node_lineno_()
 self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
 return False
 value = value
 enumerations = ['ALCOHOLIC_BEVERAGE', 'DRY_ICE', 'EMERGENCY_CONTACT_PHONE_REQUIRED', 'EXCEPTED_QUANTITIES', 'INFECTIOUS_SUBSTANCE', 'RADIOACTIVE']
 if value not in enumerations:
 lineno = self.gds_get_node_lineno_()
 self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on DangerousGoodsDescriptorType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
 result = False
 return result
 def validate_DangerousGoodsAccessibilityType(self, value):
 result = True
 # Validate type DangerousGoodsAccessibilityType, a restriction on xs:string.
 if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
 if not isinstance(value, str):
 lineno = self.gds_get_node_lineno_()
 self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
 return False
 value = value
 enumerations = ['ACCESSIBLE', 'INACCESSIBLE']
 if value not in enumerations:
 lineno = self.gds_get_node_lineno_()
 self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on DangerousGoodsAccessibilityType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
 result = False
 return result
 def validate_HazardousCommodityOptionType(self, value):
 result = True
 # Validate type HazardousCommodityOptionType, a restriction on xs:string.
 if value is not None and Validate_simpletypes_ and self.gds_collector_ is not None:
 if not isinstance(value, str):
 lineno = self.gds_get_node_lineno_()
 self.gds_collector_.add_message('Value "%(value)s"%(lineno)s is not of the correct base simple type (str)' % {"value": value, "lineno": lineno, })
 return False
 value = value
 enumerations = ['BATTERY', 'HAZARDOUS_MATERIALS', 'LIMITED_QUANTITIES_COMMODITIES', 'ORM_D', 'REPORTABLE_QUANTITIES', 'SMALL_QUANTITY_EXCEPTION']
 if value not in enumerations:
 lineno = self.gds_get_node_lineno_()
 self.gds_collector_.add_message('Value "%(value)s"%(lineno)s does not match xsd enumeration restriction on HazardousCommodityOptionType' % {"value" : encode_str_2_3(value), "lineno": lineno} )
 result = False
 return result
 def hasContent_(self):
 if (
 self.TrackingNumberUnits or
 self.Description is not None or
 self.AircraftCategoryType is not None or
 self.DangerousGoodsDescriptors or
 self.Accessibility is not None or
 self.Options or
 self.DryIceWeight is not None
 ):
 return True
 else:
 return False
 def export(self, outfile, level, namespaceprefix_='', namespacedef_='', name_='DangerousGoodsHandlingUnitShippingDetail', pretty_print=True):
 imported_ns_def_

 + __d3diskmass__
 time_ = d3class.get_time_for_it(it, "profiles", "prof")
 time_arr.append(time_)
 it_arr.append(it)
 if os.path.isfile(fpath):
 data_ = float(np.loadtxt(fpath, unpack=True))  # one scalar per file; np.float is deprecated
 data_arr.append(data_)
 else:
 data_arr.append(np.nan)
 #
 it_arr = np.array(it_arr, dtype=int)
 time_arr = np.array(time_arr, dtype=float)
 data_arr = np.array(data_arr, dtype=float)
 #
 if len(it_arr) > 0:
 x = np.vstack((it_arr, time_arr, data_arr)).T
 np.savetxt(parfilepath+__d3diskmass__, x, header="1:it 2:time[s] 3:mass[Msun]", fmt='%i %0.5f %0.5f')
 else:
 Printcolor.yellow("No disk mass found")
 #
 if len(it_arr) > 0:
 time_arr = time_arr * 1e3
 o_plot = PLOT_MANY_TASKS()
 o_plot.gen_set["figdir"] = parfilepath
 o_plot.gen_set["type"] = "cartesian"
 o_plot.gen_set["figsize"] = (4.2, 3.6) # <->, |]
 o_plot.gen_set["figname"] = __d3diskmass__.replace(".txt",".png")
 o_plot.gen_set["sharex"] = False
 o_plot.gen_set["sharey"] = False
 o_plot.gen_set["subplots_adjust_h"] = 0.2
 o_plot.gen_set["subplots_adjust_w"] = 0.0
 o_plot.set_plot_dics = []
 # plot
 plot_dic = {
 'task': 'line', 'ptype': 'cartesian',
 'xarr': time_arr, 'yarr': data_arr,
 'position': (1, 1),
 'v_n_x': 'times', 'v_n_y': 'int_phi_r abs',
 'marker': '.', 'color': 'black', 'ms': 5., 'alpha': 1.0, #'ds': 'default',
 'label': None, 'ylabel': r'$M_{\rm{disk}}$ [$M_{\odot}$]', 'xlabel': r"$t$ [ms]",
 'xmin': -5., 'xmax': time_arr.max(), 'ymin': 0, 'ymax': 0.5,
 'xscale': None, 'yscale': None,
 'fancyticks': True, 'minorticks': True,
 'legend': {'loc': 'upper right', 'ncol': 2, 'fontsize': 10, 'shadow': False, 'framealpha': 0.5,
 'borderaxespad': 0.0},
 'fontsize': 14,
 'labelsize': 14,
 'title': {'text': "Disk Mass Evolution", 'fontsize': 14},
 # 'mark_end': {'marker': 'x', 'ms': 5, 'color': 'red', 'alpha': 0.7, 'label': 'end'},
 # 'mark_beginning': {'marker': 's', 'ms': 5, 'color': 'blue', 'alpha': 0.7, 'label': 'beginning'},
 # 'axvline': {'x': 0, 'linestyle': 'dashed', 'color': 'gray', 'linewidth': 0.5},
 # 'axhline': {'y': 0, 'linestyle': 'dashed', 'color': 'gray', 'linewidth': 0.5}
 }
 o_plot.set_plot_dics.append(plot_dic)
 o_plot.main()
 else:
 print_colored_string(["task:", "plotmass", ":", "skipping"],
 ["blue", "green", "", "blue"])
 except IOError:
 print_colored_string(["task:", "plotmass", ":", "IOError"],
 ["blue", "green", "", "red"])
 except KeyboardInterrupt:
 exit(1)
 except:
 print_colored_string(["task:", "plotmass", ":", "failed"],
 ["blue", "green", "", "red"])
""" ==============================================| D3 OTHER |======================================================="""
""" ===============================================| D3 ALL |======================================================= """
def d3_main_computational_loop():
 outdir = Paths.ppr_sims + glob_sim + '/'
 if not os.path.isdir(outdir):
 os.mkdir(outdir)
 outdir += __rootoutdir__
 if not os.path.isdir(outdir):
 os.mkdir(outdir)
 # methods that require interpolation [No masks used!]
 if "mjenclosed" in glob_tasklist:
 new_type = {'type': 'cyl', 'n_r': 75, 'n_phi': 64, 'n_z': 100}
 o_grid = CYLINDRICAL_GRID(grid_info=new_type)
 o_d3int = INTMETHODS_STORE(glob_sim, o_grid, glob_symmetry)
 d3_interpolate_mjenclosed(o_d3int, outdir=outdir, rewrite=glob_overwrite)
 if "vtk" in glob_tasklist:
 o_grid = CARTESIAN_GRID()
 o_d3int = INTMETHODS_STORE(glob_sim, o_grid, glob_symmetry)
 d3_int_data_to_vtk(o_d3int, outdir=outdir, rewrite=glob_overwrite)
 for it in glob_its:
 sys.stdout.flush()
 o_d3int.save_vtk_file(it, glob_v_ns, glob_overwrite, outdir="profiles/", private_dir="vtk/")
 sys.stdout.flush()
 if "densmodeint" in glob_tasklist:
 o_grid = POLAR_GRID()
 o_d3int = INTMETHODS_STORE(glob_sim, o_grid, glob_symmetry)
 o_d3int.enforce_xy_grid = True
 d3_dens_modes_int(o_d3int, outdir=outdir, rewrite=glob_overwrite)
 # methods that do not require interpolation [Use masks for reflevels and lapse]
 d3corr_class = MAINMETHODS_STORE(glob_sim)
 d3corr_class.update_storage_lists(new_iterations=glob_its, new_times=glob_times) # remove corrupt
 # d3corr_class.mask_setup = {'rm_rl': True, # REMOVE previouse ref. level from the next
 # 'rho': [6.e4 / 6.176e+17, 1.e13 / 6.176e+17], # REMOVE atmo and NS
 # 'lapse': [0.15, 1.]} # remove apparent horizon
 # tasks for each iteration
 for it in glob_its:
 _outdir = outdir + str(it) + '/'
 if not os.path.isdir(_outdir):
 os.mkdir(_outdir)
 for task in glob_tasklist:
 # if task in ["all", "plotall", "densmode"]: pass
 if task == "corr": d3_corr_for_it(it, d3corr_class, outdir=_outdir, rewrite=glob_overwrite)
 if task == "hist": d3_hist_for_it(it, d3corr_class, outdir=_outdir, rewrite=glob_overwrite)
 if task == "slice": d3_to_d2_slice_for_it(it, d3corr_class, outdir=_outdir, rewrite=glob_overwrite)
 if task == "mass":
 d3_disk_mass_for_it(it, d3corr_class, outdir=_outdir, rewrite=glob_overwrite)
 d3_remnant_mass_for_it(it, d3corr_class, outdir=_outdir, rewrite=glob_overwrite)
 # else:
 # raise NameError("d3 method is not recognized: {}".format(task))
 d3corr_class.delete_for_it(it=it, except_v_ns=[], rm_masks=True, rm_comp=True, rm_prof=False)
 sys.stdout.flush()
 print("\n")
 # methods that require all iterations loaded
 if "densmode" in glob_tasklist:
 d3_dens_modes(d3corr_class, outdir=outdir, rewrite=glob_overwrite)
 # summary plot of values in every iteration
 if "plotmass" in glob_tasklist:
 plot_disk_mass(d3corr_class, rewrite=glob_overwrite)
 #
 d3_slices = MAINMETHODS_STORE_XYXZ(glob_sim)
 d3_corr = LOAD_RES_CORR(glob_sim)
 dm_class = LOAD_DENSITY_MODES(glob_sim)
 # tasks that rely on the previous outputs
 for it in glob_its:
 _outdir = outdir + str(it) + '/'
 for task in glob_tasklist:
 if task == "slicecorr":
 d2_slice_corr_for_it(it, d3_slices, _outdir, rewrite=glob_overwrite)
 sys.stdout.flush()
 # plotting tasks
 for task in glob_tasklist:
 if task.__contains__("plot"):
 # if task in ["all", "plotall", "densmode"]: pass
 if task == "plotcorr": plot_d3_corr(d3_corr, rewrite=glob_overwrite)
 if task == "plotslicecorr": plot_d2_slice_corr(d3_corr, rewrite=glob_overwrite)
 if task == "plotslice": plot_d3_prof_slices(d3_slices, rewritefigs=glob_overwrite)
 if task == "plothist": plot_d3_hist(d3_corr, rewrite=glob_overwrite)
 if task == "plotdensmode": plot_density_modes(dm_class, rewrite=glob_overwrite)
 if task == "plotcenterofmass": plot_center_of_mass(dm_class, rewrite=glob_overwrite)
 if task == "plotdensmodephase": plot_density_modes_phase(dm_class, rewrite=glob_overwrite)
 sys.stdout.flush()
 # else:
 # raise NameError("glob_task for plotting is not recognized: {}"
 # .format(task))
# python profile.py -s LS220_M14691268_M0_LK_SR --it 1409024 --plane xz -t slice --overwrite yes
# python slices.py -s LS220_M14691268_M0_LK_SR -t addm0 --it 1409024 --rl all --v_n all
# python profile.py -s LS220_M14691268_M0_LK_SR --it 1409024 --plane xz -t slicecorr plotslicecorr --v_n Q_eff_nua_dens_unb_bern --overwrite yes
if __name__ == '__main__':
 #
 parser = ArgumentParser(description="postprocessing pipeline")
 parser.add_argument("-s", dest="sim", required=True, help="task to perform")
 parser.add_argument("-t", dest="tasklist", required=False, nargs='+', default=[], help="tasks to perform")
 parser.add_argument("--v_n", dest="v_ns", required=False, nargs='+', default=[], help="variable (or group) name")
 parser.add_argument("--rl", dest="reflevels", required=False, nargs='+', default=[], help="reflevels")
 parser.add_argument("--it", dest="iterations", required=False, nargs='+', default=[], help="iterations")
 parser.add_argument('--time', dest="times", required=False, nargs='+', default=[], help='Timesteps')
 parser.add_argument('--plane', dest="plane", required=False, nargs='+', default=[], help='Plane: xy,xz,yz for slice analysis')
 parser.add_argument('--mask', dest="mask", required=False, nargs='+', default=[],
 help="Mask data for specific analysis. 'disk' is default ")
 #
 parser.add_argument("-o", dest="outdir", required=False, default=Paths.ppr_sims, help="path for output dir")
 parser.add_argument("-i", dest="simdir", required=False, default=Paths.gw170817, help="path to simulation dir")
 parser.add_argument("--overwrite", dest="overwrite", required=False, default="no", help="overwrite if exists")
 parser.add_argument("--usemaxtime", dest="usemaxtime", required=False, default="no",
 help=" auto/no to use ittime.h5 set value. Or set a float [ms] to overwrite ")
 #
 parser.add_argument("--sym", dest="symmetry", required=False, default=None, help="symmetry (like 'pi')")
 # Info/checks
 args = parser.parse_args()
 glob_tasklist = args.tasklist
 glob_sim = args.sim
 glob_simdir = args.simdir
 glob_outdir = args.outdir
 glob_v_ns = args.v_ns
 glob_rls = args.reflevels
 glob_its = args.iterations
 glob_times = args.times
 glob_planes = args.plane
 glob_symmetry = args.symmetry
 glob_overwrite = args.overwrite
 glob_masks = args.mask
 # simdir = Paths.gw170817 + glob_sim + '/'
 # resdir = Paths.ppr_sims + glob_sim + '/'
 glob_usemaxtime = args.usemaxtime
 glob_maxtime = np.nan
 # check given data
 if glob_symmetry is not None:
 if not click.confirm("Selected symmetry: {} Is it correct?".format(glob_symmetry),
 default=True, show_default=True):
 exit(1)
 # checking if to use maxtime
 stat_it_dic = {}
 if glob_usemaxtime == "no":
 glob_usemaxtime = False
 glob_maxtime = np.nan
 elif glob_usemaxtime == "auto":
 glob_usemaxtime = True
 glob_maxtime = np.nan
 elif re.match(r'^-?\d+(?:\.\d+)?$', glob_usemaxtime):
 glob_maxtime = float(glob_usemaxtime) / 1.e3 # [s]
 glob_usemaxtime = True
 else: raise NameError("for '--usemaxtime' option use 'yes' or 'no' or float. Given: {}"
 .format(glob_usemaxtime))
 # check mask
 if len(glob_masks) == 0:
 glob_masks = ["disk"]
 elif len(glob_masks) == 1 and "all" in glob_masks:
 glob_masks = __masks__
 else:
 for mask in glob_masks:
 if not mask in __masks__:
 raise NameError("mask: {} is not recognized. Use: \n{}"
 .format(mask, __masks__))
 # TODO Implement mask for every method, make clear that for interpolation cases it is not used. See 'd2_slice_corr_for_it' for example
 # check plane
 if len(glob_planes) == 0:
 pass
 elif len(glob_planes) == 1 and "all" in glob_planes:
 glob_planes = __d3slicesplanes__
 elif len(glob_planes) > 1:
 for plane in glob_planes:
 if not plane in __d3slicesplanes__:
 raise NameError("plane:{} is not in the list of the __d3slicesplanes__:{}"
 .format(plane, __d3slicesplanes__))
 # check if the simulations dir exists
 if not os.path.isdir(glob_simdir + glob_sim):
 raise NameError("simulation dir: {} does not exist in rootpath: {} "
 .format(glob_sim, glob_simdir))
 if not os.path.isdir(glob_outdir):
 raise NameError("output dir does not exist, please check: {}".format(glob_outdir))
 #
 Paths.gw170817 = glob_simdir
 Paths.ppr_sims = glob_outdir
 # check if tasks are set properly
 if len(glob_tasklist) == 0:
 raise NameError("tasklist is empty. Set what tasks to perform with '-t' option")
 elif len(glob_tasklist) == 1 and "all" in glob_tasklist:
 glob_tasklist = __profile__["tasklist"]
 glob_tasklist.remove("vtk")
 Printcolor.print_colored_string(["Set", "All", "tasks"],
 ["blue", "green", "blue"])
 else:
 for task in glob_tasklist:
 if not task in __profile__["tasklist"]:
 raise NameError("task: {} is not among available ones: {}"
 .format(task, __profile__["tasklist"]))
 # check if there any profiles to use
 ittime = LOAD_ITTIME(glob_sim)
 _, itprof, tprof = ittime.get_ittime("profiles", d1d2d3prof="prof")
 #
 if len(itprof) == 0:
 Printcolor.red("No profiles found. Please, extract profiles for {} "
 "and save them in /sim_dir/profiles/3d/ and/or update ittime.h5".format(glob_sim))
 exit(0)
 else:
 Printcolor.print_colored_string(["Available", "{}".format(len(itprof)), "profiles to postprocess"],
 ["blue", "green", "blue"])
 for it, t in zip(itprof, tprof):
 Printcolor.print_colored_string(["\tit:", "{:d}".format(it), "time:", "{:.1f}".format(t*1e3), "[ms]"],
 ["blue", "green", "blue", "green", "blue"])
 # check which iterations/timesteps to use
 if len(glob_its) > 0 and len(glob_times) > 0:
 raise ValueError("Please, set either iterations (--it) or times (--time) "
 "but NOT both")
 elif len(glob_its) == 0 and len(glob_times) == 0:
 raise ValueError("Please, set either iterations (--it) or times (--time)")
 elif (len(glob_times) == 1 and "all" in glob_times) or (len(glob_its) == 1 and "all"

 - 2
 label_length[i] = 1
 source_str.append('')
 else:
 if K.image_data_format() == 'channels_first':
 X_data[i, 0, 0:self.img_w, :] = (
 self.paint_func(self.X_text[index + i])[0, :, :].T)
 else:
 X_data[i, 0:self.img_w, :, 0] = (
 self.paint_func(self.X_text[index + i])[0, :, :].T)
 labels[i, :] = self.Y_data[index + i]
 input_length[i] = self.img_w // self.downsample_factor - 2
 label_length[i] = self.Y_len[index + i]
 source_str.append(self.X_text[index + i])
 inputs = {'the_input': X_data,
 'the_labels': labels,
 'input_length': input_length,
 'label_length': label_length,
 'source_str': source_str # used for visualization only
 }
 outputs = {'ctc': np.zeros([size])} # dummy data for dummy loss function
 return (inputs, outputs)
 def next_train(self):
 while 1:
 ret = self.get_batch(self.cur_train_index,
 self.minibatch_size, train=True)
 self.cur_train_index += self.minibatch_size
 if self.cur_train_index >= self.val_split:
 self.cur_train_index = self.cur_train_index % 32
 (self.X_text, self.Y_data, self.Y_len) = shuffle_mats_or_lists(
 [self.X_text, self.Y_data, self.Y_len], self.val_split)
 yield ret
 # to get next validation data
 def next_val(self):
 while 1:
 ret = self.get_batch(self.cur_val_index,
 self.minibatch_size, train=False)
 self.cur_val_index += self.minibatch_size
 if self.cur_val_index >= self.num_words:
 self.cur_val_index = self.val_split + self.cur_val_index % 32
 yield ret
 def on_train_begin(self, logs={}):
 self.build_word_list(16000, 4, 1)
 self.paint_func = lambda text: paint_text(
 text, self.img_w, self.img_h,
 rotate=False, ud=False, multi_fonts=False)
 def on_epoch_begin(self, epoch, logs={}):
 # rebind the paint function to implement curriculum learning
 if 3 <= epoch < 6:
 self.paint_func = lambda text: paint_text(
 text, self.img_w, self.img_h,
 rotate=False, ud=True, multi_fonts=False)
 elif 6 <= epoch < 9:
 self.paint_func = lambda text: paint_text(
 text, self.img_w, self.img_h,
 rotate=False, ud=True, multi_fonts=True)
 elif epoch >= 9:
 self.paint_func = lambda text: paint_text(
 text, self.img_w, self.img_h,
 rotate=True, ud=True, multi_fonts=True)
 if epoch >= 21 and self.max_string_len < 12:
 self.build_word_list(32000, 12, 0.5)
# the actual loss calc occurs here despite it not being
# an internal Keras loss function
def ctc_lambda_func(args):
 y_pred, labels, input_length, label_length = args
 # the 2 is critical here since the first couple outputs of the RNN
 # tend to be garbage:
 y_pred = y_pred[:, 2:, :]
 return K.ctc_batch_cost(labels, y_pred, input_length, label_length)
# For a real OCR application, this should be beam search with a dictionary
# and language model. For this example, best path is sufficient.
# in this example, test_func is defined as:
# test_func = K.function([input_data], [y_pred])
# K.function takes the input and output tensors as lists,
# so that you can create a function from many inputs to many outputs.
# here, we define a function whose input is input_data and whose output is y_pred
def decode_batch(test_func, word_batch):
 # out will be the prediction
 out = test_func([word_batch])[0]
 ret = []
 for j in range(out.shape[0]):
 out_best = list(np.argmax(out[j, 2:], 1))
 # itertools is a python module 
 # https://docs.python.org/3/library/itertools.html#itertools.groupby
 # groupby makes an iterator that returns consecutive keys and groups
 # from out_best, where k is the key and g is the group
 # for example:
 # [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
 # this is exactly what we need for CTC decoding
 out_best = [k for k, g in itertools.groupby(out_best)]
 outstr = labels_to_text(out_best)
 ret.append(outstr)
 return ret
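# A minimal, self-contained sketch of the greedy collapse used above (label values
# are made up, and itertools is assumed imported as it already is for decode_batch):
# repeated argmax labels are merged by itertools.groupby, and a real decoder
# (labels_to_text here) additionally drops the CTC blank label.
def _ctc_collapse_demo():
    fake_best_path = [2, 2, 28, 28, 4, 4, 28, 2]   # hypothetical per-timestep argmax, 28 = blank
    collapsed = [k for k, g in itertools.groupby(fake_best_path)]   # [2, 28, 4, 28, 2]
    return [k for k in collapsed if k != 28]        # [2, 4, 2]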
class VizCallback(keras.callbacks.Callback):
 def __init__(self, run_name, test_func, text_img_gen, num_display_words=6):
 self.test_func = test_func
 self.output_dir = os.path.join(
 OUTPUT_DIR, run_name)
 self.text_img_gen = text_img_gen
 self.num_display_words = num_display_words
 if not os.path.exists(self.output_dir):
 os.makedirs(self.output_dir)
 def show_edit_distance(self, num):
 num_left = num
 mean_norm_ed = 0.0
 mean_ed = 0.0
 while num_left > 0:
 word_batch = next(self.text_img_gen)[0]
 num_proc = min(word_batch['the_input'].shape[0], num_left)
 decoded_res = decode_batch(self.test_func,
 word_batch['the_input'][0:num_proc])
 for j in range(num_proc):
 edit_dist = editdistance.eval(decoded_res[j],
 word_batch['source_str'][j])
 mean_ed += float(edit_dist)
 mean_norm_ed += float(edit_dist) / len(word_batch['source_str'][j])
 num_left -= num_proc
 mean_norm_ed = mean_norm_ed / num
 mean_ed = mean_ed / num
 print('\nOut of %d samples: Mean edit distance:'
 '%.3f Mean normalized edit distance: %0.3f'
 % (num, mean_ed, mean_norm_ed))
 def on_epoch_end(self, epoch, logs={}):
 self.model.save_weights(
 os.path.join(self.output_dir, 'weights%02d.h5' % (epoch)))
 self.show_edit_distance(256)
 word_batch = next(self.text_img_gen)[0]
 res = decode_batch(self.test_func,
 word_batch['the_input'][0:self.num_display_words])
 if word_batch['the_input'][0].shape[0] < 256:
 cols = 2
 else:
 cols = 1
 for i in range(self.num_display_words):
 pylab.subplot(self.num_display_words // cols, cols, i + 1)
 if K.image_data_format() == 'channels_first':
 the_input = word_batch['the_input'][i, 0, :, :]
 else:
 the_input = word_batch['the_input'][i, :, :, 0]
 pylab.imshow(the_input.T, cmap='Greys_r')
 pylab.xlabel(
 'Truth = \'%s\'\nDecoded = \'%s\'' %
 (word_batch['source_str'][i], res[i]))
 fig = pylab.gcf()
 fig.set_size_inches(10, 13)
 pylab.savefig(os.path.join(self.output_dir, 'e%02d.png' % (epoch)))
 pylab.close()
# here is the high-level overview of this function:
# we first download wordlists with mono-gram/bi-gram words,
# then we use TextImageGenerator to generate
# a training dataset based on those words.
# Our model is: two CNN layers, two RNN layers,
# a dense layer, and finally a softmax output.
# And we use CTC to calculate the loss.
def train(run_name, start_epoch, stop_epoch, img_w):
 # Input Parameters
 img_h = 64
 words_per_epoch = 16000
 val_split = 0.2
 val_words = int(words_per_epoch * (val_split))
 # Network parameters
 conv_filters = 16
 kernel_size = (3, 3)
 pool_size = 2
 time_dense_size = 32
 rnn_size = 512
 minibatch_size = 32
 if K.image_data_format() == 'channels_first':
 input_shape = (1, img_w, img_h)
 else:
 input_shape = (img_w, img_h, 1)
 fdir = os.path.dirname(
 get_file('wordlists.tgz',
 origin='http://www.mythic-ai.com/datasets/wordlists.tgz',
 untar=True))
 img_gen = TextImageGenerator(
 monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
 bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
 minibatch_size=minibatch_size,
 img_w=img_w,
 img_h=img_h,
 downsample_factor=(pool_size ** 2),
 val_split=words_per_epoch - val_words)
 act = 'relu'
 input_data = Input(name='the_input', shape=input_shape, dtype='float32')
 inner = Conv2D(conv_filters, kernel_size, padding='same',
 activation=act, kernel_initializer='he_normal',
 name='conv1')(input_data)
 inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
 inner = Conv2D(conv_filters, kernel_size, padding='same',
 activation=act, kernel_initializer='he_normal',
 name='conv2')(inner)
 inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)
 conv_to_rnn_dims = (img_w // (pool_size ** 2),
 (img_h // (pool_size ** 2)) * conv_filters)
 inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)
 # cuts down input size going into RNN:
 inner = Dense(time_dense_size, activation=act, name='dense1')(inner)
 # Two layers of bidirectional GRUs
 # GRU seems to work as well, if not better than LSTM:
 gru_1 = GRU(rnn_size, return_sequences=True,
 kernel_initializer='he_normal', name='gru1')(inner)
 gru_1b = GRU(rnn_size, return_sequences=True,
 go_backwards=True, kernel_initializer='he_normal',
 name='gru1_b')(inner)
 gru1_merged = add([gru_1, gru_1b])
 gru_2 = GRU(rnn_size, return_sequences=True,
 kernel_initializer='he_normal', name='gru2')(gru1_merged)
 gru_2b = GRU(rnn_size, return_sequences=True, go_backwards=True,
 kernel_initializer='he_normal', name='gru2_b')(gru1_merged)
 # transforms RNN output to character activations:
 inner = Dense(img_gen.get_output_size(), kernel_initializer='he_normal',
 name='dense2')(concatenate([gru_2, gru_2b]))
 y_pred = Activation('softmax', name='softmax')(inner)
 predict_model = Model(inputs=input_data, outputs=y_pred)
 predict_model.summary()
 labels = Input(name='the_labels',
 shape=[img_gen.absolute_max_string_len], dtype='float32')
 input_length = Input(name='input_length', shape=[1], dtype='int64')
 label_length = Input(name='label_length', shape=[1], dtype='int64')
 # Keras doesn't currently support loss funcs with extra parameters
 # so CTC loss is implemented in a lambda layer
 loss_out = Lambda(
 ctc_lambda_func, output_shape=(1,),
 name='ctc')([y_pred, labels, input_length, label_length])
 # clipnorm seems to speed up convergence
 sgd = SGD(lr=0.02, decay=1e-6, momentum=0.9, nesterov=True, clipnorm=5)
 # The model will include all layers required in the computation of loss_out 
 # given inputs=[input_data, labels, input_length, label_length] 
 # where loss_out is lambda function
 model = Model(inputs=[input_data, labels, input_length, label_length],
 outputs=loss_out)
 # the loss calc occurs elsewhere, so use a dummy lambda func for the loss
 model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=sgd)
 if start_epoch > 0:
 weight_file = os.path.join(
 OUTPUT_DIR,
 os.path.join(run_name, 'weights%02d.h5' % (start_epoch - 1)))
 model.load_weights(weight_file)
 # captures output of softmax so we can decode the output during visualization
 test_func = K.function([input_data], [y_pred])
 viz_cb = VizCallback(run_name, test_func, img_gen.next_val())
 model.fit_generator(
 generator=img_gen.next_train(),
 steps_per_epoch=(words_per_epoch - val_words) // minibatch_size,
 epochs=stop_epoch,
 validation_data=img_gen.next_val(),
 validation_steps=val_words // minibatch_size,
 callbacks=[viz_cb, img_gen],
 initial_epoch=start_epoch)
 model.save_weights("ocr_crc_weight.h5")
# this is a helper function to get the predict_model,
# similar to train() but without doing the actual training.
# The shared setup could be factored out of train(),
# but for the purpose of understanding the original image_ocr.py
# we keep train() unchanged.
def get_predict_model(img_w, pre_trained_file):
 # Input Parameters
 img_h = 64
 words_per_epoch = 16000
 val_split = 0.2
 val_words = int(words_per_epoch * (val_split))
 # Network parameters
 conv_filters = 16
 kernel_size = (3, 3)
 pool_size = 2
 time_dense_size = 32
 rnn_size = 512
 minibatch_size = 32
 if K.image_data_format() == 'channels_first':
 input_shape = (1, img_w, img_h)
 else:
 input_shape = (img_w, img_h, 1)
 fdir = os.path.dirname(
 get_file('wordlists.tgz',
 origin='http://www.mythic-ai.com/datasets/wordlists.tgz',
 untar=True))
 img_gen = TextImageGenerator(
 monogram_file=os.path.join(fdir, 'wordlist_mono_clean.txt'),
 bigram_file=os.path.join(fdir, 'wordlist_bi_clean.txt'),
 minibatch_size=minibatch_size,
 img_w=img_w,
 img_h=img_h,
 downsample_factor=(pool_size ** 2),
 val_split=words_per_epoch - val_words)
 act = 'relu'
 input_data = Input(name='the_input', shape=input_shape, dtype='float32')
 inner = Conv2D(conv_filters, kernel_size, padding='same',
 activation=act, kernel_initializer='he_normal',
 name='conv1')(input_data)
 inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max1')(inner)
 inner = Conv2D(conv_filters, kernel_size, padding='same',
 activation=act, kernel_initializer='he_normal',
 name='conv2')(inner)
 inner = MaxPooling2D(pool_size=(pool_size, pool_size), name='max2')(inner)
 conv_to_rnn_dims = (img_w // (pool_size ** 2),
 (img_h // (pool_size ** 2)) * conv_filters)
 inner = Reshape(target_shape=conv_to_rnn_dims, name='reshape')(inner)
 # cuts down input size going into RNN:
 inner = Dense(time_dense_size, activation=act, name='dense1')(inner)
 # Two layers of bidirectional GRUs
 # GRU seems to work as well, if not better than LSTM:
 gru_1 = GRU(rnn_size, return_sequences=True,
 kernel_initializer='he_normal', name='gru1')(inner)
 gru_1b = GRU(rnn_size, return_sequences=True,
 go_backwards=True, kernel_initializer='he_normal',
 name='gru1_b')(inner)

 GeoTiff file using the
 ``gs`` scheme. If the :py:class:`~descarteslabs.catalog.StorageState` is
 :py:attr:`~descarteslabs.catalog.StorageState.REMOTE`, this field is optional
 and you can use one of the schemes ``gs``, ``http``, ``https``, ``ftp``, or
 ``ftps``; if the scheme is ``gs``, it must be a valid reference
 but can be any format.
 size_bytes : int
 Size of the file in bytes. Required when the
 :py:class:`~descarteslabs.catalog.StorageState` is
 :py:attr:`~descarteslabs.catalog.StorageState.AVAILABLE`.
 hash : str
 The md5 hash for the given file. Required when the
 :py:class:`~descarteslabs.catalog.StorageState` is
 :py:attr:`~descarteslabs.catalog.StorageState.AVAILABLE`.
 provider_id : str
 Optional ID for the external provider when the
 :py:class:`~descarteslabs.catalog.StorageState` is
 :py:attr:`~descarteslabs.catalog.StorageState.REMOTE`.
 provider_href : str
 A URI to describe the remote image in more detail. Either the `provider_href`
 or the `href` must be specified when the
 :py:class:`~descarteslabs.catalog.StorageState` is
 :py:attr:`~descarteslabs.catalog.StorageState.REMOTE`.
 """
 href = Attribute()
 size_bytes = Attribute()
 hash = Attribute()
 provider_id = Attribute()
 provider_href = Attribute()
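# A hedged example (values invented, and assuming this class is the File mapping
# attribute referenced in the ListAttribute example below) of populating a file
# entry for data that is already uploaded, i.e. StorageState.AVAILABLE: href uses
# the ``gs`` scheme, and size_bytes plus the md5 hash are required in that state.
#
#   >>> uploaded = File(
#   ...     href="gs://my-bucket/scene.tif",
#   ...     size_bytes=1048576,
#   ...     hash="9e107d9d372bb6826bd81d3542a419d6",
#   ... )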
class ListAttribute(ModelAttribute, MutableSequence):
 """Base class for attributes that are lists.
 Can be set using an iterable of items. The type is the same for all list items,
 and created automatically to hold a given deserialized value if it's not already
 that type. The type can reject the value with a `AttributeValidationError`.
 ListAttributes behave similarly to `MappingAttributes` but provide additional
 operations that allow list-like interactions (slicing, appending, etc.)
 One major difference between ListAttributes and `MappingAttributes` is that
 ListAttributes shouldn't be subclassed or instantiated directly - it's much easier
 for users to construct and assign a list or iterable, and allow __set__ to handle
 the coercing of the values to the correct type.
 Parameters
 ----------
 attribute_type : Attribute
 All items in the ListAttribute must be of the same Attribute type. The actual
 values must be able to be deserialized by that Attribute type.
 items : Iterable
 An iterable of items from which to construct the initial content.
 validate : bool
 Whether or not to verify whether the values are valid for the given Attribute
 type. ``True`` by default.
 Raises
 ------
 AttributeValidationError
 If any of the items cannot be successfully deserialized to the given attribute
 type and `validate` is ``True``.
 Example
 -------
 This is the recommended way to instantiate a ListAttribute: you don't maintain a
 reference to the original list, but the semantics are much cleaner.
 >>> from descarteslabs.catalog import CatalogObject, File
 >>> from descarteslabs.catalog.attributes import ListAttribute
 >>> class ExampleCatalogObject(CatalogObject):
 ... files = ListAttribute(File)
 >>> files = [
 ... File(href="https://foo.com/1"),
 ... File(href="https://foo.com/2"),
 ... ]
 >>> obj = ExampleCatalogObject(files=files)
 >>> assert obj.files is not files
 """
 # this value is ONLY used for instances of the attribute that
 # are attached to class definitions. It's confusing to put this
 # instantiation into __init__, because the value is only ever set
 # from AttributeMeta.__new__, after it's already been instantiated
 _attribute_name = None
 def __init__(self, attribute_type, validate=True, items=None, **kwargs):
 if isinstance(attribute_type, Attribute):
 self._attribute_type = attribute_type
 elif issubclass(attribute_type, Attribute):
 self._attribute_type = attribute_type(**kwargs)
 else:
 raise AttributeValidationError(
 "First argument for {} must be an Attribute type".format(
 self.__class__.__name__
 )
 )
 # give the attribute_type our own name for meaningful error messages
 self._attribute_type._attribute_name = self._attribute_name
 self._items = []
 super(ListAttribute, self).__init__(**kwargs)
 if items is not None:
 self._items = [
 self._instantiate_item(item, validate=validate) for item in items
 ]
 def __repr__(self):
 """A string representation for this instance.
 The representation is broken up over multiple lines for readability.
 """
 sections = []
 for item in self._items:
 sections.append(repr(item))
 return "[" + ", ".join(sections) + "]"
 def _instantiate_item(self, item, validate=True, add_model=True):
 """Handles coercing the provided value to the correct type.
 Handles coercing the provided value to the correct type, optionally registers
 this instance of the ListAttribute as the model object for ModelAttribute
 item types.
 """
 item = self._attribute_type.deserialize(item, validate=validate)
 if add_model and isinstance(item, ModelAttribute):
 item._add_model_object(self)
 return item
 def serialize(self, values, jsonapi_format=False):
 """Serialize a value to a json-serializable type.
 See :meth:`Attribute.serialize`.
 """
 if values is None:
 return None
 return [
 self._attribute_type.serialize(v, jsonapi_format=jsonapi_format)
 for v in values
 ]
 def deserialize(self, values, validate=True):
 """Deserialize a value to a native type.
 See :meth:`Attribute.deserialize`.
 Parameters
 ----------
 values : Iterable
 An iterator used to initialize a `ListAttribute` instance.
 Returns
 -------
 ListAttribute
 A `ListAttribute` with the given items.
 Raises
 ------
 AttributeValidationError
 If the value is not an iterable or if the value cannot be successfully
 deserialized to the given attribute type and `validate` is ``True``.
 """
 if values is None:
 return None
 if isinstance(values, ListAttribute):
 return values
 if not isinstance(values, Iterable) or isinstance(values, (str, bytes)):
 raise AttributeValidationError(
 "{} expects a non-string/bytes iterable for attribute {}, not {}".format(
 self.__class__.__name__,
 self._attribute_name,
 values.__class__.__name__,
 )
 )
 # ensures subclasses are handled correctly
 type_ = type(self)
 return type_(
 self._attribute_type,
 validate=validate,
 items=values,
 **self._get_attr_params()
 )
 # MutableSequence methods
 def __getitem__(self, n):
 return self._items[n]
 def __setitem__(self, n, item):
 self._raise_if_immutable_or_readonly("set")
 previous_value = self._items[n]
 # handling slice assignment
 if isinstance(n, slice):
 try:
 iter(item)
 except TypeError:
 # mimic the error you get from the builtin
 raise TypeError("Can only assign an iterable")
 new_item = list(self._instantiate_item(o) for o in item)
 else:
 new_item = self._instantiate_item(item)
 # `_set_modified()` will raise exception if change is not allowed
 self._set_modified(changed=(previous_value != new_item))
 # will throw IndexError which is what we want if previous value isn't set
 self._items[n] = new_item
 # slicing returns a list of items
 if not isinstance(n, slice):
 previous_value = [previous_value]
 for val in previous_value:
 if isinstance(val, MappingAttribute):
 val._remove_model_object(self)
 def __delitem__(self, n):
 self._raise_if_immutable_or_readonly("delete")
 previous_value = self._items[n]
 # slicing returns a list of items
 if not isinstance(n, slice):
 previous_value = [previous_value]
 for val in previous_value:
 if isinstance(val, MappingAttribute):
 val._remove_model_object(self)
 new_items = list(self._items)
 # will throw IndexError which is what we want if previous value isn't set
 del new_items[n]
 # `_set_modified()` will raise exception if change is not allowed
 self._set_modified(changed=(self._items != new_items))
 self._items = new_items
 def __len__(self):
 return len(self._items)
 def insert(self, index, value):
 self._raise_if_immutable_or_readonly("insert")
 new_value = self._instantiate_item(value)
 # `_set_modified()` will raise exception if change is not allowed
 self._set_modified()
 self._items.insert(index, new_value)
 # Remaining Sequence methods
 def __add__(self, other):
 # emulating how concatenation works for lists
 if not isinstance(other, Iterable) or isinstance(other, (str, bytes)):
 raise TypeError(
 "{} can only concatenate non-string/bytes iterables"
 "for attribute {}, not {}".format(
 self.__class__.__name__,
 self._attribute_name,
 other.__class__.__name__,
 )
 )
 # this is a shallow copy operations, so we don't attach the new item to this
 # model object
 new_other = [self._instantiate_item(o, add_model=False) for o in other]
 return self._items + new_other
 def __mul__(self, other):
 return self._items * other
 def __imul__(self, other):
 # `_set_modified()` will raise exception if change is not allowed
 self._set_modified(changed=(self._items and other != 1))
 self._items *= other
 return self
 def __rmul__(self, other):
 return self._items * other
 def copy(self):
 """Return a shallow copy of the list."""
 return self._items.copy()
 def sort(self, key=None, reverse=False):
 self._raise_if_immutable_or_readonly("sort")
 """Stable sort *IN PLACE*."""
 new_items = list(self._items)
 new_items.sort(key=key, reverse=reverse)
 # `_set_modified()` will raise exception if change is not allowed
 self._set_modified(changed=(self._items != new_items))
 self._items = new_items
 # Comparison methods
 def __eq__(self, other):
 if self is other:
 return True
 if not isinstance(other, (self.__class__, Iterable)):
 return False
 if len(self) != len(other):
 return False
 for (i1, i2) in zip(self, other):
 if i1 != i2:
 return False
 return True
 def __ge__(self, other):
 if isinstance(other, self.__class__):
 other = other._items
 # allow list __ge__ to raise/return
 return self._items >= other
 def __gt__(self, other):
 if isinstance(other, self.__class__):
 other = other._items
 # allow list __gt__ to raise/return
 return self._items > other
 def __le__(self, other):
 if isinstance(other, self.__class__):
 other = other._items
 # allow list __le__ to raise/return
 return self._items <= other
 def __lt__(self, other):
 if isinstance(other, self.__class__):
 other = other._items
 # allow list __lt__ to raise/return
 return self._items < other
class ExtraPropertiesAttribute(ModelAttribute, MutableMapping):
 """An attribute that contains properties (key/value pairs).
 Can be set using a dictionary of items or any `Mapping`, or an instance of this
 attribute. All keys must be strings and values can be strings or numbers.
 ExtraPropertiesAttribute behaves similarly to a dictionary.
 Example
 -------
 This is the recommended way to instantiate an ExtraPropertiesAttribute: you don't
 maintain a reference to the original mapping, but the semantics are much cleaner.
 >>> from descarteslabs.catalog import CatalogObject
 >>> from descarteslabs.catalog.attributes import ExtraPropertiesAttribute
 >>> class ExampleCatalogObject(CatalogObject):
 ... extra_properties = ExtraPropertiesAttribute()

 == "offset":
 estoffset = rawvalue if self.showunits else value
 elif name == "pmode":
 # FIXME, pmode never used.
 pmode = value
 elif name == "ppoll":
 ppoll = value
 if ppoll < 0:
 ppoll = ntp.magic.NTP_MINPOLL
 elif name == "precision":
 # FIXME, precision never used.
 precision = value
 elif name == "reach":
 # Shipped as hex, displayed in octal
 reach = value
 elif name == "refid":
 # The C code for this looked crazily overelaborate. Best
 # guess is that it was designed to deal with formats that
 # no longer occur in this field.
 if "refid" in self.__header:
 dstadr_refid = rawvalue
 elif name == "rec":
 rec = value # l_fp timestamp
 last_sync = int(now - ntp.ntpc.lfptofloat(rec))
 elif name == "reftime":
 reftime = value # l_fp timestamp
 last_sync = int(now - ntp.ntpc.lfptofloat(reftime))
 elif name == "rootdelay":
 # FIXME, rootdelay never used.
 rootdelay = value # l_fp timestamp
 elif name == "rootdisp" or name == "dispersion":
 estdisp = rawvalue if self.showunits else value
 elif name in ("srcadr", "peeradr"):
 srcadr = value
 elif name == "srchost":
 srchost = value
 elif name == "srcport" or name == "peerport":
 # FIXME, srcport never used.
 srcport = value
 elif name == "stratum":
 stratum = value
 elif name == "ttl":
 # FIXME, ttl never used.
 ttl = value
 elif name == "unreach":
 # FIXME, unreach never used.
 unreach = value
 elif name == "xmt":
 # FIXME, xmt never used.
 xmt = value
 else:
 # unknown name?
 # line = " name=%s " % (name) # debug
 # return line # debug
 continue
 if hmode == ntp.magic.MODE_BCLIENTX:
 # broadcastclient or multicastclient
 ptype = 'b'
 elif hmode == ntp.magic.MODE_BROADCAST:
 # broadcast or multicast server
 if srcadr.startswith("224."): # IANA multicast address prefix
 ptype = 'M'
 else:
 ptype = 'B'
 elif hmode == ntp.magic.MODE_CLIENT:
 if PeerSummary.is_clock(variables):
 ptype = 'l' # local refclock
 elif dstadr_refid == "POOL":
 ptype = 'p' # pool
 elif srcadr.startswith("224."):
 ptype = 'a' # manycastclient
 else:
 ptype = 'u' # unicast
 elif hmode == ntp.magic.MODE_ACTIVE:
 ptype = 's' # symmetric active
 elif hmode == ntp.magic.MODE_PASSIVE:
 ptype = 'S' # symmetric passive
 #
 # Got everything, format the line
 #
 line = ""
 poll_sec = 1 << min(ppoll, hpoll)
 self.polls.append(poll_sec)
 if self.pktversion > ntp.magic.NTP_OLDVERSION:
 c = " x.-+#*o"[ntp.control.CTL_PEER_STATVAL(rstatus) & 0x7]
 else:
 c = " .+*"[ntp.control.CTL_PEER_STATVAL(rstatus) & 0x3]
 # Source host or clockname or poolname or servername
 # After new DNS, 2017-Apr-17
 # servers setup via numerical IP Address have only srcadr
 # servers setup via DNS have both srcadr and srchost
 # refclocks have both srcadr and srchost
 # pool has "0.0.0.0" (or "::") and srchost
 # slots setup via pool have only srcadr
 if srcadr is not None \
 and srcadr != "0.0.0.0" \
 and srcadr[:7] != "127.127" \
 and srcadr != "::":
 if self.showhostnames:
 try:
 if self.debug:
 self.logfp.write("DNS lookup begins...\n")
 clock_name = canonicalize_dns(srcadr)
 if self.debug:
 self.logfp.write("DNS lookup ends.\n")
 except TypeError:
 return ''
 else:
 clock_name = srcadr
 else:
 clock_name = srchost
 if clock_name is None:
 if srcadr:
 clock_name = srcadr
 else:
 clock_name = ""
 if self.wideremote and len(clock_name) > self.namewidth:
 line += ("%c%s\n" % (c, clock_name))
 line += (" " * (self.namewidth + 2))
 else:
 line += ("%c%-*.*s " % (c, self.namewidth, self.namewidth,
 clock_name[:self.namewidth]))
 # Destination address, assoc ID or refid.
 assocwidth = 7 if self.displaymode == "apeers" else 0
 if "." not in dstadr_refid and ":" not in dstadr_refid:
 dstadr_refid = "." + dstadr_refid + "."
 if assocwidth and len(dstadr_refid) >= self.refidwidth - assocwidth:
 visible = "..."
 else:
 visible = dstadr_refid
 line += self.high_truncate(visible, self.refidwidth)
 if self.displaymode == "apeers":
 line += (" " * (self.refidwidth - len(visible) - assocwidth + 1))
 line += ("%-6d" % (associd))
 else:
 line += (" " * (self.refidwidth - len(visible)))
 # The rest of the story
 if last_sync is None:
 last_sync = now
 jd = estjitter if have_jitter else estdisp
 line += (
 " %2ld %c %4.4s %4.4s %3lo"
 % (stratum, ptype,
 PeerSummary.prettyinterval(last_sync),
 PeerSummary.prettyinterval(poll_sec), reach))
 if saw6:
 if self.showunits:
 line += (
 " %s %s %s" %
 (unitify(estdelay, UNIT_MS),
 unitify(estoffset, UNIT_MS),
 unitify(jd, UNIT_MS)))
 else:
 line += (
 " %s %s %s" %
 (f8dot4(estdelay), f8dot4(estoffset), f8dot4(jd)))
 else:
 # old servers only have 3 digits of fraction
 # don't print a fake 4th digit
 if self.showunits:
 line += (
 " %s %s %s" %
 (unitify(estdelay, UNIT_MS),
 unitify(estoffset, UNIT_MS),
 unitify(jd, UNIT_MS)))
 else:
 line += (
 " %s %s %s" %
 (f8dot3(estdelay), f8dot3(estoffset), f8dot3(jd)))
 line += "\n"
 # for debugging both cases
 # if srcadr != None and srchost != None:
 # line += "srcadr: %s, srchost: %s\n" % (srcadr, srchost)
 return line
 def intervals(self):
 "Return and flush the list of actual poll intervals."
 res = self.polls[:]
 self.polls = []
 return res
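# Small illustrative check (not ntpsec code) of the reach-register formatting used
# in the summary line above: "reach" arrives as an integer and is printed in octal
# via "%3lo", so a peer that answered all of the last eight polls shows as 377.
def _reach_format_demo():
    return "%3lo" % 255  # -> '377'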
class MRUSummary:
 "Reusable class for MRU entry summary generation."
 def __init__(self, showhostnames, wideremote=False,
 debug=0, logfp=sys.stderr):
 self.debug = debug
 self.logfp = logfp
 self.now = None
 self.showhostnames = showhostnames # If false, display numeric IPs
 self.wideremote = wideremote
 header = " lstint avgint rstr r m v count rport remote address"
 def summary(self, entry):
 last = ntp.ntpc.lfptofloat(entry.last)
 if self.now:
 lstint = int(self.now - last + 0.5)
 stats = "%7d" % lstint
 else:
 # direct mode doesn't have a reference time
 MJD_1970 = 40587 # MJD for 1 Jan 1970, Unix epoch
 days, lstint = divmod(int(last), 86400)
 stats = "%5d %5d" % (days + MJD_1970, lstint)
 first = ntp.ntpc.lfptofloat(entry.first)
 active = float(last - first)
 if entry.ct == 1:
 favgint = 0
 else:
 favgint = active / (entry.ct-1)
 avgint = int(favgint + 0.5)
 if 5.0 < favgint or 1 == entry.ct:
 stats += " %6d" % avgint
 elif 1.0 <= favgint:
 stats += " %6.2f" % favgint
 else:
 stats += " %6.3f" % favgint
 if entry.rs & ntp.magic.RES_KOD:
 rscode = 'K'
 elif entry.rs & ntp.magic.RES_LIMITED:
 rscode = 'L'
 else:
 rscode = '.'
 (ip, port) = portsplit(entry.addr)
 try:
 if not self.showhostnames:
 dns = ip
 else:
 dns = canonicalize_dns(ip)
 # Forward-confirm the returned DNS
 confirmed = canonicalization_cache.get(dns)
 if confirmed is None:
 confirmed = False
 try:
 ai = socket.getaddrinfo(dns, None)
 for (_, _, _, _, sockaddr) in ai:
 if sockaddr and sockaddr[0] == ip:
 confirmed = True
 break
 except socket.gaierror:
 pass
 canonicalization_cache.set(dns, confirmed)
 if not confirmed:
 dns = "%s (%s)" % (ip, dns)
 if not self.wideremote:
 # truncate for narrow display
 dns = dns[:40]
 stats += " %4hx %c %d %d %6d %5s %s" % \
 (entry.rs, rscode,
 ntp.magic.PKT_MODE(entry.mv),
 ntp.magic.PKT_VERSION(entry.mv),
 entry.ct, port[1:], dns)
 return stats
 except ValueError:
 # This can happen when ntpd ships a corrupt varlist
 return ''
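# Worked example (not part of ntpsec) of the direct-mode MJD arithmetic in
# MRUSummary.summary above: a last-seen time 2 days and 30 s after the Unix epoch
# splits into MJD 40587 + 2 = 40589 with a 30 s remainder.
def _mjd_split_demo():
    days, secs = divmod(86400 * 2 + 30, 86400)
    return days + 40587, secs  # -> (40589, 30)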
class ReslistSummary:
 "Reusable class for reslist entry summary generation."
 header = """\
 hits addr/prefix or addr mask
 restrictions
"""
 width = 72
 @staticmethod
 def __getPrefix(mask):
 if not mask:
 prefix = ''
 if ':' in mask:
 sep = ':'
 base = 16
 else:
 sep = '.'
 base = 10
 prefix = sum([bin(int(x, base)).count('1')
 for x in mask.split(sep) if x])
 return '/' + str(prefix)
 def summary(self, variables):
 hits = variables.get("hits", "?")
 address = variables.get("addr", "?")
 mask = variables.get("mask", "?")
 if address == '?' or mask == '?':
 return ''
 address += ReslistSummary.__getPrefix(mask)
 flags = variables.get("flags", "?")
 # reslist responses are often corrupted
 s = "%10s %s\n %s\n" % (hits, address, flags)
 # Throw away corrupted entries. This is a shim - we really
 # want to make ntpd stop generating garbage
 for c in s:
 if not c.isalnum() and c not in "/.: \n":
 return ''
 return s
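# Worked example (not ntpsec code) of the popcount-based netmask-to-prefix
# conversion in ReslistSummary above: "255.255.255.0" has 8+8+8+0 set bits,
# so it is rendered as "/24".
def _prefix_demo(mask="255.255.255.0"):
    prefix = sum(bin(int(octet, 10)).count('1') for octet in mask.split('.') if octet)
    return '/' + str(prefix)  # -> '/24'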
class IfstatsSummary:
 "Reusable class for ifstats entry summary generation."
 header = """\
 interface name send
 # address/broadcast drop flag received sent failed peers uptime
 """
 width = 74
 # Numbers are the fieldsize
 fields = {'name': '%-24.24s',
 'flags': '%4x',
 'rx': '%6d',
 'tx': '%6d',
 'txerr': '%6d',
 'pc': '%5d',
 'up': '%8d'}
 def summary(self, i, variables):
 formatted = {}
 try:
 # Format the fields
 for name in self.fields.keys():
 value = variables.get(name, "?")
 if value == "?":
 fmt = value
 else:
 fmt = self.fields[name]

import numpy as np
import math
import sys
import scipy.ndimage
import pickle
import graph as splfy
import code
import random
import showTOPO
from rtree import index
from time import time 
from hopcroftkarp import HopcroftKarp
from sets import Set
from subprocess import Popen
def latlonNorm(p1, lat = 40):
 p11 = p1[1] * math.cos(math.radians(lat))
 l = np.sqrt(p11 * p11 + p1[0] * p1[0])
 return p1[0]/l, p11/l
def pointToLineDistance(p1,p2,p3):
 # p1 --> p2 is the line
 # p1 is (0,0)
 dist = np.sqrt(p2[0] * p2[0] + p2[1] * p2[1]) 
 proj_length = (p2[0] * p3[0] + p2[1] * p3[1]) / dist 
 if proj_length > dist :
 a = p3[0] - p2[0]
 b = p3[1] - p2[1]
 return np.sqrt(a*a + b*b)
 if proj_length < 0 :
 a = p3[0] - p1[0]
 b = p3[1] - p1[1]
 return np.sqrt(a*a + b*b)
 alpha = proj_length / dist
 p4 = [0,0]
 p4[0] = alpha * p2[0]
 p4[1] = alpha * p2[1]
 a = p3[0] - p4[0]
 b = p3[1] - p4[1]
 return np.sqrt(a*a + b*b)
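# Quick sanity check (coordinates invented) of pointToLineDistance above: for the
# segment (0,0)->(1,0) and the point (0.5, 0.3) the projection falls inside the
# segment, so the returned distance is just the perpendicular offset 0.3.
def _point_to_line_demo():
    return pointToLineDistance((0, 0), (1.0, 0.0), (0.5, 0.3))  # -> 0.3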
def pointToLineDistanceLatLon(p1,p2,p3):
 pp2 = [0,0]
 pp3 = [0,0]
 pp2[0] = p2[0] - p1[0]
 pp2[1] = (p2[1] - p1[1]) * math.cos(math.radians(p1[0]))
 pp3[0] = p3[0] - p1[0]
 pp3[1] = (p3[1] - p1[1]) * math.cos(math.radians(p1[0]))
 return pointToLineDistance((0,0), pp2, pp3)
def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
 #print(max_lat, min_lat, sizex)
 ilat = sizex - int((lat-min_lat) / ((max_lat - min_lat)/sizex))
 #ilat = int((lat-min_lat) / ((max_lat - min_lat)/sizex))
 ilon = int((lon-min_lon) / ((max_lon - min_lon)/sizey))
 return ilat, ilon
def distance(p1, p2):
 a = p1[0] - p2[0]
 b = (p1[1] - p2[1])*math.cos(math.radians(p1[0]))
 return np.sqrt(a*a + b*b)
def angleDistance(p1, p2):
 l1 = np.sqrt(p1[0] * p1[0] + p1[1] * p1[1])
 l2 = np.sqrt(p2[0] * p2[0] + p2[1] * p2[1])
 if l1 == 0 or l2 == 0:
 return 100000
 a = (p1[0]/l1 - p2[0]/l2)
 b = (p1[1]/l1 - p2[1]/l2)
 return np.sqrt(a*a + b * b)
def TOPOGenerateStartingPoints(OSMMap, check = True, density = 0.00050, region = None, image = None, direction = False, metaData = None, mergin=0.07):
 result = []
 tunnel_skip_num = 0
 svgEdges = []
 if image != 'NULL':
 img = scipy.ndimage.imread(image)
 sizex = np.shape(img)[0]
 sizey = np.shape(img)[1]
 if len(np.shape(img)) > 2:
 img = img[:,:,3].reshape((sizex, sizey))
 else:	
 img = None
 
 def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
 ilat = sizex - int((lat-min_lat) / ((max_lat - min_lat)/sizex))
 ilon = int((lon-min_lon) / ((max_lon - min_lon)/sizey))
 return ilat, ilon
 visitedNodes = []
 for nodeid in OSMMap.nodes.keys():
 if nodeid in visitedNodes:
 continue
 cur_node = nodeid 
 next_nodes = {}
 for nn in OSMMap.nodeLink[cur_node] + OSMMap.nodeLinkReverse[cur_node]:
 next_nodes[nn] = 1
 if len(next_nodes.keys()) == 2:
 continue 
 for nextnode in next_nodes.keys():
 if nextnode in visitedNodes:
 continue
 node_list = [nodeid]
 cur_node = nextnode 
 while True:
 node_list.append(cur_node)
 neighbor = {}
 for nn in OSMMap.nodeLink[cur_node] + OSMMap.nodeLinkReverse[cur_node]:
 neighbor[nn] = 1
 if len(neighbor.keys()) != 2:
 break
 if node_list[-2] == neighbor.keys()[0] :
 cur_node = neighbor.keys()[1]
 else:
 cur_node = neighbor.keys()[0]
 for i in range(1, len(node_list)-1):
 visitedNodes.append(node_list[i])
 dists = []
 dist = 0
 for i in range(0, len(node_list)-1):
 dists.append(dist)
 dist += distance(OSMMap.nodes[node_list[i]],OSMMap.nodes[node_list[i+1]])
 dists.append(dist)
 if dist < density/2:
 continue
 n = max(int(dist / density),1)
 alphas = [float(x+1)/float(n+1) for x in range(n)]
 
 
 for alpha in alphas:
 for j in range(len(node_list)-1):
 # Don't add starting locations in the tunnel
 if metaData is not None:
 nnn1 = OSMMap.nodeHashReverse[node_list[j]]
 nnn2 = OSMMap.nodeHashReverse[node_list[j+1]]
 if metaData.edgeProperty[metaData.edge2edgeid[(nnn1,nnn2)]]['layer'] < 0:
 tunnel_skip_num += 1
 continue
 if alpha * dist >= dists[j] and alpha * dist <= dists[j+1]:
 a = (alpha * dist - dists[j]) / (dists[j+1] - dists[j])
 lat = (1-a)*OSMMap.nodes[node_list[j]][0] + a * OSMMap.nodes[node_list[j+1]][0]
 lon = (1-a)*OSMMap.nodes[node_list[j]][1] + a * OSMMap.nodes[node_list[j+1]][1]
 if img is not None:
 x,y = Coord2Pixels(lat, lon, region[0], region[1], region[2], region[3], sizex, sizey)
 if x>0 and x<sizex and y>0 and y < sizey:
 if img[x,y] > 0:
 result.append((lat, lon, node_list[j], node_list[j+1], alpha * dist - dists[j], dists[j+1] - alpha * dist))
 else:
 lat_mergin = mergin*(region[2]-region[0])
 lon_mergin = mergin*(region[3]-region[1])
 # These were 0.00100 and 0.00150 for lat and lon
 if lat-region[0] > lat_mergin and region[2] - lat > lat_mergin and lon-region[1] > lon_mergin and region[3] - lon > lon_mergin:
 result.append((lat, lon, node_list[j], node_list[j+1], alpha * dist - dists[j], dists[j+1] - alpha * dist))
 for _,edge in OSMMap.edges.iteritems():
 svgEdges.append((OSMMap.nodes[edge[0]][0],OSMMap.nodes[edge[0]][1], OSMMap.nodes[edge[1]][0], OSMMap.nodes[edge[1]][1]))
 showTOPO.RenderRegion(result, svgEdges, region, "gt.svg")
 print(len(result))
 print("Skipped tunnels ", tunnel_skip_num)
 return result
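# Self-contained sketch (an illustrative assumption, not part of the original
# code) of the arc-length interpolation idea used above: n samples are placed
# evenly along a polyline by walking its cumulative distance, which is what the
# alphas/dists loop does for every chain of degree-2 OSM nodes.
import math

def sample_polyline_sketch(points, n):
    """Return n points spaced evenly by arc length along `points`."""
    dists = [0.0]
    for a, b in zip(points, points[1:]):
        dists.append(dists[-1] + math.hypot(b[0] - a[0], b[1] - a[1]))
    total = dists[-1]
    samples = []
    for k in range(n):
        target = total * (k + 1) / (n + 1)
        j = max(i for i in range(len(dists) - 1) if dists[i] <= target)
        t = (target - dists[j]) / (dists[j + 1] - dists[j])
        samples.append(((1 - t) * points[j][0] + t * points[j + 1][0],
                        (1 - t) * points[j][1] + t * points[j + 1][1]))
    return samples

# sample_polyline_sketch([(0, 0), (0, 1), (1, 1)], 3) -> [(0.0, 0.5), (0.0, 1.0), (0.5, 1.0)]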
def TOPOGeneratePairs(GPSMap, OSMMap, OSMList, threshold = 0.00010, region = None, single = False, edgeids = None):
 result = {}
 matchedLoc = []
 idx = index.Index()
 if edgeids is not None:
 for edgeid in edgeids:	
 if edgeid not in GPSMap.edges.keys():
 continue
 n1 = GPSMap.edges[edgeid][0]
 n2 = GPSMap.edges[edgeid][1]
 lat1 = GPSMap.nodes[n1][0]
 lon1 = GPSMap.nodes[n1][1]
 lat2 = GPSMap.nodes[n2][0]
 lon2 = GPSMap.nodes[n2][1]
 idx.insert(edgeid, (min(lat1, lat2), min(lon1, lon2), max(lat1, lat2), max(lon1, lon2)))
 
 else:
 for edgeid in GPSMap.edges.keys():	
 n1 = GPSMap.edges[edgeid][0]
 n2 = GPSMap.edges[edgeid][1]
 lat1 = GPSMap.nodes[n1][0]
 lon1 = GPSMap.nodes[n1][1]
 lat2 = GPSMap.nodes[n2][0]
 lon2 = GPSMap.nodes[n2][1]
 idx.insert(edgeid, (min(lat1, lat2), min(lon1, lon2), max(lat1, lat2), max(lon1, lon2)))
 
 #for item in OSMList:
 for i in range(len(OSMList)):
 item = OSMList[i]
 lat = item[0]
 lon = item[1]
 possible_edges = list(idx.intersection((lat-threshold*2,lon-threshold*2, lat+threshold*2, lon+threshold*2)))
 min_dist = 10000
 min_edge = -1
 for edgeid in possible_edges:
 n1 = GPSMap.edges[edgeid][0]
 n2 = GPSMap.edges[edgeid][1]
 n3 = item[2]
 n4 = item[3]
 lat1 = GPSMap.nodes[n1][0]
 lon1 = GPSMap.nodes[n1][1]
 lat2 = GPSMap.nodes[n2][0]
 lon2 = GPSMap.nodes[n2][1]
 lat3 = OSMMap.nodes[n3][0]
 lon3 = OSMMap.nodes[n3][1]
 lat4 = OSMMap.nodes[n4][0]
 lon4 = OSMMap.nodes[n4][1]
 nlat1, nlon1 = latlonNorm((lat2-lat1,lon2-lon1))
 nlat2, nlon2 = latlonNorm((lat4-lat3,lon4-lon3))
 dist = pointToLineDistanceLatLon((lat1,lon1), (lat2, lon2), (lat,lon))
 if dist < threshold and dist < min_dist:
 angle_dist = 1.0 - abs(nlat1 * nlat2 + nlon1 * nlon2)
 #angle_dist = angleDistance((nlat1, nlon1), (nlat2, nlon2))
 #if angle_dist < 0.1 or angle_dist > 1.9 :
 if edgeids is None:
 #if angle_dist < 0.25 or angle_dist > 1.75 :
 print(angle_dist)
 #if angle_dist < 0.13 : # 30 degrees
 if angle_dist < 0.04 : # 15 degrees
 min_edge = edgeid 
 min_dist = dist 
 else:
 min_edge = edgeid 
 min_dist = dist 
 if min_edge != -1 :
 edgeid = min_edge
 n1 = GPSMap.edges[edgeid][0]
 n2 = GPSMap.edges[edgeid][1]
 lat1 = GPSMap.nodes[n1][0]
 lon1 = GPSMap.nodes[n1][1]
 lat2 = GPSMap.nodes[n2][0]
 lon2 = GPSMap.nodes[n2][1]
 
 result[i] = [edgeid, n1, n2, distance((lat1,lon1),(lat, lon)), distance((lat2,lon2),(lat, lon)), lat,lon]
 matchedLoc.append((lat, lon))
 if single == True :
 return result
 
 svgEdges = []
 for _,edge in OSMMap.edges.iteritems():
 svgEdges.append((OSMMap.nodes[edge[0]][0],OSMMap.nodes[edge[0]][1], OSMMap.nodes[edge[1]][0], OSMMap.nodes[edge[1]][1]))
 if region is not None:
 showTOPO.RenderRegion2(OSMList, matchedLoc, svgEdges, region, "coverage.svg")
 return result
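# Quick numeric check (an illustrative assumption, not original code) of the
# angle criterion used in TOPOGeneratePairs above: two direction vectors count
# as aligned when 1 - |cos(theta)| is below a threshold. 1 - |cos(15 deg)| is
# about 0.034, which is why 0.04 corresponds to roughly a 15 degree tolerance,
# and 0.13 to roughly 30 degrees.
import math

def angle_alignment_cost_sketch(deg):
    """1 - |cos(theta)|, the alignment cost used above (sketch)."""
    return 1.0 - abs(math.cos(math.radians(deg)))

# angle_alignment_cost_sketch(15) -> ~0.034 (below the 0.04 threshold)
# angle_alignment_cost_sketch(30) -> ~0.134 (matches the 0.13 comment above)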
def TOPOGenerateList(GPSMap, OSMMap, check = True, threshold = 0.00010, region = None, image = None, direction = False):
 result = {}
 
 img = scipy.ndimage.imread(image)
 sizex = np.shape(img)[0]
 sizey = np.shape(img)[1]
 if len(np.shape(img)) > 2:
 img = img[:,:,0].reshape((sizex, sizey))
 def Coord2Pixels(lat, lon, min_lat, min_lon, max_lat, max_lon, sizex, sizey):
 ilat = sizex - int((lat-min_lat) / ((max_lat - min_lat)/sizex))
 ilon = int((lon-min_lon) / ((max_lon - min_lon)/sizey))
 return ilat, ilon
 idx = index.Index()
 for idthis in OSMMap.nodes.keys():	
 x,y = Coord2Pixels(OSMMap.nodes[idthis][0], OSMMap.nodes[idthis][1], region[0], region[1], region[2], region[3], sizex, sizey)
 if x>0 and x<sizex and y>0 and y < sizey:
 if img[x,y] > 0:
 idx.insert(idthis, (OSMMap.nodes[idthis][0], OSMMap.nodes[idthis][1],OSMMap.nodes[idthis][0]+0.000001, OSMMap.nodes[idthis][1]+0.000001))
 
 candidateNode = {}
 for edgeId, edge in GPSMap.edges.iteritems():
 n1 = edge[0]
 n2 = edge[1]
 if check :
 if n1 in GPSMap.deletedNodes.keys() or n2 in GPSMap.deletedNodes.keys():
 continue
 if GPSMap.nodeScore[n1] < 1 or GPSMap.nodeScore[n2] < 1 :
 continue
 if n1 in GPSMap.nodeTerminate.keys() or n2 in GPSMap.nodeTerminate.keys():
 continue
 score = GPSMap.edgeScore[GPSMap.edgeHash[n1*10000000 + n2]]
 if score <1:
 continue
 candidateNode[n1] = 1
 candidateNode[n2] = 1
 for nid in candidateNode.keys():
 lat = GPSMap.nodes[nid][0]
 lon = GPSMap.nodes[nid][1]
 input_dir = []
 for nnode in GPSMap.nodeLink[nid]:
 nlat = GPSMap.nodes[nnode][0]
 nlon = GPSMap.nodes[nnode][1]
 input_dir.append((nlat-lat, nlon-lon))
 if direction == False:
 input_dir.append((-nlat+lat, -nlon+lon))
 possible_nodes = list(idx.intersection((lat-threshold,lon-threshold, lat+threshold, lon+threshold)))
 min_dist = 100000
 min_node = -1
 for pnode in possible_nodes:
 latp = OSMMap.nodes[pnode][0]
 lonp = OSMMap.nodes[pnode][1]
 target_dir = []
 for nnode in OSMMap.nodeLink[pnode]:
 nlat = OSMMap.nodes[nnode][0]
 nlon = OSMMap.nodes[nnode][1]
 target_dir.append((nlat-latp, nlon-lonp))
 if direction == False:
 target_dir.append((-nlat+latp, -nlon+lonp))
 match_dir = False
 for dir1 in input_dir:
 for dir2 in target_dir:
 if angleDistance(dir1,dir2) < 0.1:
 match_dir = True
 break
 if match_dir == False:
 continue
 d = distance((lat,lon),(latp, lonp))
 if d < min_dist:
 min_dist = d
 | |
| 
	eval acc, best eval micro_f1, best eval macro_f1, best test acc,
 for each experiment we record:
 1. the args used for the experiment
 2. record eval and test performance after each training epoch
 3. append the summary performance to a file.
 4. save the best evaluation model.
 5. copy all model files to tensorboard folder.
 """
 def __init__(self, args, tensorboard_dir, master_kpi_path, avg_method="macro"):
 self.args = args
 self.avg_method = avg_method
 # ******** setup the tensor board path
 self.time_of_run = datetime.now().strftime("%Y%m%d-%H%M%S")
 self.log_root = tensorboard_dir
 self.tensorboard_path = os.path.join(self.log_root, self.time_of_run)
 self.eval_dir = os.path.join(self.tensorboard_path, "eval")
 self.test_dir = os.path.join(self.tensorboard_path, "test")
 self.__init_folder()
 self.current_exp_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
 self.writer = None
 self.best_eval_metrics = {}
 self.__init_metrics()
 self.metric_df = []
 self.eval_metric_df = []
 self.best_model_path = ""
 self.previous_best_model_path = ""
 self.best_eval_epoch_idx = None
 self.best_eval_epoch_idx_list = []
 self.test_metric_df = []
 self.master_kpi_path = master_kpi_path # master kpi path is the epoch level summary
 write_arguments_to_file(args, os.path.join(self.tensorboard_path, "args.csv"))
 def __init_folder(self):
 if not os.path.exists(self.eval_dir):
 Path(self.eval_dir).mkdir(parents=True, exist_ok=True)
 if not os.path.exists(self.test_dir):
 Path(self.test_dir).mkdir(parents=True, exist_ok=True)
 Path(self.tensorboard_path).mkdir(parents=True, exist_ok=True)
 if not os.path.exists(self.log_root):
 Path(self.log_root).mkdir(parents=True, exist_ok=True)
 print(" Launch TensorBoard with: tensorboard --logdir=%s" % self.tensorboard_path)
 def __init_metrics(self):
 self.best_eval_metrics = {'accuracy': 0, 'macro_f1': 0}
 def reset_best_eval_metrics_models(self):
 self.__init_metrics()
 self.best_model_path = ""
 self.best_eval_epoch_idx = 0
 self.previous_best_model_path = ""
 def __log_scalar(self, name, value, step):
 """Log a scalar value to both MLflow and TensorBoard"""
 self.writer.add_scalar(name, value, step)
 def __setup_fold_train_val_test(self, fold_index):
 fold_dir = os.path.join(self.tensorboard_path, f"fold_{fold_index}")
 Path(fold_dir).mkdir(parents=True, exist_ok=True)
 if self.writer and self.current_fold != fold_index: # the current writer belongs to a previous fold
 self.writer.close()
 self.writer = None
 if not self.writer: # create a writer for the requested fold if none exists
 self.current_fold = fold_index
 self.writer = SummaryWriter(fold_dir)
 def log_train_fold_epoch(self, y_gt, y_pred, losses_2_record, fold_idx, dataset_len, epoch_idx, batch_idx):
 """
 record the training losses and metrics; they are only logged to TensorBoard/MLflow, not saved to a CSV file
 Parameters
 ----------
 y_gt: ground truth
 y_pred: predictions 1D vector
 losses_2_record: all losses of interest
 fold_idx : the fold number in cross-validation
 dataset_len: the length of the dataloader
 epoch_idx: epoch index, e.g. out of 100 epochs
 batch_idx: the batch index at which the recording interval is reached
 ------
 """
 # log all metrics of training for tensorboard
 self.__setup_fold_train_val_test(fold_idx)
 metrics = calc_metrics(y_gt, y_pred, avg_method=self.avg_method)
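 # Combine epoch and batch into one monotonically increasing TensorBoard step:
 # epoch_idx followed by batch_idx zero-padded to the width of dataset_len
 # (e.g. epoch 3, batch 42, dataset_len 500 -> step 3042).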
 step_idx = epoch_idx * 10 ** len(str(dataset_len)) + batch_idx
 for metric_name, metric_value in metrics.items():
 self.__log_scalar(name=f"{metric_name}/train", value=metric_value, step=step_idx)
 for key, val in losses_2_record.items():
 self.__log_scalar(name=f"{key}/train", value=val, step=step_idx)
 print("fold: %s, epoch: %s/%s,step [%s/%s] loss: %s, training metrics: %s"
 % (fold_idx, epoch_idx + 1, self.args.epochs, batch_idx, dataset_len, str(losses_2_record),
 ["%s: %.3f" % (key, val) for key, val in sorted(metrics.items(), key=lambda x: x[0])]))
 def log_eval_fold_epoch(self, y_gt, y_pred, losses_2_record, fold_idx, epoch_idx, model, key_metric='macro_f1'):
 """
 record the evaluation losses and metrics, log them to TensorBoard, and save the model whenever key_metric improves on the best seen so far
 Parameters
 ----------
 y_gt: ground truth
 y_pred: predictions 1D vector
 losses_2_record: all losses of interest
 fold_idx : the fold number in cross-validation
 epoch_idx: epoch index, e.g. out of 100 epochs
 model : pytorch model
 key_metric: metric used to select and save the best model
 ------
 """
 # log all metrics for eval
 self.__setup_fold_train_val_test(fold_idx)
 metrics = calc_metrics(y_gt, y_pred, avg_method=self.avg_method)
 print("fold_%s, epoch: %s/%s, eval loss: %s, eval metrics: %s" %
 (fold_idx, epoch_idx + 1, self.args.epochs, losses_2_record,
 {"%s: %.3f" % (key, val) for key, val in sorted(metrics.items(), key=lambda x: x[0])}))
 for key, value in {**losses_2_record, **metrics}.items(): # merge them then log in one line code
 self.__log_scalar(name=f"{key}/val", value=value, step=epoch_idx)
 if metrics[key_metric] > self.best_eval_metrics[key_metric]:
 self.best_eval_metrics = metrics
 for key, value in {**losses_2_record, **metrics}.items(): # merge them then log in one line code
 self.__log_scalar(name=f"{key}/val", value=value, step=epoch_idx)
 # save the current best eval model for this fold, replacing the previous best checkpoint
 model_save_dir = os.path.join(self.tensorboard_path, "saved_models")
 if not os.path.exists(model_save_dir):
 Path(model_save_dir).mkdir(parents=True, exist_ok=True)
 self.best_model_path = os.path.join(model_save_dir, "fold_%s_epoch_%s.pth" % (fold_idx, epoch_idx))
 torch.save(model, self.best_model_path)
 if os.path.exists(self.previous_best_model_path):
 os.remove(self.previous_best_model_path)
 self.previous_best_model_path = self.best_model_path
 self.best_eval_epoch_idx = epoch_idx
 self.best_eval_epoch_idx_list.append(epoch_idx)
 print("current best eval model index: %s" % self.best_eval_epoch_idx)
 print("fold: %s, epoch: %s/%s, best eval loss: %s, current best eval metrics: %s"
 % (fold_idx, epoch_idx + 1, self.args.epochs, str(losses_2_record),
 ["%s: %.3f" % (key, val) for key, val in sorted(self.best_eval_metrics.items(), key=lambda x: x[0])]))
 to_json = {'fold_num': [fold_idx],
 'epoch_num': [epoch_idx],
 'type': ['eval'],
 'macro_accuracy': [metrics['macro_accuracy']],
 'macro_precision': [metrics['macro_precision']],
 'macro_recall': [metrics['macro_recall']],
 'macro_specificity': [metrics['macro_specificity']],
 'macro_cohen': [metrics['macro_cohen']],
 'best_macro_accuracy': [self.best_eval_metrics['macro_accuracy']],
 'best_macro_f1': [self.best_eval_metrics['macro_f1']]}
 self.__write_metrics(df_2_write=pd.DataFrame.from_dict(to_json),
 path=os.path.join(self.tensorboard_path, "eval_metrics.csv"))
 def load_best_eval_model(self, current_model, model_dir=None):
 if model_dir is None:
 model = load_torch_model_param(current_model=current_model, model_path=self.best_model_path)
 else:
 model = load_torch_model_param(current_model=current_model, model_path=model_dir)
 return model
 @staticmethod
 def __write_metrics(df_2_write, path):
 if os.path.exists(path):
 df = pd.read_csv(path)
 df = pd.concat([df, df_2_write], axis=0, ignore_index=True)
 else:
 df = df_2_write
 df.to_csv(path, index=False)
 def log_test_fold_epoch(self, fold_idx, epoch_idx, y_gt, y_pred, losses_2_record):
 # log all metrics for testing
 test_metrics = calc_metrics(y_gt, y_pred, avg_method=self.avg_method)
 for key, value in {**losses_2_record, **test_metrics}.items(): # merge them then log in one line code
 self.__log_scalar(name=f"{key}/test", value=value, step=epoch_idx)
 print("fold_idx: %s, best model epoch: %s, test loss: %s, testing metrics: %s"
 % (fold_idx, self.best_eval_epoch_idx, str(losses_2_record),
 ["%s: %.3f" % (key, val) for key, val in sorted(test_metrics.items(), key=lambda x: x[0])]))
 to_json = {'fold_num': [fold_idx],
 'epoch_num': [epoch_idx],
 'type': ['test'],
 'macro_accuracy': [test_metrics['macro_accuracy']],
 'macro_precision': [test_metrics["macro_precision"]],
 'macro_recall': [test_metrics["macro_recall"]],
 'macro_cohen': [test_metrics['macro_cohen']],
 'macro_f1': [test_metrics['macro_f1']]
 }
 # cache test results
 self.__write_metrics(df_2_write=pd.DataFrame.from_dict(to_json),
 path=os.path.join(self.tensorboard_path, "test_metrics.csv"))
 def reg_test_score_to_leaderboard(self, y_gt, y_pred, df_test, summary_folder_dic):
 # these are the experiment summary csv file
 metrics = calc_metrics(y_gt, y_pred, avg_method=self.avg_method)
 results_series = pd.Series(
 {**self.args.__dict__, **metrics, # **{"fold_idx": fold_idx, "epoch_idx": epoch_idx},
 **{'tf': str(self.time_of_run), "machine": platform.uname()[1]},
 "best epochs": self.best_eval_epoch_idx_list[-1]
 })
 summary_results = pd.DataFrame(results_series).transpose()
 if os.path.exists(self.master_kpi_path):
 previous_results_df = pd.read_csv(self.master_kpi_path)
 summary_results = pd.concat([previous_results_df, summary_results], axis=0, ignore_index=True)
 summary_results.drop_duplicates(inplace=True)
 summary_results.sort_index(axis=1, ascending=False, inplace=True)
 summary_results.sort_values(by=['dataset', 'tf'], ascending=[False, True], inplace=True)
 summary_results.to_csv(self.master_kpi_path, index=False)
 # let's do the evaluation on the sp and rp
 if self.args.dataset == "apple":
 df_test = df_test.rename(columns={"appleid": "pid", "linetime": "line"})
 else:
 df_test = df_test.rename(columns={"mesaid": "pid"})
 for eval_period, summary_path in summary_folder_dic.items():
 clf_metric_summary, min_sum, label_level_sum, epoch_sleep_metrics = \
 evaluate_sleep_alg(self.tensorboard_path, df_test, num_classes=3, algorithm_name=self.args.nn_type,
 recording_period=eval_period, feature_type=self.args.feature_type)
 # 0: clf_metric_sum, 1: min_sum, 2: label_level_sum,
 clf_metric_summary["best epochs"] = self.best_eval_epoch_idx_list[-1]
 save_evaluation_summary(clf_metric_summary, min_sum, epoch_sleep_metrics, self.args, summary_path,
 period=eval_period, tf=self.time_of_run)
 print("all models have been evaluated")
 return
 def save_test_analysis_visualisation_results(self, y_gt, y_hat, feature, epoch=0, run_type='eval', fold_num=0):
 """
 This is a customized function for sleep analysis
 """
 # !TODO save the confusion matrix, classification report, T-SNE plot, entropy statistics
 label_values, target_names = sleep_class_name_mapping(self.args.num_classes)
 if len(y_gt.shape) > 2:
 y_gt = np.reshape(y_gt, -1)
 matrix = confusion_matrix(y_gt, y_hat)
 report = classification_report(y_gt, y_hat, target_names=target_names, digits=4)
 print("Classification report: \n")
 print(report)
 if run_type == 'eval':
 save_file_path = self.eval_dir
 else:
 save_file_path = self.test_dir
 file_title = "_%s_fold_%s_epoch_%s_" % (run_type, fold_num, epoch)
 np.savetxt(os.path.join(save_file_path, file_title + 'confusion_matrix.txt'), matrix, fmt='%d', delimiter=',')
 with open(os.path.join(save_file_path, file_title + "classification_report.txt"), "w") as text_file:
 text_file.write(report)
 # pd.DataFrame({"gt": y_gt, "pred": y_hat, "run_type": [run_type]*len(y_gt)}).to_csv(
 # os.path.join(self.tensorboard_path, "%s_%s_prediction.csv" % (self.args.dataset, self.args.nn_type)))
 # save the best trained model as well.
 plot_save_confusion_matrix(y_gt, y_hat, normalize=True, class_names=target_names,
 location=save_file_path, title=file_title)
 if feature is not None:
 generate_tsne(feature, self.args.num_classes, gt=y_gt[:feature.shape[0]],
 output_path=save_file_path, title="%s_num_classes_%s_fold_%s_epoch_%s" %
 (run_type, self.args.num_classes,
 fold_num, epoch))
 return None
 def append_and_save_test_prediction(self, y_gt, y_hat, y_pred_prob, test_fold_idx, df_test):
 """
 df_test is the test dataset.
 """
 num_classes = y_pred_prob.shape[1]
 extra_column_to_save = [self.args.nn_type]
 df = pd.DataFrame({self.args.nn_type: y_hat, 'gt': y_gt, "window_idx": test_fold_idx})
 for class_label in np.arange(num_classes):
 df[class_label] = y_pred_prob[:, class_label]
 extra_column_to_save.append(class_label)
 #!TODO refactor this code so the dataset name is not hard-coded
 if self.args.dataset == "apple":
 df = pd.merge(left=df_test, right=df, on="window_idx")
 else:
 df_test.reset_index(inplace=True, drop=True) # the original df_test has the index which cause misalignment
 df = pd.concat([df_test, df], axis=1)
 df['chk'] = (df['stages'] - df['gt']).abs()
 assert df['chk'].sum() == 0, "ground truth misaligned!"
 df = df[['pid', 'stages', 'line', 'gt_sleep_block'] + extra_column_to_save]
 df.to_csv(os.path.join(self.tensorboard_path, '%s_stages_30s_%s_100_%s.csv' %
 (self.args.num_classes, self.args.nn_type, self.args.feature_type)), index=False)
 return df
 def copy_py_files(self, files_path):
 files = get_all_files_include_sub(files_path, ".py")
 with ZipFile(os.path.join(self.tensorboard_path, time.strftime("%Y-%m-%d_%H%M") + '_' + "model_bak.zip"),
 'w') as zipObj:
 for file in files:
 zipObj.write(file, arcname=os.path.basename(file))
 # file_name = time.strftime("%Y-%m-%d_%H%M") + '_' + os.path.basename(file)
 # shutil.copy(file, os.path.join(self.tensorboard_path, file_name))
 def copy_main_run_file(self, file):
 copied_script_name = time.strftime("%Y-%m-%d_%H%M") + '_' + os.path.basename(file) + ".zip"
 to_file_name = os.path.join(self.tensorboard_path, copied_script_name)
 with ZipFile(to_file_name, "w") as zipObj:
 zipObj.write(file, arcname=os.path.basename(file))
 # shutil.copy(file, os.path.join(self.tensorboard_path, copied_script_name))
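# Hedged usage sketch: the logger class name above is cut off by the extraction,
# so "ExperimentLogger" and every value below are placeholders/assumptions, shown
# only to illustrate how the methods defined above fit together.
#
#   logger = ExperimentLogger(args, tensorboard_dir="runs",
#                             master_kpi_path="runs/master_kpi.csv")
#   for epoch_idx in range(args.epochs):
#       ...  # training step producing y_gt, y_pred and a dict of losses
#       logger.log_train_fold_epoch(y_gt, y_pred, {"loss": loss}, fold_idx=0,
#                                   dataset_len=len(train_loader),
#                                   epoch_idx=epoch_idx, batch_idx=batch_idx)
#       logger.log_eval_fold_epoch(y_gt_val, y_pred_val, {"loss": val_loss},
#                                  fold_idx=0, epoch_idx=epoch_idx, model=model)
#   best_model = logger.load_best_eval_model(model)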
def mkdir_if_missing(directory):
 if not osp.exists(directory):
 try:
 os.makedirs(directory)
 except OSError as e:
 if e.errno != errno.EEXIST:
 raise
class Logger(object):
 """
 Write console output to external text file.
 Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.
 """
 def __init__(self, fpath=None):
 self.console = sys.stdout
 self.file = None
 if fpath is not None:
 mkdir_if_missing(os.path.dirname(fpath))
 self.file = open(fpath, 'w')
 def __del__(self):
 self.close()
 def __enter__(self):
 pass
 def __exit__(self, *args):
 self.close()
 def write(self, msg):
 self.console.write(msg)
 if self.file is not None:
 self.file.write(msg)
 def flush(self):
 | |
| 
	(ex: NameArea='P1')
 
 noiseReduction (int) 
 Maximal size of the area which will be removed in the function 'noiseRemoval' 
 (ex: noiseReduction=100)
 
 numberOfClasses (int):
 Number of classes 
 (ex: numberOfClasses=2)
 
 classesNamesList (List of strings):
 List of the names of the classes 
 (ex: classesNamesList=['PaddyRice','Background'] )
 
 ROI (List of list of int):
 List of the coordinates of each region of interest (ROI) in the same order as ListAreaNames.
 For each ROI, the first two numbers are the coordinates of the top left corner and the other two are the coordinates of the bottom right corner.
 (ex: ROI=[[0,0,50,50],[50,0,100,50],[0,50,50,100]] )
 
 ListAreaNames (List of strings):
 List of the names of the areas in the same order as the list ROI 
 (ex: ListAreaNames=['P1','P2','P1xP2'] ) 
 
 fusionClassesY_N (string): 
 Two possible values 'Y' or 'N'. If fusionClassesY_N='Y', the user has chosen to merge two or more classes.
 
 maskY_N (string): 
 Two possible values 'Y' or 'N'. If maskY_N='Y', the user has chosen to save the mask (a binary image if there are only two classes, a colored flat image if there are more than two classes)
 
 imageY_N (string): 
 Two possible values 'Y' or 'N'. If imageY_N='Y', the user has chosen to save the reconstructed image (only the class of interest is shown if there are two classes; the image with a colored filter is shown if there are more than two classes)
 
 InfoY_N (string): 
 Two possible values 'Y' or 'N'. If InfoY_N='Y', the user has chosen to save the information file containing, for each plant: 'Area/Plant','Image Name','Surface','Coverage', 'Aspect Ratio','Extent','Solidity', 'Equivalent Diameter', 'Main axe', 'Secondary axe'
 
 NFMaskY_N (string): 
 Two possible values 'Y' or 'N'. If NFMaskY_N='Y', the user has chosen to save the mask before any noise reduction and morphological filtering. 
 
 BiggestBlobY_N(string): 
 Two possible values 'Y' or 'N'. If BiggestBlobY_N='Y', the user has chosen to only keep the biggest blob of the mask for analysis.
 chosenArea (string): 
 Name of the class of interest (the one that will be measured) 
 (ex: 'PaddyRice')
 
 OutputMaskName (string):
 Address used to save the mask.
 (ex: OutputMaskName='/Users/Name/Desktop/folder/Masks/P1/image_crop_P1_mask.png')
 
 OutputimageName (string):
 Address used to save the masked image.
 (ex: OutputimageName='/Users/Name/Desktop/folder/MaskedImages/P1/image_crop_P1_maskedImage.png')
 
 OutputNFMaskName (string):
 Address used to save the non-filtered mask.
 (ex: OutputNFMaskName='/Users/Name/Desktop/folder/NonFilteredMasks/P1/image_crop_P1_NFMask.png')
 
 ListAirs (List float)
 List of the areas (number of pixels) of the class of interest for each picture 
 (ex: ListAirs=[1500, 517, 641])
 
 ListTestDataTimes (List float): 
 List of the times to create the test data array (read the picture) for each picture in sec. 
 (ex: ListTestDataTimes=[2.1, 2.2, 2.1])
 
 ListApplyModelTimes (List float): 
 List of the times to apply the model to the picture array for each picture in sec. 
 (ex: ListApplyModelTimes=[3.2, 3.2, 3.0])
 
 ListSaveOutputTimes (List float): 
 List of the times to save all the output for each picture in sec. 
 (ex: ListSaveOutputTimes=[1.6, 1.7, 1.5])
 
 Return:
 
 ListAirs (List float)
 List of the areas (number of pixels) of the class of interest for each picture 
 (ex: ListAirs=[1500, 517, 641, 555])
 
 ListTestDataTimes (List float): 
 Argument list with one more element 
 (ex: ListTestDataTimes=[2.1, 2.2, 2.1, 2.3])
 
 ListApplyModelTimes (List float): 
 Argument list with one more element 
 (ex: ListApplyModelTimes=[3.2, 3.2, 3.0, 3.1])
 
 ListSaveOutputTimes (List float): 
 Argument list with one more element 
 (ex: ListSaveOutputTimes=[1.6, 1.7, 1.5, 1.6])
 """
 ### Create the test data array
 start_timeTestData = time.monotonic()
 
 TestData=creatTestData(imageArray)
 
 end_timeTestData = time.monotonic() 
 RunningTime=timedelta(seconds=end_timeTestData - start_timeTestData)
 sec=float(RunningTime.days*86400+RunningTime.seconds+RunningTime.microseconds/1000000)
 ListTestDataTimes.append(sec)
 
 ### Apply the model to the test data
 start_timeModel = time.monotonic()
 
 Resultmodel=ApplyModel(TestData, modelname, model)
 
 end_timeModel = time.monotonic() 
 RunningTime=timedelta(seconds=end_timeModel - start_timeModel)
 sec=float(RunningTime.days*86400+RunningTime.seconds+RunningTime.microseconds/1000000)
 ListApplyModelTimes.append(sec)
 
 ### Create and save the output 
 start_timeOutput = time.monotonic()
 
 Mask=Resultmodel.reshape(np.shape(imageArray)[0],np.shape(imageArray)[1])
 
 #Save the non filtered mask in shades of gray
 if NFMaskY_N=='Y':
 NFMask=Mask.astype('int')
 NFMask=(NFMask/float(numberOfClasses-1))*255
 NFMask=NFMask.astype(int)
 cv2.imwrite(OutputNFMaskName,NFMask)
 
 # apply a noise reduction filter to the mask
 FilteredMask=noiseRemoval(Mask, noiseReduction, numberOfClasses) 
 
 if numberOfClasses>2 and fusionClassesY_N=='N' :
 #create a colored mask with 1 color=1class
 coloredMask=colorfilter(FilteredMask)
 if maskY_N=='Y': 
 cv2.imwrite(OutputMaskName,coloredMask)
 
 if imageY_N=='Y':
 MaskedImage=0.3*coloredMask+0.7*imageArray
 cv2.imwrite(OutputimageName,MaskedImage)
 
 else:
 # create a black and white mask with the class of interest in white
 BandWMask=FilteredMask*0
 List=[]
 for AreaName in chosenArea:
 if AreaName in classesNamesList:
 List.append(classesNamesList.index(AreaName))
 
 for AreaNumber in List:
 BandWMask[FilteredMask==(AreaNumber)]=255
 
 BandWMask=BandWMask.astype('uint8')
 
 #If the user chose to only keep the biggest blob and do shape analysis
 if BiggestBlobY_N=='Y':
 
 # Detect the blobs and their contours in this black and white mask
 im2, contours, hierarchy = cv2.findContours(BandWMask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
 
 #find the biggest blob, erase the others and keep the smaller black blobs in the biggest white blob
 if contours!=[]:
 surfaceMainBlob = 0
 contourMainBlob=[]
 RankMainBlob=0
 for i in range(len(contours)):
 if cv2.contourArea(contours[i])>surfaceMainBlob:
 contourMainBlob=contours[i]
 surfaceMainBlob=cv2.contourArea(contours[i])
 RankMainBlob=i
 
 ListSecondaryBlod=[]
 
 for i in range(len(hierarchy[0])):
 if hierarchy[0,i][3] ==RankMainBlob:
 ListSecondaryBlod.append(contours[i]) 
 
 FilteredMask2=imageArray*0
 L=[]
 L.append(contourMainBlob)
 FilteredMask2=cv2.drawContours(FilteredMask2, L, 0, (255,255,255), -1)
 FilteredMask2=cv2.drawContours(FilteredMask2, ListSecondaryBlod, -1, (0,0,0), -1)
 
 #Save the final mask
 if maskY_N=='Y': 
 cv2.imwrite(OutputMaskName,FilteredMask2)
 
 # calculate some of the properties of the main blob 
 hull = cv2.convexHull(contourMainBlob)
 rect = cv2.minAreaRect(contourMainBlob)
 box = cv2.boxPoints(rect)
 box = np.int0(box)
 axes=rect[1]
 axe1=axes[0]
 axe2=axes[1]
 
 if axe1<axe2:
 a=axe1
 axe1=axe2
 axe2=a
 
 # Save the masked image and draw some of the blob properties (convexhull, rectangle, main axes...) 
 if imageY_N=='Y':
 FilteredMask3=FilteredMask2
 FilteredMask3[FilteredMask2==255]=1
 FilteredMask3[FilteredMask2==0]=0.1 
 MaskedImage=FilteredMask3*imageArray
 
 MaskedImage=cv2.drawContours(MaskedImage,[box],0,(0,255,0),1)
 MaskedImage=cv2.ellipse(MaskedImage,rect,(0,255,0),1)
 
 x1,y1=box[0]
 x2,y2=box[1]
 x3,y3=box[2]
 x4,y4=box[3]
 
 l1x1=int((x3+x2)/2)
 l1y1=int((y3+y2)/2)
 
 l1x2=int((x4+x1)/2)
 l1y2=int((y4+y1)/2) 
 
 l2x1=int((x1+x2)/2)
 l2y1=int((y1+y2)/2)
 
 l2x2=int((x4+x3)/2)
 l2y2=int((y4+y3)/2) 
 
 MaskedImage=cv2.line(MaskedImage,(l1x1,l1y1),(l1x2,l1y2),(255,255,0),1) # blue
 MaskedImage=cv2.line(MaskedImage,(l2x1,l2y1),(l2x2,l2y2),(255,255,255),1) # white
 L=[]
 L.append(hull)
 MaskedImage=cv2.drawContours(MaskedImage, L, 0, (0,0,255), 1)
 
 cv2.imwrite(OutputimageName,MaskedImage)
 
 #Save the information in ListAirs
 if InfoY_N=='Y':
 for i in ListSecondaryBlod:
 surfaceSecondaryBlobi=cv2.contourArea(i)
 surfaceMainBlob=surfaceMainBlob-surfaceSecondaryBlobi
 
 
 x,y,w,h = cv2.boundingRect(contourMainBlob)
 aspect_ratio = float(w)/h
 rect_area = w*h
 extent = float(surfaceMainBlob)/rect_area
 equi_diameter = np.sqrt(4*surfaceMainBlob/np.pi)
 hull_area = cv2.contourArea(hull)
 solidity = float(surfaceMainBlob)/hull_area
 
 TotalSurface=len(imageArray)*len(imageArray[0])
 ListAirs=np.vstack([ListAirs, [NameArea, ImageName , surfaceMainBlob, surfaceMainBlob/TotalSurface, aspect_ratio, extent, solidity,equi_diameter, axe1, axe2]])
 
 
 else: #if No blob is found, just save a black rectangle
 FilteredMask2=imageArray*0
 if maskY_N=='Y': 
 cv2.imwrite(OutputMaskName,FilteredMask2)
 if imageY_N=='Y':
 cv2.imwrite(OutputimageName,FilteredMask2)
 if InfoY_N=='Y':
 ListAirs=np.vstack([ListAirs, [NameArea, ImageName , 0, 0, 0, 0, 0, 0,0,0]])
 
 #If the user decided to keep all the blobs and not do the shape analysis
 else:
 #Save the final mask
 if maskY_N=='Y': 
 cv2.imwrite(OutputMaskName,BandWMask)
 
 # Save the masked image and draw some of the blob properties (convexhull, rectangle, main axes...) 
 if imageY_N=='Y':
 FilteredMask3=np.zeros((len(BandWMask),len(BandWMask[0]),3))
 FilteredMask3[BandWMask==255]=[1,1,1]
 FilteredMask3[BandWMask==0]=[0.1,0.1,0.1] 
 MaskedImage=FilteredMask3*imageArray
 
 cv2.imwrite(OutputimageName,MaskedImage)
 
 #Save the information in ListAirs
 if InfoY_N=='Y':
 surfaceClassOfInterest=np.sum(BandWMask)/255
 TotalSurface=len(imageArray)*len(imageArray[0])
 ListAirs=np.vstack([ListAirs, [NameArea, ImageName , surfaceClassOfInterest, surfaceClassOfInterest/TotalSurface]])
 
 
 
 
 
 end_timeOutput = time.monotonic() 
 RunningTime=timedelta(seconds=end_timeOutput - start_timeOutput)
 sec=float(RunningTime.days*86400+RunningTime.seconds+RunningTime.microseconds/1000000)
 ListSaveOutputTimes.append(sec)
 return ListAirs, ListTestDataTimes,ListApplyModelTimes,ListSaveOutputTimes
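# Self-contained sketch (an illustrative assumption, not original code) of the
# "keep only the biggest blob" step used above, written against the OpenCV 4.x
# findContours signature (two return values instead of three).
import cv2
import numpy as np

def biggest_blob_sketch():
    mask = np.zeros((50, 50), dtype=np.uint8)
    cv2.rectangle(mask, (5, 5), (20, 20), 255, -1)    # small blob
    cv2.rectangle(mask, (25, 25), (48, 48), 255, -1)  # big blob
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    biggest = max(contours, key=cv2.contourArea)
    out = np.zeros_like(mask)
    cv2.drawContours(out, [biggest], 0, 255, -1)      # redraw only the biggest blob
    return out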
 
 
############################################################################################ 
 
 
def creatTestData(testImage):
 """ Function to create the test data array.
 
 Args:
 testImage (numpy array ): 
 Image array with for each pixel the B G R values of the pixel
 (ex: imageArray=np.array([[[106,255,0],[0,50,13]...], [[106,255,0],[0,50,13]...],...])
 
 Return:
 fusion (numpy array): 
 Array containing for each pixel the BGR, HSV and Lab values. One pixel per row 
 (ex: )
 
 Note:
 This function is used in the function 'ApplyModelAndSaveOutput' in this file. 
 
 """
 
 bgrArray = testImage[:,:,0:3] # make sure that there are only 3 channels (BGR) per pixel
 
 bgrArray=bgrArray.reshape(np.shape(bgrArray)[0]*np.shape(bgrArray)[1],1,3) # transform the rectangular array into a column with one pixel per row
 bgrArray=bgrArray.astype('uint8')
 
 #Calculate other color properties from the bgr values
 hsvArray = cv2.cvtColor(bgrArray,cv2.COLOR_BGR2HSV)
 LabArray=cv2.cvtColor(bgrArray,cv2.COLOR_BGR2Lab) 
 
 #Save everything in a big array
 fusion=np.concatenate((bgrArray, hsvArray), axis=1)
 fusion=np.concatenate((fusion, LabArray),axis=1)
 fusion=fusion.reshape((len(fusion),9))
 return fusion
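# Minimal shape check for creatTestData (an illustrative assumption, not part of
# the original module): a 2x2 BGR image becomes a (4, 9) array holding
# B,G,R, H,S,V, L,a,b for each pixel.
def _creatTestData_shape_check():
    import numpy as np
    tiny = np.array([[[106, 255, 0], [0, 50, 13]],
                     [[10, 20, 30], [200, 200, 200]]], dtype=np.uint8)
    assert creatTestData(tiny).shape == (4, 9)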
############################################################################################ 
 
def TrainModel(trainData, modelname,classesNamesList):
 """ Function to create and train the machine learning model
 
 Args: 
 trainData (numpy array): 
 Array with for each pixel the class and the BGR HSV Lab values of the pixel 
 (ex: trainData=np.array([[PaddyRice, 116, 179, 147, 45, 90, 179, 177, 106, 157],[Background,132, 121, 123, 125, 21, 132, 131, 131, 122]]))
 
 modelname (string): 
 Name of the chosen model 
 ('Support Vector Machine (Sklearn)','Random Forest Classifier | |
| 
	<filename>sdk/python/pulumi_google_native/notebooks/v1/instance.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['InstanceArgs', 'Instance']
@pulumi.input_type
class InstanceArgs:
 def __init__(__self__, *,
 instance_id: pulumi.Input[str],
 machine_type: pulumi.Input[str],
 accelerator_config: Optional[pulumi.Input['AcceleratorConfigArgs']] = None,
 boot_disk_size_gb: Optional[pulumi.Input[str]] = None,
 boot_disk_type: Optional[pulumi.Input['InstanceBootDiskType']] = None,
 container_image: Optional[pulumi.Input['ContainerImageArgs']] = None,
 custom_gpu_driver_path: Optional[pulumi.Input[str]] = None,
 data_disk_size_gb: Optional[pulumi.Input[str]] = None,
 data_disk_type: Optional[pulumi.Input['InstanceDataDiskType']] = None,
 disk_encryption: Optional[pulumi.Input['InstanceDiskEncryption']] = None,
 install_gpu_driver: Optional[pulumi.Input[bool]] = None,
 instance_owners: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
 kms_key: Optional[pulumi.Input[str]] = None,
 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
 location: Optional[pulumi.Input[str]] = None,
 metadata: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
 network: Optional[pulumi.Input[str]] = None,
 nic_type: Optional[pulumi.Input['InstanceNicType']] = None,
 no_proxy_access: Optional[pulumi.Input[bool]] = None,
 no_public_ip: Optional[pulumi.Input[bool]] = None,
 no_remove_data_disk: Optional[pulumi.Input[bool]] = None,
 post_startup_script: Optional[pulumi.Input[str]] = None,
 project: Optional[pulumi.Input[str]] = None,
 reservation_affinity: Optional[pulumi.Input['ReservationAffinityArgs']] = None,
 service_account: Optional[pulumi.Input[str]] = None,
 service_account_scopes: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
 shielded_instance_config: Optional[pulumi.Input['ShieldedInstanceConfigArgs']] = None,
 subnet: Optional[pulumi.Input[str]] = None,
 tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
 upgrade_history: Optional[pulumi.Input[Sequence[pulumi.Input['UpgradeHistoryEntryArgs']]]] = None,
 vm_image: Optional[pulumi.Input['VmImageArgs']] = None):
 """
 The set of arguments for constructing an Instance resource.
 :param pulumi.Input[str] machine_type: The [Compute Engine machine type](/compute/docs/machine-types) of this instance.
 :param pulumi.Input['AcceleratorConfigArgs'] accelerator_config: The hardware accelerator used on this instance. If you use accelerators, make sure that your configuration has [enough vCPUs and memory to support the `machine_type` you have selected](/compute/docs/gpus/#gpus-list).
 :param pulumi.Input[str] boot_disk_size_gb: Input only. The size of the boot disk in GB attached to this instance, up to a maximum of 64000 GB (64 TB). The minimum recommended value is 100 GB. If not specified, this defaults to 100.
 :param pulumi.Input['InstanceBootDiskType'] boot_disk_type: Input only. The type of the boot disk attached to this instance, defaults to standard persistent disk (`PD_STANDARD`).
 :param pulumi.Input['ContainerImageArgs'] container_image: Use a container image to start the notebook instance.
 :param pulumi.Input[str] custom_gpu_driver_path: Specify a custom Cloud Storage path where the GPU driver is stored. If not specified, we'll automatically choose from official GPU drivers.
 :param pulumi.Input[str] data_disk_size_gb: Input only. The size of the data disk in GB attached to this instance, up to a maximum of 64000 GB (64 TB). You can choose the size of the data disk based on how big your notebooks and data are. If not specified, this defaults to 100.
 :param pulumi.Input['InstanceDataDiskType'] data_disk_type: Input only. The type of the data disk attached to this instance, defaults to standard persistent disk (`PD_STANDARD`).
 :param pulumi.Input['InstanceDiskEncryption'] disk_encryption: Input only. Disk encryption method used on the boot and data disks, defaults to GMEK.
 :param pulumi.Input[bool] install_gpu_driver: Whether the end user authorizes Google Cloud to install GPU driver on this instance. If this field is empty or set to false, the GPU driver won't be installed. Only applicable to instances with GPUs.
 :param pulumi.Input[Sequence[pulumi.Input[str]]] instance_owners: Input only. The owner of this instance after creation. Format: `<EMAIL>` Currently supports one owner only. If not specified, all of the service account users of your VM instance's service account can use the instance.
 :param pulumi.Input[str] kms_key: Input only. The KMS key used to encrypt the disks, only applicable if disk_encryption is CMEK. Format: `projects/{project_id}/locations/{location}/keyRings/{key_ring_id}/cryptoKeys/{key_id}` Learn more about [using your own encryption keys](/kms/docs/quickstart).
 :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Labels to apply to this instance. These can be later modified by the setLabels method.
 :param pulumi.Input[Mapping[str, pulumi.Input[str]]] metadata: Custom metadata to apply to this instance.
 :param pulumi.Input[str] network: The name of the VPC that this instance is in. Format: `projects/{project_id}/global/networks/{network_id}`
 :param pulumi.Input['InstanceNicType'] nic_type: Optional. The type of vNIC to be used on this interface. This may be gVNIC or VirtioNet.
 :param pulumi.Input[bool] no_proxy_access: If true, the notebook instance will not register with the proxy.
 :param pulumi.Input[bool] no_public_ip: If true, no public IP will be assigned to this instance.
 :param pulumi.Input[bool] no_remove_data_disk: Input only. If true, the data disk will not be auto deleted when deleting the instance.
 :param pulumi.Input[str] post_startup_script: Path to a Bash script that automatically runs after a notebook instance fully boots up. The path must be a URL or Cloud Storage path (`gs://path-to-file/file-name`).
 :param pulumi.Input['ReservationAffinityArgs'] reservation_affinity: Optional. The optional reservation affinity. Setting this field will apply the specified [Zonal Compute Reservation](https://cloud.google.com/compute/docs/instances/reserving-zonal-resources) to this notebook instance.
 :param pulumi.Input[str] service_account: The service account on this instance, giving access to other Google Cloud services. You can use any service account within the same project, but you must have the service account user permission to use the instance. If not specified, the [Compute Engine default service account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) is used.
 :param pulumi.Input[Sequence[pulumi.Input[str]]] service_account_scopes: Optional. The URIs of service account scopes to be included in Compute Engine instances. If not specified, the following [scopes](https://cloud.google.com/compute/docs/access/service-accounts#accesscopesiam) are defined: - https://www.googleapis.com/auth/cloud-platform - https://www.googleapis.com/auth/userinfo.email If not using default scopes, you need at least: https://www.googleapis.com/auth/compute
 :param pulumi.Input['ShieldedInstanceConfigArgs'] shielded_instance_config: Optional. Shielded VM configuration. [Images using supported Shielded VM features](https://cloud.google.com/compute/docs/instances/modifying-shielded-vm).
 :param pulumi.Input[str] subnet: The name of the subnet that this instance is in. Format: `projects/{project_id}/regions/{region}/subnetworks/{subnetwork_id}`
 :param pulumi.Input[Sequence[pulumi.Input[str]]] tags: Optional. The Compute Engine tags to add to runtime (see [Tagging instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
 :param pulumi.Input[Sequence[pulumi.Input['UpgradeHistoryEntryArgs']]] upgrade_history: The upgrade history of this instance.
 :param pulumi.Input['VmImageArgs'] vm_image: Use a Compute Engine VM image to start the notebook instance.
 """
 pulumi.set(__self__, "instance_id", instance_id)
 pulumi.set(__self__, "machine_type", machine_type)
 if accelerator_config is not None:
 pulumi.set(__self__, "accelerator_config", accelerator_config)
 if boot_disk_size_gb is not None:
 pulumi.set(__self__, "boot_disk_size_gb", boot_disk_size_gb)
 if boot_disk_type is not None:
 pulumi.set(__self__, "boot_disk_type", boot_disk_type)
 if container_image is not None:
 pulumi.set(__self__, "container_image", container_image)
 if custom_gpu_driver_path is not None:
 pulumi.set(__self__, "custom_gpu_driver_path", custom_gpu_driver_path)
 if data_disk_size_gb is not None:
 pulumi.set(__self__, "data_disk_size_gb", data_disk_size_gb)
 if data_disk_type is not None:
 pulumi.set(__self__, "data_disk_type", data_disk_type)
 if disk_encryption is not None:
 pulumi.set(__self__, "disk_encryption", disk_encryption)
 if install_gpu_driver is not None:
 pulumi.set(__self__, "install_gpu_driver", install_gpu_driver)
 if instance_owners is not None:
 pulumi.set(__self__, "instance_owners", instance_owners)
 if kms_key is not None:
 pulumi.set(__self__, "kms_key", kms_key)
 if labels is not None:
 pulumi.set(__self__, "labels", labels)
 if location is not None:
 pulumi.set(__self__, "location", location)
 if metadata is not None:
 pulumi.set(__self__, "metadata", metadata)
 if network is not None:
 pulumi.set(__self__, "network", network)
 if nic_type is not None:
 pulumi.set(__self__, "nic_type", nic_type)
 if no_proxy_access is not None:
 pulumi.set(__self__, "no_proxy_access", no_proxy_access)
 if no_public_ip is not None:
 pulumi.set(__self__, "no_public_ip", no_public_ip)
 if no_remove_data_disk is not None:
 pulumi.set(__self__, "no_remove_data_disk", no_remove_data_disk)
 if post_startup_script is not None:
 pulumi.set(__self__, "post_startup_script", post_startup_script)
 if project is not None:
 pulumi.set(__self__, "project", project)
 if reservation_affinity is not None:
 pulumi.set(__self__, "reservation_affinity", reservation_affinity)
 if service_account is not None:
 pulumi.set(__self__, "service_account", service_account)
 if service_account_scopes is not None:
 pulumi.set(__self__, "service_account_scopes", service_account_scopes)
 if shielded_instance_config is not None:
 pulumi.set(__self__, "shielded_instance_config", shielded_instance_config)
 if subnet is not None:
 pulumi.set(__self__, "subnet", subnet)
 if tags is not None:
 pulumi.set(__self__, "tags", tags)
 if upgrade_history is not None:
 pulumi.set(__self__, "upgrade_history", upgrade_history)
 if vm_image is not None:
 pulumi.set(__self__, "vm_image", vm_image)
 @property
 @pulumi.getter(name="instanceId")
 def instance_id(self) -> pulumi.Input[str]:
 return pulumi.get(self, "instance_id")
 @instance_id.setter
 def instance_id(self, value: pulumi.Input[str]):
 pulumi.set(self, "instance_id", value)
 @property
 @pulumi.getter(name="machineType")
 def machine_type(self) -> pulumi.Input[str]:
 """
 The [Compute Engine machine type](/compute/docs/machine-types) of this instance.
 """
 return pulumi.get(self, "machine_type")
 @machine_type.setter
 def machine_type(self, value: pulumi.Input[str]):
 pulumi.set(self, "machine_type", value)
 @property
 @pulumi.getter(name="acceleratorConfig")
 def accelerator_config(self) -> Optional[pulumi.Input['AcceleratorConfigArgs']]:
 """
 The hardware accelerator used on this instance. If you use accelerators, make sure that your configuration has [enough vCPUs and memory to support the `machine_type` you have selected](/compute/docs/gpus/#gpus-list).
 """
 return pulumi.get(self, "accelerator_config")
 @accelerator_config.setter
 def accelerator_config(self, value: Optional[pulumi.Input['AcceleratorConfigArgs']]):
 pulumi.set(self, "accelerator_config", value)
 @property
 @pulumi.getter(name="bootDiskSizeGb")
 def boot_disk_size_gb(self) -> Optional[pulumi.Input[str]]:
 """
 Input only. The size of the boot disk in GB attached to this instance, up to a maximum of 64000 GB (64 TB). The minimum recommended value is 100 GB. If not specified, this defaults to 100.
 """
 return pulumi.get(self, "boot_disk_size_gb")
 @boot_disk_size_gb.setter
 def boot_disk_size_gb(self, value: Optional[pulumi.Input[str]]):
 pulumi.set(self, "boot_disk_size_gb", value)
 @property
 @pulumi.getter(name="bootDiskType")
 def boot_disk_type(self) -> Optional[pulumi.Input['InstanceBootDiskType']]:
 """
 Input only. The type of the boot disk attached to this instance, defaults to standard persistent disk (`PD_STANDARD`).
 """
 return pulumi.get(self, "boot_disk_type")
 @boot_disk_type.setter
 def boot_disk_type(self, value: Optional[pulumi.Input['InstanceBootDiskType']]):
 pulumi.set(self, "boot_disk_type", value)
 @property
 @pulumi.getter(name="containerImage")
 def container_image(self) -> Optional[pulumi.Input['ContainerImageArgs']]:
 """
 Use a container image to | |
| 
	import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn import Parameter
from torch.distributions import Bernoulli
import math
from core import metrics
class ModuleWrapper(nn.Module):
 """Wrapper for nn.Module with support for arbitrary flags and a universal forward pass"""
 def __init__(self):
 super(ModuleWrapper, self).__init__()
 def set_flag(self, flag_name, value):
 setattr(self, flag_name, value)
 for m in self.children():
 if hasattr(m, 'set_flag'):
 m.set_flag(flag_name, value)
 def forward(self, x):
 for module in self.children():
 x = module(x)
 return x
class Abs(ModuleWrapper):
 def __init__(self):
 super(Abs, self).__init__()
 def forward(self, x):
 return x.abs_()
class FlattenLayer(ModuleWrapper):
 def __init__(self, num_features):
 super(FlattenLayer, self).__init__()
 self.num_features = num_features
 def forward(self, x):
 return x.view(-1, self.num_features)
class LinearVariance(ModuleWrapper):
 def __init__(self, in_features, out_features, bias=True):
 super(LinearVariance, self).__init__()
 self.in_features = in_features
 self.out_features = out_features
 self.sigma = Parameter(torch.Tensor(out_features, in_features))
 if bias:
 self.bias = Parameter(torch.Tensor(1, out_features))
 else:
 self.register_parameter('bias', None)
 self.reset_parameters()
 def reset_parameters(self):
 stdv = 1. / math.sqrt(self.sigma.size(1))
 self.sigma.data.uniform_(-stdv, stdv)
 if self.bias is not None:
 self.bias.data.zero_()
 def forward(self, x):
 if self.bias is not None:
 lrt_mean = self.bias
 else:
 lrt_mean = 0.0
 lrt_std = Variable.sqrt_(1e-16 + F.linear(x * x, self.sigma * self.sigma))
 if self.training:
 eps = Variable(lrt_std.data.new(lrt_std.size()).normal_())
 else:
 eps = 0.0
 return lrt_mean + eps * lrt_std
 def __repr__(self):
 return self.__class__.__name__ + '(' \
 + 'in_features=' + str(self.in_features) \
 + ', out_features=' + str(self.out_features) \
 + ', bias=' + str(self.bias is not None) + ')'
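# Hedged usage sketch (_TinyNet is a made-up example, an assumption, not part of
# this module): set_flag() recurses into children, so a flag set on the wrapper
# is also set on every child that is itself a ModuleWrapper, such as the
# LinearVariance layers defined above.
class _TinyNet(ModuleWrapper):
    def __init__(self):
        super(_TinyNet, self).__init__()
        self.fc1 = LinearVariance(4, 8)
        self.fc2 = LinearVariance(8, 2)
# net = _TinyNet()
# net.set_flag('permute_sigma', False)
# hasattr(net.fc1, 'permute_sigma') -> True, the flag propagated to the child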
class LinearVarianceBe(ModuleWrapper):
 def __init__(self, in_features, out_features, bias=True):
 super(LinearVarianceBe, self).__init__()
 self.in_features = in_features
 self.out_features = out_features
 self.probs = torch.ones([out_features, in_features]).cuda() * 0.5
 self.W = Parameter(torch.Tensor(out_features, in_features))
 if bias:
 self.bias = Parameter(torch.Tensor(1, out_features))
 else:
 self.register_parameter('bias', None)
 self.reset_parameters()
 def reset_parameters(self):
 stdv = 1. / math.sqrt(self.W.size(1))
 self.W.data.uniform_(-stdv, stdv)
 if self.bias is not None:
 self.bias.data.zero_()
 def forward(self, x):
 if self.training:
 eps = Variable(torch.bernoulli(self.probs) - 0.5)
 else:
 eps = 0.0
 output = F.linear(x, self.W*eps)
 if self.bias is not None:
 output = output + self.bias
 return output
 def __repr__(self):
 return self.__class__.__name__ + '(' \
 + 'in_features=' + str(self.in_features) \
 + ', out_features=' + str(self.out_features) \
 + ', bias=' + str(self.bias is not None) + ')'
class LinearVarianceUnif(ModuleWrapper):
 def __init__(self, in_features, out_features, bias=True):
 super(LinearVarianceUnif, self).__init__()
 self.in_features = in_features
 self.out_features = out_features
 self.W = Parameter(torch.Tensor(out_features, in_features))
 if bias:
 self.bias = Parameter(torch.Tensor(1, out_features))
 else:
 self.register_parameter('bias', None)
 self.reset_parameters()
 def reset_parameters(self):
 stdv = 1. / math.sqrt(self.W.size(1))
 self.W.data.uniform_(-stdv, stdv)
 if self.bias is not None:
 self.bias.data.zero_()
 def forward(self, x):
 if self.training:
 eps = Variable(self.W.data.new(self.W.size()).uniform_() - 0.5)
 else:
 eps = 0.0
 output = F.linear(x, self.W*eps)
 if self.bias is not None:
 output = output + self.bias
 return output
 def __repr__(self):
 return self.__class__.__name__ + '(' \
 + 'in_features=' + str(self.in_features) \
 + ', out_features=' + str(self.out_features) \
 + ', bias=' + str(self.bias is not None) + ')'
class LinearVDO(ModuleWrapper):
 def __init__(self, in_features, out_features, prior='loguni', alpha_shape=(1, 1), bias=True):
 super(LinearVDO, self).__init__()
 self.in_features = in_features
 self.out_features = out_features
 self.alpha_shape = alpha_shape
 self.W = Parameter(torch.Tensor(out_features, in_features))
 self.log_alpha = Parameter(torch.Tensor(*alpha_shape))
 if bias:
 self.bias = Parameter(torch.Tensor(1, out_features))
 else:
 self.register_parameter('bias', None)
 self.reset_parameters()
 self.zero_mean = False
 self.permute_sigma = False
 self.prior = prior
 if prior == 'loguni':
 self.kl_fun = metrics.kl_loguni
 else:
 self.kl_fun = metrics.kl_ard
 def reset_parameters(self):
 stdv = 1. / math.sqrt(self.W.size(1))
 self.W.data.uniform_(-stdv, stdv)
 self.log_alpha.data.fill_(-5.0)
 if self.bias is not None:
 self.bias.data.zero_()
 def forward(self, x):
 if self.zero_mean:
 lrt_mean = 0.0
 else:
 lrt_mean = F.linear(x, self.W)
 if self.bias is not None:
 lrt_mean = lrt_mean + self.bias
 sigma2 = Variable.exp(self.log_alpha) * self.W * self.W
 if self.permute_sigma:
 sigma2 = sigma2.view(-1)[torch.randperm(self.in_features * self.out_features).cuda()].view(self.out_features, self.in_features)
 lrt_std = Variable.sqrt(1e-16 + F.linear(x * x, sigma2))
 if self.training:
 eps = Variable(lrt_std.data.new(lrt_std.size()).normal_())
 else:
 eps = 0.0
 return lrt_mean + lrt_std * eps
 def kl_reg(self):
 return self.W.nelement() * self.kl_fun(self.log_alpha) / self.log_alpha.nelement()
 def __repr__(self):
 return self.__class__.__name__ + '(' \
 + 'in_features=' + str(self.in_features) \
 + ', out_features=' + str(self.out_features) \
 + ', alpha_shape=' + str(self.alpha_shape) \
 + ', prior=' + self.prior \
 + ', bias=' + str(self.bias is not None) + ')'
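# Hedged sketch (an illustrative assumption, not original code) of the local
# reparameterisation trick that LinearVDO.forward implements above: instead of
# sampling a weight matrix, the layer samples pre-activations directly from a
# Gaussian with mean x W^T and variance (x*x) sigma2^T.
def _lrt_sketch():
    import torch
    import torch.nn.functional as F
    x = torch.randn(8, 16)                  # batch of 8 examples, 16 features
    W = torch.randn(32, 16)                 # mean weights
    log_alpha = torch.full((1, 1), -5.0)    # variational dropout parameter
    sigma2 = torch.exp(log_alpha) * W * W   # per-weight variance
    mean = F.linear(x, W)
    std = torch.sqrt(1e-16 + F.linear(x * x, sigma2))
    return mean + std * torch.randn_like(std)   # shape (8, 32)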
class ConvVDO(ModuleWrapper):
 def __init__(self, in_channels, out_channels, kernel_size, alpha_shape, stride=1,
 padding=0, dilation=1, prior='loguni', bias=True):
 super(ConvVDO, self).__init__()
 self.in_channels = in_channels
 self.out_channels = out_channels
 self.kernel_size = (kernel_size, kernel_size)
 self.stride = stride
 self.padding = padding
 self.dilation = dilation
 self.alpha_shape = alpha_shape
 self.groups = 1
 self.weight = Parameter(torch.Tensor(
 out_channels, in_channels, *self.kernel_size))
 if bias:
 self.bias = Parameter(torch.Tensor(1, out_channels, 1, 1))
 else:
 self.register_parameter('bias', None)
 self.op_bias = lambda input, kernel: F.conv2d(input, kernel, self.bias, self.stride, self.padding, self.dilation, self.groups)
 self.op_nobias = lambda input, kernel: F.conv2d(input, kernel, None, self.stride, self.padding, self.dilation, self.groups)
 self.log_alpha = Parameter(torch.Tensor(*alpha_shape))
 self.reset_parameters()
 self.zero_mean = False
 self.permute_sigma = False
 self.prior = prior
 if prior == 'loguni':
 self.kl_fun = metrics.kl_loguni
 else:
 self.kl_fun = metrics.kl_ard
 def reset_parameters(self):
 n = self.in_channels
 for k in self.kernel_size:
 n *= k
 stdv = 1. / math.sqrt(n)
 self.weight.data.uniform_(-stdv, stdv)
 if self.bias is not None:
 self.bias.data.uniform_(-stdv, stdv)
 self.log_alpha.data.fill_(-5.0)
 def forward(self, x):
 if self.zero_mean:
 lrt_mean = self.op_bias(x, 0.0 * self.weight)
 else:
 lrt_mean = self.op_bias(x, self.weight)
 sigma2 = Variable.exp(self.log_alpha) * self.weight * self.weight
 if self.permute_sigma:
 sigma2 = sigma2.view(-1)[torch.randperm(self.weight.nelement()).cuda()].view(self.weight.shape)
 lrt_std = Variable.sqrt(1e-16 + self.op_nobias(x * x, sigma2))
 if self.training:
 eps = Variable(lrt_std.data.new(lrt_std.size()).normal_())
 else:
 eps = 0.0
 return lrt_mean + lrt_std * eps
 def kl_reg(self):
 return self.weight.nelement() / self.log_alpha.nelement() * metrics.kl_loguni(self.log_alpha)
 def __repr__(self):
 s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
 ', stride={stride}')
 s += ', padding={padding}'
 s += ', alpha_shape=' + str(self.alpha_shape)
 s += ', prior=' + self.prior
 s += ', dilation={dilation}'
 if self.bias is None:
 s += ', bias=False'
 s += ')'
 return s.format(name=self.__class__.__name__, **self.__dict__)
class ConvVariance(ModuleWrapper):
 def __init__(self, in_channels, out_channels, kernel_size, stride=1,
 padding=0, dilation=1, bias=True):
 super(ConvVariance, self).__init__()
 self.in_channels = in_channels
 self.out_channels = out_channels
 self.kernel_size = (kernel_size, kernel_size)
 self.stride = stride
 self.padding = padding
 self.dilation = dilation
 self.groups = 1
 self.sigma = Parameter(torch.Tensor(
 out_channels, in_channels, *self.kernel_size))
 if bias:
 self.bias = Parameter(torch.Tensor(1, out_channels, 1, 1))
 else:
 self.register_parameter('bias', None)
 self.op_bias = lambda input, kernel: F.conv2d(input, kernel, self.bias, self.stride, self.padding, self.dilation, self.groups)
 self.op_nobias = lambda input, kernel: F.conv2d(input, kernel, None, self.stride, self.padding, self.dilation, self.groups)
 self.reset_parameters()
 self.zero_mean = False
 self.permute_sigma = False
 def reset_parameters(self):
 n = self.in_channels
 for k in self.kernel_size:
 n *= k
 stdv = 1. / math.sqrt(n)
 self.sigma.data.uniform_(-stdv, stdv)
 if self.bias is not None:
 self.bias.data.uniform_(-stdv, stdv)
 def forward(self, x):
 lrt_mean = 0.0
 if self.bias is not None:
 lrt_mean = self.bias
 sigma2 = self.sigma * self.sigma
 if self.permute_sigma:
 sigma2 = sigma2.view(-1)[torch.randperm(self.sigma.nelement()).cuda()].view(self.sigma.shape)
 lrt_std = Variable.sqrt(1e-16 + self.op_nobias(x * x, sigma2))
 if self.training:
 eps = Variable(lrt_std.data.new(lrt_std.size()).normal_())
 else:
 eps = 0.0
 return lrt_mean + lrt_std * eps
 def __repr__(self):
 s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
 ', stride={stride}')
 s += ', padding={padding}'
 s += ', dilation={dilation}'
 if self.bias is None:
 s += ', bias=False'
 s += ')'
 return s.format(name=self.__class__.__name__, **self.__dict__)
class ConvVarianceBe(ModuleWrapper):
 def __init__(self, in_channels, out_channels, kernel_size, stride=1,
 padding=0, dilation=1, bias=True):
 super(ConvVarianceBe, self).__init__()
 self.in_channels = in_channels
 self.out_channels = out_channels
 self.kernel_size = (kernel_size, kernel_size)
 self.stride = stride
 self.padding = padding
 self.dilation = dilation
 self.groups = 1
 self.probs = torch.ones([out_channels, in_channels, *self.kernel_size]).cuda() * 0.5
 self.W = Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size))
 if bias:
 self.bias = Parameter(torch.Tensor(1, out_channels, 1, 1))
 else:
 self.register_parameter('bias', None)
 self.op_bias = lambda input, kernel: F.conv2d(input, kernel, self.bias, self.stride, self.padding, self.dilation, self.groups)
 self.op_nobias = lambda input, kernel: F.conv2d(input, kernel, None, self.stride, self.padding, self.dilation, self.groups)
 self.reset_parameters()
 def reset_parameters(self):
 n = self.in_channels
 for k in self.kernel_size:
 n *= k
 stdv = 1. / math.sqrt(n)
 self.W.data.uniform_(-stdv, stdv)
 if self.bias is not None:
 self.bias.data.uniform_(-stdv, stdv)
 def forward(self, x):
 if self.training:
 eps = Variable(torch.bernoulli(self.probs) - 0.5)
 else:
 eps = 0.0
 output = self.op_nobias(x, self.W*eps)
 if self.bias is not None:
 output = output + self.bias
 return output
 def __repr__(self):
 s = ('{name}({in_channels}, {out_channels}, kernel_size={kernel_size}'
 ', stride={stride}')
 s += ', padding={padding}'
 s += ', dilation={dilation}'
 if self.bias is None:
 s += ', bias=False'
 s += ')'
 return s.format(name=self.__class__.__name__, **self.__dict__)
class ConvVarianceUnif(ModuleWrapper):
 def __init__(self, in_channels, out_channels, kernel_size, stride=1,
 padding=0, dilation=1, bias=True):
 super(ConvVarianceUnif, self).__init__()
 self.in_channels = in_channels
 self.out_channels = out_channels
 self.kernel_size = (kernel_size, kernel_size)
 self.stride = stride
 self.padding = padding
 self.dilation = dilation
 self.groups = 1
 self.W = Parameter(torch.Tensor(out_channels, in_channels, *self.kernel_size))
 if bias:
 self.bias = Parameter(torch.Tensor(1, out_channels, 1, 1))
 else:
 self.register_parameter('bias', None)
 self.op_bias = lambda input, kernel: F.conv2d(input, kernel, self.bias, self.stride, self.padding, self.dilation, self.groups)
 self.op_nobias = lambda input, kernel: F.conv2d(input, kernel, None, self.stride, self.padding, self.dilation, self.groups)
 self.reset_parameters()
 def reset_parameters(self):
 n = self.in_channels
 for k in self.kernel_size:
 n *= k
 stdv = 1. | |
| 
	as
 well as time variables t_age and t_cycle. Normalized assets and permanent income levels
 are drawn from lognormal distributions given by aNrmInitMean and aNrmInitStd (etc).
 Parameters
 ----------
 which_agents : np.array(Bool)
 Boolean array of size self.AgentCount indicating which agents should be "born".
 Returns
 -------
 None
 '''
 # Get and store states for newly born agents
 N = np.sum(which_agents) # Number of new consumers to make
 self.aNrmNow[which_agents] = Lognormal(
 mu=self.aNrmInitMean,
 sigma=self.aNrmInitStd,
 seed=self.RNG.randint(0,2**31-1)).draw(N)
 pLvlInitMeanNow = self.pLvlInitMean + np.log(self.PlvlAggNow) # Account for newer cohorts having higher permanent income
 self.pLvlNow[which_agents] = Lognormal(
 pLvlInitMeanNow,
 self.pLvlInitStd,
 seed=self.RNG.randint(0,2**31-1)).draw(N)
 self.t_age[which_agents] = 0 # How many periods since each agent was born
 self.t_cycle[which_agents] = 0 # Which period of the cycle each agent is currently in
 return None
 def simDeath(self):
 '''
 Determines which agents die this period and must be replaced. Uses the sequence in LivPrb
 to determine survival probabilities for each agent.
 Parameters
 ----------
 None
 Returns
 -------
 which_agents : np.array(bool)
 Boolean array of size AgentCount indicating which agents die.
 '''
 # Determine who dies
 DiePrb_by_t_cycle = 1.0 - np.asarray(self.LivPrb)
 DiePrb = DiePrb_by_t_cycle[self.t_cycle-1] # Time has already advanced, so look back one
 DeathShks = Uniform(
 seed=self.RNG.randint(0,2**31-1)).draw(N=self.AgentCount)
 which_agents = DeathShks < DiePrb
 if self.T_age is not None: # Kill agents that have lived for too many periods
 too_old = self.t_age >= self.T_age
 which_agents = np.logical_or(which_agents,too_old)
 return which_agents
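 # Worked sketch of the death draw above (hypothetical numbers, added for
 # illustration): with LivPrb = [0.98], DiePrb is 0.02 for every agent, so an
 # agent is replaced when its uniform draw falls below 0.02; any agent with
 # t_age >= T_age is replaced as well.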
 def getShocks(self):
 '''
 Finds permanent and transitory income "shocks" for each agent this period. As this is a
 perfect foresight model, there are no stochastic shocks: PermShkNow = PermGroFac for each
 agent (according to their t_cycle) and TranShkNow = 1.0 for all agents.
 Parameters
 ----------
 None
 Returns
 -------
 None
 '''
 PermGroFac = np.array(self.PermGroFac)
 self.PermShkNow = PermGroFac[self.t_cycle-1] # cycle time has already been advanced
 self.TranShkNow = np.ones(self.AgentCount)
 def getRfree(self):
 '''
 Returns an array of size self.AgentCount with self.Rfree in every entry.
 Parameters
 ----------
 None
 Returns
 -------
 RfreeNow : np.array
 Array of size self.AgentCount with risk free interest rate for each agent.
 '''
 RfreeNow = self.Rfree*np.ones(self.AgentCount)
 return RfreeNow
 def getStates(self):
 '''
 Calculates updated values of normalized market resources and permanent income level for each
 agent. Uses pLvlNow, aNrmNow, PermShkNow, TranShkNow.
 Parameters
 ----------
 None
 Returns
 -------
 None
 '''
 pLvlPrev = self.pLvlNow
 aNrmPrev = self.aNrmNow
 RfreeNow = self.getRfree()
 # Calculate new states: normalized market resources and permanent income level
 self.pLvlNow = pLvlPrev*self.PermShkNow # Updated permanent income level
 self.PlvlAggNow = self.PlvlAggNow*self.PermShkAggNow # Updated aggregate permanent productivity level
 ReffNow = RfreeNow/self.PermShkNow # "Effective" interest factor on normalized assets
 self.bNrmNow = ReffNow*aNrmPrev # Bank balances before labor income
 self.mNrmNow = self.bNrmNow + self.TranShkNow # Market resources after income
 return None
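 # Worked sketch of the transition above (hypothetical numbers, added for
 # illustration): with RfreeNow = 1.03, PermShkNow = 1.02, aNrmPrev = 0.5 and
 # TranShkNow = 1.0, ReffNow = 1.03/1.02 ~= 1.0098, so bNrmNow ~= 0.5049 and
 # mNrmNow ~= 1.5049, while pLvlNow grows by the factor 1.02.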
 def getControls(self):
 '''
 Calculates consumption for each consumer of this type using the consumption functions.
 Parameters
 ----------
 None
 Returns
 -------
 None
 '''
 cNrmNow = np.zeros(self.AgentCount) + np.nan
 MPCnow = np.zeros(self.AgentCount) + np.nan
 for t in range(self.T_cycle):
 these = t == self.t_cycle
 cNrmNow[these], MPCnow[these] = self.solution[t].cFunc.eval_with_derivative(self.mNrmNow[these])
 self.cNrmNow = cNrmNow
 self.MPCnow = MPCnow
 return None
 def getPostStates(self):
 '''
 Calculates end-of-period assets for each consumer of this type.
 Parameters
 ----------
 None
 Returns
 -------
 None
 '''
 self.aNrmNow = self.mNrmNow - self.cNrmNow
 self.aLvlNow = self.aNrmNow*self.pLvlNow # Useful in some cases to precalculate asset level
 return None
 def checkCondition(self,
 name,
 test,
 messages,
 verbose,
 verbose_messages=None):
 """
 Checks one condition.
 Parameters
 ----------
 name : string
 Name for the condition.
 test : function(self -> boolean)
 A function (of self) which tests the condition
 messages : dict{boolean : string}
 A dictionary with boolean keys containing values
 for messages to print if the condition is
 true or false.
 verbose_messages : dict{boolean : string}
 (Optional) A dictionary with boolean keys containing values
 for messages to print if the condition is
 true or false under verbose printing.
 """
 self.conditions[name] = test(self)
 set_verbosity_level((4-verbose)*10)
 _log.info(messages[self.conditions[name]].format(self))
 if verbose_messages:
 _log.debug(verbose_messages[self.conditions[name]].format(self))
 def checkAIC(self, verbose=None):
 '''
 Evaluate and report on the Absolute Impatience Condition
 '''
 name = "AIC"
 test = lambda agent : agent.thorn < 1
 messages = {
 True: "The value of the absolute impatience factor (APF) for the supplied parameter values satisfies the Absolute Impatience Condition.",
 False: "The given type violates the Absolute Impatience Condition with the supplied parameter values; the APF is {0.thorn}"}
 verbose_messages = {
 True : " Because the APF < 1, the absolute amount of consumption is expected to fall over time.",
 False : " Because the APF > 1, the absolute amount of consumption is expected to grow over time."
 }
 verbose = self.verbose if verbose is None else verbose
 self.checkCondition(name, test, messages, verbose, verbose_messages)
 def checkGICPF(self, verbose=None):
 '''
 Evaluate and report on the Growth Impatience Condition for the Perfect Foresight model
 '''
 name = "GICPF"
 self.GPFPF = self.thorn/self.PermGroFac[0]
 test = lambda agent : agent.GPFPF < 1
 messages = {
 True : 'The value of the Growth Patience Factor for the supplied parameter values satisfies the Perfect Foresight Growth Impatience Condition.',
 False : 'The value of the Growth Patience Factor for the supplied parameter values fails the Perfect Foresight Growth Impatience Condition; the GPFPF is: {0.GPFPF}',
 }
 verbose_messages = {
 True: ' Therefore, for a perfect foresight consumer, the ratio of individual wealth to permanent income will fall indefinitely.',
 False: ' Therefore, for a perfect foresight consumer, the ratio of individual wealth to permanent income is expected to grow toward infinity.',
 }
 verbose = self.verbose if verbose is None else verbose
 self.checkCondition(name, test, messages, verbose, verbose_messages)
 def checkRIC(self, verbose=None):
 '''
 Evaluate and report on the Return Impatience Condition
 '''
 self.RPF = self.thorn/self.Rfree
 name = "RIC"
 test = lambda agent: self.RPF < 1
 
 messages = {
 True : 'The value of the Return Patience Factor for the supplied parameter values satisfies the Return Impatience Condition.',
 False : 'The value of the Return Patience Factor for the supplied parameter values fails the Return Impatience Condition; the RPF is {0.RPF}'
 }
 verbose_messages = {
 True : ' Therefore, the limiting consumption function is not c(m)=0 for all m',
 False : ' Therefore, the limiting consumption function is c(m)=0 for all m'
 }
 verbose = self.verbose if verbose is None else verbose
 self.checkCondition(name, test, messages, verbose, verbose_messages)
 def checkFHWC(self, verbose=None):
 '''
 Evaluate and report on the Finite Human Wealth Condition
 '''
 self.FHWF = self.PermGroFac[0]/self.Rfree
 self.cNrmPDV = 1.0/(1.0-self.thorn/self.Rfree)
 name = "FHWC"
 test = lambda agent: self.FHWF < 1
 
 messages = {
 True : 'The Finite Human wealth factor value for the supplied parameter values satisfies the Finite Human Wealth Condition.',
 False : 'The given type violates the Finite Human Wealth Condition; the Finite Human wealth factor value {0.FHWF}',
 }
 verbose_messages = {
 True : ' Therefore, the limiting consumption function is not c(m)=Infinity\nand human wealth normalized by permanent income is {0.hNrm}\nand the PDV of future consumption growth is {0.cNrmPDV}',
 False : ' Therefore, the limiting consumption function is c(m)=Infinity for all m'
 }
 verbose = self.verbose if verbose is None else verbose
 self.checkCondition(name, test, messages, verbose, verbose_messages)
 def checkConditions(self, verbose=None):
 '''
 This method checks whether the instance's type satisfies the
 Absolute Impatience Condition (AIC), 
 the Return Impatience Condition (RIC),
 the Finite Human Wealth Condition (FHWC), and the perfect foresight
 model's versions of the Growth Impatience Condition (GICPF) and the
 Finite Value of Autarky Condition (FVACPF). Depending on the configuration of
 parameter values, some combination of these conditions must be satisfied in
 order for the problem to have a nondegenerate solution. In verbose mode, a
 reference to the relevant theoretical literature is given so the user can see which conditions are required.
 Parameters
 ----------
 verbose : boolean
 Specifies different levels of verbosity of feedback. When False, it only reports whether the
 instance's type fails to satisfy a particular condition. When True, it reports all results, i.e.
 the factor values for all conditions.
 Returns
 -------
 None
 '''
 self.conditions = {}
 self.violated = False
 # This method only checks for the conditions for infinite horizon models
 # with a 1 period cycle. If these conditions are not met, | |
| 
	<filename>dimod/serialization/format.py<gh_stars>0
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
# developer note: It is hard to provide code examples for these because doing
# so will overwrite the settings in sphinx's setup.
import numbers
import sys
import collections.abc as abc
from collections import deque
from io import StringIO
import numpy as np
import dimod
__all__ = 'set_printoptions', 'Formatter'
_format_options = {
 'width': 79,
 'depth': None,
 'sorted_by': 'energy',
}
def set_printoptions(**kwargs):
 """Set print options globally.
 Args:
 width (int, optional, default=79):
 The maximum number of characters to a single line.
 depth (int, optional, default=None):
 The maximum number of rows printed, summation is used if
 exceeded. Default is unlimited.
 sorted_by (str/None, optional, default='energy'):
 Selects the field used to sort the samples when printing samplesets.
 If None, samples are printed in record order.
 Note:
 All arguments must be provided as keyword arguments.
 """
 _format_options.update(kwargs)
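# Usage sketch (kept as plain comments rather than doctests; see the developer
# note above about sphinx's setup). These calls simply update the module-level
# _format_options dict:
#     set_printoptions(width=120)        # allow wider tables
#     set_printoptions(depth=10)         # summarize samplesets longer than 10 rows
#     set_printoptions(sorted_by=None)   # print samples in record order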
def _spinstr(v, rjust=0):
 s = '-1' if v <= 0 else '+1'
 return s.rjust(rjust)
def _binarystr(v, rjust=0):
 s = '0' if v <= 0 else '1'
 return s.rjust(rjust)
class _SampleTable(object):
 """Creates the table for printing samples. Acts like a deque in that
 it can be rotated and appended on either side.
 """
 def __init__(self, space_length=1):
 self.deque = deque()
 self.items_length = 0
 self.space_length = space_length # space between columns
 @property
 def ncol(self):
 return len(self.deque)
 @property
 def width(self):
 """the width of the table if made into a string"""
 return self.items_length + (self.ncol - 1)*self.space_length
 def append(self, header, f, _left=False):
 """Add a column to the table.
 Args:
 header (str):
 Column header
 f (function(datum)->str):
 Makes the row string from the datum. Str returned by f should
 have the same width as header.
 """
 self.items_length += len(header)
 if _left:
 self.deque.appendleft((header, f))
 else:
 self.deque.append((header, f))
 def appendleft(self, header, f):
 self.append(header, f, _left=True)
 def append_index(self, num_rows):
 """Add an index column.
 Left justified, width is determined by the space needed to print the
 largest index.
 """
 width = len(str(num_rows - 1))
 def f(datum):
 return str(datum.idx).ljust(width)
 header = ' '*width
 self.append(header, f)
 def append_sample(self, v, vartype, _left=False):
 """Add a sample column"""
 vstr = str(v).rjust(2) # pad the header to at least the width of the printed sample values ('-1'/'+1' or '0'/'1')
 length = len(vstr)
 if vartype is dimod.SPIN:
 def f(datum):
 return _spinstr(datum.sample[v], rjust=length)
 else:
 def f(datum):
 return _binarystr(datum.sample[v], rjust=length)
 self.append(vstr, f, _left=_left)
 def appendleft_sample(self, v, vartype):
 self.append_sample(v, vartype, _left=True)
 def append_vector(self, name, vector, _left=False):
 """Add a data vectors column."""
 if np.issubdtype(vector.dtype, np.integer):
 # determine the length we need
 largest = str(max(vector.max(), vector.min(), key=abs))
 length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
 if len(name) > length:
 header = name[:length-1] + '.'
 else:
 header = name.rjust(length)
 def f(datum):
 return str(getattr(datum, name)).rjust(length)
 elif np.issubdtype(vector.dtype, np.floating):
 largest = np.format_float_positional(max(vector.max(), vector.min(), key=abs),
 precision=6, trim='0')
 length = max(len(largest), min(7, len(name))) # how many spaces we need to represent
 if len(name) > length:
 header = name[:length-1] + '.'
 else:
 header = name.rjust(length)
 def f(datum):
 return np.format_float_positional(getattr(datum, name),
 precision=6, trim='0',
 ).rjust(length)
 else:
 length = 7
 if len(name) > length:
 header = name[:length-1] + '.'
 else:
 header = name.rjust(length)
 def f(datum):
 r = repr(getattr(datum, name))
 if len(r) > length:
 r = r[:length-3] + '...'
 return r.rjust(length)
 self.append(header, f, _left=_left)
 def appendleft_vector(self, name, vector):
 self.append_vector(name, vector, _left=True)
 def dump_to_list(self):
 """deconstructs self into a list"""
 return [self.deque.popleft() for _ in range(self.ncol)]
 def pop(self):
 header, _ = self.deque.pop()
 self.items_length -= len(header)
 def popleft(self):
 header, _ = self.deque.popleft()
 self.items_length -= len(header)
 def rotate(self, r):
 self.deque.rotate(r)
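# Sketch of how _SampleTable is typically driven (added illustration; this is
# roughly what Formatter._print_samples below does):
#     table = _SampleTable()
#     table.append_index(len(sampleset))
#     table.append_vector('energy', sampleset.record.energy)
#     for v in sampleset.variables:
#         table.append_sample(v, sampleset.vartype)
#     rows = table.dump_to_list()
#     header = ' '.join(h for h, _ in rows)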
class Formatter(object):
 """Used to create nice string formats for dimod objects.
 Args:
 width (int, optional, default=79):
 The maximum number of characters to a single line.
 depth (int, optional, default=None):
 The maximum number of rows printed, summation is used if
 exceeded. Default is unlimited.
 sorted_by (str/None, optional, default='energy'):
 Selects the field used to sort the samples when printing samplesets.
 If None, samples are printed in record order.
 Examples:
 >>> from dimod.serialization.format import Formatter
 >>> sampleset = dimod.SampleSet.from_samples(([-1, 1], ['a', 'b']), dimod.SPIN, energy=1)
 >>> Formatter(width=45).print(sampleset)
 a b energy num_oc.
 0 -1 +1 1 1
 ['SPIN', 1 rows, 1 samples, 2 variables]
 >>> Formatter(width=30).print(sampleset)
 a b energy num_oc.
 0 -1 +1 1 1
 ['SPIN',
 1 rows,
 1 samples,
 2 variables]
 """
 def __init__(self, **kwargs):
 self.options = options = _format_options.copy()
 options.update(kwargs)
 def format(self, obj, **kwargs):
 """Return the formatted representation of the object as a string."""
 sio = StringIO()
 self.fprint(obj, stream=sio, **kwargs)
 return sio.getvalue()
 def fprint(self, obj, stream=None, **kwargs):
 """Prints the formatted representation of the object on stream"""
 if stream is None:
 stream = sys.stdout
 options = self.options
 options.update(kwargs)
 if isinstance(obj, dimod.SampleSet):
 self._print_sampleset(obj, stream, **options)
 return
 raise TypeError("cannot format type {}".format(type(obj)))
 def _print_sampleset(self, sampleset, stream,
 width, depth, sorted_by,
 **other):
 if len(sampleset) > 0:
 self._print_samples(sampleset, stream, width, depth, sorted_by)
 else:
 stream.write('Empty SampleSet\n')
 # write the data vectors
 stream.write('Record Fields: [')
 self._print_items(sampleset.record.dtype.names, stream, width - len('Record Fields: [') - 1)
 stream.write(']\n')
 # write the variables
 stream.write('Variables: [')
 self._print_items(sampleset.variables, stream, width - len('Variables: [') - 1)
 stream.write(']\n')
 # add the footer
 stream.write('[')
 footer = [repr(sampleset.vartype.name),
 '{} rows'.format(len(sampleset)),
 '{} samples'.format(sampleset.record.num_occurrences.sum()),
 '{} variables'.format(len(sampleset.variables))
 ]
 if sum(map(len, footer)) + (len(footer) - 1)*2 > width - 2:
 # if the footer won't fit in width
 stream.write(',\n '.join(footer))
 else:
 # the footer fits within width, so keep it on one line
 stream.write(', '.join(footer))
 stream.write(']')
 def _print_samples(self, sampleset, stream, width, depth, sorted_by):
 if len(sampleset) == 0:
 raise ValueError("Cannot print empty samplesets")
 # we need to know what goes into each row. We know we will use
 # datum as returned by sampleset.data() to populate the values,
 # so let's store our row formatters in the following form:
 # row[(header, f(datum): str)]
 table = _SampleTable()
 # there are a minimum set of headers:
 # idx energy num_oc.
 table.append_index(len(sampleset))
 table.append_vector('energy', sampleset.record.energy)
 table.append_vector('num_occurrences', sampleset.record.num_occurrences)
 # if there are more vectors, let's just put a placeholder in for now
 # we might replace it later if we still have space
 if len(sampleset.record.dtype.names) > len(sampleset._REQUIRED_FIELDS):
 table.append('...', lambda _: '...')
 # next we want to add variables until we run out of width
 table.rotate(-1) # move the index to the end
 num_added = 0
 for v in sampleset.variables:
 table.append_sample(v, sampleset.vartype)
 num_added += 1
 if table.width > width:
 # we've run out of space, need to make room for the last
 # variable and a spacer
 last = sampleset.variables[-1]
 table.appendleft_sample(last, sampleset.vartype)
 table.appendleft('...', lambda _: '...')
 while table.width > width:
 # remove variables until we have space for the last one
 table.pop()
 num_added -= 1
 break
 table.rotate(num_added + 1) # move the index back to the front
 # finally any remaining space should be used for other fields. We assume
 # at this point that deque looks like [idx variables energy num_occ. ...]
 other_fields = set(sampleset.record.dtype.names).difference(sampleset._REQUIRED_FIELDS)
 if other_fields:
 num_added = 0
 while len(other_fields):
 name = min(other_fields, key=len)
 table.appendleft_vector(name, sampleset.record[name])
 other_fields.remove(name)
 num_added += 1
 if table.width > width:
 table.popleft()
 num_added -= 1
 break
 else:
 # we have no other fields to add
 assert len(other_fields) == 0
 table.pop() # remove the summary
 table.rotate(-num_added) # put index back at the front
 # turn rows into a list because we're done rotating etc
 rows = table.dump_to_list()
 # ok, now let's print.
 stream.write(' '.join(header for header, _ in rows))
 stream.write('\n')
 if depth is None:
 depth = float('inf')
 for idx, datum in enumerate(sampleset.data(index=True)):
 stream.write(' '.join(f(datum) for _, f in rows))
 stream.write('\n')
 if idx + 3 >= depth and len(sampleset) > depth:
 stream.write('...\n')
 datum = next(sampleset.data(reverse=True, index=True)) # get the last one
 stream.write(' '.join(f(datum) for _, f in rows))
 stream.write('\n')
 break
 def _print_items(self, iterable, stream, width):
 iterator = map(repr, iterable)
 try:
 first = next(iterator)
 except StopIteration:
 # nothing to represent
 | |
| 
	= DataFrame([[1, 'A'], [2, 'A']], columns=midx)
 expected = df.groupby('to filter').groups
 result = df.groupby([('to filter', '')]).groups
 tm.assert_dict_equal(result, expected)
 def test_groupby_multiindex_tuple(self):
 # GH 17979
 df = pd.DataFrame([[1, 2, 3, 4], [3, 4, 5, 6], [1, 4, 2, 3]],
 columns=pd.MultiIndex.from_arrays(
 [['a', 'b', 'b', 'c'],
 [1, 1, 2, 2]]))
 expected = df.groupby([('b', 1)]).groups
 result = df.groupby(('b', 1)).groups
 tm.assert_dict_equal(expected, result)
 df2 = pd.DataFrame(df.values,
 columns=pd.MultiIndex.from_arrays(
 [['a', 'b', 'b', 'c'],
 ['d', 'd', 'e', 'e']]))
 expected = df2.groupby([('b', 'd')]).groups
 result = df.groupby(('b', 1)).groups
 tm.assert_dict_equal(expected, result)
 df3 = pd.DataFrame(df.values,
 columns=[('a', 'd'), ('b', 'd'), ('b', 'e'), 'c'])
 expected = df3.groupby([('b', 'd')]).groups
 result = df.groupby(('b', 1)).groups
 tm.assert_dict_equal(expected, result)
 @pytest.mark.parametrize('sort', [True, False])
 def test_groupby_level(self, sort, mframe, df):
 # GH 17537
 frame = mframe
 deleveled = frame.reset_index()
 result0 = frame.groupby(level=0, sort=sort).sum()
 result1 = frame.groupby(level=1, sort=sort).sum()
 expected0 = frame.groupby(deleveled['first'].values, sort=sort).sum()
 expected1 = frame.groupby(deleveled['second'].values, sort=sort).sum()
 expected0.index.name = 'first'
 expected1.index.name = 'second'
 assert result0.index.name == 'first'
 assert result1.index.name == 'second'
 assert_frame_equal(result0, expected0)
 assert_frame_equal(result1, expected1)
 assert result0.index.name == frame.index.names[0]
 assert result1.index.name == frame.index.names[1]
 # groupby level name
 result0 = frame.groupby(level='first', sort=sort).sum()
 result1 = frame.groupby(level='second', sort=sort).sum()
 assert_frame_equal(result0, expected0)
 assert_frame_equal(result1, expected1)
 # axis=1
 result0 = frame.T.groupby(level=0, axis=1, sort=sort).sum()
 result1 = frame.T.groupby(level=1, axis=1, sort=sort).sum()
 assert_frame_equal(result0, expected0.T)
 assert_frame_equal(result1, expected1.T)
 # raise exception for non-MultiIndex
 msg = "level > 0 or level < -1 only valid with MultiIndex"
 with pytest.raises(ValueError, match=msg):
 df.groupby(level=1)
 def test_groupby_level_index_names(self):
 # GH4014 this used to raise ValueError since 'exp'>1 (in py2)
 df = DataFrame({'exp': ['A'] * 3 + ['B'] * 3,
 'var1': lrange(6), }).set_index('exp')
 df.groupby(level='exp')
 msg = "level name foo is not the name of the index"
 with pytest.raises(ValueError, match=msg):
 df.groupby(level='foo')
 @pytest.mark.parametrize('sort', [True, False])
 def test_groupby_level_with_nas(self, sort):
 # GH 17537
 index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
 codes=[[1, 1, 1, 1, 0, 0, 0, 0], [0, 1, 2, 3, 0, 1,
 2, 3]])
 # factorizing doesn't confuse things
 s = Series(np.arange(8.), index=index)
 result = s.groupby(level=0, sort=sort).sum()
 expected = Series([6., 22.], index=[0, 1])
 assert_series_equal(result, expected)
 index = MultiIndex(levels=[[1, 0], [0, 1, 2, 3]],
 codes=[[1, 1, 1, 1, -1, 0, 0, 0], [0, 1, 2, 3, 0,
 1, 2, 3]])
 # factorizing doesn't confuse things
 s = Series(np.arange(8.), index=index)
 result = s.groupby(level=0, sort=sort).sum()
 expected = Series([6., 18.], index=[0.0, 1.0])
 assert_series_equal(result, expected)
 def test_groupby_args(self, mframe):
 # PR8618 and issue 8015
 frame = mframe
 msg = "You have to supply one of 'by' and 'level'"
 with pytest.raises(TypeError, match=msg):
 frame.groupby()
 msg = "You have to supply one of 'by' and 'level'"
 with pytest.raises(TypeError, match=msg):
 frame.groupby(by=None, level=None)
 @pytest.mark.parametrize('sort,labels', [
 [True, [2, 2, 2, 0, 0, 1, 1, 3, 3, 3]],
 [False, [0, 0, 0, 1, 1, 2, 2, 3, 3, 3]]
 ])
 def test_level_preserve_order(self, sort, labels, mframe):
 # GH 17537
 grouped = mframe.groupby(level=0, sort=sort)
 exp_labels = np.array(labels, np.intp)
 assert_almost_equal(grouped.grouper.labels[0], exp_labels)
 def test_grouping_labels(self, mframe):
 grouped = mframe.groupby(mframe.index.get_level_values(0))
 exp_labels = np.array([2, 2, 2, 0, 0, 1, 1, 3, 3, 3], dtype=np.intp)
 assert_almost_equal(grouped.grouper.labels[0], exp_labels)
 def test_list_grouper_with_nat(self):
 # GH 14715
 df = pd.DataFrame({'date': pd.date_range('1/1/2011',
 periods=365, freq='D')})
 df.iloc[-1] = pd.NaT
 grouper = pd.Grouper(key='date', freq='AS')
 # Grouper in a list grouping
 result = df.groupby([grouper])
 expected = {pd.Timestamp('2011-01-01'): pd.Index(list(range(364)))}
 tm.assert_dict_equal(result.groups, expected)
 # Test case without a list
 result = df.groupby(grouper)
 expected = {pd.Timestamp('2011-01-01'): 365}
 tm.assert_dict_equal(result.groups, expected)
# get_group
# --------------------------------
class TestGetGroup():
 def test_get_group(self):
 # GH 5267
 # be datelike friendly
 df = DataFrame({'DATE': pd.to_datetime(
 ['10-Oct-2013', '10-Oct-2013', '10-Oct-2013', '11-Oct-2013',
 '11-Oct-2013', '11-Oct-2013']),
 'label': ['foo', 'foo', 'bar', 'foo', 'foo', 'bar'],
 'VAL': [1, 2, 3, 4, 5, 6]})
 g = df.groupby('DATE')
 key = list(g.groups)[0]
 result1 = g.get_group(key)
 result2 = g.get_group(Timestamp(key).to_pydatetime())
 result3 = g.get_group(str(Timestamp(key)))
 assert_frame_equal(result1, result2)
 assert_frame_equal(result1, result3)
 g = df.groupby(['DATE', 'label'])
 key = list(g.groups)[0]
 result1 = g.get_group(key)
 result2 = g.get_group((Timestamp(key[0]).to_pydatetime(), key[1]))
 result3 = g.get_group((str(Timestamp(key[0])), key[1]))
 assert_frame_equal(result1, result2)
 assert_frame_equal(result1, result3)
 # must pass a same-length tuple with multiple keys
 msg = "must supply a tuple to get_group with multiple grouping keys"
 with pytest.raises(ValueError, match=msg):
 g.get_group('foo')
 with pytest.raises(ValueError, match=msg):
 g.get_group(('foo'))
 msg = ("must supply a same-length tuple to get_group with multiple"
 " grouping keys")
 with pytest.raises(ValueError, match=msg):
 g.get_group(('foo', 'bar', 'baz'))
 def test_get_group_empty_bins(self, observed):
 d = pd.DataFrame([3, 1, 7, 6])
 bins = [0, 5, 10, 15]
 g = d.groupby(pd.cut(d[0], bins), observed=observed)
 # TODO: should prob allow a str of Interval work as well
 # IOW '(0, 5]'
 result = g.get_group(pd.Interval(0, 5))
 expected = DataFrame([3, 1], index=[0, 1])
 assert_frame_equal(result, expected)
 msg = r"Interval\(10, 15, closed='right'\)"
 with pytest.raises(KeyError, match=msg):
 g.get_group(pd.Interval(10, 15))
 def test_get_group_grouped_by_tuple(self):
 # GH 8121
 df = DataFrame([[(1, ), (1, 2), (1, ), (1, 2)]], index=['ids']).T
 gr = df.groupby('ids')
 expected = DataFrame({'ids': [(1, ), (1, )]}, index=[0, 2])
 result = gr.get_group((1, ))
 assert_frame_equal(result, expected)
 dt = pd.to_datetime(['2010-01-01', '2010-01-02', '2010-01-01',
 '2010-01-02'])
 df = DataFrame({'ids': [(x, ) for x in dt]})
 gr = df.groupby('ids')
 result = gr.get_group(('2010-01-01', ))
 expected = DataFrame({'ids': [(dt[0], ), (dt[0], )]}, index=[0, 2])
 assert_frame_equal(result, expected)
 def test_groupby_with_empty(self):
 index = pd.DatetimeIndex(())
 data = ()
 series = pd.Series(data, index)
 grouper = pd.Grouper(freq='D')
 grouped = series.groupby(grouper)
 assert next(iter(grouped), None) is None
 def test_groupby_with_single_column(self):
 df = pd.DataFrame({'a': list('abssbab')})
 tm.assert_frame_equal(df.groupby('a').get_group('a'), df.iloc[[0, 5]])
 # GH 13530
 exp = pd.DataFrame(index=pd.Index(['a', 'b', 's'], name='a'))
 tm.assert_frame_equal(df.groupby('a').count(), exp)
 tm.assert_frame_equal(df.groupby('a').sum(), exp)
 tm.assert_frame_equal(df.groupby('a').nth(1), exp)
 def test_gb_key_len_equal_axis_len(self):
 # GH16843
 # test ensures that index and column keys are recognized correctly
 # when number of keys equals axis length of groupby
 df = pd.DataFrame([['foo', 'bar', 'B', 1],
 ['foo', 'bar', 'B', 2],
 ['foo', 'baz', 'C', 3]],
 columns=['first', 'second', 'third', 'one'])
 df = df.set_index(['first', 'second'])
 df = df.groupby(['first', 'second', 'third']).size()
 assert df.loc[('foo', 'bar', 'B')] == 2
 assert df.loc[('foo', 'baz', 'C')] == 1
# groups & iteration
# --------------------------------
class TestIteration():
 def test_groups(self, df):
 grouped = df.groupby(['A'])
 groups = grouped.groups
 assert groups is grouped.groups # caching works
 for k, v in grouped.groups.items():
 assert (df.loc[v]['A'] == k).all()
 grouped = df.groupby(['A', 'B'])
 groups = grouped.groups
 assert groups is grouped.groups # caching works
 for k, v in grouped.groups.items():
 assert (df.loc[v]['A'] == k[0]).all()
 assert (df.loc[v]['B'] == k[1]).all()
 def test_grouping_is_iterable(self, tsframe):
 # this code path isn't used anywhere else
 # not sure it's useful
 grouped = tsframe.groupby([lambda x: x.weekday(), lambda x: x.year])
 # test it works
 for g in grouped.grouper.groupings[0]:
 pass
 def test_multi_iter(self):
 s = Series(np.arange(6))
 k1 = np.array(['a', 'a', 'a', 'b', 'b', 'b'])
 k2 = np.array(['1', '2', '1', '2', '1', '2'])
 grouped = s.groupby([k1, k2])
 iterated = list(grouped)
 expected = [('a', '1', s[[0, 2]]), ('a', '2', s[[1]]),
 ('b', '1', s[[4]]), ('b', '2', s[[3, 5]])]
 for i, ((one, two), three) in enumerate(iterated):
 e1, e2, e3 = expected[i]
 assert e1 == one
 assert e2 == two
 assert_series_equal(three, e3)
 def test_multi_iter_frame(self, three_group):
 k1 = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
 k2 = np.array(['1', '2', '1', '2', '1', '2'])
 df = DataFrame({'v1': np.random.randn(6),
 'v2': np.random.randn(6),
 'k1': k1, 'k2': k2},
 index=['one', 'two', 'three', 'four', 'five', 'six'])
 grouped = df.groupby(['k1', 'k2'])
 # things get sorted!
 iterated = list(grouped)
 idx = df.index
 expected = [('a', '1', df.loc[idx[[4]]]),
 ('a', '2', df.loc[idx[[3, 5]]]),
 ('b', '1', df.loc[idx[[0, 2]]]),
 ('b', '2', df.loc[idx[[1]]])]
 for i, ((one, two), three) in enumerate(iterated):
 e1, e2, e3 = expected[i]
 assert e1 == one
 assert e2 == two
 assert_frame_equal(three, e3)
 # don't iterate through groups with no data
 df['k1'] = np.array(['b', 'b', 'b', 'a', 'a', 'a'])
 df['k2'] = np.array(['1', '1', '1', '2', '2', '2'])
 grouped = df.groupby(['k1', 'k2'])
 groups = {key: gp for key, gp in grouped}
 assert len(groups) == 2
 # axis = 1
 three_levels = three_group.groupby(['A', 'B', 'C']).mean()
 grouped = three_levels.T.groupby(axis=1, level=(1, 2))
 for key, group in grouped:
 pass
 def test_dictify(self, df):
 dict(iter(df.groupby('A')))
 dict(iter(df.groupby(['A', 'B'])))
 dict(iter(df['C'].groupby(df['A'])))
 dict(iter(df['C'].groupby([df['A'], df['B']])))
 dict(iter(df.groupby('A')['C']))
 dict(iter(df.groupby(['A', 'B'])['C']))
 def test_groupby_with_small_elem(self):
 # GH 8542
 # length=2
 df = pd.DataFrame({'event': ['start', 'start'],
 'change': [1234, 5678]},
 index=pd.DatetimeIndex(['2014-09-10', '2013-10-10']))
 grouped = df.groupby([pd.Grouper(freq='M'), 'event'])
 assert len(grouped.groups) == 2
 assert grouped.ngroups == 2
 assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups
 assert (pd.Timestamp('2013-10-31'), 'start') in grouped.groups
 res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
 tm.assert_frame_equal(res, df.iloc[[0], :])
 res = grouped.get_group((pd.Timestamp('2013-10-31'), 'start'))
 tm.assert_frame_equal(res, df.iloc[[1], :])
 df = pd.DataFrame({'event': ['start', 'start', 'start'],
 'change': [1234, 5678, 9123]},
 index=pd.DatetimeIndex(['2014-09-10', '2013-10-10',
 '2014-09-15']))
 grouped = df.groupby([pd.Grouper(freq='M'), 'event'])
 assert len(grouped.groups) == 2
 assert grouped.ngroups == 2
 assert (pd.Timestamp('2014-09-30'), 'start') in grouped.groups
 assert (pd.Timestamp('2013-10-31'), 'start') in grouped.groups
 res = grouped.get_group((pd.Timestamp('2014-09-30'), 'start'))
 tm.assert_frame_equal(res, df.iloc[[0, 2], :])
 | |
| 
	{
 content: "\\f1cd";
}
.uk-icon-circle-o-notch:before {
 content: "\\f1ce";
}
.uk-icon-ra:before,
.uk-icon-rebel:before {
 content: "\\f1d0";
}
.uk-icon-ge:before,
.uk-icon-empire:before {
 content: "\\f1d1";
}
.uk-icon-git-square:before {
 content: "\\f1d2";
}
.uk-icon-git:before {
 content: "\\f1d3";
}
.uk-icon-hacker-news:before {
 content: "\\f1d4";
}
.uk-icon-tencent-weibo:before {
 content: "\\f1d5";
}
.uk-icon-qq:before {
 content: "\\f1d6";
}
.uk-icon-wechat:before,
.uk-icon-weixin:before {
 content: "\\f1d7";
}
.uk-icon-send:before,
.uk-icon-paper-plane:before {
 content: "\\f1d8";
}
.uk-icon-send-o:before,
.uk-icon-paper-plane-o:before {
 content: "\\f1d9";
}
.uk-icon-history:before {
 content: "\\f1da";
}
.uk-icon-genderless:before,
.uk-icon-circle-thin:before {
 content: "\\f1db";
}
.uk-icon-header:before {
 content: "\\f1dc";
}
.uk-icon-paragraph:before {
 content: "\\f1dd";
}
.uk-icon-sliders:before {
 content: "\\f1de";
}
.uk-icon-share-alt:before {
 content: "\\f1e0";
}
.uk-icon-share-alt-square:before {
 content: "\\f1e1";
}
.uk-icon-bomb:before {
 content: "\\f1e2";
}
.uk-icon-soccer-ball-o:before,
.uk-icon-futbol-o:before {
 content: "\\f1e3";
}
.uk-icon-tty:before {
 content: "\\f1e4";
}
.uk-icon-binoculars:before {
 content: "\\f1e5";
}
.uk-icon-plug:before {
 content: "\\f1e6";
}
.uk-icon-slideshare:before {
 content: "\\f1e7";
}
.uk-icon-twitch:before {
 content: "\\f1e8";
}
.uk-icon-yelp:before {
 content: "\\f1e9";
}
.uk-icon-newspaper-o:before {
 content: "\\f1ea";
}
.uk-icon-wifi:before {
 content: "\\f1eb";
}
.uk-icon-calculator:before {
 content: "\\f1ec";
}
.uk-icon-paypal:before {
 content: "\\f1ed";
}
.uk-icon-google-wallet:before {
 content: "\\f1ee";
}
.uk-icon-cc-visa:before {
 content: "\\f1f0";
}
.uk-icon-cc-mastercard:before {
 content: "\\f1f1";
}
.uk-icon-cc-discover:before {
 content: "\\f1f2";
}
.uk-icon-cc-amex:before {
 content: "\\f1f3";
}
.uk-icon-cc-paypal:before {
 content: "\\f1f4";
}
.uk-icon-cc-stripe:before {
 content: "\\f1f5";
}
.uk-icon-bell-slash:before {
 content: "\\f1f6";
}
.uk-icon-bell-slash-o:before {
 content: "\\f1f7";
}
.uk-icon-trash:before {
 content: "\\f1f8";
}
.uk-icon-copyright:before {
 content: "\\f1f9";
}
.uk-icon-at:before {
 content: "\\f1fa";
}
.uk-icon-eyedropper:before {
 content: "\\f1fb";
}
.uk-icon-paint-brush:before {
 content: "\\f1fc";
}
.uk-icon-birthday-cake:before {
 content: "\\f1fd";
}
.uk-icon-area-chart:before {
 content: "\\f1fe";
}
.uk-icon-pie-chart:before {
 content: "\\f200";
}
.uk-icon-line-chart:before {
 content: "\\f201";
}
.uk-icon-lastfm:before {
 content: "\\f202";
}
.uk-icon-lastfm-square:before {
 content: "\\f203";
}
.uk-icon-toggle-off:before {
 content: "\\f204";
}
.uk-icon-toggle-on:before {
 content: "\\f205";
}
.uk-icon-bicycle:before {
 content: "\\f206";
}
.uk-icon-bus:before {
 content: "\\f207";
}
.uk-icon-ioxhost:before {
 content: "\\f208";
}
.uk-icon-angellist:before {
 content: "\\f209";
}
.uk-icon-cc:before {
 content: "\\f20a";
}
.uk-icon-shekel:before,
.uk-icon-sheqel:before,
.uk-icon-ils:before {
 content: "\\f20b";
}
.uk-icon-meanpath:before {
 content: "\\f20c";
}
.uk-icon-buysellads:before {
 content: "\\f20d";
}
.uk-icon-connectdevelop:before {
 content: "\\f20e";
}
.uk-icon-dashcube:before {
 content: "\\f210";
}
.uk-icon-forumbee:before {
 content: "\\f211";
}
.uk-icon-leanpub:before {
 content: "\\f212";
}
.uk-icon-sellsy:before {
 content: "\\f213";
}
.uk-icon-shirtsinbulk:before {
 content: "\\f214";
}
.uk-icon-simplybuilt:before {
 content: "\\f215";
}
.uk-icon-skyatlas:before {
 content: "\\f216";
}
.uk-icon-cart-plus:before {
 content: "\\f217";
}
.uk-icon-cart-arrow-down:before {
 content: "\\f218";
}
.uk-icon-diamond:before {
 content: "\\f219";
}
.uk-icon-ship:before {
 content: "\\f21a";
}
.uk-icon-user-secret:before {
 content: "\\f21b";
}
.uk-icon-motorcycle:before {
 content: "\\f21c";
}
.uk-icon-street-view:before {
 content: "\\f21d";
}
.uk-icon-heartbeat:before {
 content: "\\f21e";
}
.uk-icon-venus:before {
 content: "\\f221";
}
.uk-icon-mars:before {
 content: "\\f222";
}
.uk-icon-mercury:before {
 content: "\\f223";
}
.uk-icon-transgender:before {
 content: "\\f224";
}
.uk-icon-transgender-alt:before {
 content: "\\f225";
}
.uk-icon-venus-double:before {
 content: "\\f226";
}
.uk-icon-mars-double:before {
 content: "\\f227";
}
.uk-icon-venus-mars:before {
 content: "\\f228";
}
.uk-icon-mars-stroke:before {
 content: "\\f229";
}
.uk-icon-mars-stroke-v:before {
 content: "\\f22a";
}
.uk-icon-mars-stroke-h:before {
 content: "\\f22b";
}
.uk-icon-neuter:before {
 content: "\\f22c";
}
.uk-icon-facebook-official:before {
 content: "\\f230";
}
.uk-icon-pinterest-p:before {
 content: "\\f231";
}
.uk-icon-whatsapp:before {
 content: "\\f232";
}
.uk-icon-server:before {
 content: "\\f233";
}
.uk-icon-user-plus:before {
 content: "\\f234";
}
.uk-icon-user-times:before {
 content: "\\f235";
}
.uk-icon-hotel:before,
.uk-icon-bed:before {
 content: "\\f236";
}
.uk-icon-viacoin:before {
 content: "\\f237";
}
.uk-icon-train:before {
 content: "\\f238";
}
.uk-icon-subway:before {
 content: "\\f239";
}
.uk-icon-medium-logo:before {
 content: "\\f23a";
}
.uk-icon-500px:before {
 content: "\\f26e";
}
.uk-icon-amazon:before {
 content: "\\f270";
}
.uk-icon-balance-scale:before {
 content: "\\f24e";
}
.uk-icon-battery-empty:before,
.uk-icon-battery-0:before {
 content: "\\f244";
}
.uk-icon-battery-quarter:before,
.uk-icon-battery-1:before {
 content: "\\f243";
}
.uk-icon-battery-half:before,
.uk-icon-battery-2:before {
 content: "\\f242";
}
.uk-icon-battery-three-quarters:before,
.uk-icon-battery-3:before {
 content: "\\f241";
}
.uk-icon-battery-full:before,
.uk-icon-battery-4:before {
 content: "\\f240";
}
.uk-icon-black-tie:before {
 content: "\\f27e";
}
.uk-icon-calendar-check-o:before {
 content: "\\f274";
}
.uk-icon-calendar-minus-o:before {
 content: "\\f272";
}
.uk-icon-calendar-plus-o:before {
 content: "\\f271";
}
.uk-icon-calendar-times-o:before {
 content: "\\f273";
}
.uk-icon-cc-diners-club:before {
 content: "\\f24c";
}
.uk-icon-cc-jcb:before {
 content: "\\f24b";
}
.uk-icon-chrome:before {
 content: "\\f268";
}
.uk-icon-clone:before {
 content: "\\f24d";
}
.uk-icon-commenting:before {
 content: "\\f27a";
}
.uk-icon-commenting-o:before {
 content: "\\f27b";
}
.uk-icon-contao:before {
 content: "\\f26d";
}
.uk-icon-creative-commons:before {
 content: "\\f25e";
}
.uk-icon-expeditedssl:before {
 content: "\\f23e";
}
.uk-icon-firefox:before {
 content: "\\f269";
}
.uk-icon-fonticons:before {
 content: "\\f280";
}
.uk-icon-get-pocket:before {
 content: "\\f265";
}
.uk-icon-gg:before {
 content: "\\f260";
}
.uk-icon-gg-circle:before {
 content: "\\f261";
}
.uk-icon-hand-lizard-o:before {
 content: "\\f258";
}
.uk-icon-hand-stop-o:before,
.uk-icon-hand-paper-o:before {
 content: "\\f256";
}
.uk-icon-hand-peace-o:before {
 content: "\\f25b";
}
.uk-icon-hand-pointer-o:before {
 content: "\\f25a";
}
.uk-icon-hand-grab-o:before,
.uk-icon-hand-rock-o:before {
 content: "\\f255";
}
.uk-icon-hand-scissors-o:before {
 content: "\\f257";
}
.uk-icon-hand-spock-o:before {
 content: "\\f259";
}
.uk-icon-hourglass:before {
 content: "\\f254";
}
.uk-icon-hourglass-o:before {
 content: "\\f250";
}
.uk-icon-hourglass-1:before,
.uk-icon-hourglass-start:before {
 content: "\\f251";
}
.uk-icon-hourglass-2:before,
.uk-icon-hourglass-half:before {
 content: "\\f252";
}
.uk-icon-hourglass-3:before,
.uk-icon-hourglass-end:before {
 content: "\\f253";
}
.uk-icon-houzz:before {
 content: "\\f27c";
}
.uk-icon-i-cursor:before {
 content: "\\f246";
}
.uk-icon-industry:before {
 content: "\\f275";
}
.uk-icon-internet-explorer:before {
 content: "\\f26b";
}
.uk-icon-map:before {
 content: "\\f279";
}
.uk-icon-map-o:before {
 content: "\\f278";
}
.uk-icon-map-pin:before {
 content: "\\f276";
}
.uk-icon-map-signs:before {
 content: "\\f277";
}
.uk-icon-mouse-pointer:before {
 content: "\\f245";
}
.uk-icon-object-group:before {
 content: "\\f247";
}
.uk-icon-object-ungroup:before {
 content: "\\f248";
}
.uk-icon-odnoklassniki:before {
 content: "\\f263";
}
.uk-icon-odnoklassniki-square:before {
 content: "\\f264";
}
.uk-icon-opencart:before {
 content: "\\f23d";
}
.uk-icon-opera:before {
 content: "\\f26a";
}
.uk-icon-optin-monster:before {
 content: "\\f23c";
}
.uk-icon-registered:before {
 content: "\\f25d";
}
.uk-icon-safari:before {
 content: "\\f267";
}
.uk-icon-sticky-note:before {
 content: "\\f249";
}
.uk-icon-sticky-note-o:before {
 content: "\\f24a";
}
.uk-icon-tv:before,
.uk-icon-television:before {
 content: "\\f26c";
}
.uk-icon-trademark:before {
 content: "\\f25c";
}
.uk-icon-tripadvisor:before {
 content: "\\f262";
}
.uk-icon-vimeo:before {
 content: "\\f27d";
}
.uk-icon-wikipedia-w:before {
 content: "\\f266";
}
.uk-icon-yc:before,
.uk-icon-y-combinator:before {
 content: "\\f23b";
}
.uk-icon-yc-square:before,
.uk-icon-y-combinator-square:before {
 content: "\\f1d4";
}
.uk-icon-bluetooth:before {
 content: "\\f293";
}
.uk-icon-bluetooth-b:before {
 content: "\\f294";
}
.uk-icon-codiepie:before {
 content: "\\f284";
}
.uk-icon-credit-card-alt:before {
 content: "\\f283";
}
.uk-icon-edge:before {
 content: "\\f282";
}
.uk-icon-fort-awesome:before {
 content: "\\f286";
}
.uk-icon-hashtag:before {
 content: "\\f292";
}
.uk-icon-mixcloud:before {
 content: "\\f289";
}
.uk-icon-modx:before {
 content: "\\f285";
}
.uk-icon-pause-circle:before {
 content: "\\f28b";
}
.uk-icon-pause-circle-o:before {
 content: "\\f28c";
}
.uk-icon-percent:before {
 content: "\\f295";
}
.uk-icon-product-hunt:before {
 content: "\\f288";
}
.uk-icon-reddit-alien:before {
 content: "\\f281";
}
.uk-icon-scribd:before {
 content: "\\f28a";
}
.uk-icon-shopping-bag:before {
 content: "\\f290";
}
.uk-icon-shopping-basket:before {
 content: "\\f291";
}
.uk-icon-stop-circle:before {
 content: "\\f28d";
}
.uk-icon-stop-circle-o:before {
 content: "\\f28e";
}
.uk-icon-usb:before {
 content: "\\f287";
}
.uk-icon-american-sign-language-interpreting:before,
.uk-icon-asl-interpreting:before {
 content: "\\f2a3";
}
.uk-icon-assistive-listening-systems:before {
 content: "\\f2a2";
}
.uk-icon-audio-description:before {
 content: "\\f29e";
}
.uk-icon-blind:before {
 content: "\\f29d";
}
.uk-icon-braille:before {
 content: "\\f2a1";
}
.uk-icon-deaf:before,
.uk-icon-deafness:before {
 content: "\\f2a4";
}
.uk-icon-envira:before {
 content: "\\f299";
}
.uk-icon-font-awesome:before,
.uk-icon-fa:before {
 content: "\\f2b4";
}
.uk-icon-first-order:before {
 content: "\\f2b0";
}
.uk-icon-gitlab:before {
 content: "\\f296";
}
.uk-icon-glide:before {
 content: "\\f2a5";
}
.uk-icon-glide-g:before {
 content: "\\f2a6";
}
.uk-icon-hard-of-hearing:before {
 content: "\\f2a4";
}
.uk-icon-low-vision:before {
 content: "\\f2a8";
}
.uk-icon-question-circle-o:before {
 content: "\\f29c";
}
.uk-icon-sign-language:before,
.uk-icon-signing:before {
 content: "\\f2a7";
}
.uk-icon-snapchat:before {
 content: "\\f2ab";
}
.uk-icon-snapchat-ghost:before {
 content: "\\f2ac";
}
.uk-icon-snapchat-square:before {
 content: "\\f2ad";
}
.uk-icon-themeisle:before {
 content: "\\f2b2";
}
.uk-icon-universal-access:before {
 content: "\\f29a";
}
.uk-icon-viadeo:before {
 content: "\\f2a9";
}
.uk-icon-viadeo-square:before {
 content: "\\f2aa";
}
.uk-icon-volume-control-phone:before {
 content: "\\f2a0";
}
.uk-icon-wheelchair-alt:before {
 content: "\\f29b";
}
.uk-icon-wpbeginner:before {
 content: "\\f297";
}
.uk-icon-wpforms:before {
 content: "\\f298";
}
.uk-icon-yoast:before {
 content: "\\f2b1";
}
/* ========================================================================
 Component: Close
 ========================================================================== */
/*
 * Removes inner padding and border in Firefox 4+.
 */
.uk-close::-moz-focus-inner {
 border: 0;
 padding: 0;
}
/*
 * 1. Correct inability to style clickable `input` types in iOS.
 * 2. Remove margins in Chrome, Safari and Opera.
 * 3. Remove borders for `button`.
 * 4. Address `overflow` set to `hidden` in IE 8/9/10/11.
 * 5. Correct `font` properties and `color` not being inherited for `button`.
 * 6. Address inconsistent `text-transform` inheritance which is only inherit in Firefox and IE
 * 7. Remove default `button` padding and background color
 * 8. Style
 */
.uk-close {
 /* 1 */
 -webkit-appearance: none;
 /* 2 */
 margin: 0;
 /* 3 */
 border: none;
 /* 4 */
 overflow: visible;
 /* 5 */
 font: inherit;
 color: inherit;
 /* 6 */
 text-transform: none;
 /* 7 */
 padding: 0;
 background: transparent;
 /* 8 */
 display: inline-block;
 box-sizing: content-box;
 width: 20px;
 line-height: 20px;
 text-align: center;
 vertical-align: middle;
 opacity: 0.3;
}
/* Icon */
.uk-close:after {
 display: block;
 content: "\\f00d";
 font-family: FontAwesome;
}
/*
 * Hover
 * 1. Apply hover style also to focus state
 * 2. Remove default focus style
 * 3. Required for `a` elements
 */
.uk-close:hover,
.uk-close:focus {
 opacity: 0.5;
 /* 2 */
 outline: none;
 /* 3 */
 color: inherit;
 text-decoration: none;
 cursor: pointer;
}
/* Modifier
 ========================================================================== */
.uk-close-alt {
 padding: 2px;
 border-radius: 50%;
 background: #fff;
 opacity: 1;
 box-shadow: 0 0 0 1px rgba(0, 0, 0, 0.1), 0 0 6px rgba(0, 0, 0, 0.3);
}
/* Hover */
.uk-close-alt:hover,
.uk-close-alt:focus {
 opacity: 1;
}
/* Icon */
.uk-close-alt:after {
 opacity: 0.5;
}
.uk-close-alt:hover:after,
.uk-close-alt:focus:after {
 opacity: 0.8;
}
/* ========================================================================
 Component: Badge
 ========================================================================== */
.uk-badge {
 display: inline-block;
 padding: 0 5px;
 background: #009dd8;
 font-size: 10px;
 font-weight: bold;
 line-height: 14px;
 color: #fff;
 text-align: center;
 vertical-align: middle;
 text-transform: none;
 border: 1px solid rgba(0, 0, 0, 0.2);
 border-bottom-color: rgba(0, 0, 0, 0.3);
 background-origin: border-box;
 background-image: -webkit-linear-gradient(top, #00b4f5, #008dc5);
 background-image: linear-gradient(to bottom, #00b4f5, #008dc5);
 border-radius: 2px;
 text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.2);
}
/*
 * Keep color when badge is a link
 */
a.uk-badge:hover {
 color: #fff;
}
/* Modifier: `uk-badge-notification`
 ========================================================================== */
.uk-badge-notification {
 box-sizing: border-box;
 min-width: 18px;
 border-radius: 500px;
 font-size: 12px;
 line-height: 18px;
}
/* Color modifier
 ========================================================================== */
/*
 * Modifier: `uk-badge-success`
 */
.uk-badge-success {
 background-color: #82bb42;
 background-image: -webkit-linear-gradient(top, #9fd256, #6fac34);
 background-image: linear-gradient(to bottom, #9fd256, #6fac34);
}
/*
 * Modifier: `uk-badge-warning`
 */
.uk-badge-warning {
 background-color: #f9a124;
 background-image: -webkit-linear-gradient(top, #fbb450, #f89406);
 background-image: linear-gradient(to bottom, #fbb450, #f89406);
}
/*
 * Modifier: `uk-badge-danger`
 */
.uk-badge-danger {
 background-color: #d32c46;
 background-image: -webkit-linear-gradient(top, #ee465a, #c11a39);
 background-image: linear-gradient(to bottom, #ee465a, #c11a39);
}
/* ========================================================================
 Component: Alert
 ========================================================================== */
.uk-alert {
 margin-bottom: 15px;
 padding: 10px;
 background: #ebf7fd;
 color: #2d7091;
 border: 1px solid rgba(45, 112, 145, 0.3);
 border-radius: 4px;
 text-shadow: 0 1px 0 #fff;
}
/*
 * Add margin if adjacent element
 */
* + .uk-alert {
 margin-top: 15px;
}
/*
 * Remove margin from the last-child
 */
.uk-alert > :last-child {
 margin-bottom: 0;
}
/*
 * Keep color for headings if the default heading color is changed
 */
.uk-alert h1,
.uk-alert h2,
.uk-alert h3,
.uk-alert h4,
.uk-alert h5,
.uk-alert h6 {
 color: inherit;
}
/* Close in alert
 ========================================================================== */
.uk-alert > .uk-close:first-child {
 float: right;
}
/*
 * Remove margin from adjacent element
 */
.uk-alert > .uk-close:first-child + * {
 margin-top: 0;
}
/* Modifier: `uk-alert-success`
 ========================================================================== */
.uk-alert-success {
 background: #f2fae3;
 color: #659f13;
 border-color: rgba(101, 159, 19, 0.3);
}
/* Modifier: `uk-alert-warning`
 ========================================================================== */
.uk-alert-warning {
 background: #fffceb;
 color: #e28327;
 border-color: rgba(226, 131, 39, 0.3);
}
/* Modifier: `uk-alert-danger`
 ========================================================================== */
.uk-alert-danger {
 background: #fff1f0;
 color: #d85030;
 border-color: rgba(216, 80, 48, 0.3);
}
/* Modifier: `uk-alert-large`
 ========================================================================== */
.uk-alert-large {
 padding: 20px;
}
.uk-alert-large > .uk-close:first-child {
 margin: -10px -10px 0 0;
}
/* ========================================================================
 Component: Thumbnail
 ========================================================================== */
/*
 * 1. Container width fits its content
 * 2. Responsive behavior
 * 3. Corrects `max-width` behavior if padding and border are used
 * 4. Required for `figure` element
 * 5. Style
 */
.uk-thumbnail {
 /* 1 */
 display: inline-block;
 /* 2 */
 max-width: 100%;
 /* 3 */
 box-sizing: border-box;
 /* 3 */
 margin: 0;
 /* 4 */
 padding: 4px;
 border: 1px solid #ddd;
 background: #fff;
 border-radius: 4px;
 box-shadow: 0 1px 3px rgba(0, 0, 0, 0.05);
}
/*
 * Hover state for `a` elements
 * 1. Apply hover style also to focus state
 * 2. Needed for caption
 * 3. Remove default focus style
 */
a.uk-thumbnail:hover,
a.uk-thumbnail:focus | |
| 
	from __future__ import absolute_import, division, unicode_literals
import re
import time, datetime
import threading
from . import settings
import xbmc
import xbmcgui
from . import kodigui
from .smoothstreams import timeutils, chanutils, skinutils, schedule, authutils, windowutils
from . import util
from .kodijsonrpc import builtin
from .util import T
KEY_MOVE_SET = frozenset(
 (
 xbmcgui.ACTION_MOVE_LEFT,
 xbmcgui.ACTION_MOVE_RIGHT,
 xbmcgui.ACTION_MOVE_UP,
 xbmcgui.ACTION_MOVE_DOWN
 )
)
class SeekDialog(kodigui.BaseDialog, util.CronReceiver):
 xmlFile = 'script-smoothstreams-v3-video_osd.xml'
 path = util.ADDON.getAddonInfo('path')
 theme = 'Main'
 res = '1080i'
 width = 1920
 height = 1080
 MAIN_BUTTON_ID = 100
 SEEK_IMAGE_ID = 200
 POSITION_IMAGE_ID = 201
 SELECTION_INDICATOR = 202
 BIF_IMAGE_ID = 300
 SEEK_IMAGE_WIDTH = 1920
 INFO_BUTTON_ID = 401
 SHUFFLE_BUTTON_ID = 402
 SETTINGS_BUTTON_ID = 403
 PREV_BUTTON_ID = 404
 SKIP_BACK_BUTTON_ID = 405
 PLAY_PAUSE_BUTTON_ID = 406
 STOP_BUTTON_ID = 407
 SKIP_FORWARD_BUTTON_ID = 408
 NEXT_BUTTON_ID = 409
 PLAYLIST_BUTTON_ID = 410
 EVENTS_PLAYLIST_BUTTON_ID = 411
 EPG_BUTTON_ID = 412
 BIG_SEEK_GROUP_ID = 500
 BIG_SEEK_LIST_ID = 501
 NO_OSD_BUTTON_ID = 800
 BAR_X = 0
 BAR_Y = 921
 BAR_RIGHT = 1920
 BAR_BOTTOM = 969
 HIDE_DELAY = 4 # This uses the Cron tick so is +/- 1 second accurate
 def __init__(self, *args, **kwargs):
 kodigui.BaseDialog.__init__(self, *args, **kwargs)
 self.osdHandler = kwargs.get('osdHandler')
 self.live = True
 self.initialVideoSettings = {}
 self.initialAudioStream = None
 self.initialSubtitleStream = None
 self.bifURL = None
 self.baseURL = None
 self.hasBif = True
 self.channel = 0
 self._duration = 0
 self.offset = 0
 self.selectedOffset = 0
 self.bigSeekOffset = 0
 self.title = ''
 self.title2 = ''
 self.fromSeek = 0
 self.initialized = False
 self.playlistDialog = None
 self.eventsplaylistDialog = None
 self.timeout = None
 self.hasDialog = False
 self.lastFocusID = None
 self.playlistDialogVisible = False
 self._delayedSeekThread = None
 self._delayedSeekTimeout = 0
 self.program = self.osdHandler.getProgram()
 self.secsComplete = 0
 @property
 def player(self):
 return self.osdHandler.player
 def resetTimeout(self):
 self.timeout = time.time() + self.HIDE_DELAY
 def trueOffset(self):
 return self.osdHandler.getRatioComplete(self.channel)
 def onFirstInit(self):
 try:
 self._onFirstInit()
 except RuntimeError:
 util.ERROR(hide_tb=True)
 self.started = False
 def _onFirstInit(self):
 settings.CRON.registerReceiver(self)
 self.resetTimeout()
 self.seekbarControl = self.getControl(self.SEEK_IMAGE_ID)
 self.positionControl = self.getControl(self.POSITION_IMAGE_ID)
 self.bifImageControl = self.getControl(self.BIF_IMAGE_ID)
 self.selectionIndicator = self.getControl(self.SELECTION_INDICATOR)
 self.selectionBox = self.getControl(203)
 self.bigSeekControl = kodigui.ManagedControlList(self, self.BIG_SEEK_LIST_ID, 12)
 self.bigSeekGroupControl = self.getControl(self.BIG_SEEK_GROUP_ID)
 self.initialized = True
 self.setBoolProperty('subtitle.downloads', util.getSetting('subtitle_downloads', False))
 self.updateProperties()
 # self.videoSettingsHaveChanged()
 self.started = True
 self.update()
 def onReInit(self):
 chanutils.createChannelsList()
 self.resetTimeout()
 self.updateProperties()
 # self.videoSettingsHaveChanged()
 self.updateProgress()
 def onAction(self, action):
 try:
 self.resetTimeout()
 controlID = self.getFocusId()
 if action.getId() in KEY_MOVE_SET:
 self.setProperty('mouse.mode', '')
 if not controlID:
 self.setBigSeekShift()
 self.setFocusId(400)
 return
 elif action == xbmcgui.ACTION_MOUSE_MOVE:
 if not self.osdVisible():
 self.showOSD()
 self.setProperty('mouse.mode', '1')
 # if controlID == self.MAIN_BUTTON_ID:
 # 	if action == xbmcgui.ACTION_MOUSE_MOVE:
 # 		return self.seekMouse(action)
 # 	elif action in (xbmcgui.ACTION_MOVE_RIGHT, xbmcgui.ACTION_STEP_FORWARD):
 # 		return self.seekForward(10000)
 # 	elif action in (xbmcgui.ACTION_MOVE_LEFT, xbmcgui.ACTION_STEP_BACK):
 # 		return self.seekBack(10000)
 # elif action == xbmcgui.ACTION_MOVE_DOWN:
 # 	self.updateBigSeek()
 if controlID == self.NO_OSD_BUTTON_ID:
 if not self.live:
 if action == xbmcgui.ACTION_MOVE_LEFT:
 xbmc.executebuiltin('Action(StepBack)')
 if action == xbmcgui.ACTION_MOVE_RIGHT:
 xbmc.executebuiltin('Action(StepForward)')
 elif action in (xbmcgui.ACTION_MOVE_RIGHT, xbmcgui.ACTION_MOVE_LEFT, xbmcgui.ACTION_MOUSE_LEFT_CLICK):
 self.showOSD()
 self.setFocusId(400)
 elif action in (
 xbmcgui.ACTION_NEXT_ITEM,
 xbmcgui.ACTION_PREV_ITEM,
 xbmcgui.ACTION_BIG_STEP_FORWARD,
 xbmcgui.ACTION_BIG_STEP_BACK
 ):
 self.selectedOffset = self.trueOffset()
 self.setBigSeekShift()
 self.updateProgress()
 self.showOSD()
 self.setFocusId(400)
 # elif action ==xbmcgui.ACTION_SHOW_INFO:
 # 	xbmc.executebuiltin('Action(CodecInfo)')
 # elif action == xbmcgui.ACTION_SHOW_GUI:
 # 	self.showOSD()
 # elif action == xbmcgui.ACTION_SHOW_PLAYLIST:
 # 	self.showPlaylistDialog()
 # elif action == xbmcgui.ACTION_SHOW_VIDEOMENU:
 # 	xbmc.executebuiltin('ActivateWindow(OSDVideoSettings)')
 # elif action == xbmcgui.ACTION_SHOW_AUDIOMENU:
 # 	xbmc.executebuiltin('ActivateWindow(OSDAudioSettings)')
 elif action.getButtonCode() == 258127:
 xbmc.executebuiltin('Action(PlayerDebug)')
 elif action.getButtonCode() == 61519:
 # xbmc.executebuiltin('Action(PlayerProcessInfo)')
 xbmc.executebuiltin('Action(PlayerProcessInfo)')
 # elif controlID == self.BIG_SEEK_LIST_ID:
 # 	if action in (xbmcgui.ACTION_MOVE_RIGHT, xbmcgui.ACTION_BIG_STEP_FORWARD):
 # 		return self.updateBigSeek()
 # 	elif action in (xbmcgui.ACTION_MOVE_LEFT, xbmcgui.ACTION_BIG_STEP_BACK):
 # 		return self.updateBigSeek()
 if action.getButtonCode() == 61516:
 builtin.Action('CycleSubtitle')
 elif action.getButtonCode() == 61524:
 builtin.Action('ShowSubtitles')
 elif action == xbmcgui.ACTION_NEXT_ITEM or action == xbmcgui.ACTION_PAGE_UP:
 self.osdHandler.next()
 self.setBigSeekShift()
 self.update()
 elif action == xbmcgui.ACTION_PREV_ITEM or action == xbmcgui.ACTION_PAGE_DOWN:
 self.osdHandler.prev()
 self.setBigSeekShift()
 self.update()
 elif action in (xbmcgui.ACTION_PREVIOUS_MENU, xbmcgui.ACTION_NAV_BACK, xbmcgui.ACTION_STOP):
 if self.osdVisible():
 self.hideOSD()
 else:
 self.doClose()
 self.osdHandler.player.stop()
 return
 if self.checkChannelEntry(action):
 return
 except:
 util.ERROR()
 kodigui.BaseDialog.onAction(self, action)
 def onFocus(self, controlID):
 return
 def onClick(self, controlID):
 if controlID == self.MAIN_BUTTON_ID:
 # todo remove seek
 self.osdHandler.seek(self.selectedOffset)
 elif controlID == self.NO_OSD_BUTTON_ID:
 self.showOSD()
 # elif controlID == self.SETTINGS_BUTTON_ID:
 # 	self.handleDialog(self.showSettings)
 elif controlID == self.INFO_BUTTON_ID:
 xbmc.executebuiltin('Action(PlayerProcessInfo)')
 elif controlID == self.SHUFFLE_BUTTON_ID:
 self.osdHandler.previousChannel()
 elif controlID == self.PREV_BUTTON_ID:
 self.osdHandler.prev()
 elif controlID == self.STOP_BUTTON_ID:
 self.hideOSD()
 self.doClose()
 self.osdHandler.player.stop()
 elif controlID == self.NEXT_BUTTON_ID:
 self.osdHandler.next()
 elif controlID == self.EPG_BUTTON_ID:
 self.showEpgDialog()
 elif controlID == self.PLAYLIST_BUTTON_ID:
 self.showPlaylistDialog()
 elif controlID == self.EVENTS_PLAYLIST_BUTTON_ID:
 self.showEventsPlaylistDialog()
 elif controlID == self.SETTINGS_BUTTON_ID:
 self.handleDialog(self.optionsButtonClicked)
 elif controlID == self.BIG_SEEK_LIST_ID:
 self.bigSeekSelected()
 elif controlID == self.SKIP_BACK_BUTTON_ID:
 self.skipBack()
 elif controlID == self.SKIP_FORWARD_BUTTON_ID:
 self.skipForward()
 # elif controlID == self.INFO_BUTTON_ID:
 # 	xbmc.executebuiltin('Action(CodecInfo)')
 def doClose(self, delete=False):
 # place to hook in when playback is being left
 try:
 if self.playlistDialog:
 self.playlistDialog.doClose()
 if delete:
 del self.playlistDialog
 self.playlistDialog = None
 util.garbageCollect()
 finally:
 settings.CRON.cancelReceiver(self)
 kodigui.BaseDialog.doClose(self)
 def doChannelEntry(self, digit):
 window = windowutils.KodiChannelEntry('script-smoothstreams-v3-channel_entry.xml',
 util.ADDON.getAddonInfo('path'), 'Main', '1080i', viewManager=self,
 digit=digit)
 window.doModal()
 ret = None
 if window.set:
 ret = window.digits
 del window
 return ret
 def checkChannelEntry(self, action):
 if action.getId() >= xbmcgui.REMOTE_0 and action.getId() <= xbmcgui.REMOTE_9:
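 # REMOTE_0..REMOTE_9 are consecutive Kodi action ids (REMOTE_0 is 58), so
 # subtracting 58 below recovers the pressed digit as a string.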
 targetChannel = self.doChannelEntry(str(action.getId() - 58))
 return True
 return False
 def skipForward(self):
 return
 def skipBack(self):
 return
 def delayedSeek(self):
 return
 def _delayedSeek(self):
 return
 def handleDialog(self, func):
 self.hasDialog = True
 try:
 func()
 finally:
 self.resetTimeout()
 self.hasDialog = False
 def videoSettingsHaveChanged(self):
 changed = False
 return changed
 def repeatButtonClicked(self):
 return
 def shuffleButtonClicked(self):
 return
 def optionsButtonClicked(self): # Button currently commented out.
 # pass
 from . import dropdown
 options = []
 options.append({'key': 'sstv', 'display': 'SSTV Options'})
 options.append({'key': 'kodi_video', 'display': 'Video Options'})
 options.append({'key': 'kodi_audio', 'display': 'Audio Options'})
 choice = dropdown.showDropdown(options, (600, 1060), close_direction='down', pos_is_bottom=True,
 close_on_playback_ended=True)
 if not choice:
 return
 if choice['key'] == 'kodi_video':
 xbmc.executebuiltin('ActivateWindow(OSDVideoSettings)')
 elif choice['key'] == 'kodi_audio':
 xbmc.executebuiltin('ActivateWindow(OSDAudioSettings)')
 elif choice['key'] == 'sstv':
 self.showSettings()
 def subtitleButtonClicked(self):
 return
 def showSettings(self):
 stream = util.getSetting('server_type')
 qual = util.getSetting('high_def')
 region = util.getSetting('server_region', 'North America')
 server = authutils.servers['NA Mix']
 try:
 if region == 'North America':
 server = authutils.servers[util.getSetting('server_r0', 'NA Mix')]
 elif region == 'Europe':
 server = authutils.servers[util.getSetting('server_r1', 'Euro Mix')]
 elif region == 'Asia':
 server = authutils.servers[util.getSetting('server_r2', 'Asia Mix')]
 except:
 # unknown server detected, using NA mix
 util.setSetting('server_region', 'North America')
 util.setSetting('server_r0', 'NA Mix')
 util.setSetting('server_r1', 'Euro Mix')
 util.setSetting('server_r2', 'Asia Mix')
 pass
 util.openSettings()
 skinutils.setColours()
 new_region = util.getSetting('server_region', 'North America')
 new_server = authutils.servers['NA Mix']
 try:
 if new_region == 'North America':
 new_server = authutils.servers[util.getSetting('server_r0', 'NA Mix')]
 elif new_region == 'Europe':
 new_server = authutils.servers[util.getSetting('server_r1', 'Euro Mix')]
 elif new_region == 'Asia':
 new_server = authutils.servers[util.getSetting('server_r2', 'Asia Mix')]
 except:
 pass
 if stream != util.getSetting('server_type') or qual != util.getSetting(
 'high_def') or region != new_region or server != new_server:
 self.osdHandler.restartChannel()
 return
 def setBigSeekShift(self):
 closest = None
 for mli in self.bigSeekControl:
 if mli.dataSource > self.osdHandler.getRatioComplete(self.channel):
 break
 closest = mli
 if not closest:
 return
 self.bigSeekOffset = self.osdHandler.getRatioComplete(self.channel) - closest.dataSource
 pxOffset = int(
 self.osdHandler.getRatioComplete(self.channel) / float(self.osdHandler.getDuration(self.channel)) * 1920)
 self.bigSeekGroupControl.setPosition(-8 + pxOffset, 917)
 self.bigSeekControl.selectItem(closest.pos())
 # xbmc.sleep(100)
 def updateBigSeek(self):
 return
 def bigSeekSelected(self):
 return
 # self.setFocusId(self.MAIN_BUTTON_ID)
 def updateProperties(self, **kwargs):
 if not self.started:
 return
 if self.fromSeek:
 # self.setFocusId(self.MAIN_BUTTON_ID)
 self.fromSeek = 0
 self.setProperty('has.bif', True and '1' or '')
 self.setProperty('video.title', self.title)
 self.setProperty('video.title2', self.title2)
 self.setProperty('is.show', False and '1' or '')
 self.setProperty('time.left', util.timeDisplay(
 int(self.osdHandler.getDuration(self.channel)) - self.osdHandler.getRatioComplete(self.channel)))
 self.updateCurrent()
 # I think this is the coloured bar
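 # Split the programme duration into 12 equal offsets; each one becomes an item
 # in the big-seek list used for coarse seeking along the bar.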
 div = int((self.osdHandler.getDuration(self.channel)) / 12)
 items = []
 for x in range(12):
 offset = div * x
 items.append(kodigui.ManagedListItem(data_source=offset))
 self.bigSeekControl.reset()
 self.bigSeekControl.addItems(items)
 def updateCurrent(self):
 ratio = self.osdHandler.getRatioComplete(self.channel) / float(self.osdHandler.getDuration(self.channel))
 w = int(ratio * self.SEEK_IMAGE_WIDTH)
 self.selectionIndicator.setPosition(w, 896)
 self.positionControl.setWidth(w)
 # to = self.trueOffset()
 self.updateProgress()
 prog = settings.CHANNELSLIST[settings.CURCHAN - 1].dataSource
 self.setProperty('PlotOutline', prog.description)
 self.setProperty('Title', prog.title)
 self.setProperty('Genre', prog.category)
 self.setProperty('Fake', prog.fake)
 self.setProperty('StartTime', timeutils.secs2stringLocal_time(prog.start))
 self.setProperty('EndTime', timeutils.secs2stringLocal_time(prog.stop))
 self.setProperty('Duration', timeutils.secs2stringLocal_dur(prog.duration))
 self.setProperty('time.left', timeutils.secs2stringLocal_dur(
 int(self.osdHandler.getDuration(self.channel)) - self.osdHandler.getRatioComplete(self.channel)))
 self.setProperty('time.end', timeutils.secs2stringLocal(self.program.stop))
 self.setProperty('ChannelName', prog.channelName)
 self.setProperty('ChannelNumber', prog.channel_number)
 # self.setProperty('Genre', prog.)
 self.setProperty('time.current', timeutils.secs2stringLocal(timeutils.timeInDayLocalSeconds()))
 def seekForward(self, offset):
 return
 def seekBack(self, offset):
 return
 def seekMouse(self, action):
 return
 def setup(self, duration, channel=0, bif_url=None, title='', title2='', program='', live=True):
 self.title = title
 self.title2 = title2
 self.setProperty('video.title', title)
 self.setProperty('is.show', True and '1' or '')
 self.setProperty('has.playlist', self.osdHandler.playlist and '1' or '')
 self.setProperty('shuffled', (self.osdHandler.playlist) and '1' or '')
 self.channel = channel
 self.offset = 0
 self.live = live
 self._duration = duration
 self.setProperty('bif.image', bif_url if bif_url else self.osdHandler.getIcon(self.program.channelName,
 self.program.channel_number))
 self.bifURL = bif_url
 self.hasBif = True
 if self.hasBif:
 self.baseURL = re.sub(r'/\d+\?', '/{0}?', self.bifURL)
 self.update()
 self.program = program
 def update(self, offset=None, from_seek=False):
 self.updateProgress()
 @property
 def duration(self):
 try:
 return self._duration or int(self.osdHandler.player.getTotalTime() * 1000)
 except RuntimeError: # Not playing
 return 1
 def updateProgress(self):
 if not self.started:
 self.onFirstInit()
 ratio = self.osdHandler.getRatioComplete(self.channel) / float(self.osdHandler.getDuration(self.channel))
 w = int(ratio * self.SEEK_IMAGE_WIDTH)
 # seek time label
 self.selectionIndicator.setPosition(w, 896)
 if w < 51:
 self.selectionBox.setPosition(-50 + (50 - w), 0)
 elif w > 1869:
 self.selectionBox.setPosition(-100 + (1920 - w), 0)
 else:
 self.selectionBox.setPosition(-50, 0)
 self.setProperty('time.selection',
 timeutils.secs2stringLocal_time(self.osdHandler.getRatioComplete(self.channel)))
 # todo
 self.setProperty('time.left', timeutils.secs2stringLocal_dur(
 self.osdHandler.getDuration(self.channel) - self.osdHandler.getRatioComplete(self.channel)))
 self.bifImageControl.setPosition(1200, 25)
 self.bigSeekControl.setPosition(0, 0)
 self.getControl(302).setPosition(0, 965)
 # seek bar length (done as width)
 self.seekbarControl.setWidth(w)
 self.seekbarControl.setPosition(0, 1)
	<filename>skyfit.py
"""
Sky fitting.
These functions were ported from IDL codes used by
ACS/WFC reference files generation and statistics.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from astropy.io import fits
>>> import skyfit
Read EXT 1 of ACS/WFC superbias image:
>>> im = fits.getdata('x1o1958ej_bia.fits', 1)
Calculate clipped mean for each row and plot it:
>>> x = skyfit.total(im, 1)
>>> plt.plot(x)
Calculate mode and sigma of pixel distribution using
polynomial fitting:
>>> m, s = skyfit.msky(im, do_plot=True, verbose=True, ptitle='bias')
"""
from __future__ import division, print_function
# STDLIB
import logging
# THIRD-PARTY
import numpy as np
import scipy
from scipy import optimize
__organization__ = 'Space Telescope Science Institute'
module_logger = logging.getLogger('skyfit')
def robust_sigma(in_y, zero=0):
 """
 Calculate a resistant estimate of the dispersion of
 a distribution. For an uncontaminated distribution,
 this is identical to the standard deviation.
 Use the median absolute deviation as the initial
 estimate, then weight points using Tukey Biweight.
 See, for example, Understanding Robust and
 Exploratory Data Analysis, by <NAME>
 and Tukey, <NAME> and Sons, 1983.
 .. note:: ROBUST_SIGMA routine from IDL ASTROLIB.
 Examples
 --------
 >>> result = robust_sigma(in_y, zero=1)
 Parameters
 ----------
 in_y : array_like
 Vector of quantity for which the dispersion is
 to be calculated
 zero : int
 If set, the dispersion is calculated w.r.t. 0.0
 rather than the central value of the vector. If
 Y is a vector of residuals, this should be set.
 Returns
 -------
 out_val : float
 Dispersion value. If failed, returns -1.
 """
 # Flatten array
 y = in_y.ravel()
 eps = 1.0E-20
 c1 = 0.6745
 c2 = 0.80
 c3 = 6.0
 c4 = 5.0
 c_err = -1.0
 min_points = 3
 if zero:
 y0 = 0.0
 else:
 y0 = np.median(y)
 dy = y - y0
 del_y = abs( dy )
 # First, the median absolute deviation MAD about the median:
 mad = np.median( del_y ) / c1
 # If the MAD=0, try the MEAN absolute deviation:
 if mad < eps:
 mad = del_y.mean() / c2
 if mad < eps:
 return 0.0
 # Now the biweighted value:
 u = dy / (c3 * mad)
 uu = u * u
 q = np.where(uu <= 1.0)
 count = len(q[0])
 if count < min_points:
 module_logger.warning('ROBUST_SIGMA: This distribution is TOO WEIRD! '
 'Returning {}'.format(c_err))
 return c_err
 numerator = np.sum( (y[q] - y0)**2.0 * (1.0 - uu[q])**4.0 )
 n = y.size
 den1 = np.sum( (1.0 - uu[q]) * (1.0 - c4 * uu[q]) )
 siggma = n * numerator / ( den1 * (den1 - 1.0) )
 if siggma > 0:
 out_val = np.sqrt( siggma )
 else:
 out_val = 0.0
 return out_val
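# Illustrative note (not part of the original module): for clean Gaussian data,
# robust_sigma(y) closely tracks np.std(y), e.g. y = np.random.normal(0., 2., 10000)
# gives a value near 2.0, while a handful of extreme outliers barely changes it.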
def meanclip(indata, clipsig=3.0, maxiter=5, converge_num=0.02, verbose=False):
 """
 Computes an iteratively sigma-clipped mean on a
 data set. Clipping is done about median, but mean
 is returned.
 .. note:: MYMEANCLIP routine from ACS library.
 Examples
 --------
 >>> mean, sigma = meanclip(indata)
 Parameters
 ----------
 indata : array_like
 Input data.
 clipsig : float
 Number of sigma at which to clip.
 maxiter : int
 Ceiling on number of clipping iterations.
 converge_num : float
 If the proportion of rejected pixels is less than
 this fraction, the iterations stop.
 verbose : bool
 Print messages to screen?
 Returns
 -------
 mean : float
 N-sigma clipped mean.
 sigma : float
 Standard deviation of remaining pixels.
 """
 # Flatten array
 skpix = indata.ravel()
 ct = indata.size
 iter = 0
 c1 = 1.0
 c2 = 0.0
 while (c1 >= c2) and (iter < maxiter):
 lastct = ct
 medval = np.median(skpix)
 sig = skpix.std().astype(np.float64) # Bug - Need to recast
 wsm = np.where( abs(skpix - medval) < (clipsig * sig) )
 ct = len(wsm[0])
 if ct > 0:
 skpix = skpix[wsm]
 c1 = abs(ct - lastct)
 c2 = converge_num * lastct
 iter += 1
 mean = skpix.mean()
 sigma = robust_sigma(skpix)
 if verbose:
 print('MEANCLIP: {:.1f}-sigma clipped mean\n'
 'MEANCLIP: Mean computed in {} iterations\n'
 'MEANCLIP: Mean = {:.6f}, sigma = {:.6f}'.format(
 clipsig, iter, mean, sigma))
 return mean, sigma
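# Illustrative note (not part of the original module): for a frame with a few hot
# pixels, e.g. np.append(np.random.normal(100., 1., 1000), [1e6, 1e6]), meanclip
# returns a mean near 100 because the outliers are rejected before averaging.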
def total(inarray, axis, type='meanclip'):
 """
 Collapse 2-D array in one dimension.
 .. note:: MYTOTAL routine from ACS library.
 Examples
 --------
 >>> collapsed_array = total(inarray, 1, type='median')
 Parameters
 ----------
 inarray : array_like
 Input 2-D array.
 axis : {1, 2}
 Axis to collapse.
 * 1 - Return values along Y.
 * 2 - Return values along X.
 type : {'median', 'meanclip', 'stdev'}
 Algorithm to use.
 Returns
 -------
 out_arr : array_like
 1-D array collapsed along desired axis with desired
 algorithm.
 """
 out_arr = 0.0
 # Check inarray
 if inarray.ndim != 2:
 module_logger.warning('TOTAL: Input array must be 2D')
 return out_arr
 # Check axis
 if axis == 1:
 n_out = inarray.shape[0]
 elif axis == 2:
 n_out = inarray.shape[1]
 else:
 module_logger.warning('TOTAL: Axis not supported - {}'.format(axis))
 return out_arr
 # Check type
 if type not in ('median', 'meanclip', 'stdev'):
 module_logger.warning('TOTAL: Type not supported - {}'.format(type))
 return out_arr
 # Initialize output array
 out_arr = np.zeros(n_out)
 out_rng = range(n_out)
 if type == 'meanclip':
 for i in out_rng:
 if axis == 1:
 im_i = inarray[i,:]
 else:
 im_i = inarray[:,i]
 mmean, msigma = meanclip(im_i, maxiter=10, converge_num=0.001)
 out_arr[i] = mmean
 elif type == 'stdev':
 for i in out_rng:
 if axis == 1:
 im_i = inarray[i,:]
 else:
 im_i = inarray[:,i]
 mmean, msigma = meanclip(im_i, maxiter=10, converge_num=0.001)
 out_arr[i] = msigma
 elif type == 'median':
 for i in out_rng:
 if axis == 1:
 im_i = inarray[i,:]
 else:
 im_i = inarray[:,i]
 out_arr[i] = np.median(im_i)
 return out_arr
def gaussian(height, center_x, width_x):
 """
 Returns a gaussian function with the given parameters.
 This is used for least square fitting optimization.
 .. note:: This is used by `msky`.
 Parameters
 ----------
 height: float
 Peak amplitude.
 center_x: float
 Peak location.
 width_x: float
 Sigma of gaussian curve.
 Returns
 -------
 x: lambda function
 Function used for optimization.
 """
 return lambda x: height * np.exp(-(center_x - x)**2 / (2.0 * width_x**2))
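# Illustration (not part of the original module): gaussian(1.0, 0.0, 2.0) returns a
# callable g with g(0.0) == 1.0 and g(2.0) == np.exp(-0.5) (about 0.607).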
def msky(inarray, do_plot=False, verbose=False, ptitle='', func=0):
 """
 Find modal sky on an array.
 The first step is the determination of the median value and sigma.
 The data are then histogrammed and a parabola is fitted to the
 logarithmic histogram. The coefficients of the parabola are used to
 get the mode and sigma of the sky, on the assumption that it is well
 fitted by a Gaussian or 2nd-degree polynomial.
 .. note:: MYSKY5 routine from ACS library.
 Parameters
 ----------
 inarray : array_like
 Input data.
 do_plot : bool
 Do plot?
 verbose : bool
 Print info to screen?
 ptitle : string
 Title of plot. Only used if plotting is done.
 func : {0, 1}
 Function for fitting:
 * 0 - 2nd degree polynomial
 * 1 - Gaussian
 Returns
 -------
 mmean : float
 Mode of fitted function.
 sigma : float
 Sigma of fitted function.
 """
 nsig = 8.0
 c1 = 2.5 # was 2.8
 c2 = 0.8 # was 1.3
 # Min/max of input array
 arr_min = inarray.min()
 arr_max = inarray.max()
 # Get sigma
 mmean, sigma = meanclip(inarray, clipsig=5.0, maxiter=10, verbose=verbose)
 if sigma <= 0:
 module_logger.warning(
 'MSKY: Weird distribution\n'
 'MEAN: {}\n'
 'STDDEV: {}\n'
 'MIN: {}\n'
 'MAX: {}'.format(mmean, sigma, arr_min, arr_max))
 return mmean, sigma
 # Print info
 if verbose:
 print('\nMSKY input array info\n'
 'MIN: {}\n'
 'MAX: {}'.format(arr_min, arr_max))
 # Flatten input array
 arr_1d = inarray.ravel()
 # Define min and max for the histogram
 x = nsig * sigma
 mmean = np.median(arr_1d)
 minhist = mmean - x
 maxhist = mmean + x
 ufi = inarray[ np.where((inarray > minhist) & (inarray < maxhist)) ]
 # Calculate the 25% and 75% percentiles to get the interquartile range
 # IQR = pc75 - pc25
 # <NAME>. 1991.
 sixd = np.argsort( ufi )
 ndata = ufi.size
 pc25 = ufi[ sixd[int(0.25 * ndata)] ]
 pc75 = ufi[ sixd[int(0.75 * ndata)] ]
 irq = pc75 - pc25
 # Freedman-Diaconis rule: bin width = 2 * IQR * n**(-1/3)
 step = 2.0 * irq * ndata**(-1.0 / 3.0)
 # Calculate number of bins to use
 nbin = int(round(2 * x / step - 1))
 # Histogram
 # http://www.scipy.org/Tentative_NumPy_Tutorial
 yhist, hbin = np.histogram(arr_1d, range=(minhist, maxhist), bins=nbin)
 xhist = 0.5 * (hbin[1:] + hbin[:-1])
 # Define xmin and xmax for the 2nd-order fit
 x1 = mmean - c1 * sigma
 x2 = mmean + c2 * sigma
 # Select the points between x1 and x2 for the fit
 w = np.where((xhist > x1) & (xhist < x2))
	},
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanPariwisata',
 fields=[
 ],
 options={
 'verbose_name': '46 Tahun Berkurang Gedung Pariwisata',
 'proxy': True,
 'verbose_name_plural': '46 Tahun Berkurang Gedung Pariwisata',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanPerdagangan',
 fields=[
 ],
 options={
 'verbose_name': '47 Tahun Berkurang Gedung Perdagangan',
 'proxy': True,
 'verbose_name_plural': '47 Tahun Berkurang Gedung Perdagangan',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanPerikanan',
 fields=[
 ],
 options={
 'verbose_name': '45 Tahun Berkurang Gedung Perikanan',
 'proxy': True,
 'verbose_name_plural': '45 Tahun Berkurang Gedung Perikanan',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanPerpustakaan',
 fields=[
 ],
 options={
 'verbose_name': '08 Tahun Berkurang Gedung Perpustakaan',
 'proxy': True,
 'verbose_name_plural': '08 Tahun Berkurang Gedung Perpustakaan',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanPertanian',
 fields=[
 ],
 options={
 'verbose_name': '13 Tahun Berkurang Gedung Pertanian',
 'proxy': True,
 'verbose_name_plural': '13 Tahun Berkurang Gedung Pertanian',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanRSUD',
 fields=[
 ],
 options={
 'verbose_name': '06 Tahun Berkurang Gedung RSUD',
 'proxy': True,
 'verbose_name_plural': '06 Tahun Berkurang Gedung RSUD',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanSATPOLPP',
 fields=[
 ],
 options={
 'verbose_name': '25 Tahun Berkurang Gedung SATPOLPP',
 'proxy': True,
 'verbose_name_plural': '25 Tahun Berkurang Gedung SATPOLPP',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanSekretariatKorpri',
 fields=[
 ],
 options={
 'verbose_name': '27 Tahun Berkurang Gedung Sekretariat Korpri',
 'proxy': True,
 'verbose_name_plural': '27 Tahun Berkurang Gedung Sekretariat Korpri',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanSetda',
 fields=[
 ],
 options={
 'verbose_name': '02 Tahun Berkurang Gedung Setda',
 'proxy': True,
 'verbose_name_plural': '02 Tahun Berkurang Gedung Setda',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanSetwan',
 fields=[
 ],
 options={
 'verbose_name': '01 Tahun Berkurang Gedung Setwan',
 'proxy': True,
 'verbose_name_plural': '01 Tahun Berkurang Gedung Setwan',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanSosial',
 fields=[
 ],
 options={
 'verbose_name': '09 Tahun Berkurang Gedung Sosial',
 'proxy': True,
 'verbose_name_plural': '09 Tahun Berkurang Gedung Sosial',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangGedungBangunanTebingTinggi',
 fields=[
 ],
 options={
 'verbose_name': '38 Tahun Berkurang Gedung Tebing Tinggi',
 'proxy': True,
 'verbose_name_plural': '38 Tahun Berkurang Gedung Tebing Tinggi',
 },
 bases=('gedungbangunan.tahunberkuranggedungbangunan',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungAwayan',
 fields=[
 ],
 options={
 'verbose_name': '34 Usul Hapus Gedung Awayan',
 'proxy': True,
 'verbose_name_plural': '34 Usul Hapus Gedung Awayan',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungBAPPEDA',
 fields=[
 ],
 options={
 'verbose_name': '21 Usul Hapus Gedung BAPPEDA',
 'proxy': True,
 'verbose_name_plural': '21 Usul Hapus Gedung BAPPEDA',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungBatumandi',
 fields=[
 ],
 options={
 'verbose_name': '32 Usul Hapus Gedung Batumandi',
 'proxy': True,
 'verbose_name_plural': '32 Usul Hapus Gedung Batumandi',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungBatuPiring',
 fields=[
 ],
 options={
 'verbose_name': '37 Usul Hapus Gedung Batu Piring',
 'proxy': True,
 'verbose_name_plural': '37 Usul Hapus Gedung Batu Piring',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungBKD',
 fields=[
 ],
 options={
 'verbose_name': '19 Usul Hapus Gedung BKD',
 'proxy': True,
 'verbose_name_plural': '19 Usul Hapus Gedung BKD',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungBKPPD',
 fields=[
 ],
 options={
 'verbose_name': '26 Usul Hapus Gedung BKPPD',
 'proxy': True,
 'verbose_name_plural': '26 Usul Hapus Gedung BKPPD',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungBPBD',
 fields=[
 ],
 options={
 'verbose_name': '39 Usul Hapus Gedung BPBD',
 'proxy': True,
 'verbose_name_plural': '39 Usul Hapus Gedung BPBD',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungBPPD',
 fields=[
 ],
 options={
 'verbose_name': '48 Usul Hapus Gedung BPPD',
 'proxy': True,
 'verbose_name_plural': '48 Usul Hapus Gedung BPPD',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDinkes',
 fields=[
 ],
 options={
 'verbose_name': '05 Usul Hapus Gedung Dinkes',
 'proxy': True,
 'verbose_name_plural': '05 Usul Hapus Gedung Dinkes',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDisdik',
 fields=[
 ],
 options={
 'verbose_name': '07 Usul Hapus Gedung Disdik',
 'proxy': True,
 'verbose_name_plural': '07 Usul Hapus Gedung Disdik',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDishub',
 fields=[
 ],
 options={
 'verbose_name': '04 Usul Hapus Gedung Dishub',
 'proxy': True,
 'verbose_name_plural': '04 Usul Hapus Gedung Dishub',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDisnakertrans',
 fields=[
 ],
 options={
 'verbose_name': '41 Usul Hapus Gedung Disnakertrans',
 'proxy': True,
 'verbose_name_plural': '41 Usul Hapus Gedung Disnakertrans',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDistamben',
 fields=[
 ],
 options={
 'verbose_name': '17 Usul Hapus Gedung Distamben',
 'proxy': True,
 'verbose_name_plural': '17 Usul Hapus Gedung Distamben',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDKO',
 fields=[
 ],
 options={
 'verbose_name': '23 Usul Hapus Gedung DKO',
 'proxy': True,
 'verbose_name_plural': '23 Usul Hapus Gedung DKO',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDKP',
 fields=[
 ],
 options={
 'verbose_name': '15 Usul Hapus Gedung DKP',
 'proxy': True,
 'verbose_name_plural': '15 Usul Hapus Gedung DKP',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDKUKMP',
 fields=[
 ],
 options={
 'verbose_name': '16 Usul Hapus Gedung DKUKMP',
 'proxy': True,
 'verbose_name_plural': '16 Usul Hapus Gedung DKUKMP',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDLH',
 fields=[
 ],
 options={
 'verbose_name': '22 Usul Hapus Gedung DLH',
 'proxy': True,
 'verbose_name_plural': '22 Usul Hapus Gedung DLH',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDPKP',
 fields=[
 ],
 options={
 'verbose_name': '40 Usul Hapus Gedung DPKP',
 'proxy': True,
 'verbose_name_plural': '40 Usul Hapus Gedung DPKP',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDPMD',
 fields=[
 ],
 options={
 'verbose_name': '10 Usul Hapus Gedung DPMD',
 'proxy': True,
 'verbose_name_plural': '10 Usul Hapus Gedung DPMD',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDPMPTSP',
 fields=[
 ],
 options={
 'verbose_name': '18 Usul Hapus Gedung DPMPTSP',
 'proxy': True,
 'verbose_name_plural': '18 Usul Hapus Gedung DPMPTSP',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDPPKB',
 fields=[
 ],
 options={
 'verbose_name': '42 Usul Hapus Gedung DPPKB',
 'proxy': True,
 'verbose_name_plural': '42 Usul Hapus Gedung DPPKB',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDPPPA',
 fields=[
 ],
 options={
 'verbose_name': '11 Usul Hapus Gedung DPPPA',
 'proxy': True,
 'verbose_name_plural': '11 Usul Hapus Gedung DPPPA',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDPUPR',
 fields=[
 ],
 options={
 'verbose_name': '03 Usul Hapus Gedung DPUPR',
 'proxy': True,
 'verbose_name_plural': '03 Usul Hapus Gedung DPUPR',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungDukCatPil',
 fields=[
 ],
 options={
 'verbose_name': '12 Usul Hapus Gedung DukCatPil',
 'proxy': True,
 'verbose_name_plural': '12 Usul Hapus Gedung DukCatPil',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungHalong',
 fields=[
 ],
 options={
 'verbose_name': '35 Usul Hapus Gedung Halong',
 'proxy': True,
 'verbose_name_plural': '35 Usul Hapus Gedung Halong',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungInspektorat',
 fields=[
 ],
 options={
 'verbose_name': '20 Usul Hapus Gedung Inspektorat',
 'proxy': True,
 'verbose_name_plural': '20 Usul Hapus Gedung Inspektorat',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungJuai',
 fields=[
 ],
 options={
 'verbose_name': '33 Usul Hapus Gedung Juai',
 'proxy': True,
 'verbose_name_plural': '33 Usul Hapus Gedung Juai',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungKearsipan',
 fields=[
 ],
 options={
 'verbose_name': '44 Usul Hapus Gedung Kearsipan',
 'proxy': True,
 'verbose_name_plural': '44 Usul Hapus Gedung Kearsipan',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungKehutanan',
 fields=[
 ],
 options={
 'verbose_name': '14 Usul Hapus Gedung Kehutanan',
 'proxy': True,
 'verbose_name_plural': '14 Usul Hapus Gedung Kehutanan',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungKESBANGPOL',
 fields=[
 ],
 options={
 'verbose_name': '24 Usul Hapus Gedung KESBANGPOL',
 'proxy': True,
 'verbose_name_plural': '24 Usul Hapus Gedung KESBANGPOL',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungKominfo',
 fields=[
 ],
 options={
 'verbose_name': '43 Usul Hapus Gedung Kominfo',
 'proxy': True,
 'verbose_name_plural': '43 Usul Hapus Gedung Kominfo',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungLampihong',
 fields=[
 ],
 options={
 'verbose_name': '31 Usul Hapus Gedung Lampihong',
 'proxy': True,
 'verbose_name_plural': '31 Usul Hapus Gedung Lampihong',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungParingin',
 fields=[
 ],
 options={
 'verbose_name': '28 Usul Hapus Gedung Paringin',
 'proxy': True,
 'verbose_name_plural': '28 Usul Hapus Gedung Paringin',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungParinginKota',
 fields=[
 ],
 options={
 'verbose_name': '29 Usul Hapus Gedung Paringin Kota',
 'proxy': True,
 'verbose_name_plural': '29 Usul Hapus Gedung Paringin Kota',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungParinginSelatan',
 fields=[
 ],
 options={
 'verbose_name': '36 Usul Hapus Gedung Paringin Selatan',
 'proxy': True,
 'verbose_name_plural': '36 Usul Hapus Gedung Paringin Selatan',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungParinginTimur',
 fields=[
 ],
 options={
 'verbose_name': '30 Usul Hapus Gedung Paringin Timur',
 'proxy': True,
 'verbose_name_plural': '30 Usul Hapus Gedung Paringin Timur',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungPariwisata',
 fields=[
 ],
 options={
 'verbose_name': '46 Usul Hapus Gedung Pariwisata',
 'proxy': True,
 'verbose_name_plural': '46 Usul Hapus Gedung Pariwisata',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungPerdagangan',
 fields=[
 ],
 options={
 'verbose_name': '47 Usul Hapus Gedung Perdagangan',
 'proxy': True,
 'verbose_name_plural': '47 Usul Hapus Gedung Perdagangan',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungPerikanan',
 fields=[
 ],
 options={
 'verbose_name': '45 Usul Hapus Gedung Perikanan',
 'proxy': True,
 'verbose_name_plural': '45 Usul Hapus Gedung Perikanan',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungPerpustakaan',
 fields=[
 ],
 options={
 'verbose_name': '08 Usul Hapus Gedung Perpustakaan',
 'proxy': True,
 'verbose_name_plural': '08 Usul Hapus Gedung Perpustakaan',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungPertanian',
 fields=[
 ],
 options={
 'verbose_name': '13 Usul Hapus Gedung Pertanian',
 'proxy': True,
 'verbose_name_plural': '13 Usul Hapus Gedung Pertanian',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungRSUD',
 fields=[
 ],
 options={
 'verbose_name': '06 Usul Hapus Gedung RSUD',
 'proxy': True,
 'verbose_name_plural': '06 Usul Hapus Gedung RSUD',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungSATPOLPP',
 fields=[
 ],
 options={
 'verbose_name': '25 Usul Hapus Gedung SATPOLPP',
 'proxy': True,
 'verbose_name_plural': '25 Usul Hapus Gedung SATPOLPP',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungSekretariatKorpri',
 fields=[
 ],
 options={
 'verbose_name': '27 Usul Hapus Gedung Sekretariat Korpri',
 'proxy': True,
 'verbose_name_plural': '27 Usul Hapus Gedung Sekretariat Korpri',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungSetda',
 fields=[
 ],
 options={
 'verbose_name': '02 Usul Hapus Gedung Setda',
 'proxy': True,
 'verbose_name_plural': '02 Usul Hapus Gedung Setda',
 },
 bases=('gedungbangunan.tahunberkurangusulhapusgedung',),
 ),
 migrations.CreateModel(
 name='TahunBerkurangUsulHapusGedungSetwan',
 fields=[
 ],
 options={
 'verbose_name': '01 Usul Hapus Gedung Setwan',
	post_processing=post_processing)
 def ProcessAction(self, action: actions.Action, post_processing=True):
 if self.is_gameover:
 return
 # print(f"Processed action: {action.direction}, {action.rotation}, {action.swap}")
 # self.test += 1
 # print(self.test)
 if action.swap:
 self.Swap()
 self.Rotate(action.rotation)
 self.Move(action, post_processing=post_processing)
 def _ProcessActionsThread(self):
 while True:
 while not self.action_list.empty():
 act = self.action_list.get()
 self.ProcessAction(act)
 self.action_list.task_done()
 time.sleep(0.001)
 def SetLevel(self, level: int = 0):
 """Let the front end set!"""
 self.level = level
 i = min(len(self.interval_decrease), self.level)
 self._current_spawn_interval = max(
 10, self._init_spawn_interval - self.interval_decrease[i])
 def IncreaseLevel(self, inc: int = 1):
 """Let the front end decide!"""
 self.level += inc
 self.SetLevel(self.level)
 def Move(self, action: actions.Action, post_processing=True) -> bool:
 """Moves the current piece.
 :param direction: Direction to move
 :param post_processing: if True, put the piece to color_map and
 apply line eliminate. Otherwise just update the current_piece's states.
 :return True if moved; False otherwise
 """
 if (action.direction == actions.NONE and
 not action.down):
 return False
 moved = False
 if action.down:
 try:
 self.mutex_current_piece.acquire()
 if self.CheckValidity(self.current_piece, (1, 0)):
 self.current_piece.x += 1
 moved = True
 self.soft_drop = True
 finally:
 self.mutex_current_piece.release()
 if action.direction == actions.LEFT:
 try:
 self.mutex_current_piece.acquire()
 if self.CheckValidity(self.current_piece, (0, -1)):
 self.current_piece.y += -1
 moved = True
 finally:
 self.mutex_current_piece.release()
 if action.direction == actions.RIGHT:
 try:
 self.mutex_current_piece.acquire()
 if self.CheckValidity(self.current_piece, (0, 1)):
 self.current_piece.y += 1
 moved = True
 finally:
 self.mutex_current_piece.release()
 if action.direction == actions.HARD_DROP or action.direction == actions.SOFT_DROP:
 try:
 self.mutex_current_piece.acquire()
 while self.CheckValidity(self.current_piece, (1, 0)):
 self.current_piece.x += 1
 moved = True
 finally:
 self.mutex_current_piece.release()
 if post_processing and action.direction == actions.HARD_DROP:
 self.PutPiece()
 if moved:
 self.last_action = action
 at_bottom = not self.CheckValidity(self.current_piece, (1, 0))
 if (at_bottom and action.direction != actions.HARD_DROP and
 action.source_user):
 self._RefreshLockTime()
 return moved
 def _RefreshLockTime(self):
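 # Lock-delay handling: a qualifying move while the piece rests on the stack
 # re-arms the lock timer; once the accumulated delay has used up the current
 # allowance, the allowance grows by incremental_lock_time up to maximum_lock_time.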
 self._enable_lock_time = True
 if self.accumulate_lock_time >= self.current_maximum_lock_time:
 self.current_maximum_lock_time = min(
 self.current_maximum_lock_time + self.incremental_lock_time,
 self.maximum_lock_time)
 def _ResetLockTime(self):
 self._enable_lock_time = False
 self.accumulate_lock_time = 0
 self.current_maximum_lock_time = 0
 def Swap(self):
 """Swaps the held piece and the current if its swappable"""
 if not self.can_swap:
 return
 try:
 self.mutex_current_piece.acquire()
 t = self.held_piece
 self.held_piece = self.current_piece
 self.current_piece = t
 if not self.current_piece:
 self._TakePieceFromList()
 self.current_piece.Init()
 self.held_piece.Init()
 self.can_swap = False
 finally:
 self.mutex_current_piece.release()
 def CheckGameOver(self):
 self.is_gameover = np.any(
 self.GetMapArea((0, 0), (self.map_height_padding, self.width)) != 0)
 return self.is_gameover
 def _AnalyzeElimination(self, n_eliminate: int) -> int:
 ret = 0
 is_last_put_t = isinstance(self.last_put_piece, shape.T)
 if n_eliminate == 1:
 if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
 print("TSS")
 ret += TSS
 self.line_tobesent += ATTACK_TSS
 else:
 ret += SINGLE
 if n_eliminate == 2:
 # TSD
 if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
 print("TSD")
 ret += TSD
 self.line_tobesent += ATTACK_TSD
 # Normal Double
 else:
 ret += DOUBLE
 self.line_tobesent += ATTACK_DOUBLE
 if n_eliminate == 3:
 # TST
 if (is_last_put_t and self.last_action and self.last_action.rotation != 0):
 print("TST")
 ret += TST
 self.line_tobesent += ATTACK_TST
 else:
 ret += TRIPLE
 self.line_tobesent += ATTACK_TRIPLE
 if n_eliminate == 4:
 ret += QUAD
 self.line_tobesent += ATTACK_QUAD
 # Checks for PC
 if np.all(self.color_map == 0):
 print("PC")
 ret += PC
 self.line_tobesent += ATTACK_PC
 return ret * (self.level + 3)
 def _LineClear(self):
 elimated_lines = []
 elimated_cnt = 0
 # Check the 4 rows covered by the last piece. This does not adapt to shapes
 # taller than 4 rows, but that is not part of this game; there is no plan to
 # support custom shapes.
 for row in range(4):
 if not (self.last_put_piece.x + row >= 0 and
 self.last_put_piece.x + row < self.height + self.map_height_padding):
 continue
 if np.all(self.color_map[self.last_put_piece.x + row, :] != 0):
 elimated_lines.append(row + self.last_put_piece.x)
 elimated_cnt += 1
 self.color_map = np.vstack((np.zeros((elimated_cnt, self.width),
 dtype=self.dtype),
 np.delete(self.color_map, elimated_lines, axis=0)))
 # Updates the bit_map
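 # init_row is an empty bitboard row: only the side-padding bits on both edges
 # are set, with zeros across the playable width, so each cleared line is
 # replaced by a blank row prepended at the top of the map.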
 side_padding = (1 << self.map_side_padding) - 1
 init_row = (side_padding << (self.map_side_padding + self.width)) | side_padding
 self.bit_map = np.concatenate((elimated_cnt * [init_row],
 np.delete(self.bit_map, elimated_lines))).astype(self.dtype)
 self.accumulated_lines_eliminated += elimated_cnt
 self.score += self._AnalyzeElimination(n_eliminate=elimated_cnt)
 def _SendAttack(self):
 """Send attack to target."""
 # This feature has not been implemented yet.
 self.line_sent += self.line_tobesent
 self.line_tobesent = 0
 def PutPiece(self, piece: shape.Shape = None):
 """ Puts a piece to color_map if it is a valid placement then execute the post processing.
 :param piece: The piece to put, if None, put the self.current_piece
 :param color_map: The color_map where the piece puts, if None, self.color_map will be used.
 :returns: True if the piece has been put. False otherwise.
 """
 if self._PrePutPiece(piece):
 self._PostPutPiece(piece)
 return True
 else:
 return False
 def _PrePutPiece(self, piece: shape.Shape = None, map: np.array = None):
 """ Puts a piece to color_map if it is a valid placement.
 Post put processing such as self._LineClear will not be executed
 :param piece: The piece to put, if None, put the self.current_piece
 :param map: The color_map where the piece puts, if None, self.color_map will be used.
 :returns: True if the piece has been put. False otherwise.
 """
 try:
 if not piece:
 self.mutex_current_piece.acquire()
 piece = self.current_piece
 if map is None:
 map = self.color_map
 if not self.CheckValidity(piece):
 return False
 for (i, j) in piece.GetShape():
 self.SetMap((piece.x + i, piece.y + j), piece.id, map)
 return True
 finally:
 if self.mutex_current_piece.locked():
 self.mutex_current_piece.release()
 def _PostPutPiece(self, piece: shape.Shape = None):
 if piece is not None:
 self.last_put_piece = piece
 else:
 self.last_put_piece = self.current_piece
 # LineClear should be called prior to SendAttack
 self._LineClear()
 if piece is None:
 self._TakePieceFromList()
 self.CheckGameOver()
 self._ResetLockTime()
 self._SendAttack()
 self.can_swap = True
 self.piece_dropped += 1
 def TextDraw(self):
 preview_map = self.color_map.copy()
 self._PrePutPiece(self.current_piece, preview_map)
 for i in preview_map:
 print(i)
 print()
 def SpawnPiece(self, piece: shape.Shape = None) -> bool:
 if not piece:
 self._TakePieceFromList()
 else:
 self.current_piece = piece.copy()
 return self.CheckValidity(self.current_piece)
 def _FindFittedPiece(self, piece: shape.Shape = None, num_90rotations: int = 0):
 """Finds a location that fits this piece with n 90rotations.
 Ref: https://tetris.fandom.com/wiki/SRS
 :param piece: The piece to be put in the color_map. If none, it will be set to the current_piece
 :param num_90rotations: How many 90 rotations
 :return: piece - shape.Shape: the piece with rotations that fits the color_map.
 """
 if not piece:
 piece = self.current_piece
 def _IsJLSTZ(piece: shape.Shape):
 jlstz = [shape.J, shape.L, shape.S, shape.T, shape.Z]
 for s in jlstz:
 if isinstance(piece, s):
 return True
 return False
 # The 180 rotation wall kick table is copied from
 # https://tetris.fandom.com/wiki/SRS#180.C2.B0_rotation
 # which originates from
 # https://github.com/JoshuaWebb/nullpomino/blob/master/src/mu/nu/nullpo/game/subsystem/wallkick/StandardWallkick.java
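 # SRS wall-kick offset tables: each tuple is a candidate (row, col) translation
 # to try, in order, when a plain rotation would collide; presumably the first
 # offset that yields a valid placement is the one applied.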
 offset_map_jlstz = [
 # state 0
 ([(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)], # 0>>1
 # 0>>2, 180 rotation
 # [(0,0), (1, 0), (2, 0), (1, 1), (2, 1), (-1, 0), (-2, 0), (-1, 1), (-2, 1), (0, -1), (3, 0), (-3, 0)],
 [(0, 0)],
 [(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)]), # 0>>3
 # state 1
 ([(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)], # 1>>2
 # l>>3, 180 rotation
 # [(0,0), (0, 1), (0, 2), (-1, 1), (-1, 2), (0, -1), (0, -2), (-1, -1), (-1, -2), (1, 0), (0, 3), (0, -3)],
 [(0, 0)],
 [(0, 0), (0, 1), (1, 1), (-2, 0), (-2, 1)]), # 1>>0
 # state 2
 ([(0, 0), (0, 1), (-1, 1), (2, 0), (2, 1)], # 2>>3
 # [(0,0), (-1, 0), (-2, 0), (-1, -1), (-2, -1), (1, 0), (2, 0), (1, -1), (2, -1), (0, 1), (-3, 0), (3, 0)], # 2>>0,
 [(0, 0)],
 [(0, 0), (0, -1), (-1, -1), (2, 0), (2, -1)]), # 2>>1
 # state 3
 ([(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)], # 3>>0
 # 3>>1, 180 rotation
 # [(0,0), (0, 1), (0, 2), (1, 1), (1, 2), (0, -1), (0, -2), (1, -1), (1, -2), (-1, 0), (0, 3), (0, -3)],
 [(0, 0)],
 [(0, 0), (0, -1), (1, -1), (2, 0), (-2, -1)]), # 3>>2
 ]
 offset_map_i = [
 # state 0
 [[(0, 0), (0, -2), (0, 1), (1, -2), (-2, 1), ], # 0>>1
 # [(0,0), (-1, 0), (-2, 0), (1, 0), (2, 0), (0, 1)], # 0>>2, 180 rotation
 [(0, 0)],
 [(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)]], # 0>>3
 # state 1
 [[(0, 0), (0, -1), (0, 2), (-2, -1), (1, 2)], # 1>>2
 # [(0,0), (0, 1), (0, 2), (0, -1), (0, -2), (-1, 0)], # 1>>3, 180 rotation,
 [(0, 0)],
 [(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)]], # 1>>0
 # state 2
 [[(0, 0), (0, 2), (0, -1), (-1, 2), (2, -1)], # 2>>3
 # [(0, 0), (1, 0), (2, 0), (-1, 0),
	# NOTE: See note above for variable name "expr"
 # for expr,conc in self.concjs:
 for exprj, concj in self.concjs:
 def rule_j_lb(m, i, j):
 e = exprj._pyomo_expr(index=(j,))
 c = concj.expr._pyomo_expr(index=(j,))
 body = e - c
 body_LB = getLB(body)
 MLBj = (
 body_LB
 if body_LB is not None
 else -ImpliesSiteCombination.DEFAULT_BIG_M
 )
 return MLBj * (1 - var._pyomo_var[i, j]) <= body
 def rule_j_ub(m, i, j):
 e = exprj._pyomo_expr(index=(j,))
 c = concj.expr._pyomo_expr(index=(j,))
 body = e - c
 body_UB = getUB(body)
 MUBj = (
 body_UB
 if body_UB is not None
 else ImpliesSiteCombination.DEFAULT_BIG_M
 )
 return body <= MUBj * (1 - var._pyomo_var[i, j])
 if isinstance(concj, GreaterThan) or isinstance(concj, EqualTo):
 result.append(Constraint(*Comb.index_sets, rule=rule_j_lb))
 if isinstance(concj, LessThan) or isinstance(concj, EqualTo):
 result.append(Constraint(*Comb.index_sets, rule=rule_j_ub))
 return result
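# Informal sketch of the big-M constraints built above: with y = var[i, j] binary
# and body = expr - conclusion, the rules impose
#     M_LB * (1 - y) <= body <= M_UB * (1 - y)
# so y == 1 forces the body to the conclusion's sense (>= 0, <= 0, or == 0 for
# GreaterThan/LessThan/EqualTo), while y == 0 relaxes the bounds to the body's
# natural range (falling back to DEFAULT_BIG_M when no bound is available).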
class ImpliesNeighbors(DescriptorRule):
 """A class for rules that define logical implications on neighbor sites.
 Spelled out: 'if this site-indexed descriptor is true (i.e., is equal
 to one), then a set of simple rules holds on each of the neighboring
 sites'
 Attributes:
 concs (list<tuple<MaterialDescriptor,SimpleDescriptorRule>>):
 list of conclusions to enforce if the logical predicate is true.
 neighborhoods (list<list<int>>): neighborhood data structure to use
 if you do not want to use the neighborhoods of the descriptor
 that this rule is attached to.
 (index information inherited from IndexedElem)
 See DescriptorRule for more information on rules and Canvas for more
 information on 'neighborhoods'.
 """
 DEFAULT_BIG_M = 9999
 # === STANDARD CONSTRUCTOR
 def __init__(self, concs, neighborhoods=None, **kwargs):
 """Standard constructor for ImpliesNeighbors rules.
 Args:
 concs (list<tuple<MaterialDescriptor,SimpleDescriptorRule>>):
 list of conclusions to conditionally enforce. Also, a single
 conclusion can be provided (i.e., a tuple<MaterialDescriptor,
 SimpleDescriptorRule>) and will be placed in a list.
 neighborhoods (list<list<int>>) Optional, data structure to use
 as neighborhoods of interest. If not provided, then the
 neighborhoods of the descriptor that this rule is attached to
 is used.
 **kwargs: Optional, index information passed to IndexedElem if
 interested in a subset of indices
 Possible choices: sites, bonds, site_types, bond_types, confs.
 """
 self.concs = concs if type(concs) is list else [concs]
 self.neighborhoods = neighborhoods
 Comb = IndexedElem.fromComb(
 *(desc for desc, conc in self.concs), *(conc for desc, conc in self.concs)
 )
 assert Comb.sites is not None
 kwargs = {**Comb.index_dict, **kwargs}
 DescriptorRule.__init__(self, **kwargs)
 # === PROPERTY EVALUATION METHODS
 def _pyomo_cons(self, var):
 """Method to create a Pyomo constraint from this rule.
 Args:
 var (MaterialDescriptor): The descriptor to be defined by this rule.
 Returns:
 (list<Constraint>) list of Pyomo constraint objects.
 """
 var_dict_wo_s = var.index_dict
 var_dict_wo_s.pop("sites") # no need to capture these sites
 neighborhoods = (
 self.neighborhoods
 if self.neighborhoods is not None
 else var.canv.NeighborhoodIndexes
 )
 bonds = [(i, j) for i in var.sites for j in neighborhoods[i] if j is not None]
 result = []
 # NOTE: After much confusion, I found a bug in the line of code
 # below. Be careful not to use variable names "expr"
 # because it gets mixed up with the Pyomo module "expr".
 # No error, but it gives garbage expressions and wasn't
 # clear to me what was being generated...
 # for expr,conc in self.concs:
 for expr_, conc in self.concs:
 Comb = IndexedElem.fromComb(expr_, conc)
 r_dict_wo_s = Comb.index_dict
 r_dict_wo_s.pop("sites") # no need to capture these sites
 ConIndexes = IndexedElem.fromComb(
 IndexedElem(bonds=bonds),
 IndexedElem(**var_dict_wo_s),
 IndexedElem(**r_dict_wo_s),
 )
 def rule_lb(m, *args):
 i, j, *args = args
 v = var._pyomo_var[var.mask((i, None, *args), ConIndexes)]
 e = expr_._pyomo_expr(index=expr_.mask((j, None, *args), ConIndexes))
 c = conc.expr._pyomo_expr(
 index=conc.expr.mask((j, None, *args), ConIndexes)
 )
 body = e - c
 body_LB = getLB(body)
 MLB = (
 body_LB if body_LB is not None else -ImpliesNeighbors.DEFAULT_BIG_M
 )
 return MLB * (1 - v) <= body
 def rule_ub(m, *args):
 i, j, *args = args
 v = var._pyomo_var[var.mask((i, None, *args), ConIndexes)]
 e = expr_._pyomo_expr(index=expr_.mask((j, None, *args), ConIndexes))
 c = conc.expr._pyomo_expr(
 index=conc.expr.mask((j, None, *args), ConIndexes)
 )
 body = e - c
 body_UB = getUB(body)
 MUB = body_UB if body_UB is not None else ImpliesNeighbors.DEFAULT_BIG_M
 return body <= MUB * (1 - v)
 if isinstance(conc, GreaterThan) or isinstance(conc, EqualTo):
 result.append(Constraint(*ConIndexes.index_sets, rule=rule_lb))
 if isinstance(conc, LessThan) or isinstance(conc, EqualTo):
 result.append(Constraint(*ConIndexes.index_sets, rule=rule_ub))
 return result
class MaterialDescriptor(IndexedElem):
 """A class to represent material geometric and energetic descriptors.
 This class holds the information to define mathematical optimization
 variables for the properties of materials. Additionally, each descriptor
 has a 'rules' list to which the user can append rules defining the
 descriptor and constraining the design space.
 Attributes:
 name (string): A unique (otherwise Pyomo will complain) name
 canv (``Canvas``): The canvas that the descriptor will be indexed over
 atoms (list<``BBlock``>): The building blocks to index the descriptor over.
 confDs (list<``Design``>): The designs for conformations to index over.
 integer (bool): Flag to indicate if the descriptor takes integer values.
 binary (bool): Flag to indicate if the descriptor takes boolean values.
 rules (list<``DescriptorRules``>): List of rules to define and constrain
 the material descriptor design space.
 bounds (tuple/dict/func): If tuple, the lower and upper bounds on the
 descriptor values across all indices. If dict, the bounds can be
 individually set for each index.
 See ``IndexedElem`` for more information on indexing.
 See ``DescriptorRule`` for information on defining descriptors.
 """
 DBL_TOL = 1e-5
 # === STANDARD CONSTRUCTOR
 def __init__(
 self,
 name,
 canv=None,
 atoms=None,
 confDs=None,
 bounds=(None, None),
 integer=False,
 binary=False,
 rules=[],
 **kwargs
 ):
 """Standard constuctor for material descriptors.
 Note: It is generally not necessary for users to create
 MaterialDescriptors themselves. Instead, use the
 MatOptModel.add____Descriptor() methods for the right
 type of descriptor (i.e., Site, Bond, etc.).
 Args:
 name (string): A unique (otherwise Pyomo will complain) name
 canv (Canvas): The canvas that the descriptor will be indexed over
 atoms (list<BBlock>): Building blocks to index the descriptor over.
 confDs (list<Design>): The designs for conformations to index over.
 bounds (tuple/dict/func): If tuple, the lower and upper bounds on the
 descriptor values across all indices. If dict, the bounds can be
 individually set for each index. Otherwise, advanced users can
 specify a function to be interpreted by Pyomo.
 integer (bool): Flag to indicate if the descriptor is integer.
 binary (bool): Flag to indicate if the descriptor is boolean.
 rules (list<DescriptorRules>): List of rules to define and constrain
 the material descriptor design space.
 **kwargs: Optional, index information passed to IndexedElem if
 interested in a subset of indices.
 Possible choices: sites, bonds, site_types, bond_types, confs.
 """
 self._name = name
 self._canv = canv
 self._atoms = atoms
 self._confDs = confDs
 self._integer = integer or binary
 self._binary = binary
 self._rules = rules if type(rules) is list else [rules]
 self._bounds = bounds
 self._pyomo_var = None # Will be set by MatOptModel._make_pyomo_model
 IndexedElem.__init__(self, **kwargs)
 # === AUXILIARY METHODS
 def _fix_pyomo_var_by_rule(self, r, m):
 if self.name in ("Yik", "Yi", "Xijkl", "Xij", "Cikl", "Ci", "Zic"):
 return self.__fix_basic_pyomo_vars_by_rule(r, m)
 else:
 Comb = IndexedElem.fromComb(self, r)
 for k in Comb.keys():
 self._pyomo_var[k].fix(r.val)
 def __fix_basic_pyomo_vars_by_rule(self, r, m):
 Comb = IndexedElem.fromComb(self, r)
 if self.name == "Yik":
 for i in Comb.sites:
 for k in Comb.site_types:
 fixYik(m, i, k, r.val)
 elif self.name == "Yi":
 for i in Comb.sites:
 fixYi(m, i, r.val)
 elif self.name == "Xijkl":
 for i, j in Comb.bonds:
 for k, l in Comb.bond_types:
 fixXijkl(m, i, j, k, l, r.val)
 elif self.name == "Xij":
 for i, j in Comb.bonds:
 fixXij(m, i, j, r.val)
 elif self.name == "Cikl":
 for i in Comb.sites:
 for k, l in Comb.bond_types:
 fixCikl(m, i, k, l, r.val)
 elif self.name == "Ci":
 for i in Comb.sites:
 fixCi(m, i, r.val)
 elif self.name == "Zic":
 for i in Comb.sites:
 for c in Comb.confs:
 fixZic(m, i, c, r.val)
 # === PROPERTY EVALUATION METHODS
 def _pyomo_cons(self, m):
 """Create a list of Pyomo constraints related to this descriptor."""
 result = []
 for rule in self.rules:
 if rule is not None:
 result.extend(rule._pyomo_cons(self))
 return result
 @property
 def _pyomo_bounds(self):
 """Creates a bound rule/tuple that can interpreted by Pyomo."""
 if type(self.bounds) is tuple:
 return self.bounds
 elif type(self.bounds) is dict:
 def rule_gen(m, *args):
 if args is not None and len(args) == 1:
 args = args[0]
 return self.bounds[args]
 return rule_gen
 else:
 # Else, assume that the user knows what they're doing
 # with functions for pyomo bounds
 return self.bounds
 def _pyomo_expr(self, index=None):
 """Interprets a variable as a Pyomo expression.
 Note:
	# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""A miscellaneous collection of basic functions."""
import sys
import copy
import os
import warnings
from collections.abc import Iterable
from pathlib import Path
import tempfile
from scipy.interpolate import interp1d
from scipy.stats import ncx2
import numpy as np
from numpy import histogram2d as histogram2d_np
from numpy import histogram as histogram_np
from astropy.logger import AstropyUserWarning
from astropy import log
from stingray.stats import pds_probability, pds_detection_level
from stingray.stats import z2_n_detection_level, z2_n_probability
from stingray.stats import fold_detection_level, fold_profile_probability
from stingray.pulse.pulsar import _load_and_prepare_TOAs
try:
 import pint.toa as toa
 import pint
 from pint.models import get_model
 HAS_PINT = True
except ImportError:
 HAS_PINT = False
try:
 from skimage.feature import peak_local_max
 HAS_SKIMAGE = True
except ImportError:
 HAS_SKIMAGE = False
try:
 from tqdm import tqdm as show_progress
except ImportError:
 def show_progress(a):
 return a
from . import (
 prange,
 array_take,
 HAS_NUMBA,
 njit,
 vectorize,
 float32,
 float64,
 int32,
 int64,
)
__all__ = [
 "array_take",
 "njit",
 "prange",
 "show_progress",
 "z2_n_detection_level",
 "z2_n_probability",
 "pds_detection_level",
 "pds_probability",
 "fold_detection_level",
 "fold_profile_probability",
 "r_in",
 "r_det",
 "_assign_value_if_none",
 "_look_for_array_in_array",
 "is_string",
 "_order_list_of_arrays",
 "mkdir_p",
 "common_name",
 "hen_root",
 "optimal_bin_time",
 "gti_len",
 "deorbit_events",
 "_add_default_args",
 "check_negative_numbers_in_args",
 "interpret_bintime",
 "get_bin_edges",
 "compute_bin",
 "hist1d_numba_seq",
 "hist2d_numba_seq",
 "hist3d_numba_seq",
 "hist2d_numba_seq_weight",
 "hist3d_numba_seq_weight",
 "index_arr",
 "index_set_arr",
 "histnd_numba_seq",
 "histogram2d",
 "histogram",
 "touch",
 "log_x",
 "get_list_of_small_powers",
 "adjust_dt_for_power_of_two",
 "adjust_dt_for_small_power",
 "memmapped_arange",
 "nchars_in_int_value",
]
DEFAULT_PARSER_ARGS = {}
DEFAULT_PARSER_ARGS["loglevel"] = dict(
 args=["--loglevel"],
 kwargs=dict(
 help=(
 "use given logging level (one between INFO, "
 "WARNING, ERROR, CRITICAL, DEBUG; "
 "default:WARNING)"
 ),
 default="WARNING",
 type=str,
 ),
)
DEFAULT_PARSER_ARGS["nproc"] = dict(
 args=["--nproc"],
 kwargs=dict(help=("Number of processors to use"), default=1, type=int),
)
DEFAULT_PARSER_ARGS["debug"] = dict(
 args=["--debug"],
 kwargs=dict(
 help=("set DEBUG logging level"), default=False, action="store_true"
 ),
)
DEFAULT_PARSER_ARGS["bintime"] = dict(
 args=["-b", "--bintime"],
 kwargs=dict(help="Bin time", type=np.longdouble, default=1),
)
DEFAULT_PARSER_ARGS["energies"] = dict(
 args=["-e", "--energy-interval"],
 kwargs=dict(
 help="Energy interval used for filtering",
 nargs=2,
 type=float,
 default=None,
 ),
)
DEFAULT_PARSER_ARGS["pi"] = dict(
 args=["--pi-interval"],
 kwargs=dict(
 help="PI interval used for filtering",
 nargs=2,
 type=int,
 default=[-1, -1],
 ),
)
DEFAULT_PARSER_ARGS["deorbit"] = dict(
 args=["-p", "--deorbit-par"],
 kwargs=dict(
 help=(
 "Deorbit data with this parameter file (requires PINT installed)"
 ),
 default=None,
 type=str,
 ),
)
DEFAULT_PARSER_ARGS["output"] = dict(
 args=["-o", "--outfile"],
 kwargs=dict(help="Output file", default=None, type=str),
)
DEFAULT_PARSER_ARGS["usepi"] = dict(
 args=["--use-pi"],
 kwargs=dict(
 help="Use the PI channel instead of energies",
 default=False,
 action="store_true",
 ),
)
DEFAULT_PARSER_ARGS["test"] = dict(
 args=["--test"],
 kwargs=dict(
 help="Only used for tests", default=False, action="store_true"
 ),
)
DEFAULT_PARSER_ARGS["pepoch"] = dict(
 args=["--pepoch"],
 kwargs=dict(
 type=float,
 required=False,
 help="Reference epoch for timing parameters (MJD)",
 default=None,
 ),
)
def r_in(td, r_0):
 """Calculate incident countrate given dead time and detected countrate."""
 tau = 1 / r_0
 return 1.0 / (tau - td)
def r_det(td, r_i):
 """Calculate detected countrate given dead time and incident countrate."""
 tau = 1 / r_i
 return 1.0 / (tau + td)
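# Illustrative sanity check (not part of the original module): the two
# relations above are inverses of each other, so for a dead time of
# td = 0.0025 s and an incident rate of 300 ct/s,
#     r_det(0.0025, 300.0)  ~= 171.43 ct/s
#     r_in(0.0025, 171.43)  ~= 300.0  ct/s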
def _assign_value_if_none(value, default):
 if value is None:
 return default
 return value
def _look_for_array_in_array(array1, array2):
 """
 Examples
 --------
 >>> _look_for_array_in_array([1, 2], [2, 3, 4])
 2
 >>> _look_for_array_in_array([1, 2], [3, 4, 5]) is None
 True
 """
 for a1 in array1:
 if a1 in array2:
 return a1
 return None
def is_string(s):
 """Portable function to answer this question."""
 return isinstance(s, str) # NOQA
def _order_list_of_arrays(data, order):
 """
 Examples
 --------
 >>> order = [1, 2, 0]
 >>> new = _order_list_of_arrays({'a': [4, 5, 6], 'b':[7, 8, 9]}, order)
 >>> np.all(new['a'] == [5, 6, 4])
 True
 >>> np.all(new['b'] == [8, 9, 7])
 True
 >>> new = _order_list_of_arrays([[4, 5, 6], [7, 8, 9]], order)
 >>> np.all(new[0] == [5, 6, 4])
 True
 >>> np.all(new[1] == [8, 9, 7])
 True
 >>> _order_list_of_arrays(2, order) is None
 True
 """
 if hasattr(data, "items"):
 data = dict((i[0], np.asarray(i[1])[order]) for i in data.items())
 elif hasattr(data, "index"):
 data = [np.asarray(i)[order] for i in data]
 else:
 data = None
 return data
class _empty:
 def __init__(self):
 pass
def mkdir_p(path):
 """Safe mkdir function."""
 return os.makedirs(path, exist_ok=True)
def common_name(str1, str2, default="common"):
 """Strip two strings of the letters not in common.
 Filenames must be of same length and only differ by a few letters.
 Parameters
 ----------
 str1 : str
 str2 : str
 Returns
 -------
 common_str : str
 A string containing the parts of the two names in common
 Other Parameters
 ----------------
 default : str
 The string to return if common_str is empty
 Examples
 --------
 >>> common_name('strAfpma', 'strBfpmb')
 'strfpm'
 >>> common_name('strAfpma', 'strBfpmba')
 'common'
 >>> common_name('asdfg', 'qwerr')
 'common'
 >>> common_name('A_3-50_A.nc', 'B_3-50_B.nc')
 '3-50'
 """
 if not len(str1) == len(str2):
 return default
 common_str = ""
 # Extract the HEN root of the name (in case they're event files)
 str1 = hen_root(str1)
 str2 = hen_root(str2)
 for i, letter in enumerate(str1):
 if str2[i] == letter:
 common_str += letter
 # Remove leading and trailing underscores and dashes
 common_str = common_str.rstrip("_").rstrip("-")
 common_str = common_str.lstrip("_").lstrip("-")
 if common_str == "":
 common_str = default
 # log.debug('common_name: %s %s -> %s', str1, str2, common_str)
 return common_str
def hen_root(filename):
 """Return the root file name (without _ev, _lc, etc.).
 Parameters
 ----------
 filename : str
 Examples
 --------
 >>> fname = "blabla_ev_calib.nc"
 >>> hen_root(fname)
 'blabla'
 >>> fname = "blablu_ev_bli.fits.gz"
 >>> hen_root(fname)
 'blablu_ev_bli'
 >>> fname = "blablu_ev_lc.nc"
 >>> hen_root(fname)
 'blablu'
 >>> fname = "blablu_lc_asrd_ev_lc.nc"
 >>> hen_root(fname)
 'blablu_lc_asrd'
 """
 fname = filename.replace(".gz", "")
 fname = os.path.splitext(fname)[0]
 todo = True
 while todo:
 todo = False
 for ending in ["_ev", "_lc", "_pds", "_cpds", "_calib"]:
 if fname.endswith(ending):
 fname = fname[: -len(ending)]
 todo = True
 return fname
def optimal_bin_time(fftlen, tbin):
 """Vary slightly the bin time to have a power of two number of bins.
 Given an FFT length and a proposed bin time, return a bin time
 slightly shorter than the original, that will produce a power-of-two number
 of FFT bins.
 Examples
 --------
 >>> optimal_bin_time(512, 1.1)
 1.0
 """
 current_nbin = fftlen / tbin
 new_nbin = 2 ** np.ceil(np.log2(current_nbin))
 return fftlen / new_nbin
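# Worked example (illustrative): for fftlen=512 s and tbin=1.1 s the current
# number of bins is 512 / 1.1 ~= 465.45; the next power of two is 512, so the
# adjusted bin time is 512 / 512 = 1.0 s, matching the doctest above.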
def gti_len(gti):
 """Return the total good time from a list of GTIs.
 Examples
 --------
 >>> gti_len([[0, 1], [2, 4]])
 3
 """
 return np.sum(np.diff(gti, axis=1))
def simple_orbit_fun_from_parfile(
 mjdstart, mjdstop, parfile, ntimes=1000, ephem="DE421", invert=False
):
 """Get a correction for orbital motion from pulsar parameter file.
 Parameters
 ----------
 mjdstart, mjdstop : float
 Start and end of the time interval where we want the orbital solution
 parfile : str
 Any parameter file understood by PINT (Tempo or Tempo2 format)
 Other Parameters
 ----------------
 ntimes : int
 Number of time intervals to use for interpolation. Default 1000
 invert : bool
 Invert the solution (e.g. to apply an orbital model instead of
 subtracting it)
 Returns
 -------
 correction_mjd : function
 Function that accepts times in MJDs and returns the deorbited times.
 """
 from scipy.interpolate import interp1d
 from astropy import units
 if not HAS_PINT:
 raise ImportError(
 "You need the optional dependency PINT to use this "
 "functionality: github.com/nanograv/pint"
 )
 mjds = np.linspace(mjdstart, mjdstop, ntimes)
 toalist = _load_and_prepare_TOAs(mjds, ephem=ephem)
 m = get_model(parfile)
 delays = m.delay(toalist)
 if invert:
 delays = -delays
 correction = interp1d(
 mjds,
 (toalist.table["tdbld"] * units.d - delays).to(units.d).value,
 fill_value="extrapolate",
 )
 return correction
def deorbit_events(events, parameter_file=None, invert=False, ephem=None):
 """Refer arrival times to the center of mass of binary system.
 Parameters
 ----------
 events : `stingray.events.EventList` object
 The event list
 parameter_file : str
 The parameter file, in Tempo-compatible format, containing
 the orbital solution (e.g. a BT model)
 """
 events = copy.deepcopy(events)
 if parameter_file is None:
 warnings.warn(
 "No parameter file specified for deorbit. Returning"
 " unaltered event list"
 )
 return events
 if not os.path.exists(parameter_file):
 raise FileNotFoundError(
 "Parameter file {} does not exist".format(parameter_file)
 )
 if events.mjdref < 33282.0:
 raise ValueError(
 "MJDREF is very low (<01-01-1950), " "this is unsupported."
 )
 model = get_model(parameter_file)
 porb = model.PB.value
 pepoch = events.gti[0, 0]
 pepoch_mjd = pepoch / 86400 + events.mjdref
 length = np.max(events.time) - np.min(events.time)
 length_d = length / 86400
 ntimes = max(100, int(length // 60), int(length_d / porb * 100))
 log.info(f"Interpolating orbital solution with {ntimes} points")
 if ephem is None and hasattr(events, "ephem") and events.ephem is not None:
 ephem = events.ephem
 log.info(f"Using default ephemeris: {ephem}")
 elif ephem is None:
 ephem = "DE421"
 orbital_correction_fun = simple_orbit_fun_from_parfile(
 pepoch_mjd - 1,
 pepoch_mjd + length_d + 1,
 parameter_file,
 ntimes=ntimes,
 invert=invert,
 ephem=ephem,
 )
 mjdtimes = events.time / 86400 + events.mjdref
 mjdgtis = events.gti / 86400 + events.mjdref
 outtime = (orbital_correction_fun(mjdtimes) - events.mjdref) * 86400
 outgtis = (orbital_correction_fun(mjdgtis) - events.mjdref) * 86400
 events.time = outtime
 events.gti = outgtis
 return events
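# Minimal usage sketch (illustrative; the file name "orbit.par" and the event
# times are made up, and PINT must be installed):
#
#     from stingray.events import EventList
#     ev = EventList(time=[10.0, 20.0, 30.0],
#                    gti=np.asarray([[0.0, 40.0]]), mjdref=55000.0)
#     ev_deorb = deorbit_events(ev, parameter_file="orbit.par")
#     # ev_deorb.time now refers arrival times to the binary center of mass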
def _add_default_args(parser, list_of_args):
 for key in list_of_args:
 arg = DEFAULT_PARSER_ARGS[key]
 a = arg["args"]
 k = arg["kwargs"]
 parser.add_argument(*a, **k)
 return parser
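# Illustrative usage (sketch): build an argparse parser from the defaults
# defined in DEFAULT_PARSER_ARGS above.
#
#     import argparse
#     parser = argparse.ArgumentParser(description="example")
#     parser = _add_default_args(parser, ["bintime", "debug", "loglevel"])
#     args = parser.parse_args(["-b", "0.5", "--debug"])
#     # args.bintime == 0.5, args.debug is True, args.loglevel == "WARNING"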
def check_negative_numbers_in_args(args):
 """If there are negative numbers in args, prepend a space.
 Examples
 --------
 >>> args = ['events.nc', '-f', '103', '--fdot', '-2e-10']
 >>> newargs = check_negative_numbers_in_args(args)
 >>> args[:4] == newargs[:4]
 True
 >>> newargs[4] == ' -2e-10'
 True
 """
 if args is None:
 args = sys.argv[1:]
 newargs = []
 for arg in args:
 | |
| 
	initialized or checked
 * @param val The integer value to set/check against, see below
 *
 * Note that equality means something special for strings. Each byte
 * is initialized to an incrementing value. So check is done against that.
 *
 */
""")
 for t in scalar_types:
 if t in integer_types:
 out.write("""
#define VAR_%s_INIT(var, val) var = (%s)(val)
#define VAR_%s_CHECK(var, val) ((var) == (%s)(val))
""" % (t.upper(), t, t.upper(), t))
 else:
 out.write("""
#define VAR_%s_INIT(var, val) \\
 of_test_str_fill((uint8_t *)&(var), val, sizeof(var))
#define VAR_%s_CHECK(var, val) \\
 of_test_str_check((uint8_t *)&(var), val, sizeof(var))
""" % (t.upper(), t.upper()))
 gen_fill_string(out)
 gen_scalar_set_check_funs(out)
 gen_unified_accessor_funs(out)
 gen_ident_tests(out)
 gen_log_test(out)
def gen_message_scalar_test(out, name):
 """
 Generate test cases for message objects, scalar accessors
 """
 loxi_utils.gen_c_copy_license(out)
 out.write("""
/**
 *
 * AUTOMATICALLY GENERATED FILE. Edits will be lost on regen.
 *
 * Message-scalar tests for all versions
 */
#include <locitest/test_common.h>
""")
 for version in of_g.of_version_range:
 v_name = loxi_utils.version_to_name(version)
 out.write("""
/**
 * Message-scalar tests for version %s
 */
""" % v_name)
 for cls in of_g.standard_class_order:
 if type_maps.class_is_virtual(cls):
 continue
 if version in of_g.unified[cls]:
 message_scalar_test(out, version, cls)
 out.write("""
int
run_scalar_acc_tests(void)
{
""")
 for version in of_g.of_version_range:
 v_name = loxi_utils.version_to_name(version)
 for cls in of_g.standard_class_order:
 if type_maps.class_is_virtual(cls):
 continue
 if version in of_g.unified[cls]:
 test_name = "%s_%s" % (cls, v_name)
 out.write(" RUN_TEST(%s_scalar);\n" % test_name)
 out.write(" return TEST_PASS;\n}\n");
def message_scalar_test(out, version, cls):
 """
 Generate one test case for the given version and class
 """
 members, member_types = scalar_member_types_get(cls, version)
 length = of_g.base_length[(cls, version)]
 v_name = loxi_utils.version_to_name(version)
 out.write("""
static int
test_%(cls)s_%(v_name)s_scalar(void)
{
 %(cls)s_t *obj;
 obj = %(cls)s_new(%(v_name)s);
 TEST_ASSERT(obj != NULL);
 TEST_ASSERT(obj->version == %(v_name)s);
 TEST_ASSERT(obj->length == %(length)d);
 TEST_ASSERT(obj->parent == NULL);
 TEST_ASSERT(obj->object_id == %(u_cls)s);
""" % dict(cls=cls, u_cls=cls.upper(),
 v_name=v_name, length=length, version=version))
 # If this class is a concrete member of an inheritance hierarchy,
 # run the hierarchy's root wire type parser and assert it returns
 # the expected object id.
 ofclass = loxi_globals.unified.class_by_name(cls)
 if ofclass and not ofclass.virtual:
 root = ofclass
 while root.superclass:
 root = root.superclass
 if root.virtual:
 out.write("""
 {
 of_object_id_t object_id;
 %(root_cls)s_wire_object_id_get(obj, &object_id);
 TEST_ASSERT(object_id == %(u_cls)s);
 }
""" % dict(root_cls=root.name, u_cls=cls.upper()))
 if not type_maps.class_is_virtual(cls):
 out.write("""
 if (loci_class_metadata[obj->object_id].wire_length_get != NULL) {
 int length;
 loci_class_metadata[obj->object_id].wire_length_get((of_object_t *)obj, &length);
 TEST_ASSERT(length == %(length)d);
 }
 /* Set up incrementing values for scalar members */
 %(cls)s_%(v_name)s_populate_scalars(obj, 1);
 /* Check values just set */
 TEST_ASSERT(%(cls)s_%(v_name)s_check_scalars(obj, 1) != 0);
""" % dict(cls=cls, u_cls=cls.upper(),
 v_name=v_name, length=length, version=version))
 out.write("""
 %(cls)s_delete(obj);
 /* To do: Check memory */
 return TEST_PASS;
}
""" % dict(cls=cls))
# Get the members and list of scalar types for members of a given class
def scalar_member_types_get(cls, version):
 member_types = []
 if not version in of_g.unified[cls]:
 return ([], [])
 if "use_version" in of_g.unified[cls][version]:
 v = of_g.unified[cls][version]["use_version"]
 members = of_g.unified[cls][v]["members"]
 else:
 members = of_g.unified[cls][version]["members"]
 # Accumulate variables that are supported
 for member in members:
 m_type = member["m_type"]
 m_name = member["name"]
 if (not loxi_utils.type_is_scalar(m_type) or
 ignore_member(cls, version, m_name, m_type)):
 continue
 if not m_type in member_types:
 member_types.append(m_type)
 return (members, member_types)
def scalar_funs_instance(out, cls, version, members, member_types):
 """
 Generate one instance of scalar set/check functions
 """
 out.write("""
/**
 * Populate the scalar values in obj of type %(cls)s,
 * version %(v_name)s
 * @param obj Pointer to an object to populate
 * @param value The seed value to use in populating the object
 * @returns The value after increments for this object's values
 */
int %(cls)s_%(v_name)s_populate_scalars(
 %(cls)s_t *obj, int value) {
""" % dict(cls=cls, v_name=loxi_utils.version_to_name(version)))
 # Declare string types
 for t in member_types:
 out.write(" %s %s;\n" % (t, var_name_map(t)))
 for member in members:
 m_type = member["m_type"]
 m_name = member["name"]
 if (not loxi_utils.type_is_scalar(m_type) or
 ignore_member(cls, version, m_name, m_type)):
 continue
 v_name = var_name_map(m_type);
 out.write("""
 VAR_%(u_type)s_INIT(%(v_name)s, value);
 %(cls)s_%(m_name)s_set(obj, %(v_name)s);
 value += 1;
""" % dict(cls=cls, m_name=m_name, u_type=m_type.upper(), v_name=v_name))
 out.write("""
 return value;
}
""")
 out.write("""
/**
 * Check scalar values in obj of type %(cls)s,
 * version %(v_name)s
 * @param obj Pointer to an object to check
 * @param value Starting value for checking
 * @returns The value after increments for this object's values
 */
int %(cls)s_%(v_name)s_check_scalars(
 %(cls)s_t *obj, int value) {
""" % dict(cls=cls, v_name=loxi_utils.version_to_name(version)))
 for t in member_types:
 out.write(" %s %s;\n" % (t, var_name_map(t)))
 for member in members:
 m_type = member["m_type"]
 m_name = member["name"]
 if (not loxi_utils.type_is_scalar(m_type) or
 ignore_member(cls, version, m_name, m_type)):
 continue
 v_name = var_name_map(m_type);
 out.write("""
 %(cls)s_%(m_name)s_get(obj, &%(v_name)s);
 TEST_ASSERT(VAR_%(u_type)s_CHECK(%(v_name)s, value));
 value += 1;
""" % dict(cls=cls, m_name=m_name, u_type=m_type.upper(), v_name=v_name))
 out.write("""
 return value;
}
""")
def gen_scalar_set_check_funs(out):
 """
 For each object class with scalar members, generate functions that
 set and check their values
 """
 for version in of_g.of_version_range:
 for cls in of_g.standard_class_order:
 if type_maps.class_is_virtual(cls):
 continue
 (members, member_types) = scalar_member_types_get(cls, version)
 scalar_funs_instance(out, cls, version, members, member_types)
# Helper function to set up a subclass instance for a test
def setup_instance(out, cls, subcls, instance, v_name, version):
 base_type = loxi_utils.list_to_entry_type(cls)
 setup_template = """
 %(subcls)s_init(%(inst)s, %(v_name)s, -1, 1);
 %(cls)s_append_bind(list, %(inst)s);
 value = %(subcls)s_%(v_name)s_populate(
 %(inst)s, value);
 cur_len += %(inst)s->length;
 TEST_ASSERT(list->length == cur_len);
"""
 out.write("""
 /* Append two instances of type %s */
""" % subcls)
 for i in range(2):
 out.write(setup_template %
 dict(inst=instance, subcls=subcls, v_name=v_name,
 base_type=base_type, cls=cls,
 version=version))
def check_instance(out, cls, subcls, instance, v_name, version, last):
 check_template = """
 TEST_ASSERT(%(inst)s->object_id == %(elt_name)s);
 value = %(subcls)s_%(v_name)s_check(
 %(inst)s, value);
 TEST_ASSERT(value != 0);
"""
 out.write("\n /* Check two instances of type %s */" % instance)
 out.write(check_template %
 dict(elt_name=loxi_utils.enum_name(subcls),
 inst=instance, subcls=subcls,
 v_name=loxi_utils.version_to_name(version)))
 out.write("""\
 TEST_OK(%(cls)s_next(list, &elt));
""" % dict(cls=cls))
 out.write(check_template %
 dict(elt_name=loxi_utils.enum_name(subcls),
 inst=instance, subcls=subcls,
 v_name=loxi_utils.version_to_name(version)))
 if last:
 out.write("""\
 TEST_ASSERT(%(cls)s_next(list, &elt) == OF_ERROR_RANGE);
""" % dict(cls=cls))
 else:
 out.write("""\
 TEST_OK(%(cls)s_next(list, &elt));
""" % dict(cls=cls))
# Maybe: Get a map from list class to parent, mem_name of container
def list_test(out, version, cls):
 out.write("""
static int
test_%(cls)s_%(v_name)s(void)
{
""" % dict(cls=cls, v_name=loxi_utils.version_to_name(version)))
 base_type = loxi_utils.list_to_entry_type(cls)
 out.write(""" %(cls)s_t *list;
 int value = 1;
""" % dict(cls=cls, base_type=base_type))
 out.write("""
 list = %(cls)s_new(%(v_name)s);
 TEST_ASSERT(list != NULL);
 TEST_ASSERT(list->version == %(v_name)s);
 TEST_ASSERT(list->length == 0);
 TEST_ASSERT(list->parent == NULL);
 TEST_ASSERT(list->object_id == %(enum_cls)s);
 value = %(cls)s_%(v_name)s_populate(list, value);
 TEST_ASSERT(value != 0);
""" % dict(cls=cls, base_type=base_type, v_name=loxi_utils.version_to_name(version),
 enum_cls=loxi_utils.enum_name(cls)))
 out.write("""
 /* Now check values */
 value = 1;
 value = %(cls)s_%(v_name)s_check(list, value);
 TEST_ASSERT(value != 0);
""" % dict(cls=cls, v_name=loxi_utils.version_to_name(version)))
 out.write("""
 %(cls)s_delete(list);
 return TEST_PASS;
}
""" % dict(cls=cls))
def gen_list_test(out, name):
 """
 Generate baseline test cases for lists
 @param out The file handle to write to
 """
 loxi_utils.gen_c_copy_license(out)
 out.write("""
/**
 *
 * AUTOMATICALLY GENERATED FILE. Edits will be lost on regen.
 *
 * Baseline list tests for all versions
 */
#include <locitest/test_common.h>
""")
 for version in of_g.of_version_range:
 v_name = loxi_utils.version_to_name(version)
 out.write("""
/**
 * Baseline list tests for version %s
 */
""" % v_name)
 for cls in of_g.ordered_list_objects:
 if version in of_g.unified[cls]:
 list_test(out, version, cls)
 out.write("""
int
run_list_tests(void)
{
""")
 for version in of_g.of_version_range:
 v_name = loxi_utils.version_to_name(version)
 for cls in of_g.ordered_list_objects:
 if version in of_g.unified[cls]:
 test_name = "%s_%s" % (cls, v_name)
 out.write(" RUN_TEST(%s);\n" % test_name)
 out.write("\n return TEST_PASS;\n}\n");
def gen_match_test(out, name):
 """
 Generate baseline tests for match functions
 """
 loxi_utils.gen_c_copy_license(out)
 out.write("""\
/**
 *
 * AUTOMATICALLY GENERATED FILE. Edits will be lost on regen.
 *
 * Match tests for all versions
 * @fixme These are mostly hard coded now.
 */
#include <locitest/test_common.h>
static int
test_match_1(void)
{
""")
 for version in of_g.of_version_range:
 out.write(" of_match_v%(v)d_t *m_v%(v)d;\n" % dict(v=version))
 out.write("""\
 of_match_t match;
 int value = 1;
 int idx;
 uint32_t exp_value;
 /* Verify default values for ip mask map */
 for (idx = 0; idx < 64; idx++) {
 exp_value = (idx < 32) ? ~((1 << idx) - 1) : 0;
 TEST_ASSERT(of_ip_index_to_mask(idx) == exp_value);
 if (idx < 32) {
 TEST_ASSERT(of_ip_mask_to_index(exp_value) == idx);
 }
 }
""")
 for version in of_g.of_version_range:
 out.write("""
 /* Create/populate/convert and delete for version %(v_name)s */
 m_v%(version)d = of_match_v%(version)d_new(%(v_name)s);
 TEST_ASSERT(m_v%(version)d != NULL);
 TEST_ASSERT((value = of_match_populate(&match, %(v_name)s, value)) > 0);
 TEST_OK(of_match_to_wire_match_v%(version)d(&match, m_v%(version)d));
 of_match_v%(version)d_delete(m_v%(version)d);
""" % dict(v_name=loxi_utils.version_to_name(version), version=version))
 out.write("""
 return TEST_PASS;
}
""")
 out.write("""
static int
test_match_2(void)
{
""")
 for version in of_g.of_version_range:
 out.write(" of_match_v%(v)d_t *m_v%(v)d;\n" % dict(v=version))
 out.write("""\
 of_match_t match1;
 of_match_t match2;
 int value = 1;
""")
 for version in of_g.of_version_range:
 out.write("""
 TEST_ASSERT((value = of_match_populate(&match1, %(v_name)s, value)) > 0);
 m_v%(version)d = of_match_v%(version)d_new(%(v_name)s);
 TEST_ASSERT(m_v%(version)d != NULL);
 TEST_OK(of_match_to_wire_match_v%(version)d(&match1, m_v%(version)d));
 TEST_OK(of_match_v%(version)d_to_match(m_v%(version)d, &match2));
 TEST_ASSERT(memcmp(&match1, &match2, sizeof(match1)) == 0);
 of_match_v%(version)d_delete(m_v%(version)d);
""" % dict(v_name=loxi_utils.version_to_name(version), version=version))
 out.write("""
 return TEST_PASS;
}
""")
 out.write("""
static int
test_match_3(void)
{
 of_match_t match1;
 of_match_t match2;
 int value = 1;
 of_octets_t octets;
 of_object_storage_t storage;
 memset(&storage, 0, sizeof(storage));
 storage.obj.wbuf = &storage.wbuf;
""")
 for version in of_g.of_version_range:
 out.write("""
 /* Serialize to version %(v_name)s */
 TEST_ASSERT((value = of_match_populate(&match1, %(v_name)s, value)) > 0);
 TEST_ASSERT(of_match_serialize(%(v_name)s, &match1, &octets) ==
 OF_ERROR_NONE);
 storage.obj.wbuf->buf = octets.data;
 storage.obj.wbuf->alloc_bytes = octets.bytes;
 storage.obj.wbuf->current_bytes = octets.bytes;
 TEST_ASSERT(of_match_deserialize(%(v_name)s, &match2, &storage.obj, 0, octets.bytes) ==
 OF_ERROR_NONE);
 TEST_ASSERT(memcmp(&match1, &match2, sizeof(match1)) == 0);
 FREE(octets.data);
""" % dict(v_name=loxi_utils.version_to_name(version), version=version))
 out.write("""
 return TEST_PASS;
}
""")
 out.write("""
int run_match_tests(void)
{
 RUN_TEST(match_1);
 RUN_TEST(match_2);
 RUN_TEST(match_3);
 RUN_TEST(match_utils);
 return TEST_PASS;
}
""")
def gen_msg_test(out, name):
 | |
| 
	for spatial plots
# ====================================
sub_fig_text = ['(a)', '(b)', '(c)',
 '(d)', '(e)', '(f)']
Wins_to_Plot = ['1850-74', '1900-24', '1950-74', '2000-24', '2050-74', '2075-99']
Wins_to_Plot_idxs = [0,2,4,6,8,9]
import cartopy.crs as ccrs
from matplotlib.axes import Axes
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
proj_trans = ccrs.PlateCarree()
proj_output = ccrs.PlateCarree()
# Plotting individual drivers
# ===========================
# Spatial plot of individual driver correlations
# for idx, dri in enumerate (drivers_names):
sub_fig_text = ['(a)', '(b)', '(c)',
 '(d)', '(e)', '(f)']
Wins_to_Plot = ['1850-74', '1900-24', '1950-74', '2000-24', '2050-74', '2075-99']
Wins_to_Plot_idxs = [0,2,4,6,8,9]
ymax = 1
ymin = -1
import cartopy.crs as ccrs
from matplotlib.axes import Axes
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
proj_trans = ccrs.PlateCarree()
proj_output = ccrs.PlateCarree()
for dri_idx, dri in enumerate (drivers_names):
 fig = plt.figure(figsize = (12,9), dpi = 200)
 #pwin = Wins_to_Plot_idxs[0]
 plag = 1
 ax = {}
 gl = {}
 for plot_idx, win_idx in enumerate(Wins_to_Plot_idxs):
 #plot_idx = 0 #
 gl[plot_idx] = 0
 if plot_idx == 0:
 ax[plot_idx] = fig.add_subplot(
 2, 3, plot_idx+1, projection= proj_output
 )
			# Mean Correlation Coefficient of the Selected climate Drivers at any rank
 plot_data = np.ma.mean(np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:], 
 mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:], 
 drivers_code[dri_idx]) .mask),axis = 0)
 h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data, 
 transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap='PuOr')
 for srex_idx,abr in enumerate (srex_abr):
 ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
 elif plot_idx>0:
 ax[plot_idx] = fig.add_subplot(
 2, 3, plot_idx+1, projection= proj_output,
 sharex=ax[0], sharey=ax[0]
 )
			# Mean Correlation Coefficient of the Selected climate Drivers at any rank
 plot_data = np.ma.mean(np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:], 
 mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:], 
 drivers_code[dri_idx]) .mask),axis = 0)
 h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data, 
 transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap='PuOr')
 for srex_idx,abr in enumerate (srex_abr):
 ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
 for plot_idx in range(len(Wins_to_Plot)):
 ax[plot_idx].coastlines(alpha=0.75)
 ax[plot_idx].text(-85, -10, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
 horizontalalignment="right",
 verticalalignment='center',
 fontsize = 9)
 gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
 linewidth=.5, color='gray', alpha=0.5, linestyle='--')
 gl[3].xlabels_bottom = True
 gl[4].xlabels_bottom = True
 gl[5].xlabels_bottom = True
 gl[3].xformatter = LONGITUDE_FORMATTER
 gl[4].xformatter = LONGITUDE_FORMATTER
 gl[5].xformatter = LONGITUDE_FORMATTER
 gl[0].ylabels_left = True
 gl[3].ylabels_left = True
 gl[0].yformatter = LATITUDE_FORMATTER
 gl[3].yformatter = LATITUDE_FORMATTER
 plt.subplots_adjust(wspace=0.02,hspace=-.695)
 cax = plt.axes([0.92, 0.335, 0.015, 0.34])
 plt.colorbar( h, cax=cax, orientation='vertical', pad=0.04, shrink=0.95);
 ax[1].set_title("Correlation Coefficient of %s with %s extremes"%(dri,variable.upper()), fontsize=14)
 fig.savefig(web_path + "Spatial_Corr_%s_%s_lag_%d.pdf"%(variable,dri,plag),
 bbox_inches = "tight", edgecolor="w")
 fig.savefig(web_path + "Spatial_Corr_%s_%s_lag_%d.png"%(variable,dri,plag),
 bbox_inches = "tight", edgecolor="w")
 fig.savefig(path_save + "Correlations/Spatial_Maps/Spatial_Corr_%s_%s_lag_%d.pdf"%(variable,dri,plag),
 bbox_inches = "tight", edgecolor="w")
 del fig
# Dominant Driver spatial plot at lag =1 month
# ===========================================
# Spatial plot of Dominant driver correlations
# for idx, dri in enumerate (drivers_names):
ymax = 45
ymin = 5
rk = 0 #Dominant driver
plag = 1 # lag =1 month
fig = plt.figure(figsize = (12,9), dpi = 200)
ax = {}
gl = {}
for plot_idx, win_idx in enumerate(Wins_to_Plot_idxs):
#plot_idx = 0 #
 gl[plot_idx] = 0
 if plot_idx == 0:
 ax[plot_idx] = fig.add_subplot(
 2, 3, plot_idx+1, projection= proj_output
 )
 plot_data = np.ma.masked_equal(np.ma.masked_invalid(dom_dri_ids[rk,win_idx,plag,:,:]),0)
 cmap = plt.get_cmap('rainbow', drivers_code.size)
 h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
 transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap=cmap)
 for srex_idx,abr in enumerate (srex_abr):
 ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
 elif plot_idx>0:
 ax[plot_idx] = fig.add_subplot(
 2, 3, plot_idx+1, projection= proj_output,
 sharex=ax[0], sharey=ax[0]
 )
 plot_data = np.ma.masked_equal(np.ma.masked_invalid(dom_dri_ids[rk,win_idx,plag,:,:]),0)
 h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data,
 transform=ccrs.PlateCarree(), vmax=ymax, vmin=ymin, cmap= cmap)
 for srex_idx,abr in enumerate (srex_abr):
 ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
for plot_idx in range(len(Wins_to_Plot)):
 ax[plot_idx].coastlines(alpha=0.75)
 ax[plot_idx].text(-85, -10, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
 horizontalalignment="right",
 verticalalignment='center',
 fontsize = 9)
 gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
 linewidth=.5, color='gray', alpha=0.5, linestyle='--')
gl[3].xlabels_bottom = True
gl[4].xlabels_bottom = True
gl[5].xlabels_bottom = True
gl[3].xformatter = LONGITUDE_FORMATTER
gl[4].xformatter = LONGITUDE_FORMATTER
gl[5].xformatter = LONGITUDE_FORMATTER
gl[0].ylabels_left = True
gl[3].ylabels_left = True
gl[0].yformatter = LATITUDE_FORMATTER
gl[3].yformatter = LATITUDE_FORMATTER
plt.subplots_adjust(wspace=0.02,hspace=-.695)
cax = plt.axes([0.92, 0.335, 0.015, 0.34])
cbar = plt.colorbar(h, cax=cax, ticks = range(drivers_code[0],drivers_code[-1]+1,10))
cbar.ax.set_yticklabels(drivers_names)
#plt.colorbar( h, cax=cax, orientation='vertical', pad=0.04, shrink=0.95);
ax[1].set_title("Dominant Drivers of %s extremes"%(variable.upper()), fontsize=14)
fig.savefig(web_path + "Spatial_Dominant_Driver_%s_lag_%d.pdf"%(variable,plag), 
 bbox_inches = "tight", edgecolor="w")
fig.savefig(web_path + "Spatial_Dominant_Driver_%s_lag_%d.png"%(variable,plag),
 bbox_inches = "tight", edgecolor="w")
fig.savefig(path_save + "Correlations/Spatial_Maps/Dominant_Driver_%s_lag_%d.pdf"%(variable,plag),
 bbox_inches = "tight", edgecolor="w")
del fig
# Plotting of "Regional Dominance"
# =====================================
#dict_counts[region_abr][win][lg][rk] ['Dri_Name']
#dict_counts[region_abr][win][lg][rk] ['Corr_Coeff'] 
rk=0
lg=1
plag=1
values_range = []
sign = {}
for r in srex_abr:
 sign[r] = {}
 for win_idx, wi in enumerate(Wins_to_Plot):
 values_range.append(dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'])
 #print(win_idx,dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'] )
 if dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'] > 0:
 sign[r][wi] = '+' 
 elif dict_counts[r][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Corr_Coeff'] < 0:
 sign[r][wi] = u"\u2212"
 else:
 sign[r][wi] = ' '
print ("To check for the range of values")
print (np.array(values_range).min())
print (np.array(values_range).max())
ymax = 45
ymin = 5
# Creating the NBP Values for 1850-74 for all regions for NBP du Ext
ploting_stats = {}
for win_idx, wi in enumerate(Wins_to_Plot):
 ploting_stats[wi] = {}
 
 all_masked = np.ma.masked_equal(np.ma.zeros(srex_mask_ma.shape),0)
 for s_idx in srex_idxs:
 tmp = np.ma.masked_equal(srex_mask_ma, s_idx + 1).mask # +1 because region numbers in the mask start from 1 while srex_idxs start from 0
 all_masked[tmp] = dict_counts[srex_abr[s_idx]][Wins_to_Plot_idxs[win_idx]][lg][rk] ['Dri_Code']
 del tmp
 all_masked = np.ma.masked_array(all_masked, mask = srex_mask_ma.mask)
 ploting_stats[wi] ['Dri_Codes'] = np.ma.masked_equal(np.ma.masked_invalid(all_masked),0)
# test plot
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from mpl_toolkits.axes_grid1 import AxesGrid
proj_trans = ccrs.PlateCarree()
#proj_output = ccrs.Robinson(central_longitude=0)
proj_output = ccrs.PlateCarree()
fig = plt.figure(figsize = (12,9), dpi = 400)
plt.style.use("classic")
ax = {}
gl = {}
for plot_idx in range(len(Wins_to_Plot)):
 gl[plot_idx] = 0
 if plot_idx == 0 :
 ax[plot_idx] = fig.add_subplot(
 2, 3, plot_idx+1, projection= proj_output
 )
 plot_data = np.ma.masked_equal(np.ma.masked_invalid(ploting_stats[Wins_to_Plot[plot_idx]]['Dri_Codes']),0)
 cmap = plt.get_cmap('rainbow', drivers_code.size)
 h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data, 
 transform=ccrs.PlateCarree(),vmax=ymax, vmin=ymin,cmap= cmap)
 for srex_idx,abr in enumerate (srex_abr):
 ax[plot_idx].text ( srex_centroids[srex_idx][0], srex_centroids[srex_idx][-1], sign[abr][Wins_to_Plot[plot_idx]],
 horizontalalignment='center',
 color = 'white', fontweight = 'bold',fontsize=10,
 transform = proj_trans)
 ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
 
 
 elif plot_idx>0:
 
 ax[plot_idx] = fig.add_subplot(
 2, 3, plot_idx+1, projection= proj_output,
 sharex=ax[0], sharey=ax[0]
 )
 plot_data = np.ma.masked_equal(np.ma.masked_invalid(ploting_stats[Wins_to_Plot[plot_idx]]['Dri_Codes']),0)
 h = ax[plot_idx].pcolormesh(lon_edges[...],lat_edges[...], plot_data, 
 transform=ccrs.PlateCarree(),vmax=ymax,vmin=ymin,cmap= cmap)
 for srex_idx,abr in enumerate (srex_abr):
 ax[plot_idx].text ( srex_centroids[srex_idx][0], srex_centroids[srex_idx][-1], 
 sign[abr][Wins_to_Plot[plot_idx]],
 horizontalalignment='center',
 color = 'white', fontweight = 'bold',fontsize=10,
 transform = proj_trans)
 ax[plot_idx].add_geometries([srex_polygons[srex_idx]], crs = proj_trans, facecolor='none', edgecolor='black', alpha=0.4)
 
for plot_idx in range(len(Wins_to_Plot)):
 ax[plot_idx].coastlines(alpha=0.75)
 ax[plot_idx].text(80, -60, sub_fig_text[plot_idx] + ' '+ Wins_to_Plot[plot_idx],
 horizontalalignment="right",
 verticalalignment='center',
 fontsize = 12)
 gl[plot_idx] = ax[plot_idx].gridlines(crs=ccrs.PlateCarree(), draw_labels=False,
 linewidth=.5, color='gray', alpha=0.5, linestyle='--')
 
gl[3].xlabels_bottom = True
gl[4].xlabels_bottom = True
gl[5].xlabels_bottom = True
gl[3].xformatter = LONGITUDE_FORMATTER
gl[4].xformatter = LONGITUDE_FORMATTER
gl[5].xformatter = LONGITUDE_FORMATTER
gl[0].ylabels_left = True
gl[3].ylabels_left = True
gl[0].yformatter = LATITUDE_FORMATTER
gl[3].yformatter = LATITUDE_FORMATTER
plt.subplots_adjust(wspace=0.02,hspace=-.695)
cax = plt.axes([0.92, 0.335, 0.015, 0.34])
cbar = plt.colorbar(h, cax=cax, ticks = range(drivers_code[0],drivers_code[-1]+1,10))
drivers_names_plotting = np.array(['Prcp', 'SM','TAS','Fire'])
cbar.ax.set_yticklabels(drivers_names_plotting)
# cbar.ax.set_yticklabels(drivers_names)
#plt.colorbar(h, orientation='horizontal', pad=0.04);
ax[1].set_title("Regional Distribution of Dominant Drivers of %s extremes \n"%(variable.upper()), fontsize=14)
fig.savefig(web_path + "Spatial_Regional_Dominant_Driver_%s_lag_%d.pdf"%(variable,plag), 
 edgecolor = "w", bbox_inches = "tight")
fig.savefig(web_path + "Spatial_Regional_Dominant_Driver_%s_lag_%d.png"%(variable,plag),
 bbox_inches = "tight")
fig.savefig(path_save + "Correlations/Spatial_Maps/Dominant_Regional_Driver_%s_lag_%d.pdf"%(variable,plag), 
 edgecolor = "w", bbox_inches = "tight")
# Calculation of the count of pixels of different regions...
# ...with positive and negative correlation coefficients!
# ========================================================
# For MRSO
# --------
dri_idx = 1 #for MRSO
plag = 1
# Dict to store the counts of pos/neg extremes
# --------------------------------------------
dict_mrso_cc_count = {}
for region_abr in srex_abr: 
 dict_mrso_cc_count[region_abr] = {}
 for win_idx, win_str in enumerate(win_yr):
 dict_mrso_cc_count[region_abr][win_str] = {}
del region_abr,win_idx, win_str
# Calculation of counts:
for region_abr in srex_abr: 
 for win_idx, win_str in enumerate(win_yr):
 driver_cc_win_tmp = np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:], 
 mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:], 
 drivers_code[dri_idx]) .mask)
 filter_region 	= np.array(srex_abr) == region_abr
 region_idx		= srex_idxs[filter_region][0]
 region_number	= np.array(srex_nums)[filter_region][0]
 region_name		= np.array(srex_names)[filter_region][0]
 region_abr		= np.array(srex_abr)[filter_region][0] 
 region_mask_not	= np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Mask everything but the region
 region_mask		= ~region_mask_not # Only the region is masked
 cc_values_tmp = driver_cc_win_tmp[np.array([region_mask]*4)][driver_cc_win_tmp[np.array([region_mask]*4)].mask ==False]
 dict_mrso_cc_count[region_abr][win_str]['pos'] = (cc_values_tmp > 0).sum()
 dict_mrso_cc_count[region_abr][win_str]['neg'] = (cc_values_tmp < 0).sum()
 
del region_abr,win_idx, win_str,cc_values_tmp,region_mask
# For TAS
# --------
dri_idx = 2 #for TAS
plag = 1
# Dict to store the counts of pos/neg extremes
# --------------------------------------------
dict_tas_cc_count = {}
for region_abr in srex_abr: 
 dict_tas_cc_count[region_abr] = {}
 for win_idx, win_str in enumerate(win_yr):
 dict_tas_cc_count[region_abr][win_str] = {}
del region_abr,win_idx, win_str
# Calculation of counts:
for region_abr in srex_abr: 
 for win_idx, win_str in enumerate(win_yr):
 driver_cc_win_tmp = np.ma.masked_array(data=dom_dri_cc[:,win_idx,plag,:,:], 
 mask = np.ma.masked_not_equal(dom_dri_ids[:,win_idx,plag,:,:], 
 drivers_code[dri_idx]) .mask)
 filter_region 	= np.array(srex_abr) == region_abr
 region_idx		= srex_idxs[filter_region][0]
 region_number	= np.array(srex_nums)[filter_region][0]
 region_name		= np.array(srex_names)[filter_region][0]
 region_abr		= np.array(srex_abr)[filter_region][0] 
 region_mask_not	= np.ma.masked_not_equal(srex_mask_ma, region_number).mask # Mask everything but the region
 region_mask		= ~region_mask_not # Only the region is masked
 cc_values_tmp = driver_cc_win_tmp[np.array([region_mask]*4)][driver_cc_win_tmp[np.array([region_mask]*4)].mask ==False]
 dict_tas_cc_count[region_abr][win_str]['pos'] = (cc_values_tmp > 0).sum()
 dict_tas_cc_count[region_abr][win_str]['neg'] = (cc_values_tmp < 0).sum()
 
del region_abr,win_idx, win_str,cc_values_tmp,region_mask
# Analysis and presentation of data on correlation coefficient:
# -------------------------------------------------------------
# MRSO
df_mrso_cc = {}
for region_abr in srex_abr:
 df_mrso_cc[region_abr] = pd.DataFrame.from_dict(dict_mrso_cc_count[region_abr], orient='index')
 df_mrso_cc[region_abr].loc[:,"%pos"] = (df_mrso_cc[region_abr].loc[:,"pos"]*100/(
 df_mrso_cc[region_abr].loc[:,"pos"] + 
 df_mrso_cc[region_abr].loc[:,"neg"])
 ).round(decimals=1)
 df_mrso_cc[region_abr].loc[:,"%neg"] = (df_mrso_cc[region_abr].loc[:,"neg"]*100/(
 df_mrso_cc[region_abr].loc[:,"pos"] + 
 df_mrso_cc[region_abr].loc[:,"neg"])
 ).round(decimals=1)
del region_abr
#TAS
df_tas_cc = {}
for region_abr in srex_abr:
 df_tas_cc[region_abr] = pd.DataFrame.from_dict(dict_tas_cc_count[region_abr], orient='index')
 df_tas_cc[region_abr].loc[:,"%pos"] = (df_tas_cc[region_abr].loc[:,"pos"]*100/(
 df_tas_cc[region_abr].loc[:,"pos"] + 
 df_tas_cc[region_abr].loc[:,"neg"])
 ).round(decimals=1)
 df_tas_cc[region_abr].loc[:,"%neg"] = (df_tas_cc[region_abr].loc[:,"neg"]*100/(
 df_tas_cc[region_abr].loc[:,"pos"] + 
 df_tas_cc[region_abr].loc[:,"neg"])
 ).round(decimals=1)
del region_abr
# Plotting in Jupyter Notebook
# ---------------------------
# Percent count of pixels that are positively...
# ...or negatively correlated with MRSO
region_abr = srex_abr[2]
import pylab as plot
params = {'legend.fontsize': 20,
 'legend.handlelength': 2}
plot.rcParams.update(params)
df_mrso_cc[region_abr].iloc[2:,2:].plot.bar(stacked =False, 
 figsize=(9,4), 
 fontsize = 14,
 grid='--')
plt.legend(loc='upper right', bbox_to_anchor=(1.25,.6), fontsize=14, ncol=1)
plt.ylim([0,100])
plt.title(f"Percent count of the pixel with pos/neg correlation with TAS for {region_abr}", 
 loc='left',fontsize =15)
#plt.text(0,18,"Total Regions: 26", fontsize=14, fontweight='bold', color='brown')
# The number 10 on the y-axis represents the number of pixels 
for | |
| 
	<reponame>deepmind/kfac_jax<gh_stars>1-10
# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""""K-FAC loss functions objects, tags and registration functions."""
import abc
from typing import Optional, Sequence, Tuple
import chex
import distrax
import jax
import jax.numpy as jnp
from kfac_jax._src import layers_and_loss_tags as tags
from kfac_jax._src import utils
class LossFunction(utils.Finalizable):
 """Abstract base class for loss functions.
 Note that unlike typical loss functions used in neural networks, these are
 neither summed nor averaged over the batch, and the output of evaluate()
 will not be a scalar. It is up to the user to correctly manipulate the
 result as needed.
 """
 def __init__(self, weight: float):
 """Initializes the loss instance.
 Args:
 weight: The relative weight attributed to the loss.
 """
 super().__init__()
 self._weight = weight
 self.finalize()
 @property
 def weight(self) -> float:
 """The relative weight of the loss."""
 return self._weight
 @property
 @abc.abstractmethod
 def targets(self) -> Optional[chex.Array]:
 """The targets being predicted by the model.
 Returns:
 None or Tensor of appropriate shape for calling self._evaluate() on.
 """
 @property
 @abc.abstractmethod
 def inputs(self) -> Tuple[chex.Array, ...]:
 """The inputs to the loss function (excluding the targets)."""
 @abc.abstractmethod
 def copy_with_different_inputs(
 self,
 inputs: Sequence[chex.Array],
 ) -> "LossFunction":
 """Creates the same :class:`~LossFunction` object, but with different inputs."""
 def evaluate(
 self,
 targets: Optional[chex.Array] = None,
 coefficient_mode: str = "regular",
 ) -> chex.Array:
 """Evaluates the loss function on the targets.
 Args:
 targets: The targets, on which to evaluate the loss. If this is set to
 ``None`` will use ``self.targets`` instead.
 coefficient_mode: Specifies how to use the relative weight of the loss in
 the returned value. There are three options:
 1. 'regular' - returns ``self.weight * loss(targets)``
 2. 'sqrt' - returns ``sqrt(self.weight) * loss(targets)``
 3. 'off' - returns ``loss(targets)``
 Returns:
 The value of the loss scaled appropriately by ``self.weight`` according to
 the coefficient mode.
 Raises:
 ValueError if both ``targets`` and ``self.targets`` are ``None``.
 """
 if targets is None and self.targets is None:
 raise ValueError("Cannot evaluate losses with unspecified targets.")
 elif targets is None:
 targets = self.targets
 if coefficient_mode == "regular":
 multiplier = self.weight
 elif coefficient_mode == "sqrt":
 multiplier = jnp.sqrt(self.weight)
 elif coefficient_mode == "off":
 multiplier = 1.0
 else:
 raise ValueError(f"Unrecognized coefficient_mode={coefficient_mode}.")
 return self._evaluate(targets) * multiplier
 @abc.abstractmethod
 def _evaluate(self, targets: chex.Array) -> chex.Array:
 """Evaluates the value of the loss, disregarding the relative weight."""
 def grad_of_evaluate(
 self,
 targets: Optional[chex.Array],
 coefficient_mode: str,
 ) -> Tuple[chex.Array, ...]:
 """Evaluates the gradient of the loss function, w.r.t. its inputs.
 Args:
 targets: The targets at which to evaluate the loss. If this is ``None``
 will use ``self.targets`` instead.
 coefficient_mode: The coefficient mode to use for evaluation. See
 ``self.evaluate`` for more details.
 Returns:
 The gradient of the loss function w.r.t. its inputs, at the provided
 targets.
 """
 targets = self.targets if targets is None else targets
 def evaluate_sum(inputs: Sequence[chex.Array]) -> chex.Array:
 """Evaluates the loss summed over all axis, including batch etc."""
 instance = self.copy_with_different_inputs(inputs)
 return jnp.sum(instance.evaluate(targets, coefficient_mode))
 return jax.grad(evaluate_sum)(self.inputs)
 def multiply_ggn(
 self,
 vector: Sequence[chex.Array],
 ) -> Tuple[chex.Array, ...]:
 """Right-multiplies a vector by the GGN of the loss function.
 Here the GGN is the Generalized Gauss-Newton matrix (whose definition is
 somewhat flexible) of the loss function with respect to its inputs.
 Args:
 vector: The vector to multiply. Must have the same shape(s) as
 ``self.inputs``.
 Returns:
 The vector right-multiplied by the GGN. Will have the same shape(s) as
 ``self.inputs``.
 """
 return utils.scalar_mul(self.multiply_ggn_unweighted(vector), self.weight)
 @abc.abstractmethod
 def multiply_ggn_unweighted(
 self,
 vector: Sequence[chex.Array],
 ) -> Tuple[chex.Array, ...]:
 """Same as :func:`~LossFunction.multiply_ggn`, disregarding the relative weight."""
 def multiply_ggn_factor(
 self,
 vector: chex.Array,
 ) -> Tuple[chex.Array, ...]:
 """Right-multiplies a vector by a factor B of the GGN.
 Here the GGN is the Generalized Gauss-Newton matrix (whose definition is
 somewhat flexible) of the loss function with respect to its inputs.
 Typically this will be block-diagonal across different cases in the batch,
 since the loss function is typically summed across cases.
 Note that B can be any matrix satisfying ``B * B^T = G`` where ``G`` is the
 GGN, but will agree with the one used in the other methods of this class.
 Args:
 vector: The vector to multiply. Must be of the shape(s) given by
 'self.ggn_factor_inner_shape'.
 Returns:
 The vector right-multiplied by B. Will be of the same shape(s) as
 ``self.inputs``.
 """
 return utils.scalar_mul(
 self.multiply_ggn_factor_unweighted(vector), jnp.sqrt(self.weight))
 @abc.abstractmethod
 def multiply_ggn_factor_unweighted(
 self, vector: chex.Array
 ) -> Tuple[chex.Array, ...]:
 """Same as :func:`~LossFunction.multiply_ggn_factor`, disregarding the relative weight."""
 def multiply_ggn_factor_transpose(
 self,
 vector: Sequence[chex.Array],
 ) -> chex.Array:
 """Right-multiplies a vector by the transpose of a factor B of the GGN.
 Here the GGN is the Generalized Gauss-Newton matrix (whose definition is
 somewhat flexible) of the loss function with respect to its inputs.
 Typically this will be block-diagonal across different cases in the batch,
 since the loss function is typically summed across cases.
 Note that B can be any matrix satisfying ``B * B^T = G`` where G is the GGN,
 but will agree with the one used in the other methods of this class.
 Args:
 vector: The vector to multiply. Must have the same shape(s) as
 ``self.inputs``.
 Returns:
 The vector right-multiplied by B^T. Will be of the shape(s) given by
 ``self.ggn_factor_inner_shape``.
 """
 return utils.scalar_mul(
 self.multiply_ggn_factor_transpose_unweighted(vector),
 jnp.sqrt(self.weight))
 @abc.abstractmethod
 def multiply_ggn_factor_transpose_unweighted(
 self,
 vector: Sequence[chex.Array],
 ) -> chex.Array:
 """Same as :func:`~LossFunction.multiply_ggn_factor_transpose`, disregarding the relative weight."""
 def multiply_ggn_factor_replicated_one_hot(
 self,
 index: Sequence[int],
 ) -> Tuple[chex.Array, ...]:
 """Right-multiplies a replicated-one-hot vector by a factor B of the GGN.
 Here the GGN is the Generalized Gauss-Newton matrix (whose definition is
 somewhat flexible) of the loss function with respect to its inputs.
 Typically this will be block-diagonal across different cases in the batch,
 since the loss function is typically summed across cases.
 A replicated-one-hot vector means a tensor which, for each slice along the
 batch dimension (assumed to be dimension 0), is 1.0 in the entry
 corresponding to the given index and 0 elsewhere.
 Note that B can be any matrix satisfying ``B * B^T = G`` where G is the GGN,
 but will agree with the one used in the other methods of this class.
 Args:
 index: A tuple representing the index of the entry in each slice that
 is 1.0. Note that len(index) must be equal to the number of elements of
 the ``ggn_factor_inner_shape`` tensor minus one.
 Returns:
 The vector right-multiplied by B^T. Will be of the same shape(s) as the
 ``inputs`` property.
 """
 return utils.scalar_mul(
 self.multiply_ggn_factor_replicated_one_hot_unweighted(index),
 jnp.sqrt(self.weight))
 @abc.abstractmethod
 def multiply_ggn_factor_replicated_one_hot_unweighted(
 self,
 index: Sequence[int],
 ) -> Tuple[chex.Array, ...]:
 """Same as :func:`~LossFunction.multiply_ggn_factor_replicated_one_hot`, disregarding the relative weight."""
 @property
 @abc.abstractmethod
 def ggn_factor_inner_shape(self) -> chex.Shape:
 """The shape of the array returned by `self.multiply_ggn_factor`."""
class NegativeLogProbLoss(LossFunction):
 """Base class for loss functions that represent negative log-probability."""
 @property
 def inputs(self) -> Tuple[chex.Array, ...]:
 return self.params
 @property
 @abc.abstractmethod
 def params(self) -> Tuple[chex.Array, ...]:
 """Parameters to the underlying distribution."""
 def multiply_fisher(
 self,
 vector: Sequence[chex.Array],
 ) -> Tuple[chex.Array, ...]:
 """Right-multiplies a vector by the Fisher.
 Args:
 vector: The vector to multiply. Must have the same shape(s) as
 ``self.inputs``.
 Returns:
 The vector right-multiplied by the Fisher. Will have the same shape(s)
 as ``self.inputs``.
 """
 return utils.scalar_mul(
 self.multiply_fisher_unweighted(vector), self.weight)
 @abc.abstractmethod
 def multiply_fisher_unweighted(
 self,
 vector: Sequence[chex.Array],
 ) -> Tuple[chex.Array, ...]:
 """Same as :func:`~LossFunction.multiply_fisher`, disregarding the relative weight."""
 def multiply_fisher_factor(
 self,
 vector: chex.Array,
 ) -> Tuple[chex.Array, ...]:
 """Right-multiplies a vector by a factor B of the Fisher.
 Here the Fisher is the Fisher information matrix (i.e. expected outer-
 product of gradients) with respect to the parameters of the underlying
 probability distribution (whose log-prob defines the loss). Typically this
 will be block-diagonal across different cases in the batch, since the
 distribution is usually (but not always) | |
| 
== 'macro', the average f-beta over all classes is given.
 * When ``average`` == `weighted`, the average f-beta over all classes, weighted by the support of each true class, is given.
 :Example:
 Assume we have a table named 'predicted' as follows:
 ======== ===================
 label prediction_result
 ======== ===================
 0 1
 1 2
 2 1
 1 1
 1 0
 2 2
 ======== ===================
 Different options of ``average`` parameter outputs different values:
.. code-block:: python
 >>> fbeta_score(predicted, 'label', average=None, beta=0.5)
 array([ 0. , 0.33333333, 0.5 ])
 >>> fbeta_score(predicted, 'label', average='macro', beta=0.5)
 0.27
 >>> fbeta_score(predicted, 'label', average='micro', beta=0.5)
 0.33
 >>> fbeta_score(predicted, 'label', average='weighted', beta=0.5)
 0.33
 """
 if not col_pred:
 col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
 mat, label_list = _run_cm_node(df, col_true, col_pred)
 class_dict = dict((label, idx) for idx, label in enumerate(label_list))
 tps = np.diag(mat)
 pred_count = np.sum(mat, axis=0)
 supp_count = np.sum(mat, axis=1)
 beta2 = beta ** 2
 precision = tps * 1.0 / pred_count
 recall = tps * 1.0 / supp_count
 ppr = precision * beta2 + recall
 ppr[ppr == 0] = 1e-6
 fbeta = (1 + beta2) * precision * recall / ppr
 if average is None:
 return fbeta
 elif average == 'binary':
 class_idx = class_dict[pos_label]
 return fbeta[class_idx]
 elif average == 'micro':
 g_precision = np.sum(tps) * 1.0 / np.sum(pred_count)
 g_recall = np.sum(tps) * 1.0 / np.sum(supp_count)
 return (1 + beta2) * g_precision * g_recall / (beta2 * g_precision + g_recall)
 elif average == 'macro':
 return np.mean(fbeta)
 elif average == 'weighted':
 return sum(fbeta * supp_count) / sum(supp_count)
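# Worked check of the doctest above (illustrative): for the example table the
# confusion matrix gives tps = [0, 1, 1], pred_count = [1, 3, 2] and
# supp_count = [1, 3, 2], so precision = recall = [0, 1/3, 1/2].  With
# beta = 0.5 (beta2 = 0.25), (1 + beta2) * p * r / (beta2 * p + r) evaluates
# to [0.0, 0.333..., 0.5] per class, which is the ``average=None`` output.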
@metrics_result(_run_roc_node)
def f1_score(df, col_true=None, col_pred='prediction_result', pos_label=1, average=None):
 r"""
 Compute f-1 score of a predicted data set. f-1 is defined as
 .. math::
 \frac{2 \cdot precision \cdot recall}{precision + recall}
 :Parameters:
 - **df** - predicted data frame
 - **col_true** - column name of true label
 - **col_pred** - column name of predicted label, 'prediction_result' by default.
 - **pos_label** - denote the desired class label when ``average`` == `binary`
 - **average** - denote the method to compute average.
 :Returns:
 F-1 score
 :Return type:
 float | numpy.array[float]
 The parameter ``average`` controls the behavior of the function.
 * When ``average`` == None (by default), f-1 of every class is given as a list.
 * When ``average`` == 'binary', f-1 of class specified in ``pos_label`` is given.
 * When ``average`` == 'micro', f-1 of overall precision and recall is given, where overall precision and recall are computed in micro-average mode.
 * When ``average`` == 'macro', the average f-1 over all classes is given.
 * When ``average`` == `weighted`, the average f-1 over all classes, weighted by the support of each true class, is given.
 :Example:
 Assume we have a table named 'predicted' as follows:
 ======== ===================
 label prediction_result
 ======== ===================
 0 1
 1 2
 2 1
 1 1
 1 0
 2 2
 ======== ===================
 Different options of ``average`` parameter outputs different values:
.. code-block:: python
 >>> f1_score(predicted, 'label', average=None)
 array([ 0. , 0.33333333, 0.5 ])
 >>> f1_score(predicted, 'label', average='macro')
 0.27
 >>> f1_score(predicted, 'label', average='micro')
 0.33
 >>> f1_score(predicted, 'label', average='weighted')
 0.33
 """
 if not col_pred:
 col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
 return fbeta_score(df, col_true, col_pred, pos_label=pos_label, average=average)
@metrics_result(_run_roc_node)
def roc_curve(df, col_true=None, col_pred=None, col_scores=None, pos_label=1):
 r"""
 Compute true positive rate (TPR), false positive rate (FPR) and threshold from predicted data set.
 Note that this method will trigger the defined flow to execute.
 :param df: predicted data frame
 :type df: DataFrame
 :param pos_label: positive label
 :type pos_label: str
 :param col_true: true column
 :type col_true: str
 :param col_pred: predicted column, 'prediction_result' if absent.
 :type col_pred: str
 :param col_scores: score column, 'prediction_score' if absent.
 :type col_scores: str
 :return: False positive rate, true positive rate and threshold, in numpy array format.
 :Example:
 >>> import matplotlib.pyplot as plt
 >>> fpr, tpr, thresh = roc_curve(predicted, "class")
 >>> plt.plot(fpr, tpr)
 """
 if not col_pred:
 col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
 if not col_scores:
 col_scores = get_field_name_by_role(df, FieldRole.PREDICTED_SCORE)
 thresh, tp, fn, tn, fp = _run_roc_node(df, pos_label, col_true, col_pred, col_scores)
 if np is not None:
 tpr = tp * 1.0 / (tp + fn)
 fpr = fp * 1.0 / (fp + tn)
 else:
 tpr = [tp[i] * 1.0 / (tp[i] + fn[i]) for i in range(len(tp))]
 fpr = [fp[i] * 1.0 / (fp[i] + tn[i]) for i in range(len(fp))]
 roc_result = namedtuple('ROCResult', 'fpr tpr thresh')
 return roc_result(fpr=fpr, tpr=tpr, thresh=thresh)
@metrics_result(_run_roc_node)
def gain_chart(df, col_true=None, col_pred=None, col_scores=None, pos_label=1):
 r"""
 Compute positive proportion, true positive rate (TPR) and threshold from predicted data set. The trace can be plotted as a cumulative gain chart.
 Note that this method will trigger the defined flow to execute.
 :param df: predicted data frame
 :type df: DataFrame
 :param pos_label: positive label
 :type pos_label: str
 :param col_true: true column
 :type col_true: str
 :param col_pred: predicted column, 'prediction_result' if absent.
 :type col_pred: str
 :param col_scores: score column, 'prediction_score' if absent.
 :type col_scores: str
 :return: positive proportion, true positive rate and threshold, in numpy array format.
 :Example:
 >>> import matplotlib.pyplot as plt
 >>> depth, tpr, thresh = gain_chart(predicted)
 >>> plt.plot(depth, tpr)
 """
 if not col_pred:
 col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
 if not col_scores:
 col_scores = get_field_name_by_role(df, FieldRole.PREDICTED_SCORE)
 thresh, tp, fn, tn, fp = _run_roc_node(df, pos_label, col_true, col_pred, col_scores)
 depth = (tp + fp) * 1.0 / (tp + fp + tn + fn)
 tpr = tp * 1.0 / (tp + fn)
 gain_result = namedtuple('GainChartResult', 'depth tpr thresh')
 return gain_result(depth=depth, tpr=tpr, thresh=thresh)
@metrics_result(_run_roc_node)
def lift_chart(df, col_true=None, col_pred=None, col_scores=None, pos_label=1):
 r"""
 Compute lift value, true positive rate (TPR) and threshold from predicted data set.
 Note that this method will trigger the defined flow to execute.
 :param df: predicted data frame
 :type df: DataFrame
 :param pos_label: positive label
 :type pos_label: str
 :param col_true: true column
 :type col_true: str
 :param col_pred: predicted column, 'prediction_result' if absent.
 :type col_pred: str
 :param col_scores: score column, 'prediction_score' if absent.
 :type col_scores: str
 :return: lift value, true positive rate and threshold, in numpy array format.
 :Example:
 >>> import matplotlib.pyplot as plt
 >>> depth, lift, thresh = lift_chart(predicted)
 >>> plt.plot(depth, lift)
 """
 if not col_pred:
 col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
 if not col_scores:
 col_scores = get_field_name_by_role(df, FieldRole.PREDICTED_SCORE)
 thresh, tp, fn, tn, fp = _run_roc_node(df, pos_label, col_true, col_pred, col_scores)
 depth = (tp + fp) * 1.0 / (tp + fp + tn + fn)
 tpr = tp * 1.0 / (tp + fn)
 lift = tpr / depth
 lift_result = namedtuple('LiftResult', 'depth lift thresh')
 return lift_result(depth=depth, lift=lift, thresh=thresh)
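# Illustrative reading of the lift value (not from the original docs): if at
# some threshold depth = 0.2 (the top 20% of scored rows) and tpr = 0.6, then
# lift = 0.6 / 0.2 = 3.0, i.e. the model captures three times as many
# positives in that slice as a random ranking would.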
def auc(tpr, fpr):
 """
 Calculate AUC value from true positive rate (TPR) and false positive rate (FPR)\
 with trapezoidal rule.
 Note that calculation on data sets should use ``roc_auc_score`` instead.
 :param tpr: True positive rate array
 :param fpr: False positive rate array
 :return: AUC value
 :rtype: float
 """
 return abs(np.trapz(tpr, fpr))
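# Hedged usage sketch for auc(): the arrays below are made up; np.trapz integrates tpr
# over fpr with the trapezoidal rule, and abs() guards against descending-ordered input.
# >>> tpr = np.array([0.0, 0.6, 0.8, 1.0])
# >>> fpr = np.array([0.0, 0.1, 0.4, 1.0])
# >>> auc(tpr, fpr) # ~0.78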
@metrics_result(_run_roc_node)
def roc_auc_score(df, col_true=None, col_pred=None, col_scores=None, pos_label=1):
 """
 Compute Area Under the Curve (AUC) from prediction scores with trapezoidal rule.
 Note that this method will trigger the defined flow to execute.
 :param df: predicted data frame
 :type df: DataFrame
 :param pos_label: positive label
 :type pos_label: str
 :param col_true: true column
 :type col_true: str
 :param col_pred: predicted column, 'prediction_result' if absent.
 :type col_pred: str
 :param col_scores: score column, 'prediction_score' if absent.
 :type col_scores: str
 :return: AUC value
 :rtype: float
 """
 if not col_pred:
 col_pred = get_field_name_by_role(df, FieldRole.PREDICTED_CLASS)
 if not col_scores:
 col_scores = get_field_name_by_role(df, FieldRole.PREDICTED_SCORE)
 thresh, tp, fn, tn, fp = _run_roc_node(df, pos_label, col_true, col_pred, col_scores)
 tpr = tp * 1.0 / (tp + fn)
 fpr = fp * 1.0 / (fp + tn)
 return auc(tpr, fpr)
@metrics_result(_run_roc_node)
def precision_recall_curve(df, col_true=None, col_pred=None, col_scores=None, pos_label=1):
 """
 Compute precision and recall value with different thresholds. These precision and recall\
 values can be used to plot a precision-recall curve.
 Note that this method will trigger the defined flow to execute.
 :param df: predicted data frame
 :type df: DataFrame
 :param pos_label: positive label
 :type pos_label: str
 :param col_true: true column
 :type col_true: str
 :param col_pred: predicted column, 'prediction_result' if absent.
 :type col_pred: str
 :param col_scores: score column, 'prediction_score' if absent.
 :type col_scores: str
 :return: precision, recall and threshold, in numpy arrays.
 """
 if not col_pred:
 | |
| 
	<filename>lib/kinematics/HTM.py
# Access to parent folder to get its files
import sys, os
from pandas import array
sys.path.append(sys.path[0].replace(r'/lib/kinematics', r''))
# Libraries
import numpy as np
from lib.movements.HTM import *
from lib.dynamics.Solver import *
from sympy import *
def forwardHTM(robot : object, symbolic = False):
 """Using Homogeneous Transformation Matrices, this function computes forward kinematics of a serial robot given joints positions in radians. Serial robot's kinematic parameters have to be set before using this function
 Args:
 robot (Serial): serial robot (this won't work with other type of robots)
 symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.
 Returns:
 framesHTM (list): Homogeneous Transformation Matrices with frames' poses (numerical or symbolical)
 """
 # Initial conditions
 framesHTM = []
 if symbolic:
 
 # Get Denavit - Hartenberg Parameters Matrix
 DH = robot.symbolicDHParameters
 
 else:
 
 # Update Denavit - Hartenberg Parameters Matrix
 robot.denavitHartenberg()
 
 # Get Denavit - Hartenberg Matrix
 DH = robot.dhParameters
 # Create Homogeneous Transformation Matrix for Inertial Frame
 fkHTM = eye(4) if symbolic else np.identity(4)
 
 # Iteration through all the rows in Denavit - Hartenberg Matrix
 for frame in range(DH.rows) if symbolic else DH:
 
 # Operates matrices: Rz * Tz * Tx * Rx
 fkHTM = trigsimp(fkHTM * rz(DH[frame, 0], symbolic) * tz(DH[frame, 1], symbolic) * tx(DH[frame, 2], symbolic) * rx(DH[frame, 3], symbolic)) if symbolic else fkHTM.dot(rz(frame[0]).dot(tz(frame[1])).dot(tx(frame[2])).dot(rx(frame[3])))
 # Append each calculated Homogeneous Transformation Matrix
 framesHTM.append(nsimplify(fkHTM.evalf(), tolerance = 1e-10) if symbolic else fkHTM)
 return framesHTM
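# Hedged illustrative sketch (not part of this library; names below are hypothetical): one
# Denavit-Hartenberg row (theta, d, a, alpha) composed as Rz(theta) * Tz(d) * Tx(a) * Rx(alpha)
# in plain NumPy, mirroring the product accumulated inside forwardHTM above.
def _dh_step_example(theta, d, a, alpha):
 ct, st = np.cos(theta), np.sin(theta)
 ca, sa = np.cos(alpha), np.sin(alpha)
 # Closed form of Rz(theta).dot(Tz(d)).dot(Tx(a)).dot(Rx(alpha))
 return np.array([[ct, -st * ca, st * sa, a * ct],
 [st, ct * ca, -ct * sa, a * st],
 [0.0, sa, ca, d],
 [0.0, 0.0, 0.0, 1.0]])
# >>> fk = np.identity(4)
# >>> for row in [(0.3, 0.1, 0.0, np.pi / 2), (0.7, 0.0, 0.4, 0.0)]: # hypothetical DH rows
# ... fk = fk.dot(_dh_step_example(*row))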
def forwardCOMHTM(robot : object, symbolic = False):
 """Using Homogeneous Transformation Matrices, this function computes forward kinematics of a serial robot's centers of mass given joints positions in radians. Serial robot's kinematic parameters have to be set before using this function
 Args:
 robot (Serial): serial robot (this won't work with other type of robots)
 symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.
 Returns:
 framesHTM (list): Homogeneous Transformation Matrices with COMs' poses (numerical or symbolical)
 """
 
 # Calculate forward kinematics
 framesHTM = forwardHTM(robot, symbolic)
 # Initial conditions
 framesCOMHTM = [eye(4) if symbolic else np.identity(4)]
 
 if symbolic:
 
 # Get Denavit - Hartenberg Parameters Matrix
 comDH = robot.symbolicDHParametersCOM
 
 else:
 
 # Update Denavit - Hartenberg Parameters Matrix
 robot.denavitHartenbergCOM()
 
 # Get Denavit - Hartenberg Matrix
 comDH = robot.dhParametersCOM
 
 # Iteration through all the Centers of Mass
 for i in range(robot.symbolicCOMs.shape[0]):
 
 # Check where is the current Center of Mass
 rowCOM, column = robot.whereIsTheCOM(COM = i + 1)
 
 # Center of Mass Homogeneous Transformation Matrix 
 COM = rz(comDH[rowCOM, 0] if column >= 0 else 0, symbolic) * tz(comDH[rowCOM, 1] if column >= 1 else 0, symbolic) * tx(comDH[rowCOM, 2] if column >= 2 else 0, symbolic) * rx(comDH[rowCOM, 3] if column >= 3 else 0, symbolic) if symbolic else rz(comDH[rowCOM, 0] if column >= 0 else 0).dot(tz(comDH[rowCOM, 1] if column >= 1 else 0)).dot(tx(comDH[rowCOM, 2] if column >= 2 else 0)).dot(rx(comDH[rowCOM, 3] if column >= 3 else 0))
 
 # Forward kinematics to Center of Mass
 fkCOMHTM = trigsimp(framesHTM[rowCOM - 1] * COM) if symbolic else framesHTM[rowCOM - 1].dot(COM)
 
 # Append results
 framesCOMHTM.append(nsimplify(fkCOMHTM.evalf(), tolerance = 1e-10) if symbolic else fkCOMHTM)
 
 return framesCOMHTM
def axisAngle(H : np.array, symbolic = False):
 """This function computes the axis - angle vector «X» using the Homogeneous Transformation Matrix of a reference frame
 Args:
 H (np.array): Homogeneous Transformation Matrix (numerical)
 symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.
 Returns:
 X (NumPy Array): Axis - Angle vector (numerical)
 X (SymPy Matrix): Axis - Angle vector (symbolical)
 """
 
 # Calculate angle of rotation
 theta = acos((H[0 : 3, 0 : 3].trace() - 1) / 2) if symbolic else np.arccos((np.trace(H[0 : 3, 0 : 3]) - 1)/2)
 
 # Calculate axis of rotation
 n = (1 / (2 * sin(theta))) * Matrix([[H[2, 1] - H[1, 2]],
 [H[0, 2] - H[2, 0]],
 [H[1 ,0] - H[0, 1]]]) if symbolic else (1/(2 * np.sin(theta))) * np.array([[H[2, 1] - H[1, 2]],
 [H[0, 2] - H[2, 0]],
 [H[1 ,0] - H[0, 1]]])
 
 # Append position and orientation in one single vector
 X = H[0 : 3, 3].row_insert(3, trigsimp(theta * n)) if symbolic else np.append(H[0 : 3, 3], theta * n)
 
 return nsimplify(X.evalf(), tolerance = 1e-10) if symbolic else X.reshape((6, 1))
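# Hedged usage sketch for axisAngle(), assuming a purely numerical 4x4 HTM whose rotation
# block is a 90-degree turn about z (position values below are made up):
# >>> H = np.array([[0., -1., 0., 0.5],
# ... [1., 0., 0., 0.2],
# ... [0., 0., 1., 0.7],
# ... [0., 0., 0., 1.0]])
# >>> axisAngle(H) # position (0.5, 0.2, 0.7) followed by theta * n = (0, 0, pi/2)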
def geometricJacobian(robot : object, symbolic = False):
 """Using Homogeneous Transformation Matrices, this function computes Geometric Jacobian Matrix of a serial robot given joints positions in radians. Serial robot's kinematic parameters have to be set before using this function
 Args:
 robot (Serial): serial robot (this won't work with other type of robots)
 symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.
 Returns:
 J (np.array): Geometric Jacobian Matrix (numerical)
 J (SymPy Matrix): Geometric Jacobian Matrix (symbolical)
 """
 # Get number of joints (generalized coordinates)
 n = robot.jointsPositions.shape[0]
 
 # Calculate forward kinematics
 fkHTM = forwardHTM(robot, symbolic)
 
 # Initializes jacobian matrix with zeros
 J = zeros(6, n) if symbolic else np.zeros((6, n))
 
 # Iterates through all columns (generalized coordinates)
 for j in range(n):
 
 # Check in what row of Denavit Hartenberg Parameters Matrix is the current joint (the sum is because of the way Python indexes arrays)
 row, column = robot.whereIsTheJoint(j + 1)
 
 # Get row where joint is stored
 frame = robot.symbolicDHParameters[4 * (row) : 4 * (row + 1)] if symbolic else robot.dhParameters[row, :]
 
 # Get pose of the joint
 H = fkHTM[row - 1] * rz(frame[0] if column >= 0 else 0, symbolic) * tz(frame[1] if column >= 1 else 0, symbolic) * tx(frame[2] if column >= 2 else 0, symbolic) * rx(frame[3] if column >= 3 else 0, symbolic) if symbolic else fkHTM[row - 1].dot(rz(frame[0] if column >= 0 else 0)).dot(tz(frame[1] if column >= 1 else 0)).dot(tx(frame[2] if column >= 2 else 0)).dot(rx(frame[3] if column >= 3 else 0))
 
 # Get axis of actuation of current joint
 z = H[0: 3, 2]
 
 # Calculate distance between end - effector and current joint
 r = fkHTM[-1][0: 3, 3] - H[0: 3, 3]
 
 # Calculate axes of actuation of Center of Mass or End - Effector caused by current joint
 J[0: 3, j] = nsimplify(trigsimp(z.cross(r)).evalf(), tolerance = 1e-10) if symbolic else np.cross(z, r)
 
 # Set axis of actuation
 J[3: 6, j] = nsimplify(trigsimp(z).evalf(), tolerance = 1e-10) if symbolic else z
 
 return J
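# Hedged illustrative sketch (standalone, hypothetical helper): the geometric Jacobian
# column contributed by a single revolute joint, built exactly as in the loop above:
# linear part z x (p_effector - p_joint), angular part z.
def _jacobian_column_example(H_joint, H_effector):
 z = H_joint[0: 3, 2] # joint axis of actuation
 r = H_effector[0: 3, 3] - H_joint[0: 3, 3] # vector from joint to end-effector
 return np.concatenate((np.cross(z, r), z)) # [linear; angular] contribution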
def geometricJacobianDerivative(robot : object, dq = 0.001, symbolic = False):
 """Using Homogeneous Transformation Matrices, this function computes the derivative of Geometric Jacobian Matrix of a serial robot given joints positions in radians. Serial robot's kinematic parameters have to be set before using this function
 Args:
 robot (Serial): serial robot (this won't work with other type of robots)
 dq (float, optional): step size for numerical derivative. Defaults to 0.001.
 symbolic (bool, optional): used to calculate symbolic equations. Defaults to False.
 Returns:
 dJ (np.array): Derivative of Geometric Jacobian Matrix (numerical)
 dJ (SymPy Matrix): Derivative of Geometric Jacobian Matrix (symbolical)
 """
 # Get number of joints (generalized coordinates)
 n = robot.jointsPositions.shape[0]
 
 # Auxiliary variable to keep the original joint positions
 q = robot.jointsPositions.copy()
 
 # Initializes auxiliary derivative matrix with zeros
 V = zeros(6, n) if symbolic else np.zeros((6, n))
 
 # Derivative of Jacobian Matrix
 dJ = zeros(6, n) if symbolic else np.zeros((6, n))
 
 # Calculate jacobian matrix
 J = geometricJacobian(robot, symbolic)
 
 # Iterates through all columns (generalized coordinates)
 for j in range(n):
 
 # If symbolic calculation was requested
 if symbolic:
 
 # Differentiates current column with respect to joints positions
 V = J[:, j].jacobian(robot.qSymbolic)
 # Else, calculate derivative numerically
 else:
 
 # Iterates through all the generalized coordinates to calculate the derivative of current column
 for k in range(n):
 
 # Set increment to current generalized coordinate: q[k] = q[k] + dq
 robot.jointsPositions[k] += dq
 
 # Calculate geometric jacobian matrix with current step size
 Ji = geometricJacobian(robot, symbolic)
 
 # Calculate | |
| 
	
 `pdf` = if not None, figures will be appended to the existing PDF 
 
 Returns:
 `D_params` = dictionary of fit parameters 
 
 If `with_delay=True`, returns `D_params, delays`, where `delays` is a dictionary of fitted delays with the same structure as `D_params`. In this case, delay parameters are removed from `D_params`. 
 """ 
 
 if not return_plots:
 self.canvas = None 
 
 D_params, D_res = self.do_fitting(with_delay=with_delay, third=third, plot_every=plot_every)
 tau_f, tau_s, amp_f = self.extract_fit_params(D_params)
 
 if with_delay:
 self.D_params, self.delays = self.return_fit_results(D_params, with_delay=with_delay,
 third=third)
 else:
 self.D_params = self.return_fit_results(D_params, with_delay=with_delay, third=third)
 if plot_results: 
 canvas = self.create_figure(both=True)
 self.plot_traces(D_params, D_res, canvas=canvas[:2])
 self.plot_params(D_params, tau_f, tau_s, amp_f, self.delays, canvas=canvas[2:],
 third=third, with_delay=with_delay)
 
 if return_plots:
 self.canvas = canvas 
 
 if pdf is not None:
 for i in range(0, 4, 2):
 pdf.savefig(canvas[i])
 
 if save_path is not None:
 canvas[0].savefig(save_path + self.fname + "_traces.png", dpi=300, bbox_inches='tight')
 canvas[2].savefig(save_path + self.fname + "_params.png", dpi=300, bbox_inches='tight')
 print("Figures successfully saved at < %s >" % (save_path + self.fname + "..."))
 
 if show_plots:
 plt.show()
 plt.close()
 return self.D_params, self.delays 
 
 def create_figure(self, both=True, traces=False, params=False):
 """
 Create figure for plotting fit results.
 `both` = figures and axes for both individual traces and fit parameters
 If `both` is False, 
 `traces` = figure for only individual traces 
 `params` = figure for only fit parameters 
 """
 if both or traces:
 # number of rows and columns for plotting individual traces 
 N = self.N 
 if 2 < N < 5:
 d = (2, 2) 
 elif N > 4:
 # use the smallest near-square grid that fits all N traces (e.g. N = 7 -> 3 x 3)
 d = int(N**0.5)
 if d*d < N:
 d += 1
 d = (d-1, d) if d*(d-1) >= N else (d, d)
 else:
 d = (1, 2) 
 
 fe, axe = plt.subplots(d[0], d[1], squeeze=False, figsize=(14,6), constrained_layout=True)
 
 if both or params:
 #plots for time constants, parameters, and delay 
 fr = plt.figure(figsize=(10,6), constrained_layout=True)
 gs = fr.add_gridspec(nrows=7, ncols=2)
 axr = [fr.add_subplot(gs[:4,:]), fr.add_subplot(gs[4:,0]), fr.add_subplot(gs[4:,1])]
 
 axr[0].set_title(r"Rates, $\tau^{-1}$ (1/s)")
 axr[1].set_title(r"$\frac{A_f}{A_f + A_s}$ for $2^o$")
 axr[2].set_title("Delay (ms)")
 
 axr[0].set_ylabel(r"$\tau_{1}^{-1}$" + "\n " + r"$\tau_{f}^{-1}$", 
 labelpad=15, fontsize=12, rotation=0)
 
 axr_slow = axr[0].twinx() 
 axr_slow.set_ylabel(r"$\tau_{s}^{-1}$", labelpad=15, fontsize=12, rotation=0)
 
 for a in axr:
 a.set_xlabel("Voltage (mV)")
 
 if both:
 return fe, axe, fr, axr, axr_slow
 elif traces:
 return fe, axe 
 elif params:
 return fr, axr, axr_slow 
 
 def plot_traces(self, D_params, D_res, canvas=None):
 """
 Plot individual traces overlaid with exponential fits 
 
 `D_params` = dictionary of fit parameters, {i : {1 : [..], 2 : [..], 3: [..]} }
 e.g. D_params[i][1] indexes the monoexponential fit of the ith sweep 
 
 `D_res` = dictionary of fit residuals, follows the same structure as `D_params`
 
 If `canvas` is None, then new figures are made using `self.create_figure()`
 Else, `canvas` contains `[fig, ax, fig, ax]` which are the figure and axes of individual traces and fit parameters, respectively.
 """
 
 if canvas is None:
 fe, axe = self.create_figure(both=False, traces=True, params=False)
 else:
 if len(canvas) == 2:
 fe, axe = canvas 
 else:
 raise Exception("`canvas` must be of length 2, holding [figure, ax]")
 
 # dimensions of axis 
 d = axe.shape 
 
 h = 0 
 for i in range(d[0]):
 for j in range(d[1]): 
 
 # clear unused plots 
 if h not in D_params.keys():
 axe[i,j].axis('off')
 h += 1 
 continue 
 
 # plot data 
 y = self.df.iloc[:,h].dropna()
 # time for simulation 
 x = y.index.values 
 # time for plotting 
 ts = y.index.values * 1e-3 
 # plot data 
 axe[i,j].plot(ts, y, c='white', lw=3, alpha=0.5)
 
 # number of parameter sets fit for ith sweep 
 npar = len(D_params[h].keys()) 
 
 # simulate and plot exp1 
 dt, e1 = self.get_sim(D_params[h][1], self.exp1, x)
 
 # indicate delay with fitting exp1 
 lab = exp_label(1, dt/self.khz, D_res[h][0])
 # lab = "Exp1 = %d (%.1e)" % (dt, D_res[h][0])
 if dt > 0:
 axe[i,j].plot(ts[dt:], e1, c='r', lw=2, label=lab)
 axe[i,j].axvline(ts[dt], c='r', lw=2, ls='--')
 else:
 axe[i,j].plot(ts, e1, c='r', lw=2, label=lab)
 
 # if 2 or more parameter sets, then there are higher order fits 
 if npar >= 2:
 dt, e2 = self.get_sim(D_params[h][2], self.exp2, x)
 
 if dt is None:
 h += 1 
 continue 
 
 lab = exp_label(2, dt/self.khz, D_res[h][1])
 # "Exp2 = %d (%.1e)" % (dt, D_res[h][1])
 if dt > 0:
 axe[i,j].plot(ts[dt:], e2, c='lightblue', lw=2, label=lab)
 axe[i,j].axvline(ts[dt], c='lightblue', lw=2, ls='--') 
 else:
 axe[i,j].plot(ts, e2, c='lightblue', lw=2, label=lab)
 
 if npar == 3:
 # no delay for triple exponential fits, so ignore `dt` 
 dt, e3 = self.get_sim(D_params[h][3], self.exp3, x)
 
 if dt is None:
 h += 1 
 continue 
 
 lab = exp_label(3, 0, D_res[h][2])
 # "Exp3 (%.1e)" % D_res[h][2]
 axe[i,j].plot(ts, e3, c='gray', lw=2, label=lab)
 
 # title each subplot with test voltage 
 axe[i,j].set_title(self.volts[h]) 
 
 # ylabel in first column of plots 
 if j == 0:
 axe[i,j].set_ylabel("Current (pA)")
 # xlabel in bottom row of plots 
 if i == (d[0] - 1):
 axe[i,j].set_xlabel("Time (s)")
 # legend 
 axe[i,j].legend(loc='center right', fontsize=10)
 
 h += 1 
 
 def plot_params(self, D_params, tau_f, tau_s, amp_f, delays, 
 with_delay=True, third=False, canvas=None):
 """
 Plot parameters from exponential fitting
 
 `D_params` = dictionary of fit parameters, see docstring of `self.plot_traces` for structure 
 
 The following are lists of [[2, 3], [2, 3], ...], where [2, 3] represent given parameters for 2nd and 3rd order exponentials, respectively 
 `tau_f` = fast taus 
 `tau_s` = slow taus 
 `amp_f` = fast amplitude / sum of amplitudes 
 `delays` = delays, structured as delays for each order of fit, for each sweep
 e.g. [[delay1, delay2, ...] [...]]
 `with_delay` = whether delay is used 
 If `canvas` is None, new figure is made using `self.create_figure(both=False, params=True)`
 """
 
 if canvas is None:
 fr, axr, axr_slow = self.create_figure(both=False, traces=False, params=True)
 else:
 if len(canvas) == 3:
 fr, axr, axr_slow = canvas 
 else:
 raise Exception("`canvas` must be of length 3, holding [figure, axs, axs_slow]")
 
 # elements of `tau_f`, `tau_s`, and `amp_f` are lists for all parameter sets of given trace 
 
 # taus of exp2 
 v, tau_f2, tau_s2 = sort_lists(
 self.volts[:len(tau_f)], 
 [[1000/a[0] for a in tau_f], 
 [1000/a[0] for a in tau_s]]
 )
 # fast tau 
 axr[0].plot(v, tau_f2, marker='s', lw=0.5, label=r"$2^o$, $\tau_f$")
 # slow tau 
 axr_slow.plot(v, tau_s2, marker='s', fillstyle='none', lw=0.5, label=r"$2^o$, $\tau_s$")
 
 # taus of exp3 
 if third:
 v, tau_f3, tau_s3 = sort_lists(
 self.volts[:len(tau_f)],
 [[1000/a[1] for a in tau_f],
 [1000/a[1] for a in tau_s]]
 )
 
 axr[0].plot(v, tau_f3, marker='o', lw=0.5, label=r"$3^o$, $\tau_f$")
 axr_slow.plot(v, tau_s3, marker='o', fillstyle='none', lw=0.5, label=r"$3^o$, $\tau_s$")
 
 # fast amplitude ratio for exp3 
 # axr[1].plot(self.volts[:len(tau_f)], [a[1] for a in amp_f],
 # marker='o', label=r"Exp3")
 
 # exp1 tau 
 v, tau_1 = sort_lists(
 self.volts[:len(D_params.keys())], [1000/v[1]["tau1"] for v in D_params.values()]
 )
 axr[0].plot(v, tau_1, marker='x', lw=0.5, label=r"$1^o$, $\tau$")
 
 # fast amplitude ratio for exp2 
 v, amp_f = sort_lists(
 self.volts[:len(tau_f)], [a[0] for a in amp_f]
 )
 axr[1].plot(v, amp_f, marker='s', label=r"$2^o$")
 
 # delay for exp1 and exp2 
 if with_delay:
 for j in range(2):
 # select j-th order delay from `delays`
 dt = [x[j] for x in delays]
 
 # sort delays with test voltages 
 v, dt = sort_lists( self.volts[:self.N], dt)
 
 # marker for 2- vs 1-exp delay
 m = 'x' if (j == 1) else 's' 
 axr[2].plot(v, dt, marker=m, markersize=8, label="%d$^o$" % j)
 
 #get handles from both plots, then add legend to axr[0] 
 h_f, l_f = axr[0].get_legend_handles_labels()
 h_s, l_s = axr_slow.get_legend_handles_labels() 
 axr[0].legend(h_f + h_s, l_f + l_s, loc='upper center', ncol=3, framealpha=0.5)
 
 def return_plots(self):
 if self.canvas is None:
 raise Exception("`return_plots()` called, but `self.canvas = None`")
 else:
 return self.canvas 
 def return_fit_results(self, D_params, with_delay=True, third=False):
 """
 Convert values in `D_params` into list 
 """
 
 # convert lmfit-style dictionary of parameters to normal dictionary 
 # k1 = sweep #, k2 = order of exponential fit, v2 = lmfit parameters object 
 D_params = | |
| 
	<reponame>rt112000/CDM
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
import importlib
import json
from collections import OrderedDict
from typing import Any, Optional, TYPE_CHECKING
from cdm.enums import CdmObjectType
from cdm.utilities import AttributeResolutionDirectiveSet, CdmError, logger, ResolveOptions, StorageUtils
from cdm.enums import CdmLogCode
from cdm.utilities.string_utils import StringUtils
if TYPE_CHECKING:
 from cdm.objectmodel import CdmCorpusContext, CdmObject
 from cdm.utilities import CopyOptions, JObject
class PersistenceLayer:
 CDM_EXTENSION = '.cdm.json'
 FOLIO_EXTENSION = '.folio.cdm.json'
 MANIFEST_EXTENSION = '.manifest.cdm.json'
 MODEL_JSON_EXTENSION = 'model.json'
 CDM_FOLDER = 'CdmFolder'
 MODEL_JSON = 'ModelJson'
 SYMS = 'Syms'
 SYMS_DATABASES = 'databases.manifest.cdm.json'
 def __init__(self, corpus: 'CdmCorpusDefinition'):
 self._TAG = PersistenceLayer.__name__
 self._corpus = corpus
 self._registered_persistence_formats = OrderedDict() # type: Dictionary[str, object]
 self._is_registered_persistence_async = OrderedDict() # type: Dictionary[object, bool]
 @property
 def _ctx(self) -> 'CdmCorpusContext':
 return self._corpus.ctx
 @classmethod
 def from_data(cls, *args) -> 'CdmObject':
 """
 * @param args arguments passed to the persistence class.
 * @param objectType any of cdmObjectType.
 * @param persistence_type a type supported by the persistence layer. Can be any of PersistenceTypes.
 """
 arglist = list(args)
 persistence_type = arglist.pop()
 object_type = arglist.pop()
 return cls.fetch_persistence_class(object_type, persistence_type).from_data(*arglist)
 @classmethod
 def to_data(cls, instance: 'CdmObject', res_opt: 'ResolveOptions', copy_options: 'CopyOptions',
 persistence_type: str) -> 'JObject':
 """
 * @param instance the instance that is going to be serialized.
 * @param res_opt information about how to resolve the instance.
 * @param copy_options set of options to specify the output format.
 * @param persistence_type a type supported by the persistence layer. Can be any of PersistenceTypes.
 """
 return cls.fetch_persistence_class(instance.object_type, persistence_type).to_data(instance, res_opt,
 copy_options)
 async def _load_document_from_path_async(self, folder: 'CdmFolderDefinition', doc_name: str,
 doc_container: 'CdmDocumentDefinition',
 res_opt: Optional[ResolveOptions] = None) \
 -> 'CdmDocumentDefinition':
 # go get the doc
 doc_content = None # type: Optional[CdmDocumentDefinition]
 json_data = None
 fs_modified_time = None
 doc_path = folder._folder_path + doc_name
 adapter = self._ctx.corpus.storage.fetch_adapter(folder._namespace) # type: StorageAdapter
 try:
 if adapter.can_read():
 # log message used by navigator, do not change or remove
 logger.debug(self._ctx, self._TAG, self._load_document_from_path_async.__name__, doc_path,
 'request file: {}'.format(doc_path))
 json_data = await adapter.read_async(doc_path)
 # log message used by navigator, do not change or remove
 logger.debug(self._ctx, self._TAG, self._load_document_from_path_async.__name__, doc_path,
 'received file: {}'.format(doc_path))
 else:
 raise Exception('Storage Adapter is not enabled to read.')
 except Exception as e:
 # log message used by navigator, do not change or remove
 logger.debug(self._ctx, self._TAG, self._load_document_from_path_async.__name__, doc_path,
 'fail file: {}'.format(doc_path))
 # when shallow validation is enabled, log messages about being unable to find referenced documents as warnings instead of errors.
 if res_opt and res_opt.shallow_validation:
 logger.warning(self._ctx, self._TAG, PersistenceLayer._load_document_from_path_async.__name__, doc_path,
 CdmLogCode.WARN_PERSIST_FILE_READ_FAILURE, doc_path, folder._namespace, e)
 else:
 logger.error(self._ctx, self._TAG, self._load_document_from_path_async.__name__, doc_path,
 CdmLogCode.ERR_PERSIST_FILE_READ_FAILURE, doc_path, folder._namespace, e)
 return None
 try:
 fs_modified_time = await adapter.compute_last_modified_time_async(doc_path)
 except Exception as e:
 logger.warning(self._ctx, self._TAG, PersistenceLayer._load_document_from_path_async.__name__, doc_path,
 CdmLogCode.WARN_PERSIST_FILE_MOD_COMPUTE_FAILED, e)
 if not doc_name:
 logger.error(self._ctx, self._TAG, self._load_document_from_path_async.__name__, doc_path,
 CdmLogCode.ERR_PERSIST_NULL_DOC_NAME)
 return None
 doc_name_lower = doc_name.lower()
 # If loading a model.json file, check that it is named correctly.
 if doc_name_lower.endswith(self.MODEL_JSON_EXTENSION) and not doc_name.lower() == self.MODEL_JSON_EXTENSION:
 logger.error(self._ctx, self._TAG, self._load_document_from_path_async.__name__, doc_path,
 CdmLogCode.ERR_PERSIST_DOC_NAME_LOAD_FAILURE, doc_name, self.MODEL_JSON_EXTENSION)
 return None
 try:
 from cdm.persistence.syms import utils
 if utils.check_if_syms_adapter(adapter):
 from cdm.persistence.syms import ManifestDatabasesPersistence
 from cdm.persistence.syms.types import SymsDatabasesResponse
 if doc_name_lower == self.SYMS_DATABASES:
 from cdm.persistence.syms.models.query_artifacts_response import QueryArtifactsResponse
 databases = QueryArtifactsResponse()
 databases = databases.deserialize(json.loads(json_data))
 doc_content = ManifestDatabasesPersistence.from_object(self._ctx, doc_name, folder._namespace,
 folder._folder_path,
 databases)
 elif self.MANIFEST_EXTENSION in doc_name_lower:
 from cdm.persistence.syms import ManifestPersistence
 manifest_content = await utils.get_syms_model(adapter, json_data, doc_path)
 doc_content = ManifestPersistence.from_object(self._ctx, doc_name, folder._namespace,
 folder._folder_path,
 manifest_content)
 elif self.CDM_EXTENSION in doc_name_lower:
 from cdm.persistence.syms.models import TableEntity
 from cdm.persistence.syms import DocumentPersistence
 table = TableEntity(None, None).deserialize(json.loads(json_data))
 doc_content = DocumentPersistence.from_object(self._ctx, doc_name, folder._namespace,
 folder._folder_path,
 table)
 elif doc_name_lower.endswith(PersistenceLayer.MANIFEST_EXTENSION) or doc_name_lower.endswith(
 PersistenceLayer.FOLIO_EXTENSION):
 from cdm.persistence.cdmfolder import ManifestPersistence
 from cdm.persistence.cdmfolder.types import ManifestContent
 manifest = ManifestContent()
 manifest.decode(json_data)
 doc_content = ManifestPersistence.from_object(self._ctx, doc_name, folder._namespace, folder._folder_path,
 manifest)
 elif doc_name_lower.endswith(PersistenceLayer.MODEL_JSON_EXTENSION):
 from cdm.persistence.modeljson import ManifestPersistence
 from cdm.persistence.modeljson.types import Model
 model = Model()
 model.decode(json_data)
 doc_content = await ManifestPersistence.from_object(self._ctx, model, folder)
 elif doc_name_lower.endswith(PersistenceLayer.CDM_EXTENSION):
 from cdm.persistence.cdmfolder import DocumentPersistence
 from cdm.persistence.cdmfolder.types import DocumentContent
 document = DocumentContent()
 document.decode(json_data)
 doc_content = DocumentPersistence.from_object(self._ctx, doc_name, folder._namespace, folder._folder_path,
 document)
 else:
 # Could not find a registered persistence class to handle this document type.
 logger.error(self._ctx, self._TAG, self._load_document_from_path_async.__name__, doc_path,
 CdmLogCode.ERR_PERSIST_CLASS_MISSING, doc_name)
 return None
 except Exception as e:
 logger.error(self._ctx, self._TAG, self._load_document_from_path_async.__name__, doc_path,
 CdmLogCode.ERR_PERSIST_DOC_CONVERSION_FAILURE, doc_path, e)
 return None
 # add document to the folder, this sets all the folder/path things, caches name to content association and may trigger indexing on content
 if doc_content is not None:
 if doc_container:
 # there are situations where a previously loaded document must be re-loaded.
 # the end of that chain of work is here where the old version of the document has been removed from
 # the corpus and we have created a new document and loaded it from storage and after this call we will probably
 # add it to the corpus and index it, etc.
 # it would be really rude to just kill that old object and replace it with this replicant, especially because
 # the caller has no idea this happened. so... sigh ... instead of returning the new object return the one that
 # was just killed off but make it contain everything the new document loaded.
 doc_content = doc_content.copy(
 ResolveOptions(wrt_doc=doc_container, directives=self._ctx.corpus.default_resolution_directives),
 doc_container)
 folder.documents.append(doc_content, doc_name)
 doc_content._file_system_modified_time = fs_modified_time
 doc_content._is_dirty = False
 return doc_content
 @classmethod
 def fetch_persistence_class(cls, object_type: CdmObjectType, persistence_type: str) -> 'object':
 object_name = object_type.name.lower() # CdmObjectType[object_type]
 if object_name.endswith('def'):
 object_name = object_name[0:-4]
 elif object_name.endswith('ref'):
 object_name += 'erence'
 persistence_module_name = '{}_persistence'.format(object_name)
 persistence_class_name = ''.join([x.title() for x in persistence_module_name.split('_')])
 if persistence_class_name == 'ProjectionPersistence':
 # Projection persistence class is in a nested folder
 persistence_module = importlib.import_module(
 'cdm.persistence.{}.projections.{}'.format(persistence_type.lower(), persistence_module_name))
 else:
 persistence_module = importlib.import_module(
 'cdm.persistence.{}.{}'.format(persistence_type.lower(), persistence_module_name))
 PersistenceClass = getattr(persistence_module, persistence_class_name, None)
 if not PersistenceClass:
 raise CdmError('Persistence class for {} is not implemented in type {}.'.format(persistence_class_name,
 persistence_type))
 instance = PersistenceClass()
 return instance
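 # Illustrative mapping (hypothetical enum values): an object type whose enum name is
 # ENTITY_DEF lowercases to 'entity_def', drops the '_def' suffix to 'entity', becomes the
 # module name 'entity_persistence', and finally the class name 'EntityPersistence' inside
 # 'cdm.persistence.<persistence_type>'. Reference-type names instead get 'erence' appended,
 # e.g. ENTITY_REF -> 'entity_reference_persistence' -> 'EntityReferencePersistence'.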
 def _fetch_registered_persistence_format(self, doc_name: str) -> 'object':
 for registered_persistence_format in self._registered_persistence_formats:
 # find the persistence class to use for this document.
 if doc_name.lower().endswith(registered_persistence_format):
 return self._registered_persistence_formats[registered_persistence_format]
 return None
 async def _save_document_as_async(self, doc: 'CdmDocumentDefinition', options: 'CopyOptions', new_name: str,
 save_referenced: bool) -> bool:
 """a manifest or document can be saved with a new or exisitng name. This function on the corpus does all the actual work
 because the corpus knows about persistence types and about the storage adapters
 if saved with the same name, then consider this document 'clean' from changes. if saved with a back compat model or
 to a different name, then the source object is still 'dirty'
 an option will cause us to also save any linked documents."""
 # find out if the storage adapter is able to write.
 namespace = StorageUtils.split_namespace_path(new_name)[0]
 if not namespace:
 namespace = doc._namespace
 if not namespace:
 namespace = self._corpus.storage.default_namespace
 adapter = self._corpus.storage.fetch_adapter(namespace)
 if adapter is None:
 logger.error(self._ctx, self._TAG, self._save_document_as_async.__name__, doc.at_corpus_path,
 CdmLogCode.ERR_PERSIST_ADAPTER_NOT_FOUND_FOR_NAMESPACE, namespace)
 return False
 if not adapter.can_write():
 logger.error(self._ctx, self._TAG, self._save_document_as_async.__name__, doc.at_corpus_path,
 CdmLogCode.ERR_PERSIST_ADAPTER_WRITE_FAILURE, namespace)
 return False
 if not new_name:
 logger.error(self._ctx, self._TAG, self._save_document_as_async.__name__, doc.at_corpus_path,
 CdmLogCode.ERR_PERSIST_NULL_DOC_NAME)
 return False
 # what kind of document is requested?
 persistence_type = ''
 from cdm.persistence.syms import utils
 if utils.check_if_syms_adapter(adapter):
 if new_name == self.SYMS_DATABASES:
 logger.error(self._ctx, self._TAG, self._save_document_as_async.__name__, doc.at_corpus_path,
 CdmLogCode.ERR_PERSIST_SYMS_UNSUPPORTED_MANIFEST, new_name)
 return False
 elif not new_name.lower().endswith(self.MANIFEST_EXTENSION) and new_name.lower().endswith(self.CDM_EXTENSION):
 logger.error(self._ctx, self._TAG, self._save_document_as_async.__name__, doc.at_corpus_path,
 CdmLogCode.ERR_PERSIST_SYMS_UNSUPPORTED_CDM_CONVERSION, new_name)
 return False
 persistence_type = self.SYMS
 options.persistence_type_name = self.SYMS
 else:
 if new_name.lower().endswith(self.MODEL_JSON_EXTENSION):
 persistence_type = self.MODEL_JSON
 else:
 persistence_type = self.CDM_FOLDER
 if persistence_type == self.MODEL_JSON and new_name.lower() != self.MODEL_JSON_EXTENSION:
 logger.error(self._ctx, self._TAG, self._save_document_as_async.__name__, doc.at_corpus_path, CdmLogCode.ERR_PERSIST_FAILURE,
 new_name, self.MODEL_JSON_EXTENSION)
 return False
 # save the object into a json blob
 res_opt = {'wrt_doc': doc, 'directives': AttributeResolutionDirectiveSet()}
 persisted_doc = None
 try:
 if new_name.lower().endswith(PersistenceLayer.MODEL_JSON_EXTENSION) or new_name.lower().endswith(
 PersistenceLayer.MANIFEST_EXTENSION) or new_name.lower().endswith(PersistenceLayer.FOLIO_EXTENSION):
 if persistence_type == self.CDM_FOLDER:
 from cdm.persistence.cdmfolder import ManifestPersistence
 persisted_doc = ManifestPersistence.to_data(doc, res_opt, options)
 elif persistence_type == self.SYMS:
 from cdm.persistence.syms.manifest_persistence import ManifestPersistence
 persisted_doc = await ManifestPersistence.convert_manifest_to_syms(doc, adapter, new_name, res_opt, options)
 else:
 if new_name != self.MODEL_JSON_EXTENSION:
 logger.error(self._ctx, self._TAG, self._save_document_as_async.__name__, doc.at_corpus_path,
 CdmLogCode.ERR_PERSIST_FAILURE, new_name)
 return False
 from cdm.persistence.modeljson import ManifestPersistence
 persisted_doc = await ManifestPersistence.to_data(doc, res_opt, options)
 elif new_name.lower().endswith(PersistenceLayer.CDM_EXTENSION):
 if persistence_type == self.CDM_FOLDER:
 from cdm.persistence.cdmfolder import DocumentPersistence
 persisted_doc = DocumentPersistence.to_data(doc, res_opt, options)
 elif persistence_type == self.SYMS:
 from cdm.persistence.syms.document_persistence import DocumentPersistence
 persisted_doc = await DocumentPersistence.convert_doc_to_syms_table(self._ctx, doc, adapter, new_name, res_opt, options)
 else:
 # Could not find a registered persistence class to handle this document type.
 logger.error(self._ctx, self._TAG, self._save_document_as_async.__name__, doc.at_corpus_path,
 CdmLogCode.ERR_PERSIST_CLASS_MISSING, new_name)
 return False
 except Exception as e:
 logger.error(self._ctx, self._TAG, self._save_document_as_async.__name__, doc.at_corpus_path,
 CdmLogCode.ERR_PERSIST_FILE_PERSIST_ERROR, new_name, e)
 return False
 if not persisted_doc:
 logger.error(self._ctx, | |
| 
	ex_primary_nic_private_ipv4='10.0.0.1',
 ex_is_started=False,
 ex_disks=disks)
 def test_create_node_ipv4_gateway(self):
 rootPw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 node = self.driver.create_node(name='test2',
 image=image,
 auth=rootPw,
 ex_description='test2 node',
 ex_network_domain='fakenetworkdomain',
 ex_primary_nic_private_ipv4='10.0.0.1',
 ex_is_started=False,
 ex_ipv4_gateway='10.2.2.2')
 self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
 self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
 def test_create_node_network_domain_no_vlan_no_ipv4_fail(self):
 rootPw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 with self.assertRaises(ValueError):
 self.driver.create_node(name='test2',
 image=image,
 auth=rootPw,
 ex_description='test2 node',
 ex_network_domain='fake_network_domain',
 ex_is_started=False)
 def test_create_node_mcp2_additional_nics_legacy(self):
 rootPw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 additional_vlans = ['fakevlan1', 'fakevlan2']
 additional_ipv4 = ['10.0.0.2', '10.0.0.3']
 node = self.driver.create_node(
 name='test2',
 image=image,
 auth=rootPw,
 ex_description='test2 node',
 ex_network_domain='fakenetworkdomain',
 ex_primary_ipv4='10.0.0.1',
 ex_additional_nics_vlan=additional_vlans,
 ex_additional_nics_ipv4=additional_ipv4,
 ex_is_started=False)
 self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
 self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
 def test_create_node_bad_additional_nics_ipv4(self):
 rootPw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 with self.assertRaises(TypeError):
 self.driver.create_node(name='test2',
 image=image,
 auth=rootPw,
 ex_description='test2 node',
 ex_network_domain='fake_network_domain',
 ex_vlan='fake_vlan',
 ex_additional_nics_ipv4='badstring',
 ex_is_started=False)
 def test_create_node_additional_nics(self):
 root_pw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 nic1 = DimensionDataNic(vlan='fake_vlan',
 network_adapter_name='v1000')
 nic2 = DimensionDataNic(private_ip_v4='10.1.1.2',
 network_adapter_name='v1000')
 additional_nics = [nic1, nic2]
 node = self.driver.create_node(name='test2',
 image=image,
 auth=root_pw,
 ex_description='test2 node',
 ex_network_domain='fakenetworkdomain',
 ex_primary_nic_private_ipv4='10.0.0.1',
 ex_additional_nics=additional_nics,
 ex_is_started=False)
 self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
 self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
 def test_create_node_additional_nics_vlan_ipv4_coexist_fail(self):
 root_pw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 nic1 = DimensionDataNic(private_ip_v4='10.1.1.1', vlan='fake_vlan',
 network_adapter_name='v1000')
 nic2 = DimensionDataNic(private_ip_v4='10.1.1.2', vlan='fake_vlan2',
 network_adapter_name='v1000')
 additional_nics = [nic1, nic2]
 with self.assertRaises(ValueError):
 self.driver.create_node(name='test2',
 image=image,
 auth=root_pw,
 ex_description='test2 node',
 ex_network_domain='fakenetworkdomain',
 ex_primary_nic_private_ipv4='10.0.0.1',
 ex_additional_nics=additional_nics,
 ex_is_started=False
 )
 def test_create_node_additional_nics_invalid_input_fail(self):
 root_pw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 additional_nics = 'blah'
 with self.assertRaises(TypeError):
 self.driver.create_node(name='test2',
 image=image,
 auth=root_pw,
 ex_description='test2 node',
 ex_network_domain='fakenetworkdomain',
 ex_primary_nic_private_ipv4='10.0.0.1',
 ex_additional_nics=additional_nics,
 ex_is_started=False
 )
 def test_create_node_additional_nics_vlan_ipv4_not_exist_fail(self):
 root_pw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 nic1 = DimensionDataNic(network_adapter_name='v1000')
 nic2 = DimensionDataNic(network_adapter_name='v1000')
 additional_nics = [nic1, nic2]
 with self.assertRaises(ValueError):
 self.driver.create_node(name='test2',
 image=image,
 auth=root_pw,
 ex_description='test2 node',
 ex_network_domain='fakenetworkdomain',
 ex_primary_nic_private_ipv4='10.0.0.1',
 ex_additional_nics=additional_nics,
 ex_is_started=False)
 def test_create_node_bad_additional_nics_vlan(self):
 rootPw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 with self.assertRaises(TypeError):
 self.driver.create_node(name='test2',
 image=image,
 auth=rootPw,
 ex_description='test2 node',
 ex_network_domain='fake_network_domain',
 ex_vlan='fake_vlan',
 ex_additional_nics_vlan='badstring',
 ex_is_started=False)
 def test_create_node_mcp2_indicate_dns(self):
 rootPw = NodeAuthPassword('<PASSWORD>')
 image = self.driver.list_images()[0]
 node = self.driver.create_node(name='test2',
 image=image,
 auth=rootPw,
 ex_description='test node dns',
 ex_network_domain='fakenetworkdomain',
 ex_primary_ipv4='10.0.0.1',
 ex_primary_dns='8.8.8.8',
 ex_secondary_dns='8.8.4.4',
 ex_is_started=False)
 self.assertEqual(node.id, 'e75ead52-692f-4314-8725-c8a4f4d13a87')
 self.assertEqual(node.extra['status'].action, 'DEPLOY_SERVER')
 def test_ex_shutdown_graceful(self):
 node = Node(id='11', name=None, state=None,
 public_ips=None, private_ips=None, driver=self.driver)
 ret = self.driver.ex_shutdown_graceful(node)
 self.assertTrue(ret is True)
 def test_ex_shutdown_graceful_INPROGRESS(self):
 DimensionDataMockHttp.type = 'INPROGRESS'
 node = Node(id='11', name=None, state=None,
 public_ips=None, private_ips=None, driver=self.driver)
 with self.assertRaises(DimensionDataAPIException):
 self.driver.ex_shutdown_graceful(node)
 def test_ex_start_node(self):
 node = Node(id='11', name=None, state=None,
 public_ips=None, private_ips=None, driver=self.driver)
 ret = self.driver.ex_start_node(node)
 self.assertTrue(ret is True)
 def test_ex_start_node_INPROGRESS(self):
 DimensionDataMockHttp.type = 'INPROGRESS'
 node = Node(id='11', name=None, state=None,
 public_ips=None, private_ips=None, driver=self.driver)
 with self.assertRaises(DimensionDataAPIException):
 self.driver.ex_start_node(node)
 def test_ex_power_off(self):
 node = Node(id='11', name=None, state=None,
 public_ips=None, private_ips=None, driver=self.driver)
 ret = self.driver.ex_power_off(node)
 self.assertTrue(ret is True)
 def test_ex_update_vm_tools(self):
 node = Node(id='11', name=None, state=None,
 public_ips=None, private_ips=None, driver=self.driver)
 ret = self.driver.ex_update_vm_tools(node)
 self.assertTrue(ret is True)
 def test_ex_power_off_INPROGRESS(self):
 DimensionDataMockHttp.type = 'INPROGRESS'
 node = Node(id='11', name=None, state='STOPPING',
 public_ips=None, private_ips=None, driver=self.driver)
 with self.assertRaises(DimensionDataAPIException):
 self.driver.ex_power_off(node)
 def test_ex_reset(self):
 node = Node(id='11', name=None, state=None,
 public_ips=None, private_ips=None, driver=self.driver)
 ret = self.driver.ex_reset(node)
 self.assertTrue(ret is True)
 def test_ex_attach_node_to_vlan(self):
 node = self.driver.ex_get_node_by_id('e75ead52-692f-4314-8725-c8a4f4d13a87')
 vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
 ret = self.driver.ex_attach_node_to_vlan(node, vlan)
 self.assertTrue(ret is True)
 def test_ex_destroy_nic(self):
 node = self.driver.ex_destroy_nic('a202e51b-41c0-4cfc-add0-b1c62fc0ecf6')
 self.assertTrue(node)
 def test_list_networks(self):
 nets = self.driver.list_networks()
 self.assertEqual(nets[0].name, 'test-net1')
 self.assertTrue(isinstance(nets[0].location, NodeLocation))
 def test_ex_create_network(self):
 location = self.driver.ex_get_location_by_id('NA9')
 net = self.driver.ex_create_network(location, "Test Network", "test")
 self.assertEqual(net.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e")
 self.assertEqual(net.name, "Test Network")
 def test_ex_create_network_NO_DESCRIPTION(self):
 location = self.driver.ex_get_location_by_id('NA9')
 net = self.driver.ex_create_network(location, "Test Network")
 self.assertEqual(net.id, "208e3a8e-9d2f-11e2-b29c-001517c4643e")
 self.assertEqual(net.name, "Test Network")
 def test_ex_delete_network(self):
 net = self.driver.ex_list_networks()[0]
 result = self.driver.ex_delete_network(net)
 self.assertTrue(result)
 def test_ex_rename_network(self):
 net = self.driver.ex_list_networks()[0]
 result = self.driver.ex_rename_network(net, "barry")
 self.assertTrue(result)
 def test_ex_create_network_domain(self):
 location = self.driver.ex_get_location_by_id('NA9')
 plan = NetworkDomainServicePlan.ADVANCED
 net = self.driver.ex_create_network_domain(location=location,
 name='test',
 description='test',
 service_plan=plan)
 self.assertEqual(net.name, 'test')
 self.assertTrue(net.id, 'f14a871f-9a25-470c-aef8-51e13202e1aa')
 def test_ex_create_network_domain_NO_DESCRIPTION(self):
 location = self.driver.ex_get_location_by_id('NA9')
 plan = NetworkDomainServicePlan.ADVANCED
 net = self.driver.ex_create_network_domain(location=location,
 name='test',
 service_plan=plan)
 self.assertEqual(net.name, 'test')
 self.assertTrue(net.id, 'f14a871f-9a25-470c-aef8-51e13202e1aa')
 def test_ex_get_network_domain(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 self.assertEqual(net.id, '8cdfd607-f429-4df6-9352-162cfc0891be')
 self.assertEqual(net.description, 'test2')
 self.assertEqual(net.name, 'test')
 def test_ex_update_network_domain(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 net.name = 'new name'
 net2 = self.driver.ex_update_network_domain(net)
 self.assertEqual(net2.name, 'new name')
 def test_ex_delete_network_domain(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 result = self.driver.ex_delete_network_domain(net)
 self.assertTrue(result)
 def test_ex_list_networks(self):
 nets = self.driver.ex_list_networks()
 self.assertEqual(nets[0].name, 'test-net1')
 self.assertTrue(isinstance(nets[0].location, NodeLocation))
 def test_ex_list_network_domains(self):
 nets = self.driver.ex_list_network_domains()
 self.assertEqual(nets[0].name, 'Aurora')
 self.assertTrue(isinstance(nets[0].location, NodeLocation))
 def test_ex_list_network_domains_ALLFILTERS(self):
 DimensionDataMockHttp.type = 'ALLFILTERS'
 nets = self.driver.ex_list_network_domains(location='fake_location', name='fake_name',
 service_plan='fake_plan', state='fake_state')
 self.assertEqual(nets[0].name, 'Aurora')
 self.assertTrue(isinstance(nets[0].location, NodeLocation))
 def test_ex_list_vlans(self):
 vlans = self.driver.ex_list_vlans()
 self.assertEqual(vlans[0].name, "Primary")
 def test_ex_list_vlans_ALLFILTERS(self):
 DimensionDataMockHttp.type = 'ALLFILTERS'
 vlans = self.driver.ex_list_vlans(location='fake_location', network_domain='fake_network_domain',
 name='fake_name', ipv4_address='fake_ipv4', ipv6_address='fake_ipv6', state='fake_state')
 self.assertEqual(vlans[0].name, "Primary")
 def test_ex_create_vlan(self,):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 vlan = self.driver.ex_create_vlan(network_domain=net,
 name='test',
 private_ipv4_base_address='10.3.4.0',
 private_ipv4_prefix_size='24',
 description='test vlan')
 self.assertEqual(vlan.id, '0e56433f-d808-4669-821d-812769517ff8')
 def test_ex_create_vlan_NO_DESCRIPTION(self,):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 vlan = self.driver.ex_create_vlan(network_domain=net,
 name='test',
 private_ipv4_base_address='10.3.4.0',
 private_ipv4_prefix_size='24')
 self.assertEqual(vlan.id, '0e56433f-d808-4669-821d-812769517ff8')
 def test_ex_get_vlan(self):
 vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
 self.assertEqual(vlan.id, '0e56433f-d808-4669-821d-812769517ff8')
 self.assertEqual(vlan.description, 'test2')
 self.assertEqual(vlan.status, 'NORMAL')
 self.assertEqual(vlan.name, 'Production VLAN')
 self.assertEqual(vlan.private_ipv4_range_address, '10.0.3.0')
 self.assertEqual(vlan.private_ipv4_range_size, 24)
 self.assertEqual(vlan.ipv6_range_size, 64)
 self.assertEqual(vlan.ipv6_range_address, 'fdf8:f53e:61e4::18')
 self.assertEqual(vlan.ipv4_gateway, '10.0.3.1')
 self.assertEqual(vlan.ipv6_gateway, 'fc00:db20:35b:7399::5')
 def test_ex_wait_for_state(self):
 self.driver.ex_wait_for_state('NORMAL',
 self.driver.ex_get_vlan,
 vlan_id='0e56433f-d808-4669-821d-812769517ff8')
 def test_ex_wait_for_state_NODE(self):
 self.driver.ex_wait_for_state('running',
 self.driver.ex_get_node_by_id,
 id='e75ead52-692f-4314-8725-c8a4f4d13a87')
 def test_ex_wait_for_state_FAIL(self):
 with self.assertRaises(DimensionDataAPIException) as context:
 self.driver.ex_wait_for_state('starting',
 self.driver.ex_get_node_by_id,
 id='e75ead52-692f-4314-8725-c8a4f4d13a87',
 timeout=2
 )
 self.assertEqual(context.exception.code, 'running')
 self.assertTrue('timed out' in context.exception.msg)
 def test_ex_update_vlan(self):
 vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
 vlan.name = 'new name'
 vlan2 = self.driver.ex_update_vlan(vlan)
 self.assertEqual(vlan2.name, 'new name')
 def test_ex_delete_vlan(self):
 vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
 result = self.driver.ex_delete_vlan(vlan)
 self.assertTrue(result)
 def test_ex_expand_vlan(self):
 vlan = self.driver.ex_get_vlan('0e56433f-d808-4669-821d-812769517ff8')
 vlan.private_ipv4_range_size = '23'
 vlan = self.driver.ex_expand_vlan(vlan)
 self.assertEqual(vlan.private_ipv4_range_size, '23')
 def test_ex_add_public_ip_block_to_network_domain(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 block = self.driver.ex_add_public_ip_block_to_network_domain(net)
 self.assertEqual(block.id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
 def test_ex_list_public_ip_blocks(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 blocks = self.driver.ex_list_public_ip_blocks(net)
 self.assertEqual(blocks[0].base_ip, '172.16.17.32')
 self.assertEqual(blocks[0].size, '2')
 self.assertEqual(blocks[0].id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
 self.assertEqual(blocks[0].location.id, 'NA9')
 self.assertEqual(blocks[0].network_domain.id, net.id)
 def test_ex_get_public_ip_block(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 block = self.driver.ex_get_public_ip_block('9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
 self.assertEqual(block.base_ip, '172.16.17.32')
 self.assertEqual(block.size, '2')
 self.assertEqual(block.id, '9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
 self.assertEqual(block.location.id, 'NA9')
 self.assertEqual(block.network_domain.id, net.id)
 def test_ex_delete_public_ip_block(self):
 block = self.driver.ex_get_public_ip_block('9945dc4a-bdce-11e4-8c14-b8ca3a5d9ef8')
 result = self.driver.ex_delete_public_ip_block(block)
 self.assertTrue(result)
 def test_ex_list_firewall_rules(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 self.assertEqual(rules[0].id, '756cba02-b0bc-48f4-aea5-9445870b6148')
 self.assertEqual(rules[0].network_domain.id, '8cdfd607-f429-4df6-9352-162cfc0891be')
 self.assertEqual(rules[0].name, 'CCDEFAULT.BlockOutboundMailIPv4')
 self.assertEqual(rules[0].action, 'DROP')
 self.assertEqual(rules[0].ip_version, 'IPV4')
 self.assertEqual(rules[0].protocol, 'TCP')
 self.assertEqual(rules[0].source.ip_address, 'ANY')
 self.assertTrue(rules[0].source.any_ip)
 self.assertTrue(rules[0].destination.any_ip)
 def test_ex_create_firewall_rule(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 rule = self.driver.ex_create_firewall_rule(net, rules[0], 'FIRST')
 self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 def test_ex_create_firewall_rule_with_specific_source_ip(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 specific_source_ip_rule = list(filter(lambda x: x.name == 'SpecificSourceIP',
 rules))[0]
 rule = self.driver.ex_create_firewall_rule(net, specific_source_ip_rule, 'FIRST')
 self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 def test_ex_create_firewall_rule_with_source_ip(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 specific_source_ip_rule = \
 list(filter(lambda x: x.name == 'SpecificSourceIP',
 rules))[0]
 specific_source_ip_rule.source.any_ip = False
 specific_source_ip_rule.source.ip_address = '10.0.0.1'
 specific_source_ip_rule.source.ip_prefix_size = '15'
 rule = self.driver.ex_create_firewall_rule(net,
 specific_source_ip_rule,
 'FIRST')
 self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 def test_ex_create_firewall_rule_with_any_ip(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 specific_source_ip_rule = \
 list(filter(lambda x: x.name == 'SpecificSourceIP',
 rules))[0]
 specific_source_ip_rule.source.any_ip = True
 rule = self.driver.ex_create_firewall_rule(net,
 specific_source_ip_rule,
 'FIRST')
 self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 def test_ex_create_firewall_rule_ip_prefix_size(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_list_firewall_rules(net)[0]
 rule.source.address_list_id = None
 rule.source.any_ip = False
 rule.source.ip_address = '10.2.1.1'
 rule.source.ip_prefix_size = '10'
 rule.destination.address_list_id = None
 rule.destination.any_ip = False
 rule.destination.ip_address = '10.0.0.1'
 rule.destination.ip_prefix_size = '20'
 self.driver.ex_create_firewall_rule(net, rule, 'LAST')
 def test_ex_create_firewall_rule_address_list(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_list_firewall_rules(net)[0]
 rule.source.address_list_id = '12345'
 rule.destination.address_list_id = '12345'
 self.driver.ex_create_firewall_rule(net, rule, 'LAST')
 def test_ex_create_firewall_rule_port_list(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_list_firewall_rules(net)[0]
 rule.source.port_list_id = '12345'
 rule.destination.port_list_id = '12345'
 self.driver.ex_create_firewall_rule(net, rule, 'LAST')
 def test_ex_create_firewall_rule_port(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_list_firewall_rules(net)[0]
 rule.source.port_list_id = None
 rule.source.port_begin = '8000'
 rule.source.port_end = '8005'
 rule.destination.port_list_id = None
 rule.destination.port_begin = '7000'
 rule.destination.port_end = '7005'
 self.driver.ex_create_firewall_rule(net, rule, 'LAST')
 def test_ex_create_firewall_rule_ALL_VALUES(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 for rule in rules:
 self.driver.ex_create_firewall_rule(net, rule, 'LAST')
 def test_ex_create_firewall_rule_WITH_POSITION_RULE(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 rule = self.driver.ex_create_firewall_rule(net, rules[-2], 'BEFORE', rules[-1])
 self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 def test_ex_create_firewall_rule_WITH_POSITION_RULE_STR(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 rule = self.driver.ex_create_firewall_rule(net, rules[-2], 'BEFORE', 'RULE_WITH_SOURCE_AND_DEST')
 self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 def test_ex_create_firewall_rule_FAIL_POSITION(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 with self.assertRaises(ValueError):
 self.driver.ex_create_firewall_rule(net, rules[0], 'BEFORE')
 def test_ex_create_firewall_rule_FAIL_POSITION_WITH_RULE(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rules = self.driver.ex_list_firewall_rules(net)
 with self.assertRaises(ValueError):
 self.driver.ex_create_firewall_rule(net, rules[0], 'LAST', 'RULE_WITH_SOURCE_AND_DEST')
 def test_ex_get_firewall_rule(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 self.assertEqual(rule.id, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 def test_ex_set_firewall_rule_state(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 result = self.driver.ex_set_firewall_rule_state(rule, False)
 self.assertTrue(result)
 def test_ex_delete_firewall_rule(self):
 net = self.driver.ex_get_network_domain('8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 result = self.driver.ex_delete_firewall_rule(rule)
 self.assertTrue(result)
 def test_ex_edit_firewall_rule(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(
 net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 rule.source.any_ip = True
 result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
 self.assertTrue(result)
 def test_ex_edit_firewall_rule_source_ipaddresslist(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(
 net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 rule.source.address_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
 rule.source.any_ip = False
 rule.source.ip_address = '10.0.0.1'
 rule.source.ip_prefix_size = 10
 result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
 self.assertTrue(result)
 def test_ex_edit_firewall_rule_destination_ipaddresslist(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(
 net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 rule.destination.address_list_id = '802abc9f-45a7-4efb-9d5a-810082368222'
 rule.destination.any_ip = False
 rule.destination.ip_address = '10.0.0.1'
 rule.destination.ip_prefix_size = 10
 result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
 self.assertTrue(result)
 def test_ex_edit_firewall_rule_destination_ipaddress(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(
 net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 rule.source.address_list_id = None
 rule.source.any_ip = False
 rule.source.ip_address = '10.0.0.1'
 rule.source.ip_prefix_size = '10'
 result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
 self.assertTrue(result)
 def test_ex_edit_firewall_rule_source_ipaddress(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(
 net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 rule.destination.address_list_id = None
 rule.destination.any_ip = False
 rule.destination.ip_address = '10.0.0.1'
 rule.destination.ip_prefix_size = '10'
 result = self.driver.ex_edit_firewall_rule(rule=rule, position='LAST')
 self.assertTrue(result)
 def test_ex_edit_firewall_rule_with_relative_rule(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(
 net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 placement_rule = self.driver.ex_list_firewall_rules(
 network_domain=net)[-1]
 result = self.driver.ex_edit_firewall_rule(
 rule=rule, position='BEFORE',
 relative_rule_for_position=placement_rule)
 self.assertTrue(result)
 def test_ex_edit_firewall_rule_with_relative_rule_by_name(self):
 net = self.driver.ex_get_network_domain(
 '8cdfd607-f429-4df6-9352-162cfc0891be')
 rule = self.driver.ex_get_firewall_rule(
 net, 'd0a20f59-77b9-4f28-a63b-e58496b73a6c')
 placement_rule = self.driver.ex_list_firewall_rules(
 network_domain=net)[-1]
 result = self.driver.ex_edit_firewall_rule(
 rule=rule, position='BEFORE',
 relative_rule_for_position=placement_rule.name)
 self.assertTrue(result)
 def test_ex_edit_firewall_rule_source_portlist(self):
 net
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import numpy as np
import os
import re
import sys
from argparse import ArgumentParser
from datetime import datetime, timedelta
from itertools import product
from scipy.interpolate import griddata
from serial import Serial
from time import sleep, time
class Probe():
 def __init__(self, device, input_gcode, grid_spacing, feed_rate, overscan, min_z, max_z):
 self.ser = None
 self.device = device
 self.input_gcode = input_gcode
 self.grid_spacing = grid_spacing
 self.feed_rate = feed_rate
 self.overscan = overscan
 self.min_z = min_z
 self.max_z = max_z
 self.ser_timeout = 120
 self.fine_feed_probe = 1
 self.coarse_feed_probe = 40
 self.z_max_travel = 40
 self.x_coords_re = re.compile(r'X\s*(-?[0-9]+(?:\.[0-9]+)?)')
 self.y_coords_re = re.compile(r'Y\s*(-?[0-9]+(?:\.[0-9]+)?)')
 self.mpos_re = re.compile(r'\|MPos:(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+)')
 self.probe_re = re.compile(r'\[PRB:(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+),(-?[0-9]+\.[0-9]+):([0-1])\]')
 def init_grbl(self):
 # open serial port and wait for welcome msg
 self.ser = Serial(self.device, 115200, timeout=self.ser_timeout)
 data = ''
 while "Grbl 1.1f ['$' for help]" != data:
 data = self.ser.readline().strip()
 self.ser.timeout = 1
 if '''[MSG:'$H'|'$X' to unlock]''' in self.ser.readline().strip():
 self.send('$X', wait_for_idle=False)
 self.ser.reset_input_buffer()
 self.ser.timeout = self.ser_timeout
 # set millimeter mode
 self.send('G21')
 # set absolute coords
 self.send('G90')
 # reset work coords
 self.send('G92X0Y0Z0')
 # set local relative offset
 self.zero_wpos = self.get_abs_pos()
 def send(self, data, newline=True, wait_for_idle=True):
 # open serial only on first send
 if self.ser is None:
 self.init_grbl()
 # wait for machine to be idle (not moving)
 if wait_for_idle:
 while True:
 self.ser.write('?')
 if '<Idle|' in self.ser.readline():
 break
 sleep(.25)
 # send data and wait for answer
 self.ser.write(data + ('\n' if newline else ''))
 resp = self.ser.readline().strip()
 # parse and return responses
 if resp == 'ok':
 return True
 elif 'error:' in resp or 'ALARM:' in resp:
 raise Exception(resp)
 elif resp.startswith('['):
 out = [resp]
 while True:
 resp = self.ser.readline().strip()
 if resp.startswith('['):
 out.append(resp)
 elif resp == 'ok':
 return '\n'.join(out)
 return resp
 def get_rel_coord(self, coords):
 resp = {}
 for coord in 'xyz':
 if coord in coords:
 resp[coord] = -self.zero_wpos[coord] + coords[coord]
 return resp
 def get_abs_pos(self):
 # wait for machine to be idle
 while True:
 mpos = self.send('?', newline=False)
 if '<Idle|' in mpos:
 break
 sleep(.25)
 mpos = tuple(map(float, self.mpos_re.findall(mpos)[0]))
 return {'x': mpos[0], 'y': mpos[1], 'z': mpos[2]}
 def get_pos(self):
 # get current position in relative coords
 return self.get_rel_coord(self.get_abs_pos())
 def probe(self, min_z, feed_rate, retract=None, zero_coords=False):
 assert (min_z < 0)
 assert (retract is None or retract >= 0)
 resp = self.send('G38.3 Z{:.5f} F{:.0f}'.format(min_z, feed_rate))
 resp = self.probe_re.findall(resp)[0]
 probe_point, probe_success = tuple(map(float, resp[:3])), bool(int(resp[-1]))  # resp[-1] is '0'/'1'; int() so '0' maps to False
 # zero out work coords
 if probe_success and zero_coords:
 # zero out work offset
 self.send('G92Z{:.5f}'.format(self.get_abs_pos()['z'] - probe_point[2]))
 # go to effective zero since probe might have stopped after
 # the probe touchdown (due to deceleration)
 self.send('G01Z0F1')
 # set new local relative offset
 self.zero_wpos = self.get_abs_pos()
 if retract is not None:
 self.send('G0Z{:.5f}'.format(retract))
 probe_point = {'x': probe_point[0], 'y': probe_point[1], 'z': 0. if zero_coords else probe_point[2]}
 return self.get_rel_coord(probe_point), probe_success
 def probe_origin(self):
 sys.stdout.write('\n[I] Zeroing Z in origin using coarse mode (F{:.0f})... '.format(self.coarse_feed_probe))
 sys.stdout.flush()
 # raise Z axis a bit to avoid potential alarm
 self.send('G0Z1')
 if not self.probe(-self.z_max_travel, self.coarse_feed_probe, zero_coords=True)[1]:
 print('\n\n[E] Probe error!')
 sys.exit(1)
 self.send('G1Z.1F1')
 sys.stdout.write('Done.\n[I] Zeroing Z in origin using fine mode (F{:.0f})... '.format(self.fine_feed_probe))
 sys.stdout.flush()
 if not self.probe(-.4, self.fine_feed_probe, zero_coords=True)[1]:
 print('\n\n[E] Probe error!')
 sys.exit(1)
 print('Done.')
 def return_home(self):
 print('\n[I] Returning home. X0 Y0 Z0.2')
 self.send('G0Z5')
 self.send('G0X0Y0')
 self.send('G0Z.5')
 self.send('G1Z.2F10')
 def get_workspace_size(self):
 # get all X and Y coords in the gcode file
 X = np.asarray(self.x_coords_re.findall(self.input_gcode), np.double)
 Y = np.asarray(self.y_coords_re.findall(self.input_gcode), np.double)
 # find boundaries
 return min(X), max(X), min(Y), max(Y)
 def get_probe_coords(self):
 minx, maxx, miny, maxy = self.get_workspace_size()
 print('\n[I] Gcode area (WxH): {:.2f}mm x {:.2f}mm'.format(abs(maxx - minx), abs(maxy - miny)))
 if self.overscan != 0:
 minx, maxx = minx - self.overscan, maxx + self.overscan
 miny, maxy = miny - self.overscan, maxy + self.overscan
 print('[I] Probe area with overscan (WxH): {:.2f}mm x {:.2f}mm'.format(abs(maxx - minx), abs(maxy - miny)))
 x_steps = max(2, int(round(abs(maxx - minx) / self.grid_spacing)) + 1)
 x_spacing = abs(maxx - minx) / (x_steps - 1)
 X = np.linspace(minx, maxx, x_steps)
 y_steps = max(2, int(round(abs(maxy - miny) / self.grid_spacing)) + 1)
 y_spacing = abs(maxy - miny) / (y_steps - 1)
 Y = np.linspace(miny, maxy, y_steps)
 coords = tuple(product(X, Y))
 # sort probing coords in zig-zag to minimize path length
 sorted_coords = []
 for x in sorted(X):
 tmp = [point for point in coords if point[0] == x]
 sorted_coords.append(sorted(tmp, key=lambda point: point[1], reverse=len(sorted_coords) % 2 == 1))
 sorted_coords = [item for sublist in sorted_coords for item in sublist]
 self.probe_coords = sorted_coords
 self.X, self.Y = X, Y
 print('[I] Probing {:d} points, {:.5f}mm x-grid, {:.5f}mm y-grid:'.format(
 len(sorted_coords), x_spacing, y_spacing))
 # return the probing grid
 return sorted_coords
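 # Illustrative note (assumed 2x2 example, not from the original script): with
 # X = [0, 10] and Y = [0, 10], columns are visited in ascending X and the Y
 # direction alternates per column, so the probe path is
 # (0,0) -> (0,10) -> (10,10) -> (10,0), avoiding a long travel back to Y=0
 # between columns.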
 def probe_grid(self):
 # probe the surface using the calculated grid
 self.probe_result = []
 start_t = time()
 for i, (x, y) in enumerate(self.probe_coords):
 sys.stdout.write('[{:03d}] Probing x: {:.1f} y: {:.1f} '.format(i + 1, x, y))
 sys.stdout.flush()
 # skip probing point X0 Y0 if exists
 if x == y == 0.:
 probe_point, probe_success = {'z': 0.}, True
 else:
 # raising probe Z to max_z
 self.send('G0Z{:.5f}'.format(self.max_z))
 # moving to next probe point
 self.send('G0X{:.5f}Y{:.5f}'.format(x, y))
 # do probe
 probe_point, probe_success = self.probe(self.min_z, self.feed_rate, retract=self.max_z)
 if not probe_success:
 print('\n[E] Unable to probe point!')
 self.return_home()
 sys.exit(1)
 now = datetime.fromtimestamp(int(time())).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
 result = {
 "sent": True,
 "done": True,
 "x": float(x),
 "y": float(y),
 "z": float(probe_point['z']),
 "ts": now,
 "xindx": int(np.where(self.X == x)[0][0]),
 "yindx": int(np.where(self.Y == y)[0][0]),
 }
 self.probe_result.append(result)
 elapsed_t = time() - start_t
 eta_t = (elapsed_t / (i + 1)) * (len(self.probe_coords) - (i + 1))
 print('z: {:.5f}\t\tETA: {}'.format(result['z'], timedelta(seconds=int(eta_t))))
 print('')
 def get_json(self):
 # return a json string with the probe result
 return json.dumps(self.probe_result)
def correct_gcode(input_gcode, probe_json):
 probe_json = json.loads(probe_json)
 X = np.asarray([point['x'] for point in probe_json], np.double)
 Y = np.asarray([point['y'] for point in probe_json], np.double)
 points = np.vstack((X, Y)).T
 values = np.asarray([point['z'] for point in probe_json], np.double)
 regexps = {
 'x': re.compile(r'x\s*(-?[0-9]+\.[0-9]+)', re.IGNORECASE),
 'y': re.compile(r'y\s*(-?[0-9]+\.[0-9]+)', re.IGNORECASE),
 'z': re.compile(r'z\s*(-?[0-9]+\.[0-9]+)', re.IGNORECASE),
 }
 # split input gcode by line, filtering empty lines
 input_gcode = list(filter(lambda x: x, map(lambda x: x.strip(), input_gcode.split('\n'))))
 result = []
 cur_coords = [0] * 3
 for i, line in enumerate(input_gcode):
 # skip comments
 if line.startswith(';') or line.startswith('('):
 continue
 cur_line = ''
 # update current gcode coordinates
 for j, coord in enumerate(('x', 'y', 'z')):
 match = regexps[coord].search(line)
 if match:
 cur_coords[j] = float(match.group(1))
 # keep track of which coordinate we have found in this gcode line
 cur_line += coord
 # if this gcode line contains a Z coord, correct it
 if 'z' in cur_line:
 result.append((i, 'sub', cur_coords[:]))
 # no Z coord in this line, let's add it
 elif 'x' in cur_line or 'y' in cur_line:
 result.append((i, 'append', cur_coords[:]))
 # points that we need to adjust (x,y,z)
 gcode_points = np.vstack(zip(*[item[2] for item in result])).T
 # calculate new Z value for each point in gcode_points using both linear and nearest interpolation
 newZval_lin = griddata(points, values, gcode_points[:, :2], method='linear') + gcode_points[:, 2]
 newZval_near = griddata(points, values, gcode_points[:, :2], method='nearest') + gcode_points[:, 2]
 for i, newZval in enumerate(newZval_lin):
 j, action = result[i][:2]
 # if the new Z value is NaN, then the point is probably outside the probing grid;
 # we use the nearest probed point as an approximation
 if np.isnan(newZval):
 newZval = newZval_near[i]
 # replace or add the new Z value
 if action == 'sub':
 input_gcode[j] = regexps['z'].sub('Z{:.5f}'.format(newZval), input_gcode[j])
 else:
 input_gcode[j] += ' Z{:.5f}'.format(newZval)
 return '\n'.join(input_gcode).encode('ascii')
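# Minimal sketch (not part of the original script) of the interpolation strategy used in
# correct_gcode(): linear interpolation over the probed points, with a nearest-neighbour
# fallback for query points outside the probe grid (where 'linear' returns NaN). The
# probe points and query points below are made-up example values.
def _example_griddata_fallback():
    points = np.array([[0.0, 0.0], [10.0, 0.0], [0.0, 10.0], [10.0, 10.0]])
    values = np.array([0.00, 0.05, -0.02, 0.03])  # probed Z offsets at each (x, y)
    query = np.array([[5.0, 5.0], [12.0, 5.0]])  # the second point lies outside the grid
    z_lin = griddata(points, values, query, method='linear')
    z_near = griddata(points, values, query, method='nearest')
    # replace NaNs from the linear interpolation with the nearest-neighbour estimate
    return np.where(np.isnan(z_lin), z_near, z_lin)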
def parse_args():
 # parse command line arguments
 parser = ArgumentParser(description='pcb surface autoprober')
 subparsers = parser.add_subparsers(title='actions')
 probe_parsers = subparsers.add_parser('probe', help='probe the surface and generate JSON report')
 probe_parsers.set_defaults(which='probe')
 probe_parsers.add_argument(
 '-i',
 metavar='INPUT_GCODE',
 dest='input_gcode',
 help='input gcode for automatic surface probing',
 required=True)
 probe_parsers.add_argument('-l', dest='output', help='output JSON file containing probe points', required=True)
 probe_parsers.add_argument(
 '-g', '--grid', metavar='mm', type=float, dest='grid_spacing', help='probe grid spacing (mm)', required=True)
 probe_parsers.add_argument(
 '-d', '--device', metavar='serial_device', dest='device', default='/dev/ttyUSB0', help='GRBL device')
 probe_parsers.add_argument(
 '-f',
 '--feed',
 metavar='mm/min',
 type=int,
 dest='feed_rate',
 default=5,
 help='probing feed rate on Z axis (default 5 mm/min)')
 probe_parsers.add_argument(
 '--maxz',
 metavar='mm',
 type=float,
 dest='max_z',
 default=.5,
 help='start probing at this Z axis value (default 0.5 mm)')
 probe_parsers.add_argument(
 '--minz',
 metavar='mm',
 type=float,
 dest='min_z',
 default=-.5,
 help='stop probing if Z axis reaches this value (default -0.5 mm)')
 probe_parsers.add_argument(
 '--overscan',
 metavar='mm',
 type=float,
 default=1.0,
 dest='overscan',
 help='probe grid overscan. the probe grid will be this value larger on every edge (mm)')
 correct_parsers = subparsers.add_parser('correct', help='correct the input gcode with the probing result')
 correct_parsers.set_defaults(which='correct')
 correct_parsers.add_argument(
 metavar='INPUT_GCODE', dest='input_gcode', help='input gcode file to be
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2019 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for processing X.509 v3 certificates."""
import contextlib
import datetime
import ipaddress
import logging
import re
import socket
import ssl
import urllib.parse
import cryptography
import cryptography.hazmat
import cryptography.hazmat.backends
import cryptography.hazmat.primitives
import cryptography.hazmat.primitives.asymmetric
import cryptography.hazmat.primitives.asymmetric.ec
import cryptography.hazmat.primitives.asymmetric.rsa
import cryptography.hazmat.primitives.hashes
import cryptography.hazmat.primitives.serialization
import cryptography.x509
import cryptography.x509.oid
import pyasn1.codec.der
import pyasn1.codec.der.decoder
import d1_common.const
OID_TO_SHORT_NAME_DICT = {
 "0.9.2342.19200300.100.1.1": "UID", # userId
 "0.9.2342.19200300.100.1.25": "DC", # domainComponent
 "1.2.840.113549.1.9.1": "email", # emailAddress
 "2.5.4.3": "CN", # commonName
 "2.5.4.4": "SN", # surname
 "2.5.4.6": "C", # countryName
 "2.5.4.7": "L", # localityName
 "2.5.4.8": "ST", # stateOrProvinceName
 "2.5.4.9": "STREET", # streetAddress
 "2.5.4.10": "O", # organizationName
 "2.5.4.11": "OU", # organizationalUnitName
}
"""Map OID to short names for use when creating DataONE compliant serialization of the
DN.

This is pulled from LDAPv3 RFCs (RFC 4510 to RFC 4519).

The set of OIDs that can occur in RDNs seems to be poorly defined. RFC 4514 refers to a
registry but, if the registry exists, it's probably too large to be useful to us. So we
pull in OIDs for a small set that can be expected in RDNs in certs from CILogon and will
just need to expand it if required.

RFC 4514 section 2: Converting DistinguishedName from ASN.1 to a String

If the AttributeType is defined to have a short name (descriptor) [RFC4512] and that
short name is known to be registered [REGISTRY] [RFC4520] as identifying the
AttributeType, that short name, a <descr>, is used. Otherwise the AttributeType is
encoded as the dotted-decimal encoding, a <numericoid>, of its OBJECT IDENTIFIER. The
<descr> and <numericoid> are defined in [RFC4512].
"""
DATAONE_SUBJECT_INFO_OID = "1.3.6.1.4.1.34998.2.1"
AUTHORITY_INFO_ACCESS_OID = "1.3.6.1.5.5.7.1.1" # authorityInfoAccess
CA_ISSUERS_OID = "1.3.6.1.5.5.7.48.2" # caIssuers
OCSP_OID = "1.3.6.1.5.5.7.48.1" # OCSP
UBUNTU_CA_BUNDLE_PATH = "/etc/ssl/certs/ca-certificates.crt"
# Subjects
def extract_subjects(cert_pem):
 """Extract primary subject and SubjectInfo from a DataONE PEM (Base64) encoded X.509
 v3 certificate.
 Args:
 cert_pem: str or bytes
 PEM (Base64) encoded X.509 v3 certificate
 Returns:
 2-tuple:
 - Primary subject (str) extracted from the certificate DN.
 - SubjectInfo (XML str) if present (see the subject_info module for parsing)
 """
 cert_obj = deserialize_pem(cert_pem)
 return extract_subject_from_dn(cert_obj), extract_subject_info_extension(cert_obj)
def extract_subject_from_dn(cert_obj):
 """Serialize a DN to a DataONE subject string.
 Args:
 cert_obj: cryptography.Certificate
 Returns:
 str:
 Primary subject extracted from the certificate DN.
 The certificate DN (DistinguishedName) is a sequence of RDNs
 (RelativeDistinguishedName). Each RDN is a set of AVAs (AttributeValueAssertion /
 AttributeTypeAndValue). A DataONE subject is a plain string. As there is no single
 standard specifying how to create a string representation of a DN, DataONE selected
 one of the most common ways, which yield strings such as:
 CN=Some Name A123,O=Some Organization,C=US,DC=Some Domain,DC=org
 In particular, the sequence of RDNs is reversed. Attribute values are escaped,
 attribute type and value pairs are separated by "=", and AVAs are joined together
 with ",". If an RDN contains an unknown OID, the OID is serialized as a dotted
 string.
 As all the information in the DN is preserved, it is not possible to create the
 same subject with two different DNs, and the DN can be recreated from the subject.
 """
 return ",".join(
 "{}={}".format(
 OID_TO_SHORT_NAME_DICT.get(v.oid.dotted_string, v.oid.dotted_string),
 rdn_escape(v.value),
 )
 for v in reversed(list(cert_obj.subject))
 )
def create_mn_dn(node_urn):
 """Create a certificate DN suitable for use in Member Node client side certificates
 issued by DataONE, and thus in Certificate Signing Requests (CSR). The DN will be on
 the form:
 .. highlight:: none
 ::
 DC=org, DC=dataone, CN=urn:node:<ID>
 where <ID> typically is a short acronym for the name of the organization responsible
 for the Member Node.
 The DN is formatted into a DataONE subject, which is used in authentication,
 authorization and event tracking.
 Args:
 node_urn (str): Node URN. E.g.:
 - Production certificate: ``urn:node:XYZ``.
 - Test certificate ``urn:node:mnTestXYZ``.
 Returns:
 cryptography.x509.Name
 """
 return create_simple_dn(node_urn, domain_component_list=["org", "dataone"])
def create_simple_dn(common_name_str, domain_component_list=None):
 """Create a simple certificate DN suitable for use in testing and for generating
 self signed CA and other certificate.
 ::
 DC=local, DC=dataone, CN=<common name>
 Args:
 common_name_str: The Common Name to use for the certificate.
 DataONE uses simple DNs without physical location information, so only the
 ``common_name_str`` (``CommonName``) needs to be specified.
 For Member Node Client Side certificates or CSRs, ``common_name_str`` is the
 ``node_id``, e.g., ``urn:node:ABCD`` for production, or
 ``urn:node:mnTestABCD`` for the test environments.
 For a local CA, something like ``localCA`` may be used.
 For a locally trusted client side certificate, something like
 ``localClient`` may be used.
 domain_component_list: list
 Optionally set custom domain components.
 Returns:
 cryptography.x509.Name
 """
 domain_component_list = domain_component_list or ["local", "dataone"]
 attr_list = []
 for dc_str in domain_component_list:
 attr_list.append(
 cryptography.x509.NameAttribute(
 cryptography.x509.oid.NameOID.DOMAIN_COMPONENT, dc_str
 )
 )
 attr_list.append(
 cryptography.x509.NameAttribute(
 cryptography.x509.oid.NameOID.COMMON_NAME, common_name_str
 )
 )
 return cryptography.x509.Name(attr_list)
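# Illustrative sketch (not in the original module): build the two DN flavours above and
# render them for inspection. ``urn:node:mnTestABCD`` is a made-up node URN, and
# rfc4514_string() assumes a reasonably recent version of the cryptography package.
def _example_build_dns():
    mn_dn = create_mn_dn("urn:node:mnTestABCD")  # DC=org, DC=dataone, CN=urn:node:mnTestABCD
    local_dn = create_simple_dn("localCA")  # DC=local, DC=dataone, CN=localCA
    return mn_dn.rfc4514_string(), local_dn.rfc4514_string()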
# CSR
def generate_csr(private_key_bytes, dn, fqdn_list=None):
 """Generate a Certificate Signing Request (CSR).
 Args:
 private_key_bytes: bytes
 Private key with which the CSR will be signed.
 dn: cryptography.x509.Name
 The dn can be built by passing a list of cryptography.x509.NameAttribute to
 cryptography.x509.Name.
 Simple DNs can be created with the ``create_dn*`` functions in this module.
 fqdn_list: list of str
 List of Fully Qualified Domain Names (FQDN) and/or IP addresses for which
 this certificate will provide authentication.
 E.g.: ['my.membernode.org', '172.16.31.10']
 This is mainly useful for creating a self signed server side certificate or
 a CSR that will be submitted to a trusted CA, such as Verisign, for signing.
 Returns:
 cryptography.x509.CertificateSigningRequest
 """
 csr = cryptography.x509.CertificateSigningRequestBuilder(subject_name=dn)
 if fqdn_list:
 # add_extension() returns a new builder, so the result must be captured
 csr = csr.add_extension(
 extension=cryptography.x509.SubjectAlternativeName(
 [cryptography.x509.DNSName(v) for v in fqdn_list]
 ),
 critical=False,
 )
 return csr.sign(
 private_key=private_key_bytes,
 algorithm=cryptography.hazmat.primitives.hashes.SHA256(),
 backend=cryptography.hazmat.backends.default_backend(),
 )
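# Illustrative sketch (not in the original module): generate an RSA key and a CSR for a
# Member Node, then serialize the CSR to PEM. Note that generate_csr() hands its first
# argument straight to CertificateSigningRequestBuilder.sign(), which expects a key
# object rather than raw bytes, so a key object is passed here. The node URN and FQDN
# are made-up values.
def _example_generate_csr_pem():
    private_key = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
        public_exponent=65537,
        key_size=2048,
        backend=cryptography.hazmat.backends.default_backend(),
    )
    dn = create_mn_dn("urn:node:mnTestABCD")
    csr = generate_csr(private_key, dn, fqdn_list=["my.membernode.org"])
    return serialize_cert_to_pem(csr)  # PEM encoded CSR as bytes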
# PEM
def deserialize_pem(cert_pem):
 """Deserialize PEM (Base64) encoded X.509 v3 certificate.
 Args:
 cert_pem: str or bytes
 PEM (Base64) encoded X.509 v3 certificate
 Returns:
 cert_obj: cryptography.Certificate
 """
 if isinstance(cert_pem, str):
 cert_pem = cert_pem.encode("utf-8")
 return cryptography.x509.load_pem_x509_certificate(
 data=cert_pem, backend=cryptography.hazmat.backends.default_backend()
 )
def deserialize_pem_file(cert_path):
 """Deserialize PEM (Base64) encoded X.509 v3 certificate in file.
 Args:
 cert_path: str or bytes
 Path to PEM (Base64) encoded X.509 v3 certificate file
 Returns:
 cert_obj: cryptography.Certificate
 """
 with open(cert_path, "rb") as f:
 return deserialize_pem(f.read())
def serialize_cert_to_pem(cert_obj):
 """Serialize certificate to PEM.
 The certificate can be also be a Certificate Signing Request (CSR).
 Args:
 cert_obj: cryptography.Certificate
 Returns:
 bytes: PEM encoded certificate
 """
 return cert_obj.public_bytes(
 encoding=cryptography.hazmat.primitives.serialization.Encoding.PEM
 )
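# Illustrative sketch (not in the original module): read a PEM certificate file with the
# helpers above and pull out the DataONE primary subject. "client_cert.pem" is a
# hypothetical path.
def _example_read_cert_subject(cert_path="client_cert.pem"):
    cert_obj = deserialize_pem_file(cert_path)
    primary_subject = extract_subject_from_dn(cert_obj)
    cert_pem = serialize_cert_to_pem(cert_obj)  # bytes, PEM re-encoding of the same cert
    return primary_subject, cert_pem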
# DataONE SubjectInfo Extension
def extract_subject_info_extension(cert_obj):
 """Extract DataONE SubjectInfo XML doc from certificate.
 Certificates issued by DataONE may include an embedded XML doc containing
 additional information about the subject specified in the certificate DN. If
 present, the doc is stored as an extension with an OID specified by DataONE and
 formatted as specified in the DataONE SubjectInfo schema definition.
 Args:
 cert_obj: cryptography.Certificate
 Returns:
 str : SubjectInfo XML doc if present, else None
 """
 try:
 subject_info_der = cert_obj.extensions.get_extension_for_oid(
 cryptography.x509.oid.ObjectIdentifier(DATAONE_SUBJECT_INFO_OID)
 ).value.value
 return str(pyasn1.codec.der.decoder.decode(subject_info_der)[0])
 except Exception as e:
 logging.debug('SubjectInfo not extracted. reason="{}"'.format(e))
# Download Certificate
def download_as_der(
 base_url=d1_common.const.URL_DATAONE_ROOT,
 timeout_sec=d1_common.const.DEFAULT_HTTP_TIMEOUT,
):
 """Download public certificate from a TLS/SSL web server as DER encoded ``bytes``.
 If the certificate is being downloaded in order to troubleshoot validation issues,
 the download itself may fail due to the validation issue that is being investigated.
 To work around such chicken-and-egg problems, temporarily wrap calls to the
 download_* functions with the ``disable_cert_validation()`` context manager (also in
 this module).
 Args:
 base_url : str
 A full URL to a DataONE service endpoint or a server hostname
 timeout_sec : int or float
 Timeout for the SSL socket operations
 Returns:
 bytes: The server's public certificate
all data from the MGH file and return it as a numpy array. Optionally, collect meta data from the mgh file header.
 Parameters
 ----------
 mgh_file_name: string
 A string representing a full path to a file in FreeSurfer MGH file format. If the file name ends with '.mgz' or '.mgh.gz', the file is assumed to be in gzipped MGH format.
 collect_meta_data: bool, optional
 Whether or not to collect meta data from the MGH file header. Defaults to True.
 collect_data: bool, optional
 Whether or not to collect the file data (voxel values) from the MGH file. Defaults to True.
 Returns
 -------
 mgh_data: numpy array
 The data from the MGH file, usually one scalar value per voxel.
 mgh_meta_data: dictionary
 The meta data collected from the header, or an empty dictionary if the argument `collect_meta_data` was 'False'. The keys correspond to the names of the respective nibabel function used to retrieve the data. The values are the data as returned by nibabel.
 Examples
 --------
 Read a file in MGH format from the surf dir of a subject:
 >>> import os
 >>> import brainload.freesurferdata as fsd
 >>> mgh_file = os.path.join('my_subjects_dir', 'subject1', 'surf', 'rh.area.fsaverage.mgh')
 >>> mgh_data, mgh_meta_data = fsd.read_mgh_file(mgh_file)
 See also
 --------
 - https://surfer.nmr.mgh.harvard.edu/fswiki/CoordinateSystems
 - https://github.com/nipy/nibabel/blob/master/nibabel/freesurfer/mghformat.py
 - https://surfer.nmr.mgh.harvard.edu/fswiki/FileFormats
 """
 mgh_meta_data = {}
 if mgh_file_name.endswith(".mgz") or mgh_file_name.endswith(".gz"):
 mgh_file_handle = gzip.open(mgh_file_name, 'rb')
 else:
 mgh_file_handle = open(mgh_file_name, 'rb')
 header = fsmgh.MGHHeader.from_fileobj(mgh_file_handle)
 if collect_meta_data:
 mgh_meta_data['data_shape'] = header.get_data_shape()
 mgh_meta_data['affine'] = header.get_affine()
 mgh_meta_data['best_affine'] = header.get_best_affine() # identical to get_affine for MGH format
 mgh_meta_data['data_bytespervox'] = header.get_data_bytespervox()
 mgh_meta_data['data_dtype'] = header.get_data_dtype()
 mgh_meta_data['data_offset'] = header.get_data_offset() # MGH format has a header, then data, then a footer
 mgh_meta_data['data_shape'] = header.get_data_shape()
 mgh_meta_data['data_size'] = header.get_data_size()
 mgh_meta_data['footer_offset'] = header.get_footer_offset()
 mgh_meta_data['ras2vox'] = header.get_ras2vox()
 mgh_meta_data['slope_inter'] = header.get_slope_inter()
 mgh_meta_data['vox2ras'] = header.get_vox2ras()
 mgh_meta_data['vox2ras_tkr'] = header.get_vox2ras_tkr()
 mgh_meta_data['zooms'] = header.get_zooms() # the voxel dimensions (along all 3 axes in space)
 mgh_data = None
 if collect_data:
 mgh_data = header.data_from_fileobj(mgh_file_handle)
 mgh_file_handle.close()
 return mgh_data, mgh_meta_data
def get_num_fsaverage_verts_per_hemi(fsversion=6):
 """
 Return the number of vertices per fsaverage hemisphere.
 Returns
 -------
 vertcount: int
 The number of vertices per fsaverage hemisphere.
 """
 if fsversion == 6:
 return 163842
 else:
 raise ValueError("Currently the only supported FreeSurfer version is 6.")
def merge_morphometry_data(morphometry_data_arrays, dtype=float):
 """
 Merge morphometry data horizontally.
 Merge morphometry data read from several meshes of the same subject horizontally. This is used to merge data from the left and right hemispheres.
 Parameters
 ----------
 morphometry_data_arrays: 2D array
 An array of arrays, each of which represents morphometry data from different hemispheres of the same subject.
 dtype: data type, optional
 Data type for the output numpy array. Defaults to float.
 Returns
 -------
 numpy array
 Horizontally stacked array containing the data from all arrays in the input array.
 Examples
 --------
 Merge some data:
 >>> lh_morphometry_data = np.array([0.0, 0.1, 0.2, 0.3]) # some fake data
 >>> rh_morphometry_data = np.array([0.5, 0.6])
 >>> merged_data = fsd.merge_morphometry_data(np.array([lh_morphometry_data, rh_morphometry_data]))
 >>> print merged_data.shape
 (6, )
 Typically, the `lh_morphometry_data` and `rh_morphometry_data` come from calls to `read_fs_morphometry_data_file_and_record_meta_data` as shown here:
 >>> lh_morphometry_data, meta_data = read_fs_morphometry_data_file_and_record_meta_data(lh_morphometry_data_file, 'lh')
 >>> rh_morphometry_data, meta_data = read_fs_morphometry_data_file_and_record_meta_data(rh_morphometry_data_file, 'rh', meta_data=meta_data)
 >>> both_hemis_morphometry_data = merge_morphometry_data(np.array([lh_morphometry_data, rh_morphometry_data]))
 """
 merged_data = np.empty((0), dtype=dtype)
 for morphometry_data in morphometry_data_arrays:
 merged_data = np.hstack((merged_data, morphometry_data))
 return merged_data
def _get_morphometry_data_suffix_for_surface(surf):
 """
 Determine FreeSurfer surface representation string.
 Determine the substring representing the given surface in a FreeSurfer output curv file. For FreeSurfer's default surface 'white', the surface is not represented in the output file name pattern. For all others, it is represented by a dot followed by the name.
 Parameters
 ----------
 surf: string
 A string representing a FreeSurfer surface, e.g., 'white' or 'pial'.
 Returns
 -------
 string
 The empty string if `surf` is 'white'. A dot followed by the string in the input argument `surf` otherwise.
 Examples
 --------
 >>> import brainload.freesurferdata as fsd
 >>> print fsd._get_morphometry_data_suffix_for_surface('pial')
 .pial
 """
 if surf == 'white':
 return ''
 return '.' + surf
def read_fs_surface_file_and_record_meta_data(surf_file, hemisphere_label, meta_data=None):
 """
 Read a surface file and record meta data on it.
 Read a surface file and record meta data on it. A surface file is a mesh file in FreeSurfer format, e.g., 'lh.white'. It contains vertices and 3-faces made out of them.
 Parameters
 ----------
 surf_file: string
 A string representing an absolute path to a surface (or 'mesh') file (e.g., the path to 'lh.white').
 hemisphere_label: {'lh' or 'rh'}
 A string representing the hemisphere this file belongs to. This is used to write the correct meta data.
 meta_data: dictionary | None, optional
 Meta data to merge into the output `meta_data`. Defaults to the empty dictionary.
 Returns
 -------
 vert_coords: numpy array
 A 2D array containing 3 coordinates for each vertex in the `surf_file`.
 faces: numpy array
 A 2D array containing 3 vertex indices per face. Look at the respective indices in `vert_coords` to get the vertex coordinates.
 meta_data: dictionary
 Contains detailed information on the data that was loaded. The following keys are available (replace `?h` with the value of the argument `hemisphere_label`, which must be 'lh' or 'rh').
 - `?h.num_vertices` : number of vertices in the loaded mesh
 - `?h.num_faces` : number of faces in the loaded mesh
 - `?h.surf_file` : value of the `surf_file` argument: the mesh file that was loaded
 Examples
 --------
 >>> vert_coords, faces, meta_data = fsd.read_fs_surface_file_and_record_meta_data(surf_file, 'lh')
 >>> print meta_data['lh.num_vertices']
 121567 # arbitrary number, depends on the subject mesh
 """
 if hemisphere_label not in ('lh', 'rh'):
 raise ValueError("ERROR: hemisphere_label must be one of {'lh', 'rh'} but is '%s'." % hemisphere_label)
 if meta_data is None:
 meta_data = {}
 vert_coords, faces = fsio.read_geometry(surf_file)
 label_num_vertices = hemisphere_label + '.num_vertices'
 meta_data[label_num_vertices] = vert_coords.shape[0]
 label_num_faces = hemisphere_label + '.num_faces'
 meta_data[label_num_faces] = faces.shape[0]
 label_surf_file = hemisphere_label + '.surf_file'
 meta_data[label_surf_file] = surf_file
 return vert_coords, faces, meta_data
def _deduce_hemisphere_label_from_file_path(file_path, default="lh"):
 """
 Guess a hemisphere label from a file path.
 Guess a hemisphere label, i.e., one of ```lh``` or ```rh```, from a file path like ```/some/path/to/subjects/subject1/surf/lh.area```.
 Parameters
 ----------
 file_path: str
 The full path to a file.
 default: str
 What to return if the function cannot deduce the information from the given file_path.
 Returns
 -------
 hemi: str
 The hemisphere label
 is_default: boolean
 True if the file_path contained no information to deduce the hemisphere label, and the returned ```hemi``` value thus is the one from the ```default``` parameter.
 """
 path, file_name = os.path.split(file_path)
 accepted_third_chars = [".", "_"]
 for h in ["lh", "rh"]:
 for c in accepted_third_chars:
 if file_name.startswith(h + c):
 return h, False
 return default, True
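# Illustrative behaviour of _deduce_hemisphere_label_from_file_path (sketch; the file
# paths are made up):
#   _deduce_hemisphere_label_from_file_path('/subjects/s1/surf/lh.area') # ('lh', False)
#   _deduce_hemisphere_label_from_file_path('/subjects/s1/surf/rh_thickness.mgh') # ('rh', False)
#   _deduce_hemisphere_label_from_file_path('/subjects/s1/surf/area.mgh') # ('lh', True), falls back to the default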
def read_fs_morphometry_data_file_and_record_meta_data(curv_file, hemisphere_label, meta_data=None, format='curv'):
 """
 Read a morphometry file and record meta data on it.
 Read a morphometry file and record meta data on it. A morphometry file is a file containing a scalar value for each vertex on the surface of a FreeSurfer mesh. An example is the file 'lh.area', which contains the area values for all vertices of the left hemisphere of the white surface. Such a file can be in two different formats: 'curv' or 'mgh'. The former is used when the data refers to the surface mesh of the original subject, the latter when it has been mapped to a standard subject like fsaverage.
 Parameters
 ----------
 curv_file: string
 A string representing a path to a morphometry file (e.g., the path to 'lh.area').
 hemisphere_label: {'lh' or 'rh'}
 A string representing the hemisphere this file belongs to. This is used to write the correct meta data.
 meta_data: dictionary | None, optional
 Meta data to merge into the output `meta_data`. Defaults to the empty dictionary.
 format: {'curv', 'mgh'}, optional
 The file format for the files that are to be loaded. Defaults to 'curv'.
 Returns
 -------
 per_vertex_data: numpy array
 A 1D array containing one scalar value per vertex.
 meta_data: dictionary
 Contains detailed information on the data that was loaded. The following keys are available (replace `?h` with the value of the argument `hemisphere_label`, which must be 'lh' or 'rh').
 - `?h.num_data_points` : the number of data points loaded.
 - `?h.morphometry_file` : the value of the `curv_file` argument (data file that was loaded)
 - `?h.morphometry_file_format` : the value for `format` that was
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
 'status': ['preview'],
 'supported_by': 'community'}
DOCUMENTATION = """
---
module: ce_netstream_aging
version_added: "2.4"
short_description: Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
description:
 - Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
author: YangYang (@QijunPan)
options:
 timeout_interval:
 description:
 - Netstream timeout interval.
 If the timeout type is active, the interval is 1-60 (minutes).
 If the timeout type is inactive, the interval is 5-600 (seconds).
 default: 30
 type:
 description:
 - Specifies the packet type of netstream timeout active interval.
 choices: ['ip', 'vxlan']
 state:
 description:
 - Specify desired state of the resource.
 choices: ['present', 'absent']
 default: present
 timeout_type:
 description:
 - Netstream timeout type.
 choices: ['active', 'inactive', 'tcp-session', 'manual']
 manual_slot:
 description:
 - Specifies the slot number of netstream manual timeout.
"""
EXAMPLES = '''
- name: netstream aging module test
 hosts: cloudengine
 connection: local
 gather_facts: no
 vars:
 cli:
 host: "{{ inventory_hostname }}"
 port: "{{ ansible_ssh_port }}"
 username: "{{ username }}"
 password: "{{ password }}"
 transport: cli
 tasks:
 - name: Configure netstream ip timeout active interval , the interval is 40 minutes.
 ce_netstream_aging:
 timeout_interval: 40
 type: ip
 timeout_type: active
 state: present
 provider: "{{ cli }}"
 - name: Configure netstream vxlan timeout active interval , the interval is 40 minutes.
 ce_netstream_aging:
 timeout_interval: 40
 type: vxlan
 timeout_type: active
 state: present
 provider: "{{ cli }}"
 - name: Delete netstream ip timeout active interval , set the ip timeout interval to 30 minutes.
 ce_netstream_aging:
 type: ip
 timeout_type: active
 state: absent
 provider: "{{ cli }}"
 - name: Delete netstream vxlan timeout active interval , set the vxlan timeout interval to 30 minutes.
 ce_netstream_aging:
 type: vxlan
 timeout_type: active
 state: absent
 provider: "{{ cli }}"
 - name: Enable netstream ip tcp session timeout.
 ce_netstream_aging:
 type: ip
 timeout_type: tcp-session
 state: present
 provider: "{{ cli }}"
 - name: Enable netstream vxlan tcp session timeout.
 ce_netstream_aging:
 type: vxlan
 timeout_type: tcp-session
 state: present
 provider: "{{ cli }}"
 - name: Disable netstream ip tcp session timeout.
 ce_netstream_aging:
 type: ip
 timeout_type: tcp-session
 state: absent
 provider: "{{ cli }}"
 - name: Disable netstream vxlan tcp session timeout.
 ce_netstream_aging:
 type: vxlan
 timeout_type: tcp-session
 state: absent
 provider: "{{ cli }}"
'''
RETURN = '''
proposed:
 description: k/v pairs of parameters passed into module
 returned: verbose mode
 type: dict
 sample: {"timeout_interval": "40",
 "type": "ip",
 "state": "absent",
 "timeout_type": active}
existing:
 description: k/v pairs of existing configuration
 returned: verbose mode
 type: dict
 sample: {"active_timeout": [
 {
 "ip": "40",
 "vxlan": 30
 }
 ],
 "inactive_timeout": [
 {
 "ip": 30,
 "vxlan": 30
 }
 ],
 "tcp_timeout": [
 {
 "ip": "disable",
 "vxlan": "disable"
 }
 ]}
end_state:
 description: k/v pairs of configuration after module execution
 returned: verbose mode
 type: dict
 sample: {"active_timeout": [
 {
 "ip": 30,
 "vxlan": 30
 }
 ],
 "inactive_timeout": [
 {
 "ip": 30,
 "vxlan": 30
 }
 ],
 "tcp_timeout": [
 {
 "ip": "disable",
 "vxlan": "disable"
 }
 ]}
updates:
 description: commands sent to the device
 returned: always
 type: list
 sample: ["undo netstream timeout ip active 40"]
changed:
 description: check to see if a change was made on the device
 returned: always
 type: bool
 sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_config, load_config
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec
class NetStreamAging(object):
 """
 Manages netstream aging.
 """
 def __init__(self, argument_spec):
 self.spec = argument_spec
 self.module = None
 self.init_module()
 # module input info
 self.timeout_interval = self.module.params['timeout_interval']
 self.type = self.module.params['type']
 self.state = self.module.params['state']
 self.timeout_type = self.module.params['timeout_type']
 self.manual_slot = self.module.params['manual_slot']
 # host info
 self.host = self.module.params['host']
 self.username = self.module.params['username']
 self.port = self.module.params['port']
 # state
 self.changed = False
 self.updates_cmd = list()
 self.commands = list()
 self.results = dict()
 self.proposed = dict()
 self.existing = dict()
 self.end_state = dict()
 # local parameters
 self.existing["active_timeout"] = list()
 self.existing["inactive_timeout"] = list()
 self.existing["tcp_timeout"] = list()
 self.end_state["active_timeout"] = list()
 self.end_state["inactive_timeout"] = list()
 self.end_state["tcp_timeout"] = list()
 self.active_changed = False
 self.inactive_changed = False
 self.tcp_changed = False
 def init_module(self):
 """init module"""
 self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)
 def cli_load_config(self, commands):
 """load config by cli"""
 if not self.module.check_mode:
 load_config(self.module, commands)
 def cli_add_command(self, command, undo=False):
 """add command to self.update_cmd and self.commands"""
 if undo and command.lower() not in ["quit", "return"]:
 cmd = "undo " + command
 else:
 cmd = command
 self.commands.append(cmd)
 if command.lower() not in ["quit", "return"]:
 self.updates_cmd.append(cmd)
 def get_exist_timer_out_para(self):
 """Get exist netstream timeout parameters"""
 active_tmp = dict()
 inactive_tmp = dict()
 tcp_tmp = dict()
 active_tmp["ip"] = "30"
 active_tmp["vxlan"] = "30"
 inactive_tmp["ip"] = "30"
 inactive_tmp["vxlan"] = "30"
 tcp_tmp["ip"] = "absent"
 tcp_tmp["vxlan"] = "absent"
 flags = list()
 exp = " | ignore-case include netstream timeout"
 flags.append(exp)
 config = get_config(self.module, flags)
 if config:
 config = config.lstrip()
 config_list = config.split('\n')
 for config_mem in config_list:
 config_mem = config_mem.lstrip()
 config_mem_list = config_mem.split(' ')
 if config_mem_list[2] == "ip":
 if config_mem_list[3] == "active":
 active_tmp["ip"] = config_mem_list[4]
 if config_mem_list[3] == "inactive":
 inactive_tmp["ip"] = config_mem_list[4]
 if config_mem_list[3] == "tcp-session":
 tcp_tmp["ip"] = "present"
 if config_mem_list[2] == "vxlan":
 if config_mem_list[4] == "active":
 active_tmp["vxlan"] = config_mem_list[5]
 if config_mem_list[4] == "inactive":
 inactive_tmp["vxlan"] = config_mem_list[5]
 if config_mem_list[4] == "tcp-session":
 tcp_tmp["vxlan"] = "present"
 self.existing["active_timeout"].append(active_tmp)
 self.existing["inactive_timeout"].append(inactive_tmp)
 self.existing["tcp_timeout"].append(tcp_tmp)
 def get_end_timer_out_para(self):
 """Get end netstream timeout parameters"""
 active_tmp = dict()
 inactive_tmp = dict()
 tcp_tmp = dict()
 active_tmp["ip"] = "30"
 active_tmp["vxlan"] = "30"
 inactive_tmp["ip"] = "30"
 inactive_tmp["vxlan"] = "30"
 tcp_tmp["ip"] = "absent"
 tcp_tmp["vxlan"] = "absent"
 flags = list()
 exp = " | ignore-case include netstream timeout"
 flags.append(exp)
 config = get_config(self.module, flags)
 if config:
 config = config.lstrip()
 config_list = config.split('\n')
 for config_mem in config_list:
 config_mem = config_mem.lstrip()
 config_mem_list = config_mem.split(' ')
 if len(config_mem_list) > 4 and config_mem_list[2] == "ip":
 if config_mem_list[3] == "active":
 active_tmp["ip"] = config_mem_list[4]
 if config_mem_list[3] == "inactive":
 inactive_tmp["ip"] = config_mem_list[4]
 if config_mem_list[3] == "tcp-session":
 tcp_tmp["ip"] = "present"
 if len(config_mem_list) > 5 and config_mem_list[2] == "vxlan":
 if config_mem_list[4] == "active":
 active_tmp["vxlan"] = config_mem_list[5]
 if config_mem_list[4] == "inactive":
 inactive_tmp["vxlan"] = config_mem_list[5]
 if config_mem_list[4] == "tcp-session":
 tcp_tmp["vxlan"] = "present"
 self.end_state["active_timeout"].append(active_tmp)
 self.end_state["inactive_timeout"].append(inactive_tmp)
 self.end_state["tcp_timeout"].append(tcp_tmp)
 def check_params(self):
 """Check all input params"""
 # interval check
 if not str(self.timeout_interval).isdigit():
 self.module.fail_json(
 msg='Error: Timeout interval should be numerical.')
 if self.timeout_type == "active":
 if int(self.timeout_interval) < 1 or int(self.timeout_interval) > 60:
 self.module.fail_json(
 msg="Error: Active interval should between 1 - 60 minutes.")
 if self.timeout_type == "inactive":
 if int(self.timeout_interval) < 5 or int(self.timeout_interval) > 600:
 self.module.fail_json(
 msg="Error: Inactive interval should between 5 - 600 seconds.")
 if self.timeout_type == "manual":
 if not self.manual_slot:
 self.module.fail_json(
 msg="Error: If use manual timeout mode,slot number is needed.")
 if not str(self.manual_slot).isdigit():
 self.module.fail_json(
 msg='Error: Slot number should be numerical.')
 def get_proposed(self):
 """get proposed info"""
 if self.timeout_interval:
 self.proposed["timeout_interval"] = self.timeout_interval
 if self.timeout_type:
 self.proposed["timeout_type"] = self.timeout_type
 if self.type:
 self.proposed["type"] = self.type
 if self.state:
 self.proposed["state"] = self.state
 if self.manual_slot:
 self.proposed["manual_slot"] = self.manual_slot
 def get_existing(self):
 """get existing info"""
 active_tmp = dict()
 inactive_tmp = dict()
 tcp_tmp = dict()
 self.get_exist_timer_out_para()
 if self.timeout_type == "active":
 for active_tmp in self.existing["active_timeout"]:
 if self.state == "present":
 if str(active_tmp[self.type]) != self.timeout_interval:
 self.active_changed = True
 else:
 if self.timeout_interval != "30":
 if str(active_tmp[self.type]) != "30":
 if str(active_tmp[self.type]) != self.timeout_interval:
 self.module.fail_json(
 msg='Error: The specified active interval does not exist.')
 if str(active_tmp[self.type]) != "30":
 self.timeout_interval = active_tmp[self.type]
 self.active_changed = True
 if self.timeout_type == "inactive":
 for inactive_tmp in self.existing["inactive_timeout"]:
 if self.state == "present":
 if str(inactive_tmp[self.type]) != self.timeout_interval:
 self.inactive_changed = True
 else:
 if self.timeout_interval != "30":
 if str(inactive_tmp[self.type]) != "30":
 if str(inactive_tmp[self.type]) != self.timeout_interval:
 self.module.fail_json(
 msg='Error: The specified inactive interval does not exist.')
 if str(inactive_tmp[self.type]) != "30":
 self.timeout_interval = inactive_tmp[self.type]
 self.inactive_changed = True
 if self.timeout_type == "tcp-session":
 for tcp_tmp in self.existing["tcp_timeout"]:
 if str(tcp_tmp[self.type]) != self.state:
 self.tcp_changed = True
 def operate_time_out(self):
 """configure timeout parameters"""
 cmd = ""
 if self.timeout_type == "manual":
 if self.type == "ip":
 self.cli_add_command("quit")
 cmd = "reset netstream cache ip slot %s" % self.manual_slot
 self.cli_add_command(cmd)
 elif self.type == "vxlan":
 self.cli_add_command("quit")
 cmd = "reset netstream cache vxlan inner-ip slot %s" | |
| 
	5],[8, 325])) 
df = pd.DataFrame(datos, columns = ["Masculino", "Femenino"])
MC = df 
indices_personalizados(MC)
# #### 7. Compare the results with those obtained in the assignments from the previous course.
# In[104]:
cadena = "Cuadro Comparativo entre Modelos Supervisados"
print(cadena.center(35," "))
print(" ========================================")
print(" Modelo K Vecinos Mas Cercanos:\n**************************") 
print("Precisión Global: 0.9479495268138801\nError Global: 0.05205047318611988\nPrecision Positiva (PP): 0.9779874213836478\nPrecision Negativa (PN): 0.9177215189873418\nFalsos Positivos (PFP): 0.08227848101265822\nFalsos Negativos (PFN): 0.0220125786163522\nAsertividad Positiva (AP): 0.9228486646884273\nAsertividad Negativa (AN): 0.9764309764309764\n**************************")
print(" Arbol de decision:\n**************************")
print("Precisión Global: 0.9684542586750788\nError Global: 0.03154574132492116\nPrecision Positiva (PP): 0.9688473520249221\nPrecision Negativa (PN): 0.9680511182108626\nFalsos Positivos (PFP): 0.03194888178913738\nFalsos Negativos (PFN): 0.03115264797507788\nAsertividad Positiva (AP): 0.9688473520249221\nAsertividad Negativa (AN): 0.9680511182108626\n**************************")
print(" Arboles Aleatorios:\n**************************")
print("Precisión Global: 0.9889589905362776\nError Global: 0.01104100946372244\nPrecision Positiva (PP): 0.99375\nPrecision Negativa (PN): 0.9840764331210191\nFalsos Positivos (PFP): 0.01592356687898089\nFalsos Negativos (PFN): 0.00625\nAsertividad Positiva (AP): 0.9845201238390093\nAsertividad Negativa (AN): 0.9935691318327974\n**************************")
print(" Modelo ADA Boosting:\n**************************")
print("Precisión Global: 0.9810725552050473,\nError Global: 0.018927444794952675\nPrecision Positiva (PP): 0.990625\nPrecision Negativa (PN): 0.9713375796178344\nFalsos Positivos (PFP): 0.028662420382165606\nFalsos Negativos (PFN): 0.009375\nAsertividad Positiva (AP): 0.9723926380368099\nAsertividad Negativa (AN): 0.9902597402597403\n**************************")
print(" Modelo XG Boosting:\n**************************")
print("Precisión Global: 0.9889589905362776,\nError Global: 0.01104100946372244\nPrecision Positiva (PP): 0.99375\nPrecision Negativa (PN): 0.9840764331210191\nFalsos Positivos (PFP): 0.01592356687898089\nFalsos Negativos (PFN): 0.00625\nAsertividad Positiva (AP): 0.9845201238390093\nAsertividad Negativa (AN): 0.9935691318327974\n**************************")
print(" Modelo Maquinas de Soporte Vectorial:\n**************************")
print("Precisión Global: 0.9826498422712934\nError Global: 0.017350157728706628\nPrecision Positiva (PP): 0.9821958456973294\nPrecision Negativa (PN): 0.9831649831649831\nFalsos Positivos (PFP): 0.016835016835016835\nFalsos Negativos (PFN): 0.017804154302670624\nAsertividad Positiva (AP): 0.9851190476190477\nAsertividad Negativa (AN): 0.9798657718120806\n**************************")
print(" Modelo Redes Neuronales - TensorFlow y Keras\n**************************")
print("Precisión Global: 0.9794952681388013\nError Global: 0.02050473186119872\nPrecision Positiva (PP): 0.975975975975976\nPrecision Negativa (PN): 0.9833887043189369\nFalsos Positivos (PFP): 0.016611295681063124\nFalsos Negativos (PFN): 0.024024024024024024\nAsertividad Positiva (AP): 0.9848484848484849\nAsertividad Negativa (AN): 0.9736842105263158\n**************************")
print(" ========================================")
# ##### Analysis
# * Comparing the results, the neural network built with TensorFlow performs very well; however, the best results are still obtained by the Random Forest together with the XG Boosting model (as in the previous exercise), reaching almost 99% global accuracy, more than 98% Asertividad Positiva (positive predictive value) and more than 99% Asertividad Negativa.
# #### Exercise 2:
# #### This question uses the data (tumores.csv). It is a brain-tumor feature data set that includes five first-order variables, eight texture variables and four image-quality parameters, together with the target level. The variables are: Mean, Variance, Standard deviation, Skewness, Kurtosis, Contrast, Energy, ASM (angular second moment), Entropy, Homogeneity, Dissimilarity, Correlation, Coarseness, PSNR (peak signal-to-noise ratio), SSIM (structural similarity index), MSE (mean square error), DC (Dice coefficient), and the variable to predict, tipo (1 = Tumor, 0 = No-Tumor).
# #### 1. Using the MLPClassifier package and the Keras package in Python, build predictive models for the Tumores.csv table using 70% of the data for the training table and 30% for the testing table. Use enough hidden layers and nodes so that the prediction is good.
# #### Using the MLPClassifier package
# In[7]:
tumores = pd.read_csv("tumores.csv", delimiter = ',', decimal = '.')
tumores.head()
# In[8]:
tumores.info()
# In[9]:
# Convert the variable from object to categorical
tumores['imagen'] = tumores['imagen'].astype('category')
print(tumores.info())
print(tumores.head())
# Recode the categories as numbers
tumores["imagen"] = tumores["imagen"].cat.codes
print(tumores.info())
print(tumores.head())
# Convert the variable from integer back to categorical
tumores['imagen'] = tumores['imagen'].astype('category')
print(tumores.info())
print(tumores.head())
# In[10]:
tumores.tail() # the categorical variable has been converted to a number
# #### Distribution of the target variable
# In[11]:
distribucion_variable_predecir(tumores,"tipo") # Highly imbalanced problem.
# In[12]:
# Normalize and center the table since the values are on different scales
tumores_1 = tumores.iloc[:,0:17]
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaled_values = scaler.fit_transform(tumores_1) 
tumores_1.loc[:,:] = scaled_values
tumores_1.head()
# Variables on different scales have been rescaled.
# #### Drop the categorical variable; keep the predictor variables in X
# In[13]:
X = tumores_1.iloc[:,0:17] 
X.head()
# #### Keep the target variable in y
# In[14]:
y = tumores.iloc[:,17:18] 
y.head()
# #### Split the data: 70% for training and 30% for testing
# In[15]:
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
# #### Using two hidden layers, with 10000 and 150 nodes respectively (hidden_layer_sizes=[10000, 150])
# In[17]:
instancia_red = MLPClassifier(solver='lbfgs', random_state=0,hidden_layer_sizes=[10000, 150])
print(instancia_red)
# #### Training the model with the fit method
# In[18]:
instancia_red.fit(X_train,y_train.iloc[:,0].values)
print("Las predicciones en Testing son: {}".format(instancia_red.predict(X_test)))
# #### Model quality indices
# In[19]:
prediccion = instancia_red.predict(X_test)
MC = confusion_matrix(y_test, prediccion)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
 print("\n%s:\n%s"%(k,str(indices[k])))
# #### Using the Keras package
# In[20]:
distribucion_variable_predecir(tumores,"tipo") # Es un problema desbalanceado.
# In[21]:
tumores.info() # Hay una categoria dentro de los datos, sin embargo esa variable ya que se habia convertido. 
# tumores_1.info()
# #### Elimina la variable catégorica, deja las variables predictoras en X
# In[22]:
X = tumores_1.iloc[:,0:17] 
X.head()
# #### Keep the target variable in y
# #### Since the target variable is given as 0 and 1, it needs to be converted to Si and No.
# In[24]:
import pandas as pd
d = tumores
df = pd.DataFrame(data=d)
df
# In[40]:
df.replace({0: "No", 1: "Si"}, inplace = True)
print(df.iloc[:,17:18]) # The values were replaced successfully.
# In[26]:
y = df.iloc[:,17:18] 
y.head()
# #### Split the data: 70% for training and 30% for testing
# #### The target (now Si/No) is one-hot encoded with get_dummies and the predictors are rescaled to [0, 1] before splitting
# In[27]:
dummy_y = pd.get_dummies(y)
scaler = MinMaxScaler(feature_range = (0, 1))
scaled_X = pd.DataFrame(scaler.fit_transform(X), columns = list(X))
X_train, X_test, y_train, y_test = train_test_split(scaled_X, dummy_y, train_size = 0.7, random_state = 0)
print(dummy_y)
# #### Building the model in Keras
# In[33]:
model = Sequential()
model.add(Dense(1000, input_dim = 17, activation = 'relu')) # first hidden layer with 1000 neurons
model.add(Dense(500, activation = 'sigmoid')) # second hidden layer with 500 neurons
model.add(Dense(300, activation = 'sigmoid')) # third hidden layer with 300 neurons
model.add(Dense(50, activation = 'relu')) # fourth hidden layer with 50 neurons
model.add(Dense(2, activation = 'softmax')) # output layer with 2 neurons
# #### Compiling the model
# In[34]:
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
# #### Model summary
# In[35]:
print(model.summary())
# #### Fitting the model
# #### We use 10000 training epochs and update the network weights after every 150 processed observations (batch_size).
# In[36]:
model.fit(X_train, y_train, epochs = 10000, batch_size = 150, verbose = 0)
# The prediction is a matrix with 2 columns (one probability per class)
y_pred = model.predict(X_test)
# Convert to a single class-index column
y_test_class = np.argmax(np.asanyarray(y_test), axis = 1) # convert to array, then take the argmax
y_pred_class = np.argmax(y_pred, axis = 1)
# #### Predictions and model quality
# In[37]:
scores = model.evaluate(X_test, y_test)
MC = confusion_matrix(y_test_class, y_pred_class)
indices = indices_general(MC,list(np.unique(y)))
for k in indices:
 print("\n%s:\n%s"%(k,str(indices[k])))
# #### 2. For the testing data, compute the global accuracy and the confusion matrix. Interpret the quality of the results. Also compare against the results obtained in the assignments from the previous course.
# In[43]:
cadena = "Cuadro Comparativo entre Calidades de los Modelos Supervisados"
print(cadena.center(100," "))
print(" ========================================")
print(" Modelo K Vecinos Mas Cercanos:\n**************************") 
print("Precisión Global: 0.9479495268138801\nError Global: 0.05205047318611988\n**************************")
print(" Arbol de decision:\n**************************")
print("Precisión Global: 0.9684542586750788\nError Global: 0.03154574132492116\n**************************")
print(" Arboles Aleatorios:\n**************************")
print("Precisión Global: 0.9889589905362776\nError Global: 0.01104100946372244\n**************************")
print(" Modelo ADA Boosting:\n**************************")
print("Precisión Global: 0.9810725552050473,\nError Global: 0.018927444794952675\n**************************")
print(" Modelo XG Boosting:\n**************************")
print("Precisión Global: 0.9889589905362776,\nError Global: 0.01104100946372244\n**************************")
print(" Modelo Maquinas de Soporte Vectorial:\n**************************")
print("Precisión Global: 0.9826498422712934\nError Global: 0.017350157728706628\n**************************")
print(" Modelo utilizando paquete MLPClassifier\n**************************")
print("Precisión Global: 0.9686684073107049\nError Global: 0.031331592689295085\n**************************")
print(" Modelo Redes Neuronales - TensorFlow y Keras\n**************************")
print("Precisión Global: 0.9712793733681462\nError Global: 0.02872062663185382\n**************************")
print(" ========================================")
# #### 3. Compare the results with those obtained in the assignments from the previous course.
# #### Analysis
# * Comparing the results obtained with the neural networks against those from the previous assignments, the best results are still obtained with Random Forests and XG Boosting in terms of global accuracy (almost 99%), while the error is just over 1%.
# * It is worth highlighting that with Keras and Google's TensorFlow package, the results for the specific "no tumor" category are considerably better than in almost all the other models; since this is an imbalanced problem, models tend to perform well only on "Si", which is where most of the data for prediction lies.
# ### Exercise 3:
# #### [do not use MLPClassifier or Keras] Design a single-layer neural network (perceptron) for the NAND truth table:
# In[45]:
from IPython.display import Image
Image(filename="/Users/heinerleivagmail.com/cadena.png")
# #### That is, find the weights w1, w2 and the threshold θ for the neural network shown in the figure below, using a sigmoid activation function:
# In[46]:
from IPython.display import Image
Image(filename="/Users/heinerleivagmail.com/screen.png")
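# A minimal illustrative sketch (my own choice of weights, not the assignment's required
# derivation): with a sigmoid activation, w1 = w2 = -10 and threshold θ = -15, i.e.
# output = sigmoid(w1*x1 + w2*x2 - θ) = sigmoid(-10*x1 - 10*x2 + 15), reproduces the NAND
# truth table, since only the input (1, 1) pushes the argument below zero.
# In[ ]:
import numpy as np
def nand_perceptron(x1, x2, w1=-10.0, w2=-10.0, theta=-15.0):
    z = w1 * x1 + w2 * x2 - theta  # theta acts as the activation threshold
    return 1.0 / (1.0 + np.exp(-z))  # sigmoid activation
for a, b in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print(a, b, round(float(nand_perceptron(a, b)), 3))  # ~1.0, 0.993, 0.993, 0.007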
# In[2]:
import numpy as np
import pandas as pd
import math
def sigmoidea(pesos, predictoras):
 x = 0
 for i