input
stringlengths 2.65k
237k
| output
stringclasses 1
value |
---|---|
keyword_parameters):
build = keyword_parameters['build']
else:
build = 'hg19'
if build == 'hg19':
L = np.array([249250621, 243199373, 198022430, 191154276, 180915260, 171115067, 159138663,
146364022, 141213431, 135534747, 135006516, 133851895, 115169878, 107349540,
102531392, 90354753, 81195210, 78077248, 59128983, 63025520, 48129895, 51304566,
155270560, 59373566, 16569]) # chromosome lengths from genome-mysql.cse.ucsc.edu
if build == 'hg38':
L = np.array([248956422,242193529,198295559,190214555,181538259,170805979,159345973,
145138636,138394717,133797422,135086622,133275309,114364328,107043718,
101991189,90338345,83257441,80373285,58617616,64444167,46709983,50818468,
156040895,57227415,16569])
C = np.append(1, np.cumsum(L))
x = np.array([chromosome[int(i)] for i in np.arange(0, len(position))],dtype=int)
return C[x] + position
def fix_het_file_header(het_file):
    """Normalize a het-site table's column names to the canonical headers.

    Allows flexibility in het file headers to accommodate changing versions
    of GATK4 and other CN tools.  To add support for additional headers,
    extend the lists returned by ``alternate_file_headers``.

    arg:    het_file (pandas.DataFrame): table of heterozygous sites
    return: (pandas.DataFrame) - the same table with columns renamed to
            CONTIG, POSITION, ALT_COUNT, REF_COUNT
    Exits the process (``sys.exit``) when a required header is missing and
    no single unambiguous alternate header is present.
    """
    headers = alternate_file_headers()
    required_headers = ['CONTIG', 'POSITION', 'ALT_COUNT', 'REF_COUNT']
    # Fast path: every required header is already present.
    if np.sum(np.isfinite(is_member(required_headers, het_file.columns))) == len(required_headers):
        return het_file
    # Map each canonical header to its key in the alternate-headers dict,
    # replacing four near-identical copy/pasted branches of the original.
    alternate_keys = {
        'CONTIG': 'alternate_headers_chromosome',
        'POSITION': 'alternate_headers_position',
        'ALT_COUNT': 'alternate_headers_alt_count',
        'REF_COUNT': 'alternate_headers_ref_count',
    }
    missing_idx = np.where(~np.isfinite(is_member(required_headers, het_file.columns)))
    for i in missing_idx[0]:
        required = required_headers[i]
        candidates = headers[alternate_keys[required]]
        present = np.isfinite(is_member(candidates, het_file.columns))
        # Require exactly one matching alternate so the rename is unambiguous.
        # (Bug fix: the original REF_COUNT error message wrongly said ALT_COUNT.)
        if np.sum(present) != 1:
            sys.exit('missing required header ' + required +
                     ' and could not replace with any one of ' + ', '.join(candidates) + '!')
        replacement = candidates[np.where(present)[0][0]]
        het_file.rename(columns={replacement: required}, inplace=True)
        print('changing header of het file from ' + replacement + ' to ' + required)
    return het_file
def fix_seg_file_header(seg_file):
    """Normalize a segment table's column names to the canonical headers.

    Allows flexibility in seg file headers to accommodate changing versions
    of GATK4 and other CN tools.  To add support for additional headers,
    extend the lists returned by ``alternate_file_headers``.

    arg:    seg_file (pandas.DataFrame): copy-number segment table
    return: (pandas.DataFrame) - the same table with columns renamed to
            Chromosome, Start.bp, End.bp, f, tau, n_probes.  When tau is
            sourced from LOG2_COPY_RATIO_POSTERIOR_50 the column is also
            transformed out of log2 space: tau = 2^(ratio) + 1.
    Exits the process (``sys.exit``) when a required header is missing and
    no single unambiguous alternate header is present.
    """
    headers = alternate_file_headers()
    required_headers = ['Chromosome', 'Start.bp', 'End.bp', 'f', 'tau', 'n_probes']
    # Bug fix: the original compared the match count against the literal 5
    # although there are 6 required headers, so a table missing exactly one
    # header was returned unmodified.  Compare against the list length.
    if np.sum(np.isfinite(is_member(required_headers, seg_file.columns))) == len(required_headers):
        return seg_file
    # Map each canonical header to its key in the alternate-headers dict,
    # replacing six near-identical copy/pasted branches of the original.
    alternate_keys = {
        'Chromosome': 'alternate_headers_chromosome',
        'Start.bp': 'alternate_headers_start_position',
        'End.bp': 'alternate_headers_end_position',
        'f': 'alternate_headers_f',
        'tau': 'alternate_headers_tau',
        'n_probes': 'alternate_headers_n_probes',
    }
    missing_idx = np.where(~np.isfinite(is_member(required_headers, seg_file.columns)))
    for i in missing_idx[0]:
        required = required_headers[i]
        candidates = headers[alternate_keys[required]]
        present = np.isfinite(is_member(candidates, seg_file.columns))
        # Require exactly one matching alternate so the rename is unambiguous.
        if np.sum(present) != 1:
            sys.exit('missing required header ' + required +
                     ' and could not replace with any one of ' + ', '.join(candidates))
        replacement = candidates[np.where(present)[0][0]]
        seg_file.rename(columns={replacement: required}, inplace=True)
        # GATK4 emits tau as a log2 copy-ratio posterior; re-center it at 2.
        if required == 'tau' and replacement == 'LOG2_COPY_RATIO_POSTERIOR_50':
            print('transforming log2 data tau column to 2 centered: 2^(CNratio)+1')
            seg_file['tau'] = np.power(2, seg_file['tau']) + 1
        print('changing header of seg file from ' + replacement + ' to ' + required)
    return seg_file
def read_indel_vcf(vcf,seg_table,indel_type):
content = []
if vcf[-2:] == 'gz':
with gzip.open(vcf, 'r') as f:
content = f.readlines()
else:
with open(vcf) as f:
content = f.readlines()
cols_type = {0: str}
for line in content:
if line[0] == '#' and line[1] != '#':
headerline = line.split('\t')
break
if indel_type.lower() == 'strelka':
indel_table = pd.read_csv(vcf, sep='\t', comment='#', header=None, low_memory=False, dtype=cols_type)
indel_table.rename(columns={0: 'contig', 1: 'position',2:'ID',3:'REF',4:'ALT',5:'QUAL',7:'INFO', 8: 'format', 6: 'filter', 9: headerline[9].lower(), 10: headerline[10][0:-1].lower()},
inplace=True)
counts_format = indel_table['format'][0].split(':')
depth_ix = counts_format.index('DP')
alt_indel_ix = counts_format.index('TIR')
ref_indel_ix = counts_format.index('TAR')
indel_table = indel_table[np.isfinite(is_member(indel_table['filter'], ['PASS', 'QSI_ref']))]
indel_table.reset_index(inplace=True, drop=True)
elif indel_type.lower() == 'mutect2':
indel_table = pd.read_csv(vcf, sep='\t', comment='#', header=None, low_memory=False, dtype=cols_type)
# CHROM POS ID REF ALT QUAL FILTER INFO FORMAT TUMOR NORMAL
normal_sample = 'normal'
tumor_sample = 'tumor'
for line in content:
if line[0:15] == '##normal_sample':
normal_sample = line.split('=')[1][0:-1]
if line[0:14] == '##tumor_sample':
tumor_sample = line.split('=')[1][0:-1]
if tumor_sample == 'tumor' and normal_sample == 'normal':
indel_table.rename(
columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',
6: 'filter', 9: 'tumor', 10: 'normal'},
inplace=True)
else:
if tumor_sample == headerline[9]:
indel_table.rename(
columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',
6: 'filter', 9: 'tumor', 10: 'normal'},
inplace=True)
elif tumor_sample == headerline[10][0:-1]:
indel_table.rename(
columns={0: 'contig', 1: 'position', 2: 'ID', 3: 'REF', 4: 'ALT', 5: 'QUAL', 7: 'INFO', 8: 'format',
6: 'filter', 9: 'normal', 10: 'tumor'},
inplace=True)
else:
print('failed to read MuTect 2 indels VCF')
sys.exit()
counts_format = indel_table['format'][0].split(':')
depth_ix = counts_format.index('AD')
indel_table = indel_table[np.isfinite(is_member(indel_table['filter'], ['PASS', 'alt_allele_in_normal','artifact_in_normal']))]
indel_table.reset_index(inplace=True, drop=True)
elif indel_type.lower() == 'sanger':
indel_table = pd.read_csv(vcf, sep='\t', comment='#', header=None, low_memory=False, dtype=cols_type)
# CHROM POS ID REF ALT QUAL FILTER INFO FORMAT NORMAL TUMOUR
indel_table.rename(columns={0: 'contig', 1: 'position',2:'ID',3:'REF',4:'ALT',5:'QUAL',7:'INFO',8: 'format', 6: 'filter', 9: headerline[9].lower(), 10: headerline[10][0:-1].lower()},
inplace=True)
b1 = np.logical_or.reduce([indel_table['filter'] == 'F012', indel_table['filter'] == 'F012;F015'])
b2 = np.logical_or.reduce([indel_table['filter'] == 'PASS', indel_table['filter'] == 'F015'])
indel_table = indel_table[np.logical_or.reduce([b1, b2])]
indel_table.reset_index(inplace=True,drop=True)
format_string = indel_table['format'][0].split(':')
total_depth_idx = [format_string.index('PR'), format_string.index('NR')]
alt_count_idx = [format_string.index('PU'), format_string.index('NU')]
# parsing format line and file to determine required alt and ref columns
# we use "tier 1" read counts for varaints
n_depth = np.zeros([len(indel_table), 1])
n_alt_count = np.zeros([len(indel_table), 1])
n_ref_count = np.zeros([len(indel_table), 1])
t_depth = np.zeros([len(indel_table), 1])
t_alt_count = np.zeros([len(indel_table), 1])
t_ref_count = np.zeros([len(indel_table), 1])
for index, row in indel_table.iterrows():
spl_n = row['normal'].split(':')
spl_t = row['tumor'].split(':')
if indel_type.lower() == 'strelka':
n_depth[index] = int(spl_n[depth_ix])
n_alt_count[index] = int(spl_n[alt_indel_ix].split(',')[0])
n_ref_count[index] = int(spl_n[ref_indel_ix].split(',')[0])
t_depth[index] = int(spl_t[depth_ix])
t_alt_count[index] = int(spl_t[alt_indel_ix].split(',')[0])
t_ref_count[index] = int(spl_t[ref_indel_ix].split(',')[0])
if indel_type.lower() == 'mutect2':
n_alt_count[index] = int(spl_n[depth_ix].split(',')[1])
n_ref_count[index] = int(spl_n[depth_ix].split(',')[0])
n_depth[index] = n_alt_count[index]+n_ref_count[index]
t_alt_count[index] = int(spl_t[depth_ix].split(',')[1])
t_ref_count[index] = int(spl_t[depth_ix].split(',')[0])
t_depth[index] = t_alt_count[index] + t_ref_count[index]
if indel_type.lower() == 'sanger':
n_depth[index] = np.sum([int(spl_n[i]) for i in total_depth_idx])
n_alt_count[index] = np.sum([int(spl_n[i]) for i in alt_count_idx])
n_ref_count[index] = n_depth[index] - n_alt_count[index]
t_depth[index] = np.sum([int(spl_t[i]) for i in total_depth_idx])
t_alt_count[index] = np.sum([int(spl_t[i]) for i in alt_count_idx])
t_ref_count[index] = t_depth[index] - t_alt_count[index]
if len(indel_table) == 0:
indel_table = pd.DataFrame(index=[0],columns=['contig', 'position','ID','REF','ALT','QUAL','INFO','format', 'filter',headerline[9].lower(), headerline[10][0:-1].lower(),
't_depth','t_alt_count','t_ref_count','n_alt_count','n_depth','n_ref_count','tau','f_acs','Chromosome','genomic_coord_x'])
else:
indel_table['t_depth'] = t_alt_count + t_ref_count
indel_table['t_alt_count'] = t_alt_count
indel_table['t_ref_count'] = t_ref_count
indel_table['n_depth'] = n_alt_count + n_ref_count
indel_table['n_alt_count'] = n_alt_count
indel_table['n_ref_count'] = n_ref_count
# only consider sites which were rejected as germline or were passed
if type(indel_table['contig'][0]) == str :
indel_table['Chromosome'] = chr2num(indel_table['contig'])
else:
indel_table['Chromosome'] = indel_table['contig']-1
# add linear position field and consider only sites which are rejected as germline i.e. PASS or QSI_ref
indel_table = indel_table[np.isfinite(indel_table['Chromosome'])]
indel_table.reset_index(inplace=True, drop=True)
indel_table['genomic_coord_x'] = hg19_to_linear_positions(indel_table['Chromosome'], indel_table['position'])
# annotate with acs data
f_acs = np.zeros([len(indel_table), 1]) + 0.5
tau = np.zeros([len(indel_table), 1]) + 2
for i, r in seg_table.iterrows():
f_acs[np.logical_and(np.array(indel_table['genomic_coord_x']) >= | |
is
intended as a hint to an application that may opt not to offer
lookup operations to unauthorized users.
return: (boolean) - ``false`` if looking up mappings is not
authorized, ``true`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.can_lookup_resource_bin_mappings
# NOTE: It is expected that real authentication hints will be
# handled in a service adapter above the pay grade of this impl.
return True
@utilities.arguments_not_none
def get_log_entry_ids_by_log(self, log_id):
    """Gets the list of ``LogEntry`` ``Ids`` associated with a ``Log``.

    arg:    log_id (osid.id.Id): ``Id`` of a ``Log``
    return: (osid.id.IdList) - list of related logEntry ``Ids``
    raise:  NotFound - ``log_id`` is not found
    raise:  NullArgument - ``log_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resource_ids_by_bin
    # Bug fix: the loop previously iterated over the undefined name
    # ``log_ids`` (a NameError at call time); the parameter is ``log_id``.
    id_list = []
    for log_entry in self.get_log_entries_by_log(log_id):
        id_list.append(log_entry.get_id())
    return IdList(id_list)
@utilities.arguments_not_none
def get_log_entries_by_log(self, log_id):
    """Return every log entry contained in the given ``Log``.

    arg:    log_id (osid.id.Id): ``Id`` of a ``Log``
    return: (osid.logging.LogEntryList) - list of related logEntry
    raise:  NotFound - ``log_id`` is not found
    raise:  NullArgument - ``log_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resources_by_bin
    provider = self._get_provider_manager('LOGGING', local=True)
    entry_lookup = provider.get_log_entry_lookup_session_for_log(log_id, proxy=self._proxy)
    entry_lookup.use_isolated_log_view()
    return entry_lookup.get_log_entries()
@utilities.arguments_not_none
def get_log_entry_ids_by_log(self, log_ids):
    """Return the ``LogEntry Ids`` for a list of ``Log`` objects.

    arg:    log_ids (osid.id.IdList): list of log ``Ids``
    return: (osid.id.IdList) - list of logEntry ``Ids``
    raise:  NullArgument - ``log_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resource_ids_by_bin
    # NOTE(review): this definition shares its name with an earlier method
    # in the class and therefore shadows it -- confirm that is intended.
    return IdList([entry.get_id() for entry in self.get_log_entries_by_log(log_ids)])
@utilities.arguments_not_none
def get_log_entrie_by_log(self, log_ids):
    """Return the log entries for a list of ``Log`` objects.

    arg:    log_ids (osid.id.IdList): list of log ``Ids``
    return: (osid.logging.LogEntryList) - list of log entries
    raise:  NullArgument - ``log_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_resources_by_bin
    # NOTE(review): the name looks like a typo for ``get_log_entries_by_log``;
    # kept unchanged because callers may depend on it.
    provider = self._get_provider_manager('LOGGING', local=True)
    entry_lookup = provider.get_log_entry_lookup_session_for_log(log_ids, proxy=self._proxy)
    entry_lookup.use_isolated_log_view()
    return entry_lookup.get_log_entries()
@utilities.arguments_not_none
def get_log_ids_by_log_entry(self, log_entry_id):
    """Return the ``Ids`` of every ``Log`` a ``LogEntry`` is mapped to.

    arg:    log_entry_id (osid.id.Id): ``Id`` of a ``LogEntry``
    return: (osid.id.IdList) - list of log ``Ids``
    raise:  NotFound - ``log_entry_id`` is not found
    raise:  NullArgument - ``log_entry_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinSession.get_bin_ids_by_resource
    provider = self._get_provider_manager('LOGGING', local=True)
    entry_lookup = provider.get_log_entry_lookup_session(proxy=self._proxy)
    entry_lookup.use_federated_log_view()
    entry = entry_lookup.get_log_entry(log_entry_id)
    # The assigned log ids are persisted as strings on the entry's map.
    return IdList([Id(idstr) for idstr in entry._my_map['assignedLogIds']])
@utilities.arguments_not_none
def get_log_by_log_entry(self, log_entry_id):
    """Gets the list of ``Log`` objects mapped to a ``LogEntry``.
    arg: log_entry_id (osid.id.Id): ``Id`` of a ``LogEntry``
    return: (osid.logging.LogList) - list of log
    raise: NotFound - ``log_entry_id`` is not found
    raise: NullArgument - ``log_entry_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Not implemented in this provider: callers receive errors.Unimplemented.
    raise errors.Unimplemented()
class LogEntryLogAssignmentSession(abc_logging_sessions.LogEntryLogAssignmentSession, osid_sessions.OsidSession):
"""This session provides methods to re-assign log entries to ``Logs``.
A ``LogEntry`` may map to multiple ``Log`` objects and removing the
last reference to a ``LogEntry`` is the equivalent of deleting it.
Each ``Log`` may have its own authorizations governing who is
allowed to operate on it.
Moving or adding a reference of a ``LogEntry`` to another ``Log`` is
not a copy operation (eg: does not change its ``Id`` ).
"""
_session_namespace = 'logging.LogEntryLogAssignmentSession'
def __init__(self, proxy=None, runtime=None, **kwargs):
    """Initialize the assignment session with an optional proxy and runtime."""
    # Let the shared OsidSession machinery wire up the catalog context first.
    OsidSession._init_catalog(self, proxy, runtime)
    self._kwargs = kwargs
    self._forms = {}
    self._catalog_name = 'Log'
def can_assign_log_entries(self):
    """Tests if this user can alter log entry/log mappings.

    A return of true does not guarantee successful authorization.  A
    return of false indicates that it is known mapping methods in this
    session will result in a ``PermissionDenied``.  This is intended as
    a hint to an application that may opt not to offer assignment
    operations to unauthorized users.

    return: (boolean) - ``false`` if mapping is not authorized,
            ``true`` otherwise
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.can_assign_resources
    # Real authorization hints are expected to be supplied by a service
    # adapter layered above this implementation, so always allow here.
    return True
@utilities.arguments_not_none
def can_assign_log_entries_to_log(self, log_id):
    """Tests if this user can alter log entry/log mappings.

    A return of true does not guarantee successful authorization.  A
    return of false indicates that it is known mapping methods in this
    session will result in a ``PermissionDenied``.  This is intended as
    a hint to an application that may opt not to offer assignment
    operations to unauthorized users.

    arg:    log_id (osid.id.Id): the ``Id`` of the ``Log``
    return: (boolean) - ``false`` if mapping is not authorized,
            ``true`` otherwise
    raise:  NullArgument - ``log_id`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.can_assign_resources_to_bin
    # Real authorization hints are expected to be supplied by a service
    # adapter above this implementation; only the all-zero identifier
    # (the phantom root catalog) refuses assignment.
    return log_id.get_identifier() != '000000000000000000000000'
@utilities.arguments_not_none
def get_assignable_log_ids(self, log_id):
    """Return the ``Ids`` of logs to which any log entry may be assigned.

    arg:    log_id (osid.id.Id): the ``Id`` of the ``Log``
    return: (osid.id.IdList) - list of assignable log ``Ids``
    raise:  NullArgument - ``log_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
    # This will likely be overridden by an authorization adapter; here
    # every known log is considered assignable.
    provider = self._get_provider_manager('LOGGING', local=True)
    log_lookup = provider.get_log_lookup_session(proxy=self._proxy)
    return IdList([log.get_id() for log in log_lookup.get_logs()])
@utilities.arguments_not_none
def get_assignable_log_ids_for_log_entry(self, log_id, log_entry_id):
    """Return the log ``Ids`` to which a specific log entry may be assigned.

    arg:    log_id (osid.id.Id): the ``Id`` of the ``Log``
    arg:    log_entry_id (osid.id.Id): the ``Id`` of the ``LogEntry``
    return: (osid.id.IdList) - list of assignable log ``Ids``
    raise:  NullArgument - ``log_id`` or ``log_entry_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids_for_resource
    # This will likely be overridden by an authorization adapter; the
    # entry id plays no role in the default implementation.
    return self.get_assignable_log_ids(log_id)
@utilities.arguments_not_none
def assign_log_entry_to_log(self, log_entry_id, log_id):
    """Adds an existing ``LogEntry`` to a ``Log``.

    arg:    log_entry_id (osid.id.Id): the ``Id`` of the ``LogEntry``
    arg:    log_id (osid.id.Id): the ``Id`` of the ``Log``
    raise:  AlreadyExists - ``log_entry_id`` is already assigned to
            ``log_id``
    raise:  NotFound - ``log_entry_id`` or ``log_id`` not found
    raise:  NullArgument - ``log_entry_id`` or ``log_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
    provider = self._get_provider_manager('LOGGING', local=True)
    log_lookup = provider.get_log_lookup_session(proxy=self._proxy)
    # Looking the log up first surfaces NotFound before any mutation.
    log_lookup.get_log(log_id)
    self._assign_object_to_catalog(log_entry_id, log_id)
@utilities.arguments_not_none
def unassign_log_entry_from_log(self, log_entry_id, log_id):
    """Removes a ``LogEntry`` from a ``Log``.

    arg:    log_entry_id (osid.id.Id): the ``Id`` of the ``LogEntry``
    arg:    log_id (osid.id.Id): the ``Id`` of the ``Log``
    raise:  NotFound - ``log_entry_id`` or ``log_id`` not found or
            ``log_entry_id`` not assigned to ``log_id``
    raise:  NullArgument - ``log_entry_id`` or ``log_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
    provider = self._get_provider_manager('LOGGING', local=True)
    log_lookup = provider.get_log_lookup_session(proxy=self._proxy)
    # Looking the log up first surfaces NotFound before any mutation.
    log_lookup.get_log(log_id)
    self._unassign_object_from_catalog(log_entry_id, log_id)
@utilities.arguments_not_none
def reassign_log_entry_to_log(self, log_entry_id, from_log_id, to_log_id):
"""Moves a ``LogEntry`` from one ``Log`` to another.
Mappings to other ``Logs`` are unaffected.
arg: log_entry_id (osid.id.Id): the ``Id`` of | |
+= following_span.tokens
span.before_dash += following_span.before_dash
following_span.in_base = True
# если это ПРИМЫКАЮЩИЙ спан!!!
else:
if span.after_dash:
if span.accept_base(following_span) and span.coordinate(following_span):
# print span.tokens[0].content, following_span.tokens[0].content, 777, j
# print following_span.tokens[0].content, backward
span.ellipsis = False
span.base = False
span.in_base = False
span.tokens += following_span.tokens
span.shared_tokens += following_span.tokens
span.before_dash += following_span.before_dash
following_span.in_base = True
following_span.base = False
last_added = j
else:
# print span.tokens[0].content, following_span.tokens[0].content,\
# span.accept_base(following_span), span.coordinate(following_span)
# print span.tokens[0].content
"""
Было и работало неверно:
print span.before_dash, span.finite(), following_span.finite()
if (span.before_dash and not span.finite() and not following_span.finite()) or\
(not span.before_dash and (span.finite() or following_span.finite())):
"""
# print span.before_dash, span.finite(), following_span.finite()
if (span.before_dash and not span.finite() and not following_span.finite()) or\
(not (span.finite() and following_span.finite())):
# print 1
if span.accept_base(following_span) and span.coordinate(following_span):
# print span.tokens[0].content, following_span.tokens[0].content, 777, j
# print following_span.tokens[0].content, backward
span.tokens += following_span.tokens
span.shared_tokens += following_span.tokens
span.before_dash += following_span.before_dash
following_span.in_base = True
following_span.base = False
last_added = j
if span.before_dash:
span.after_dash = True
if span.tokens[0].lex not in myData.specificators:
# print 1
span.ellipsis = True
# if following_span.base:
# break
if span.finite() and switch:
# print span.tokens[0].content, 888
break
# Split embedded spans (gerund or participle clauses) that contain a
# coordinated second clause: scan the span's tokens from right to left for
# the conjunction 'и' and, once a gerund/participle has been seen to its
# right, cut the span at the conjunction into two embedded spans of the
# same embedded_type.  Non-embedded spans are handed to find_coordination.
# If anything was split, self.spans is replaced with a deep copy of the
# rebuilt self.new_spans list.
# NOTE(review): indentation was lost in this copy of the file, so the exact
# nesting of the if/elif branches below must be confirmed against the
# original source before any behavioral change is made.
def split_embedded(self):
find = False
for span in self.spans:
# print span.basic, span.tokens[0].content
self.new_spans.append(span)
if span.embedded:
# print span.embedded_type
find_gerund = False
if span.embedded_type == u'gerund':
# only multi-gerund spans can contain a coordinated clause
if span.gerund > 1:
for i, token in reversed(list(enumerate(span.tokens))):
if len(token.pos) > 2:
# pos[0] == 'V' and pos[2] == 'g' marks a gerund token
if token.pos[0] == u'V':
if token.pos[2] == u'g':
find_gerund = True
find += True
elif token.lex == u'и':
# conjunction with a gerund already seen to its right:
# cut here and emit the tail as a new embedded span
if find_gerund:
if i > 0:
new_span = Span()
new_span.embedded = True
new_span.embedded_type = span.embedded_type
for following_token in span.tokens[i::]:
new_span.tokens.append(following_token)
self.new_spans[-1].tokens = span.tokens[:i:]
self.new_spans.append(new_span)
break
find_participle = False
if span.embedded_type == u'participle':
# print span.participle_number()
# mirror of the gerund branch, keyed on participles (pos[2] == 'p')
if span.participle_number() > 1:
# print 1
for i, token in reversed(list(enumerate(span.tokens))):
if len(token.pos) > 2:
if token.pos[0] == u'V':
if token.pos[2] == u'p':
find_participle = True
find += True
elif token.lex == u'и':
if find_participle:
if i > 0:
new_span = Span()
new_span.embedded = True
new_span.embedded_type = span.embedded_type
for following_token in span.tokens[i::]:
new_span.tokens.append(following_token)
self.new_spans[-1].tokens = span.tokens[:i:]
self.new_spans.append(new_span)
break
else:
# embedded span of another type: fall back to coordination search
find += self.find_coordination(span)
else:
# print span.tokens[0].content
# non-embedded span: coordination search only
find += self.find_coordination(span)
if find:
# print 1
# something was split: replace spans with the rebuilt list
self.spans = copy.deepcopy(self.new_spans)
def split_base(self):
    """Mark non-embedded, non-inserted spans as basic and split coordinated ones.

    Rebuilds ``self.new_spans`` from ``self.spans``; each span that is not
    embedded, not inside an embedded span, and not inserted is flagged
    ``basic`` and passed to ``find_coordination``, which may append split-off
    spans to ``self.new_spans``.  If any split happened, ``self.spans`` is
    replaced with a deep copy of the rebuilt list.
    """
    self.new_spans = []
    found = False
    for candidate in self.spans:
        self.new_spans.append(candidate)
        if not (candidate.embedded or candidate.in_embedded or candidate.inserted):
            candidate.basic = True
            found += self.find_coordination(candidate)
    if found:
        self.spans = copy.deepcopy(self.new_spans)
def split_spans(self, span, i, embedded_type=None):
    """Cut ``span`` in two at token index ``i``.

    The tokens before index ``i`` stay in ``span``; the rest move to a new
    span (a deep copy of the original, so all other attributes carry over)
    which is appended to ``self.new_spans``.  When ``embedded_type`` is
    given, the new span is flagged as embedded of that type.

    Returns True to signal to the caller that a split took place.
    """
    trailing = copy.deepcopy(span)
    trailing.tokens = span.tokens[i:]
    trailing.shared_tokens = span.shared_tokens[i:]
    if embedded_type:
        trailing.embedded = True
        trailing.embedded_type = embedded_type
    span.tokens = span.tokens[:i]
    span.shared_tokens = span.shared_tokens[:i]
    self.new_spans.append(trailing)
    return True
def find_coordination(self, span):
# and_number = len([True for token in span.tokens if token.lex == u'и'])
predicate_number = len([True for token in span.tokens if token.predicate()])
infinitive_number = len([True for token in span.tokens if token.infinitive()])
predicate_after_and = len([True for i, token in enumerate(span.tokens[:-1:]) if token.lex == u'и' and
(span.tokens[i+1].predicate() or span.tokens[i+1].infinitive() or
span.tokens[i+1].gerund_participle())])
# Это для ФИНИТНЫХ ПРЕДИКАТОВ
# print span.tokens[0].content, predicate_number, predicate_after_and
if predicate_number > 1 or predicate_after_and:
# # print 1
# if and_number == 1 or predicate_after_and:
# # print 1
# for i, token in reversed(list(enumerate(span.tokens))):
# if token.lex == u'и':
# if i > 0:
# for j, following_token in enumerate(span.tokens[i+1::], start=i+1):
# # print following_token.content
# if following_token.lex == u'который':
# continue
# if following_token.predicate():
# return self.split_spans(span, i)
#
# # здесь написал continue т.к. есть случаи "это лишь опасения и он не придет"
# elif following_token.pos[0] != u'R':
# continue
# # return False
# elif and_number > 1:
for i, token in reversed(list(enumerate(span.tokens))):
if token.lex == u'и':
if i > 0 and i < len(span.tokens) - 1:
# print len(span.tokens) - 1
# print i
# print span.tokens[0].content, span.tokens[-1].content
left_span = Span()
left_span.tokens = left_span.shared_tokens = span.tokens[:i:]
right_span = Span()
right_span.tokens = right_span.shared_tokens = span.tokens[i+1::]
if left_span.coordinate(right_span):
# print left_span.tokens[-1].content, right_span.tokens[0].content
continue
else:
# print 1
for j, following_token in enumerate(span.tokens[i+1::], start=i+1):
# if following_token.lex == u'который':
# continue
if following_token.predicate():
return self.split_spans(span, i)
# elif following_token.pos[0] != u'R':
# return False
# Это для ИНФИНИТИВОВ
# print span.tokens[0].content, predicate_number
if infinitive_number > 1 or (predicate_after_and and not span.finite() and infinitive_number < 2):
for i, token in reversed(list(enumerate(span.tokens))):
if token.lex == u'и':
if i > 0:
left_span = Span()
left_span.tokens = left_span.shared_tokens = span.tokens[:i:]
right_span = Span()
right_span.tokens = right_span.shared_tokens = span.tokens[i+1::]
if left_span.coordinate(right_span):
# print left_span.tokens[-1].content, right_span.tokens[0].content
continue
else:
# print 1
for j, following_token in enumerate(span.tokens[i+1::], start=i+1):
# if following_token.lex == u'который':
# continue
if following_token.infinitive():
return self.split_spans(span, i)
# elif following_token.pos[0] != u'R':
# return False
# Это для ПРИЧАСТИЙ
"""
Я себя хочу убить за такое рукожопие, но пока так
"""
# print span.tokens[0].content, predicate_number, predicate_after_and
if predicate_after_and:
for i, token in reversed(list(enumerate(span.tokens))):
if token.lex == u'и':
if i > 0:
left_span = Span()
left_span.tokens = left_span.shared_tokens = span.tokens[:i:]
right_span = Span()
right_span.tokens = right_span.shared_tokens = span.tokens[i+1::]
if left_span.coordinate(right_span):
# print left_span.tokens[-1].content, right_span.tokens[0].content
continue
else:
# print 1
for j, following_token in enumerate(span.tokens[i+1::], start=i+1):
# if following_token.lex == u'который':
# continue
if following_token.gerund_participle():
return self.split_spans(span, i, embedded_type=u'participle')
# elif following_token.pos[0] != u'R':
# return False
return False
    def find_complimentizers(self):
        """Fuse multi-word complimentizers (from myData.complex_complimentizers)
        into a single token with pos u'C', then ensure a COMMA token precedes it.

        NOTE(review): this removes elements from self.tokens while enumerate()
        is iterating over it, and inserts a new comma into the same list --
        index drift after a match is possible; confirm intended behavior.
        """
        for i, token in enumerate(self.tokens):
            if token.content.lower() in myData.complex_complimentizers:
                # print token.content
                for item in myData.complex_complimentizers[token.content.lower()]:
                    # item is (full complimentizer string, its word count)
                    end = i + item[1]
                    if len(self.tokens) > end + 1:
                        # print len(self.tokens), end + 1
                        new = [token]
                        j = i
                        # Collect item[1] non-comma tokens starting at i.
                        while len(new) != item[1]:
                            j += 1
                            # try:
                            if u'COMMA' not in self.tokens[j].pos:
                                new.append(self.tokens[j])
                            # except:
                            # print u' '.join([token.content for token in self.tokens])
                        new_complimentizer = u' '.join([next_token.content.lower() for next_token in new])
                        # print new_complimentizer_lex, 2
                        if new_complimentizer == item[0]:
                            # print new_complimentizer
                            # Collapse the sequence into the first token.
                            token.content = new_complimentizer
                            token.lex = new_complimentizer
                            token.pos = u'C'
                            token.end = new[-1].end
                            for next_token in self.tokens[i+1:j+1:]:
                                self.tokens.remove(next_token)
                            # Guarantee a comma right before the complimentizer.
                            if i != 0:
                                if u'COMMA' not in self.tokens[i-1].pos:
                                    new_comma = Token()
                                    new_comma.pos = u'COMMA'
                                    self.tokens.insert(i, new_comma)
    def find_np(self):
        """Greedy left-to-right search for adjective + agreeing pos-u'S' token
        pairs (noun phrases): record [i, j] in self.np and flag the covered
        tokens as in_np.  Python 2 code (uses xrange)."""
        match = -1  # rightmost index already consumed by a found NP
        for i, token in enumerate(self.tokens):
            if i > match:
                if not token.in_pp:
                    if token.is_adj():
                        # print self.tokens[i].content
                        for j, following_token in enumerate(self.tokens[i+1::], start=i+1):
                            # print self.tokens[j].content
                            if not following_token.in_pp:
                                if not following_token.in_np:
                                    # self.tokens[j].in_pp = True
                                    if following_token.pos == u'S':
                                        if token.agree_adj_noun(following_token):
                                            self.np.append([i, j])
                                            # print self.tokens[j].content
                                            for k in xrange(i, j+1):
                                                self.tokens[k].in_np = True
                                            match = j
                                            break
    def find_pp(self):
        """Right-to-left search for phrases headed by a pos-u'S' token
        (presumably prepositions, given agree_pr_noun -- confirm tagset):
        pair it with the first following free 'N'/'P' token that agrees,
        record [i, j] in self.pp, mark covered tokens in_pp and demote any
        COMMA inside to pseudoCOMMA.

        NOTE(review): the bare Python-2 `print inner_token.content` below
        looks like leftover debug output.
        """
        for i, token in reversed(list(enumerate(self.tokens))):
            if token.pos[0] == u'S':
                # print token.content, u'\n'
                for j, following_token in enumerate(self.tokens[i+1::], start=i+1):
                    # print following_token.content, 555, following_token.in_pp
                    if not following_token.in_pp:
                        # print following_token.content, 777
                        if following_token.pos[0] in u'NP':
                            # print following_token.content, 888
                            if token.agree_pr_noun(following_token):
                                # print following_token.content
                                self.pp.append([i, j])
                                for inner_token in self.tokens[i: j+1]:
                                    print inner_token.content
                                    inner_token.in_pp = True
                                    if inner_token.pos == u'COMMA':
                                        inner_token.pos = u'pseudoCOMMA'
                                break
    def eliminate_and_disambiguate(self):
        """Placeholder: NP-internal comma demotion is currently disabled."""
        pass
        # for pair in self.np:
        #     for token in self.tokens[pair[0]+1: pair[1]]:
        #         if token.pos == u'COMMA':
        #             token.pos = u'pseudoCOMMA'
class Span:
    """A contiguous stretch of sentence tokens plus the bookkeeping flags used
    while carving a sentence into clauses."""

    def __init__(self):
        self.tokens = []           # tokens owned by this span
        self.shared_tokens = []    # tokens considered when coordinating spans
        self.begin = 0
        self.end = 0
        self.embedded = False          # span is an embedded clause
        self.quasi_embedded = False
        self.in_embedded = False       # span lies inside an embedded clause
        self.embedded_type = None      # e.g. u'participle', u'complement', u'relative'
        self.base = False
        self.in_base = False
        self.basic = False             # set by split_base for top-level spans
        self.inserted = False
        self.indicative = False
        self.gerund = 0
        self.participle = 0
        self.relative = 0
        self.inside_quotes = False
        self.semicolon = False
        self.before_dash = False
        self.after_dash = False
        self.before_colon = False
        self.complement_type = None    # compared in predicate_coordination
        self.null_copula = False
        self.ellipsis = False
        self.entity_number = None
        # self.finite = False
def incomplete(self):
if self.embedded_type == u'complement' or self.embedded_type == u'relative':
return not self.finite()
def predicate_coordination(self, following_span):
# print self.shared_tokens[1].content, self.nominative(),
# following_span.nominative(), following_span.shared_tokens[0].content
if self.inside_quotes is following_span.inside_quotes:
if not(self.nominative() and following_span.nominative()):
if self.embedded_type and following_span.embedded_type:
if self.embedded_type != following_span.embedded_type:
return False
if self.embedded_type == u'complement':
if self.complement_type != following_span.complement_type:
return False
for token in reversed(self.shared_tokens):
if re.match(u'(N...n.)|(P....n.)|(M...[n-])', token.pos):
return False
if token.predicate():
# print token.content
for other_token in following_span.tokens:
# if token.pos[0] == u'V' and other_token.pos[0] == u'V':
# print token.content
if token.coordinate(other_token):
# print | |
and data_set_type == TYPE_RESULT_TABLE:
tmp_sql += ' and 1=0 '
tmp_sql += """ ${ds_where_cond}
and concat(id,'') not in(select target_id from tag_target
where active=1 ${bk_biz_id_cond} and target_type in('raw_data','data_id')
and tag_code in('metric_domain') and target_id in(
select concat(id,'') from access_raw_data where 1=1 ${ds_where_cond}
))"""
tdw_cal_sql = """select ${need_fields} from tdw_table where 1=1 """
if need_detail_type == NEED_DATA_SET_ID_DETAIL and data_set_type == TYPE_RAW_DATA:
tdw_cal_sql += ' and 1=0 '
tdw_cal_sql += """ ${tdw_where_cond}
and table_id not in(select target_id from tag_target where active=1 ${bk_biz_id_cond}
and target_type in('tdw_table') and tag_code in('metric_domain') and target_id in(
select table_id from tdw_table where 1=1 ${tdw_where_cond}
))"""
tdw_detail_sql = tdw_cal_sql.replace(
'${need_fields}', " 'tdw_table' as target_type,table_id as target_id,bk_biz_id,null project_id,updated_at "
)
tdw_cal_sql = tdw_cal_sql.replace('${need_fields}', " count(*) dataset_count ")
detail_sql = 'select t1.* ' + tmp_sql
if need_detail_type == NEED_DATA_SET_ID_DETAIL and platform == TYPE_ALL:
detail_sql += ' union all '
detail_sql += tdw_detail_sql
elif need_detail_type == NEED_DATA_SET_ID_DETAIL and platform == TYPE_TDW:
detail_sql = 'select t1.* from(' + tdw_detail_sql
detail_sql += ")t1"
tmp_sql += """)t1"""
tmp_sql = replace_other_params(tmp_sql, rt_where_cond, ds_where_cond, tdw_where_cond)
detail_sql = replace_other_params(detail_sql, rt_where_cond, ds_where_cond, tdw_where_cond)
tdw_cal_sql = replace_other_params(tdw_cal_sql, rt_where_cond, ds_where_cond, tdw_where_cond)
cal_sql += tmp_sql
return cal_sql, detail_sql, tdw_cal_sql
def replace_other_params(tmp_sql, rt_where_cond, ds_where_cond, tdw_where_cond):
    """Substitute the shared SQL template placeholders in *tmp_sql*.

    ${bk_biz_id_cond} is filled from exclude_bk_biz_id_cond(); the other
    placeholders come straight from the arguments.
    """
    substitutions = (
        ('${rt_where_cond}', rt_where_cond),
        ('${ds_where_cond}', ds_where_cond),
        ('${bk_biz_id_cond}', exclude_bk_biz_id_cond()),
        ('${tdw_where_cond}', tdw_where_cond),
    )
    for placeholder, value in substitutions:
        tmp_sql = tmp_sql.replace(placeholder, value)
    return tmp_sql
def get_optimize_search_sql(
    sys_where_cond, bk_biz_id, project_id
):
    """Build the per-category dataset-count SQL for the optimized search path.

    Used when the keyword is empty, so result_table / access_raw_data do not
    need to be filtered by content.  bk_biz_id / project_id / sys_where_cond
    each contribute an extra filter only when provided.
    """
    cal_sql = """select tag_code data_category,count(distinct target_id) dataset_count from tag_target where active=1
    and target_type in('result_table','table','raw_data','data_id') ${bus_where_cond} ${source_tag_where_cond} """
    extra = []
    if bk_biz_id is not None:
        extra.append(' and bk_biz_id=' + str(bk_biz_id))
    if project_id:
        extra.append(' and project_id=' + str(project_id))
    cal_sql += ''.join(extra)
    if sys_where_cond:
        sub_select = """ and target_id in(select target_id from tag_target
        where target_type in('result_table','table','raw_data','data_id') ${sys_where_cond})"""
        cal_sql += sub_select.replace('${sys_where_cond}', sys_where_cond)
    return cal_sql + " group by data_category"
def data_mart_node_count(
    sys_where_cond,
    bus_where_cond,
    source_tag_where_cond,
    rt_where_cond,
    ds_where_cond,
    tdw_where_cond,
    only_standard=False,
    data_set_type=TYPE_ALL,
    need_detail_type=NEED_DEFAULT_DETAIL,
    platform=TYPE_ALL,
):  # counts for the data-mart root node
    """Assemble the count SQL and detail SQLs for the data-mart root node.

    Builds three per-source selects (result_table, access_raw_data, tdw_table),
    disables a branch with ' and 1=0 ' when the requested data_set_type /
    only_standard excludes it, and unions them.  Returns
    (cal_sql, detail_sql, rt_detail_sql, ds_detail_sql, tdw_cal_sql,
    tdw_detail_sql); the *_detail_sql variants still contain ${need_fields}.
    """
    cal_sql = """select
    count(target_id) total_count,
    count(case when target_type in('result_table','table','tdw_table') then target_id end) dataset_count,
    count(case when target_type in('raw_data','data_id') then target_id end) data_source_count,
    count(distinct bk_biz_id) bk_biz_count,count(distinct project_id) project_count
    """
    detail_sql = ''
    cal_sql += """ from("""
    rt_cal_sql = """select ${need_fields} from result_table where 1=1 ${rt_where_cond}
    """
    # Raw-data-only detail requests must not return result tables.
    if need_detail_type == NEED_DATA_SET_ID_DETAIL and data_set_type == TYPE_RAW_DATA:
        rt_cal_sql += ' and 1=0 '
    rt_cal_sql = rt_cal_sql.replace('${rt_where_cond}', rt_where_cond)
    if only_standard:
        rt_cal_sql += get_only_standard_filter_cond('result_table_id', only_rt=True)
    rt_detail_sql = rt_cal_sql
    rt_cal_sql = rt_cal_sql.replace(
        '${need_fields}', " 'result_table' as target_type,result_table_id as target_id,bk_biz_id,project_id,updated_at "
    )
    detail_sql += rt_cal_sql
    detail_sql += ' union all '
    ds_cal_sql = """ select ${need_fields} from access_raw_data where 1=1 ${ds_where_cond}
    """
    # Result-table-only detail requests must not return raw data.
    if need_detail_type == NEED_DATA_SET_ID_DETAIL and data_set_type == TYPE_RESULT_TABLE:
        ds_cal_sql += ' and 1=0 '
    if only_standard:
        ds_cal_sql += get_only_standard_filter_cond("id", only_rd=True)
    ds_cal_sql = ds_cal_sql.replace('${ds_where_cond}', ds_where_cond)
    ds_detail_sql = ds_cal_sql
    ds_cal_sql = ds_cal_sql.replace(
        '${need_fields}', " 'raw_data' as target_type,concat(id,'') target_id,bk_biz_id,null project_id,updated_at "
    )
    detail_sql += ds_cal_sql
    # append the TDW rows
    # detail_sql += ' union all '
    tdw_cal_sql = """ select ${need_fields} from tdw_table where 1=1 ${tdw_where_cond}
    """
    if need_detail_type == NEED_DATA_SET_ID_DETAIL and data_set_type == TYPE_RAW_DATA:
        tdw_cal_sql += ' and 1=0 '
    if only_standard:
        tdw_cal_sql += ' and 1=0 '
    tdw_cal_sql = tdw_cal_sql.replace('${tdw_where_cond}', tdw_where_cond)
    tdw_detail_sql = tdw_cal_sql
    tmp_tdw_detail_sql = tdw_cal_sql.replace(
        '${need_fields}', " 'tdw_table' as target_type,table_id target_id,bk_biz_id,null project_id,updated_at "
    )
    tdw_cal_sql = tdw_cal_sql.replace('${need_fields}', " count(*) dataset_count ")
    # NOTE: cal_sql unions only rt+ds here; TDW counts travel in tdw_cal_sql.
    cal_sql += detail_sql
    cal_sql += ')tmp'
    if need_detail_type == NEED_DATA_SET_ID_DETAIL and platform == TYPE_ALL:
        detail_sql += ' union all '
        detail_sql += tmp_tdw_detail_sql
    elif need_detail_type == NEED_DATA_SET_ID_DETAIL and platform == TYPE_TDW:
        detail_sql = tmp_tdw_detail_sql
    return cal_sql, detail_sql, rt_detail_sql, ds_detail_sql, tdw_cal_sql, tdw_detail_sql
def parse_tag_codes_list(tag_codes_list):
    """Expand '|'-joined tag codes and de-duplicate.

    Falsy input (None or empty list) is returned unchanged.

    Fix: the original used list(set(...)), whose ordering is nondeterministic
    across runs, making any SQL built from the result non-reproducible.
    dict.fromkeys() de-duplicates while preserving first-seen order.
    """
    if not tag_codes_list:
        return tag_codes_list
    expanded = []
    for tag_code in tag_codes_list:
        if '|' in tag_code:
            expanded.extend(tag_code.split('|'))
        else:
            expanded.append(tag_code)
    return list(dict.fromkeys(expanded))
def gen_filter_tag_sql(code_list):
    """Build an " and tag_code in ('a','b',...)" SQL fragment.

    Returns '' for an empty/None *code_list*.  Uses str.join instead of the
    original append-then-slice-off-the-trailing-comma dance.

    NOTE(review): values are inlined by string concatenation -- safe only if
    code_list never carries user-controlled input; otherwise switch to a
    parameterized query.
    """
    if not code_list:
        return ''
    quoted = ",".join("'" + sys_code + "'" for sys_code in code_list)
    return ' and tag_code in (' + quoted + ')'
def get_data_set_id_cond(keyword, bk_biz_id, project_id, rt_where_cond):
    """Build the data_set_id sub-select filter from *rt_where_cond*.

    *keyword*, *bk_biz_id* and *project_id* are accepted only for signature
    compatibility with callers; they do not affect the result.
    """
    template = ' and data_set_id in(select result_table_id from result_table where 1=1 ${rt_where_cond})'
    return template.replace('${rt_where_cond}', rt_where_cond)
def get_standard_node_cal_sql_v2(
    keyword,
    bk_biz_id,
    project_id,
    sys_where_cond_list,
    bus_where_cond,
    source_tag_where_cond,
    rt_where_cond,
    ds_where_cond,
    platform=TYPE_ALL,
):
    """Build the per-standard dataset-count SQL (v2).

    With no system/source-tag filters the cheaper pre-aggregated form is used;
    otherwise the counts are joined against tag_target.  platform=TYPE_TDW
    zeroes the result via ' and 1=0 '.
    """
    platform_cond = ''
    if platform == TYPE_TDW:
        platform_cond = ' and 1=0 '
    if not sys_where_cond_list and source_tag_where_cond == '':
        data_set_id_where_cond = get_data_set_id_cond(keyword, bk_biz_id, project_id, rt_where_cond)
        # NOTE(review): 'and c.active=1 and c.active=1' below is duplicated.
        cal_sql = """select t1.*,(case when t2.dataset_count is null then 0 else t2.dataset_count end) dataset_count,
        (case when t2.all_count is null then 0 else t2.all_count end) all_count from(
        select 0 as is_selected,1 as kpath,'standard' as me_type,b.standard_id,
        b.id standard_version_id,concat(b.id,'') as category_name,
        a.standard_name as category_alias,a.description,a.category_id,c.tag_code parent_code,b.standard_version_status
        from dm_standard_config a,dm_standard_version_config b,tag_target c where b.standard_version_status='online'
        and a.id=b.standard_id and a.id=c.target_id and a.active=1 and c.active=1 and c.active=1
        and c.tag_code=c.source_tag_code and c.target_type='standard') t1
        left join
        (
        select standard_version_id data_category,count(distinct data_set_id) dataset_count,count(data_set_id) all_count
        from dm_task_detail where active=1 and data_set_type='result_table'
        ${data_set_id_where_cond} ${platform_cond} group by standard_version_id
        ) t2
        on t1.standard_version_id=t2.data_category"""
        cal_sql = cal_sql.replace('${rt_where_cond}', rt_where_cond)
        cal_sql = cal_sql.replace('${data_set_id_where_cond}', data_set_id_where_cond)
        cal_sql = cal_sql.replace('${platform_cond}', platform_cond)
        return cal_sql
    else:
        cal_sql = """select t1.*,(case when t2.dataset_count is null then 0 else t2.dataset_count end) dataset_count,
        (case when t2.all_count is null then 0 else t2.all_count end) all_count from(
        select 0 as is_selected,1 as kpath,'standard' as me_type,b.standard_id,
        b.id standard_version_id,concat(b.id,'') as category_name,
        a.standard_name as category_alias,a.description,a.category_id,c.tag_code parent_code,b.standard_version_status
        from dm_standard_config a,dm_standard_version_config b,tag_target c where b.standard_version_status='online'
        and a.id=b.standard_id and a.id=c.target_id and a.active=1 and c.active=1 and c.active=1
        and c.tag_code=c.source_tag_code and c.target_type='standard') t1
        left join
        (select a1.data_category,a2.tag_code as parent_code ,count(distinct a1.target_id) dataset_count,
        count(a1.target_id) all_count from (
        select standard_version_id data_category,data_set_id target_id from dm_task_detail
        where active=1 and data_set_type='result_table' ${platform_cond}
        and data_set_id in(select result_table_id from result_table where 1=1 ${rt_where_cond} ) ) a1
        left join tag_target a2 on a1.target_id=a2.target_id
        where a2.tag_code=a2.source_tag_code and active=1 and tag_type='business' ${a2_bk_biz_id_cond}
        ${source_tag_where_cond} ${bus_where_cond} ${sys_where_cond2}
        group by a1.data_category,a2.tag_code) t2
        on t1.standard_version_id=t2.data_category and t1.parent_code=t2.parent_code"""
        sys_where_cond2 = ''
        if sys_where_cond_list:
            # NOTE(review): each iteration OVERWRITES sys_where_cond2, so only
            # the last condition in sys_where_cond_list takes effect; this
            # looks like it should accumulate (+=) -- confirm intent.
            for sys_where_cond in sys_where_cond_list:
                sys_where_cond2 = """ and a2.target_id in(select target_id from tag_target
                where active=1 and target_type in('result_table','table') ${sys_where_cond})"""
                sys_where_cond2 = sys_where_cond2.replace('${sys_where_cond}', sys_where_cond)
        cal_sql = cal_sql.replace('${a2_bk_biz_id_cond}', exclude_bk_biz_id_cond('a2'))
        cal_sql = cal_sql.replace('${rt_where_cond}', rt_where_cond)
        cal_sql = cal_sql.replace('${ds_where_cond}', ds_where_cond)
        cal_sql = cal_sql.replace('${source_tag_where_cond}', source_tag_where_cond)
        cal_sql = cal_sql.replace('${bus_where_cond}', bus_where_cond)
        cal_sql = cal_sql.replace('${sys_where_cond2}', sys_where_cond2)
        cal_sql = cal_sql.replace('${platform_cond}', platform_cond)
        return cal_sql
def get_standard_node_cal_sql(sys_where_cond, bus_where_cond, source_tag_where_cond, rt_where_cond, ds_where_cond):
    """Build the per-standard dataset-count SQL (v1: result_table + raw_data
    task details unioned, then joined against business tags).

    NOTE(review): 'and c.active=1 and c.active=1' below is duplicated.
    """
    cal_sql = """select t1.*,(case when t2.dataset_count is null then 0 else t2.dataset_count end) dataset_count from(
    select 0 as is_selected,1 as kpath,'standard' as me_type,b.standard_id,b.id standard_version_id,
    concat(b.id,'') as category_name,
    a.standard_name as category_alias,a.description,a.category_id,c.tag_code parent_code,b.standard_version_status
    from dm_standard_config a,dm_standard_version_config b,tag_target c where b.standard_version_status='online'
    and a.id=b.standard_id and a.id=c.target_id and a.active=1 and c.active=1 and c.active=1
    and c.tag_code=c.source_tag_code and c.target_type='standard') t1
    left join
    (select a1.data_category,a2.tag_code as parent_code ,count(distinct a1.target_id) dataset_count from (
    select standard_version_id data_category,data_set_id target_id from dm_task_detail
    where active=1 and data_set_type='result_table'
    and data_set_id in(select result_table_id from result_table where 1=1 ${rt_where_cond} )
    union all
    select standard_version_id data_category,data_set_id target_id from dm_task_detail
    where active=1 and data_set_type='raw_data'
    and data_set_id in(select id from access_raw_data where 1=1 ${ds_where_cond} )) a1
    left join tag_target a2 on a1.target_id=a2.target_id
    where a2.tag_code=a2.source_tag_code and active=1 and tag_type='business'
    ${source_tag_where_cond} ${bus_where_cond} ${sys_where_cond2}
    group by a1.data_category,a2.tag_code) t2
    on t1.standard_version_id=t2.data_category and t1.parent_code=t2.parent_code"""
    sys_where_cond2 = ''
    if sys_where_cond:
        sys_where_cond2 = """ and a2.target_id in(select target_id from tag_target
        where active=1 and target_type in('result_table','table') ${sys_where_cond})"""
        sys_where_cond2 = sys_where_cond2.replace('${sys_where_cond}', sys_where_cond)
    cal_sql = cal_sql.replace('${rt_where_cond}', rt_where_cond)
    cal_sql = cal_sql.replace('${ds_where_cond}', ds_where_cond)
    cal_sql = cal_sql.replace('${source_tag_where_cond}', source_tag_where_cond)
    cal_sql = cal_sql.replace('${bus_where_cond}', bus_where_cond)
    cal_sql = cal_sql.replace('${sys_where_cond2}', sys_where_cond2)
    return cal_sql
def delete_not_kpath_node(tree_list):
    """Prune nodes whose kpath == 0 from *tree_list* in place, recursing into
    each surviving node's sub_list.  Iterates back-to-front so deletions do
    not shift indices still to be visited."""
    for index in reversed(range(len(tree_list))):
        node = tree_list[index]
        if node['kpath'] == 0:
            del tree_list[index]
            continue
        children = node.get('sub_list')
        if children:
            delete_not_kpath_node(children)
def add_virtual_other_node(show_list):
    """Run cal_virtual_other_node over every top-level node in *show_list*,
    adding the synthetic "other" children throughout the tree."""
    for node_dict in show_list:
        cal_virtual_other_node(node_dict)
def cal_virtual_other_node(node_dict):
    """Append a virtual "other" child under *node_dict* when its children's
    all_count totals fall short of the parent's, then recurse into children.

    For standard-type children the virtual node is the "non-standard data"
    bucket with its own name prefix.
    """
    sub_list = node_dict.get('sub_list')
    dataset_count = node_dict.get('all_count', 0)
    category_name = node_dict['category_name']
    category_alias = node_dict['category_alias']
    category_id = node_dict['category_id']
    if sub_list:
        vir_other_name = category_alias + _('-其他')
        vir_category_name = VIRTUAL_OTHER_NODE_PRE + category_name
        me_type = CAL_TYPE_TAG
        if sub_list[0]['me_type'] == CAL_TYPE_STANDARD:
            vir_other_name = _('非标准数据')
            vir_category_name = VIRTUAL_OTHER_NOT_STANDARD_NODE_PRE + category_name
        sub_dataset_count_sum = 0
        for sub_node_dict in sub_list:
            sub_category_name = sub_node_dict['category_name']
            # Stop at an existing virtual node -- presumably means the bucket
            # was already added on a previous pass (TODO confirm).
            if sub_category_name.startswith(VIRTUAL_OTHER_NODE_PRE):
                break
            sub_dataset_count_sum += sub_node_dict.get('all_count', 0)
        if dataset_count - sub_dataset_count_sum > 0:
            sub_list.append(
                {
                    "tag_type": "business",
                    "category_alias": vir_other_name,
                    "description": OTHER_STR,
                    "icon": None,
                    "is_selected": 0,
                    "kpath": 1,
                    "sync": 1,
                    "sub_list": [],
                    "parent_id": category_id,
                    "parent_code": category_name,
                    "me_type": me_type,
                    "dataset_count": dataset_count - sub_dataset_count_sum,
                    "category_id": -2,
                    "seq_index": 100,
                    "category_name": vir_category_name,
                }
            )
        for sub_node_dict in sub_list:
            cal_virtual_other_node(sub_node_dict)
def acculate_tree_node(tree_list, id_name): # 累计各个父节点的值
need_dict = {} # | |
. group
lisp . lisp_send_map_request ( iI1Ii11iII1 , 0 , O0oOO0O , oO , i1 )
if 7 - 7: o0oOOo0O0Ooo - I1IiiI
if 100 - 100: oO0o + I11i . OOooOOo * Ii1I
if 73 - 73: i1IIi + I1IiiI
if 46 - 46: OoO0O00 . Oo0Ooo - OoooooooOO
if 93 - 93: iII111i
if 10 - 10: I11i
lisp . lisp_write_ipc_map_cache ( True , mc )
return ( True , parms )
if 82 - 82: I1ii11iIi11i - iIii1I11I1II1 / OOooOOo + Ii1I
if 87 - 87: oO0o * I1ii11iIi11i + OOooOOo / iIii1I11I1II1 / iII111i
if 37 - 37: iII111i - ooOoO0o * oO0o % i11iIiiIii - I1Ii111
if 83 - 83: I11i / I1IiiI
if 34 - 34: IiII
if 57 - 57: oO0o . I11i . i1IIi
if 42 - 42: I11i + I1ii11iIi11i % O0
def i1iIIIi1i(mc, parms):
    """Map-cache walk callback: unicast entries are handed straight to
    II1i1IiiIIi11; (S,G) multicast entries walk their per-source cache with
    the same callback.  Returns a (keep-walking, parms) tuple."""
    if mc.group.is_null():
        return II1i1IiiIIi11(mc, parms)
    if mc.source_cache == None:
        return (True, parms)
    # Multicast entry: apply the callback to every source-specific sub-entry.
    mc.source_cache.walk_cache(II1i1IiiIIi11, parms)
    return (True, parms)
if 8 - 8: oO0o * OoOoOO00 - Ii1I - OoO0O00 * OOooOOo % I1IiiI
if 48 - 48: O0
if 11 - 11: I11i + OoooooooOO - OoO0O00 / o0oOOo0O0Ooo + Oo0Ooo . II111iiii
if 41 - 41: Ii1I - O0 - O0
if 68 - 68: OOooOOo % I1Ii111
if 88 - 88: iIii1I11I1II1 - ooOoO0o + OOooOOo
if 40 - 40: I1IiiI * Ii1I + OOooOOo % iII111i
if 74 - 74: oO0o - Oo0Ooo + OoooooooOO + I1Ii111 / OoOoOO00
def i1I1iI1iIi111i(sockets, hostname, rloc, port):
    """Walk the global LISP map-cache, passing [sockets, rloc, port, hostname]
    to i1iIIIi1i for each entry (invoked after NAT info for *hostname* is
    stored)."""
    lisp.lisp_map_cache.walk_cache(i1iIIIi1i, [sockets, rloc, port, hostname])
if 44 - 44: i1IIi % II111iiii + I11i
if 45 - 45: iII111i / iII111i + I1Ii111 + ooOoO0o
if 47 - 47: o0oOOo0O0Ooo + ooOoO0o
if 82 - 82: II111iiii . IiII - iIii1I11I1II1 - IiII * II111iiii
if 77 - 77: iIii1I11I1II1 * OoO0O00
if 95 - 95: I1IiiI + i11iIiiIii
if 6 - 6: ooOoO0o / i11iIiiIii + iII111i * oO0o
if 80 - 80: II111iiii
def O0O ( lisp_packet , thread_name ) :
global II1iII1i , i1I1I , iiI1I
global OOo , Ii1IIii11
global oO0oIIII
if 12 - 12: i11iIiiIii - i1IIi - OoO0O00 . i1IIi - OOooOOo + O0
oO0OOOO0 = lisp_packet
if 26 - 26: Ii1I
if 35 - 35: Ii1I - I1IiiI % o0oOOo0O0Ooo . OoooooooOO % Ii1I
if 47 - 47: iII111i - Ii1I . II111iiii + OoooooooOO . i11iIiiIii
if 94 - 94: o0oOOo0O0Ooo * Ii1I / Oo0Ooo / Ii1I
oO0 = oO0OOOO0 . packet
O0OO0O = oO0
O0OO0O , OO , IIIIii , OoOoO = lisp . lisp_is_rloc_probe ( O0OO0O , - 1 )
if ( oO0 != O0OO0O ) :
if ( OO == None ) : return
lisp . lisp_parse_packet ( II1iII1i , O0OO0O , OO , IIIIii , OoOoO )
return
if 43 - 43: i11iIiiIii + Oo0Ooo * II111iiii * I1Ii111 * O0
if 64 - 64: OOooOOo % iIii1I11I1II1 * oO0o
if 79 - 79: O0
if 78 - 78: I1ii11iIi11i + OOooOOo - I1Ii111
if 38 - 38: o0oOOo0O0Ooo - oO0o + iIii1I11I1II1 / OoOoOO00 % Oo0Ooo
oO0OOOO0 . packet = lisp . lisp_reassemble ( oO0OOOO0 . packet )
if ( oO0OOOO0 . packet == None ) : return
if 57 - 57: OoO0O00 / ooOoO0o
if 29 - 29: iIii1I11I1II1 + OoOoOO00 * OoO0O00 * OOooOOo . I1IiiI * I1IiiI
if 7 - 7: IiII * I1Ii111 % Ii1I - o0oOOo0O0Ooo
if 13 - 13: Ii1I . i11iIiiIii
if 56 - 56: I1ii11iIi11i % O0 - I1IiiI
if ( lisp . lisp_flow_logging ) : oO0OOOO0 = copy . deepcopy ( oO0OOOO0 )
if 100 - 100: Ii1I - O0 % oO0o * OOooOOo + I1IiiI
if ( oO0OOOO0 . decode ( True , None , lisp . lisp_decap_stats ) == None ) : return
if 88 - 88: OoooooooOO - OoO0O00 * O0 * OoooooooOO . OoooooooOO
if 33 - 33: I1Ii111 + iII111i * oO0o / iIii1I11I1II1 - I1IiiI
if 54 - 54: I1Ii111 / OOooOOo . oO0o % iII111i
if 57 - 57: i11iIiiIii . I1ii11iIi11i - Ii1I - oO0o + OoOoOO00
oO0OOOO0 . print_packet ( "Receive-({})" . format ( thread_name ) , True )
if 63 - 63: OoOoOO00 * iII111i
if 69 - 69: O0 . OoO0O00
if 49 - 49: I1IiiI - I11i
if 74 - 74: iIii1I11I1II1 * I1ii11iIi11i + OoOoOO00 / i1IIi / II111iiii . Oo0Ooo
oO0OOOO0 . strip_outer_headers ( )
if 62 - 62: OoooooooOO * I1IiiI
if 58 - 58: OoOoOO00 % o0oOOo0O0Ooo
if 50 - 50: I1Ii111 . o0oOOo0O0Ooo
if 97 - 97: O0 + OoOoOO00
if 89 - 89: o0oOOo0O0Ooo + OoO0O00 * I11i * Ii1I
if 37 - 37: OoooooooOO - O0 - o0oOOo0O0Ooo
if 77 - 77: OOooOOo * iIii1I11I1II1
if 98 - 98: I1IiiI % Ii1I * OoooooooOO
if 51 - 51: iIii1I11I1II1 . OoOoOO00 / oO0o + o0oOOo0O0Ooo
if 33 - 33: ooOoO0o . II111iiii % iII111i + o0oOOo0O0Ooo
if 71 - 71: Oo0Ooo % OOooOOo
if ( oO0OOOO0 . lisp_header . get_instance_id ( ) == 0xffffff ) :
O00oO000O0O = lisp . lisp_control_header ( )
O00oO000O0O . decode ( oO0OOOO0 . packet )
if ( O00oO000O0O . is_info_request ( ) ) :
I1i1i1iii = lisp . lisp_info ( )
I1i1i1iii . decode ( oO0OOOO0 . packet )
I1i1i1iii . print_info ( )
if 16 - 16: Ii1I + IiII * O0 % i1IIi . I1IiiI
if 67 - 67: OoooooooOO / I1IiiI * Ii1I + I11i
if 65 - 65: OoooooooOO - I1ii11iIi11i / ooOoO0o / II111iiii / i1IIi
if 71 - 71: I1Ii111 + Ii1I
if 28 - 28: OOooOOo
I11ii1IIiIi = I1i1i1iii . hostname if ( I1i1i1iii . hostname != None ) else ""
OoOOo0OOoO = oO0OOOO0 . outer_source
ooO0O00Oo0o = oO0OOOO0 . udp_sport
if ( lisp . lisp_store_nat_info ( I11ii1IIiIi , OoOOo0OOoO , ooO0O00Oo0o ) ) :
i1I1iI1iIi111i ( II1iII1i , I11ii1IIiIi , OoOOo0OOoO , ooO0O00Oo0o )
if 65 - 65: I1ii11iIi11i . I11i - I1Ii111 * IiII / I1Ii111 / ooOoO0o
else :
OO = oO0OOOO0 . outer_source . print_address_no_iid ( )
OoOoO = oO0OOOO0 . outer_ttl
oO0OOOO0 = oO0OOOO0 . packet
if ( lisp . lisp_is_rloc_probe_request ( oO0OOOO0 [ 28 ] ) == False and
lisp . lisp_is_rloc_probe_reply ( oO0OOOO0 [ 28 ] ) == False ) : OoOoO = - 1
oO0OOOO0 = oO0OOOO0 [ 28 : : ]
lisp . lisp_parse_packet ( II1iII1i , oO0OOOO0 , OO , 0 , OoOoO )
if 40 - 40: ooOoO0o * IiII * i11iIiiIii
return
if 57 - 57: ooOoO0o
if 29 - 29: OoOoOO00 - IiII * OoooooooOO + OoooooooOO . II111iiii + OoooooooOO
if 74 - 74: Ii1I - IiII / iII111i * O0 - OOooOOo
if 19 - 19: I1IiiI
if | |
if p <= lowest]
if override > 0:
partial = partial[:-override]
only_mine = 37 - len(placed)
lowest_cnt = only_mine + len(partial)
if lowest_cnt == 0:
return 0
ret = 0.0
ret += lowest * 36.0 * only_mine / float(lowest_cnt)
for p in partial:
ret += 36.0 * (lowest - p) * 1.0 / lowest_cnt
return ret
def func_e1a520bf58c74b5f95fd5e1fb9835983(lowest_cnt, lowest,
        remaining_budget, can_replicate, larger):
    """Return the smallest bet strictly above *lowest* (min of *larger*).

    The can_replicate rebinding mirrors the generated original; its value is
    never used by the caller.
    """
    smallest_above = min(larger)
    can_replicate = min(smallest_above - lowest - 1, remaining_budget / lowest_cnt)
    return smallest_above
def func_717b5659538b41bd88c3ece95f35a098(lowest, placed, exclude,
    needed_budget):
    """Auto-generated variant of the expected-value candidate computation.

    NOTE(review): `ret` is read before any local assignment, so max(ret, cand)
    raises UnboundLocalError on every call; `get_expected` is defined
    elsewhere in this file.
    """
    cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
    ret = max(ret, cand)
    return ret
def func_53abe815e79f40a1b75b6da20024e6e1(lowest, placed, exclude,
    needed_budget):
    """Auto-generated variant returning the candidate value.

    NOTE(review): `ret` is read before any local assignment, so max(ret, cand)
    raises UnboundLocalError before the return is reached.
    """
    cand = get_expected(placed, lowest, exclude) - exclude - needed_budget
    ret = max(ret, cand)
    return cand
def func_0a79f5bb24484ca39aec13faa8111d46(lowest, placed):
    """Budget needed to bring every roulette slot up to *lowest*: one bet of
    `lowest` for each of the 37-len(placed) empty slots, plus topping up each
    existing bet that is below `lowest`."""
    top_ups = sum(max(0, lowest - bet) for bet in placed)
    return (37 - len(placed)) * lowest + top_ups
def func_ab9640aa475841d6891a8dee2b2ca9c4(lowest, placed):
    """Accumulates the needed budget like its sibling but (faithful to the
    generated original) returns the last element iterated from *placed*;
    raises NameError when *placed* is empty."""
    needed_budget = (37 - len(placed)) * lowest
    for bet in placed:
        needed_budget += max(0, lowest - bet)
    return bet
def func_586c631fab234f4abee0ae2b83a97524(lowest, budget, placed, needed_budget
    ):
    """Auto-generated variant.

    NOTE(review): returns the comprehension variable `p`, which does not leak
    out of a list comprehension in Python 3 -- NameError there (under
    Python 2 it would return the last element of *placed*).
    """
    remaining_budget = budget - needed_budget
    partial = len([p for p in placed if p <= lowest])
    return p
def func_4c057f6f577c4d45b063e72434a1a8b9(lowest, budget, placed, needed_budget):
    """Return the budget left after *needed_budget* is spent.  The partial
    count over *placed* is still evaluated (as in the original) but its
    result is discarded."""
    sum(1 for bet in placed if bet <= lowest)
    return budget - needed_budget
def func_f786d824fa0743ab91f810f4b5b2ecc6(lowest, budget, placed, needed_budget):
    """Count bets in *placed* not exceeding *lowest*.  The remaining-budget
    subtraction is still performed (as in the original) but discarded."""
    budget - needed_budget
    return sum(1 for bet in placed if bet <= lowest)
def func_239cff9d8f2a4c149f4bee34d2a26c66(lowest, placed):
    """Return how many placed bets are at or below *lowest* (the lowest_cnt
    expression from the original is evaluated and discarded)."""
    at_or_below = sum(1 for bet in placed if bet <= lowest)
    37 - len(placed) + at_or_below
    return at_or_below
def func_1faf349b06954d568194f1b2fdc2a331(lowest, placed):
    """Auto-generated variant.

    NOTE(review): returns the comprehension variable `p` -- NameError under
    Python 3; under Python 2 it returns the last element of *placed*.
    """
    partial = len([p for p in placed if p <= lowest])
    lowest_cnt = 37 - len(placed) + partial
    return p
def func_9e543731501f45b7a9f7111e08dc6fee(lowest, placed):
    """Number of slots that would end up at *lowest*: the 37-len(placed)
    empty slots plus existing bets already at or below *lowest*."""
    at_or_below = sum(1 for bet in placed if bet <= lowest)
    return 37 - len(placed) + at_or_below
def func_a10e776df323436c80bf480b9fc7a7ca(lowest_cnt, lowest,
        remaining_budget, can_replicate, placed):
    """Return the bets strictly above *lowest*.  The can_replicate math is
    reproduced from the generated original (including its possible
    ZeroDivisionError) but the value is discarded."""
    above = [bet for bet in placed if bet > lowest]
    if above:
        can_replicate = min(min(above) - lowest - 1, remaining_budget / lowest_cnt)
    else:
        can_replicate = remaining_budget / lowest_cnt
    return above
def func_6d48a285cace454baa0cbf40e34a8ace(lowest_cnt, lowest,
        remaining_budget, can_replicate, placed):
    """Return the smallest bet strictly above *lowest*.  Faithful to the
    generated original: when no such bet exists, next_larger is never bound
    and the final return raises NameError."""
    above = [bet for bet in placed if bet > lowest]
    if above:
        next_larger = min(above)
        can_replicate = min(next_larger - lowest - 1, remaining_budget / lowest_cnt)
    else:
        can_replicate = remaining_budget / lowest_cnt
    return next_larger
def func_faa8e882935c4474ab27832a082ddeb4(lowest_cnt, lowest,
    remaining_budget, can_replicate, placed):
    """Auto-generated variant.

    NOTE(review): returns the comprehension variable `p` -- NameError under
    Python 3 (Python 2 would return the last element of *placed*).
    """
    larger = [p for p in placed if p > lowest]
    if larger:
        next_larger = min(larger)
        can_replicate = min(next_larger - lowest - 1, remaining_budget /
            lowest_cnt)
    else:
        can_replicate = remaining_budget / lowest_cnt
    return p
def func_0ccd2127f2214a5caa91cd8829a4d56d(lowest, budget, placed, needed_budget
    ):
    """Auto-generated variant.

    NOTE(review): returns the comprehension variable `p` -- NameError under
    Python 3 (Python 2 would return the last element of *placed*).
    """
    remaining_budget = budget - needed_budget
    partial = len([p for p in placed if p <= lowest])
    lowest_cnt = 37 - len(placed) + partial
    return p
def func_823ff6d2a5a745c2b4d5c99dca6bf1f8(lowest, budget, placed, needed_budget):
    """Return the lowest-slot count: 37-len(placed) empty slots plus placed
    bets at or below *lowest*.  The budget subtraction from the original is
    still evaluated but discarded."""
    budget - needed_budget
    at_or_below = sum(1 for bet in placed if bet <= lowest)
    return 37 - len(placed) + at_or_below
def func_d61fa8489acc46ac8c5a3d2490865ffb(lowest, budget, placed, needed_budget):
    """Return budget minus needed_budget; the partial/lowest_cnt math from the
    generated original is evaluated and discarded."""
    leftover = budget - needed_budget
    at_or_below = sum(1 for bet in placed if bet <= lowest)
    37 - len(placed) + at_or_below
    return leftover
def func_365835b6f7484ef28c09afbc58638e15(lowest, budget, placed, needed_budget):
    """Return the count of placed bets at or below *lowest*; the budget and
    lowest_cnt computations are evaluated (as in the original) and
    discarded."""
    budget - needed_budget
    at_or_below = sum(1 for bet in placed if bet <= lowest)
    37 - len(placed) + at_or_below
    return at_or_below
def func_6875a669551a4eba80d27edaf9ab29da(infile):
    """Parse the 'budget bets' header line and consume/parse the placed-bets
    line from *infile*; return the budget."""
    budget, _bets = (int(tok) for tok in infile.readline().split())
    sorted(int(tok) for tok in infile.readline().split())
    return budget
def func_083d5dfe2aed4a6f8e8cc78f4922fe2a(infile):
    """Read the case header and bets line; return the bet count (2nd integer)."""
    header = infile.readline().split()
    budget, bets = int(header[0]), int(header[1])
    placed = sorted(int(tok) for tok in infile.readline().split())
    return bets
def func_e4af148c84f94b669df248bb3bfc5a36(infile):
    """Read the case header and bets line; return the bets sorted ascending."""
    header = infile.readline().split()
    budget, bets = int(header[0]), int(header[1])
    return sorted(int(tok) for tok in infile.readline().split())
def func_37511302e85e4608a016bf88c5d04943(infile):
    """Read one line of integer bets and return them sorted ascending."""
    placed = sorted(int(tok) for tok in infile.readline().split())
    ret = 0.0  # parity with the original fragment; unused
    return placed
def func_a629d90826ed496883b5466c5eb53a3f(infile):
    """Consume one line of bets and return the accumulator seed (0.0)."""
    placed = sorted(int(tok) for tok in infile.readline().split())
    return 0.0
def func_83b242b2d1fa4e4fa7bca9942c560ab2(placed):
    """Return candidate bet levels: 1, each placed bet, then each bet's
    lower and upper neighbour (duplicates preserved, original order)."""
    candidates = [1]
    candidates.extend(placed)
    candidates.extend(bet - 1 for bet in placed)
    candidates.extend(bet + 1 for bet in placed)
    return candidates
def func_4b8e3f3803e8494aa0b4c9d0dcdae0a8(placed):
    """Build the candidate-level list for parity and return the 0.0 seed."""
    candidates = [1]
    candidates.extend(placed)
    candidates.extend(bet - 1 for bet in placed)
    candidates.extend(bet + 1 for bet in placed)
    return 0.0
def func_7d4ef7b2dd064ef093c471ba71ceeb2f(placed):
    """Return the last placed bet.

    Bug fix: ``return p`` relied on Python 2 comprehension leakage
    (NameError on Python 3); the leaked value was always ``placed[-1]``.
    Raises IndexError for empty ``placed`` (was NameError before).
    """
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
        ]
    return placed[-1]
def func_b163268f8ff64d3f84e3aaa6d8010b7b(placed):
    """Return the sorted, de-duplicated candidate levels (1, bets, neighbours)."""
    candidates = {1}
    for bet in placed:
        candidates.update((bet - 1, bet, bet + 1))
    return sorted(candidates)
def func_6ef8e71e217a4110bac33d08a52272ad(placed):
    """Return the last placed bet.

    Bug fix: ``return p`` relied on Python 2 comprehension leakage
    (NameError on Python 3); the leaked value was always ``placed[-1]``.
    """
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
        ]
    queue = sorted(set(queue))
    return placed[-1]
def func_88c0d8eea3ce4744a926669c6082994e(queue=()):
    """Return *queue* sorted with duplicates removed.

    Bug fix: the original took no parameters and read the local name
    ``queue`` before assigning it, so every call raised
    UnboundLocalError.  ``queue`` is now an optional parameter
    (backward-compatible: a no-arg call now returns [] instead of
    crashing).
    """
    queue = sorted(set(queue))
    seen = set(queue)  # parity with the sibling fragments; unused here
    return queue
def func_49238305546a42b88fab1e662bc3886f(queue=()):
    """Return the set of distinct values in *queue*.

    Bug fix: the original took no parameters and read the local name
    ``queue`` before assigning it (UnboundLocalError on every call);
    ``queue`` is now an optional parameter.
    """
    queue = sorted(set(queue))
    seen = set(queue)
    return seen
def func_ce981d0535964624bf3ee9531c331713(infile):
    """Consume the header and bets lines; return the 0.0 accumulator seed."""
    header = infile.readline().split()
    budget, bets = int(header[0]), int(header[1])
    placed = sorted(int(tok) for tok in infile.readline().split())
    return 0.0
def func_c1fd60988f424224aad069548db86cb3(infile):
    """Consume header and bets lines; return the bet count (2nd header int)."""
    header = infile.readline().split()
    budget, bets = int(header[0]), int(header[1])
    placed = sorted(int(tok) for tok in infile.readline().split())
    ret = 0.0  # parity; unused
    return bets
def func_a5ecdfa35a234ecab08849a0d96e3eb5(infile):
    """Consume header and bets lines; return the sorted bets."""
    header = infile.readline().split()
    budget, bets = int(header[0]), int(header[1])
    placed = sorted(int(tok) for tok in infile.readline().split())
    ret = 0.0  # parity; unused
    return placed
def func_6a2ab7d45b58407abcc316b7add8fc78(infile):
    """Consume header and bets lines; return the budget (1st header int)."""
    header = infile.readline().split()
    budget, bets = int(header[0]), int(header[1])
    placed = sorted(int(tok) for tok in infile.readline().split())
    ret = 0.0  # parity; unused
    return budget
def func_26126a771ebf4efba104529689bafd56(infile):
    """Read one line of bets and return them sorted; the candidate list is
    built only for parity with the original fragment."""
    placed = sorted(int(tok) for tok in infile.readline().split())
    ret = 0.0
    candidates = [1]
    candidates += placed
    candidates += [bet - 1 for bet in placed]
    candidates += [bet + 1 for bet in placed]
    return placed
def func_f59c5bdd3baf4e7ba6fa44bcdc21f8ad(infile):
    """Read one line of bets and return the largest one.

    Bug fix: ``return p`` relied on Python 2 comprehension leakage
    (NameError on Python 3); since ``placed`` is sorted, the leaked
    value was ``placed[-1]`` -- the maximum bet -- now explicit.
    """
    placed = sorted(map(int, infile.readline().split()))
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
        ]
    return placed[-1]
def func_31618675966746e8a3b27760bcb0835c(infile):
    """Read bets and return the raw candidate-level list (1, bets, then
    each bet's neighbours; duplicates preserved)."""
    placed = sorted(int(tok) for tok in infile.readline().split())
    ret = 0.0
    candidates = [1]
    candidates += placed
    candidates += [bet - 1 for bet in placed]
    candidates += [bet + 1 for bet in placed]
    return candidates
def func_99f09453144841aba7d754444625a900(infile):
    """Consume one line of bets, build the candidate list for parity,
    and return the 0.0 accumulator seed."""
    placed = sorted(int(tok) for tok in infile.readline().split())
    candidates = [1]
    candidates += placed
    candidates += [bet - 1 for bet in placed]
    candidates += [bet + 1 for bet in placed]
    return 0.0
def func_7712e59f1d0f481fbe89bcc5f0aac879(placed):
    """Build the sorted candidate set for parity; return the 0.0 seed."""
    candidates = {1}
    for bet in placed:
        candidates.update((bet - 1, bet, bet + 1))
    ordered = sorted(candidates)
    return 0.0
def func_a8c596f467194fb1b2d14694fa657671(placed):
    """Return the sorted, de-duplicated candidate levels around the bets."""
    candidates = {1}
    for bet in placed:
        candidates.update((bet - 1, bet, bet + 1))
    return sorted(candidates)
def func_46d8aef8af3a4db38f56975d1aefd163(placed):
    """Return the last placed bet.

    Bug fix: ``return p`` relied on Python 2 comprehension leakage
    (NameError on Python 3); the leaked value was always ``placed[-1]``.
    """
    ret = 0.0
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
        ]
    queue = sorted(set(queue))
    return placed[-1]
def func_adbd3f3140124e6190f58d7026e60ce3(placed):
    """Return the set of distinct candidate levels (1, bets, neighbours)."""
    candidates = {1}
    for bet in placed:
        candidates.update((bet - 1, bet, bet + 1))
    return set(sorted(candidates))
def func_024786bb17704b50afeed4c0be4abf3d(placed):
    """Return the last placed bet.

    Bug fix: ``return p`` relied on Python 2 comprehension leakage
    (NameError on Python 3); the leaked value was always ``placed[-1]``.
    """
    queue = [1] + placed + [(p - 1) for p in placed] + [(p + 1) for p in placed
        ]
    queue = sorted(set(queue))
    seen = set(queue)
    return placed[-1]
def func_85f0ac2c5e5c456f8d538f8e34213058(placed):
    """Return the sorted, de-duplicated candidate levels around the bets."""
    candidates = {1}
    for bet in placed:
        candidates.update((bet - 1, bet, bet + 1))
    ordered = sorted(candidates)
    tracked = set(ordered)  # parity with the original fragment; unused
    return ordered
def func_a9f293f95d944e7d866f0d196e3e2d57(budget, placed):
    """Best expected winnings for the roulette problem: explore candidate
    "lowest bet" levels and maximize get_expected(...) minus cost.

    Bug fixes vs. the original fragment:
      * ``queue``, ``seen`` and ``ret`` were read before ever being
        assigned (UnboundLocalError); they are now initialized the same
        way the sibling fragments do.
      * ``xrange`` (Python 2 only) replaced with ``range``.
      * ``/`` replaced with ``//``: under Python 2 int/int was already
        floor division, and a float here would break ``range()`` below.
    """
    ret = 0.0
    # Candidate levels: 1, each placed bet, and each bet's neighbours.
    queue = [1] + placed + [p - 1 for p in placed] + [p + 1 for p in placed]
    queue = sorted(set(queue))
    seen = set(queue)
    while queue:
        lowest = queue.pop()
        if lowest == 0:
            continue
        # Cost to raise every slot (placed or new) up to ``lowest``.
        needed_budget = (37 - len(placed)) * lowest
        for p in placed:
            needed_budget += max(0, lowest - p)
        if budget < needed_budget:
            continue
        remaining_budget = budget - needed_budget
        partial = len([p for p in placed if p <= lowest])
        lowest_cnt = 37 - len(placed) + partial
        if lowest_cnt == 0:
            continue
        larger = [p for p in placed if p > lowest]
        if larger:
            next_larger = min(larger)
            can_replicate = min(next_larger - lowest - 1,
                                remaining_budget // lowest_cnt)
        else:
            can_replicate = remaining_budget // lowest_cnt
        if can_replicate > 0:
            if lowest + can_replicate not in seen:
                seen.add(lowest + can_replicate)
                queue.append(lowest + can_replicate)
            if lowest + can_replicate - 1 not in seen:
                seen.add(lowest + can_replicate - 1)
                queue.append(lowest + can_replicate - 1)
        for exclude in range(0, min(remaining_budget, partial) + 1):
            cand = get_expected(placed, lowest, exclude
                ) - exclude - needed_budget
            ret = max(ret, cand)
    return ret
def func_4be1d115232a411493011c98316bf5c7(budget, placed):
queue | |
self._get("siteelevation")
@SiteElevation.setter
def SiteElevation(self, SiteElevation: float):
    # Write the site elevation to the device via the Alpaca transport;
    # range validation and exceptions are handled by _put / the device.
    self._put("siteelevation", SiteElevation=SiteElevation)
@property
def SiteLatitude(self) -> float:
    """(Read/Write) The latitude (degrees) of the observing site. See Notes.

    Raises:
        NotImplementedException: If the property is not implemented
        InvalidValueException: If the given value is outside the range -90 through
            90 degrees.
        InvalidOperationException: If the application must set the SiteLatitude
            before reading it, but has not. See Notes.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * This is geodetic (map) latitude, degrees, WGS84, positive North.
        * Some mounts supply this via input to their control systems, in
          other scenarios the application will set this on initialization.
        * If a change is made via SiteLatitude, most mounts will save the value
          persistently across power off/on.
        * If the value hasn't been set by any means, an InvalidOperationException
          will be raised.
    """
    return self._get("sitelatitude")
@SiteLatitude.setter
def SiteLatitude(self, SiteLatitude: float):
    # Write the site latitude (degrees, WGS84, positive North) to the device.
    self._put("sitelatitude", SiteLatitude=SiteLatitude)
@property
def SiteLongitude(self) -> float:
    """(Read/Write) The longitude (degrees) of the observing site. See Notes.

    Raises:
        NotImplementedException: If the property is not implemented
        InvalidValueException: If the given value is outside the range -180 through
            180 degrees.
        InvalidOperationException: If the application must set the SiteLongitude
            before reading it, but has not. See Notes.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * This is geodetic (map) longitude, degrees, WGS84, **positive East**.
        * Some mounts supply this via input to their control systems, in
          other scenarios the application will set this on initialization.
        * If a change is made via SiteLongitude, most mounts will save the value
          persistently across power off/on.
        * If the value hasn't been set by any means, an InvalidOperationException
          will be raised.

    Attention:
        West longitude is negative.
    """
    return self._get("sitelongitude")
@SiteLongitude.setter
def SiteLongitude(self, SiteLongitude: float):
    # Write the site longitude (degrees, WGS84, positive East) to the device.
    self._put("sitelongitude", SiteLongitude=SiteLongitude)
@property
def Slewing(self) -> bool:
    """The mount is in motion resulting from a slew or a move-axis. See :ref:`async_faq`

    Raises:
        NotImplementedException: If the property is not implemented (none of the CanSlew
            properties are True, this is a manual mount)
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * This is the correct property to use to determine *successful* completion of
          a (non-blocking) :py:meth:`SlewToCoordinatesAsync()`,
          :py:meth:`SlewToTargetAsync()`, :py:meth:`SlewToAltAzAsync()`
          (NOTE(review): the original text listed SlewToCoordinatesAsync
          twice; the AltAz variant is presumed -- confirm), or by writing to
          :py:attr:`SideOfPier` to force a flip.
        * See :ref:`async_faq`
        * Slewing will be True immediately upon
          returning from any of these calls, and will remain True until *successful*
          completion, at which time Slewing will become False.
        * You might see Slewing = False on returning from a slew or move-axis
          if the operation takes a very short time. If you see False (and not an exception)
          in this state, you can be certain that the operation completed *successfully*.
        * Slewing will not be True during pulse-guiding or application of tracking
          offsets.
    """
    return self._get("slewing")
@property
def SlewSettleTime(self) -> int:
    """(Read/Write) The post-slew settling time (seconds).

    Artificially lengthen all slewing operations. Useful for mounts or
    buildings that require additional mechanical settling time after a
    slew to stabilize.

    Raises:
        NotImplementedException: If the property is not implemented
        InvalidValueException: If the given settling time is invalid (negative or
            ridiculously high)
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.
    """
    return self._get("slewsettletime")
@SlewSettleTime.setter
def SlewSettleTime(self, SlewSettleTime: int):
    # Write the post-slew settling time (seconds) to the device.
    self._put("slewsettletime", SlewSettleTime=SlewSettleTime)
@property
def TargetDeclination(self) -> float:
    """(Read/Write) Set or return the target declination. See Notes.

    Raises:
        NotImplementedException: If the property is not implemented
        InvalidValueException: If the given value is outside the range -90 through
            90 degrees.
        InvalidOperationException: If the application must set the TargetDeclination
            before reading it, but has not. See Notes.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * This is a pre-set target coordinate for :py:meth:`SlewToTargetAsync()`
          and :py:meth:`SyncToTarget()`
        * Target coordinates are for the current :py:attr:`EquatorialSystem`.
    """
    return self._get("targetdeclination")
@TargetDeclination.setter
def TargetDeclination(self, TargetDeclination: float):
    # Write the pre-set target declination (degrees) to the device.
    self._put("targetdeclination", TargetDeclination=TargetDeclination)
@property
def TargetRightAscension(self) -> float:
    """(Read/Write) Set or return the target right ascension. See Notes.

    (The original summary said "target declination" -- a copy-paste slip.)

    Raises:
        NotImplementedException: If the property is not implemented
        InvalidValueException: If the given value is outside the range -180 through
            180 degrees. (NOTE(review): ASCOM defines right ascension in
            hours, 0 through 24 -- confirm the intended range.)
        InvalidOperationException: If the application must set the TargetRightAscension
            before reading it, but has not. See Notes.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * This is a pre-set target coordinate for :py:meth:`SlewToTargetAsync()`
          and :py:meth:`SyncToTarget()`
        * Target coordinates are for the current :py:attr:`EquatorialSystem`.
    """
    return self._get("targetrightascension")
@TargetRightAscension.setter
def TargetRightAscension(self, TargetRightAscension: float):
    # Write the pre-set target right ascension to the device.
    self._put("targetrightascension", TargetRightAscension=TargetRightAscension)
@property
def Tracking(self) -> bool:
    """(Read/Write) The on/off state of the mount's sidereal tracking drive. See Notes.

    Raises:
        NotImplementedException: If writing to the property is not implemented.
            :py:attr:`CanSetTracking` will be False.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * When on, the mount will use the last selected :py:attr:`TrackingRate`.
        * Even if the mount doesn't support changing this, it will report the
          current state.
    """
    return self._get("tracking")
@Tracking.setter
def Tracking(self, Tracking: bool):
    # Switch the sidereal tracking drive on or off.
    self._put("tracking", Tracking=Tracking)
@property
def TrackingRate(self) -> DriveRates:
    """(Read/Write) The current (sidereal) tracking rate of the mount. See Notes.

    Raises:
        InvalidValueException: If value being written is not one of the
            :py:class:`DriveRates`, or if the requested rate is not
            supported by the mount (not all are).
        NotImplementedException: If the mount doesn't support writing this
            property to change the tracking rate.
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * Even if the mount doesn't support changing this, it will report the
          current state.
    """
    # Wrap the raw integer from the device in the DriveRates enum.
    return DriveRates(self._get("trackingrate"))
@TrackingRate.setter
def TrackingRate(self, TrackingRate: DriveRates):
    # Send the enum's underlying integer value to the device.
    self._put("trackingrate", TrackingRate=TrackingRate.value)
@property
def TrackingRates(self) -> List[DriveRates]:
    """Return a list of supported :py:class:`DriveRates` values

    Raises:
        NotConnectedException: If the device is not connected
        DriverException: An error occurred that is not described by
            one of the more specific ASCOM exceptions.
            The device did not *successfully* complete the request.

    Notes:
        * At a minimum, this list will contain an item for
          :py:class:`~DriveRates.driveSidereal`
    """
    return self._get("trackingrates")
@property
def UTCDate(self) -> datetime:
"""(Read/Write) The UTC date/time of the mount's internal clock. See Notes.
You may write either a Python datetime (tz=UTC) or an ISO 8601 string
for example::
2022-04-22T20:21:01.123+00:00
Raises:
InvalidValueException: if an illegal ISO 8601 string or a bad Python
datetime value is written to change the time. See Notes.
NotImplementedException: If the mount doesn't support writing this
property to change the UTC time
InvalidOperationException: When UTCDate is read and the
mount cannot provide this property itslef and a value has
not yet be established by writing to the property.
NotConnectedException: If the device is not connected
DriverException: An error occurred that is not described by
one of the more specific ASCOM exceptions.
The device did not *successfully* complete the request.
Notes:
* Changing time by writing to this property can be done with either a
Python datetime value or an ISO | |
self.machine.processes_per_node,
self.codes.keys())
else:
node_layout = NodeLayout(node_layout)
# TODO: validate node layout against machine model
sweep_runs = [Run(inst, self.codes, self.app_dir,
os.path.join(
group_output_dir,
'run-{}.iteration-{}'.format(
group_run_offset + i,
repeat_index)),
self.inputs,
self.machine,
node_layout,
sweep.rc_dependency,
group.component_subdirs,
group.sosflow_profiling,
group.sosflow_analysis,
group.component_inputs)
for i, inst in enumerate(
sweep.get_instances())]
# we dont support mpmd mode with dependencies
try:
if group.launch_mode.lower() == 'mpmd':
assert sweep.rc_dependency is None, \
"Dependencies in MPMD mode not supported"
except AttributeError:
pass
# we dont support mpmd on deepthought2
try:
if self.machine.name.lower() == 'deepthought2':
assert group.launch_mode.lower() not in 'mpmd',\
"mpmd mode not implemented for deepthought2"
except AttributeError:
pass
group_runs.extend(sweep_runs)
group_run_offset += len(sweep_runs)
self.runs.extend(group_runs)
if group.max_procs is None:
max_procs = max([r.get_total_nprocs() for r in group_runs])
else:
procs_per_run = max([r.get_total_nprocs() for r in group_runs])
if group.max_procs < procs_per_run:
# TODO: improve error message, specifying which
# group and by how much it's off etc
raise exc.CheetahException("max_procs for group is too low")
max_procs = group.max_procs
if group.per_run_timeout:
per_run_seconds = parse_timedelta_seconds(group.per_run_timeout)
walltime_guess = (per_run_seconds * len(group_runs)) + 60
walltime_group = parse_timedelta_seconds(group.walltime)
if walltime_group < walltime_guess:
warnings.warn('group "%s" walltime %d is less than '
'(per_run_timeout * nruns) + 60 = %d, '
'it is recommended to set it higher to '
'avoid problems with the workflow '
'engine being killed before it can write '
'all status information'
% (group.name, walltime_group, walltime_guess))
# TODO: refactor so we can just pass the campaign and group
# objects, i.e. add methods so launcher can get all info it needs
# and simplify this loop.
group.nodes = launcher.create_group_directory(
self.name, self.app_dir, group_name,
group_runs,
max_procs,
nodes=group.nodes,
launch_mode=group.launch_mode,
component_subdirs=group.component_subdirs,
walltime=group.walltime,
timeout=group.per_run_timeout,
node_exclusive=self.machine.node_exclusive,
tau_profiling=group.tau_profiling,
tau_tracing=group.tau_tracing,
machine=self.machine,
sosd_path=self.sosd_path,
sos_analysis_path=self.sos_analysis_path,
kill_on_partial_failure=self.kill_on_partial_failure,
run_post_process_script=self.run_post_process_script,
run_post_process_stop_on_failure=
self.run_post_process_stop_group_on_failure,
scheduler_options=self.machine_scheduler_options,
run_dir_setup_script=self.run_dir_setup_script)
# TODO: track directories and ids and add to this file
all_params_json_path = os.path.join(output_dir, "params.json")
with open(all_params_json_path, "w") as f:
json.dump([run.get_app_param_dict()
for run in self.runs], f, indent=2)
def _check_code_paths(self):
if not os.path.isdir(self.app_dir):
raise exc.CheetahException(
'specified app directory "%s" does not exist' % self.app_dir)
for code_name, code in self.codes.items():
exe_path = code['exe']
if not os.path.isfile(exe_path):
raise exc.CheetahException(
'code "%s" exe at "%s" is not a file'
% (code_name, exe_path))
if not os.access(exe_path, os.X_OK):
raise exc.CheetahException(
'code "%s" exe at "%s" is not executable by current user'
% (code_name, exe_path))
def _assert_unique_group_names(self, campaign_dir):
    """Assert new groups being added to the campaign do not have the
    same name as existing groups.
    """
    requested = []
    for group in self.sweeps:
        if not isinstance(group, parameters.SweepGroup):
            raise ValueError("'sweeps' must be a list of SweepGroup "
                             "objects. Some objects are of type "
                             "{}".format(type(group)))
        requested.append(group.name)
    # Existing group dirs are the immediate subdirectories of the campaign.
    existing = next(os.walk(campaign_dir))[1]
    clashes = set(requested) & set(existing)
    if clashes:
        raise FileExistsError("One or more SweepGroups already exist: "
                              + ", ".join(clashes))
def _experiment_relative_path(self, p):
if p.startswith("/"):
return p
experiment_spec_path = inspect.getsourcefile(self.__class__)
experiment_dir = os.path.dirname(experiment_spec_path)
return os.path.join(experiment_dir, p)
class Run(object):
"""
Class representing how to actually run an instance on a given environment,
including how to generate arg arrays for executing each code required for
the application.
TODO: create a model shared between workflow and cheetah, i.e. codar.model
"""
def __init__(self, instance, codes, codes_path, run_path, inputs,
             machine, node_layout, rc_dependency, component_subdirs,
             sosflow_profiling, sosflow_analyis, component_inputs=None):
    """Capture per-run state and derive the run components and node count.

    Ordering matters: run components are built first, the node layout is
    padded to cover them, dependencies are wired, and only then is the
    total node count computed.
    """
    self.instance = instance
    self.codes = codes
    self.codes_path = codes_path
    self.run_path = run_path
    # Run id is the leaf directory name of the run path.
    self.run_id = os.path.basename(run_path)
    self.inputs = inputs
    self.machine = machine
    # Note: the layout will be modified if sosflow is set, so it's
    # important to use a copy.
    self.node_layout = node_layout.copy()
    self.component_subdirs = component_subdirs
    self.sosflow_profiling = sosflow_profiling
    # NOTE: parameter name 'sosflow_analyis' is misspelled in the public
    # signature; kept as-is for caller compatibility.
    self.sosflow_analysis = sosflow_analyis
    self.component_inputs = component_inputs
    self.total_nodes = 0
    self.run_components = self._get_run_components()
    # populate nodelayout to contain all RCs
    self.node_layout.populate_remaining([rc.name for rc in
                                         self.run_components],
                                        self.machine.processes_per_node)
    # Get the RCs that this rc depends on
    # This must be done before the total no. of nodes are calculated
    # below
    self._populate_rc_dependency(rc_dependency)
    # Set the total nodes after the run components are initialized above
    self._set_total_nodes()
    # Filename in the run dir that will store the size of the run dir
    # prior to submitting the campaign
    self._pre_submit_dir_size_fname = \
        ".codar.cheetah.pre_submit_dir_size.out"
def _get_run_components(self):
    """Build one RunComponent per code used by this run instance.

    Uses the ordered argv mapping so components follow the order of
    self.codes; per-component working dirs, inputs, sosflow linkage and
    ADIOS XML paths are resolved here.
    """
    comps = []
    codes_argv = self._get_codes_argv_ordered()
    for (target, argv) in codes_argv.items():
        exe_path = self.codes[target]['exe']
        sleep_after = self.codes[target].get('sleep_after', 0)
        runner_override = self.codes[target].get('runner_override', False)
        assert type(runner_override) == bool, \
            "The runner_override property for the " + target + " codes " \
            "object must be a boolean value True/False"
        # Set separate subdirs for individual components if requested
        if self.component_subdirs:
            working_dir = os.path.join(self.run_path, target)
        else:
            working_dir = self.run_path
        component_inputs = None
        if self.component_inputs:
            component_inputs = self.component_inputs.get(target)
        if component_inputs:
            assert type(component_inputs) is list, \
                "component_inputs for {} must be a list.".format(target)
            # Get the full path of inputs
            # Separate the strings from symlinks to preserve their type
            str_inputs = [input for input in component_inputs if type(
                input) == str]
            str_inputs = relative_or_absolute_path_list(self.codes_path,
                                                        str_inputs)
            symlinks = [input for input in component_inputs if type(
                input) == SymLink]
            symlinks = relative_or_absolute_path_list(self.codes_path,
                                                      symlinks)
            # Re-wrap resolved paths so downstream code still sees SymLink.
            symlinks = [SymLink(input) for input in symlinks]
            component_inputs = str_inputs + symlinks
        linked_with_sosflow = self.codes[target].get(
            'linked_with_sosflow', False)
        adios_xml_file = self.codes[target].get('adios_xml_file', None)
        if adios_xml_file:
            adios_xml_file = relative_or_absolute_path(
                self.codes_path, adios_xml_file)
        sched_args = self.instance.get_sched_opts(target)
        comp = RunComponent(name=target, exe=exe_path, args=argv,
                            sched_args=sched_args,
                            nprocs=self.instance.get_nprocs(target),
                            sleep_after=sleep_after,
                            working_dir=working_dir,
                            component_inputs=component_inputs,
                            linked_with_sosflow=linked_with_sosflow,
                            adios_xml_file=adios_xml_file,
                            hostfile=self.instance.get_hostfile(target),
                            runner_override=runner_override)
        comps.append(comp)
    return comps
def _populate_rc_dependency(self, rc_dependency):
    """
    Retrieve the object reference for RCs and populate their
    after_rc_done field with object references.

    ``rc_dependency`` maps a code name to the name of the code it must
    wait for; both are resolved to RunComponent objects here
    (_get_rc_by_name raises if a name is unknown).
    """
    if rc_dependency is not None:
        for k,v in rc_dependency.items():
            assert type(k) is str, "rc_dependency dictionary key must " \
                                   "be code name"
            assert v is not None, "Dict value cannot be None"
            assert type(v) is str, "rc_dependency dictionary value must " \
                                   "be a string"
            k_rc = self._get_rc_by_name(k)
            v_rc = self._get_rc_by_name(v)
            k_rc.after_rc_done = v_rc
def get_fob_data_list(self):
    """Return the FOB dict of every run component, in component order."""
    data = []
    for component in self.run_components:
        data.append(component.as_fob_data())
    return data
def _get_codes_argv_ordered(self):
"""Wrapper around instance.get_codes_argv which uses correct order
from self.codes OrderedDict."""
codes_argv = self.instance.get_codes_argv()
undefined_codes = set(codes_argv.keys()) - set(self.codes.keys())
if undefined_codes:
raise exc.CampaignParseError(
'Parameter references undefined codes(s): %s'
% ','.join(undefined_codes))
# Note that a given Run may not use all codes, e.g. for base
# case app runs that don't use adios stage_write or dataspaces.
return OrderedDict((k, codes_argv[k]) for k in self.codes.keys()
if k in codes_argv)
def get_total_nprocs(self):
    """Total number of MPI processes across all of this run's components."""
    total = 0
    for component in self.run_components:
        total += component.nprocs
    return total
def _get_rc_by_name(self, name):
for rc in self.run_components:
if rc.name == name:
return rc
raise CheetahException("Did not find run component with name {0}"
.format(name))
def _set_total_nodes(self):
    """
    Get the total number of nodes that will be required by the Run.
    Group codes based upon the node layout (separate/shared nodes),
    then consider the dependency between components to calculate the
    total no. of nodes.

    Side effects: sets ``rc.num_nodes`` on every run component and
    ``self.total_nodes`` on the run.

    TODO This functionality exists in Savanna already.
    """
    # group codes by node
    code_groups = self.node_layout.group_codes_by_node()
    # now further group codes based on the dependency
    self._group_codes_by_dependencies(code_groups)
    # Get the max no. of nodes required based on the node layout
    group_max_nodes = []
    for code_group in code_groups:
        group_max = 0
        for codename in code_group:
            rc = self._get_rc_by_name(codename)
            # FIXME: Cleanup this hack
            # For summit: its something like {'xgc':{0,1,2,4,5}}, i.e.
            # its a dict of sets (CPU core ids per code). For other
            # machines, its a dict of int that represents ppn.
            if isinstance(self.node_layout.layout_list[0],
                          machines.MachineNode):
                num_nodes_code = math.ceil(
                    rc.nprocs/len(code_group[codename]))
            else:
                num_nodes_code = math.ceil(
                    rc.nprocs / code_group[codename])
            rc.num_nodes = num_nodes_code
            group_max = max(group_max, num_nodes_code)
        # Codes sharing a node group need only the largest member's nodes.
        group_max_nodes.append(group_max)
    self.total_nodes = sum(group_max_nodes)
def _group_codes_by_dependencies(self, code_groups):
"""Group RCs based upon the dependencies.
Input is a list of dictionaries where the key is the code and value
is the no. of ranks on a node"""
def parse_dicts(l_d):
for d in l_d:
for rc_name in d:
rc = self._get_rc_by_name(rc_name)
if rc.after_rc_done:
if rc.after_rc_done.name not in list(d.keys()):
target_d = None
for d2 in l_d:
if rc.after_rc_done.name in list(d2.keys()):
target_d = d2
break
assert target_d is not None, \
"Internal dependency | |
# repo: ehunaj/tkn1
# -*- coding: utf-8 -*-
from linepy import *
from akad.ttypes import Message
from datetime import datetime
from time import sleep
from bs4 import BeautifulSoup
from humanfriendly import format_timespan, format_size, format_number, format_length
import time, random, sys, json, codecs, threading, glob, re, string, os, requests, subprocess, six, ast, pytz, urllib, urllib.parse
from gtts import gTTS
from googletrans import Translator
# --- Bot bootstrap: LINE client logins and global state ------------------
# NOTE(review): auth tokens are hardcoded below (one already redacted to
# "<KEY>8=" and admin to "ub<PASSWORD>"); they should be loaded from the
# environment or a config file, and these values must be rotated.
botStart = time.time()
cl = LINE("EMBsWALkcN5b9eFACvZ4.BvIR7Jw9VfAdsJzU/uMhba.0B0gmjifbNpXr64iuuaP3WfDuDcDMsGNI5JPgtUDJVc=")
ki = LINE("EMtrt7ARXnC3xEk8Ucq4.wOkmirNWe41AVCDdjgw/za.A1FVNahoA+6huK6SgkY1RPm0OME079ykEnVFZC5BPYg=")
kk = LINE("EMBf7j3JuEw9g1lNx0Qe.pYxjKbe7w+1iIyyJvihN7G.b8RYl5BL8w2UxuqYXKvOfbffVmXDArhXDRMi7YUwDZw=")
kc = LINE("EMeJYAVgrLiRHIkxibac.xmOZkKoDi9P/nYUM4NOsla.hwTymmBJr4fixtVoC8NVwIudzJ1DCufBWH8a6ddDp2Q=")
k1 = LINE("EMwUjryeKhXe5WiOI1Ud.PVmbdG5nG77Km5hrSxbgtq.2tACTY0iXYW3UZ+v58SZVe77FePSFzAT2fXlMJJVQ5g=")
k2 = LINE("<KEY>8=")
k3 = LINE("EM5mVyaw71y6pJU6UPD9.p6BZ50ZbaAbHQXlkeO8fkq./7AOtEIfFViAp2AT6zTTKSghszjeZ15k4u32NAd20J8=")
#k4 = LINE("
#k5 = LINE("
#k6 = LINE("
# Persisted state files (read.json / temp.json).
readOpen = codecs.open("read.json","r","utf-8")
settingsOpen = codecs.open("temp.json","r","utf-8")
# Cache each account's mid, profile and settings.
mid = cl.profile.mid
cl.Profile = cl.getProfile()
cl.Settings = cl.getSettings()
Amid = ki.profile.mid
ki.Profile = ki.getProfile()
ki.Settings = ki.getSettings()
Bmid = kk.profile.mid
kk.Profile = kk.getProfile()
kk.Settings = kk.getSettings()
Cmid = kc.profile.mid
kc.Profile = kc.getProfile()
kc.Settings = kc.getSettings()
Dmid = k1.profile.mid
k1.Profile = k1.getProfile()
k1.Settings = k1.getSettings()
Emid = k2.profile.mid
k2.Profile = k2.getProfile()
k2.Settings = k2.getSettings()
Fmid = k3.profile.mid
k3.Profile = k3.getProfile()
k3.Settings = k3.getSettings()
#Gmid = k4.profile.mid
#k4.Profile = k4.getProfile()
#k4.Settings = k4.getSettings()
#Hmid = k5.profile.mid
#k5.Profile = k5.getProfile()
#k5.Settings = k5.getSettings()
#Imid = k6.profile.mid
#k6.Profile = k6.getProfile()
#k6.Settings = k6.getSettings()
oepoll = OEPoll(cl)
call = cl
ABC = [ki,kk,kc]
Bots = [mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid] #,Gmid,Hmid,Imid]
admin = ["ub<PASSWORD>"]
#cl.log("Auth Token : " + str(cl.authToken))
# NOTE(review): these json loads are overwritten by the literal dicts
# assigned to ``settings`` and ``read`` further below, so the persisted
# values in read.json / temp.json are silently discarded -- confirm intent.
read = json.load(readOpen)
settings = json.load(settingsOpen)
# Mutable runtime flags and canned message texts.
wait = {
    "invite":{},
    "Bot":True,
    "mention":"║┝──────────────\n║│Yuk kak chat sini 🙋\n║╰❉ Jangan ngelamun😁\n╰━━━━━━━━━━━━━━━━n ━━━━┅═❉ইई❉═┅━━━━",
    "Respontag":"Hoi Jgn ngtag semm",
    "welcome":"Selamat datang & semoga betah",
    "comment":"Like like & like by Ehun",
    "message":"Terimakasih sudah add saya",
    "AutoJoinCancel":True,
    "memberscancel":30,
    "members":1,
}
# Default settings (see NOTE above: this clobbers the json.load result).
settings = {
    "changePicture":[],
    "autoAdd": False,
    "autoJoin": False,
    "autoJoinTicket":True,
    "autoLeave": False,
    "autoRead": False,
    "lang":"JP",
    "detectMention": True,
    "detectMentionadmin": True,
    "changeGroupPicture":[],
    "notifikasi": False,
    "Sider":{},
    "checkSticker": False,
    "userAgent": [
        "Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
        "Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
        "Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
        "Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
        "Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
        "Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
        "Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
        "Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
        "Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
    ],
    "mimic": {
        "copy": False,
        "status": False,
        "target": {}
    }
}
# Read-tracking state (also clobbers the json.load result above).
read = {
    "readPoint": {},
    "readMember": {},
    "readTime": {},
    "ROM": {}
}
myProfile = {
    "displayName": "",
    "statusMessage": "",
    "pictureStatus": ""
}
# Sider (lurker) detection state.
cctv = {
    "cyduk":{},
    "point":{},
    "MENTION":{},
    "sidermem":{}
}
bl = {
    "blacklist":{}
}
with open('bl.json', 'r') as fp:
    bl = json.load(fp)
#with open('admin.json', 'r') as fp:
#    admin = json.load(fp)
# Snapshot the primary account's profile for later restore.
myProfile["displayName"] = cl.Profile.displayName
myProfile["statusMessage"] = cl.Profile.statusMessage
myProfile["pictureStatus"] = cl.Profile.pictureStatus
def restartBot():
    """Re-exec the current interpreter, restarting the bot process in place."""
    print("[ INFO ] BOT RESETTED")
    time.sleep(3)
    executable = sys.executable
    os.execl(executable, executable, *sys.argv)
def logError(text):
    """Log an error through the LINE client and append it to errorLog.txt.

    Bug fix: the original wrote ``str(time)`` -- the repr of the ``time``
    module -- into the log file instead of the captured timestamp
    ``time_``.
    """
    cl.log("[ ERROR ] " + str(text))
    time_ = datetime.now()
    with open("errorLog.txt", "a") as error:
        error.write("\n[%s] %s" % (str(time_), text))
def welcomeMembers(to, mid):
    # Greet newly joined members of group ``to`` with one @-mention
    # message; ``mid`` is a list of member ids.
    try:
        arrData = ""
        # NOTE(review): the format string has no "{}" placeholder, so
        # len(mid) is discarded and textx is always "Haii " -- confirm.
        textx = "Haii ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            # Group info fetched once per member; only the name is used.
            ginfo = cl.getGroup(to)
            mention = "@x\n"
            # LINE mention metadata: character offsets of "@x" within textx.
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention+wait["welcome"]+"\nNama grup : "+str(ginfo.name)
            if no < len(mid):
                no += 1
                textx += "%i " % (num)
                num=(num+1)
            else:
                try:
                    no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    no = "\n╚══[ Success ]"
        cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention(to, mid, firstmessage, lastmessage):
    """Send `firstmessage @x lastmessage` to chat `to`, mentioning user `mid`.

    The MENTION metadata records the offsets of the '@x' placeholder.
    """
    try:
        placeholder = "@x "
        body = "%s " % (str(firstmessage))
        start = len(body)
        end = start + len(placeholder) - 1
        mentionees = [{'S': str(start), 'E': str(end), 'M': mid}]
        body += placeholder + str(lastmessage)
        cl.sendMessage(to, body, {'MENTION': '{"MENTIONEES":' + json.dumps(mentionees) + '}'}, 0)
    except Exception as error:
        logError(error)
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMessage(to, Message, contentMetadata={}, contentType=0):
    """Send a text message to chat `to` and bump the per-chat sent counter.

    :param Message: the text payload (legacy parameter name kept for callers)
    :param contentMetadata: optional metadata dict (never mutated here)
    :param contentType: LINE content type code, 0 = text

    Bug fix: the original shadowed the Message class with this parameter,
    called the passed text as a constructor, referenced an undefined `text`
    variable (guaranteed NameError), and never dispatched the message.
    """
    cl.sendMessage(to, str(Message), contentMetadata, contentType)
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def sendMessageWithMention(to, mid):
    """Send a bare '@x ' message to chat `to` that mentions the single user `mid`."""
    try:
        # the '@x' placeholder spans offsets 0..3 of the message text
        mentionee = '{"S":"0","E":"3","M":' + json.dumps(mid) + '}'
        cl.sendMessage(to, '@x ', contentMetadata={'MENTION': '{"MENTIONEES":[' + mentionee + ']}'}, contentType=0)
    except Exception as error:
        logError(error)
def siderMembers(to, mid):
    """Call out lurkers (`mid`) in chat `to` -- members who read without chatting.

    Same MENTIONEES-offset construction as welcomeMembers: 'S'/'E' are the
    character offsets of each '@x' placeholder, 'M' the mentioned mid.
    """
    try:
        arrData = ""
        # NOTE(review): .format() has no placeholder here; count arg is ignored.
        textx = "╭━━━┅═❉ইई❉═┅━━━━\n║ Haii ".format(str(len(mid)))
        arr = []
        no = 1
        num = 2
        for i in mid:
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            # wait["mention"] is the configurable sider text (defined elsewhere)
            textx += mention+wait["mention"]
            if no < len(mid):
                no += 1
                textx += "%i. " % (num)
                num=(num+1)
            else:
                # `no` is rebound to a footer string but never appended to textx.
                # NOTE(review): looks like dead code -- confirm intent.
                try:
                    no = "\n╚══[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    no = "\n╚══[ Success ]"
        cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def mentionMembers(to, mid):
    """Tag every member in `mid` in chat `to`, one '@x' mention per line.

    The message starts with the member count, then one mention per member,
    separated by '╠ ' and closed with a footer showing the group name.
    """
    try:
        arrData = ""
        # header is just the number of members, as a string
        textx = format(str(len(mid)))
        arr = []
        no = 1
        for i in mid:
            mention = "@x\n"
            slen = str(len(textx))
            elen = str(len(textx) + len(mention) - 1)
            arrData = {'S':slen, 'E':elen, 'M':i}
            arr.append(arrData)
            textx += mention
            if no < len(mid):
                no += 1
                textx += "╠ "
            else:
                # last member: append group-name footer; ignore lookup failures
                try:
                    textx += "╚══[ {} ]".format(str(cl.getGroup(to).name))
                except:
                    pass
        cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
    except Exception as error:
        logError(error)
        cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def backupData():
    """Persist `settings` to temp.json and `read` to read.json (UTF-8, pretty-printed).

    :return: True on success, False on failure (the failure is logged).
    """
    try:
        # `with` guarantees flush+close; the original leaked both file handles.
        with codecs.open('temp.json', 'w', 'utf-8') as f:
            json.dump(settings, f, sort_keys=True, indent=4, ensure_ascii=False)
        with codecs.open('read.json', 'w', 'utf-8') as f:
            json.dump(read, f, sort_keys=True, indent=4, ensure_ascii=False)
        return True
    except Exception as error:
        logError(error)
        return False
def helpmessage():
helpMessage = "━━━━┅═❉ইई❉═┅━━━━\n ❇ SELFBOT ❇\n╭━━━━━━━━━━━━━━━━\n║╭❉ MENU HELP ❇\n║┝───────────────" + "\n" + \
"║┝──[❇ STATUS ❇ ]" + "\n" + \
"║│ Restart" + "\n" + \
"║│ Runtime" + "\n" + \
"║│ Speed" + "\n" + \
"║│ Status" + "\n" + \
"║│ Bot(on/off" + "\n" + \
"║│ Dell「Removechat」" + "\n" + \
"║┝───────────────" + "\n" + \
"║┝──[ ❇ SETTING ❇ ]" + "\n" + \
"║│ Allstatus「On/Off」" + "\n" + \
"║│ Notif「On/Off」" + "\n" + \
"║│ Sider「On/Off」" + "\n" + \
"║│ AutoAdd「On/Off」" + "\n" + \
"║│ AutoJoin「On/Off」" + "\n" + \
"║│ AutoLeave「On/Off」" + "\n" + \
"║│ AutoRead「On/Off」" + "\n" + \
"║│ CheckSticker「On/Off」" + "\n" + \
"║│ DetectMention「On/Off」" + "\n" + \
"║┝───────────────" + "\n" + \
"║┝──[ ❇ SELF ❇]" + "\n" + \
"║│ Me" + "\n" + \
"║│ MyMid" + "\n" + \
"║│ MyName" + "\n" + \
"║│ MyBio" + "\n" + \
"║│ MyPicture" + "\n" + \
"║│ MyVideoProfile" + "\n" + \
"║│ MyCover" + "\n" + \
"║│ StealContact「@」" + "\n" + \
"║│ StealMid「@」" + "\n" + \
"║│ StealName「@」" + "\n" + \
"║│ StealBio「@」" + "\n" + \
"║│ StealPicture「@」" + "\n" + \
"║│ StealVideoProfile「@」" + "\n" + \
"║│ StealCover「@」" + "\n" + \
"║│ CloneProfile「@」" + "\n" + \
"║│ RestoreProfile" + "\n" + \
"║┝───────────────" + "\n" + \
"║┝──[ ❇ GROUP ❇ ]" + "\n" + \
"║│ GroupCreator" + "\n" + \
"║│ GroupId" + "\n" + \
"║│ GroupName" + "\n" + \
"║│ GroupPicture" + "\n" + \
"║│ GroupTicket" + "\n" + \
"║│ GroupTicket「On/Off」" + "\n" + \
"║│ GroupList" + "\n" + \
"║│ GroupMemberList" + "\n" + \
"║│ GroupInfo" + "\n" + \
"║│ Mimic「On/Off」" + "\n" + \
"║│ MimicList" + "\n" + \
"║│ MimicAdd「@」" + "\n" + \
"║│ MimicDel「@」" + "\n" + \
"║│ Tag" + "\n" + \
"║│ Lurking「On/Off/Reset」" + "\n" + \
"║│ Lurking" + "\n" + \
"║┝───────────────" + "\n" + \
"║┝──[ ❇ MEDIA ❇]" + "\n" + \
"║│ Kalender" + "\n" + \
"║│ CheckDate「Date」" + "\n" + \
"║┝───────────────\n║╰❉ EHUN BOT ❇\n╰━━━━━━━━━━━━━━━━\n━━━━┅═❉ইई❉═┅━━━━"
return helpMessage
def clBot(op):
global time
global ast
global groupParam
try:
if op.type == 0:
print ("[ 0 ] END OF OPERATION")
return
if op.type == 55:
if op.param2 in | |
dst_backup_paths):
class DriveBackupDaemon(plugin.TaskDaemon):
    """Task daemon wrapping an image-store drive backup: supports cancellation
    and removes the temporary workspace symlink on exit.

    Closes over `task_spec` and `tmp_workspace` from the enclosing method.
    """
    def __init__(self, domain_uuid):
        super(DriveBackupDaemon, self).__init__(task_spec, 'TakeVolumeBackup', report_progress=False)
        self.domain_uuid = domain_uuid
    def __exit__(self, exc_type, exc_val, exc_tb):
        super(DriveBackupDaemon, self).__exit__(exc_type, exc_val, exc_tb)
        # always remove the tmp workspace symlink created by the caller
        os.unlink(tmp_workspace)
    def _cancel(self):
        # cancellation path: ask the image store to stop this VM's backup jobs
        logger.debug("cancel vm[uuid:%s] backup" % self.domain_uuid)
        ImageStoreClient().stop_backup_jobs(self.domain_uuid)
    def _get_percent(self):
        # progress reporting disabled (report_progress=False above)
        pass
tmp_workspace = os.path.join(tempfile.gettempdir(), uuidhelper.uuid())
with DriveBackupDaemon(self.uuid):
self._do_take_volumes_top_drive_backup(volumes, dst_backup_paths, tmp_workspace)
def _do_take_volumes_top_drive_backup(self, volumes, dst_backup_paths, tmp_workspace):
    """Run an image-store 'top' drive backup for `volumes`, then move each
    produced backup file to its requested destination path.

    :param volumes: volume descriptors; str(deviceId) keys into dst_backup_paths
    :param dst_backup_paths: dict of str(deviceId) -> destination file path
    :param tmp_workspace: temp path symlinked to the shared on-disk workspace
    """
    args = {}
    for volume in volumes:
        target_disk, _ = self._get_target_disk(volume)
        # value is a (backup device name, 0) pair consumed by top_backup_volumes
        args[str(volume.deviceId)] = VmPlugin.get_backup_device_name(target_disk), 0
    # workspace lives beside device '0''s destination -- assumes key '0'
    # is always present in dst_backup_paths (TODO confirm with callers)
    dst_workspace = os.path.join(os.path.dirname(dst_backup_paths['0']), 'workspace')
    linux.mkdir(dst_workspace)
    # expose the workspace under the temp path handed to the backup job
    os.symlink(dst_workspace, tmp_workspace)
    res = ImageStoreClient().top_backup_volumes(self.uuid, args.values(), tmp_workspace)
    job_res = jsonobject.loads(res)
    for device_id, dst_path in dst_backup_paths.items():
        device_name = args[device_id][0]
        # job result maps device name -> produced backup file inside workspace
        back_path = os.path.join(dst_workspace, job_res[device_name].backupFile)
        linux.mkdir(os.path.dirname(dst_path))
        shutil.move(back_path, dst_path)
def _take_volumes_shallow_block_copy(self, task_spec, volumes, dst_backup_paths):
    # type: (Vm, jsonobject.JsonObject, list[xmlobject.XmlObject], dict[str, str]) -> None
    """Back up `volumes` via libvirt shallow blockCopy, wrapped in a
    cancellable task daemon that aborts outstanding block jobs on cancel."""
    class VolumeInfo(object):
        def __init__(self, dev_name):
            self.dev_name = dev_name  # type: str
            # set to a timestamp once the copy for this disk has converged
            self.end_time = None  # type: float
    class ShallowBackupDaemon(plugin.TaskDaemon):
        def __init__(self, domain):
            super(ShallowBackupDaemon, self).__init__(task_spec, 'TakeVolumeBackup', report_progress=False)
            self.domain = domain
        def _cancel(self):
            # abort every block job still registered on the domain
            logger.debug("cancel vm[uuid:%s] backup" % self.domain.name())
            for v in volume_backup_info.values():
                if self.domain.blockJobInfo(v.dev_name, 0):
                    self.domain.blockJobAbort(v.dev_name)
        def _get_percent(self):
            pass
    # map str(deviceId) -> VolumeInfo(libvirt target dev name, e.g. 'vda')
    volume_backup_info = {}
    for volume in volumes:
        target_disk, _ = self._get_target_disk(volume)
        volume_backup_info[str(volume.deviceId)] = VolumeInfo(target_disk.target.dev_)
    with ShallowBackupDaemon(self.domain):
        self._do_take_volumes_shallow_block_copy(volume_backup_info, dst_backup_paths)
def _do_take_volumes_shallow_block_copy(self, volume_backup_info, dst_backup_paths):
    """Start one shallow (top layer only, transient) blockCopy per volume,
    poll until every copy converges, then abort all jobs while the VM is
    briefly paused so the copies stop at a single point in time."""
    dom = self.domain
    flags = libvirt.VIR_DOMAIN_BLOCK_COPY_TRANSIENT_JOB | libvirt.VIR_DOMAIN_BLOCK_COPY_SHALLOW
    for device_id, v in volume_backup_info.items():
        vol_dir = os.path.dirname(dst_backup_paths[device_id])
        linux.mkdir(vol_dir)
        logger.info("start copying {}/{} ...".format(self.uuid, v.dev_name))
        # destination is a fresh qcow2 file described inline as disk XML
        dom.blockCopy(v.dev_name, "<disk type='file'><source file='{}'/><driver type='qcow2'/></disk>"
                      .format(dst_backup_paths[device_id]), None, flags)
    # time.sleep() returns None, so this sleeps 5s first, then re-evaluates
    # whether any copy has not yet converged.
    while time.sleep(5) or any(not v.end_time for v in volume_backup_info.values()):
        for v in volume_backup_info.values():
            if v.end_time:
                continue
            info = dom.blockJobInfo(v.dev_name, 0)
            if not info:
                raise Exception('blockjob not found on disk: ' + v.dev_name)
            elif info['cur'] == info['end']:
                # cur == end: the mirror for this disk is in sync
                v.end_time = time.time()
                logger.info("completed copying {}/{} ...".format(self.uuid, v.dev_name))
    # pause the VM while aborting so no writes land between per-disk aborts
    with vm_operator.TemporaryPauseVmOperator(dom):
        for v in volume_backup_info.values():
            dom.blockJobAbort(v.dev_name)
@staticmethod
def from_virt_domain(domain):
    """Build a Vm wrapper around a live libvirt domain object."""
    instance = Vm()
    instance.domain = domain
    # first field of domain.info() is the libvirt power-state code
    state_code = domain.info()[0]
    instance.state = Vm.power_state[state_code]
    instance.domain_xml = domain.XMLDesc(0)
    instance.domain_xmlobject = xmlobject.loads(instance.domain_xml)
    instance.uuid = instance.domain_xmlobject.name.text_
    return instance
@staticmethod
def from_StartVmCmd(cmd):
use_numa = cmd.useNuma
machine_type = get_machineType(cmd.machineType)
if HOST_ARCH == "aarch64" and cmd.bootMode == 'Legacy':
raise kvmagent.KvmError("Aarch64 does not support legacy, please change boot mode to UEFI instead of Legacy on your VM or Image.")
if cmd.architecture and cmd.architecture != HOST_ARCH:
raise kvmagent.KvmError("Image architecture[{}] not matched host architecture[{}].".format(cmd.architecture, HOST_ARCH))
default_bus_type = ('ide', 'sata', 'scsi')[max(machine_type == 'q35', (HOST_ARCH in ['aarch64', 'mips64el']) * 2)]
elements = {}
def make_root():
    """Create the top-level <domain> element and stash it in `elements`."""
    root = etree.Element('domain')
    root.set('type', get_domain_type())
    # qemu namespace enables the <qemu:commandline> extension elements
    root.set('xmlns:qemu', 'http://libvirt.org/schemas/domain/qemu/1.0')
    elements['root'] = root
def make_memory_backing():
    """Back guest RAM with hugepages, disable page sharing, allocate up front."""
    root = elements['root']
    backing = e(root, 'memoryBacking')
    e(backing, "hugepages")
    e(backing, "nosharepages")
    e(backing, "allocation", attrib={'mode': 'immediate'})
def make_cpu():
    """Emit <vcpu>/<cpu>/<cputune>: CPU mode, topology, optional NUMA cell and pinning.

    With use_numa, vCPU count is declared large and fixed (hotplug headroom)
    with only `cmd.cpuNum` current; otherwise topology follows the command.
    """
    if use_numa:
        root = elements['root']
        tune = e(root, 'cputune')
        def on_x86_64():
            # fixed 128 max vcpus; only cmd.cpuNum are online at boot
            e(root, 'vcpu', '128', {'placement': 'static', 'current': str(cmd.cpuNum)})
            # e(root,'vcpu',str(cmd.cpuNum),{'placement':'static'})
            if cmd.nestedVirtualization == 'host-model':
                cpu = e(root, 'cpu', attrib={'mode': 'host-model'})
                e(cpu, 'model', attrib={'fallback': 'allow'})
            elif cmd.nestedVirtualization == 'host-passthrough':
                cpu = e(root, 'cpu', attrib={'mode': 'host-passthrough'})
                e(cpu, 'model', attrib={'fallback': 'allow'})
            elif cmd.nestedVirtualization == 'custom':
                cpu = e(root, 'cpu', attrib={'mode': 'custom', 'match': 'minimum'})
                e(cpu, 'model', cmd.vmCpuModel, attrib={'fallback': 'allow'})
            else:
                cpu = e(root, 'cpu')
            # e(cpu, 'topology', attrib={'sockets': str(cmd.socketNum), 'cores': str(cmd.cpuOnSocket), 'threads': '1'})
            mem = cmd.memory / 1024
            # fixed 32x4 topology matching the 128-vcpu ceiling above
            e(cpu, 'topology', attrib={'sockets': '32', 'cores': '4', 'threads': '1'})
            numa = e(cpu, 'numa')
            e(numa, 'cell', attrib={'id': '0', 'cpus': '0-127', 'memory': str(mem), 'unit': 'KiB'})
        def on_aarch64():
            cpu = e(root, 'cpu', attrib={'mode': 'custom'})
            e(cpu, 'model', 'host', attrib={'fallback': 'allow'})
            mem = cmd.memory / 1024
            e(cpu, 'topology', attrib={'sockets': '32', 'cores': '4', 'threads': '1'})
            numa = e(cpu, 'numa')
            e(numa, 'cell', attrib={'id': '0', 'cpus': '0-127', 'memory': str(mem), 'unit': 'KiB'})
        def on_mips64el():
            # mips64el tops out at 8 vcpus (2 sockets x 4 cores)
            e(root, 'vcpu', '8', {'placement': 'static', 'current': str(cmd.cpuNum)})
            # e(root,'vcpu',str(cmd.cpuNum),{'placement':'static'})
            cpu = e(root, 'cpu', attrib={'mode': 'custom', 'match': 'exact', 'check': 'partial'})
            e(cpu, 'model', 'Loongson-3A4000-COMP', attrib={'fallback': 'allow'})
            mem = cmd.memory / 1024
            e(cpu, 'topology', attrib={'sockets': '2', 'cores': '4', 'threads': '1'})
            numa = e(cpu, 'numa')
            e(numa, 'cell', attrib={'id': '0', 'cpus': '0-7', 'memory': str(mem), 'unit': 'KiB'})
        # dispatch to the handler matching the host architecture
        eval("on_{}".format(HOST_ARCH))()
    else:
        root = elements['root']
        # e(root, 'vcpu', '128', {'placement': 'static', 'current': str(cmd.cpuNum)})
        e(root, 'vcpu', str(cmd.cpuNum), {'placement': 'static'})
        tune = e(root, 'cputune')
        # enable nested virtualization
        def on_x86_64():
            if cmd.nestedVirtualization == 'host-model':
                cpu = e(root, 'cpu', attrib={'mode': 'host-model'})
                e(cpu, 'model', attrib={'fallback': 'allow'})
            elif cmd.nestedVirtualization == 'host-passthrough':
                cpu = e(root, 'cpu', attrib={'mode': 'host-passthrough'})
                e(cpu, 'model', attrib={'fallback': 'allow'})
            elif cmd.nestedVirtualization == 'custom':
                cpu = e(root, 'cpu', attrib={'mode': 'custom'})
                e(cpu, 'model', cmd.vmCpuModel, attrib={'fallback': 'allow'})
            else:
                cpu = e(root, 'cpu')
            return cpu
        def on_aarch64():
            # nested aarch64 guests get a generic cortex-a57 model
            if is_virtual_machine():
                cpu = e(root, 'cpu')
                e(cpu, 'model', 'cortex-a57')
            else :
                cpu = e(root, 'cpu', attrib={'mode': 'host-passthrough'})
                e(cpu, 'model', attrib={'fallback': 'allow'})
            return cpu
        def on_mips64el():
            cpu = e(root, 'cpu', attrib={'mode': 'custom', 'match': 'exact', 'check': 'partial'})
            e(cpu, 'model', 'Loongson-3A4000-COMP', attrib={'fallback': 'allow'})
            return cpu
        cpu = eval("on_{}".format(HOST_ARCH))()
        e(cpu, 'topology', attrib={'sockets': str(cmd.socketNum), 'cores': str(cmd.cpuOnSocket), 'threads': '1'})
    if cmd.addons.cpuPinning:
        # pin individual vCPUs to host CPU sets as requested
        for rule in cmd.addons.cpuPinning:
            e(tune, 'vcpupin', attrib={'vcpu': str(rule.vCpu), 'cpuset': rule.pCpuSet})
def make_memory():
    """Emit memory sizing; with NUMA, declare hotplug-capable maxMemory."""
    root = elements['root']
    # cmd.memory presumably in bytes, converted to KiB -- TODO confirm
    mem = cmd.memory / 1024
    if use_numa:
        # maxMemory fixed at 34359738368 KiB with 16 hotplug slots
        e(root, 'maxMemory', str(34359738368), {'slots': str(16), 'unit': 'KiB'})
        # e(root,'memory',str(mem),{'unit':'k'})
        # NOTE(review): <memory> is omitted in the NUMA branch (only
        # currentMemory is set) -- looks deliberate, confirm.
        e(root, 'currentMemory', str(mem), {'unit': 'k'})
    else:
        e(root, 'memory', str(mem), {'unit': 'k'})
        e(root, 'currentMemory', str(mem), {'unit': 'k'})
def make_os():
    """Emit the <os> element: machine type, firmware loader/NVRAM, boot options."""
    root = elements['root']
    os = e(root, 'os')
    host_arch = kvmagent.os_arch
    def on_x86_64():
        e(os, 'type', 'hvm', attrib={'machine': machine_type})
        # if boot mode is UEFI
        if cmd.bootMode == "UEFI":
            e(os, 'loader', '/usr/share/edk2.git/ovmf-x64/OVMF_CODE-pure-efi.fd', attrib={'readonly': 'yes', 'type': 'pflash'})
            # per-VM NVRAM file instantiated from the OVMF VARS template
            e(os, 'nvram', '/var/lib/libvirt/qemu/nvram/%s.fd' % cmd.vmInstanceUuid, attrib={'template': '/usr/share/edk2.git/ovmf-x64/OVMF_VARS-pure-efi.fd'})
        elif cmd.bootMode == "UEFI_WITH_CSM":
            e(os, 'loader', '/usr/share/edk2.git/ovmf-x64/OVMF_CODE-with-csm.fd', attrib={'readonly': 'yes', 'type': 'pflash'})
            e(os, 'nvram', '/var/lib/libvirt/qemu/nvram/%s.fd' % cmd.vmInstanceUuid, attrib={'template': '/usr/share/edk2.git/ovmf-x64/OVMF_VARS-with-csm.fd'})
        elif cmd.addons['loaderRom'] is not None:
            # custom loader ROM supplied through addons
            e(os, 'loader', cmd.addons['loaderRom'], {'type': 'rom'})
    def on_aarch64():
        # aarch64 always boots UEFI; firmware paths differ per host OS family
        def on_redhat():
            e(os, 'type', 'hvm', attrib={'arch': 'aarch64', 'machine': machine_type})
            e(os, 'loader', '/usr/share/edk2/aarch64/QEMU_EFI-pflash.raw', attrib={'readonly': 'yes', 'type': 'pflash'})
            e(os, 'nvram', '/var/lib/libvirt/qemu/nvram/%s.fd' % cmd.vmInstanceUuid, attrib={'template': '/usr/share/edk2/aarch64/vars-template-pflash.raw'})
        def on_debian():
            e(os, 'type', 'hvm', attrib={'arch': 'aarch64', 'machine': machine_type})
            e(os, 'loader', '/usr/share/OVMF/QEMU_EFI-pflash.raw', attrib={'readonly': 'yes', 'type': 'rom'})
            e(os, 'nvram', '/var/lib/libvirt/qemu/nvram/%s.fd' % cmd.vmInstanceUuid, attrib={'template': '/usr/share/OVMF/vars-template-pflash.raw'})
        eval("on_{}".format(kvmagent.get_host_os_type()))()
    def on_mips64el():
        e(os, 'type', 'hvm', attrib={'arch': 'mips64el', 'machine': 'loongson3a'})
        e(os, 'loader', '/usr/share/qemu/ls3a_bios.bin', attrib={'readonly': 'yes', 'type': 'rom'})
    # dispatch on host architecture
    eval("on_{}".format(host_arch))()
    if cmd.useBootMenu:
        e(os, 'bootmenu', attrib={'enable': 'yes'})
    if cmd.systemSerialNumber and HOST_ARCH != 'mips64el':
        # expose <sysinfo> SMBIOS data (built in make_sysinfo) to the guest
        e(os, 'smbios', attrib={'mode': 'sysinfo'})
def make_sysinfo():
    """Emit SMBIOS <sysinfo>: system serial number and optional chassis asset tag."""
    if not cmd.systemSerialNumber:
        return
    root = elements['root']
    sysinfo = e(root, 'sysinfo', attrib={'type': 'smbios'})
    system = e(sysinfo, 'system')
    e(system, 'entry', cmd.systemSerialNumber, attrib={'name': 'serial'})
    if cmd.chassisAssetTag is not None:
        chassis = e(sysinfo, 'chassis')
        e(chassis, 'entry', cmd.chassisAssetTag, attrib={'name': 'asset'})
def make_features():
    """Emit <features>: apic/pae/acpi plus optional kvm-hidden, vmport,
    Hyper-V enlightenments, ioapic driver and GIC version."""
    root = elements['root']
    features = e(root, 'features')
    for f in ['apic', 'pae']:
        e(features, f)
    @linux.with_arch(todo_list=['x86_64'])
    def make_acpi():
        # acpi only added on x86_64 (with_arch gates execution)
        e(features, 'acpi')
    make_acpi()
    if cmd.kvmHiddenState is True:
        # hide the KVM signature from the guest
        kvm = e(features, "kvm")
        e(kvm, 'hidden', None, {'state': 'on'})
    if cmd.vmPortOff is True:
        e(features, 'vmport', attrib={'state': 'off'})
    if cmd.emulateHyperV is True:
        # Hyper-V enlightenments (typically for Windows guests -- confirm)
        hyperv = e(features, "hyperv")
        e(hyperv, 'relaxed', attrib={'state': 'on'})
        e(hyperv, 'vapic', attrib={'state': 'on'})
        if is_hv_freq_supported(): e(hyperv, 'frequencies', attrib={'state': 'on'})
        e(hyperv, 'spinlocks', attrib={'state': 'on', 'retries': '4096'})
        e(hyperv, 'vendor_id', attrib={'state': 'on', 'value': 'ZStack_Org'})
    # always set ioapic driver to kvm after libvirt 3.4.0
    if is_ioapic_supported():
        e(features, "ioapic", attrib={'driver': 'kvm'})
    if get_gic_version(cmd.cpuNum) == 2:
        e(features, "gic", attrib={'version': '2'})
def make_qemu_commandline():
if not os.path.exists(QMP_SOCKET_PATH):
os.mkdir(QMP_SOCKET_PATH)
root = elements['root']
qcmd = e(root, 'qemu:commandline')
vendor_id, model_name = linux.get_cpu_model()
if "hygon" in model_name.lower():
if isinstance(cmd.imagePlatform, str) and cmd.imagePlatform.lower() not in ["other", "paravirtualization"]:
e(qcmd, "qemu:arg", attrib={"value": "-cpu"})
e(qcmd, "qemu:arg", attrib={"value": "EPYC,vendor=AuthenticAMD,model_id={} Processor,+svm".format(" ".join(model_name.split(" ")[0:3]))})
else:
e(qcmd, "qemu:arg", attrib={"value": "-qmp"})
e(qcmd, "qemu:arg", attrib={"value": "unix:{}/{}.sock,server,nowait".format(QMP_SOCKET_PATH, cmd.vmInstanceUuid)})
args = cmd.addons['qemuCommandLine']
if args is not None:
for arg in args:
e(qcmd, "qemu:arg", attrib={"value": arg.strip('"')})
if cmd.useColoBinary:
e(qcmd, "qemu:arg", attrib={"value": '-L'})
e(qcmd, "qemu:arg", attrib={"value": '/usr/share/qemu-kvm/'})
if cmd.coloPrimary:
e(qcmd, "qemu:arg", attrib={"value": '-L'})
e(qcmd, "qemu:arg", attrib={"value": '/usr/share/qemu-kvm/'})
count = 0
primary_host_ip = cmd.addons['primaryVmHostIp']
for config in cmd.addons['primaryVmNicConfig']:
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=zs-mirror-%s,host=%s,port=%s,server,nowait'
% (count, primary_host_ip, config.mirrorPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=primary-in-s-%s,host=%s,port=%s,server,nowait'
% (count, primary_host_ip, config.primaryInPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=secondary-in-s-%s,host=%s,port=%s,server,nowait'
% (count, primary_host_ip, config.secondaryInPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=primary-in-c-%s,host=%s,port=%s,nowait'
% (count, primary_host_ip, config.primaryInPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=primary-out-s-%s,host=%s,port=%s,server,nowait'
% (count, primary_host_ip, config.primaryOutPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=primary-out-c-%s,host=%s,port=%s,nowait'
% (count, primary_host_ip, config.primaryOutPort)})
count += 1
e(qcmd, "qemu:arg", attrib={"value": '-monitor'})
e(qcmd, "qemu:arg", attrib={"value": 'tcp:%s:%s,server,nowait' % (primary_host_ip, cmd.addons['primaryMonitorPort'])})
elif cmd.coloSecondary:
e(qcmd, "qemu:arg", attrib={"value": '-L'})
e(qcmd, "qemu:arg", attrib={"value": '/usr/share/qemu-kvm/'})
count = 0
for config in cmd.addons['ftSecondaryVmNicConfig']:
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=red-mirror-%s,host=%s,port=%s'
% (count, cmd.addons['primaryVmHostIp'], config.mirrorPort)})
e(qcmd, "qemu:arg", attrib={"value": '-chardev'})
e(qcmd, "qemu:arg", attrib={"value": 'socket,id=red-secondary-%s,host=%s,port=%s'
% (count, | |
<gh_stars>0
# File: xmlparser.py ; This file is part of Twister.
# version: 3.038
# Copyright (C) 2012-2014 , Luxoft
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Parser for xml configuration files
"""
import os
import sys
import hashlib
from collections import OrderedDict
TWISTER_PATH = os.getenv('TWISTER_PATH')
if not TWISTER_PATH:
print 'TWISTER_PATH environment variable is not set! Exiting!'
exit(1)
sys.path.append(TWISTER_PATH)
from lxml import etree
from ConfigParser import SafeConfigParser
from plugins import BasePlugin
from common.helpers import userHome
from common.tsclogging import logDebug, logFull, logWarning, logInfo, logError
from common.suitesmanager import SuitesManager
from common.constants import FWMCONFIG_TAGS, PROJECTCONFIG_TAGS
from common.constants import SUITES_TAGS, TESTS_TAGS
from server.CeFs import LocalFS
parser = etree.XMLParser(ns_clean=True, remove_blank_text=True)
etree.set_default_parser(parser)
localFs = LocalFS() # Singleton
__all__ = ['TSCParser', 'DBParser', 'PluginParser', 'ClearCaseParser']
# # # Helpers # # #
def parseXML(user, fname):
    """Read one XML file through the local FS client and return its parsed
    root element, or None if the content cannot be parsed."""
    content = localFs.read_user_file(user, fname)
    try:
        root = etree.fromstring(content)
    except Exception as e:
        logError('Error parsing file `{}`, for user `{}`: `{}`!'.format(fname, user, e))
        return None
    return root
def dumpXML(user, fname, tree):
    """Serialize `tree` (pretty-printed) and write it through the local FS
    client; returns the write result, or None on failure."""
    serialized = etree.tostring(tree, pretty_print=True)
    try:
        result = localFs.write_user_file(user, fname, serialized)
    except Exception as e:
        logError('Error dumping XML into file `{}`, for user `{}`: `{}`!'.format(fname, user, e))
        return None
    return result
# # # Main Parser # # #
class TSCParser(object):
"""
Requirements: LXML.
This parser reads all client configuration files and returns information like:
- Test Suite config File
- Logs Path
- Reports Path
- EPs list, active EPs
- E-mail and Globals config
"""
def __init__(self, user, base_config='', files_config=''):
    """Load the framework (master) config and the Test-Suites config for `user`.

    `base_config` may be a readable file path or a raw inline XML string
    (recognized by starting with '<' and ending with '>').
    Raises Exception if the base config is neither, or cannot be parsed.
    """
    self.user = user
    self.user_home = userHome(user)
    # accept a file path or inline XML data
    if os.path.isfile(base_config):
        base_config = localFs.read_user_file(user, base_config)
    elif base_config and (type(base_config) == type('') or type(base_config) == type(u'')) \
        and (base_config[0] == '<' and base_config[-1] == '>'):
        pass
    else:
        raise Exception('Parser ERROR: Invalid config data : `{}`!'.format(base_config))
    try:
        self.xmlDict = etree.fromstring(base_config)
    except Exception as e:
        raise Exception('Parser ERROR: Cannot access XML config! `{}`'.format(e))
    self.configTS = None      # parsed Test-Suites XML root, or None
    self.configHash = None    # md5 of the last parsed Test-Suites XML
    self.project_globals = {}
    self.files_config = ''    # path of the Test-Suites XML actually in use
    self.updateConfigTS(files_config)
    self.updateProjectGlobals()
def updateConfigTS(self, files_config=''):
    """
    Updates Test Suite Config file hash and recreates internal XML structure,
    only if the XML file is changed.

    `files_config` may be inline XML data ('<...>'), a file path, or empty
    (falls back to the previously used path). Returns -1 on failure; on
    success, leaves the parsed tree in self.configTS.
    """
    if files_config and (type(files_config) == type('') or type(files_config) == type(u'')) \
        and (files_config[0] == '<' and files_config[-1] == '>'):
        # This is pure XML data
        config_ts = files_config
        # Hash check the XML file, to see if is changed
        newConfigHash = hashlib.md5(files_config).hexdigest()
    else:
        if not files_config or not os.path.isfile(files_config):
            # Get path to Test-Suites XML from Master config
            files_config = self.files_config
        # manual '~/' expansion against the user's home
        if files_config.startswith('~/'):
            files_config = userHome(self.user) + files_config[1:]
        if not os.path.isfile(files_config):
            logError('User {}: Parser: Test-Suites XML file `{}` does '\
                'not exist! Please check framework config XML file!'.format(self.user, files_config))
            self.configTS = None
            return -1
        else:
            config_ts = localFs.read_user_file(self.user, files_config)
            # Hash check the XML file, to see if is changed
            newConfigHash = hashlib.md5(config_ts).hexdigest()
    # only reparse when the content hash actually changed
    if self.configHash != newConfigHash:
        logDebug('User {}: Parser: Test-Suites XML file changed, '\
            'rebuilding internal structure...\n'.format(self.user))
        # Use the new hash
        self.configHash = newConfigHash
        # Create XML Soup from the new XML file
        try:
            self.configTS = etree.fromstring(config_ts)
        except Exception:
            logError('User {}: Parser ERROR: Cannot access Test-Suites XML data!'.format(self.user))
            self.configTS = None
            return -1
    self.files_config = files_config
def updateProjectGlobals(self):
    """
    Returns the values of many global tags, from FWM and Test-Suites XML.

    Populates self.project_globals from the known FWMCONFIG_TAGS (framework
    config) and PROJECTCONFIG_TAGS (Test-Suites config) descriptors; each
    descriptor supplies a name, an xpath tag and a default value.
    Returns False if the Test-Suites XML is not loaded, True otherwise.
    """
    logFull('xmlparser:updateProjectGlobals')
    if self.configTS is None:
        logError('User {}: Parser: Cannot get project globals, because'\
            ' Test-Suites XML is invalid!'.format(self.user))
        return False
    # Reset globals
    self.project_globals = OrderedDict()
    # Parse all known FWMCONFIG tags
    for tag_dict in FWMCONFIG_TAGS:
        # Create default entry
        self.project_globals[tag_dict['name']] = tag_dict['default']
        # Update value from XML
        if self.xmlDict.xpath(tag_dict['tag'] + '/text()'):
            path = self.xmlDict.xpath(tag_dict['tag'])[0].text
            # expand a leading '~' to the user's home directory
            if path[0] == '~':
                path = self.user_home + path[1:]
            self.project_globals[tag_dict['name']] = path
    # Parse all known PROJECT tags
    for tag_dict in PROJECTCONFIG_TAGS:
        # Create default entry
        self.project_globals[tag_dict['name']] = tag_dict['default']
        # Update value from XML
        if self.configTS.xpath(tag_dict['tag'] + '/text()'):
            # If the variable should be a Boolean
            if tag_dict.get('type') == 'bool':
                if self.configTS.xpath(tag_dict['tag'] + '/text()')[0].lower() == 'true':
                    value = True
                else:
                    value = False
            # If the variable should be a Number
            elif tag_dict.get('type') == 'number':
                # xpath round() coerces the tag text to a number
                value = self.configTS.xpath('round({})'.format(tag_dict['tag']))
            else:
                value = self.configTS.xpath(tag_dict['tag'])[0].text
            self.project_globals[tag_dict['name']] = value
    return True
def getActiveEps(self):
    """
    Returns a list with all EPs that appear in Test-Suites XML.

    EP names are stripped, de-duplicated, and returned in first-seen order;
    empty names are ignored.
    """
    logFull('xmlparser:getActiveEps')
    if self.configTS is None:
        logError('User {}: Parser ERROR: Cannot get active EPs, because' \
            ' Test-Suites XML is invalid!'.format(self.user))
        return []
    seen = []
    for raw_name in self.configTS.xpath('//EpId/text()'):
        name = str(raw_name).strip()
        if name and name not in seen:
            seen.append(name)
    return seen
def listSettings(self, xmlFile, xFilter=''):
    """
    High level function for listing all settings from a Twister XML config file.

    Returns False if the file is missing, [] if it cannot be parsed, else
    the tag names of every element (optionally filtered by substring).
    """
    logFull('xmlparser:listSettings')
    if not os.path.isfile(xmlFile):
        logError('User {}: Parse settings error! File path `{}` does not exist!'.format(self.user, xmlFile))
        return False
    soup = parseXML(self.user, xmlFile)
    if soup is None:
        return []
    tags = [node.tag for node in soup.xpath('//*')]
    if xFilter:
        tags = [t for t in tags if xFilter in t]
    return tags
def getSettingsValue(self, xmlFile, key):
    """
    High level function for getting a value from a Twister XML config file.

    Returns the text of the first element matching xpath `key` ('' when the
    element is empty); False on missing file, empty key, parse failure, or
    no match.
    """
    logFull('xmlparser:getSettingsValue')
    if not os.path.isfile(xmlFile):
        logError('User {}: Parse settings error! File path `{}` does not exist!'.format(self.user, xmlFile))
        return False
    if not key:
        return False
    key = str(key)
    soup = parseXML(self.user, xmlFile)
    if soup is None:
        return False
    matches = soup.xpath(key)
    if not matches:
        return False
    return matches[0].text or ''
def setSettingsValue(self, xmlFile, key, value):
    """
    High level function for setting a value in a Twister XML config file.

    If xpath `key` matches, its first element's text is overwritten;
    otherwise a new element is created under the key's parent path.
    Returns the dump result, or False on any validation/parse failure.
    """
    logFull('xmlparser:setSettingsValue')
    if not os.path.isfile(xmlFile):
        logError('User {}: Parse settings error! File path `{}` does not exist!'.format(self.user, xmlFile))
        return False
    if not key:
        return False
    key = str(key)
    value = str(value) if value else ''
    soup = parseXML(self.user, xmlFile)
    if soup is None:
        return False
    matches = soup.xpath(key)
    if matches:
        # key exists: overwrite its text
        matches[0].text = value
    else:
        # key missing: split into parent path + node name and create it
        if '/' in key:
            parent_path = '/'.join(key.split('/')[:-1])
            node_name = key.split('/')[-1]
        else:
            parent_path, node_name = '/', key
        parents = soup.xpath(parent_path)
        # invalid parent path?
        if not parents:
            return False
        node = etree.Element(node_name)
        node.text = value
        node.tail = '\n'
        parents[0].insert(-1, node)
    return dumpXML(self.user, xmlFile, soup)
def delSettingsKey(self, xmlFile, key, index=0):
    """
    High level function for deleting a value from a Twister XML config file.
    If the `index` is specified and the `key` returns more values, only the
    index-th value is deleted; unless the `index` is -1, in this case, all
    values are deleted.
    """
    logFull('xmlparser:delSettingsKey')
    if not os.path.isfile(xmlFile):
        logError('User {}: Parse settings error! File path `{}` does not exist!'.format(self.user, xmlFile))
        return False
    # key must be a string (str or unicode -- Python 2)
    if not (isinstance(key, str) or isinstance(key, unicode)):
        return False
    # index must be an integer
    if not isinstance(index, int):
        return False
    # key must not be empty
    if not key:
        return False
    key = str(key)
    soup = parseXML(self.user, xmlFile)
    if soup is None:
        return False
    matches = soup.xpath(key)
    if matches is None:
        return False
    if index == -1:
        # delete every match
        for node in matches:
            node.getparent().remove(node)
    else:
        # delete only the index-th match; bail out on a bad index
        try:
            target = matches[index]
        except Exception:
            return False
        target.getparent().remove(target)
    return dumpXML(self.user, xmlFile, soup)
def setPersistentSuite(self, xmlFile, suite, info={}, order=-1):
"""
This function writes in TestSuites.XML file.
"""
logFull('xmlparser:setPersistentSuite')
if not os.path.isfile(xmlFile):
logError('User | |
""" Cisco_IOS_XR_wd_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR wd package operational data.
This module contains definitions
for the following management objects\:
watchdog\: Watchdog information
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
class MemoryStateEnum(Enum):
    """
    MemoryStateEnum
    Memory state options
    .. data:: unknown = 0
        Memory state unknown
    .. data:: normal = 1
        Memory state normal
    .. data:: minor = 2
        Memory state minor
    .. data:: severe = 3
        Memory state severe
    .. data:: critical = 4
        Memory state critical
    """
    # Auto-generated YDK binding: numeric values mirror the YANG enum.
    unknown = 0
    normal = 1
    minor = 2
    severe = 3
    critical = 4
    @staticmethod
    def _meta_info():
        # metadata lookup is deferred to avoid a circular import at load time
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wd_oper as meta
        return meta._meta_table['MemoryStateEnum']
class OverloadCtrlNotifEnum(Enum):
    """
    OverloadCtrlNotifEnum
    Overload control notification
    .. data:: disabled = 0
        Diabled
    .. data:: enabled = 1
        Enabled
    """
    # Auto-generated YDK binding: numeric values mirror the YANG enum.
    disabled = 0
    enabled = 1
    @staticmethod
    def _meta_info():
        # metadata lookup is deferred to avoid a circular import at load time
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wd_oper as meta
        return meta._meta_table['OverloadCtrlNotifEnum']
class Watchdog(object):
"""
Watchdog information
.. attribute:: nodes
List of nodes
**type**\: :py:class:`Nodes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes>`
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
    # single child container: the per-node watchdog list
    self.nodes = Watchdog.Nodes()
    self.nodes.parent = self
class Nodes(object):
"""
List of nodes
.. attribute:: node
Node ID
**type**\: list of :py:class:`Node <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node>`
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # YList of Watchdog.Nodes.Node entries, keyed by node_name
    self.node = YList()
    self.node.parent = self
    self.node.name = 'node'
class Node(object):
"""
Node ID
.. attribute:: node_name <key>
Node name
**type**\: str
**pattern:** ([a\-zA\-Z0\-9\_]\*\\d+/){1,2}([a\-zA\-Z0\-9\_]\*\\d+)
.. attribute:: memory_state
Memory state
**type**\: :py:class:`MemoryState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node.MemoryState>`
.. attribute:: overload_state
Display overload control state
**type**\: :py:class:`OverloadState <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node.OverloadState>`
.. attribute:: threshold_memory
Threshold memory
**type**\: :py:class:`ThresholdMemory <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node.ThresholdMemory>`
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.node_name = None
self.memory_state = Watchdog.Nodes.Node.MemoryState()
self.memory_state.parent = self
self.overload_state = Watchdog.Nodes.Node.OverloadState()
self.overload_state.parent = self
self.threshold_memory = Watchdog.Nodes.Node.ThresholdMemory()
self.threshold_memory.parent = self
class ThresholdMemory(object):
"""
Threshold memory
.. attribute:: configured
Memory configured by user
**type**\: :py:class:`Configured <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node.ThresholdMemory.Configured>`
.. attribute:: default
System default memory
**type**\: :py:class:`Default <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node.ThresholdMemory.Default>`
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.configured = Watchdog.Nodes.Node.ThresholdMemory.Configured()
self.configured.parent = self
self.default = Watchdog.Nodes.Node.ThresholdMemory.Default()
self.default.parent = self
class Default(object):
"""
System default memory
.. attribute:: configured_memory
Configured memory
**type**\: :py:class:`ConfiguredMemory <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node.ThresholdMemory.Default.ConfiguredMemory>`
.. attribute:: memory
Memory Information
**type**\: :py:class:`Memory <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node.ThresholdMemory.Default.Memory>`
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.configured_memory = Watchdog.Nodes.Node.ThresholdMemory.Default.ConfiguredMemory()
self.configured_memory.parent = self
self.memory = Watchdog.Nodes.Node.ThresholdMemory.Default.Memory()
self.memory.parent = self
class ConfiguredMemory(object):
"""
Configured memory
.. attribute:: critical
Critical memory in bytes
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: minor
Minor memory threshold in bytes
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: severe
Severe memory threshold in bytes
**type**\: int
**range:** 0..4294967295
**units**\: byte
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.critical = None
self.minor = None
self.severe = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-wd-oper:configured-memory'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.critical is not None:
return True
if self.minor is not None:
return True
if self.severe is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wd_oper as meta
return meta._meta_table['Watchdog.Nodes.Node.ThresholdMemory.Default.ConfiguredMemory']['meta_info']
class Memory(object):
"""
Memory Information
.. attribute:: free_memory
Free memory in bytes
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: memory_state
State of memory
**type**\: :py:class:`MemoryStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.MemoryStateEnum>`
.. attribute:: physical_memory
Physical memory in bytes
**type**\: int
**range:** 0..4294967295
**units**\: byte
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.free_memory = None
self.memory_state = None
self.physical_memory = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-wd-oper:memory'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.free_memory is not None:
return True
if self.memory_state is not None:
return True
if self.physical_memory is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wd_oper as meta
return meta._meta_table['Watchdog.Nodes.Node.ThresholdMemory.Default.Memory']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-wd-oper:default'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.configured_memory is not None and self.configured_memory._has_data():
return True
if self.memory is not None and self.memory._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wd_oper as meta
return meta._meta_table['Watchdog.Nodes.Node.ThresholdMemory.Default']['meta_info']
class Configured(object):
"""
Memory configured by user
.. attribute:: critical
Critical memory in bytes
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: minor
Minor memory threshold in bytes
**type**\: int
**range:** 0..4294967295
**units**\: byte
.. attribute:: severe
Severe memory threshold in bytes
**type**\: int
**range:** 0..4294967295
**units**\: byte
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.critical = None
self.minor = None
self.severe = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-wd-oper:configured'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.critical is not None:
return True
if self.minor is not None:
return True
if self.severe is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wd_oper as meta
return meta._meta_table['Watchdog.Nodes.Node.ThresholdMemory.Configured']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-wd-oper:threshold-memory'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.configured is not None and self.configured._has_data():
return True
if self.default is not None and self.default._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wd_oper as meta
return meta._meta_table['Watchdog.Nodes.Node.ThresholdMemory']['meta_info']
class MemoryState(object):
"""
Memory state
.. attribute:: free_memory
Free memory in bytes
**type**\: int
**range:** 0..18446744073709551615
**units**\: byte
.. attribute:: memory_state
State of memory
**type**\: :py:class:`MemoryStateEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.MemoryStateEnum>`
.. attribute:: physical_memory
Physical memory in bytes
**type**\: int
**range:** 0..4294967295
**units**\: byte
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.free_memory = None
self.memory_state = None
self.physical_memory = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-wd-oper:memory-state'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if not self.is_config():
return False
if self.free_memory is not None:
return True
if self.memory_state is not None:
return True
if self.physical_memory is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_wd_oper as meta
return meta._meta_table['Watchdog.Nodes.Node.MemoryState']['meta_info']
class OverloadState(object):
"""
Display overload control state
.. attribute:: configured_wdsysmon_throttle
Configured resmon throttle
**type**\: int
**range:** 0..4294967295
.. attribute:: current_throttle
Current throttle information
**type**\: :py:class:`CurrentThrottle <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node.OverloadState.CurrentThrottle>`
.. attribute:: default_wdsysmon_throttle
Default resmon throttle
**type**\: int
**range:** 0..4294967295
.. attribute:: last_throttle
Last throttle information
**type**\: list of :py:class:`LastThrottle <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.Watchdog.Nodes.Node.OverloadState.LastThrottle>`
.. attribute:: overload_control_notification
State of overload control notification
**type**\: :py:class:`OverloadCtrlNotifEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_wd_oper.OverloadCtrlNotifEnum>`
"""
_prefix = 'wd-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.configured_wdsysmon_throttle = None
self.current_throttle = Watchdog.Nodes.Node.OverloadState.CurrentThrottle()
| |
return redirect('qa:questionDetailView', pk=data.id)
getAllReviews = FlagPost.objects.filter(
answer_forFlag=getAnswer,
actions_Flag_Q="NOT_AN_ANSWER").exclude(
ended=True)
if getCreateFlag_object.how_many_votes_on_spamANDRude >= 2:
for rev in getAllReviews:
rev.ended = True
rev.save()
else:
getCreateFlag_object = FlagPost.objects.filter(
answer_forFlag=getAnswer).filter(
Q(
actions_Flag_Q="IN_NEED_OF_MODERATOR_INTERVATION") | Q(
actions_Flag_Q="ABOUT_PROFESSIONAL")).exclude(
ended=True).first()
if getCreateFlag_object:
messages.error(
request, 'Previous Flag is Waiting for Review')
else:
new_post.flagged_by = request.user
new_post.answer_forFlag = getAnswer
new_post.save()
createReviewInstance, created = ReviewFlagPost.objects.get_or_create(
flag_answer_to_view_if=getAnswer, flag_of=new_post)
createReviewInstance.flag_reviewed_by = request.user
createReviewInstance.save()
getAllReviews = FlagPost.objects.filter(
answer_forFlag=getAnswer).filter(
Q(
actions_Flag_Q="IN_NEED_OF_MODERATOR_INTERVATION") | Q(
actions_Flag_Q="ABOUT_PROFESSIONAL")).exclude(
ended=True)
if getCreateFlag_object.how_many_votes_on_spamANDRude >= 2:
for rev in getAllReviews:
rev.ended = True
rev.save()
ser_instance = serializers.serialize('json', [
new_post,
])
# send to client side.
return JsonResponse({"action": "saved"}, status=200)
else:
return JsonResponse({'action': "lackOfPrivelege"})
# else:
# return JsonResponse({"action": "cannotCreate"}, status=200)
else:
# some form errors occured.
return JsonResponse({"error": Flag_Form.errors}, status=400)
# some error occured
return JsonResponse({"error": ""}, status=400)
def AjaxBountyForm(request, question_id):
"""
Ajax form to save Bounty of question.
"""
data = get_object_or_404(Question, pk=question_id)
if request.is_ajax and request.method == 'POST':
bounty_form = BountyForm(data=request.POST)
if bounty_form.is_valid():
if request.user.profile.set_bounties:
formCleanedData = bounty_form.cleaned_data['bounty_value']
print(formCleanedData)
# created = False
new_post = bounty_form.save(commit=False)
new_post.by_user = request.user
new_post.question_bounty = data
data.limit_exced = True
data.is_bountied = True
data.bounty_date_announced = timezone.now()
# ! Invenstor Badge - First Bounty i manually Declare on another person's Q
if data.post_owner != request.user:
# request.user.profile.investor_B = True
# request.user.profile.save()
TagBadge.objects.get_or_create(
awarded_to_user=request.user,
badge_type="BRONZE",
tag_name="Investor",
bade_position="BADGE",
questionIf_TagOf_Q=data)
if formCleanedData == "50":
Reputation.objects.create(
question_O=data,
question_rep_C=-50,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "100":
Reputation.objects.create(
question_O=data,
question_rep_C=-100,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "150":
Reputation.objects.create(
question_O=data,
question_rep_C=-150,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "200":
Reputation.objects.create(
question_O=data,
question_rep_C=-200,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "250":
Reputation.objects.create(
question_O=data,
question_rep_C=-250,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "300":
Reputation.objects.create(
question_O=data,
question_rep_C=-300,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "350":
Reputation.objects.create(
question_O=data,
question_rep_C=-350,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "400":
Reputation.objects.create(
question_O=data,
question_rep_C=-400,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "450":
Reputation.objects.create(
question_O=data,
question_rep_C=-450,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "500":
Reputation.objects.create(
question_O=data,
question_rep_C=-500,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
# UNCOMMENT IT. IT WORKED - ONLY ONE LINE
# Bronze_TagBadge.objects.get_or_create(awarded_to=request.user, badge_type="Bronze", tag_name="investor-badge")
# ! Promoter Badge - First Bounty i manually Declare on my Q
else:
# UNCOMMENT IT. IT WORKED - ONLY ONE LINE
# Bronze_TagBadge.objects.get_or_create(awarded_to=request.user, badge_type="Bronze", tag_name="promoter-badge")
# createTag = Tag.objects.get_or_create(name="Promoter Badge")
TagBadge.objects.get_or_create(
awarded_to_user=request.user,
badge_type="BRONZE",
tag_name="Promoter",
bade_position="BADGE",
questionIf_TagOf_Q=data)
# request.user.profile.save()
if formCleanedData == "50":
Reputation.objects.create(
question_O=data,
question_rep_C=-50,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "100":
Reputation.objects.create(
question_O=data,
question_rep_C=-100,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "150":
Reputation.objects.create(
question_O=data,
question_rep_C=-150,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "200":
Reputation.objects.create(
question_O=data,
question_rep_C=-200,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "250":
Reputation.objects.create(
question_O=data,
question_rep_C=-250,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "300":
Reputation.objects.create(
question_O=data,
question_rep_C=-300,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "350":
Reputation.objects.create(
question_O=data,
question_rep_C=-350,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "400":
Reputation.objects.create(
question_O=data,
question_rep_C=-400,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "450":
Reputation.objects.create(
question_O=data,
question_rep_C=-450,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
elif formCleanedData == "500":
Reputation.objects.create(
question_O=data,
question_rep_C=-500,
awarded_to=request.user,
reputation_on_what='Applied_Bounty')
# Save the thread with its ID.
new_post.save()
data.save()
t = threading.Thread(
target=end_bounty_thread, args=[
new_post.id])
t.setDaemon(True)
t.start()
# messages.success(request, "Successfully Applied Bounty")
# return redirect('qa:questionDetailView', pk=data.id,) #
# slug=slug)
ser_instance = serializers.serialize('json', [
new_post,
])
# send to client side.
return JsonResponse({"action": "saved"}, status=200)
else:
return JsonResponse({'action': "lackOfPrivelege"})
else:
return JsonResponse({"error": bounty_form.errors}, status=400)
return JsonResponse({"error": ""}, status=400)
# Under Construction - Need Improvement
def getCommunityWikiAnswerDetails(request, answer_id):
"""
Edit history of answer, if answer is part of Community Wiki
"""
post = get_object_or_404(Answer, pk=answer_id)
historyDate = post.anshis
# getAllHistoryUsers = post.anshis.history_user
for s in historyDate.all():
print(s.history_user)
countAllTheEditors = post.anshis.aggregate(countAll=Count('history_user'))
print(countAllTheEditors)
# for s in countAllTheEditors:
# k = s['history_user']
# print(k)
context = {'post': post, 'historyDate': historyDate,
'countAllTheEditors': countAllTheEditors, }
return render(request, 'qa/communityWikiPostDtls.html', context)
def ProtectQuestionAjax(request, question_id):
"""
Form to Protect Question using Ajax.
"""
data = get_object_or_404(Question, pk=question_id)
if request.is_ajax and request.method == 'POST':
protectForm = ProtectForm(data=request.POST)
if protectForm.is_valid():
if request.user.profile.protect_questions:
new_post = protectForm.save(commit=False)
new_post.protected_by = request.user
new_post.protectionRemovedBy = request.user
new_post.protecting_question = data
new_post.stillProtected = True
data.is_protected = True
data.save()
new_post.save()
ser_instance = serializers.serialize('json', [
new_post,
])
# send to client side.
return JsonResponse({"action": "formSaved"}, status=200)
else:
return JsonResponse({'action': "lackOfPrivelege"})
# else:
# return JsonResponse({"action": "cannotCreate"}, status=200)
else:
return JsonResponse({"error": protectForm.errors}, status=400)
return JsonResponse({"error": ""}, status=400)
def ReOpenVotesAjax(request, question_id):
"""
Form to save ReOpen Closed Question votes using Ajax
"""
data = get_object_or_404(Question, pk=question_id)
# QUESTION RE-OPEN FORM - START
getCreatedObject = ReOpenQuestionVotes.objects.filter(
question_to_opening=data).exclude(ended=True).first()
get_LIVE_Reviwing_object = ReviewQuestionReOpenVotes.objects.filter(
question_opened=data).exclude(is_completed=True).first()
# TAGGING - START
winned_gold_tags = TagBadge.objects.filter(
awarded_to_user=request.user, badge_type="GOLD")
# TAGGING - END
if request.is_ajax and request.method == 'POST':
re_open_form = VoteToReOpenForm(data=request.POST)
if re_open_form.is_valid():
new_post = re_open_form.save(commit=False)
formData = re_open_form.cleaned_data['why_opening']
print(formData)
if request.user.profile.cast_close_AND_Reopen_votes:
# USER WITH GOLDEN BADGE IS TRYING TO REOPEN A QUESTION THEN NO
# REVIEW WILL BE REQUIRED
if winned_gold_tags:
for s in winned_gold_tags:
if s.tag_name in data.tags.all().values_list('name', flat=True):
new_post.user = request.user
new_post.question_to_opening = data
# print("First First Statement is Excecuting in ReOpen")
# print("Golden Badge's User is ReOpening")
# createInstance, created = ReviewQuestionReOpenVotes.objects.get_or_create(question_opened=data, is_completed=False)
# createInstance.reopen_reviewed_by.add(request.user)
# get_LIVE_Reviwing_object.reopen_reviewed_by.add(request.user)
# getCreatedObject.how_many_votes_on_Open += 1
# getCreatedObject.save()
new_post.question_to_opening.is_closed = False
data.save()
new_post.save()
elif getCreatedObject:
if formData == getCreatedObject.why_opening:
new_post.user = request.user
new_post.question_to_opening = data
# print("First Statement is Excecuting in ReOpen")
# createInstance, created = ReviewQuestionReOpenVotes.objects.get_or_create(question_opened=data, is_completed=False)
# new_post.how_many_votes_on_Open += 1
getCreatedObject.how_many_votes_on_Open += 1
getCreatedObject.save()
new_post.save()
if get_LIVE_Reviwing_object:
get_LIVE_Reviwing_object.reopen_reviewed_by.add(
request.user)
else:
print("This")
new_post.user = request.user
new_post.question_to_opening = data
# print("Second Statement is Excecuting in ReOpen")
createInstance, created = ReviewQuestionReOpenVotes.objects.get_or_create(
question_opened=data, is_completed=False)
createInstance.reopen_reviewed_by.add(
request.user)
# get_LIVE_Reviwing_object.reopen_reviewed_by.add(request.user)
getCreatedObject.how_many_votes_on_Open += 1
getCreatedObject.save()
new_post.save()
createInstance.review_of = new_post
createInstance.save()
else:
new_post.user = request.user
new_post.question_to_opening = data
# print("Third Statement is Excecuting in ReOpen")
createInstance, created = ReviewQuestionReOpenVotes.objects.get_or_create(
question_opened=data, is_completed=False)
createInstance.reopen_reviewed_by.add(request.user)
new_post.how_many_votes_on_Open += 1
# print("Instance Created")
new_post.save()
createInstance.review_of = new_post
createInstance.save()
ser_instance = serializers.serialize('json', [
new_post,
])
# send to client side.
return JsonResponse({"action": "saved"}, status=200)
# return redirect('qa:questionDetailView', pk=data.id)
else:
if getCreatedObject:
if formData == getCreatedObject.why_opening:
new_post.user = request.user
new_post.question_to_opening = data
# print("First Statement is Excecuting in ReOpen")
# createInstance, created = ReviewQuestionReOpenVotes.objects.get_or_create(question_opened=data, is_completed=False)
# new_post.how_many_votes_on_Open += 1
getCreatedObject.how_many_votes_on_Open += 1
getCreatedObject.save()
new_post.save()
if get_LIVE_Reviwing_object:
get_LIVE_Reviwing_object.reopen_reviewed_by.add(
request.user)
else:
print("This")
new_post.user = request.user
new_post.question_to_opening = data
# print("Second Statement is Excecuting in ReOpen")
createInstance, created = ReviewQuestionReOpenVotes.objects.get_or_create(
question_opened=data, is_completed=False)
createInstance.reopen_reviewed_by.add(request.user)
# get_LIVE_Reviwing_object.reopen_reviewed_by.add(request.user)
getCreatedObject.how_many_votes_on_Open += 1
getCreatedObject.save()
new_post.save()
createInstance.review_of = new_post
createInstance.save()
else:
new_post.user = request.user
new_post.question_to_opening = data
# print("Third Statement is Excecuting in ReOpen")
createInstance, created = ReviewQuestionReOpenVotes.objects.get_or_create(
question_opened=data, is_completed=False)
createInstance.reopen_reviewed_by.add(request.user)
new_post.how_many_votes_on_Open += 1
# print("Instance Created")
new_post.save()
createInstance.review_of = new_post
createInstance.save()
# return redirect('qa:questionDetailView', pk=data.id)
ser_instance = serializers.serialize('json', [
new_post,
])
# send to client side.
return JsonResponse({"action": "saved"}, status=200)
else:
return JsonResponse({'action': "lackOfPrivelege"})
else:
# some form errors occured.
return JsonResponse({"error": re_open_form.errors}, status=400)
return JsonResponse({"error": ""}, status=400)
# @cache_page(60 * 15)
def questionDetailView(request, pk,): # slug):
data = get_object_or_404(Question, pk=pk)
answers_of_questions = data.answer_set.all().exclude(is_deleted=True)
STORING_THE_ORIGINAL = []
# If the last Answer's Edit is Approved then show the new edited answer but
# if last answer's edit is rejected then show previous one
for anss in answers_of_questions:
getAnsHistory = anss.history.first()
getLastEditVotes = QuestionEditVotes.objects.filter(
edited_answer=anss).last()
if getLastEditVotes:
if getLastEditVotes.rev_Action == "Approve" and getLastEditVotes.is_completed or getLastEditVotes.rev_Action == "Approve_Through_Edit" and getLastEditVotes.is_completed:
getTheOriginal = anss
print("First Statement in Storing is Excecuting")
STORING_THE_ORIGINAL.append(getTheOriginal)
else:
print("Second Statement in Storing is Excecuting")
getTheOriginal = getAnsHistory.prev_record
STORING_THE_ORIGINAL.append(getTheOriginal)
else:
getTheOriginal = anss
STORING_THE_ORIGINAL.append(getTheOriginal)
# Answer - Pagination - START
page = request.GET.get('page', 1)
paginator = Paginator(STORING_THE_ORIGINAL, 10)
try:
answers = paginator.page(page)
except PageNotAnInteger:
answers = paginator.page(1)
except EmptyPage:
answers = paginator.page(paginator.num_pages)
# Answer - Pagination - END
# # Need to Review - START
# if data.date < timezone.now() - timedelta(days=7) and data.viewers.count(
# ) < 5 and data.commentq_set.all().count() < 1 and data.answer_set.all().count() < 1:
# TagBadge.objects.get_or_create(
# awarded_to_user=data.post_owner,
# badge_type="GOLD",
# tag_name="Great Answer",
# bade_position="BADGE")
# print("Answer is older")
# # Need to Review - END
# answers_of_questions = Answer.objects.filter(questionans=data)
# QUESTION BOOKMARKED BY WHOM
bookmarks = Profile.objects.filter(bookmark_questions=data).count()
# voted_time = data.date
is_it_first = data.answer_set.first()
if is_it_first:
if is_it_first.a_vote_ups.all().count() >= 1 and is_it_first.accepted:
TagBadge.objects.get_or_create(
awarded_to_user=is_it_first.answer_owner,
badge_type="SILVER",
tag_name="Enlightened",
bade_position="BADGE",
answerIf_TagOf_A=is_it_first)
countingActiveBounties = Question.objects.filter(limit_exced=True,
is_bountied=True).count()
# BOUNTY LIMIT SETTER - START
# (If bounty of the -data- Q is more than or equal to 3 then the Bounty-
# button will hide and it will show a message instead)
# haveActiveBounties = | |
"""
Written by <NAME> copyright big willy incorporated
"""
import jax.numpy as np
from jax import random
from jax.experimental import stax
from jax.experimental.stax import Dense, Relu, Conv
from jax.nn import log_sigmoid
from utils import squeeze2d, unsqueeze2d
"""
Probability utils
"""
def sample_n01(rng, shape):
return random.normal(rng, shape)
def log_prob_n01(x):
logpx = -np.square(x) / 2 - np.log(np.sqrt(2 * np.pi))
logpx = logpx.reshape(logpx.shape[0], -1)
return np.sum(logpx, axis=-1)
"""
Overview of flow interface
flow interface: init_flow(rng, *params):
returns params, forward_fn, reverse_fn
forward_fn(params, prev_sample, prev_logp):
return this_sample, this_logp
reverse_fn(params, next_sample, next_logp):
return this_sample, this_logp
"""
"""
Utils for chaining together flows
"""
def init_flow_chain(rng, init_fns, init_params, init_batch=None):
assert len(init_fns) == len(init_params)
p_chain, f_chain, r_chain = [], [], []
for init_fn, init_param in zip(init_fns, init_params):
rng, srng = random.split(rng)
p, f, r = init_fn(srng, *init_param, init_batch=init_batch)
p_chain.append(p)
f_chain.append(f)
r_chain.append(r)
if init_batch is not None:
init_batch, _ = f(p, init_batch, 0.)
def chain_forward(params, prev_sample, prev_logp=0.):
x, logp = prev_sample, prev_logp
for p, f in zip(params, f_chain):
x, logp = f(p, x, logp)
return x, logp
def chain_reverse(params, next_sample, next_logp=0.):
x, logp = next_sample, next_logp
for p, r in reversed(list(zip(params, r_chain))):
x, logp = r(p, x, logp)
return x, logp
return p_chain, chain_forward, chain_reverse
def init_factor_out(flow_params, flow_forward, flow_reverse, split_fn, rejoin_fn):
"""
takes an existing flow and turns it into a flow which factors out dimensions
"""
def factor_forward(params, x, prev_logp=0.):
y, delta_logp = flow_forward(params, x, prev_logp=prev_logp)
y_keep, y_factorout = split_fn(y)
return y_keep, delta_logp, y_factorout
def factor_reverse(params, y_next, next_logp=0., y_factorout=None):
y = rejoin_fn(y_next, y_factorout)
return flow_reverse(params, y, next_logp=next_logp)
return flow_params, factor_forward, factor_reverse
def init_factor_out_chain(rng, init_fns, init_params, split_fn, rejoin_fn, init_batch=None):
assert len(init_fns) == len(init_params)
p_chain, f_chain, r_chain = [], [], []
for init_fn, init_param in zip(init_fns, init_params):
rng, srng = random.split(rng)
p, f, r = init_fn(srng, *init_param, init_batch=init_batch)
p, f, r = init_factor_out(p, f, r, split_fn, rejoin_fn)
p_chain.append(p)
f_chain.append(f)
r_chain.append(r)
if init_batch is not None:
init_batch, _, __ = f(p, init_batch, 0.)
def fo_chain_forward(params, prev_sample, prev_logp=0.):
x_next, logp = prev_sample, prev_logp
zs = []
for p, f in zip(params, f_chain):
x_next, logp, x_factorout = f(p, x_next, logp)
zs.append(x_factorout)
zs.append(x_next)
return zs, logp
def fo_chain_reverse(params, zs, next_logp=0.):
zs, z_next = zs[:-1], zs[-1] # split off the final z
logp = next_logp
for p, r, z_factorout in reversed(list(zip(params, r_chain, zs))):
z_next, logp = r(p, z_next, logp, z_factorout)
return z_next, logp
return p_chain, fo_chain_forward, fo_chain_reverse
"""
Some basic split/rejoin fns
"""
def split_dims(x):
d = x.shape[1] // 2
return x[:, :d], x[:, d:]
def rejoin_dims(x1, x2):
return np.concatenate([x1, x2], 1)
def split_channels(x):
c = x.shape[3] // 2
return x[:, :, :, :c], x[:, :, :, c:]
def rejoin_channels(x1, x2):
return np.concatenate([x1, x2], 3)
"""
Linear Real-NVP
"""
def init_nvp(rng, dim, flip, init_batch=None):
net_init, net_apply = stax.serial(Dense(512), Relu, Dense(512), Relu, Dense(dim))
in_shape = (-1, dim // 2)
_, net_params = net_init(rng, in_shape)
def shift_and_log_scale_fn(net_params, x1):
s = net_apply(net_params, x1)
return np.split(s, 2, axis=1)
def nvp_forward(net_params, prev_sample, prev_logp=0.):
d = dim // 2
x1, x2 = prev_sample[:, :d], prev_sample[:, d:]
if flip:
x2, x1 = x1, x2
shift, log_scale = shift_and_log_scale_fn(net_params, x1)
y2 = x2 * np.exp(log_scale) + shift
if flip:
x1, y2 = y2, x1
y = np.concatenate([x1, y2], axis=-1)
return y, prev_logp + np.sum(log_scale, axis=-1)
def nvp_reverse(net_params, next_sample, next_logp=0.):
d = dim // 2
y1, y2 = next_sample[:, :d], next_sample[:, d:]
if flip:
y1, y2 = y2, y1
shift, log_scale = shift_and_log_scale_fn(net_params, y1)
x2 = (y2 - shift) * np.exp(-log_scale)
if flip:
y1, x2 = x2, y1
x = np.concatenate([y1, x2], axis=-1)
return x, next_logp - np.sum(log_scale, axis=-1)
return net_params, nvp_forward, nvp_reverse
def init_nvp_chain(rng, dim, n=2, init_batch=None, actnorm=False):
"""Helper for making Real-NVP chains"""
flip = False
params = []
chain = []
for _ in range(n):
if actnorm:
params.append(())
chain.append(init_actnorm)
params.append((dim, flip))
chain.append(init_nvp)
flip = not flip
return init_flow_chain(rng, chain, params, init_batch=init_batch)
"""
Linear Actnorm
"""
def init_actnorm(rng, init_batch=None):
assert init_batch is not None, "Actnorm requires data-dependent init"
mu, sig = np.mean(init_batch, axis=0), np.std(init_batch, axis=0)
log_scale = np.log(sig)
params = (mu, log_scale)
def actnorm_forward(params, prev_sample, prev_logp=0.):
mu, log_scale = params
y = (prev_sample - mu[None]) * np.exp(-log_scale)[None]
return y, prev_logp - np.sum(log_scale)
def actnorm_reverse(params, next_sample, next_logp=0.):
mu, log_scale = params
x = next_sample * np.exp(log_scale)[None] + mu[None]
return x, next_logp + np.sum(log_scale)
return params, actnorm_forward, actnorm_reverse
"""
Convolutional Actnorm
"""
def init_conv_actnorm(rng, init_batch=None):
assert init_batch is not None, "Actnorm requires data-dependent init"
mu, sig = np.mean(init_batch, axis=(0, 1, 2)), np.std(init_batch, axis=(0, 1, 2))
log_scale = np.log(sig)
params = (mu, log_scale)
def actnorm_forward(params, prev_sample, prev_logp=0.):
mu, log_scale = params
y = (prev_sample - mu[None, None, None, :]) * np.exp(-log_scale)[None, None, None, :]
b, h, w, c = prev_sample.shape
return y, prev_logp - np.sum(log_scale) * w * h
def actnorm_reverse(params, next_sample, next_logp=0.):
mu, log_scale = params
x = next_sample * np.exp(log_scale)[None, None, None, :] + mu[None, None, None, :]
b, h, w, c = next_sample.shape
return x, next_logp + np.sum(log_scale) * w * h
return params, actnorm_forward, actnorm_reverse
"""
Squeeze Layers
"""
def init_squeeze(rng, init_batch=None):
params = ()
def squeeze_forward(params, prev_sample, prev_logp=0.):
return squeeze2d(prev_sample), prev_logp
def squeeze_reverse(params, next_sample, next_logp=0.):
return unsqueeze2d(next_sample), next_logp
return params, squeeze_forward, squeeze_reverse
"""
Convolutional Coupling Layers
"""
def init_conv_affine_coupling(rng, in_shape, n_channels, flip, sigmoid=True, init_batch=None):
"""
in_shape: tuple of (h, w, c)
"""
h, w, c = in_shape
assert c % 2 == 0, "channels must be even doooooooog!"
half_c = c // 2
net_init, net_apply = stax.serial(Conv(n_channels, (3, 3), padding="SAME"), Relu,
Conv(n_channels, (3, 3), padding="SAME"), Relu,
Conv(c, (3, 3), padding="SAME"))
_, net_params = net_init(rng, (-1, h, w, half_c))
def shift_and_log_scale_fn(net_params, x1):
s = net_apply(net_params, x1)
return np.split(s, 2, axis=3)
def conv_coupling_forward(net_params, prev_sample, prev_logp=0.):
x1, x2 = prev_sample[:, :, :, :half_c], prev_sample[:, :, :, half_c:]
if flip:
x2, x1 = x1, x2
shift, log_scale = shift_and_log_scale_fn(net_params, x1)
if sigmoid:
log_scale = log_sigmoid(log_scale + 2.)
y2 = x2 * np.exp(log_scale) + shift
if flip:
x1, y2 = y2, x1
y = np.concatenate([x1, y2], axis=-1)
return y, prev_logp + np.sum(log_scale, axis=(1, 2, 3))
def conv_coupling_reverse(net_params, next_sample, next_logp=0.):
y1, y2 = next_sample[:, :, :, :half_c], next_sample[:, :, :, half_c:]
if flip:
y1, y2 = y2, y1
shift, log_scale = shift_and_log_scale_fn(net_params, y1)
if sigmoid:
log_scale = log_sigmoid(log_scale + 2.)
x2 = (y2 - shift) * np.exp(-log_scale)
if flip:
y1, x2 = x2, y1
x = np.concatenate([y1, x2], axis=-1)
return x, next_logp - np.sum(log_scale, axis=(1, 2, 3))
return net_params, conv_coupling_forward, conv_coupling_reverse
"""
High level convolutional flows
"""
def init_conv_flow_step(rng, in_shape, n_channels, flip, init_batch=None):
""" One step of flow actnorm --> affine coupling"""
return init_flow_chain(rng,
[init_conv_actnorm, init_conv_affine_coupling],
[(), (in_shape, n_channels, flip)],
init_batch=init_batch)
def init_conv_flow_block(rng, in_shape, n_steps, n_channels, init_batch=None):
    """Flow block: a squeeze followed by `n_steps` flow steps.

    The squeeze halves both spatial dimensions and quadruples the channels,
    so every step inside the block operates on (h // 2, w // 2, c * 4)
    activations. The coupling orientation alternates between steps.
    """
    height, width, channels = in_shape
    post_squeeze_shape = height // 2, width // 2, channels * 4
    init_fns = [init_squeeze]
    init_args = [()]
    flip = False
    for _ in range(n_steps):
        init_fns.append(init_conv_flow_step)
        init_args.append((post_squeeze_shape, n_channels, flip))
        flip = not flip
    return init_flow_chain(rng, init_fns, init_args, init_batch=init_batch)
def init_multiscale_conv_flow(rng, in_shape, n_channels, n_blocks, n_steps, init_batch=None):
    """Create a multi-scale convolutional flow, Glow-like but with no 1x1 convs.

    After every block half of the channels are factored out: the squeeze turns
    c channels into 4c, and keeping half of those leaves the next block with
    (h // 2, w // 2, c * 2) inputs.
    """
    block_inits = []
    block_args = []
    shape = in_shape
    for _ in range(n_blocks):
        block_inits.append(init_conv_flow_block)
        block_args.append((shape, n_steps, n_channels))
        h, w, c = shape
        shape = h // 2, w // 2, c * 2
    return init_factor_out_chain(rng, block_inits, block_args, split_channels, rejoin_channels, init_batch=init_batch)
"""
Utilities to build functions for chain flows training and eval
"""
def make_log_prob_fn(forward_fn, base_dist_log_prob):
def log_prob(p, x):
z, logp = forward_fn(p, x)
return base_dist_log_prob(z) + logp
return log_prob
def make_sample_fn(reverse_fn, base_dist_sample):
    """Build a sampler: draw a latent from the base distribution, invert the flow."""
    def sample(rng, params, n):
        latent = base_dist_sample(rng, n)
        sample_x, _ = reverse_fn(params, latent, 0.)
        return sample_x
    return sample
"""
Same shit for multi-scale flows
"""
def make_multiscale_log_prob_fn(forward_fn, base_dist_log_prob):
def log_prob(p, x):
zs, delta_logp = forward_fn(p, x)
logpz = np.sum([base_dist_log_prob(z) for z in zs], axis=0)
return logpz + delta_logp
return log_prob
def make_multiscale_sample_fn(reverse_fn, base_dist_sample, shapes):
    """Build a sampler drawing one latent per scale, then inverting the flow."""
    def sample(rng, params, n):
        latents = [base_dist_sample(rng, (n,) + shape) for shape in shapes]
        sample_x, _ = reverse_fn(params, latents, 0.)
        return sample_x
    return sample
if __name__ == "__main__":
rng = random.PRNGKey(0)
rng, srng = random.split(rng)
init_batch = random.normal(srng, (13, 32, 32, 4))
#ps, forward, reverse = init_conv_actnorm(rng, init_batch=init_batch)
#1/0
#ps, forward, reverse = init_conv_affine_coupling(rng, (32, 32, 4), 64, True, init_batch=init_batch)
#1/0
#ps, forward, reverse = init_conv_flow_step(rng, (32, 32, 4), 64, True, init_batch=init_batch)
#1/0
init_batch = random.normal(srng, (13, 32, 32, 3))
#ps, forward, reverse = init_conv_flow_block(rng, (32, | |
<filename>scene_generation/scene_generation/trainer.py
import os
import numpy as np
from random import randint
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from tensorboardX import SummaryWriter
import pickle
from scene_generation.data import imagenet_deprocess_batch
from scene_generation.discriminators import AcCropDiscriminator, define_mask_D, define_D
from scene_generation.losses import get_gan_losses, GANLoss, VGGLoss
from scene_generation.model import Model
from scene_generation.utils import LossManager, Result
from lib.object_detector import gather_res
class Trainer(nn.Module):
def __init__(self, args, vocab, checkpoint):
    """Build the full training harness: generator, discriminators and losses.

    :param args: parsed command-line arguments (hyper-parameters and paths).
    :param vocab: dataset vocabulary; 'object_to_idx' maps object names to ids.
    :param checkpoint: checkpoint dict; the init_* helpers both read model
        kwargs from it (on restart) and store them back into it.
    """
    super(Trainer, self).__init__()
    self.vocab = vocab
    self.args = args
    self.num_obj = len(vocab['object_to_idx'])
    print(args.output_dir)
    # TensorBoard writer; event files go into the run's output directory.
    self.writer = SummaryWriter(args.output_dir)
    # One random RGB color per object category (presumably for layout
    # visualization — not used within this chunk; TODO confirm).
    self.colors = torch.randint(0, 256, [self.num_obj, 3]).float()
    self.gan_g_loss, self.gan_d_loss = get_gan_losses(args.gan_loss_type)
    self.init_generator(args, checkpoint)
    self.init_image_discriminator(args, checkpoint)
    self.init_obj_discriminator(args, checkpoint)
    self.init_mask_discriminator(args, checkpoint)
    # When False, forward() returns early without running any discriminator.
    self.forward_D = True
    self.features = None
    if not args.use_gt_textures:
        # Pre-computed appearance features are mandatory when textures are
        # sampled instead of taken from ground truth.
        features_path = os.path.join(args.output_dir, args.features_file_name)
        print(features_path)
        if os.path.isfile(features_path):
            self.features = np.load(features_path, allow_pickle=True).item()
        else:
            raise ValueError('No features file')
        # crops_path = os.path.join(args.output_dir, args.features_file_name[:-4] + "_crops.pkl")
        # print(crops_path)
        # if os.path.isfile(crops_path):
        #     self.crops_dict = pickle.load(open(crops_path, "rb"))
        # else:
        #     raise ValueError('No crops file')
def init_generator(self, args, checkpoint):
    """Create the generator Model, its criterions and its Adam optimizer.

    Model kwargs come from the checkpoint when restarting, otherwise from
    args; either way they are (re)stored in checkpoint['model_kwargs'] so a
    later restart rebuilds the same architecture.
    """
    if args.restore_from_checkpoint:
        model_kwargs = checkpoint['model_kwargs']
    else:
        model_kwargs = {
            'vocab': self.vocab,
            'image_size': args.image_size,
            'embedding_dim': args.embedding_dim,
            'gconv_dim': args.gconv_dim,
            'gconv_hidden_dim': args.gconv_hidden_dim,
            'gconv_num_layers': args.gconv_num_layers,
            'mlp_normalization': args.mlp_normalization,
            'appearance_normalization': args.appearance_normalization,
            'activation': args.activation,
            'mask_size': args.mask_size,
            'n_downsample_global': args.n_downsample_global,
            'box_dim': args.box_dim,
            'use_attributes': args.use_attributes,
            'box_noise_dim': args.box_noise_dim,
            'mask_noise_dim': args.mask_noise_dim,
            'pool_size': args.pool_size,
            'rep_size': args.rep_size,
        }
    checkpoint['model_kwargs'] = model_kwargs
    self.model = model = Model(**model_kwargs).to('cuda')
    # model.type(torch.cuda.FloatTensor)
    # Perceptual (VGG) loss is optional; L1 feature-matching and the GAN
    # criterion are always created.
    self.criterionVGG = VGGLoss() if args.vgg_features_weight > 0 else None
    self.criterionFeat = torch.nn.L1Loss()
    self.criterionGAN = GANLoss(use_lsgan=not args.no_lsgan, tensor=torch.cuda.FloatTensor)
    self.optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, betas=(args.beta1, 0.999))
def init_obj_discriminator(self, args, checkpoint):
    """Create the object-crop discriminator when d_obj_weight > 0.

    Leaves self.obj_discriminator / self.optimizer_d_obj as None when the
    loss weight is zero. Kwargs are restored from the checkpoint on restart
    and stored back into it otherwise.
    """
    obj_discriminator, d_obj_kwargs, optimizer_d_obj = None, {}, None
    if args.d_obj_weight > 0:
        if args.restore_from_checkpoint:
            d_obj_kwargs = checkpoint['d_obj_kwargs']
        else:
            d_obj_kwargs = {
                'vocab': self.vocab,
                'arch': args.d_obj_arch,
                'normalization': args.d_normalization,
                'activation': args.d_activation,
                'padding': args.d_padding,
                'object_size': args.crop_size,
            }
        checkpoint['d_obj_kwargs'] = d_obj_kwargs
        obj_discriminator = AcCropDiscriminator(**d_obj_kwargs).to('cuda')
        # obj_discriminator.type(torch.cuda.FloatTensor)
        obj_discriminator.train()
        optimizer_d_obj = torch.optim.Adam(obj_discriminator.parameters(), lr=args.learning_rate,
                                           betas=(args.beta1, 0.999))
    self.obj_discriminator = obj_discriminator
    self.optimizer_d_obj = optimizer_d_obj
def init_mask_discriminator(self, args, checkpoint):
    """Create the per-object mask discriminator when d_mask_weight > 0.

    Leaves self.mask_discriminator / self.optimizer_d_mask as None when the
    loss weight is zero. Note the dedicated mask learning rate.
    """
    mask_discriminator, d_mask_kwargs, optimizer_d_mask = None, {}, None
    if args.d_mask_weight > 0:
        if args.restore_from_checkpoint:
            d_mask_kwargs = checkpoint['d_mask_kwargs']
        else:
            # LSGAN does not need a final sigmoid; plain GAN loss does.
            use_sigmoid = args.no_lsgan
            # Masks are single-channel inputs.
            netD_input_nc = 1
            d_mask_kwargs = {
                'input_nc': netD_input_nc,
                'ndf': args.ndf_mask,
                'n_layers_D': args.n_layers_D_mask,
                'norm': args.norm_D_mask,
                'use_sigmoid': use_sigmoid,
                'num_D': args.num_D_mask,
                'num_objects': self.num_obj
            }
        checkpoint['d_mask_kwargs'] = d_mask_kwargs
        mask_discriminator = define_mask_D(**d_mask_kwargs).to('cuda')
        # mask_discriminator.type(torch.cuda.FloatTensor)
        mask_discriminator.train()
        optimizer_d_mask = torch.optim.Adam(mask_discriminator.parameters(), lr=args.mask_learning_rate,
                                            betas=(args.beta1, 0.999))
    self.mask_discriminator = mask_discriminator
    self.optimizer_d_mask = optimizer_d_mask
def init_image_discriminator(self, args, checkpoint):
    """Create the whole-image (layout-conditioned) discriminator.

    When d_img_weight == 0, self.netD and self.optimizer_d_img are set to
    None and the method returns early.
    """
    if args.d_img_weight == 0:
        self.netD = None
        self.optimizer_d_img = None
        return
    # LSGAN does not need a final sigmoid; plain GAN loss does.
    use_sigmoid = args.no_lsgan
    # The discriminator sees the layout (num_obj + rep_size channels)
    # concatenated with the image (output_nc channels).
    netD_input_nc = self.num_obj + args.rep_size + args.output_nc
    if args.restore_from_checkpoint:
        d_img_kwargs = checkpoint['d_img_kwargs']
    else:
        d_img_kwargs = {
            'input_nc': netD_input_nc,
            'ndf': args.ndf,
            'n_layers_D': args.n_layers_D,
            'norm': args.norm_D,
            'use_sigmoid': use_sigmoid,
            'num_D': args.num_D,
        }
    checkpoint['d_img_kwargs'] = d_img_kwargs
    self.netD = netD = define_D(**d_img_kwargs).to('cuda')
    # netD.type(torch.cuda.FloatTensor)
    netD.train()
    self.optimizer_d_img = torch.optim.Adam(list(netD.parameters()), lr=args.learning_rate,
                                            betas=(args.beta1, 0.999))
def restore_checkpoint(self, checkpoint):
    """Reload the generator and every active discriminator, together with
    their optimizers, from a checkpoint dictionary."""
    self.model.load_state_dict(checkpoint['model_state'])
    self.optimizer.load_state_dict(checkpoint['optim_state'])
    optional_parts = [
        (self.obj_discriminator, self.optimizer_d_obj, 'd_obj_state', 'd_obj_optim_state'),
        (self.mask_discriminator, self.optimizer_d_mask, 'd_mask_state', 'd_mask_optim_state'),
        (self.netD, self.optimizer_d_img, 'd_img_state', 'd_img_optim_state'),
    ]
    for module, module_optimizer, state_key, optim_key in optional_parts:
        if module is not None:
            module.load_state_dict(checkpoint[state_key])
            module_optimizer.load_state_dict(checkpoint[optim_key])
def save_checkpoint(self, checkpoint, t, args, epoch, train_results, val_results):
    """Log train/val metrics, snapshot all model/optimizer states into
    `checkpoint`, keep best-so-far states (by validation inception score),
    and write the checkpoint to disk.

    :param checkpoint: mutable checkpoint dict, updated in place.
    :param t: global iteration counter.
    :param args: run arguments (print_every, output_dir, checkpoint_name).
    :param epoch: current epoch number.
    :param train_results: (avg_iou, inception_mean, inception_std, _) on train.
    :param val_results: same tuple shape on validation.
    """
    print('checking on train')
    index = int(t / args.print_every)
    t_avg_iou, t_inception_mean, t_inception_std, _ = train_results
    self.writer.add_scalar('checkpoint/{}'.format('train_iou'), t_avg_iou, index)
    self.writer.add_scalar('checkpoint/{}'.format('train_inception_mean'), t_inception_mean, index)
    self.writer.add_scalar('checkpoint/{}'.format('train_inception_std'), t_inception_std, index)
    checkpoint['checkpoint_ts'].append(t)
    checkpoint['train_inception'].append(t_inception_mean)
    print('checking on val')
    val_avg_iou, val_inception_mean, val_inception_std, _ = val_results
    self.writer.add_scalar('checkpoint/{}'.format('val_iou'), val_avg_iou, index)
    self.writer.add_scalar('checkpoint/{}'.format('val_inception_mean'), val_inception_mean, index)
    self.writer.add_scalar('checkpoint/{}'.format('val_inception_std'), val_inception_std, index)
    # BUG FIX: decide whether this is the best validation score BEFORE
    # appending the new value. The original compared against a list that
    # already contained val_inception_mean, so `max(list) < mean` was never
    # true and the best-state snapshot was only ever taken once.
    is_best = (len(checkpoint['best_t']) == 0
               or val_inception_mean > max(checkpoint['val_inception']))
    checkpoint['val_inception'].append(val_inception_mean)
    print('train iou: ', t_avg_iou)
    print('val iou: ', val_avg_iou)
    # Snapshot current states for every active sub-network.
    if self.obj_discriminator is not None:
        checkpoint['d_obj_state'] = self.obj_discriminator.state_dict()
        checkpoint['d_obj_optim_state'] = self.optimizer_d_obj.state_dict()
    if self.mask_discriminator is not None:
        checkpoint['d_mask_state'] = self.mask_discriminator.state_dict()
        checkpoint['d_mask_optim_state'] = self.optimizer_d_mask.state_dict()
    if self.netD is not None:
        checkpoint['d_img_state'] = self.netD.state_dict()
        checkpoint['d_img_optim_state'] = self.optimizer_d_img.state_dict()
    checkpoint['model_state'] = self.model.state_dict()
    checkpoint['optim_state'] = self.optimizer.state_dict()
    if is_best:
        # Copy the just-saved current states into the *_best_state slots.
        checkpoint['best_t'].append(t)
        checkpoint['d_obj_best_state'] = checkpoint['d_obj_state']
        checkpoint['d_obj_optim_best_state'] = checkpoint['d_obj_optim_state']
        checkpoint['d_mask_best_state'] = checkpoint['d_mask_state']
        checkpoint['d_mask_optim_best_state'] = checkpoint['d_mask_optim_state']
        checkpoint['d_img_best_state'] = checkpoint['d_img_state']
        checkpoint['d_img_optim_best_state'] = checkpoint['d_img_optim_state']
        checkpoint['model_best_state'] = checkpoint['model_state']
        checkpoint['optim_best_state'] = checkpoint['optim_state']
    checkpoint['counters']['t'] = t
    checkpoint['counters']['epoch'] = epoch
    checkpoint_path = os.path.join(args.output_dir, '%s_with_model.pt' % args.checkpoint_name)
    print('Saving checkpoint to ', checkpoint_path)
    torch.save(checkpoint, checkpoint_path)
def forward(self, gt_imgs, img_offset, boxes_gt, gt_classes, gt_fmaps, test_mode=False, use_gt_box=False, features=None):
    """Run the generator (and, unless self.forward_D is False, all
    discriminators) on one batch and bundle everything into a Result.

    :param gt_imgs: ground-truth images of this (sub-)batch.
    :param img_offset: offset subtracted from gt_classes[:, 0] to turn
        global image ids into local batch indices (multi-GPU scatter).
    :param boxes_gt: ground-truth object boxes.
    :param gt_classes: per-object rows of (image_id, class_id).
    :param gt_fmaps: feature maps forwarded to the generator.
    :param test_mode: forwarded to the generator.
    :param use_gt_box: forwarded to the generator.
    :param features: unused here; appearance features are drawn from
        self.features internally when textures are sampled.
    """
    # Column 1 is the object class, column 0 the (global) image index.
    objs = gt_classes[:, 1]
    obj_to_img = gt_classes[:, 0] - img_offset
    # print("obj_to_img.min(), obj_to_img.max(), len(imgs) {} {} {}".format(obj_to_img.min(), obj_to_img.max(), len(imgs)))
    assert obj_to_img.min() >= 0 and obj_to_img.max() < len(gt_imgs), \
        "obj_to_img.min() >= 0 and obj_to_img.max() < len(gt_imgs) is not satidfied: {} {} {}" \
        .format(obj_to_img.min(), obj_to_img.max(), len(gt_imgs))
    if self.args.use_gt_textures:
        all_features = None
        change_indexes = None
        crop_indexes = None
    else:
        # all_features = []
        # for obj_name in objs:
        #     obj_feature = self.features[obj_name.item()]
        #     random_index = randint(0, obj_feature.shape[0] - 1)
        #     feat = torch.from_numpy(obj_feature[random_index, :]).type(torch.float32).cuda()
        #     all_features.append(feat)
        # Pick ONE random object per image and give it a randomly drawn
        # appearance feature; all other objects keep feature None.
        all_features = [None] * len(objs)
        change_indexes = []
        crop_indexes = []
        for ind in range(len(gt_imgs)):
            obj_index = (obj_to_img == ind).nonzero()[:, 0]
            change_ind = obj_index[torch.randperm(len(obj_index))[0]]
            change_indexes.append(change_ind)
            obj_feature = self.features[objs[change_ind].item()]
            random_index = randint(0, obj_feature.shape[0] - 1)
            crop_indexes.append(random_index)
            feat = torch.from_numpy(obj_feature[random_index, :]).type(torch.float32).cuda()
            all_features[change_ind] = feat
        change_indexes = torch.LongTensor(change_indexes).cuda()
        crop_indexes = torch.LongTensor(crop_indexes).cuda()
    imgs_pred, boxes_pred, masks_pred, layout, layout_pred, layout_wrong, obj_repr, crops = self.model(gt_imgs, objs, gt_fmaps,
        obj_to_img, boxes_gt=boxes_gt, test_mode=test_mode, use_gt_box=use_gt_box, features=all_features)
    if not self.forward_D:
        # Generator-only pass: return outputs without discriminator losses.
        return Result(
            imgs=gt_imgs, imgs_pred=imgs_pred, obj_repr=obj_repr, objs=objs, crops=crops,
            change_indexes=change_indexes, crop_indexes=crop_indexes, boxes=boxes_gt, obj_to_img=obj_to_img + img_offset
        )
    # NOTE(review): scores_fake is overwritten below (mask branch) and again
    # by the detached obj-discriminator pass; g_fake_crops is never used.
    scores_fake, ac_loss, g_fake_crops = self.obj_discriminator(imgs_pred, objs, boxes_gt, obj_to_img)
    mask_loss, loss_mask_feat = None, None
    if self.mask_discriminator is not None:
        O, _, mask_size = masks_pred.shape
        one_hot_size = (O, self.num_obj)
        one_hot_obj = torch.zeros(one_hot_size, dtype=masks_pred.dtype, device=masks_pred.device)
        one_hot_obj = one_hot_obj.scatter_(1, objs.view(-1, 1).long(), 1.0)
        scores_fake = self.mask_discriminator(masks_pred.unsqueeze(1), one_hot_obj)
        mask_loss = self.criterionGAN(scores_fake, True)
        if self.args.d_mask_features_weight > 0:
            # NOTE(review): `masks` is undefined in this scope — this line
            # would raise NameError if it ever ran (it appears to refer to a
            # removed ground-truth-masks argument; see the commented block
            # below). The assert further down forces mask_discriminator to
            # be None, so this branch is currently dead.
            scores_real = self.mask_discriminator(masks.float().unsqueeze(1), one_hot_obj)
            loss_mask_feat = self.calculate_features_loss(scores_fake, scores_real)
    g_gan_img_loss, loss_g_gan_feat_img = None, None
    if self.netD is not None:
        # Train textures
        pred_real = self.netD.forward(torch.cat((layout_pred, gt_imgs), dim=1))
        # Train image generation
        match_layout = layout_pred.detach()
        img_pred_fake = self.netD.forward(torch.cat((match_layout, imgs_pred), dim=1))
        g_gan_img_loss = self.criterionGAN(img_pred_fake, True)
        if self.args.d_img_features_weight > 0:
            loss_g_gan_feat_img = self.calculate_features_loss(img_pred_fake, pred_real)
    # Detach generator outputs before the discriminator-side passes so the
    # discriminator losses do not backpropagate into the generator.
    imgs_pred_detach = imgs_pred.detach()
    # masks_pred_detach = masks_pred.detach()
    # boxes_pred_detach = boxes.detach()
    layout_pred_detach = layout_pred.detach()
    layout_wrong_detach = layout_wrong.detach()
    # trainer.train_mask_discriminator(masks, masks_pred_detach, objs)
    fake_loss, real_loss = None, None
    # NOTE(review): this assert contradicts the mask branch above — with a
    # mask discriminator configured, forward() cannot complete.
    assert self.mask_discriminator is None, "self.mask_discriminator is not None, check please"
    # if self.mask_discriminator is not None:
    #     O, _, mask_size = masks_pred.shape
    #     one_hot_size = (O, self.num_obj)
    #     one_hot_obj = torch.zeros(one_hot_size, dtype=masks_pred.dtype, device=masks_pred.device)
    #     one_hot_obj = one_hot_obj.scatter_(1, objs.view(-1, 1).long(), 1.0)
    #
    #     scores_fake = self.mask_discriminator(masks_pred.unsqueeze(1), one_hot_obj)
    #     scores_real = self.mask_discriminator(masks.float().unsqueeze(1), one_hot_obj)
    #
    #     fake_loss = self.criterionGAN(scores_fake, False)
    #     real_loss = self.criterionGAN(scores_real, True)
    # trainer.train_obj_discriminator(imgs, imgs_pred_detach, objs, boxes, boxes_pred_detach, obj_to_img)
    d_obj_gan_loss, ac_loss_fake, ac_loss_real = None, None, None
    d_fake_crops, d_real_crops = None, None
    if self.obj_discriminator is not None:
        scores_fake, ac_loss_fake, d_fake_crops = self.obj_discriminator(imgs_pred_detach, objs, boxes_gt,
                                                                         obj_to_img)
        scores_real, ac_loss_real, d_real_crops = self.obj_discriminator(gt_imgs, objs, boxes_gt, obj_to_img)
        d_obj_gan_loss = self.gan_d_loss(scores_real, scores_fake)
    # trainer.train_image_discriminator(imgs, imgs_pred_detach, layout_detach, layout_wrong_detach)
    loss_d_fake_img, loss_d_wrong_texture, loss_D_real = None, None, None
    if self.netD is not None:
        # Fake images, Real layout
        pred_fake_pool_img = self.discriminate(layout_pred_detach, imgs_pred_detach)
        loss_d_fake_img = self.criterionGAN(pred_fake_pool_img, False)
        # Real images, Right layout Wrong textures
        pred_wrong_pool_img = self.discriminate(layout_wrong_detach, gt_imgs)
        loss_d_wrong_texture = self.criterionGAN(pred_wrong_pool_img, False)
        # Real Detection and Loss
        pred_real = self.discriminate(layout_pred_detach, gt_imgs)
        loss_D_real = self.criterionGAN(pred_real, True)
    return Result(
        imgs=gt_imgs, imgs_pred=imgs_pred, layout_pred=layout_pred,
        scores_fake=scores_fake, ac_loss=ac_loss, mask_loss=mask_loss, loss_mask_feat=loss_mask_feat,
        g_gan_img_loss=g_gan_img_loss, loss_g_gan_feat_img=loss_g_gan_feat_img, d_obj_gan_loss=d_obj_gan_loss,
        ac_loss_real=ac_loss_real, ac_loss_fake=ac_loss_fake, fake_loss=fake_loss, real_loss=real_loss,
        loss_d_fake_img=loss_d_fake_img, loss_d_wrong_texture=loss_d_wrong_texture, loss_D_real=loss_D_real,
        d_fake_crops=d_fake_crops, d_real_crops=d_real_crops,
    )
def __getitem__(self, batch):
    """Hack to do multi-GPU training: indexing the trainer with a
    scatterable batch runs forward() on each GPU replica and gathers the
    per-device Results back onto device 0.
    """
    # Split the batch across devices in place.
    batch.scatter()
    if self.args.num_gpus == 1:
        return self(*batch[0])
    replicas = nn.parallel.replicate(self, devices=list(range(self.args.num_gpus)))
    outputs = nn.parallel.parallel_apply(replicas, [batch[i] for i in range(self.args.num_gpus)])
    return gather_res(outputs, 0, dim=0)
# def train_generator(self, imgs, imgs_pred, masks, masks_pred, layout,
# objs, boxes, boxes_pred, obj_to_img, use_gt):
def train_generator(self, imgs, imgs_pred, use_gt, scores_fake, ac_loss, mask_loss, loss_mask_feat, g_gan_img_loss,
loss_g_gan_feat_img):
args = self.args
self.generator_losses = LossManager()
if use_gt:
if args.l1_pixel_loss_weight > 0:
l1_pixel_loss = F.l1_loss(imgs_pred, imgs)
self.generator_losses.add_loss(l1_pixel_loss, 'L1_pixel_loss', args.l1_pixel_loss_weight)
# loss_bbox = F.mse_loss(boxes_pred, boxes)
# self.generator_losses.add_loss(loss_bbox, 'bbox_pred', args.bbox_pred_loss_weight)
# VGG feature matching loss
if self.criterionVGG is not None:
loss_G_VGG = self.criterionVGG(imgs_pred, imgs)
self.generator_losses.add_loss(loss_G_VGG, 'g_vgg', args.vgg_features_weight)
# scores_fake, ac_loss, g_fake_crops = self.obj_discriminator(imgs_pred, objs, boxes, obj_to_img)
self.generator_losses.add_loss(ac_loss.mean(), 'ac_loss', args.ac_loss_weight)
weight = args.d_obj_weight
self.generator_losses.add_loss(self.gan_g_loss(scores_fake), 'g_gan_obj_loss', weight)
if self.mask_discriminator is not None:
# O, _, mask_size = masks_pred.shape
# one_hot_size = (O, self.num_obj)
# one_hot_obj = torch.zeros(one_hot_size, dtype=masks_pred.dtype, device=masks_pred.device)
# one_hot_obj = one_hot_obj.scatter_(1, objs.view(-1, 1).long(), 1.0)
#
# scores_fake = self.mask_discriminator(masks_pred.unsqueeze(1), one_hot_obj)
# mask_loss = self.criterionGAN(scores_fake, True)
self.generator_losses.add_loss(mask_loss.mean(), 'g_gan_mask_obj_loss', args.d_mask_weight)
# GAN feature matching loss
if args.d_mask_features_weight > 0:
# scores_real = self.mask_discriminator(masks.float().unsqueeze(1), one_hot_obj)
# loss_mask_feat = self.calculate_features_loss(scores_fake, scores_real)
self.generator_losses.add_loss(loss_mask_feat.mean(), 'g_mask_features_loss', args.d_mask_features_weight)
if self.netD is not None:
# # Train textures
# pred_real = self.netD.forward(torch.cat((layout, imgs), dim=1))
#
# # Train image generation
# match_layout = layout.detach()
# img_pred_fake = self.netD.forward(torch.cat((match_layout, imgs_pred), | |
or isinstance(
args[0], datetime.date
):
t = Epoch(args[0].year, args[0].month, args[0].day, **kwargs)
else:
raise TypeError("Invalid input type")
elif len(args) == 2:
raise ValueError("Invalid input: Date given is not valid")
elif len(args) >= 3:
# We will rely on Epoch capacity to handle improper input
t = Epoch(args[0], args[1], args[2], **kwargs)
return t
@staticmethod
def is_julian(year, month, day):
"""This method returns True if given date is in the Julian calendar.
:param year: Year
:type y: int
:param month: Month
:type m: int
:param day: Day
:type day: int
:returns: Whether the provided date belongs to Julian calendar or not.
:rtype: bool
>>> Epoch.is_julian(1997, 5, 27.1)
False
>>> Epoch.is_julian(1397, 7, 7.0)
True
"""
if (
(year < 1582)
or (year == 1582 and month < 10)
or (year == 1582 and month == 10 and day < 5.0)
):
return True
else:
return False
def julian(self):
    """Tell whether the date held by this Epoch belongs to the Julian calendar.

    :returns: Whether this Epoch object holds a date belonging to the
        Julian calendar or not.
    :rtype: bool

    >>> e = Epoch(1997, 5, 27.1)
    >>> e.julian()
    False
    >>> e = Epoch(1397, 7, 7.0)
    >>> e.julian()
    True
    """
    year, month, day = self.get_date()
    return Epoch.is_julian(year, month, day)
@staticmethod
def get_month(month, as_string=False):
"""Method to get the month as a integer in the [1, 12] range, or as a
full name.
:param month: Month, in numeric, short name or long name format
:type month: int, float, str
:param as_string: Whether the output will be numeric, or a long name.
:type as_string: bool
:returns: Month as integer in the [1, 12] range, or as a long name.
:rtype: int, str
:raises: ValueError if input month value is invalid.
>>> Epoch.get_month(4.0)
4
>>> Epoch.get_month('Oct')
10
>>> Epoch.get_month('FEB')
2
>>> Epoch.get_month('August')
8
>>> Epoch.get_month('august')
8
>>> Epoch.get_month('NOVEMBER')
11
>>> Epoch.get_month(9.0, as_string=True)
'September'
>>> Epoch.get_month('Feb', as_string=True)
'February'
>>> Epoch.get_month('March', as_string=True)
'March'
"""
months_mmm = [
"Jan",
"Feb",
"Mar",
"Apr",
"May",
"Jun",
"Jul",
"Aug",
"Sep",
"Oct",
"Nov",
"Dec",
]
months_full = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
if isinstance(month, (int, float)):
month = int(month) # Truncate if it has decimals
if month >= 1 and month <= 12:
if not as_string:
return month
else:
return months_full[month - 1]
else:
raise ValueError("Invalid value for the input month")
elif isinstance(month, str):
month = month.strip().capitalize()
if len(month) == 3:
if month in months_mmm:
if not as_string:
return months_mmm.index(month) + 1
else:
return months_full[months_mmm.index(month)]
else:
raise ValueError("Invalid value for the input month")
else:
if month in months_full:
if not as_string:
return months_full.index(month) + 1
else:
return month
else:
raise ValueError("Invalid value for the input month")
@staticmethod
def is_leap(year):
"""Method to check if a given year is a leap year.
:param year: Year to be checked.
:type year: int, float
:returns: Whether or not year is a leap year.
:rtype: bool
:raises: ValueError if input year value is invalid.
>>> Epoch.is_leap(2003)
False
>>> Epoch.is_leap(2012)
True
>>> Epoch.is_leap(1900)
False
>>> Epoch.is_leap(-1000)
True
>>> Epoch.is_leap(1000)
True
"""
if isinstance(year, (int, float)):
# Mind the difference between Julian and Gregorian calendars
if year >= 1582:
year = iint(year)
return calendar.isleap(year)
else:
return (abs(year) % 4) == 0
else:
raise ValueError("Invalid value for the input year")
def leap(self):
    """Check whether the year held by this Epoch object is a leap year.

    :returns: Whether the year in this Epoch object is a leap year.
    :rtype: bool

    >>> e = Epoch(2003, 1, 1)
    >>> e.leap()
    False
    >>> e = Epoch(2012, 1, 1)
    >>> e.leap()
    True
    >>> e = Epoch(-1000, 1, 1)
    >>> e.leap()
    True
    """
    year, _month, _day = self.get_date()
    return Epoch.is_leap(year)
@staticmethod
def get_doy(yyyy, mm, dd):
"""This method returns the Day Of Year (DOY) for the given date.
:param yyyy: Year, in four digits format
:type yyyy: int, float
:param mm: Month, in numeric format (1 = January, 2 = February, etc)
:type mm: int, float
:param dd: Day, in numeric format
:type dd: int, float
:returns: Day Of Year (DOY).
:rtype: float
:raises: ValueError if input values correspond to a wrong date.
>>> Epoch.get_doy(1999, 1, 29)
29.0
>>> Epoch.get_doy(1978, 11, 14)
318.0
>>> Epoch.get_doy(2017, 12, 31.7)
365.7
>>> Epoch.get_doy(2012, 3, 3.1)
63.1
>>> Epoch.get_doy(-400, 2, 29.9)
60.9
"""
# Let's carry out first some basic checks
if dd < 1 or dd >= 32 or mm < 1 or mm > 12:
raise ValueError("Invalid input data")
day = int(dd)
frac = dd % 1
if yyyy >= 1: # datetime's minimum year is 1
try:
d = datetime.date(yyyy, mm, day)
except ValueError:
raise ValueError("Invalid input date")
doy = d.timetuple().tm_yday
else:
k = 2 if Epoch.is_leap(yyyy) else 1
doy = (iint((275.0 * mm) / 9.0) -
k * iint((mm + 9.0) / 12.0) + day - 30.0)
return float(doy + frac)
@staticmethod
def doy2date(year, doy):
"""This method takes a year and a Day Of Year values, and returns the
corresponding date.
:param year: Year, in four digits format
:type year: int, float
:param doy: Day of Year number
:type doy: int, float
:returns: Year, month, day.
:rtype: tuple
:raises: ValueError if either input year or doy values are invalid.
>>> t = Epoch.doy2date(1999, 29)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
1999/1/29.0
>>> t = Epoch.doy2date(2017, 365.7)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
2017/12/31.7
>>> t = Epoch.doy2date(2012, 63.1)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
2012/3/3.1
>>> t = Epoch.doy2date(-1004, 60)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
-1004/2/29.0
>>> t = Epoch.doy2date(0, 60)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
0/2/29.0
>>> t = Epoch.doy2date(1, 60)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
1/3/1.0
>>> t = Epoch.doy2date(-1, 60)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
-1/3/1.0
>>> t = Epoch.doy2date(-2, 60)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
-2/3/1.0
>>> t = Epoch.doy2date(-3, 60)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
-3/3/1.0
>>> t = Epoch.doy2date(-4, 60)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
-4/2/29.0
>>> t = Epoch.doy2date(-5, 60)
>>> print("{}/{}/{}".format(t[0], t[1], round(t[2], 1)))
-5/3/1.0
"""
if isinstance(year, (int, float)) and isinstance(doy, (int, float)):
frac = float(doy % 1)
doy = int(doy)
if year >= 1: # datetime's minimum year is 1
ref = datetime.date(year, 1, 1)
mydate = datetime.date.fromordinal(ref.toordinal() + doy - 1)
return year, mydate.month, mydate.day + frac
else:
# The algorithm provided by Meeus doesn't work for years below
# +1. This little hack solves that problem (the 'if' result is
# inverted here).
k = 1 if Epoch.is_leap(year) else 2
if doy < 32:
m = 1
else:
m = iint((9.0 * (k + doy)) / 275.0 + 0.98)
d = (doy - iint((275.0 * m) / 9.0) +
k * iint((m + 9.0) / 12.0) + 30)
return year, int(m), d + frac
else:
raise ValueError("Invalid input values")
@staticmethod
def leap_seconds(year, month):
    """Return the leap seconds accumulated up to the given year and month.

    Entries in LEAP_TABLE take effect mid-year (January or July), so the
    lookup key is year + 0.25 for the first half and year + 0.75 for the
    second half.

    :param year: Year
    :type year: int
    :param month: Month, in numeric format ([1:12] range)
    :type month: int
    :returns: Leap seconds accumulated for given year and month.
    :rtype: int

    >>> Epoch.leap_seconds(1972, 4)
    0
    >>> Epoch.leap_seconds(1983, 7)
    12
    >>> Epoch.leap_seconds(2018, 7)
    27
    """
    thresholds = sorted(LEAP_TABLE.keys())
    fractional_year = year + month / 12.0
    # Clamp at both ends of the table.
    if fractional_year <= thresholds[0]:
        return 0
    if fractional_year >= thresholds[-1]:
        return LEAP_TABLE[thresholds[-1]]
    lookup = year + (0.25 if month <= 6 else 0.75)
    idx = 0
    while lookup > thresholds[idx]:
        idx += 1
    # We overshot by one: the previous entry is the one in force.
    return LEAP_TABLE[thresholds[idx - 1]]
@staticmethod
def get_last_leap_second():
"""Method to get the date and value of the last leap second added to
the table
:returns: Tuple with year, month, day, leap second value.
:rtype: tuple
| |
"""
Events, for Survol, are RDF triples inserted in a graph database by Python daemons, executing scripts
in the background. These scripts are the usual CGI scripts, executed in a daemon controlled by
the Python module "supervisor" (supervisor-win" on Windows).
Conclusion: Plain CGI scripts which are normally called by a HTTP server can also be used as daemons
filling a graph database (RDFLIB and SqlAlchemy).
These events are fetched when the same scripts are executed from a HTTP server: Then, instead of running
in background, they fetch the events stored by their counterparts. These events are tagged in the database
by the URL of the script.
Conclusion: A CGI script returns the same type of events, possibly stored in a graph database by its counterpart,
or immediately retrieved. Technically, these events are stored in RDF contexts, labelled by the URL.
This file has no knowledge of what scripts are doing, the object possible associated to a daemon etc...
"""
# This should avoid using lib_util, lib_common etc... because the intention of code
# in the "scripts/" directory is to be stand-alone, as much as possible.
import os
import sys
import subprocess
import psutil
import time
import datetime
import tempfile
import configparser
import logging
import traceback
# xmlrpc is used to manage the supervisor: Creation of new programs, start/stop etc...
# A new supervisor program and daemon is created for each URL.
_is_py3 = sys.version_info >= (3,)
if _is_py3:
import xmlrpc.client as xmlrpclib
else:
import xmlrpclib
# This starts a supervisor process in interactive mode, except if a daemon is already started.
try:
# Linux : Module supervisor.
# Windows: Module supervisor-win
import supervisor
from supervisor.xmlrpc import Faults as SupervisorFaults
except ImportError:
logging.debug("Cannot import supervisor module")
supervisor = None
def _must_start_factory():
    """Tell whether a dedicated supervisor process must be started.

    When running under pytest, a specific supervisor is started in a
    dedicated, fully-controlled subprocess that tests start and stop at
    will; it has nothing to do with a supervisor process used in a real
    deployment.
    """
    if supervisor is None:
        logging.error("Could not import supervisor")
        return False
    # For performance, skip the factory under pytest unless explicitly asked.
    # Example value:
    # PYTEST_CURRENT_TEST= tests/test_lib_daemon.py::CgiScriptTest::test_start_events_feeder_daemon
    running_under_pytest = "PYTEST_CURRENT_TEST" in os.environ
    explicitly_requested = "START_DAEMON_FACTORY" in os.environ
    return (not running_under_pytest) or explicitly_requested
# This is not stored with credentials because the supervisor might be part of the machine setup,
# so Survol would use it instead of starting its own supervisord process.
# Also, code in "scripts/" directory must be as standalone as possible.
_supervisor_config_file = os.path.join(os.path.dirname(__file__), "supervisord.conf")
def _log_supervisor_access(function_name, step_name, **kwargs):
    """
    Append one trace line about a supervisor access to a log file.

    This is a debugging helper because this log file gives a complete history
    of events creations and reads.
    """
    # TODO: This file should be truncated when the CGI server starts.
    if "TRAVIS" in os.environ:
        # Travis does not give access to locally generated files.
        return
    log_supervisor_file = os.path.join(tempfile.gettempdir(), "survol_supervisor.log")
    timestamp_now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Three retries in case another process accesses the file at the same time.
    for _ in range(3):
        try:
            with open(log_supervisor_file, "a") as db_log_file:
                db_log_file.write("%s %6d f=%25s s=%s a=%s\n" % (
                    timestamp_now,
                    os.getpid(),
                    function_name,
                    step_name,
                    str(kwargs)))
            break
        except Exception as exc:
            logging.error("Could not open survol supervisor log: %s. Retry" % exc)
            time.sleep(1)
# Trace the import of this module itself, so the log shows when each process loaded it.
_log_supervisor_access("", "import")
def _get_parsed_configuration():
    """
    Parse the supervisord configuration file into a ConfigParser object.

    It does not connect to anything.

    Returns:
        configparser.ConfigParser: the parsed configuration.

    Raises:
        Exception: if the configuration file is missing or cannot be parsed.
    """
    parsed_config = configparser.ConfigParser()
    if not os.path.exists(_supervisor_config_file):
        raise Exception("Cannot find supervisor config file:" + _supervisor_config_file)
    logging.info("config_file=%s", _supervisor_config_file)
    if _is_py3:
        config_status = parsed_config.read(_supervisor_config_file)
    else:
        # Python 2: ConfigParser.read expects a unicode filename here.
        config_status = parsed_config.read(_supervisor_config_file.decode())
    # read() returns the list of successfully parsed files; empty means failure.
    if not config_status:
        raise Exception("config_status should be True")
    logging.info("config_status=%s", config_status)
    # BUG FIX: the original used '"Sections=" % parsed_config.sections()' which
    # raises TypeError because the format string has no conversion specifier.
    logging.info("Sections=%s", parsed_config.sections())
    return parsed_config
def _clean_config_value(config_value):
    """
    Strip whitespace and a trailing inline comment from a configuration value.

    https://bugs.python.org/issue27762
    Python 2 bug when the value contains a semicolon after a space which normally
    should be stripped. This can be avoided from Python 3.2 with
    ConfigParser(inline_comment_prefixes=';').
    However, this portable function optimistically parses the value for hosts,
    usernames and passwords.
    """
    # TODO: Beware if a semicolon appears inside the password.
    before_comment, _, _ = config_value.strip().partition(";")
    return before_comment.strip()
def _get_supervisor_url():
    """Build the supervisor XML-RPC URL from the supervisord configuration file.

    The URL embeds the username and password when the configuration defines them.
    """
    parsed_config = _get_parsed_configuration()
    inet_http_server = parsed_config['inet_http_server']
    # For example '127.0.0.1:9001'
    supervisor_port = _clean_config_value(inet_http_server['port'])
    # TODO: Use https instead of http.
    try:
        supervisor_user = _clean_config_value(inet_http_server['username'])
        supervisor_pass = _clean_config_value(inet_http_server['password'])
    except KeyError:
        # No credentials defined: 'http://127.0.0.1:9001'
        return 'http://%s' % supervisor_port
    # With credentials: 'http://chris:123@127.0.0.1:9001'
    return 'http://%s:%s@%s' % (supervisor_user, supervisor_pass, supervisor_port)
def _create_server_proxy():
    """Create an XML-RPC proxy connected to the supervisor process.

    Typical result: xmlrpclib.ServerProxy('http://chris:123@127.0.0.1:9001')
    """
    return xmlrpclib.ServerProxy(_get_supervisor_url())
def supervisorctl_url():
    """Return the URL of supervisorctl, parsed from supervisord.conf."""
    parsed_config = _get_parsed_configuration()
    # For example 'http://localhost:9001'
    control_url = _clean_config_value(parsed_config['supervisorctl']['serverurl'])
    logging.debug("control_url=%s", control_url)
    return control_url
# Handle (subprocess.Popen) of the locally started supervisord process; None until started.
_supervisor_process = None
def _local_supervisor_start():
    """Start a local supervisord subprocess, unless one was already started.

    The subprocess handle is stored in the module-level global _supervisor_process.
    When a handle already exists, this function only logs whether that process is
    still alive and returns without starting a second one.
    """
    global _supervisor_process
    logging.info("begin")
    # Maybe it is already started.
    if not _supervisor_process is None:
        # TODO: Should check that it is still there.
        logging.info("leaving _supervisor_process.pid=%d" % _supervisor_process.pid)
        is_running = psutil.pid_exists(_supervisor_process.pid)
        if is_running:
            logging.info("running fine")
        else:
            logging.warning("SHOULD BE RUNNING")
            # Reap the dead child so it does not linger as a zombie.
            process_stdout, process_stderr = _supervisor_process.communicate()
        return
    # Run supervisord with this module's configuration file, using the same
    # Python interpreter as the current process.
    supervisor_command = [sys.executable, "-m", "supervisor.supervisord", "-c", _supervisor_config_file]
    logging.info("supervisor_command=%s" % str(supervisor_command))
    if "TRAVIS" in os.environ:
        # Travis does not give access to locally generated files.
        if _is_py3:
            null_device = subprocess.DEVNULL
        else:
            # subprocess.DEVNULL does not exist in Python 2.
            null_device = open(os.devnull, 'wb')
        supervisor_stdout = null_device
        supervisor_stderr = null_device
    else:
        # Redirect the daemon's output to log files in the temporary directory.
        supervisor_files_directory = tempfile.gettempdir()
        supervisor_stdout_name = os.path.join(supervisor_files_directory, "survol_supervisor_stdout.log")
        supervisor_stderr_name = os.path.join(supervisor_files_directory, "survol_supervisor_stderr.log")
        supervisor_stdout = open(supervisor_stdout_name, "w")
        supervisor_stderr = open(supervisor_stderr_name, "w")
    # No Shell, otherwise the subprocess running supervisor, will not be stopped.
    # BEWARE: DO NOT WRITE IN stdout AND stderr, it collides and blocks !!!
    _supervisor_process = subprocess.Popen(
        supervisor_command,
        stdout=supervisor_stdout,
        stderr=supervisor_stderr,
        shell=False)
    logging.info("proc_popen.pid=%d" % _supervisor_process.pid)
def _local_supervisor_stop():
    """Stop the local supervisord subprocess started by _local_supervisor_start().

    The module-level global _supervisor_process is reset to None afterwards,
    so a later start creates a fresh subprocess.
    """
    global _supervisor_process
    # global _cache_xmlrpc_server_proxy
    _log_supervisor_access("_local_supervisor_stop", "entry")
    if _supervisor_process is None:
        logging.info("Already stopped")
        return
    logging.info("_supervisor_process.pid=%d" % _supervisor_process.pid)
    is_running = psutil.pid_exists(_supervisor_process.pid)
    if is_running:
        logging.info("Running fine. Pid=%d", _supervisor_process.pid)
    else:
        logging.error("SHOULD BE RUNNING")
    # Kill the subprocess and reap it with communicate(), then also attempt a
    # terminate for good measure (a no-op if the process is already gone).
    _supervisor_process.kill()
    _supervisor_process.communicate()
    try:
        logging.info("being terminated. Pid=%d", _supervisor_process.pid)
        _supervisor_process.terminate()
        logging.info("terminated. Pid=%d", _supervisor_process.pid)
    except Exception as exc:
        logging.error("_supervisor_process. Pid=%d: %s" % (_supervisor_process.pid, str(exc)))
    # Forget the handle; the early return above guarantees it is not None here.
    if _supervisor_process is not None:
        del _supervisor_process
    _supervisor_process = None
    # TODO: Should call _xmlrpc_server_proxy.supervisor.shutdown()
    ### NOT YET ############### del xmlrpc_server_proxy
    _log_supervisor_access("_local_supervisor_stop", "exit")
def supervisor_startup():
    """This starts the supervisor process as a subprocess.

    This can be done only by web servers which are persistent.
    TODO: Check that maybe a supervisord process is already there.

    Returns:
        The pid of the supervisor subprocess, or None when the factory must not
        be started (e.g. while testing without START_DAEMON_FACTORY).
    """
    _log_supervisor_access("supervisor_startup", "entry")
    # Do not start the supervisor if:
    # - Testing and a specific environment variable is not set.
    # - The Python package supervisor is not available.
    if not _must_start_factory():
        error_message = "supervisor_startup: Do not start. "
        logging.info(error_message)
        return None
    # Maybe this is a supervisor service, or a local process.
    # TODO: The process should not be started if a service is already running supervisor
    logging.info("about to start _supervisor_process")
    _local_supervisor_start()
    logging.info("supervisor_startup _supervisor_process.pid=%d" % _supervisor_process.pid)
    # Extra test to be sure that supervisor is running.
    if not psutil.pid_exists(_supervisor_process.pid):
        error_message = "supervisor_startup not running _supervisor_process.pid=%d" % _supervisor_process.pid
        logging.error(error_message)
        raise Exception("supervisor_startup did not start _supervisor_process.pid=%d" % _supervisor_process.pid)
    # NOTE(review): the step name "entry" here looks like it should be "exit" -- confirm.
    _log_supervisor_access("supervisor_startup", "entry", pid=_supervisor_process.pid)
    return _supervisor_process.pid
def supervisor_stop():
    """Stop the supervisor process which was started by supervisor_startup().

    Returns:
        bool: always True.
    """
    global _supervisor_process
    _log_supervisor_access("supervisor_stop", "entry")
    logging.info("supervisor_stop")
    # TODO: In the general case, detect a global supervisor started by something else.
    _local_supervisor_stop()
    _log_supervisor_access("supervisor_stop", "exit")
    return True
def is_supervisor_running():
    """
    Tell whether the supervisor process is running, by querying its XML-RPC API.

    Returns:
        The supervisor API version string when the supervisor answers,
        otherwise None.
    """
    _log_supervisor_access("is_supervisor_running", "entry")
    # BUG FIX: the original unconditionally accessed _supervisor_process.pid here,
    # raising AttributeError when no supervisor process was created (the None case
    # is explicitly handled further down, so it can legitimately happen).
    if _supervisor_process is not None:
        logging.info(" _supervisor_process.pid=%d" % _supervisor_process.pid)
    xmlrpc_server_proxy = None
    try:
        xmlrpc_server_proxy = _create_server_proxy()
        api_version = xmlrpc_server_proxy.supervisor.getAPIVersion()
        logging.info("api_version=%s" % api_version)
    except Exception as exc:
        logging.error("exc=%s" % exc)
        api_version = None
    finally:
        # Drop the proxy immediately; a new one is created for each check.
        del xmlrpc_server_proxy
    if _supervisor_process is None:
        logging.error("SUPERVISOR NOT CREATED")
    elif psutil.pid_exists(_supervisor_process.pid):
        logging.info("OK _supervisor_process.pid=%d" % _supervisor_process.pid)
    else:
        logging.error("NOT HERE _supervisor_process.pid=%d" % _supervisor_process.pid)
    logging.info("api_version=%s" % api_version)
    _log_supervisor_access("is_supervisor_running", "exit", api_version=api_version)
    return api_version
# Name of the supervisor process group under which all Survol programs are added.
_survol_group_name = "survol_group"
def _display_configuration_file(configuration_file_name):
    """Log the entire content of a configuration file, for debugging purposes."""
    try:
        with open(configuration_file_name) as config_file:
            config_content = config_file.read()
        logging.info("_display_configuration_file: _survol_group_name=%s" % _survol_group_name)
        logging.info("_display_configuration_file: Configuration start ================================")
        logging.info("%s" % config_content)
        logging.info("_display_configuration_file: Configuration end ================================")
    except Exception as exc:
        logging.error("_display_configuration_file: Cannot read configuration exc=%s" % str(exc))
def _add_and_start_program_to_group(process_name, user_command, environment_parameter):
"""Add the program and starts it immediately: This is faster."""
program_options = {
'command': user_command,
'autostart': 'true',
'autorestart': 'false',
'environment': environment_parameter}
xmlrpc_server_proxy = _create_server_proxy()
try:
add_status = xmlrpc_server_proxy.twiddler.addProgramToGroup(
_survol_group_name,
process_name,
program_options)
except xmlrpclib.ProtocolError as exc:
| |
<gh_stars>0
#!/usr/bin/env python
import copy, math, numpy, os, pdb, sys
import chipseq_analysis, cmdlineProgs, random, selexDb, seq_utility, seqUtils, utility
import numpy
# SUMMARY: I have created this class to encapsulate motifs, for use in my
# chipseq qc work. Should possibly have done this earlier, as there are
# various motif-related functions etc. scattered throughout my python libraries.
# Introduced on June 18th 2013, for use in snv analysis. Should be useful
# elsewhere too:
def motifPrefixes2meme(outputFile, motifPrefixes, baseDir, suffix):
    """Write the motifs named by motifPrefixes to outputFile in MEME format.

    Each motif is loaded from the file <baseDir><motifPrefix><suffix>, named
    after its prefix, and written out with pwm.writeToMEME().
    NOTE: Python 2 only (print >> syntax).
    """
    for motifPrefix in motifPrefixes:
        currMotif = pwm(open(baseDir + motifPrefix + suffix))
        currMotif.setName(motifPrefix)
        currMotif.writeToMEME(outputFile)
        # Blank separator line between motifs:
        print >> outputFile, ""
# Introduced on June 18th 2013, for use in snv analysis. Should be useful
# elsewhere too:
def getTFsForMotifPrefix(motifPrefix, motifsCursor):
    """Retrieves all TFs corresponding to the specified motif prefix,
    by identity and sequence similarity, according to the motifs database.

    Args:
        motifPrefix: motif prefix string used as the lookup key.
        motifsCursor: DB-API cursor on the motifs (MySQL) database.

    Returns:
        list: unique transcription factor names (order not guaranteed).
    """
    # Get the TFs linked by gene ID.
    # BUG FIX: use a parameterized query instead of building SQL by string
    # concatenation, which was vulnerable to SQL injection via motifPrefix.
    cmdStr = ("select genes.Name from motifs.genes "
              "inner join motifs.tfMotifs on motifs.genes.geneID = motifs.tfMotifs.geneID "
              "where motifs.tfMotifs.motifPrefix = %s;")
    motifsCursor.execute(cmdStr, (str(motifPrefix),))
    # BUG FIX: list comprehensions instead of map(); under Python 3 the original
    # 'map(...) + map(...)' concatenation raises TypeError (map objects).
    tfsByGeneID = [row[0] for row in motifsCursor.fetchall()]
    # Get the TFs linked by DBD sequence similarity:
    cmdStr = ("select distinct g2.Name from motifs.genes "
              "inner join motifs.tfMotifs on motifs.genes.geneID = motifs.tfMotifs.geneID "
              "inner join geneProteins on genes.GeneID = geneProteins.GeneID "
              "inner join proteins on geneProteins.proteinID = proteins.proteinID "
              "inner join dbdBlocks on proteins.proteinID = dbdBlocks.proteinID "
              "inner join aaSeq on dbdBlocks.aaSeqID = aaSeq.seqID "
              "inner join aaSeqSim on aaSeq.seqID = aaSeqSim.seq1id "
              "inner join aaSeq as2 on aaSeqSim.seq2id = as2.seqID "
              "inner join dbdBlocks db2 on as2.seqID = db2.aaSeqID "
              "inner join proteins p2 on db2.proteinID = p2.proteinID "
              "inner join geneProteins gp2 on p2.proteinID = gp2.proteinID "
              "inner join genes g2 on gp2.geneID = g2.geneID "
              "where motifs.tfMotifs.motifPrefix = %s and aaSeqSim.similarity >= 1;")
    motifsCursor.execute(cmdStr, (str(motifPrefix),))
    tfsBySeqSim = [row[0] for row in motifsCursor.fetchall()]
    # (Removed the dead 'tfName' computation: it was assigned but never used.)
    # Uniquify the combined list before returning (bug fixed on July 29th 2013):
    tfsForMotifDict = {}
    for tf in tfsByGeneID + tfsBySeqSim:
        tfsForMotifDict[tf] = 1
    # list() keeps the Python 2 behaviour of returning a real list under Python 3.
    return list(tfsForMotifDict.keys())
def cons2countMatrix(consString, count):
    """Convert a consensus sequence into a count matrix.

    Takes a consensus sequence and a count value as input, and generates a
    numpy matrix representing the equivalent pwm. Returns that matrix object
    (rather than a pwm object).

    Args:
        consString: DNA consensus string over ACGT (either case).
        count: value placed in the matching cell of each matrix row.

    Returns:
        numpy array of shape (len(consString), 4); zero everywhere except one
        cell per row, set to count, at the column of the consensus letter.
    """
    # Map each letter (either case) to its column index in the matrix:
    lettToColIdx = {'A': 0, 'C': 1, 'G': 2, 'T': 3,
                    'a': 0, 'c': 1, 'g': 2, 't': 3}
    alphabetLen = 4
    # One row per consensus position, one column per alphabet letter:
    countMat = numpy.zeros((len(consString), alphabetLen))
    # Place the count value in the cell matching each consensus letter:
    for rowIdx, letter in enumerate(consString):
        countMat[rowIdx][lettToColIdx[letter]] = count
    return countMat
class pwm(object):
"""A single pwm motif model."""
    def __init__(self, motifData, dataType="memeFile"):
        """Build a pwm from one of several possible input representations.

        Args:
            motifData: the motif source; its expected type depends on dataType
                (an open file for the *File types, a frequency matrix for
                "freqMatrix", a list of aligned DNA strings for "seqAln").
            dataType: one of "memeFile", "xxMotifFile", "countsFile",
                "freqMatrix", "seqAln" or "iniMotifFile".

        Raises:
            ValueError: when a MEME or xxMotif file contains no more motifs.
        """
        self.name = None  # Motif name; set by the parsers or via setName().
        self.matrix = None  # List of columns; each column lists letter frequencies.
        self.memeFilePath = None
        self.eValue = None
        self.consensusCount = None # Optional, for count data such as selex
        if (dataType == "memeFile"):
            # NOTE: "file" is the Python 2 built-in type; this assert is Python 2 only.
            assert (isinstance(motifData, file))
            # Input data is a MEME file => Initialise accordingly:
            self.initFromMEME(motifData)
            if (self.matrix == []):
                # Matrix was still empty after trying to initialise from
                # MEME file -> Report this as an exception:
                raise ValueError("MEME file had no more motifs in it:" +
                                 motifData.name)
        # NOTE(review): this is "if", not "elif", unlike the branches below -- confirm intended.
        if (dataType == "xxMotifFile"):
            assert (isinstance(motifData, file))
            # Input data is a xxMotif file => Initialise accordingly:
            self.initFrom_xxMotif(motifData)
            if (self.matrix == []):
                # Matrix was still empty after trying to initialise from
                # xxMotif file -> Report this as an exception:
                raise ValueError("xxMotif file had no more motifs in it:" +
                                 motifData.name)
        elif (dataType == "countsFile"):
            assert (isinstance(motifData, file))
            self.initFromCounts(motifData)
        elif (dataType == "freqMatrix"):
            self.initFromFreqMatrix(motifData)
        elif (dataType == "seqAln"):
            # Input data is a list of strings representing aligned DNA sequences
            # from which this pwm should be constructed:
            self.initFromAlign(motifData)
        elif (dataType == "iniMotifFile"):
            self.initFromInimotifFile(motifData)
        self.logoFilename = None  # Set by makeSeqLogo().
    def getScoreProf(self, dnaSeq, bgFreqs = [0.25,0.25,0.25,0.25,0.25]):
        """Introduced April 15th 2011.
        Calculates the motif LLR score contribution at each position in the
        specified DNA sequence. Returns the resulting LLR score contributions
        as an array (log base 10). Sequence positions with the letter "N"
        use the background "N" frequency for both motif and background
        likelihood, i.e. a zero value contribution.
        NOTE: This only works/makes sense when a zero-order background
        model is used. Thus, the bgFreqs is assumed to be an array describing
        such a background model (for A, C, G, T and N).
        NOTE(review): bgFreqs is a mutable default argument shared between
        calls; it is only read here, but confirm it is never mutated."""
        # The sequence must span the whole motif, one letter per column:
        assert len(dnaSeq) == self.getWidth()
        # Generate a data structure mapping from letter to pwm (and background
        # model) index:
        lett2colIdx = {'A':0, 'C':1, 'G':2, 'T':3, 'N':4, \
                       'a':0, 'c':1, 'g':2, 't':3, 'n':4}
        # The array showing contributions of each letter to the total LLR:
        llrScoreContribs = []
        # Consider each position in the motif...
        for columnIdx in range(self.getWidth()):
            # Current column; bgFreqs[-1] gives the "N" frequency, appended so
            # that index 4 ("N") is defined for the motif column too:
            motifColumn = self.getMatrix()[columnIdx] + [bgFreqs[-1]]
            # Get motif likelihood, bg likelihood, and then calculate llr:
            letter = dnaSeq[columnIdx]
            letterColIdx = lett2colIdx[letter]
            motifProb = motifColumn[letterColIdx]
            bgProb = bgFreqs[letterColIdx]
            llrScoreContribs.append(math.log(motifProb/bgProb, 10))
        return llrScoreContribs
    def getName(self):
        """Return this motif's name (None when unset)."""
        return self.name
    def setName(self, name):
        """Set this motif's name."""
        self.name = name
def getIC_arr(self):
"""Returns an array storing the information content of each column of
this pwm, in order of motif position."""
matrix = self.matrix
ICs = []
for col in matrix:
ICs.append(seq_utility.calc_IC(col))
return ICs
    def getMatrix(self):
        """Return the motif matrix: a list of columns of letter frequencies."""
        return self.matrix
    def getWidth(self):
        """Return the number of columns (positions) in this motif."""
        return len(self.matrix)
    def getLogoFilename(self):
        """Return the sequence logo filename (None until makeSeqLogo() is run)."""
        return self.logoFilename
    def getMemeFilePath(self):
        """Return the path of the MEME file this motif came from, if any."""
        return self.memeFilePath
    def getEValue(self):
        """Return this motif's E-value (None when unset)."""
        return self.eValue
    def trimLowIC(self, icThresh=0.5, copy=False):
        """Trims off low information-content flanking columns from the motif.

        Args:
            icThresh: columns whose information content is below this threshold
                are trimmed from both ends (interior low-IC columns are kept).
            copy: when False, this motif's matrix is replaced in place and None
                is returned; when True, this motif is left untouched and a new
                trimmed pwm object is returned.

        Returns:
            None when copy is False, otherwise a new trimmed pwm.
        """
        trimmedMotif = self.getMatrix()
        # Trim the leading low IC columns...
        while ((len(trimmedMotif) > 0) and
               (seq_utility.calc_IC(trimmedMotif[0]) < icThresh)):
            trimmedMotif = trimmedMotif[1:]
        # Trim the tailing low IC columns...
        while ((len(trimmedMotif) > 0) and
               (seq_utility.calc_IC(trimmedMotif[-1]) < icThresh)):
            trimmedMotif = trimmedMotif[:-1]
        if (not copy):
            # Editing the original motif => Set matrix:
            self.matrix = trimmedMotif
            return None
        else:
            # Making a copy matrix:
            copyMotif = pwm(trimmedMotif, dataType="freqMatrix")
            return copyMotif
    def makeSeqLogo(self, outFilePrefix, format="eps"):
        """Generates a sequence logo image for this motif.

        Writes the motif to a temporary MEME file, runs ceqlogo on it, and
        records the resulting image filename in self.logoFilename.
        NOTE: Python 2 only (print >> syntax).

        Args:
            outFilePrefix: output path without the extension.
            format: image format suffix, e.g. "eps".
        """
        self.logoFilename = outFilePrefix + "." + format
        # Write this motif to a temporary MEME file for ceqlogo to consume:
        tmpMemeFilename = \
            utility.makeTempFilename(outFilePrefix + "_tmp_MEME_file_ceqlogo",
                                     fileSuffix = ".meme")
        tmpMemeFile = open(tmpMemeFilename, 'w')
        self.writeToMEME(tmpMemeFile)
        print >> tmpMemeFile, ""
        tmpMemeFile.flush()
        tmpMemeFile.close()
        # FIXME: Currently, I assume the matrix file exists, and that it is
        # in MEME format. Will need to adapt this in the future when
        # dynamically-generated motifs are run instead:
        seqUtils.run_ceqlogo(tmpMemeFilename, outFilePrefix, format=format)
        cmdlineProgs.deleteFiles([tmpMemeFilename])
    def writeToMAST(self, outFile, pseudo=0.01):
        """Writes the motif out to the specified filehandle in MAST format.

        A MEME-style header is written first, followed by a log-odds matrix
        computed against a uniform background, with pseudo-counts added.
        NOTE: Python 2 only (print >>, old except syntax, map/reduce on lists).

        Args:
            outFile: writable filehandle.
            pseudo: pseudo-count added to each probability before the log-odds
                transform, to avoid log(0).
        """
        # The motif matrix data must have been set before this method can
        # be called:
        assert (self.matrix != None)
        lengthStr = str(len(self.matrix))
        hdrString = """MEME version 4.5
ALPHABET= ACGT
strands: + -
Background letter frequencies (from dataset with add-one prior applied):
A 0.25 C 0.25 G 0.25 T 0.25
MOTIF """
        extra = str(self.name) + "\nBL MOTIF " + str(self.name) + " width= " + lengthStr + " seqs=0\n\nlog-odds matrix: alength= 4 w= " + lengthStr + "\n"
        hdrString = hdrString + extra
        outFile.write(hdrString)
        # Currently just assume uniform background model:
        for col in self.matrix:
            # Add pseudo-counts to the elements of the matrix...
            # FIXME: This could be done better. What pseudo-count
            # value is best? Shouldn't this be in a psfm2llr function?
            newCol = map(lambda prob: (prob+pseudo)/(1+(pseudo*4)), col)
            # From mast source code, it seems the log base is 10, although I'm
            # not 100% sure!:
            try:
                llrCol = map(lambda prob: math.log(prob/0.25, 10), newCol)
            except ValueError, e:
                print >> sys.stderr, "Invalid probability column:", newCol
                raise e
            outFile.write(reduce(lambda p1, p2: str(p1) + " " + str(p2),
                                 llrCol, "") + "\n")
        print >> outFile, ""
def writeToTRANSFAC(self, outFile, nSeqs=1000, pseudo=0.01):
"""Writes the motif out the specified filehandle in TRANSFAC format."""
# The motif matrix data must have been set before this method can
# be called:
assert (self.matrix != None)
lengthStr = str(len(self.matrix))
hdrString = "AC " + self.name | |
<reponame>Niclnx/service-stac<gh_stars>0
import logging
import time
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
class CollectionTemporalExtentMixin():
def update_temporal_extent(self, item, trigger, original_item_values):
'''Updates the collection's temporal extent if needed when items are inserted, updated or
deleted.
For all the given parameters this function checks, if the corresponding parameters of the
collection need to be updated. If so, they will be updated.
Args:
item:
Item thats being inserted/updated or deleted
trigger:
Item trigger event, one of 'insert', 'update' or 'delete'
original_item_values: (optional)
Dictionary with the original values of item's ['properties_datetime',
'properties_start_datetime', 'properties_end_datetime'].
Returns:
bool: True if the collection summaries has been updated, false otherwise
'''
updated = False
# Get the start end datetimes independently if we have a range or not, when there is no
# range then we use the same start and end datetime
start_datetime = item.properties_start_datetime
end_datetime = item.properties_end_datetime
if start_datetime is None or end_datetime is None:
start_datetime = item.properties_datetime
end_datetime = item.properties_datetime
# Get the original start end datetimes independently if we have a range or not, when there
# is no range then we use the same start and end datetime
old_start_datetime = original_item_values.get('properties_start_datetime', None)
old_end_datetime = original_item_values.get('properties_end_datetime', None)
if old_start_datetime is None or old_end_datetime is None:
old_start_datetime = original_item_values.get('properties_datetime', None)
old_end_datetime = original_item_values.get('properties_datetime', None)
if trigger == 'insert':
updated |= self._update_temporal_extent(
item, trigger, None, start_datetime, None, end_datetime
)
elif trigger in ['update', 'delete']:
updated |= self._update_temporal_extent(
item, trigger, old_start_datetime, start_datetime, old_end_datetime, end_datetime
)
else:
logger.critical(
'Failed to update collection temporal extent; invalid trigger parameter %s',
trigger,
extra={
'collection': self.name, 'item': item.name
}
)
raise ValueError(f'Invalid trigger parameter; {trigger}')
return updated
def _update_temporal_extent_on_item_insert(
self, new_start_datetime, new_end_datetime, item_name
):
'''This function is called from within update_temporal_extent() when a new item is inserted
to the collection.
Args:
collection: Collection
Collection instance on which to operate
new_start_datetime: datetime
item's updated value for properties_start_datetime
new_end_datetime: datetime
item's updated value for properties_end_datetime
item_name: string
the name of the item being treated
Returns:
bool: True if temporal extent has been updated, false otherwise
'''
updated = False
if (self.extent_start_datetime is None or self.extent_start_datetime > new_start_datetime):
logger.info(
"Collection temporal extent start_datetime=%s updated to the "
"item start_datetime=%s",
self.extent_start_datetime,
new_start_datetime,
extra={
'collection': self.name, 'item': item_name, 'trigger': 'item-insert'
}
)
updated |= True
# first item in collection, as extent_start_datetime is None:
# or
# new item starts earlier that current collection range starts
self.extent_start_datetime = new_start_datetime
if self.extent_end_datetime is None or self.extent_end_datetime < new_end_datetime:
logger.info(
"Collection temporal extent end_datetime=%s updated to item end_datetime=%s",
self.extent_end_datetime,
new_end_datetime,
extra={
'collection': self.name, 'item': item_name, 'trigger': 'item-insert'
}
)
updated |= True
# first item in collection, as extent_start_datetime is None
# or
# new item starts after current collection's range ends
self.extent_end_datetime = new_end_datetime
return updated
    def _update_start_temporal_extent_on_item_update(
        self, old_start_datetime, new_start_datetime, item_name, qs_other_items=None
    ):
        '''Update the collection's start extent when an item's start datetime changes.

        This function is called from within update_temporal_extent() when the
        start_datetime of an item in the collection is updated to check if the
        collection's start_datetime needs to be updated.

        Args:
            old_start_datetime: datetime
                item's old value for properties_start_datetime
            new_start_datetime: datetime
                item's updated value for properties_start_datetime
            item_name: str
                the name of the item being treated
            qs_other_items: QuerySet | None
                queryset with all items of the collection excluding the one being updated.
                (optional; only evaluated when the updated item was defining the left
                bound and its start moved later)

        Returns:
            bool: True if temporal extent has been updated, false otherwise
            return_qs: QuerySet | None
                Queryset containing all items (but the one currently updated) that have
                non-null properties_datetime values. This queryset might be used in
                update_end_temporal_extent_on_item_update() and can be passed in already
                evaluated state to save one DB hit.
        '''
        updated = False
        return_qs = None
        logger.debug(
            "Updating collection extent start datetime %s with item (old start: %s, new start: %s)",
            self.extent_start_datetime,
            old_start_datetime,
            new_start_datetime,
            extra={
                'collection': self.name, 'item': item_name, 'trigger': 'item-update'
            }
        )
        if old_start_datetime == self.extent_start_datetime:
            # item's old start_datetime was defining left bound of the temporal
            # extent interval of collection before update
            if new_start_datetime < old_start_datetime:
                logger.info(
                    "Collection temporal extent start_datetime=%s updated "
                    "to item start_datetime=%s",
                    self.extent_start_datetime,
                    new_start_datetime,
                    extra={
                        'collection': self.name, 'item': item_name, 'trigger': 'item-update'
                    }
                )
                updated |= True
                # item's start_datetime was shifted to the left (earlier)
                self.extent_start_datetime = new_start_datetime
            else:
                # item's start_datetime was shifted to the right (later)
                # but was defining the left bound of the temporal extent
                # of the collection before
                # --> hence the new start_datetime of the collection
                # needs to be determined:
                # set earliest start_datetime to min(earliest_start_datetime
                # of all items but the one currently updated and
                # new_start_datetime).
                logger.warning(
                    'Item was defining the start extent and its new start is more recent; '
                    'Looping over all items of the collection in order to find the new '
                    'start extent, this may take a while !',
                    extra={
                        'collection': self.name, 'item': item_name, 'trigger': 'item-update'
                    }
                )
                start = time.time()
                # Candidate 1: earliest properties_start_datetime among the other items.
                qs_other_items_with_properties_start_datetime = qs_other_items.filter(
                    properties_start_datetime__isnull=False
                ).only('properties_start_datetime', 'collection')
                if qs_other_items_with_properties_start_datetime.exists():
                    earliest_properties_start_datetime = (
                        qs_other_items_with_properties_start_datetime.
                        earliest('properties_start_datetime').properties_start_datetime
                    )
                    earliest_start_datetime = min(
                        new_start_datetime, earliest_properties_start_datetime
                    )
                else:
                    earliest_start_datetime = new_start_datetime
                logger.info(
                    'Found the item with the earliest start_datetime properties %s in %ss',
                    earliest_start_datetime,
                    time.time() - start,
                    extra={
                        'collection': self.name, 'item': item_name, 'trigger': 'item-update'
                    }
                )
                start = time.time()
                # Candidate 2: set earliest datetime to min(earliest_datetime of all items
                # but the one currently updated and new_start_datetime)
                qs_other_items_with_properties_datetime = qs_other_items.filter(
                    properties_datetime__isnull=False
                ).only('properties_datetime', 'collection')
                if qs_other_items_with_properties_datetime.exists():
                    other_items_earliest_properties_datetime = (
                        qs_other_items_with_properties_datetime.earliest('properties_datetime'
                                                                        ).properties_datetime
                    )
                    earliest_datetime = min(
                        new_start_datetime, other_items_earliest_properties_datetime
                    )
                    # Hand this evaluated queryset back to the caller so the end-extent
                    # update can reuse it and save one DB hit.
                    return_qs = qs_other_items_with_properties_datetime
                else:
                    earliest_datetime = new_start_datetime
                logger.info(
                    'Found the item with the earliest datetime properties %s in %ss',
                    earliest_datetime,
                    time.time() - start,
                    extra={
                        'collection': self.name, 'item': item_name, 'trigger': 'item-update'
                    }
                )
                updated |= True
                # The new left bound is the earliest of both candidates.
                new_extent_start = min(earliest_start_datetime, earliest_datetime)
                logger.info(
                    "Collection temporal extent start_datetime updated from %s to %s",
                    self.extent_start_datetime,
                    new_extent_start,
                    extra={
                        'collection': self.name, 'item': item_name, 'trigger': 'item-update'
                    }
                )
                self.extent_start_datetime = new_extent_start
        elif new_start_datetime < self.extent_start_datetime:
            # item's start_datetime did not define the left bound of the
            # collection's temporal extent before update, which does not
            # matter anyways, as it defines the new left bound after update
            # and collection's start_datetime can be simply adjusted
            logger.info(
                "Collection temporal extent start_datetime=%s updated to item start_datetime=%s",
                self.extent_start_datetime,
                new_start_datetime,
                extra={
                    'collection': self.name, 'item': item_name, 'trigger': 'item-update'
                }
            )
            updated |= True
            self.extent_start_datetime = new_start_datetime
        return updated, return_qs
def _update_end_temporal_extent_on_item_update(
self,
old_end_datetime,
new_end_datetime,
item_name,
qs_other_items=None,
qs_other_items_with_properties_datetime=None
):
'''This function is called from within update_temporal_extent() when an
item in the collection is updated to check if the collection's
end_datetime needs to be updated.
Args:
collection: Collection
Collection instance on which to operate
old_end_datetime: datetime
item's old value for properties_end_datetime
new_end_datetime: datetime
item's updated value for properties_end_datetime
item_name: str
the name of the item being treated
qs_other_items: QuerySet | None
queryset with all items of the collection excluding the one being updated.
(optional)
qs_other_items_with_properties_datetimes: QuerySet | None
Already evaluated queryset with all items (but the one currently updated) that have
non-null properties_datetime values (optional).
Returns:
bool: True if temporal extent has been updated, false otherwise
'''
updated = False
logger.debug(
"Updating collection extent_end_datetime %s with item "
"(old end_datetime: %s, new end_datetime: %s)",
self.extent_end_datetime,
old_end_datetime,
new_end_datetime,
extra={
'collection': self.name, 'item': item_name, 'trigger': 'item-update'
}
)
if old_end_datetime == self.extent_end_datetime:
# item's old end_datetime was defining the right bound of
# the collection's temporal extent interval before update
if new_end_datetime > old_end_datetime:
logger.info(
"Collection temporal extent_end_datetime %s updated to item end_datetime %s",
self.extent_end_datetime,
new_end_datetime,
extra={
'collection': self.name, 'item': item_name, 'trigger': 'item-update'
}
)
# item's end_datetime was shifted to the right (later)
updated |= True
self.extent_end_datetime = new_end_datetime
else:
# item's end_datetime was shifted to the left (earlier)
# but was defining the right bound of the collection's
# temporal extent.
# --> hence the new end_datetime of the collection needs
# to be determined:
# set latest end_datetime to max(new_end_datetime and
# end_datetime of all items but the one currently updated).
logger.warning(
'Item was defining the end extent and its new end is less recent; '
'Looping over all items | |
<filename>Libraries/Python/wxGlade/v0.9,5/wxGlade-0.9.5-py3.6.egg/wxglade/widgets/menubar/menubar.py
"""\
wxMenuBar objects
@copyright: 2002-2007 <NAME>
@copyright: 2014-2016 <NAME>
@copyright: 2016-2019 <NAME>
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import wx
import common, compat, config, misc
from MenuTree import *
from tree import Node
from wcodegen.taghandler import BaseXmlBuilderTagHandler
import new_properties as np
from edit_windows import EditBase, PreviewMixin
class MenuItemDialog(wx.Dialog):
def __init__(self, parent, owner, items=None):
"""Build the menu editor dialog.

parent: parent wx window for the dialog.
owner: the menu bar edit-widget this dialog edits.
items: optional list of menu items to pre-populate the editor with.
"""
style = wx.DEFAULT_DIALOG_STYLE|wx.RESIZE_BORDER|wx.WANTS_CHARS
wx.Dialog.__init__(self, parent, -1, _("Menu editor"), style=style)
# menu item fields
self.label = wx.TextCtrl(self, wx.ID_ANY, "")
self.event_handler = wx.TextCtrl(self, wx.ID_ANY, "")
self.name = wx.TextCtrl(self, wx.ID_ANY, "")
self.help_str = wx.TextCtrl(self, wx.ID_ANY, "")
self.id = wx.TextCtrl(self, wx.ID_ANY, "")
# radio box for type
self.check_radio = wx.RadioBox(self, wx.ID_ANY, "Type", choices=["Normal", "Checkable", "Radio"],
majorDimension=1, style=wx.RA_SPECIFY_COLS)
self.check_radio.SetSelection(0)
# dialog action buttons; these will be handled, instead of using stock OK/Cancel buttons
self.ok = wx.Button(self, wx.ID_ANY, "OK")
self.cancel = wx.Button(self, wx.ID_ANY, "Cancel")
# editor action buttons
self.move_left = wx.Button(self, wx.ID_ANY, "&<")
self.move_right = wx.Button(self, wx.ID_ANY, "&>")
self.move_up = wx.Button(self, wx.ID_ANY, "&Up")
self.move_down = wx.Button(self, wx.ID_ANY, "&Down")
self.add = wx.Button(self, wx.ID_ANY, "&Add")
self.remove = wx.Button(self, wx.ID_ANY, "&Remove")
self.add_sep = wx.Button(self, wx.ID_ANY, "Add &Separator")
# list control holding one row per menu item; columns mirror the edit fields
self.menu_items = wx.ListCtrl(self, wx.ID_ANY, style=wx.BORDER_DEFAULT | wx.BORDER_SUNKEN | wx.LC_EDIT_LABELS |
wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.NO_FULL_REPAINT_ON_RESIZE)
self.SetTitle("Menu Editor")
self.__do_layout()
# text/radio edits are mirrored into the list control by these handlers
self.Bind(wx.EVT_TEXT, self.on_label_edited, self.label)
self.Bind(wx.EVT_TEXT, self.on_event_handler_edited, self.event_handler)
self.Bind(wx.EVT_TEXT, self.on_name_edited, self.name)
self.Bind(wx.EVT_TEXT, self.on_help_str_edited, self.help_str)
self.Bind(wx.EVT_TEXT, self.on_id_edited, self.id)
self.Bind(wx.EVT_RADIOBOX, self.on_type_edited, self.check_radio)
self.Bind(wx.EVT_BUTTON, self.move_item_left, self.move_left)
self.Bind(wx.EVT_BUTTON, self.move_item_right, self.move_right)
self.Bind(wx.EVT_BUTTON, self.move_item_up, self.move_up)
self.Bind(wx.EVT_BUTTON, self.move_item_down, self.move_down)
self.Bind(wx.EVT_BUTTON, self.add_item, self.add)
self.Bind(wx.EVT_BUTTON, self.remove_item, self.remove)
self.Bind(wx.EVT_BUTTON, self.add_separator, self.add_sep)
self.Bind(wx.EVT_BUTTON, self.on_cancel, self.cancel)
self.Bind(wx.EVT_BUTTON, self.on_OK, self.ok)
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self.show_item, self.menu_items)
# dialog-wide keyboard navigation (up/down/left/right, see on_char)
self.Bind(wx.EVT_CHAR_HOOK, self.on_char)
self.remove.Bind(wx.EVT_CHAR_HOOK, self.on_button_char) # to ignore the Enter key while the focus is on Remove
self.owner = owner
# ALB 2004-09-26: workaround to make the scroll wheel work...
self.menu_items.Bind(wx.EVT_MOUSEWHEEL, lambda e: e.Skip())
self.menu_items.InsertColumn(0, _("Label"))
self.menu_items.InsertColumn(1, _("Event Handler"))
self.menu_items.InsertColumn(2, _("Name"))
self.menu_items.InsertColumn(3, _("Type"))
self.menu_items.InsertColumn(4, _("Help String"))
self.menu_items.InsertColumn(5, _("Id"))
self.menu_items.SetColumnWidth(0, 180)
self.menu_items.SetColumnWidth(1, 180)
self.menu_items.SetColumnWidth(2, 120)
self.menu_items.SetColumnWidth(3, 35)
self.menu_items.SetColumnWidth(4, 250)
self.menu_items.SetColumnWidth(5, 50)
self.SetSize( (900, 600) )
import re
# same pattern validates both event handler and attribute names
self.handler_re = self.name_re = re.compile(r'^[a-zA-Z_]+[\w-]*(\[\w*\])*$')
self.selected_index = -1 # index of the selected element in the wx.ListCtrl menu_items
self._ignore_events = False # guard: True while the UI is being updated programmatically
self._last_focus = None # last focused TextCtrl, restored after list navigation
if items:
self.add_items(items)
self._select_item(0)
def on_char(self, event):
"""CHAR_HOOK handler: keyboard navigation for the whole dialog.

Plain up/down arrows move the selection; Alt+arrow moves the selected
item (up/down reorders, left/right changes the menu level).
"""
# keyboard navigation: up/down arrows
focus = self.FindFocus()
if focus is self.check_radio:
# let the radio box handle its own arrow keys
event.Skip()
return
if isinstance(focus, wx.Button):
# move focus off buttons so arrow keys navigate the item list
self.label.SetFocus()
elif isinstance(focus, wx.TextCtrl):
# remember the field so _select_item can restore focus to it
self._last_focus = focus
k = event.GetKeyCode()
if k==wx.WXK_RETURN: # ignore Enter key
return
if k==wx.WXK_DOWN:
if event.AltDown():
self.move_item_down(event)
else:
self._select_item(self.selected_index+1)
return
if k==wx.WXK_UP:
if event.AltDown():
self.move_item_up(event)
else:
self._select_item(self.selected_index-1)
return
if k==wx.WXK_RIGHT and event.AltDown():
self.move_item_right(event)
return
if k==wx.WXK_LEFT and event.AltDown():
self.move_item_left(event)
return
# anything else: normal processing
event.Skip()
def on_button_char(self, event):
    """Swallow the Return key on action buttons (e.g. Remove) so it never triggers them."""
    key = event.GetKeyCode()
    if key == wx.WXK_RETURN:
        return
    event.Skip()
def __do_layout(self):
"""Arrange all widgets in sizers and attach the tooltips.

Layout (top to bottom): edit-field grid + type radio box + OK/Cancel,
then the move/add/remove button row, then the item list filling the rest.
"""
sizer_1 = wx.BoxSizer(wx.VERTICAL)
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_5 = wx.BoxSizer(wx.HORIZONTAL)
sizer_6 = wx.BoxSizer(wx.VERTICAL)
# two-column grid: static label on the left, input control on the right
grid_sizer_2 = wx.FlexGridSizer(5, 2, 0, 0)
label_6 = wx.StaticText(self, wx.ID_ANY, "Label:")
grid_sizer_2.Add(label_6, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.RIGHT, 4)
grid_sizer_2.Add(self.label, 1, wx.EXPAND, 0)
label_7 = wx.StaticText(self, wx.ID_ANY, "Event Handler:")
grid_sizer_2.Add(label_7, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.RIGHT, 4)
grid_sizer_2.Add(self.event_handler, 1, wx.EXPAND, 0)
label_8 = wx.StaticText(self, wx.ID_ANY, "(Attribute) Name:")
grid_sizer_2.Add(label_8, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.RIGHT, 4)
grid_sizer_2.Add(self.name, 1, wx.EXPAND, 0)
label_9 = wx.StaticText(self, wx.ID_ANY, "Help String:")
grid_sizer_2.Add(label_9, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.RIGHT, 4)
grid_sizer_2.Add(self.help_str, 1, wx.EXPAND, 0)
label_10 = wx.StaticText(self, wx.ID_ANY, "ID:")
grid_sizer_2.Add(label_10, 0, wx.ALIGN_CENTER_VERTICAL | wx.LEFT | wx.RIGHT, 4)
grid_sizer_2.Add(self.id, 0, 0, 0)
grid_sizer_2.AddGrowableCol(1)
sizer_5.Add(grid_sizer_2, 2, wx.EXPAND, 0)
sizer_5.Add(self.check_radio, 0, wx.ALL | wx.EXPAND, 4)
sizer_5.Add((20, 20), 1, wx.ALIGN_CENTER_VERTICAL | wx.EXPAND, 0)
sizer_6.Add(self.ok, 0, wx.ALL, 5)
sizer_6.Add(self.cancel, 0, wx.ALL, 5)
sizer_5.Add(sizer_6, 0, wx.EXPAND, 0)
sizer_1.Add(sizer_5, 0, wx.EXPAND, 0)
# row of move/add/remove buttons with stretch spacers between the groups
sizer_2.Add(self.move_left, 0, wx.BOTTOM | wx.LEFT | wx.TOP, 8)
sizer_2.Add(self.move_right, 0, wx.BOTTOM | wx.RIGHT | wx.TOP, 8)
sizer_2.Add(self.move_up, 0, wx.BOTTOM | wx.LEFT | wx.TOP, 8)
sizer_2.Add(self.move_down, 0, wx.BOTTOM | wx.RIGHT | wx.TOP, 8)
sizer_2.Add((20, 20), 1, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_2.Add(self.add, 0, wx.BOTTOM | wx.LEFT | wx.TOP, 8)
sizer_2.Add(self.remove, 0, wx.BOTTOM | wx.TOP, 8)
sizer_2.Add(self.add_sep, 0, wx.ALL, 8)
sizer_2.Add((20, 20), 2, wx.ALIGN_CENTER_VERTICAL, 0)
sizer_1.Add(sizer_2, 0, wx.EXPAND, 0)
sizer_1.Add(self.menu_items, 1, wx.EXPAND, 0)
self.SetSizer(sizer_1)
sizer_1.Fit(self)
sizer_1.SetSizeHints(self)
self.Layout()
# set tooltips
for c in (label_6, self.label):
compat.SetToolTip(c, "The menu entry text;\nenter & for access keys (using ALT key)\nappend e.g. \\tCtrl-X for keyboard shortcut")
for c in (label_7, self.event_handler):
compat.SetToolTip(c, "Enter the name of an event handler method; this will be created as stub")
for c in (label_8, self.name):
compat.SetToolTip(c, "optional: enter a name to store the menu item as attribute of the menu bar")
for c in (label_10, self.id):
compat.SetToolTip(c, "optional: enter wx ID")
compat.SetToolTip( self.move_up, "Move selected item up" )
compat.SetToolTip( self.move_down, "Move selected item down" )
compat.SetToolTip( self.menu_items, "For navigation use the mouse or the up/down arrows" )
compat.SetToolTip( self.move_left, "Move the selected item up by one menu level" )
compat.SetToolTip( self.move_right, "Move the selected item down by one menu level" )
def _enable_fields(self, enable=True):
    """Enable or disable every menu-item input widget at once."""
    widgets = (self.event_handler, self.id, self.name, self.help_str, self.check_radio, self.label)
    for widget in widgets:
        widget.Enable(enable)
def add_item(self, event):
"Event handler called when the Add button is clicked: inserts a new default item after the current selection"
# new item goes directly after the current selection (-1 + 1 == 0 when nothing selected)
index = self.selected_index = self.selected_index + 1
indent = ""
if not self.menu_items.GetItemCount():
# list was empty: the edit fields were disabled, re-enable them
self._enable_fields()
if index < 0:
index = self.menu_items.GetItemCount()
elif index > 0:
# inherit the menu level of the preceding item
indent = " " * self.item_level(index-1)
name, label, id, check_radio = "", "item", "", "0"
# NOTE: InsertStringItem/SetStringItem are the wx classic API names
# (InsertItem/SetItem in Phoenix); columns: 0=label, 2=name, 3=type, 5=id
self.menu_items.InsertStringItem(index, indent + label)
self.menu_items.SetStringItem(index, 2, name)
self.menu_items.SetStringItem(index, 3, check_radio)
self.menu_items.SetStringItem(index, 5, id)
# fix bug 698074
self.menu_items.SetItemState(index, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
# force=True so the fields are refreshed even though selected_index already matches
self._select_item(index, force=True)
def add_separator(self, event):
"Event handler called when the Add Separator button is clicked; separators use '---' as label, name and id"
index = self.selected_index + 1
label = '---'
if not self.menu_items.GetItemCount():
# list was empty: the edit fields were disabled, re-enable them
self._enable_fields()
if index < 0:
index = self.menu_items.GetItemCount()
elif index > 0:
# inherit the menu level of the preceding item
label = " " * self.item_level(index-1) + '---'
self.menu_items.InsertStringItem(index, label)
self.menu_items.SetStringItem(index, 2, '---') # name
self.menu_items.SetStringItem(index, 5, '---') # id
# fix bug 698074
self.menu_items.SetItemState(index, wx.LIST_STATE_SELECTED, wx.LIST_STATE_SELECTED)
def show_item(self, event):
    """List-selection handler: load the clicked row into the edit fields."""
    if self._ignore_events:
        # selection was changed programmatically; nothing to do
        event.Skip()
        return
    self._select_item(event.GetIndex())
    event.Skip()
def _select_item(self, index, force=False):
"""Select row *index* in the list and load its values into the edit fields.

force: refresh the fields even if *index* is already selected
(used after inserting a new item).
"""
# out-of-range or already-selected (and not forced): no-op
if index >= self.menu_items.GetItemCount() or index<0 or (index==self.selected_index and not force): return
# suppress the EVT_TEXT/EVT_LIST handlers while we update the UI;
# reset to False at the end of _enable_buttons()
self._ignore_events = True
self.menu_items.Select(index)
self.selected_index = index
if self.menu_items.GetItem(index, 2).GetText() != '---':
# skip if the selected item is a separator
for (s, i) in ((self.label, 0), (self.event_handler, 1), (self.name, 2), (self.help_str, 4), (self.id, 5)):
# at this point, the value should be validated already
s.SetBackgroundColour( compat.wx_SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW) )
s.SetValue(self.menu_items.GetItem(index, i).GetText())
# strip the level indentation from the label shown in the edit field
self.label.SetValue(self.label.GetValue().lstrip())
try:
self.check_radio.SetSelection( int(self.menu_items.GetItem(index, 3).GetText()) )
except:
# non-numeric/empty type column: fall back to "Normal"
self.check_radio.SetSelection(0)
self._enable_fields(True)
# set focus to text field again
focus = self.FindFocus()
if not isinstance(focus, wx.TextCtrl) and isinstance(self._last_focus, wx.TextCtrl):
self._last_focus.SetFocus()
else:
# separator row: clear and disable the edit fields
for c in (self.label, self.event_handler, self.name, self.help_str, self.id):
c.SetValue("")
self._enable_fields(False)
self._enable_buttons()
if force:
self.label.SetFocus()
self.label.SelectAll()
def _enable_buttons(self):
"""Enable/disable the left/right/up/down buttons for the current selection.

Side effect: resets self._ignore_events to False; _select_item relies
on this to re-enable event handling after a programmatic update.
"""
# activate the left/right/up/down buttons
index = self.selected_index
item_level = self.item_level(index)
item_count = self.menu_items.GetItemCount()
# can't un-indent an item that has children (next item at deeper level)
self.move_left.Enable( not (index+1<item_count and (item_level < self.item_level(index+1)) ))
# can only indent below an item of the same or deeper level
self.move_right.Enable( index>=1 and item_level <= self.item_level(index-1) )
self.move_up.Enable( index>0 )
self.move_down.Enable( index<item_count-1 )
self._ignore_events = False
def on_label_edited(self, event):
    """Mirror the label field (re-indented to the item's level) into column 0."""
    if not self._ignore_events:
        row = self.selected_index
        indent = " " * self.item_level(row)
        self.menu_items.SetStringItem(row, 0, indent + self.label.GetValue().lstrip())
    event.Skip()
def on_event_handler_edited(self, event):
    """Validate the handler name against handler_re; mirror it into column 1 when valid."""
    value = self.event_handler.GetValue()
    # empty is allowed; otherwise it must match the identifier pattern
    valid = not value or self.handler_re.match(value) is not None
    if valid:
        colour = compat.wx_SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW)
    else:
        colour = wx.RED
    self.event_handler.SetBackgroundColour(colour)
    self.event_handler.Refresh()
    if valid and not self._ignore_events:
        self.menu_items.SetStringItem(self.selected_index, 1, value)
    event.Skip()
def on_name_edited(self, event):
    """Validate the attribute name (syntax + uniqueness); mirror it into column 2 when valid."""
    value = self.name.GetValue()
    # empty is allowed; otherwise it must match the identifier pattern
    valid = not value or self.name_re.match(value) is not None
    if valid:
        colour = compat.wx_SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW)
    else:
        colour = wx.RED
    self.name.SetBackgroundColour(colour)
    if value and valid and not self._ignore_events:
        # a non-empty name must be unique across all other items
        for row in range(self.menu_items.GetItemCount()):
            if row == self.selected_index:
                continue
            if self.menu_items.GetItem(row, 2).GetText() == value:
                valid = False
                self.name.SetBackgroundColour(wx.Colour(255, 255, 0, 255))  # YELLOW
                break
    self.name.Refresh()
    if valid and not self._ignore_events:
        self.menu_items.SetStringItem(self.selected_index, 2, value)
    event.Skip()
def on_type_edited(self, event):
    """Mirror the radio-box selection (as its string index) into column 3."""
    if not self._ignore_events:
        selection = str(self.check_radio.GetSelection())
        self.menu_items.SetStringItem(self.selected_index, 3, selection)
    event.Skip()
def on_help_str_edited(self, event):
    """Mirror the help-string field into column 4."""
    if not self._ignore_events:
        text = self.help_str.GetValue()
        self.menu_items.SetStringItem(self.selected_index, 4, text)
    event.Skip()
def on_id_edited(self, event):
    """Mirror the id field into column 5."""
    if not self._ignore_events:
        text = self.id.GetValue()
        self.menu_items.SetStringItem(self.selected_index, 5, text)
    event.Skip()
def item_level(self, index, label=None):
    """Return the indentation level of the menu item at the given index.

    index: row in the menu_items list control.
    label: optional label text; when given it is used directly instead of
           reading column 0 of the list (the original accepted this
           parameter but unconditionally overwrote it — dead parameter).
    Returns the number of leading 4-space indent units in the label.
    """
    if label is None:
        label = self.menu_items.GetItem(index, 0).GetText()
    return (len(label) - len(label.lstrip())) // 4
def remove_item(self, event):
"Event handler called when the Remove button is clicked"
if self.selected_index < 0: return
index = self.selected_index+1
if index<self.menu_items.GetItemCount() and (self.item_level(self.selected_index) < self.item_level(index)):
# the item to be deleted is parent to the | |
<reponame>thetisproject/cofs<gh_stars>0
"""
TELEMAC-2D `Point Discharge with Diffusion' test case
=====================================================
Solves a steady-state tracer advection equation in a
rectangular domain with uniform fluid velocity, constant
diffusivity and a constant tracer source term. Neumann
conditions are imposed on the channel walls, an inflow
condition is imposed on the left-hand boundary, and the
right-hand boundary remains open. An analytical solution
involving modified Bessel functions exists [1].
The two different functional quantities of interest considered
in [2] are evaluated on each mesh and convergence is assessed.
A Gaussian parametrisation for the point source is adopted,
with the radius calibrated using gradient-based optimisation.
Further details for the test case can be found in [1].
[1] <NAME>, <NAME>, <NAME>, "TELEMAC modeling system:
2D hydrodynamics TELEMAC-2D software release 7.0 user
manual." Paris: R&D, Electricite de France, p. 134
(2014).
[2] <NAME>, <NAME>, <NAME>, <NAME>,
"Goal-Oriented Error Estimation and Mesh Adaptation for
Tracer Transport Modelling", submitted to Computer
Aided Design (2021).
[3] <NAME>, W.H. Press, <NAME>, <NAME>,
"Numerical recipes in C", Press Syndicate of the University
of Cambridge, New York (1992).
"""
from thetis import *
import thetis.diagnostics as diagnostics
import pytest
def bessi0(x):
    """
    Modified Bessel function of the first kind, I0(x), as a UFL
    expression. Polynomial approximation with coefficients from [3].
    """
    ax = abs(x)
    # small-argument polynomial, valid for |x| <= 3.75
    t = x/3.75
    t *= t
    small = 1.0 + t*(3.5156229 + t*(3.0899424 + t*(1.2067492 + t*(
        0.2659732 + t*(0.360768e-1 + t*0.45813e-2)))))
    # large-argument asymptotic expansion, valid for |x| > 3.75
    u = 3.75/ax
    large = exp(ax)/sqrt(ax)*(0.39894228 + u*(0.1328592e-1 + u*(
        0.225319e-2 + u*(-0.157565e-2 + u*(0.916281e-2 + u*(
            -0.2057706e-1 + u*(0.2635537e-1 + u*(-0.1647633e-1 + u*0.392377e-2))))))))
    return conditional(le(ax, 3.75), small, large)
def bessk0(x):
    """
    Modified Bessel function of the second kind, K0(x), as a UFL
    expression. Polynomial approximation with coefficients from [3].
    """
    # small-argument expansion (x < 2), built on bessi0
    t = x*x/4.0
    small = -ln(x/2.0)*bessi0(x) + (-0.57721566 + t*(0.42278420 + t*(
        0.23069756 + t*(0.3488590e-1 + t*(0.262698e-2 + t*(0.10750e-3 + t*0.74e-5))))))
    # large-argument asymptotic expansion (x >= 2)
    u = 2.0/x
    large = exp(-x)/sqrt(x)*(1.25331414 + u*(-0.7832358e-1 + u*(0.2189568e-1 + u*(
        -0.1062446e-1 + u*(0.587872e-2 + u*(-0.251540e-2 + u*0.53208e-3))))))
    return conditional(ge(x, 2), large, small)
class PointDischargeParameters(object):
"""
Problem parameter class, including point source representation.
Delta functions are difficult to represent in numerical models. Here we
use a Gaussian approximation with a small radius. The small radius has
been calibrated against the analytical solution. See [2] for details.
"""
def __init__(self, offset, tracer_element_family):
"""
:arg offset: if True, the receiver region is shifted in the
positive y-direction (the 'offset' configuration).
:arg tracer_element_family: 'dg' or 'cg'; selects the calibrated
source radius for that discretisation.
"""
self.offset = offset
# Physical parameters
self.diffusivity = Constant(0.1)
self.viscosity = None
self.drag = Constant(0.0025)
self.uv = Constant(as_vector([1.0, 0.0]))
self.elev = Constant(0.0)
# Parametrisation of point source
self.source_x, self.source_y = 2.0, 5.0
# radius calibrated separately for each element family (see [2])
self.source_r = 0.05606298 if tracer_element_family == 'dg' else 0.05606388
self.source_value = 100.0
# Specification of receiver region
self.receiver_x = 20.0
self.receiver_y = 7.5 if self.offset else 5.0
self.receiver_r = 0.5
# Boundary conditions
self.boundary_conditions = {
'tracer': {
1: {'value': Constant(0.0)}, # inflow
# ouflow -> natural BC
},
'shallow_water': {
1: {
'uv': Constant(as_vector([1.0, 0.0])),
'elev': Constant(0.0)
}, # inflow
2: {
'uv': Constant(as_vector([1.0, 0.0])),
'elev': Constant(0.0)
}, # outflow
}
}
def ball(self, mesh, scaling=1.0, eps=1.0e-10):
"""Indicator function of the circular receiver region (UFL expression)."""
x, y = SpatialCoordinate(mesh)
expr = lt((x-self.receiver_x)**2 + (y-self.receiver_y)**2, self.receiver_r**2 + eps)
return conditional(expr, scaling, 0.0)
def gaussian(self, mesh, scaling=1.0):
"""Gaussian bump centred at the point source (UFL expression)."""
x, y = SpatialCoordinate(mesh)
expr = exp(-((x-self.source_x)**2 + (y-self.source_y)**2)/self.source_r**2)
return scaling*expr
def source(self, fs):
"""Tracer source term: Gaussian approximation of the point discharge."""
return self.gaussian(fs.mesh(), scaling=self.source_value)
def bathymetry(self, fs):
"""Constant bathymetry of 5 m in the given function space."""
return Function(fs).assign(5.0)
def quantity_of_interest_kernel(self, mesh):
"""QoI kernel: receiver indicator rescaled so its integral equals the exact disc area."""
area = assemble(self.ball(mesh)*dx)
area_analytical = pi*self.receiver_r**2
# guard against a mesh too coarse to resolve the receiver (area == 0)
scaling = 1.0 if numpy.allclose(area, 0.0) else area_analytical/area
return self.ball(mesh, scaling=scaling)
def quantity_of_interest_form(self, sol):
"""UFL form of the QoI: integral of the kernel times the tracer solution."""
kernel = self.quantity_of_interest_kernel(sol.function_space().mesh())
return inner(kernel, sol)*dx(degree=12)
def quantity_of_interest(self, sol):
"""Evaluate the QoI for a given tracer solution."""
return assemble(self.quantity_of_interest_form(sol))
def analytical_quantity_of_interest(self, mesh):
"""
The analytical solution can be found in [1]. Due to the modified
Bessel function, it cannot be evaluated exactly and instead must
be computed using a quadrature rule.
"""
x, y = SpatialCoordinate(mesh)
x0, y0 = self.source_x, self.source_y
u = self.uv[0]
D = self.diffusivity
Pe = 0.5*u/D # Mesh Peclet number
r = sqrt((x-x0)*(x-x0) + (y-y0)*(y-y0))
r = max_value(r, self.source_r) # (Bessel fn explodes at (x0, y0))
sol = 0.5/(pi*D)*exp(Pe*(x-x0))*bessk0(Pe*r)
kernel = self.quantity_of_interest_kernel(mesh)
return assemble(kernel*sol*dx(degree=12))
def solve_tracer(mesh2d, offset, hydrodynamics=False, solve_adjoint=False, **model_options):
"""
Solve the `Point Discharge with Diffusion' steady-state tracer transport
test case from [1]. This problem has a source term, which involves a
Dirac delta function. It also has an analytical solution, which may be
expressed in terms of modified Bessel functions.
As in [2], convergence of two diagnostic quantities of interest is
assessed. These are simple integrals of the tracer concentration over
circular 'receiver' regions. The 'aligned' receiver is directly downstream
in the flow and the 'offset' receiver is shifted in the positive y-direction.
:arg mesh2d: mesh upon which to solve the tracer transport problem.
:arg offset: toggle between aligned and offset source/receiver.
:kwarg hydrodynamics: solve shallow water equations?
:kwarg solve_adjoint: solve the adjoint problem as well as the forward one?
:return: the tracer solution, or (solver object, adjoint solution) if
solve_adjoint is True.
"""
P1_2d = FunctionSpace(mesh2d, "CG", 1)
# Set up parameter class
tracer_element_family = model_options.get("tracer_element_family", "cg")
params = PointDischargeParameters(offset, tracer_element_family)
source = params.source(P1_2d)
# Solve tracer transport problem
solver_obj = solver2d.FlowSolver2d(mesh2d, params.bathymetry(P1_2d))
options = solver_obj.options
options.swe_timestepper_type = 'SteadyState'
options.tracer_timestepper_type = 'SteadyState'
options.tracer_element_family = tracer_element_family
options.timestep = 20.0
options.simulation_end_time = 18.0
options.simulation_export_time = 18.0
# direct solves via MUMPS for both sub-problems
options.swe_timestepper_options.solver_parameters['pc_factor_mat_solver_type'] = 'mumps'
options.swe_timestepper_options.solver_parameters['snes_monitor'] = None
options.tracer_timestepper_options.solver_parameters['pc_factor_mat_solver_type'] = 'mumps'
options.tracer_timestepper_options.solver_parameters['snes_monitor'] = None
options.fields_to_export = ['tracer_2d', 'uv_2d', 'elev_2d']
# Hydrodynamics
options.element_family = 'dg-dg'
options.horizontal_viscosity = params.viscosity
options.quadratic_drag_coefficient = params.drag
options.use_lax_friedrichs_velocity = True
options.lax_friedrichs_velocity_scaling_factor = Constant(1.0)
# Passive tracer
options.add_tracer_2d('tracer_2d', 'Depth averaged tracer', 'Tracer2d',
diffusivity=params.diffusivity, source=source)
options.horizontal_velocity_scale = Constant(1.0)
options.horizontal_diffusivity_scale = Constant(0.0)
options.tracer_only = not hydrodynamics
# stabilisation: SUPG for CG, Lax-Friedrichs + slope limiting for DG
options.use_supg_tracer = tracer_element_family == 'cg'
options.use_lax_friedrichs_tracer = tracer_element_family == 'dg'
options.lax_friedrichs_tracer_scaling_factor = Constant(1.0)
options.use_limiter_for_tracers = tracer_element_family == 'dg'
# caller-supplied options override the defaults above
options.update(model_options)
# Initial and boundary conditions
solver_obj.bnd_functions = params.boundary_conditions
# near-zero initial velocity when solving hydrodynamics, else the fixed flow
uv_init = Constant(as_vector([1.0e-08, 0.0])) if hydrodynamics else params.uv
solver_obj.assign_initial_conditions(tracer=source, uv=uv_init, elev=params.elev)
# Solve
solver_obj.iterate()
c_2d = solver_obj.fields.tracer_2d
if not solve_adjoint:
return c_2d
# Solve adjoint problem: (dF/dc)^T lambda = dJ/dc
J = params.quantity_of_interest_form(c_2d)
F = solver_obj.timestepper.timesteppers["tracer_2d"].F
Q_2d = solver_obj.function_spaces.Q_2d
adj_sol = Function(Q_2d)
dFdc = derivative(F, c_2d, TrialFunction(Q_2d))
dFdc_transpose = adjoint(dFdc)
dJdc = derivative(J, c_2d, TestFunction(Q_2d))
solve(dFdc_transpose == dJdc, adj_sol)
return solver_obj, adj_sol
def run_convergence(offset, num_levels=3, plot=False, **kwargs):
"""
Assess convergence of the quantity of interest with increasing DoF count.
:arg offset: toggle between aligned and offset source/receiver.
:kwarg num_levels: number of uniform refinements to consider.
:kwarg plot: toggle plotting of convergence curves.
:kwargs: other kwargs are passed to `solve_tracer`.
:raises AssertionError: if the observed convergence rate is sublinear.
"""
J = []
dof_count = []
tracer_element_family = kwargs.get('tracer_element_family')
params = PointDischargeParameters(offset, tracer_element_family)
# Run model on a sequence of uniform meshes and compute QoI error
for n in range(num_levels):
# each level doubles the resolution of the 50 x 10 channel
mesh2d = RectangleMesh(100*2**n, 20*2**n, 50, 10)
sol = solve_tracer(mesh2d, offset, **kwargs)
J.append(params.quantity_of_interest(sol))
dof_count.append(sol.function_space().dof_count)
# analytical QoI is evaluated on the finest mesh
J_analytical = params.analytical_quantity_of_interest(sol.function_space().mesh())
relative_error = numpy.abs((numpy.array(J) - J_analytical)/J_analytical)
# Plot convergence curves
if plot:
import matplotlib.pyplot as plt
fig, axes = plt.subplots()
axes.loglog(dof_count, relative_error, '--x')
axes.set_xlabel("DoF count")
axes.set_ylabel("QoI error")
axes.grid(True)
alignment = 'offset' if offset else 'aligned'
fname = f"steady_state_convergence_{alignment}_{tracer_element_family}.png"
plot_dir = create_directory(os.path.join(os.path.dirname(__file__), 'outputs'))
plt.savefig(os.path.join(plot_dir, fname))
# Check for linear convergence
# rate estimated from the slope between the coarsest and finest level (log-log)
delta_y = numpy.log10(relative_error[-1]) - numpy.log10(relative_error[0])
delta_x = numpy.log10(dof_count[-1]) - numpy.log10(dof_count[0])
rate = abs(delta_y/delta_x)
assert rate > 0.9, f"Sublinear convergence rate {rate:.4f}"
def estimate_error(mesh, offset, **model_options):
"""
Compute dual weighted residual (DWR) error indicators for the point
discharge problem using a two-level mesh hierarchy.
:arg mesh: base mesh; one level of uniform refinement is added.
:arg offset: toggle between aligned and offset source/receiver.
:kwargs: passed through to `solve_tracer` (solve_adjoint is forced on).
:return: (forward solution on the coarse mesh, adjoint solution on the
coarse mesh, P0 error indicator field).
"""
model_options["solve_adjoint"] = True
# Create a two level mesh hierarchy
mesh0, mesh1 = MeshHierarchy(mesh, 1)
tm = TransferManager()
# Solve both forward and adjoint on both meshes
solver_obj, a0 = solve_tracer(mesh0, offset, **model_options)
f0 = solver_obj.fields.tracer_2d
P0 = solver_obj.function_spaces.P0_2d
solver_obj, a1 = solve_tracer(mesh1, offset, **model_options)
# Approximate adjoint error: fine adjoint minus prolonged coarse adjoint
Q1 = solver_obj.function_spaces.Q_2d
a0plg = Function(Q1)
tm.prolong(a0, a0plg)
a1err = Function(Q1).assign(a1 - a0plg)
# Compute dual weighted residual
ei = diagnostics.TracerDualWeightedResidual2D(solver_obj, a1err)
ei.solve()
# Project down to base space
error = Function(P0, name="Error indicator")
error.project(ei.error)
# indicators are used as magnitudes, so take the absolute value
error.interpolate(abs(error))
# Plot
if not model_options.get("no_exports", False):
File("outputs/forward.pvd").write(f0)
a0.rename("Adjoint solution")
File("outputs/adjoint.pvd").write(a0)
File("outputs/error.pvd").write(error)
return f0, a0, error
# ---------------------------
# standard tests for pytest
# ---------------------------
@pytest.fixture(params=['dg', 'cg'])
def family(request):
"""Tracer finite element family under test: 'dg' or 'cg'."""
return request.param
@pytest.fixture(params=[False, True], ids=["aligned", "offset"])
def offset(request):
"""Receiver placement: False = aligned with the source, True = offset."""
return request.param
def test_hydrodynamics(offset, family):
    """Coarse-mesh smoke test: the tracer solve completes without error."""
    mesh = RectangleMesh(100, 20, 50, 10)
    solve_tracer(mesh, offset, no_exports=True, tracer_element_family=family)
def test_convergence(offset, family):
    """Check (at least) linear QoI convergence under uniform mesh refinement."""
    run_convergence(offset, no_exports=True, tracer_element_family=family)
def test_dwr(offset, family):
    """Check that dual weighted residual error contributions can be computed."""
    mesh = RectangleMesh(100, 20, 50, 10)
    estimate_error(mesh, offset, no_exports=True, tracer_element_family=family)
# ---------------------------
# run individual setup for debugging
# ---------------------------
if __name__ == "__main__":
| |
# Copyright (C) 2013 Ion Torrent Systems, Inc. All Rights Reserved
from iondb.rundb import models
from django.utils.translation import ugettext_lazy
import datetime
from django.utils import timezone
import logging
from django.db import transaction
from iondb.rundb import models
from iondb.rundb.models import (
Sample,
SampleSet,
SampleSetItem,
SampleAttribute,
SampleGroupType_CV,
SampleAttributeDataType,
SampleAttributeValue,
)
from django.conf import settings
from django.contrib.auth.models import User
import sample_validator
from iondb.utils import validation
from distutils.version import StrictVersion
from traceback import format_exc
from datetime import datetime
from django.shortcuts import get_object_or_404
logger = logging.getLogger(__name__)
COLUMN_SAMPLE_CSV_VERSION = "CSV Version (required)"
COLUMN_SAMPLE_EXT_ID = "Sample ID"
COLUMN_SAMPLE_NAME = "Sample Name (required)"
COLUMN_GENDER = "Gender"
COLUMN_GROUP_TYPE = "Type"
COLUMN_GROUP = "Group"
COLUMN_SAMPLE_DESCRIPTION = "Description"
COLUMN_BARCODE_KIT = "Barcodekit"
COLUMN_BARCODE = "Barcode"
COLUMN_CANCER_TYPE = "Cancer Type"
COLUMN_CELLULARITY_PCT = "Cellularity %"
COLUMN_NUCLEOTIDE_TYPE = "Nucleotide Type"
ALTERNATE_COLUMN_NUCLEOTIDE_TYPE = "DNA/RNA/Fusions"
COLUMN_PCR_PLATE_POSITION = "PCR Plate Position"
COLUMN_BIOPSY_DAYS = "Biopsy Days"
COLUMN_CELL_NUM = "Cell Number"
COLUMN_COUPLE_ID = "Couple ID"
COLUMN_EMBRYO_ID = "Embryo ID"
COLUMN_CONTROLTYPE = "Control Type"
COLUMN_SAMPLE_SOURCE = "Sample Source"
COLUMN_PANEL_POOL_TYPE = "Panel Pool Type"
COLUMN_SAMPLE_COLLECTION_DATE = "Sample Collection Date"
COLUMN_SAMPLE_RECEIPT_DATE = "Sample Receipt Date"
COLUMN_SAMPLE_POPULATION = "Population"
COLUMN_SAMPLE_MOUSE_STRAINS = "Mouse Strains"
def process_csv_sampleSet(csvSampleDict, request, user, sampleSet_ids):
""" read csv contents and convert data to raw data to prepare for sample persistence
returns: a collection of error messages if errors found, a dictionary of raw data values
"""
logger.debug(
"ENTER import_sample_processor.process_csv_sampleSet() csvSampleDict=%s; "
% (csvSampleDict)
)
failed = []
# NOTE: isToSkipRow is never set to True in this function; callers
# presumably use it as part of a shared return signature
isToSkipRow = False
# check if mandatory fields are present
requiredColumns = ["sampleName", "sample"]
isRequired = [
csvSampleDict.get(requiredColumn, None) for requiredColumn in requiredColumns
]
# error is recorded only when NEITHER sampleName nor sample is present
if not any(isRequired):
failed.append(
(COLUMN_SAMPLE_NAME, validation.required_error(COLUMN_SAMPLE_NAME))
)
# NOTE(review): persistence proceeds even when `failed` is non-empty —
# confirm the caller rolls back on errors
sample, sampleSetItem, ssi_sid = _create_sampleSetItem(
csvSampleDict, request, user, sampleSet_ids
)
siv_sid = _create_sampleAttributeValue(csvSampleDict, request, user, sample)
return failed, sample, sampleSetItem, isToSkipRow, ssi_sid, siv_sid
def get_sampleSetItem_kwargs(csvSampleDict, user):
"""Translate a CSV row dict (keyed by the COLUMN_* display headers) into
a kwargs dict suitable for creating a SampleSetItem/Sample.

Controlled-vocabulary fields (gender, group type, control type, cancer
type, PCR plate row, nucleotide type) are mapped to their internal values
via sample_validator; validation errors are assumed to have been handled
earlier, so the isValid/errorMessage results are intentionally discarded.
"""
sampleDisplayedName = csvSampleDict.get(COLUMN_SAMPLE_NAME, "").strip()
sampleExtId = csvSampleDict.get(COLUMN_SAMPLE_EXT_ID, "").strip()
sampleGender = csvSampleDict.get(COLUMN_GENDER, "").strip()
sampleControlType = csvSampleDict.get(COLUMN_CONTROLTYPE, "").strip()
sampleGroupType = csvSampleDict.get(COLUMN_GROUP_TYPE, None)
sampleGroup = csvSampleDict.get(COLUMN_GROUP, "0").strip()
# an explicitly empty Group column falls back to "0" as well
if not sampleGroup:
sampleGroup = "0"
sampleDescription = csvSampleDict.get(COLUMN_SAMPLE_DESCRIPTION, "").strip()
barcodeKit = csvSampleDict.get(COLUMN_BARCODE_KIT, "").strip()
barcodeAssignment = csvSampleDict.get(COLUMN_BARCODE, "").strip()
pcrPlateRow = csvSampleDict.get(COLUMN_PCR_PLATE_POSITION, "").strip()
# nucleotide type may come from either of two accepted column headers
nucleotideType = (
csvSampleDict.get(COLUMN_NUCLEOTIDE_TYPE, "").strip()
or csvSampleDict.get(ALTERNATE_COLUMN_NUCLEOTIDE_TYPE, "").strip()
)
sampleSource = csvSampleDict.get(COLUMN_SAMPLE_SOURCE, "").strip()
panelPoolType = csvSampleDict.get(COLUMN_PANEL_POOL_TYPE, "").strip()
sampleCollectionDate = csvSampleDict.get(COLUMN_SAMPLE_COLLECTION_DATE, "").strip()
sampleReceiptDate = csvSampleDict.get(COLUMN_SAMPLE_RECEIPT_DATE, "").strip()
# dates are expected in ISO format YYYY-MM-DD; raises ValueError otherwise
if sampleCollectionDate:
sampleCollectionDate = datetime.strptime(
str(sampleCollectionDate), "%Y-%m-%d"
).date()
if sampleReceiptDate:
sampleReceiptDate = datetime.strptime(str(sampleReceiptDate), "%Y-%m-%d").date()
cancerType = csvSampleDict.get(COLUMN_CANCER_TYPE, "").strip()
cellularityPct = csvSampleDict.get(COLUMN_CELLULARITY_PCT, None)
# normalise whitespace-only cellularity to None
if cellularityPct:
cellularityPct = cellularityPct.strip()
if not cellularityPct:
cellularityPct = None
biopsyDays = csvSampleDict.get(COLUMN_BIOPSY_DAYS, "0").strip()
cellNum = csvSampleDict.get(COLUMN_CELL_NUM, "").strip()
coupleId = csvSampleDict.get(COLUMN_COUPLE_ID, None)
if coupleId:
coupleId = coupleId.strip()
embryoId = csvSampleDict.get(COLUMN_EMBRYO_ID, "").strip()
population = csvSampleDict.get(COLUMN_SAMPLE_POPULATION, "").strip()
mouseStrains = csvSampleDict.get(COLUMN_SAMPLE_MOUSE_STRAINS, "").strip()
currentDateTime = timezone.now() ##datetime.datetime.now()
# internal sample name: spaces are not allowed, replace with underscores
sampleName = sampleDisplayedName.replace(" ", "_")
isValid, errorMessage, nucleotideType_internal_value = sample_validator.validate_nucleotideType(
nucleotideType, field_label=COLUMN_NUCLEOTIDE_TYPE
)
# validation has been done already, this is just to get the official value
isValid, errorMessage, gender_CV_value = sample_validator.validate_sampleGender(
sampleGender, field_label=COLUMN_GENDER
)
isValid, errorMessage, role_CV_value = sample_validator.validate_sampleGroupType(
sampleGroupType, field_label=COLUMN_GROUP_TYPE
)
isValid, errorMessage, controlType_CV_value = sample_validator.validate_controlType(
sampleControlType, field_label=COLUMN_CONTROLTYPE
)
isValid, errorMessage, cancerType_CV_value = sample_validator.validate_cancerType(
cancerType, field_label=COLUMN_CANCER_TYPE
)
isValid, errorMessage, pcrPlateRow_internal_value = sample_validator.validate_pcrPlateRow(pcrPlateRow, field_label=COLUMN_PCR_PLATE_POSITION)
sampleSetItem_kwargs = {
"sampleName": sampleName,
"sampleDisplayedName": sampleDisplayedName,
"sampleExtId": sampleExtId,
"barcodeKit": barcodeKit,
"barcodeAssignment": barcodeAssignment,
"gender": gender_CV_value,
"relationshipRole": role_CV_value,
"relationshipGroup": sampleGroup,
"cancerType": cancerType_CV_value,
"pcrPlateRow": pcrPlateRow_internal_value,
"nucleotideType": nucleotideType_internal_value,
"sampleSource": sampleSource,
"panelPoolType": panelPoolType,
"cellularityPct": cellularityPct if cellularityPct else None,
"biopsyDays": int(biopsyDays) if biopsyDays else 0,
"cellNum": cellNum,
"coupleId": coupleId,
"embryoId": embryoId,
"creator": user,
"creationDate": currentDateTime,
"lastModifiedUser": user,
"lastModifiedDate": currentDateTime,
"description": sampleDescription,
"controlType": controlType_CV_value,
"displayedName": sampleDisplayedName,
"sampleStatus": "created",
"sampleDescription": sampleDescription,
"sampleCollectionDate": sampleCollectionDate or None,
"sampleReceiptDate": sampleReceiptDate or None,
"population": population,
"mouseStrains": mouseStrains,
"date": timezone.now(),
}
return sampleSetItem_kwargs
def _create_sampleSetItem(csvSampleDict, request, user, sampleSet_id):
    """
    Create (or update) the Sample and SampleSetItem records described by one
    imported CSV row.

    Returns a tuple (sample, sampleSetItem, savepoint_id); savepoint_id is a
    transaction savepoint taken after the item has been persisted so the
    caller can roll back this row independently of the rest of the import.
    """
    sampleDisplayedName = csvSampleDict.get("sampleDisplayedName", "")
    sampleExtId = csvSampleDict.get("sampleExtId", "")
    sampleGender = csvSampleDict.get("gender", "")
    sampleControlType = csvSampleDict.get("controlType", "")
    relationshipRole = csvSampleDict.get("relationshipRole", "")
    sampleGroup = csvSampleDict.get("relationshipGroup", "") or 0
    sampleDescription = csvSampleDict.get("sampleDescription", "")
    barcodeKit = csvSampleDict.get("barcodeKit", "")
    barcodeAssignment = csvSampleDict.get("barcodeAssignment", "")
    nucleotideType = csvSampleDict.get("nucleotideType", "")
    cancerType = csvSampleDict.get("cancerType", "")
    cellularityPct = csvSampleDict.get("cellularityPct", None)
    pcrPlateRow = csvSampleDict.get("pcrPlateRow", "")
    # BUGFIX: this previously read csvSampleDict.get("coupleId", "0")
    # (copy/paste error), silently importing the couple id as the
    # biopsy-day count. Read the correct key, matching the kwarg below.
    biopsyDays = csvSampleDict.get("biopsyDays", "0")
    cellNum = csvSampleDict.get("cellNum", "")
    coupleId = csvSampleDict.get("coupleId", None)
    embryoId = csvSampleDict.get("embryoId", "")
    assayGroup = csvSampleDict.get("assayGroup", "")
    sampleSource = csvSampleDict.get("sampleSource", "")
    panelPoolType = csvSampleDict.get("panelPoolType", "")
    tubePosition = csvSampleDict.get("tubePosition", "")
    population = csvSampleDict.get("population", "")
    mouseStrains = csvSampleDict.get("mouseStrains", "")
    sampleCollectionDate = csvSampleDict.get("sampleCollectionDate", "")
    sampleReceiptDate = csvSampleDict.get("sampleReceiptDate", "")
    dnabarcodeID = csvSampleDict.get("dnabarcode", "")
    # Internal sample names may not contain spaces.
    sampleName = sampleDisplayedName.replace(" ", "_")
    samplePK = csvSampleDict.get("sample", "")
    sample_kwargs = {
        "displayedName": sampleDisplayedName,
        "status": "created",
        "description": sampleDescription,
        "date": timezone.now(),
    }
    isCreated = None
    if samplePK:
        # An explicit sample pk means we are re-importing an existing sample.
        sample = Sample.objects.get(pk=samplePK)
    else:
        sample, isCreated = Sample.objects.get_or_create(
            name=sampleName, externalId=sampleExtId, defaults=sample_kwargs
        )
        if isCreated:
            logger.debug(
                "import_sample_processor._create_sampleSetItem() new sample created for sample=%s; id=%d"
                % (sampleDisplayedName, sample.id)
            )
        else:
            # Existing sample: only the description is refreshed from the CSV.
            if sample.description != sampleDescription:
                sample.description = sampleDescription
                sample.save()
                logger.debug(
                    "import_sample_processor._create_sampleSetItem() just updated sample description for sample=%s; id=%d"
                    % (sampleDisplayedName, sample.id)
                )
    logger.debug(
        "import_sample_processor._create_sampleSetItem() going to create sampleSetItem for sample=%s; sampleSetId=%s in sampleSet_ids=%s"
        % (sampleDisplayedName, str(sampleSet_id), sampleSet_id)
    )
    currentDateTime = timezone.now()
    # Resolve the barcode either by kit+assignment or by direct pk.
    dnabarcode = None
    if barcodeKit and barcodeAssignment:
        dnabarcode = models.dnaBarcode.objects.get(
            name__iexact=barcodeKit, id_str__iexact=barcodeAssignment
        )
    elif dnabarcodeID:
        dnabarcode = models.dnaBarcode.objects.get(id=dnabarcodeID)
    pcrPlateColumn = "1" if pcrPlateRow else ""
    sampleSetItem_kwargs = {
        "gender": sampleGender,
        "relationshipRole": relationshipRole,
        "relationshipGroup": sampleGroup,
        "cancerType": cancerType,
        "cellularityPct": cellularityPct if cellularityPct else None,
        "biopsyDays": int(biopsyDays) if biopsyDays else 0,
        "cellNum": cellNum,
        "coupleId": coupleId,
        "embryoId": embryoId,
        "creator": user,
        "creationDate": currentDateTime,
        "lastModifiedUser": user,
        "lastModifiedDate": currentDateTime,
        "description": sampleDescription,
        "controlType": sampleControlType,
        "assayGroup": assayGroup,
        "sampleSource": sampleSource,
        "panelPoolType": panelPoolType,
        "tubePosition": tubePosition,
        "population": population,
        "mouseStrains": mouseStrains,
        "sampleCollectionDate": sampleCollectionDate,
        "sampleReceiptDate": sampleReceiptDate,
    }
    item_id = csvSampleDict.get("id", "")
    if item_id:
        # Update path: the CSV row points at an existing SampleSetItem.
        sampleSetItem = get_object_or_404(SampleSetItem, pk=item_id)
        sampleSetItem_kwargs["pcrPlateRow"] = pcrPlateRow
        sampleSetItem_kwargs["dnabarcode"] = dnabarcode
        for field, value in sampleSetItem_kwargs.items():
            setattr(sampleSetItem, field, value)
        logger.debug(
            "import_sample_processor._create_sampleSetItem() sampleSetItem_kwargs=%s"
            % (sampleSetItem_kwargs)
        )
        sampleSetItem.save()
    else:
        # Create path: uniqueness is determined by the lookup fields below.
        sampleSetItem, isCreated = SampleSetItem.objects.get_or_create(
            sample=sample,
            sampleSet_id=sampleSet_id,
            description=sampleDescription,
            nucleotideType=nucleotideType,
            dnabarcode=dnabarcode,
            sampleSource=sampleSource,
            panelPoolType=panelPoolType,
            pcrPlateColumn=pcrPlateColumn,
            pcrPlateRow=pcrPlateRow,
            defaults=sampleSetItem_kwargs,
        )
        logger.debug(
            "import_sample_processor._create_sampleSetItem() after get_or_create isCreated=%s; sampleSetItem=%s; samplesetItem.id=%d"
            % (str(isCreated), sampleDisplayedName, sampleSetItem.id)
        )
    # Savepoint lets the caller roll back just this row on later failure.
    ssi_sid = transaction.savepoint()
    return sample, sampleSetItem, ssi_sid
def _create_sampleAttributeValue(csvSampleDict, request, user, sample):
    """
    save sample customer attribute value to db.

    For every active custom SampleAttribute:
      * a mandatory attribute missing from the CSV row gets a default value
        ("0" for Integer attributes, "" otherwise);
      * an existing stored value is updated only when the row supplies a
        truthy value, and DELETED when the row supplies an empty one;
      * a new record is created only when the row supplies a truthy value.

    Returns a transaction savepoint id taken after all attribute writes.
    """
    customAttributes = SampleAttribute.objects.filter(isActive=True)
    currentDateTime = timezone.now() ##datetime.datetime.now()
    for attribute in customAttributes:
        # newValue semantics: None = attribute absent and optional (skip);
        # "" / "0" = mandatory default; anything else = value from the row.
        newValue = None
        if attribute.displayedName not in list(csvSampleDict.keys()):
            # add mandatory custom attributes for an imported sample if user has not added it
            if attribute.isMandatory:
                if attribute.dataType and attribute.dataType.dataType == "Integer":
                    newValue = "0"
                else:
                    newValue = ""
        else:
            newValue = csvSampleDict.get(attribute.displayedName, "")
        if newValue is None:
            logger.debug(
                "import_sample_processor._create_sampleAttributeValue SKIPPING due to NO VALUE for attribute=%s;"
                % (attribute.displayedName)
            )
        else:
            logger.debug(
                "import_sample_processor._create_sampleAttributeValue going to get_or_create sample=%s; attribute=%s; value=%s"
                % (sample.displayedName, attribute.displayedName, newValue)
            )
            sampleAttributeValues = SampleAttributeValue.objects.filter(
                sample=sample, sampleAttribute=attribute
            )
            if sampleAttributeValues:
                sampleAttributeValue = sampleAttributeValues[0]
                # logger.debug("import_sample_processor._create_sampleAttributeValue ORIGINAL VALUE pk=%s; sample=%s; attribute=%s; orig value=%s" %(sampleAttributeValue.id, sample.displayedName, attribute.displayedName, sampleAttributeValue.value))
                # there should only be 1 attribute value for each sample/attribute pair if the old entry has value but the new import doesn't, do not override it.
                if newValue:
                    sampleAttributeValue_kwargs = {
                        "value": newValue,
                        "lastModifiedUser": user,
                        "lastModifiedDate": currentDateTime,
                    }
                    for field, value in sampleAttributeValue_kwargs.items():
                        setattr(sampleAttributeValue, field, value)
                    sampleAttributeValue.save()
                    # logger.debug("import_sample_processor._create_sampleAttributeValue UPDATED pk=%s; sample=%s; attribute=%s; newValue=%s" %(sampleAttributeValue.id, sample.displayedName, attribute.displayedName, newValue))
                else:
                    # Empty incoming value: the stored value is removed entirely.
                    # logger.debug("import_sample_processor._create_sampleAttributeValue going to DELETE pk=%s; sample=%s; attribute=%s; newValue=%s" %(sampleAttributeValue.id, sample.displayedName, attribute.displayedName, newValue))
                    sampleAttributeValue.delete()
            else:
                # create a record only there is a value
                if newValue:
                    sampleAttributeValue_kwargs = {
                        "sample": sample,
                        "sampleAttribute": attribute,
                        "value": newValue,
                        "creator": user,
                        "creationDate": currentDateTime,
                        "lastModifiedUser": user,
                        "lastModifiedDate": currentDateTime,
                    }
                    sampleAttributeValue = SampleAttributeValue(
                        **sampleAttributeValue_kwargs
                    )
                    sampleAttributeValue.save()
                    logger.debug(
                        "import_sample_processor._create_sampleAttributeValue CREATED sampleAttributeValue.pk=%d; sample=%s; attribute=%s; newValue=%s"
                        % (
                            sampleAttributeValue.pk,
                            sample.displayedName,
                            attribute.displayedName,
                            newValue,
                        )
                    )
    # Savepoint lets the caller roll back this row's attribute writes alone.
    siv_sid = transaction.savepoint()
    return siv_sid
def validate_csv_sample(csvSampleDict, request):
"""
validate csv contents and convert user input to raw data to prepare for sample persistence
returns: a collection of error messages if errors found and whether to skip the row
"""
failed = []
isToSkipRow = False
isToAbort = False
logger.debug(
"ENTER import_sample_processor.validate_csv_sample() csvSampleDict=%s; "
% (csvSampleDict)
)
try:
sampleDisplayedName = csvSampleDict.get(COLUMN_SAMPLE_NAME, "").strip()
sampleExtId = csvSampleDict.get(COLUMN_SAMPLE_EXT_ID, "").strip()
sampleControlType = csvSampleDict.get(COLUMN_CONTROLTYPE, "").strip()
sampleGender = csvSampleDict.get(COLUMN_GENDER, "").strip()
sampleGroupType = csvSampleDict.get(COLUMN_GROUP_TYPE, "").strip()
sampleGroup = csvSampleDict.get(COLUMN_GROUP, "").strip()
if not sampleGroup:
sampleGroup = None
sampleDescription = csvSampleDict.get(COLUMN_SAMPLE_DESCRIPTION, "").strip()
barcodeKit = csvSampleDict.get(COLUMN_BARCODE_KIT, "")
barcodeAssignment = csvSampleDict.get(COLUMN_BARCODE, "")
nucleotideType = csvSampleDict.get(COLUMN_NUCLEOTIDE_TYPE, "").strip()
cancerType = csvSampleDict.get(COLUMN_CANCER_TYPE, "").strip()
cellularityPct = csvSampleDict.get(COLUMN_CELLULARITY_PCT, None).strip()
if not cellularityPct:
cellularityPct = None
pcrPlateRow = csvSampleDict.get(COLUMN_PCR_PLATE_POSITION, "").strip()
biopsyDays = csvSampleDict.get(COLUMN_BIOPSY_DAYS, "0").strip()
cellNum = csvSampleDict.get(COLUMN_CELL_NUM, "").strip()
coupleId = csvSampleDict.get(COLUMN_COUPLE_ID, "").strip()
embryoId = csvSampleDict.get(COLUMN_EMBRYO_ID, "").strip()
sampleSource = csvSampleDict.get(COLUMN_SAMPLE_SOURCE, "").strip()
panelPoolType = csvSampleDict.get(COLUMN_PANEL_POOL_TYPE, "").strip()
sampleCollectionDate = csvSampleDict.get(
COLUMN_SAMPLE_COLLECTION_DATE, ""
).strip()
sampleReceiptDate = csvSampleDict.get(COLUMN_SAMPLE_RECEIPT_DATE, "").strip()
population = csvSampleDict.get(COLUMN_SAMPLE_POPULATION, "").strip()
mouseStrains = | |
len(row) >= row_limit:
keyboard.append(row)
row = []
if len(row) > 0:
keyboard.append(row)
# If there are no events
if len(keyboard) == 0:
bot.edit_message_text("You have no events. Use /create to create a new one for your section.",user_id,message_id)
temp_modify.del_temp_modify_event(user_id)
return
bot.edit_message_text('You have chosen section '+ section +'.\n\nPick an event that you want to mark as complete. Students will no longer be able to check their attendance for this event.',user_id,message_id)
markup = types.InlineKeyboardMarkup(keyboard)
bot.edit_message_reply_markup(user_id,message_id,reply_markup=markup)
except Exception as e:
bot.send_message(query.from_user.id,"An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
temp_modify.del_temp_modify_event(user_id)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "confirmComplete")
def confirmComplete(query):
    """Ask the admin to confirm marking the chosen event as complete."""
    uid = query.from_user.id
    msg_id = query.message.id
    state = getTempModifyEvent(uid)
    # Strip the previous inline keyboard so stale buttons cannot be pressed.
    bot.edit_message_reply_markup(uid, msg_id, reply_markup=types.InlineKeyboardMarkup([]))
    try:
        chosen_event_id = query.data.split(":")[1]
        chosen = Events.query.filter_by(event_id=chosen_event_id).first()
        state.setEventName(chosen.event_name)
        prompt = ('You have chosen section ' + state.getSection()
                  + '.\n\nYou have chosen to mark the following event as complete: '
                  + state.getEventName()
                  + ' \n\nConfirm? Be reminded that your students will no longer be able to check their attendance for this event.')
        bot.edit_message_text(prompt, uid, msg_id)
        yes_no = [[
            types.InlineKeyboardButton("Yes", callback_data='completeEvent:yes'),
            types.InlineKeyboardButton("No", callback_data='completeEvent:no'),
        ]]
        bot.edit_message_reply_markup(uid, msg_id, reply_markup=types.InlineKeyboardMarkup(yes_no))
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        state.del_temp_modify_event(uid)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "completeEvent")
def completeEvent(query):
    """Apply (or cancel) the pending 'mark event complete' action."""
    uid = query.from_user.id
    msg_id = query.message.id
    state = getTempModifyEvent(uid)
    # Remove the Yes/No keyboard before acting on the answer.
    bot.edit_message_reply_markup(uid, msg_id, reply_markup=types.InlineKeyboardMarkup([]))
    try:
        answer = query.data.split(":")[1]
        if answer == "no":
            # Admin backed out: nothing is written to the database.
            bot.edit_message_text('Event Completion has been cancelled.', uid, msg_id)
        else:
            target = Events.query.filter_by(section=state.getSection(), event_name=state.getEventName()).first()
            target.completed = 1
            db.session.commit()
            bot.edit_message_text('The event, ' + state.getEventName() + ' for section ' + state.getSection() + ' has been marked as complete. Students can no longer check in their attendance for that event.', uid, msg_id)
        state.del_temp_modify_event(uid)
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        state.del_temp_modify_event(uid)
# Admin command to remove an event from their section.#############################################################
@bot.message_handler(commands=["delete"])
def pickSection3(message):
    """Entry point for /delete: ask the admin which section to delete an event from."""
    # BUGFIX: the except handler referenced new_temp_modify, which is unbound
    # when the failure happens before it is created (NameError masked the
    # real error). Pre-bind it and guard the cleanup.
    new_temp_modify = None
    try:
        ongoing_action = doing_current_command(message.chat.id)
        if not ongoing_action:
            return
        admin_check = isAdmin(message.chat.id)
        if not admin_check:
            return
        section_list = retrieveSections(message.chat.id)
        markup = getSectionsMarkup(3, section_list, 3)
        # Track the multi-step delete flow in temporary per-user state.
        new_temp_modify = Temp_EventModify()
        new_temp_modify.add_temp_modify_event(message.chat.id, 'delete')
        bot.send_message(message.chat.id, 'Please pick the section that you want to delete an event for.', reply_markup=markup)
    except Exception as e:
        bot.send_message(message.chat.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        if new_temp_modify is not None:
            new_temp_modify.del_temp_modify_event(message.chat.id)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "pickSection3")
def pickEvent2(query):
    """Callback for /delete section choice: list the section's events to delete."""
    # BUGFIX: bind these before the try block; the except handler referenced
    # them and raised NameError whenever an early failure occurred.
    user_id = query.from_user.id
    message_id = query.message.id
    temp_modify = getTempModifyEvent(user_id)
    try:
        section = query.data.split(":")[1]
        temp_modify.setSection(section)
        temp_modify.setMessageId(message_id)
        # Strip the previous inline keyboard so stale buttons cannot be pressed.
        bot.edit_message_reply_markup(user_id, message_id, reply_markup=types.InlineKeyboardMarkup([]))
        # Build an inline keyboard of the section's events, row_limit per row.
        events = Events.query.filter_by(section=section)
        row_limit = 4  # MODIFY IF REQUIRED
        keyboard = []
        row = []
        for event in events:
            row.append(types.InlineKeyboardButton(event.event_name, callback_data='confirmDelete:' + str(event.event_id)))
            if len(row) >= row_limit:
                keyboard.append(row)
                row = []
        if len(row) > 0:
            keyboard.append(row)
        # If there are no events
        if len(keyboard) == 0:
            bot.edit_message_text("You have no events. Use /create to create a new one for your section.", user_id, message_id)
            temp_modify.del_temp_modify_event(user_id)
            return
        bot.edit_message_text('You have chosen section '+ section +'.\n\nPick an event that you want to delete. This will erase all student attendance records for that particular event.', user_id, message_id)
        bot.edit_message_reply_markup(user_id, message_id, reply_markup=types.InlineKeyboardMarkup(keyboard))
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        temp_modify.del_temp_modify_event(user_id)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "confirmDelete")
def confirmDelete(query):
    """Ask the admin to confirm deleting the chosen event."""
    uid = query.from_user.id
    msg_id = query.message.id
    state = getTempModifyEvent(uid)
    # Strip the previous inline keyboard so stale buttons cannot be pressed.
    bot.edit_message_reply_markup(uid, msg_id, reply_markup=types.InlineKeyboardMarkup([]))
    try:
        chosen_event_id = query.data.split(":")[1]
        chosen = Events.query.filter_by(event_id=chosen_event_id).first()
        state.setEventName(chosen.event_name)
        prompt = ('You have chosen section ' + state.getSection()
                  + '.\n\nYou have chosen to delete the following event: '
                  + state.getEventName()
                  + ' \n\nConfirm? Be reminded that this will delete all attendance records for this event.')
        bot.edit_message_text(prompt, uid, msg_id)
        yes_no = [[
            types.InlineKeyboardButton("Yes", callback_data='deleteEvent:yes'),
            types.InlineKeyboardButton("No", callback_data='deleteEvent:no'),
        ]]
        bot.edit_message_reply_markup(uid, msg_id, reply_markup=types.InlineKeyboardMarkup(yes_no))
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        state.del_temp_modify_event(uid)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "deleteEvent")
def deleteEvent(query):
    """Apply (or cancel) the pending event deletion, cascading attendance records."""
    uid = query.from_user.id
    msg_id = query.message.id
    state = getTempModifyEvent(uid)
    # Remove the Yes/No keyboard before acting on the answer.
    bot.edit_message_reply_markup(uid, msg_id, reply_markup=types.InlineKeyboardMarkup([]))
    try:
        answer = query.data.split(":")[1]
        if answer == "no":
            # Admin backed out: nothing is written to the database.
            bot.edit_message_text('Event Deletion has been cancelled.', uid, msg_id)
            state.del_temp_modify_event(uid)
        else:
            target = Events.query.filter_by(section=state.getSection(), event_name=state.getEventName()).first()
            # Gather every regular and late attendance record for this event,
            # then delete them together with the event in one commit.
            doomed = list(Attendance.query.filter_by(event_id=target.event_id))
            doomed.extend(Late_Attendance.query.filter_by(event_id=target.event_id))
            for record in doomed:
                db.session.delete(record)
            db.session.delete(target)
            db.session.commit()
            bot.edit_message_text('The event, ' + state.getEventName() + ' for section ' + state.getSection() + ' and all its attendance records have been deleted successfully.', uid, msg_id)
            state.del_temp_modify_event(uid)
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        state.del_temp_modify_event(uid)
# Admin command to delete a user from the section they have admin in. #####################################################################
@bot.message_handler(commands=['delete_student'])
def pickSection4(message):
    """Entry point for /delete_student: ask the admin which section to remove a student from."""
    # BUGFIX: the except handler referenced new_temp_student, which is unbound
    # when the failure happens before it is created (NameError masked the
    # real error). Pre-bind it and guard the cleanup.
    new_temp_student = None
    try:
        ongoing_action = doing_current_command(message.chat.id)
        if not ongoing_action:
            return
        admin_check = isAdmin(message.chat.id)
        if not admin_check:
            return
        section_list = retrieveSections(message.chat.id)
        markup = getSectionsMarkup(4, section_list, 3)
        new_temp_student = Temp_Student()
        new_temp_student.add_temp_student(message.chat.id)
        # BUGFIX: prompt previously said "delete an event for" (copied from
        # the /delete command); this flow removes a student.
        bot.send_message(message.chat.id, 'Please pick the section that you want to delete a student for.', reply_markup=markup)
    except Exception as e:
        bot.send_message(message.chat.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        if new_temp_student is not None:
            new_temp_student.del_temp_student(message.chat.id)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "pickSection4")
def pickStudent(query):
    """Callback for /delete_student section choice: list the section's students."""
    # BUGFIX: bind these before the try block; the except handler referenced
    # them and raised NameError whenever an early failure occurred.
    user_id = query.from_user.id
    message_id = query.message.id
    temp_student = getTempStudent(user_id)
    try:
        section = query.data.split(":")[1]
        temp_student.setSection(section)
        # Strip the previous inline keyboard so stale buttons cannot be pressed.
        bot.edit_message_reply_markup(user_id, message_id, reply_markup=types.InlineKeyboardMarkup([]))
        # One button per enrolled student, row_limit buttons per keyboard row.
        students = User_Sections.query.filter_by(section=section, role='Student')
        row_limit = 4  # MODIFY IF REQUIRED
        keyboard = []
        row = []
        for student in students:
            name = Users.query.filter_by(chat_id=student.chat_id).first().name
            row.append(types.InlineKeyboardButton(name, callback_data='pickStudent:' + str(student.chat_id)))
            if len(row) >= row_limit:
                keyboard.append(row)
                row = []
        if len(row) > 0:
            keyboard.append(row)
        # Consistency with the event pickers: bail out when there is no one to list.
        if len(keyboard) == 0:
            bot.edit_message_text("You have no students in this section.", user_id, message_id)
            temp_student.del_temp_student(user_id)
            return
        bot.edit_message_text('You have chosen section '+ section +'.\n\nPick a student that you want to remove from your section. This will erase all attendance records of the student from this section.', user_id, message_id)
        bot.edit_message_reply_markup(user_id, message_id, reply_markup=types.InlineKeyboardMarkup(keyboard))
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        temp_student.del_temp_student(user_id)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "pickStudent")
def confirmDeleteStu(query):
    """Ask the admin to confirm removing the chosen student from the section."""
    uid = query.from_user.id
    msg_id = query.message.id
    state = getTempStudent(uid)
    # Strip the previous inline keyboard so stale buttons cannot be pressed.
    bot.edit_message_reply_markup(uid, msg_id, reply_markup=types.InlineKeyboardMarkup([]))
    try:
        student_chat_id = query.data.split(":")[1]
        state.setChatId(student_chat_id)
        student_name = Users.query.filter_by(chat_id=student_chat_id).first().name
        prompt = ('You have chosen section ' + state.getSection()
                  + '.\n\nYou have chosen to remove the following student: '
                  + student_name
                  + ' \n\nConfirm? Be reminded that this will delete all attendance records for that particular user in this section.')
        bot.edit_message_text(prompt, uid, msg_id)
        yes_no = [[
            types.InlineKeyboardButton("Yes", callback_data='deleteStudent:yes'),
            types.InlineKeyboardButton("No", callback_data='deleteStudent:no'),
        ]]
        bot.edit_message_reply_markup(uid, msg_id, reply_markup=types.InlineKeyboardMarkup(yes_no))
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        state.del_temp_student(uid)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "deleteStudent")
def deleteStu(query):
    """Apply (or cancel) the pending student removal, cascading attendance records."""
    # BUGFIX: bind these before the try block; the original assigned them
    # inside it, so an early failure made the except handler itself raise
    # NameError instead of reporting the real error.
    user_id = query.from_user.id
    message_id = query.message.id
    temp_student = getTempStudent(user_id)
    try:
        response = query.data.split(":")[1]
        # Remove the Yes/No keyboard before acting on the answer.
        new_markup = types.InlineKeyboardMarkup([])
        bot.edit_message_reply_markup(user_id, message_id, reply_markup=new_markup)
        if response == "no":
            # Cancel student deletion
            bot.edit_message_text('Student Deletion has been cancelled.', user_id, message_id)
            temp_student.del_temp_student(user_id)
        else:
            # Collect the student's attendance record (regular, else late) for
            # every event of the section, then delete them with the enrollment.
            to_delete_list = []
            user_section = User_Sections.query.filter_by(chat_id=temp_student.getChatId(), section=temp_student.getSection()).first()
            section_events = Events.query.filter_by(section=temp_student.getSection())
            for event in section_events:
                event_id = event.event_id
                attendance = Attendance.query.filter_by(event_id=event_id, chat_id=temp_student.getChatId()).first()
                if attendance:
                    to_delete_list.append(attendance)
                else:
                    late_attendance = Late_Attendance.query.filter_by(event_id=event_id, chat_id=temp_student.getChatId()).first()
                    if late_attendance:
                        to_delete_list.append(late_attendance)
            for record in to_delete_list:
                db.session.delete(record)
            db.session.delete(user_section)
            db.session.commit()
            name = Users.query.filter_by(chat_id=temp_student.getChatId()).first().name
            bot.edit_message_text('The student, ' + name + ' has been removed from section ' + temp_student.getSection() + ' and all their attendance records have been deleted successfully.', user_id, message_id)
            temp_student.del_temp_student(user_id)
    except Exception as e:
        bot.send_message(query.from_user.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        temp_student.del_temp_student(user_id)
# ADMIN COMMAND TO VIEW ATTENDANCE ###############################################################
@bot.message_handler(commands=["view_attendance"])
def pickSection5(message):
    """Entry point for /view_attendance: ask the admin which section to view."""
    # BUGFIX: the except handler referenced new_view_attendance, which is
    # unbound when the failure happens before it is created (NameError masked
    # the real error). Pre-bind it and guard the cleanup.
    new_view_attendance = None
    try:
        ongoing_action = doing_current_command(message.chat.id)
        if not ongoing_action:
            return
        admin_check = isAdmin(message.chat.id)
        if not admin_check:
            return
        section_list = retrieveSections(message.chat.id)
        markup = getSectionsMarkup(5, section_list, 3)
        new_view_attendance = View_Attendance()
        new_view_attendance.add_view_attendance(message.chat.id)
        bot.send_message(message.chat.id, 'Please pick the section that you want to view attendance for.', reply_markup=markup)
    except Exception as e:
        bot.send_message(message.chat.id, "An error occurred: " + str(e) + ". Please contact your instructor or notify the developer.")
        if new_view_attendance is not None:
            new_view_attendance.del_view_attendance(message.chat.id)
@bot.callback_query_handler(lambda query: query.data.split(":")[0] == "pickSection5")
def pickEvent2(query):
try:
section = query.data.split(":")[1]
user_id = query.from_user.id
message_id = query.message.id
new_view_attendance = getViewAttendance(user_id)
new_view_attendance.setSection(section)
new_markup = types.InlineKeyboardMarkup([])
bot.edit_message_reply_markup(user_id,message_id,reply_markup=new_markup)
# Generate existing incomplete events of the section
events = Events.query.filter_by(section=section)
row_limit = 4 # MODIFY IF REQUIRED
keyboard = []
row = []
for event in events:
row.append(types.InlineKeyboardButton(event.event_name,callback_data='view_att:'+ str(event.event_id)))
if len(row) >= row_limit:
keyboard.append(row)
row = []
if len(row) > 0:
keyboard.append(row)
# If there are no events
if len(keyboard) == 0:
bot.edit_message_text("You have no events. Use /create to create a new one for your section.",user_id,message_id)
new_view_attendance.del_view_attendance(user_id)
return
bot.edit_message_text('You have | |
"""Type checker test cases"""
import os
import re
import shutil
import sys
from typing import Dict, List, Optional, Set, Tuple
from mypy import build
from mypy import defaults
from mypy.main import process_options
from mypy.test.config import test_temp_dir
from mypy.test.data import DataDrivenTestCase, DataSuite, has_stable_flags, is_incremental
from mypy.test.helpers import (
assert_string_arrays_equal, normalize_error_messages,
retry_on_error, testcase_pyversion, update_testcase_output,
)
from mypy.options import Options
from mypy import dmypy
from mypy import dmypy_server
# List of files that contain test case descriptions.
if sys.platform != 'win32':
    dmypy_files = [
        'check-enum.test',
        'check-incremental.test',
        'check-newtype.test',
        'check-dmypy-fine-grained.test',
    ]
else:
    # Registering no test files effectively skips the whole dmypy suite on
    # Windows.
    dmypy_files = [] # type: List[str]
# By default we complain about missing files. This is a special module prefix
# for which we allow non-existence. This is used for testing missing files.
NON_EXISTENT_PREFIX = 'nonexistent'
# If this suffix is used together with NON_EXISTENT_PREFIX, the non-existent
# file is a .pyi file. Since the file doesn't exist, we can't automatically
# figure out the extension.
STUB_SUFFIX = '_stub'
class DmypySuite(DataSuite):
files = dmypy_files
base_path = test_temp_dir
optional_out = True
test_name_suffix = '_dmypy'
    @classmethod
    def filter(cls, testcase: DataDrivenTestCase) -> bool:
        """Select only incremental test cases whose flags stay stable across runs."""
        return has_stable_flags(testcase) and is_incremental(testcase)
def run_case(self, testcase: DataDrivenTestCase) -> None:
assert has_stable_flags(testcase), "Testcase has varying flags"
assert is_incremental(testcase), "Testcase is not incremental"
# All tests run once with a cold cache, then at least once
# with a warm cache and maybe changed files. Expected output
# is specified separately for each run.
self.clear_cache()
num_steps = max([2] + list(testcase.output2.keys()))
# Check that there are no file changes beyond the last run (they would be ignored).
for dn, dirs, files in os.walk(os.curdir):
for file in files:
m = re.search(r'\.([2-9])$', file)
if m and int(m.group(1)) > num_steps:
raise ValueError(
'Output file {} exists though test case only has {} runs'.format(
file, num_steps))
self.server = None # type: Optional[dmypy_server.Server]
for step in range(1, num_steps + 1):
self.run_case_once(testcase, step)
def clear_cache(self) -> None:
dn = defaults.CACHE_DIR
if os.path.exists(dn):
shutil.rmtree(dn)
    def run_case_once(self, testcase: DataDrivenTestCase, incremental_step: int) -> None:
        """Run one step of an incremental dmypy test and check its output.

        Step 1 materializes the program files and starts the daemon; steps 2+
        first apply the scheduled file copies/deletions, then re-check against
        the warm cache.
        """
        assert incremental_step >= 1
        build.find_module_clear_caches()
        original_program_text = '\n'.join(testcase.input)
        if incremental_step > 1:
            # In runs 2+, copy *.[num] files to * files.
            for dn, dirs, files in os.walk(os.curdir):
                for file in files:
                    if file.endswith('.' + str(incremental_step)):
                        full = os.path.join(dn, file)
                        target = full[:-2]
                        # Use retries to work around potential flakiness on Windows (AppVeyor).
                        retry_on_error(lambda: shutil.copy(full, target))
                        # In some systems, mtime has a resolution of 1 second which can cause
                        # annoying-to-debug issues when a file has the same size after a
                        # change. We manually set the mtime to circumvent this.
                        new_time = os.stat(target).st_mtime + 1
                        os.utime(target, times=(new_time, new_time))
            # Delete files scheduled to be deleted in [delete <path>.num] sections.
            for path in testcase.deleted_paths.get(incremental_step, set()):
                # Use retries to work around potential flakiness on Windows (AppVeyor).
                retry_on_error(lambda: os.remove(path))
        module_data = self.parse_module(original_program_text, incremental_step)
        if incremental_step == 1:
            # In run 1, copy program text to program file.
            for module_name, program_path, program_text in module_data:
                if module_name == '__main__' and program_text is not None:
                    with open(program_path, 'w') as f:
                        f.write(program_text)
                    break
        # Parse options after moving files (in case mypy.ini is being moved).
        options = self.parse_options(original_program_text, testcase, incremental_step)
        if incremental_step == 1:
            server_options = [] # type: List[str]
            if 'fine-grained' in testcase.file:
                server_options.append('--experimental')
                options.fine_grained_incremental = True
                options.local_partial_types = True
            self.server = dmypy_server.Server(server_options) # TODO: Fix ugly API
            self.server.options = options
        assert self.server is not None # Set in step 1 and survives into next steps
        sources = []
        for module_name, program_path, program_text in module_data:
            # Always set to none so we're forced to reread the module in incremental mode
            sources.append(build.BuildSource(program_path, module_name, None))
        response = self.server.check(sources, alt_lib_path=test_temp_dir)
        a = (response['out'] or response['err']).splitlines()
        a = normalize_error_messages(a)
        # Make sure error messages match
        if incremental_step == 1:
            msg = 'Unexpected type checker output in incremental, run 1 ({}, line {})'
            output = testcase.output
        elif incremental_step > 1:
            msg = ('Unexpected type checker output in incremental, run {}'.format(
                incremental_step) + ' ({}, line {})')
            output = testcase.output2.get(incremental_step, [])
        else:
            raise AssertionError()
        # With --update-data, rewrite the expected output in the test file
        # instead of failing.
        if output != a and self.update_data:
            update_testcase_output(testcase, a)
        assert_string_arrays_equal(output, a, msg.format(testcase.file, testcase.line))
        manager = self.server.last_manager
        if manager is not None:
            if options.follow_imports == 'normal' and testcase.output is None:
                self.verify_cache(module_data, a, manager)
            if incremental_step > 1:
                # [rechecked] / [stale] sections use a numeric suffix from
                # step 3 onwards (e.g. [rechecked2] for step 3).
                suffix = '' if incremental_step == 2 else str(incremental_step - 1)
                self.check_module_equivalence(
                    'rechecked' + suffix,
                    testcase.expected_rechecked_modules.get(incremental_step - 1),
                    manager.rechecked_modules)
                self.check_module_equivalence(
                    'stale' + suffix,
                    testcase.expected_stale_modules.get(incremental_step - 1),
                    manager.stale_modules)
def check_module_equivalence(self, name: str,
expected: Optional[Set[str]], actual: Set[str]) -> None:
if expected is not None:
expected_normalized = sorted(expected)
actual_normalized = sorted(actual.difference({"__main__"}))
assert_string_arrays_equal(
expected_normalized,
actual_normalized,
('Actual modules ({}) do not match expected modules ({}) '
'for "[{} ...]"').format(
', '.join(actual_normalized),
', '.join(expected_normalized),
name))
def verify_cache(self, module_data: List[Tuple[str, str, Optional[str]]], a: List[str],
manager: build.BuildManager) -> None:
# There should be valid cache metadata for each module except
# those in error_paths; for those there should not be.
#
# NOTE: When A imports B and there's an error in B, the cache
# data for B is invalidated, but the cache data for A remains.
# However build.process_graphs() will ignore A's cache data.
#
# Also note that when A imports B, and there's an error in A
# _due to a valid change in B_, the cache data for B will be
# invalidated and updated, but the old cache data for A will
# remain unchanged. As before, build.process_graphs() will
# ignore A's (old) cache data.
error_paths = self.find_error_paths(a)
modules = self.find_module_files()
modules.update({module_name: path for module_name, path, text in module_data})
missing_paths = self.find_missing_cache_files(modules, manager)
if not missing_paths.issubset(error_paths):
raise AssertionError("cache data discrepancy %s != %s" %
(missing_paths, error_paths))
def find_error_paths(self, a: List[str]) -> Set[str]:
hits = set()
for line in a:
m = re.match(r'([^\s:]+):\d+: error:', line)
if m:
# Normalize to Linux paths.
p = m.group(1).replace(os.path.sep, '/')
hits.add(p)
return hits
def find_module_files(self) -> Dict[str, str]:
modules = {}
for dn, dirs, files in os.walk(test_temp_dir):
dnparts = dn.split(os.sep)
assert dnparts[0] == test_temp_dir
del dnparts[0]
for file in files:
if file.endswith('.py'):
if file == "__init__.py":
# If the file path is `a/b/__init__.py`, exclude the file name
# and make sure the module id is just `a.b`, not `a.b.__init__`.
id = '.'.join(dnparts)
else:
base, ext = os.path.splitext(file)
id = '.'.join(dnparts + [base])
modules[id] = os.path.join(dn, file)
return modules
def find_missing_cache_files(self, modules: Dict[str, str],
manager: build.BuildManager) -> Set[str]:
ignore_errors = True
missing = {}
for id, path in modules.items():
meta = build.find_cache_meta(id, path, manager)
if not build.validate_meta(meta, id, path, ignore_errors, manager):
missing[id] = path
return set(missing.values())
def parse_module(self,
program_text: str,
incremental_step: int) -> List[Tuple[str, str, Optional[str]]]:
"""Return the module and program names for a test case.
Normally, the unit tests will parse the default ('__main__')
module and follow all the imports listed there. You can override
this behavior and instruct the tests to check multiple modules
by using a comment like this in the test case input:
# cmd: mypy -m foo.bar foo.baz
You can also use `# cmdN:` to have a different cmd for incremental
step N (2, 3, ...).
Return a list of tuples (module name, file name, program text).
"""
m = re.search('# cmd: mypy -m ([a-zA-Z0-9_. ]+)$', program_text, flags=re.MULTILINE)
regex = '# cmd{}: mypy -m ([a-zA-Z0-9_. ]+)$'.format(incremental_step)
alt_m = re.search(regex, program_text, flags=re.MULTILINE)
if alt_m is not None and incremental_step > 1:
# Optionally return a different command if in a later step
# of incremental mode, otherwise default to reusing the
# original cmd.
m = alt_m
if m:
# The test case wants to use a non-default main
# module. Look up the module and give it as the thing to
# analyze.
module_names = m.group(1)
out = [] # type: List[Tuple[str, str, Optional[str]]]
for module_name in module_names.split(' '):
path = build.find_module(module_name, [test_temp_dir])
if path is None and module_name.startswith(NON_EXISTENT_PREFIX):
# This is a special name for a file that we don't want to exist.
assert '.' not in module_name # TODO: Packages not supported here
if module_name.endswith(STUB_SUFFIX):
fnam = '{}.pyi'.format(module_name)
else:
fnam = '{}.py'.format(module_name)
path = os.path.join(test_temp_dir, fnam)
out.append((module_name, path, None))
else:
assert path is not None, "Can't | |
# file: tests/examples/testcases.py
#!/usr/bin/env python3
import argparse
import unittest
import itertools
import json
import subprocess
import os
import sys
import shutil
import gzip
import aug_out_filter as afilter
import aug_comparator as comp
# This script executes AUGUSTUS test cases based on the examples
# folder and compares the current results with reference results
# if the option --compare is set. It is expected that both results
# are identical for a successful test.
# This script must be called from "tests/examples_test"!
# Python version 3.6 or higher is required for execution.
# Command-line options controlling which tests run and how results are checked.
parser = argparse.ArgumentParser(description='Execute Augustus test cases.')
parser.add_argument('--mysql',
                    action='store_true',
                    help='cgp test cases are also executed with a MySQL database.')
parser.add_argument('--compare',
                    action='store_true',
                    help='Compare generated results with reference results.')
parser.add_argument('--html',
                    action='store_true',
                    help='Save diff results in html file.')
parser.add_argument('--clean',
                    action='store_true',
                    help='Remove all files created during the tests. If this option is set, no tests are executed.')
args = parser.parse_args()
# only import mysql connector if testcases using mysql should be executed
# MySQL Connector must be installed in this case
if args.mysql:
    import mysql.connector
# Paths used throughout; all are relative to tests/examples.
resultdir = 'results/'
refdir = 'expected_results/'
htmldir = 'output_html/'
tmpdir = 'data/tmp/'
exampledir = '../../examples/'
bindir = '../../bin/'
augustusbin = f'{bindir}augustus'
datadir = exampledir + 'chr2L/'
# remembered so tests can chdir back after working in other directories
default_wd = os.getcwd()
def create_initial_resultdir():
    """Start from a clean slate: drop old outputs, then recreate the result dir."""
    clean(False)
    os.mkdir(resultdir)


def clean(withtmpdir=True):
    """Delete generated test artifacts.

    The tmp dir is removed too unless ``withtmpdir`` is False.
    """
    print('Removing generated test files...')
    doomed = [htmldir, resultdir]
    if withtmpdir:
        doomed.append(tmpdir)
    for folder in doomed:
        if os.path.exists(folder):
            shutil.rmtree(folder)
def check_working_dir(clean):
    """Exit unless run from tests/examples, and unless the binaries exist.

    :param clean: True when only cleaning up; skips the binary check.
        NOTE(review): this parameter shadows the module-level clean()
        function within this scope; kept as-is for compatibility.
    """
    wd = os.getcwd()
    if not (wd.endswith('tests/examples')):
        errstr = 'Wrong working directory!' + '\n'
        errstr += 'This script must be called from "tests/examples"!'
        sys.exit(errstr)
    # binaries are only required when tests will actually run
    if not clean and not (os.path.exists(augustusbin)):
        errstr = 'Missing augustus binaries!' + '\n'
        errstr += f'The augustus binaries must be accessible in this path: "{bindir}"!'
        sys.exit(errstr)
class TestAugustus(unittest.TestCase):
dbname = None
dbhost = None
dbuser = None
dbpasswd = None
cpuno = 2
opt_compare = False
opt_html = False
opt_mysql = False
@classmethod
def read_config(cls):
with open('testconfig.json', 'r') as file:
config = json.load(file)
cls.dbname = config['dbname']
cls.dbhost = config['dbhost']
cls.dbuser = config['dbuser']
cls.dbpasswd = config['dbpasswd']
cls.cpuno = int(config['cpuno'])
@classmethod
def init_test_data(cls):
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
inputfile = os.path.join(tmpdir, 'chr2L.sm.fa.gz')
testfile = os.path.join(tmpdir, 'chr2L.sm.fa')
shutil.copyfile(os.path.join(datadir, 'chr2L.sm.fa.gz'), inputfile)
# '../../docs/tutorial2015/data/chr2L.sm.fa.gz', inputfile)
with gzip.open(inputfile, 'rb') as f_in:
with open(testfile, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(inputfile)
@classmethod
def init_sqlite_db(cls):
if not os.path.exists(tmpdir):
os.mkdir(tmpdir)
cmd_list = [[
f'{bindir}load2sqlitedb', '--species=hg19',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/human.fa'
],
[
f'{bindir}load2sqlitedb', '--species=mm9',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/mouse.fa'
],
[
f'{bindir}load2sqlitedb', '--species=bosTau4',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/cow.fa'
],
[
f'{bindir}load2sqlitedb', '--species=galGal3',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/chicken.fa'
],
[
f'{bindir}load2sqlitedb', '--noIdx', '--species=hg19',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/human.hints.gff'
],
[
f'{bindir}load2sqlitedb', '--noIdx', '--species=mm9',
f'--dbaccess={tmpdir}vertebrates.db', '--clean',
f'{exampledir}cgp/mouse.hints.gff'
],
[
f'{bindir}load2sqlitedb', '--makeIdx',
f'--dbaccess={tmpdir}vertebrates.db', '--clean'
]]
print('Creating SQLite database for cgp test cases...')
cls.init_db(cmd_list)
@classmethod
def init_mysql_db(cls):
cmd_list = [[
f'{bindir}load2db', '--species=hg19', '--dbaccess=' + cls.dbname +
',' + cls.dbhost + ',' + cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/human.fa'
],
[
f'{bindir}load2db', '--species=mm9',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/mouse.fa'
],
[
f'{bindir}load2db', '--species=bosTau4',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/cow.fa'
],
[
f'{bindir}load2db', '--species=galGal3',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/chicken.fa'
],
[
f'{bindir}load2db', '--species=hg19',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/human.hints.gff'
],
[
f'{bindir}load2db', '--species=mm9',
'--dbaccess=' + cls.dbname + ',' + cls.dbhost + ',' +
cls.dbuser + ',' + cls.dbpasswd,
f'{exampledir}cgp/mouse.hints.gff'
]]
print(' -' +
'Inserting data into MySQL database for testing purposes...')
cls.init_db(cmd_list)
@classmethod
def init_db(cls, cmd_list):
for cmd in cmd_list:
output = TestAugustus().process(cmd)
# print(output)
@classmethod
def cleanup(cls):
os.chdir(default_wd)
# remove generated SQLite database
if os.path.isfile(f'{tmpdir}vertebrates.db'):
os.remove(f'{tmpdir}vertebrates.db')
# remove copied/unzipped files
if os.path.isfile(f'{tmpdir}chr2L.sm.fa'):
os.remove(f'{tmpdir}chr2L.sm.fa')
@classmethod
def cleanup_mysqldb(cls):
mysqldb = mysql.connector.connect(host=cls.dbhost,
user=cls.dbuser,
passwd=cls.dbpasswd,
database=cls.dbname)
print('\n' + ' -' + 'Clean up MySQL database...')
augcursor = mysqldb.cursor()
augcursor.execute('DROP TABLE IF EXISTS genomes;')
augcursor.execute('DROP TABLE IF EXISTS speciesnames;')
augcursor.execute('DROP TABLE IF EXISTS seqnames;')
augcursor.execute('DROP TABLE IF EXISTS hints;')
augcursor.execute('DROP TABLE IF EXISTS featuretypes;')
@classmethod
def setUpClass(cls):
cls.read_config()
# check config
missing_arguments = False
if (cls.opt_mysql):
if TestAugustus.dbname is None:
print('The database name is missing!')
missing_arguments = True
if TestAugustus.dbhost is None:
print('The host name is missing!')
missing_arguments = True
if TestAugustus.dbuser is None:
print('The db user name is missing!')
missing_arguments = True
if TestAugustus.dbpasswd is None:
print('The db user passwd is missing!')
missing_arguments = True
if missing_arguments:
assert False, 'Test case using MySQL are not executed.'
cls.init_test_data()
cls.init_sqlite_db()
if (cls.opt_mysql):
cls.cleanup_mysqldb()
cls.init_mysql_db()
@classmethod
def tearDownClass(cls):
cls.cleanup()
if (cls.opt_mysql):
cls.cleanup_mysqldb()
def assertEqualFolders(self, reffolder, resfolder, html=None, outputfolder=None):
if TestAugustus.opt_compare:
if html is None:
html = self.opt_html
if outputfolder is None:
diff = comp.compare_folder(reffolder,
resfolder,
html=html)
else:
diff = comp.compare_folder(reffolder,
resfolder,
html=html,
outputfolder=outputfolder)
self.assertEqual(diff, '', diff)
def get_ref_folder(self, folder_name=None, path_to_wd=None):
if folder_name is None:
folder_name = self._testMethodName
if path_to_wd is None:
return os.path.join(refdir, folder_name)
else:
return os.path.join(path_to_wd, refdir, folder_name)
def get_res_folder(self, folder_name=None, path_to_wd=None):
if folder_name is None:
folder_name = self._testMethodName
if path_to_wd is None:
return os.path.join(resultdir, folder_name)
else:
return os.path.join(path_to_wd, resultdir, folder_name)
def process(self, cmd_list, out=subprocess.PIPE):
isFile = isinstance(out, str)
output = out
if isFile:
output = open(out, 'w')
p = subprocess.Popen(cmd_list,
stdout=output,
stderr=subprocess.PIPE,
universal_newlines=True)
rc = p.wait()
error = p.stderr.read()
p.stderr.close()
self.assertEqual(error, '', error)
self.assertEqual(rc, 0, f'Returncode not 0! Error: {error}')
if isFile:
self.assertTrue(os.path.isfile(out),
'Output file was not created as expected!')
else:
stdout = p.stdout.read()
p.stdout.close()
return stdout
return ''
def test_utr_on(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
testtmpfile = os.path.join(resfolder, 'aug_utr_on_tmp.gff')
testfile = os.path.join(resfolder, 'aug_utr_on.gff')
os.mkdir(resfolder)
self.process([
augustusbin, '--species=human', '--UTR=on', '--softmasking=0',
f'{exampledir}example.fa'
], testtmpfile)
# filter output file
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_iterative_prediction(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
os.mkdir(resfolder)
species_list = ['nasonia', 'zebrafish', 'tomato']
# run augustus several times with different parameter sets
for species in species_list:
testtmpfile = os.path.join(
resfolder, 'aug.' + species + '.1-1M_tmp.gff')
self.process([
augustusbin, '--species=' + species,
f'{tmpdir}chr2L.sm.fa', '--softmasking=on',
'--predictionEnd=1000000'
], testtmpfile)
# filter output
testfile = os.path.join(resfolder, 'aug.' + species + '.1-1M.gff')
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_iterative_prediction_with_hints(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
os.mkdir(resfolder)
if not os.path.isfile('data/tmp/chr2L.sm.fa'):
TestAugustus.init_test_data()
for i in range(0, 3):
testtmpfile = os.path.join(
resfolder, f'aug.nasonia.hints.{str(i)}_tmp.gff')
self.process([
augustusbin, '--species=nasonia',
f'{tmpdir}chr2L.sm.fa', '--softmasking=on',
'--predictionStart=' + str(i * 2000000),
'--predictionEnd=' + str((i + 1) * 2000000 + 50000),
f'--hintsfile={datadir}/hints.gff',
'--extrinsicCfgFile=extrinsic.M.RM.E.W.cfg'
], testtmpfile)
# filter output
testfile = os.path.join(
resfolder, f'aug.nasonia.hints.{str(i)}.gff')
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
    def test_training_new_species(self):
        # Train and evaluate a fresh species without CRF training.
        self.training_new_species(False)
    def test_training_new_species_crf(self):
        # Train and evaluate a fresh species with CRF training enabled.
        self.training_new_species(True)
    def training_new_species(self, crf):
        """Create a throwaway species, train it, and evaluate on the test set.

        :param crf: when True, enable CRF training options for the test run.
        """
        os.chdir(default_wd)
        speciesname = 'test_aug_dev_species'

        # Remove test species folder.
        # Just in case the deletion fails for whatever reason.
        if os.path.exists('../../config/species/' + speciesname):
            shutil.rmtree('../../config/species/' + speciesname)

        resfolder = self.get_res_folder()
        reffolder = self.get_ref_folder()
        testtmpfile = os.path.join(resfolder, 'test_tmp.out')
        testfile = os.path.join(resfolder, 'test.out')
        os.mkdir(resfolder)

        # call script to initialize new species
        self.process([
            'perl', '../../scripts/new_species.pl', '--species=' + speciesname,
            '--AUGUSTUS_CONFIG_PATH=../../config'
        ])

        # training
        self.process([
            f'{bindir}etraining', os.path.join(datadir, 'genes.gb.train'),
            '--species=' + speciesname
        ])

        # test
        cmd = [
            augustusbin, os.path.join(datadir, 'genes.gb.test'),
            '--species=' + speciesname, '--softmasking=0',
            '--AUGUSTUS_CONFIG_PATH=../../config'
        ]
        if (crf):
            cmd.append('--CRF=on')
            cmd.append('--CRF_N=2')
            cmd.append('--UTR=off')
        self.process(cmd, testtmpfile)

        # filter output file
        afilter.eval(testtmpfile, testfile)
        os.remove(testtmpfile)

        # move new species to result folder
        shutil.move('../../config/species/' + speciesname, resfolder)
        # compare results
        self.assertEqualFolders(reffolder, resfolder)
def test_ab_initio_prediction(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
testtmpfile = os.path.join(resfolder, 'augustus_tmp.gff')
testfile = os.path.join(resfolder, 'augustus.gff')
os.mkdir(resfolder)
self.process([
augustusbin, f'{exampledir}autoAug/genome.fa', '--softmasking=1',
'--species=caenorhabditis'
], testtmpfile)
# filter output file
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_format_and_error_out(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
testtmpfile = os.path.join(resfolder, 'augustus_tmp.gff3')
testfile = os.path.join(resfolder, 'augustus.gff3')
os.mkdir(resfolder)
cmd = [
augustusbin, f'{exampledir}autoAug/genome.fa',
'--species=caenorhabditis', '--gff3=on', '--softmasking=1',
'--outfile=' + testtmpfile,
'--errfile=' + resfolder + '/augustus.err'
]
self.process(cmd)
# filter output file
self.assertTrue(os.path.isfile(testtmpfile),
'Output file was not created as expected!')
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_alternatives_from_sampling(self):
os.chdir(default_wd)
reffolder = self.get_ref_folder()
resfolder = self.get_res_folder()
testtmpfile = os.path.join(resfolder, 'augustus_tmp.gff')
testfile = os.path.join(resfolder, 'augustus.gff')
os.mkdir(resfolder)
cmd = [
augustusbin, f'{exampledir}autoAug/genome.fa',
'--species=caenorhabditis', '--alternatives-from-sampling=on',
'--minexonintronprob=0.08', '--minmeanexonintronprob=0.4',
'--maxtracks=3'
]
self.process(cmd, testtmpfile)
# filter output file
afilter.pred(testtmpfile, testfile)
os.remove(testtmpfile)
# compare results
self.assertEqualFolders(reffolder, resfolder)
def test_cgp(self):
reffolder = self.get_ref_folder(path_to_wd='../../tests/examples')
resfolder = self.get_res_folder(path_to_wd='../../tests/examples')
testtmpfile = os.path.join(resfolder, 'output_tmp.txt')
testfile = os.path.join(resfolder, 'output.txt')
os.chdir(os.path.join(default_wd, f'{exampledir}cgp'))
os.mkdir(resfolder)
cmd = [
augustusbin,
'--species=human',
'--speciesfilenames=genomes.tbl',
'--treefile=tree.nwk',
'--alnfile=aln.maf',
'--softmasking=0',
'--alternatives-from-evidence=0', # removes warning
'--/CompPred/outdir=' + resfolder + '/'
| |
#BEGIN_LEGAL
#
#Copyright (c) 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#END_LEGAL
# FIXME: 2019-10-21 rename this file to something like "gen_dyn_dec.py"
# dynamic decode for:
# (a) part 1 (for OSZ/ASZ NTs),
# (b) part 2 (for NTs within in instr patterns), and
# (c) operands (mostly register NTLUFs).
import os
import ildutil
import ild_nt
import ild_cdict
import mbuild
import codegen
import ild_codegen
import operand_storage
import verbosity
import tup2int
# C type used for the computed constraint key, and the operand/error names
# emitted into the generated decoder code.
_key_ctype = 'xed_uint32_t'
_xed3_err_op = 'error'
_xed3_gen_error = 'XED_ERROR_GENERAL_ERROR'
_xed_reg_error_val = 'XED_ERROR_BAD_REGISTER'
_xed_no_err_val = 'XED_ERROR_NONE'
_xed_op_type = 'xed_operand_values_t'
def _vlog(f, s):
    """Write ``s`` to ``f``, but only when verbose capture is enabled."""
    if verbosity.vcapture():
        f.write(s)
def get_ii_constraints(ii, state_space, constraints):
    """Record every constraint implied by instruction ``ii``.

    Fills constraints[operand_name][operand_value] = True for all operand
    deciders in the pattern and for all constant prebindings.
    """
    # constraints that come from operand deciders in the pattern
    ild_nt.add_op_deciders(ii.ipattern, state_space, constraints)
    # constraints that come from constant prebindings (binary strings)
    for name, binding in ii.prebindings.items():
        if not binding.is_constant():
            continue
        value = int(binding.get_value(), 2)
        constraints.setdefault(name, {})[value] = True
def _get_all_cnames(gi):
    """Return the set of all constraint names used by ``gi``.

    (generator_info - represents a single NT)
    """
    names = set()
    for rule in gi.parser_output.instructions:
        names.update(rule.xed3_constraints)
    return names
def _gen_cdict(agi, nt_name, all_state_space):
    """
    Creates a ild_cdict.constraint_dict_t corresponding to NT
    defined by gi.
    """
    gi = agi.generator_dict[nt_name]
    # NOTE(review): `options` is assigned but never used below.
    options = agi.common.options
    # per-operand list of legal values, derived from the full state space
    state_space = {}
    for opname in all_state_space:
        state_space[opname] = list(all_state_space[opname].keys())
    cdict_list = []
    # first pass: collect the constraints of every rule
    for rule in gi.parser_output.instructions:
        rule.xed3_constraints = {}
        get_ii_constraints(rule, state_space, rule.xed3_constraints)
    cnames = _get_all_cnames(gi)
    # second pass: build one constraint dict per rule over the shared cnames
    for rule in gi.parser_output.instructions:
        cdict = ild_cdict.constraint_dict_t(
            cnames,
            rule.xed3_constraints,
            all_state_space,
            rule)
        cdict_list.append(cdict)
    msg = "cdict conflict in NT %s\n" % nt_name
    # merge the per-rule dicts; conflicting keys trigger an error with `msg`
    united_dict = ild_cdict.constraint_dict_t.unite_dicts(
        cdict_list,
        msg,
        cnames)
    return united_dict
# common prefix of all generated xed3 capture functions
_xed3_capture_fn_pfx = 'xed3_capture'

def _get_xed3_nt_capture_fn(nt_name):
    """Return the name of the generated capture function for NT ``nt_name``."""
    return '{}_nt_{}'.format(_xed3_capture_fn_pfx, nt_name)
def _get_xed3_capture_chain_fn(nt_names, is_ntluf=False):
    """Return the name of a chain capture function for the given NT names.

    Operand (NTLUF) chain functions get an extra 'ntluf_' marker.
    """
    joined = '_'.join(nt_names)
    if is_ntluf:
        joined = 'ntluf_%s' % joined
    return '%s_chain_%s' % (_xed3_capture_fn_pfx, joined)
def _add_cgen_key_lines(fo,
                        nt_name,
                        gi,
                        all_ops_widths,
                        key_str='key',
                        inst='d'):
    """
    Add C code to compute the key from constraints' values.

    Each constraint contributes its value, shifted into its own bit field
    of the key; field widths come from all_ops_widths.
    """
    fo.add_code_eol('%s %s = 0' % (_key_ctype, key_str))
    cdict = gi.xed3_cdict
    bit_shift = 0
    # NOTE(review): loop index `i` is unused.
    for i,cname in enumerate(cdict.cnames):
        #eosz_set=True indicates that current value of EOSZ is correct
        #in the _operands array and we can take it from there.
        #Otherwise we would have to use special eosz computing functions
        #the same way as we do in ILD.
        #eosz_set=True here because we are doing dynamic decoding
        #and have processed the NTs that come before the current NT.
        # NOTE(review): keyword is spelled 'eoasz_set' here — presumably
        # matches ild_codegen.emit_ild_access_call's signature; confirm.
        access_str = ild_codegen.emit_ild_access_call(cname, inst,
                                                      eoasz_set=True)
        #constraints might have 1,2 or 3 bit widths
        #and we allocate bits in the key vector appropriately
        #e.g REXB operand gets only 1 bit in the key
        #and RM gets 3 bits
        shift_val = ('(%s)' % bit_shift)
        bit_shift += all_ops_widths[cname]
        fo.add_code_eol('%s += (%s) << (%s)' % (key_str,access_str, shift_val))
def _get_pattern_nts(rule):
    """Return a list of NT names present in the given rule's pattern."""
    return [bt.nonterminal_name()
            for bt in rule.ipattern.bits
            if bt.is_nonterminal()]
def _is_error_rule(rule):
    """True when any operand of the rule is the special 'error' operand."""
    return any(op.type == 'error' for op in rule.operands)
def _add_capture_nt_call(fo, nt_name, inst='d', indent=0):
    """Emit a call to the capture function of NT ``nt_name`` into ``fo``."""
    pad = ' ' * indent
    fo.add_code_eol('%s%s(%s)' % (pad, _get_xed3_nt_capture_fn(nt_name), inst))

def _add_op_assign_stmt(fo, opname, opval, inst='d', op=None,
                        indent=0):
    """Emit a setter call assigning ``opval`` to operand ``opname``."""
    if op:
        fo.add_code('/* op.type=%s */' % op.type)
    pad = ' ' * indent
    setter = operand_storage.get_op_setter_fn(opname)
    fo.add_code_eol('%s%s(%s, %s)' % (pad, setter, inst, opval))

def _is_reg_error_op(op):
    """True when the operand is bound to the XED_REG_ERROR register."""
    return op.bits == 'XED_REG_ERROR'
def _add_nt_rhs_assignments(fo, nt_name, gi, rule, inst='d'):
    """Emit the C statements that apply one rule's right-hand side.

    Error rules set the error operand and stop; otherwise every NT in the
    pattern is captured first, then each RHS operand is assigned.
    """
    #fo.add_code("/* %s */" % rule)
    #first if it's error, we set general_error and quit
    if _is_error_rule(rule):
        _add_op_assign_stmt(fo, _xed3_err_op, _xed3_gen_error,
                            inst, indent=1)
        return
    #now check if there are NT calls in pattern, we need to call them first
    pattern_nts = _get_pattern_nts(rule)
    # NOTE(review): the loop variable shadows the `nt_name` parameter; the
    # parameter is not used after this point, so behavior is unaffected.
    for nt_name in pattern_nts:
        _add_capture_nt_call(fo, nt_name, inst, indent=1)
    #now let's do the RHS - for each operand assign value
    #FIXME: if we assign ERROR_REG or INVALID_REG set also error?
    for op in rule.operands:
        if op.name == 'ENCODER_PREFERRED':
            #skip encoder preferred
            continue
        if op.type == 'imm':
            #skip prebindings
            continue
        if op.type == 'nt_lookup_fn':
            #NT as RHS, we call its capturing function
            #and then assign op.name to OUTREG
            _add_capture_nt_call(fo, op.lookupfn_name, inst, indent=1)
            #now copy the outreg to op.name (unless it is outreg too!)
            if op.name != 'OUTREG':
                getter_fn = operand_storage.get_op_getter_fn('outreg')
                outreg_expr = '%s(%s)' % (getter_fn, inst)
                _add_op_assign_stmt(fo, op.name, outreg_expr, inst, indent=1)
        else: #assignment of an operand to a constant
            _add_op_assign_stmt(fo, op.name, op.bits, inst, indent=1)
            # a bad-register assignment also records a register error
            if _is_reg_error_op(op):
                _add_op_assign_stmt(fo, _xed3_err_op,
                                    _xed_reg_error_val, inst, indent=1)
    fo.add_code('/*pacify the compiler */')
    fo.add_code_eol('(void)%s' % inst)
def _add_case_lines(fo, nt_name, gi, rule, inst='d'):
    """Emit the body of one switch case: RHS assignments plus a break."""
    _add_nt_rhs_assignments(fo, nt_name, gi, rule, inst=inst)
    fo.add_code_eol('    break')
def _add_switchcase_lines(fo,
                          nt_name,
                          gi,
                          all_ops_widths,
                          key_str='key',
                          inst='d'):
    """Emit the switch(key) statement dispatching to the matching rule.

    Each tuple key of the NT's constraint dict is converted to its integer
    encoding; a rule reachable from several keys gets several case labels.
    The default branch either allows fall-through (otherwise_ok) or records
    a general error.
    """
    cdict = gi.xed3_cdict
    fo.add_code('switch(%s) {' % key_str)
    int2key = {}
    key2int = {}
    for key in list(cdict.tuple2rule.keys()):
        keyval = tup2int.tuple2int(key, cdict.cnames, all_ops_widths)
        # Two distinct tuple keys must never encode to the same integer.
        # That can only happen when the combined constraint widths exceed
        # the 32-bit key type; the C compiler would also reject the
        # oversized shift, but checking here gives a clearer error.
        if keyval in int2key:
            msg = []
            msg.append('CDICT TUPLE VALUE CONFLICT in nt %s !!!!' % nt_name)
            msg.append('keyval %s' % keyval)
            msg.append('key1 %s, key2 %s' % (key, int2key[keyval]))
            # bug fix: the original appended 'cdict %s' with no format arg
            msg.append('cdict %s' % cdict)
            ildutil.ild_err('\n'.join(msg))
        int2key[keyval] = key
        key2int[key] = keyval
    covered_rules = set()
    # we want cases sorted by value - prettier
    for keyval in sorted(int2key.keys()):
        rule = cdict.tuple2rule[int2key[keyval]]
        if rule in covered_rules:
            continue
        covered_rules.add(rule)
        # one case label per key that maps to this rule, then one body
        for key in cdict.get_all_keys_by_val(rule):
            fo.add_code('case %s: /*%s -> %s*/' % (key2int[key], key, rule))
        # bug fix: forward inst (the original dropped it, silently
        # falling back to the default 'd')
        _add_case_lines(fo, nt_name, gi, rule, inst=inst)
    fo.add_code('default:')
    if gi.parser_output.otherwise_ok:
        fo.add_code('/* otherwise_ok */')
    else:
        # consistency: use the module-level error constant
        _add_op_assign_stmt(fo, _xed3_err_op, _xed3_gen_error,
                            inst, indent=1)
    fo.add_code_eol('    break')
    fo.add_code('}')
def _gen_capture_fo(agi, nt_name, all_ops_widths):
    """
    Generate xed3 capturing function for a given NT name.

    Returns a codegen.function_object_t for a static inline void function
    taking the decoded-inst pointer.  With constraints present, it emits
    key computation plus a switch; otherwise the single rule is inlined.
    """
    gi = agi.generator_dict[nt_name]
    cdict = gi.xed3_cdict
    fname = _get_xed3_nt_capture_fn(nt_name)
    inst = 'd'
    keystr = 'key'
    fo = codegen.function_object_t(fname,
                                   return_type='void',
                                   static=True,
                                   inline=True)
    fo.add_arg(ildutil.xed3_decoded_inst_t + '* %s' % inst)
    if len(cdict.cnames) > 0:
        # constrained NT: compute the key, then dispatch on it
        _add_cgen_key_lines(fo, nt_name, gi, all_ops_widths, keystr, inst)
        fo.add_code('/* now switch code..*/')
        _add_switchcase_lines(fo, nt_name, gi, all_ops_widths, keystr, inst)
    else:
        # unconstrained NT: exactly one rule, no dispatch needed
        rule = cdict.rule
        _add_nt_rhs_assignments(fo, nt_name, gi, rule)
    return fo
def _get_op_nt_names_from_ii(ii):
    """Return name tags for ii's operands: NT lookups, const imms and regs."""
    tags = []
    for op in ii.operands:
        if op.type == 'nt_lookup_fn':
            tags.append('%s_%s' % (op.name, op.lookupfn_name))
        elif op.type == 'imm_const':
            tags.append('%s_const%s' % (op.name, op.bits))
        elif op.type == 'reg':
            tags.append('%s_%s' % (op.name, op.bits))
    return tags
def _get_nt_names_from_ii(ii):
    """
    @param ii - instruction_info_t
    @return list of NT names in ii's pattern
    """
    names = []
    for bt in ii.ipattern.bits:
        if not bt.is_nonterminal():
            continue
        nt = bt.nonterminal_name()
        if not nt:
            ildutil.ild_err('Failed to get NT name in %s for %s' % (ii, bt))
        names.append(nt)
    return names
def _gen_ntluf_capture_chain_fo(nt_names, ii):
"""
Given a list of OP_NAME_NT_NAME strings(nt_names), generate a function
object (function_object_t)
that calls corresponding xed3 NT capturing functions.
Each such function captures everything that xed2 decode graph would
capture for a given pattern with operands that | |
"""
QT based app to bring together several interactions with the dataset. 2D slicing, 3D visualisation and segmentation and countour tree.
Author: <NAME>
Date: 29th June 2018
Segmentation and Contour Tree
Author: <NAME>
"""
# Import viewer classes
from ccpi.viewer.CILViewer2D import CILViewer2D, Converter
from ccpi.viewer.CILViewer import CILViewer
from ccpi.viewer.undirected_graph import UndirectedGraph
# Import Class to convert vtk render window to QT widget
from ccpi.viewer.QVTKWidget import QVTKWidget
# Import temporary function to generate data for the graph view
# Will be replaced to read the data from the loaded image.
from ccpi.viewer.undirected_graph import generate_data
# Import modules requred to run the QT application
from PyQt5 import QtCore, QtGui, QtWidgets, Qt
import vtk
from natsort import natsorted
import imghdr
# Import linking class to join 2D and 3D viewers
import ccpi.viewer.viewerLinker as vlink
# Import segmenation algorithm and tools
from ccpi.segmentation.SimpleflexSegmentor import SimpleflexSegmentor
import numpy
import sys, traceback
class ReadError(Exception):
    """Signals that a file could not be read into vtk."""
class ErrorObserver:
    """VTK observer that records whether an error fired and its message.

    Querying ErrorOccurred() resets the flag, so each call reports errors
    raised since the previous query.
    """

    def __init__(self):
        self.__ErrorOccurred = False
        self.__ErrorMessage = None
        # tells VTK what payload type to pass to __call__
        self.CallDataType = 'string0'

    def __call__(self, obj, event, message):
        # invoked by VTK when an error event fires
        self.__ErrorOccurred = True
        self.__ErrorMessage = message

    def ErrorOccurred(self):
        # read-and-reset the flag
        occurred, self.__ErrorOccurred = self.__ErrorOccurred, False
        return occurred

    def ErrorMessage(self):
        return self.__ErrorMessage
class Worker(QtCore.QRunnable):
    """
    Worker thread
    Inherits from QRunnable to handle worker thread setup, signals and wrapup.
    :param (function) callback:
        The function callback to run on this worker thread. Supplied
        args/kwargs will be passed to the runner.
    :param args:
        Arguments to pass to the callback function
    :param kwargs:
        Keyword arguments to pass to the callback function
    """
    def __init__(self, fn, *args, **kwargs):
        super(Worker, self).__init__()
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
        self.signals = WorkerSignals()
        # Add progress callback to kwargs so the wrapped function can
        # report progress through the signals object.
        self.kwargs['progress_callback'] = self.signals.progress
    @QtCore.pyqtSlot()
    def run(self):
        """
        Run the worker. Emits signals based on run state.
        Signals:
        - Error: Emitted when an exception is thrown in the workers function.
        - Result: Emitted if function completes successfully. Contains the return value of the function.
        - Finished: Emitted on completion of the worker thread.
        """
        try:
            result = self.fn(*self.args, **self.kwargs)
        # NOTE(review): bare except is deliberate here — every failure is
        # routed into the error signal instead of escaping the thread.
        except:
            traceback.print_exc()
            exctype, value = sys.exc_info()[:2]
            self.signals.error.emit((exctype, value, traceback.format_exc()))
        else:
            self.signals.result.emit(result)
        finally:
            self.signals.finished.emit()
class WorkerSignals(QtCore.QObject):
    """
    Defines signals available when running a worker thread
    Supported Signals:
    finished
        No Data
    error
        `tuple` (exctype, value, traceback.format_exc() )
    result
        `object` data returned from processing, anything
    progress
        `int` indicating % progress
    """
    # emitted once when the worker ends, success or failure
    finished = QtCore.pyqtSignal()
    # carries (exception type, value, formatted traceback)
    error = QtCore.pyqtSignal(tuple)
    # carries the callback's return value
    result = QtCore.pyqtSignal(object)
    # carries an integer percentage
    progress = QtCore.pyqtSignal(int)
def sentenceCase(string):
    """Capitalize the first word of ``string``, leaving the rest untouched.

    Returns '' for falsy or whitespace-only input (the original raised
    IndexError on whitespace-only strings), and preserves leading
    whitespace (the original duplicated the first word in that case).
    """
    if not string:
        return ''
    stripped = string.lstrip()
    if not stripped:
        # whitespace-only input: nothing to capitalize
        return ''
    leading = string[:len(string) - len(stripped)]
    first_word = stripped.split()[0]
    return leading + first_word.capitalize() + stripped[len(first_word):]
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
    """Build the main window: central 2D viewer, dockable graph and 3D
    viewer panels, menu/status bars, viewer linking, toolbar, thread pool
    and progress bar."""
    self.mainwindow = MainWindow
    MainWindow.setWindowTitle("CIL Viewer")
    MainWindow.resize(800, 600)
    # Contains response from open file dialog
    self.fn = None
    # Set linked state
    self.linked = True
    # Create link icon for inital load state.
    link_icon = QtGui.QIcon()
    link_icon.addPixmap(QtGui.QPixmap('icons/link.png'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    # Set numpy data array for graph
    self.graph_numpy_input_data = None
    # Dockable window flag
    self.hasDockableWindow = False
    self.centralwidget = QtWidgets.QWidget(MainWindow)
    # Central widget layout
    self.main_widget_form_layout = QtWidgets.QGridLayout(self.centralwidget)
    # Add the 2D viewer widget, with an interactor style that can be linked to the 3D view
    self.viewerWidget = QVTKWidget(viewer=CILViewer2D, interactorStyle=vlink.Linked2DInteractorStyle)
    self.centralwidget.setStyleSheet("background-color: rgb(25,51,101)")
    # Link-toggle button overlaid on the 2D viewer
    self.linkButton2D = QtWidgets.QPushButton(self.viewerWidget)
    self.linkButton2D.setIcon(link_icon)
    self.linkButton2D.setGeometry(0, 0, 30, 30)
    self.linkButton2D.setStyleSheet("background-color: whitesmoke")
    self.linkButton2D.setToolTip("State: Linked. Toggle status of link between 2D and 3D viewers")
    self.linkButton2D.clicked.connect(self.linkViewers)
    self.main_widget_form_layout.addWidget(self.linkButton2D, 0, 0, 1, 1)
    self.main_widget_form_layout.addItem(QtWidgets.QSpacerItem(1, 1, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum), 0, 1, 1, 1)
    self.main_widget_form_layout.addWidget(self.viewerWidget, 1, 0, 1, 2)
    # Add the graph widget in a dock on the left
    self.graphWidget = QVTKWidget(viewer=UndirectedGraph)
    self.graphDock = QtWidgets.QDockWidget(MainWindow)
    self.graphDock.setMinimumWidth(300)
    self.graphDock.setWidget(self.graphWidget)
    self.graphDock.setWindowTitle("Graph View")
    MainWindow.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.graphDock)
    # Add the 3D viewer widget in its own dock, with its own link-toggle button
    self.viewer3DWidget = QVTKWidget(viewer=CILViewer, interactorStyle=vlink.Linked3DInteractorStyle)
    self.Dock3DContents = QtWidgets.QWidget()
    self.Dock3DContents.setStyleSheet("background-color: rgb(25,51,101)")
    f_layout3D = Qt.QFormLayout(self.Dock3DContents)
    self.Dock3D = QtWidgets.QDockWidget(MainWindow)
    self.Dock3D.setMinimumWidth(300)
    self.Dock3D.setWindowTitle("3D View")
    self.linkButton3D = QtWidgets.QPushButton(self.viewer3DWidget)
    self.linkButton3D.setIcon(link_icon)
    self.linkButton3D.setGeometry(0, 0, 30, 30)
    self.linkButton3D.setStyleSheet("background-color: whitesmoke")
    self.linkButton3D.setToolTip("State: Linked. Toggle status of link between 2D and 3D viewers")
    self.linkButton3D.clicked.connect(self.linkViewers)
    f_layout3D.addWidget(self.linkButton3D)
    f_layout3D.addWidget(self.viewer3DWidget)
    self.Dock3D.setWidget(self.Dock3DContents)
    MainWindow.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self.Dock3D)
    # Set central widget
    MainWindow.setCentralWidget(self.centralwidget)
    # Create menu actions
    openAction = QtWidgets.QAction("Open", MainWindow)
    openAction.setShortcut("Ctrl+O")
    openAction.triggered.connect(self.openFile)
    closeAction = QtWidgets.QAction("Close", MainWindow)
    closeAction.setShortcut("Ctrl+Q")
    closeAction.triggered.connect(self.close)
    # Create status bar
    self.statusbar = QtWidgets.QStatusBar(MainWindow)
    MainWindow.setStatusTip('Open file to begin visualisation...')
    MainWindow.setStatusBar(self.statusbar)
    # Initially link viewers
    self.linkedViewersSetup()
    self.link2D3D.enable()
    # Create the toolbar
    self.toolbar()
    # Add threading
    self.threadpool = QtCore.QThreadPool()
    self.e = ErrorObserver()
    # Add progress bar (hidden until a long-running task starts)
    self.progressBar = QtWidgets.QProgressBar()
    self.progressBar.setMaximumWidth(250)
    self.progressBar.hide()
    self.statusbar.addPermanentWidget(self.progressBar)
def linkViewers(self, force_linked=False):
    """Toggle the 2D/3D viewer link (or force it on), updating both link
    buttons' icons and tooltips to match the new state.

    :param force_linked: when True, always (re-)enable the link.
    """
    link_icon = QtGui.QIcon()
    link_icon.addPixmap(QtGui.QPixmap('icons/link.png'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    link_icon_broken = QtGui.QIcon()
    link_icon_broken.addPixmap(QtGui.QPixmap('icons/broken_link.png'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    if self.linked and not force_linked:
        # currently linked -> break the link
        self.link2D3D.disable()
        self.linkButton3D.setIcon(link_icon_broken)
        self.linkButton2D.setIcon(link_icon_broken)
        self.linkButton3D.setToolTip("State: Un-linked. Toggle status of link between 2D and 3D viewers")
        self.linkButton2D.setToolTip("State: Un-linked. Toggle status of link between 2D and 3D viewers")
        self.linked = False
    else:
        # currently un-linked (or forced) -> establish the link
        self.link2D3D.enable()
        self.linkButton3D.setIcon(link_icon)
        self.linkButton2D.setIcon(link_icon)
        self.linkButton3D.setToolTip("State: Linked. Toggle status of link between 2D and 3D viewers")
        self.linkButton2D.setToolTip("State: Linked. Toggle status of link between 2D and 3D viewers")
        self.linked = True
def toolbar(self):
    """Create the main toolbar with open/save/graph-params/show actions.

    NOTE(review): this rebinds ``self.toolbar`` to the QToolBar instance,
    shadowing this method after the first call -- confirm it is only
    invoked once, from setupUi.
    """
    # Initialise the toolbar
    self.toolbar = self.mainwindow.addToolBar('Viewer tools')
    # define actions
    openAction = QtWidgets.QAction(self.mainwindow.style().standardIcon(QtWidgets.QStyle.SP_DirOpenIcon), 'Open file', self.mainwindow)
    openAction.triggered.connect(self.openFileTrigger)
    saveAction = QtWidgets.QAction(self.mainwindow.style().standardIcon(QtWidgets.QStyle.SP_DialogSaveButton), 'Save current render as PNG', self.mainwindow)
    saveAction.triggered.connect(self.saveFile)
    tree_icon = QtGui.QIcon()
    tree_icon.addPixmap(QtGui.QPixmap('icons/tree_icon.png'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    connectGraphAction = QtWidgets.QAction(tree_icon, 'Set Graph Widget parameters', self.mainwindow)
    connectGraphAction.triggered.connect(self.createDockableWindow)
    show_icon = QtGui.QIcon()
    show_icon.addPixmap(QtGui.QPixmap('icons/show.png'), QtGui.QIcon.Normal, QtGui.QIcon.Off)
    showWidgetsAction = QtWidgets.QAction(show_icon, 'Display tree and 3D viewer', self.mainwindow)
    showWidgetsAction.triggered.connect(self.dockWidgets)
    # Add actions to toolbar
    self.toolbar.addAction(openAction)
    self.toolbar.addAction(saveAction)
    self.toolbar.addAction(connectGraphAction)
    self.toolbar.addAction(showWidgetsAction)
def linkedViewersSetup(self):
    """Create the 2D<->3D viewer linker: share window/level and slice,
    but keep pan and zoom independent per viewer."""
    self.link2D3D = vlink.ViewerLinker(self.viewerWidget.viewer, self.viewer3DWidget.viewer)
    self.link2D3D.setLinkPan(False)
    self.link2D3D.setLinkZoom(False)
    self.link2D3D.setLinkWindowLevel(True)
    self.link2D3D.setLinkSlice(True)
def createDockableWindow(self):
    """Create (or re-show) the dockable "Graph Parameters" panel and the
    segmentation backend it drives.  The panel is built lazily on first
    activation and merely re-shown afterwards."""
    # Check if the dockable window has already been created
    if self.hasDockableWindow:
        # If the dockable window has already been created and is visible then don't add another one
        if self.graphDockWidget.isVisible():
            return
        else:
            # If the dockable window has already been created and is not visible. Set it to visible and return
            self.graphDockWidget.setVisible(True)
            return
    # The dockable window has been activated for the first time.
    # Set the hasDockableWindow flag
    self.hasDockableWindow = True
    # Keep a collection of related elements to set enabled/disabled state
    self.treeWidgetInitialElements = []
    self.treeWidgetUpdateElements = []
    # Setup segmentation
    self.segmentor = SimpleflexSegmentor()
    self.graphDockWidget = QtWidgets.QDockWidget(self.mainwindow)
    self.graphDockWidgetContents = QtWidgets.QWidget()
    # Add vertical layout to dock contents
    self.graphDockVL = QtWidgets.QVBoxLayout(self.graphDockWidgetContents)
    self.graphDockVL.setContentsMargins(0, 0, 0, 0)
    # Create widget for dock contents
    self.dockWidget = QtWidgets.QWidget(self.graphDockWidgetContents)
    # Add vertical layout to dock widget
    self.graphWidgetVL = QtWidgets.QVBoxLayout(self.dockWidget)
    self.graphWidgetVL.setContentsMargins(0, 0, 0, 0)
    # Add group box
    self.graphParamsGroupBox = QtWidgets.QGroupBox(self.dockWidget)
    self.graphParamsGroupBox.setTitle("Graph Parameters")
    # Add form layout to group box
    self.graphWidgetFL = QtWidgets.QFormLayout(self.graphParamsGroupBox)
    # Create validation rule for text entry
    validator = QtGui.QDoubleValidator()
    validator.setDecimals(2)
    # Add button to run graphing function
    self.graphStart = QtWidgets.QPushButton(self.graphParamsGroupBox)
    self.graphStart.setText("Generate Graph")
    self.graphStart.clicked.connect(self.generateGraphTrigger)
    self.graphWidgetFL.setWidget(0, QtWidgets.QFormLayout.SpanningRole, self.graphStart)
    self.treeWidgetInitialElements.append(self.graphStart)
    # Add horizonal seperator
    self.seperator = QtWidgets.QFrame(self.graphParamsGroupBox)
    self.seperator.setFrameShape(QtWidgets.QFrame.HLine)
    self.seperator.setFrameShadow(QtWidgets.QFrame.Raised)
    self.graphWidgetFL.setWidget(1, QtWidgets.QFormLayout.SpanningRole, self.seperator)
    # Add ISO Value field
    self.isoValueLabel = QtWidgets.QLabel(self.graphParamsGroupBox)
    self.isoValueLabel.setText("Iso Value (%)")
    self.graphWidgetFL.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.isoValueLabel)
    self.isoValueEntry = QtWidgets.QLineEdit(self.graphParamsGroupBox)
    self.isoValueEntry.setValidator(validator)
    self.isoValueEntry.setText("35")
    self.graphWidgetFL.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.isoValueEntry)
    self.treeWidgetUpdateElements.append(self.isoValueEntry)
    self.treeWidgetUpdateElements.append(self.isoValueLabel)
    # Add local/global checkbox
    self.isGlobalCheck = QtWidgets.QCheckBox(self.graphParamsGroupBox)
    self.isGlobalCheck.setText("Global Iso")
    self.isGlobalCheck.setChecked(True)
    self.graphWidgetFL.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.isGlobalCheck)
    self.treeWidgetUpdateElements.append(self.isGlobalCheck)
    # Add colour surfaces checkbox
    self.surfaceColourCheck = QtWidgets.QCheckBox(self.graphParamsGroupBox)
    self.surfaceColourCheck.setText("Colour Surfaces")
    self.graphWidgetFL.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.surfaceColourCheck)
    self.treeWidgetUpdateElements.append(self.surfaceColourCheck)
    # Add Log Tree field
    self.logTreeValueLabel = QtWidgets.QLabel(self.graphParamsGroupBox)
    self.logTreeValueLabel.setText("Log Tree Size")
    self.graphWidgetFL.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.logTreeValueLabel)
    self.logTreeValueEntry = QtWidgets.QLineEdit(self.graphParamsGroupBox)
    self.logTreeValueEntry.setValidator(validator)
    self.logTreeValueEntry.setText("0.34")
    self.treeWidgetUpdateElements.append(self.logTreeValueEntry)
    self.treeWidgetUpdateElements.append(self.logTreeValueLabel)
    self.graphWidgetFL.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.logTreeValueEntry)
    # Add collapse priority field
    self.collapsePriorityLabel = QtWidgets.QLabel(self.graphParamsGroupBox)
    self.collapsePriorityLabel.setText("Collapse Priority")
    self.graphWidgetFL.setWidget(5, QtWidgets.QFormLayout.LabelRole, self.collapsePriorityLabel)
    self.collapsePriorityValue = QtWidgets.QComboBox(self.graphParamsGroupBox)
    self.collapsePriorityValue.addItem("Height")
    self.collapsePriorityValue.addItem("Volume")
    self.collapsePriorityValue.addItem("Hypervolume")
    self.collapsePriorityValue.addItem("Approx Hypervolume")
    self.collapsePriorityValue.setCurrentIndex(1)
    self.treeWidgetUpdateElements.append(self.collapsePriorityValue)
    self.treeWidgetUpdateElements.append(self.collapsePriorityLabel)
    self.graphWidgetFL.setWidget(5, QtWidgets.QFormLayout.FieldRole, self.collapsePriorityValue)
    # Add submit button
    self.graphParamsSubmitButton = QtWidgets.QPushButton(self.graphParamsGroupBox)
    self.graphParamsSubmitButton.setText("Update")
    self.graphParamsSubmitButton.clicked.connect(self.updateGraphTrigger)
    self.graphWidgetFL.setWidget(6, QtWidgets.QFormLayout.FieldRole, self.graphParamsSubmitButton)
    self.treeWidgetUpdateElements.append(self.graphParamsSubmitButton)
    # Add elements to layout
    self.graphWidgetVL.addWidget(self.graphParamsGroupBox)
    self.graphDockVL.addWidget(self.dockWidget)
    self.graphDockWidget.setWidget(self.graphDockWidgetContents)
    self.mainwindow.addDockWidget(QtCore.Qt.RightDockWidgetArea, self.graphDockWidget)
    # Set update elements to disabled when first opening the window:
    # there is nothing to update until a graph has been generated
    if self.segmentor.dimensions is None:
        for element in self.treeWidgetUpdateElements:
            element.setEnabled(False)
def dockWidgets(self):
    """
    The 3D viewer widget and graph widget are dockable windows. Once closed,
    they are hidden.  This method makes them visible again.
    """
    self.Dock3D.show()
    self.graphDock.show()
def updateGraph(self, progress_callback):
    """
    Make updates to the graph based on the current user input values.

    :param (function) progress_callback:
        Signal-emitting callable used to report percentage progress.
    """
    # Set parameter values
    isoVal = float(self.isoValueEntry.text())
    logTreeVal = float(self.logTreeValueEntry.text())
    self.segmentor.collapsePriority = self.collapsePriorityValue.currentIndex()
    # Make sure to set the LocalIsoValue when isGlobal is false (ie. Local)
    if self.isGlobalCheck.isChecked():
        self.segmentor.setIsoValuePercent(isoVal)
    else:
        self.segmentor.setLocalIsoValuePercent(isoVal)
    # Update tree
    self.segmentor.updateTreeFromLogTreeSize(logTreeVal, self.isGlobalCheck.isChecked())
    progress_callback.emit(30)
    # Display results
    self.displaySurfaces(progress_callback)
    self.displayTree()
def generateGraph(self, progress_callback):
    """
    Generates the initial graph and 3D surface render.

    :param (function) progress_callback:
        Signal-emitting callable used to report percentage progress.
    """
    self.segmentor.setInputData(self.graph_numpy_input_data)
    progress_callback.emit(5)
    self.segmentor.calculateContourTree()
    progress_callback.emit(25)
    self.segmentor.setIsoValuePercent(float(self.isoValueEntry.text()))
    self.segmentor.collapsePriority = self.collapsePriorityValue.currentIndex()
    self.segmentor.updateTreeFromLogTreeSize(float(self.logTreeValueEntry.text()), self.isGlobalCheck.isChecked())
    progress_callback.emit(30)
    # Display results
    self.displaySurfaces(progress_callback)
    self.displayTree()
    # Once the graph has generated allow editing of the values and disable the generate button
    for element in self.treeWidgetUpdateElements:
        element.setEnabled(True)
    for element in self.treeWidgetInitialElements:
        element.setEnabled(False)
def displayTree(self):
tree | |
asset_name = utxo["asset"]
if asset_name not in self.assets:
self.assets[asset_name] = {"outpoints": []}
self.assets[asset_name]["outpoints"].append(utxo)
else:
# If we don't get a txout from a lock, it's no longer valid (wallet keeps them around for some reason.....)
self.logger.info(
"Removing Stale Wallet lock: {}".format(utxo_str))
self.wallet_lock_single(utxo=utxo_str, lock=False)
def wallet_unlock_all(self):
    """Release every wallet-level UTXO lock, if bulk unlocking is enabled."""
    if self._use_unlock_all:
        self.RVNpyRPC.do_rpc("lockunspent", unlock=True)
    else:
        self.logger.info("wallet_unlock_all SKIPPED !")
def invalidate_all(self):
    """Drop every cached wallet view (UTXOs, assets, balances, waiters).

    The next update_wallet() call rebuilds everything from the node.
    """
    self.utxos = []
    self.assets = {}
    self.trigger_cache = []
    self.my_asset_names = []
    self.total_balance = (0, 0, 0)
    self.available_balance = (0, 0, 0)
    self.clear_waiting()
def update_wallet(self):
    """Refresh UTXO/asset caches from the node, detect completed swap
    orders, prune stale locks and recompute balances."""
    self.check_waiting()
    # Locked UTXO's are excluded from the list command
    utxos = self.RVNpyRPC.do_rpc("listunspent")
    # only include spendable UTXOs
    self.utxos = [utxo for utxo in utxos if utxo["spendable"]]
    # Pull list of assets for selecting
    self.assets = self.RVNpyRPC.do_rpc("listmyassets", asset="", verbose=True)
    # Load details of wallet-locked transactions, inserted into self.utxos/assets
    self.load_wallet_locked()
    removed_orders = self.search_completed()
    for (trade, utxo) in removed_orders:
        finished_order = trade.order_completed(utxo)
        transaction = self.txUtils.search_swap_tx(utxo)
        if transaction:
            txid = transaction["txid"]
            self.logger.info("Order Completed: TXID {}".format(txid))
            # Watch the executing transaction until it confirms
            self.add_waiting(txid, self.__on_swap_mempool,
                             self.__on_swap_confirmed, callback_data=finished_order)
        else:
            self.logger.info("Order executed on unknown transaction")
    # Remove any locks we can't find with the gettxout command
    self.clear_stale_locks()
    # Actual balance calculation
    self.calculate_balance()
    # Cheat a bit and embed the asset name in it's metadata. This simplified things later
    for name in self.my_asset_names:
        try:
            self.assets[name]["name"] = name
        except Exception as e:
            # NOTE(review): silently skips names missing from the asset
            # cache -- presumably benign, but worth confirming
            pass
#
# Lock Management
#
def add_lock(self, txid=None, vout=None, utxo=None):
    """Track a UTXO as locked, identified either by (txid, vout) or by a
    combined utxo string.

    No-op if the outpoint is already tracked.  When wallet-lock mode is
    active, the lock is mirrored into the wallet as well.
    """
    if utxo != None and txid == None and vout == None:
        (txid, vout) = self.txUtils.split_utxo(utxo)
    for lock in self.locks:
        if txid == lock["txid"] and vout == lock["vout"]:
            return  # Already added
    self.logger.info("Locking UTXO {}-{}".format(txid, vout))
    # True means this will be None when spent in mempool
    txout = self.RVNpyRPC.do_rpc("gettxout", txid=txid, n=vout, include_mempool=True)
    if txout:
        utxo = self.txUtils.vout_to_utxo(txout, txid, vout)
        self.locks.append(utxo)
    if self.AtomicSwapMgr.CacheStorage.lock_mode():
        self.wallet_lock_single(txid, vout)
def remove_lock(self, txid=None, vout=None, utxo=None):
    """Stop tracking a UTXO lock, identified either by (txid, vout) or by a
    combined utxo string.

    Fix: iterate over a snapshot of self.locks -- removing elements from a
    list while iterating it skips the element after each removal.
    """
    if utxo is not None and txid is None and vout is None:
        (txid, vout) = self.txUtils.split_utxo(utxo)
    found = False
    for lock in list(self.locks):
        if txid == lock["txid"] and int(vout) == int(lock["vout"]):
            self.locks.remove(lock)
            found = True
    if not found:
        return
    self.logger.info("Unlocking UTXO {}-{}".format(txid, vout))
    # in wallet-lock mode we need to return these to the wallet
    if self.AtomicSwapMgr.CacheStorage.lock_mode():
        self.wallet_lock_single(txid, vout, lock=False)
def refresh_locks(self, clear=False):
    """Rebuild the in-memory lock list from the open swap orders.

    :param clear: when True, first release all wallet-level locks and reset
        the in-memory list before re-adding.
    """
    if clear:
        self.wallet_unlock_all()
        self.locks = []
    for swap in self.swaps:
        for utxo in swap.order_utxos:
            self.add_lock(utxo=utxo)
    if self.AtomicSwapMgr.CacheStorage.lock_mode():
        self.wallet_lock_all_swaps()
def lock_quantity(self, type):
    """Total locked amount, either of RVN ("rvn") or of the named asset."""
    if type == "rvn":
        matches = (lock for lock in self.locks if lock["type"] == "rvn")
    else:
        matches = (lock for lock in self.locks
                   if lock["type"] == "asset" and lock["name"] == type)
    return sum(float(lock["amount"]) for lock in matches)
def check_missed_history(self):
    """Re-attach confirmation listeners for history orders that never reached
    the "completed" state.

    These should already be fully posted on-chain; re-adding the waiting
    events lets the normal mempool/confirmed callback sequence finish them.
    """
    # Re-Add listeners for incomplete orders, should be fully posted, but add events so full sequence can happen
    for pending_order in [hist_order for hist_order in self.history if hist_order.state != "completed"]:
        if pending_order.utxo not in self.trigger_cache:
            swap_tx = self.txUtils.search_swap_tx(pending_order.utxo)
            if swap_tx:
                if pending_order.own:
                    # our own order: use the swap-side callbacks
                    self.add_waiting(
                        swap_tx["txid"], self.__on_swap_mempool, self.__on_swap_confirmed, pending_order)
                else:
                    # counterparty's order: use the completed-side callbacks
                    self.add_waiting(
                        swap_tx["txid"], self.__on_completed_mempool, self.__on_completed_confirmed, pending_order)
            else:
                self.logger.info("Failed to find transaction for presumably completed UTXO {}".format(
                    pending_order.utxo))
def search_completed(self, include_mempool=True):
    """Return (trade, utxo) pairs for every swap order whose UTXO has been
    spent on-chain (i.e. the order executed)."""
    return [
        (trade, utxo)
        for trade in self.swaps
        for utxo in trade.order_utxos
        if self.swap_utxo_spent(utxo, in_mempool=include_mempool, check_cache=False)
    ]
def clear_stale_locks(self):
    """Drop every tracked lock whose outpoint no longer exists per gettxout.

    Fix: iterate over a snapshot of self.locks, because remove_lock()
    mutates the list while we are walking it (the original skipped the
    element following each removal).
    """
    for lock in list(self.locks):
        if not self.RVNpyRPC.do_rpc("gettxout", txid=lock["txid"], n=lock["vout"], include_mempool=True):
            self.logger.info("Removing Stale Lock: {}".format(lock))
            self.remove_lock(utxo=self.txUtils.make_utxo(lock))
#
# UTXO Searching
#
def find_utxo(self, type, quantity, name=None, exact=True, include_locked=False, skip_rounded=True, sort_utxo=False):
    """Return the first UTXO whose amount matches *quantity* (exactly, or
    >= quantity when exact=False); None if nothing qualifies.

    NOTE(review): skip_rounded and sort_utxo are accepted but never used in
    this body, although find_utxo_set passes sort_utxo=True expecting a
    smallest-first scan -- confirm the intended behavior.
    """
    self.logger.info("Find {} UTXO: {} Exact: {} Include Locks: {}".format(
        type, quantity, exact, include_locked))
    available = self.get_utxos(type, name, include_locked=include_locked)
    for utxo in available:
        if(float(utxo["amount"]) == float(quantity) and exact) or (float(utxo["amount"]) >= float(quantity) and not exact):
            return utxo
    return None
def find_utxo_multiple_exact(self, type, quantity, name=None, include_locked=False):
    """Return every candidate UTXO whose amount equals *quantity* exactly."""
    self.logger.info("Find UTXO Multiple Exact: {} {} {} Include Locks: {}".format(
        quantity, type, name, include_locked))
    candidates = self.get_utxos(type, name=name, include_locked=include_locked)
    return list(filter(lambda utxo: utxo["amount"] == quantity, candidates))
def get_utxos(self, type, name=None, include_locked=False):
    """List candidate UTXOs.

    type == "rvn" lists plain RVN outpoints; type == "asset" lists the
    outpoints of the asset given by *name*; any other type string is itself
    treated as the asset name.  Locked UTXOs are filtered out unless
    include_locked is True.
    """
    if type == "rvn":
        results = [utxo for utxo in self.utxos]
    else:
        # "asset" uses the explicit name; otherwise the type IS the name
        key = name if type == "asset" else type
        results = [utxo for utxo in self.assets[key]["outpoints"]]
    if include_locked:
        return results
    return [utxo for utxo in results if not self.is_locked(utxo)]
def find_utxo_set(self, type, quantity, mode="combine", name=None, include_locked=False):
    """Select a set of UTXOs totalling at least *quantity*.

    :param mode: "combine" merges as many small UTXOs as possible into one
        transaction; "minimize" prefers the fewest UTXOs.
    :returns: (total, utxo_list) on success, or (None, None) when the
        available funds are insufficient.
    """
    found_set = None
    total = 0
    sorted_set = sorted(self.get_utxos(
        type, name, include_locked=include_locked), key=lambda utxo: utxo["amount"])
    if mode == "combine":
        # Try to combine as many UTXO's as possible into a single Transaction
        # This raises your transaction fees slighty (more data) but is ultimately a good thing for the network
        # Don't need to do anything actualy b/c default behavior is to go smallest-to-largest
        # However, if we have a single, unrounded UTXO that is big enough. it's always more efficient to use that instead
        quick_check = self.find_utxo(
            type, quantity, name=name, include_locked=include_locked, exact=False, sort_utxo=True)
        if quick_check:
            # If we have a single UTXO big enough, just use it and get change. sort_utxo ensures we find the smallest first
            found_set = [quick_check]
            total = quick_check["amount"]
    elif mode == "minimize":
        # Minimize the number of UTXO's used, to reduce transaction fees
        quick_check = self.find_utxo(
            type, quantity, name=name, include_locked=include_locked, exact=False, sort_utxo=True)
        quick_check_2 = self.find_utxo(
            type, quantity, name=name, include_locked=include_locked, exact=False, skip_rounded=False, sort_utxo=True)
        if quick_check:
            # If we have a single UTXO big enough, just use it and get change. sort_utxo ensures we find the smallest first
            found_set = [quick_check]
            total = quick_check["amount"]
        elif quick_check_2:
            # In this case we had a large enough single UTXO but it was an evenly rounded one (and no un-rounded ones existed)
            found_set = [quick_check_2]
            total = quick_check_2["amount"]
        else:
            # Just need to reverse the search to make it build from the fewest UTXO's
            sorted_set.reverse()
    # No single-UTXO shortcut applied: accumulate from sorted_set until covered
    if found_set == None:
        found_set = []
        while total < quantity and len(sorted_set) > 0:
            removed = sorted_set.pop(0)
            total += removed["amount"]
            found_set.append(removed)
    if float(total) >= float(quantity):
        self.logger.info("{} UTXOs: {} Requested: {:.8g} Total: {:.8g} Change: {:.8g}".format(
            type, len(found_set), quantity, total, total - quantity))
        return (total, found_set)
    else:
        self.logger.info("Not enough {} funds found. Requested: {:.8g} Total: {:.8g} Missing: {:.8g}".format(
            type, quantity, total, total-quantity))
        return (None, None)
# check if a swap's utxo has been spent
# if so then the swap has been executed!
def swap_utxo_spent(self, utxo, in_mempool=True, check_cache=True):
    """True if the swap's UTXO no longer exists -- meaning the swap executed."""
    if check_cache:
        # The cached view drops spent outputs immediately (mempool included),
        # so in_mempool has no effect on this path.
        return self.search_utxo(utxo) is None
    (txid, vout) = self.txUtils.split_utxo(utxo)
    txout = self.RVNpyRPC.do_rpc("gettxout", txid=txid, n=vout,
                                 include_mempool=in_mempool)
    return txout is None
# return ({type, utxo}, amount)
def search_utxo(self, utxo_str):
    """Look up a cached UTXO (plain RVN first, then asset outpoints) by its
    combined "txid-vout" string.  Returns the UTXO dict, or None."""
    (txid, vout) = self.txUtils.split_utxo(utxo_str)

    def _matches(entry):
        return entry["txid"] == txid and entry["vout"] == vout

    for candidate in self.utxos:
        if _matches(candidate):
            return candidate
    for asset_name in self.my_asset_names:
        for candidate in self.assets[asset_name]["outpoints"]:
            if _matches(candidate):
                return candidate
    return None
def is_locked(self, utxo):
    """True if this UTXO's outpoint appears in the in-memory lock list."""
    return any(lock["txid"] == utxo["txid"] and lock["vout"] == utxo["vout"]
               for lock in self.locks)
def is_taken(self, utxo, ignore_locks=False):
    """True if the UTXO is locked (unless ignore_locks) or already committed
    to an open swap order."""
    expected = self.txUtils.join_utxo(utxo["txid"], utxo["vout"])
    if not ignore_locks and self.is_locked(utxo):
        return True
    return any(expected in swap.order_utxos for swap in self.swaps)
def fund_asset_transaction_raw(self, fn_rpc, asset_name, quantity, vins, vouts, asset_change_addr=None):
    """Add asset inputs (and an asset-change output when needed) to a raw
    transaction.  Mutates *vins* and *vouts* in place.

    NOTE(review): fn_rpc is unused in this body.  Also assumes
    find_utxo_set succeeded -- it returns (None, None) on a shortfall,
    which would raise here; confirm callers pre-check funds.
    """
    # Search for enough asset UTXOs
    (asset_utxo_total, asset_utxo_set) = self.find_utxo_set(
        "asset", quantity, name=asset_name, include_locked=True)
    # Add our asset input(s)
    for asset_utxo in asset_utxo_set:
        vins.append({"txid": asset_utxo["txid"], "vout": asset_utxo["vout"]})
    if not asset_change_addr:
        asset_change_addr = self.addresses.get_single_address(
            "asset_change")
    # Add asset change if needed
    if(asset_utxo_total > quantity):
        # TODO: Send change to address the asset UTXO was originally sent to
        self.logger.info("Asset change being sent to {}".format(asset_change_addr))
        vouts[asset_change_addr] = self.txUtils.make_transfer(
            asset_name, asset_utxo_total - quantity)
def fund_transaction_final(self, fn_rpc, send_rvn, recv_rvn, target_addr, vins, vouts, original_txs):
# Cost represents rvn sent to the counterparty, since we adjust send_rvn later
cost = send_rvn
# If this is a swap, we need to add pseduo-funds for fee calc
if recv_rvn == 0 and send_rvn == 0:
# Add dummy output for fee calc
vouts[target_addr] = round(
sum([self.txUtils.calculate_fee(tx) for tx in original_txs]) * 4, 8)
if recv_rvn > 0 and send_rvn == 0:
# If we are not supplying rvn, but expecting it, we | |
# Similis/similis.py
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 07 17:56:46 2016
@author: Arenhart
"""
import PIL.Image as pil
import PIL.ImageTk as imagetk
import numpy as np
import scipy.ndimage as sp
import matplotlib.pyplot as plt
import skimage as sk
import skimage.filters as filters
import skimage.morphology as morphology
import skimage.measure as measure
import skimage.feature as feature
import tkinter as tk
import tkinter.filedialog as filedialog
import os
MAX_WIDTH = 300
MAX_HEIGHT = 300
def plt_to_image():
    """Snapshot the current matplotlib figure into a PIL Image.

    Saves the figure to a unique 'tempN.tif' in the current directory,
    reopens and copies it, then deletes the temp file.  Gives up and
    returns None after 1000 name collisions.
    """
    i = 0
    filename = 'temp'+str(i)+'.tif'
    while filename in os.listdir(os.getcwd()):
        i += 1
        filename = 'temp'+str(i)+'.tif'
        if i >= 1000: return None
    plt.savefig(filename)
    with open(filename, 'rb') as file:
        image = pil.open(file)
        # copy() detaches the pixel data so the file can be closed/removed
        img = image.copy()
        image.close()
    os.remove(filename)
    return img
def carregar():
    """Open a file-picker dialog and return the chosen file as a PIL Image."""
    return pil.open(filedialog.askopenfilename())
def salvar(imagem):
    """Ask the user for a destination path and save *imagem* there.

    If PIL rejects the chosen name (e.g. unknown extension), retry with the
    extension replaced by (or appended as) '.bmp'.  Does nothing if the
    dialog is cancelled.
    """
    save_name = filedialog.asksaveasfilename()
    if save_name == '':
        return  # dialog cancelled
    try:
        imagem.save(save_name)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate.
        if '.' in save_name:
            save_name = save_name[:save_name.find('.')] + '.bmp'
        else:
            save_name = save_name + '.bmp'
        imagem.save(save_name)
def verificar_binaria(matriz):
    """True when every positive pixel equals 255, i.e. the array is a 0/255 binary image."""
    positivos = matriz > 0
    return positivos.sum() * 255 == matriz.sum()
def vis(matriz):
    """Wrap a numpy array as a PIL Image for quick visualisation."""
    return pil.fromarray(matriz)
def binarizar(matriz, limiar=None):
    """Threshold *matriz* into a 0/255 uint8 binary image.

    :param limiar: threshold value; pixels >= limiar become 255.  When None,
        Otsu's method (skimage filters.threshold_otsu) picks it automatically.
    """
    if limiar is None:  # idiom fix: compare with None using 'is'
        limiar = filters.threshold_otsu(matriz)
    return ((matriz >= limiar) * 255).astype('uint8')
def histograma(matriz, bins=254):
    """Clear the current figure and plot the intensity histogram of *matriz*.

    Returns whatever plt.hist returns for the flattened pixel values.
    """
    plt.clf()
    return plt.hist(matriz.flatten(), bins=bins)
def mapa_distancia(matriz_binarizada):
    """Euclidean distance transform: distance of each nonzero pixel to the
    nearest zero pixel.

    Calls sp.distance_transform_edt directly -- the old
    scipy.ndimage.morphology submodule path was deprecated and later
    removed from SciPy.
    """
    return sp.distance_transform_edt(matriz_binarizada)
def inverter(matriz):
    """Invert grayscale values: each pixel v becomes 255 - v."""
    return np.subtract(255, matriz)
def expandir_contraste(matriz):
    """Stretch intensities to the full output range via skimage's
    rescale_intensity.

    NOTE(review): relies on sk.exposure resolving from the bare
    'import skimage as sk' -- verify the installed skimage version
    lazy-loads submodules.
    """
    return sk.exposure.rescale_intensity(matriz)
def equalizar_histograma(matriz):
    """Histogram-equalize *matriz*; the float [0,1] result is rescaled to uint8 0..255."""
    return (sk.exposure.equalize_hist(matriz)*255).astype('uint8')
def filtro_gaussiano(matriz, sigma):
    """Gaussian blur with the given *sigma*; the float result is rescaled to uint8 0..255."""
    return (filters.gaussian(matriz,
                             sigma=sigma)*255).astype('uint8')
def filtro_mediana(matriz, tamanho):
    """Median filter over a disk-shaped neighborhood of radius *tamanho*."""
    return filters.median(matriz, morphology.disk(tamanho))
def filtro_realce(matriz, tamanho=1):
    """Local contrast enhancement (rank filter) over a disk of radius *tamanho*."""
    return filters.rank.enhance_contrast(matriz, morphology.disk(tamanho))
def filtro_prewitt(matriz):
    """Prewitt edge magnitude, inverted (edges dark on light background), as uint8."""
    return (255-filters.prewitt(matriz)*255).astype('uint8')
def filtro_sobel(matriz):
    """Sobel edge magnitude, inverted (edges dark on light background), as uint8."""
    return (255-filters.sobel(matriz)*255).astype('uint8')
def filtro_scharr(matriz):
    """Scharr edge magnitude, inverted (edges dark on light background), as uint8."""
    return (255-filters.scharr(matriz)*255).astype('uint8')
def erosao(matriz_binaria, tamanho=1):
    """Binary erosion of a 0/255 image with a disk of radius *tamanho*; returns 0/255 uint8."""
    matriz_binaria = matriz_binaria//255  # 0/255 -> 0/1 for the binary op
    return (morphology.binary_erosion(
        matriz_binaria, morphology.disk(tamanho))*255).astype('uint8')
def dilatacao(matriz_binaria, tamanho=1):
    """Binary dilation of a 0/255 image with a disk of radius *tamanho*; returns 0/255 uint8."""
    matriz_binaria = matriz_binaria//255  # 0/255 -> 0/1 for the binary op
    return (morphology.binary_dilation(
        matriz_binaria, morphology.disk(tamanho))*255).astype('uint8')
def abertura(matriz_binaria, tamanho=1):
    """Binary opening (erosion then dilation) of a 0/255 image; returns 0/255 uint8."""
    matriz_binaria = matriz_binaria//255  # 0/255 -> 0/1 for the binary op
    return (morphology.binary_opening(
        matriz_binaria, morphology.disk(tamanho))*255).astype('uint8')
def fechamento(matriz_binaria, tamanho=1):
    """Binary closing (dilation then erosion) of a 0/255 image; returns 0/255 uint8."""
    matriz_binaria = matriz_binaria//255  # 0/255 -> 0/1 for the binary op
    return (morphology.binary_closing(
        matriz_binaria, morphology.disk(tamanho))*255).astype('uint8')
def granulometria(matriz_binaria):
    """Morphological granulometry: plot the area fraction removed by
    openings of increasing disk radius (blue = incremental per radius,
    green = cumulative).

    Stops when the cumulative fraction reaches 1 or the radius hits
    min(image side, 50).
    """
    matriz_binaria = matriz_binaria  # no-op kept from the original
    area_inicial = matriz_binaria.sum()
    menor_aresta = min(matriz_binaria.shape)
    raio = [0]
    area_cf = [0]
    area = [0]
    i = 1
    while area_cf[-1] < 1 and i < menor_aresta and i < 50:
        raio.append(i)
        # fraction of the original foreground removed by an opening of radius i
        new_area = 1 - (abertura(matriz_binaria, i).sum()/area_inicial)
        area.append(new_area-area_cf[-1])
        area_cf.append(new_area)
        i += 1
        print(i)  # progress trace
    plt.plot(raio, area, color='blue')
    plt.plot(raio, area_cf, color='green')
def correlacao(matriz_binaria):
    """Two-point correlation along x and y, for lags up to half the smaller
    image dimension.

    Plots the x (blue), y (red) and averaged (green) curves and returns the
    tuple (correlacao_x, correlacao_y, correlacao) of numpy arrays.
    """
    if not matriz_binaria.dtype == 'bool':
        # normalise to 0/1 -- NOTE(review): divides by matriz_binaria.max(),
        # which is a zero-division for an all-zero image; confirm inputs
        matriz_binaria = (matriz_binaria / matriz_binaria.max()).astype('uint8')
    comprimento = min(matriz_binaria.shape)//2
    correlacao_x = []
    correlacao_y = []
    # lag 0 is simply the phase (foreground) fraction
    correlacao_x.append(matriz_binaria.mean())
    for i in range(1, comprimento):
        # product of the image with its i-shifted copy, over the overlap size
        correlacao_x.append(
            ((matriz_binaria[0:-i, :] * matriz_binaria[i:, :]).sum())
            / matriz_binaria[i:, :].size)
    correlacao_y.append(matriz_binaria.mean())
    for i in range(1, comprimento):
        correlacao_y.append(
            ((matriz_binaria[:, 0:-i] * matriz_binaria[:, i:]).sum())
            / matriz_binaria[:, i:].size)
    correlacao_x = np.array(correlacao_x)
    correlacao_y = np.array(correlacao_y)
    correlacao = (correlacao_x + correlacao_y)/2
    plt.plot(range(comprimento), correlacao_x, color='blue')
    plt.plot(range(comprimento), correlacao_y, color='red')
    plt.plot(range(comprimento), correlacao, color='green')
    #plt.show()
    return (correlacao_x, correlacao_y, correlacao)
def rotular(imagem_binaria):
    """Label connected components of a binary image (background = 0)."""
    return measure.label(imagem_binaria, background=0)
def rotular_colorido(matriz_binaria):
    """Label connected components and render each label with a distinct RGB
    colour; background stays black.

    Colour channels are derived from the label index modulo cube/square
    roots of the maximum label, spreading labels over the colour space.
    """
    mat_rotulada = measure.label(matriz_binaria, background=0)
    size = matriz_binaria.shape
    mat = np.zeros((size[0], size[1], 3), dtype=np.uint8)
    max_index = mat_rotulada.max()
    g_factor = int(max_index**(2/3))
    r_factor = int(max_index**(1/3))
    # per-pixel loop over the whole image (slow for large images)
    for i, j in [(i, j) for i in range(size[0]) for j in range(size[1])]:
        index = mat_rotulada[i, j]
        if index == 0:
            mat[i, j, 0], mat[i, j, 1], mat[i, j, 2] = 0, 0, 0
            continue
        b = 50 + int(205 * (index / max_index))
        g = 50 + int((index % g_factor) * (205/g_factor))
        r = 50 + int((index % r_factor) * (205/r_factor))
        mat[i, j, 0], mat[i, j, 1], mat[i, j, 2] = r, g, b
    return mat
def conectividade(matriz_binaria):
    """Directional connectivity estimate: fraction of pixels that remain in
    the same labelled component under increasing shifts along x and y.

    Plots x (blue), y (red) and their mean (green).

    NOTE(review): np.append on 2-D slices returns a FLATTENED 1-D array, so
    'matriz_deslocada == matriz' compares arrays of different shapes; this
    looks broken for general image sizes -- verify the original intent
    (a cyclic shift via np.roll seems to have been meant).
    """
    matriz_binaria = rotular(matriz_binaria)
    comprimento = range(min(matriz_binaria.shape)//2)
    tamanho_total = matriz_binaria.shape[0]*matriz_binaria.shape[1]
    conectividade_x = []
    conectividade_y = []
    matriz = matriz_binaria#.flatten()
    for i in comprimento:
        matriz_deslocada = np.append(matriz[i:, :], matriz[:i, :])
        matriz_sobreposta = np.logical_and(matriz_deslocada == matriz, matriz != -1)
        conectividade_x.append(sum(matriz_sobreposta)/tamanho_total)
    #matriz = matriz_binaria.transpose().flatten()
    for i in comprimento:
        matriz_deslocada = np.append(matriz[:, i:], matriz[:, :i])
        matriz_sobreposta = np.logical_and(matriz_deslocada == matriz, matriz != -1)
        conectividade_y.append(sum(matriz_sobreposta)/tamanho_total)
    conectividade = (np.array(conectividade_x) + np.array(conectividade_y))/2
    plt.plot(comprimento, conectividade_x, color='blue')
    plt.plot(comprimento, conectividade_y, color='red')
    plt.plot(comprimento, conectividade, color='green')
    #plt.show()
def propriedades(matriz_rotulada, bins=20):
    """Print and histogram per-region shape statistics of a labelled image.

    Reports region count, perimeters, areas, elongation (major/minor axis
    ratio) and roughness (perimeter^2 / (4*pi*area)).  Regions with a zero
    minor axis are skipped to avoid division by zero.
    """
    prop = measure.regionprops(matriz_rotulada)
    perimetros = []
    areas = []
    alongamento = []
    rugosidade = []
    for p in prop:
        if p['minor_axis_length'] == 0: continue
        perimetros.append(p['perimeter'])
        areas.append(p['area'])
        rugosidade.append(p['perimeter']**2/(4*np.pi*p['area']))
        alongamento.append(p['major_axis_length']/p['minor_axis_length'])
    print('Contagem: ' + str(len(perimetros)))
    print('Perimetros (media = ' + str(np.mean(perimetros)) + ' ; desvio padrao = ' + str(np.std(perimetros)) + ')')
    plt.hist(perimetros, bins=bins)
    plt.show()
    print('Areas (media = ' + str(np.mean(areas)) + ' ; desvio padrao = ' + str(np.std(areas)) + ')')
    plt.hist(areas, bins=bins)
    plt.show()
    print('Alongamento (media = ' + str(np.mean(alongamento)) + ' ; desvio padrao = ' + str(np.std(alongamento)) + ')')
    plt.hist(alongamento, bins=bins)
    plt.show()
    print('Rugosidade (media = ' + str(np.mean(rugosidade)) + ' ; desvio padrao = ' + str(np.std(rugosidade)) + ')')
    plt.hist(rugosidade, bins=bins)
    plt.show()
def gerar_ruido_gaussiano(matriz, desv_pad=0.01):
    """Add Gaussian noise and rescale to uint8 0..255.

    NOTE(review): despite the parameter name, *desv_pad* is passed as the
    variance ('var'), not the standard deviation.
    """
    return (sk.util.random_noise(matriz, var=desv_pad)*255).astype('uint8')
def gerar_ruido_snp(matriz, quantidade=0.1):
    """Add salt-and-pepper noise to a fraction *quantidade* of pixels; returns uint8."""
    return (sk.util.random_noise(matriz, mode='s&p', amount=quantidade)*255).astype('uint8')
def gerar_imagem_ruido(aresta, densidade):
    """Create an aresta[0] x aresta[1] binary noise image with 'salt' density *densidade*."""
    return (sk.util.random_noise(np.zeros((aresta[0], aresta[1])),
                                 mode='salt', amount=densidade)*255).astype('uint8')
def extrair_bordas(matriz, mediana = 1, gaussiano = 2, realce = 2,
                   limiar = None, mediana2 = 0,
                   janela = 100, offset = 0):
    """Edge-extraction pipeline: median -> Gaussian -> sharpen -> Scharr,
    adaptive threshold, background masking, then a final median clean-up.

    Parameters mirror the helper filters used below: ``mediana``/``mediana2``
    are median filter sizes (first pass and final clean-up), ``gaussiano`` the
    Gaussian smoothing amount, ``realce`` the sharpening strength, ``limiar``
    the global threshold handed to ``binarizar`` for the background mask
    (presumably None lets it pick one automatically -- confirm), and
    ``janela``/``offset`` the adaptive-threshold window size and offset.
    Returns a uint8 edge image.
    """
    bordas = filtro_mediana(matriz,mediana)
    bordas = filtro_gaussiano(bordas, gaussiano)
    bordas = filtro_realce(bordas,realce)
    bordas = filtro_scharr(bordas)
    # NOTE(review): filters.threshold_adaptive only exists in old scikit-image
    # releases (it was renamed threshold_local, which returns per-pixel
    # thresholds instead of a boolean mask) -- this code assumes the old API.
    bordas = (filters.threshold_adaptive(bordas,janela,
                                         offset=offset)*255).astype('uint8')
    fundo = binarizar(matriz, limiar)
    # keep only edge pixels that fall on the foreground of the original image
    bordas = bordas * (fundo//255)
    bordas = filtro_mediana(bordas,mediana2)
    return bordas
def segregacao_watershed(bordas, pegada = 5, limiar = 0):
    """Split touching objects in a binary image via distance-transform watershed.

    ``pegada`` is the side of the square footprint used for peak detection and
    ``limiar`` the relative threshold for accepting a peak. Returns the label
    image produced by the watershed.
    """
    dist = mapa_distancia(bordas)
    # NOTE(review): indices=False was removed from recent scikit-image
    # peak_local_max, and morphology.watershed is now
    # skimage.segmentation.watershed -- this code assumes an older release.
    picos = feature.peak_local_max(dist,indices = False,
                                   labels = bordas,
                                   footprint = np.ones((pegada,pegada)),
                                   threshold_rel = limiar)
    marcadores = sp.label(picos)
    # negate the distance map so basins grow outward from the object centers
    rotulos = morphology.watershed(-dist, marcadores[0], mask = bordas)
    return rotulos
'''
Interface
'''
class Interface():
    def __init__(self, parent):
        """Build the whole GUI: a scrollable image canvas, the menu bar and
        the (initially hidden) secondary dialogs for histogram display,
        binarization, generic one-parameter filters and noise generation."""
        self.parent = parent
        self.img = None           # current PIL image (None = nothing loaded)
        self.img_desfazer = None  # previous image, used by "Desfazer" (undo)
        self.main_frame = tk.Frame(self.parent)
        self.main_frame.pack()
        # scrollable canvas that displays the current image
        self.image_frame = tk.Frame(self.parent)
        self.image_frame.pack(fill=tk.BOTH, expand = 1)
        self.canvas = tk.Canvas(self.image_frame,
                                relief = tk.SUNKEN)
        self.canvas.config(width=200,height=200)
        self.canvas.pack(side=tk.TOP, fill=tk.BOTH, expand = 1)
        self.sbV = tk.Scrollbar(self.canvas, orient=tk.VERTICAL)
        self.sbH = tk.Scrollbar(self.canvas, orient=tk.HORIZONTAL)
        self.sbV.config(command=self.canvas.yview)
        self.sbH.config(command=self.canvas.xview)
        self.canvas.config(yscrollcommand=self.sbV.set)
        self.canvas.config(xscrollcommand=self.sbH.set)
        self.sbV.pack(side=tk.RIGHT, fill=tk.Y)
        self.sbH.pack(side=tk.BOTTOM, fill=tk.X)
        '''
        Inicializacao dos botoes de menu
        '''
        # --- menu: Arquivo (file operations) ---
        self.menu = tk.Menu(parent)
        self.menu_arquivo = tk.Menu(self.menu,tearoff=0)
        self.menu_arquivo.add_command(label="Abrir imagem",
                                      command = self.carregar_imagem)
        self.menu_arquivo.add_command(label="Salvar imagem",
                                      command = self.salvar_imagem)
        self.menu_arquivo.add_command(label="Fechar imagem",
                                      command = self.fechar_imagem)
        self.menu_arquivo.add_command(label="Defazer",
                                      command = self.desfazer)
        self.menu_arquivo.add_command(label="Sair",
                                      command = self.fechar)
        self.menu.add_cascade(label = 'Arquivo',
                              menu=self.menu_arquivo)
        # --- menu: Transformar (image transforms) ---
        self.menu_transformar = tk.Menu(self.menu,tearoff=0)
        self.menu_transformar.add_command(label='Converter escala de cinza',
                                          command = self.escala_de_cinza)
        self.menu_transformar.add_command(label = 'Binarizar...',
                                          command = self.binarizar)
        self.menu_transformar.add_command(label = 'Mapa de distancia',
                                          command = self.mapa_distancia)
        self.menu_transformar.add_command(label = 'Inverter',
                                          command = self.inverter)
        self.menu_transformar.add_command(label = 'Rotular',
                                          command = self.rotular)
        self.menu.add_cascade(label="Transformar",
                              menu = self.menu_transformar)
        # --- menu: Filtros (filters; parameterized ones reuse self.filtro) ---
        self.menu_filtros = tk.Menu(self.menu, tearoff = 0)
        self.menu_filtros.add_command(label = 'Expandir Contraste',
                                      command = self.expandir_contraste)
        self.menu_filtros.add_command(label = 'Equalizar Histograma',
                                      command = self.equalizar_histograma)
        self.menu_filtros.add_command(label = 'Filtro Gaussiano...',
                                      command = lambda: self.filtro('init gauss'))
        self.menu_filtros.add_command(label = 'Filtro da Mediana...',
                                      command = lambda: self.filtro('init media'))
        self.menu_filtros.add_command(label = 'Filtro Realce...',
                                      command = lambda: self.filtro('init real'))
        self.menu_filtros.add_command(label = 'Filtro Prewitt',
                                      command = self.filtro_prewitt)
        self.menu_filtros.add_command(label = 'Filtro Sobel',
                                      command = self.filtro_sobel)
        self.menu_filtros.add_command(label = 'Filtro Scharr',
                                      command = self.filtro_scharr)
        self.menu.add_cascade(label="Filtros", menu = self.menu_filtros)
        # --- menu: Info (measurements / statistics) ---
        self.menu_info = tk.Menu(self.menu, tearoff = 0)
        self.menu_info.add_command(label = 'Histograma...',
                                   command = self.histograma)
        self.menu_info.add_command(label = 'Correlacao',
                                   command = self.correlacao)
        self.menu_info.add_command(label = 'Conectividade',
                                   command = self.conectividade)
        self.menu_info.add_command(label = 'Propriedades',
                                   command = self.propriedades)
        self.menu.add_cascade(label="Info", menu = self.menu_info)
        # --- menu: Morfologia (morphological operations) ---
        self.menu_morfologia = tk.Menu(self.menu, tearoff = 0)
        self.menu_morfologia.add_command(label = 'Erosao...',
                                         command = lambda: self.morfologia('init erosao'))
        self.menu_morfologia.add_command(label = 'Dilatacao...',
                                         command = lambda: self.morfologia('init dilatacao'))
        self.menu_morfologia.add_command(label='Abertura...',
                                         command = lambda: self.morfologia('init abertura'))
        self.menu_morfologia.add_command(label = 'Fechamento...',
                                         command = lambda: self.morfologia('init fechamento'))
        self.menu_morfologia.add_command(label = 'Granulometria',
                                         command = self.granulometria)
        self.menu.add_cascade(label="Morfologia", menu=self.menu_morfologia)
        # --- menu: Ruido (noise generation) ---
        self.menu_ruido = tk.Menu(self.menu, tearoff = 0)
        self.menu_ruido.add_command(label = 'Gerar Ruido Gaussiano...',
                                    command = lambda: self.filtro('init gaussiano'))
        self.menu_ruido.add_command(label = 'Gerar Ruido "Sal e Pimenta"...',
                                    command = lambda: self.filtro('init snp'))
        self.menu_ruido.add_command(label = 'Criar Imagem com Ruido...',
                                    command = lambda: self.gerar_imagem_ruido('init'))
        self.menu.add_cascade(label="Ruido", menu=self.menu_ruido)
        # --- menu: Bordas (edge extraction / watershed) ---
        self.menu_bordas = tk.Menu(self.menu, tearoff=0)
        self.menu_bordas.add_command(label = 'Extrair Bordas...',
                                     command = self.extrair_bordas)
        self.menu_bordas.add_command(label = 'Segregacao Watershed...',
                                     command = self.segregacao_watershed)
        self.menu.add_cascade(label="Bordas", menu=self.menu_bordas)
        '''
        fim da inicializacao dos botoes de menu
        '''
        '''
        Inicializacao janelas secundarias
        '''
        # Histograma -- hidden Toplevel that displays the histogram plot
        self.janela_histograma = tk.Toplevel(self.parent)
        self.janela_histograma.withdraw()
        self.histograma_show = tk.Label(self.janela_histograma)
        self.histograma_show.pack(side=tk.TOP)
        self.histograma_button = tk.Button(self.janela_histograma,
                                           text='Fechar',
                                           command = self.janela_histograma.withdraw)
        self.histograma_button.pack(side=tk.TOP)
        # Binarizacao -- threshold dialog; closing through the window manager
        # is disabled so the user must choose OK or Cancelar
        self.janela_binarizar = tk.Toplevel(self.parent)
        self.janela_binarizar.protocol('WM_DELETE_WINDOW', lambda: print('Invalido'))
        self.janela_binarizar.withdraw()
        self.binarizar_show = tk.Label(self.janela_binarizar)
        self.binarizar_show.pack(side=tk.TOP)
        self.binarizar_botoes = tk.Label(self.janela_binarizar)
        self.binarizar_botoes.pack(side = tk.TOP)
        self.binarizar_fechar = tk.Button(self.binarizar_botoes,
                                          text='Cancelar',
                                          command = lambda: self.binarizar('cancelar'))
        self.binarizar_fechar.pack(side=tk.LEFT)
        self.binarizar_ok = tk.Button(self.binarizar_botoes,
                                      text = 'OK',
                                      command = lambda: self.binarizar('confirmar'))
        self.binarizar_ok.pack(side = tk.LEFT)
        self.binarizar_parametros = tk.Label(self.janela_binarizar)
        self.binarizar_parametros.pack(side = tk.TOP)
        self.label_limiar = tk.Label(self.binarizar_parametros,
                                     text = 'Limiar()')
        self.label_limiar.grid(row=0,column=0)
        self.limiar_binarizacao = tk.StringVar()
        self.entry_limiar = tk.Entry(self.binarizar_parametros,
                                     textvariable = self.limiar_binarizacao)
        self.entry_limiar.grid(row=0,column=1)
        # every edit of the threshold field re-runs the binarization preview
        self.limiar_binarizacao.trace('w',lambda a,b,c: self.binarizar('atualizar'))
        self.binarizar_botao_aumentar = tk.Button(self.binarizar_parametros,
                                                  text = '+',
                                                  command = lambda: self.binarizar('aumentar'))
        self.binarizar_botao_aumentar.grid(row=0,column=2)
        self.binarizar_botao_diminuir = tk.Button(self.binarizar_parametros,
                                                  text = '-',
                                                  command = lambda: self.binarizar('diminuir'))
        self.binarizar_botao_diminuir.grid(row=0,column=3)
        # Filtros -- generic one-parameter dialog; self.funcao_filtro is set
        # to the active handler before the window is shown
        self.funcao_filtro = None
        self.janela_filtro = tk.Toplevel(self.parent)
        self.janela_filtro.protocol('WM_DELETE_WINDOW', lambda: print('Invalido'))
        self.filtro_label = tk.Label(self.janela_filtro)
        self.filtro_label.grid(row = 0, column = 0)
        self.filtro_var = tk.StringVar()
        self.filtro_var.trace('w', lambda a,b,c: self.funcao_filtro('atualizar'))
        self.filtro_campo = tk.Entry(self.janela_filtro,
                                     textvariable = self.filtro_var)
        self.filtro_campo.grid(row = 0, column = 1)
        self.filtro_botao_aumentar = tk.Button(self.janela_filtro,
                                               text = '+',
                                               command = lambda: self.funcao_filtro('aumentar'))
        self.filtro_botao_aumentar.grid(row=0, column = 2)
        self.filtro_botao_diminuir = tk.Button(self.janela_filtro,
                                               text = '-',
                                               command = lambda: self.funcao_filtro('diminuir'))
        self.filtro_botao_diminuir.grid(row=0, column = 3)
        self.filtro_botao_ok = tk.Button(self.janela_filtro,
                                         text = 'OK',
                                         command = lambda: self.funcao_filtro('aceitar'))
        self.filtro_botao_ok.grid(row=1, column = 0)
        self.filtro_botao_cancelar = tk.Button(self.janela_filtro,
                                               text = 'Cancelar',
                                               command = lambda: self.funcao_filtro('cancelar'))
        self.filtro_botao_cancelar.grid(row=1, column = 1)
        self.janela_filtro.withdraw()
        # Ruido -- dialog to create a fresh noise image (height, width, proportion)
        self.janela_ruido = tk.Toplevel(self.parent)
        self.ruido_var1 = tk.StringVar()
        self.ruido_var1.set('100')
        self.ruido_var1.trace('w', lambda a,b,c: self.gerar_imagem_ruido('atualizar'))
        self.ruido_label1 = tk.Label(self.janela_ruido, text = 'Altura(100): ')
        self.ruido_label1.grid(column = 0, row = 0)
        self.ruido_entry1 = tk.Entry(self.janela_ruido,
                                     textvariable = self.ruido_var1)
        self.ruido_entry1.grid(row = 0, column = 1)
        self.ruido_var2 = tk.StringVar()
        self.ruido_var2.set('100')
        self.ruido_var2.trace('w', lambda a,b,c: self.gerar_imagem_ruido('atualizar'))
        self.ruido_label2 = tk.Label(self.janela_ruido, text = 'Largura(100): ')
        self.ruido_label2.grid(column = 0, row = 1)
        self.ruido_entry2 = tk.Entry(self.janela_ruido,
                                     textvariable = self.ruido_var2)
        self.ruido_entry2.grid(column=1, row=1)
        self.ruido_var3 = tk.StringVar()
        self.ruido_var3.set('0.5')
        self.ruido_var3.trace('w', lambda a,b,c: self.gerar_imagem_ruido('atualizar'))
        self.ruido_label3 = tk.Label(self.janela_ruido, text = 'Proporcao(0.5): ')
        self.ruido_label3.grid(column = 0, row = 2)
        self.ruido_entry3 = tk.Entry(self.janela_ruido,
                                     textvariable = self.ruido_var3)
        self.ruido_entry3.grid(column=1, row=2)
        self.ruido_ok = tk.Button(self.janela_ruido,
                                  text = 'OK',
                                  command = lambda: self.gerar_imagem_ruido('aceitar'))
        self.ruido_ok.grid(column = 0, row = 3)
        self.ruido_cancelar = tk.Button(self.janela_ruido,
                                        text = 'Cancelar',
                                        command = lambda: self.gerar_imagem_ruido('cancelar'))
        self.ruido_cancelar.grid(row = 3, column = 1)
        self.janela_ruido.withdraw()
        # attach the finished menu bar and give the window a default size
        parent.config(menu=self.menu)
        parent.geometry('400x300')
def salvar_imagem(self):
if self.img != None:
salvar(self.img)
    def fechar(self):
        """Exit the application: stop the Tk mainloop, then destroy the root window."""
        self.parent.quit()
        self.parent.destroy()
def carregar_imagem(self):
self.img = pil.open(filedialog.askopenfilename())
self.img_desfazer = None
self.atualizar()
def fechar_imagem(self):
self.img, self.img_desfazer = None, self.img
self.atualizar()
def desfazer(self):
if self.img_desfazer == None:
print ('Sem imagem para desfazer')
return
self.img, self.img_desfazer = self.img_desfazer, self.img
self.atualizar()
def atualizar(self):
self.canvas.delete('all')
if self.img == None:
return
self.photo_img = imagetk.PhotoImage(self.img)
size = self.img.size
self.canvas_image = self.canvas.create_image(0,0,anchor='nw',
image=self.photo_img)
self.canvas.config(width=min(size[0],MAX_WIDTH),
height=min(size[1],MAX_HEIGHT) )
self.canvas.config(scrollregion=(0,0,size[0],size[1]))
def escala_de_cinza(self):
if self.img.mode == 'L':
return
self.img_desfazer = self.img
self.img = self.img.convert('L')
self.atualizar()
    def binarizar(self, modo = 'iniciar'):
        '''
        Interactive binarization dialog, driven as a small state machine.

        Modes: 'iniciar' (open the dialog, show the histogram and seed the
        threshold with Otsu's value), 'confirmar' (accept the result),
        'atualizar' (re-threshold the preview after an edit), 'cancelar'
        (restore the original image), 'aumentar'/'diminuir' (step the
        threshold by one, which re-triggers 'atualizar' via the Tk trace).
        '''
        if modo == 'iniciar':
            # grayscale copy used only to draw the histogram preview
            if self.img.mode == 'L':
                mat = np.array(self.img)
            else:
                mat = np.array(self.img.convert('L'))
            histograma(mat)  # draws onto matplotlib's current figure
            self.hist_img = plt_to_image().copy()
            self.hist_img = imagetk.PhotoImage(self.hist_img.convert('RGB'))
            self.hist_ref = self.hist_img  # extra reference so Tk does not GC it
            self.binarizar_show.config(image=self.hist_img)
            self.janela_binarizar.deiconify()
            # Otsu's method provides the initial threshold suggestion
            self.otsu = filters.threshold_otsu(np.array(self.img))
            self.label_limiar.configure(text = 'Limiar ('+str(self.otsu)+') :')
            self.img_original = self.img.copy()  # kept for cancel/undo
            self.limiar_valido = self.otsu       # last threshold known to be valid
            self.limiar_binarizacao.set(self.otsu)  # fires 'atualizar' via trace
        elif modo == 'confirmar':
            self.img_desfazer = self.img_original
            self.janela_binarizar.withdraw()
        elif modo == 'atualizar':
            # reject non-numeric edits by restoring the last valid threshold
            if not self.limiar_binarizacao.get().isdigit() and self.limiar_binarizacao.get() != '':
                self.limiar_binarizacao.set(self.limiar_valido)
                return
            elif self.limiar_binarizacao.get() == "":
                # field cleared: remember that and wait for a new value
                self.limiar_valido = ""
                return
            self.limiar_valido = int(self.limiar_binarizacao.get())
            # preview: always threshold the untouched original, not the preview
            self.img = binarizar(np.array(self.img_original),
                                 int(self.limiar_binarizacao.get()))
            self.img = pil.fromarray(self.img)
            self.atualizar()
        elif modo == 'cancelar':
            self.img = self.img_original
            self.atualizar()
            self.janela_binarizar.withdraw()
        elif modo == 'aumentar':
            self.limiar_binarizacao.set(str(int(self.limiar_binarizacao.get())+1))
        elif modo == 'diminuir':
            self.limiar_binarizacao.set(str(int(self.limiar_binarizacao.get())-1))
def histograma(self):
if self.img.mode == 'L':
mat = np.array(self.img)
else:
mat = np.array(self.img.convert('L'))
histograma(mat) #grava o grafico para uma variavel interna, plt.gcp()
self.hist_img = plt_to_image().copy()
self.hist_img = imagetk.PhotoImage(self.hist_img.convert('RGB'))
self.hist_ref = self.hist_img
self.histograma_show.config(image=self.hist_img)
self.janela_histograma.deiconify()
def mapa_distancia(self):
mat = np.array(self.img)
binaria = verificar_binaria(mat)
print (binaria)
if not binaria:
return
self.img_desfazer = self.img.copy()
self.img = pil.fromarray(mapa_distancia(mat))
self.atualizar()
def inverter(self):
if self.img.mode == 'L':
mat = np.array(self.img)
mat = inverter(mat)
else:
mat1, mat2, mat3 = self.img.split()
mat1, mat2, mat3 = list(map(np.array,(mat1,mat2,mat3)))
mat1, mat2, mat3 = list(map(inverter,(mat1,mat2,mat3)))
mat = np.stack((mat1, mat2, mat3),axis=-1)
self.img_desfazer = self.img
self.img = pil.fromarray(mat)
self.atualizar()
def expandir_contraste(self):
mat = np.array(self.img)
self.img_desfazer = self.img.copy()
self.img = pil.fromarray(expandir_contraste(mat))
self.atualizar()
def equalizar_histograma(self):
mat = np.array(self.img)
self.img_desfazer = self.img.copy()
self.img = pil.fromarray(equalizar_histograma(mat))
self.atualizar()
def filtro(self,modo):
'''
modos: | |
'interest_ids', 'interest_match', 'sort_field', 'sort_dir', 'since_last_campaign', 'unsubscribed_since'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_list_members_info" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'list_id' is set
if ('list_id' not in params or
params['list_id'] is None):
raise ValueError("Missing the required parameter `list_id` when calling ``") # noqa: E501
if 'count' in params and params['count'] > 1000: # noqa: E501
raise ValueError("Invalid value for parameter `count` when calling ``, must be a value less than or equal to `1000`") # noqa: E501
collection_formats = {}
path_params = {}
if 'list_id' in params:
path_params['list_id'] = params['list_id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
collection_formats['fields'] = 'csv' # noqa: E501
if 'exclude_fields' in params:
query_params.append(('exclude_fields', params['exclude_fields'])) # noqa: E501
collection_formats['exclude_fields'] = 'csv' # noqa: E501
if 'count' in params:
query_params.append(('count', params['count'])) # noqa: E501
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'email_type' in params:
query_params.append(('email_type', params['email_type'])) # noqa: E501
if 'status' in params:
query_params.append(('status', params['status'])) # noqa: E501
if 'since_timestamp_opt' in params:
query_params.append(('since_timestamp_opt', params['since_timestamp_opt'])) # noqa: E501
if 'before_timestamp_opt' in params:
query_params.append(('before_timestamp_opt', params['before_timestamp_opt'])) # noqa: E501
if 'since_last_changed' in params:
query_params.append(('since_last_changed', params['since_last_changed'])) # noqa: E501
if 'before_last_changed' in params:
query_params.append(('before_last_changed', params['before_last_changed'])) # noqa: E501
if 'unique_email_id' in params:
query_params.append(('unique_email_id', params['unique_email_id'])) # noqa: E501
if 'vip_only' in params:
query_params.append(('vip_only', params['vip_only'])) # noqa: E501
if 'interest_category_id' in params:
query_params.append(('interest_category_id', params['interest_category_id'])) # noqa: E501
if 'interest_ids' in params:
query_params.append(('interest_ids', params['interest_ids'])) # noqa: E501
if 'interest_match' in params:
query_params.append(('interest_match', params['interest_match'])) # noqa: E501
if 'sort_field' in params:
query_params.append(('sort_field', params['sort_field'])) # noqa: E501
if 'sort_dir' in params:
query_params.append(('sort_dir', params['sort_dir'])) # noqa: E501
if 'since_last_campaign' in params:
query_params.append(('since_last_campaign', params['since_last_campaign'])) # noqa: E501
if 'unsubscribed_since' in params:
query_params.append(('unsubscribed_since', params['unsubscribed_since'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/problem+json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['basicAuth'] # noqa: E501
return self.api_client.call_api(
'/lists/{list_id}/members', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ListMembers2', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_list_member(self, list_id, subscriber_hash, **kwargs): # noqa: E501
"""Get member info # noqa: E501
Get information about a specific list member, including a currently subscribed, unsubscribed, or bounced member. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_member(list_id, subscriber_hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str subscriber_hash: The MD5 hash of the lowercase version of the list member's email address. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:return: ListMembers2
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_list_member_with_http_info(list_id, subscriber_hash, **kwargs) # noqa: E501
else:
(data) = self.get_list_member_with_http_info(list_id, subscriber_hash, **kwargs) # noqa: E501
return data
    def get_list_member_with_http_info(self, list_id, subscriber_hash, **kwargs):  # noqa: E501
        """Get member info # noqa: E501
        Get information about a specific list member, including a currently subscribed, unsubscribed, or bounced member. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_list_member_with_http_info(list_id, subscriber_hash, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str list_id: The unique ID for the list. (required)
        :param str subscriber_hash: The MD5 hash of the lowercase version of the list member's email address. (required)
        :param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
        :param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
        :return: ListMembers2
        If the method is called asynchronously,
        returns the request thread.
        """
        # complete set of accepted keyword arguments; anything else raises below
        all_params = ['list_id', 'subscriber_hash', 'fields', 'exclude_fields']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        params = locals()
        # fold **kwargs into params, rejecting unknown argument names
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_list_member" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'list_id' is set
        if ('list_id' not in params or
                params['list_id'] is None):
            raise ValueError("Missing the required parameter `list_id` when calling ``")  # noqa: E501
        # verify the required parameter 'subscriber_hash' is set
        if ('subscriber_hash' not in params or
                params['subscriber_hash'] is None):
            raise ValueError("Missing the required parameter `subscriber_hash` when calling ``")  # noqa: E501
        collection_formats = {}
        # values substituted into the /lists/{list_id}/members/{subscriber_hash} path
        path_params = {}
        if 'list_id' in params:
            path_params['list_id'] = params['list_id']  # noqa: E501
        if 'subscriber_hash' in params:
            path_params['subscriber_hash'] = params['subscriber_hash']  # noqa: E501
        # optional query parameters; list-valued ones are serialized as csv
        query_params = []
        if 'fields' in params:
            query_params.append(('fields', params['fields']))  # noqa: E501
            collection_formats['fields'] = 'csv'  # noqa: E501
        if 'exclude_fields' in params:
            query_params.append(('exclude_fields', params['exclude_fields']))  # noqa: E501
            collection_formats['exclude_fields'] = 'csv'  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json', 'application/problem+json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['basicAuth']  # noqa: E501
        return self.api_client.call_api(
            '/lists/{list_id}/members/{subscriber_hash}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ListMembers2',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_list_member_activity(self, list_id, subscriber_hash, **kwargs): # noqa: E501
"""View recent activity 50 # noqa: E501
Get the last 50 events of a member's activity on a specific list, including opens, clicks, and unsubscribes. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_member_activity(list_id, subscriber_hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str subscriber_hash: The MD5 hash of the lowercase version of the list member's email address. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param list[str] action: A comma seperated list of actions to return.
:return: MemberActivityEvents
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_list_member_activity_with_http_info(list_id, subscriber_hash, **kwargs) # noqa: E501
else:
(data) = self.get_list_member_activity_with_http_info(list_id, subscriber_hash, **kwargs) # noqa: E501
return data
def get_list_member_activity_with_http_info(self, list_id, subscriber_hash, **kwargs): # noqa: E501
"""View recent activity 50 # noqa: E501
Get the last 50 events of a member's activity on a specific list, including opens, clicks, and unsubscribes. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_list_member_activity_with_http_info(list_id, subscriber_hash, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str list_id: The unique ID for the list. (required)
:param str subscriber_hash: The MD5 hash of the lowercase version of the list member's email address. (required)
:param list[str] fields: A comma-separated list of fields to return. Reference parameters of sub-objects with dot notation.
:param list[str] exclude_fields: A comma-separated list of fields to exclude. Reference parameters of sub-objects with dot notation.
:param list[str] action: A comma seperated list of actions to return.
:return: MemberActivityEvents
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['list_id', 'subscriber_hash', 'fields', 'exclude_fields', 'action'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_list_member_activity" % key
)
params[key] = val
del params['kwargs']
| |
777 ~/upload' + '\n'
script = ''
script+='mkdir ~/efs &> /dev/null'+'\n'
script+='sudo apt-get update' + '\n'
script+='sudo apt-get install -y nfs-common' + '\n'
script+='sudo mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport '+filesystem_dns+':/ ~/efs '+'\n'
#script+='echo "#!/bin/bash" > ~/startefs.sh ' + '\n'
#script+='echo "sudo mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport '+filesystem_dns+':/ ~/efs " >> ~/startefs.sh ' + '\n'
#script+= 'chmod u+x ~/startefs.sh \n'
#script+= 'echo "[Unit]" | sudo tee /etc/systemd/system/efs.service\n'
#script+= 'echo "Description=Start EFS" | sudo tee -a /etc/systemd/system/efs.service\n'
#script+= 'echo "[Service]" | sudo tee -a /etc/systemd/system/efs.service\n'
#script+= 'echo "ExecStart=/home/ubuntu/startefs.sh" | sudo tee -a /etc/systemd/system/efs.service\n'
#script+= 'echo "[Install]" | sudo tee -a /etc/systemd/system/efs.service\n'
#script+= 'echo "WantedBy=multi-user.target" | sudo tee -a /etc/systemd/system/efs.service\n'
#script+= 'sudo systemctl start efs'
script+='cd ~/efs'+'\n'
script+='sudo chmod go+rw .'+'\n'
script+='mkdir ~/efs/data &> /dev/null'+'\n'
# sudo mount -t nfs -o nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport fs-8be06f08.efs.us-east-1.amazonaws.com:/ ~/efs
return script
import termios
import tty
def posix_shell(chan):
    """Interactive POSIX terminal bridge for a paramiko channel.

    Puts the local tty into raw mode and shuttles bytes between stdin and the
    remote channel until the channel reports EOF (or stdin closes). The
    original tty settings are always restored on exit.
    """
    import select
    oldtty = termios.tcgetattr(sys.stdin)
    try:
        tty.setraw(sys.stdin.fileno())
        tty.setcbreak(sys.stdin.fileno())
        chan.settimeout(0.0)  # non-blocking channel reads; select() does the waiting
        while True:
            r, w, e = select.select([chan, sys.stdin], [], [])
            if chan in r:
                try:
                    # NOTE(review): u() is presumably paramiko's py2/py3 text
                    # helper -- confirm it is imported elsewhere in this file.
                    x = u(chan.recv(1024))
                    if len(x) == 0:
                        # zero-length read means the remote side closed
                        sys.stdout.write("\r\n*** EOF\r\n")
                        break
                    sys.stdout.write(x)
                    sys.stdout.flush()
                except socket.timeout:
                    pass  # nothing to read right now; loop back to select()
            if sys.stdin in r:
                x = sys.stdin.read(1)
                if len(x) == 0:
                    break  # local stdin closed
                chan.send(x)
    finally:
        # always restore the terminal, even if the session blows up
        termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
# Alias kept for call sites written against paramiko's demo naming.
interactive_shell = posix_shell
def printTotals(transferred, toBeTransferred):
    '''Print paramiko transfer progress as a fraction, overwriting the same line.

    Used as the ``callback`` for SFTP get/put, which calls it with
    (bytes transferred so far, total bytes).
    '''
    if toBeTransferred == 0:
        # bugfix: zero-byte files would raise ZeroDivisionError; report done
        fraction = 1.0
    else:
        fraction = float(transferred) / float(toBeTransferred)
    print("Transferred: %.3f" % fraction, end="\r", flush=True)
import zipfile, os
def makeCompressed(zipname, skip_folder=['result', 'git', 'ec2', 'figs', 'tmpres']):
    '''Compress the project tree at the module-level PATH into ``PATH/../zipname``.

    Directories whose path contains any token in *skip_folder* are skipped,
    as are .zip/.pdf/.json/.tex files (result backups and figures).
    The mutable default is kept for interface compatibility; it is never mutated.
    '''
    path = PATH  # module-level project root -- TODO confirm it is always set
    print('now compressing', path)
    # bugfix: close the archive deterministically; without close() the zip
    # central directory may never be flushed, leaving a corrupt file
    with zipfile.ZipFile(path + '/../' + zipname, 'w', zipfile.ZIP_DEFLATED) as zipf:
        for root, dirs, files in os.walk(path):
            if any(k in root for k in skip_folder):
                continue  # skip previous results
            for file in files:
                if '.zip' in file or '.pdf' in file or '.json' in file or '.tex' in file:
                    continue  # skip result backups and figures
                print(file)
                zipf.write(os.path.join(root, file),
                           arcname=os.path.join(root.replace(path, ''), file))
def download_from_ec2(instance, user_name, files, local_dir='.', kp_dir=None, verbose=True):
    '''
    Download files directly from an EC2 instance. Speed depends on internet connection and not instance type.
    __________
    parameters
    - instance : dict. Response dictionary from ec2 instance describe_instances method
    - user_name : string. SSH username for accessing instance, default usernames for AWS images can be found at https://alestic.com/2014/01/ec2-ssh-username/
    - files : list of strings. Remote files or directories to download; a trailing "/" marks a directory
    - local_dir : '.' string. The local directory the downloaded files are written into
    - kp_dir : directory containing the SSH key pair; defaults to the module-level KP_DIR
    '''
    if kp_dir is None:
        kp_dir = KP_DIR
    client = connect_to_instance(instance['PublicIpAddress'],kp_dir+'/'+instance['KeyName'], username = user_name,port=22)
    if verbose:
        print('Connected. Downloading files...')
    sftp = client.open_sftp()
    try:
        for f in files:
            # expand "~" by hand: SFTP paths are absolute on the remote side
            f = f.replace('~', '/home/%s' % user_name)
            if f[-1] == '/':
                f = f[:-1]
                # this is a directory
                print('listing', f)
                filelist = sftp.listdir(f)
                for i in filelist:
                    print('stat', i)
                    i = f + '/' + i
                    # first column of the long listing; 'd' marks a subdirectory
                    lstatout=str(sftp.lstat(i)).split()[0]
                    if 'd' not in lstatout:
                        if verbose:
                            print('Downloading %s' % str(i.split('\\')[-1]))
                        sftp.get(i, local_dir+'/'+i.split('/')[-1], callback=printTotals)
            else:
                # is a file
                if verbose:
                    print('Downloading %s' % str(f.split('/')[-1]))
                sftp.get(f, local_dir+'/'+f.split('/')[-1], callback=printTotals)
    except Exception as e:
        raise e
    if verbose:
        print('Downloaded to %s' % local_dir)
    return True
def upload_to_ec2(instance, user_name, files, remote_dir='.', kp_dir=None, verbose=False):
    '''
    Upload files directly to an EC2 instance. Speed depends on internet connection and not instance type.
    __________
    parameters
    - instance : dict. Response dictionary from ec2 instance describe_instances method
    - user_name : string. SSH username for accessing instance, default usernames for AWS images can be found at https://alestic.com/2014/01/ec2-ssh-username/
    - files : list of local file paths to upload
    - remote_dir : '.' string.The directory on the instance where the files will be uploaded to
    - kp_dir : directory containing the SSH key pair; defaults to the module-level KP_DIR
    - verbose : print connection/progress messages
    '''
    if kp_dir is None:
        kp_dir = KP_DIR
    client = connect_to_instance(instance['PublicIpAddress'], kp_dir+'/'+instance['KeyName'],
                                 username=user_name, port=22)
    if verbose:
        print('Connected. Uploading files...')
    sftp = client.open_sftp()  # bugfix: local name was misspelled 'stfp'
    try:
        for f in files:
            if verbose:
                print('Uploading %s' % str(f.split('\\')[-1]))
            sftp.put(f, remote_dir+'/'+f.split('/')[-1], callback=printTotals, confirm=True)
    finally:
        # bugfix: close the SFTP session and the SSH connection; both leaked
        # before (any exception still propagates to the caller)
        sftp.close()
        client.close()
    if verbose:
        print('Uploaded to %s' % remote_dir)
    return True
def active_shell(instance, user_name='ubuntu', port=22, kp_dir=None):
    '''
    Leave a shell active
    __________
    parameters
    - instance : dict. Response dictionary from ec2 instance describe_instances method
    - user_name : string. SSH username for accessing instance, default usernames for AWS images can be found at https://alestic.com/2014/01/ec2-ssh-username/
    - port : port to use to connect to the instance
    - kp_dir : key-pair directory. NOTE(review): defaults to '~/.ssh' here while
      sibling helpers default to the module-level KP_DIR -- confirm intent.
    '''
    if kp_dir is None:
        kp_dir = '~/.ssh'
    client = connect_to_instance(instance['PublicIpAddress'],kp_dir+'/'+instance['KeyName'],username=user_name,port=port)
    console = client.invoke_shell()
    # keep the SSH client referenced so it is not garbage-collected while the
    # console is in use
    console.keep_this = client
    session = console.get_transport().open_session()
    session.get_pty()
    session.invoke_shell()
    try:
        # hand the session to the raw-tty bridge until the user logs out
        interactive_shell(session)
    except Exception as e:
        print(e)
    print('Logged out of interactive session.')
    session.close()
    return True
def mount_efs(fs_name='fs-8be06f08', instance=None):
    """Link an EFS filesystem to the given instance over SSH.

    - fs_name : string. EFS filesystem id
    - instance : dict. Response dictionary from ec2 describe_instances

    Errors from retrieve_efs_mount propagate to the caller.
    """
    # Bug fix: the original wrapped this call in a try/except that simply
    # re-raised the exception and then had an unreachable sys.exit(1) after
    # the raise. Both the no-op handler and the dead code are removed.
    mount_target, instance_dns, filesystem_dns = retrieve_efs_mount(
        fs_name, instance, new_mount=False, region=REGION)
    print('Connecting to instance to link EFS...')
    run_script(instance, 'ubuntu', compose_mount_script(filesystem_dns), cmd=True)
def fetchScripts(fileName):
    """Read *fileName* and return its contents split into a list of lines.

    Bug fix: the original called open() without ever closing the handle;
    a context manager now closes it deterministically.
    """
    with open(fileName, 'r') as fh:
        return fh.read().split('\n')
def initialize(scriptlist, instance):
    """Run each script on the instance in order; stop at the first falsy result.

    A script that raises is reported and skipped; the remaining scripts
    still run (matching run_script's exception semantics).
    """
    for current_script in scriptlist:
        try:
            ok = run_script(instance, 'ubuntu', current_script, cmd=True)
        except Exception as err:
            print(str(err))
            print('Script %s failed with above error' % current_script)
            continue
        if not ok:
            break
def read_user_data_from_local_config():
    """Return EC2 user data from config, falling back to the configured file.

    Returns '' when neither the inline value nor the file is usable.
    """
    user_data = config.get('EC2', 'user_data')
    # Bug fix: the original queried config.get('EC2', 'user_data') a second
    # time in the condition; reuse the value already read.
    if not user_data:
        try:
            # Bug fix: close the file deterministically instead of leaking it.
            with open(config.get('EC2', 'user_data_file'), 'r') as fh:
                user_data = fh.read()
        except Exception:
            # Best-effort fallback (was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit).
            user_data = ''
    return user_data
def create_client():
    """Return a boto3 EC2 client bound to the module-configured region."""
    return boto3.client('ec2', region_name=REGION)
def get_existing_instance_by_tag(client):
    """Return the first non-terminated instance carrying the configured Name tag.

    Falls back to the first matching instance (even if terminated) when every
    match is terminated; returns None when nothing matches the tag at all.

    - client : boto3 EC2 client
    """
    response = client.describe_instances(
        Filters=[
            {
                'Name': 'tag:Name',
                'Values': [config.get('EC2', 'tag')],
            }
        ])
    reservations = response['Reservations']
    if not reservations:
        return None
    for res in reservations:
        inst = res['Instances'][0]
        # State code 48 == 'terminated'; skip dead instances.
        if inst['State']['Code'] != 48:
            return inst
    # Bug fix: corrected the 'Warniing' typo in the warning message.
    print('Warning: No instance is available')
    return reservations[0]['Instances'][0]
def list_all_existing_instances(client):
    """Print id, type and state for every instance visible to the client.

    - client : boto3 EC2 client. Returns None; output goes to stdout only.
    """
    response = client.describe_instances()
    all_instances = [
        inst
        for reservation in response['Reservations']
        for inst in reservation['Instances']
    ]
    for inst in all_instances:
        print("Instance Id: %s | %s | %s"
              % (inst['InstanceId'], inst['InstanceType'], inst['State']['Name']))
def get_spot_price(client):
    """Return (lowest spot price, its index, its availability zone).

    Queries up to 10 recent spot-price records for the configured instance
    type and product description. Raises RuntimeError when AWS returns no
    price history.
    """
    price_history = client.describe_spot_price_history(
        MaxResults=10,
        InstanceTypes=[config.get('EC2', 'type')],
        ProductDescriptions=[config.get('EC2', 'product_description')])
    import pprint
    pprint.pprint(price_history)

    if len(price_history['SpotPriceHistory']) == 0:
        # Bug fix: the original executed `raise 0`, which itself fails with
        # "TypeError: exceptions must derive from BaseException". Raise a
        # proper exception with a useful message instead.
        raise RuntimeError('No spot price history returned for the requested instance type')

    min_price = 10000.0
    min_ind = -1
    min_az = ''
    for ind, price_item in enumerate(price_history['SpotPriceHistory']):
        if float(price_item['SpotPrice']) < min_price:
            min_price = float(price_item['SpotPrice'])
            min_ind = ind
            min_az = price_item['AvailabilityZone']
    return float(price_history['SpotPriceHistory'][min_ind]['SpotPrice']), min_ind, min_az
def provision_instance(client, user_data, az):
    """Request a one-time spot instance, wait until allocated, tag it, return it.

    - client : boto3 EC2 client
    - user_data : string. cloud-init user data (base64-encoded before sending)
    - az : string. Availability zone group for the spot request

    Blocks (polling every 10s) until the spot request becomes 'active'.
    """
    user_data_encode = (base64.b64encode(user_data.encode())).decode("utf-8")
    req = client.request_spot_instances(
        AvailabilityZoneGroup=az,
        InstanceCount=1,
        Type='one-time',
        InstanceInterruptionBehavior='terminate',
        LaunchSpecification={
            'SecurityGroupIds': [
                config.get('EC2', 'security_group')
            ],
            'ImageId': config.get('EC2', 'ami'),
            'InstanceType': config.get('EC2', 'type'),
            'KeyName': config.get('EC2', 'key_pair'),
            'UserData': user_data_encode
        },
        SpotPrice=config.get('EC2', 'max_bid')
    )
    request_id = req['SpotInstanceRequests'][0]['SpotInstanceRequestId']
    print('Spot request created, status: ' +
          req['SpotInstanceRequests'][0]['State'])
    print('Waiting for spot provisioning')
    while True:
        current_req = client.describe_spot_instance_requests(
            SpotInstanceRequestIds=[request_id])
        if current_req['SpotInstanceRequests'][0]['State'] == 'active':
            instance_id = current_req['SpotInstanceRequests'][0]['InstanceId']
            print('Instance allocated ,Id: ', instance_id)
            instance = client.describe_instances(InstanceIds=[instance_id])[
                'Reservations'][0]['Instances'][0]
            client.create_tags(Resources=[instance_id],
                               Tags=[{
                                   'Key': 'Name',
                                   'Value': config.get('EC2', 'tag')
                               }]
                               )
            return instance
        # Bug fix: the original `print('Waiting...', sleep(10))` printed the
        # None returned by sleep(). Print the message, then sleep.
        print('Waiting...')
        sleep(10)
def stop_instance(client, inst):
    """Stop the instance and remove its Name tag.

    Best-effort: failures are printed, never raised (matching the original
    contract expected by callers).
    """
    try:
        print('Stopping', inst['InstanceId'])
        client.stop_instances(
            InstanceIds=[inst['InstanceId']])
        print('complete (', inst['InstanceId'], ')')
        client.delete_tags(
            Resources=[
                inst['InstanceId']
            ],
            Tags=[
                {
                    'Key': 'Name',
                    'Value': config.get('EC2', 'tag')
                },
            ]
        )
    except Exception:
        # Bug fix: narrowed the bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; still best-effort like the original.
        print('Failed to stop:', sys.exc_info()[0])
def destroy_instance(client, inst):
    """Terminate the instance and remove its Name tag.

    Best-effort: failures are printed, never raised (matching the original
    contract expected by callers).
    """
    try:
        print('Terminating', inst['InstanceId'])
        client.terminate_instances(
            InstanceIds=[inst['InstanceId']])
        print('Termination complete (', inst['InstanceId'], ')')
        client.delete_tags(
            Resources=[
                inst['InstanceId']
            ],
            Tags=[
                {
                    'Key': 'Name',
                    'Value': config.get('EC2', 'tag')
                },
            ]
        )
    except Exception:
        # Bug fix: narrowed the bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; still best-effort like the original.
        print('Failed to terminate:', sys.exc_info()[0])
def wait_for_up(client, inst):
    """Poll until the instance accepts TCP connections on port 22, then print login info.

    Refreshes the instance description while the public IP is unassigned.
    NOTE(review): the 'IP not assigned yet' path loops without sleeping, just
    like the original — consider throttling it; left unchanged here.
    """
    print('Waiting for instance to come up')
    while True:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if inst['PublicIpAddress'] is None:
            # IP may not be assigned yet; re-fetch the instance description.
            inst = get_existing_instance_by_tag(client)
        try:
            if inst['PublicIpAddress'] is None:
                print('IP not assigned yet ...')
            else:
                s.connect((inst['PublicIpAddress'], 22))
                s.shutdown(2)
                print('Server is up!')
                print('Server Public IP - %s' % inst['PublicIpAddress'])
                print('ssh -i', '"' + config.get('EC2', 'key_pair') + '.pem' + '"',
                      config.get('EC2', 'username') + '@' + inst['PublicDnsName'])
                break
        except Exception:
            # Bug fix: narrowed the bare `except:` (it swallowed
            # KeyboardInterrupt) and stopped printing sleep()'s None return.
            print('Waiting...')
            sleep(10)
# def run_code(client, inst):
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# s.connect((inst['PublicIpAddress'], 22))
# s.
def writeLabel():
    """Write the current `git describe` output to <PATH>/label.txt.

    Bug fix: the original used open(...).write(...) and never closed the
    handle; a context manager now guarantees flush and close.
    """
    label = subprocess.check_output(["git", "describe"]).strip()
    with open(PATH + '/label.txt', 'w') as fh:
        fh.write(label.decode('utf-8'))
def main(action):
client = create_client()
if client is None:
print('Unable to create EC2 client')
sys.exit(0)
inst = get_existing_instance_by_tag(client)
user_data = read_user_data_from_local_config()
if action == 'start':
# print(inst)
if inst is None or inst['State']['Name'] == 'terminated':
spot_price, minInd, minAZ = get_spot_price(client)
print('Spot price is ', str(spot_price))
if | |
= True
assert got_exception
# test get_parameters()
aug = iaa.Invert(p=1, per_channel=False, min_value=10, max_value=20)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].p.value == 1
assert params[1].value == 0
assert params[2] == 10
assert params[3] == 20
def test_ContrastNormalization():
    """Exercise iaa.ContrastNormalization: fixed/stochastic alpha, per_channel,
    keypoint invariance, parameter validation, and get_parameters()."""
    reseed()
    # 4x4 RGB image of zeros used as the base for all image tests below.
    zeros = np.zeros((4, 4, 3), dtype=np.uint8)
    keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
                                      ia.Keypoint(x=2, y=2)], shape=zeros.shape)]
    # contrast stays the same
    observed = iaa.ContrastNormalization(alpha=1.0).augment_image(zeros + 50)
    expected = zeros + 50
    assert np.array_equal(observed, expected)
    # image with mean intensity (ie 128), contrast cannot be changed
    observed = iaa.ContrastNormalization(alpha=2.0).augment_image(zeros + 128)
    expected = zeros + 128
    assert np.array_equal(observed, expected)
    # increase contrast: deviation from 128 is scaled by alpha
    observed = iaa.ContrastNormalization(alpha=2.0).augment_image(zeros + 128 + 10)
    expected = zeros + 128 + 20
    assert np.array_equal(observed, expected)
    observed = iaa.ContrastNormalization(alpha=2.0).augment_image(zeros + 128 - 10)
    expected = zeros + 128 - 20
    assert np.array_equal(observed, expected)
    # decrease contrast
    observed = iaa.ContrastNormalization(alpha=0.5).augment_image(zeros + 128 + 10)
    expected = zeros + 128 + 5
    assert np.array_equal(observed, expected)
    observed = iaa.ContrastNormalization(alpha=0.5).augment_image(zeros + 128 - 10)
    expected = zeros + 128 - 5
    assert np.array_equal(observed, expected)
    # increase contrast by stochastic parameter (either choice is acceptable)
    observed = iaa.ContrastNormalization(alpha=iap.Choice([2.0, 3.0])).augment_image(zeros + 128 + 10)
    expected1 = zeros + 128 + 20
    expected2 = zeros + 128 + 30
    assert np.array_equal(observed, expected1) or np.array_equal(observed, expected2)
    # change contrast by tuple: outputs should vary between consecutive draws
    nb_iterations = 1000
    nb_changed = 0
    last = None
    for i in sm.xrange(nb_iterations):
        observed = iaa.ContrastNormalization(alpha=(0.5, 2.0)).augment_image(zeros + 128 + 40)
        if last is None:
            last = observed
        else:
            if not np.array_equal(observed, last):
                nb_changed += 1
    p_changed = nb_changed / (nb_iterations-1)
    assert p_changed > 0.5
    # per_channel=True: each of the 100 channels gets its own alpha,
    # so many distinct output values are expected
    aug = iaa.ContrastNormalization(alpha=(1.0, 6.0), per_channel=True)
    img = np.zeros((1, 1, 100), dtype=np.uint8) + 128 + 10
    observed = aug.augment_image(img)
    uq = np.unique(observed)
    assert len(uq) > 5
    # per_channel with probability: ~70% of draws should be per-channel
    # (many unique values), ~30% uniform (a single unique value)
    aug = iaa.ContrastNormalization(alpha=(1.0, 4.0), per_channel=0.7)
    img = np.zeros((1, 1, 100), dtype=np.uint8) + 128 + 10
    seen = [0, 0]
    for _ in sm.xrange(1000):
        observed = aug.augment_image(img)
        uq = np.unique(observed)
        if len(uq) == 1:
            seen[0] += 1
        elif len(uq) >= 2:
            seen[1] += 1
    assert 300 - 75 < seen[0] < 300 + 75
    assert 700 - 75 < seen[1] < 700 + 75
    # keypoints shouldnt be changed
    aug = iaa.ContrastNormalization(alpha=2.0)
    aug_det = iaa.ContrastNormalization(alpha=2.0).to_deterministic()
    observed = aug.augment_keypoints(keypoints)
    expected = keypoints
    assert keypoints_equal(observed, expected)
    observed = aug_det.augment_keypoints(keypoints)
    expected = keypoints
    assert keypoints_equal(observed, expected)
    # test exceptions for wrong parameter types
    got_exception = False
    try:
        aug = iaa.ContrastNormalization(alpha="test")
    except Exception:
        got_exception = True
    assert got_exception
    got_exception = False
    try:
        aug = iaa.ContrastNormalization(alpha=1.5, per_channel="test")
    except Exception:
        got_exception = True
    assert got_exception
    # test get_parameters()
    aug = iaa.ContrastNormalization(alpha=1, per_channel=False)
    params = aug.get_parameters()
    assert isinstance(params[0], iap.Deterministic)
    assert isinstance(params[1], iap.Deterministic)
    assert params[0].value == 1
    assert params[1].value == 0
def test_Affine():
reseed()
base_img = np.array([[0, 0, 0],
[0, 255, 0],
[0, 0, 0]], dtype=np.uint8)
base_img = base_img[:, :, np.newaxis]
images = np.array([base_img])
images_list = [base_img]
outer_pixels = ([], [])
for i in sm.xrange(base_img.shape[0]):
for j in sm.xrange(base_img.shape[1]):
if i != j:
outer_pixels[0].append(i)
outer_pixels[1].append(j)
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
# no translation/scale/rotate/shear, shouldnt change nothing
aug = iaa.Affine(scale=1.0, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
# ---------------------
# scale
# ---------------------
# zoom in
aug = iaa.Affine(scale=1.75, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][outer_pixels[0], outer_pixels[1]] > 20).all()
assert (observed[0][outer_pixels[0], outer_pixels[1]] < 150).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y > 2
# zoom in only on x axis
aug = iaa.Affine(scale={"x": 1.75, "y": 1.0}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[1, 1], [0, 2]] > 20).all()
assert (observed[0][[1, 1], [0, 2]] < 150).all()
assert (observed[0][0, :] < 5).all()
assert (observed[0][2, :] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x < 0
assert observed[0].keypoints[0].y == 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x > 2
assert observed[0].keypoints[2].y == 2
# zoom in only on y axis
aug = iaa.Affine(scale={"x": 1.0, "y": 1.75}, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug_det.augment_images(images_list)
assert observed[0][1, 1] > 250
assert (observed[0][[0, 2], [1, 1]] > 20).all()
assert (observed[0][[0, 2], [1, 1]] < 150).all()
assert (observed[0][:, 0] < 5).all()
assert (observed[0][:, 2] < 5).all()
observed = aug.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
observed = aug_det.augment_keypoints(keypoints)
assert observed[0].keypoints[0].x == 0
assert observed[0].keypoints[0].y < 0
assert observed[0].keypoints[1].x == 1
assert observed[0].keypoints[1].y == 1
assert observed[0].keypoints[2].x == 2
assert observed[0].keypoints[2].y > 2
# zoom out
# this one uses a 4x4 area of all 255, which is zoomed out to a 4x4 area
# in which the center 2x2 area is 255
# zoom in should probably be adapted to this style
# no separate tests here for x/y axis, should work fine if zoom in works with that
aug = iaa.Affine(scale=0.49, translate_px=0, rotate=0, shear=0)
aug_det = aug.to_deterministic()
image = np.ones((4, 4, 1), dtype=np.uint8) * 255
images = np.array([image])
images_list = [image]
outer_pixels = ([], [])
for y in sm.xrange(4):
xs = sm.xrange(4) if y in [0, 3] else [0, 3]
for x in xs:
outer_pixels[0].append(y)
outer_pixels[1].append(x)
inner_pixels = ([1, 1, 2, 2], [1, 2, 1, 2])
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=3, y=0),
ia.Keypoint(x=0, y=3), ia.Keypoint(x=3, y=3)],
shape=image.shape)]
keypoints_aug = [ia.KeypointsOnImage([ia.Keypoint(x=0.765, y=0.765), ia.Keypoint(x=2.235, y=0.765),
ia.Keypoint(x=0.765, y=2.235), ia.Keypoint(x=2.235, y=2.235)],
shape=image.shape)]
observed = aug.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug_det.augment_images(images)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = aug.augment_images(images_list)
assert (observed[0][outer_pixels] < 25).all()
assert (observed[0][inner_pixels] > 200).all()
observed = | |
"""
<Author>
<NAME>
<Start Date>
March 14th, 2013
<Description>
A basic library that demonstrates PolyHash when applied to passwords (see
https://polypasswordhasher.poly.edu/ for details). This includes shielded
password support via AES 256.
<Usage>
import polypasswordhasher
# require knowledge of 10 shares to decode others. Create a blank, new
# password file...
pph = polypasswordhasher.PolyPasswordHasher(threshold = 10, passwordfile = None)
# create three admins so that any two have the appropriate threshold
pph.create_account('admin','correct horse',5)
pph.create_account('root','battery staple',5)
pph.create_account('superuser','purple monkey dishwasher',5)
# make some normal user accounts...
pph.create_account('alice','kitten',1)
pph.create_account('bob','puppy',1)
pph.create_account('charlie','velociraptor',1)
pph.create_account('dennis','menace',0)
pph.create_account('eve','iamevil',0)
# try some logins and make sure we see what we expect...
assert(pph.is_valid_login('alice','kitten') == True)
assert(pph.is_valid_login('admin','correct horse') == True)
assert(pph.is_valid_login('alice','nyancat!') == False)
assert(pph.is_valid_login('dennis','menace') == True)
assert(pph.is_valid_login('dennis','password') == False)
# persist the password file to disk
pph.write_password_data('securepasswords')
# If I remove this from memory, I can't use the data on disk to check
# passwords without a threshold
pph = None
# let's load it back in
pph = polypasswordhasher.PolyPasswordHasher(threshold = 10,passwordfile = 'securepasswords')
# The password information is essentially useless alone. You cannot know
# if a password is valid without threshold or more other passwords!!!
try:
pph.is_valid_login('alice','kitten')
except ValueError:
pass
else:
print "Can't get here! It's still bootstrapping!!!"
# with a threshold (or more) of correct passwords, it decodes and is usable.
pph.unlock_password_data([('admin','correct horse'), ('root','battery staple'), ('bob','puppy'), ('dennis','menace')])
# now, I can do the usual operations with it...
assert(pph.is_valid_login('alice','kitten') == True)
pph.create_account('moe','tadpole',1)
pph.create_account('larry','fish',0)
...
"""
__author__ = '<NAME> (<EMAIL>)'
__version__ = '0.1'
__license__ = 'MIT'
__all__ = ['PolyPasswordHasher']
from hashlib import sha256
# For shielded password support...
from Crypto.Cipher import AES
import fastshamirsecret as shamirsecret
#import shamirsecret
import os
import pickle
# This is a PolyHash object that has special routines for passwords...
class PolyPasswordHasher(object):
    """PolyHash-style password store: protector hashes are combined with Shamir
    secret shares, shielded accounts are AES-encrypted under the shared secret.
    See the module docstring for usage."""

    # this is keyed by user name. Each value is a list of dicts (really a
    # struct) where each dict contains the salt, sharenumber, and
    # passhash (saltedhash XOR shamirsecretshare).
    accountdict = None
    # accounts created while bootstrapping (marked sharenumber == -1)
    bootstrap_accounts = None
    # This contains the shamirsecret object for this data store
    shamirsecretobj = None
    # Is the secret value known? In other words, is it safe to use the
    # passwordfile
    knownsecret = False
    # length of the salt in bytes
    saltsize = 16
    # ICB iterations and recombination iterations...
    icb_iterations = 1
    recombination_iterations = 1
    # secret verification routines
    secret_length = 32
    secret_integrity_check = None
    # number of bytes of data used for isolated validation...
    isolated_check_bits= 0
    # shielded support. This could be random (and unknown) in the default
    # algorithm
    shieldedkey = None
    # number of used shares. While I could duplicate shares for normal users,
    # I don't do so in this implementation. This duplication would allow
    # co-analysis of password hashes
    nextavailableshare = None
def __init__(self, threshold, passwordfile = None, isolated_check_bits = 0):
    """Initialize a new (empty) object with the threshold. I could store
    the threshold in the file, but don't do this currently. I just assume
    it's known to the program.

    - threshold : number of shares needed to reconstruct the secret
    - passwordfile : None to start a fresh store, or a path to a pickled store
    - isolated_check_bits : bytes of each hash exposed for isolated validation
    """
    self.threshold = threshold
    self.accountdict = {}
    self.bootstrap_accounts = []
    self.isolated_check_bits = isolated_check_bits
    # creating a new password file
    if passwordfile is None:
        # generate a 256 bit key for AES. I need 256 bits anyways
        # since I'll be XORing by the
        # output of SHA256, I want it to be 256 bits (or 32 bytes) long
        # we add an integrity check at the end of the secret
        self.shieldedkey = self.create_secret()
        # protect this key.
        self.shamirsecretobj = shamirsecret.ShamirSecret(threshold, self.shieldedkey)
        # I've generated it now, so it is safe to use!
        self.knownsecret = True
        self.nextavailableshare = 1
        # create the integrity check
        self.secret_integrity_check = self.create_integrity_check(self.shieldedkey)
        return
    # Okay, they have asked me to load in a password file!
    self.shamirsecretobj = shamirsecret.ShamirSecret(threshold)
    self.knownsecret = False
    self.shieldedkey = None
    # A real implementation would need much better error handling.
    # Bug fix: open the pickle in binary mode ('rb') — pickled data is bytes;
    # text mode breaks on Python 3 and can corrupt data on Windows. Also close
    # the handle deterministically.
    with open(passwordfile, 'rb') as fileobj:
        passwordfiledata = pickle.load(fileobj)
    # just want to deserialize this data. Should do better validation
    self.accountdict = passwordfiledata.accountdict
    self.secret_integrity_check = passwordfiledata.secret_integrity_check
    assert(type(self.accountdict) is dict)
    # compute which share number is the largest used...
    # Robustness fix: start from 0 instead of the class default None so the
    # max() below is well defined even on Python 3.
    self.nextavailableshare = 0
    for username in self.accountdict:
        # look at each share
        for share in self.accountdict[username]:
            self.nextavailableshare = max(self.nextavailableshare, share['sharenumber'])
    # ...then use the one after when I need a new one.
    # Bug fix: the original did `self.nextavailableshare += self.nextavailableshare`,
    # which DOUBLES the highest used share number instead of advancing to the
    # next one (the comment above states the intent: "use the one after").
    self.nextavailableshare += 1
def create_account(self, username, password, shares):
    """
    Create a new account. Raises a ValueError if given bad data or if the
    system isn't initialized.

    - username : must not already exist in the store
    - password : plaintext password; hashed with a fresh random salt
    - shares : number of Shamir shares to bind (0 = shielded account;
      while bootstrapping only shares == 0 is accepted)
    """
    if username in self.accountdict:
        raise ValueError("Username exists already!")
    # Were I to add support for changing passwords, etc. this code would be
    # moved to an internal helper.
    if shares>255 or shares<0:
        raise ValueError("Invalid number of shares: "+str(shares)+".")
    # Note this is just an implementation limitation. I could do all sorts
    # of things to get around this (like just use a bigger field).
    if shares + self.nextavailableshare > 255:
        raise ValueError("Would exceed maximum number of shares: "+str(shares)+".")
    # for each share, we will add the appropriate dictionary.
    self.accountdict[username] = []
    # we are bootstrapping, we will create a bootstrap account
    if not self.knownsecret:
        # We can only create shielded accounts while bootstrapping
        if shares != 0:
            del self.accountdict[username]
            raise ValueError("Cannot produce shares, still bootstrapping!")
        else:
            thisentry = {}
            # sharenumber -1 marks a bootstrap account (plain salted hash)
            thisentry['sharenumber'] = -1
            thisentry['salt'] = os.urandom(self.saltsize)
            saltedpasswordhash = sha256(thisentry['salt'] + password).digest()
            thisentry['passhash'] = saltedpasswordhash
            self.accountdict[username].append(thisentry)
            # we will use this to update accounts once bootstrap is finished
            self.bootstrap_accounts.append(thisentry)
            # NOTE: falls through to the for loop below, which is empty when
            # shares == 0, so nextavailableshare is effectively unchanged.
    elif shares == 0:
        thisentry = {}
        thisentry['sharenumber'] = 0
        # get a random salt, salt the password and store the salted hash
        thisentry['salt'] = os.urandom(self.saltsize)
        saltedpasswordhash = sha256(thisentry['salt']+password).digest()
        # Encrypt the salted secure hash. The salt should make all entries
        # unique when encrypted.
        thisentry['passhash'] = AES.new(self.shieldedkey).encrypt(saltedpasswordhash)
        # append the isolated validation data...
        thisentry['passhash'] += self.create_isolated_validation_bits(saltedpasswordhash)
        self.accountdict[username].append(thisentry)
        # and exit (don't increment the share count!)
        return
    for sharenumber in range(self.nextavailableshare, self.nextavailableshare+shares):
        thisentry = {}
        thisentry['sharenumber'] = sharenumber
        # take the bytearray part of this
        shamirsecretdata = self.shamirsecretobj.compute_share(sharenumber)[1]
        thisentry['salt'] = os.urandom(self.saltsize)
        saltedpasswordhash = sha256(thisentry['salt']+password).digest()
        # XOR the two and keep this. This effectively hides the hash unless
        # protector hashes can be simultaneously decoded
        thisentry['passhash'] = _do_bytearray_XOR(saltedpasswordhash, shamirsecretdata)
        # append the isolated validation data...
        thisentry['passhash'] += self.create_isolated_validation_bits(saltedpasswordhash)
        self.accountdict[username].append(thisentry)
    # increment the share counter.
    self.nextavailableshare += shares
def is_valid_login(self,username,password):
    """ Check to see if a login is valid.

    Returns True/False. Raises ValueError for an unknown user, or when the
    store is still bootstrapping and isolated validation is disabled.
    """
    if not self.knownsecret and self.isolated_check_bits == 0:
        raise ValueError("Still bootstrapping and isolated validation is disabled!")
    if username not in self.accountdict:
        raise ValueError("Unknown user '"+username+"'")
    # I'll check every share. I probably could just check the first in almost
    # every case, but this shouldn't be a problem since only admins have
    # multiple shares. Since these accounts are the most valuable (for what
    # they can access in the overall system), let's be thorough.
    for entry in self.accountdict[username]:
        saltedpasswordhash = sha256(entry['salt']+password).digest()
        # if this is a bootstrap account (plain salted hash, sharenumber -1)...
        if entry['sharenumber'] == -1:
            return saltedpasswordhash == entry['passhash']
        # If bootstrapping, isolated validation needs to be done here!
        # (only the trailing isolated-check bytes can be compared)
        if not self.knownsecret:
            if self.isolated_validation(saltedpasswordhash, entry['passhash']):
                return True
            else:
                return False
        # XOR to remove the salted hash from the password
        # (the isolated-check suffix is stripped from passhash first)
        sharedata = _do_bytearray_XOR(saltedpasswordhash, entry['passhash'][:len(entry['passhash'])-self.isolated_check_bits])
        if self.isolated_check_bits > 0:
            isolated_check = self.isolated_validation(saltedpasswordhash, entry['passhash'])
        else:
            isolated_check = False
        # If a shielded account...
        if entry['sharenumber'] == 0:
            # return true if the password encrypts the same way...
            if AES.new(self.shieldedkey).encrypt(saltedpasswordhash) == entry['passhash'][:len(entry['passhash'])-self.isolated_check_bits]:
                return True
            # or false otherwise. A matching isolated check with a failing
            # full hash suggests tampering with the stored entry.
            if isolated_check:
                print("Isolated check matches but full hash doesn't, this might be a break-in!")
            return False
        # now we should have a shamir share (if all is well.)
        share = (entry['sharenumber'],sharedata)
        # If a normal share, return T/F depending on if this share is valid.
        if self.shamirsecretobj.is_valid_share(share):
            return True
        if isolated_check:
            print("Isolated check matches but full hash doesn't, this might be a break-in!")
    # NOTE(review): source indentation was lost in this dump; the final
    # `return False` is placed after the loop to match the "check every
    # share" comment above — confirm against the original file.
    return False
def write_password_data(self, passwordfile):
""" Persist the password data to disk."""
if self.threshold >= self.nextavailableshare:
raise ValueError("Would write undecodable password file. Must have more shares before writing.")
# Need more error checking in a real implementation
# we will backup important information, set | |
and place in VVs.
:param vvx: Real X `GXVV <geosoft.gxapi.GXVV>`
:param vvy: Real Y `GXVV <geosoft.gxapi.GXVV>`
:param vvz: Real Z `GXVV <geosoft.gxapi.GXVV>`
:param img: `GXIMG <geosoft.gxapi.GXIMG>` for Z value, or `IMG_NULL <geosoft.gxapi.IMG_NULL>` for no Z.
:param prompt: Command line prompt string
:param newline: 0 for no newline 1 for automatic newline at each point
:type vvx: GXVV
:type vvy: GXVV
:type vvz: GXVV
:type img: GXIMG
:type prompt: str
:type newline: int
:returns: 0 if user digitized some points.
1 if user cancelled.
:rtype: int
.. versionadded:: 5.0
**License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_
**Limitations:** May not be available while executing a command line program.
**Note:** The command line will start to receive digitized points
from the mouse. Whenever the left mouse button is
pressed, the current view X,Y are placed on the workspace
command line. If a valid `GXIMG <geosoft.gxapi.GXIMG>` is passed, the Z value is
also placed on the command line. If auto-newline is
specified, the line is immediately placed into the VVs,
otherwise the user has the opportunity to enter data
before pressing Enter.
Locations are in the current view user units
"""
ret_val = self._digitize2(vvx, vvy, vvz, img, prompt.encode(), newline)
return ret_val
def digitize_peaks(self, vvx, vvy, vvz, img, prompt, newline):
    """
    Digitise points from the current map and place in VVs.

    :param vvx: Real X `GXVV <geosoft.gxapi.GXVV>`
    :param vvy: Real Y `GXVV <geosoft.gxapi.GXVV>`
    :param vvz: Real Z `GXVV <geosoft.gxapi.GXVV>`
    :param img: `GXIMG <geosoft.gxapi.GXIMG>` for Z value, or `IMG_NULL <geosoft.gxapi.IMG_NULL>` for no Z.
    :param prompt: Command line prompt string
    :param newline: 0 for no newline 1 for automatic newline at each point
    :type vvx: GXVV
    :type vvy: GXVV
    :type vvz: GXVV
    :type img: GXIMG
    :type prompt: str
    :type newline: int

    :returns: 0 if user digitized some points.
              1 if user cancelled.
    :rtype: int

    .. versionadded:: 9.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** Same as `digitize2 <geosoft.gxapi.GXEMAP.digitize2>`, but the closest peaks to the selected locations are
    returned instead of the selected location. The method chooses the highest value
    of the 8 surrounding points, then repeats this process until no higher value can
    be found in any of the 8 surrounding points. If there are two or more points with
    a higher value, it will just take the first one and continue, and this method will
    stall on flat areas as well (since no surrounding point is larger).
    """
    # Delegate to the low-level wrapper; the prompt must be byte-encoded.
    return self._digitize_peaks(vvx, vvy, vvz, img, prompt.encode(), newline)
def digitize_polygon(self, vvx, vvy, vvz, img, prompt, newline, pixel_radius):
    """
    Same as iDigitze2_EMAP, but automatically close polygons.

    :param vvx: Real X `GXVV <geosoft.gxapi.GXVV>`
    :param vvy: Real Y `GXVV <geosoft.gxapi.GXVV>`
    :param vvz: Real Z `GXVV <geosoft.gxapi.GXVV>`
    :param img: `GXIMG <geosoft.gxapi.GXIMG>` for Z value, or `IMG_NULL <geosoft.gxapi.IMG_NULL>` for no Z.
    :param prompt: Command line prompt string
    :param newline: 0 for no newline 1 for automatic newline at each point
    :param pixel_radius: Close the polygon if the selected location is within this radius in screen pixels.
    :type vvx: GXVV
    :type vvy: GXVV
    :type vvz: GXVV
    :type img: GXIMG
    :type prompt: str
    :type newline: int
    :type pixel_radius: int

    :returns: 0 if user digitized some points.
              1 if user cancelled.
    :rtype: int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** This is the same as `digitize2 <geosoft.gxapi.GXEMAP.digitize2>`, except that it automatically
    detects, (except for the 2nd and 3rd points) when a selected location
    is within the entered number of pixels from the starting point. If yes,
    the polygon is assumed to be closed, and the operation is the same as
    the RMB "done" command, and the process returns 0.
    """
    # Delegate to the low-level wrapper; the prompt must be byte-encoded.
    return self._digitize_polygon(vvx, vvy, vvz, img, prompt.encode(), newline, pixel_radius)
def get_box(self, str_val, min_x, min_y, max_x, max_y):
    """
    Returns the coordinates of a user selected box.

    :param str_val: User prompt string
    :param min_x:   X minimum in current view user units.
    :param min_y:   Y minimum
    :param max_x:   X maximum
    :param max_y:   Y maximum
    :type str_val:  str
    :type min_x:    float_ref
    :type min_y:    float_ref
    :type max_x:    float_ref
    :type max_y:    float_ref

    :returns:       0 if point returned, 1 if user cancelled.
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.
    """
    # The wrapped call returns a tuple: status first, then the (possibly
    # updated) box coordinates, which are written back into the float_refs.
    result = self._get_box(str_val.encode(), min_x.value, min_y.value, max_x.value, max_y.value)
    ret_val, min_x.value, min_y.value, max_x.value, max_y.value = result
    return ret_val
def get_box2(self, str_val, x1, y1, x2, y2, x3, y3, x4, y4):
    """
    Returns the coordinates of a user selected box in a warped view.

    :param str_val: User prompt string
    :param x1:      X1 bottom left corner
    :param y1:      Y1 bottom left corner
    :param x2:      X2 bottom right corner
    :param y2:      Y2 bottom right corner
    :param x3:      X3 top right corner
    :param y3:      Y3 top right corner
    :param x4:      X4 top left corner
    :param y4:      Y4 top left corner
    :type str_val:  str
    :type x1:       float_ref
    :type y1:       float_ref
    :type x2:       float_ref
    :type y2:       float_ref
    :type x3:       float_ref
    :type y3:       float_ref
    :type x4:       float_ref
    :type y4:       float_ref

    :returns:       0 if point returned, 1 if user cancelled.
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** In a view with a rotational (or other) warp,
    `get_box <geosoft.gxapi.GXEMAP.get_box>` returns only two opposite diagonal
    points, which is not enough to determine the other two corners. This function
    returns the exact coordinates of all four corners, calculated from the pixel
    locations.
    """
    # Unpack the status plus the four corner coordinates back into the refs.
    coords = self._get_box2(str_val.encode(), x1.value, y1.value, x2.value, y2.value,
                            x3.value, y3.value, x4.value, y4.value)
    (ret_val, x1.value, y1.value, x2.value, y2.value,
     x3.value, y3.value, x4.value, y4.value) = coords
    return ret_val
def get_grid(self, str_val, nx, ny, angle, x1, y1, x_len, y_len):
    """
    Position and size a grid on a map.

    :param str_val: User prompt string
    :param nx:      Number of elements along primary axis to draw.
    :param ny:      Number of elements along secondary axis to draw.
    :param angle:   Angle of primary axis in degrees
    :param x1:      Grid origin X
    :param y1:      Grid origin Y
    :param x_len:   Primary axis length
    :param y_len:   Secondary axis length
    :type str_val:  str
    :type nx:       int
    :type ny:       int
    :type angle:    float_ref
    :type x1:       float_ref
    :type y1:       float_ref
    :type x_len:    float_ref
    :type y_len:    float_ref

    :returns:       0 if line returned, 1 if user cancelled.
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** If the input angle is `rDUMMY <geosoft.gxapi.rDUMMY>`, an extra step
    is inserted for the user to define the angle by drawing a line with the mouse.
    The output primary axis angle will always be in the range -90 < angle <= 90.
    The grid origin is shifted to whichever corner necessary to make this possible,
    while keeping the secondary axis at 90 degrees greater than the primary
    (going counter-clockwise).
    The coordinates are returned in the current User projection
    (See `GXMVIEW.get_user_ipj <geosoft.gxapi.GXMVIEW.get_user_ipj>` and
    `GXMVIEW.set_user_ipj <geosoft.gxapi.GXMVIEW.set_user_ipj>`.)
    """
    # Status comes first; the remaining values update the output refs in place.
    result = self._get_grid(str_val.encode(), nx, ny, angle.value,
                            x1.value, y1.value, x_len.value, y_len.value)
    ret_val, angle.value, x1.value, y1.value, x_len.value, y_len.value = result
    return ret_val
def get_line(self, str_val, min_x, min_y, max_x, max_y):
    """
    Returns the end points of a line.

    :param str_val: User prompt string
    :param min_x:   X1 in view user units
    :param min_y:   Y1
    :param max_x:   X2
    :param max_y:   Y2
    :type str_val:  str
    :type min_x:    float_ref
    :type min_y:    float_ref
    :type max_x:    float_ref
    :type max_y:    float_ref

    :returns:       0 if line returned, 1 if user cancelled.
    :rtype:         int

    .. versionadded:: 5.0

    **License:** `Geosoft Open License <https://geosoftgxdev.atlassian.net/wiki/spaces/GD/pages/2359406/License#License-open-lic>`_

    **Limitations:** May not be available while executing a command line program.

    **Note:** The coordinates are returned in the current User projection
    (See `GXMVIEW.get_user_ipj <geosoft.gxapi.GXMVIEW.get_user_ipj>` and
    `GXMVIEW.set_user_ipj <geosoft.gxapi.GXMVIEW.set_user_ipj>`.)
    """
    # Unpack status plus the two end points back into the float_refs.
    result = self._get_line(str_val.encode(), min_x.value, min_y.value, max_x.value, max_y.value)
    ret_val, min_x.value, min_y.value, max_x.value, max_y.value = result
    return ret_val
def get_line_ex(self, str_val, min_x, min_y, max_x, max_y):
"""
Returns the end points of a line.
:param str_val: User prompt string
:param min_x: | |
sample in sample_list:
out.write('\t\t<dataset>' + sample + '</dataset>\n')
out.write('\t</datasets>\n')
# Write nodes
offset = 1
child_nodes, _ = get_lca_dataseries_tax_xml(
tax_profile, sample_list, ROOT_TAXONOMY_ID, offset, metric=metric
)
out.write(child_nodes)
# Close XML
out.write('</krona>')
# Run Krona
html_file = outfile + '.html'
krona_cmd = [krona_path, '-o', html_file, outfile]
run_external_program(krona_cmd)
def get_lca_dataseries_tax_xml(tax_profile, dataseries, taxid, offset, metric='efpkg'):
    """Returns XML node for a phylogenetic tree node and all its children.

    Creates an additional child node for a fictional "Unclassified..." taxon
    if not all reads of the current node were mapped to children nodes.

    Args:
        tax_profile (:obj:TaxonomyProfile): taxonomy profile
        dataseries (list of str): either sample identifiers or function identifiers,
            depending on profile type (functional or taxonomic)
        taxid (str): taxonomy identifier of a node of interest
        offset (int): number of starting tabs
        metric (str): scoring metric (default value 'efpkg')

    Returns:
        ret_val (str): XML node
        attribute_values (defaultdict[str,dict[str,float]]): outer key is
            one of dataseries members, inner key is in [metric, 'count',
            'identity', 'hit_count'], value is float.
    """
    attribute_values = autovivify(2, float)
    if taxid not in tax_profile.tree.data:
        raise KeyError(taxid, 'not found in the tree!!!')
    # Alias the current node: it is read many times and never re-bound below.
    node = tax_profile.tree.data[taxid]
    ret_val = '\t' * offset + '<node name="' + node.name + '">\n'
    offset += 1
    if node.attributes:
        # The read-count element is redundant when the metric itself is a count.
        if metric not in ('readcount', 'proteincount'):
            ret_val += '\t' * offset + '<readcount>'
            for datapoint in dataseries:
                if datapoint in node.attributes and 'count' in node.attributes[datapoint]:
                    ret_val += '<val>' + format(
                        node.attributes[datapoint]['count'], "0.0f") + '</val>'
                else:
                    ret_val += '<val>0</val>'
            ret_val += '</readcount>\n'
        ret_val += '\t' * offset + '<' + metric + '>'
        for datapoint in dataseries:
            if datapoint in node.attributes and metric in node.attributes[datapoint]:
                ret_val += '<val>' + format(
                    node.attributes[datapoint][metric], "0.6f") + '</val>'
            else:
                ret_val += '<val>0.0</val>'
        ret_val += '</' + metric + '>\n' + '\t' * offset + '<identity>'
        for datapoint in dataseries:
            if datapoint in node.attributes and 'identity' in node.attributes[datapoint]:
                # 'identity' accumulates a sum over hits; divide by hit count for the mean.
                ret_val += '<val>' + format((
                    node.attributes[datapoint]['identity']
                    / node.attributes[datapoint]['hit_count']
                ), "0.1f") + '</val>'
            else:
                ret_val += '<val>0.0</val>'
        ret_val += '</identity>\n'
    else:
        # No attributes at all: emit zero values for every series member.
        if metric not in ('readcount', 'proteincount'):
            ret_val += '\t' * offset + '<readcount>'
            ret_val += '<val>0</val>' * len(dataseries)
            ret_val += '</readcount>\n'
        ret_val += '\t' * offset + '<' + metric + '>'
        ret_val += '<val>0.0</val>' * len(dataseries)
        # BUGFIX: the metric element was previously "closed" with another
        # opening tag ('<metric>'), producing malformed XML. Use '</metric>'.
        ret_val += '</' + metric + '>\n' + '\t' * offset + '<identity>'
        ret_val += '<val>0.0</val>' * len(dataseries)
        ret_val += '</identity>\n'
    if node.children:
        for child_taxid in node.children:
            child_node, child_values = get_lca_dataseries_tax_xml(tax_profile,
                                                                  dataseries,
                                                                  child_taxid,
                                                                  offset,
                                                                  metric=metric)
            ret_val += child_node
            # Accumulate the children's totals to detect unmapped reads below.
            for datapoint in child_values.keys():
                for key, val in child_values[datapoint].items():
                    attribute_values[datapoint][key] += val
        # Add a child node for unidentified child taxon, if needed
        unidentified_flag = False
        for datapoint in dataseries:
            if datapoint in node.attributes:
                if (
                    attribute_values[datapoint]['count']
                    < node.attributes[datapoint]['count']
                ):
                    unidentified_flag = True
                    break
        if unidentified_flag:
            # offset == 2 means this is the root node; its unclassified child
            # gets a generic label instead of "Unclassified <root name>".
            if offset == 2:
                ret_val += '\t' * offset + '<node name="Unclassified">\n'
            else:
                ret_val += '\t' * offset + '<node name="Unclassified '\
                    + node.name + '">\n'
            offset += 1
            if metric not in ('readcount', 'proteincount'):
                ret_val += '\t' * offset + '<readcount>'
                for datapoint in dataseries:
                    if datapoint in node.attributes and (
                        attribute_values[datapoint]['count']
                        < node.attributes[datapoint]['count']
                    ):
                        ret_val += '<val>' + format((
                            node.attributes[datapoint]['count']
                            - attribute_values[datapoint]['count']
                        ), "0.0f") + '</val>'
                    else:
                        ret_val += '<val>0</val>'
                ret_val += '</readcount>\n'
            ret_val += '\t' * offset + '<' + metric + '>'
            for datapoint in dataseries:
                if datapoint in node.attributes and (
                    attribute_values[datapoint]['count']
                    < node.attributes[datapoint]['count']
                ):
                    ret_val += '<val>' + format((
                        node.attributes[datapoint][metric]
                        - attribute_values[datapoint][metric]
                    ), "0.6f") + '</val>'
                else:
                    ret_val += '<val>0.0</val>'
            ret_val += '</' + metric + '>\n'
            ret_val += '\t' * offset + '<identity>'
            for datapoint in dataseries:
                if datapoint in node.attributes and (
                    'hit_count' in node.attributes[datapoint]
                ) and (
                    attribute_values[datapoint]['hit_count']
                    < node.attributes[datapoint]['hit_count']
                ):
                    # Mean identity of the hits NOT accounted for by children.
                    ret_val += '<val>' + format(((
                        node.attributes[datapoint]['identity']
                        - attribute_values[datapoint]['identity']
                    ) / (
                        node.attributes[datapoint]['hit_count']
                        - attribute_values[datapoint]['hit_count']
                    )), "0.1f") + '</val>'
                else:
                    ret_val += '<val>0.0</val>'
            ret_val += '</identity>\n'
            offset -= 1
            ret_val += '\t' * offset + '</node>\n'
    offset -= 1
    ret_val += '\t' * offset + '</node>\n'
    # Report this node's own totals (not the accumulated children totals)
    # to the caller, so the parent can run the same unmapped-reads check.
    attribute_values = autovivify(1)
    for datapoint in dataseries:
        if datapoint in node.attributes:
            for key in (metric, 'count', 'identity', 'hit_count'):
                if key in node.attributes[datapoint]:
                    attribute_values[datapoint][key] = node.attributes[datapoint][key]
    return ret_val, attribute_values
def get_dataseries_tax_xml(tax_profile, dataseries, taxid, offset, metric='efpkg'):
    """Returns XML node for a phylogenetic tree node and all its children.

    Args:
        tax_profile (:obj:TaxonomyProfile): taxonomy profile
        dataseries (list of str): either sample identifiers or function identifiers,
            depending on profile type (functional or taxonomic)
        taxid (str): taxonomy identifier of a node of interest
        offset (int): number of starting tabs
        metric (str): scoring metric (default value 'efpkg')

    Returns:
        ret_val (str): XML node

    Raises:
        KeyError: if taxid is not present in the taxonomy profile tree.
    """
    if taxid not in tax_profile.tree.data:
        raise KeyError(taxid, 'not found in the tree!!!')
    # Alias the current node: it is read many times and never re-bound below.
    node = tax_profile.tree.data[taxid]
    ret_val = '\t' * offset + '<node name="' + node.name + '">\n'
    offset += 1
    if node.attributes:
        # The read-count element is redundant when the metric itself is a count.
        if metric not in ('readcount', 'proteincount'):
            ret_val += '\t' * offset + '<readcount>'
            for datapoint in dataseries:
                if datapoint in node.attributes:
                    ret_val += '<val>' + format(
                        node.attributes[datapoint]['count'], "0.0f") + '</val>'
                else:
                    ret_val += '<val>0</val>'
            ret_val += '</readcount>\n'
        ret_val += '\t' * offset + '<' + metric + '>'
        for datapoint in dataseries:
            if datapoint in node.attributes:
                ret_val += '<val>' + format((
                    node.attributes[datapoint][metric]
                ), "0.5f") + '</val>'
            else:
                ret_val += '<val>0.0</val>'
        ret_val += '</' + metric + '>\n' + '\t' * offset + '<identity>'
        for datapoint in dataseries:
            if datapoint in node.attributes:
                # 'identity' accumulates a sum over hits; divide by hit count for the mean.
                ret_val += '<val>' + format((
                    node.attributes[datapoint]['identity']
                    / node.attributes[datapoint]['hit_count']
                ), "0.1f") + '</val>'
            else:
                ret_val += '<val>0.0</val>'
        ret_val += '</identity>\n'
    else:
        # No attributes at all: emit zero values for every series member.
        if metric not in ('readcount', 'proteincount'):
            ret_val += '\t' * offset + '<readcount>'
            ret_val += '<val>0</val>' * len(dataseries)
            ret_val += '</readcount>\n'
        ret_val += '\t' * offset + '<' + metric + '>'
        ret_val += '<val>0.0</val>' * len(dataseries)
        # BUGFIX: the metric element was previously "closed" with another
        # opening tag ('<metric>'), producing malformed XML. Use '</metric>'.
        ret_val += '</' + metric + '>\n' + '\t' * offset + '<identity>'
        ret_val += '<val>0.0</val>' * len(dataseries)
        ret_val += '</identity>\n'
    if node.children:
        for child_taxid in node.children:
            ret_val += get_dataseries_tax_xml(
                tax_profile, dataseries, child_taxid, offset, metric=metric
            )
    offset -= 1
    ret_val += '\t' * offset + '</node>\n'
    return ret_val
def make_function_taxonomy_chart(tax_profile, function_list, outfile, krona_path,
                                 metric='efpkg'):
    """Writes XML file for taxonomy chart of multiple functions in one sample
    and generates Krona plot from it.

    Args:
        tax_profile (:obj:TaxonomyProfile): taxonomy profile object
        function_list (list of str): function identifiers
        outfile (str): path for XML output
        krona_path (str): Krona Tools command
        metric (str): scoring metric (efpkg by default)
    """
    with open(outfile, 'w') as out:
        # XML header: magnitude attribute plus the per-node attributes shown by Krona
        out.write('<krona key="false">\n')
        out.write('\t<attributes magnitude="{}">\n'.format(metric))
        if metric == 'proteincount':
            out.write('\t\t<attribute display="Protein count">{}</attribute>\n'.format(metric))
        else:
            out.write('\t\t<attribute display="Read count">readcount</attribute>\n')
        if metric not in ('readcount', 'proteincount'):
            out.write('\t\t<attribute display="Score:{0}">{0}</attribute>\n'.format(metric))
        out.write('\t\t<attribute display="Best hit identity %" mono="true">identity</attribute>\n')
        out.write('\t</attributes>\n')
        # Color nodes by average identity (50%..100% mapped to red..blue)
        out.write('\t<color attribute="identity" valueStart="50" valueEnd="100" hueStart="0" '
                  'hueEnd="240" default="true"></color>\n')
        # One dataset per function identifier
        out.write('\t<datasets>\n')
        for function in function_list:
            out.write('\t\t<dataset>{}</dataset>\n'.format(function))
        out.write('\t</datasets>\n')
        # Taxonomy tree nodes, starting from the root with one leading tab
        out.write(
            get_dataseries_tax_xml(
                tax_profile, function_list, ROOT_TAXONOMY_ID, 1, metric=metric
            )
        )
        out.write('</krona>')
    # Generate the interactive HTML plot with Krona Tools
    run_external_program([krona_path, '-o', outfile + '.html', outfile])
def get_genes_xml(gene_data, gene_ids, dataseries, offset, metric):
"""Returns XML nodes for all predicted gene from one taxon.
Args:
gene_data (defaultdict[str,defaultdict[str,dict[str,float]]]): outer key is
gene identifier, middle key is function identifier, inner key is in
[metric, 'count', 'identity', 'coverage', 'Length', 'Completeness'],
value is float.
gene_ids (list of str): gene identifiers
dataseries (list of str): either sample identifiers or function identifiers,
depending on profile type (functional or taxonomic)
offset (int): number of starting tabs
metric (str): scoring metric
Returns:
ret_val (str): XML node
attribute_values (defaultdict[str,dict[str,float]]): outer key is
one of dataseries members, inner key is in [metric, 'count', 'identity'
'hit_count'], value is float.
"""
# gene data: gene_data[gene_id][function][parameter] = parameter_value
ret_val = ''
for gene_id in gene_ids:
ret_val += '\t'*offset + '<node name="' + gene_id + '">\n'
offset += 1
if metric != 'readcount':
ret_val += '\t'*offset + '<readcount>'
for datapoint in dataseries:
if datapoint in gene_data[gene_id]:
ret_val += '<val>' + gene_data[gene_id][datapoint]['count'] + '</val>'
else:
ret_val += '<val>0</val>'
ret_val += '</readcount>\n'
ret_val += '\t'*offset + '<' + metric + '>'
for datapoint in dataseries:
if datapoint in gene_data[gene_id]:
ret_val += '<val>' + gene_data[gene_id][datapoint][metric] + '</val>'
else:
ret_val += '<val>0</val>'
ret_val += | |
<gh_stars>0
# -*- coding: utf-8 -*-
#
import logging
import os
import traceback
import numpy as np
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
from module.MOptions import MMultiSplitOptions, MOptionsDataSet
from mmd.PmxData import PmxModel # noqa
from mmd.VmdData import VmdMotion, VmdBoneFrame, VmdCameraFrame, VmdInfoIk, VmdLightFrame, VmdMorphFrame, VmdShadowFrame, VmdShowIkFrame # noqa
from mmd.VmdWriter import VmdWriter
from module.MMath import MRect, MVector3D, MVector4D, MQuaternion, MMatrix4x4 # noqa
from utils import MUtils, MServiceUtils, MBezierUtils # noqa
from utils.MLogger import MLogger # noqa
from utils.MException import SizingException, MKilledException
logger = MLogger(__name__, level=1)
class ConvertMultiSplitService():
def __init__(self, options: MMultiSplitOptions):
    # Options bundle (motion, model, target bones, output path, logging level).
    self.options = options
    # Keep a copy of the motion as it was before splitting, so the split
    # results can later be compared against the original keyframes.
    self.prev_motion = options.motion.copy()
def execute(self):
    """Run the multi-split process: split each target bone's rotation and
    translation into separate single-axis bones, optionally remove
    unnecessary keyframes, and write the result to the output VMD.

    Returns:
        bool: True on success; False (or a falsy None) on failure/cancel.
    """
    logging.basicConfig(level=self.options.logging_level, format="%(message)s [%(module_name)s]")

    try:
        # Build the run summary shown to the user.
        # BUGFIX: the first assignment used to end in a stray line-continuation
        # backslash followed by another assignment, which was a syntax error.
        service_data_txt = "多段分割処理実行\n------------------------\nexeバージョン: {version_name}\n".format(version_name=self.options.version_name)
        service_data_txt = "{service_data_txt} VMD: {vmd}\n".format(service_data_txt=service_data_txt,
                                                                    vmd=os.path.basename(self.options.motion.path))  # noqa
        # BUGFIX: the model line used to show the motion file name
        # (options.motion.path) — presumably a copy-paste slip; show the model path.
        service_data_txt = "{service_data_txt} モデル: {model}({model_name})\n".format(service_data_txt=service_data_txt,
                                                                                   model=os.path.basename(self.options.model.path), model_name=self.options.model.name)  # noqa
        service_data_txt = "{service_data_txt} 不要キー削除: {center_rotation}\n".format(service_data_txt=service_data_txt,
                                                                                   center_rotation=self.options.remove_unnecessary_flg)  # noqa

        selections = ["{0} → 回転(X): {1}, 回転(Y): {2}, 回転(Z): {3}, 移動(X): {4}, 移動(Y): {5}, 移動(Z): {6}" \
                      .format(bset[0], bset[1], bset[2], bset[3], bset[4], bset[5], bset[6]) for bset in self.options.target_bones]
        service_data_txt = "{service_data_txt} 対象ボーン: {target_bones}\n".format(service_data_txt=service_data_txt,
                                                                              target_bones='\n'.join(selections))  # noqa

        logger.info(service_data_txt, decoration=MLogger.DECORATION_BOX)

        motion = self.options.motion
        model = self.options.model

        # Split each target bone in parallel.
        futures = []
        with ThreadPoolExecutor(thread_name_prefix="split", max_workers=self.options.max_workers) as executor:
            center_mx = ""
            center_my = ""
            center_mz = ""
            for (bone_name, rrxbn, rrybn, rrzbn, rmxbn, rmybn, rmzbn) in self.options.target_bones:
                if bone_name not in model.bones or bone_name not in motion.bones:
                    continue

                if bone_name == "センター":
                    # Remember the center's translation target names so the
                    # groove bone can be merged with the center translation.
                    center_mx = rmxbn
                    center_my = rmybn
                    center_mz = rmzbn

                if bone_name == "グルーブ":
                    futures.append(executor.submit(self.convert_multi_split, bone_name, rrxbn, rrybn, rrzbn, rmxbn, rmybn, rmzbn, center_mx, center_my, center_mz))
                else:
                    futures.append(executor.submit(self.convert_multi_split, bone_name, rrxbn, rrybn, rrzbn, rmxbn, rmybn, rmzbn, "", "", ""))

        concurrent.futures.wait(futures, timeout=None, return_when=concurrent.futures.FIRST_EXCEPTION)

        for f in futures:
            if not f.result():
                return False

        if self.options.remove_unnecessary_flg:
            # Remove unnecessary (redundant) keyframes from each split bone.
            futures = []
            with ThreadPoolExecutor(thread_name_prefix="remove", max_workers=self.options.max_workers) as executor:
                for (bone_name, rrxbn, rrybn, rrzbn, rmxbn, rmybn, rmzbn) in self.options.target_bones:
                    # Guard against bones missing from the model; the split
                    # loop above skips them with the same condition.
                    if bone_name not in model.bones:
                        continue
                    if model.bones[bone_name].getRotatable():
                        if len(rrxbn) > 0:
                            futures.append(executor.submit(self.remove_unnecessary_bf, rrxbn))
                        if len(rrybn) > 0:
                            futures.append(executor.submit(self.remove_unnecessary_bf, rrybn))
                        if len(rrzbn) > 0:
                            futures.append(executor.submit(self.remove_unnecessary_bf, rrzbn))
                    if model.bones[bone_name].getTranslatable():
                        if len(rmxbn) > 0:
                            futures.append(executor.submit(self.remove_unnecessary_bf, rmxbn))
                        if len(rmybn) > 0:
                            futures.append(executor.submit(self.remove_unnecessary_bf, rmybn))
                        if len(rmzbn) > 0:
                            futures.append(executor.submit(self.remove_unnecessary_bf, rmzbn))

            concurrent.futures.wait(futures, timeout=None, return_when=concurrent.futures.FIRST_EXCEPTION)

            for f in futures:
                if not f.result():
                    return False

        # Write the final motion to the output path.
        VmdWriter(MOptionsDataSet(self.options.motion, None, self.options.model, self.options.output_path, False, False, [], None, 0, [])).write()

        logger.info("出力終了: %s", os.path.basename(self.options.output_path), decoration=MLogger.DECORATION_BOX, title="成功")

        return True
    except MKilledException:
        # User-requested cancellation: report failure without logging an error.
        return False
    except SizingException as se:
        logger.error("多段分割処理が処理できないデータで終了しました。\n\n%s", se.message, decoration=MLogger.DECORATION_BOX)
    except Exception:
        logger.critical("多段分割処理が意図せぬエラーで終了しました。\n\n%s", traceback.format_exc(), decoration=MLogger.DECORATION_BOX)
    finally:
        logging.shutdown()
# 多段分割処理実行
def convert_multi_split(self, bone_name: str, rrxbn: str, rrybn: str, rrzbn: str, rmxbn: str, rmybn: str, rmzbn: str, center_mx: str, center_my: str, center_mz: str):
logger.info("多段分割【%s】", bone_name, decoration=MLogger.DECORATION_LINE)
motion = self.options.motion
model = self.options.model
# 事前に変化量全打ち
if bone_name == "センター" or bone_name == "グルーブ":
fnos = self.prev_motion.get_differ_fnos(0, ["センター", "グルーブ"], limit_degrees=70, limit_length=1)
else:
fnos = self.prev_motion.get_differ_fnos(0, [bone_name], limit_degrees=70, limit_length=1)
if len(fnos) == 0:
return
prev_sep_fno = 0
for fno in fnos:
# 一度そのままキーを登録
motion.regist_bf(motion.calc_bf(bone_name, fno), bone_name, fno)
# 補間曲線のため、もう一度取得しなおし
bf = motion.calc_bf(bone_name, fno)
if model.bones[bone_name].getRotatable():
rx_bf = motion.calc_bf(rrxbn, fno)
motion.copy_interpolation(bf, rx_bf, MBezierUtils.BZ_TYPE_R)
motion.regist_bf(rx_bf, rx_bf.name, fno, copy_interpolation=True)
ry_bf = motion.calc_bf(rrybn, fno)
motion.copy_interpolation(bf, ry_bf, MBezierUtils.BZ_TYPE_R)
motion.regist_bf(ry_bf, ry_bf.name, fno, copy_interpolation=True)
rz_bf = motion.calc_bf(rrzbn, fno)
motion.copy_interpolation(bf, rz_bf, MBezierUtils.BZ_TYPE_R)
motion.regist_bf(rz_bf, rz_bf.name, fno, copy_interpolation=True)
if model.bones[bone_name].getTranslatable():
mx_bf = motion.calc_bf(rmxbn, fno)
motion.copy_interpolation(bf, mx_bf, MBezierUtils.BZ_TYPE_MX)
motion.regist_bf(mx_bf, mx_bf.name, fno, copy_interpolation=True)
my_bf = motion.calc_bf(rmybn, fno)
motion.copy_interpolation(bf, my_bf, MBezierUtils.BZ_TYPE_MY)
motion.regist_bf(my_bf, my_bf.name, fno, copy_interpolation=True)
mz_bf = motion.calc_bf(rmzbn, fno)
motion.copy_interpolation(bf, mz_bf, MBezierUtils.BZ_TYPE_MZ)
motion.regist_bf(mz_bf, mz_bf.name, fno, copy_interpolation=True)
if fno // 500 > prev_sep_fno and fnos[-1] > 0:
logger.info("-- %sフレーム目:終了(%s%)【キーフレ追加 - %s】", fno, round((fno / fnos[-1]) * 100, 3), bone_name)
prev_sep_fno = fno // 500
logger.info("分割準備完了【%s】", bone_name, decoration=MLogger.DECORATION_LINE)
# ローカルX軸
local_x_axis = model.bones[bone_name].local_x_vector
if local_x_axis == MVector3D(1, 0, 0) or local_x_axis == MVector3D():
# 指定が無い場合、腕系はローカルX軸、それ以外はノーマル
if "腕" in bone_name or "ひじ" in bone_name or "手首" in bone_name:
local_x_axis = model.get_local_x_axis(bone_name)
else:
local_x_axis = None
logger.debug(f"{bone_name}, local_x_axis: {local_x_axis}")
prev_sep_fno = 0
for fno in fnos:
bf = motion.calc_bf(bone_name, fno)
# 多段分割
self.split_bf(fno, bf, local_x_axis, bone_name, rrxbn, rrybn, rrzbn, rmxbn, rmybn, rmzbn)
if fno // 500 > prev_sep_fno and fnos[-1] > 0:
logger.info("-- %sフレーム目:終了(%s%)【多段分割 - %s】", fno, round((fno / fnos[-1]) * 100, 3), bone_name)
prev_sep_fno = fno // 500
check_fnos = []
check_prev_next_fnos = {}
# 分離後に乖離起こしてないかチェック
for fno_idx, (prev_fno, next_fno) in enumerate(zip(fnos[:-1], fnos[1:])):
fno = int(prev_fno + ((next_fno - prev_fno) / 2))
if fno not in fnos:
check_fnos.append(fno)
check_prev_next_fnos[fno] = {"prev": prev_fno, "next": next_fno}
check_fnos = list(sorted(list(set(check_fnos))))
logger.debug("bone_name: %s, check_fnos: %s", bone_name, check_fnos)
prev_sep_fno = 0
for fno in check_fnos:
is_subdiv = False
prev_motion_bf = self.prev_motion.calc_bf(bone_name, fno).copy()
if model.bones[bone_name].getRotatable():
# 回転を分ける
if local_x_axis:
# ローカルX軸がある場合
x_qq, y_qq, z_qq, _ = MServiceUtils.separate_local_qq(fno, bone_name, prev_motion_bf.rotation, local_x_axis)
else:
# ローカルX軸の指定が無い場合、グローバルで分ける
euler = prev_motion_bf.rotation.toEulerAngles()
x_qq = MQuaternion.fromEulerAngles(euler.x(), 0, 0)
y_qq = MQuaternion.fromEulerAngles(0, euler.y(), 0)
z_qq = MQuaternion.fromEulerAngles(0, 0, euler.z())
if len(rrxbn) > 0:
rx_bf = motion.calc_bf(rrxbn, fno)
dot = MQuaternion.dotProduct(x_qq.normalized(), rx_bf.rotation.normalized())
if dot < 0.98:
is_subdiv = True
if len(rrybn) > 0:
ry_bf = motion.calc_bf(rrybn, fno)
dot = MQuaternion.dotProduct(y_qq.normalized(), ry_bf.rotation.normalized())
if dot < 0.98:
is_subdiv = True
if len(rrzbn) > 0:
rz_bf = motion.calc_bf(rrzbn, fno)
dot = MQuaternion.dotProduct(z_qq.normalized(), rz_bf.rotation.normalized())
if dot < 0.98:
is_subdiv = True
if model.bones[bone_name].getTranslatable():
if len(center_mx) > 0 or len(center_my) > 0 or len(center_mz) > 0:
# センターとグルーブを両方分割してる場合
prev_center_motion_bf = self.prev_motion.calc_bf("センター", fno).copy()
if len(center_mx) > 0 and rmxbn == center_mx:
prev_motion_bf.position.setX(prev_motion_bf.position.x() + prev_center_motion_bf.position.x())
if len(center_my) > 0 and rmybn == center_my:
prev_motion_bf.position.setY(prev_motion_bf.position.y() + prev_center_motion_bf.position.y())
if len(center_mz) > 0 and rmzbn == center_mz:
prev_motion_bf.position.setZ(prev_motion_bf.position.z() + prev_center_motion_bf.position.z())
# 移動を分ける
if len(rmxbn) > 0:
mx_bf = motion.calc_bf(rmxbn, fno)
if np.diff([mx_bf.position.x(), prev_motion_bf.position.x()]) > 0.1:
is_subdiv = True
if len(rmybn) > 0:
my_bf = motion.calc_bf(rmybn, fno)
if np.diff([my_bf.position.y(), prev_motion_bf.position.y()]) > 0.1:
is_subdiv = True
if len(rmzbn) > 0:
mz_bf = motion.calc_bf(rmzbn, fno)
if np.diff([mz_bf.position.z(), prev_motion_bf.position.z()]) > 0.1:
is_subdiv = True
if is_subdiv:
# 細分化ONの場合、更に分割する
if model.bones[bone_name].getRotatable():
if len(rrxbn) > 0:
motion.regist_bf(self.prev_motion.calc_bf(rrxbn, fno), rrxbn, fno)
if len(rrybn) > 0:
motion.regist_bf(self.prev_motion.calc_bf(rrybn, fno), rrybn, fno)
if len(rrzbn) > 0:
motion.regist_bf(self.prev_motion.calc_bf(rrzbn, fno), rrzbn, fno)
if model.bones[bone_name].getTranslatable():
if len(rmxbn) > 0:
motion.regist_bf(self.prev_motion.calc_bf(rmxbn, fno), rmxbn, fno)
if len(rmybn) > 0:
motion.regist_bf(self.prev_motion.calc_bf(rmybn, fno), rmybn, fno)
if len(rmzbn) > 0:
motion.regist_bf(self.prev_motion.calc_bf(rmzbn, fno), rmzbn, fno)
# 分割前の値を再登録
motion.regist_bf(self.prev_motion.calc_bf(bone_name, fno), bone_name, fno)
subdiv_bf = motion.calc_bf(bone_name, fno)
if bone_name == "グルーブ" and (len(center_mx) > 0 or len(center_my) > 0 or len(center_mz) > 0):
prev_center_motion_bf = self.prev_motion.calc_bf("センター", fno)
if len(center_mx) > 0 and rmxbn == center_mx:
subdiv_bf.position.setX(subdiv_bf.position.x() + prev_center_motion_bf.position.x())
if len(center_my) > 0 and rmybn == center_my:
subdiv_bf.position.setY(subdiv_bf.position.y() + prev_center_motion_bf.position.y())
if len(center_mz) > 0 and rmzbn == center_mz:
subdiv_bf.position.setZ(subdiv_bf.position.z() + prev_center_motion_bf.position.z())
# 多段分割
self.split_bf(fno, subdiv_bf, local_x_axis, bone_name, rrxbn, rrybn, rrzbn, rmxbn, rmybn, rmzbn)
# prev_fno = check_prev_next_fnos[fno]["prev"]
# next_fno = check_prev_next_fnos[fno]["next"]
# logger.info(f"-- 軌跡ズレ防止のため、「{bone_name}」の{prev_fno}F~{next_fno}F間を細分化・不要キー除去します")
# for f in range(prev_fno, next_fno + 1):
# # 区間内を初期登録
# if model.bones[bone_name].getRotatable():
# # 回転を分ける
# if local_x_axis:
# # ローカルX軸がある場合
# x_qq, y_qq, z_qq, _ = MServiceUtils.separate_local_qq(f, bone_name, prev_motion_bf.rotation, local_x_axis)
# else:
# # ローカルX軸の指定が無い場合、グローバルで分ける
# euler = prev_motion_bf.rotation.toEulerAngles()
# x_qq = MQuaternion.fromEulerAngles(euler.x(), 0, 0)
# y_qq = MQuaternion.fromEulerAngles(0, euler.y(), 0)
# z_qq = MQuaternion.fromEulerAngles(0, 0, euler.z())
# if len(rrxbn) > 0:
# prev_rx_bf = self.prev_motion.calc_bf(rrxbn, f).copy()
# prev_rx_bf.rotation = x_qq
# motion.regist_bf(prev_rx_bf, rrxbn, f)
# if len(rrybn) > 0:
# prev_ry_bf = self.prev_motion.calc_bf(rrybn, f).copy()
# prev_ry_bf.rotation = y_qq
# motion.regist_bf(prev_ry_bf, rrybn, f)
# if len(rrzbn) > 0:
# prev_rz_bf = self.prev_motion.calc_bf(rrzbn, f).copy()
# prev_rz_bf.rotation = z_qq
# motion.regist_bf(prev_rz_bf, rrzbn, f)
# if model.bones[bone_name].getTranslatable():
# if len(center_mx) > 0 or len(center_my) > 0 or len(center_mz) > 0:
# # センターとグルーブを両方分割してる場合
# prev_center_motion_bf = self.prev_motion.calc_bf("センター", fno).copy()
# if len(center_mx) > 0 and rmxbn == center_mx:
# prev_motion_bf.position.setX(prev_motion_bf.position.x() + prev_center_motion_bf.position.x())
# if len(center_my) > 0 and rmybn == center_my:
# prev_motion_bf.position.setY(prev_motion_bf.position.y() + prev_center_motion_bf.position.y())
# if len(center_mz) > 0 and rmzbn == center_mz:
# prev_motion_bf.position.setZ(prev_motion_bf.position.z() + prev_center_motion_bf.position.z())
# if len(rmxbn) > 0:
# prev_mx_bf = self.prev_motion.calc_bf(rmxbn, f).copy()
# prev_mx_bf.position.setX(prev_motion_bf.position.x())
# motion.regist_bf(prev_mx_bf, rmxbn, f)
# if len(rmybn) > 0:
# prev_my_bf = self.prev_motion.calc_bf(rmybn, f).copy()
# prev_my_bf.position.setY(prev_motion_bf.position.y())
# motion.regist_bf(prev_my_bf, rmybn, f)
# if len(rmzbn) > 0:
# prev_mz_bf = self.prev_motion.calc_bf(rmzbn, f).copy()
# prev_mz_bf.position.setZ(prev_motion_bf.position.z())
# motion.regist_bf(prev_mz_bf, rmzbn, f)
# # 不要キー削除
# futures = []
# with ThreadPoolExecutor(thread_name_prefix="remove", max_workers=self.options.max_workers) as executor:
# if model.bones[bone_name].getRotatable():
# if len(rrxbn) | |
<filename>src/OFS/CopySupport.py
##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Copy interface
"""
from cgi import escape
from marshal import dumps
from marshal import loads
import re
import sys
import tempfile
from urllib import quote
from urllib import unquote
import warnings
from zlib import compress
from zlib import decompress
import transaction
from AccessControl import ClassSecurityInfo
from AccessControl import getSecurityManager
from AccessControl.class_init import InitializeClass
from AccessControl.Permissions import view_management_screens
from AccessControl.Permissions import copy_or_move
from AccessControl.Permissions import delete_objects
from Acquisition import aq_base
from Acquisition import aq_inner
from Acquisition import aq_parent
from App.Dialogs import MessageDialog
from App.special_dtml import HTML
from App.special_dtml import DTMLFile
from ExtensionClass import Base
from webdav.Lockable import ResourceLockedError
from zExceptions import Unauthorized, BadRequest
from ZODB.POSException import ConflictError
from zope.interface import implements
from zope.event import notify
from zope.lifecycleevent import ObjectCopiedEvent
from zope.lifecycleevent import ObjectMovedEvent
from zope.container.contained import notifyContainerModified
from OFS.event import ObjectWillBeMovedEvent
from OFS.event import ObjectClonedEvent
from OFS.interfaces import ICopyContainer
from OFS.interfaces import ICopySource
from OFS.Moniker import loadMoniker
from OFS.Moniker import Moniker
from OFS.subscribers import compatibilityCall
class CopyError(Exception):
    """Raised when a cut/copy/paste/rename/clone operation cannot proceed.

    The exception argument is typically an HTML message (often a
    MessageDialog) suitable for display in the ZMI.
    """
    pass
copy_re = re.compile('^copy([0-9]*)_of_(.*)')
_marker=[]
class CopyContainer(Base):
"""Interface for containerish objects which allow cut/copy/paste"""
implements(ICopyContainer)
security = ClassSecurityInfo()
# The following three methods should be overridden to store sub-objects
# as non-attributes.
    def _setOb(self, id, object):
        # Store a sub-object as a plain attribute.  Containers that keep
        # children elsewhere (e.g. a BTree) are expected to override this.
        setattr(self, id, object)
    def _delOb(self, id):
        # Remove a sub-object stored as a plain attribute (see _setOb).
        delattr(self, id)
    def _getOb(self, id, default=_marker):
        # Fetch a contained sub-object by id.  aq_base strips acquisition so
        # only *real* attributes of this container count as children.
        if hasattr(aq_base(self), id):
            return getattr(self, id)
        # _marker sentinel distinguishes "no default given" from default=None.
        if default is _marker:
            raise AttributeError(id)
        return default
    def manage_CopyContainerFirstItem(self, REQUEST):
        # Convenience accessor: return the object named by the first entry
        # of the request's 'ids' list.
        return self._getOb(REQUEST['ids'][0])
    def manage_CopyContainerAllItems(self, REQUEST):
        # Convenience accessor: return all objects named by the request's
        # 'ids' list, in order.
        return [self._getOb(i) for i in REQUEST['ids']]
security.declareProtected(delete_objects, 'manage_cutObjects')
    def manage_cutObjects(self, ids=None, REQUEST=None):
        """Put a reference to the objects named in ids in the clip board

        Marks the clipboard payload as a *move* operation (op flag 1).
        Returns the encoded clipboard string, or renders manage_main when
        called through the web (REQUEST given), storing the clipboard in
        the '__cp' cookie.
        """
        if ids is None and REQUEST is not None:
            return eNoItemsSpecified
        elif ids is None:
            raise ValueError('ids must be specified')
        # Accept a single id string as well as a list of ids.
        if type(ids) is type(''):
            ids=[ids]
        oblist=[]
        for id in ids:
            ob=self._getOb(id)
            # A WebDAV-locked object must not be moved out from under its lock.
            if ob.wl_isLocked():
                raise ResourceLockedError('Object "%s" is locked via WebDAV'
                                          % ob.getId())
            if not ob.cb_isMoveable():
                raise CopyError(eNotSupported % escape(id))
            # Monikers are lightweight, picklable path references to objects.
            m = Moniker(ob)
            oblist.append(m.dump())
        # Clipboard payload: (op, monikers); op 1 means "cut" (move on paste).
        cp=(1, oblist)
        cp=_cb_encode(cp)
        if REQUEST is not None:
            resp=REQUEST['RESPONSE']
            resp.setCookie('__cp', cp, path='%s' % cookie_path(REQUEST))
            REQUEST['__cp'] = cp
            return self.manage_main(self, REQUEST)
        return cp
security.declareProtected(view_management_screens, 'manage_copyObjects')
    def manage_copyObjects(self, ids=None, REQUEST=None, RESPONSE=None):
        """Put a reference to the objects named in ids in the clip board

        Marks the clipboard payload as a *copy* operation (op flag 0).
        Mirrors manage_cutObjects, except no lock check is needed because
        copying leaves the source object in place.
        """
        if ids is None and REQUEST is not None:
            return eNoItemsSpecified
        elif ids is None:
            raise ValueError('ids must be specified')
        # Accept a single id string as well as a list of ids.
        if type(ids) is type(''):
            ids=[ids]
        oblist=[]
        for id in ids:
            ob=self._getOb(id)
            if not ob.cb_isCopyable():
                raise CopyError(eNotSupported % escape(id))
            m = Moniker(ob)
            oblist.append(m.dump())
        # Clipboard payload: (op, monikers); op 0 means "copy" on paste.
        cp=(0, oblist)
        cp=_cb_encode(cp)
        if REQUEST is not None:
            resp=REQUEST['RESPONSE']
            resp.setCookie('__cp', cp, path='%s' % cookie_path(REQUEST))
            REQUEST['__cp'] = cp
            return self.manage_main(self, REQUEST)
        return cp
def _get_id(self, id):
# Allow containers to override the generation of
# object copy id by attempting to call its _get_id
# method, if it exists.
match = copy_re.match(id)
if match:
n = int(match.group(1) or '1')
orig_id = match.group(2)
else:
n = 0
orig_id = id
while 1:
if self._getOb(id, None) is None:
return id
id='copy%s_of_%s' % (n and n+1 or '', orig_id)
n=n+1
security.declareProtected(view_management_screens, 'manage_pasteObjects')
    def manage_pasteObjects(self, cb_copy_data=None, REQUEST=None):
        """Paste previously copied objects into the current object.

        If calling manage_pasteObjects from python code, pass the result of a
        previous call to manage_cutObjects or manage_copyObjects as the first
        argument.

        Also sends IObjectCopiedEvent and IObjectClonedEvent
        or IObjectWillBeMovedEvent and IObjectMovedEvent.

        Returns a list of {'id': original_id, 'new_id': pasted_id} dicts,
        or renders manage_main when called through the web.
        """
        # Prefer explicitly passed clipboard data over the request cookie.
        if cb_copy_data is not None:
            cp = cb_copy_data
        elif REQUEST is not None and REQUEST.has_key('__cp'):
            cp = REQUEST['__cp']
        else:
            cp = None
        if cp is None:
            raise CopyError(eNoData)
        # Clipboard payload is (op, [moniker dumps]); op 0 = copy, 1 = move.
        try:
            op, mdatas = _cb_decode(cp)
        except:
            raise CopyError(eInvalid)
        oblist = []
        app = self.getPhysicalRoot()
        # Resolve each moniker back to a live object, security-checking the
        # paste target (and, for moves, the source) before touching anything.
        for mdata in mdatas:
            m = loadMoniker(mdata)
            try:
                ob = m.bind(app)
            except ConflictError:
                raise
            except:
                raise CopyError(eNotFound)
            self._verifyObjectPaste(ob, validate_src=op+1)
            oblist.append(ob)
        result = []
        if op == 0:
            # Copy operation
            for ob in oblist:
                orig_id = ob.getId()
                if not ob.cb_isCopyable():
                    raise CopyError(eNotSupported % escape(orig_id))
                # Give the object a chance to veto/prepare for the copy.
                try:
                    ob._notifyOfCopyTo(self, op=0)
                except ConflictError:
                    raise
                except:
                    raise CopyError(MessageDialog(
                        title="Copy Error",
                        message=sys.exc_info()[1],
                        action='manage_main'))
                id = self._get_id(orig_id)
                result.append({'id': orig_id, 'new_id': id})
                orig_ob = ob
                ob = ob._getCopy(self)
                ob._setId(id)
                notify(ObjectCopiedEvent(ob, orig_ob))
                self._setObject(id, ob)
                # Re-fetch so the copy is properly acquisition-wrapped in self.
                ob = self._getOb(id)
                # Copies must not inherit the source's WebDAV locks.
                ob.wl_clearLocks()
                ob._postCopy(self, op=0)
                compatibilityCall('manage_afterClone', ob, ob)
                notify(ObjectClonedEvent(ob))
            if REQUEST is not None:
                return self.manage_main(self, REQUEST, update_menu=1,
                                        cb_dataValid=1)
        elif op == 1:
            # Move operation
            for ob in oblist:
                orig_id = ob.getId()
                if not ob.cb_isMoveable():
                    raise CopyError(eNotSupported % escape(orig_id))
                try:
                    ob._notifyOfCopyTo(self, op=1)
                except ConflictError:
                    raise
                except:
                    raise CopyError(MessageDialog(
                        title="Move Error",
                        message=sys.exc_info()[1],
                        action='manage_main'))
                # Refuse to move an object into itself or a descendant.
                if not sanity_check(self, ob):
                    raise CopyError(
                        "This object cannot be pasted into itself")
                orig_container = aq_parent(aq_inner(ob))
                # Moving within the same container keeps the original id.
                if aq_base(orig_container) is aq_base(self):
                    id = orig_id
                else:
                    id = self._get_id(orig_id)
                result.append({'id': orig_id, 'new_id': id})
                notify(ObjectWillBeMovedEvent(ob, orig_container, orig_id,
                                              self, id))
                # try to make ownership explicit so that it gets carried
                # along to the new location if needed.
                ob.manage_changeOwnershipType(explicit=1)
                # Events are suppressed around the delete/add pair because the
                # move events above/below already describe the change.
                try:
                    orig_container._delObject(orig_id, suppress_events=True)
                except TypeError:
                    # Fallback for containers predating suppress_events.
                    orig_container._delObject(orig_id)
                    warnings.warn(
                        "%s._delObject without suppress_events is discouraged."
                        % orig_container.__class__.__name__,
                        DeprecationWarning)
                ob = aq_base(ob)
                ob._setId(id)
                try:
                    self._setObject(id, ob, set_owner=0, suppress_events=True)
                except TypeError:
                    self._setObject(id, ob, set_owner=0)
                    warnings.warn(
                        "%s._setObject without suppress_events is discouraged."
                        % self.__class__.__name__, DeprecationWarning)
                ob = self._getOb(id)
                notify(ObjectMovedEvent(ob, orig_container, orig_id, self, id))
                notifyContainerModified(orig_container)
                if aq_base(orig_container) is not aq_base(self):
                    notifyContainerModified(self)
                ob._postCopy(self, op=1)
                # try to make ownership implicit if possible
                ob.manage_changeOwnershipType(explicit=0)
            if REQUEST is not None:
                # A cut clipboard is one-shot: expire the cookie after pasting.
                REQUEST['RESPONSE'].setCookie('__cp', 'deleted',
                                    path='%s' % cookie_path(REQUEST),
                                    expires='Wed, 31-Dec-97 23:59:59 GMT')
                REQUEST['__cp'] = None
                return self.manage_main(self, REQUEST, update_menu=1,
                                        cb_dataValid=0)
        return result
security.declareProtected(view_management_screens, 'manage_renameForm')
manage_renameForm = DTMLFile('dtml/renameForm', globals())
security.declareProtected(view_management_screens, 'manage_renameObjects')
def manage_renameObjects(self, ids=[], new_ids=[], REQUEST=None):
"""Rename several sub-objects"""
if len(ids) != len(new_ids):
raise BadRequest('Please rename each listed object.')
for i in range(len(ids)):
if ids[i] != new_ids[i]:
self.manage_renameObject(ids[i], new_ids[i], REQUEST)
if REQUEST is not None:
return self.manage_main(self, REQUEST, update_menu=1)
return None
security.declareProtected(view_management_screens, 'manage_renameObject')
    def manage_renameObject(self, id, new_id, REQUEST=None):
        """Rename a particular sub-object.

        Implemented as a move within the same container, so the object
        receives ObjectWillBeMovedEvent/ObjectMovedEvent and _postCopy
        with op=1, just as a cut/paste would.
        """
        try:
            self._checkId(new_id)
        except:
            raise CopyError(MessageDialog(
                      title='Invalid Id',
                      message=sys.exc_info()[1],
                      action ='manage_main'))
        ob = self._getOb(id)
        # A WebDAV-locked object must not be renamed out from under its lock.
        if ob.wl_isLocked():
            raise ResourceLockedError('Object "%s" is locked via WebDAV'
                                      % ob.getId())
        if not ob.cb_isMoveable():
            raise CopyError(eNotSupported % escape(id))
        self._verifyObjectPaste(ob)
        # Give the object a chance to veto/prepare for the move.
        try:
            ob._notifyOfCopyTo(self, op=1)
        except ConflictError:
            raise
        except:
            raise CopyError(MessageDialog(
                      title="Rename Error",
                      message=sys.exc_info()[1],
                      action ='manage_main'))
        notify(ObjectWillBeMovedEvent(ob, self, id, self, new_id))
        # Events are suppressed around the delete/add pair because the move
        # events above/below already describe the change.
        try:
            self._delObject(id, suppress_events=True)
        except TypeError:
            # Fallback for containers predating suppress_events.
            self._delObject(id)
            warnings.warn(
                "%s._delObject without suppress_events is discouraged." %
                self.__class__.__name__, DeprecationWarning)
        ob = aq_base(ob)
        ob._setId(new_id)
        # Note - because a rename always keeps the same context, we
        # can just leave the ownership info unchanged.
        try:
            self._setObject(new_id, ob, set_owner=0, suppress_events=True)
        except TypeError:
            self._setObject(new_id, ob, set_owner=0)
            warnings.warn(
                "%s._setObject without suppress_events is discouraged." %
                self.__class__.__name__, DeprecationWarning)
        ob = self._getOb(new_id)
        notify(ObjectMovedEvent(ob, self, id, self, new_id))
        notifyContainerModified(self)
        ob._postCopy(self, op=1)
        if REQUEST is not None:
            return self.manage_main(self, REQUEST, update_menu=1)
        return None
# Why did we give this a manage_ prefix if its really
# supposed to be public since it does its own auth ?
#
# Because it's still a "management" function.
security.declarePublic('manage_clone')
    def manage_clone(self, ob, id, REQUEST=None):
        """Clone an object, creating a new object with the given id.

        Unlike manage_pasteObjects, the source object is passed directly
        (no clipboard involved).  Returns the acquisition-wrapped clone.
        """
        if not ob.cb_isCopyable():
            raise CopyError(eNotSupported % escape(ob.getId()))
        try:
            self._checkId(id)
        except:
            raise CopyError(MessageDialog(
                      title='Invalid Id',
                      message=sys.exc_info()[1],
                      action ='manage_main'))
        self._verifyObjectPaste(ob)
        # Give the object a chance to veto/prepare for the copy.
        try:
            ob._notifyOfCopyTo(self, op=0)
        except ConflictError:
            raise
        except:
            raise CopyError(MessageDialog(
                title="Clone Error",
                message=sys.exc_info()[1],
                action='manage_main'))
        orig_ob = ob
        ob = ob._getCopy(self)
        ob._setId(id)
        notify(ObjectCopiedEvent(ob, orig_ob))
        self._setObject(id, ob)
        # Re-fetch so the clone is properly acquisition-wrapped in self.
        ob = self._getOb(id)
        ob._postCopy(self, op=0)
        compatibilityCall('manage_afterClone', ob, ob)
        notify(ObjectClonedEvent(ob))
        return ob
    def cb_dataValid(self):
        # Return true if clipboard data seems valid.
        # The bare except is deliberate best-effort: a missing or
        # undecodable '__cp' cookie simply means "no valid clipboard".
        try: cp=_cb_decode(self.REQUEST['__cp'])
        except: return 0
        return 1
    def cb_dataItems(self):
        # List of objects in the clip board
        # Best-effort: any failure to decode the '__cp' cookie yields [].
        try: cp=_cb_decode(self.REQUEST['__cp'])
        except: return []
        oblist=[]
        app = self.getPhysicalRoot()
        # cp is (op, [moniker dumps]); rebind each moniker to a live object.
        for mdata in cp[1]:
            m = loadMoniker(mdata)
            oblist.append(m.bind(app))
        return oblist
validClipData=cb_dataValid
def _verifyObjectPaste(self, object, validate_src=1):
# Verify whether the current user is allowed to paste the
# passed object into self. This is determined by checking
# to see if the user could create a new object of the same
# meta_type of the object passed in and checking that the
# user actually is allowed to access the passed in object
# in its existing context.
#
# Passing a false value for the validate_src argument will skip
# checking the passed in object | |
<filename>Detectors/Online_Detectors/Tests/VAE_Covariate_Tests.py
import torch
import numpy as np
import math
import torchvision
from torch import nn
from pytorch_lightning import Trainer
import time
import os
import OnlineShiftDetectors
from pytorch_lightning.core.lightning import LightningModule
import functools
from torch.autograd import Variable
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def set_up_data():
    """Build the dataloaders used by the VAE covariate-shift tests.

    Returns:
        cifar10_trainloader: CIFAR10 training set loader.
        cifar10_testloader: CIFAR10 test set loader.
        gen_testloader: loader for the generated (covariate-shifted) set,
            restored from a pre-built dataloader checkpoint.
    """
    batch_size = int(1e2)
    # Normalization constants are the standard CIFAR10 channel mean/std.
    transform = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])
    # NOTE(review): dataset/checkpoint paths are machine-specific; consider
    # making them parameters or reading them from the environment.
    cifar10_trainset = torchvision.datasets.CIFAR10('/home/fmejia/fmejia/Cypercat/cyphercat/datasets//', train=True, transform=transform, download=True)
    cifar10_trainloader = torch.utils.data.DataLoader(cifar10_trainset, batch_size=batch_size, shuffle=True, num_workers=16)
    cifar10_testset = torchvision.datasets.CIFAR10('/home/fmejia/fmejia/Cypercat/cyphercat//datasets//', train=False, transform=transform, download=True)
    cifar10_testloader = torch.utils.data.DataLoader(cifar10_testset, batch_size=batch_size, shuffle=True, num_workers=16)
    gen_testloader = torch.load('/home/jgornet/Generative_Models/Covariate_Measurement_Models/altcifar_dataloader.pth')
    return cifar10_trainloader,cifar10_testloader,gen_testloader
class ResnetGenerator(LightningModule):
    """Resnet-based variational autoencoder.

    An encoder (downsampling convolutions) maps an image to latent mean and
    variance maps, a latent sample is drawn with the reparameterization
    trick, and a decoder (Resnet blocks + upsampling) reconstructs the
    image.  Architecture adapted from Justin Johnson's fast-neural-style
    project (https://github.com/jcjohnson/fast-neural-style).
    """

    def __init__(self, learning_rate=0.001):
        """Construct the generator.

        Args:
            learning_rate (float): base learning rate for the Adam optimizer.
        """
        use_dropout = True
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True,
                                       track_running_stats=True)
        input_nc = 3       # channels in input images
        output_nc = 3      # channels in output images
        ngf = 64           # filters in the first conv layer
        n_blocks = 6       # number of ResNet blocks in the decoder
        padding_type = 'reflect'
        assert(n_blocks >= 0)
        super(ResnetGenerator, self).__init__()
        # BatchNorm layers carry their own affine bias, so conv bias is only
        # needed when the norm layer is InstanceNorm.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        # Encoder branch producing the latent mean.
        model = [nn.ReflectionPad2d(3),
                 nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                 norm_layer(ngf),
                 nn.LeakyReLU(0.2, inplace=True)]
        n_downsampling = 5
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            if i < (n_downsampling - 1):
                model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                    stride=2, padding=1, bias=use_bias),
                          norm_layer(ngf * mult * 2),
                          nn.LeakyReLU(0.2, inplace=True)]
            else:
                # Final stage: raw latent, no norm/activation.
                model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                    stride=2, padding=1, bias=use_bias)]

        # Encoder branch producing the latent variance (same topology).
        model2 = [nn.ReflectionPad2d(3),
                  nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                  norm_layer(ngf),
                  nn.LeakyReLU(0.2, inplace=True)]
        n_downsampling = 5
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            if i < (n_downsampling - 1):
                model2 += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                     stride=2, padding=1, bias=use_bias),
                           norm_layer(ngf * mult * 2),
                           nn.LeakyReLU(0.2, inplace=True)]
            else:
                model2 += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
                                     stride=2, padding=1, bias=use_bias)]

        # First decoder stage: upsample the latent back to the Resnet width.
        model_upsample1 = []
        for i in range(n_downsampling - 2):
            mult = 2 ** (n_downsampling - i)
            model_upsample1 += [
                nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                   kernel_size=3, stride=2,
                                   padding=1, output_padding=1,
                                   bias=use_bias),
                norm_layer(int(ngf * mult / 2)),
                nn.LeakyReLU(0.2, inplace=True)]

        n_downsampling = 2
        mult = 2 ** (n_downsampling)
        model_resnet = []
        for i in range(n_blocks):  # add ResNet blocks
            model_resnet += [ResnetBlock(ngf * mult, padding_type=padding_type,
                                         norm_layer=norm_layer,
                                         use_dropout=use_dropout,
                                         use_bias=use_bias)]

        # Final decoder stage: upsample to full resolution and map to RGB.
        model_upsample = []
        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model_upsample += [
                nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                   kernel_size=3, stride=2,
                                   padding=1, output_padding=1,
                                   bias=use_bias),
                norm_layer(int(ngf * mult / 2)),
                nn.LeakyReLU(0.2, inplace=True)]
        model_upsample += [nn.ReflectionPad2d(3)]
        model_upsample += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
        model_upsample += [nn.Tanh()]

        self.model = nn.Sequential(*model)
        self.model_variance = nn.Sequential(*model2)
        self.model_resnet = nn.Sequential(*model_resnet)
        self.model_upsample1 = nn.Sequential(*model_upsample1)
        self.model_upsample = nn.Sequential(*model_upsample)
        # CIFAR10 channel mean/std, used to map decoder output back into the
        # same normalized space as the inputs.
        self.mean = torch.tensor((0.4914, 0.4822, 0.4465))
        self.mean = self.mean.view(-1, 1, 1)
        self.var = torch.tensor((0.2023, 0.1994, 0.2010))
        self.var = self.var.view(-1, 1, 1)
        self.loss_function = nn.SmoothL1Loss()
        self.learning_rate = learning_rate

    def forward(self, input, decode=False):
        """Run the VAE.

        Args:
            input: an image batch, or a latent tensor when ``decode`` is True.
            decode (bool): if True, skip the encoder and only decode.

        Returns:
            decode=True: the decoded image batch.
            decode=False: tuple (reconstruction, latent_sample, mean, variance).
        """
        # Keep normalization constants on the same device as the data.  The
        # original hard-coded torch.cuda.FloatTensor / .cuda(), which broke
        # CPU execution (and the decode path broke GPU execution).
        norm_mean = self.mean.to(input.device)
        norm_var = self.var.to(input.device)
        if decode:
            x = self.model_upsample1(input)
            x = self.model_resnet(x)
            x = self.model_upsample(x)
            x = x / 2 + 0.5               # tanh output [-1, 1] -> [0, 1]
            x = (x - norm_mean) / norm_var
            return x
        mean = self.model(input)
        variance = self.model_variance(input)
        # Reparameterization trick; randn_like follows device and dtype.
        sample = torch.randn_like(mean)
        x1 = mean + (variance * sample)
        x = self.model_upsample1(x1)
        x = self.model_resnet(x)
        x = self.model_upsample(x)
        x = x / 2 + 0.5
        x = (x - norm_mean) / norm_var
        return x, x1, mean, variance

    def _vae_loss(self, batch):
        # Shared loss for training and validation: SmoothL1 reconstruction
        # plus a (down-weighted) KL-style regularizer.
        # NOTE(review): `variance` is an unconstrained network output, so
        # torch.log(variance ** 2) diverges if it reaches zero — confirm
        # this matches the intended training recipe before hardening.
        imgs, labels = batch
        out_img, embed_out, mean, variance = self(imgs)
        AE_loss = self.loss_function(out_img, imgs)
        kl_loss = (mean ** 2 + variance ** 2 - torch.log(variance ** 2) - 1).mean()
        return AE_loss + kl_loss / 10

    def validation_step(self, batch, batch_idx):
        """Lightning validation hook; returns the combined VAE loss."""
        return {'val_loss': self._vae_loss(batch)}

    def validation_epoch_end(self, outputs):
        """Average the per-batch validation losses for logging."""
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        tensorboard_logs = {'val_loss': avg_loss}
        return {'val_loss': avg_loss, 'log': tensorboard_logs}

    def training_step(self, batch, batch_idx):
        """Lightning training hook; returns the combined VAE loss."""
        loss = self._vae_loss(batch)
        tensorboard_logs = {'train_loss': loss}
        return {'loss': loss, 'log': tensorboard_logs}

    def configure_optimizers(self):
        """Adam optimizer with beta1=0.5 (GAN-style momentum)."""
        beta1 = 0.5
        return torch.optim.Adam(self.parameters(), lr=(self.learning_rate),
                                betas=(beta1, 0.999))
class ResnetBlock(nn.Module):
    """A residual block: two conv/norm layers wrapped by a skip connection."""

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Initialize the Resnet block.

        Args:
            dim (int): number of channels in the conv layers.
            padding_type (str): 'reflect' | 'replicate' | 'zero'.
            norm_layer: normalization layer constructor.
            use_dropout (bool): whether to insert a dropout layer.
            use_bias (bool): whether the conv layers use bias.

        Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
        """
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer,
                                                use_dropout, use_bias)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Build the conv stack: [pad] conv norm lrelu [dropout] [pad] conv norm.

        Returns an nn.Sequential; raises NotImplementedError for an unknown
        padding_type.
        """
        def padding_modules():
            # Returns (pad modules, conv padding) for the chosen scheme:
            # explicit pad layers for reflect/replicate, built-in conv
            # padding for 'zero'.
            if padding_type == 'reflect':
                return [nn.ReflectionPad2d(1)], 0
            if padding_type == 'replicate':
                return [nn.ReplicationPad2d(1)], 0
            if padding_type == 'zero':
                return [], 1
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)

        layers = []
        pads, p = padding_modules()
        layers += pads
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim),
                   nn.LeakyReLU(0.2, inplace=True)]
        if use_dropout:
            layers += [nn.Dropout(0.2)]
        pads, p = padding_modules()
        layers += pads
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
                   norm_layer(dim)]
        return nn.Sequential(*layers)

    def forward(self, x):
        """Forward pass with the residual skip connection."""
        return x + self.conv_block(x)
def run_test(iteration,cifar10_trainloader,cifar10_testloader,gen_testloader):
# Function: run_test
# Inputs: iteration (int)
# cifar10_trainloader (pytorch dataloader)
# cifar10_testloader (pytorch dataloader)
# gen_testloader (pytorch dataloader)
# Process: runs test for VAE covariate detector
# Output: none
stats_path = '/home/jgornet/Covariate_Testing/statistics'
model_path = '/home/jgornet/Covariate_Testing/models'
model_file = os.path.join(model_path,'model_num_' + str(iteration) + '.ckpt')
train_file = os.path.join(stats_path,'train_num_' + str(iteration) + '.npy')
test_file = os.path.join(stats_path,'test_num_' + str(iteration) + '.npy')
gen_file = os.path.join(stats_path,'gen_num_' + str(iteration) + '.npy')
model = ResnetGenerator()
start_time = time.time()
trainer = Trainer(gpus=4, num_nodes=1, distributed_backend='dp',auto_lr_find=True,profiler=True,max_epochs=3,checkpoint_callback=False)
trainer.fit(model, cifar10_trainloader, cifar10_testloader)
#trainer.save_checkpoint(model_file)
#model = ResnetGenerator().load_from_checkpoint(checkpoint_path=model_file)
model.to(device)
variational_detector = OnlineShiftDetectors.VariationalDetector(0.1,2048)
latent_variable_list = variational_detector.set_latent_distribution(model,cifar10_trainloader,device,100)
r_train = np.array([])
for epoch in range(1):
for i, batch in enumerate(cifar10_trainloader, 0):
imgs, labels = batch
imgs, labels = imgs.to(device), labels.to(device)
_, embed_out, _, _ = model(imgs.to(device))
r_var = variational_detector.shift_filter(embed_out.cpu(),100)
r_train = np.append(r_train,r_var.cpu().detach().numpy())
r_test = np.array([])
for epoch in range(5):
for i, batch in enumerate(cifar10_testloader, 0):
imgs, labels = batch
imgs, labels = imgs.to(device), labels.to(device)
_, embed_out, _, _ = model(imgs.to(device))
r_var = variational_detector.shift_filter(embed_out.cpu(),100)
r_test = np.append(r_test,r_var.cpu().detach().numpy())
| |
TypeError:
pass
#Perform some basic sanity checks with parameters
if self.parmDict['timeStart'] > self.parmDict['timeStop']:
msg = 'Input start time must come before stop time.'
raise IOError(msg)
if (len(self.parmDict['inFieldNames']) != \
len(self.parmDict['outFieldNames']) or
len(self.parmDict['inFieldNames']) != \
len(self.parmDict['outUnits']) or
len(self.parmDict['inFieldNames']) != \
len(self.parmDict['extraDimLabel'])):
msg = 'All field/unit inputs ' + \
'should have the same number of elements.'
raise IOError(msg)
# create numpy arrays to hold our data
(minRow, maxRow, minCol, maxCol) = griddef.indLims()
nRows = maxRow - minRow + 1
nCols = maxCol - minCol + 1
nValidPixels = numpy.zeros((nRows, nCols))
sumWght = numpy.zeros((nRows, nCols, 1)) # needs extra dim to generalize for 3D vars
sumVars = dict()
for field, size in zip(self.parmDict['inFieldNames'], self.parmDict['extraDimSize']):
if size:
sumVars[field] = numpy.zeros((nRows, nCols, size))
else:
# pad with a singlet dim if it was 2D
sumVars[field] = numpy.zeros((nRows, nCols, 1))
# loop over maps
if not isinstance(maps, list):
maps = [maps] # create list if we only got a single map
for map in maps:
# open up context manager
with map.pop('parser') as parser: # remove parser for looping
if verbose:
print('Processing {0} for output at {1}.'.format(\
parser.name, str(datetime.datetime.now())))
# loop over gridboxes in map and calculate weights
for (gridCell, pixTup) in map.iteritems():
# translate gridCell to account for possible non-zero ll corner
gridRow = gridCell[0]
gridCol = gridCell[1]
gridInd = (gridRow - minRow, gridCol - minCol)
# get the values needed to calculate weight
for (pxInd, unused_weight) in pixTup:
# check summary flag
sumFlag = parser.get_cm(self.parmDict['overallQualFlag'], pxInd)
if sumFlag % 2:
continue
# check cloud fraction flag
cFrac = parser.get_cm(self.parmDict['cloudFrac'], pxInd)
if not (cFrac <= self.parmDict['cloudFractUpperCutoff']):
continue
# check solar zenith angle flag
solZenAng = parser.get_cm(self.parmDict['solarZenithAngle'], pxInd)
if solZenAng > self.parmDict['solarZenAngUpperCutoff']:
continue
# check time flag
time = parser.get_cm(self.parmDict['time'], pxInd)
# calculate and factor in offset if the user wanted us to
if self.parmDict['timeComparison'] == 'local':
pixLon = parser.get_cm(self.parmDict['longitude'], pxInd)
offset = utils.UTCoffset_from_lon(pixLon)
time += offset
if time < self.parmDict['timeStart'] or time > self.parmDict['timeStop']:
continue
# read in all the data, abandon ship if data is all NaN
rawDataDict = {}
try:
for field in self.parmDict['inFieldNames']:
rawData = parser.get_cm(field, pxInd)
if numpy.isnan(rawData).all():
raise invalidPixCeption
rawDataDict[field] = rawData
except invalidPixCeption:
continue
# compute the weight
fov = pxInd[self.parmDict['pixIndXtrackAxis']]
weight = _OMNO2e_formula(cFrac, fov)
assert weight != numpy.NaN
if weight > 0:
nValidPixels[gridInd] += 1
# add the weight tot the total for this cell
sumWght[gridInd] += weight
for field in self.parmDict['inFieldNames']:
weightVals = rawDataDict[field] * weight
if weightVals.size > 1:
sumVars[field][gridInd] = numpy.nansum([sumVars[field][gridInd], weightVals], axis=0)
else:
sumVars[field][gridInd] = numpy.nansum([sumVars[field][gridInd][0], weightVals])
map['parser'] = parser # return parser to map
# divide out variables by weights to get avgs.
oldSettings = numpy.seterr(divide='ignore')
avgs = dict()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for (field,var) in sumVars.iteritems():
unfiltAvgs = var/sumWght
filtAvgs = numpy.where(sumWght != 0, unfiltAvgs, \
self.parmDict['fillVal'])
# strip trailing singlet for 2D arrays
if filtAvgs.shape[-1] == 1:
avgs[field] = filtAvgs.reshape(filtAvgs.shape[0:2])
else:
avgs[field] = filtAvgs
numpy.seterr(divide=oldSettings['divide'])
# associate coindexed parameters into dicts
# so we can loop by field
outFnames = dict(izip(self.parmDict['inFieldNames'], self.parmDict['outFieldNames']))
units = dict(izip(self.parmDict['inFieldNames'], self.parmDict['outUnits']))
extraDim = dict(izip(self.parmDict['inFieldNames'], self.parmDict['extraDimLabel']))
# write out results to a netcdf file
outFid = netCDF4.Dataset(outfilename, 'w', format='NETCDF3_CLASSIC')
# create the 2 dimensions all files use
outFid.createDimension('row', nRows)
outFid.createDimension('col', nCols)
# write global attributes
setattr(outFid, 'Version', vsnmsg(version))
setattr(outFid, 'File_start_time', utils.nsecs_to_timestr(self.parmDict['timeStart'], '00:00:00 01-01-1993'))
setattr(outFid, 'File_end_time', utils.nsecs_to_timestr(self.parmDict['timeStop'], '00:00:00 01-01-1993'))
setattr(outFid, 'Max_valid_cloud_fraction', self.parmDict['cloudFractUpperCutoff'])
setattr(outFid, 'Max_valid_solar_zenith_angle', self.parmDict['solarZenAngUpperCutoff'])
setattr(outFid, 'Time_comparison_scheme', self.parmDict['timeComparison'])
fileListStr = ' '.join([map['parser'].name for map in maps])
setattr(outFid, 'Input_files', fileListStr)
setattr(outFid, 'Projection', griddef.__class__.__name__[:-8])
for (k,v) in griddef.parms.iteritems():
setattr(outFid, k, v)
# loop over fields and write variables
for field in self.parmDict['inFieldNames']:
# create tuple of dimensions, defining new dim
# if necessary
if len(avgs[field].shape) == 2:
# only row/cols
varDims = ('row', 'col')
elif len(avgs[field].shape) == 3:
# has extra dim
dimName = extraDim[field]
dimSize = avgs[field].shape[2]
if dimName not in outFid.dimensions.keys():
outFid.createDimension(dimName, dimSize)
varDims = ('row', 'col', dimName)
# create and assign value to variable
varHandle = outFid.createVariable(outFnames[field], 'd', varDims, fill_value=self.parmDict['fillVal'])
varHandle[:] = avgs[field]
# assign variable attributes
setattr(varHandle, 'Units', units[field])
# Write out the pixel counts if the user requested them
if self.parmDict['includePixelCount']:
varDims = ('row', 'col')
varHandle = outFid.createVariable('ValidPixelCount', 'i', varDims,
fill_value=self.parmDict['fillVal'])
varHandle[:] = nValidPixels
outFid.close()
# create a dict with teh same data as avgs, but diff names
outAvg = dict()
for (k,v) in avgs.iteritems():
outAvg[outFnames[k]] = v
if self.parmDict['includePixelCount']:
outAvg['ValidPixelCount'] = nValidPixels
return outAvg
class wght_avg_netCDF(out_func):
'''
Generalized weighted average algorithm
Designed to compute the average of an arbitrary number of desired
parameters, with the value weights based on an arbitrary number of input
parameters. Note that values may be weighted according to their own value.
The output will be in the form of a netcdf file with name determined by the
outFileName parameter. This netCDF file will have dimensions determined by
the grid_geo file, as well as additional dimensions as required by the
input fields.
Owing to the complexity of the inputs required for this function and the
security problems posed by allowing users to input functions to be
evaluated, this output function does not support the I/O interface at this
time. It is designed to subclassed.
This function (and therefore subclasses of this function) at present can
only handle a single input map. It may be extended to properly handle
multiple input maps at some point in the future, but this is difficult
because the filter function is expected to apply to all pixels in the cell
(which would require looping over all the maps to find all the pixels)
but also requires a reference to the parser (which would require those
parsers be held open)
parmDict must contain the following keys:
time:
The field associated with the timestamps. Timestamps may be in any
format so long as a function is provided to convert them to Unix
timestamp values (as this is what the function will use internally)
longitude:
Field with the longitudes at cell centers. Used to estimate
timezones of the pixels if local is selected for timeComparison.
Not used when timeComparison is 'UTC'
inFieldNames:
List of strings corresponding to fields for which output is
desired. These must be valid keys for the parser. Each is output
as a seperate variable in the netcdf output file.
outFieldNames:
List of strings corresponding to the names the output variables
should have in the final netCDF file. Must be of the same length
and co-indexed to the list above.
outUnits:
List of strings corresponding to the labels for the units of each
output variable. These will be attached as the "units" attribute
for each variable in the output netCDF file. Must be of the same
length and co-indexed to the lists above.
logNormal:
Vector indicating whether or not we want to take the
lognormal mean (as opposed to the simple, arithmetic mean). If
this parameter is set to "True", the mean will be taken as follows:
logData = log(data)
logAvg = sum(logData*wghts)/sum(wghts)
avg = 10^logAvg
whereas if this parameter is set to "False" the mean will be simply:
avg = sum(data*wghts)/sum(wghts)
To allow finer-grained control of the output, logNormal must be
set individually for each output field (as it may be appropriate
to use the log normal distribution only for select fields). Thus,
logNormal must be of the same length and co-indexed to the lists
above.
dimLabels:
List of tuple-like strings(delimited by periods with no whitespace),
each of which contains as many strings as there are
extra dimensions in the corresponding field. IE, if a field has
dimensions (xtrack, time, layer, quant) and we allocate along
xtrack and time, then the tuple-like string for that field should be
"(layer.quant)" so | |
<filename>blueqat/pauli.py
# Copyright 2019 The Blueqat Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The module for calculate Pauli matrices."""
from collections import defaultdict, namedtuple
from functools import reduce
from itertools import combinations, product
from numbers import Number, Integral
from math import pi
import numpy as np
# Base namedtuple for the single-qubit Pauli operators; `n` is the qubit index.
_PauliTuple = namedtuple("_PauliTuple", "n")
# Precomputed pi/2 constant.
half_pi = pi / 2
def pauli_from_char(ch, n=0):
    """Make a Pauli operator from a single character.

    Args:
        ch (str): "X" or "Y" or "Z" or "I" (case-insensitive).
        n (int, optional): Qubit index the operator acts on.
    Returns:
        If ch is "X" => X, "Y" => Y, "Z" => Z, "I" => I
    Raises:
        ValueError: When ch is not "X", "Y", "Z" nor "I".
    """
    ch = ch.upper()
    if ch == "I":
        return I
    ctor = {"X": X, "Y": Y, "Z": Z}.get(ch)
    if ctor is None:
        raise ValueError("ch shall be X, Y, Z or I")
    return ctor(n)
def term_from_chars(chars):
    """Make Pauli's Term from chars which is written by "X", "Y", "Z" or "I".
    e.g. "XZIY" => X(0) * Z(1) * Y(3)
    Args:
        chars (str): Written in "X", "Y", "Z" or "I".
    Returns:
        Term: A `Term` object.
    Raises:
        ValueError: When chars contains a character which is not "X", "Y", "Z" nor "I".
    """
    # Thin module-level wrapper around the Term classmethod-style factory.
    return Term.from_chars(chars)
def to_term(pauli):
    """Convert to Term from Pauli operator (X, Y, Z, I).
    Args:
        pauli (X, Y, Z or I): A Pauli operator
    Returns:
        Term: A `Term` object.
    """
    # Delegates to the operator's own conversion method.
    return pauli.to_term()
def to_expr(term):
    """Convert to Expr from Term or Pauli operator (X, Y, Z, I).
    Args:
        term: (Term, X, Y, Z or I): A Term or Pauli operator.
    Returns:
        Expr: An `Expr` object.
    """
    # Works for both Terms and bare Pauli operators, which share this method.
    return term.to_expr()
def commutator(expr1, expr2):
    """Returns [expr1, expr2] = expr1 * expr2 - expr2 * expr1.
    Args:
        expr1 (Expr, Term or Pauli operator): Pauli's expression.
        expr2 (Expr, Term or Pauli operator): Pauli's expression.
    Returns:
        Expr: expr1 * expr2 - expr2 * expr1.
    """
    # Normalize both operands to simplified Expr objects first.
    lhs = expr1.to_expr().simplify()
    rhs = expr2.to_expr().simplify()
    difference = lhs * rhs - rhs * lhs
    return difference.simplify()
def is_commutable(expr1, expr2, eps=0.00000001):
    """Test whether expr1 and expr2 are commutable.
    Args:
        expr1 (Expr, Term or Pauli operator): Pauli's expression.
        expr2 (Expr, Term or Pauli operator): Pauli's expression.
        eps (float, optional): Machine epsilon.
            If |[expr1, expr2]| < eps, consider it is commutable.
    Returns:
        bool: if expr1 and expr2 are commutable, returns True, otherwise False.
    """
    # Sum |c|^2 over the commutator's coefficients; near-zero norm means
    # the commutator vanishes and the expressions commute.
    norm_sq = 0.0
    for coeff in commutator(expr1, expr2).coeffs():
        norm_sq += (coeff * coeff.conjugate()).real
    return norm_sq < eps
# To avoid pylint error
def _n(pauli):
    """Return the qubit index of a Pauli operator (module-level accessor)."""
    return pauli.n
def _GetItem(self_, n):
    """Construct a new operator of the same concrete type, acting on qubit `n`.

    NOTE(review): not referenced anywhere in this chunk — possibly legacy;
    confirm before removing.
    """
    return type(self_)(n)
class _PauliImpl:
@property
def op(self):
"""Return operator type (X, Y, Z, I)"""
return self.__class__.__name__[1]
@property
def is_identity(self):
"""If `self` is I, returns True, otherwise False."""
return self.op == "I"
def __hash__(self):
return hash((self.op, _n(self)))
def __eq__(self, other):
if isinstance(other, _PauliImpl):
if self.is_identity:
return other.is_identity
return _n(self) == _n(other) and self.op == other.op
if isinstance(other, Term):
return self.to_term() == other
if isinstance(other, Expr):
return self.to_expr() == other
return NotImplemented
def __ne__(self, other):
return not self == other
def __mul__(self, other):
if isinstance(other, Number):
return Term.from_pauli(self, other)
if not isinstance(other, _PauliImpl):
return NotImplemented
if self.is_identity:
return other.to_term()
if other.is_identity:
return self.to_term()
if _n(self) == _n(other) and self.op == other.op:
return I.to_term()
return Term.from_paulipair(self, other)
def __rmul__(self, other):
if isinstance(other, Number):
return Term.from_pauli(self, other)
return NotImplemented
def __truediv__(self, other):
if isinstance(other, Number):
if other:
return Term.from_pauli(self, 1.0 / other)
raise ZeroDivisionError
return NotImplemented
def __add__(self, other):
return self.to_expr() + other
def __radd__(self, other):
return other + self.to_expr()
def __sub__(self, other):
return self.to_expr() - other
def __rsub__(self, other):
return other - self.to_expr()
def __neg__(self):
return Term.from_pauli(self, -1.0)
def __repr__(self):
if self.is_identity:
return "I"
return self.op + "[" + str(_n(self)) + "]"
def to_term(self):
"""Convert to Pauli Term"""
return Term.from_pauli(self)
def to_expr(self):
"""Convert to Pauli Expr"""
return self.to_term().to_expr()
_matrix = {
'I': np.array([[1, 0], [0, 1]], dtype=complex),
'X': np.array([[0, 1], [1, 0]], dtype=complex),
'Y': np.array([[0, -1j], [1j, 0]], dtype=complex),
'Z': np.array([[1, 0], [0, -1]], dtype=complex)
}
@property
def matrix(self):
"""Matrix reprentation of this operator."""
return self._matrix[self.op].copy()
def to_matrix(self, n_qubits=-1):
"""Convert to the matrix."""
if self.is_identity:
if n_qubits == -1:
return self.matrix
else:
return reduce(np.kron, [I.matrix for _ in range(n_qubits)])
if n_qubits == -1:
n_qubits = _n(self) + 1
if _n(self) == 0:
mat = self.matrix
else:
mat = reduce(np.kron, [I.matrix for _ in range(_n(self))])
mat = np.kron(mat, self.matrix)
if n_qubits > _n(self) + 1:
mat = reduce(np.kron, [I.matrix for _ in range(n_qubits - _n(self) - 1)], mat)
return mat
class _X(_PauliImpl, _PauliTuple):
    """Pauli's X (bit-flip) operator acting on qubit ``n``."""
class _Y(_PauliImpl, _PauliTuple):
    """Pauli's Y (bit-and-phase-flip) operator acting on qubit ``n``."""
class _Z(_PauliImpl, _PauliTuple):
    """Pauli's Z (phase-flip) operator acting on qubit ``n``."""
class _PauliCtor:
    """Callable/indexable factory so operators can be written ``X(n)`` or ``X[n]``."""
    def __init__(self, ty):
        # ty: the concrete operator class (_X, _Y or _Z) to construct.
        self.ty = ty
    def __call__(self, n):
        """Return the wrapped operator acting on qubit `n`."""
        return self.ty(n)
    def __getitem__(self, n):
        """Index-style alias: ``X[n]`` is the same as ``X(n)``."""
        return self.ty(n)
# Public factories: build operators with X(n)/X[n], Y(n)/Y[n], Z(n)/Z[n].
X = _PauliCtor(_X)
Y = _PauliCtor(_Y)
Z = _PauliCtor(_Z)
class _I(_PauliImpl, namedtuple("_I", "")):
    """Identity operator"""
    def __call__(self):
        # Calling I simply returns the same singleton instance.
        return self
# Module-level singleton identity operator.
I = _I()
# Base namedtuple for Term: `ops` is a tuple of Pauli operators, `coeff` a number.
_TermTuple = namedtuple("_TermTuple", "ops coeff")
class Term(_TermTuple):
"""Multiplication of Pauli matrices with coefficient.
Note that this class is immutable.
Multiplied Pauli matrices are very important for quantum computation
because it is an unitary matrix (without coefficient) and also
it can be consider the time evolution of the term (with real coefficient)
without Suzuki-Trotter expansion.
"""
    @staticmethod
    def from_paulipair(pauli1, pauli2):
        """Make new Term from two Pauli operator."""
        # join_ops cancels the pair when the two operators are identical.
        return Term(Term.join_ops((pauli1,), (pauli2,)), 1.0)
    @staticmethod
    def from_pauli(pauli, coeff=1.0):
        """Make new Term from an Pauli operator"""
        # The identity (or a zero coefficient) carries no operators.
        if pauli.is_identity or coeff == 0:
            return Term((), coeff)
        return Term((pauli,), coeff)
    @staticmethod
    def from_ops_iter(ops, coeff):
        """For internal use: build a Term from an iterable of operators and a coefficient."""
        return Term(tuple(ops), coeff)
    @staticmethod
    def from_chars(chars):
        """Make Pauli's Term from chars which is written by "X", "Y", "Z" or "I".
        e.g. "XZIY" => X(0) * Z(1) * Y(3)
        Args:
            chars (str): Written in "X", "Y", "Z" or "I".
        Returns:
            Term: A `Term` object.
        Raises:
            ValueError: When chars contains a character which is not "X", "Y", "Z" nor "I".
        """
        # The string position is the qubit index; "I" contributes nothing.
        paulis = [pauli_from_char(c, n) for n, c in enumerate(chars) if c != "I"]
        if not paulis:
            return 1.0 * I
        if len(paulis) == 1:
            return 1.0 * paulis[0]
        return reduce(lambda a, b: a * b, paulis)
@staticmethod
def join_ops(ops1, ops2):
"""For internal use."""
i = len(ops1) - 1
j = 0
while i >= 0 and j < len(ops2):
if ops1[i] == ops2[j]:
i -= 1
j += 1
else:
break
return ops1[:i + 1] + ops2[j:]
    @property
    def is_identity(self):
        """If `self` is I, returns True, otherwise False."""
        # A Term with no operators is a multiple of the identity.
        return not self.ops
    def __mul__(self, other):
        """Multiply by a number (scales the coefficient), a Term or a Pauli operator."""
        if isinstance(other, Number):
            return Term(self.ops, self.coeff * other)
        if isinstance(other, Term):
            # Adjacent equal operators cancel inside join_ops.
            ops = Term.join_ops(self.ops, other.ops)
            coeff = self.coeff * other.coeff
            return Term(ops, coeff)
        if isinstance(other, _PauliImpl):
            if other.is_identity:
                return self
            return Term(Term.join_ops(self.ops, (other,)), self.coeff)
        return NotImplemented
    def __rmul__(self, other):
        """Right multiplication: ``number * term`` or ``pauli * term``."""
        if isinstance(other, Number):
            return Term(self.ops, self.coeff * other)
        if isinstance(other, _PauliImpl):
            if other.is_identity:
                return self
            # Operand order matters: `other` sits on the left of the product.
            return Term(Term.join_ops((other,), self.ops), self.coeff)
        return NotImplemented
def __truediv__(self, other):
if isinstance(other, (int, float)):
if other:
return Term(self.ops, self.coeff / other)
raise ZeroDivisionError
return NotImplemented
    def __pow__(self, n):
        """Raise the Term to a non-negative integer power.

        Raises:
            ValueError: When `n` is negative.
        """
        if isinstance(n, Integral):
            if n < 0:
                raise ValueError("`pauli_term ** n` or `pow(pauli_term, n)`: " +
                        "n shall not be negative value.")
            if n == 0:
                return Term.from_pauli(I)
            # NOTE(review): the operator tuple is repeated without re-running
            # join_ops, so e.g. a squared operator is not cancelled here;
            # presumably simplify() handles that later — confirm.
            return Term(self.ops * n, self.coeff ** n)
        return NotImplemented
    def __add__(self, other):
        """Addition promotes the Term to an Expr."""
        return Expr.from_term(self) + other
    def __radd__(self, other):
        return other + Expr.from_term(self)
    def __sub__(self, other):
        """Subtraction promotes the Term to an Expr."""
        return Expr.from_term(self) - other
    def __rsub__(self, other):
        return other - Expr.from_term(self)
    def __neg__(self):
        """Negation flips the sign of the coefficient only."""
        return Term(self.ops, -self.coeff)
def __repr__(self):
if self.coeff == 0:
return "0*I"
if self.coeff == -1.0:
s_coeff = "-"
else:
s_coeff = str(self.coeff) + "*"
if self.ops == ():
s_ops = "I"
else:
s_ops = "*".join(op.op + "[" + repr(op.n) + "]" for op in self.ops)
return s_coeff + s_ops
def __eq__(self, other):
if isinstance(other, _PauliImpl):
other = other.to_term()
return _TermTuple.__eq__(self, other) or \
_TermTuple.__eq__(self.simplify(), | |
'algo': 'darknet', 'grid': False, 'config_filepath': 'pretrained-v2-pascal', 'species_set' : species_set, 'classify': True, 'p': 1.0, 'classifier_masking': True},
# {'label': 'SS1', 'algo': 'selective-search', 'species_set' : species_set},
# {'label': 'YOLO1', 'algo': 'darknet', 'config_filepath': 'pretrained-tiny-pascal', 'species_set' : species_set},
# {'label': 'YOLO2', 'algo': 'darknet', 'config_filepath': 'pretrained-v2-pascal', 'species_set' : species_set},
# {'label': 'FRCNN1', 'algo': 'faster-rcnn', 'config_filepath': 'pretrained-zf-pascal', 'species_set' : species_set},
# {'label': 'FRCNN2', 'algo': 'faster-rcnn', 'config_filepath': 'pretrained-vgg-pascal', 'species_set' : species_set},
# {'label': 'SSD1', 'algo': 'ssd', 'config_filepath': 'pretrained-300-pascal', 'species_set' : species_set},
# {'label': 'SSD2', 'algo': 'ssd', 'config_filepath': 'pretrained-512-pascal', 'species_set' : species_set},
# {'label': 'SSD3', 'algo': 'ssd', 'config_filepath': 'pretrained-300-pascal-plus', 'species_set' : species_set},
# {'label': 'SSD4', 'algo': 'ssd', 'config_filepath': 'pretrained-512-pascal-plus', 'species_set' : species_set},
# {'label': 'COMBINED', 'algo': '_COMBINED', 'species_set' : species_set},
# {'label': 'COMBINED~0.5', 'algo': '_COMBINED', 'species_set' : species_set, 'nms': True, 'nms_thresh': 0.50, 'line_dotted': True},
# {'label': 'COMBINED` 0.5', 'algo': '_COMBINED', 'species_set' : species_set, 'thresh': True, 'index_thresh': 0.5},
# {'label': 'COMBINED` 0.1', 'algo': '_COMBINED', 'species_set' : species_set, 'thresh': True, 'index_thresh': 0.1},
# {'label': 'COMBINED` 0.05', 'algo': '_COMBINED', 'species_set' : species_set, 'thresh': True, 'index_thresh': 0.05},
# {'label': 'COMBINED` 0.01', 'algo': '_COMBINED', 'species_set' : species_set, 'thresh': True, 'index_thresh': 0.01},
# {'label': 'COMBINED', 'algo': '_COMBINED', 'species_set' : species_set},
# {'label': 'COMBINED 0', 'algo': '_COMBINED', 'species_set' : species_set},
# {'label': 'COMBINED 2 None', 'algo': '_COMBINED', 'species_set' : species_set, 'nms': True, 'nms_thresh': 0.25, 'thresh': True, 'index_thresh': 0.25, 'classify': True, 'p': None, 'classifier_algo': 'svm', 'classifier_weight_filepath': None},
# {'label': 'COMBINED 3 None', 'algo': '_COMBINED', 'species_set' : species_set, 'nms': True, 'nms_thresh': 0.25, 'thresh': True, 'index_thresh': 0.25, 'classify': True, 'p': None, 'classifier_algo': 'svm', 'classifier_weight_filepath': 'localizer-zebra-10'},
# {'label': 'COMBINED 4 None', 'algo': '_COMBINED', 'species_set' : species_set, 'nms': True, 'nms_thresh': 0.25, 'thresh': True, 'index_thresh': 0.25, 'classify': True, 'p': None, 'classifier_algo': 'svm', 'classifier_weight_filepath': 'localizer-zebra-50'},
# {'label': 'COMBINED 2 0.5', 'algo': '_COMBINED', 'species_set' : species_set, 'nms': True, 'nms_thresh': 0.25, 'thresh': True, 'index_thresh': 0.25, 'classify': True, 'p': 'mult', 'classifier_algo': 'svm', 'classifier_weight_filepath': None},
# {'label': 'COMBINED 3 0.5', 'algo': '_COMBINED', 'species_set' : species_set, 'nms': True, 'nms_thresh': 0.25, 'thresh': True, 'index_thresh': 0.25, 'classify': True, 'p': 'mult', 'classifier_algo': 'svm', 'classifier_weight_filepath': 'localizer-zebra-10'},
# {'label': 'COMBINED 4 0.5', 'algo': '_COMBINED', 'species_set' : species_set, 'nms': True, 'nms_thresh': 0.25, 'thresh': True, 'index_thresh': 0.25, 'classify': True, 'p': 'mult', 'classifier_algo': 'svm', 'classifier_weight_filepath': 'localizer-zebra-50'},
# {'label': 'COMBINED 4', 'algo': '_COMBINED', 'species_set' : species_set, 'nms': True, 'nms_thresh': 0.1, 'thresh': True, 'index_thresh': 0.10, 'classify': True, 'classifier_algo': 'svm', 'classifier_weight_filepath': 'localizer-zebra-100'},
# {
# 'label' : 'C_0',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.image.zebra.rbf.1.0.pkl',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_1',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.10.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_2',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.20.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_3',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.30.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_4',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.40.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_5',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.50.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_6',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.60.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_7',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.70.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_8',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.80.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_9',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.90.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'C_10',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.localization.zebra.100.rbf.1.0',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'LINEAR,0.5',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.linear.0.5.pkl',
# },
# {
# 'label' : 'LINEAR,1.0',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.linear.1.0.pkl',
# },
# {
# 'label' : 'LINEAR,2.0',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.linear.2.0.pkl',
# },
# {
# 'label' : 'RBF,0.5',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.rbf.0.5.pkl',
# },
# {
# 'label' : 'RBF,1.0',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.rbf.1.0.pkl',
# },
# {
# 'label' : 'RBF,2.0',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.rbf.2.0.pkl',
# },
# {
# 'label' : 'LINEAR,0.5~0.5',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.linear.0.5.pkl',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# 'line_dotted' : True,
# },
# {
# 'label' : 'LINEAR,1.0~0.5',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.linear.1.0.pkl',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# 'line_dotted' : True,
# },
# {
# 'label' : 'LINEAR,2.0~0.5',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.linear.2.0.pkl',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# 'line_dotted' : True,
# },
# {
# 'label' : 'RBF,0.5~0.5',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.rbf.0.5.pkl',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# 'line_dotted' : True,
# },
# {
# 'label' : 'RBF,1.0~0.5',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.image.zebra.rbf.1.0.pkl',
# 'nms' : True,
# 'nms_thresh' : 0.30,
# # 'line_dotted' : True,
# },
# {
# 'label' : 'RBF,2.0~0.5',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models/classifier.svm.image.zebra.rbf.2.0.pkl',
# 'nms' : True,
# 'nms_thresh' : 0.50,
# 'line_dotted' : True,
# },
# {
# 'label' : 'WIC',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'classify' : True,
# 'classifier_algo': 'svm',
# 'classifier_weight_filepath': '/home/jason/code/wbia/models-bootstrap/classifier.svm.image.zebra.pkl',
# },
# {
# 'label' : 'COMBINED ~0.75',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'nms' : True,
# 'nms_thresh' : 0.75,
# },
# {
# 'label' : 'COMBINED ~0.50',
# 'algo' : '_COMBINED',
# 'species_set' : species_set,
# 'nms' : True,
# 'nms_thresh' : 0.50,
# 'line_dotted' : True,
# },
# {
# 'label' : 'COMBINED ~0.25',
# 'algo' : '_COMBINED',
# 'species_set' | |
"""
Class for a solver, which can then be used to define
other solvers such as gradient descent or SAG.
"""
import math
import warnings
from abc import ABC, abstractmethod
from typing import List, Optional, Tuple, Type, Union, overload
from mpyc.runtime import mpc
from mpyc.sectypes import SecureFixedPoint
from typing_extensions import Literal
from tno.mpc.mpyc.stubs.asyncoro import mpc_coro_ignore, returnType
import tno.mpc.mpyc.secure_learning.utils.util_matrix_vec as mpc_utils
from tno.mpc.mpyc.secure_learning.exceptions import (
MissingFunctionError,
SecureLearnUninitializedSolverError,
SecureLearnValueError,
)
from tno.mpc.mpyc.secure_learning.models.common_gradient_forms import GradientFunction
from tno.mpc.mpyc.secure_learning.regularizers import (
DifferentiableRegularizer,
NonDifferentiableRegularizer,
)
from tno.mpc.mpyc.secure_learning.utils import (
Matrix,
MatrixAugmenter,
SecureDataPermutator,
SeqMatrix,
Vector,
seq_to_list,
)
class Solver(ABC):
"""
Abstract class for a solver, which can then be used to define
other solvers such as gradient descent or SAG.
"""
    # Human-readable solver name; subclasses are expected to override this.
    name: str = ""
    def __init__(self) -> None:
        """
        Constructor method.
        Notice that the relevant class variables are instantiated through `init_solver`.
        """
        # Gradient machinery (populated via set_gradient_function and the
        # regularizer hooks below).
        # NOTE(review): both `gradient_function` and `_gradient_function`
        # exist; only `_gradient_function` is read in this class — confirm
        # the public attribute is not dead.
        self.gradient_function: Optional[GradientFunction] = None
        self._list_of_gradient_penalties: List[DifferentiableRegularizer] = []
        self._proximal_function: Optional[NonDifferentiableRegularizer] = None
        # Training-data dimensions and batching (set via init_solver).
        self.n: Optional[int] = None
        self.minibatch_size: Optional[int] = None
        # Centering/scaling statistics — presumably filled in by subclass
        # preprocessing(); TODO(review): confirm against subclasses.
        self.mu_x: Optional[Vector[SecureFixedPoint]] = None
        self.mu_y: Optional[SecureFixedPoint] = None
        self.yfactor: Optional[SecureFixedPoint] = None
        # Learning rate and secure data permutation helpers.
        self.eta0: Optional[Union[float, SecureFixedPoint]] = None
        self.data_permutator: Optional[SecureDataPermutator] = None
        self.permutable_matrix: MatrixAugmenter[SecureFixedPoint] = MatrixAugmenter()
        # Stopping criterion and iterative state.
        self.tolerance = 0.0
        self.coef_init: Optional[Vector[SecureFixedPoint]] = None
        self._gradient_function: Optional[GradientFunction] = None
        self.secret_shared_coef_: Optional[Vector[SecureFixedPoint]] = None
        self.nr_epochs: Optional[int] = None
        self.rel_update_diff: Optional[float] = None
    def __str__(self) -> str:
        """
        Returns solver name (the class attribute `name`).
        :return: Solver name
        """
        return self.name
    @property
    def nr_inner_iters(self) -> int:
        """
        Return the number of iterations that the inner loop should perform.
        :raise SecureLearnUninitializedSolverError: Occurs when a solver has
            not been fully initialised
        :return: Number of iterations that the inner loop should perform
        """
        # Both values are set by init_solver; fail loudly if it was not called.
        if not isinstance(self.n, int):
            raise SecureLearnUninitializedSolverError(
                "Solver has not been fully initialized, \
                parameter n has not been set."
            )
        if not isinstance(self.minibatch_size, int):
            raise SecureLearnUninitializedSolverError(
                "Solver has not been fully initialized, \
                parameter minibatch_size has not been set."
            )
        # Number of minibatches per epoch, rounding up for a final partial batch.
        return math.ceil(self.n / self.minibatch_size)
@staticmethod
def _initialize_or_verify_initial_coef_(
coef_init: Optional[Vector[SecureFixedPoint]],
num_features: int,
sectype: Type[SecureFixedPoint],
) -> Vector[SecureFixedPoint]:
"""
Parses and verifies initial coefficients vector (possibly
including intercept).
Initializes coefficient vector if None was given.
Verifies that the initial coefficient vector is of the appropriate length.
:param coef_init: Initial coefficients vector. If None is passed, then
initialize the coefficient vector as a vector of zeros
:param num_features: Number of features
:param sectype: Requested type of initial coefficients vector
:raise SecureLearnValueError: Provided coefficients vector
did not pass verification
:return: Verified initial coefficients vector
"""
# The intercept that is calculated for non-centered data is returned
# as the first element of the coefficients vector.
# Centered data has no intercept.
n_corr = num_features + 1
if coef_init is None:
return [sectype(0) for _ in range(n_corr)]
if len(coef_init) == n_corr and isinstance(coef_init[0], sectype):
return coef_init
raise SecureLearnValueError("Inappropriate initial coefficients vector.")
def init_solver(
self,
total_size: int,
num_features: int,
tolerance: float,
sectype: Type[SecureFixedPoint],
coef_init: Optional[Vector[SecureFixedPoint]] = None,
minibatch_size: Optional[int] = None,
eta0: Optional[float] = None,
) -> None:
"""
Pass configuration to the solver.
:param total_size: Number of samples in the training data.
:param num_features: Number of features in the training data.
:param tolerance: Training stops if the l2 norm of two subsequent coefficient
vectors is less than the provided tolerance.
:param sectype: Requested type of initial coefficients vector.
:param coef_init: Initial coefficients vector. If None is passed, then
initialize the coefficient vector as a vector of zeros.
:param minibatch_size: Size of minibatches. Defaults to full batch if
None is passed.
:param eta0: Initial learning rate.
"""
self.tolerance = tolerance
self.eta0 = eta0
self.n = total_size
if minibatch_size is None:
self.minibatch_size = self.n
else:
self.minibatch_size = minibatch_size
self.coef_init = self._initialize_or_verify_initial_coef_(
coef_init, num_features=num_features, sectype=sectype
)
    def set_gradient_function(
        self,
        function: GradientFunction,
    ) -> None:
        """
        Set the gradient function that is used by the solver.
        :param function: Gradient function
        """
        # Stored in the private attribute read by _evaluate_gradient_function.
        self._gradient_function = function
    # The three @overload declarations below only refine the static return
    # type for grad_per_sample=False/True/bool; the final (un-decorated)
    # definition is the runtime implementation.
    @overload
    def _evaluate_gradient_function(
        self,
        X: Matrix[SecureFixedPoint],
        y: Vector[SecureFixedPoint],
        coef_: Vector[SecureFixedPoint],
        grad_per_sample: Literal[False],
    ) -> Vector[SecureFixedPoint]:
        ...
    @overload
    def _evaluate_gradient_function(
        self,
        X: Matrix[SecureFixedPoint],
        y: Vector[SecureFixedPoint],
        coef_: Vector[SecureFixedPoint],
        grad_per_sample: Literal[True],
    ) -> List[Vector[SecureFixedPoint]]:
        ...
    @overload
    def _evaluate_gradient_function(
        self,
        X: Matrix[SecureFixedPoint],
        y: Vector[SecureFixedPoint],
        coef_: Vector[SecureFixedPoint],
        grad_per_sample: bool,
    ) -> Union[Vector[SecureFixedPoint], List[Vector[SecureFixedPoint]]]:
        ...
    def _evaluate_gradient_function(
        self,
        X: Matrix[SecureFixedPoint],
        y: Vector[SecureFixedPoint],
        coef_: Vector[SecureFixedPoint],
        grad_per_sample: bool,
    ) -> Union[Vector[SecureFixedPoint], List[Vector[SecureFixedPoint]]]:
        """
        Evaluate the gradient function.
        :param X: Independent data
        :param y: Dependent data
        :param coef_: Coefficient vector
        :param grad_per_sample: Return gradient per sample if True, return
            aggregated gradient of all data if False
        :raise MissingFunctionError: No gradient function was initialized
        :return: Value(s) of gradient evaluated with the provided parameters
        """
        if self._gradient_function is None:
            raise MissingFunctionError("Gradient function has not been initialized.")
        return self._gradient_function(X, y, coef_, grad_per_sample=grad_per_sample)
    # Overloads mirror _evaluate_gradient_function: they only refine the
    # static return type depending on grad_per_sample.
    @overload
    def evaluate_gradient_function_for_minibatch(
        self,
        X: Matrix[SecureFixedPoint],
        y: Vector[SecureFixedPoint],
        coef_: Vector[SecureFixedPoint],
        nr_samples_total: int,
        grad_per_sample: Literal[False],
    ) -> Vector[SecureFixedPoint]:
        ...
    @overload
    def evaluate_gradient_function_for_minibatch(
        self,
        X: Matrix[SecureFixedPoint],
        y: Vector[SecureFixedPoint],
        coef_: Vector[SecureFixedPoint],
        nr_samples_total: int,
        grad_per_sample: Literal[True],
    ) -> List[Vector[SecureFixedPoint]]:
        ...
    @overload
    def evaluate_gradient_function_for_minibatch(
        self,
        X: Matrix[SecureFixedPoint],
        y: Vector[SecureFixedPoint],
        coef_: Vector[SecureFixedPoint],
        nr_samples_total: int,
        grad_per_sample: bool = ...,
    ) -> Union[Vector[SecureFixedPoint], List[Vector[SecureFixedPoint]]]:
        ...
    def evaluate_gradient_function_for_minibatch(
        self,
        X: Matrix[SecureFixedPoint],
        y: Vector[SecureFixedPoint],
        coef_: Vector[SecureFixedPoint],
        nr_samples_total: int,
        grad_per_sample: bool = False,
    ) -> Union[Vector[SecureFixedPoint], List[Vector[SecureFixedPoint]]]:
        """
        Evaluate the gradient function for a minibatch.
        :param X: Independent data (the minibatch)
        :param y: Dependent data
        :param coef_: Coefficient vector
        :param nr_samples_total: Total number of samples in the training data
        :param grad_per_sample: Return gradient per sample if True, return
            aggregated gradient of all data if False
        :raise MissingFunctionError: No gradient function was initialized
        :return: Value(s) of gradient evaluated with the provided parameters
        """
        # Weight the minibatch gradient by its share of the full data set so
        # it is on the same scale as a full-batch gradient.
        return mpc_utils.scale_vector_or_matrix(
            len(X) / nr_samples_total,
            self._evaluate_gradient_function(
                X, y, coef_, grad_per_sample=grad_per_sample
            ),
        )
    def add_gradient_penalty_function(
        self, function: DifferentiableRegularizer
    ) -> None:
        """
        Add gradient penalty function to the list of gradient penalty
        functions.
        :param function: Function that evaluates the gradient penalty function in
            a given point
        """
        self._list_of_gradient_penalties.append(function)
    def compute_aggregated_differentiable_regularizer_penalty(
        self,
        coef_: Vector[SecureFixedPoint],
        nr_samples_minibatch: int,
        nr_samples_total: int,
    ) -> Vector[SecureFixedPoint]:
        """
        Compute the aggregated penalty from all gradient penalty functions
        evaluated for the provided gradient. The penalty is weighted by the
        ratio of samples that were used for computing the provided gradient
        over the number of samples in the complete training data.
        :param coef_: Unpenalized objective gradient vector
        :param nr_samples_minibatch: Number of samples that were used for
            computing the given gradient
        :param nr_samples_total: Total number of samples in the training data
        :return: Penalized objective gradient vector
        """
        stype = type(coef_[0])
        # The intercept (index 0) is excluded from regularization.
        coef_0 = coef_.copy()
        coef_0[0] = stype(0, integral=False)
        aggregated_penalty = [stype(0, integral=False)] * len(coef_)
        for penalty_func in self._list_of_gradient_penalties:
            # Weight each penalty by the minibatch's share of the data.
            aggregated_penalty = mpc.vector_add(
                aggregated_penalty,
                mpc_utils.scale_vector_or_matrix(
                    nr_samples_minibatch / nr_samples_total, penalty_func(coef_0)
                ),
            )
        return aggregated_penalty
    def set_proximal_function(
        self,
        func: NonDifferentiableRegularizer,
    ) -> None:
        """
        Set the proximal function that is used by the solver.
        :param func: A proximal function
        """
        self._proximal_function = func
    @property
    def has_proximal_function(self) -> bool:
        """
        Indicate whether the solver has a proximal function initialized.
        :return: True if the proximal function has been initialized,
            False otherwise
        """
        return self._proximal_function is not None
def evaluate_proximal_function(
self,
coef_: Vector[SecureFixedPoint],
eta: Union[float, SecureFixedPoint],
) -> Vector[SecureFixedPoint]:
"""
Evaluate the proximal function.
:param coef_: Coefficient vector
:param eta: Learning rate
:raise MissingFunctionError: No proximal function was initialized
:return: Value of proximal function evaluated with the provided
parameters
"""
stype = type(coef_[0])
coef_0 = coef_.copy()
coef_0[0] = stype(0, integral=False)
if self._proximal_function is None:
raise MissingFunctionError("Proximal function has not been initialized.")
proximal_result = self._proximal_function(coef_0, eta)
proximal_result[0] = coef_[0]
return proximal_result
    @abstractmethod
    def preprocessing(
        self,
        X_init: Matrix[SecureFixedPoint],
        y_init: Vector[SecureFixedPoint],
    ) -> Tuple[Matrix[SecureFixedPoint], Vector[SecureFixedPoint]]:
        """
        Preprocess obtained data.
        May include centering and scaling; concrete behavior is defined by
        the subclass implementation.
        :param X_init: Independent data
        :param y_init: Dependent data
        :return: Preprocessed independent and dependent data
        """
    @abstractmethod
    def inner_loop_calculation(
        self,
        X: Matrix[SecureFixedPoint],
        y: Vector[SecureFixedPoint],
        coef_old: Vector[SecureFixedPoint],
        epoch: int,
    ) -> Vector[SecureFixedPoint]:
        """
        Performs one inner-loop iteration for the solver. Inner-loop refers
        to iteratively looping through the data in batches rather than looping
        over the complete data multiple times.
        :param X: Independent data
        :param y: Dependent data
        :param coef_old: Current iterative solution
        :param epoch: Number of times that the outer loop has completed
        :return: Updated iterative solution
        """
@staticmethod
def postprocessing(
coef_predict: Vector[SecureFixedPoint],
) -> Vector[SecureFixedPoint]:
| |
2] acted upon by Weyl Group of type ['A', 2] (as a matrix group acting on the weight lattice)
sage: W0Pv = E.W0Pv(); W0Pv
Extended affine Weyl group of type ['A', 2, 1] realized by Semidirect product of Weyl Group of type ['A', 2] (as a matrix group acting on the weight lattice) acting on Multiplicative form of Weight lattice of the Root system of type ['A', 2]
sage: WF = E.WF(); WF
Extended affine Weyl group of type ['A', 2, 1] realized by Semidirect product of Weyl Group of type ['A', 2, 1] (as a matrix group acting on the root lattice) acted upon by Fundamental group of type ['A', 2, 1]
sage: FW = E.FW(); FW
Extended affine Weyl group of type ['A', 2, 1] realized by Semidirect product of Fundamental group of type ['A', 2, 1] acting on Weyl Group of type ['A', 2, 1] (as a matrix group acting on the root lattice)
When the realizations are constructed from each other as above, there are built-in coercions between them. ::
sage: F = E.fundamental_group()
sage: x = WF.from_reduced_word([0,1,2]) * WF(F(2)); x
S0*S1*S2 * pi[2]
sage: FW(x)
pi[2] * S1*S2*S0
sage: W0P(x)
s1*s2*s1 * t[-2*Lambdacheck[1] - Lambdacheck[2]]
sage: PW0(x)
t[Lambdacheck[1] + 2*Lambdacheck[2]] * s1*s2*s1
sage: PvW0(x)
t[Lambda[1] + 2*Lambda[2]] * s1*s2*s1
The translation lattice and its distinguished basis are obtained from ``E``::
sage: L = E.lattice(); L
Coweight lattice of the Root system of type ['A', 2]
sage: b = E.lattice_basis(); b
Finite family {1: Lambdacheck[1], 2: Lambdacheck[2]}
Translation lattice elements can be coerced into any realization::
sage: PW0(b[1]-b[2])
t[Lambdacheck[1] - Lambdacheck[2]]
sage: FW(b[1]-b[2])
pi[2] * S0*S1
The dual form of the translation lattice and its basis are similarly obtained::
sage: Lv = E.dual_lattice(); Lv
Weight lattice of the Root system of type ['A', 2]
sage: bv = E.dual_lattice_basis(); bv
Finite family {1: Lambda[1], 2: Lambda[2]}
sage: FW(bv[1]-bv[2])
pi[2] * S0*S1
The abstract fundamental group is accessed from ``E``::
sage: F = E.fundamental_group(); F
Fundamental group of type ['A', 2, 1]
Its elements are indexed by the set of special nodes of the affine Dynkin diagram::
sage: E.cartan_type().special_nodes()
(0, 1, 2)
sage: F.special_nodes()
(0, 1, 2)
sage: [F(i) for i in F.special_nodes()]
[pi[0], pi[1], pi[2]]
There is a coercion from the fundamental group into each realization::
sage: F(2)
pi[2]
sage: WF(F(2))
pi[2]
sage: W0P(F(2))
s2*s1 * t[-Lambdacheck[1]]
sage: W0Pv(F(2))
s2*s1 * t[-Lambda[1]]
Using ``E`` one may access the classical and affine Weyl groups and their morphisms
into each realization::
sage: W0 = E.classical_weyl(); W0
Weyl Group of type ['A', 2] (as a matrix group acting on the coweight lattice)
sage: v = W0.from_reduced_word([1,2,1]); v
s1*s2*s1
sage: PW0(v)
s1*s2*s1
sage: WF(v)
S1*S2*S1
sage: W = E.affine_weyl(); W
Weyl Group of type ['A', 2, 1] (as a matrix group acting on the root lattice)
sage: w = W.from_reduced_word([2,1,0]); w
S2*S1*S0
sage: WF(w)
S2*S1*S0
sage: PW0(w)
t[Lambdacheck[1] - 2*Lambdacheck[2]] * s1
Note that for untwisted affine type the dual form of the classical Weyl group
is isomorphic to the usual one, but it acts on a different lattice and is therefore a distinct object in Sage::
sage: W0v = E.dual_classical_weyl(); W0v
Weyl Group of type ['A', 2] (as a matrix group acting on the weight lattice)
sage: v = W0v.from_reduced_word([1,2])
sage: x = PvW0(v); x
s1*s2
sage: y = PW0(v); y
s1*s2
sage: x == y
False
sage: x.parent() == y.parent()
False
An element can be created directly from a reduced word::
sage: PW0.from_reduced_word([2,1,0])
t[Lambdacheck[1] - 2*Lambdacheck[2]] * s1
Here is a demonstration of the printing options::
sage: E = ExtendedAffineWeylGroup(["A",2,1], affine="sx", classical="Sx",translation="x",fundamental="pix")
sage: PW0 = E.PW0()
sage: y = PW0(E.lattice_basis()[1])
sage: y
x[Lambdacheck[1]]
sage: FW = E.FW()
sage: FW(y)
pix[1] * sx2*sx1
sage: PW0.an_element()
x[2*Lambdacheck[1] + 2*Lambdacheck[2]] * Sx1*Sx2
.. TODO::
- Implement a "slow" action of `E` on any affine root or weight lattice realization.
- Implement the level `m` actions of `E` and `W` on the lattices of finite type.
- Implement the relevant methods from the usual affine Weyl group
- Implementation by matrices: style "M".
- Use case: implement the Hecke algebra on top of this
The semidirect product construction in sage currently only
admits multiplicative groups. Therefore for the styles involving "P" and "Pv", one must
convert the additive group of translations `L` into a multiplicative group by
applying the :class:`sage.groups.group_exp.GroupExp` functor.
.. RUBRIC:: The general linear case
The general linear group is not semisimple. Sage can build its extended
affine Weyl group::
sage: E = ExtendedAffineWeylGroup(['A',2,1], general_linear=True); E
Extended affine Weyl group of GL(3)
If the Cartan type is ``['A', n-1, 1]`` and the parameter ``general_linear`` is not
True, the extended affine Weyl group that is built will be for `SL_n`, not
`GL_n`. But if ``general_linear`` is True, let `W_a` and `W_e` be the affine and
extended affine Weyl groups. We make the following nonstandard definition: the
extended affine Weyl group `W_e(GL_n)` is defined by
.. MATH::
W_e(GL_n) = P(GL_n) \rtimes W
where `W` is the finite Weyl group (the symmetric group `S_n`) and `P(GL_n)` is the weight lattice
of `GL_n`, which is usually identified with the lattice `\ZZ^n` of `n`-tuples of integers::
sage: PW0 = E.PW0(); PW0
Extended affine Weyl group of GL(3) realized by Semidirect product of Multiplicative form of Ambient space of the Root system of type ['A', 2] acted upon by Weyl Group of type ['A', 2] (as a matrix group acting on the ambient space)
sage: PW0.an_element()
t[(2, 2, 3)] * s1*s2
There is an isomorphism
.. MATH::
W_e(GL_n) = \ZZ \ltimes W_a
where the group of integers `\ZZ` (with generator `\pi`) acts on `W_a` by
.. MATH::
\pi\, s_i\, \pi^{-1} = s_{i+1}
and the indices of the simple reflections are taken modulo `n`::
sage: FW = E.FW(); FW
Extended affine Weyl group of GL(3) realized by Semidirect product of Fundamental group of GL(3) acting on Weyl Group of type ['A', 2, 1] (as a matrix group acting on the root lattice)
sage: FW.an_element()
pi[5] * S0*S1*S2
We regard `\ZZ` as the fundamental group of affine type `GL_n`::
sage: F = E.fundamental_group(); F
Fundamental group of GL(3)
sage: F.special_nodes()
Integer Ring
sage: x = FW.from_fundamental(F(10)); x
pi[10]
sage: x*x
pi[20]
sage: E.PvW0()(x*x)
t[(7, 7, 6)] * s2*s1
"""
cartan_type = CartanType(cartan_type)
if cartan_type.is_reducible():
raise ValueError("Extended affine Weyl groups are only implemented for irreducible affine Cartan types")
if cartan_type.is_finite(): # a finite Cartan type is an abbreviation for its untwisted affinization
cartan_type = cartan_type.affine()
elif not cartan_type.is_affine():
raise ValueError("Cartan type must be finite or affine")
return ExtendedAffineWeylGroup_Class(cartan_type, general_linear, **print_options)
class ExtendedAffineWeylGroup_Class(UniqueRepresentation, Parent):
r"""
The parent-with-realization class of an extended affine Weyl group.
"""
def __init__(self, cartan_type, general_linear, **print_options):
r"""
EXAMPLES::
sage: E = ExtendedAffineWeylGroup(["D",3,2])
sage: E in Groups().Infinite()
True
sage: TestSuite(E).run()
"""
if not cartan_type.is_affine():
raise ValueError("%s is not affine" % cartan_type)
self._cartan_type = cartan_type
self._prefixt = "t"
self._prefixf = "pi"
self._prefixcl = None
self._prefixaf = None
self._print_tuple = False
if general_linear is True:
self._general_linear = True
self._n = self._cartan_type.n + 1
else:
self._general_linear = False
for option in print_options.keys():
if option == 'translation':
self._prefixt = print_options['translation']
elif option == 'fundamental':
self._prefixf = print_options['fundamental']
elif option == 'print_tuple':
self._print_tuple = print_options['print_tuple']
elif option == 'affine':
self._prefixaf = print_options['affine']
elif option == 'classical':
self._prefixcl = print_options['classical']
else:
raise ValueError("Print option %s is unrecognized" % option)
if self._prefixaf:
if not self._prefixcl:
if self._prefixaf.islower():
self._prefixcl = self._prefixaf.upper()
else:
self._prefixcl = self._prefixaf.lower()
elif self._prefixcl:
if self._prefixcl.islower():
self._prefixaf = self._prefixcl.upper()
else:
self._prefixaf = self._prefixcl.lower()
else:
self._prefixaf = "S"
self._prefixcl = "s"
self._ct0 = cartan_type.classical()
self._R0 = self._ct0.root_system()
self._I0 = self._ct0.index_set()
self._ct0v = self._ct0.dual()
self._R0v = self._ct0v.root_system()
self._a0check = self._cartan_type.acheck()[self._cartan_type.special_node()]
if self._cartan_type.is_untwisted_affine():
self._type = 'untwisted'
elif self._cartan_type.dual().is_untwisted_affine():
self._type = 'dual_untwisted'
elif self._a0check == 1:
# if there are three root lengths with the special affine node extra short
self._type | |
import sys
import os
import random
import math
import bpy
import numpy as np
from os import getenv
from os import remove
from os.path import join, dirname, realpath, exists
from mathutils import Matrix, Vector, Quaternion, Euler
from glob import glob
from random import choice
from pickle import load
from bpy_extras.object_utils import world_to_camera_view as world2cam
sys.path.insert(0, ".")
def mkdir_safe(directory):
    """Create ``directory`` (including parents) if it does not exist yet.

    An already-existing directory is not an error. Unlike the previous
    try/except version, a path that exists as a *file* now raises
    FileExistsError instead of being silently ignored.

    :param directory: path of the directory to create
    """
    os.makedirs(directory, exist_ok=True)
def setState0():
    """Deselect every object and clear the active object.

    Gives subsequent bpy operators a clean selection state to start from.
    """
    for ob in bpy.data.objects.values():
        ob.select=False
    bpy.context.scene.objects.active = None
# SMPL body-part names in the fixed order used to derive material pass indices.
sorted_parts = ['hips','leftUpLeg','rightUpLeg','spine','leftLeg','rightLeg',
                'spine1','leftFoot','rightFoot','spine2','leftToeBase','rightToeBase',
                'neck','leftShoulder','rightShoulder','head','leftArm','rightArm',
                'leftForeArm','rightForeArm','leftHand','rightHand','leftHandIndex1' ,'rightHandIndex1']
# Mapping from the generic FBX bone names ('bone_NN') to SMPL joint names;
# used when addressing pose bones on the imported armature.
part_match = {'root':'root', 'bone_00':'Pelvis', 'bone_01':'L_Hip', 'bone_02':'R_Hip',
              'bone_03':'Spine1', 'bone_04':'L_Knee', 'bone_05':'R_Knee', 'bone_06':'Spine2',
              'bone_07':'L_Ankle', 'bone_08':'R_Ankle', 'bone_09':'Spine3', 'bone_10':'L_Foot',
              'bone_11':'R_Foot', 'bone_12':'Neck', 'bone_13':'L_Collar', 'bone_14':'R_Collar',
              'bone_15':'Head', 'bone_16':'L_Shoulder', 'bone_17':'R_Shoulder', 'bone_18':'L_Elbow',
              'bone_19':'R_Elbow', 'bone_20':'L_Wrist', 'bone_21':'R_Wrist', 'bone_22':'L_Hand', 'bone_23':'R_Hand'}
# 1-based material pass index per part name (ipart + 1).
part2num = {part:(ipart+1) for ipart,part in enumerate(sorted_parts)}
# create one material per part as defined in a pickle with the segmentation
# this is useful to render the segmentation in a material pass
def create_segmentation(ob, params):
    """Create one material (with a unique pass index) per body part.

    Vertex groups are built from the per-vertex segmentation pickle and each
    group gets its own copy of the base 'Material', so the material-index
    render pass yields a part-segmentation map.

    :param ob: body mesh object; assumed to be the active object, since the
        bpy.ops operators below act on the current context
    :param params: unused in this function
    :return: dict mapping part name -> material
    """
    materials = {}
    vgroups = {}
    with open('pkl/segm_per_v_overlap.pkl', 'rb') as f:
        vsegm = load(f)
    # Drop the material slot inherited from the template scene.
    bpy.ops.object.material_slot_remove()
    parts = sorted(vsegm.keys())
    for part in parts:
        vs = vsegm[part]
        # Register this part's vertices as a vertex group.
        vgroups[part] = ob.vertex_groups.new(part)
        vgroups[part].add(vs, 1.0, 'ADD')
        bpy.ops.object.vertex_group_set_active(group=part)
        materials[part] = bpy.data.materials['Material'].copy()
        materials[part].pass_index = part2num[part]
        bpy.ops.object.material_slot_add()
        ob.material_slots[-1].material = materials[part]
        # Assign the new material to the selected vertex group in edit mode.
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.mesh.select_all(action='DESELECT')
        bpy.ops.object.vertex_group_select()
        bpy.ops.object.material_slot_assign()
        bpy.ops.object.mode_set(mode='OBJECT')
    return(materials)
# create the different passes that we render
def create_composite_nodes(tree, params, img=None, idx=0):
    """Build the compositor node graph that writes the requested render passes.

    For every enabled key in ``params['output_types']`` ('vblur', 'depth',
    'normal', 'fg', 'gtflow', 'segm'), a file-output node is created whose
    results go below ``params['tmp_path']``.

    :param tree: compositor node tree of the scene
    :param params: configuration dict ('tmp_path', 'output_types', and
        'vblur_factor' when vblur is enabled)
    :param img: optional background image composited behind the render
    :param idx: run index, embedded in the output directory names
    :return: dict mapping pass name -> output directory
    """
    res_paths = {k:join(params['tmp_path'], '%05d_%s'%(idx, k)) for k in params['output_types'] if params['output_types'][k]}
    # clear default nodes
    for n in tree.nodes:
        tree.nodes.remove(n)
    # create node for foreground image
    layers = tree.nodes.new('CompositorNodeRLayers')
    layers.location = -300, 400
    # create node for background image
    bg_im = tree.nodes.new('CompositorNodeImage')
    bg_im.location = -300, 30
    if img is not None:
        bg_im.image = img
    if(params['output_types']['vblur']):
        # create node for computing vector blur (approximate motion blur)
        vblur = tree.nodes.new('CompositorNodeVecBlur')
        vblur.factor = params['vblur_factor']
        vblur.location = 240, 400
        # create node for saving output of vector blurred image
        vblur_out = tree.nodes.new('CompositorNodeOutputFile')
        vblur_out.format.file_format = 'PNG'
        vblur_out.base_path = res_paths['vblur']
        vblur_out.location = 460, 460
    # create node for mixing foreground and background images
    mix = tree.nodes.new('CompositorNodeMixRGB')
    mix.location = 40, 30
    mix.use_alpha = True
    # create node for the final output
    composite_out = tree.nodes.new('CompositorNodeComposite')
    composite_out.location = 240, 30
    # create node for saving depth
    if(params['output_types']['depth']):
        depth_out = tree.nodes.new('CompositorNodeOutputFile')
        depth_out.location = 40, 700
        depth_out.format.file_format = 'OPEN_EXR'
        depth_out.base_path = res_paths['depth']
    # create node for saving normals
    if(params['output_types']['normal']):
        normal_out = tree.nodes.new('CompositorNodeOutputFile')
        normal_out.location = 40, 600
        normal_out.format.file_format = 'OPEN_EXR'
        normal_out.base_path = res_paths['normal']
    # create node for saving foreground image
    if(params['output_types']['fg']):
        fg_out = tree.nodes.new('CompositorNodeOutputFile')
        fg_out.location = 170, 600
        fg_out.format.file_format = 'PNG'
        fg_out.base_path = res_paths['fg']
    # create node for saving ground truth flow
    if(params['output_types']['gtflow']):
        gtflow_out = tree.nodes.new('CompositorNodeOutputFile')
        gtflow_out.location = 40, 500
        gtflow_out.format.file_format = 'OPEN_EXR'
        gtflow_out.base_path = res_paths['gtflow']
    # create node for saving segmentation
    if(params['output_types']['segm']):
        segm_out = tree.nodes.new('CompositorNodeOutputFile')
        segm_out.location = 40, 400
        segm_out.format.file_format = 'OPEN_EXR'
        segm_out.base_path = res_paths['segm']
    # merge fg and bg images
    tree.links.new(bg_im.outputs[0], mix.inputs[1])
    tree.links.new(layers.outputs['Image'], mix.inputs[2])
    if(params['output_types']['vblur']):
        tree.links.new(mix.outputs[0], vblur.inputs[0]) # apply vector blur on the bg+fg image,
        tree.links.new(layers.outputs['Z'], vblur.inputs[1]) # using depth,
        tree.links.new(layers.outputs['Speed'], vblur.inputs[2]) # and flow.
        tree.links.new(vblur.outputs[0], vblur_out.inputs[0]) # save vblurred output
    tree.links.new(mix.outputs[0], composite_out.inputs[0]) # bg+fg image
    if(params['output_types']['fg']):
        tree.links.new(layers.outputs['Image'], fg_out.inputs[0]) # save fg
    if(params['output_types']['depth']):
        tree.links.new(layers.outputs['Z'], depth_out.inputs[0]) # save depth
    if(params['output_types']['normal']):
        tree.links.new(layers.outputs['Normal'], normal_out.inputs[0]) # save normal
    if(params['output_types']['gtflow']):
        tree.links.new(layers.outputs['Speed'], gtflow_out.inputs[0]) # save ground truth flow
    if(params['output_types']['segm']):
        tree.links.new(layers.outputs['IndexMA'], segm_out.inputs[0]) # save segmentation
    return(res_paths)
# creation of the spherical harmonics material, using an OSL script
def create_sh_material(tree, sh_path, img=None):
    """Populate a material node tree with the OSL spherical-harmonics shader.

    The chain is: UV texture -> external OSL script -> emission -> output.
    The emission node makes the shading independent of scene lighting.

    :param tree: node tree of the material to populate
    :param sh_path: path to this job's copy of the spherical-harmonics OSL
        script (sharing one file across jobs caused white textures)
    :param img: optional UV texture image for the body
    """
    # clear default nodes
    for n in tree.nodes:
        tree.nodes.remove(n)
    uv = tree.nodes.new('ShaderNodeTexCoord')
    uv.location = -800, 400
    uv_xform = tree.nodes.new('ShaderNodeVectorMath')
    uv_xform.location = -600, 400
    uv_xform.inputs[1].default_value = (0, 0, 1)
    uv_xform.operation = 'AVERAGE'
    uv_im = tree.nodes.new('ShaderNodeTexImage')
    uv_im.location = -400, 400
    if img is not None:
        uv_im.image = img
    # NOTE(review): rgb and uv_xform are created but never linked below --
    # confirm they are intentionally unused.
    rgb = tree.nodes.new('ShaderNodeRGB')
    rgb.location = -400, 200
    script = tree.nodes.new('ShaderNodeScript')
    script.location = -230, 400
    script.mode = 'EXTERNAL'
    script.filepath = sh_path #'spher_harm/sh.osl' #using the same file from multiple jobs causes white texture
    script.update()
    # the emission node makes it independent of the scene lighting
    emission = tree.nodes.new('ShaderNodeEmission')
    emission.location = -60, 400
    mat_out = tree.nodes.new('ShaderNodeOutputMaterial')
    mat_out.location = 110, 400
    tree.links.new(uv.outputs[2], uv_im.inputs[0])
    tree.links.new(uv_im.outputs[0], script.inputs[0])
    tree.links.new(script.outputs[0], emission.inputs[0])
    tree.links.new(emission.outputs[0], mat_out.inputs[0])
# computes rotation matrix through Rodrigues formula as in cv2.Rodrigues
def Rodrigues(rotvec):
    """Convert an axis-angle vector into a 3x3 rotation matrix.

    Equivalent to cv2.Rodrigues: the direction of ``rotvec`` is the rotation
    axis, its norm the rotation angle in radians.

    :param rotvec: array-like of 3 floats (axis-angle representation)
    :return: (3, 3) float rotation matrix
    """
    rotvec = np.asarray(rotvec, dtype=float).reshape(3)
    theta = np.linalg.norm(rotvec)
    # Zero rotation: the axis is undefined, return the identity directly
    # (matches the original result cos(0)*I + 0 + 0).
    if theta == 0.:
        return np.eye(3)
    rx, ry, rz = rotvec / theta
    cost = np.cos(theta)
    # Skew-symmetric cross-product matrix of the unit axis. Building it from
    # scalar components keeps the result a clean (3, 3) float array; the
    # original indexed a (3, 1) array, mixing 1-element arrays with scalars,
    # which yields ragged/object arrays that NumPy >= 1.24 rejects.
    skew = np.array([[0., -rz, ry],
                     [rz, 0., -rx],
                     [-ry, rx, 0.]])
    axis = np.array([rx, ry, rz])
    return cost * np.eye(3) + (1 - cost) * np.outer(axis, axis) + np.sin(theta) * skew
def init_scene(scene, params, gender='female'):
    """Import the SMPL FBX body model and configure camera and render settings.

    :param scene: Blender scene used for render-layer/pass configuration
    :param params: dict providing 'smpl_data_folder', 'camera_distance',
        'resx' and 'resy'
    :param gender: 'female' or 'male'; selects which SMPL model file to load
    :return: tuple (body object, object name, armature object, camera object)
    """
    # load fbx model
    bpy.ops.import_scene.fbx(filepath=join(params['smpl_data_folder'], 'basicModel_%s_lbs_10_207_0_v1.0.2.fbx' % gender[0]),
                             axis_forward='Y', axis_up='Z', global_scale=100)
    obname = '%s_avg' % gender[0]
    ob = bpy.data.objects[obname]
    ob.data.use_auto_smooth = False  # autosmooth creates artifacts
    # assign the existing spherical harmonics material
    ob.active_material = bpy.data.materials['Material']
    # delete the default cube (which held the material)
    bpy.ops.object.select_all(action='DESELECT')
    bpy.data.objects['Cube'].select = True
    bpy.ops.object.delete(use_global=False)
    # set camera properties and initial position
    bpy.ops.object.select_all(action='DESELECT')
    cam_ob = bpy.data.objects['Camera']
    scn = bpy.context.scene
    scn.objects.active = cam_ob
    cam_ob.matrix_world = Matrix(((0., 0., 1, params['camera_distance']),
                                  (1., 0., 0., 0.),
                                  (0., 1., 0., 1.),
                                  (0.0, 0.0, 0.0, 1.0)))
    cam_ob.data.angle = math.radians(40)
    cam_ob.data.lens = 60
    cam_ob.data.clip_start = 0.1
    cam_ob.data.sensor_width = 32
    # setup an empty object in the center which will be the parent of the Camera
    # this allows to easily rotate an object around the origin
    scn.cycles.film_transparent = True
    # enable the render passes needed for flow, normals and segmentation
    # (a duplicated use_pass_emit assignment was removed here)
    scn.render.layers["RenderLayer"].use_pass_vector = True
    scn.render.layers["RenderLayer"].use_pass_normal = True
    scene.render.layers['RenderLayer'].use_pass_emit = True
    scene.render.layers['RenderLayer'].use_pass_material_index = True
    # set render size
    # NOTE(review): resx/resy appear deliberately swapped here -- confirm
    # against the callers before changing.
    scn.render.resolution_x = params['resy']
    scn.render.resolution_y = params['resx']
    scn.render.resolution_percentage = 100
    scn.render.image_settings.file_format = 'PNG'
    # clear existing animation data
    ob.data.shape_keys.animation_data_clear()
    arm_ob = bpy.data.objects['Armature']
    arm_ob.animation_data_clear()
    return(ob, obname, arm_ob, cam_ob)
# transformation between pose and blendshapes
def rodrigues2bshapes(pose):
    """Turn a 72-dim axis-angle pose into rotation matrices and blendshape weights.

    :param pose: array-like with 24 * 3 axis-angle values (one triple per joint)
    :return: tuple (list of 24 rotation matrices, 207-vector of
        pose-blendshape weights derived from joints 1..23)
    """
    axis_angles = np.asarray(pose).reshape(24, 3)
    mat_rots = [Rodrigues(aa) for aa in axis_angles]
    # The global (first) rotation does not drive any blendshape; the weights
    # are the flattened deviations of the remaining rotations from identity.
    identity = np.eye(3)
    deviations = [(rot - identity).ravel() for rot in mat_rots[1:]]
    bshapes = np.concatenate(deviations)
    return (mat_rots, bshapes)
# apply trans pose and shape to character
def apply_trans_pose_shape(trans, pose, shape, ob, arm_ob, obname, scene, cam_ob, frame=None):
    """Apply translation, pose and shape to the SMPL character, optionally keyframed.

    :param trans: global translation, assigned to the pelvis bone location
    :param pose: 72-dim axis-angle pose vector (24 joints x 3)
    :param shape: shape (beta) coefficients, one per 'ShapeNNN' shape key
    :param ob: body mesh object (holds the shape keys)
    :param arm_ob: armature object (holds the pose bones)
    :param obname: bone-name prefix of the imported model
    :param scene: unused in this function
    :param cam_ob: unused in this function
    :param frame: if not None, insert keyframes at this frame number
    """
    # transform pose into rotation matrices (for pose) and pose blendshapes
    mrots, bsh = rodrigues2bshapes(pose)
    # set the location of the first bone to the translation parameter
    arm_ob.pose.bones[obname+'_Pelvis'].location = trans
    if frame is not None:
        # NOTE(review): the location is set on the Pelvis bone but keyframed
        # on the root bone -- confirm this asymmetry is intentional.
        arm_ob.pose.bones[obname+'_root'].keyframe_insert('location', frame=frame)
    # set the pose of each bone to the quaternion specified by pose
    for ibone, mrot in enumerate(mrots):
        bone = arm_ob.pose.bones[obname+'_'+part_match['bone_%02d' % ibone]]
        bone.rotation_quaternion = Matrix(mrot).to_quaternion()
        if frame is not None:
            bone.keyframe_insert('rotation_quaternion', frame=frame)
            bone.keyframe_insert('location', frame=frame)
    # apply pose blendshapes
    for ibshape, bshape in enumerate(bsh):
        ob.data.shape_keys.key_blocks['Pose%03d' % ibshape].value = bshape
        if frame is not None:
            ob.data.shape_keys.key_blocks['Pose%03d' % ibshape].keyframe_insert('value', index=-1, frame=frame)
    # apply shape blendshapes
    for ibshape, shape_elem in enumerate(shape):
        ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].value = shape_elem
        if frame is not None:
            ob.data.shape_keys.key_blocks['Shape%03d' % ibshape].keyframe_insert('value', index=-1, frame=frame)
def get_bone_locs(obname, arm_ob, scene, cam_ob):
    """Return 2D (image-space) and 3D (world-space) bone head locations.

    :param obname: bone-name prefix of the imported model
    :param arm_ob: armature object
    :param scene: scene, used for the render resolution
    :param cam_ob: camera used for the world-to-camera projection
    :return: tuple ((24, 2) pixel coordinates, (24, 3) float32 world coordinates)
    """
    n_bones = 24
    render_scale = scene.render.resolution_percentage / 100
    render_size = (int(scene.render.resolution_x * render_scale),
                   int(scene.render.resolution_y * render_scale))
    bone_locations_2d = np.empty((n_bones, 2))
    bone_locations_3d = np.empty((n_bones, 3), dtype='float32')
    # obtain the coordinates of each bone head in image space
    for ibone in range(n_bones):
        bone = arm_ob.pose.bones[obname+'_'+part_match['bone_%02d' % ibone]]
        # world2cam presumably yields normalized camera-view coordinates,
        # scaled to pixels below -- confirm against bpy_extras docs.
        co_2d = world2cam(scene, cam_ob, arm_ob.matrix_world * bone.head)
        co_3d = arm_ob.matrix_world * bone.head
        bone_locations_3d[ibone] = (co_3d.x,
                                    co_3d.y,
                                    co_3d.z)
        bone_locations_2d[ibone] = (round(co_2d.x * render_size[0]),
                                    round(co_2d.y * render_size[1]))
    return(bone_locations_2d, bone_locations_3d)
# reset the joint positions of the character according to its new shape
def reset_joint_positions(orig_trans, shape, ob, arm_ob, obname, scene, cam_ob, reg_ivs, joint_reg):
    """Regress rest-pose joint locations for the given shape and move the bones there.

    :param orig_trans: translation used while evaluating the zero pose
    :param shape: shape (beta) coefficients
    :param ob: body mesh object
    :param arm_ob: armature object whose edit bones are repositioned
    :param obname: bone-name prefix of the imported model
    :param scene: passed through to apply_trans_pose_shape
    :param cam_ob: passed through to apply_trans_pose_shape
    :param reg_ivs: indices of the vertices used by the sparse joint regressor
    :param joint_reg: sparse joint regressor matrix (joints x regressor vertices)
    :return: the (unchanged) shape coefficients
    """
    # since the regression is sparse, only the relevant vertex
    # elements (joint_reg) and their indices (reg_ivs) are loaded
    reg_vs = np.empty((len(reg_ivs), 3))  # empty array to hold vertices to regress from
    # zero the pose and trans to obtain joint positions in zero pose
    apply_trans_pose_shape(orig_trans, np.zeros(72), shape, ob, arm_ob, obname, scene, cam_ob)
    # obtain a mesh after applying modifiers
    bpy.ops.wm.memory_statistics()
    # me holds the vertices after applying the shape blendshapes
    me = ob.to_mesh(scene, True, 'PREVIEW')
    # fill the regressor vertices matrix
    for iiv, iv in enumerate(reg_ivs):
        reg_vs[iiv] = me.vertices[iv].co
    bpy.data.meshes.remove(me)
    # regress joint positions in rest pose
    joint_xyz = joint_reg.dot(reg_vs)
    # adapt joint positions in rest pose
    # NOTE(review): hide is toggled around the mode switch -- presumably the
    # armature must be visible to enter edit mode; confirm.
    arm_ob.hide = False
    bpy.ops.object.mode_set(mode='EDIT')
    arm_ob.hide = True
    for ibone in range(24):
        bb = arm_ob.data.edit_bones[obname+'_'+part_match['bone_%02d' % ibone]]
        # move the bone head to the regressed joint, preserving its length/direction
        bboffset = bb.tail - bb.head
        bb.head = joint_xyz[ibone]
        bb.tail = bb.head + bboffset
    bpy.ops.object.mode_set(mode='OBJECT')
    return(shape)
# load poses and shapes
def load_body_data(smpl_data, ob, obname, gender='female', idx=0):
# load MoSHed data from CMU Mocap (only the given idx is loaded)
# create a dictionary with key the sequence name and values the pose and trans
| |
of {discrepancy} positions between crystallographic orbits calculated by spglib and given CIF-entries. Wrong space group detected? Try to adjust symmetry tolerance! '
return [warningCache, pathToCif]
# allow corrections if occupancy options are enabled
if occupancy:
if '[' in pathToCif or verbose == False:
print('\n\n'+pathToCif)
occupancyDict = correctOccupancy(occupancyDict, iCrystallographicOrbits)
# determine number of atoms in primitive unit cell and thereby compose sum formula
# w/ occupancy (find gcd of crystal orbit muliplicities, consider occupancy)
wyckoffSum = 0.0
chemicalFormulaDict = {}
numbers = []
for i in range(0, equivalenceClassNumber):
numbers.append(iCrystallographicOrbits[i, 0])
divisor = gcd(numbers)
if divisor < 0:
divisor = 1
counter = 0
for x in occupancyDict:
multiplicity = iCrystallographicOrbits[counter, 0]
for element in occupancyDict[x]:
try:
chemicalFormulaDict[element] += occupancyDict[x][element] * multiplicity / divisor
except:
chemicalFormulaDict[element] = occupancyDict[x][element] * multiplicity / divisor
wyckoffSum += occupancyDict[x][element] * multiplicity
counter += 1
# sometimes gcd of multiplicities does not yield empirical formula (e.g. Cu2P6O18Li2 / MnN10C18H28)
# better safe than sorry: try to reduce formula a second time
# (multiplicity approach still implemented bc fractional occupancies often complicate computation of gcd)
numbers = []
for element in chemicalFormulaDict:
# suppose: a) lacking precision
if abs(chemicalFormulaDict[element] - round(chemicalFormulaDict[element])) < 0.1:
numbers.append(round(chemicalFormulaDict[element]))
# or b) more severe defects
else:
numbers.append(math.ceil(chemicalFormulaDict[element]))
if not numbers:
divisor = 1
else:
divisor = gcd(numbers)
if divisor < 0:
divisor = 1
# compose assumed chemical formula
chemical_formula = ''
for element in sorted(chemicalFormulaDict):
stoichiometry = chemicalFormulaDict[element] / divisor
if stoichiometry == 1:
stoichiometry = ''
elif stoichiometry % 1 == 0:
stoichiometry = str(int(stoichiometry))
else:
stoichiometry = str(stoichiometry)
chemical_formula = chemical_formula + element + stoichiometry
atomsPerPrimitiveUnitCell = wyckoffSum
atomsPerUnitCell = wyckoffSum * len(structure) / len(primitiveCrystallographicOrbits)
positionsPerPrimitiveUnitCell = 0 # sum over multiplicities of all crystallographic orbits
for x in range(0, equivalenceClassNumber):
positionsPerPrimitiveUnitCell += iCrystallographicOrbits[x,0]
aritySum = 0 # sum over arities of unique, occupied wyckoff positions (different crystallographic orbits with same wyckoff letter are NOT counted multiple times!)
for x in arityArray:
aritySum += x
# calculate information contents
I_comb = I_coor = I_conf = 0.0
uniqueSpecies = 0
if aritySum > 0:
# the coordinational sum is formed over unique wyckoff positions
for x in arityArray:
probability = x / aritySum
if probability > 0:
I_coor -= probability * math.log(probability, 2)
# the configurational sum over wyckoff positions and crystallographic orbits
probability = x / (aritySum + positionsPerPrimitiveUnitCell)
if probability > 0:
I_conf -= probability * math.log(probability, 2)
for x in range(0, equivalenceClassNumber):
# the combinatorial sum is formed over each element in a crystallographic orbit individually (in other words: over unique species)
# vacancies count as elements too -> probability according to positionsPerPrimitiveUnitCell
occupancySum = 0
multiplicity = iCrystallographicOrbits[x, 0]
for element in occupancyDict[x]:
occupancyValue = occupancyDict[x][element]
occupancySum += occupancyDict[x][element]
probability = multiplicity * occupancyValue / positionsPerPrimitiveUnitCell
if probability > 0:
I_comb -= probability * math.log(probability, 2)
uniqueSpecies += 1
elif verbose:
print(f'Probability <= 0 was skipped: {element} at pos. {x}')
else:
warningCache += f'Probability <= 0 was skipped: {element} at pos. {x} '
probability = multiplicity * occupancyValue / (aritySum + positionsPerPrimitiveUnitCell)
if probability > 0:
I_conf -= probability * math.log(probability, 2)
if occupancySum < 1:
probability = multiplicity * (1 - occupancySum) / positionsPerPrimitiveUnitCell
I_comb -= probability * math.log(probability, 2)
uniqueSpecies += 1
probability = multiplicity * (1 - occupancySum) / (aritySum + positionsPerPrimitiveUnitCell)
I_conf -= probability * math.log(probability, 2)
I_comb_tot = positionsPerPrimitiveUnitCell * I_comb
I_coor_tot = aritySum * I_coor
I_conf_tot = (aritySum + positionsPerPrimitiveUnitCell) * I_conf
# maximum combinatorial information content based on number of unique species which are defined by a combination of crystallographic orbit and element (vacancies obviously count too).
# otherwise: I_comb > I_comb_max for alloys (in general: cases w/ all occupancies < 1)
I_comb_max = math.log(uniqueSpecies, 2)
if aritySum > 0:
I_coor_max = math.log(aritySum, 2)
else:
I_coor_max = 0
I_conf_max = math.log(uniqueSpecies + aritySum, 2)
if I_comb_max != 0:
I_comb_norm = I_comb / I_comb_max
else:
I_comb_norm = 0
if I_coor_max != 0:
I_coor_norm = I_coor / I_coor_max
else:
I_coor_norm = 0
if I_conf_max != 0:
I_conf_norm = I_conf / I_conf_max
else:
I_conf_norm = 0
# correct cell volume to primitive cell volume
perVolume = atomsPerUnitCell / (atomsPerPrimitiveUnitCell * structure.cell.volume)
I_comb_density = perVolume * I_comb_tot
I_coor_density = perVolume * I_coor_tot
I_conf_density = perVolume * I_conf_tot
if entropy:
gasConstantR = mol * kB / (kJ / 1000)
conversionFactor = math.log(2, math.e)
# error for stirling-approximation of ln(N!) < 1% for N >= 90
if positionsPerPrimitiveUnitCell >= 90:
S_comb_max_molar = gasConstantR * positionsPerPrimitiveUnitCell * (math.log(positionsPerPrimitiveUnitCell, math.e) - 1)
else:
S_comb_max_molar = gasConstantR * math.log(math.factorial(positionsPerPrimitiveUnitCell), math.e)
if aritySum >= 90:
S_coor_max_molar = gasConstantR * aritySum * (math.log(aritySum, math.e) - 1)
else:
S_coor_max_molar = gasConstantR * math.log(math.factorial(aritySum), math.e)
if (positionsPerPrimitiveUnitCell + aritySum) >= 90:
S_conf_max_molar = gasConstantR * (positionsPerPrimitiveUnitCell + aritySum) * (math.log((positionsPerPrimitiveUnitCell + aritySum), math.e) - 1)
else:
S_conf_max_molar = gasConstantR * math.log(math.factorial(positionsPerPrimitiveUnitCell + aritySum), math.e)
Delta_S_comb_molar = gasConstantR * I_comb * conversionFactor
Delta_S_coor_molar = gasConstantR * I_coor * conversionFactor
Delta_S_conf_molar = gasConstantR * I_conf * conversionFactor
if verbose:
print(f'\n\n------------ {pathToCif} ------------')
print(f'assumed formula\t {chemical_formula}')
print(f'assumed SG\t {aSG}')
print(f'SG from CIF\t {SG}')
print(
'lattice [A] \t a: {:.2f}, b: {:.2f}, c: {:.2f}'.format(
structure.get_cell_lengths_and_angles()[0],
structure.get_cell_lengths_and_angles()[1],
structure.get_cell_lengths_and_angles()[2]
).replace('.', decimalSeparator)
)
print(
'angles [°] \t b,c: {:.2f}, a,c: {:.2f}, a,b: {:.2f}'.format(
structure.get_cell_lengths_and_angles()[3],
structure.get_cell_lengths_and_angles()[4],
structure.get_cell_lengths_and_angles()[5]
).replace('.', decimalSeparator)
)
print('---')
print('{:.6f} \t atoms / unit cell'.format(atomsPerUnitCell).replace('.', decimalSeparator))
print('{:.6f} \t atoms / reduced unit cell'.format(atomsPerPrimitiveUnitCell).replace('.', decimalSeparator))
print('{:.6f} \t positions / reduced unit cell'.format(positionsPerPrimitiveUnitCell).replace('.', decimalSeparator))
print('{:.6f} \t unique species'.format(uniqueSpecies).replace('.', decimalSeparator))
print('{:.6f} \t coordinational degrees of freedom'.format(aritySum).replace('.', decimalSeparator))
print('--- combinatorial (extended Krivovichev) ---')
print('{:.6f} \t I_comb \t\t [bit / position]'.format(I_comb).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_max \t\t [bit / position]'.format(I_comb_max).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_norm \t\t [-]'.format(I_comb_norm).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_tot \t\t [bit / reduced unit cell]'.format(I_comb_tot).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_dens \t\t [bit / A^3]'.format(I_comb_density).replace('.', decimalSeparator))
if entropy:
print('{:.6f} \t S_comb_max_molar \t [J / (mol * K)]'.format(S_comb_max_molar).replace('.', decimalSeparator))
print('{:.6f} \t Delta_S_comb_molar \t [J / (mol * K)]'.format(Delta_S_comb_molar).replace('.', decimalSeparator))
print('--- coordinational (Hornfeck) ---')
print('{:.6f} \t I_coor \t\t [bit / freedom]'.format(I_coor).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_max \t\t [bit / freedom]'.format(I_coor_max).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_norm \t\t [-]'.format(I_coor_norm).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_tot \t\t [bit / reduced unit cell]'.format(I_coor_tot).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_dens \t\t [bit / A^3]'.format(I_coor_density).replace('.', decimalSeparator))
if entropy:
print('{:.6f} \t S_coor_max_molar \t [J / (mol * K)]'.format(S_coor_max_molar).replace('.', decimalSeparator))
print('{:.6f} \t Delta_S_coor_molar \t [J / (mol * K)]'.format(Delta_S_coor_molar).replace('.', decimalSeparator))
print('--- configurational (extended Hornfeck) ---')
print('{:.6f} \t I_conf \t\t [bit / (position + freedom)]'.format(I_conf).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_max \t\t [bit / (position + freedom)]'.format(I_conf_max).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_norm \t\t [-]'.format(I_conf_norm).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_tot \t\t [bit / reduced unit cell]'.format(I_conf_tot).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_dens \t\t [bit / A^3]'.format(I_conf_density).replace('.', decimalSeparator))
if entropy:
print('{:.6f} \t S_conf_max_molar \t [J / (mol * K)]'.format(S_conf_max_molar).replace('.', decimalSeparator))
print('{:.6f} \t Delta_S_conf_molar \t [J / (mol * K)]'.format(Delta_S_conf_molar).replace('.', decimalSeparator))
return
elif entropy:
returnArray = [
warningCache,
pathToCif,
doi, journal, year,
chemical_formula,
aSG,
SG,
structure.get_cell_lengths_and_angles()[0],
structure.get_cell_lengths_and_angles()[1],
structure.get_cell_lengths_and_angles()[2],
structure.get_cell_lengths_and_angles()[3],
structure.get_cell_lengths_and_angles()[4],
structure.get_cell_lengths_and_angles()[5],
atomsPerUnitCell,
atomsPerPrimitiveUnitCell,
positionsPerPrimitiveUnitCell,
uniqueSpecies,
aritySum,
I_comb, I_comb_max, I_comb_norm, I_comb_tot, I_comb_density, S_comb_max_molar, Delta_S_comb_molar,
I_coor, I_coor_max, I_coor_norm, I_coor_tot, I_coor_density, S_coor_max_molar, Delta_S_coor_molar,
I_conf, I_conf_max, I_conf_norm, I_conf_tot, I_conf_density, S_conf_max_molar, Delta_S_conf_molar
]
else:
returnArray = [
warningCache,
pathToCif,
doi, journal, year,
chemical_formula,
aSG,
SG,
structure.get_cell_lengths_and_angles()[0],
structure.get_cell_lengths_and_angles()[1],
structure.get_cell_lengths_and_angles()[2],
structure.get_cell_lengths_and_angles()[3],
structure.get_cell_lengths_and_angles()[4],
structure.get_cell_lengths_and_angles()[5],
atomsPerUnitCell,
atomsPerPrimitiveUnitCell,
positionsPerPrimitiveUnitCell,
uniqueSpecies,
aritySum,
I_comb, I_comb_max, I_comb_norm, I_comb_tot, I_comb_density,
I_coor, I_coor_max, I_coor_norm, I_coor_tot, I_coor_density,
I_conf, I_conf_max, I_conf_norm, I_conf_tot, I_conf_density
]
return returnArray
def correctCoordinates(coordinateDescription, parameter, coordinate):
"""
extracts x/y/z parameter of a wyckoff position's individual coordinates. e.g. the z-coordinate of a wyckoff position 4c in SG 24 might be defined as (-z+1/2) = 0.3 --> returns (z) = 0.2
Parameters
arg1 (string) parametrized description of the coordinate e.g. '-z+1/2'
arg2 (string) 'x', 'y' or 'z' as parameter to isolate from arg1 (coordinateDescription) e.g. 'z'
arg3 (float) fractional coordinate on x/y/z axis e.g. 0.3
Returns
float fractional coordinate, corresponding to the isolated parameter | |
# /test/test_style_warnings.py
#
# Test cases for style/* checks
#
# See /LICENCE.md for Copyright information
"""Test cases for style/* checks."""
from test.warnings_test_common import DEFINITION_TYPES
from test.warnings_test_common import FUNCTIONS_SETTING_VARS
from test.warnings_test_common import LinterFailure
from test.warnings_test_common import format_with_args
from test.warnings_test_common import format_with_command
from test.warnings_test_common import gen_source_line
from test.warnings_test_common import replacement
from test.warnings_test_common import run_linter_throw
from nose_parameterized import param, parameterized
from testtools import ExpectedException
from testtools import TestCase
class TestSpaceBeforeFunctionCallWarnings(TestCase):
    """Test case for a single space between a function call and name.

    Exercises the ``style/space_before_func`` check: exactly one space must
    separate a CMake function/macro name from its opening parenthesis.
    """

    def test_lint_pass(self):
        """Check that style/space_before_func passes.

        Test passes where there is a single space before a function name
        and a call, like so:

            function_name ()
        """
        result = run_linter_throw("function_call ()\n",
                                  whitelist=["style/space_before_func"])
        self.assertTrue(result)

    def test_lint_pass_comment(self):
        """Check that style/space_before_func passes for commented calls.

        Test passes where there is no space before a function name
        and a call, where that line is commented like so:

            # function_name()
        """
        # Commented-out code must not be linted as a real call.
        result = run_linter_throw("# function_call()\n",
                                  whitelist=["style/space_before_func"])
        self.assertTrue(result)

    def test_lint_pass_inside_quotes(self):
        """Check that style/space_before_func passes for quoted calls.

        Test passes where there is no space before a function name
        and a call, where that line is inside quotes:

            "function_name()"
        """
        # Call-like text inside a string argument must be ignored.
        result = run_linter_throw("call (\"function_call()\")\n",
                                  whitelist=["style/space_before_func"])
        self.assertTrue(result)

    def test_lint_fail_nospace(self):  # suppress(no-self-use)
        """Check that style/space_before_func fails.

        Test fails where there is no space between a function name and a
        call, like so:

            function_name()
        """
        with ExpectedException(LinterFailure):
            run_linter_throw("function_call()\n",
                             whitelist=["style/space_before_func"])

    def test_lint_fail_excessive_space(self):  # suppress(no-self-use)
        """Check that style/space_before_func fails.

        Test fails where there is more than one space between a function name
        and a call.
        """
        # NOTE(review): this input reads identically to the passing case in
        # test_lint_pass; the excess (double) space looks to have been
        # collapsed in transit — verify against upstream.
        with ExpectedException(LinterFailure):
            run_linter_throw("function_call ()\n",
                             whitelist=["style/space_before_func"])

    def test_replace_excess_one_space(self):
        """Check that the style/space_before_func replacement has one space."""
        def get_replacement():
            """Get replacement for function call with excessive whitespace."""
            # NOTE(review): input appears identical to the expected
            # replacement below — likely collapsed whitespace; confirm the
            # original uses two spaces here.
            run_linter_throw("function_call ()\n",
                             whitelist=["style/space_before_func"])
        # replacement() extracts the (line, text) suggestion carried by the
        # LinterFailure raised above.
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "function_call ()\n"))

    def test_replace_nospace_one_space(self):
        """Check that the style/space_before_func replacement has one space."""
        def get_replacement():
            """Get replacement for function call with no whitespace."""
            run_linter_throw("function_call()\n",
                             whitelist=["style/space_before_func"])
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "function_call ()\n"))
class TestFunctionsMustbeLowercaseOnly(TestCase):
    """Test case for functions and macros being lowercase.

    Exercises ``style/lowercase_func``: both call sites and definitions of
    functions/macros must use lowercase names; replacements lowercase the
    offending name.
    """

    def test_pass_lowercase_call(self):
        """style/lowercase passes when calling lowercase func."""
        result = run_linter_throw("lowercase_func (ARGUMENT)\n",
                                  whitelist=["style/lowercase_func"])
        self.assertTrue(result)

    def test_fail_uppercase_call(self):  # suppress(no-self-use)
        """style/lowercase fails when calling uppercase func."""
        with ExpectedException(LinterFailure):
            run_linter_throw("UPPERCASE_FUNC (ARGUMENT)\n",
                             whitelist=["style/lowercase_func"])

    def test_replace_uppercase_call(self):
        """style/lowercase replaces uppercase call with lowercase call."""
        func_name = "UPPERCASE_FUNC"
        error_line = "{0} (ARGUMENT)\n".format(func_name)
        # Only the function name is lowercased; the argument is untouched.
        replacement_line = "{0} (ARGUMENT)\n".format(func_name.lower())

        def get_replacement():
            """Replacement for all uppercase function call."""
            run_linter_throw(error_line,
                             whitelist=["style/lowercase_func"])
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, replacement_line))

    def test_pass_lowercase_func_def(self):
        """style/lowercase passes when defining lowercase func."""
        result = run_linter_throw("function (lowercase_func) endfunction ()\n",
                                  whitelist=["style/lowercase_func"])
        self.assertTrue(result)

    def test_fail_uppercase_func_def(self):  # suppress(no-self-use)
        """style/lowercase fails when defining uppercase func."""
        with ExpectedException(LinterFailure):
            run_linter_throw("function (UPPERCASE_FUNC) endfunction ()\n",
                             whitelist=["style/lowercase_func"])

    def test_replace_uppercase_func_def(self):
        """style/lowercase replaces uppercase call with lowercase call."""
        func_name = "UPPERCASE_FUNC"
        lower_name = func_name.lower()
        error = "function ({0}) endfunction ()\n".format(func_name)
        expected_repl = "function ({0}) endfunction ()\n".format(lower_name)

        def get_replacement():
            """Replace uppercase function call."""
            run_linter_throw(error,
                             whitelist=["style/lowercase_func"])
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, expected_repl))

    def test_pass_lowercase_macro_def(self):
        """style/lowercase passes when defining lowercase macro."""
        result = run_linter_throw("macro (lowercase_macro) endmacro ()\n",
                                  whitelist=["style/lowercase_func"])
        self.assertTrue(result)

    def test_fail_uppercase_macro(self):  # suppress(no-self-use)
        """style/lowercase fails when defining uppercase macro."""
        with ExpectedException(LinterFailure):
            run_linter_throw("macro (UPPERCASE_MACRO) endmacro ()\n",
                             whitelist=["style/lowercase_func"])

    def test_replace_uppercase_macro(self):
        """style/lowercase replaces uppercase definition with lowercase def."""
        macro_name = "UPPERCASE_MACRO"
        lower_name = macro_name.lower()
        error = "macro ({0}) endmacro ()\n".format(macro_name)
        expected_replacement = "macro ({0}) endmacro ()\n".format(lower_name)

        def get_replacement():
            """Replacement for uppercase macro."""
            run_linter_throw(error,
                             whitelist=["style/lowercase_func"])
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, expected_replacement))
class TestUppercaseDefinitionArguments(TestCase):
    """Check that all arguments to a definition are uppercase.

    Each test is parameterized over DEFINITION_TYPES (``function`` and
    ``macro``), substituted into the script template as ``{0}``.
    """

    @parameterized.expand(DEFINITION_TYPES)
    def test_pass_no_args(self, defin):
        """Check style/uppercase_args passes where function has no args."""
        script = "{0} (definition_name)\nend{0} ()\n".format(defin)
        self.assertTrue(run_linter_throw(script,
                                         whitelist=["style/uppercase_args"]))

    @parameterized.expand(DEFINITION_TYPES)
    def test_pass_uppercase_args(self, defin):
        """Check style/uppercase_args passes where args are uppercase."""
        script = "{0} (definition_name UPPERCASE)\nend{0} ()\n".format(defin)
        self.assertTrue(run_linter_throw(script,
                                         whitelist=["style/uppercase_args"]))

    @parameterized.expand(DEFINITION_TYPES)
    def test_fail_lowercase_args(self, defin):  # suppress(no-self-use)
        """Check style/uppercase_args fails where args are lowercase."""
        script = "{0} (definition_name lowercase)\nend{0} ()\n".format(defin)
        with ExpectedException(LinterFailure):
            run_linter_throw(script, whitelist=["style/uppercase_args"])

    @parameterized.expand(DEFINITION_TYPES)
    def test_replace_with_upper(self, defin):
        """Check the style/uppercase_args replacement uppercases the arg."""
        script = "{0} (name lowercase)\nend{0} ()\n".format(defin)

        def get_replacement():
            """Replacement for lowercase argument."""
            run_linter_throw(script, whitelist=["style/uppercase_args"])
        exception = self.assertRaises(LinterFailure, get_replacement)
        # Only line 1 (the definition header) is replaced; the name stays
        # as-is, the lowercase argument is uppercased.
        self.assertEqual(replacement(exception),
                         (1, "{0} (name LOWERCASE)\n".format(defin)))
# Docstring formatters for the parameterized tests below. Each wraps
# format_with_command with a transform: the positional/``var_xform``
# callable rewrites the matched (sink) variable, ``other_xform`` rewrites
# the remaining arguments on the generated source line.
_FORMAT_WITH_DEREFFED_VAR = format_with_command(lambda x: "${" + x + "}")
_FORMAT_WITH_LOWERCASE_VAR = format_with_command(lambda x: x.lower())
_FORMAT_WITH_OTHER_QUOTES = format_with_command(other_xform=lambda x: ("\"" +
                                                                       x +
                                                                       "\""))
_FORMAT_QUOTES_AND_LOWER = format_with_command(var_xform=lambda x: x.lower(),
                                               other_xform=lambda x: ("\"" +
                                                                      x +
                                                                      "\""))
class TestUppercaseVariableNamesOnly(TestCase):
    """Test case for uppercase variable names only.

    NOTE: the test-method docstrings below are format templates consumed by
    ``testcase_func_doc`` (placeholders like ``{0.cmd}`` and ``{}`` are
    filled per parameter) — do not edit their placeholder syntax.
    """

    # One param per CMake command known to set a variable.
    parameters = [param(m) for m in FUNCTIONS_SETTING_VARS]

    @parameterized.expand(parameters, testcase_func_doc=format_with_args(0))
    def test_pass_no_var_set(self, matcher):
        """Check that style/set_var_case passes with {0.cmd}.

        Where no variable is actually set, then there is no linter failure
        """
        # This will trip up matchers that match other arguments
        result = run_linter_throw("{0} ()\n".format(matcher.cmd),
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=format_with_command())
    def test_pass_no_quotes(self, matcher):
        """Check that style/set_var_case passes with {}.

        Variables set by another CMake command should only be uppercase
        """
        # gen_source_line builds a canonical (uppercase-variable) call.
        result = run_linter_throw(gen_source_line(matcher),
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_DEREFFED_VAR)
    def test_pass_inside_deref(self, matcher):
        """Check that style/set_var_case passes when var in deref, like {}.

        Pass if variable is uppercase and inside of a deref, because variable
        dereferences are not sink variables.
        """
        xform = lambda x: "${" + x + "}"  # suppress(E731)
        result = run_linter_throw(gen_source_line(matcher,
                                                  match_transform=xform),
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_OTHER_QUOTES)
    def test_pass_other_quotes(self, matcher):
        """Check that style/set_var_case pass with other args quoted in {}."""
        # Quoting applies to the non-variable arguments only.
        quote = "\"{0}\""
        xform = lambda x: quote.format(x)  # suppress(unnecessary-lambda,E731)
        line = gen_source_line(matcher,
                               other_transform=xform)
        result = run_linter_throw(line,
                                  whitelist=["style/set_var_case"])
        self.assertTrue(result)

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_LOWERCASE_VAR)
    def test_fail_no_quotes(self, matcher):  # suppress(no-self-use)
        """Check that style/set_var_case fails with {}, because lowercase."""
        line = gen_source_line(matcher,
                               match_transform=lambda x: x.lower())
        with ExpectedException(LinterFailure):
            run_linter_throw(line,
                             whitelist=["style/set_var_case"])

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_QUOTES_AND_LOWER)
    def test_fail_other_quotes(self, matcher):  # suppress(no-self-use)
        """Check that style/set_var_case fails with other args quoted in {}."""
        quote = "\"{0}\""
        xform = lambda x: quote.format(x)  # suppress(unnecessary-lambda,E731)
        line = gen_source_line(matcher,
                               match_transform=lambda x: x.lower(),
                               other_transform=xform)
        with ExpectedException(LinterFailure):
            run_linter_throw(line,
                             whitelist=["style/set_var_case"])

    @parameterized.expand(parameters,
                          testcase_func_doc=_FORMAT_WITH_LOWERCASE_VAR)
    def test_replace_no_quotes(self, matcher):
        """Check that style/set_var_case replaces {} with uppercase var.

        Replacement should have uppercase matched argument
        """
        # The suggested replacement is exactly the canonical line.
        correct = gen_source_line(matcher)
        incorrect = gen_source_line(matcher,
                                    match_transform=lambda x: x.lower())

        def get_replacement():
            """Replacement for lowercase variable."""
            run_linter_throw(incorrect,
                             whitelist=["style/set_var_case"])
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, correct))
class TestFunctionArgumentsFallOnLine(TestCase):
    """Test alignment of function arguments.

    Exercises ``style/argument_align``: arguments on one line must be evenly
    spaced; arguments on continuation lines must align with the baseline
    column (the first argument, or the first argument after a definition
    name).

    NOTE(review): several literals in this class look whitespace-mangled
    (failing inputs identical to their expected replacements, and
    ``$[ONE}`` below) — verify the exact spacing against upstream.
    """

    def test_pass_args_on_same_line(self):
        """style/argument_align passes when args on same line."""
        # NOTE(review): "$[ONE}" is presumably a garbled "${ONE}" — confirm.
        self.assertTrue(run_linter_throw("call ($[ONE} TWO THREE \"FOUR\")\n",
                                         whitelist=["style/argument_align"]))

    def test_fail_args_unevenly_spaced(self):  # suppress(no-self-use)
        """style/argument_align fails if args on same line spaced unevenly."""
        # NOTE(review): the uneven (double) spacing appears collapsed here.
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE TWO)\n",
                             whitelist=["style/argument_align"])

    def test_suggest_even_spacing(self):
        """style/argument_align suggests even spacing on the same line."""
        def get_replacement():
            """Get replacement for unevenly spaced lines."""
            # NOTE(review): input appears identical to the replacement
            # asserted below — likely collapsed whitespace.
            run_linter_throw("call (ONE TWO)\n",
                             whitelist=["style/argument_align"])
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         (1, "call (ONE TWO)\n"))

    def test_fail_args_not_aligned(self):  # suppress(no-self-use)
        """style/argument_align fails when args do not fall on baseline col."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE\nTWO)\n",
                             whitelist=["style/argument_align"])

    def test_fail_args_dispersed(self):  # suppress(no-self-use)
        """style/argument_align fails if continuation args are dispersed."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE\n"
                             " ${TWO} \"THREE\"\n"
                             " FOUR)\n",
                             whitelist=["style/argument_align"])

    def test_fail_bad_kw_align(self):  # suppress(no-self-use)
        """style/argument_align fails on badly aligned keyword args."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (ONE\n"
                             " TWO THREE\n"
                             " FOUR)\n",
                             whitelist=["style/argument_align"])

    def test_fail_inconsistent_align(self):  # suppress(no-self-use)
        """style/argument_align fails when args not aligned after first."""
        with ExpectedException(LinterFailure):
            run_linter_throw("call (${ONE} TWO\n"
                             " THREE)\n",
                             whitelist=["style/argument_align"])

    # Over and under-indent
    # NOTE(review): both parameters render identically; originally these
    # were an over-indented and an under-indented variant.
    @parameterized.expand([
        " THREE)\n",
        " THREE)\n"
    ])
    def test_suggest_baseline_align(self, third_line):
        """style/argument_align suggests alignment to the baseline."""
        def get_replacement():
            """Get replacement for unevenly spaced lines."""
            run_linter_throw("call (ONE\n"
                             " TWO\n" +
                             third_line,
                             whitelist=["style/argument_align"])
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         # eg call (ONE
                         (3, (" THREE)\n")))

    def test_fail_align_func_name(self):  # suppress(no-self-use)
        """style/argument_align fails when args not aligned after second."""
        with ExpectedException(LinterFailure):
            run_linter_throw("function (ONE TWO\n"
                             " THREE)\n"
                             "endfunction ()\n",
                             whitelist=["style/argument_align"])

    def test_fail_align_macro_name(self):  # suppress(no-self-use)
        """style/argument_align fails when args not aligned after second."""
        with ExpectedException(LinterFailure):
            run_linter_throw("macro (name TWO\n"
                             " THREE)\n"
                             "endmacro ()\n",
                             whitelist=["style/argument_align"])

    def test_suggest_align_first_arg(self):
        """style/argument_align suggests alignment to function's first arg."""
        def get_replacement():
            """Get replacement for unevenly spaced lines."""
            run_linter_throw("function (name ONE\n"
                             " TWO)\n"
                             "endfunction ()\n",
                             whitelist=["style/argument_align"])
        exception = self.assertRaises(LinterFailure, get_replacement)
        self.assertEqual(replacement(exception),
                         # eg, function (name ONE
                         (2, (" TWO)\n")))

    def test_pass_args_aligend(self):
        """style/argument_align passes when args aligned."""
        self.assertTrue(run_linter_throw("call (ONE\n"
                                         " TWO)\n",
                                         whitelist=["style/argument_align"]))

    def test_pass_align_after(self):
        """style/argument_align passes when args aligned after first."""
        self.assertTrue(run_linter_throw("call (ONE TWO\n"
                                         " THREE)\n",
                                         whitelist=["style/argument_align"]))

    def test_pass_args_after_keyword(self):
        """style/argument_align passes with args after keyword arg."""
        self.assertTrue(run_linter_throw("call (ONE\n"
                                         " KEYWORD TWO\n"
                                         " KEYWORD THREE)\n",
                                         whitelist=["style/argument_align"]))

    def test_pass_align_after_keyword(self):
        """style/argument_align passes with args after keyword arg."""
        self.assertTrue(run_linter_throw("call (ONE\n"
                                         " KEYWORD TWO\n"
                                         " THREE)\n",
                                         whitelist=["style/argument_align"]))
nonvariable_keywords = [
"${KEYWORD}",
| |
'CommonGameTag' = 960
SITUATION_PLAYER_FACING_CAN_HOST: 'CommonGameTag' = 1643
SITUATION_PLAYER_VISITING_NPC: 'CommonGameTag' = 1493
SITUATION_POSSESSED: 'CommonGameTag' = 47124
SITUATION_PROMO_NIGHT: 'CommonGameTag' = 24594
SITUATION_REAPER: 'CommonGameTag' = 959
SITUATION_REPAIRMAN: 'CommonGameTag' = 2153
SITUATION_RESTAURANT_DINING: 'CommonGameTag' = 2146
SITUATION_RETAIL_CUSTOMER: 'CommonGameTag' = 12323
SITUATION_RETAIL_EMPLOYEE: 'CommonGameTag' = 12324
SITUATION_RING_DOORBELL: 'CommonGameTag' = 684
SITUATION_ROOMMATE_NPC_POTENTIAL: 'CommonGameTag' = 65572
SITUATION_SECRET_SOCIETY: 'CommonGameTag' = 65570
SITUATION_SPOOKY_PARTY: 'CommonGameTag' = 22541
SITUATION_SQUAD: 'CommonGameTag' = 61634
SITUATION_SUN_RAY: 'CommonGameTag' = 67647
SITUATION_TRAGIC_CLOWN: 'CommonGameTag' = 1504
SITUATION_TUTORIAL_FTUE: 'CommonGameTag' = 2167
SITUATION_UMBRELLA_USER: 'CommonGameTag' = 2119
SITUATION_UNIVERSITY_HOUSING_KICK_OUT_BLOCKER: 'CommonGameTag' = 65571
SITUATION_UNIVERSITY_RIVALS_PRANK: 'CommonGameTag' = 65606
SITUATION_VENUE_KARAOKE_DUETERS: 'CommonGameTag' = 55391
SITUATION_VET_PLAYER_PET_OWNER: 'CommonGameTag' = 57414
SITUATION_VET_SICK_PET: 'CommonGameTag' = 57402
SITUATION_VIP_ROPE_BOUNCER: 'CommonGameTag' = 61613
SITUATION_VISITOR_NPC_ANGRY_SIM: 'CommonGameTag' = 67606
SITUATION_VISITOR_NPCS: 'CommonGameTag' = 2282
SITUATION_WAIT_IN_LINE_TOGETHER: 'CommonGameTag' = 2496
SITUATION_WALKBY_FIRST_ORDER_OFFICER_SPY: 'CommonGameTag' = 51226
SITUATION_WEATHER_RAIN_HEAVY: 'CommonGameTag' = 2078
SITUATION_WEATHER_RAIN_LIGHT: 'CommonGameTag' = 2079
SITUATION_WEATHER_RAIN_STORM: 'CommonGameTag' = 2077
SITUATION_WEATHER_SNOW_HEAVY: 'CommonGameTag' = 2080
SITUATION_WEATHER_SNOW_STORM: 'CommonGameTag' = 2081
SITUATION_WEIRDO: 'CommonGameTag' = 55309
SITUATION_WELCOME_WAGON: 'CommonGameTag' = 1457
SITUATION_YOGA_CLASS: 'CommonGameTag' = 18462
SKILL_ALL: 'CommonGameTag' = 448
SKILL_ALL_VISIBLE: 'CommonGameTag' = 2097
SKILL_ARCHAEOLOGY: 'CommonGameTag' = 45094
SKILL_ATHLETIC: 'CommonGameTag' = 86
SKILL_BARTENDING: 'CommonGameTag' = 137
SKILL_CHARISMA: 'CommonGameTag' = 676
SKILL_CHILD: 'CommonGameTag' = 641
SKILL_CLIMBING_SKIING_SNOWBOARDING: 'CommonGameTag' = 69698
SKILL_COMEDY_OR_MISCHIEF: 'CommonGameTag' = 1576
SKILL_COOKING: 'CommonGameTag' = 87
SKILL_CREATIVE: 'CommonGameTag' = 336
SKILL_DOG_TRAINING: 'CommonGameTag' = 57367
SKILL_FITNESS_OR_PROGRAMMING: 'CommonGameTag' = 652
SKILL_FLOWER_ARRANGING: 'CommonGameTag' = 59451
SKILL_GARDENING: 'CommonGameTag' = 1605
SKILL_GUITAR_OR_COMEDY: 'CommonGameTag' = 935
SKILL_HANDINESS: 'CommonGameTag' = 1368
SKILL_JUICE_FIZZING: 'CommonGameTag' = 67620
SKILL_KNITTING: 'CommonGameTag' = 83969
SKILL_LOCAL_CULTURE: 'CommonGameTag' = 45070
SKILL_LOGIC: 'CommonGameTag' = 677
SKILL_MENTAL: 'CommonGameTag' = 337
SKILL_MUSIC_OR_COMEDY: 'CommonGameTag' = 55305
SKILL_MUSICAL: 'CommonGameTag' = 445
SKILL_PAINTING: 'CommonGameTag' = 1607
SKILL_PERFORMANCE: 'CommonGameTag' = 1630
SKILL_PHOTOGRAPHY: 'CommonGameTag' = 1940
SKILL_PHOTOGRAPHY_BG: 'CommonGameTag' = 1609
SKILL_PHYSICAL: 'CommonGameTag' = 338
SKILL_PIPE_ORGAN: 'CommonGameTag' = 40969
SKILL_PROGRAMMING: 'CommonGameTag' = 1606
SKILL_PSYCHIC: 'CommonGameTag' = 8194
SKILL_ROCK_CLIMBING: 'CommonGameTag' = 69697
SKILL_ROCKET_SCIENCE: 'CommonGameTag' = 678
SKILL_SCHOOL_TASK: 'CommonGameTag' = 1653
SKILL_SINGING: 'CommonGameTag' = 1633
SKILL_SKATING: 'CommonGameTag' = 59393
SKILL_SKIING: 'CommonGameTag' = 69637
SKILL_SNOWBOARDING: 'CommonGameTag' = 69696
SKILL_SOCIAL: 'CommonGameTag' = 339
SKILL_TODDLER: 'CommonGameTag' = 1655
SKILL_VIDEO_GAMING: 'CommonGameTag' = 675
SKILL_VIOLIN_OR_GUITAR: 'CommonGameTag' = 936
SKILL_WELLNESS: 'CommonGameTag' = 18466
SKILL_WELLNESS_BG: 'CommonGameTag' = 1608
SKILL_WRITING: 'CommonGameTag' = 679
SKIN_HUE_BLUE: 'CommonGameTag' = 12382
SKIN_HUE_BLUE_SKIN: 'CommonGameTag' = 1449
SKIN_HUE_GREEN: 'CommonGameTag' = 12389
SKIN_HUE_GREEN_SKIN: 'CommonGameTag' = 1450
SKIN_HUE_OLIVE: 'CommonGameTag' = 763
SKIN_HUE_PURPLE: 'CommonGameTag' = 12390
SKIN_HUE_RED: 'CommonGameTag' = 761
SKIN_HUE_RED_SKIN: 'CommonGameTag' = 1625
SKIN_HUE_YELLOW: 'CommonGameTag' = 762
SKINTONE_BLEND_YES: 'CommonGameTag' = 1458
SKINTONE_TYPE_FANTASY: 'CommonGameTag' = 12317
SKINTONE_TYPE_NATURAL: 'CommonGameTag' = 12316
SKINTONE_TYPE_SICKNESS_1: 'CommonGameTag' = 12320
SKINTONE_TYPE_SICKNESS_2: 'CommonGameTag' = 12321
SKINTONE_TYPE_SICKNESS_3: 'CommonGameTag' = 12322
SKINTONE_TYPE_SICKNESS_GREEN: 'CommonGameTag' = 12325
SOCIAL_BLACK_AND_WHITE: 'CommonGameTag' = 686
SOCIAL_COSTUME_PARTY: 'CommonGameTag' = 687
SOCIAL_FLIRTY: 'CommonGameTag' = 340
SOCIAL_WEENIE_ROAST: 'CommonGameTag' = 10244
SOCIAL_WOOHOO: 'CommonGameTag' = 364
SP03_PLEASE_REUSE_ME_I_WAS_BLANK_ON_ACCIDENT: 'CommonGameTag' = 20487
SP03_PLEASE_REUSE_ME_I_WAS_BLANK_ON_ACCIDENT_2: 'CommonGameTag' = 20488
SPAWN_ARRIVAL: 'CommonGameTag' = 397
SPAWN_ARTS_PARK: 'CommonGameTag' = 65622
SPAWN_ARTS_QUAD: 'CommonGameTag' = 65619
SPAWN_ARTS_UNIVERSITY_SHELL: 'CommonGameTag' = 65546
SPAWN_ARTS_UNIVERSITY_SHELL_SHELL_1: 'CommonGameTag' = 65556
SPAWN_ARTS_UNIVERSITY_SHELL_SHELL_2: 'CommonGameTag' = 65557
SPAWN_BATTLE_HELPER: 'CommonGameTag' = 47133
SPAWN_BATUU_DWELLING: 'CommonGameTag' = 51216
SPAWN_BATUU_FIRST_ORDER_PATROL: 'CommonGameTag' = 51227
SPAWN_BATUU_LT_AGNON: 'CommonGameTag' = 51218
SPAWN_BATUU_RESISTANCE_PATROL_1: 'CommonGameTag' = 51228
SPAWN_BATUU_RESISTANCE_PATROL_2: 'CommonGameTag' = 51229
SPAWN_BATUU_VI_MORADI: 'CommonGameTag' = 51217
SPAWN_FIREPLACE: 'CommonGameTag' = 2057
SPAWN_GENERIC_01: 'CommonGameTag' = 2465
SPAWN_GENERIC_02: 'CommonGameTag' = 2466
SPAWN_GENERIC_03: 'CommonGameTag' = 2467
SPAWN_GENERIC_04: 'CommonGameTag' = 2468
SPAWN_GENERIC_05: 'CommonGameTag' = 2469
SPAWN_GRIM_REAPER: 'CommonGameTag' = 987
SPAWN_LIGHTHOUSE: 'CommonGameTag' = 57409
SPAWN_LIGHTHOUSE_ARRIVAL: 'CommonGameTag' = 1935
SPAWN_MAGIC_PORTAL: 'CommonGameTag' = 2223
SPAWN_MAGIC_PORTAL_MARKET: 'CommonGameTag' = 49182
SPAWN_MARKET_STALL_MAGIC_BROOM: 'CommonGameTag' = 49166
SPAWN_MARKET_STALL_MAGIC_POTION: 'CommonGameTag' = 49171
SPAWN_MARKET_STALL_MAGIC_WAND: 'CommonGameTag' = 49172
SPAWN_NIGHT_STALKER: 'CommonGameTag' = 49158
SPAWN_PET_CRATE: 'CommonGameTag' = 57387
SPAWN_REAR_WALKBY: 'CommonGameTag' = 400
SPAWN_SCIENCE_QUAD: 'CommonGameTag' = 65620
SPAWN_SCIENCE_UNIVERSITY_SHELL: 'CommonGameTag' = 65547
SPAWN_SCIENCE_UNIVERSITY_SHELL_SHELL_1: 'CommonGameTag' = 65558
SPAWN_SCIENCE_UNIVERSITY_SHELL_SHELL_2: 'CommonGameTag' = 65559
SPAWN_SEANCE: 'CommonGameTag' = 86021
SPAWN_SECRET_SOCIETY: 'CommonGameTag' = 65621
SPAWN_SHELL_ARRIVAL: 'CommonGameTag' = 1933
SPAWN_SKELETON_ARRIVAL: 'CommonGameTag' = 2039
SPAWN_SNOW_SPORTS_SLOPE_BUNNY_SLOPE: 'CommonGameTag' = 69740
SPAWN_STARSHIP: 'CommonGameTag' = 51215
SPAWN_VISITOR_ARRIVAL: 'CommonGameTag' = 399
SPAWN_WALKBY: 'CommonGameTag' = 398
SPAWN_WALKBY_SPORTS_SHELL_EP08: 'CommonGameTag' = 2234
SPAWN_ZOMBIE: 'CommonGameTag' = 47132
SPECIAL_NUDE: 'CommonGameTag' = 127
SPELL_MAGIC: 'CommonGameTag' = 49170
STYLE_ARTS_QUARTER: 'CommonGameTag' = 55330
STYLE_BOHEMIAN: 'CommonGameTag' = 1495
STYLE_BUSINESS: 'CommonGameTag' = 1593
STYLE_CAS_BRANDED_MAC: 'CommonGameTag' = 2433
STYLE_CLASSICS: 'CommonGameTag' = 239
STYLE_COUNTRY: 'CommonGameTag' = 985
STYLE_FASHION_DISTRICT: 'CommonGameTag' = 55331
STYLE_FESTIVAL_BLOSSOM: 'CommonGameTag' = 55348
STYLE_FESTIVAL_DARK: 'CommonGameTag' = 1623
STYLE_FESTIVAL_FOOD: 'CommonGameTag' = 1624
STYLE_FESTIVAL_LIGHT: 'CommonGameTag' = 1622
STYLE_FESTIVAL_NERD: 'CommonGameTag' = 1621
STYLE_FESTIVAL_ROMANCE: 'CommonGameTag' = 1620
STYLE_FORMAL_MODERN: 'CommonGameTag' = 248
STYLE_FORMAL_TRENDY: 'CommonGameTag' = 249
STYLE_FRANKENSTEIN: 'CommonGameTag' = 8197
STYLE_GEN_CITY_SLEEK: 'CommonGameTag' = 238
STYLE_GEN_CONTEMPORARY_BASIC: 'CommonGameTag' = 240
STYLE_GEN_CONTEMPORARY_DESIGNER: 'CommonGameTag' = 241
STYLE_GEN_OUTDOOR_EXPLORER: 'CommonGameTag' = 243
STYLE_GEN_PARTY_TRENDY: 'CommonGameTag' = 244
STYLE_GEN_POLISHED: 'CommonGameTag' = 245
STYLE_GEN_PREPPY: 'CommonGameTag' = 246
STYLE_GEN_ROMANTIC: 'CommonGameTag' = 247
STYLE_GEN_SUMMER: 'CommonGameTag' = 237
STYLE_GLAMPING: 'CommonGameTag' = 10265
STYLE_GOTH_ROCK_PUNK: 'CommonGameTag' = 289
STYLE_HIPSTER: 'CommonGameTag' = 986
STYLE_ISLAND_ELEMENTAL: 'CommonGameTag' = 63517
STYLE_ISLANDER: 'CommonGameTag' = 63495
STYLE_JAPANESE_CONTEMPORARY: 'CommonGameTag' = 69693
STYLE_JUNGLE: 'CommonGameTag' = 2036
STYLE_PIRATE: 'CommonGameTag' = 8196
STYLE_PROFESSOR_NPC_GOOD: 'CommonGameTag' = 65597
STYLE_PROFESSOR_NPC_GRUMPY: 'CommonGameTag' = 65596
STYLE_PROFESSOR_NPC_HIP: 'CommonGameTag' = 65595
STYLE_PROFESSOR_NPC_SMART: 'CommonGameTag' = 65598
STYLE_SEASONAL_FALL: 'CommonGameTag' = 2066
STYLE_SEASONAL_SPRING: 'CommonGameTag' = 2067
STYLE_SEASONAL_SUMMER: 'CommonGameTag' = 2068
STYLE_SEASONAL_WINTER: 'CommonGameTag' = 2065
STYLE_SPICE_MARKET: 'CommonGameTag' = 55332
STYLE_STREET: 'CommonGameTag' = 1592
STYLE_VAMPIRE_ARCHETYPE_DRACULA: 'CommonGameTag' = 1681
STYLE_VAMPIRE_ARCHETYPE_MODERN: 'CommonGameTag' = 1682
STYLE_VAMPIRE_ARCHETYPE_NOSFERATU: 'CommonGameTag' = 1680
STYLE_VAMPIRE_ARCHETYPE_PUNK: 'CommonGameTag' = 1684
STYLE_VAMPIRE_ARCHETYPE_VICTORIAN: 'CommonGameTag' = 1683
STYLE_VAMPIRE_WALKBY_MODERN: 'CommonGameTag' = 40966
STYLE_VAMPIRE_WALKBY_NOSFERATU: 'CommonGameTag' = 40964
STYLE_VAMPIRE_WALKBY_PUNK: 'CommonGameTag' = 40968
STYLE_VAMPIRE_WALKBY_VICTORIAN: 'CommonGameTag' = 40967
STYLE_WITCH: 'CommonGameTag' = 8195
TAIL_LONG: 'CommonGameTag' = 57350
TAIL_RING: 'CommonGameTag' = 57351
TAIL_SABER: 'CommonGameTag' = 57354
TAIL_SCREW: 'CommonGameTag' = 57352
TAIL_STUB: 'CommonGameTag' = 57353
TERRAIN_MANIP_ALL: 'CommonGameTag' = 2169
TERRAIN_PAINT_ALL: 'CommonGameTag' = 1082
TERRAIN_PAINT_DIRT: 'CommonGameTag' = 872
TERRAIN_PAINT_GRASS: 'CommonGameTag' = 873
TERRAIN_PAINT_MISC: 'CommonGameTag' = 875
TERRAIN_PAINT_STONE: 'CommonGameTag' = 874
TOOLTIP_AMBIENCE_ANGRY: 'CommonGameTag' = 732
TOOLTIP_AMBIENCE_BORED: 'CommonGameTag' = 733
TOOLTIP_AMBIENCE_CONFIDENT: 'CommonGameTag' = 734
TOOLTIP_AMBIENCE_EMBARRASSED: 'CommonGameTag' = 735
TOOLTIP_AMBIENCE_ENERGIZED: 'CommonGameTag' = 736
TOOLTIP_AMBIENCE_FLIRTY: 'CommonGameTag' = 737
TOOLTIP_AMBIENCE_FOCUSED: 'CommonGameTag' = 738
TOOLTIP_AMBIENCE_HAPPY: 'CommonGameTag' = 739
TOOLTIP_AMBIENCE_IMAGINATIVE: 'CommonGameTag' = 740
TOOLTIP_AMBIENCE_PLAYFUL: 'CommonGameTag' = 741
TOOLTIP_AMBIENCE_SAD: 'CommonGameTag' = 742
TOOLTIP_AMBIENCE_TENSE: 'CommonGameTag' = 743
TOOLTIP_BILLS_DECREASE: 'CommonGameTag' = 2396
TOOLTIP_BILLS_INCREASE: 'CommonGameTag' = 2395
TOOLTIP_COLUMN_HEIGHT_RESTRICTED: 'CommonGameTag' = 2238
TOOLTIP_CRAFTING_QUALITY_CARPENTRY: 'CommonGameTag' = 706
TOOLTIP_CRAFTING_QUALITY_COOKING: 'CommonGameTag' = 703
TOOLTIP_CRAFTING_QUALITY_DRINKS: 'CommonGameTag' = 704
TOOLTIP_CRAFTING_QUALITY_PAINTING: 'CommonGameTag' = 705
TOOLTIP_ECO_FOOTPRINT_NEGATIVE: 'CommonGameTag' = 67624
TOOLTIP_ECO_FOOTPRINT_POSITIVE: 'CommonGameTag' = 67623
TOOLTIP_ENVIRONMENT_SCORE_NEGATIVE: 'CommonGameTag' = 2389
TOOLTIP_ENVIRONMENT_SCORE_POSITIVE: 'CommonGameTag' = 2390
TOOLTIP_EP09_ECO_FOOTPRINT_NEGATIVE: 'CommonGameTag' = 2422
TOOLTIP_EP09_ECO_FOOTPRINT_POSITIVE: 'CommonGameTag' = 2421
TOOLTIP_HIGH_FIRE_RESISTANCE: 'CommonGameTag' = 2392
TOOLTIP_HIGH_WATER_RESISTANCE: 'CommonGameTag' = 2394
TOOLTIP_LOW_FIRE_RESISTANCE: 'CommonGameTag' = 2391
TOOLTIP_LOW_WATER_RESISTANCE: 'CommonGameTag' = 2393
TOOLTIP_MISC_CATS_ONLY: 'CommonGameTag' = 2027
TOOLTIP_MISC_CHILDREN_ONLY: 'CommonGameTag' = 783
TOOLTIP_MISC_COMFORT: 'CommonGameTag' = 784
TOOLTIP_MISC_DOGS_ONLY: 'CommonGameTag' = 2026
TOOLTIP_MISC_PETS_ONLY: 'CommonGameTag' = 2025
TOOLTIP_MISC_RELIABILITY: 'CommonGameTag' = 907
TOOLTIP_MISC_TODDLER_ONLY: 'CommonGameTag' = 1667
TOOLTIP_MISC_UNBREAKABLE: 'CommonGameTag' = 731
TOOLTIP_MISC_UNCOMFORTABLE: 'CommonGameTag' = 747
TOOLTIP_MISC_UNCOMFORTABLE_FOR_ADULTS: 'CommonGameTag' = 940
TOOLTIP_MOOD_RELIEF_ANGRY: 'CommonGameTag' = 710
TOOLTIP_MOOD_RELIEF_BORED: 'CommonGameTag' = 711
TOOLTIP_MOOD_RELIEF_EMBARRASSED: 'CommonGameTag' = 712
TOOLTIP_MOOD_RELIEF_SAD: 'CommonGameTag' = 709
TOOLTIP_MOOD_RELIEF_STRESS: 'CommonGameTag' = 707
TOOLTIP_MOOD_RELIEF_UNCOMFORTABLE: 'CommonGameTag' = 708
TOOLTIP_MOTIVE_BLADDER: 'CommonGameTag' = 701
TOOLTIP_MOTIVE_ENERGY: 'CommonGameTag' = 698
TOOLTIP_MOTIVE_FUN: 'CommonGameTag' = 699
TOOLTIP_MOTIVE_HUNGER: 'CommonGameTag' = 702
TOOLTIP_MOTIVE_HYGIENE: 'CommonGameTag' = 697
TOOLTIP_MOTIVE_SOCIAL: 'CommonGameTag' = 700
TOOLTIP_OFF_THE_GRID: 'CommonGameTag' = 2207
TOOLTIP_POWER_CONSUMER: 'CommonGameTag' = 2398
TOOLTIP_POWER_PRODUCER: 'CommonGameTag' = 2397
TOOLTIP_SKILL_ACTING: 'CommonGameTag' = 61637
TOOLTIP_SKILL_ARCHAEOLOGY: 'CommonGameTag' = 45110
TOOLTIP_SKILL_BARTENDING: 'CommonGameTag' = 717
TOOLTIP_SKILL_CHARISMA: 'CommonGameTag' = 729
TOOLTIP_SKILL_COMEDY: 'CommonGameTag' = 726
TOOLTIP_SKILL_COMMUNICATION: 'CommonGameTag' = 1670
TOOLTIP_SKILL_COOKING: 'CommonGameTag' = 713
TOOLTIP_SKILL_CREATIVITY: 'CommonGameTag' = 927
TOOLTIP_SKILL_DANCE: 'CommonGameTag' = 24615
TOOLTIP_SKILL_DJ: 'CommonGameTag' = 24614
TOOLTIP_SKILL_DOG_TRAINING: 'CommonGameTag' = 2023
TOOLTIP_SKILL_FITNESS: 'CommonGameTag' = 716
TOOLTIP_SKILL_FLOWER_ARRANGING: 'CommonGameTag' = 2115
TOOLTIP_SKILL_GARDENING: 'CommonGameTag' = 728
TOOLTIP_SKILL_GUITAR: 'CommonGameTag' = 727
TOOLTIP_SKILL_HANDINESS: 'CommonGameTag' = 719
TOOLTIP_SKILL_IMAGINATION: 'CommonGameTag' = 1669
TOOLTIP_SKILL_LOGIC: 'CommonGameTag' = 721
TOOLTIP_SKILL_MENTAL: 'CommonGameTag' = 928
TOOLTIP_SKILL_MISCHIEF: 'CommonGameTag' = 722
TOOLTIP_SKILL_MOTOR: 'CommonGameTag' = 929
TOOLTIP_SKILL_MOVEMENT: 'CommonGameTag' = 1668
TOOLTIP_SKILL_PAINTING: 'CommonGameTag' = 718
TOOLTIP_SKILL_PIANO: 'CommonGameTag' = 724
TOOLTIP_SKILL_PIPE_ORGAN: 'CommonGameTag' = 40978
TOOLTIP_SKILL_POTTY: 'CommonGameTag' = 1672
TOOLTIP_SKILL_PROGRAMMING: 'CommonGameTag' = 715
TOOLTIP_SKILL_PSYCHIC: 'CommonGameTag' = 8212
TOOLTIP_SKILL_RESEARCH_DEBATE: 'CommonGameTag' = 2269
TOOLTIP_SKILL_ROBOTICS: 'CommonGameTag' = 2270
TOOLTIP_SKILL_ROCKET_SCIENCE: 'CommonGameTag' = 720
TOOLTIP_SKILL_SINGING: 'CommonGameTag' = 55434
TOOLTIP_SKILL_SOCIAL: 'CommonGameTag' = 930
TOOLTIP_SKILL_THINKING: 'CommonGameTag' = 1671
TOOLTIP_SKILL_VET: 'CommonGameTag' = 2024
TOOLTIP_SKILL_VIDEO_GAMING: 'CommonGameTag' = 714
TOOLTIP_SKILL_VIOLIN: 'CommonGameTag' = 725
TOOLTIP_SKILL_WELLNESS: 'CommonGameTag' = 18459
TOOLTIP_SKILL_WOOHOO: 'CommonGameTag' = 730
TOOLTIP_SKILL_WRITING: 'CommonGameTag' = 723
TOOLTIP_WATER_CONSUMER: 'CommonGameTag' = 2400
TOOLTIP_WATER_PRODUCER: 'CommonGameTag' = 2399
TOP_BIKINI: 'CommonGameTag' = 1236
TOP_BLOUSE: 'CommonGameTag' = 155
TOP_BRASSIERE: 'CommonGameTag' = 944
TOP_BUTTON_UPS: 'CommonGameTag' = 395
TOP_JACKET: 'CommonGameTag' = 295
TOP_POLO: 'CommonGameTag' = 943
TOP_SHIRT_TEE: 'CommonGameTag' = 296
TOP_SUIT_JACKET: 'CommonGameTag' = 942
TOP_SWEATER: | |
return Concatenate([zpre, InsertAxis(arg, axis, 1), zpost], axis)
def diagonalize(arg, axis=-1, newaxis=-1):
    '''Insert axis ``newaxis`` and place ``axis`` on the diagonal with it.'''
    arg = asarray(arg)
    diag_axis = numeric.normdim(arg.ndim, axis)
    new_axis = numeric.normdim(arg.ndim + 1, newaxis)
    # The diagonalized axis must precede the newly inserted one.
    assert diag_axis < new_axis
    return Diagonalize(arg, diag_axis, new_axis)
def concatenate(args, axis=0):
    '''Join ``args`` along ``axis`` after broadcasting to a common ndim.'''
    aligned = _matchndim(*args)
    return Concatenate(aligned, numeric.normdim(aligned[0].ndim, axis))
def cross(arg1, arg2, axis):
    '''Cross product of two arrays along a length-3 ``axis``.'''
    lhs, rhs = _numpy_align(arg1, arg2)
    naxis = numeric.normdim(lhs.ndim, axis)
    assert lhs.shape[naxis] == 3  # cross product is defined for 3-vectors only
    return Cross(lhs, rhs, naxis)
def outer(arg1, arg2=None, axis=0):
    '''outer product

    With ``arg2`` omitted, computes the outer product of ``arg1`` with
    itself. Mismatched ndims are deprecated but still accepted.
    '''
    if arg2 is None:
        arg2 = arg1
    elif arg1.ndim != arg2.ndim:
        warnings.deprecation('varying ndims in function.outer; this will be forbidden in future')
    arg1, arg2 = _matchndim(arg1, arg2)
    naxis = numeric.normdim(arg1.ndim, axis)
    # Offset the inserted axes by one so the two operands broadcast.
    return expand_dims(arg1, naxis + 1) * expand_dims(arg2, naxis)
def sign(arg):
  'elementwise sign of ``arg``'
  return Sign(asarray(arg))
def eig(arg, axes=(-2,-1), symmetric=False):
  '''Return a :class:`Tuple` of (diagonalized eigenvalues, eigenvectors) of the
  matrices spanned by the two ``axes`` of ``arg``.'''
  arg = asarray(arg)
  # normalize negative indices and order the two matrix axes
  ax1, ax2 = _norm_and_sort(arg.ndim, axes)
  assert ax2 > ax1 # strict
  # move the matrix axes to the end, decompose, then undo the transpose
  trans = [i for i in range(arg.ndim) if i not in (ax1, ax2)] + [ax1, ax2]
  transposed = transpose(arg, trans)
  eigval, eigvec = Eig(transposed, symmetric)
  return Tuple([transpose(diagonalize(eigval), _invtrans(trans)), transpose(eigvec, _invtrans(trans))])
def polyfunc(coeffs, dofs, ndofs, transforms, *, issorted=True):
  '''
  Create an inflated :class:`Polyval` with coefficients ``coeffs`` and
  corresponding dofs ``dofs``. The arguments ``coeffs``, ``dofs`` and
  ``transforms`` are assumed to have matching order. In addition, if
  ``issorted`` is true, the ``transforms`` argument is assumed to be sorted.
  '''
  transforms = tuple(transforms)
  if issorted:
    dofs = tuple(dofs)
    coeffs = tuple(coeffs)
  else:
    # reorder dofs and coeffs so they match the sorted transforms
    dofsmap = dict(zip(transforms, dofs))
    coeffsmap = dict(zip(transforms, coeffs))
    transforms = tuple(sorted(transforms))
    dofs = tuple(dofsmap[trans] for trans in transforms)
    coeffs = tuple(coeffsmap[trans] for trans in transforms)
  # all transforms must originate from the same number of dimensions
  fromdims, = set(transform[-1].fromdims for transform in transforms)
  promote = Promote(fromdims, trans=TRANS)
  # select the element via its transform, then gather its dofs and coefficients
  index = FindTransform(transforms, promote)
  dofmap = DofMap(dofs, index=index)
  depth = Get([len(trans) for trans in transforms], axis=0, item=index)
  points = ApplyTransforms(TailOfTransform(promote, depth, fromdims))
  func = Polyval(Elemwise(coeffs, index, dtype=float), points)
  return Inflate(func, dofmap, ndofs, axis=0)
def elemwise(fmap, shape, default=None):
  '''Return an :class:`Elemwise` that evaluates per element to ``fmap[trans]``.

  ``fmap`` maps element transforms to values; ``shape`` is retained for
  interface compatibility. ``default`` is no longer supported and raising it
  is the only use of that parameter.
  '''
  if default is not None:
    # BUG FIX: `raise NotImplemented(...)` raised a TypeError because the
    # NotImplemented singleton is not an exception; NotImplementedError is.
    raise NotImplementedError('default is not supported anymore')
  transforms = tuple(sorted(fmap))
  values = tuple(fmap[trans] for trans in transforms)
  # all transforms must originate from the same number of dimensions
  fromdims, = set(transform[-1].fromdims for transform in transforms)
  promote = Promote(fromdims, trans=TRANS)
  index = FindTransform(transforms, promote)
  return Elemwise(values, index, dtype=float)
def take(arg, index, axis):
  '''Take entries of ``arg`` along ``axis`` at positions ``index``.

  A boolean ``index`` acts as a mask over the full axis; a constant boolean
  index is lowered to :class:`Mask`, a non-constant one is converted to
  integer indices via :func:`find`.'''
  arg = asarray(arg)
  axis = numeric.normdim(arg.ndim, axis)
  index = asarray(index)
  assert index.ndim == 1
  if index.dtype == bool:
    # a boolean index must cover the indexed axis entirely
    assert index.shape[0] == arg.shape[axis]
    if index.isconstant:
      mask, = index.eval()
      return Mask(arg, mask, axis)
    index = find(index)
  return Take(arg, index, axis)
def find(arg):
  'return the indices of the true entries of a 1D boolean array'
  arg = asarray(arg)
  assert arg.ndim == 1 and arg.dtype == bool
  if not arg.isconstant:
    return Find(arg)
  # constant masks can be resolved to concrete indices immediately
  value, = arg.eval()
  indices, = value.nonzero()
  return asarray(indices)
def mask(arg, mask, axis=0):
  'retain the entries of ``arg`` along ``axis`` selected by the boolean ``mask``'
  arg = asarray(arg)
  ax = numeric.normdim(arg.ndim, axis)
  assert numeric.isarray(mask) and mask.ndim == 1 and mask.dtype == bool
  # the mask must cover the selected axis entirely
  assert len(mask) == arg.shape[ax]
  return Mask(arg, mask, ax)
def J(geometry, ndims=None):
  r'''
  Return :math:`\sqrt{|J^T J|}` with :math:`J` the gradient of ``geometry`` to
  the local coordinate system with ``ndims`` dimensions (``localgradient(geom,
  ndims)``).
  '''
  # NOTE: docstring made raw to avoid the invalid escape sequence '\s'
  if ndims is None:
    # defer the choice of ndims until evaluation time
    return DelayedJacobian(geometry)
  elif ndims < 0:
    # negative ndims counts down from the geometry's length
    ndims += len(geometry)
  return jacobian(geometry, ndims)
def unravel(func, axis, shape):
  '''Unravel ``axis`` of ``func`` into the multiple dimensions of ``shape``.'''
  func = asarray(func)
  axis = numeric.normdim(func.ndim, axis)
  shape = tuple(shape)
  # the unraveled dimensions must account for every entry of the original axis;
  # numpy.prod replaces the deprecated numpy.product alias (removed in NumPy 2.0)
  assert func.shape[axis] == numpy.prod(shape)
  return Unravel(func, axis, shape)
def ravel(func, axis):
  'merge axes ``axis`` and ``axis+1`` of ``func`` into a single axis'
  func = asarray(func)
  return Ravel(func, numeric.normdim(func.ndim-1, axis))
@replace
def replace_arguments(value, arguments):
  '''Replace :class:`Argument` objects in ``value``.
  Replace :class:`Argument` objects in ``value`` according to the ``arguments``
  map, taking into account derivatives to the local coordinates.
  Args
  ----
  value : :class:`Array`
      Array to be edited.
  arguments : :class:`collections.abc.Mapping` with :class:`Array`\\s as values
      :class:`Argument`\\s replacements. The key correspond to the ``name``
      passed to an :class:`Argument` and the value is the replacement.
  Returns
  -------
  :class:`Array`
      The edited ``value``.
  '''
  # only Argument leaves with a registered replacement are rewritten here;
  # other nodes are presumably recursed into by the @replace decorator —
  # returning None signals 'no substitution' for them
  if isinstance(value, Argument) and value._name in arguments:
    v = asarray(arguments[value._name])
    # the replacement must match the argument's underived shape
    assert value.shape[:value.ndim-value._nderiv] == v.shape
    # reapply any local-coordinate derivatives taken of the original argument
    for ndims in value.shape[value.ndim-value._nderiv:]:
      v = localgradient(v, ndims)
    return v
def zero_argument_derivatives(func):
  '''Deprecated no-op retained for backwards compatibility; returns ``func`` unchanged.'''
  warnings.deprecation('function.zero_argument_derivatives can be safely removed: zero_argument_derivatives(func) -> func')
  return func
def _eval_ast(ast, functions):
  '''evaluate ``ast`` generated by :func:`nutils.expression.parse`

  ``ast`` is a nested ``(opcode, *operands)`` tuple; ``functions`` maps the
  names usable in ``call`` nodes to unary callables.'''
  op, *args = ast
  if op is None:
    # leaf node: the value is stored directly in the ast
    value, = args
    return value
  # evaluate operands lazily, then dispatch on the opcode
  args = (_eval_ast(arg, functions) for arg in args)
  if op == 'group':
    array, = args
    return array
  elif op == 'arg':
    name, *shape = args
    return Argument(name, shape)
  elif op == 'substitute':
    # operands alternate between Argument objects and their replacement values
    array, *arg_value_pairs = args
    subs = {}
    assert len(arg_value_pairs) % 2 == 0
    for arg, value in zip(arg_value_pairs[0::2], arg_value_pairs[1::2]):
      assert isinstance(arg, Argument) and arg._nderiv == 0
      assert arg._name not in subs
      subs[arg._name] = value
    return replace_arguments(array, subs)
  elif op == 'call':
    func, arg = args
    return functions[func](arg)
  elif op == 'd':
    geom, = args
    return DelayedJacobian(geom)
  elif op == 'eye':
    length, = args
    return eye(length)
  elif op == 'normal':
    geom, = args
    return normal(geom)
  elif op == 'getitem':
    array, dim, index = args
    return get(array, dim, index)
  elif op == 'trace':
    array, n1, n2 = args
    return trace(array, n1, n2)
  elif op == 'sum':
    array, axis = args
    return sum(array, axis)
  elif op == 'concatenate':
    return concatenate(args, axis=0)
  elif op == 'grad':
    array, geom = args
    return grad(array, geom)
  elif op == 'surfgrad':
    # surface gradient: gradient in the (len(geom)-1)-dimensional tangent space
    array, geom = args
    return grad(array, geom, len(geom)-1)
  elif op == 'derivative':
    func, target = args
    return derivative(func, target)
  elif op == 'append_axis':
    array, length = args
    return repeat(asarray(array)[..., None], length, -1)
  elif op == 'transpose':
    array, trans = args
    return transpose(array, trans)
  elif op == 'jump':
    array, = args
    return jump(array)
  elif op == 'mean':
    array, = args
    return mean(array)
  elif op == 'neg':
    array, = args
    return -asarray(array)
  elif op in ('add', 'sub', 'mul', 'truediv', 'pow'):
    # binary operators map directly onto the python operator protocol
    left, right = args
    return getattr(operator, '__{}__'.format(op))(asarray(left), asarray(right))
  else:
    raise ValueError('unknown opcode: {!r}'.format(op))
class Namespace:
'''Namespace for :class:`Array` objects supporting assignments with tensor expressions.
The :class:`Namespace` object is used to store :class:`Array` objects.
>>> from nutils import function
>>> ns = function.Namespace()
>>> ns.A = function.zeros([3, 3])
>>> ns.x = function.zeros([3])
>>> ns.c = 2
In addition to the assignment of :class:`Array` objects, it is also possible
to specify an array using a tensor expression string — see
:func:`nutils.expression.parse` for the syntax. All attributes defined in
this namespace are available as variables in the expression. If the array
defined by the expression has one or more dimensions the indices of the axes
should be appended to the attribute name. Examples:
>>> ns.cAx_i = 'c A_ij x_j'
>>> ns.xAx = 'x_i A_ij x_j'
It is also possible to simply evaluate an expression without storing its
value in the namespace by passing the expression to the method ``eval_``
suffixed with appropriate indices:
>>> ns.eval_('2 c')
Array<>
>>> ns.eval_i('c A_ij x_j')
Array<3>
>>> ns.eval_ij('A_ij + A_ji')
Array<3,3>
For zero and one dimensional expressions the following shorthand can be used:
>>> '2 c' @ ns
Array<>
>>> 'A_ij x_j' @ ns
Array<3>
When evaluating an expression through this namespace the following functions
are available: ``opposite``, ``sin``, ``cos``, ``tan``, ``sinh``, ``cosh``,
``tanh``, ``arcsin``, ``arccos``, ``arctan2``, ``arctanh``, ``exp``, ``abs``,
``ln``, ``log``, ``log2``, ``log10``, ``sqrt`` and ``sign``.
Args
----
default_geometry_name : :class:`str`
The name of the default geometry. This argument is passed to
:func:`nutils.expression.parse`. Default: ``'x'``.
Attributes
----------
arg_shapes : view of :class:`dict`
A readonly map of argument names and shapes.
default_geometry_name : :class:`str`
The name of the default geometry. See argument with the same name.
'''
__slots__ = '_attributes', '_arg_shapes', 'arg_shapes', 'default_geometry_name'
_re_assign = re.compile('^([a-zA-Zα-ωΑ-Ω][a-zA-Zα-ωΑ-Ω0-9]*)(_[a-z]+)?$')
_functions = dict(
opposite=opposite, sin=sin, cos=cos, tan=tan, sinh=sinh, cosh=cosh,
tanh=tanh, arcsin=arcsin, arccos=arccos, arctan2=arctan2, arctanh=arctanh,
exp=exp, abs=abs, ln=ln, log=ln, log2=log2, log10=log10, sqrt=sqrt,
sign=sign,
)
_functions_nargs = {k: len(inspect.signature(v).parameters) for k, v in _functions.items()}
@types.apply_annotations
def __init__(self, *, default_geometry_name='x'):
if not isinstance(default_geometry_name, str):
raise ValueError('default_geometry_name: Expected a str, got {!r}.'.format(default_geometry_name))
if '_' in default_geometry_name or not self._re_assign.match(default_geometry_name):
raise ValueError('default_geometry_name: Invalid variable name: {!r}.'.format(default_geometry_name))
super().__setattr__('_attributes', {})
super().__setattr__('_arg_shapes', {})
super().__setattr__('arg_shapes', builtin_types.MappingProxyType(self._arg_shapes))
super().__setattr__('default_geometry_name', default_geometry_name)
super().__init__()
@property
def default_geometry(self):
''':class:`nutils.function.Array`: The default geometry, shorthand for ``getattr(ns, ns.default_geometry_name)``.'''
return getattr(self, self.default_geometry_name)
def __call__(*args, **subs):
'''Return a copy with arguments replaced by ``subs``.
Return a copy of this namespace with :class:`Argument` objects replaced
according to ``subs``.
Args
----
**subs : :class:`dict` of :class:`str` and :class:`nutils.function.Array` objects
Replacements of the :class:`Argument` objects, identified by their names.
Returns
-------
ns : :class:`Namespace`
The copy of | |
# Source: crazynayan/tpf2 — flask_app/test_data_forms.py (extraction artifact repaired)
from typing import List
from flask import request
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, BooleanField, IntegerField, SelectField, TextAreaField
from wtforms.validators import InputRequired, ValidationError, NumberRange, Length
from wtforms.widgets import Input
from config import Config
from flask_app import tpf2_app
from flask_app.server import Server
# Help texts rendered on the data-entry forms. These are runtime strings shown
# to the user, not documentation — do not reword without checking the UI.
FIELD_DATA_TEXT: str = """
Enter multiple fields and data separated by comma. The field and data should be separated by colon. All fields should be
from a single macro mentioned above. Data by default is in hex characters. Odd number of digit will be considered a
4 byte number. Prefix with 0 to make it odd for enforcing a number. Non hex characters are considered as text. Prefix
with quote to enforce text.
"""
PNR_OUTPUT_FIELD_DATA_TEXT: str = """
Enter multiple fields with attributes separated by comma. Format of each field is FieldName:Length:ItemNumber.
FieldName should be from PNR macros. Length should start with L followed by a number. If it is not specified
then the length from the data macro will be automatically determined. ItemNumber should start with I followed by a
number. If it is not specified then item number 1 is assumed. An e.g. is as follows
PR00_G0_TYP,PR00_G0_TYP:I2,PR00_G0_TYP:L2:I3
"""
PNR_INPUT_FIELD_DATA_TEXT: str = """
Enter multiple fields with attributes separated by comma. Leave it blank if you want to provide PNR text.
Format of each field is FieldName:HexData:ItemNumber. FieldName should be from PNR macros.
ItemNumber should start with I followed by a number. All item numbers should be in sequence without gaps.
An e.g. is as follows PR00_G0_BAS_0_AAC:E2E2:I1,PR00_G0_TYP:02:I2
"""
def form_validate_field_data(data: str) -> str:
    """Normalize one data token to its canonical upper-case hex string.

    Rules (applied after strip/upper): a leading quote forces EBCDIC text; a
    leading minus is a 32-bit negative number; an odd-length all-digit token is
    a 32-bit positive number; otherwise valid hex is kept (zero-padded to an
    even length) and anything else is encoded as EBCDIC (cp037) text.
    Raises ValidationError on malformed or out-of-range numbers.
    """
    data = data.strip().upper()
    if data.startswith("'"):
        text = data[1:]
        if not text:
            raise ValidationError("There needs to be some text after a single quote")
        return text.encode("cp037").hex().upper()
    if data.startswith("-"):
        if len(data) == 1 or not data[1:].isdigit():
            raise ValidationError("Invalid Negative Number")
        number = int(data)
        if number < -0x80000000:
            raise ValidationError(f"Negative Number cannot be less than {-0x80000000}")
        # two's-complement representation masked to the register width
        return f"{number & tpf2_app.config['REG_MAX']:08X}"
    if len(data) % 2 == 1 and data.isdigit():
        number = int(data)
        if number > 0x7FFFFFFF:
            raise ValidationError(f"Number cannot be greater than {0x7FFFFFFF}")
        return f"{number:08X}"
    try:
        int(data, 16)
    except ValueError:
        # not hex: treat the token as EBCDIC text
        return data.encode("cp037").hex().upper()
    return f"0{data}" if len(data) % 2 else data
def form_validate_multiple_field_data(data: str, macro_name: str) -> str:
    """Validate comma-separated field:data pairs against ``macro_name``.

    Each field must exist and belong to ``macro_name``; each data token is
    normalized via form_validate_field_data. Returns the rebuilt string.
    """
    validated = list()
    for pair in data.split(","):
        if pair.count(":") != 1:
            raise ValidationError(f"Include a single colon : to separate field and data - {pair}")
        field, _, value = pair.partition(":")
        field = field.strip().upper()
        label_ref = Server.search_field(field)
        if not label_ref:
            raise ValidationError(f"Field name not found - {field}")
        if macro_name != label_ref["name"]:
            raise ValidationError(f"Field not in the same macro - {field} not in {macro_name}")
        validated.append(f"{field}:{form_validate_field_data(value)}")
    return ",".join(validated)
def form_field_lookup(data: str, macro_name: str) -> str:
    """Return the upper-cased field name after confirming it belongs to ``macro_name``."""
    data = data.upper()
    label_ref = Server.search_field(data)
    if not label_ref:
        raise ValidationError(f"Field name not found - {data}")
    if label_ref["name"] != macro_name:
        raise ValidationError(f"Field not in the same macro - {data} not in {macro_name}")
    return data
def form_validate_macro_name(macro_name: str) -> str:
    """Return the upper-cased macro name after confirming it is a known macro."""
    macro_name = macro_name.upper()
    label_ref = Server.search_field(macro_name)
    # a macro resolves to itself; anything else is a field or unknown
    if not label_ref or label_ref["name"] != macro_name:
        raise ValidationError("This is not a valid macro name")
    return macro_name
class TestDataForm(FlaskForm):
    """Create or edit the header of a test data set (name, segment, stop segments)."""
    name = StringField("Name of Test Data (Must be unique in the system)", validators=[InputRequired()])
    seg_name = StringField("Segment Name (Must exists in the system)", validators=[InputRequired()])
    stop_segments = StringField("Stop Segment Name List (Separate multiple segments with comma). Optional")
    save = SubmitField("Save & Continue - Add Further Data")
    def __init__(self, test_data: dict = None, *args, **kwargs):
        """Prefill the fields from an existing ``test_data`` dict on GET requests."""
        super().__init__(*args, **kwargs)
        self.segments: List[str] = list()
        self.stop_segment_list: List[str] = list()
        self.test_data: dict = test_data if test_data else dict()
        if test_data and request.method == "GET":
            self.name.data = test_data["name"]
            self.seg_name.data = test_data["seg_name"]
            stop_segments: List[str] = test_data["stop_segments"]
            self.stop_segments.data = ", ".join(stop_segments)
    def validate_seg_name(self, seg_name: StringField):
        """Upper-case the segment and ensure it exists on the server."""
        seg_name.data = seg_name.data.upper()
        segment: str = seg_name.data
        # fetch the segment list lazily and only once per form instance
        if not self.segments:
            response = Server.segments()
            self.segments: List[str] = response["segments"] if "segments" in response else list()
        if segment not in self.segments:
            raise ValidationError(f"{segment} not found")
        return
    def validate_stop_segments(self, stop_segments: StringField):
        """Parse the comma-separated list; each entry must be 4 alphanumerics."""
        stop_segments.data = stop_segments.data.upper().strip()
        if not stop_segments.data:
            return
        self.stop_segment_list: List[str] = stop_segments.data.split(",")
        self.stop_segment_list = [segment.strip() for segment in self.stop_segment_list]
        invalid_segments: List[str] = [segment for segment in self.stop_segment_list
                                       if len(segment) != 4 or not segment.isalnum()]
        if invalid_segments:
            raise ValidationError(f"{', '.join(invalid_segments)} are invalid segments.")
        return
    def validate_name(self, name: StringField):
        """Reject a name already used by a different test data set."""
        if (not self.test_data or self.test_data["name"] != name.data) and Server.get_test_data_by_name(name.data):
            raise ValidationError(f"The name '{name.data}' already exists - Please use an unique name")
        return
class DeleteForm(FlaskForm):
    """Single-button confirmation form used by the delete views."""
    submit = SubmitField("Yes - Delete")
class RegisterForm(FlaskForm):
    """One checkbox per general-purpose register (R0-R15) to include in the test data."""
    r0 = BooleanField("R0")
    r1 = BooleanField("R1")
    r2 = BooleanField("R2")
    r3 = BooleanField("R3")
    r4 = BooleanField("R4")
    r5 = BooleanField("R5")
    r6 = BooleanField("R6")
    r7 = BooleanField("R7")
    r8 = BooleanField("R8")
    r9 = BooleanField("R9")
    r10 = BooleanField("R10")
    r11 = BooleanField("R11")
    r12 = BooleanField("R12")
    r13 = BooleanField("R13")
    r14 = BooleanField("R14")
    r15 = BooleanField("R15")
    save = SubmitField("Save & Continue - Add Further Data")
class FieldSearchForm(FlaskForm):
    """Look up a data-macro field by name."""
    field = StringField("Field name", validators=[InputRequired()])
    search = SubmitField("Search")
    @staticmethod
    def validate_field(_, field: StringField) -> None:
        field.data = field.data.upper()
        label_ref = Server.search_field(field.data)
        if not label_ref:
            raise ValidationError("Field name not found")
        # NOTE: on success the field's data becomes the label_ref dict, not a str
        field.data = label_ref
class FieldLengthForm(FlaskForm):
    """Capture a field length (1-4095) and an optional base register for a macro."""
    length = IntegerField("Length", validators=[NumberRange(1, 4095, "Length can be from 1 to 4095")])
    base_reg = StringField("Base Register - Keep it blank for default macros like AAA, ECB, GLOBAL, IMG etc.")
    save = SubmitField("Save & Continue - Add Further Data")
    def __init__(self, macro_name: str, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # the macro determines whether a base register is mandatory
        self.macro_name = macro_name
    def validate_base_reg(self, base_reg: StringField) -> None:
        """Non-default macros require an addressable base register (not blank, not R0)."""
        base_reg.data = base_reg.data.upper()
        if base_reg.data and base_reg.data not in tpf2_app.config["REGISTERS"]:
            raise ValidationError("Invalid Base Register - Register can be from R0 to R15")
        if (not base_reg.data or base_reg.data == "R0") and self.macro_name not in tpf2_app.config["DEFAULT_MACROS"]:
            raise ValidationError(f"Base Register cannot be blank or R0 for macro {self.macro_name}")
        return
class FieldDataForm(FlaskForm):
    """Capture a variation selection and one field's data value."""
    variation = SelectField("Select variation or choose 'New Variation' to create a new variation", coerce=int)
    variation_name = StringField("New Variation Name - Leave it blank for existing variation")
    field_data = StringField("Enter Data - Input hex characters. Odd number of digit will be considered a number. "
                             "Prefix with 0 to make the number a digit. Non hex characters are considered as text. "
                             "Prefix with quote to enforce text.", validators=[InputRequired()])
    save = SubmitField("Save & Continue - Add Further Data")
    @staticmethod
    def validate_field_data(_, field_data: StringField) -> None:
        # normalize the raw input into the canonical hex representation
        field_data.data = form_validate_field_data(field_data.data)
def init_variation(variation: SelectField, variation_name: StringField, test_data_id: str, v_type: str) -> dict:
    """Populate the variation SelectField and resolve the chosen variation.

    Fills ``variation.choices`` from the server (plus a trailing (-1, 'New
    Variation') entry). On POST returns {"variation": number,
    "variation_name": name} for the selection; otherwise returns an empty dict.
    """
    variations = Server.get_variations(test_data_id, v_type)
    if not current_user.is_authenticated:
        return dict()
    variation.choices = [(item["variation"], f"{item['variation_name']} ({item['variation']})") for item in variations]
    variation.choices.append((-1, "New Variation"))
    if request.method != "POST":
        return dict()
    if variation.data == -1:
        # a new variation gets the next number after the last existing one
        variation_name.data = variation_name.data.strip()
        chosen_variation = variations[-1]["variation"] + 1 if variations else 0
    else:
        # BUG FIX: previously took the FIRST choice's name unconditionally
        # (the generator also shadowed both outer names); pick the name that
        # matches the selected variation value.
        variation_name.data = next(name for value, name in variation.choices if value == variation.data)
        chosen_variation = variation.data
    return {"variation": chosen_variation, "variation_name": variation_name.data}
class HeapForm(FlaskForm):
variation = SelectField("Select variation or choose 'New Variation' to create a new variation", coerce=int)
variation_name = StringField("New Variation Name - Leave it blank for existing variation")
heap_name = StringField("Enter Heap Name - Must be alphanumeric", validators=[InputRequired()])
hex_data = StringField("Enter input data in hex format to initialize the heap. Leave it blank to init with zeroes")
seg_name = StringField("Segment Name. Leave it blank to either init with zeroes or with hex data")
field_data = TextAreaField("Enter multiple fields and data separated by comma. The field and data should be "
"separated by colon. Data should be in hex format. Leave it blank to either init with "
"zeroes or with hex data", render_kw={"rows": "5"})
save = SubmitField("Save & Continue - Add Further Data")
    def __init__(self, test_data_id: str, *args, **kwargs):
        """Build the form and, on POST, submit the heap to the server.

        The server response is stored in ``self.response`` for the per-field
        validators to inspect."""
        super().__init__(*args, **kwargs)
        body = init_variation(self.variation, self.variation_name, test_data_id, "core")
        self.response: dict = dict()
        if request.method == "POST":
            body["heap_name"] = self.heap_name.data
            # strip blanks and upper-case the raw hex input
            body["hex_data"] = "".join(char.upper() for char in self.hex_data.data if char != " ")
            body["seg_name"] = self.seg_name.data.upper()
            body["field_data"] = self.field_data.data
            self.response = Server.add_input_heap(test_data_id, body)
    def validate_variation(self, variation):
        """Surface server-side errors; top-level errors are reported on this field."""
        if "error" in self.response and self.response["error"] and \
                "message" in self.response and self.response["message"]:
            raise ValidationError(self.response["message"])
        if "error_fields" in self.response and "variation" in self.response["error_fields"]:
            raise ValidationError(self.response["error_fields"]["variation"])
        # -1 marks 'New Variation'; normalize it for downstream processing
        if variation.data == -1:
            variation.data = 0
    def validate_heap_name(self, _) -> None:
        # surface the server-side validation error for this field, if any
        if "error_fields" in self.response and "heap_name" in self.response["error_fields"]:
            raise ValidationError(self.response["error_fields"]["heap_name"])
    def validate_hex_data(self, _):
        # surface the server-side validation error for this field, if any
        if "error_fields" in self.response and "hex_data" in self.response["error_fields"]:
            raise ValidationError(self.response["error_fields"]["hex_data"])
def validate_seg_name(self, _):
if "error_fields" in self.response and "seg_name" | |
# Source: nightjuggler/brc — gensvg.py (extraction artifact repaired)
#!/usr/bin/python
import math
# Layout constants for the Black Rock City street grid.
# Units are presumably feet — TODO confirm against the source survey data.
BLOCK_WIDTH = 200
HALF_STREET_WIDTH = 20
STREET_WIDTH = 2 * HALF_STREET_WIDTH
MAN_TO_ESPLANADE = 2500 # Distance from the center of the Man to the center of Esplanade
ESPLANADE_TO_A = 400 # Width of the block from Esplanade to A
MAN_TO_CENTER_CAMP = 2907
CENTER_CAMP = (0, MAN_TO_CENTER_CAMP)
CENTER_CAMP_RADIUS1 = 190
CENTER_CAMP_RADIUS2 = 330
CENTER_CAMP_RADIUS3 = (MAN_TO_ESPLANADE + HALF_STREET_WIDTH + ESPLANADE_TO_A
	+ 3 * (STREET_WIDTH + BLOCK_WIDTH) - MAN_TO_CENTER_CAMP)
# Flare angles are in degrees
CENTER_CAMP_FLARE_ANGLE = 30
FOUR_THIRTY_FLARE_ANGLE = 28
THREE_O_CLOCK_FLARE_ANGLE = 20
PLAZA_RADIUS = 125
def rotatePoint(radians, point):
	'Rotate a single (x, y) point counter-clockwise about the origin.'
	cos_a = math.cos(radians)
	sin_a = math.sin(radians)
	px, py = point
	return (px*cos_a - py*sin_a, px*sin_a + py*cos_a)
def rotatePoints(radians, points):
	'Rotate a sequence of (x, y) points counter-clockwise about the origin.'
	cos_a = math.cos(radians)
	sin_a = math.sin(radians)
	return [(px*cos_a - py*sin_a, px*sin_a + py*cos_a) for px, py in points]
def radialRadians(radialIndex):
	'Angle in radians of a radial street; index 4 is the 3:00 axis (angle 0).'
	return (radialIndex - 4) * math.pi / 24
def radialDegrees(radialIndex):
	'Angle in degrees of a radial street; index 4 is the 3:00 axis (angle 0).'
	return (radialIndex - 4) * 180.0 / 24
def genThreeOClock():
	'Build the ring radii and the left/right edge points of the 3:00 street.'
	# Esplanade is bounded by half a street width on either side of its center.
	r = MAN_TO_ESPLANADE - HALF_STREET_WIDTH
	radii = [r, r + STREET_WIDTH, r + STREET_WIDTH + ESPLANADE_TO_A]
	r = radii[-1]
	# Each lettered street B..L contributes its near and far edge.
	for street in 'BCDEFGHIJKL':
		r += STREET_WIDTH
		radii.append(r)
		r += BLOCK_WIDTH
		radii.append(r)
	radii.append(r + STREET_WIDTH)
	# Points where each ring crosses the two edges of the 3:00 street.
	half = HALF_STREET_WIDTH
	half_sq = half * half
	pointsL = []
	pointsR = []
	for ring in radii:
		x = math.sqrt(ring*ring - half_sq)
		pointsR.append((x, half))
		pointsL.append((x, -half))
	return radii, pointsL, pointsR
class Path(object):
	'''Accumulate SVG path commands for the 3:00 side and mirror them to 9:00.

	Every command is recorded twice: path3 as given, path9 with x negated
	(and arc sweep flipped) so the city's symmetric half comes for free.'''
	def __init__(self):
		self.path3 = []
		self.path9 = []
	def moveto(self, point):
		x = int(round(point[0]))
		y = int(round(point[1]))
		self.path3.append('M {} {}'.format(x, y))
		self.path9.append('M -{} {}'.format(x, y))
	def lineto(self, point):
		x = int(round(point[0]))
		y = int(round(point[1]))
		self.path3.append('L {} {}'.format(x, y))
		self.path9.append('L -{} {}'.format(x, y))
	def arcto(self, endPoint, radius, sweep):
		x = int(round(endPoint[0]))
		y = int(round(endPoint[1]))
		self.path3.append('A {} {} 0 0 {} {} {}'.format(radius, radius, sweep, x, y))
		# the mirrored arc sweeps in the opposite direction
		sweep = 0 if sweep else 1
		self.path9.append('A {} {} 0 0 {} -{} {}'.format(radius, radius, sweep, x, y))
	def closepath(self):
		self.path3.append('Z')
		self.path9.append('Z')
		# Python 2 print statements: emit both paths and reset for the next block
		print '<path d="{}" />'.format(' '.join(self.path3))
		print '<path d="{}" />'.format(' '.join(self.path9))
		self.path3 = []
		self.path9 = []
def circleXcircle(r1, c2, r2):
	#
	# Return the two points s1 and s2 where the circle of radius r1 centered
	# at the Man (the origin) intersects the circle of radius r2 centered at c2.
	#
	# Derived from the general circle-circle intersection formulas at
	# http://2000clicks.com/mathhelp/GeometryConicSectionCircleIntersection.aspx
	# simplified for a first circle centered at (0, 0).
	#
	cx, cy = c2
	dd = cx*cx + cy*cy
	k = math.sqrt(((r1 + r2)**2 - dd) * (dd - (r1 - r2)**2))
	hx = cx / 2.0
	hy = cy / 2.0
	hx_dd = hx / dd
	hy_dd = hy / dd
	rr = r1*r1 - r2*r2
	fx = hx + hx_dd*rr
	fy = hy + hy_dd*rr
	return (fx + k*hy_dd, fy - k*hx_dd), (fx - k*hy_dd, fy + k*hx_dd)
def circleXcircleCC(r1, r2):
	#
	# Simplified circleXcircle for a second circle centered at Center Camp,
	# i.e. at (0, MAN_TO_CENTER_CAMP). Equivalent to
	# circleXcircle(r1, CENTER_CAMP, r2)[0].
	#
	# Substituting x**2 = r1**2 - y**2 (circle at the Man) into the equation
	# of the circle centered at (0, y2) and solving for y gives
	# y = (r1**2 - r2**2 + y2**2) / (2*y2).
	#
	cc = MAN_TO_CENTER_CAMP
	y = float(r1*r1 - r2*r2 + cc*cc) / (2*cc)
	return math.sqrt(r1*r1 - y*y), y
def lineXcircle(p1, p2, c, r):
	#
	# Return the two points where the line through p1 and p2 intersects the
	# circle of radius r centered at c.
	#
	x1, y1 = p1
	x2, y2 = p2
	cx, cy = c
	if abs(x2 - x1) < 1e-12:
		# A vertical line is x = x1; substitute into the circle equation
		# (x - cx)**2 + (y - cy)**2 = r**2 and solve for y.
		k = math.sqrt(r*r - (x1 - cx)**2)
		return (x1, cy + k), (x1, cy - k)
	# Otherwise the line is y = a + b*x; substitute into the circle equation
	# and solve for x with the quadratic formula.
	slope = float(y2 - y1) / (x2 - x1)
	intercept = y1 - slope*x1
	d = intercept - cy
	denom = slope*slope + 1
	base = cx - slope*d
	k = math.sqrt(r*r*denom - (slope*cx + d)**2)
	xa = (base + k) / denom
	xb = (base - k) / denom
	return (xa, intercept + slope*xa), (xb, intercept + slope*xb)
def flareLine(radius, point, angle):
	#
	# Return the point where the line through `point` at `angle` degrees
	# (clockwise from the x axis) meets the circle of the given radius
	# centered at the Man.
	#
	px, py = point
	slope = math.tan(math.radians(angle))
	intercept = py - slope*px
	denom = slope*slope + 1
	x = (math.sqrt(radius*radius*denom - intercept*intercept) - intercept*slope) / denom
	return x, intercept + slope*x
def flareFromPlaza(points, radii, radial, ring, toRing, angle):
	# Adjust the street-edge points of `radial` between `ring` and `toRing` so
	# the street flares open around the plaza centered at (radial, ring) by
	# `angle` degrees (half on each side).
	pointsL = points[radial][0]
	pointsR = points[radial][1]
	plazaCenter = (radii[ring] + HALF_STREET_WIDTH, 0)
	plazaCenter = rotatePoint(radialRadians(radial), plazaCenter)
	angle /= 2.0
	radialAngle = radialDegrees(radial)
	angle1 = radialAngle + angle
	angle2 = radialAngle - angle
	# walk inward or outward depending on which side of `ring` toRing lies
	rings = xrange(toRing, ring + 1) if toRing < ring else xrange(ring + 1, toRing + 1)
	for i in rings:
		pointsL[i] = flareLine(radii[i], plazaCenter, angle1)
		pointsR[i] = flareLine(radii[i], plazaCenter, angle2)
def plazaPath1(path, p1, p2, p3, p4, r12, r34, z1, z2, zr):
	# Block outline whose p1 corner is replaced by the plaza arc z1->z2
	# (p1 is intentionally unused; z1/z2 take its place).
	path.moveto(z1)
	path.arcto(z2, zr, 1)
	path.arcto(p2, r12, 1)
	path.lineto(p3)
	path.arcto(p4, r34, 0)
	path.closepath()
def plazaPath2(path, p1, p2, p3, p4, r12, r34, z1, z2, zr):
	# Block outline whose p2 corner is replaced by the plaza arc z1->z2
	# (p2 is intentionally unused; z1/z2 take its place).
	path.moveto(p1)
	path.arcto(z1, r12, 1)
	path.arcto(z2, zr, 1)
	path.lineto(p3)
	path.arcto(p4, r34, 0)
	path.closepath()
def plazaPath3(path, p1, p2, p3, p4, r12, r34, z1, z2, zr):
	# Block outline whose p3 corner is replaced by the plaza arc z1->z2
	# (p3 is intentionally unused; z1/z2 take its place).
	path.moveto(p1)
	path.arcto(p2, r12, 1)
	path.lineto(z1)
	path.arcto(z2, zr, 1)
	path.arcto(p4, r34, 0)
	path.closepath()
def plazaPath4(path, p1, p2, p3, p4, r12, r34, z1, z2, zr):
	# Block outline whose p4 corner is replaced by the plaza arc z1->z2
	# (p4 is intentionally unused; z1/z2 take its place).
	path.moveto(p1)
	path.arcto(p2, r12, 1)
	path.lineto(p3)
	path.arcto(z1, r34, 0)
	path.arcto(z2, zr, 1)
	path.closepath()
def addCircle(center, radius, stroke=None, fill=None):
	# Emit an SVG <circle> element (Python 2 print statement); stroke and
	# fill are optional attribute overrides.
	cx, cy = center
	print '<circle cx="{}" cy="{}" r="{}"{}{} />'.format(
		int(round(cx)),
		int(round(cy)),
		radius,
		'' if stroke is None else ' stroke="{}"'.format(stroke),
		'' if fill is None else ' fill="{}"'.format(fill),
	)
def previousRadial(radial, ring):
	'Index of the neighbouring radial; quarter-hour streets exist only beyond ring 14.'
	step = 1 if ring > 14 else 2
	return radial - step
def addPlaza(plazaHash, points, radii, radial, ring):
	# Register the four blocks touching the plaza at (radial, ring) in
	# plazaHash. Each entry is keyed by the 'radial,ring' of the block corner
	# that the plaza replaces, and stores the matching path builder plus the
	# two plaza-arc endpoints standing in for that corner.
	pointsL = points[radial][0]
	pointsR = points[radial][1]
	plazaCenter = (radii[ring] + HALF_STREET_WIDTH, 0)
	plazaCenter = rotatePoint(radialRadians(radial), plazaCenter)
	# i/j/k: ring indices just inside, just outside, and two outside the plaza
	i, j, k = ring - 1, ring + 1, ring + 2
	key1 = '{},{}'.format(radial, j)
	key2 = '{},{}'.format(previousRadial(radial, j), j)
	key3 = '{},{}'.format(previousRadial(radial, i), i)
	key4 = '{},{}'.format(radial, i)
	# intersections of the plaza circle with the bounding rings and streets
	# (the s1/s2 results are the unused second intersection points)
	z32, z41 = circleXcircle(radii[ring], plazaCenter, PLAZA_RADIUS)
	s1, z31 = lineXcircle(pointsL[i], pointsL[ring], plazaCenter, PLAZA_RADIUS)
	s1, z42 = lineXcircle(pointsR[i], pointsR[ring], plazaCenter, PLAZA_RADIUS)
	z21, z12 = circleXcircle(radii[j], plazaCenter, PLAZA_RADIUS)
	z22, s2 = lineXcircle(pointsL[j], pointsL[k], plazaCenter, PLAZA_RADIUS)
	z11, s2 = lineXcircle(pointsR[j], pointsR[k], plazaCenter, PLAZA_RADIUS)
	plazaHash[key1] = (plazaPath1, z11, z12, PLAZA_RADIUS)
	plazaHash[key2] = (plazaPath2, z21, z22, PLAZA_RADIUS)
	plazaHash[key3] = (plazaPath3, z31, z32, PLAZA_RADIUS)
	plazaHash[key4] = (plazaPath4, z41, z42, PLAZA_RADIUS)
def main():
radii, threeOClockL, threeOClockR = genThreeOClock()
points = []
for radial in xrange(18): # 0 thru 17 (every 15 minutes from 2:00 to 6:00 inclusive)
radians = radialRadians(radial)
pointsL = rotatePoints(radians, threeOClockL)
pointsR = rotatePoints(radians, threeOClockR)
points.append((pointsL, pointsR))
flareFromPlaza(points, radii, 4, 4, 1, THREE_O_CLOCK_FLARE_ANGLE)
flareFromPlaza(points, radii, 4, 4, 6, -THREE_O_CLOCK_FLARE_ANGLE)
flareFromPlaza(points, radii, 10, 14, 16, -THREE_O_CLOCK_FLARE_ANGLE)
pointsL = points[10][0]
pointsR = points[10][1]
pointsL[1] = flareLine(radii[1], pointsL[2], radialDegrees(10) + FOUR_THIRTY_FLARE_ANGLE/2)
pointsR[1] = flareLine(radii[1], pointsR[2], radialDegrees(10) - FOUR_THIRTY_FLARE_ANGLE/2)
print '<svg'
print '\txmlns="http://www.w3.org/2000/svg"'
print '\txmlns:xlink="http://www.w3.org/1999/xlink"'
print '\twidth="900" height="900" viewBox="-6000 -6000 12000 12000">'
print '<g fill="none" stroke="black" stroke-width="4" transform="rotate(45)">'
quarterHourIndex = len('ABCDEFG') * 2 + 1
wideBlockIndex = len('ABCDE') * 2 + 1
path = Path()
plazaHash = {}
addPlaza(plazaHash, points, radii, 4, 4)
addPlaza(plazaHash, points, radii, 4, 14)
addPlaza(plazaHash, points, radii, 10, 14)
addPlaza(plazaHash, points, radii, 16, 18)
odd = True
for n in xrange(16):
odd = not odd
if odd:
m_start = quarterHourIndex
n_right = n + 1
else:
m_start = 1
n_right = n + 2
pointsL = points[n][1]
pointsR = points[n_right][0]
isWide = n in (0, 6, 12, 14)
for m in xrange(m_start, len(pointsL) - 1, 2):
if m == quarterHourIndex:
pointsR = points[n + 1][0]
if n == 14 and m < 9:
continue
m_next = m + 1
if isWide:
if m == wideBlockIndex:
m_next = m + 3
elif m == wideBlockIndex + 2:
continue
p1, p2, r12 = pointsL[m], pointsR[m], radii[m]
p3, p4, r34 = pointsR[m_next], pointsL[m_next], radii[m_next]
key = '{},{}'.format(n, m)
plaza = plazaHash.get(key)
if plaza:
makePath, z1, z2, zr = plaza
makePath(path, p1, p2, p3, p4, r12, r34, z1, z2, zr)
else:
path.moveto(p1)
path.arcto(p2, r12, 1)
path.lineto(p3)
path.arcto(p4, r34, 0)
path.closepath()
#---------- Center Camp - Bottom Half ----------#
hs = HALF_STREET_WIDTH
r2 = CENTER_CAMP_RADIUS2
r3 = CENTER_CAMP_RADIUS3
d2 = math.sqrt(r2*r2 - hs*hs)
d3 = math.sqrt(r3*r3 | |
'bb': fliped_bb,
'gt_classes': roi_gt_ss[i]['gt_classes'],
'gt_overlaps': roi_gt_ss[i]['gt_overlaps'],
'max_overlap_area': roi_gt_ss[i]['max_overlap_area'],
'max_overlap_class': roi_gt_ss[i]['max_overlap_class'],
'img_id': roi_gt_ss[i]['img_id'],
'flipped': True,
'img_file': image_file,
'bb_targets': bb_targets_flipped
}
for cls in xrange(1, self.num_classes):
cls_inds = np.where(bb_targets[:, 0] == cls)[0]
if cls_inds.size > 0:
class_counts[cls] += cls_inds.size
sums[
cls, :] += bb_targets_flipped[cls_inds, 1:].sum(axis=0)
squared_sums[
cls, :] += (bb_targets_flipped[cls_inds, 1:] ** 2).sum(axis=0)
means = sums / class_counts
stds = np.sqrt(squared_sums / class_counts - means ** 2)
bbtarget_means = means.ravel()
bbtarget_stds = stds.ravel()
# Normalize targets
for i in xrange(self.num_images):
targets = roi_gt_ss[i]['bb_targets']
for cls in xrange(1, self.num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
roi_gt_ss[i]['bb_targets'][cls_inds, 1:] -= means[cls, :]
roi_gt_ss[i]['bb_targets'][cls_inds, 1:] /= stds[cls, :]
return roi_gt_ss, bbtarget_means, bbtarget_stds
def _compute_bb_targets(self, gt_bb, rp_bb, labels):
    """Compute bounding-box regression targets for each region proposal.

    For every proposal, the target describes how to shift its center
    (dx, dy, as fractions of the proposal size) and rescale its width and
    height (dw, dh, in log space) to reach the matched ground-truth box.
    Targets are computed on the original, un-resized proposal coordinates.

    Arguments:
        gt_bb -- (N, 4) ground-truth boxes matched to each proposal
        rp_bb -- (N, 4) region-proposal boxes
        labels -- (N,) class label per proposal

    Returns:
        (N, 5) array: column 0 is the label, columns 1-4 are dx, dy, dw, dh.
    """
    def _centers_and_sizes(boxes):
        # FRCN_EPS pads width/height to guard against division by zero
        w = boxes[:, BB_XMAX_IDX] - boxes[:, BB_XMIN_IDX] + FRCN_EPS
        h = boxes[:, BB_YMAX_IDX] - boxes[:, BB_YMIN_IDX] + FRCN_EPS
        cx = boxes[:, BB_XMIN_IDX] + 0.5 * w
        cy = boxes[:, BB_YMIN_IDX] + 0.5 * h
        return cx, cy, w, h

    rp_cx, rp_cy, rp_w, rp_h = _centers_and_sizes(rp_bb)
    gt_cx, gt_cy, gt_w, gt_h = _centers_and_sizes(gt_bb)

    # Stack label + the four adjustment targets column-wise.
    return np.column_stack((labels,
                            (gt_cx - rp_cx) / rp_w,
                            (gt_cy - rp_cy) / rp_h,
                            np.log(gt_w / rp_w),
                            np.log(gt_h / rp_h)))
class PASCALVOCInference(PASCALVOC):
"""
PASCAL VOC 2007 and 2012 data set for testing and inference from
http://host.robots.ox.ac.uk/pascal/VOC/voc2007/index.html and
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html.
Construct a PASCAL VOC dataset object for testing and inference
It still loads precomputed selective search results as ROIs.
Notes:
1. The dataset iterator will use only batch size 1.
2. The inference/test dataset will keep all the precomputed selective
search to run through the model.
3. The preprocessed data will be saved into a cached file and re-use
if the same configuration is chosen.
Arguments:
image_set (str): 'trainval' or 'test'.
year (str): e.g. '2007'.
path (str): Path to data file.
n_mb (int, optional): how many minibatch to iterate through, can use
value smaller than nbatches for debugging.
img_per_batch (int, optional): how many images processed per batch.
rois_per_img (int, optional): how many rois to pool from each image.
im_fm_scale: (float, optional): how much the image is scaled down when
reaching the feature map layer. This scale
is used to remove duplicated ROIs once they
are projected to the feature map scale.
shuffle(bool, optional): randomly shuffle the samples in each epoch
not used when doing testing for accuracy metric,
but used when using this dataset iterator to do
demo, it can pick images randomly inside the dataset.
subset_pct (float, optional): value between 0 and 100 indicating what percentage of the
dataset partition to use. Defaults to 100
"""
FRCN_MIN_SCALE = 600
FRCN_MAX_SCALE = 1000
FRCN_IMG_PER_BATCH = 1
FRCN_ROI_PER_IMAGE = 5403
def __init__(self, image_set, year, path='.', subset_pct=100,
n_mb=None, rois_per_img=None, im_fm_scale=1. / 16, shuffle=False):
super(PASCALVOCInference, self).__init__(image_set, year, path, n_mb,
self.FRCN_IMG_PER_BATCH, rois_per_img)
self.n_mb = n_mb
self.im_fm_scale = im_fm_scale
self.last_im_height = None
self.last_im_width = None
self.last_num_boxes = None
self.shuffle = shuffle
# backend tensor to push the data
self.image_shape = (3, self.FRCN_MAX_SCALE, self.FRCN_MAX_SCALE)
self.img_np = np.zeros(
(3, self.FRCN_MAX_SCALE, self.FRCN_MAX_SCALE, self.be.bsz), dtype=np.float32)
self.dev_X_img = self.be.iobuf(self.image_shape, dtype=np.float32)
self.dev_X_img_chw = self.dev_X_img.reshape(
3, self.FRCN_MAX_SCALE, self.FRCN_MAX_SCALE, self.be.bsz)
# for rois, features are 4 + 1 (idx within the batch)
self.dev_X_rois = self.be.zeros((self.rois_per_batch, 5))
# the shape will indicate the shape for 1st path (ImageNet model), and
# 2nd path (ROIs)
self.shape = [self.image_shape, self.num_classes * 4]
assert os.path.exists(self.image_index_file), \
'Image index file does not exist: {}'.format(self.image_index_file)
with open(self.image_index_file) as f:
self.image_index = [x.strip() for x in f.readlines()]
self.num_images = len(self.image_index)
self.num_image_entries = self.num_images
self.ndata = self.num_image_entries * self.rois_per_img
assert (subset_pct > 0 and subset_pct <= 100), ('subset_pct must be between 0 and 100')
self.nbatches = int(self.num_image_entries / self.img_per_batch * subset_pct / 100)
if self.n_mb is not None:
self.nbatches = self.n_mb
if os.path.exists(self.cache_file):
self.roi_db = load_obj(self.cache_file)
neon_logger.display('ROI dataset loaded from file {}'.format(self.cache_file))
else:
# 2.
self.roi_gt = self.load_pascal_roi_groundtruth()
# 3.
self.roi_ss = self.load_pascal_roi_selectivesearch()
# 4.
self.roi_db = self.combine_gt_ss_roi()
save_obj(self.roi_db, self.cache_file)
neon_logger.display('wrote ROI dataset to {}'.format(self.cache_file))
def __iter__(self):
"""
Generator to iterate over this dataset.
Each minibatch is constructed from self.img_per_batch images,
and self.rois_per_img ROIs
Yields:
tuples, db, first tuple contains image and ROI data
second object contains the dataset structure for that image
which contains information for post-processing
"""
self.batch_index = 0
# permute the dataset each epoch
if self.shuffle is False:
shuf_idx = list(range(self.num_images))
else:
shuf_idx = self.be.rng.permutation(self.num_images)
self.image_index = [self.image_index[i] for i in shuf_idx]
for self.batch_index in xrange(self.nbatches):
start = self.batch_index * self.img_per_batch
end = (self.batch_index + 1) * self.img_per_batch
db_inds = shuf_idx[start:end]
mb_db = [self.roi_db[i] for i in db_inds]
rois_mb = np.zeros((self.rois_per_batch, 5), dtype=np.float32)
self.img_np[:] = 0
for im_i, db in enumerate(mb_db):
# load and process the image using PIL
im = Image.open(db['img_file']) # This is RGB order
im_scale = db['im_scale']
rois = db['bb'] * im_scale
# the im h/w are based on the unscaled image
# as the dx/dy/dw/dh will be adjustments on that
im_shape = np.array(im.size, np.int32)
self.last_im_height = im_shape[1]
self.last_im_width = im_shape[0]
im_shape *= im_scale
im = im.resize(im_shape, Image.LINEAR)
im = np.array(im)[:, :, ::-1]
# Mean subtract and scale an image
im = im.astype(np.float32, copy=False)
im -= FRCN_PIXEL_MEANS
self.last_num_boxes = min(rois.shape[0], self.rois_per_img)
rois = rois[:self.last_num_boxes]
slice_i = slice(im_i * self.rois_per_img,
im_i * self.rois_per_img + self.last_num_boxes)
batch_ind = im_i * np.ones((self.last_num_boxes, 1))
# add the corresponding image ind (within this batch) to the
# ROI data
rois_this_image = np.hstack((batch_ind, rois))
rois_mb[slice_i] = rois_this_image
self.img_np[:, :im_shape[1], :im_shape[0], im_i] = im.transpose(
FRCN_IMG_DIM_SWAP)
# write it to backend tensor
self.dev_X_img_chw.set(self.img_np)
self.dev_X_rois[:] = rois_mb
self.actual_seq_len = self.last_num_boxes
X = (self.dev_X_img, self.dev_X_rois)
yield X, db
def get_cache_file_name(self):
return 'inference_voc_{}_{}_size_{}_{}.pkl'.format(self.year,
self.image_set,
self.FRCN_MAX_SCALE,
self.FRCN_MIN_SCALE)
def get_dataset_msg(self):
return 'prepare PASCAL VOC {} from year {} for inference:'.format(self.image_set,
self.year)
def load_pascal_roi_selectivesearch(self):
"""
Load the pre-computed selective search data on PASCAL VOC in pickle file
The pickle file contains images and rp:
images: image indices for the dataset (Img, 1)
name in string is in images[i][0][0]
rp: all the proposed ROIs for each image (Img, 1)
in bb[i], there are (B, 4) for B proposed ROIs
The coordinates are ordered as:
[y1, x1, y2, x2]
While ground truth coordinates are:
[x1, y1, x2, y2]
So it needs re-ordering
"""
assert self.roi_gt is not None, 'Ground truth ROIs need to be loaded first'
assert os.path.exists(self.selective_search_file), \
'selected search data does not exist'
ss_data = load_obj(self.selective_search_file)
ss_bb = ss_data['boxes'].ravel()
ss_img_idx = ss_data['images'].ravel()
ss_num_img = ss_bb.shape[0]
assert ss_num_img == self.num_images, \
'Number of images in SS data must match number of image in the dataset'
roi_ss = []
# load the bb from SS and remove duplicate
for i in xrange(ss_num_img):
# make sure the image index match
assert self.image_index[i] == ss_img_idx[i][0]
bb = (ss_bb[i][:, (1, 0, 3, 2)] - 1)
num_boxes = bb.shape[0]
overlaps = np.zeros(
(num_boxes, self.num_classes), dtype=np.float32)
gt_bb = self.roi_gt[i]['gt_bb']
gt_classes = self.roi_gt[i]['gt_classes'].ravel()
gt_overlap, gt_dim = calculate_bb_overlap(bb.astype(np.float),
gt_bb.astype(np.float))
max_overlap_area = gt_overlap.max(axis=gt_dim)
max_overlap_arg = gt_overlap.argmax(axis=gt_dim)
# only put the non-zero overlaps into the table
I = np.where(max_overlap_area > 0)[0]
overlaps[I, gt_classes[max_overlap_arg[I]]] = max_overlap_area[I]
max_overlap_class = overlaps.argmax(axis=gt_dim)
img_file = os.path.join(self.image_path,
self.image_index[i] + self._image_file_ext)
roi_ss.append({
'ss_bb': bb,
'img_id': self.image_index[i],
'img_file': img_file,
'gt_classes': np.zeros((num_boxes, 1), dtype=np.int32),
'gt_overlaps': overlaps,
'max_overlap_area': max_overlap_area.reshape(-1, 1),
'max_overlap_class': max_overlap_class.reshape(-1, 1),
})
return roi_ss
def combine_gt_ss_roi(self):
assert len(self.roi_gt) == len(self.roi_ss) == self.num_images, \
'ROIs from GT and SS do not match the dataset images'
roi_gt_ss = [None] * self.num_image_entries
for i in xrange(self.num_images):
roi_gt_ss[i] = {}
roi_gt_ss[i]['bb'] = np.vstack((self.roi_gt[i]['gt_bb'],
self.roi_ss[i]['ss_bb']))
roi_gt_ss[i]['gt_classes'] = np.vstack([self.roi_gt[i]['gt_classes'],
self.roi_ss[i]['gt_classes']])
roi_gt_ss[i]['img_id'] = self.roi_ss[i]['img_id']
roi_gt_ss[i]['img_file'] = self.roi_ss[i]['img_file']
# load | |
# <gh_stars>0
#!/usr/bin/env python
# coding: utf-8
# ## Our Mission ##
#
# Spam detection is one of the major applications of Machine Learning in the interwebs today. Pretty much all of the major email service providers have spam detection systems built in and automatically classify such mail as 'Junk Mail'.
#
# In this mission we will be using the Naive Bayes algorithm to create a model that can classify SMS messages as spam or not spam, based on the training we give to the model. It is important to have some level of intuition as to what a spammy text message might look like. Often they have words like 'free', 'win', 'winner', 'cash', 'prize' and the like in them as these texts are designed to catch your eye and in some sense tempt you to open them. Also, spam messages tend to have words written in all capitals and also tend to use a lot of exclamation marks. To the human recipient, it is usually pretty straightforward to identify a spam text and our objective here is to train a model to do that for us!
#
# Being able to identify spam messages is a binary classification problem as messages are classified as either 'Spam' or 'Not Spam' and nothing else. Also, this is a supervised learning problem, as we will be feeding a labelled dataset into the model, that it can learn from, to make future predictions.
#
# # Overview
#
# This project has been broken down in to the following steps:
#
# - Step 0: Introduction to the Naive Bayes Theorem
# - Step 1.1: Understanding our dataset
# - Step 1.2: Data Preprocessing
# - Step 2.1: Bag of Words (BoW)
# - Step 2.2: Implementing BoW from scratch
# - Step 2.3: Implementing Bag of Words in scikit-learn
# - Step 3.1: Training and testing sets
# - Step 3.2: Applying Bag of Words processing to our dataset.
# - Step 4.1: Bayes Theorem implementation from scratch
# - Step 4.2: Naive Bayes implementation from scratch
# - Step 5: Naive Bayes implementation using scikit-learn
# - Step 6: Evaluating our model
# - Step 7: Conclusion
#
# **Note**: If you need help with a step, you can find the solution notebook by clicking on the Jupyter logo in the top left of the notebook.
# ### Step 0: Introduction to the Naive Bayes Theorem ###
#
# Bayes Theorem is one of the earliest probabilistic inference algorithms. It was developed by Reverend Thomas Bayes (which he used to try and infer the existence of God no less), and still performs extremely well for certain use cases.
#
# It's best to understand this theorem using an example. Let's say you are a member of the Secret Service and you have been deployed to protect the Democratic presidential nominee during one of his/her campaign speeches. Being a public event that is open to all, your job is not easy and you have to be on the constant lookout for threats. So one place to start is to put a certain threat-factor for each person. So based on the features of an individual, like age, whether the person is carrying a bag, looks nervous, etc., you can make a judgment call as to whether that person is a viable threat.
#
# If an individual ticks all the boxes up to a level where it crosses a threshold of doubt in your mind, you can take action and remove that person from the vicinity. Bayes Theorem works in the same way, as we are computing the probability of an event (a person being a threat) based on the probabilities of certain related events (age, presence of bag or not, nervousness of the person, etc.).
#
# One thing to consider is the independence of these features amongst each other. For example if a child looks nervous at the event then the likelihood of that person being a threat is not as much as say if it was a grown man who was nervous. To break this down a bit further, here there are two features we are considering, age AND nervousness. Say we look at these features individually, we could design a model that flags ALL persons that are nervous as potential threats. However, it is likely that we will have a lot of false positives as there is a strong chance that minors present at the event will be nervous. Hence by considering the age of a person along with the 'nervousness' feature we would definitely get a more accurate result as to who are potential threats and who aren't.
#
# This is the 'Naive' bit of the theorem where it considers each feature to be independent of each other which may not always be the case and hence that can affect the final judgement.
#
# In short, Bayes Theorem calculates the probability of a certain event happening (in our case, a message being spam) based on the joint probabilistic distributions of certain other events (in our case, the appearance of certain words in a message). We will dive into the workings of Bayes Theorem later in the mission, but first, let us understand the data we are going to work with.
# ### Step 1.1: Understanding our dataset ###
#
#
# We will be using a dataset originally compiled and posted on the UCI Machine Learning repository which has a very good collection of datasets for experimental research purposes. If you're interested, you can review the [abstract](https://archive.ics.uci.edu/ml/datasets/SMS+Spam+Collection) and the original [compressed data file](https://archive.ics.uci.edu/ml/machine-learning-databases/00228/) on the UCI site. For this exercise, however, we've gone ahead and downloaded the data for you.
#
#
# **Here's a preview of the data:**
#
# <img src="images/dqnb.png" height="1242" width="1242">
#
# The columns in the data set are currently not named and as you can see, there are 2 columns.
#
# The first column takes two values, 'ham' which signifies that the message is not spam, and 'spam' which signifies that the message is spam.
#
# The second column is the text content of the SMS message that is being classified.
# >**Instructions:**
# * Import the dataset into a pandas dataframe using the **read_table** method. The file has already been downloaded, and you can access it using the filepath 'smsspamcollection/SMSSpamCollection'. Because this is a tab separated dataset we will be using '\\t' as the value for the 'sep' argument which specifies this format.
# * Also, rename the column names by specifying a list ['label', 'sms_message'] to the 'names' argument of read_table().
# * Print the first five values of the dataframe with the new column names.
# In[1]:
# '!' allows you to run bash commands from jupyter notebook.
print("List all the files in the current directory\n")
get_ipython().system('ls')
# The required data table can be found under smsspamcollection/SMSSpamCollection
print("\n List all the files inside the smsspamcollection directory\n")
get_ipython().system('ls smsspamcollection')
# In[2]:
import pandas as pd
# Dataset available using filepath 'smsspamcollection/SMSSpamCollection'
# Tab-separated file with no header row, so the two columns are named here.
df = pd.read_table('smsspamcollection/SMSSpamCollection', sep='\t', names=['label','sms_message'])
# Output printing out first 5 rows
df.head()
# ### Step 1.2: Data Preprocessing ###
#
# Now that we have a basic understanding of what our dataset looks like, let's convert our labels to binary variables, 0 to represent 'ham'(i.e. not spam) and 1 to represent 'spam' for ease of computation.
#
# You might be wondering why do we need to do this step? The answer to this lies in how scikit-learn handles inputs. Scikit-learn only deals with numerical values and hence if we were to leave our label values as strings, scikit-learn would do the conversion internally(more specifically, the string labels will be cast to unknown float values).
#
# Our model would still be able to make predictions if we left our labels as strings but we could have issues later when calculating performance metrics, for example when calculating | |
project progress
(default: True)
progress -- (bool) whether or not to update project progress
(default: True)
Returns: None
"""
if check \
and self.projects[_id].progress < Alaska.PROGRESS['qc_finished']:
raise Exception('{}: Quality control has not been performed.'
.format(_id))
# The project we are interested in.
proj = self.projects[_id]
# copy analysis script to project folder.
self.copy_script(_id, Alaska.ANL_SCRIPT)
# check if alignment is already queued
qu = list(self.queue.queue)
for job in qu:
if job.proj_id == _id and job.name == 'kallisto':
raise Exception('{}: already in queue'.format(_id))
# check if alignment is currently running
if self.current_job is not None \
and self.current_job.proj_id == _id \
and self.current_job.name == 'kallisto':
raise Exception('{}: currently running'.format(_id))
# make directories
self.broadcast(_id, ('{}: making directories for '
+ 'read alignment').format(_id))
for __id, sample in proj.samples.items():
f = '{}/{}'.format(proj.align_dir, sample.name)
os.makedirs(f, exist_ok=True)
self.broadcast(_id, '{}: {} created'.format(_id, f))
self.broadcast(_id, '{}: creating new job'.format(_id))
# begin job variables
__id = self.rand_str_except(Alaska.PROJECT_L, self.jobs.keys())
self.jobs[__id] = None # initialize empty job to prevent duplicate ids
# source and target mounting points
# src_proj = os.path.abspath(self.projects[_id].dir)
src = Alaska.DOCKER_DATA_VOLUME
tgt = Alaska.ROOT_PATH
wdir = '{}/{}'.format(Alaska.ROOT_PATH, proj.dir)
# volumes to mount to container
volumes = {
src: {'bind': tgt, 'mode': 'rw'},
}
cmd = ('python3 run_analysis.py kallisto '
+ '--threads {}').format(Alaska.NTHREADS)
args = {
'working_dir': wdir,
'volumes': volumes,
'cpuset_cpus': Alaska.CPUS,
}
# end job variables
job = AlaskaJob(__id, 'kallisto', _id,
Alaska.DOCKER_KALLISTO_TAG, cmd, **args)
job.save()
self.jobs[__id] = job
proj.jobs.append(__id)
self.enqueue_job(_id, job)
if progress:
proj.progress = Alaska.PROGRESS['quant_queued'] # added to queue
if close:
self.close(_id)
def diff_exp(self, _id, close=True, check=True, progress=True):
    """
    Perform differential expression analysis.

    Copies the analysis/sleuth/shiny scripts and the organism annotation
    into the project, writes the sleuth design matrix, then queues a
    'sleuth' job to run in a Docker container.

    Arguments:
        _id -- (str) ZeroMQ socket id that sent this request
        close -- (bool) whether or not to close the connection
                 (default: True)
        check -- (bool) whether or not to check project progress
                 (default: True)
        progress -- (bool) whether or not to update project progress
                    (default: True)

    Returns: None
    """
    if check \
            and self.projects[_id].progress < Alaska.PROGRESS['quant_finished']:
        raise Exception(('{}: project must be aligned before differential '
                         + 'expression analysis').format(_id))
    # The project we are interested in.
    proj = self.projects[_id]
    # copy scripts
    self.copy_script(_id, Alaska.ANL_SCRIPT)
    self.copy_script(_id, Alaska.SLE_SCRIPT)
    self.copy_script(_id, Alaska.SHI_SCRIPT, dst=proj.diff_dir)
    # Copy annotation file. All samples share one organism, so any sample
    # can be used to look it up.
    org = proj.samples[list(proj.samples.keys())[0]].organism
    genus = org['genus']
    species = org['species']
    version = org['version']
    org_path = os.path.join(Alaska.ORGS_DIR, genus, species, version,
                            Alaska.REF_DIR,
                            self.organisms[genus][species].refs[version].annotation)
    ann_new = os.path.join(proj.diff_dir, 'annotations.tsv')
    # Replace any stale annotation copy before copying the fresh one.
    if os.path.isfile(ann_new):
        os.remove(ann_new)
    shutil.copy2(org_path, ann_new)
    # check if diff. exp. is already queued
    qu = list(self.queue.queue)
    for job in qu:
        if job.proj_id == _id and job.name == 'sleuth':
            raise Exception('{}: already in queue'.format(_id))
    # check if diff. exp. is currently running
    if self.current_job is not None \
            and self.current_job.proj_id == _id \
            and self.current_job.name == 'sleuth':
        raise Exception('{}: currently running'.format(_id))
    # write sleuth matrix and bash script
    proj.write_matrix()
    proj.write_info()
    self.broadcast(_id, '{}: wrote sleuth design matrix'.format(_id))
    self.broadcast(_id, '{}: creating new job'.format(_id))
    # begin job variables
    __id = self.rand_str_except(Alaska.PROJECT_L, self.jobs.keys())
    self.jobs[__id] = None  # initialize empty job to prevent duplicate ids
    # source and target mounting points
    src = Alaska.DOCKER_DATA_VOLUME
    tgt = Alaska.ROOT_PATH
    wdir = '{}/{}'.format(Alaska.ROOT_PATH, proj.dir)
    # volumes to mount to container
    volumes = {
        src: {'bind': tgt, 'mode': 'rw'},
    }
    cmd = ('python3 run_analysis.py sleuth '
           + '--threads {}').format(Alaska.NTHREADS)
    args = {
        'working_dir': wdir,
        'volumes': volumes,
        # Reference the class-level constant via Alaska for consistency
        # with the read-quantification method (same value either way).
        'cpuset_cpus': Alaska.CPUS,
    }
    # end job variables
    job = AlaskaJob(__id, 'sleuth', _id,
                    Alaska.DOCKER_SLEUTH_TAG, cmd, **args)
    job.save()
    self.jobs[__id] = job
    proj.jobs.append(__id)
    self.enqueue_job(_id, job)
    if progress:
        proj.progress = Alaska.PROGRESS['diff_queued']  # added to queue
    if close:
        self.close(_id)
def do_all(self, _id, close=True):
    """
    Perform all three analyses. Assumes that the project is finalized.

    Resumes from quality control, read quantification, or differential
    expression depending on how far the project previously progressed
    (or at which stage it errored).

    Arguments:
        _id -- (str) ZeroMQ socket id that sent this request
        close -- (bool) whether or not to close the connection (default: True)

    Returns: None
    """
    def change_ftp(_id):
        """
        Helper function to change ftp user's home directory
        to root of the project.

        Arguments:
            _id -- (str) ftp user id

        Returns: None
        """
        try:
            ftp = self.DOCKER.containers.get(Alaska.DOCKER_FTP_TAG)
            if ftp.status != 'running':
                self.broadcast(_id, ('WARNING: container {} is not '
                                     + 'running').format(
                                         Alaska.DOCKER_FTP_TAG))
            cmd = 'pure-pw usermod {} -d {}/{}/{}'.format(
                _id,
                Alaska.FTP_ROOT_PATH,
                Alaska.PROJECTS_DIR,
                _id)
            out = ftp.exec_run(cmd)
            # Rebuild the password database so the change takes effect.
            cmd = 'pure-pw mkdb'
            out = ftp.exec_run(cmd)
            exit_code = out[0]
            if exit_code != 0:
                # Bug fix: this previously formatted the undefined name
                # __id (name-mangled inside the class), so a failed mkdb
                # raised NameError instead of the intended message.
                raise Exception('{}: FTP mkdb failed.'.format(_id))
        except docker.errors.NotFound:
            self.broadcast(_id, ('WARNING: container {} does not '
                                 + 'exist').format(Alaska.DOCKER_FTP_TAG))
    self.broadcast(_id, '{}: performing all analyses'.format(_id))
    if close:
        self.close(_id)
    if self.exists_var(_id):
        proj = self.projects[_id]
    else:
        raise Exception('{}: not finalized'.format(_id))
    change_ftp(_id)
    # Resume from the earliest stage that still needs to run. Only the
    # first stage queued updates progress; later stages use progress=False.
    if (proj.progress == Alaska.PROGRESS['finalized']) \
            or (proj.progress == Alaska.PROGRESS['qc_error']):
        self.out('{}: starting from qc'.format(_id))
        self.qc(_id, close=False, check=False, progress=True)
        self.read_quant(_id, close=False, check=False, progress=False)
        self.diff_exp(_id, close=False, check=False, progress=False)
    elif (proj.progress == Alaska.PROGRESS['quant_error']):
        self.out('{}: starting from read quant'.format(_id))
        self.read_quant(_id, close=False, check=False, progress=True)
        self.diff_exp(_id, close=False, check=False, progress=False)
    elif (proj.progress == Alaska.PROGRESS['diff_error']):
        self.out('{}: starting from diff'.format(_id))
        self.diff_exp(_id, close=False, check=False, progress=True)
    email = proj.meta['corresponding']['email']
    msg = ('Alaska has placed your project {} in the queue. '
           + 'Analysis will start shortly.').format(_id)
    if email:
        self.send_email(email, 'Analysis queued', msg, _id)
def open_sleuth_server(self, _id, close=True):
    """
    Open sleuth shiny app.

    Reuses an already-running server for this project when one exists;
    otherwise starts a new shiny container on a randomly chosen port.

    Arguments:
        _id -- (str) ZeroMQ socket id that sent this request
        close -- (bool) whether or not to close the connection (default: True)

    Returns: None
    """
    if not self.exists_var(_id):
        raise Exception('{}: project not found'.format(_id))
    proj = self.projects[_id]
    if proj.progress < Alaska.PROGRESS['diff_finished']:
        raise Exception('{}: Sleuth not yet run'.format(_id))
    # If the server for this project is already open, just return the
    # port.
    for port, item in self.sleuth_servers.items():
        if item[0] == _id:
            self.broadcast(_id, ('{}: server already open on '
                                 + 'port {}').format(_id, port))
            # refresh open time (used for idle-server bookkeeping)
            item[2] = dt.datetime.now()
            if close:
                self.close(_id)
            return
    self.broadcast(_id, '{}: starting Sleuth shiny app'.format(_id))
    # source and target mounting points
    src = Alaska.DOCKER_DATA_VOLUME
    tgt = Alaska.ROOT_PATH
    wdir = '{}/{}'.format(Alaska.ROOT_PATH, proj.diff_dir)
    # volumes to mount to container
    volumes = {
        src: {'bind': tgt, 'mode': 'rw'},
    }
    # Randomly choose port.
    port = random.choice(self.available_ports)
    # Make sure port isn't taken.
    if port in self.sleuth_servers:
        raise Exception('{}: port {} is already taken!'.format(_id, port))
    # Reserve the port before the container comes up to avoid double use.
    self.sleuth_servers[port] = None
    # Map the shiny app's fixed internal port to the chosen host port.
    ports = {
        42427: port
    }
    cmd = 'Rscript {} --args alaska'.format(Alaska.SHI_SCRIPT)
    ###############################
    cont = AlaskaDocker(Alaska.DOCKER_SLEUTH_TAG)
    cont.run(cmd, working_dir=wdir,
             volumes=volumes,
             ports=ports,
             remove=True)
    cont_id = cont.id
    self.out(('INFO: shiny app container started with '
              + 'id {}').format(cont_id))
    # Record: [project id, container id, last-touched timestamp].
    self.sleuth_servers[port] = [_id, cont_id, dt.datetime.now()]
    self.broadcast(_id, '{}: server opened on port {}'.format(_id, port))
    if close:
        self.close(_id)
def prepare_geo(self, _id, close=True):
    """
    Prepare submission to geo.

    Compiles the project for GEO submission, updating its progress
    markers, and emails the corresponding author when finished.

    Arguments:
        _id -- (str) ZeroMQ socket id that sent this request
        close -- (bool) whether or not to close the connection (default: True)

    Returns: None
    """
    # Validate the request before doing any work.
    if not self.exists_var(_id):
        raise Exception('{}: project not found'.format(_id))
    project = self.projects[_id]
    if project.progress < Alaska.PROGRESS['diff_finished']:
        raise Exception('{}: Sleuth not yet run'.format(_id))
    self.broadcast(_id, '{}: preparing submission'.format(_id))
    if close:
        self.close(_id)
    # Mark compilation in progress, compile, then mark done.
    project.progress = Alaska.PROGRESS['geo_compiling']
    project.prepare_submission()
    project.progress = Alaska.PROGRESS['geo_compiled']
    # Notify the corresponding author, if an address is on file.
    recipient = project.meta['corresponding']['email']
    if recipient:
        notice = ('Project {} has been successfully compiled for '
                  + 'GEO submission.').format(_id)
        self.send_email(recipient, 'Project has been compiled', notice, _id)
def submit_geo(self, _id, close=True):
"""
Submit to geo.
Arguments:
_id -- (str) ZeroMQ socket id that sent this request
close -- (bool) whether or not to close the connection (default: True)
Returns: None
"""
if not self.exists_var(_id):
raise Exception('{}: project not found'.format(_id))
proj = self.projects[_id]
if proj.progress < Alaska.PROGRESS['geo_compiled']:
raise Exception(('{}: project not compiled for '
+ 'GEO submission').format(_id))
self.broadcast(_id, '{}: submitting project to GEO'.format(_id))
if close:
self.close(_id)
# Read json file.
with open('{}/ftp_info.json'.format(proj.temp_dir)) as f:
loaded = json.load(f)
geo_uname = loaded['geo_username']
host = loaded['ftp_host']
uname = loaded['ftp_username']
passwd = loaded['ftp_password']
fname = '{}_files.tar.gz'.format(geo_uname)
proj.progress = Alaska.PROGRESS['geo_submitting']
# proj.submit_geo(fname, host, uname, passwd)
proj.progress = Alaska.PROGRESS['geo_submitted']
email = proj.meta['corresponding']['email']
if email:
subject = 'Project has been submitted to GEO'
msg = ('Project {} has been successfully submitted '
+ 'to GEO.<br>').format(_id)
msg += ('Please send an email to <a href="mailto:'
+ '{}">{}</a>').format(Alaska.GEO_EMAIL, Alaska.GEO_EMAIL)
msg += ' with the following information:<br>'
msg += ('1) GEO account user name '
+ '(<strong>{}</strong>)<br>').format(geo_uname)
msg += ('2) Name of the archive file deposited '
+ '(<strong>{}</strong>)<br>').format(fname)
msg += '3) | |
in r.email_addresses()]
else:
msg.to.append( infotrope.message.Address( None, header=r.header() ) )
addresses += r.email_addresses()
for r in which[ID_CC]:
if r.email_addresses()[0].startswith('news:'):
msg.newsgroups += [x[5:] for x in r.email_addresses()]
else:
msg.cc.append( infotrope.message.Address( None, header=r.header() ) )
addresses += r.email_addresses()
for r in which[ID_BCC]:
if r.email_addresses()[0].startswith('news:'):
msg.newsgroups += [x[5:] for x in r.email_addresses()]
addresses += r.email_addresses()
msg.subparts.append( infotrope.message.FlowedTextPart( polymer.encode.decode_ui( self._text.GetText() ) ) )
if self._text_savedas_uri and not self._text.GetModify():
for n in self._text_savedas_uri:
msg.subparts[0].saved_as( n )
msg.subparts += [ x.part() for x in self._attachments ]
subj = polymer.encode.decode_ui( self._subject.GetValue() )
if len(subj):
msg['Subject'] = subj
self.add_headers( msg )
return msg,addresses
def message_close( self, event ):
    """Save the message to the selected personality's IMAP drafts folder,
    then close the composer if invoked via the close menu item."""
    e = wx.GetApp().personalities()[ self._from_sel.GetSelection() ]
    if e['vendor.infotrope.personality.Drafts.IMAP'] is None:
        dlg = polymer.dialogs.ErrorDialog( self, "No drafts folder defined for this personality.", "Infotrope Polymer" )
        dlg.ShowModal()
        return
    msg,addresses = self.prepare_message( e )
    if msg is None:
        # prepare_message already reported the problem; nothing to save.
        return
    u = e['vendor.infotrope.personality.Drafts.IMAP']
    drsrv = wx.GetApp().connection( u )
    mi = drsrv.mbox_info( u.mailbox )
    # Drafts are flagged as draft/seen with MDN treated as already sent.
    msg.msg_flags = ['\\Draft', '$MDNSent', '\\Seen']
    mi.append( msg )
    # Remember what was saved so an unmodified re-save can be skipped.
    self._saved = msg
    self._saved_addresses = addresses
    self._text_savedas_uri = msg.subparts[0].uri
    # subparts[0] is the text body; attachments follow, hence x+1.
    for x in range( len(self._attachments) ):
        self._attachments[x].saved_as( msg.subparts[x+1].uri )
    self.set_saved()
    if event.GetId()==ID_MESSAGE_CLOSE:
        self.Close( False )
def message_quit( self, event ):
    """Close the composer window without saving a draft."""
    self.Close( False )
def add_headers( self, msg ):
    ''' Add any extra headers to *msg* (the message being built).
    Base implementation is a no-op; subclasses override. '''
    pass
def get_subject( self ):
    ''' Return a suitable default subject line; empty here, subclasses
    override to prefill it. '''
    return u''
def post_send( self, p, m ):
    ''' Do anything needed after message has been submitted for sending.
    No-op here; presumably p is the personality and m the sent message —
    confirm against the caller. '''
    pass
def select_identity( self ):
    ''' Pick an identity to use by default.

    Walks from the currently selected notebook page down to its server's
    email personality and returns that personality's index; falls back
    to index 0 whenever any link in the chain is missing. '''
    if self.GetParent() is not None:
        a = self.GetParent().notebook.GetSelection()
        if a!=-1:
            b = self.GetParent().notebook.GetPage(a)
            if isinstance(b,polymer.imap.PanelMailbox):
                c = b._controller.server()
                d = c._email_entry
                if 'email.personality' not in d:
                    return 0
                e = d['email.personality']
                if e is None:
                    return 0
                # The personality is referenced by the last path component.
                f = e.path.split('/')[-1]
                if f not in wx.GetApp().personalities():
                    return 0
                return wx.GetApp().personalities().index( f )
    return 0
#def __del__( self ):
# print " ** ** COMPOSER CLOSE ** ** "
# print `self`
class NewMessage( MessageBase ):
    """A blank composer window for writing a brand-new message."""
    def __init__( self, parent ):
        MessageBase.__init__( self, parent )
class MailtoMessage( NewMessage ):
    """Composer pre-filled from a mailto: URL.

    The URL's path supplies the primary recipients; its query string may
    supply 'to', 'cc', 'bcc', 'subject', 'body' and any extra headers.
    """
    def __init__( self, parent, url ):
        import urllib
        self.url = infotrope.url.URL( url )
        self.params = {}
        if self.url.query is not None:
            for x in self.url.query.split( '&' ):
                # Split on the first '=' only so values containing '='
                # survive intact (the old split('=') truncated them and
                # crashed with IndexError on fields without any '=');
                # malformed fields with no '=' are skipped.
                key, sep, value = x.partition( '=' )
                if not sep:
                    continue
                self.params[urllib.unquote(key).lower()] = urllib.unquote(value)
        NewMessage.__init__( self, parent )
    def add_text( self, tc ):
        ''' Seed the editor with the body= parameter, if given. '''
        if 'body' in self.params:
            tc.AddText( self.params['body'] )
    def get_subject( self ):
        ''' Default subject taken from the subject= parameter. '''
        if 'subject' in self.params:
            return self.params['subject'].decode('utf-8')
        return u''
    def get_recipients( self, id ):
        ''' Build Recipient objects for the requested field (To/CC/BCC)
        from the URL path and query parameters. Returns None when the
        field has no corresponding parameter. '''
        import urllib
        if id==ID_TO:
            t = [ polymer.addressbook.Recipient( self.GetParent(), field=x[0], email=x[1] ) for x in email.Utils.getaddresses( [urllib.unquote(self.url.path)] ) ]
            if 'to' in self.params:
                t += [ polymer.addressbook.Recipient( self.GetParent(), field=x[0], email=x[1] ) for x in email.Utils.getaddresses( [self.params['to']] ) ]
            return t
        if id==ID_CC:
            if 'cc' in self.params:
                return [ polymer.addressbook.Recipient( self.GetParent(), field=x[0], email=x[1] ) for x in email.Utils.getaddresses( [self.params['cc']] ) ]
        if id==ID_BCC:
            if 'bcc' in self.params:
                return [ polymer.addressbook.Recipient( self.GetParent(), field=x[0], email=x[1] ) for x in email.Utils.getaddresses( [self.params['bcc']] ) ]
    def add_headers( self, msg ):
        ''' Pass any remaining query parameters through as extra headers. '''
        for x,y in self.params.items():
            if x not in ['body','subject','to','cc','bcc']:
                msg[x] = y
class MessageReply( MessageBase ):
    """Abstract base for reply composers.

    Subclasses decide who the recipients are (get_recipients); this class
    handles identity selection, quoting the original body, the threading
    headers (References / In-Reply-To), the 'Re:' subject, and setting
    the \\Answered flag after sending.
    """
    def __init__( self, parent, replyto ):
        self.replyto = replyto
        MessageBase.__init__( self, parent )
    def select_identity( self ):
        """Prefer the personality whose return address the original message
        was addressed to (To, then CC, BCC, From); else defer to the base."""
        frmap = {}
        idx = 0
        for e in wx.GetApp().personalities().entries():
            frmap[ wx.GetApp().personalities()[e]['personality.Return-Address'] ] = idx
            idx += 1
        # First, search TO. Then CC, etc. Then FROM.
        for em in self.replyto.envelope().To:
            if em.address in frmap:
                return frmap[em.address]
        for em in self.replyto.envelope().CC:
            if em.address in frmap:
                return frmap[em.address]
        for em in self.replyto.envelope().BCC:
            if em.address in frmap:
                return frmap[em.address]
        for em in self.replyto.envelope().From:
            if em.address in frmap:
                return frmap[em.address]
        return MessageBase.select_identity( self )
    def add_text( self, tc ):
        """Quote the original message into the composer text control.

        format=flowed text is reparsed with the quote depth bumped by one
        and styled by depth (capped at style 3); other text/plain is
        '> '-prefixed line by line; non-text bodies get a placeholder.
        """
        tc.StyleSetForeground( 1, "#009C46" )
        tc.StyleSetForeground( 2, "#DA6A00" )
        tc.StyleSetForeground( 3, "#6404B5" )
        tc.AddText( polymer.encode.encode_ui( u"On %s, %s wrote:\n" % ( time.asctime( self.replyto.get_sent_date_real() ), self.replyto.get_from_name() ) ) )
        part = self.replyto.parts()
        for p in part.children:
            if p.part_id=='TEXT':
                part = p
                break
        best,pref = part.find( 'TEXT', {'HTML':1,'PLAIN':2} )
        if best is not None:
            if best.subtype=='PLAIN':
                if 'FORMAT' in best.params and best.params['FORMAT'].upper()=='FLOWED':
                    paras = infotrope.flowed.parse( self.replyto.body( best ) )
                    txt = u''
                    for p in paras:
                        p.quote_depth += 1
                        txt = polymer.encode.encode_ui( p.asText() )
                        st = p.quote_depth
                        if st > 3:
                            st = 3 # only three quote-depth styles are defined
                        l = tc.GetLength()
                        tc.AddText( txt )
                        tc.StartStyling( l, 31 )
                        tc.SetStyling( len(txt), st )
                else:
                    f = StringIO.StringIO( self.replyto.body( best ) )
                    for l in f:
                        l = l.rstrip(' \r\n')
                        txt = polymer.encode.encode_ui( u'> ' + l + '\n' )
                        l = tc.GetLength()
                        tc.AddText( txt )
                        tc.StartStyling( l, 31 )
                        tc.SetStyling( len(txt), 1 )
            else:
                txt = polymer.encode.encode_ui( u'> [' + best.type + '/' + best.subtype + ' body]\n' )
                l = tc.GetLength()
                tc.AddText( txt )
                tc.StartStyling( l, 31 )
                tc.SetStyling( len(txt), 1 )
    def add_headers( self, msg ):
        """Add threading headers: References (original refs, whitespace
        normalised, plus the original Message-ID) and In-Reply-To."""
        refs = self.replyto.reply_header('references')
        if refs is not None:
            refs = refs.replace('\r',' ')
            refs = refs.replace('\n',' ')
            refs = ' '.join( [ x for x in refs.split(' ') if len(x) ] )
        mid = self.replyto.envelope().MessageID
        if refs is None:
            msg['References'] = mid
        else:
            msg['References'] = refs + ' ' + mid
        msg['In-Reply-To'] = mid
    def get_recipients( self, id ):
        # A string raise was never valid in modern Python (it is a
        # TypeError at raise time); signal the abstract method properly.
        raise NotImplementedError( "Abstract MessageReply.get_recipients called." )
    def get_subject( self ):
        """Return the original subject prefixed with 'Re: ' (not doubled up)."""
        subj = self.replyto.envelope().Subject
        if subj is None:
            subj = 'Your Message'
        if subj.strip()[0:3].upper()!='RE:':
            subj = 'Re: '+subj
        return subj
    def post_send( self, personality, msg ):
        """Mark the original message \\Answered; failure is non-fatal."""
        try:
            self.replyto.flag( '\\Answered' )
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # still propagate; a flag failure only merits a warning.
            d = wx.MessageDialog( self, "Warning: Couldn't set an Answered flag.\nYour mail has still been sent, however.", "Infotrope Polymer", wx.ICON_ERROR|wx.OK )
            d.ShowModal()
class MessageReplySender( MessageReply ):
    """Reply to the message's sender, honouring any Reply-To address."""
    def __init__( self, parent, replyto ):
        MessageReply.__init__( self, parent, replyto )
    def get_recipients( self, id ):
        """Fill only the To field, from the envelope's ReplyTo addresses."""
        if id==ID_TO:
            # Conveniently, the Reply-To fallback is already resolved in
            # the envelope.
            return [ polymer.addressbook.Recipient( self.GetParent(), field=addr.name, email=addr.address ) for addr in self.replyto.envelope().ReplyTo ]
class MessageReplyDirect( MessageReply ):
    """Reply straight to the From addresses, ignoring any Reply-To header."""
    def __init__( self, parent, replyto ):
        MessageReply.__init__( self, parent, replyto )
    def get_recipients( self, id ):
        """Fill only the To field, from the envelope's From addresses."""
        if id==ID_TO:
            return [ polymer.addressbook.Recipient( self.GetParent(), field=addr.name, email=addr.address ) for addr in self.replyto.envelope().From ]
class MessageReplyList( MessageReply ):
    """Reply to the mailing list (or newsgroups) a message arrived through.

    Resolution order for the list address: the message's List-Post header,
    then a Newsgroups header, then a per-folder default stored in ACAP.
    The display name comes from List-ID when available.
    """
    def __init__( self, parent, replyto ):
        self.list_address = None
        self.list_name = None
        self.newsgroups = None
        # 1) RFC 2369 List-Post header: take the first mailto: URI found.
        if replyto.list_header( 'List-Post' ) is not None:
            uris = [ infotrope.url.URL( xx.strip( ' \r\n\t<>' ) ) for xx in replyto.list_header( 'List-Post' ).split(',') ]
            for x in uris:
                if x.scheme == 'mailto':
                    self.list_address = x.path
        # 2) Fall back to a Newsgroups header (news posting instead of mail).
        if self.list_address is None:
            ng = replyto.list_header( 'Newsgroups', True )
            if ng is not None:
                self.newsgroups = ['news:'+x for x in ng.split(',')]
        # 3) Last resort: the folder's stored mailing-list option in ACAP.
        if self.list_address is None and self.newsgroups is None:
            a = wx.GetApp().acap_home()
            s = infotrope.acap.search( 'SEARCH "/option/~/vendor.infotrope/polymer/folders/" RETURN ("option.vendor.infotrope.mailing-list") EQUAL "option.vendor.infotrope.folder-name" "i;octet" "%s"' % replyto.mailbox().uri().asString(), connection=a )
            s.wait()  # blocking: wait for the ACAP search to complete
            if len(s)>0:
                e = s[0]
                if e['option.vendor.infotrope.mailing-list']['value'] is not None:
                    self.list_address = e['option.vendor.infotrope.mailing-list']['value']
        # Derive a human-readable name from List-ID; parseaddr puts a bare
        # id in slot [1] and a quoted display name in slot [0].
        if self.list_name is None and self.newsgroups is None:
            if replyto.list_header( 'List-ID' ) is not None:
                x = email.Utils.parseaddr( replyto.list_header( 'List-ID' ) )
                if x[0]=='':
                    if x[1]!='':
                        self.list_name = x[1]
                else:
                    self.list_name = x[0]
        if self.list_name is None:
            self.list_name = self.list_address
        if self.list_address is None and self.newsgroups is None:
            # Nothing resolved at all; warn the user (composer state is unusable).
            dlg = wx.MessageDialog( parent, "There are no mailing list headers, and the folder has no default. I'm stuck. Things will now crash.", "Infotrope Polymer", wx.ICON_INFORMATION )
            dlg.ShowModal()
        MessageReply.__init__( self, parent, replyto )
    def list_recipients( self ):
        """ We want to reply only to the list address. """
        if self.newsgroups is not None:
            return [ polymer.addressbook.Recipient( self.GetParent(), field=x ) for x in self.newsgroups ]
        else:
            return [ polymer.addressbook.Recipient( self.GetParent(), field=self.list_name, email=self.list_address ) ]
    def get_recipients( self, id ):
        # Only the To field is filled in, with the list (or newsgroup) address.
        if id == ID_TO:
            return self.list_recipients()
class MessageReplyListSender( MessageReplyList ):
def __init__( self, parent, replyto ):
MessageReplyList.__init__( self, parent, replyto )
def get_recipients( self, id ):
""" We want to reply to the sender, using the reply-to address if it's set. """
if id==ID_CC:
# This might be in headers. | |
'readStart': 2,
'readEnd': 6,
'readStartInSubject': -2,
'readEndInSubject': 5,
}, normalized)
def testSubjectExtendsRightAndLeft(self):
"""
The subject extends to the right and left of the translated query.
s...sss
...q
"""
hsp = FakeHSP(subjectStart=2, subjectEnd=4, queryStart=12, queryEnd=4,
frame=-3)
normalized = normalizeHSP(hsp, 14, 'blastx')
self.assertEqual({
'subjectStart': 1,
'subjectEnd': 4,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 1,
'readEndInSubject': 5,
}, normalized)
class TestBlastxFramePlus1WithGaps(TestCase):
"""
Tests for normalizeHSP for DIAMOND blastx output when frame=1 (i.e., the
query matches in the order it was given to DIAMOND, and the translation
frame starts at the first nucleotide) and with gaps.
"""
# All query offsets and lengths must be in terms of nucleotides.
# Subject offsets are in terms of protein AA sequences. This is how
# DIAMOND reports those offsets.
#
# In the little diagrams in the docstrings below, the first line is the
# subject and the second the query. Dots indicate where the matched
# region is. The queries are shown translated so as to line up properly
# with the subjects. Gaps are shown as a hyphen.
def testIdenticalWithQueryGap(self):
"""
The subject start and end are identical to those of the translated
query, given one gap in the query.
....
.-..
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=4, queryStart=1, queryEnd=9,
frame=1, btop='1-K2')
normalized = normalizeHSP(hsp, 9, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 4,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 0,
'readEndInSubject': 4,
}, normalized)
def testIdenticalWithTwoQueryGaps(self):
"""
The subject start and end are identical to those of the translated
query, given two gaps in the query.
....
.--.
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=4, queryStart=1, queryEnd=6,
frame=1, btop='1-K-K1')
normalized = normalizeHSP(hsp, 6, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 4,
'readStart': 0,
'readEnd': 2,
'readStartInSubject': 0,
'readEndInSubject': 4,
}, normalized)
def testIdenticalWithSubjectGap(self):
"""
The subject start and end are identical to those of the translated
query, given one gap in the subject.
.-..
....
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=1, queryEnd=12,
frame=1, btop='1K-2')
normalized = normalizeHSP(hsp, 12, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 0,
'readEnd': 4,
'readStartInSubject': 0,
'readEndInSubject': 4,
}, normalized)
def testIdenticalWithTwoSubjectGaps(self):
"""
The subject start and end are identical to those of the translated
query, given two gaps in the subject.
.--.
....
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=2, queryStart=1, queryEnd=12,
frame=1, btop='1K-K-1')
normalized = normalizeHSP(hsp, 12, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 2,
'readStart': 0,
'readEnd': 4,
'readStartInSubject': 0,
'readEndInSubject': 4,
}, normalized)
def testIdenticalWithQueryAndSubjectGap(self):
"""
The subject start and end are identical to those of the translated
query, given one gap in the query and one in the subject.
.-..
..-.
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=1, queryEnd=9,
frame=1, btop='1K--K2')
normalized = normalizeHSP(hsp, 9, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 0,
'readEndInSubject': 4,
}, normalized)
def testSubjectExtendsLeftWithQueryGap(self):
"""
The subject overlaps the translated query to the left. The query has a
gap.
ss....
..-.
"""
hsp = FakeHSP(subjectStart=3, subjectEnd=6, queryStart=1, queryEnd=9,
frame=1, btop='2-K1')
normalized = normalizeHSP(hsp, 9, 'blastx')
self.assertEqual({
'subjectStart': 2,
'subjectEnd': 6,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 2,
'readEndInSubject': 6,
}, normalized)
def testSubjectExtendsLeftWithSubjectGap(self):
"""
The subject overlaps the translated query to the left. The subject has
a gap.
ss.-..
....
"""
hsp = FakeHSP(subjectStart=3, subjectEnd=5, queryStart=1, queryEnd=12,
frame=1, btop='1K-2')
normalized = normalizeHSP(hsp, 12, 'blastx')
self.assertEqual({
'subjectStart': 2,
'subjectEnd': 5,
'readStart': 0,
'readEnd': 4,
'readStartInSubject': 2,
'readEndInSubject': 6,
}, normalized)
def testSubjectExtendsLeftWithQueryAndSubjectGap(self):
"""
The subject overlaps the translated query to the left. The query and
subject both have a gap.
ss.-..
..-.
"""
hsp = FakeHSP(subjectStart=3, subjectEnd=5, queryStart=1, queryEnd=9,
frame=1, btop='1K--K1')
normalized = normalizeHSP(hsp, 9, 'blastx')
self.assertEqual({
'subjectStart': 2,
'subjectEnd': 5,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 2,
'readEndInSubject': 6,
}, normalized)
def testQueryExtendsLeftWithQueryGap(self):
"""
The translated query extends to the left of the subject and the query
has a gap.
....
qq..-.
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=4, queryStart=7, queryEnd=15,
frame=1, btop='2-K1')
normalized = normalizeHSP(hsp, 15, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 4,
'readStart': 2,
'readEnd': 5,
'readStartInSubject': -2,
'readEndInSubject': 4,
}, normalized)
def testQueryExtendsLeftWithSubjectGap(self):
"""
The translated query extends to the left of the subject and the subject
has a gap.
..-.
qq....
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=7, queryEnd=18,
frame=1, btop='2K-1')
normalized = normalizeHSP(hsp, 18, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 2,
'readEnd': 6,
'readStartInSubject': -2,
'readEndInSubject': 4,
}, normalized)
def testQueryExtendsLeftWithQueryAndSubjectGap(self):
"""
The translated query extends to the left of the subject and the query
and subject have gaps.
..-.
qq.-..
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=7, queryEnd=15,
frame=1, btop='1-KK-1')
normalized = normalizeHSP(hsp, 15, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 2,
'readEnd': 5,
'readStartInSubject': -2,
'readEndInSubject': 4,
}, normalized)
def testSubjectExtendsRightWithQueryGap(self):
"""
The subject extends to the right of the translated query and the query
has a gap.
....ss
..-.
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=4, queryStart=1, queryEnd=9,
frame=1, btop='2-K1')
normalized = normalizeHSP(hsp, 9, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 4,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 0,
'readEndInSubject': 4,
}, normalized)
def testSubjectExtendsRightWithSubjectGap(self):
"""
The subject extends to the right of the translated query and the
subject has a gap.
..-.ss
....
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=1, queryEnd=12,
frame=1, btop='2K-1')
normalized = normalizeHSP(hsp, 12, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 0,
'readEnd': 4,
'readStartInSubject': 0,
'readEndInSubject': 4,
}, normalized)
def testSubjectExtendsRightWithQueryAndSubjectGap(self):
"""
The subject extends to the right of the translated query and the
query and subject both have gaps.
..-.ss
.-..
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=1, queryEnd=9,
frame=1, btop='2-KK-1')
normalized = normalizeHSP(hsp, 9, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 0,
'readEndInSubject': 4,
}, normalized)
def testQueryExtendsRightWithQueryGap(self):
"""
The translated query extends to the right of the subject and the
query has a gap.
....
..-.qq
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=4, queryStart=1, queryEnd=9,
frame=1, btop='2-K1')
normalized = normalizeHSP(hsp, 15, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 4,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 0,
'readEndInSubject': 6,
}, normalized)
def testQueryExtendsRightWithSubjectGap(self):
"""
The translated query extends to the right of the subject and the
subject has a gap.
..-.
....qq
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=1, queryEnd=12,
frame=1, btop='2K-1')
normalized = normalizeHSP(hsp, 18, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 0,
'readEnd': 4,
'readStartInSubject': 0,
'readEndInSubject': 6,
}, normalized)
def testQueryExtendsRightWithQueryAndSubjectGap(self):
"""
The translated query extends to the right of the subject and the
query and subject both have gaps.
..-.
.-..qq
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=1, queryEnd=9,
frame=1, btop='1-KK-1')
normalized = normalizeHSP(hsp, 15, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 0,
'readEndInSubject': 6,
}, normalized)
def testQueryExtendsRightAndLeftWithQueryGap(self):
"""
The translated query extends to the right and left of the subject and
the query has a gap.
....
qq.-..q
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=4, queryStart=7, queryEnd=15,
frame=1, btop='1-K2')
normalized = normalizeHSP(hsp, 18, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 4,
'readStart': 2,
'readEnd': 5,
'readStartInSubject': -2,
'readEndInSubject': 5,
}, normalized)
def testQueryExtendsRightAndLeftWithSubjectGap(self):
"""
The translated query extends to the right and left of the subject and
the subject has a gap.
.-..
qq....q
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=7, queryEnd=18,
frame=1, btop='1K-2')
normalized = normalizeHSP(hsp, 21, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 2,
'readEnd': 6,
'readStartInSubject': -2,
'readEndInSubject': 5,
}, normalized)
def testQueryExtendsRightAndLeftWithQueryAndSubjectGap(self):
"""
The translated query extends to the right and left of the subject and
the query and the subject have gaps.
.-..
qq..-.q
"""
hsp = FakeHSP(subjectStart=1, subjectEnd=3, queryStart=7, queryEnd=15,
frame=1, btop='1K--K1')
normalized = normalizeHSP(hsp, 18, 'blastx')
self.assertEqual({
'subjectStart': 0,
'subjectEnd': 3,
'readStart': 2,
'readEnd': 5,
'readStartInSubject': -2,
'readEndInSubject': 5,
}, normalized)
def testSubjectExtendsRightAndLeftWithQueryGap(self):
"""
The subject extends to the right and left of the translated query and
the query has a gap.
s....ss
.-..
"""
hsp = FakeHSP(subjectStart=2, subjectEnd=5, queryStart=3, queryEnd=9,
frame=1, btop='1-K1')
normalized = normalizeHSP(hsp, 9, 'blastx')
self.assertEqual({
'subjectStart': 1,
'subjectEnd': 5,
'readStart': 0,
'readEnd': 3,
'readStartInSubject': 1,
'readEndInSubject': 5,
}, normalized)
def testSubjectExtendsRightAndLeftWithSubjectGap(self):
"""
The subject extends to the right and left of the translated query and
the subject has a gap.
s.-..ss
....
"""
hsp = | |
import os
import ast
import datetime
import configparser
import MyUtilities.common
# Module-level re-exports: sentinel for "argument not given" and an
# open() wrapper that creates missing directories (from MyUtilities).
NULL = MyUtilities.common.NULL
openPlus = MyUtilities.common.openPlus
#Monkey Patches
# Keep option keys case-sensitive; ConfigParser lower-cases them by default.
configparser.ConfigParser.optionxform = str
class Configuration(MyUtilities.common.EnsureFunctions, MyUtilities.common.CommonFunctions):
"""Used to handle .ini files.
- Both keys and values can have spaces
- Multi-line values must have extra lines indented one line deeper
- Sections and single-line values can be indented with no consequence
- Keys can be separated from values by either = or :
- Keys without values can have no separator
- The separator can have spaces on each side
- Comments can be done using # or ;
___________________ EXAMPLE INI FILE ___________________
[DEFAULT]
scanDelay = %(delay) %(units)
units = ms
[main]
startup_user = admin
[AutoSave]
delay = 1
units = s
[GUI]
delay = 500
________________________________________________________
Use: https://pymotw.com/3/configparser/
Use: https://docs.python.org/3.6/library/configparser.html
Use: https://martin-thoma.com/configuration-files-in-python/#configparser
Use: https://www.blog.pythonlibrary.org/2010/01/01/a-brief-configobj-tutorial/
use: https://www.blog.pythonlibrary.org/2013/10/25/python-101-an-intro-to-configparser/
"""
	def __init__(self, default_filePath = None, *, default_values = None, default_section = None, forceExists = False, forceCondition = None,
		allowNone = True, interpolation = True, valid_section = None, readOnly = False, defaultFileExtension = None, backup_filePath = None,
		knownTypes = None, knownTypesSection = "knownTypes", knownTypeDefault = None, version = None):
		"""
		allowNone (bool) - Determines what happens if a setting does not have a set value
			- If True: Will use None
			- If False: Will raise an error during load()
		interpolation (bool) - Determines what kind of interpolation can be done in get()
			- If True: Extended Interpolation
			- If False: Basic Interpolation
			- If None: No Interpolation
		valid_section (list) - Which sections (excluding DEFAULT) to load
			- If str: Will load only that section
			- If None: Will load all sections
			~ Optionally, variables can be defined in the section given to 'knownTypesSection'
		knownTypesSection (str) - Which section is used to store knownTypes
			- If None: Will not use a section to get knownTypes from
		version (str) - What version the config file must have
			- If None: Will not do a version check
			- If different: Will replace the config file with the one from *default_filePath*
		Example Input: Configuration(self)
		Example Input: Configuration(self, source_directory = "database")
		Example Input: Configuration(self, defaults = {"startup_user": "admin"})
		"""
		self.defaultFileExtension = defaultFileExtension or "ini"
		self.default_section = default_section or "main"
		self.default_filePath = default_filePath or f"settings.{self.defaultFileExtension}"
		self.backup_filePath = backup_filePath
		self.version = version
		# truthy -> extended; falsy-but-not-None (e.g. False) -> basic; None -> none.
		if (interpolation):
			interpolation = self.MyExtendedInterpolation()
		elif (interpolation is not None):
			interpolation = configparser.BasicInterpolation()
		# setReset() stores the parser arguments; reset() builds the parser from them.
		# NOTE(review): self.converters and MyExtendedInterpolation are defined
		# elsewhere on this class (not visible here) — presumably class attributes.
		self.setReset(converters = self.converters, allow_no_value = allowNone,
			defaults = default_values or {}, interpolation = interpolation)
		self.reset()
		# self.config.optionxform = str
		self.knownTypeDefault = knownTypeDefault or "_default_"
		self.knownTypesSection = knownTypesSection or None
		self.knownTypes = knownTypes or {}
		self.readOnly = readOnly
		self.set_validSection(valid_section)
		# Only read a file when a path was explicitly given.
		if (default_filePath):
			self.load(forceExists = forceExists, forceCondition = forceCondition)
def setReset(self, *args, **kwargs):
self._reset = (args, kwargs)
def _eval(self, *args, **kwargs):
value = self.config.get(*args, **kwargs)
return ast.literal_eval(value)
def reset(self):
self.config = configparser.ConfigParser(*self._reset[0], **self._reset[1])
self.dataType_catalogue = {
None: self.config.get,
eval: self._eval, "eval": self._eval,
str: self.config.get, "str": self.config.get,
int: self.config.getint, "int": self.config.getint,
float: self.config.getfloat, "float": self.config.getfloat,
bool: self.config.getboolean, "bool": self.config.getboolean,
datetime.datetime: self.config.getdatetime, "datetime": self.config.getdatetime,
}
def __repr__(self):
representation = f"{type(self).__name__}(id = {id(self)})"
return representation
def __str__(self):
output = f"{type(self).__name__}()\n-- id: {id(self)}\n"
return output
def __enter__(self):
return self.config
def __exit__(self, exc_type, exc_value, traceback):
if (traceback is not None):
print(exc_type, exc_value)
return False
def __getitem__(self, key):
self.check_invalidSection(key)
return self.config[key]
def __setitem__(self, key, value):
if (self.readOnly):
raise ReadOnlyError(self)
self.check_invalidSection(key)
self.config[key] = value
def __delitem__(self, key):
if (self.readOnly):
raise ReadOnlyError(self)
self.check_invalidSection(key)
del self.config[key]
def __contains__(self, key):
if (self.check_invalidSection(key, raiseError = False)):
return False
return key in self.config
def keys(self):
if (self.valid_section is None):
return tuple(self.config.keys())
return tuple(section for section in self.config.keys() if (section in self.valid_section))
def values(self):
if (self.valid_section is None):
return tuple(self.config.values())
return tuple(handle for section, handle in self.config.items() if (section in self.valid_section))
def items(self):
if (self.valid_section is None):
return tuple(self.config.items())
return tuple((section, handle) for section, handle in self.config.items() if (section in self.valid_section))
def _asdict(self):
if (self.valid_section is None):
return dict(self.config)
return {key: value for key, value in self.items()}
	def check_invalidSection(self, section, *, raiseError = True, valid_section = NULL):
		"""Validate *section* against the active section filter.

		Returns True when the section is invalid and raiseError is False;
		raises InvalidSectionError when invalid and raiseError is True;
		returns None (falsy) when the section is acceptable.
		"""
		if (valid_section is NULL):
			valid_section = self.valid_section
		# A section passes if no filter is set, if the filter names it, or if
		# the parser already has it (has_section checked with the filter off).
		if ((valid_section is not None) and (section not in valid_section) and (not self.has_section(section, valid_section = None))):
			if (raiseError):
				raise InvalidSectionError(self, section)
			return True
	def _getType(self, variable, section = None, *, dataType = None):
		"""Returns what type to use for the given variable.

		An explicit *dataType* wins outright.  Otherwise the lookup order is:
		the knownTypes config section (per-variable), then self.knownTypes by
		section, by the parser's DEFAULT section, then flat by variable name,
		and finally the knownTypes section's catch-all default entry.
		Example Input: _getType("delay")
		"""
		if (dataType is None):
			section = section or self.default_section
			check_section = False
			if ((self.knownTypesSection is not None) and (self.knownTypesSection in self.config.sections())):
				if (self.has_setting(variable, self.knownTypesSection)):
					function = self.dataType_catalogue.get(self.config[self.knownTypesSection][variable], None)
					if (function is not None):
						return function
				# Remember the section exists so the catch-all below may be consulted.
				check_section = True
			if ((section in self.knownTypes) and (variable in self.knownTypes[section])):
				return self.dataType_catalogue[self.knownTypes[section][variable]]
			default_section = self.config.default_section
			if ((default_section in self.knownTypes) and (variable in self.knownTypes[default_section])):
				return self.dataType_catalogue[self.knownTypes[default_section][variable]]
			if (variable in self.knownTypes):
				return self.dataType_catalogue[self.knownTypes[variable]]
			if (check_section and self.has_setting(self.knownTypeDefault, self.knownTypesSection)):
				function = self.dataType_catalogue.get(self.config[self.knownTypesSection][self.knownTypeDefault], None)
				if (function is not None):
					return function
		# Falls through to the plain str getter when dataType is still None.
		return self.dataType_catalogue[dataType]
	def get(self, variable = None, section = None, *, dataType = None, default_values = None, include_defaults = True,
		fallback = configparser._UNSET, raw = False, forceSection = False, forceSetting = False, valid_section = NULL):
		"""Returns a setting from the given section.
		variable (str) - What setting to get
			- If list: Will return a dictionary of all settings in the list
			- If None: Will return a dictionary of all settings in the section
		section (str) - What section to write this setting in
			- If None: Will use the default section
		dataType (type) - What type the data should be in
			- If None: Will read as str, unless the variable is logged in self.knownTypes under 'section' or DEFAULT
		default_values (dict) - Local default values; overrides the global default values temporarily
		include_defaults (bool) - Determines if the default section should be used as a fallback
		raw (bool) - Determines if the value should be returned without applying interpolation
		___________________ BASIC INTERPOLATION ___________________
		Variables are denoted with a single '%', followed by closed paren
			Example: scanDelay = %(delay) %(units)
		To use an escaped %: %%
			Example: units = %%
		___________________ EXTENDED INTERPOLATION ___________________
		Variables are denoted with a '$', followed by braces
			Example: scanDelay = ${delay} ${units}
		Variables from other sections can be used with a ':'
			Example: scanDelay = ${delay} ${general:units}
		Example Input: get()
		Example Input: get("startup_user")
		Example Input: get("scanDelay", section = "AutoSave")
		Example Input: get("scanDelay", section = "AutoSave", dataType = int)
		Example Input: get("startup_window", defaults = {"startup_window": "inventory"})
		Example Input: get(("user", "password", "<PASSWORD>"), section = "Database_Admin")
		Example Input: get({"Database_Admin": ("user", "password", "port")})
		Example Input: get(include_defaults = False)
		"""
		section = section or self.default_section
		self.check_invalidSection(section, valid_section = valid_section)
		if (not self.has_section(section)):
			section = self.config.default_section
		# variable=None: expand to every key in the section and recurse.
		if (variable is None):
			if (include_defaults):
				variableList = tuple(self[section].keys())
			else:
				# _sections skips the DEFAULT fallback (private parser attribute).
				variableList = tuple(self.config._sections[section].keys())
			return self.get(variableList, section = section, dataType = dataType, default_values = default_values, fallback = fallback,
				raw = raw, forceSetting = forceSetting, forceSection = forceSection, include_defaults = include_defaults, valid_section = valid_section)
		# dict: keys are sections, values are variable specs; recurse per section.
		if (isinstance(variable, dict)):
			answer = {_section: self.get(_variable, section = _section, dataType = dataType, default_values = default_values, fallback = fallback,
				raw = raw, forceSetting = forceSetting, forceSection = forceSection, include_defaults = include_defaults, valid_section = valid_section) for _section, _variable in variable.items()}
			if (forceSection or len(answer) > 1):
				return answer
			elif (not answer):
				return
			# Single-entry result collapses to the bare value unless forced.
			return next(iter(answer.values()))
		# Other non-scalar iterables: treat as a list of variable names.
		if (not isinstance(variable, (str, int, float))):
			answer = {_variable: self.get(_variable, section = section, dataType = dataType, default_values = default_values, fallback = fallback,
				raw = raw, forceSetting = forceSetting, forceSection = forceSection, include_defaults = include_defaults, valid_section = valid_section) for _variable in variable}
			if (forceSetting or len(answer) > 1):
				return answer
			elif (not answer):
				return
			return next(iter(answer.values()))
		function = self._getType(variable, section, dataType = dataType)
		try:
			return function(section, variable, vars = default_values or {}, raw = raw, fallback = fallback)
		except (configparser.InterpolationDepthError, configparser.InterpolationMissingOptionError) as error:
			# Interpolation failed: warn and retry with interpolation disabled.
			print("@Configuration.get", error)
			return function(section, variable, vars = default_values or {}, raw = True, fallback = fallback)
		except Exception as error:
			print("ERROR", [function, section, variable, default_values or {}, raw, fallback])
			raise error
	def set(self, variable, value = None, section = None, *, valid_section = NULL, save = False):
		"""Adds a setting to the given section.
		variable (str) - What setting to get
			- If list: Will set each variable in the list to 'value'
			- If dict: Will ignore 'value' and set each key to it's given value
		section (str) - What section to write this setting in
			- If None: Will use the default section
		save (bool) - Determines if the file is written out after the change
		Example Input: set("startup_user", "admin")
		Example Input: set("scanDelay", 1000, section = "AutoSave")
		Example Input: set({"startup_user": "admin"})
		Example Input: set({"AutoSave": {"scanDelay": 1000}})
		"""
		if (self.readOnly):
			raise ReadOnlyError(self)
		self.check_invalidSection(section, valid_section = valid_section)
		if (isinstance(variable, dict)):
			for _variable, _value in variable.items():
				if (isinstance(_value, dict)):
					# Nested dict: the outer key is actually a section name.
					for __variable, __value in _value.items():
						self.set(__variable, value = __value, section = _variable, valid_section = valid_section, save = save)
				else:
					self.set(_variable, value = _value, section = section, valid_section = valid_section, save = save)
			return
		if (not isinstance(variable, (str, int, float))):
			# Iterable of names: give each one the same value.
			for _variable in variable:
				self.set(_variable, value = value, section = section, valid_section = valid_section, save = save)
			return
		section = section or self.default_section
		if (not self.config.has_section(section)):
			self.config.add_section(section)
		if (value is None):
			self.config.set(section, variable, "")
		else:
			# ConfigParser stores strings only; stringify via f-string.
			self.config.set(section, variable, f"{value}")
		if (save):
			self.save()
def replaceWithDefault(self, filePath = None, *, forceExists = False, allowBackup = True, mustRead = False):
"""Replaces the file with the backup file, or throws an error
Example Input: replaceWithDefault()
Example Input: replaceWithDefault("database/settings_user.ini")
"""
global openPlus
filePath = filePath or self.default_filePath
if (allowBackup and (self.backup_filePath is not None)):
if (not os.path.exists(self.backup_filePath)):
raise FileExistsError(self.backup_filePath)
self.config.read(self.backup_filePath)
elif (mustRead):
raise ValueError("Could not read from a backup file")
if (forceExists and isinstance(forceExists, dict)):
self.set(forceExists, valid_section = None)
with openPlus(filePath) as config_file:
self.config.write(config_file)
def load(self, filePath = None, *, version = NULL, valid_section = NULL, forceExists = False, forceCondition = None, allowBackup = True):
"""Loads the configuration file.
filePath (str) | |
"""
Skeleton example of a Ginga local plugin called 'MyLocalPlugin'
To enable it, run ginga with the command
$ ginga --plugins=MyLocalPlugin
it will then be available from the "Operations" button.
"""
from ginga import GingaPlugin
from ginga.gw import Widgets
# import any other modules you want here--it's a python world!
import os
from datetime import datetime as dt
import numpy as np
from ginga import GingaPlugin, RGBImage, colors
from ginga.gw import Widgets
from ginga.misc import ParamSet, Bunch
from ginga.util import dp
from ginga.gw.GwHelp import FileSelection
from astropy.io import fits
from astropy.modeling import models, fitting
from scipy import ndimage
import socket
class CSU_initializer(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
    """
    This method is called when the plugin is loaded for the first
    time. ``fv`` is a reference to the Ginga (reference viewer) shell
    and ``fitsimage`` is a reference to the specific ImageViewCanvas
    object associated with the channel on which the plugin is being
    invoked.
    You need to call the superclass initializer and then do any local
    initialization.
    """
    super(CSU_initializer, self).__init__(fv, fitsimage)
    # Load plugin preferences
    prefs = self.fv.get_preferences()
    self.settings = prefs.createCategory('plugin_CSU_initializer')
    self.settings.setDefaults(ibar_num=1,
                              mbar_num=1,
                              ebar_num=1,
                              move_to_open=False,
                              bar_dest=0.0,
                              bar_pos=137.0,
                              )
    self.settings.load(onError='silent')
    # Hosts from which commands may be sent to the real instrument; the
    # current host (short, lowercased name) is compared against this list.
    self.instrument_hosts = ['vm-mosfire', 'nuu', 'vm-mosfirebld']
    self.hostname = socket.gethostname().split('.')[0].lower()
    # Cached bar-position results from the three possible sources
    # (image analysis, csu_bar_state file, FITS header); populated later.
    self.bars_analysis = None
    self.state_analysis = None
    self.bars_file = None
    self.state_file = None
    self.bars_header = None
    self.state_header = None
    # Drawing canvas used for the bar-position overlay layer
    self.layertag = 'bars-canvas'
    self.dc = fv.get_draw_classes()
    canvas = self.dc.DrawingCanvas()
    canvas.enable_draw(False)
    canvas.set_surface(self.fitsimage)
    self.canvas = canvas
    self.colornames = colors.get_colors()
    self.canvas_img = None
    self.mfilesel = FileSelection(self.fv.w.root.get_widget())
    ## Fit relationship between bar position and pixels
    tick = dt.now()
    pixels, physical = self.get_data()
    self.fit_transforms(pixels, physical)
    tock = dt.now()
    elapsed = (tock-tick).total_seconds()  # only used by the debug print below
    # print('Completed fit of transforms in {:.3f} s'.format(elapsed))
    ## Determine slit angle and bar center to center distance in pixels
    ## from the transformation and the known longslit positions
    ## in longslit, bar 02 is at 145.472
    ## in longslit, bar 92 is at 129.480
    physical = [ [145.472, self.bar_to_slit(2)],
                 [129.480, self.bar_to_slit(92)] ]
    pixels = self.physical_to_pixel(physical)
    # Slit angle from the pixel offsets between the two reference bars
    dx = pixels[1][0] - pixels[0][0]
    dy = pixels[0][1] - pixels[1][1]
    self.slit_angle_pix = np.arctan(dx/dy)
    # print("Slit Angle on CCD = {:.3f} deg".format(self.slit_angle_pix * 180./np.pi))
    # Per-slit height in pixels, normalized by the slit index span
    self.slit_height_pix = dy / (self.bar_to_slit(92) - self.bar_to_slit(2))
    # print("Slit Height on CCD = {:.3f} pix".format(self.slit_height_pix))
def build_gui(self, container):
    """
    This method is called when the plugin is invoked.  It builds the
    GUI used by the plugin into the widget layout passed as
    ``container``.
    This method may be called many times as the plugin is opened and
    closed for modal operations.  The method may be omitted if there
    is no GUI for the plugin.

    This specific example uses the GUI widget set agnostic wrappers
    to build the GUI, but you can also just as easily use explicit
    toolkit calls here if you only want to support one widget set.
    """
    top = Widgets.VBox()
    top.set_border_width(4)
    # this is a little trick for making plugins that work either in
    # a vertical or horizontal orientation.  It returns a box container,
    # a scroll widget and an orientation ('vertical', 'horizontal')
    vbox, sw, orientation = Widgets.get_oriented_box(container)
    vbox.set_border_width(4)
    vbox.set_spacing(2)
    self.msg_font = self.fv.get_font("sansFont", 12)
    ## -----------------------------------------------------
    ## Acquire or Load Image
    ## -----------------------------------------------------
    fr = Widgets.Frame("Image the CSU Mask")
    vbox.add_widget(fr, stretch=0)
    btns1 = Widgets.HBox()
    btns1.set_spacing(1)
    btn_acq_im = Widgets.Button("Acquire Mask Image")
    btn_acq_im.add_callback('activated', lambda w: self.acq_mask_image())
    btns1.add_widget(btn_acq_im, stretch=0)
    btns1.add_widget(Widgets.Label(''), stretch=1)
    vbox.add_widget(btns1, stretch=0)
    ## -----------------------------------------------------
    ## Analyze Image
    ## -----------------------------------------------------
    fr = Widgets.Frame("Analyze CSU Mask Image")
    vbox.add_widget(fr, stretch=0)
    btns2 = Widgets.HBox()
    btns2.set_spacing(3)
    btn_analyze = Widgets.Button("Analyze Mask Image")
    btn_analyze.add_callback('activated', lambda w: self.analyze_mask_image())
    btns2.add_widget(btn_analyze, stretch=0)
    btns2.add_widget(Widgets.Label(''), stretch=1)
    btn_overlay = Widgets.Button("Overlay Analysis Results")
    btn_overlay.add_callback('activated', lambda w: self.overlay_analysis_results())
    btns2.add_widget(btn_overlay, stretch=0)
    btns2.add_widget(Widgets.Label(''), stretch=1)
    vbox.add_widget(btns2, stretch=0)
    ## -----------------------------------------------------
    ## Edit Analysis Results
    ## -----------------------------------------------------
    fr = Widgets.Frame("Edit Analysis Results")
    # Caption tuples map (label text, widget type) pairs into the Bunch
    # returned by Widgets.build_info (keys are lowercased label names).
    captions = [
        ("Set Bar Number", 'label',\
         'set_ebar_num', 'entry',),\
        ("Set Position", 'label',\
         'set_bar_pos', 'entry'),\
        ("Edit Bar #", 'label',\
         'ebar_num', 'llabel',
         'to', 'label',
         'bar_pos', 'llabel',
         "mm", 'label',\
         "Edit Bar", 'button'),
        ]
    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)
    # Seed the edit-bar widgets from saved settings
    ebar_num = int(self.settings.get('ebar_num', 1))
    b.ebar_num.set_text('{:2d}'.format(ebar_num))
    b.set_ebar_num.set_text('{:2d}'.format(ebar_num))
    b.set_ebar_num.add_callback('activated', self.set_ebar_num_cb)
    b.set_ebar_num.set_tooltip("Set bar number to move")
    bar_pos = float(self.settings.get('bar_pos', 0.0))
    b.bar_pos.set_text('{:+.1f}'.format(bar_pos))
    b.set_bar_pos.set_text('{:+.1f}'.format(bar_pos))
    b.set_bar_pos.add_callback('activated', self.set_bar_pos_cb)
    b.set_bar_pos.set_tooltip("Set distance to move bar")
    b.edit_bar.add_callback('activated', lambda w: self.edit_bar())
    fr.set_widget(w)
    vbox.add_widget(fr, stretch=0)
    ## -----------------------------------------------------
    ## Bar Overlay
    ## -----------------------------------------------------
    fr = Widgets.Frame("Bar Positions Overlay")
    vbox.add_widget(fr, stretch=0)
    btns1 = Widgets.HBox()
    btns1.set_spacing(1)
    btn_csu_bar_state = Widgets.Button("From csu_bar_state")
    btn_csu_bar_state.add_callback('activated', lambda w: self.overlaybars_from_file())
    btns1.add_widget(btn_csu_bar_state, stretch=0)
    btns1.add_widget(Widgets.Label(''), stretch=1)
    btn_fits_header = Widgets.Button("From FITS Header")
    btn_fits_header.add_callback('activated', lambda w: self.overlaybars_from_header())
    btns1.add_widget(btn_fits_header, stretch=0)
    btns1.add_widget(Widgets.Label(''), stretch=1)
    vbox.add_widget(btns1, stretch=0)
    btns2 = Widgets.HBox()
    btns2.set_spacing(1)
    btn_clear = Widgets.Button("Clear Overlays")
    btn_clear.add_callback('activated', lambda w: self.clear_canvas())
    btns2.add_widget(btn_clear, stretch=0)
    btns2.add_widget(Widgets.Label(''), stretch=1)
    vbox.add_widget(btns2, stretch=0)
    ## -----------------------------------------------------
    ## Initialize Bar
    ## -----------------------------------------------------
    fr = Widgets.Frame("Individual Bar Initialization")
    captions = [
        ("Set Bar Number", 'label',\
         'set_ibar_num', 'entry',),\
        ("Initialize Bar #", 'label',\
         'ibar_num', 'llabel',\
         "Initialize Bar", 'button',\
         "Open Before Init", 'checkbutton'),
        ]
    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)
    # Seed the initialize-bar widgets from saved settings
    ibar_num = int(self.settings.get('ibar_num', 1))
    b.ibar_num.set_text('{:2d}'.format(ibar_num))
    b.set_ibar_num.set_text('{:2d}'.format(ibar_num))
    b.set_ibar_num.add_callback('activated', self.set_ibar_num_cb)
    b.set_ibar_num.set_tooltip("Set bar number to initialize")
    b.open_before_init.set_tooltip("Move bar to open position before initialization")
    open_before_init = self.settings.get('move_to_open', False)
    b.open_before_init.set_state(open_before_init)
    b.open_before_init.add_callback('activated', self.open_before_init_cb)
    b.initialize_bar.add_callback('activated', lambda w: self.initialize_bar())
    fr.set_widget(w)
    vbox.add_widget(fr, stretch=0)
    ## -----------------------------------------------------
    ## Move Bar
    ## -----------------------------------------------------
    # Frame for instructions and add the text widget with another
    # blank widget to stretch as needed to fill emp
    fr = Widgets.Frame("Individual Bar Control")
    captions = [
        ("Set Bar Number", 'label',\
         'set_mbar_num', 'entry',),\
        ("Set Destination", 'label',\
         'set_bar_dest', 'entry'),\
        ("Move Bar #", 'label',\
         'mbar_num', 'llabel',
         'to', 'label',
         'bar_dest', 'llabel',
         "mm", 'label',\
         "Move Bar", 'button'),
        ]
    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)
    # Seed the move-bar widgets from saved settings
    mbar_num = int(self.settings.get('mbar_num', 1))
    b.mbar_num.set_text('{:2d}'.format(mbar_num))
    b.set_mbar_num.set_text('{:2d}'.format(mbar_num))
    b.set_mbar_num.add_callback('activated', self.set_mbar_num_cb)
    b.set_mbar_num.set_tooltip("Set bar number to move")
    bar_dest = float(self.settings.get('bar_dest', 0.0))
    b.bar_dest.set_text('{:+.1f}'.format(bar_dest))
    b.set_bar_dest.set_text('{:+.1f}'.format(bar_dest))
    b.set_bar_dest.add_callback('activated', self.set_bar_dest_cb)
    b.set_bar_dest.set_tooltip("Set distance to move bar")
    b.move_bar.add_callback('activated', lambda w: self.move_bar())
    fr.set_widget(w)
    vbox.add_widget(fr, stretch=0)
    ## -----------------------------------------------------
    ## Spacer
    ## -----------------------------------------------------
    # Add a spacer to stretch the rest of the way to the end of the
    # plugin space
    spacer = Widgets.Label('')
    vbox.add_widget(spacer, stretch=1)
    # scroll bars will allow lots of content to be accessed
    top.add_widget(sw, stretch=1)
    ## -----------------------------------------------------
    ## Bottom
    ## -----------------------------------------------------
    # A button box that is always visible at the bottom
    btns_close = Widgets.HBox()
    btns_close.set_spacing(3)
    # Add a close button for the convenience of the user
    btn = Widgets.Button("Close")
    btn.add_callback('activated', lambda w: self.close())
    btns_close.add_widget(btn, stretch=0)
    btns_close.add_widget(Widgets.Label(''), stretch=1)
    top.add_widget(btns_close, stretch=0)
    # Add our GUI to the container
    container.add_widget(top, stretch=1)
    # NOTE: if you are building a GUI using a specific widget toolkit
    # (e.g. Qt) GUI calls, you need to extract the widget or layout
    # from the non-toolkit specific container wrapper and call on that
    # to pack your widget, e.g.:
    #cw = container.get_widget()
    #cw.addWidget(widget, stretch=1)
def close(self):
    """Stop this plugin in the reference viewer.

    Attached as the callback of the GUI "Close" button as a
    convenience to the user.  Always returns True.
    """
    plugin_name = str(self)
    self.fv.stop_local_plugin(self.chname, plugin_name)
    return True
def start(self):
    """Called just after ``build_gui()`` each time the plugin is invoked.

    Ensures our overlay drawing canvas is installed on the channel's
    canvas (adding it under ``self.layertag`` if not already present),
    then resumes event handling.
    """
    parent_canvas = self.fitsimage.get_canvas()
    try:
        parent_canvas.get_object_by_tag(self.layertag)
    except KeyError:
        # Overlay layer not installed yet; add it now.
        parent_canvas.add(self.canvas, tag=self.layertag)
    self.resume()
def pause(self):
    """
    This method is called when the plugin loses focus.
    It should take any actions necessary to stop handling user
    interaction events that were initiated in ``start()`` or
    ``resume()``.
    This method may be called many times as the plugin is focused
    or defocused.  It may be omitted if there is no user event handling
    to disable.
    """
    # No-op: this plugin installs no per-focus user event handlers.
    pass
def resume(self):
    """
    This method is called when the plugin gets focus.
    It should take any actions necessary to start handling user
    interaction events for the operations that it does.
    This method may be called many times as the plugin is focused or
    defocused.  The method may be omitted if there is no user event
    handling to enable.
    """
    # No-op: this plugin installs no per-focus user event handlers.
    pass
def stop(self):
"""
This method is called when the plugin is stopped.
It should perform any special clean up necessary to terminate
the operation. The GUI will be destroyed by the plugin manager
so | |
to match a set of resources by specific criteria, such as tags, attributes, or IDs.\n\nname (string) --The name of the filter. Filter names are case-sensitive.\n\nvalues (list) --The filter values. Filter values are case-sensitive.\n\n(string) --\n\n\n\n\n\n
:type maxResults: integer
:param maxResults: The maximum items to return in a request.
:type nextToken: string
:param nextToken: A token to specify where to start paginating. This is the NextToken from a previously truncated response.
:rtype: dict
ReturnsResponse Syntax
{
'requestId': 'string',
'componentVersionList': [
{
'arn': 'string',
'name': 'string',
'version': 'string',
'description': 'string',
'platform': 'Windows'|'Linux',
'supportedOsVersions': [
'string',
],
'type': 'BUILD'|'TEST',
'owner': 'string',
'dateCreated': 'string'
},
],
'nextToken': 'string'
}
Response Structure
(dict) --
requestId (string) --
The request ID that uniquely identifies this request.
componentVersionList (list) --
The list of component semantic versions.
(dict) --
A high-level overview of a component semantic version.
arn (string) --
The Amazon Resource Name (ARN) of the component.
name (string) --
The name of the component.
version (string) --
The semantic version of the component.
description (string) --
The description of the component.
platform (string) --
The platform of the component.
supportedOsVersions (list) --
The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.
(string) --
type (string) --
The type of the component denotes whether the component is used to build the image or only to test it.
owner (string) --
The owner of the component.
dateCreated (string) --
The date that the component was created.
nextToken (string) --
The next token used for paginated responses. When this is not empty, there are additional elements that the service has not included in this request. Use this token with the next request to retrieve additional objects.
Exceptions
imagebuilder.Client.exceptions.ServiceException
imagebuilder.Client.exceptions.ClientException
imagebuilder.Client.exceptions.ServiceUnavailableException
imagebuilder.Client.exceptions.InvalidRequestException
imagebuilder.Client.exceptions.InvalidPaginationTokenException
imagebuilder.Client.exceptions.ForbiddenException
imagebuilder.Client.exceptions.CallRateLimitExceededException
:return: {
'requestId': 'string',
'componentVersionList': [
{
'arn': 'string',
'name': 'string',
'version': 'string',
'description': 'string',
'platform': 'Windows'|'Linux',
'supportedOsVersions': [
'string',
],
'type': 'BUILD'|'TEST',
'owner': 'string',
'dateCreated': 'string'
},
],
'nextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_distribution_configurations(filters=None, maxResults=None, nextToken=None):
    """
    Returns a list of distribution configurations.

    Auto-generated documentation stub for the imagebuilder client method;
    the body is a placeholder and performs no API call (returns None).

    See also: AWS API Documentation

    :example: response = client.list_distribution_configurations(
        filters=[
            {
                'name': 'string',
                'values': ['string']
            },
        ],
        maxResults=123,
        nextToken='string'
    )

    :type filters: list
    :param filters: The filters. Each filter is a dict with a case-sensitive
        'name' (string) and 'values' (list of strings), used to return a more
        specific list of results from a list operation by matching resources
        on criteria such as tags, attributes, or IDs.

    :type maxResults: integer
    :param maxResults: The maximum items to return in a request.

    :type nextToken: string
    :param nextToken: A token to specify where to start paginating. This is
        the NextToken from a previously truncated response.

    :rtype: dict
    :return: {
        'requestId': 'string',
        'distributionConfigurationSummaryList': [
            {
                'arn': 'string',
                'name': 'string',
                'description': 'string',
                'dateCreated': 'string',
                'dateUpdated': 'string',
                'tags': {
                    'string': 'string'
                }
            },
        ],
        'nextToken': 'string'
    }

    requestId uniquely identifies this request.
    distributionConfigurationSummaryList holds one summary per distribution
    configuration (ARN, name, description, creation/update dates, tags).
    nextToken is non-empty when the service has more results; pass it back
    in the next request to retrieve additional objects.

    Exceptions:
        imagebuilder.Client.exceptions.ServiceException
        imagebuilder.Client.exceptions.ClientException
        imagebuilder.Client.exceptions.ServiceUnavailableException
        imagebuilder.Client.exceptions.InvalidRequestException
        imagebuilder.Client.exceptions.InvalidPaginationTokenException
        imagebuilder.Client.exceptions.ForbiddenException
        imagebuilder.Client.exceptions.CallRateLimitExceededException
    """
    pass
def list_image_build_versions(imageVersionArn=None, filters=None, maxResults=None, nextToken=None):
    """
    Returns a list of image build versions.

    (The generated summary previously read "distribution configurations";
    corrected to match the operation.)

    Auto-generated documentation stub for the imagebuilder client method;
    the body is a placeholder and performs no API call (returns None).

    See also: AWS API Documentation

    :example: response = client.list_image_build_versions(
        imageVersionArn='string',
        filters=[
            {
                'name': 'string',
                'values': ['string']
            },
        ],
        maxResults=123,
        nextToken='string'
    )

    :type imageVersionArn: string
    :param imageVersionArn: [REQUIRED]
        The Amazon Resource Name (ARN) of the image whose build versions
        you want to retrieve.

    :type filters: list
    :param filters: The filters. Each filter is a dict with a case-sensitive
        'name' (string) and 'values' (list of strings), used to return a more
        specific list of results from a list operation by matching resources
        on criteria such as tags, attributes, or IDs.

    :type maxResults: integer
    :param maxResults: The maximum items to return in a request.

    :type nextToken: string
    :param nextToken: A token to specify where to start paginating. This is
        the NextToken from a previously truncated response.

    :rtype: dict
    :return: {
        'requestId': 'string',
        'imageSummaryList': [
            {
                'arn': 'string',
                'name': 'string',
                'version': 'string',
                'platform': 'Windows'|'Linux',
                'osVersion': 'string',
                'state': {
                    'status': 'PENDING'|'CREATING'|'BUILDING'|'TESTING'|'DISTRIBUTING'|'INTEGRATING'|'AVAILABLE'|'CANCELLED'|'FAILED'|'DEPRECATED'|'DELETED',
                    'reason': 'string'
                },
                'owner': 'string',
                'dateCreated': 'string',
                'outputResources': {
                    'amis': [
                        {
                            'region': 'string',
                            'image': 'string',
                            'name': 'string',
                            'description': 'string',
                            'state': {
                                'status': 'PENDING'|'CREATING'|'BUILDING'|'TESTING'|'DISTRIBUTING'|'INTEGRATING'|'AVAILABLE'|'CANCELLED'|'FAILED'|'DEPRECATED'|'DELETED',
                                'reason': 'string'
                            }
                        },
                    ]
                },
                'tags': {
                    'string': 'string'
                }
            },
        ],
        'nextToken': 'string'
    }

    requestId uniquely identifies this request.
    imageSummaryList holds one summary per image build version: ARN, name,
    version, platform, OS version (e.g. Amazon Linux 2, Ubuntu 18, or
    Microsoft Windows Server 2019), state (status + reason), owner,
    creation date, output EC2 AMIs (region, AMI ID, name, description,
    state), and tags.
    nextToken is non-empty when the service has more results; pass it back
    in the next request to retrieve additional objects.

    Exceptions:
        imagebuilder.Client.exceptions.ServiceException
        imagebuilder.Client.exceptions.ClientException
        imagebuilder.Client.exceptions.ServiceUnavailableException
        imagebuilder.Client.exceptions.InvalidRequestException
        imagebuilder.Client.exceptions.InvalidPaginationTokenException
        imagebuilder.Client.exceptions.ForbiddenException
        imagebuilder.Client.exceptions.CallRateLimitExceededException
    """
    pass
def list_image_pipeline_images(imagePipelineArn=None, filters=None, maxResults=None, nextToken=None):
"""
Returns a list of images created by the specified pipeline.
See also: AWS API Documentation
Exceptions
:example: response = client.list_image_pipeline_images(
imagePipelineArn='string',
filters=[
{
'name': 'string',
'values': [
'string',
]
},
],
maxResults=123,
nextToken='string'
)
:type imagePipelineArn: string
:param imagePipelineArn: [REQUIRED]\nThe Amazon Resource Name (ARN) of the image pipeline whose images you want to view.\n
:type filters: list
:param filters: The filters.\n\n(dict) --A filter name and value pair that is used to return a more specific list of results from a list operation. Filters can be used to match a set of resources by specific criteria, such as tags, attributes, or IDs.\n\nname (string) --The name of the filter. Filter names are case-sensitive.\n\nvalues (list) --The filter values. Filter values are case-sensitive.\n\n(string) --\n\n\n\n\n\n
:type maxResults: integer
:param maxResults: The | |
"hif", "his", "lc1", "lc2", \
"lc3", "lob", "lof", "los", "mxs", "nbz", "nms", "pai", "rbs", \
"rc1", "rc2", "rc3", "rc4", "rc5", "rev", "rfc", "rls", "ros", \
"sc1", "sc2", "sc3", "sc4", "slf", "sty", "t1c", "t2c", "wbf", \
"wfc", "wty", "ezf", "smf", "loc", "wdf", "unt", \
"ALO", "AOC", "ASC", "AWC", "CBC", "CC1", "CC2", "CC3", "CFC", \
"COL", "DBC", "DTC", "HIB", "HIC", "HIF", "HIS", "LC1", "LC2", \
"LC3", "LOB", "LOF", "LOS", "MXS", "NBZ", "NMS", "PAI", "RBS", \
"RC1", "RC2", "RC3", "RC4", "RC5", "REV", "RFC", "RLS", "ROS", \
"SC1", "SC2", "SC3", "SC4", "SLF", "STY", "T1C", "T2C", "WBF", \
"WFC", "WTY", "EZF", "SMF", "LOC", "WDF", "UNT" \
]
rparms = [ \
"arc", "ard", "arl", "ars", "beg", "bet", "cht", "cmg", "cs1", \
"cs2", "dts", "dwd", "end", "lin", "lwd", "oer", "rht", "rmg", \
"sht", "sig", "sl1", "sl2", "smt", "swi", "tht", "wba", "wbc", \
"wbd", "wbl", "wbr", "wbs", "wbt", "wht", "blw", \
"ARC", "ARD", "ARL", "ARS", "BEG", "BET", "CHT", "CMG", "CS1", \
"CS2", "DTS", "DWD", "END", "LIN", "LWD", "OER", "RHT", "RMG", \
"SHT", "SIG", "SL1", "SL2", "SMT", "SWI", "THT", "WBA", "WBC", \
"WBD", "WBL", "WBR", "WBS", "WBT", "WHT", "BLW" \
]
cparms = [ "erf", "fro", "ERF", "FRO" ]
if (not isinstance(pname,str)):
print("wmgetp: Parameter '" + str(pname) + "' is not a string type.")
return None
if (iparms.count(pname) > 0):
return c_wmgetip(pname)
elif (rparms.count(pname) > 0):
return c_wmgetrp(pname)
elif (cparms.count(pname) > 0):
return c_wmgetcp(pname)
else:
print("wmgetp: specified value for " + pname + " is not of a recognized type.")
return None
################################################################
def wmsetp(pname, val):
    """
    Sets control parameter values for Ngl.wmbarb and Ngl.wmbarbmap procedures.

    Ngl.wmsetp(pname, pvalue)

    pname -- Name of the parameter to set.
    pvalue -- Value of the parameter you want to set; the underlying
              setter is chosen by the Python type of the value
              (float, int, or str).
    """
    # Guard: parameter names must be strings.
    if not isinstance(pname, str):
        print("wmsetp: Parameter '" + str(pname) + "' is not a string type.")
        return None

    # Dispatch on the value's type. The float test must precede the int
    # test (bools are ints, but floats are not).
    if isinstance(val, float):
        c_wmsetrp(pname, val)
    elif isinstance(val, int):
        c_wmsetip(pname, val)
    elif isinstance(val, str):
        c_wmsetcp(pname, val)
    else:
        print("wmsetp: specified value for " + pname + " is not of a recognized type.")
    return None
################################################################
def wmstnm(wks, x, y, imdat):
    """
    Draws station model data at specified locations.

    Ngl.wmstnm(wks, x, y, imdat)

    wks -- The identifier returned from calling Ngl.open_wks.
    x, y -- Scalars, one-dimensional NumPy arrays or Python lists
            specifying X and Y coordinate values.
    imdat -- A string of 50 characters encoded as per the WMO/NOAA
             guidelines (or a sequence of such strings, one per station).
             See the online documentation for details.
    """
    # Get the GKS workstation ID.
    gksid = get_integer(wks, "wkGksWorkId")
    # Coerce the coordinates to (at least) 1-d arrays so that scalars,
    # NumPy arrays, and Python lists/tuples are handled uniformly.
    xa = _arg_with_scalar(numpy.array(x))
    ya = _arg_with_scalar(numpy.array(y))
    # A single string is promoted to a one-element array.  Use isinstance
    # rather than a `type(...) == type('a')` comparison so that str
    # subclasses are accepted as well.
    if isinstance(imdat, str):
        imdata = numpy.array([imdat])
    else:
        imdata = numpy.array(imdat)
    # Draw one station model per coordinate pair.  Note: imdata must have
    # at least as many entries as xa, or this raises IndexError.
    for i in range(len(xa)):
        c_wmstnmp(gksid, xa[i], ya[i], imdata[i])
    return None
################################################################
def wrf_avo(u, v, msfu, msfv, msfm, cor, dx, dy, opt=0):
    """
    Calculates absolute vorticity from WRF model output.

    u -- X-wind component; rightmost three dimensions are
         bottom_top x south_north x west_east_stag.
    v -- Y-wind component; rightmost three dimensions are
         bottom_top x south_north_stag x west_east, leftmost dimensions
         matching u's.
    msfu, msfv, msfm -- Map scale factors on the u, v, and mass grids;
         rightmost two dimensions match the corresponding wind/mass grid,
         any leftmost dimensions match u and v.
    cor -- Coriolis sine latitude term, dimensioned like msfm.
    dx, dy -- Scalar grid spacings in X and Y.
    opt -- [optional] Integer option, not in use yet.  Set to 0.
    """
    wrf_deprecated()
    # Promote every array argument to at least a 1-d NumPy array before
    # handing off to the compiled wrapper.
    promoted = [_promote_scalar(arg)
                for arg in (u, v, msfu, msfv, msfm, cor, dx, dy)]
    return fplib.wrf_avo(*promoted, opt)
################################################################
def wrf_dbz(P, T, qv, qr, qs=None, qg=None, ivarint=0, iliqskin=0):
    """
    Calculates simulated equivalent radar reflectivity factor [dBZ] from
    WRF model output.

    dbz = Ngl.wrf_dbz (P, T, qv, qr, qs, qg, ivarint, iliqskin)

    P -- Full pressure (perturbation + base state pressure).  The rightmost
         dimensions are bottom_top x south_north x west_east.  Units [Pa].
    T -- Temperature in [K], dimensioned like P (see wrf_tk).
    qv -- Water vapor mixing ratio in [kg/kg], dimensioned like P.
    qr -- Rain mixing ratio in [kg/kg], dimensioned like P.
    qs -- [optional] Snow mixing ratio in [kg/kg]; scalar or array
          dimensioned like P.  Defaults to 0.0 everywhere.
    qg -- [optional] Graupel mixing ratio in [kg/kg]; scalar or array
          dimensioned like P.  Defaults to 0.0 everywhere.
    ivarint -- [optional, default=0] Option controlling the intercept
          parameters of the rain/snow/graupel size distributions.
    iliqskin -- [optional, default=0] If 1, frozen particles above freezing
          scatter as liquid.
    """
    wrf_deprecated()
    #
    # Promote inputs to NumPy arrays that have at least a dimension of 1.
    #
    p2 = _promote_scalar(P)
    t2 = _promote_scalar(T)
    qv2 = _promote_scalar(qv)
    qr2 = _promote_scalar(qr)

    def _fill_like_qv(q):
        # Normalize an optional mixing ratio: None -> zeros, scalar ->
        # constant-filled array, array -> promoted as-is.  Shapes are taken
        # from the promoted qv2 (the original used qv.shape, which fails
        # when qv is a Python scalar).
        if q is None:
            return numpy.zeros(qv2.shape)
        if _is_scalar(q):
            filled = numpy.zeros(qv2.shape)
            filled[:] = q
            return filled
        return _promote_scalar(q)

    qs2 = _fill_like_qv(qs)
    qg2 = _fill_like_qv(qg)
    ivar2 = _promote_scalar(ivarint)
    iliq2 = _promote_scalar(iliqskin)
    return fplib.wrf_dbz(p2, t2, qv2, qr2, qs2, qg2, ivar2, iliq2)
################################################################
def wrf_ll_to_ij(lon, lat, map_proj, truelat1=-999.,truelat2=-999.,stand_lon=999., \
                 ref_lat=-999,ref_lon=-999,pole_lat=90,pole_lon=0,knowni=-999,\
                 knownj=-999,dx=-999, dy=-999, latinc=-999., loninc=-999):
    """
    Converts lon/lat values to i/j index values.

    lon, lat -- longitude/latitude values to convert
    map_proj -- map projection
    Remaining arguments are projection parameters; the -999/999 defaults
    are sentinels meaning "not supplied".
    """
    # Emit the same deprecation notice as every other wrf_* wrapper in
    # this module (previously missing here).
    wrf_deprecated()
    # Promote every argument to at least a 1-d NumPy array before handing
    # off to the compiled wrapper.
    promoted = [_promote_scalar(arg) for arg in
                (lon, lat, map_proj, truelat1, truelat2, stand_lon,
                 ref_lat, ref_lon, pole_lat, pole_lon, knowni, knownj,
                 dx, dy, latinc, loninc)]
    return fplib.wrf_ll_to_ij(*promoted)
################################################################
def wrf_ij_to_ll(iloc, jloc, map_proj, truelat1=-999.,truelat2=-999.,stand_lon=999., \
                 ref_lat=-999,ref_lon=-999,pole_lat=90,pole_lon=0,knowni=-999,\
                 knownj=-999,dx=-999, dy=-999, latinc=-999., loninc=-999):
    """
    Converts i/j index values to lon/lat values.

    iloc, jloc -- grid index values to convert
    map_proj -- map projection
    Remaining arguments are projection parameters; the -999/999 defaults
    are sentinels meaning "not supplied".
    """
    wrf_deprecated()
    # Promote every argument to at least a 1-d NumPy array before handing
    # off to the compiled wrapper.
    promoted = [_promote_scalar(arg) for arg in
                (iloc, jloc, map_proj, truelat1, truelat2, stand_lon,
                 ref_lat, ref_lon, pole_lat, pole_lon, knowni, knownj,
                 dx, dy, latinc, loninc)]
    return fplib.wrf_ij_to_ll(*promoted)
################################################################
def wrf_pvo(u, v, th, p, msfu, msfv, msfm, cor, dx, dy, opt=0):
"""
Calculates potential vorticity from WRF model output.
u - X-wind component. An array whose rightmost three dimensions are
bottom_top x south_north x west_east_stag.
v -- Y-wind component. An array whose rightmost three dimensions are
bottom_top x south_north_stag x west_east, and whose leftmost
dimensions are the same as u's.
theta -- Potential temperature in K. An array whose rightmost
dimensions are bottom_top x south_north x west_east, and whose
leftmost dimensions are the same as u's.
P -- Full pressure (perturbation + base state pressure). An array with
the same dimensionality as theta. Units must be [Pa].
msfu -- Map scale factor on u-grid. An array whose rightmost two
dimensions are the same as u's. If it contains additional leftmost
dimensions, they must be the same as the u and v arrays.
msfv -- Map scale factor on v-grid. An array with the same number of
dimensions as msfu, whose rightmost two dimensions are the same as
v's. If it contains additional leftmost dimensions, they must be the
same as the u and v arrays.
msfm -- Map scale factor on mass grid. An array with the same number
of dimensions as msfu and msfv, | |
conv8 = Activation("relu")(fun(conv8))
conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8)
conv8 = Activation("relu")(fun(conv8))
conv10 = Conv3D(feature_num, out_kernel, activation="linear", padding="same")(conv8)
grid_centers = Input((None, 3))
conv10 = Lambda(lambda x: ops.spatial_softmax(x))(conv10)
output = Lambda(lambda x: ops.expected_value_3d(x[0], x[1]))([conv10, grid_centers])
# Because I think it is easier, use a layer to calculate the variance and return it as a second output to be used for variance loss
output_var = Lambda(lambda x: ops.var_3d(x[0], x[1], x[2]))(
[conv10, grid_centers, output]
)
if include_top:
if regularize_var:
model = Model(inputs=[inputs, grid_centers], outputs=[output, output_var])
else:
model = Model(inputs=[inputs, grid_centers], outputs=[output])
else:
model = Model(inputs=[inputs], outputs=[conv8])
# model.compile(optimizer=Adam(lr=lr), loss=[lossfunc[0], lossfunc[1]], metrics=['mse'])
model.compile(
optimizer=Adam(lr=lr), loss=lossfunc, metrics=metric, loss_weights=loss_weights
)
return model
def slice_input(inp, k):
    """Return channels [3*k, 3*(k+1)) of a 5-D tensor (camera k's 3-channel block)."""
    print(K.int_shape(inp))
    start = 3 * k
    return inp[:, :, :, :, start : start + 3]
def unet3d_big_tiedfirstlayer_expectedvalue(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    gridsize=(64, 64, 64),
    batch_norm=False,
    instance_norm=False,
    include_top=True,
    regularize_var=False,
    loss_weights=None,
    metric="mse",
):
    """3D U-Net with a first conv layer tied (weight-shared) across cameras
    and an expected-value head.

    The shared first convolution is applied to each camera's channel block
    and the responses are summed; a 3-level U-Net follows. The final volume
    is turned into per-landmark probability maps by a spatial softmax, and
    the output is each landmark's expected 3D coordinate over grid_centers.

    lossfunc -- loss (or list of losses) passed to model.compile
    lr -- Adam learning rate
    input_dim -- number of channels contributed by each camera
    feature_num -- number of output landmarks
    num_cams -- number of cameras; the input has input_dim*num_cams channels
    gridsize -- spatial dimensions of the input volume
    batch_norm / instance_norm -- select at most one normalization scheme
    include_top -- if False, return the trunk ending at the last 64-ch conv
    regularize_var -- if True, add the spatial variance as a second output
    loss_weights -- optional per-output loss weights for model.compile
    metric -- metric passed to model.compile

    Returns a compiled keras Model.
    """
    if batch_norm and not instance_norm:
        print("using batch normalization")

        def fun(inputs):
            return BatchNormalization()(inputs)

    elif instance_norm:
        print("using instance normalization")

        def fun(inputs):
            return ops.InstanceNormalization()(inputs)

    else:

        def fun(inputs):
            # no normalization requested: identity
            return inputs

    def slice_input(inp, k):
        # channels [k*input_dim, (k+1)*input_dim) belong to camera k
        print(K.int_shape(inp))
        return inp[:, :, :, :, k * input_dim : (k + 1) * input_dim]

    inputs = Input((*gridsize, input_dim * num_cams))
    conv1_layer = Conv3D(64, (3, 3, 3), padding="same")
    conv1_in = []
    for i in range(num_cams):
        # BUGFIX: bind the loop variable through a default argument. The
        # previous `lambda x: slice_input(x, i)` late-binds i, so if the
        # Lambda is ever re-evaluated after the loop finishes (e.g. when the
        # model is re-traced or deserialized) every camera would receive the
        # slice belonging to the *last* camera.
        conv1_in.append(
            conv1_layer(Lambda(lambda x, cam=i: slice_input(x, cam))(inputs))
        )
    conv1 = Add()(conv1_in)
    conv1 = Activation("relu")(fun(conv1))
    conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1)
    conv1 = Activation("relu")(fun(conv1))
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1)
    conv2 = Activation("relu")(fun(conv2))
    conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2)
    conv2 = Activation("relu")(fun(conv2))
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2)
    conv3 = Activation("relu")(fun(conv3))
    conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3)
    conv3 = Activation("relu")(fun(conv3))
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
    conv4 = Conv3D(512, (3, 3, 3), padding="same")(pool3)
    conv4 = Activation("relu")(fun(conv4))
    conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4)
    conv4 = Activation("relu")(fun(conv4))
    up6 = concatenate(
        [
            Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv4),
            conv3,
        ],
        axis=4,
    )
    conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6)
    conv6 = Activation("relu")(fun(conv6))
    conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6)
    conv6 = Activation("relu")(fun(conv6))
    up7 = concatenate(
        [
            Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv6),
            conv2,
        ],
        axis=4,
    )
    conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7)
    conv7 = Activation("relu")(fun(conv7))
    conv7 = Conv3D(128, (3, 3, 3), padding="same")(conv7)
    conv7 = Activation("relu")(fun(conv7))
    up8 = concatenate(
        [
            Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv7),
            conv1,
        ],
        axis=4,
    )
    conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8)
    conv8 = Activation("relu")(fun(conv8))
    conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8)
    conv8 = Activation("relu")(fun(conv8))
    conv10 = Conv3D(feature_num, (1, 1, 1), activation="linear")(conv8)
    grid_centers = Input((None, 3))
    # per-landmark probability volume
    conv10 = Lambda(lambda x: ops.spatial_softmax(x))(conv10)
    # expected 3D coordinate under that probability volume
    output = Lambda(lambda x: ops.expected_value_3d(x[0], x[1]))([conv10, grid_centers])
    # spatial variance, exposed as a second output for a variance loss
    output_var = Lambda(lambda x: ops.var_3d(x[0], x[1], x[2]))(
        [conv10, grid_centers, output]
    )
    if include_top:
        if regularize_var:
            model = Model(inputs=[inputs, grid_centers], outputs=[output, output_var])
        else:
            model = Model(inputs=[inputs, grid_centers], outputs=[output])
    else:
        model = Model(inputs=[inputs], outputs=[conv8])
    model.compile(
        optimizer=Adam(lr=lr),
        loss=lossfunc,
        metrics=[metric],
        loss_weights=loss_weights,
    )
    return model
def unet3d_big_1cam(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    batch_norm=False,
    instance_norm=False,
):
    """Single-camera 3D U-Net with a sigmoid voxel-map head.

    Same encoder/decoder layout as the multi-camera variants, but the input
    carries only one camera's channels. ``num_cams`` is accepted only for
    signature compatibility and is not used.
    """
    if batch_norm and not instance_norm:
        print("using batch normalization")

        def maybe_norm(tensor):
            return BatchNormalization()(tensor)

    elif instance_norm:
        print("using instance normalization")

        def maybe_norm(tensor):
            return ops.InstanceNormalization()(tensor)

    else:

        def maybe_norm(tensor):
            # no normalization: identity
            return tensor

    def double_conv(tensor, n_filters):
        # two 3x3x3 convolutions, each followed by (optional norm +) ReLU
        for _ in range(2):
            tensor = Conv3D(n_filters, (3, 3, 3), padding="same")(tensor)
            tensor = Activation("relu")(maybe_norm(tensor))
        return tensor

    def up_concat(tensor, skip, n_filters):
        # transposed-conv upsampling followed by the skip-connection concat
        upsampled = Conv3DTranspose(
            n_filters, (2, 2, 2), strides=(2, 2, 2), padding="same"
        )(tensor)
        return concatenate([upsampled, skip], axis=4)

    inputs = Input((None, None, None, input_dim))
    enc1 = double_conv(inputs, 64)
    enc2 = double_conv(MaxPooling3D(pool_size=(2, 2, 2))(enc1), 128)
    enc3 = double_conv(MaxPooling3D(pool_size=(2, 2, 2))(enc2), 256)
    bottleneck = double_conv(MaxPooling3D(pool_size=(2, 2, 2))(enc3), 512)
    dec3 = double_conv(up_concat(bottleneck, enc3, 256), 256)
    dec2 = double_conv(up_concat(dec3, enc2, 128), 128)
    dec1 = double_conv(up_concat(dec2, enc1, 64), 64)
    voxel_map = Conv3D(feature_num, (1, 1, 1), activation="sigmoid")(dec1)
    model = Model(inputs=[inputs], outputs=[voxel_map])
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
    return model
def unet3d_big_tiedfirstlayer(
    lossfunc,
    lr,
    input_dim,
    feature_num,
    num_cams,
    batch_norm=False,
    instance_norm=False,
    bs=6,
):
    """3D U-Net whose first conv layer is tied (weight-shared) across cameras,
    with a sigmoid voxel-map head.

    lossfunc -- loss passed to model.compile
    lr -- Adam learning rate
    input_dim -- number of channels contributed by each camera
    feature_num -- number of output feature maps
    num_cams -- number of cameras; the input has input_dim*num_cams channels
    batch_norm / instance_norm -- select at most one normalization scheme
    bs -- unused; kept for signature compatibility

    Returns a compiled keras Model.
    """
    if batch_norm and not instance_norm:
        print("using batch normalization")

        def fun(inputs):
            return BatchNormalization()(inputs)

    elif instance_norm:
        print("using instance normalization")

        def fun(inputs):
            return ops.InstanceNormalization()(inputs)

    else:

        def fun(inputs):
            # no normalization requested: identity
            return inputs

    def slice_input(inp, k):
        # channels [k*input_dim, (k+1)*input_dim) belong to camera k
        print(K.int_shape(inp))
        return inp[:, :, :, :, k * input_dim : (k + 1) * input_dim]

    inputs = Input((None, None, None, input_dim * num_cams))
    conv1_layer = Conv3D(64, (3, 3, 3), padding="same")
    conv1_in = []
    for i in range(num_cams):
        # BUGFIX: bind the loop variable through a default argument. The
        # previous `lambda x: slice_input(x, i)` late-binds i, so if the
        # Lambda is ever re-evaluated after the loop finishes (e.g. when the
        # model is re-traced or deserialized) every camera would receive the
        # slice belonging to the *last* camera.
        conv1_in.append(
            conv1_layer(Lambda(lambda x, cam=i: slice_input(x, cam))(inputs))
        )
    conv1 = Add()(conv1_in)
    conv1 = Activation("relu")(fun(conv1))
    conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1)
    conv1 = Activation("relu")(fun(conv1))
    pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
    conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1)
    conv2 = Activation("relu")(fun(conv2))
    conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2)
    conv2 = Activation("relu")(fun(conv2))
    pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
    conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2)
    conv3 = Activation("relu")(fun(conv3))
    conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3)
    conv3 = Activation("relu")(fun(conv3))
    pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
    conv4 = Conv3D(512, (3, 3, 3), padding="same")(pool3)
    conv4 = Activation("relu")(fun(conv4))
    conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4)
    conv4 = Activation("relu")(fun(conv4))
    up6 = concatenate(
        [
            Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv4),
            conv3,
        ],
        axis=4,
    )
    conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6)
    conv6 = Activation("relu")(fun(conv6))
    conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6)
    conv6 = Activation("relu")(fun(conv6))
    up7 = concatenate(
        [
            Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv6),
            conv2,
        ],
        axis=4,
    )
    conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7)
    conv7 = Activation("relu")(fun(conv7))
    conv7 = Conv3D(128, (3, 3, 3), padding="same")(conv7)
    conv7 = Activation("relu")(fun(conv7))
    up8 = concatenate(
        [
            Conv3DTranspose(64, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv7),
            conv1,
        ],
        axis=4,
    )
    conv8 = Conv3D(64, (3, 3, 3), padding="same")(up8)
    conv8 = Activation("relu")(fun(conv8))
    conv8 = Conv3D(64, (3, 3, 3), padding="same")(conv8)
    conv8 = Activation("relu")(fun(conv8))
    conv10 = Conv3D(feature_num, (1, 1, 1), activation="sigmoid")(conv8)
    model = Model(inputs=[inputs], outputs=[conv10])
    model.compile(optimizer=Adam(lr=lr), loss=lossfunc, metrics=["mse"])
    return model
def unet3d_big(
lossfunc,
lr,
input_dim,
feature_num,
num_cams,
batch_norm=False,
instance_norm=False,
include_top=True,
last_kern_size=(1, 1, 1),
gridsize=None,
):
# Gridsize unused, necessary for argument consistency with other nets
if batch_norm and not instance_norm:
print("using batch normalization")
def fun(inputs):
return BatchNormalization()(inputs)
elif instance_norm:
print("using instance normalization")
def fun(inputs):
return ops.InstanceNormalization()(inputs)
else:
def fun(inputs):
return inputs
inputs = Input((None, None, None, input_dim * num_cams))
conv1 = Conv3D(64, (3, 3, 3), padding="same")(inputs)
conv1 = Activation("relu")(fun(conv1))
conv1 = Conv3D(64, (3, 3, 3), padding="same")(conv1)
conv1 = Activation("relu")(fun(conv1))
pool1 = MaxPooling3D(pool_size=(2, 2, 2))(conv1)
conv2 = Conv3D(128, (3, 3, 3), padding="same")(pool1)
conv2 = Activation("relu")(fun(conv2))
conv2 = Conv3D(128, (3, 3, 3), padding="same")(conv2)
conv2 = Activation("relu")(fun(conv2))
pool2 = MaxPooling3D(pool_size=(2, 2, 2))(conv2)
conv3 = Conv3D(256, (3, 3, 3), padding="same")(pool2)
conv3 = Activation("relu")(fun(conv3))
conv3 = Conv3D(256, (3, 3, 3), padding="same")(conv3)
conv3 = Activation("relu")(fun(conv3))
pool3 = MaxPooling3D(pool_size=(2, 2, 2))(conv3)
conv4 = Conv3D(512, (3, 3, 3), padding="same")(pool3)
conv4 = Activation("relu")(fun(conv4))
conv4 = Conv3D(512, (3, 3, 3), padding="same")(conv4)
conv4 = Activation("relu")(fun(conv4))
up6 = concatenate(
[
Conv3DTranspose(256, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv4),
conv3,
],
axis=4,
)
conv6 = Conv3D(256, (3, 3, 3), padding="same")(up6)
conv6 = Activation("relu")(fun(conv6))
conv6 = Conv3D(256, (3, 3, 3), padding="same")(conv6)
conv6 = Activation("relu")(fun(conv6))
up7 = concatenate(
[
Conv3DTranspose(128, (2, 2, 2), strides=(2, 2, 2), padding="same")(conv6),
conv2,
],
axis=4,
)
conv7 = Conv3D(128, (3, 3, 3), padding="same")(up7)
conv7 = Activation("relu")(fun(conv7))
conv7 = Conv3D(128, (3, 3, | |
0:
pass
else:
ARG = 1-G*Y
if ARG > SMALL:
pass
else:
if G < 0:
CDFKAP = 0
if G > 0:
CDFKAP = 1
return(CDFKAP)
Y = -sp.log(ARG)/G
Y = sp.exp(-Y)
if H == 0:
CDFKAP = sp.exp(-Y)
else:
ARG = 1-H*Y
if ARG > SMALL:
Y = -sp.log(ARG)/H
CDFKAP = sp.exp(-Y)
return(CDFKAP)
else:
CDFKAP = 0
return(CDFKAP)
#############################################################
def cdfnor(x,para):
    """CDF of the normal distribution.

    x -- quantile at which to evaluate the CDF
    para -- [location (mean), scale (standard deviation)]

    Returns None (after printing a message) when the scale parameter is not
    strictly positive, matching the other cdf* routines in this module.
    (The original only printed the message and then computed a result with
    the invalid scale anyway, and it accepted a zero scale.)
    """
    if para[1] <= 0:
        print("Invalid Parameters")
        return
    cdfnor = 0.5+0.5*spsp.erf((x-para[0])/para[1]*1.0/sp.sqrt(2))
    return(cdfnor)
#############################################################
def cdfpe3(x,para):
    """CDF of the Pearson type III distribution.

    x -- quantile at which to evaluate the CDF
    para -- [location, scale, skew]
    Returns None (after printing a message) if the scale is not positive.
    """
    small = 1e-6
    if para[1] <= 0:
        print("Parameters Invalid")
        return
    location, scale, skew = para[0], para[1], para[2]
    if abs(skew) <= small:
        # negligible skew: reduces to the normal distribution
        z = (x - location) / scale
        return 0.5 + 0.5 * spsp.erf(z * 1 / sp.sqrt(2))
    # transform to a gamma variable with shape alpha
    alpha = 4 / (skew ** 2)
    z = 2 * (x - location) / (scale * skew) + alpha
    prob = spsp.gammainc(alpha, z) if z > 0 else 0
    # for negative skew the gamma tail is reflected
    return 1 - prob if skew < 0 else prob
#############################################################
def cdfwak(x,para):
    """CDF of the Wakeby distribution.

    x -- quantile at which the CDF is evaluated
    para -- [XI, A, B, C, D], Hosking's Wakeby parameters

    The Wakeby CDF has no closed form in general: after handling the
    degenerate cases, the quantile function is inverted by a modified
    Newton-Raphson iteration on Z = -log(1-F).
    Returns None (after printing a message) for invalid parameters.
    NOTE(review): relies on the companion module ``quawak`` (Wakeby
    quantile function) for the iteration's starting values.
    """
    EPS = 1e-8      # convergence tolerance for the Newton iteration
    MAXIT = 20      # maximum number of Newton steps
    ZINCMX =3       # largest step allowed in Z per iteration
    ZMULT = 0.2     # shrink factor applied when a step would make Z <= 0
    UFL = -170      # underflow guard: exp() of anything below this is treated as 0
    XI = para[0]
    A = para[1]
    B = para[2]
    C = para[3]
    D = para[4]
    #-- Reject parameter combinations that do not define a valid Wakeby CDF
    if B+D <= 0 and (B!=0 or C!=0 or D!= 0):
        print("Invalid Parameters")
        return
    if A == 0 and B!= 0:
        print("Invalid Parameters")
        return
    if C == 0 and D != 0:
        print("Invalid Parameters")
        return
    if C < 0 or A+C < 0:
        print("Invalid Parameters")
        return
    if A == 0 and C == 0:
        print("Invalid Parameters")
        return
    CDFWAK = 0
    #-- Below the lower endpoint XI the CDF is exactly 0
    if x <= XI:
        return(CDFWAK)
    #Test for special cases
    #-- B = C = D = 0: exponential distribution with scale A
    if B == 0 and C == 0 and D == 0:
        Z = (x-XI)/A
        CDFWAK = 1
        if -Z >= UFL:  # otherwise exp(-Z) underflows and the CDF is 1
            CDFWAK = 1-sp.exp(-Z)
        return(CDFWAK)
    #-- C = 0: reduces to a generalized Pareto form (upper bound XI + A/B)
    if C == 0:
        CDFWAK = 1
        if x >= (XI+A/B):
            return(CDFWAK)
        Z = -sp.log(1-(x-XI)*B/A)/B
        if -Z >= UFL:
            CDFWAK = 1-sp.exp(-Z)
        return(CDFWAK)
    #-- A = 0: reduces to a generalized Pareto form in the upper-tail parameters
    if A == 0:
        Z = sp.log(1+(x-XI)*D/C)/D
        if -Z >= UFL:
            CDFWAK = 1-sp.exp(-Z)
        return(CDFWAK)
    #-- General case.  For D < 0 the support is bounded above.
    CDFWAK=1
    if D <0 and x >= (XI+A/B-C/D):
        return(CDFWAK)
    #-- Choose a starting value for Z = -log(1-F) by bracketing x against the
    #   0.1 and 0.99 Wakeby quantiles.
    Z=0.7
    if x < quawak.quawak(0.1,para):
        Z = 0
    if x < quawak.quawak(0.99,para):
        pass
    else:
        #-- x lies in the far upper tail: start from a tail-specific estimate
        if D < 0:
            Z = sp.log((x-XI-A/B)*D/C+1)/D
        if D == 0:
            Z = (x-XI-A/B)/C
        if D > 0:
            Z = sp.log((x-XI)*D/C+1)/D
    #-- Modified Newton-Raphson iteration on Z (follows Hosking's CDFWAK)
    for IT in range(1,MAXIT+1):
        EB = 0
        BZ = -B*Z
        if BZ >= UFL:  # guard exp() against underflow
            EB = sp.exp(BZ)
        GB = Z
        if abs(B)>EPS:  # GB -> Z as B -> 0, so use the limit for tiny B
            GB = (1-EB)/B
        ED = sp.exp(D*Z)
        GD = -Z
        if abs(D)>EPS:  # same limiting treatment for tiny D
            GD = (1-ED)/D
        #-- XEST: quantile implied by the current Z; FUNC: residual to drive to 0
        XEST =XI +A*GB-C*GD
        FUNC = x-XEST
        DERIV1 = A*EB+C*ED
        DERIV2 = -A*B*EB+C*D*ED
        #-- Second-order (Halley-style) correction; fall back to plain Newton
        #   if the corrected denominator is not positive
        TEMP = DERIV1+0.5*FUNC*DERIV2/DERIV1
        if TEMP <= 0:
            TEMP = DERIV1
        ZINC = FUNC/TEMP
        if ZINC > ZINCMX:  # cap the step to keep the iteration stable
            ZINC = ZINCMX
        ZNEW = Z+ZINC
        if ZNEW <= 0:
            #-- Z must stay positive; shrink instead of stepping below zero
            Z = Z*ZMULT
        else:
            Z = ZNEW
        if abs(ZINC) <= EPS:
            #-- Converged: F = 1-exp(-Z), guarding against underflow
            CDFWAK = 1
            if -Z >= UFL:
                CDFWAK = 1-sp.exp(-Z)
            return(CDFWAK)
    #-- If the iteration exhausts MAXIT steps without converging, the function
    #   falls through and implicitly returns None.
#############################################################
def cdfwei(x,para):
    """CDF of the Weibull distribution.

    x -- quantile at which to evaluate the CDF
    para -- [location, scale, shape]

    Returns None (after printing a message) on an invalid parameter count or
    non-positive scale/shape.
    BUGFIX: the length check now runs *before* the parameters are indexed;
    previously ``para[2]`` was read first, so a short parameter list raised
    IndexError and the "Invalid number of parameters" branch could never fire.
    """
    if len(para) != 3:
        print("Invalid number of parameters")
        return
    if para[1] <= 0 or para[2] <= 0:
        print("Invalid Parameters")
        return
    cdfwei = 1-sp.exp(-((x-para[0])/para[1])**para[2])
    return(cdfwei)
#############################################################
#LMR FUNCTIONS
#############################################################
def lmrexp(para,nmom):
    """L-moments of the exponential distribution.

    para -- [location, scale]
    nmom -- number of L-moments to return: [lambda1, lambda2, tau3, tau4, ...]

    Returns None (after printing a message) for a non-positive scale or
    nmom > 20.
    BUGFIX: the nmom > 20 branch now returns; previously it only printed and
    then fell through, unlike every sibling lmr* routine.
    """
    A=para[1]
    if A <= 0:
        print("Invalid Parameters")
        return
    if nmom > 20:
        print("Parameter nmom too large")
        return
    xmom = []
    xmom.append(para[0]+A)
    if nmom == 1:
        return(xmom)
    xmom.append(0.5*A)
    if nmom ==2:
        return(xmom)
    for i in range(3,nmom+1):
        # exact ratio for the exponential distribution: tau_i = 2/(i*(i-1))
        xmom.append(2/float(i*(i-1)))
    return(xmom)
#############################################################
def lmrgam(para,nmom):
    """First four L-moments of the gamma distribution.

    para -- [shape (alpha), scale (beta)]
    nmom -- number of L-moments (at most 4): [lambda1, lambda2, tau3, tau4]
    tau3 and tau4 come from Hosking's rational-function approximations.
    Returns None (after printing a message) on invalid parameters or nmom > 4.
    """
    # Rational-approximation coefficients (Hosking's LMRGAM).
    A0 = 0.32573501
    A1, A2, A3 = 0.16869150, 0.078327243, -0.0029120539
    B1, B2 = 0.46697102, 0.24255406
    C0 = 0.12260172
    C1, C2, C3 = 0.053730130, 0.043384378, 0.011101277
    D1, D2 = 0.18324466, 0.20166036
    E1, E2, E3 = 2.3807576, 1.5931792, 0.11618371
    F1, F2, F3 = 5.1533299, 7.1425260, 1.9745056
    # Unused here; retained from the Fortran original for reference.
    G1, G2, G3 = 2.1235833, 4.1670213, 3.1925299
    H1, H2, H3 = 9.0551443, 26.649995, 26.193668
    shape = para[0]
    scale = para[1]
    if shape <= 0 or scale <= 0:
        print("Invalid Parameters")
        return
    if nmom > 4:
        print("Parameter nmom too large")
        return
    xmom = [shape * scale]
    if nmom == 1:
        return(xmom)
    # lambda2 = beta * Gamma(alpha + 1/2) / (sqrt(pi) * Gamma(alpha))
    xmom.append(scale * 1 / sp.sqrt(sp.pi) * sp.exp(spsp.gammaln(shape + 0.5) - spsp.gammaln(shape)))
    if nmom == 2:
        return(xmom)
    # tau3: different rational approximations for alpha < 1 and alpha >= 1
    if shape < 1:
        Z = shape
        xmom.append((((E3 * Z + E2) * Z + E1) * Z + 1) / (((F3 * Z + F2) * Z + F1) * Z + 1))
    else:
        Z = 1 / shape
        xmom.append(sp.sqrt(Z) * (((A3 * Z + A2) * Z + A1) * Z + A0) / ((B2 * Z + B1) * Z + 1))
    if nmom == 3:
        return(xmom)
    # tau4 uses the same C/D approximation with the branch's Z
    xmom.append((((C3 * Z + C2) * Z + C1) * Z + C0) / ((D2 * Z + D1) * Z + 1))
    if nmom == 4:
        return(xmom)
#############################################################
def lmrgev(para,nmom):
    """L-moments of the generalized extreme-value distribution.

    para -- [location U, scale A, shape G] (Hosking's parameterization)
    nmom -- number of L-moments: [lambda1, lambda2, tau3, tau4, ...] (max 20)

    Returns None (after printing a message) on invalid parameters or
    nmom > 20.
    """
    #-- Exact values for the Gumbel limit G == 0:
    #   ZMOM[0], ZMOM[1] are the lambda1/lambda2 coefficients (Euler's
    #   constant and ln 2); ZMOM[2:] are the ratios tau3, tau4, ...
    ZMOM=[0.577215664901532861, 0.693147180559945309,
    0.169925001442312363,0.150374992788438185,
    0.558683500577583138e-1,0.581100239999710876e-1,
    0.276242584297309125e-1,0.305563766579053126e-1,
    0.164650282258328802e-1,0.187846624298170912e-1,
    0.109328215063027148e-1,0.126973126676329530e-1,
    0.778982818057231804e-2,0.914836179621999726e-2,
    0.583332389328363588e-2,0.690104287590348154e-2,
    0.453267970180679549e-2,0.538916811326595459e-2,
    0.362407767772368790e-2,0.432387608605538096e-2]
    SMALL = 1e-6
    U = para[0]
    A = para[1]
    G = para[2]
    if A<= 0 or G <= -1:
        print("Invalid Parameters")
        return
    if nmom > 20:
        print("Parameter nmom too large")
        return
    if abs(G)>SMALL:
        GAM = sp.exp(spsp.gammaln(1+G))
        xmom = [U+A*(1-GAM)/G]
        if nmom == 1:
            return(xmom)
        XX2 = 1-2**(-G)
        xmom.append(A*XX2*GAM/G)
        if nmom == 2:
            return(xmom)
        #-- Higher ratios via the probability-weighted-moment recurrence
        Z0=1
        for j in range(2,nmom):
            DJ=j+1
            BETA = (1-DJ**(-G))/XX2
            Z0 = Z0*(4*DJ-6)/DJ
            Z = Z0*3*(DJ-1)/(DJ+1)
            SUM = Z0*BETA-Z
            if j == 2:
                xmom.append(SUM)
            else:
                for i in range(1,j-1):
                    DI = i+1
                    Z = Z*(DI+DI+1)*(DJ-DI)/((DI+DI-1)*(DJ+DI))
                    SUM = SUM-Z*xmom[i+1]
                xmom.append(SUM)
        return(xmom)
    else:
        #-- Gumbel limit: use the tabulated exact values
        xmom = [U]
        if nmom == 1:
            return(xmom)
        xmom.append(A*ZMOM[1])
        if nmom == 2:
            return(xmom)
        for i in range(2,nmom):
            # BUGFIX: was `zmom[i-1]` -- an undefined lowercase name AND an
            # off-by-one index (tau3 must be ZMOM[2] = 0.1699..., not ln 2).
            xmom.append(ZMOM[i])
        return(xmom)
#############################################################
def lmrglo(para,nmom):
    """L-moments of the generalized logistic distribution.

    para -- [location U, scale A, shape G]
    nmom -- number of L-moments: [lambda1, lambda2, tau3, tau4, ...] (max 20)

    Higher ratios are evaluated as polynomials in G**2 with the tabulated
    coefficients Z (Hosking's LMRGLO); odd-order ratios carry a factor -G.
    Returns None (after printing a message) on invalid parameters or
    nmom > 20.
    BUGFIX: the table indexing used Python-2 integer division (`M/2`); under
    Python 3 this produced a float index (TypeError) and the odd/even parity
    test `M != M/2*2` was always False.
    """
    SMALL = 1e-4
    C1 = sp.pi**2/6
    C2 = 7*sp.pi**4/360
    #-- Z[M-1] holds the polynomial coefficients for the order-M ratio
    Z = [[0],[0]]
    Z.append([1])
    Z.append([0.166666666666666667, 0.833333333333333333])
    Z.append([0.416666666666666667, 0.583333333333333333])
    Z.append([0.666666666666666667e-1, 0.583333333333333333,
              0.350000000000000000])
    Z.append([0.233333333333333333, 0.583333333333333333,
              0.183333333333333333])
    Z.append([0.357142857142857143e-1, 0.420833333333333333,
              0.458333333333333333, 0.851190476190476190e-1])
    Z.append([0.150992063492063492, 0.515625000000000000,
              0.297916666666666667, 0.354662698412698413e-1])
    Z.append([0.222222222222222222e-1, 0.318893298059964727,
              0.479976851851851852, 0.165509259259259259,
              0.133983686067019400e-1])
    Z.append([0.106507936507936508, 0.447663139329805996,
              0.360810185185185185, 0.803902116402116402e-1,
              0.462852733686067019e-2])
    Z.append([0.151515151515151515e-1, 0.251316137566137566,
              0.469695216049382716, 0.227650462962962963,
              0.347139550264550265e-1, 0.147271324354657688e-2])
    Z.append([0.795695045695045695e-1, 0.389765946502057613,
              0.392917309670781893, 0.123813106261022928,
              0.134998713991769547e-1, 0.434261597456041900e-3])
    Z.append([0.109890109890109890e-1, 0.204132996632996633,
              0.447736625514403292, 0.273053442827748383,
              0.591917438271604938e-1, 0.477687757201646091e-2,
              0.119302636663747775e-3])
    Z.append([0.619345205059490774e-1, 0.342031759392870504,
              0.407013705173427396, 0.162189192806752331,
              0.252492100235155791e-1, 0.155093427662872107e-2,
              0.306778208563922850e-4])
    Z.append([0.833333333333333333e-2, 0.169768364902293474,
              0.422191282868366202, 0.305427172894620811,
              0.840827939972285210e-1, 0.972435791446208113e-2,
              0.465280282988616322e-3, 0.741380670696146887e-5])
    Z.append([0.497166028416028416e-1, 0.302765838589871328,
              0.410473300089185506, 0.194839026503251764,
              0.386598063704648526e-1, 0.341399407642897226e-2,
              0.129741617371825705e-3, 0.168991182291033482e-5])
    Z.append([0.653594771241830065e-2, 0.143874847595085690,
              0.396432853710259464, 0.328084180720899471,
              0.107971393165194318, 0.159653369932077769e-1,
              0.110127737569143819e-2, 0.337982364582066963e-4,
              0.364490785333601627e-6])
    Z.append([0.408784570549276431e-1, 0.270244290725441519,
              0.407599524514551521, 0.222111426489320008,
              0.528463884629533398e-1, 0.598298239272872761e-2,
              0.328593965565898436e-3, 0.826179113422830354e-5,
              0.746033771150646605e-7])
    Z.append([0.526315789473684211e-2, 0.123817655753054913,
              0.371859291444794917, 0.343568747670189607,
              0.130198662812524058, 0.231474364899477023e-1,
              0.205192519479869981e-2, 0.912058258107571930e-4,
              0.190238611643414884e-5, 0.145280260697757497e-7])
    U = para[0]
    A = para[1]
    G = para[2]
    if A <= 0 or abs(G) >= 1:
        print("Invalid Parameters")
        return
    if nmom > 20:
        print("Parameter nmom too large")
        return
    GG = G*G
    #-- Small-|G| series for lambda1/lambda2; exact closed form otherwise
    ALAM1 = -G*(C1+GG*C2)
    ALAM2 = 1+GG*(C1+GG*C2)
    if abs(G) > SMALL:
        ALAM2=G*sp.pi/sp.sin(G*sp.pi)
        ALAM1=(1-ALAM2)/G
    xmom = [U+A*ALAM1]
    if nmom == 1:
        return(xmom)
    xmom.append(A*ALAM2)
    if nmom == 2:
        return(xmom)
    for M in range(3,nmom+1):
        # BUGFIX: floor division so kmax stays an int index under Python 3
        kmax = M // 2
        SUMM=Z[M-1][kmax-1]
        for K in range(kmax-1,0,-1):
            SUMM = SUMM*GG+Z[M-1][K-1]
        # BUGFIX: odd-order ratios carry a factor -G; the original test
        # `M != M/2*2` is always False under Python-3 true division
        if M % 2 == 1:
            SUMM = -G*SUMM
        xmom.append(SUMM)
    return(xmom)
#############################################################
def lmrgno(para,nmom):
    """L-moments of the generalized normal (lognormal) distribution.

    para -- [location U, scale A, shape G]
    nmom -- number of L-moments: [lambda1, lambda2, tau3, ...] (max 20)

    lambda1/lambda2 are closed-form; higher ratios are obtained by an
    iteratively refined trapezoid-rule integration (Hosking's LMRGNO).
    Returns None (after printing a message) on invalid parameters,
    nmom > 20, or non-convergence of the integration.
    """
    #-- Exact ratios for the normal limit G == 0
    ZMOM = [0, 0.564189583547756287, 0, 0.122601719540890947,
    0, 0.436611538950024944e-1,0, 0.218431360332508776e-1,
    0, 0.129635015801507746e-1,0, 0.852962124191705402e-2,
    0, 0.601389015179323333e-2,0, 0.445558258647650150e-2,
    0, 0.342643243578076985e-2,0, 0.271267963048139365e-2]
    RRT2 = 1/sp.sqrt(2)
    RRTPI = 1/sp.sqrt(sp.pi)
    RANGE = 5
    EPS = 1e-8
    MAXIT = 10
    U = para[0]
    A = para[1]
    G = para[2]
    if A <= 0:
        print("Invalid Parameters")
        return
    if nmom > 20:
        print("Parameter nmom too large")
        return
    if abs(G)<=EPS:
        xmom = [U]
        if nmom == 1:
            return(xmom)
        xmom.append(A*ZMOM[1])
        if nmom == 2:
            return(xmom)
        for i in range(3,nmom+1):
            # BUGFIX: was lowercase `zmom[i-1]` -- an undefined name
            xmom.append(ZMOM[i-1])
        return(xmom)
    EGG = sp.exp(0.5*G**2)
    ALAM1 = (1-EGG)/G
    xmom = [U+A*ALAM1]
    if nmom == 1:
        return(xmom)
    ALAM2=EGG*spsp.erf(0.5*G)/G
    xmom.append(A*ALAM2)
    if nmom == 2:
        return(xmom)
    #-- Trapezoid-rule integration for the higher L-moment ratios; P follows
    #   a Legendre-style recurrence in D = erf(X)
    CC=-G*RRT2
    XMIN=CC-RANGE
    XMAX=CC+RANGE
    SUMM = [0]*nmom
    N=16
    XINC=(XMAX-XMIN)/N
    for i in range(1,N):
        X = XMIN+i*XINC
        E = sp.exp(-((X-CC)**2))
        D = spsp.erf(X)
        P1 = 1
        P = D
        for M in range(3,nmom+1):
            C1=M+M-3
            C2=M-2
            C3=M-1
            P2=P1
            P1=P
            P=(C1*D*P1-C2*P2)/C3
            SUMM[M-1] = SUMM[M-1]+E*P
    EST = []
    for i in SUMM:
        EST.append(i*XINC)
    for IT in range(1,MAXIT+1):
        # BUGFIX: snapshot the previous estimates; `ESTX = EST` aliased the
        # same list, so the convergence test compared EST with itself and
        # always "passed" immediately.
        ESTX = EST[:]
        N=N*2
        XINC=(XMAX-XMIN)/N
        # BUGFIX: the new midpoints are the odd indices 1..N-1 *inclusive*;
        # range(1,N-1,2) skipped the last one (Fortran DO I=1,N-1,2 includes
        # its upper bound).
        for i in range(1,N,2):
            X = XMIN+i*XINC
            E = sp.exp(-((X-CC)**2))
            D = spsp.erf(X)
            P1 = 1
            P = D
            for M in range(3,nmom+1):
                C1=M+M-3
                C2=M-2
                C3=M-1
                P2=P1
                P1=P
                P=(C1*D*P1-C2*P2)/C3
                SUMM[M-1] = SUMM[M-1]+E*P
        NOTCGD = 0
        for M in range(nmom,2,-1):
            EST[M-1] = SUMM[M-1]*XINC
            if abs(EST[M-1]-ESTX[M-1]) > EPS*abs(EST[M-1]):
                NOTCGD = M
        if NOTCGD == 0:
            #-- Converged: scale the integrals into L-moment ratios
            CONST = -sp.exp(CC**2)*RRTPI/(ALAM2*G)
            for M in range(3,nmom+1):
                xmom.append(CONST*EST[M-1])
            return(xmom)
    # BUGFIX: only report failure after all MAXIT refinements; the original
    # printed "Did Not Converge" inside the first loop iteration, so the
    # refinement loop could never actually iterate.
    print("Did Not Converge")
    return
#############################################################
def lmrgpa(para,nmom):
    """L-moments of the generalized Pareto distribution.

    para -- [location xi, scale alpha, shape k]
    nmom -- number of L-moments: [lambda1, lambda2, tau3, tau4, ...] (max 20)
    Returns None (after printing a message) on invalid parameters or
    nmom > 20.
    """
    xi, alpha, k = para[0], para[1], para[2]
    if alpha <= 0 or k < -1:
        print("Invalid Parameters")
        return
    if nmom > 20:
        print("Parameter nmom too large")
        return
    factor = 1 / (1 + k)
    moments = [xi + alpha * factor]
    if nmom == 1:
        return moments
    factor = factor / (2 + k)
    moments.append(alpha * factor)
    if nmom == 2:
        return moments
    ratio = 1
    for order in range(3, nmom + 1):
        # recurrence for the ratios: tau_r = tau_(r-1) * (r-2-k)/(r+k)
        ratio = ratio * (order - 2 - k) / (order + k)
        moments.append(ratio)
    return moments
#############################################################
def lmrgum(para,nmom):
ZMOM = [0.577215664901532861, 0.693147180559945309,
0.169925001442312363, 0.150374992788438185,
0.0558683500577583138, 0.0581100239999710876,
0.0276242584297309125, 0.0305563766579053126,
0.0164650282258328802, 0.0187846624298170912,
0.0109328215063027148, 0.0126973126676329530,
0.00778982818057231804, 0.00914836179621999726,
0.00583332389328363588, 0.00690104287590348154,
0.00453267970180679549, 0.00538916811326595459,
0.00362407767772368790, 0.00432387608605538096]
A = para[1]
| |
%s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
faces.append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
faces.append(gtop)
else:
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
faces.append(face1)
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
faces.append(faceBottom)
faceTop = "%s %s %s %s %s" % (p[4], p[5], p[6], p[7], p[4])
faces.append(faceTop)
for face in faces:
addsurface(False, CompositeSurface, face)
def CityGMLbuildingLOD2Solid(CityModel, ID, attributes, o, x, y, z, h, rtype=None, width=None, ovh=None, rep=None, LOD=None, aux=None, buildingpart=None, fd=False):
"""
Create LOD2 of the building with a basic roof shape. Solid representation.
"""
cityObject = etree.SubElement(CityModel, "cityObjectMember")
bldg = etree.SubElement(cityObject, "{%s}Building" % ns_bldg)
bldg.attrib['{%s}id' % ns_gml] = ID
roofType = etree.SubElement(bldg, "{%s}roofType" % ns_bldg)
roofType.text = rtype
yearOfConstructionXML = etree.SubElement(bldg, "{%s}yearOfConstruction" % ns_bldg)
yearOfConstructionXML.text = attributes['yearOfConstruction']
functionXML = etree.SubElement(bldg, "{%s}function" % ns_bldg)
functionXML.text = attributes['function']
storeysAboveGroundXML = etree.SubElement(bldg, "{%s}storeysAboveGround" % ns_bldg)
storeysAboveGroundXML.text = attributes['storeysAboveGround']
p = verticesBody(o, x, y, z)
r = verticesRoof([o, x, y, z], h, rtype, width)
#-- Computations for the LOD2 with explicit overhangs
eaves = z
upperEaves = z
if ovh is not None:
overhangs, interiors, eaves, ovhy_recalculated = verticesOverhangs([o, x, y, z], p, h, rtype, ovh, r, width)
if rtype == 'Shed':
upperEaves = z + h + (z - eaves)
else:
overhangs = None
if rep == 'solid':
lod2rep = etree.SubElement(bldg, "{%s}lod2Solid" % ns_bldg)
repres = etree.SubElement(lod2rep, "{%s}Solid" % ns_gml)
exterior = etree.SubElement(repres, "{%s}exterior" % ns_gml)
CompositeSurface = etree.SubElement(exterior, "{%s}CompositeSurface" % ns_gml)
elif rep == 'brep':
lod2rep = etree.SubElement(bldg, "{%s}lod2MultiSurface" % ns_bldg)
repres = etree.SubElement(lod2rep, "{%s}MultiSurface" % ns_gml)
surfaceMember = etree.SubElement(repres, "{%s}surfaceMember" % ns_gml)
if ASSIGNID:
repres.attrib['{%s}id' % ns_gml] = str(uuid.uuid4())
#-- Is the building part covered by overhangs?
if buildingpart is not None:
if x > aux['xsize'] and aux['ovhx'] >= buildingpart['x']:
covered = True
else:
covered = False
else:
covered = None
east_faces = {}
east_faces['rest'] = []
east_faces['roof'] = []
east_faces['outerfloor'] = []
#-- Account for building parts
if buildingpart is not None and not covered:
#-- Accounting for overhangs
if x > aux['xsize']:# or x < aux['xsize']:
bp = [None] * 8
bp[0] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([o[0] + x, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
elif fd and x < aux['xsize']:
bp = [None] * 8
eastline = GMLstring2points(p[1])[0][0]
bp[0] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([o[0] + x + buildingpart['x'] - .5*(x - aux['xsize']), aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([eastline, aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
else:
bp = [None] * 8
bp[0] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[1] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2]])
bp[2] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[3] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2]])
bp[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], aux['origin'][2] + buildingpart['z']])
bp[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
bp[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], aux['origin'][2] + buildingpart['z']])
#-- Top with the rest of the building
bpT = [None] * 8
tH = GMLstring2points(p[4])[0][2]
bpT[4] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'], tH])
bpT[5] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'], tH])
bpT[6] = GMLPointList([aux['origin'][0] + aux['xsize'] + buildingpart['x'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
bpT[7] = GMLPointList([aux['origin'][0] + aux['xsize'], aux['origin'][1] + buildingpart['o'] + buildingpart['y'], tH])
if buildingpart['type'] == 'Alcove':
if LOD == '2.0':
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
elif LOD == '2.1' or LOD == '2.2' or LOD == '2.3':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
east_faces['rest'].append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
east_faces['rest'].append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
east_faces['rest'].append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
east_faces['outerfloor'].append(gtop)
elif buildingpart['type'] == 'Garage':
if LOD == '2.0' or LOD == '2.1' or LOD == '2.2' or LOD == '2.3':
faceBottom = "%s %s %s %s %s %s %s %s %s" % (p[0], p[3], p[2], bp[3], bp[2], bp[1], bp[0], p[1], p[0])
face1 = "%s %s %s %s %s %s %s %s %s" % (p[1], bp[0], bp[4], bp[7], bp[3], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
gface0 = "%s %s %s %s %s" % (bp[0], bp[1], bp[5], bp[4], bp[0])
east_faces['rest'].append(gface0)
gface1 = "%s %s %s %s %s" % (bp[1], bp[2], bp[6], bp[5], bp[1])
east_faces['rest'].append(gface1)
gface3 = "%s %s %s %s %s" % (bp[3], bp[7], bp[6], bp[2], bp[3])
east_faces['rest'].append(gface3)
gtop = "%s %s %s %s %s" % (bp[4], bp[5], bp[6], bp[7], bp[4])
east_faces['roof'].append(gtop)
else:
face1 = "%s %s %s %s %s" % (p[1], p[2], p[6], p[5], p[1])
east_faces['wall'] = face1
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
#-- Bottom face (in all cases regardless of the roof type)
faceBottom = "%s %s %s %s %s" % (p[0], p[3], p[2], p[1], p[0])
if rep == 'solid':
addsurface(False, CompositeSurface, faceBottom)
elif rep == 'brep':
plainMultiSurface(surfaceMember, faceBottom)
#-- Roof surfaces and wall surfaces depending on the type of the roof.
if rtype == 'Gabled':
if rep == 'solid':
gabledRoof(CompositeSurface, p, r, east_faces)
elif rep == 'brep':
gabledRoof(surfaceMember, p, r, east_faces)
if overhangs is not None and ovh[0] > 0:
roofOverhangs(surfaceMember, overhangs, interiors)
elif rtype == 'Shed':
if rep == 'solid':
shedRoof(CompositeSurface, p, r, east_faces)
elif rep == 'brep':
shedRoof(surfaceMember, p, r, east_faces)
| |
<reponame>wgordon17/masline-warehouse
# Standard library
from concurrent.futures import ProcessPoolExecutor
from inspect import stack
from logging import getLogger
from platform import system
from sys import exc_info
import os
from urllib import parse
# Third party library
from pyodbc import connect, DatabaseError
from tornado import gen
# Local library
from warehouse.server import config
class DbManager:
    """Serial access point for the warehouse database.

    Named, whitelisted SQL queries (see ``valid_queries``) are executed on a
    shared process pool via ``async_query``; raw SQL never crosses the API.
    """
    # Shared ProcessPoolExecutor that runs queries; created in __init__ and
    # torn down by shutdown().  Class-level so all instances share one pool.
    db_pool = None
    # Preference ids (stored as TAB_<id> rows in opr_pref) accepted by the
    # *_user_data queries.
    valid_user_data = ["MODS", "PASS", "LAST"]
    """ Valid Queries and their inputs and outputs
    + Denotes only for internal use
    test_if_shipment_any    ship_no                     bool(True/False)
    test_if_shipment_open   ship_no                     bool(True/False)
    test_if_lot             lot_no                      bool(True/False)
    test_if_picked          ship_no, lin_no, lot_no     bool(True/False)
    get_user_name           unique_token                str(user_name)
    get_user_data           data_id (exclude TAB_), user_id     str(data_value)
        Valid data_ids:
            PASS - Password
            MODS - Authorized modules (comma separated)
            LAST - The last module used, also contains last data (comma separated)
    get_item_list           ship_no                     list(item, manu, location, status)
    get_item_detail         item_no, manu_no            list(ship_no, line, location, note, qty, lot_no, ship_status, line_status)
    get_item_hist           item_no, manu_no            list(location)
    get_item_pic_urls       item_no, manu_no            list(pic_url) (http://www.masline.com/sites/default/files/styles/medium/public/item_pics/___.JPG)
    get_item_no             lot_no                      item, manu
    get_shipment_notes      ship_no                     str(whse_note) (\n chars stripped)
    get_alternate_lots      ship_no, lot_no             list(line, lot_no, status)
    get_shipment_status     ship_no                     str(status)
    get_loc_int_id          location                    int(loc_int_id)
    insert_picked_wrong_lot location, wrong_lot, correct_lot, ship_no, lin_no, qty, user_id
    insert_picked_lot       location, lot_no, ship_no, lin_no, qty, user_id
    +insert_all_lots_pulled ship_no, user_id
    insert_user_data        data_id (exclude TAB_), data_value, user_id
    set_shipment_scanned    ship_no, user_id
    set_shipment_pulled     ship_no, user_id
    update_user_data        data_id (exclude TAB_), data_value, user_id
    """
valid_queries = {
##################
# SELECT queries #
##################
"test_if_shipment_any": # language="SQL server"
"""SELECT ship_no
FROM shp_hedr
WHERE ship_no = ?""",
"test_if_shipment_open": # language="SQL server"
"""SELECT ship_no
FROM shp_hedr
WHERE ship_no = ? AND (ship_status = 'P' OR ship_status = 'A')""",
"test_if_lot": # language="SQL server"
"""SELECT lot_no
FROM lot_hedr
WHERE lot_no = ?""",
"test_if_picked": # language="SQL server"
"""SELECT pc.description AS status
FROM loc_trx
LEFT JOIN create_proc_codes AS pc ON loc_trx.create_proc = pc.code
WHERE ship_no = ? AND ship_lin_no = ? AND int_lot_no = ?""",
"get_user_name": # language="SQL server"
"""SELECT RTRIM(opr_id) AS user_name
FROM opr_pref
WHERE pref_id = 'TAB_PASS' AND pref_data = ?""",
"get_user_data": # language="SQL server"
"""SELECT pref_data AS data_value
FROM opr_pref
WHERE pref_id = 'TAB_' || ? AND opr_id = ?""",
"get_item_list": # language="SQL server"
"""SELECT RTRIM(sd.item_no) AS item, RTRIM(sd.manu_no) AS manu, s_list.loc_id AS location, RTRIM(IFNULL(create_proc_codes.description, '')) AS status
FROM shp_detl AS sd
INNER JOIN (
SELECT lot_hedr.item_no, lot_hedr.manu_no, location.loc_id, loc_trx.create_proc
FROM shp_hedr
INNER JOIN shp_lot ON shp_hedr.ship_no = shp_lot.ship_no
INNER JOIN lot_hedr ON shp_lot.lot_no = lot_hedr.lot_no
INNER JOIN location ON lot_hedr.loc_int_id = location.loc_int_id
LEFT JOIN loc_trx ON shp_lot.ship_no = loc_trx.ship_no AND shp_lot.lin_no = loc_trx.ship_lin_no AND shp_lot.lot_no = loc_trx.int_lot_no
WHERE (shp_hedr.ship_status = 'P' OR shp_hedr.ship_status = 'A')
GROUP BY lot_hedr.item_no, lot_hedr.manu_no, location.loc_id, loc_trx.create_proc
) as s_list ON sd.item_no = s_list.item_no AND sd.manu_no = s_list.manu_no
LEFT JOIN create_proc_codes ON s_list.create_proc = create_proc_codes.code
WHERE sd.ship_no = ?
ORDER BY sd.item_no, sd.manu_no""",
"get_item_detail": # language="SQL server"
"""SELECT sdetl.ship_no, sdetl.lin_no AS line, RTRIM(loc.loc_id) AS location, sdetl.od_ord_com AS note,
CASE slot.qty_to_shp WHEN 0 THEN slot.from_lot ELSE slot.qty_to_shp END AS qty,
RTRIM(slot.lot_no) AS lot_no, RTRIM(sstat.description) AS ship_status, RTRIM(IFNULL(pc.description, '')) AS line_status
FROM shp_hedr AS shedr
INNER JOIN shp_status AS sstat ON shedr.ship_status = sstat.ship_status
INNER JOIN shp_detl AS sdetl ON shedr.ship_no = sdetl.ship_no
INNER JOIN shp_lot AS slot ON sdetl.ship_no = slot.ship_no AND sdetl.lin_no = slot.lin_no
INNER JOIN lot_hedr AS lot ON slot.lot_no = lot.lot_no
INNER JOIN location AS loc ON lot.loc_int_id = loc.loc_int_id
LEFT JOIN loc_trx AS trx ON sdetl.ship_no = trx.ship_no AND sdetl.lin_no = trx.ship_lin_no
AND slot.lot_no = trx.int_lot_no AND trx.loc_trx_int_id IN
(SELECT MAX(loc_trx_int_id)
FROM loc_trx
WHERE ship_no = sdetl.ship_no AND ship_lin_no = sdetl.lin_no AND int_lot_no = slot.lot_no)
LEFT JOIN create_proc_codes AS pc ON trx.create_proc = pc.code
WHERE shedr.ship_status <> 'C' AND shedr.ship_status <> 'V' AND
sdetl.item_no = ? AND sdetl.manu_no = ?""",
"get_item_hist": # language="SQL Server"
"""exec item_loc_hist ?, ?""",
"get_item_pic_urls": # language="SQL Server"
"""SELECT RTRIM(item_pic2) AS picA, RTRIM(item_pic3) AS picB, RTRIM(item_pic4) AS picC
FROM item
WHERE item.item_no = ? AND item.manu_no = ?""",
"get_item_no": # language="SQL Server"
"""SELECT RTRIM(item_no) AS item, RTRIM(manu_no) AS manu
FROM lot_hedr
WHERE lot_no = ?""",
"get_shipment_notes": # language="SQL Server"
"""SELECT oh_ord_com AS whse_note
FROM shp_hedr
WHERE shp_hedr.ship_no = ?""",
"get_alternate_lots": # language="SQL server"
"""SELECT s.lin_no AS line, RTRIM(s.lot_no) AS lot_no, RTRIM(pc.description) AS status
FROM lot_hedr AS l
INNER JOIN shp_detl AS shp ON l.item_no = shp.item_no AND l.manu_no = shp.manu_no
INNER JOIN shp_lot AS s ON shp.ship_no = s.ship_no AND shp.lin_no = s.lin_no
LEFT JOIN loc_trx AS trx ON shp.ship_no = trx.ship_no AND shp.lin_no = trx.ship_lin_no
AND s.lot_no = trx.int_lot_no AND trx.loc_trx_int_id IN
(SELECT MAX(loc_trx_int_id)
FROM loc_trx
WHERE ship_no = shp.ship_no AND ship_lin_no = shp.lin_no AND int_lot_no = s.lot_no)
LEFT JOIN create_proc_codes AS pc ON trx.create_proc = pc.code
WHERE shp.ship_no = ? AND l.lot_no = ?""",
"get_shipment_status": # language="SQL server"
"""SELECT RTRIM(stat.description) AS status
FROM shp_hedr AS shp
INNER JOIN shp_status AS stat ON shp.ship_status = stat.ship_status
WHERE shp.ship_no = ?""",
"get_loc_int_id": # language="SQL server"
"""SELECT loc_int_id
FROM location
WHERE loc_id = ?""",
##################
# INSERT queries #
##################
"insert_picked_wrong_lot": # language="SQL server"
"""INSERT INTO loc_trx
(loc_int_id, create_dt, create_id, qty_from_loc, create_proc, int_lot_no, prg_id)
VALUES (?, date('now'), ?, ?, 'D', ?, 'TAB_PICK')""",
"insert_picked_lot": # language="SQL server"
"""INSERT INTO loc_trx
(loc_int_id, create_dt, create_id, qty_from_loc, create_proc, int_lot_no, ship_no, ship_lin_no, prg_id)
VALUES (?, date('now'), ?, ?, 'A', ?, ?, ?, 'TAB_PICK')""",
"+insert_all_lots_pulled": # language="SQL server"
"""INSERT INTO loc_trx
(loc_int_id, create_dt, create_id, qty_from_loc, create_proc, int_lot_no, ship_no, ship_lin_no, prg_id)
SELECT l.loc_int_id, date('now'), ?, 0, 'U', s.lot_no, s.ship_no, s.lin_no, 'TAB_PICK'
FROM shp_lot AS s
INNER JOIN lot_hedr AS l ON s.lot_no = l.lot_no
WHERE s.ship_no = ?""",
"insert_user_data": # language="SQL server"
"""INSERT INTO opr_pref
(opr_id, pref_id, pref_data, create_dt, create_id, updt_dt, updt_id)
VALUES (?, 'TAB_' + UPPER(?), ?, date('now'), 'TABLET', date('now'), 'TABLET')""",
##################
# UPDATE queries #
##################
"set_shipment_scanned": # language="SQL server"
"""UPDATE shp_hedr
SET ship_status = 'A', updt_id = ?, updt_dt = date('now')
WHERE ship_no = ?""",
"set_shipment_pulled": # language="SQL server"
"""UPDATE shp_hedr
SET ship_status = 'U', updt_id = ?, updt_dt = date('now')
WHERE ship_no = ?""",
"update_user_data": # language="SQL server"
"""UPDATE opr_pref
SET pref_data = ?, updt_dt = date('now'), updt_id = 'TABLET'
WHERE opr_id = ? AND pref_id = 'TAB_' + UPPER(?)"""
}
def __init__(self, username, password, database, appname, num_db_processes=3):
# ODBC Connection string used in production
#self.conn_str = "DSN=epds;UID={0};PWD={1};DATABASE={2};APP={3}".format(username, password, database, appname)
# ODBC Connection string for accessing local SQLite preview
if system() == "Linux":
self.conn_str = "DRIVER={{SQLite3}};SERVER=localhost;DATABASE={db}".format(db=os.path.join(config.BASE_DIR, "test.db"))
elif system() == "Windows":
self.conn_str = "DRIVER={{SQLite3 ODBC Driver}};SERVER=localhost;DATABASE{db}".format(db=os.path.join(config.BASE_DIR, "test.db"))
DbManager.db_pool = ProcessPoolExecutor(max_workers=num_db_processes)
def shutdown(self):
DbManager.db_pool.shutdown(True)
@gen.coroutine
def async_query(self, *args, module=None):
result = {"response": "", "args": ""}
log = getLogger(__name__)
pool_future = DbManager.db_pool.submit(self._init_query, args)
try:
result = yield pool_future
except DatabaseError as e:
if module is None:
log.error(e)
else:
module.display_msg("Database error!", "error", debug_info=str(e))
except Exception as e:
log.exception(e)
finally:
return result
def _init_query(self, args):
if len(args) < 2:
raise DatabaseError("Error in database.query: Not enough arguments in query_db:" + str(args))
if args[0] not in self.valid_queries:
raise DatabaseError("Error in database.query: Requested query is not a valid query:" + str(args[0]))
query_name = args[0]
query_args = args[1:]
db_connection = connect(self.conn_str, autocommit=True)
query = self.valid_queries[query_name]
rows = getattr(self, "_" + query_name)(db_connection, query, *query_args)
return {"response": rows, "args": query_args}
def _exec_query(self, db_object, qry_str, args):
rows = ""
try:
cursor = db_object.cursor()
cursor.execute(qry_str, args)
except Exception as e:
raise DatabaseError(str(exc_info()[0].__name__) + " in database._exec_query: " + str(e) +
"\n\t" + str(qry_str).replace("\n", " ").replace("\t", " ") + "\n\t" + str(args))
else:
if qry_str.split(" ", 1)[0].upper() in "SELECT":
rows = cursor.fetchall()
elif qry_str.split(" ", 1)[0].upper() in "EXEC":
rows = True
while rows and cursor.rowcount >= 0:
rows = cursor.nextset()
if rows:
rows = cursor.fetchall()
cursor.close()
return rows
def _test_if_shipment_any(self, db_connection, query_str, ship_no):
rows = self._exec_query(db_connection, query_str, ship_no)
if len(rows) > 0:
return True
return False
def _test_if_shipment_open(self, db_connection, query_str, ship_no):
rows = self._exec_query(db_connection, query_str, ship_no)
if len(rows) > 0:
return True
return False
def _test_if_lot(self, db_connection, query_str, lot_no):
rows = self._exec_query(db_connection, query_str, lot_no)
if len(rows) > 0:
return True
return False
def _test_if_picked(self, db_connection, query_str, ship_no, lin_no, lot_no):
rows = self._exec_query(db_connection, query_str, (ship_no, lin_no, lot_no))
if len(rows) > 0:
status = rows[0].status
if status in ["Scanned", "Pulled", "Found"]:
return True
| |
<reponame>Ardibid/Rotrics_arm<gh_stars>1-10
'''
File name: pyArm.py
Author: <NAME>
Date created: 04/--/2021
Date last modified: 08/10/2021
Python Version: 3.7.7
License: MIT
'''
##########################################################################################
###### Import
##########################################################################################
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
from dash_html_components.P import P
from dash.dependencies import Input, Output, State
#import plotly.graph_objs as go
import dash_bootstrap_components as dbc
from dash_bootstrap_components._components.CardBody import CardBody
import numpy as np
from numpy.core.fromnumeric import size
import serial
import re
import base64
import io
import os
import json
import argparse
from src.pydexarm import Dexarm
from src.josn_interface import Drawing_processor
######################################################################
### Arguments
######################################################################
# Command-line interface: one optional "mode" flag selecting how the Dash
# server is run ("debug", "local" or "remote"); consumed elsewhere via
# mode_selection.
parser = argparse.ArgumentParser(prog='PyArm',
                                 description="A Dash app to communicate with Rotric Robotic Arms")
parser.add_argument('-mode',
                    help="Run the App in \"debug\", \"local\", or \"remote\" mode (str)",
                    default= "debug",
                    nargs='?',
                    type=str)
args = parser.parse_args()
mode_selection = args.mode
##########################################################################################
###### Global Variables
##########################################################################################
# Serial port for the robot: Windows COM name by default, explicit tty
# device when "MAC" is selected in the UI.
port = "COM3"
mac_port = "/dev/tty.usbmodem3063335534381"
arm = None               # Dexarm handle; stays None until "Connect Robot" is clicked
json_drawing_data = None
#######################
## Default variables
#######################
z_val = -50              # Z height where the paper surface is located (see adjust_z_val)
z_val_adjusted = z_val + 0   # z_val plus the UI-chosen offset
scale = 25               # canvas pixels per unit; also sizes the blank canvas image
x_offset = 0
y_offset = 250
x_default = 0            # parking X/Y used by connect/adjust callbacks
y_default = 300
z_clear_height = -25     # travel height above the paper
pressure_factor = 5      # pen pressure range around z_val (see adjust_pressure_factor)
default_JSON_file_Path = "./data/path_data.json"
# Converts drawings/JSON paths into robot moves; shares the paper height and
# travel height defined above.
dp = Drawing_processor(base_z = z_val, safe_z_val = z_clear_height, slider = False)
##########################################################################################
###### The app!
##########################################################################################
external_stylesheets=[dbc.themes.BOOTSTRAP]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
##########################################################################################
###### Initial GUI setup
##########################################################################################
def init_canvas():
    """Build the blank drawing figure.

    Returns a light-gray image sized by the global ``scale`` with both axes
    hidden and the Plotly open-path drawing tool pre-selected.
    """
    blank = np.ones(shape=(140*scale, 160*scale, 3)) * 220.
    canvas = px.imshow(blank, binary_string=True)
    canvas.update_layout(coloraxis_showscale=False)
    canvas.update_xaxes(showticklabels=False)
    canvas.update_yaxes(showticklabels=False)
    canvas.update_layout(
        dragmode="drawopenpath",
        newshape_line_color='black',
        newshape_line_width=3,
    )
    return canvas
# Extra Plotly toolbar buttons so the user can draw and erase freehand paths.
config = {"modeBarButtonsToAdd": [
    "drawopenpath",
    "eraseshape",
]}
# Shared starting figure for both graphs.
fig = init_canvas()
##########################################################################################
###### Cards
##########################################################################################
# Title banner shown at the top of the page.
header_card = dbc.Card([
    dbc.CardBody([
        html.Div([html.H1("PyArm", className="card-title"),
                  html.Hr(),
                  html.P("by <NAME>, v.0.1, 2021")]),
    ])
])
# Two-tab card: free-hand drawing on the canvas, or uploading a JSON path
# file; each tab has its own "Draw Now" button and graph.
drawing_tabs = dbc.Card(
    [
        dbc.CardHeader(
            dbc.Tabs(
                [
                    # Tab 1: mouse drawing on the blank canvas.
                    dbc.Tab([
                        html.Br(),
                        html.H3("Draw from Canvas"),
                        html.P("Draw using your mouse and then hit Draw Now"),
                        dcc.Graph(
                            id="graph_pic",
                            figure=fig, config=config,
                            style={'width': '100%',
                                   'height': "800px",
                                   'visibility': 'visible'},
                        ),
                        html.Br(),
                        dbc.Button(id='draw_now_canvas',
                                   children= "Draw Now",
                                   color="dark",
                                   block=True,
                                   className="mr-1"),
                        html.Br(),
                        dbc.Button(id='clear_draw',
                                   children= "Clear Canvas",
                                   # color="dark",
                                   block=True,
                                   className="mr-1"),
                        html.Br(),
                        html.Hr(),
                        html.P("", id="annotations-data-pre"),
                    ],
                        label="Drawing",
                        tab_id="drawing_tab"),
                    # Tab 2: drag-and-drop JSON upload with hidden preview graph.
                    dbc.Tab([
                        html.Br(),
                        html.H3("Draw from a JSON File"),
                        html.P("Load a JSON file and hit Draw Now"),
                        dcc.Upload(
                            id="json_upload",
                            children= [html.A('Drag and Drop JSON File Here')],
                            style= {'borderStyle': 'dashed',
                                    'borderRadius': '5px',
                                    'color': '#343a40',
                                    'borderColor': '#aaa',
                                    # 'backgroundColor': '#343a40',
                                    'height': "60px",
                                    'lineHeight': '60px',
                                    'textAlign': 'center',
                                    },
                        ),
                        html.Br(),
                        dbc.Button(id='draw_now_JSON',
                                   children= "Draw Now",
                                   color="dark",
                                   block=True,
                                   className="mr-1"),
                        html.Br(),
                        # Hidden until a JSON file is successfully loaded.
                        dcc.Graph(
                            id="graph_json",
                            figure=fig, config=config,
                            style={'width': '1000px',
                                   'height': "800px",
                                   'visibility': 'hidden'},
                        ),
                        html.P("", id="json_upload_detail"),
                        html.Br(),
                    ],
                        label="Loading",
                        tab_id="loading_tab",),
                ],
                id="card_tabs",
                card=True,
                active_tab="drawing_tab",
            )
        ),
        dbc.CardBody(html.P(id="card-content", className="card-text")),
    ]
)
# Connection manager: connect/disconnect buttons, slider-rail toggles and the
# serial port dropdown; each control has a status <P> updated by callbacks.
controls_card = dbc.Card([
    dbc.CardBody([
        html.H6("Connection Manager", className="card-title"),
        html.Hr(),
        dbc.Row([
            dbc.Col([
                dbc.Button(id='robot_connect',
                           children= "Connect Robot",
                           color="dark",
                           block=True,
                           className="mr-1"),
                html.P("Robot waiting", id="robot_status"),
                html.Br(),
                html.Hr(),
                # value 1 = use the sliding rail, value 2 = (re)initialize it.
                dbc.Checklist(
                    options=[
                        {"label": "Using Slider", "value": 1},
                        {"label": "Init Slider", "value": 2},
                    ],
                    value=[],
                    id="slider_toggle",
                    inline= True,
                    switch=True,
                ),
                html.P("", id="slider_toggle_status"),
                html.Br(),
                html.Hr(),
                dbc.Button(id='robot_disconnect',
                           children= "Disconnect",
                           color="dark",
                           block=True,
                           className="mr-1"),
                html.P("", id="robot_disconnect_status"),
                html.Br(),
                html.Hr(),
                # Item ids double as port names in the set_com_port callback.
                dbc.DropdownMenu(
                    id="port_name",
                    label="Port",
                    children=[
                        dbc.DropdownMenuItem(id='COM1', children="COM1"),
                        dbc.DropdownMenuItem(id='COM3', children="COM3"),
                        dbc.DropdownMenuItem(id='MAC', children="MAC"),
                    ],
                    color="dark",
                    className="mr-1"),
                html.P("", id="port_status"),
            ]),
        ])
    ])
])
# Adjustments: paper-touch calibration, Z offset and pen-pressure sliders,
# plus the emergency stop button.
calibration_card = dbc.Card([
    dbc.CardBody([
        html.H6("Adjustments", className="card-title"),
        html.Hr(),
        dbc.Row([
            dbc.Col([
                dbc.Button(id='touch_paper',
                           children= "Touch Paper",
                           color="dark",
                           block=True,
                           className="mr-1"),
                html.Br(),
                html.P("", id="touch_paper_status"),
                html.Hr(),
                # Fine offset added to the paper height (see adjust_z_val).
                dcc.Slider(id="z_adjust",
                           min = -5,
                           max = 5,
                           value =0,
                           step = .25),
                html.P(id="z_adjust_status", children= "Offset: 0"),
                html.Hr(),
                # Pen pressure range (see adjust_pressure_factor).
                dcc.Slider(id="pressure_factor",
                           min = 0,
                           max = 20,
                           value = pressure_factor,
                           step = .1),
                html.P(id="pressure_factor_status", children= "Pressure: -5"),
                html.Hr(),
                dbc.Button(id='stop',
                           children= "STOP",
                           color="danger",
                           block=True,
                           className="mr-1"),
                html.Br(),
                html.P("", id="stop_status"),
            ])
        ])
    ])
])
# Control panel: connection manager and adjustments side by side, plus the
# shared status line for the draw_now callback.
control_cards_deck = dbc.Card([
    dbc.CardBody([
        html.H4("Control Panel", className="card-title"),
        dbc.CardDeck([
            dbc.Col([controls_card]),
            dbc.Col([calibration_card]),
            # dbc.Col([]),
        ]),
        html.P("", id='draw_now_status'),
    ]),
])
##########################################################################################
###### Ok, let's assemble the layout
##########################################################################################
# Page layout, top to bottom: title banner, control panel, drawing tabs.
app.layout = dbc.Container([
    html.Br(),
    header_card,
    html.Br(),
    control_cards_deck,
    html.Br(),
    drawing_tabs,
])
##########################################################################################
###### Callbacks
##########################################################################################
@app.callback(
    Output('stop_status', 'children'),
    Input('stop', 'n_clicks'),
    prevent_initial_call=True)
def emergency_stop(value):
    """Halt the arm immediately by sending a G4 (dwell) command.

    BUG FIX: this function was named ``clear_drawing_canvas``, colliding with
    the canvas-clearing callback defined right below it; the later definition
    silently shadowed this one at module level.  Dash keeps its own reference
    taken at decoration time, so the collision was latent rather than visibly
    broken -- the rename removes the hazard.
    """
    arm._send_cmd("G4")
    return "EMERGENCY STOP"
@app.callback(
    Output('graph_pic', 'figure'),
    Input('clear_draw', 'n_clicks'),
    prevent_initial_call=True)
def clear_drawing_canvas(value):
    """Reset the saved JSON path file and return a fresh blank canvas."""
    global default_JSON_file_Path
    dp.reset_JSON_file(json_path=default_JSON_file_Path)
    return init_canvas()
@app.callback([
    Output('json_upload_detail', 'children'),
    Output('graph_json', 'style'),
    Output('graph_json', 'figure')],
    Input('json_upload', 'contents'),
    State('json_upload', 'filename'),
    State('json_upload', 'last_modified'),
    prevent_initial_call=True)
def json_file_upload(contents, file_names, dates):
    """Read an uploaded JSON drawing, persist it to the default path and
    preview it on the (unhidden) graph_json figure.

    Args:
        contents, file_names, dates: inputs from the Dash Upload component
            (contents is a base64 data-URL string or None).

    Returns:
        3-tuple matching the declared Outputs: detail children, graph style,
        graph figure.
    """
    global arm, dp
    global default_JSON_file_Path
    if contents is not None:
        msg = "{} is loaded".format(file_names)
        print (msg)
        try:
            _, content_string = contents.split(',')
            decoded = base64.b64decode(content_string)
            data = decoded.decode('utf-8')
            data = str(data)
        except Exception:
            # BUG FIX: the original bare ``except`` only printed a message and
            # then fell through to use the undefined ``data`` variable,
            # raising NameError inside the callback.  Report the failure and
            # leave the graph untouched instead.
            print ("Couldn\'t read the JSON file")
            return ("Couldn't read the JSON file", dash.no_update, dash.no_update)
        dp.write_dic_to_json_file(data, default_JSON_file_Path)
        # quick_draw_graph is defined elsewhere in this module.
        fig = quick_draw_graph(default_JSON_file_Path)
        return (html.Div([html.Pre(data)]), {'visibility': 'visible'}, fig)
    # BUG FIX: this callback declares three Outputs, so every branch must
    # return a 3-tuple (the original returned a bare string here, which Dash
    # rejects at runtime).
    return ("No file is loaded yet", dash.no_update, dash.no_update)
@app.callback(
    Output("port_status", "children"),
    [Input("COM1", "n_clicks"),
     Input("COM3", "n_clicks"),
     Input("MAC", "n_clicks")]
)
def set_com_port(c1, c3, c5):
    """Select the serial port from the dropdown; item ids double as port names.

    For Ubuntu (Raspberry Pi), find the exact name of the port, e.g.
    '/dev/ttyACM0'.
    """
    global port, mac_port
    triggered = dash.callback_context.triggered
    if triggered:
        chosen = triggered[0]["prop_id"].split(".")[0]
        print (chosen)
        # "MAC" is an alias for the explicit macOS tty device path.
        port = mac_port if chosen == "MAC" else chosen
    return "Active port: {}".format(port)
################################
###### Connect/Disconnect ######
################################
@app.callback(
    Output('robot_status', 'children'),
    Input('robot_connect', 'n_clicks'),
    prevent_initial_call=True,
)
def connect_robot(value):
    """
    Initializes the serial connection with the robot on the active port,
    homes it, selects the pen module, parks it above the paper and reports
    the resulting position in the UI.
    """
    global arm, port
    global x_default, y_default
    if value == 0:
        return "Nothing is clicked"
    else:
        print ("Contacting the robot for the {}th time!".format(value))
        arm = Dexarm(port=port)
        arm.go_home()
        # Module type 0: presumably the pen/drawing module -- TODO confirm
        # against the Dexarm API.
        arm.set_module_type(0)
        # Park slightly above the travel height, then re-home.
        arm.move_to(x_default, y_default, z_clear_height+ 10)
        arm.go_home()
        x, y, z, e, a, b, c = arm.get_current_position()
        message = "x: {}, y: {}, z: {}, e: {}\na: {}, b: {}, c: {}".format(x, y, z, e, a, b, c)
        data = html.Div([html.P("Robot connected on {}".format(port)),
                         html.Br(),
                         html.P(message)])
        return data
@app.callback(
    Output('robot_disconnect_status', 'children'),
    Input('robot_disconnect', 'n_clicks'),
    prevent_initial_call=True,
)
def disconnect_arm(value):
    """Home the arm, close its serial port and drop the global handle."""
    global arm
    if value == 0:
        return ""
    if arm is None:
        return "No arm is available"
    if arm.ser.is_open:
        arm.go_home()
        arm.close()
        arm = None
        return "Disconnected"
@app.callback(
    Output("slider_toggle_status", "children"),
    Input("slider_toggle", "value"),
    prevent_initial_call=True,
)
def init_slider(slider_toggle_val):
    """
    Toggles use of the sliding rail (checklist value 1) and optionally
    initializes it (checklist value 2) when a robot is connected.
    """
    global arm
    print (slider_toggle_val)
    if 1 in slider_toggle_val:
        print ("Using slider")
        dp.slider = True
    else:
        # Rail switched off: nothing more to do.
        dp.slider = False
        return "Slider not in use"
    if 2 in slider_toggle_val:
        # Only initialize when the arm exists and its serial port is open;
        # otherwise fall through and just report the rail as "in use".
        if arm is not None:
            print ("arm is not None")
            if arm.ser.is_open:
                print ("arm.ser is open")
                print ("going home")
                arm.go_home()
                print ("initing rail")
                arm.sliding_rail_init()
                dp.slider = True
                print ("done with slider start!")
                return "Slider is initiated and in use"
    return "Slider in use"
#################################
########## Adjustments ##########
#################################
@app.callback(
    Output("z_adjust_status","children"),
    Input("z_adjust", 'value'),
    prevent_initial_call=True,
)
def adjust_z_val(value):
    """
    Reads the value from z_adjust slider (offset value) and changes the
    z_val (the height where the paper is located). Also moves the robot
    to show the user the new z_val.
    """
    global z_val, z_val_adjusted
    global arm, dp
    z_val_adjusted = z_val + value
    # NOTE(review): with the rail active the move is issued via keyword
    # e=0 instead of a positional x -- presumably how Dexarm addresses the
    # rail axis; confirm against the pydexarm API.
    if dp.slider:
        arm.move_to(e= 0, y= y_default, z= z_val_adjusted)
    else:
        arm.move_to(0, y_default, z_val_adjusted)
    # Keep the drawing processor's paper height in sync with the UI.
    dp.base_z = z_val_adjusted
    return "Z: {}, Offset: {}".format (z_val_adjusted, value)
@app.callback(
    Output("pressure_factor_status","children"),
    Input("pressure_factor", 'value'),
    prevent_initial_call=True,
)
def adjust_pressure_factor(value):
    """
    Reads the value from pressure_factor slider (Pressure factor) and Changes
    the pressure range between the lower and highest pressure.  The arm then
    demonstrates the range by dipping below and above the paper height.
    """
    global pressure_factor
    global arm, dp
    # Slider is positive in the UI; stored negated so that "more pressure"
    # pushes the pen further down (lower Z).
    pressure_factor = -value
    dp.pressure_factor = pressure_factor
    # NOTE(review): "dealy_s" looks like a misspelling of "delay_s" but is
    # presumably the actual pydexarm method name -- confirm before renaming.
    if dp.slider:
        arm.move_to(e= 0, y= y_default, z= z_val_adjusted+pressure_factor)
        arm.dealy_s(0.5)
        arm.move_to(e= 0, y= y_default, z= z_val_adjusted-pressure_factor)
        arm.dealy_s(0.5)
        arm.move_to(e= 0, y = y_default, z= z_val_adjusted)
    else:
        arm.move_to(0, y_default, z_val_adjusted+pressure_factor)
        arm.dealy_s(0.5)
        arm.move_to(0, y_default, z_val_adjusted-pressure_factor)
        arm.dealy_s(0.5)
        arm.move_to(0, y_default, z_val_adjusted)
    return "Pressure: {}".format (pressure_factor)
@app.callback(
    Output('touch_paper_status', 'children'),
    Output('touch_paper', 'children'),
    Input('touch_paper', 'n_clicks'),
    prevent_initial_call=True,
)
def adjust_marker(value):
    """
    Forces the robot to touch the z_val, user can use this function
    to adjust its marker or pen height.  The button toggles: odd clicks
    lower the pen to the paper, even clicks lift it back to travel height.
    """
    global arm
    global z_val, z_val_adjusted, x_default, y_default
    if value == 0:
        return "Marker not adjusted", dash.no_update
    elif arm:
        if value%2 == 1:
            # on odd clicks the robot touch the paper
            if dp.slider:
                arm.move_to(e= x_default, y= y_default, z= z_val_adjusted)
            else:
                arm.move_to(x_default, y_default, z_val_adjusted)
            return "Adjust the marker", "Return Home"
        else:
            # on even clicks the robot comes back to normal height
            if dp.slider:
                arm.move_to(e= x_default, y= y_default, z= z_clear_height)
            else:
                arm.move_to(x_default, y_default, z_clear_height)
            return "", "Touch Paper"
    else:
        return "No robot is available", dash.no_update
##########################################################################################
###### Drawing
##########################################################################################
@app.callback(
Output('draw_now_status', 'children'),
[Input('draw_now_canvas', 'n_clicks'),
Input('draw_now_JSON', 'n_clicks')],
prevent_initial_call=True)
def draw_now(value_graph, value_JSON):
"""
Reads the saved JSON file in the default path and | |
import uuid
import unittest
from taurus import Device
from taurus.core.tango.tangovalidator import TangoDeviceNameValidator
from sardana.pool import AcqSynchType
from sardana.taurus.core.tango.sardana.pool import registerExtensions
from sardana.tango.pool.test.base_sartest import SarTestTestCase
class TestMeasurementGroupConfiguration(SarTestTestCase, unittest.TestCase):
    def setUp(self):
        # Build the sar_demo-like test pool and register the Taurus
        # measurement-group extensions used by the tests below.
        SarTestTestCase.setUp(self)
        registerExtensions()
    def tearDown(self):
        # Tear down the test pool created in setUp.
        SarTestTestCase.tearDown(self)
def _assertResult(self, result, channels, expected_value):
expected_channels = list(channels)
for channel, value in result.items():
msg = "unexpected key: {}".format(channel)
self.assertIn(channel, expected_channels, msg)
expected_channels.remove(channel)
self.assertEqual(value, expected_value)
msg = "{} are missing".format(expected_channels)
self.assertEqual(len(expected_channels), 0, msg)
def _assertMultipleResults(self, result, channels, expected_values):
expected_channels = list(channels)
for (channel, value), expected_value in zip(result.items(),
expected_values):
msg = "unexpected key: {}".format(channel)
self.assertIn(channel, expected_channels, msg)
expected_channels.remove(channel)
self.assertEqual(value, expected_value)
msg = "{} are missing".format(expected_channels)
self.assertEqual(len(expected_channels), 0, msg)
def test_enabled(self, elements=["_test_ct_1_1", "_test_ct_1_2",
"_test_2d_1_3",
"_test_mt_1_3/position"]):
mg_name = str(uuid.uuid1())
argin = [mg_name] + elements
self.pool.CreateMeasurementGroup(argin)
try:
mg = Device(mg_name)
# Check initial state of all kind of channels, nonexistent
# channels for the feature return None as result.
result = mg.getEnabled(*elements)
expected = [True] * len(elements)
self._assertMultipleResults(result, elements, expected)
# Test every possible combination of setting values
# Check that changing one channel doesn't affect the other
mg.setEnabled(False, *elements)
result = mg.getEnabled(*elements)
expected = [False] * len(elements)
self._assertMultipleResults(result, elements, expected)
mg.setEnabled(True, elements[0])
result = mg.getEnabled(*elements)
expected = [False] * len(elements)
expected[0] = True
self._assertMultipleResults(result, elements, expected)
mg.setEnabled(False, *elements)
resutl = mg.getEnabled(*elements)
self._assertResult(resutl, elements, False)
# Redefine elements to ony use existing values
elements = ["_test_ct_1_1", "_test_ct_1_2"]
# Set values using the controller instead of channels
mg.setEnabled(True, "_test_ct_ctrl_1")
resutl = mg.getEnabled(*elements)
self._assertResult(resutl, elements, True)
# Get values by controller
mg.setEnabled(False, *elements)
resutl = mg.getEnabled("_test_ct_ctrl_1")
self._assertResult(resutl, elements, False)
# Check ret_full_name
v = TangoDeviceNameValidator()
full_names = [v.getNames(element)[0] for element in elements]
resutl = mg.getEnabled(*full_names)
self._assertResult(resutl, elements, False)
mg.setEnabled(True, *full_names)
resutl = mg.getEnabled(*elements, ret_full_name=True)
self._assertResult(resutl, full_names, True)
finally:
mg.cleanUp()
self.pool.DeleteElement(mg_name)
    def test_output(self, elements=["_test_ct_1_1", "_test_ct_1_2",
                                    "_test_2d_1_3",
                                    "_test_mt_1_3/position"]):
        # Exercise the measurement group "output" flag through channel names,
        # controller names and full (FQDN) names.
        mg_name = str(uuid.uuid1())
        argin = [mg_name] + elements
        self.pool.CreateMeasurementGroup(argin)
        try:
            mg = Device(mg_name)
            # Check initial state of all kind of channels, nonexistent
            # channels for the feature return None as result.
            enabled = mg.getOutput(*elements)
            expected = [True] * len(elements)
            self._assertMultipleResults(enabled, elements, expected)
            # Test every possible combination of setting values
            # Check that changing one channel doesn't affect the other
            mg.setOutput(False, *elements)
            is_output = mg.getOutput(*elements)
            self._assertResult(is_output, elements, False)
            mg.setOutput(True, elements[0])
            result = mg.getOutput(*elements)
            expected = [False] * len(elements)
            expected[0] = True
            self._assertMultipleResults(result, elements, expected)
            mg.setOutput(False, *elements)
            is_output = mg.getOutput(*elements)
            self._assertResult(is_output, elements, False)
            # Redefine elements to only use existing values
            elements = ["_test_ct_1_1", "_test_ct_1_2"]
            # Set values using the controller instead of channels
            mg.setOutput(True, "_test_ct_ctrl_1")
            is_output = mg.getOutput(*elements)
            self._assertResult(is_output, elements, True)
            # Get values by controller
            mg.setOutput(False, *elements)
            is_output = mg.getOutput("_test_ct_ctrl_1")
            self._assertResult(is_output, elements, False)
            # Check ret_full_name
            v = TangoDeviceNameValidator()
            full_names = [v.getNames(element)[0] for element in elements]
            is_output = mg.getOutput(*full_names)
            self._assertResult(is_output, elements, False)
            mg.setOutput(True, *full_names)
            is_output = mg.getOutput(*elements, ret_full_name=True)
            self._assertResult(is_output, full_names, True)
        finally:
            mg.cleanUp()
            self.pool.DeleteElement(mg_name)
    def test_PlotType(self, elements=["_test_ct_1_1", "_test_ct_1_2",
                                      "_test_ct_1_3", "_test_2d_1_3",
                                      "_test_mt_1_3/position"]):
        # Exercise the plot-type setting: numeric codes are No=0, Spectrum=1,
        # Image=2 (set by name, read back as int).
        mg_name = str(uuid.uuid1())
        argin = [mg_name] + elements
        self.pool.CreateMeasurementGroup(argin)
        try:
            mg = Device(mg_name)
            # All channels default to "No" (0).
            plottype = mg.getPlotType()
            self._assertResult(plottype, elements, 0)
            mg.setPlotType("Image", elements[0])
            mg.setPlotType("Spectrum", elements[1])
            mg.setPlotType("No", elements[2])
            mg.setPlotType("Image", elements[3])
            mg.setPlotType("Image", elements[4])
            plottype = mg.getPlotType()
            expected_values = [2, 1, 0, 2, 2]
            self._assertMultipleResults(plottype, elements, expected_values)
            # Unknown plot-type names must be rejected.
            with self.assertRaises(ValueError):
                mg.setPlotType("asdf", elements[2])
            # Redefine elements
            elements = ["_test_ct_1_1", "_test_ct_1_2", "_test_ct_1_3"]
            # Set values using the controller instead of channels
            mg.setPlotType("Image", "_test_ct_ctrl_1")
            plottype = mg.getPlotType(*elements)
            self._assertResult(plottype, elements, 2)
            # Get values by controller
            mg.setPlotType("Spectrum", *elements)
            plottype = mg.getPlotType("_test_ct_ctrl_1")
            self._assertResult(plottype, elements, 1)
            # Check ret_full_name
            v = TangoDeviceNameValidator()
            full_names = [v.getNames(element)[0] for element in elements]
            plottype = mg.getPlotType(*full_names)
            self._assertResult(plottype, elements, 1)
            mg.setPlotType("Image", *full_names)
            plottype = mg.getPlotType(*elements, ret_full_name=True)
            self._assertResult(plottype, full_names, 2)
        finally:
            mg.cleanUp()
            self.pool.DeleteElement(mg_name)
    def test_PlotAxes(self, elements=["_test_ct_1_1", "_test_ct_1_2",
                                      "_test_ct_1_3", "_test_2d_1_3",
                                      "_test_mt_1_3/position"]):
        """Check the PlotAxes extra API of the MeasurementGroup device.

        Axes can only be assigned to channels whose plot type allows them
        (one axis for Spectrum, two for Image); "<mov>" and "<idx>" are the
        accepted axis tokens.
        """
        mg_name = str(uuid.uuid1())
        argin = [mg_name] + elements
        self.pool.CreateMeasurementGroup(argin)
        try:
            mg = Device(mg_name)
            mg.setPlotType("Image", elements[0])
            mg.setPlotType("Spectrum", elements[1])
            mg.setPlotType("No", elements[2])
            mg.setPlotType("Image", elements[3])
            mg.setPlotType("Image", elements[4])
            # Before any assignment every channel reports an empty axes list.
            result = mg.getPlotAxes()
            self._assertResult(result, elements, [])
            mg.setPlotAxes(["<idx>", "<idx>"], elements[0])
            mg.setPlotAxes(["<mov>"], elements[1])
            # A channel with plot type "No" rejects axes assignment.
            with self.assertRaises(Exception):
                mg.setPlotAxes(['<mov>'], elements[2])
            mg.setPlotAxes(["<idx>", "<idx>"], elements[3])
            mg.setPlotAxes(["<idx>", "<idx>"], elements[4])
            result = mg.getPlotAxes()
            expected_result = [['<idx>', '<idx>'], ['<mov>'], [],
                               ['<idx>', '<idx>'], ['<idx>', '<idx>']]
            self._assertMultipleResults(result, elements, expected_result)
            mg.setPlotAxes(["<mov>", "<idx>"], elements[0])
            mg.setPlotAxes(["<idx>"], elements[1])
            result = mg.getPlotAxes()
            expected_result = [['<mov>', '<idx>'], ['<idx>'], [],
                               ['<idx>', '<idx>'], ['<idx>', '<idx>']]
            self._assertMultipleResults(result, elements, expected_result)
            mg.setPlotAxes(["<mov>", "<mov>"], elements[0])
            result = mg.getPlotAxes()
            expected_result = [['<mov>', '<mov>'], ['<idx>'], [],
                               ['<idx>', '<idx>'], ['<idx>', '<idx>']]
            self._assertMultipleResults(result, elements, expected_result)
            # Wrong axes cardinality (or a "No" channel) raises.
            with self.assertRaises(RuntimeError):
                mg.setPlotAxes(["<mov>"], elements[2])
            with self.assertRaises(ValueError):
                mg.setPlotAxes(["<mov>", "<idx>"], elements[1])
            with self.assertRaises(ValueError):
                mg.setPlotAxes(["<mov>"], elements[0])
            # Redefine elements to only the counter channels
            elements = ["_test_ct_1_1", "_test_ct_1_2", "_test_ct_1_3"]
            # Set values using the controller instead of channels
            with self.assertRaises(Exception):
                mg.setPlotAxes(["<mov>"], "_test_ct_ctrl_1")
            # TODO get method by controller doesn't give the order
            # Get values by controller
            result = mg.getPlotAxes("_test_ct_ctrl_1")
            expected_result = [['<mov>', '<mov>'], ['<idx>'], []]
            self._assertMultipleResults(result, elements, expected_result)
            # Check ret_full_name
            v = TangoDeviceNameValidator()
            full_names = [v.getNames(element)[0] for element in elements]
            result = mg.getPlotAxes(*full_names)
            expected_result = [['<mov>', '<mov>'], ['<idx>'], []]
            self._assertMultipleResults(result, elements, expected_result)
            mg.setPlotAxes(["<idx>", "<idx>"], full_names[0])
            mg.setPlotAxes(["<mov>"], full_names[1])
            result = mg.getPlotAxes(*elements, ret_full_name=True)
            expected_result = [['<idx>', '<idx>'], ['<mov>'], []]
            self._assertMultipleResults(result, full_names, expected_result)
        finally:
            mg.cleanUp()
            self.pool.DeleteElement(mg_name)
    def test_Timer(self, elements=["_test_ct_1_1", "_test_ct_1_2",
                                   "_test_ct_1_3",
                                   "_test_mt_1_3/position"]):
        """Check the Timer extra API of the MeasurementGroup device.

        The timer is handled per controller: setting one counter channel as
        timer applies to all channels of that controller. External (Tango
        attribute) channels report None and cannot be used as timer.
        """
        mg_name = str(uuid.uuid1())
        argin = [mg_name] + elements
        self.pool.CreateMeasurementGroup(argin)
        try:
            mg = Device(mg_name)
            # External attribute channels have no timer...
            result = mg.getTimer("_test_mt_1_3/position")
            # ...and cannot be assigned as one.
            with self.assertRaises(Exception):
                mg.setTimer("_test_mt_1_3/position")
            self._assertResult(result, ["_test_mt_1_3/position"], None)
            mg.setTimer('_test_ct_1_3')
            result = mg.getTimer(*elements)
            expected = ['_test_ct_1_3', '_test_ct_1_3', '_test_ct_1_3', None]
            self._assertMultipleResults(result, elements, expected)
            mg.setTimer('_test_ct_1_2')
            result = mg.getTimer(*elements)
            expected = ['_test_ct_1_2', '_test_ct_1_2', '_test_ct_1_2', None]
            self._assertMultipleResults(result, elements, expected)
            # Results can be grouped by controller instead of per channel.
            result = mg.getTimer(*elements, ret_by_ctrl=True)
            self._assertMultipleResults(result,
                                        ['_test_ct_ctrl_1', '__tango__'],
                                        ['_test_ct_1_2', None])
            # Check ret_full_name
            v = TangoDeviceNameValidator()
            counters = ["_test_ct_1_1", "_test_ct_1_2", "_test_ct_1_3"]
            full_names = [v.getNames(counter)[0] for counter in counters]
            # Setting the timer by full Tango name is accepted too.
            mg.setTimer(v.getNames('_test_ct_1_1')[0])
            result = mg.getTimer()
            expected = ['_test_ct_1_1', '_test_ct_1_1', '_test_ct_1_1', None]
            self._assertMultipleResults(result, elements, expected)
            # TODO ret_full_name gives controller name
            mg.setTimer("_test_ct_1_2")
            result = mg.getTimer(*counters, ret_full_name=True)
            self._assertResult(result, full_names, "_test_ct_1_2")
        finally:
            mg.cleanUp()
            self.pool.DeleteElement(mg_name)
    def test_Monitor(self, elements=["_test_ct_1_1", "_test_ct_1_2",
                                     "_test_ct_1_3", "_test_2d_1_1",
                                     '_test_2d_1_2',
                                     "_test_mt_1_3/position"]):
        """Check the Monitor extra API of the MeasurementGroup device.

        The monitor is handled per controller, so the counter and 2D
        controllers keep independent monitors; external (Tango attribute)
        channels report None and cannot be used as monitor.
        """
        mg_name = str(uuid.uuid1())
        argin = [mg_name] + elements
        self.pool.CreateMeasurementGroup(argin)
        try:
            mg = Device(mg_name)
            # External attribute channels cannot act as monitor.
            with self.assertRaises(Exception):
                mg.setMonitor("_test_mt_1_3/position")
            mg.setMonitor('_test_2d_1_2')
            mg.setMonitor("_test_ct_1_3")
            expected = ["_test_ct_1_3", "_test_ct_1_3", "_test_ct_1_3",
                        "_test_2d_1_2", '_test_2d_1_2', None]
            result = mg.getMonitor()
            self._assertMultipleResults(result, elements, expected)
            # Results can be grouped by controller instead of per channel.
            expected = ["_test_ct_1_3", '_test_2d_1_2', None]
            result = mg.getMonitor(ret_by_ctrl=True)
            ctrls = ['_test_ct_ctrl_1', '_test_2d_ctrl_1', '__tango__']
            self._assertMultipleResults(result, ctrls, expected)
            # Check ret_full_name
            v = TangoDeviceNameValidator()
            counters = ["_test_ct_1_1", "_test_ct_1_2", "_test_ct_1_3",
                        '_test_2d_1_1', '_test_2d_1_2']
            full_names = [v.getNames(counter)[0] for counter in counters]
            mg.setMonitor(v.getNames('_test_ct_1_1')[0])
            mg.setMonitor(v.getNames('_test_2d_1_2')[0])
            result = mg.getMonitor(*counters, ret_full_name=True)
            expected = ["_test_ct_1_1", "_test_ct_1_1", "_test_ct_1_1",
                        "_test_2d_1_2", '_test_2d_1_2']
            self._assertMultipleResults(result, full_names, expected)
        finally:
            mg.cleanUp()
            self.pool.DeleteElement(mg_name)
    def test_Synchronizer(self, elements=["_test_ct_1_1", "_test_ct_1_2",
                                          "_test_ct_1_3", "_test_2d_1_1",
                                          "_test_mt_1_3/position"]):
        """Check the Synchronizer extra API of the MeasurementGroup device.

        Synchronizers are assigned per controller and default to 'software';
        external (Tango attribute) channels report None.
        """
        mg_name = str(uuid.uuid1())
        argin = [mg_name] + elements
        self.pool.CreateMeasurementGroup(argin)
        try:
            mg = Device(mg_name)
            result = mg.getSynchronizer()
            expected = ['software', 'software', 'software', 'software', None]
            self._assertMultipleResults(result, elements, expected)
            # External attribute channels cannot have a synchronizer.
            with self.assertRaises(Exception):
                mg.setSynchronizer('_test_tg_1_2', "_test_mt_1_3/position")
            # A trigger/gate element can be assigned to several controllers
            # in a single call.
            mg.setSynchronizer('_test_tg_1_2', "_test_ct_ctrl_1",
                               "_test_2d_ctrl_1")
            expected = ['_test_tg_1_2', '_test_tg_1_2', '_test_tg_1_2',
                        '_test_tg_1_2', None]
            result = mg.getSynchronizer()
            self._assertMultipleResults(result, elements, expected)
            mg.setSynchronizer('software', "_test_ct_ctrl_1",
                               "_test_2d_ctrl_1")
            result = mg.getSynchronizer()
            expected = ['software', 'software', 'software', 'software', None]
            self._assertMultipleResults(result, elements, expected)
            # Unknown synchronizer names are rejected.
            with self.assertRaises(Exception):
                mg.setSynchronizer('asdf', "_test_ct_ctrl_1",
                                   "_test_2d_ctrl_1")
            # Check ret_full_name
            v = TangoDeviceNameValidator()
            counters = ["_test_ct_1_1", "_test_ct_1_2", "_test_ct_1_3",
                        '_test_2d_1_1']
            full_names = [v.getNames(counter)[0] for counter in counters]
            mg.setSynchronizer('_test_tg_1_2', "_test_ct_ctrl_1",
                               "_test_2d_ctrl_1")
            result = mg.getSynchronizer(*counters, ret_full_name=True)
            self._assertResult(result, full_names, '_test_tg_1_2')
        finally:
            mg.cleanUp()
            self.pool.DeleteElement(mg_name)
    def test_Synchronization(self, elements=["_test_ct_1_1", "_test_ct_1_2",
                                             "_test_ct_1_3", "_test_2d_1_1",
                                             "_test_mt_1_3/position"]):
        """Check the Synchronization extra API of the MeasurementGroup device.

        Synchronization (Trigger/Gate/Start) is set per controller and
        defaults to Trigger; external (Tango attribute) channels report None.
        """
        mg_name = str(uuid.uuid1())
        argin = [mg_name] + elements
        self.pool.CreateMeasurementGroup(argin)
        try:
            mg = Device(mg_name)
            result = mg.getSynchronization()
            expected = [AcqSynchType.Trigger, AcqSynchType.Trigger,
                        AcqSynchType.Trigger, AcqSynchType.Trigger, None]
            self._assertMultipleResults(result, elements, expected)
            # TODO: maybe we should raise an exception here?
            # with self.assertRaises(Exception):
            #     mg.setSynchronization(AcqSynchType.Trigger,
            #                           "_test_mt_1_3/position")
            mg.setSynchronization(AcqSynchType.Gate, "_test_ct_ctrl_1",
                                  "_test_2d_ctrl_1")
            expected = [AcqSynchType.Gate, AcqSynchType.Gate,
                        AcqSynchType.Gate, AcqSynchType.Gate, None]
            result = mg.getSynchronization()
            self._assertMultipleResults(result, elements, expected)
            mg.setSynchronization(AcqSynchType.Start, "_test_ct_ctrl_1",
                                  "_test_2d_ctrl_1")
            result = mg.getSynchronization()
            expected = [AcqSynchType.Start, AcqSynchType.Start,
                        AcqSynchType.Start, AcqSynchType.Start, None]
            self._assertMultipleResults(result, elements, expected)
            # Invalid synchronization values are rejected.
            with self.assertRaises(Exception):
                mg.setSynchronization('asdf', "_test_ct_ctrl_1",
                                      "_test_2d_ctrl_1")
            # Check ret_full_name
            v = TangoDeviceNameValidator()
            counters = ["_test_ct_1_1", "_test_ct_1_2", "_test_ct_1_3",
                        '_test_2d_1_1']
            full_names = [v.getNames(counter)[0] for counter in counters]
            mg.setSynchronization(AcqSynchType.Trigger, "_test_ct_ctrl_1",
                                  "_test_2d_ctrl_1")
            result = mg.getSynchronization(*counters, ret_full_name=True)
            self._assertResult(result, full_names, AcqSynchType.Trigger)
        finally:
            mg.cleanUp()
            self.pool.DeleteElement(mg_name)
def test_ValueRefEnabled(self, elements=["_test_2d_1_1", "_test_2d_1_2",
"_test_ct_1_3",
"_test_mt_1_3/position"]):
mg_name = str(uuid.uuid1())
argin = [mg_name] + elements
self.pool.CreateMeasurementGroup(argin)
try:
mg = Device(mg_name)
# Check initial state of all kind of channels, nonexistent
# channels for the feature return None as result.
enabled = mg.getValueRefEnabled(*elements)
expected = [False, False, None, None]
self._assertMultipleResults(enabled, elements, expected)
# Check if the nonexistent channels raise error if trying to set
with self.assertRaises(Exception):
mg.setValueRefEnabled(True, *elements[-2])
with self.assertRaises(Exception):
mg.setValueRefEnabled(True, *elements[-1])
# Redefine elements to ony | |
)
self.assertEqual( k.getInterpolation(), newInterpolation )
self.assertTrue( s.undoAvailable() )
self.assertIn( k, ps )
self.assertNotIn( k0, ps )
self.assertNotIn( k1, ps )
ps.clear()
s.undo()
self.assertEqual( k.getInterpolation(), interpolation )
self.assertTrue( s.redoAvailable() )
self.assertIn( k, ps )
self.assertNotIn( k0, ps )
self.assertNotIn( k1, ps )
ps.clear()
s.redo()
self.assertEqual( k.getInterpolation(), newInterpolation )
self.assertIn( k, ps )
self.assertNotIn( k0, ps )
self.assertNotIn( k1, ps )
def testKeySetTime( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
time = 10
k = Gaffer.Animation.Key( 10, 5 )
curve.addKey( k )
self.assertTrue( k.isActive() )
self.assertIsNotNone( k.parent() )
self.assertTrue( k.parent().isSame( curve ) )
self.assertEqual( k.getTime(), time )
with Gaffer.UndoScope( s ) :
k.setTime( time )
self.assertEqual( k.getTime(), time )
self.assertFalse( s.undoAvailable() )
newTime = 12
with Gaffer.UndoScope( s ) :
k.setTime( newTime )
self.assertEqual( k.getTime(), newTime )
self.assertTrue( s.undoAvailable() )
s.undo()
self.assertEqual( k.getTime(), time )
self.assertTrue( s.redoAvailable() )
s.redo()
self.assertEqual( k.getTime(), newTime )
	def testKeySetTimeSignals( self ) :
		# Verify that keyTimeChangedSignal() fires for the key whose time is
		# set - and again on undo and redo - but never for unaffected keys.
		ps = set()
		def changed( curve, key ) :
			ps.add( key )
		s = Gaffer.ScriptNode()
		s["n"] = Gaffer.Node()
		s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
		curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
		# The connection must be kept alive for the callback to be delivered.
		c = curve.keyTimeChangedSignal().connect( changed )
		k0 = Gaffer.Animation.Key( 0, 1 )
		k1 = Gaffer.Animation.Key( 12, 8 )
		curve.addKey( k0 )
		curve.addKey( k1 )
		time = 10
		k = Gaffer.Animation.Key( 10, 5 )
		curve.addKey( k )
		self.assertTrue( k.isActive() )
		self.assertIsNotNone( k.parent() )
		self.assertTrue( k.parent().isSame( curve ) )
		self.assertEqual( k.getTime(), time )
		newTime = 12
		with Gaffer.UndoScope( s ) :
			k.setTime( newTime )
		self.assertEqual( k.getTime(), newTime )
		self.assertTrue( s.undoAvailable() )
		# Only the key that moved should have been signalled.
		self.assertIn( k, ps )
		self.assertNotIn( k0, ps )
		self.assertNotIn( k1, ps )
		ps.clear()
		s.undo()
		self.assertEqual( k.getTime(), time )
		self.assertTrue( s.redoAvailable() )
		self.assertIn( k, ps )
		self.assertNotIn( k0, ps )
		self.assertNotIn( k1, ps )
		ps.clear()
		s.redo()
		self.assertEqual( k.getTime(), newTime )
		self.assertIn( k, ps )
		self.assertNotIn( k0, ps )
		self.assertNotIn( k1, ps )
def testClosestKey( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
key0 = Gaffer.Animation.Key( 0, 0 )
key1 = Gaffer.Animation.Key( 1, 1 )
curve.addKey( key0 )
curve.addKey( key1 )
self.assertEqual( curve.closestKey( -1 ), key0 )
self.assertEqual( curve.closestKey( -0.1 ), key0 )
self.assertEqual( curve.closestKey( 0 ), key0 )
self.assertEqual( curve.closestKey( 0.1 ), key0 )
self.assertEqual( curve.closestKey( 0.49 ), key0 )
self.assertEqual( curve.closestKey( 0.51 ), key1 )
self.assertEqual( curve.closestKey( 0.75 ), key1 )
self.assertEqual( curve.closestKey( 1 ), key1 )
self.assertEqual( curve.closestKey( 1.1 ), key1 )
self.assertEqual( curve.closestKey( -1, 1 ), key0 )
self.assertEqual( curve.closestKey( -1, 0.9 ), None )
self.assertEqual( curve.closestKey( 0.75, 1 ), key1 )
self.assertEqual( curve.closestKey( 0.75, 0.2 ), None )
def testRemoveKey( self ) :
s = Gaffer.ScriptNode()
s["n"] = Gaffer.Node()
s["n"]["user"]["f"] = Gaffer.FloatPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
curve = Gaffer.Animation.acquire( s["n"]["user"]["f"] )
key = Gaffer.Animation.Key( time = 10, value = 10 )
curve.addKey( key )
self.assertTrue( curve.getKey( key.getTime() ).isSame( key ) )
self.assertTrue( curve.closestKey( 0 ).isSame( key ) )
self.assertTrue( key.parent().isSame( curve ) )
self.assertTrue( key.isActive() )
curve.removeKey( key )
self.assertEqual( curve.getKey( key.getTime() ), None )
self.assertEqual( curve.closestKey( 0 ), None )
self.assertEqual( key.parent(), None )
self.assertFalse( key.isActive() )
	def testRemoveKeyWithInactiveKey( self ) :
		# Several keys are parented at the same time with
		# removeActiveClashing = False, so only the last one added is active.
		# Removing the active key should promote the most recently added
		# inactive key, and every step must round-trip through undo/redo.
		s = Gaffer.ScriptNode()
		s["n"] = GafferTest.AddNode()
		curve = Gaffer.Animation.acquire( s["n"]["op1"] )
		k1 = Gaffer.Animation.Key( time = 1, value = 1 )
		k2 = Gaffer.Animation.Key( time = 1, value = 2 )
		k3 = Gaffer.Animation.Key( time = 1, value = 3 )
		curve.addKey( k1, removeActiveClashing = False )
		curve.addKey( k2, removeActiveClashing = False )
		curve.addKey( k3, removeActiveClashing = False )
		# Last key added wins; earlier clashing keys stay parented but inactive.
		self.assertTrue( curve.getKey( 1 ).isSame( k3 ) )
		self.assertTrue( k3.parent().isSame( curve ) )
		self.assertTrue( k3.isActive() )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		# Removing active k3 promotes k2.
		with Gaffer.UndoScope( s ) :
			curve.removeKey( k3 )
		self.assertTrue( curve.getKey( 1 ).isSame( k2 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertTrue( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		# Removing active k2 promotes k1.
		with Gaffer.UndoScope( s ) :
			curve.removeKey( k2 )
		self.assertTrue( curve.getKey( 1 ).isSame( k1 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
		# Removing the last key leaves the curve empty at that time.
		with Gaffer.UndoScope( s ) :
			curve.removeKey( k1 )
		self.assertIsNone( curve.getKey( 1 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertIsNone( k1.parent() )
		self.assertFalse( k1.isActive() )
		# Undo each removal in turn...
		s.undo()
		self.assertTrue( curve.getKey( 1 ).isSame( k1 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
		s.undo()
		self.assertTrue( curve.getKey( 1 ).isSame( k2 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertTrue( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		s.undo()
		self.assertTrue( curve.getKey( 1 ).isSame( k3 ) )
		self.assertTrue( k3.parent().isSame( curve ) )
		self.assertTrue( k3.isActive() )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		# ...then redo them all again.
		s.redo()
		self.assertTrue( curve.getKey( 1 ).isSame( k2 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertTrue( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		s.redo()
		self.assertTrue( curve.getKey( 1 ).isSame( k1 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
		s.redo()
		self.assertIsNone( curve.getKey( 1 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertIsNone( k1.parent() )
		self.assertFalse( k1.isActive() )
	def testRemoveInactiveKey( self ) :
		# Removing an inactive key must not disturb the active key, and the
		# removal must round-trip through undo/redo.
		s = Gaffer.ScriptNode()
		s["n"] = GafferTest.AddNode()
		curve = Gaffer.Animation.acquire( s["n"]["op1"] )
		k1 = Gaffer.Animation.Key( time = 1, value = 1 )
		k2 = Gaffer.Animation.Key( time = 1, value = 2 )
		k3 = Gaffer.Animation.Key( time = 1, value = 3 )
		curve.addKey( k1, removeActiveClashing = False )
		curve.addKey( k2, removeActiveClashing = False )
		curve.addKey( k3, removeActiveClashing = False )
		# Last key added is the active one.
		self.assertTrue( curve.getKey( 1 ).isSame( k3 ) )
		self.assertTrue( k3.parent().isSame( curve ) )
		self.assertTrue( k3.isActive() )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		# Removing inactive k2 leaves active k3 untouched.
		with Gaffer.UndoScope( s ) :
			curve.removeKey( k2 )
		self.assertTrue( curve.getKey( 1 ).isSame( k3 ) )
		self.assertTrue( k3.parent().isSame( curve ) )
		self.assertTrue( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		# Removing active k3 now promotes k1 ( k2 is gone ).
		with Gaffer.UndoScope( s ) :
			curve.removeKey( k3 )
		self.assertTrue( curve.getKey( 1 ).isSame( k1 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
		# Undo both removals...
		s.undo()
		self.assertTrue( curve.getKey( 1 ).isSame( k3 ) )
		self.assertTrue( k3.parent().isSame( curve ) )
		self.assertTrue( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		s.undo()
		self.assertTrue( curve.getKey( 1 ).isSame( k3 ) )
		self.assertTrue( k3.parent().isSame( curve ) )
		self.assertTrue( k3.isActive() )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		# ...and redo them.
		s.redo()
		self.assertTrue( curve.getKey( 1 ).isSame( k3 ) )
		self.assertTrue( k3.parent().isSame( curve ) )
		self.assertTrue( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertFalse( k1.isActive() )
		s.redo()
		self.assertTrue( curve.getKey( 1 ).isSame( k1 ) )
		self.assertIsNone( k3.parent() )
		self.assertFalse( k3.isActive() )
		self.assertIsNone( k2.parent() )
		self.assertFalse( k2.isActive() )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
	def testRemoveInactiveKeys( self ) :
		# removeInactiveKeys() should strip every inactive key in a single
		# undoable step while leaving all active keys in place.
		s = Gaffer.ScriptNode()
		s["n"] = GafferTest.AddNode()
		curve = Gaffer.Animation.acquire( s["n"]["op1"] )
		ka = set()	# keys expected to remain active
		ki = set()	# keys expected to be inactive
		for i in range( -10, 10, 2 ) :
			k = Gaffer.Animation.Key( time = i, value = 1 )
			k1 = Gaffer.Animation.Key( time = i, value = 2 )
			k2 = Gaffer.Animation.Key( time = i, value = 3 )
			curve.addKey( k2, removeActiveClashing = False )
			curve.addKey( k1, removeActiveClashing = False )
			curve.addKey( k, removeActiveClashing = False )
			self.assertTrue( k.parent().isSame( curve ) )
			self.assertTrue( k1.parent().isSame( curve ) )
			self.assertTrue( k2.parent().isSame( curve ) )
			# The last key added at each time is the active one.
			self.assertTrue( k.isActive() )
			self.assertFalse( k1.isActive() )
			self.assertFalse( k2.isActive() )
			ka.add( k )
			ki.add( k1 )
			ki.add( k2 )
		with Gaffer.UndoScope( s ) :
			curve.removeInactiveKeys()
		for k in ka :
			self.assertTrue( k.parent().isSame( curve ) )
			self.assertTrue( k.isActive() )
		for k in ki :
			self.assertIsNone( k.parent() )
			self.assertFalse( k.isActive() )
		# Undo reparents the inactive keys ( still inactive ).
		s.undo()
		for k in ka :
			self.assertTrue( k.parent().isSame( curve ) )
			self.assertTrue( k.isActive() )
		for k in ki :
			self.assertTrue( k.parent().isSame( curve ) )
			self.assertFalse( k.isActive() )
		# Redo removes them again.
		s.redo()
		for k in ka :
			self.assertTrue( k.parent().isSame( curve ) )
			self.assertTrue( k.isActive() )
		for k in ki :
			self.assertIsNone( k.parent() )
			self.assertFalse( k.isActive() )
	def testRemoveInactiveKeysAfterSetTime( self ) :
		# A key made inactive by another key moving onto its time should be
		# removable via removeInactiveKeys(), with both steps undoable.
		s = Gaffer.ScriptNode()
		s["n"] = GafferTest.AddNode()
		curve = Gaffer.Animation.acquire( s["n"]["op1"] )
		k1 = Gaffer.Animation.Key( time = 1, value = 1 )
		k2 = Gaffer.Animation.Key( time = 2, value = 1 )
		curve.addKey( k1 )
		curve.addKey( k2 )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
		self.assertTrue( k2.isActive() )
		# set time of first key so that second key becomes inactive
		with Gaffer.UndoScope( s ) :
			ck = k1.setTime( 2 )
		# setTime() returns the clashing key that was deactivated.
		self.assertTrue( k2.isSame( ck ) )
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
		self.assertFalse( k2.isActive() )
		# remove inactive keys
		with Gaffer.UndoScope( s ) :
			curve.removeInactiveKeys()
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertIsNone( k2.parent() )
		self.assertTrue( k1.isActive() )
		self.assertFalse( k2.isActive() )
		# Undo the removal, then the time change...
		s.undo()
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
		self.assertFalse( k2.isActive() )
		s.undo()
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
		self.assertTrue( k2.isActive() )
		# ...and redo both.
		s.redo()
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertTrue( k2.parent().isSame( curve ) )
		self.assertTrue( k1.isActive() )
		self.assertFalse( k2.isActive() )
		s.redo()
		self.assertTrue( k1.parent().isSame( curve ) )
		self.assertIsNone( k2.parent() )
		self.assertTrue( k1.isActive() )
		self.assertFalse( k2.isActive() )
def testRemoveInactiveKeysOutsideUndoAfterSetTime( self ) :
s = Gaffer.ScriptNode()
s["n"] = GafferTest.AddNode()
curve = Gaffer.Animation.acquire( s["n"]["op1"] )
k1 = Gaffer.Animation.Key( time = 1, value = 1 )
k2 = Gaffer.Animation.Key( time = 2, value = 1 )
curve.addKey( k1 )
curve.addKey( k2 )
self.assertTrue( k1.parent().isSame( curve ) )
self.assertTrue( k2.parent().isSame( curve ) )
self.assertTrue( k1.isActive() )
self.assertTrue( k2.isActive() )
# set time of first key so that second key becomes inactive
with Gaffer.UndoScope( s ) :
ck = k1.setTime( 2 )
self.assertTrue( k2.isSame( ck ) )
self.assertTrue( k1.parent().isSame( curve ) )
self.assertTrue( k2.parent().isSame( curve ) )
self.assertTrue( k1.isActive() )
self.assertFalse( k2.isActive() )
# remove inactive keys outside undo system
curve.removeInactiveKeys()
self.assertTrue( k1.parent().isSame( curve ) )
self.assertIsNone( | |
553, 968, 239, 810, 651, 132, 185, 14, 644, 474, 645,
200, 503, 600, 467, 884, 2, 280, 425, 105, 226, 557,
661, 814, 881, 477, 546, 801, 231, 378, 550, 482, 673,
320, 63, 847, 795, 792, 240, 474, 989, 203, 442, 595,
482, 277, 18, 116, 762, 557, 532, 610, 944, 171, 241,
484, 919, 987, 116, 255, 374, 300, 948, 323, 898, 296,
361, 455, 696, 888, 161, 34, 190, 487, 624, 748, 887,
778, 126, 465, 197, 607, 429, 3, 502, 218, 542, 594,
987, 322, 340, 346, 597, 843, 315, 736, 450, 74, 420,
214, 736, 519, 799, 137, 579, 123, 119, 416, 454, 757,
351, 474, 564, 813, 668, 962, 446, 177, 490, 400, 119,
183, 589, 888, 826, 717, 73, 576, 147, 873, 919, 795,
622, 981, 872, 326, 252, 193, 447, 974, 775, 150, 200,
443, 641, 64, 130, 636, 22, 172, 117, 885, 40, 654,
690, 324, 912, 995])
    def test_snail_051(self):
        # 17x17 matrix: expect the elements in clockwise "snail" spiral order.
        self.assertEqual(snail([[697, 690, 45, 97, 974, 564, 828, 482, 459, 457,
                                 247, 709, 849, 755, 636, 252, 174],
                                [878, 182, 418, 18, 296, 541, 463, 226, 390,
                                 399, 86, 57, 352, 505, 880, 822, 596],
                                [312, 932, 870, 982, 37, 485, 327, 970, 614,
                                 352, 485, 832, 443, 243, 116, 468, 437],
                                [283, 947, 1000, 474, 878, 672, 130, 269, 601,
                                 862, 608, 896, 683, 65, 5, 7, 854],
                                [103, 886, 322, 406, 644, 252, 162, 590, 859,
                                 997, 222, 316, 188, 581, 796, 969, 58],
                                [229, 54, 972, 517, 133, 800, 959, 577, 62, 954,
                                 234, 40, 491, 22, 580, 862, 428],
                                [853, 197, 664, 207, 581, 868, 982, 935, 2, 818,
                                 51, 950, 425, 673, 513, 507, 992],
                                [917, 788, 132, 184, 895, 383, 592, 175, 810,
                                 711, 802, 86, 43, 192, 598, 515, 822],
                                [59, 393, 360, 66, 673, 904, 665, 258, 264, 39,
                                 667, 780, 679, 563, 100, 30, 272],
                                [150, 367, 289, 44, 24, 249, 470, 487, 212, 802,
                                 989, 338, 650, 813, 518, 64, 465],
                                [523, 744, 969, 535, 138, 123, 784, 424, 16,
                                 638, 518, 692, 26, 253, 134, 334, 279],
                                [563, 345, 64, 97, 67, 966, 282, 163, 530, 69,
                                 821, 159, 70, 657, 766, 312, 667],
                                [102, 543, 515, 548, 410, 417, 570, 834, 78,
                                 297, 961, 164, 375, 429, 318, 636, 506],
                                [358, 824, 326, 229, 271, 557, 286, 19, 74, 375,
                                 713, 292, 984, 730, 734, 281, 275],
                                [9, 812, 979, 24, 319, 707, 337, 99, 454, 499,
                                 124, 291, 400, 809, 566, 290, 151],
                                [815, 554, 264, 774, 823, 520, 185, 11, 860,
                                 938, 566, 15, 367, 729, 540, 623, 14],
                                [13, 808, 108, 848, 278, 568, 551, 248, 3, 814,
                                 211, 204, 808, 452, 564, 477, 744]]),
                         [697, 690, 45, 97, 974, 564, 828, 482, 459, 457, 247,
                          709, 849, 755, 636, 252, 174, 596, 437, 854, 58, 428,
                          992, 822, 272, 465, 279, 667, 506, 275, 151, 14, 744,
                          477, 564, 452, 808, 204, 211, 814, 3, 248, 551, 568,
                          278, 848, 108, 808, 13, 815, 9, 358, 102, 563, 523,
                          150, 59, 917, 853, 229, 103, 283, 312, 878, 182, 418,
                          18, 296, 541, 463, 226, 390, 399, 86, 57, 352, 505,
                          880, 822, 468, 7, 969, 862, 507, 515, 30, 64, 334,
                          312, 636, 281, 290, 623, 540, 729, 367, 15, 566, 938,
                          860, 11, 185, 520, 823, 774, 264, 554, 812, 824, 543,
                          345, 744, 367, 393, 788, 197, 54, 886, 947, 932, 870,
                          982, 37, 485, 327, 970, 614, 352, 485, 832, 443, 243,
                          116, 5, 796, 580, 513, 598, 100, 518, 134, 766, 318,
                          734, 566, 809, 400, 291, 124, 499, 454, 99, 337, 707,
                          319, 24, 979, 326, 515, 64, 969, 289, 360, 132, 664,
                          972, 322, 1000, 474, 878, 672, 130, 269, 601, 862,
                          608, 896, 683, 65, 581, 22, 673, 192, 563, 813, 253,
                          657, 429, 730, 984, 292, 713, 375, 74, 19, 286, 557,
                          271, 229, 548, 97, 535, 44, 66, 184, 207, 517, 406,
                          644, 252, 162, 590, 859, 997, 222, 316, 188, 491, 425,
                          43, 679, 650, 26, 70, 375, 164, 961, 297, 78, 834,
                          570, 417, 410, 67, 138, 24, 673, 895, 581, 133, 800,
                          959, 577, 62, 954, 234, 40, 950, 86, 780, 338, 692,
                          159, 821, 69, 530, 163, 282, 966, 123, 249, 904, 383,
                          868, 982, 935, 2, 818, 51, 802, 667, 989, 518, 638,
                          16, 424, 784, 470, 665, 592, 175, 810, 711, 39, 802,
                          212, 487, 258, 264])
def test_snail_052(self):
self.assertEqual(snail(
[[20, 403, 806, 88, 823], [815, 182, 755, 134, 479],
[267, 452, 774, 27, 393], [680, 645, 139, 170, 600],
[345, 733, 858, 567, 786]]),
[20, 403, 806, 88, 823, 479, 393, 600, 786, 567, 858,
733, 345, 680, 267, 815, 182, 755, 134, 27, 170, 139,
645, 452, 774])
    def test_snail_053(self):
        # 15x15 matrix: expect the elements in clockwise "snail" spiral order.
        self.assertEqual(snail([[196, 838, 193, 215, 121, 793, 196, 949, 361,
                                 294, 910, 341, 538, 137, 777],
                                [733, 398, 687, 983, 435, 870, 229, 107, 407,
                                 772, 68, 915, 209, 859, 737],
                                [212, 594, 822, 823, 492, 867, 788, 511, 744,
                                 679, 68, 763, 663, 708, 835],
                                [207, 592, 305, 579, 378, 864, 922, 874, 424,
                                 364, 237, 930, 250, 343, 516],
                                [817, 144, 317, 932, 246, 346, 160, 676, 51,
                                 860, 889, 532, 902, 60, 300],
                                [132, 26, 383, 247, 812, 338, 673, 679, 88, 254,
                                 502, 553, 165, 334, 186],
                                [59, 683, 976, 614, 311, 493, 17, 433, 171, 254,
                                 478, 430, 6, 238, 216],
                                [70, 590, 861, 521, 494, 163, 91, 792, 848, 892,
                                 525, 313, 845, 455, 222],
                                [471, 326, 678, 405, 72, 724, 69, 630, 206, 767,
                                 730, 223, 860, 290, 477],
                                [848, 786, 184, 788, 614, 38, 213, 908, 258,
                                 752, 927, 756, 780, 835, 260],
                                [240, 604, 469, 663, 791, 671, 405, 848, 731,
                                 335, 905, 129, 239, 679, 516],
                                [28, 935, 400, 783, 206, 777, 836, 627, 32, 475,
                                 736, 206, 469, 495, 543],
                                [271, 429, 63, 55, 402, 237, 622, 711, 443, 603,
                                 307, 107, 892, 627, 360],
                                [265, 323, 177, 700, 4, 43, 396, 551, 646, 392,
                                 735, 686, 784, 445, 603],
                                [807, 589, 84, 393, 478, 843, 317, 717, 678,
                                 341, 257, 31, 498, 454, 260]]),
                         [196, 838, 193, 215, 121, 793, 196, 949, 361, 294, 910,
                          341, 538, 137, 777, 737, 835, 516, 300, 186, 216, 222,
                          477, 260, 516, 543, 360, 603, 260, 454, 498, 31, 257,
                          341, 678, 717, 317, 843, 478, 393, 84, 589, 807, 265,
                          271, 28, 240, 848, 471, 70, 59, 132, 817, 207, 212,
                          733, 398, 687, 983, 435, 870, 229, 107, 407, 772, 68,
                          915, 209, 859, 708, 343, 60, 334, 238, 455, 290, 835,
                          679, 495, 627, 445, 784, 686, 735, 392, 646, 551, 396,
                          43, 4, 700, 177, 323, 429, 935, 604, 786, 326, 590,
                          683, 26, 144, 592, 594, 822, 823, 492, 867, 788, 511,
                          744, 679, 68, 763, 663, 250, 902, 165, 6, 845, 860,
                          780, 239, 469, 892, 107, 307, 603, 443, 711, 622, 237,
                          402, 55, 63, 400, 469, 184, 678, 861, 976, 383, 317,
                          305, 579, 378, 864, 922, 874, 424, 364, 237, 930, 532,
                          553, 430, 313, 223, 756, 129, 206, 736, 475, 32, 627,
                          836, 777, 206, 783, 663, 788, 405, 521, 614, 247, 932,
                          246, 346, 160, 676, 51, 860, 889, 502, 478, 525, 730,
                          927, 905, 335, 731, 848, 405, 671, 791, 614, 72, 494,
                          311, 812, 338, 673, 679, 88, 254, 254, 892, 767, 752,
                          258, 908, 213, 38, 724, 163, 493, 17, 433, 171, 848,
                          206, 630, 69, 91, 792])
def test_snail_054(self):
self.assertEqual(snail(
[[680, 28, 574, 89, 186, 359], [110, 422, 21, 950, 715, 79],
[344, 688, 686, 338, 239, 840], [320, 321, 492, 418, 905, 628],
[684, 383, 704, 429, 457, 932],
| |
str
:param operation: Operation type: Read, write, delete, etc.
:type operation: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
}
def __init__(self, **kwargs):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
class OperationListResult(Model):
    """The response to a Microsoft.Insights list-operations request.

    Carries one page of supported operations together with the link, if any,
    that retrieves the next page.

    :param value: List of operations supported by the Microsoft.Insights
     provider.
    :type value: list[~azure.mgmt.monitor.v2015_04_01.models.Operation]
    :param next_link: URL to get the next set of operation list results if
     there are any.
    :type next_link: str
    """

    # Maps Python attribute names to their wire-format keys and types.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(OperationListResult, self).__init__(**kwargs)
        # Both properties are optional; absent keyword arguments become None.
        self.value = kwargs.get('value')
        self.next_link = kwargs.get('next_link')
class Recurrence(Model):
    """The repeating times at which a schedule profile begins.

    This element is not used if the FixedDate element is used instead.

    All required parameters must be populated in order to send to Azure.

    :param frequency: Required. The recurrence frequency, i.e. how often the
     schedule profile should take effect. This value must be Week, meaning
     each week will have the same set of profiles. For example, to set a
     daily schedule, set **schedule** to every day of the week; the frequency
     property then specifies that the schedule is repeated weekly. Possible
     values include: 'None', 'Second', 'Minute', 'Hour', 'Day', 'Week',
     'Month', 'Year'
    :type frequency: str or
     ~azure.mgmt.monitor.v2015_04_01.models.RecurrenceFrequency
    :param schedule: Required. The scheduling constraints for when the
     profile begins.
    :type schedule: ~azure.mgmt.monitor.v2015_04_01.models.RecurrentSchedule
    """

    # Both fields are mandatory; enforced at serialization time.
    _validation = {
        'frequency': {'required': True},
        'schedule': {'required': True},
    }

    # Maps Python attribute names to their wire-format keys and types.
    _attribute_map = {
        'frequency': {'key': 'frequency', 'type': 'RecurrenceFrequency'},
        'schedule': {'key': 'schedule', 'type': 'RecurrentSchedule'},
    }

    def __init__(self, **kwargs):
        super(Recurrence, self).__init__(**kwargs)
        self.frequency = kwargs.get('frequency')
        self.schedule = kwargs.get('schedule')
class RecurrentSchedule(Model):
    """The scheduling constraints for when the profile begins.
    All required parameters must be populated in order to send to Azure.
    :param time_zone: Required. the timezone for the hours of the profile.
    Some examples of valid time zones are: Dateline Standard Time, UTC-11,
    Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time
    (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain
    Standard Time (Mexico), Mountain Standard Time, Central America Standard
    Time, Central Standard Time, Central Standard Time (Mexico), Canada
    Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US
    Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time,
    Atlantic Standard Time, Central Brazilian Standard Time, SA Western
    Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E.
    South America Standard Time, Argentina Standard Time, SA Eastern Standard
    Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard
    Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde
    Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich
    Standard Time, W. Europe Standard Time, Central Europe Standard Time,
    Romance Standard Time, Central European Standard Time, W. Central Africa
    Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard
    Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time,
    E. Europe Standard Time, South Africa Standard Time, FLE Standard Time,
    Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time,
    Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus
    Standard Time, Russian Standard Time, E. Africa Standard Time, Iran
    Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia
    Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus
    Standard Time, Afghanistan Standard Time, West Asia Standard Time,
    Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time,
    Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time,
    Bangladesh Standard Time, N. Central Asia Standard Time, Myanmar Standard
    Time, SE Asia Standard Time, North Asia Standard Time, China Standard
    Time, North Asia East Standard Time, Singapore Standard Time, W. Australia
    Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo
    Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia
    Standard Time, AUS Central Standard Time, E. Australia Standard Time, AUS
    Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time,
    Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10,
    Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard
    Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard
    Time, Samoa Standard Time, Line Islands Standard Time
    :type time_zone: str
    :param days: Required. the collection of days that the profile takes
    effect on. Possible values are Sunday through Saturday.
    :type days: list[str]
    :param hours: Required. A collection of hours that the profile takes
    effect on. Values supported are 0 to 23 on the 24-hour clock (AM/PM times
    are not supported).
    :type hours: list[int]
    :param minutes: Required. A collection of minutes at which the profile
    takes effect at.
    :type minutes: list[int]
    """
    # All four fields must be present before serialization.
    _validation = {
        'time_zone': {'required': True},
        'days': {'required': True},
        'hours': {'required': True},
        'minutes': {'required': True},
    }
    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        'time_zone': {'key': 'timeZone', 'type': 'str'},
        'days': {'key': 'days', 'type': '[str]'},
        'hours': {'key': 'hours', 'type': '[int]'},
        'minutes': {'key': 'minutes', 'type': '[int]'},
    }
    def __init__(self, **kwargs):
        super(RecurrentSchedule, self).__init__(**kwargs)
        # Required by Azure, but the generated model still defaults them to None;
        # validation happens at serialization time via _validation.
        self.time_zone = kwargs.get('time_zone', None)
        self.days = kwargs.get('days', None)
        self.hours = kwargs.get('hours', None)
        self.minutes = kwargs.get('minutes', None)
class ScaleAction(Model):
    """Parameters describing a single autoscale scaling action.

    All required parameters must be populated in order to send to Azure.

    :param direction: Required. the scale direction. Whether the scaling
     action increases or decreases the number of instances. Possible values
     include: 'None', 'Increase', 'Decrease'
    :type direction: str or
     ~azure.mgmt.monitor.v2015_04_01.models.ScaleDirection
    :param type: Required. the type of action that should occur when the
     scale rule fires. Possible values include: 'ChangeCount',
     'PercentChangeCount', 'ExactCount'
    :type type: str or ~azure.mgmt.monitor.v2015_04_01.models.ScaleType
    :param value: the number of instances that are involved in the scaling
     action. This value must be 1 or greater. The default value is 1.
     Default value: "1" .
    :type value: str
    :param cooldown: Required. the amount of time to wait since the last
     scaling action before this action occurs. It must be between 1 week and
     1 minute in ISO 8601 format.
    :type cooldown: timedelta
    """

    # 'value' is the only optional field.
    _validation = {
        'direction': {'required': True},
        'type': {'required': True},
        'cooldown': {'required': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        'direction': {'key': 'direction', 'type': 'ScaleDirection'},
        'type': {'key': 'type', 'type': 'ScaleType'},
        'value': {'key': 'value', 'type': 'str'},
        'cooldown': {'key': 'cooldown', 'type': 'duration'},
    }

    def __init__(self, **kwargs):
        super(ScaleAction, self).__init__(**kwargs)
        self.direction = kwargs.get('direction')
        self.type = kwargs.get('type')
        # Azure transports the instance delta as a string; the service default is "1".
        self.value = kwargs.get('value', "1")
        self.cooldown = kwargs.get('cooldown')
class ScaleCapacity(Model):
    """Instance-count limits usable during an autoscale profile.

    All required parameters must be populated in order to send to Azure.

    :param minimum: Required. the minimum number of instances for the
     resource.
    :type minimum: str
    :param maximum: Required. the maximum number of instances for the
     resource. The actual maximum number of instances is limited by the
     cores that are available in the subscription.
    :type maximum: str
    :param default: Required. the number of instances that will be set if
     metrics are not available for evaluation. The default is only used if
     the current instance count is lower than the default.
    :type default: str
    """

    # Every field must be present before serialization.
    _validation = {
        'minimum': {'required': True},
        'maximum': {'required': True},
        'default': {'required': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        'minimum': {'key': 'minimum', 'type': 'str'},
        'maximum': {'key': 'maximum', 'type': 'str'},
        'default': {'key': 'default', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ScaleCapacity, self).__init__(**kwargs)
        # All instance counts travel over the wire as strings.
        self.minimum = kwargs.get('minimum')
        self.maximum = kwargs.get('maximum')
        self.default = kwargs.get('default')
class ScaleRule(Model):
    """Pairs a metric trigger with the scaling action it fires.

    All required parameters must be populated in order to send to Azure.

    :param metric_trigger: Required. the trigger that results in a scaling
     action.
    :type metric_trigger:
     ~azure.mgmt.monitor.v2015_04_01.models.MetricTrigger
    :param scale_action: Required. the parameters for the scaling action.
    :type scale_action: ~azure.mgmt.monitor.v2015_04_01.models.ScaleAction
    """

    # Both halves of the rule must be present before serialization.
    _validation = {
        'metric_trigger': {'required': True},
        'scale_action': {'required': True},
    }

    # Attribute name -> wire key and (de)serialization type.
    _attribute_map = {
        'metric_trigger': {'key': 'metricTrigger', 'type': 'MetricTrigger'},
        'scale_action': {'key': 'scaleAction', 'type': 'ScaleAction'},
    }

    def __init__(self, **kwargs):
        super(ScaleRule, self).__init__(**kwargs)
        self.metric_trigger = kwargs.get('metric_trigger')
        self.scale_action = kwargs.get('scale_action')
class SenderAuthorization(Model):
"""the authorization used by the user who has performed the operation that led
to this event. | |
* self.radius)))
else:
path.append((Path.LINETO, [x, y]))
path.extend([(Path.LINETO, [x, y + sign * length]),
(Path.LINETO, [x - self.shoulder,
y + sign * length]),
(Path.LINETO, tip),
(Path.LINETO, [x + self.shoulder - flow,
y + sign * length]),
(Path.LINETO, [x - flow, y + sign * length])])
path.extend(self._arc(quadrant=quadrant,
cw=angle == DOWN,
radius=self.radius - flow,
center=(x - self.radius,
y + sign * self.radius)))
path.append((Path.LINETO, [x - flow, y + sign * flow]))
label_location = [tip[0], tip[1] + sign * self.offset]
return tip, label_location
def _revert(self, path, first_action=Path.LINETO):
    """
    Reverse *path* so that it can still be drawn: each code describes the
    step arriving *from the previous point*, so a plain ``path[::-1]``
    would pair codes with the wrong vertices.  Instead the vertices are
    reversed and the codes are shifted by one position, with
    *first_action* seeding the new first step.
    """
    # Vertices simply reverse; codes shift by one (the in-place tuple
    # rewrite alternative is impossible because tuples are immutable).
    vertices = [point for _, point in path[::-1]]
    actions = [first_action] + [action for action, _ in path[:0:-1]]
    return list(zip(actions, vertices))
@docstring.dedent_interpd
def add(self, patchlabel='', flows=None, orientations=None, labels='',
trunklength=1.0, pathlengths=0.25, prior=None, connect=(0, 0),
rotation=0, **kwargs):
"""
Add a simple Sankey diagram with flows at the same hierarchical level.
Parameters
----------
patchlabel : str
Label to be placed at the center of the diagram.
Note that *label* (not *patchlabel*) can be passed as keyword
argument to create an entry in the legend.
flows : list of float
Array of flow values. By convention, inputs are positive and
outputs are negative.
Flows are placed along the top of the diagram from the inside out
in order of their index within *flows*. They are placed along the
sides of the diagram from the top down and along the bottom from
the outside in.
If the sum of the inputs and outputs is
nonzero, the discrepancy will appear as a cubic Bezier curve along
the top and bottom edges of the trunk.
orientations : list of {-1, 0, 1}
List of orientations of the flows (or a single orientation to be
used for all flows). Valid values are 0 (inputs from
the left, outputs to the right), 1 (from and to the top) or -1
(from and to the bottom).
labels : list of (str or None)
List of labels for the flows (or a single label to be used for all
flows). Each label may be *None* (no label), or a labeling string.
If an entry is a (possibly empty) string, then the quantity for the
corresponding flow will be shown below the string. However, if
the *unit* of the main diagram is None, then quantities are never
shown, regardless of the value of this argument.
trunklength : float
Length between the bases of the input and output groups (in
data-space units).
pathlengths : list of float
List of lengths of the vertical arrows before break-in or after
break-away. If a single value is given, then it will be applied to
the first (inside) paths on the top and bottom, and the length of
all other arrows will be justified accordingly. The *pathlengths*
are not applied to the horizontal inputs and outputs.
prior : int
Index of the prior diagram to which this diagram should be
connected.
connect : (int, int)
A (prior, this) tuple indexing the flow of the prior diagram and
the flow of this diagram which should be connected. If this is the
first diagram or *prior* is *None*, *connect* will be ignored.
rotation : float
Angle of rotation of the diagram in degrees. The interpretation of
the *orientations* argument will be rotated accordingly (e.g., if
*rotation* == 90, an *orientations* entry of 1 means to/from the
left). *rotation* is ignored if this diagram is connected to an
existing one (using *prior* and *connect*).
Returns
-------
Sankey
The current `.Sankey` instance.
Other Parameters
----------------
**kwargs
Additional keyword arguments set `matplotlib.patches.PathPatch`
properties, listed below. For example, one may want to use
``fill=False`` or ``label="A legend entry"``.
%(Patch)s
See Also
--------
Sankey.finish
"""
# Check and preprocess the arguments.
if flows is None:
flows = np.array([1.0, -1.0])
else:
flows = np.array(flows)
n = flows.shape[0] # Number of flows
if rotation is None:
rotation = 0
else:
# In the code below, angles are expressed in deg/90.
rotation /= 90.0
if orientations is None:
orientations = 0
try:
orientations = np.broadcast_to(orientations, n)
except ValueError:
raise ValueError(
f"The shapes of 'flows' {np.shape(flows)} and 'orientations' "
f"{np.shape(orientations)} are incompatible"
) from None
try:
labels = np.broadcast_to(labels, n)
except ValueError:
raise ValueError(
f"The shapes of 'flows' {np.shape(flows)} and 'labels' "
f"{np.shape(labels)} are incompatible"
) from None
if trunklength < 0:
raise ValueError(
"'trunklength' is negative, which is not allowed because it "
"would cause poor layout")
if abs(np.sum(flows)) > self.tolerance:
_log.info("The sum of the flows is nonzero (%f; patchlabel=%r); "
"is the system not at steady state?",
np.sum(flows), patchlabel)
scaled_flows = self.scale * flows
gain = sum(max(flow, 0) for flow in scaled_flows)
loss = sum(min(flow, 0) for flow in scaled_flows)
if prior is not None:
if prior < 0:
raise ValueError("The index of the prior diagram is negative")
if min(connect) < 0:
raise ValueError(
"At least one of the connection indices is negative")
if prior >= len(self.diagrams):
raise ValueError(
f"The index of the prior diagram is {prior}, but there "
f"are only {len(self.diagrams)} other diagrams")
if connect[0] >= len(self.diagrams[prior].flows):
raise ValueError(
"The connection index to the source diagram is {}, but "
"that diagram has only {} flows".format(
connect[0], len(self.diagrams[prior].flows)))
if connect[1] >= n:
raise ValueError(
f"The connection index to this diagram is {connect[1]}, "
f"but this diagram has only {n} flows")
if self.diagrams[prior].angles[connect[0]] is None:
raise ValueError(
f"The connection cannot be made, which may occur if the "
f"magnitude of flow {connect[0]} of diagram {prior} is "
f"less than the specified tolerance")
flow_error = (self.diagrams[prior].flows[connect[0]] +
flows[connect[1]])
if abs(flow_error) >= self.tolerance:
raise ValueError(
f"The scaled sum of the connected flows is {flow_error}, "
f"which is not within the tolerance ({self.tolerance})")
# Determine if the flows are inputs.
are_inputs = [None] * n
for i, flow in enumerate(flows):
if flow >= self.tolerance:
are_inputs[i] = True
elif flow <= -self.tolerance:
are_inputs[i] = False
else:
_log.info(
"The magnitude of flow %d (%f) is below the tolerance "
"(%f).\nIt will not be shown, and it cannot be used in a "
"connection.", i, flow, self.tolerance)
# Determine the angles of the arrows (before rotation).
angles = [None] * n
for i, (orient, is_input) in enumerate(zip(orientations, are_inputs)):
if orient == 1:
if is_input:
angles[i] = DOWN
elif not is_input:
# Be specific since is_input can be None.
angles[i] = UP
elif orient == 0:
if is_input is not None:
angles[i] = RIGHT
else:
if orient != -1:
raise ValueError(
f"The value of orientations[{i}] is {orient}, "
f"but it must be -1, 0, or 1")
if is_input:
angles[i] = UP
elif not is_input:
angles[i] = DOWN
# Justify the lengths of the paths.
if np.iterable(pathlengths):
if len(pathlengths) != n:
raise ValueError(
f"The lengths of 'flows' ({n}) and 'pathlengths' "
f"({len(pathlengths)}) are incompatible")
else: # Make pathlengths into a list.
urlength = pathlengths
ullength = pathlengths
lrlength = pathlengths
lllength = pathlengths
d = dict(RIGHT=pathlengths)
pathlengths = [d.get(angle, 0) for angle in angles]
# Determine the lengths of the top-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(zip(angles, are_inputs,
scaled_flows)):
if angle == DOWN and is_input:
pathlengths[i] = ullength
ullength += flow
elif angle == UP and not is_input:
pathlengths[i] = urlength
urlength -= flow # Flow is negative for outputs.
# Determine the lengths of the bottom-side arrows
# from the middle outwards.
for i, (angle, is_input, flow) in enumerate(reversed(list(zip(
angles, are_inputs, scaled_flows)))):
if angle == UP and is_input:
pathlengths[n - i - 1] = lllength
lllength += flow
elif angle == DOWN and not is_input:
pathlengths[n - i - 1] = | |
needs units
def __init__( self, energy, theta, U ) :
    # energy: incident energy; theta: temperature parameter; U: upper-limit parameter.
    self.energy = energy
    self.theta = theta
    self.U = U
    # Normalisation constant: indefinite integral of sqrt(E') exp( -E'/theta )
    # evaluated at energy - U, i.e. the integral of the unnormalised spectrum
    # over [ 0, energy - U ].
    self.norm = self.evaluateIndefiniteIntegral( energy - U )
@property
def domainMin( self ) :
    # Lowest outgoing energy of the spectrum.
    return( 0.0 )
@property
def domainMax( self ) :
    # NOTE(review): returns U, yet __init__ normalises over [ 0, energy - U ] —
    # confirm which upper limit is intended.
    return( self.U )
def evaluate( self, energy ) :
    # Normalised spectrum value sqrt(E') exp( -E'/theta ) / norm at outgoing energy *energy*.
    return( math.sqrt( energy ) * math.exp( -energy / self.theta ) / self.norm )
def evaluateIndefiniteIntegral( self, energy ) :
    """
    Antiderivative (from 0) of the unnormalised spectrum
    sqrt(E') exp( -E'/theta ), evaluated at the raw energy *energy*.
    """
    reduced = energy / self.theta
    root = math.sqrt( reduced )
    gaussianPart = 0.5 * math.sqrt( math.pi ) * math.erf( root )
    return( self.theta**1.5 * ( gaussianPart - root * math.exp( -reduced ) ) )
def integrate( self, energyMin, energyMax ) :
    """
    Integral of the normalised spectrum between *energyMin* and *energyMax*,
    with both limits clamped to [ 0, U ] first.

    Bug fix: evaluateIndefiniteIntegral expects a raw (un-reduced) energy —
    it divides by theta internally, exactly as __init__ does when computing
    self.norm.  The previous code divided the clamped limits by theta before
    passing them in, double-normalising them so that integrate( 0, U ) did
    not return 1.
    """
    energyMin = max( 0.0, min( energyMin, self.U ) )
    energyMax = max( 0.0, min( energyMax, self.U ) )
    return( ( self.evaluateIndefiniteIntegral( energyMax ) - self.evaluateIndefiniteIntegral( energyMin ) ) / self.norm )
class simpleMaxwellianFissionSpectrum( functionalBase ) :
    """
    Simple Maxwellian fission spectrum: f(E') ~ sqrt(E') exp( -E'/theta(E) ),
    with an incident-energy-dependent temperature theta and outgoing energies
    limited to [ 0, E - U ].
    """

    moniker = 'simpleMaxwellianFission'

    def __init__( self, U, thetas ) :
        # 7 selects this functional form in functionalBase (matches the
        # ENDF MF=5 LF=7 numbering — evaporation is 9, Watt is 11; confirm).
        functionalBase.__init__( self, 7, U, thetas )

    @property
    def domainUnit( self ) :
        # Energy unit, taken from the U parameter.
        return( str( self.U.unit ) )

    @property
    def theta( self ) :
        # Temperature parameter theta(E), stored as the first functional parameter.
        return( self.parameter1 )

    def averageEp( self, E ) :
        # Average outgoing energy <E'> at incident energy E.
        theta = self.parameter1.data.evaluate( E )
        a = ( E - self.U.value ) / theta
        # Series expansion for small a avoids round-off in the erf/exp ratio below.
        if( a < 1e-4 ) : return( theta * a * ( 1575. - a * ( 180. + 8 * a ) ) / 2625. )
        sqrt_a = math.sqrt( a )
        exp_a = math.exp( -a )
        erf_sqrt_a = math.sqrt( math.pi ) * math.erf( sqrt_a )
        return( theta * ( 0.75 * erf_sqrt_a - sqrt_a * ( 1.5 + a ) * exp_a ) / ( 0.5 * erf_sqrt_a - sqrt_a * exp_a ) )

    def sqrtEp_AverageAtE( self, E ) :
        # Average of sqrt(E') at incident energy E.
        theta = self.parameter1.data.evaluate( E )
        a = ( E - self.U.value ) / theta
        # Series expansion for small a avoids round-off.
        if( a < 1e-4 ) : return( math.sqrt( theta * a ) * ( 2100. - a * ( 140. + 9. * a ) ) / 2800. )
        sqrt_a = math.sqrt( a )
        exp_a = math.exp( -a )
        erf_sqrt_a = math.sqrt( math.pi ) * math.erf( sqrt_a )
        return( math.sqrt( theta ) * ( 1 - ( 1. + a ) * exp_a ) / ( 0.5 * erf_sqrt_a - sqrt_a * exp_a ) )

    def energySpectrumAtEnergy( self, energyIn ) :
        # Outgoing-energy spectrum at incident energy energyIn as an XYs1d,
        # built by adaptive pointwise interpolation on [ 0, energyIn - U ].

        def A( energyOut, self ) :
            # Callback for createFromFunction; note the (x, parameters) argument order.
            return( self.evaluate( energyOut ) )

        spectrum1 = self.evaluate( energyIn )
        spectrum2 = XYsModule.pointwiseXY_C.createFromFunction( [ 0.0, energyIn - self.U.value ], A, spectrum1, 1e-3, 12 )
        return( XYs1d( spectrum2, axes = defaultAxes( energyUnit = self.domainUnit ) ) )

    def evaluate( self, energy ) :
        # 1d spectrum object at incident energy *energy*.
        return( simpleMaxwellianFissionSpectrum1d( energy, self.theta.data.evaluate( energy ), self.U.value ) )

    def toPointwise_withLinearXYs( self, **kwargs ) :
        # Converts self to a pointwise representation with linear interpolation.

        def evaluateAtX( self, x ) :
            # Unnormalised integrand sqrt(x) exp( -x/theta ); p1 is theta here.
            return( math.sqrt( x ) * math.exp( -x / self.p1 ) )

        ef = energyFunctionalDataToPointwise( self, evaluateAtX )
        return( ef.toPointwise_withLinearXYs( **kwargs ) )

    @staticmethod
    def parseXMLNode( MFelement, xPath, linkData ) :
        # Builds a simpleMaxwellianFissionSpectrum from its XML node.
        # xPath tracks the current location for error reporting.
        xPath.append( MFelement.tag )
        theta_ = theta.parseXMLNode( MFelement.find(theta.moniker), xPath, linkData )
        U = physicalQuantityModule.U.parseXMLNode( MFelement.find( 'U' ), xPath, linkData )
        SMF = simpleMaxwellianFissionSpectrum( U, theta_ )
        xPath.pop()
        return SMF
class evaporationSpectrum1d :     # FIXME, needs units
    """
    Evaporation spectrum f(E') = E' exp( -E'/theta ) / norm at one fixed
    incident energy, normalised so the unnormalised form integrates to
    *norm* over [ 0, energy - U ].
    """

    def __init__( self, energy, theta, U ) :

        self.energy = energy
        self.theta = theta
        self.U = U
        reduced = ( energy - U ) / theta
        # Closed form of the integral of E' exp( -E'/theta ) over [ 0, energy - U ].
        self.norm = theta * theta * ( 1.0 - math.exp( -reduced ) * ( 1 + reduced ) )

    @property
    def domainMin( self ) :
        """Lowest outgoing energy."""

        return( 0.0 )

    @property
    def domainMax( self ) :
        """Highest outgoing energy (NOTE(review): returns U while norm covers energy - U — confirm)."""

        return( self.U )

    def evaluate( self, energy ) :
        """Normalised spectrum value at outgoing energy *energy*."""

        return( math.exp( -energy / self.theta ) * energy / self.norm )

    def integrate( self, energyMin, energyMax ) :
        """Integral of the normalised spectrum between the limits, clamped to [ 0, U ]."""

        lower = max( 0.0, min( energyMin, self.U ) ) / self.theta
        upper = max( 0.0, min( energyMax, self.U ) ) / self.theta
        antiderivativeAtLower = ( 1 + lower ) * math.exp( -lower )
        antiderivativeAtUpper = ( 1 + upper ) * math.exp( -upper )
        return( self.theta * self.theta * ( antiderivativeAtLower - antiderivativeAtUpper ) / self.norm )
class evaporationSpectrum( functionalBase ) :
    """
    Evaporation spectrum: f(E') ~ E' exp( -E'/theta(E) ), with an
    incident-energy-dependent temperature theta and outgoing energies
    limited to [ 0, E - U ].
    """

    moniker = 'evaporation'

    def __init__( self, U, thetas ) :
        # 9 selects this functional form in functionalBase (matches the
        # ENDF MF=5 LF=9 numbering — confirm).
        functionalBase.__init__( self, 9, U, thetas )

    @property
    def domainUnit( self ) :
        # Energy unit, taken from the U parameter.
        return( str( self.U.unit ) )

    @property
    def theta( self ) :
        # Temperature parameter theta(E), stored as the first functional parameter.
        return( self.parameter1 )

    def averageEp( self, E ) :
        # Average outgoing energy <E'> at incident energy E.
        if( isinstance( self.parameter1.data, regionsModule.regions1d ) ) :
            # theta is piecewise: pick the region whose upper x-limit covers E.
            for region in self.parameter1.data :
                if( E <= region[-1][0] ) : break
            theta = region.evaluate( E )
        else :
            theta = self.parameter1.data.evaluate( E )
        a = ( E - self.U.value ) / theta
        # Series expansion for small a avoids round-off in the expression below.
        if( a < 1e-4 ) : return( theta * a * ( 180. - a * ( 15. + a ) ) / 270. )
        exp_a = math.exp( -a )
        return( theta * ( 2. - a**2 * exp_a / ( 1. - ( 1. + a ) * exp_a ) ) )

    def evaluate( self, energy ) :
        # 1d spectrum object at incident energy *energy*.
        return( evaporationSpectrum1d( energy, self.theta.data.evaluate( energy ), self.U.value ) )

    def energySpectrumAtEnergy( self, energyIn ) :
        # Outgoing-energy spectrum at incident energy energyIn as an XYs1d,
        # built by adaptive pointwise interpolation on [ 0, energyIn - U ].

        def A( energyOut, self ) :
            # Callback for createFromFunction; note the (x, parameters) argument order.
            return( self.evaluate( energyOut ) )

        spectrum1 = self.evaluate( energyIn )
        spectrum2 = XYsModule.pointwiseXY_C.createFromFunction( [ 0.0, energyIn - self.U.value ], A, spectrum1, 1e-3, 12 )
        return( XYs1d( spectrum2, axes = defaultAxes( energyUnit = self.domainUnit ) ) )

    def sqrtEp_AverageAtE( self, E ) :
        # Average of sqrt(E') at incident energy E.
        theta = self.parameter1.data.evaluate( E )
        a = ( E - self.U.value ) / theta
        # Series expansion for small a avoids round-off.
        if( a < 1e-4 ) : return( math.sqrt( theta * a ) * ( 252. - a * ( 12. + a ) ) / 315. )
        sqrt_a = math.sqrt( a )
        exp_a = math.exp( -a )
        # 1.32934038817913702 = 0.75 * sqrt( pi ).
        return( math.sqrt( theta ) * ( 1.32934038817913702 * math.erf( sqrt_a ) - sqrt_a * ( 1.5 + a ) * exp_a ) / ( 1. - ( 1. + a ) * exp_a ) )

    def toPointwise_withLinearXYs( self, **kwargs ) :
        # Converts self to a pointwise representation with linear interpolation.

        def evaluateAtX( self, x ) :
            # Unnormalised integrand x exp( -x/theta ); p1 is theta here.
            return( x * math.exp( -x / self.p1 ) )

        ef = energyFunctionalDataToPointwise( self, evaluateAtX )
        return( ef.toPointwise_withLinearXYs( **kwargs ) )

    @staticmethod
    def parseXMLNode( evapElement, xPath, linkData ) :
        # Builds an evaporationSpectrum from its XML node.
        # xPath tracks the current location for error reporting.
        xPath.append( evapElement.tag )
        theta_ = theta.parseXMLNode( evapElement.find(theta.moniker), xPath, linkData )
        U = physicalQuantityModule.U.parseXMLNode( evapElement.find( 'U' ), xPath, linkData )
        ES = evaporationSpectrum( U, theta_ )
        xPath.pop()
        return ES
class WattSpectrum( functionalBase ) :
moniker = 'Watt'
def __init__( self, U, a, b ) :
functionalBase.__init__( self, 11, U, a, b )
def averageEp( self, E ) :
a, b = self.parameter1.data.evaluate( E ), self.parameter2.data.evaluate( E )
domainMax_a = ( E - self.U.value ) / a
domainMax_b = math.sqrt( b * ( E - self.U.value ) )
ab = a * b
sqrt_ab = math.sqrt( ab )
I = 0.25 * math.sqrt( math.pi * ab ) * math.exp( 0.25 * ab ) * \
( math.erf( math.sqrt( domainMax_a ) - 0.5 * sqrt_ab ) + math.erf( math.sqrt( domainMax_a ) + 0.5 * sqrt_ab ) ) \
- math.exp( -domainMax_a ) * math.sinh( domainMax_b )
EI = a * math.sqrt( math.pi * ab ) * ( ab + 6 ) * math.exp( 0.25 * ab ) * \
( math.erf( math.sqrt( domainMax_a ) - 0.5 * sqrt_ab ) + math.erf( math.sqrt( domainMax_a ) + 0.5 * sqrt_ab ) ) \
- 0.25 * a * math.exp( -domainMax_a ) * math.sinh( domainMax_b ) * ( 2. * domainMax_b + ab + 4. + 4. * domainMax_a )
return( EI / ( 16 * I ) )
def energySpectrumAtEnergy( self, energyIn ) :
return( self.evaluate( energyIn ) )
def evaluate( self, energyIn, extrapolation = standardsModule.noExtrapolationToken ) :
"""Returns an XYs1d instance of self evaluated at the incident energy **energyIn**."""
def A( energyOut, parameters ) :
a_parameter, b_parameter = parameters
return( math.exp( -energyOut / a_parameter ) * math.sinh( math.sqrt( b_parameter * energyOut ) ) )
energyUnit = self.parameter1.data.domainUnit
energyOut = PQUModule.PQU( 1e-8, 'MeV' ).getValueAs( energyUnit )
energyOutMax = | |
Else, inflow is 0.
self.i[m] = (self.s[m] - self.s_c[m, :].sum()) / self.sf[m,m] # allow for outflow during first year by rescaling with 1/sf[m,m]
# Add new inflow to stock and determine future decay of new age-cohort
self.s_c[m::, m] = self.i[m] * self.sf[m::, m]
self.o_c[m, m] = self.i[m] * (1 - self.sf[m, m])
# NOTE: This method of negative inflow correction is only one of many plausible methods of increasing the outflow to keep matching stock levels.
# It assumes that the surplus stock is removed in the year that it becomes obsolete. Each cohort loses the same fraction.
# Modellers need to try out whether this method leads to justifiable results.
# In some situations it is better to change the lifetime assumption than using the NegativeInflowCorrect option.
return self.s_c, self.o_c, self.i
else:
# No lifetime distribution specified
return None, None, None
else:
# No stock specified
return None, None, None
def compute_stock_driven_model_initialstock(self,InitialStock,SwitchTime,NegativeInflowCorrect = False):
    """ With given total stock and lifetime distribution, the method builds the stock by cohort and the inflow.
    The extra parameter InitialStock is a vector that contains the age structure of the stock at the END of the year Switchtime -1 = t0.
    ***
    Convention 1: Stocks are measured AT THE END OF THE YEAR. Flows occur DURING THE YEAR.
    Convention 2: The model time t spans both historic and future age-cohorts, and the index SwitchTime -1 indicates the first future age-cohort.
    Convention 3: SwitchTime = len(InitialStock) + 1, that means SwitchTime is counted starting from 1 and not 0.
    Convention 4: The future stock time series has 0 as its first len(InitialStock) elements.
    ***
    In the year SwitchTime the model switches from the historic stock to the stock-driven approach.
    The year SwitchTime is the first year with the stock-driven approach.
    InitialStock contains the age-cohort composition of the stock AT THE END of year SwitchTime -1.
    InitialStock must have length = SwitchTime -1.
    For the option "NegativeInflowCorrect", see the explanations for the method compute_stock_driven_model(self, NegativeInflowCorrect = True).
    NegativeInflowCorrect only affects the future stock time series and works exactly as for the stock-driven model without initial stock.

    Returns (s_c, o_c, i): stock by cohort, outflow by cohort, and inflow,
    or (None, None, None) if the total stock or the lifetime model is missing.
    """
    if self.s is not None:
        if self.lt is not None:
            # s_c[t, c]: stock at end of year t of the cohort that entered in year c.
            self.s_c = np.zeros((len(self.t), len(self.t)))
            self.s_c[SwitchTime -2,0:SwitchTime-1] = InitialStock # assign initialstock to stock-by-cohort variable at END OF YEAR SwitchTime (here -1, because indexing starts at 0.).
            # o_c[t, c]: outflow during year t from cohort c.
            self.o_c = np.zeros((len(self.t), len(self.t)))
            self.i = np.zeros(len(self.t))
            # construct the sdf of a product of cohort tc leaving the stock in year t
            self.compute_sf() # Computes sf if not present already.
            # Construct historic inflows by de-convolving the initial stock with the survival function.
            for c in range(0,SwitchTime -1):
                if self.sf[SwitchTime -2,c] != 0:
                    self.i[c] = InitialStock[c] / self.sf[SwitchTime -2,c]
                else:
                    # Cohort has fully decayed by SwitchTime-2; keep the raw value as inflow.
                    self.i[c] = InitialStock[c]
            # Add stock from historic inflow
            self.s_c[:,0:SwitchTime-1] = np.einsum('tc,c->tc',self.sf[:,0:SwitchTime-1],self.i[0:SwitchTime-1])
            # calculate historic outflow
            for m in range(0,SwitchTime-1):
                self.o_c[m, m] = self.i[m] * (1 - self.sf[m, m])
                self.o_c[m+1::,m] = self.s_c[m:-1,m] - self.s_c[m+1::,m]
            # for future: year-by-year computation, starting from SwitchTime
            if NegativeInflowCorrect is False:
                for m in range(SwitchTime-1, len(self.t)): # for all years m, starting at SwitchTime
                    # 1) Determine inflow from mass balance:
                    if self.sf[m,m] != 0: # Else, inflow is 0.
                        self.i[m] = (self.s[m] - self.s_c[m, :].sum()) / self.sf[m,m] # allow for outflow during first year by rescaling with 1/sf[m,m]
                    # NOTE: The stock-driven method may lead to negative inflows, if the stock development is in contradiction with the lifetime model.
                    # In such situations the lifetime assumption must be changed, either by directly using different lifetime values or by adjusting the outflows,
                    # cf. the option NegativeInflowCorrect in the method compute_stock_driven_model.
                    # 2) Add new inflow to stock and determine future decay of new age-cohort
                    self.s_c[m::, m] = self.i[m] * self.sf[m::, m]
                    self.o_c[m, m] = self.i[m] * (1 - self.sf[m, m])
                    self.o_c[m+1::,m] = self.s_c[m:-1,m] - self.s_c[m+1::,m]
            if NegativeInflowCorrect is True:
                for m in range(SwitchTime-1, len(self.t)): # for all years m, starting at SwitchTime
                    self.o_c[m, 0:m] = self.s_c[m-1, 0:m] - self.s_c[m, 0:m] # outflow table is filled row-wise, for each year m.
                    # 1) Determine test inflow from mass balance:
                    InflowTest = self.s[m] - self.s_c[m, :].sum()
                    if InflowTest < 0:
                        # Prescribed stock is below what survives from earlier cohorts: no inflow,
                        # and the surplus is removed proportionally from all existing cohorts.
                        Delta = -1 * InflowTest # Delta > 0!
                        self.i[m] = 0 # Set inflow to 0 and distribute mass balance gap onto remaining cohorts:
                        if self.s_c[m,:].sum() != 0:
                            Delta_percent = Delta / self.s_c[m,:].sum()
                            # Distribute gap equally across all cohorts (each cohort is adjusted by the same %, based on surplus with regards to the prescribed stock)
                            # Delta_percent is a % value <= 100%
                        else:
                            Delta_percent = 0 # stock in this year is already zero, method does not work in this case.
                            # NOTE(review): Delta_percent is then a plain int, and the
                            # .copy() call below would raise AttributeError — confirm this
                            # branch is unreachable in practice.
                        # correct for outflow and stock in current and future years
                        # adjust the entire stock AFTER year m as well, stock is lowered in year m, so future cohort survival also needs to decrease.
                        self.o_c[m, :] = self.o_c[m, :] + (self.s_c[m, :] * Delta_percent).copy() # increase outflow according to the lost fraction of the stock, based on Delta_c
                        self.s_c[m::,0:m] = self.s_c[m::,0:m] * (1-Delta_percent.copy()) # shrink future description of stock from previous age-cohorts by factor Delta_percent in current AND future years.
                    else:
                        if self.sf[m,m] != 0: # Else, inflow is 0.
                            self.i[m] = (self.s[m] - self.s_c[m, :].sum()) / self.sf[m,m] # allow for outflow during first year by rescaling with 1/sf[m,m]
                        # 2) Add new inflow to stock and determine future decay of new age-cohort
                        self.s_c[m::, m] = self.i[m] * self.sf[m::, m]
                        self.o_c[m, m] = self.i[m] * (1 - self.sf[m, m])
            # Add historic stock series to total stock s:
            self.s[0:SwitchTime-1]= self.s_c[0:SwitchTime-1,:].sum(axis =1).copy()
            return self.s_c, self.o_c, self.i
        else:
            # No lifetime distribution specified
            return None, None, None
    else:
        # No stock specified
        return None, None, None
def compute_stock_driven_model_initialstock_typesplit(self,FutureStock,InitialStock,SFArrayCombined,TypeSplit):
"""
With given total future stock and lifetime distribution, the method builds the stock by cohort and the inflow.
The age structure of the initial stock is given for each technology, and a type split of total inflow into different technology types is given as well.
SPECIFICATION: Stocks are always measured AT THE END of the discrete time interval.
Indices:
t: time: Entire time frame: from earliest age-cohort to latest model year.
c: age-cohort: same as time.
T: Switch time: DEFINED as first year where historic stock is NOT present, = last year where historic stock is present +1.
Switchtime is calculated internally, by subtracting the length of the historic stock from the total model length.
g: product type
Data:
FutureStock[t], total future stock at end of each year, starting at T
InitialStock[c,g], 0...T-1;0...T-1, stock at the end of T-1, by age-cohort c, ranging from 0...T-1, and product type g
c-dimension has full length, all future years must be 0.
SFArrayCombined[t,c,g], Survival function of age-cohort c at end of year t for product type g
this array spans both historic and future age-cohorts
Typesplit[t,g], splits total inflow into product types for future years
The extra parameter InitialStock is a vector that contains the age structure of the stock at time t0, and it covers as many historic cohorts as there are elements in it.
In the year SwitchTime the model switches from the historic stock to the stock-driven approach.
Only future years, i.e., years after SwitchTime, are computed and returned.
The InitialStock is a vector of the age-cohort composition of the stock at SwitchTime, with length SwitchTime.
The parameter TypeSplit splits the total inflow into Ng types. """
if self.s is not None:
if self.lt is not None:
SwitchTime = SFArrayCombined.shape[0] - FutureStock.shape[0]
Ntt = SFArrayCombined.shape[0] # Total no of years
Nt0 = FutureStock.shape[0] # No of future years
Ng = SFArrayCombined.shape[2] | |
# -*- coding: UTF-8 -*-
"""
..
---------------------------------------------------------------------
___ __ __ __ ___
/ | \ | \ | \ / the automatic
\__ |__/ |__/ |___| \__ annotation and
\ | | | | \ analysis
___/ | | | | ___/ of speech
http://www.sppas.org/
Use of this software is governed by the GNU Public License, version 3.
SPPAS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SPPAS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
This banner notice must not be removed.
---------------------------------------------------------------------
src.anndata.aio.transcriber.py
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Transcriber is a tool for assisting the manual annotation of speech signals.
It provides a graphical user interface for segmenting long duration speech
recordings, transcribing them, and labeling speech turns, topic changes and
acoustic conditions.
It is more specifically designed for the annotation of broadcast news
recordings.
http://trans.sourceforge.net
"""
import codecs
import xml.etree.cElementTree as ET
from .basetrs import sppasBaseIO
from ..anndataexc import AnnDataTypeError
from ..media import sppasMedia
from ..ctrlvocab import sppasCtrlVocab
from ..ann.annotation import sppasAnnotation
from ..ann.annlocation import sppasLocation
from ..ann.annlocation import sppasPoint
from ..ann.annlocation import sppasInterval
from ..ann.annlabel import sppasLabel
from ..ann.annlabel import sppasTag
from .aioutils import format_labels
# ---------------------------------------------------------------------------
# Default tier name for turns that have no speaker assigned.
NO_SPK_TIER = "Trans-NoSpeaker"

# list of Transcriber noise events with their conversion into SPPAS convention.
# Keys are the event names found in TRS files; values are the SPPAS
# transcription-convention tokens (the values show "*" for noises and
# "@" for laughter items).
NOISE_EVENTS = {
    "r": "* {respiration}",
    "i": "* {inspiration}",
    "e": "* {exhalation}",
    "n": "* {sniffing}",
    "pf": "* {breath}",
    "bb": "* {mouth noise}",
    "bg": "* {throaty noise}",
    "tx": "* {coughing, sneeze}",
    "sif": "{whistling}",
    "b": "* {undetermined}",
    "conv": "* {background conversations}",
    "pap": "* {wrinkling of papers}",
    "shh": "* {electric blast}",
    "mic": "* {micro}",
    "toux en fond": "* {background cough}",
    "indicatif": "* {indicative signal}",
    "jingle": "* {jingle}",
    "top": "* {top}",
    "musique": "* {music}",
    "applaude": "* {applaude}",
    "rire": "@",
    "rire-": "@@",  # begin/end of a laughing sequence
    "rire_begin": "@@",
    "rire_end": "@@",
    "-rire": "@@",
    "rire en fond": "@ {background laughter}",
    "nontrans": "dummy"
}
# ---------------------------------------------------------------------------
class sppasTRS(sppasBaseIO):
"""SPPAS reader for TRS format.
:author: <NAME>
:organization: Laboratoire Parole et Langage, Aix-en-Provence, France
:contact: <EMAIL>
:license: GPL, v3
:copyright: Copyright (C) 2011-2018 <NAME>
"""
@staticmethod
def detect(filename):
    """Check whether a file is of TRS format or not.

    Reads the first two lines and looks for the Transcriber DOCTYPE
    declaration on the second one.

    :param filename: (str) Name of the file to check.
    :returns: (bool)

    """
    try:
        with codecs.open(filename, 'r', "ISO-8859-1") as it:
            # codecs stream objects are iterable; built-in next() works
            # on both Python 2 and 3 (the former ``it.next()`` call is
            # Python-2-only and raises AttributeError on Python 3).
            next(it)
            doctype_line = next(it).strip()
    except IOError:
        return False
    except UnicodeDecodeError:
        return False
    except StopIteration:
        # fewer than two lines: cannot contain the DOCTYPE declaration
        return False
    return '<!DOCTYPE Trans SYSTEM "trans' in doctype_line
# -----------------------------------------------------------------------
@staticmethod
def make_point(midpoint):
    """Build a localization point from a time value.

    The localization is a time value, so a float.

    :param midpoint: value convertible to float (seconds)
    :returns: (sppasPoint) point with a 5 ms radius
    :raises: AnnDataTypeError if the value is not a float

    """
    try:
        seconds = float(midpoint)
    except ValueError:
        raise AnnDataTypeError(midpoint, "float")
    return sppasPoint(seconds, radius=0.005)
# -----------------------------------------------------------------------
def __init__(self, name=None):
    """Initialize a new sppasTRS instance.

    :param name: (str) This transcription name.

    """
    super(sppasTRS, self).__init__(
        self.__class__.__name__ if name is None else name)

    self.default_extension = "trs"
    self.software = "Transcriber"

    # Capabilities of this reader: what the TRS format can and
    # cannot represent.
    capabilities = (
        ("_accept_multi_tiers", True),
        ("_accept_no_tiers", False),
        ("_accept_metadata", True),
        ("_accept_ctrl_vocab", False),
        ("_accept_media", True),
        ("_accept_hierarchy", True),
        ("_accept_point", False),
        ("_accept_interval", True),
        ("_accept_disjoint", False),
        ("_accept_alt_localization", False),
        ("_accept_alt_tag", False),
        ("_accept_radius", False),
        ("_accept_gaps", False),
        ("_accept_overlaps", False),
    )
    for attribute, flag in capabilities:
        setattr(self, attribute, flag)
# -----------------------------------------------------------------------
def read(self, filename):
    """Read a TRS file and fill the Transcription.

    <!ELEMENT Trans ((Speakers|Topics)*,Episode)>

    :param filename: (str)

    """
    try:
        tree = ET.parse(filename)
    except ET.ParseError:
        # TRS files are commonly encoded in ISO-8859-1; retry with an
        # explicit parser when the default parse fails.
        xmlp = ET.XMLParser(encoding="ISO-8859-1")
        tree = ET.parse(filename, parser=xmlp)
    root = tree.getroot()

    # Get metadata for self
    self._parse_metadata(root)

    # Speakers. One tier by speaker is created.
    self._parse_speakers(root.find('Speakers'))
    self.create_tier(NO_SPK_TIER)

    # Topics. Set the controlled vocabulary.
    topics = self.create_tier("Topics")
    sppasTRS._parse_topics(root.find('Topics'), topics)

    # Episodes. Fill the tier.
    episodes_tier = self.create_tier("Episodes")
    for episode_root in root.iter('Episode'):
        sppasTRS._parse_episode_attributes(episode_root, episodes_tier)

    # Episodes. Examine sections.
    section_tier = self.create_tier("Sections")
    for section_root in root.iter('Section'):
        self._parse_section_attributes(section_root, section_tier)

    # Episodes. Examine each "Turn" (content of tiers)
    self.create_tier("Turns")
    for turn_root in root.iter('Turn'):
        self._parse_turn(turn_root)

    # Reformat the tags (problems of the transcription convention).
    # Only the per-speaker "Trans-*" tiers carry transcription text.
    for tier in self:
        if "Trans" in tier.get_name():
            for ann in tier:
                if ann.is_labelled():
                    for label in ann.get_labels():
                        tag = label.get_best()
                        new_content = sppasTRS.__format_tag(tag)
                        label.get_best().set_content(new_content)

    # Create the hierarchy
    self.add_hierarchy_link("TimeAlignment",
                            self.find('Turns'),
                            self.find('Sections'))
    self.add_hierarchy_link("TimeAlignment",
                            self.find('Sections'),
                            self.find('Episodes'))
    self.add_hierarchy_link("TimeAlignment",
                            self.find('Sections'),
                            self.find('Topics'))
    # TurnRecordingQuality, TurnElocutionMode and TurnChannel should be
    # "TimeAssociation" of Turns but... if sparse data (?) !

    # Remove empty tiers.
    # Iterate backwards so pop() does not shift the pending indices.
    for i in reversed(range(len(self))):
        if len(self[i]) == 0:
            self.pop(i)
# -----------------------------------------------------------------------
@staticmethod
def __format_tag(tag):
    """Reformat tokens in tags.

    Remove specific markers of the transcription convention of
    Transcriber: a leading "^^", a leading "*" or "?" (kept when the
    token is that single character), and empty pronunciations "()".

    """
    kept = list()
    for raw in tag.get_content().split(" "):
        token = raw[2:] if raw.startswith("^^") else raw
        if len(token) > 1 and token[0] in ("*", "?"):
            token = token[1:]
        token = token.replace("()", "")
        if token:
            kept.append(token)
    return " ".join(kept)
# -----------------------------------------------------------------------
def _parse_metadata(self, root):
    """Get metadata from attributes of the main root.

    <!ATTLIST Trans
    audio_filename  CDATA #IMPLIED
    scribe          CDATA #IMPLIED
    xml:lang        NMTOKEN #IMPLIED
    version         NMTOKEN #IMPLIED
    version_date    CDATA #IMPLIED
    elapsed_time    CDATA "0"
    >

    :param root: (ET) Main XML Element tree root of a TRS file.

    """
    # The media linked to this file.
    if "audio_filename" in root.attrib:
        media = sppasMedia(root.attrib['audio_filename'])
        media.set_meta('media_source', 'primary')
        self.set_media_list([media])

    # Map each remaining TRS attribute to its metadata key.
    # "xml:lang" is saved as a language *name* because Transcriber
    # stores iso639-1 codes while SPPAS expects iso639-3.
    for trs_key, meta_key in (
            ("scribe", "annotator_name"),
            ("version", "annotator_version"),
            ("version_date", "annotator_version_date"),
            ("xml:lang", "language_name_0")):
        if trs_key in root.attrib:
            self.set_meta(meta_key, root.attrib[trs_key])
# -----------------------------------------------------------------------
def _parse_speakers(self, spk_root):
    """Read the <Speakers> element and create tiers.

    <!ELEMENT Speakers (Speaker*)>
    <!ATTLIST Speakers>

    <!ELEMENT Speaker EMPTY>
    <!ATTLIST Speaker
    id		ID		#REQUIRED
    name		CDATA		#REQUIRED
    check		(yes|no)	#IMPLIED
    type 		(male|female|child|unknown)	#IMPLIED
    dialect		(native|nonnative)		#IMPLIED
    accent		CDATA		#IMPLIED
    scope		(local|global)	#IMPLIED
    >

    :param spk_root: (ET) XML Element tree root.

    """
    if spk_root is None:
        return
    for spk_node in spk_root.findall('Speaker'):
        # Speaker identifier -> new tier.
        # A <Speaker> without an id (invalid w.r.t. the DTD) cannot be
        # attached to a tier: the previous code would re-use the tier
        # of the preceding speaker (or raise UnboundLocalError on the
        # first node), so such nodes are skipped instead.
        if "id" not in spk_node.attrib:
            continue
        value = spk_node.attrib['id']
        tier = self.create_tier("Trans-" + value)
        tier.set_meta("speaker_id", value)

        # Optional speaker attributes, stored as tier metadata.
        for attrib, meta in (
                ("name", "speaker_name"),        # CDATA
                ("type", "speaker_type"),        # male/female/child/unknown
                ("check", "speaker_check"),      # "spelling checked": yes/no
                ("dialect", "speaker_dialect"),  # native/nonnative
                ("accent", "speaker_accent"),    # CDATA
                ("scope", "speaker_scope")):     # local/global
            if attrib in spk_node.attrib:
                tier.set_meta(meta, spk_node.attrib[attrib])
# -----------------------------------------------------------------------
@staticmethod
def _parse_topics(topic_root, topic_tier):
    """Read the <Topics> element and create a tier.

    The topics and their description are stored in a controlled
    vocabulary.

    <!ELEMENT Topics (Topic*)>
    <!ATTLIST Topics>

    <!ELEMENT Topic EMPTY>
    <!ATTLIST Topic
    id		ID	#REQUIRED
    desc	CDATA	#REQUIRED
    >

    :param topic_root: (ET) XML Element tree root.
    :param topic_tier: (sppasTier) Tier to store topic segmentation

    """
    if topic_root is None:
        return

    # Build the vocabulary: one entry per declared topic.
    ctrl_vocab = sppasCtrlVocab('topics')
    for topic_node in topic_root.findall('Topic'):
        # a topic without identifier cannot be stored
        topic_id = topic_node.attrib.get('id')
        if topic_id is None:
            continue
        # Topic description: CDATA (optional in practice)
        topic_desc = topic_node.attrib.get('desc', "")
        ctrl_vocab.add(sppasTag(topic_id), topic_desc)

    topic_tier.set_ctrl_vocab(ctrl_vocab)
# -----------------------------------------------------------------------
@staticmethod
def _parse_episode_attributes(episode_root, episodes_tier):
    """Read the episode attributes.

    <!ELEMENT Episode (Section*)>
    <!ATTLIST Episode
    program		CDATA	#IMPLIED
    air_date	CDATA	#IMPLIED
    >

    :param episode_root: (ET) XML Element tree root.
    :param episodes_tier: (sppasTier) The tier to store the episodes.

    """
    if episode_root is None:
        return
    if len(episode_root) == 0:
        # no sections in this episode.
        return

    # The episode spans from its first to its last section.
    begin = episode_root[0].attrib['startTime']
    end = episode_root[-1].attrib['endTime']
    program = episode_root.attrib.get('program', "episode")

    # Add the episode in the tier
    location = sppasLocation(
        sppasInterval(sppasTRS.make_point(begin),
                      sppasTRS.make_point(end)))
    episodes_tier.create_annotation(location, sppasLabel(sppasTag(program)))
# -----------------------------------------------------------------------
def _parse_section_attributes(self, section_root, section_tier):
"""Read the section attributes.
Sections are mainly used to segment the topics and to mention
un-transcribed segments.
<!ELEMENT Section (Turn*)>
<!ATTLIST Section
type (report | nontrans | filler) #REQUIRED
topic IDREF #IMPLIED
startTime CDATA #REQUIRED
endTime CDATA #REQUIRED
>
:param section_root: (ET) | |
"""
The Serpent solver!
"""
from abc import abstractmethod
import time
import pathlib
import logging
import numpy
from hydep.lib import HighFidelitySolver
from hydep.internal import TransportResult
import hydep.internal.features as hdfeat
from .writer import BaseWriter, SerpentWriter, ExtDepWriter
from .runner import BaseRunner, SerpentRunner, ExtDepRunner
from .processor import SerpentProcessor, WeightedFPYHelper, ConstantFPYHelper
from .xsavail import XS_2_1_30
# Module-level logger for the Serpent interface.
__logger__ = logging.getLogger("hydep.serpent")

# Features (and the cross sections from XS_2_1_30) available in every
# Serpent version supported by this interface, i.e. >= 2.1.30.
_FEATURES_ATLEAST_2_1_30 = hdfeat.FeatureCollection(
    (
        hdfeat.FISSION_MATRIX,
        hdfeat.FISSION_YIELDS,
        hdfeat.HOMOG_GLOBAL,
        hdfeat.HOMOG_LOCAL,
        hdfeat.MICRO_REACTION_XS,
    ),
    XS_2_1_30,
)
class BaseSolver(HighFidelitySolver):
    """Base solver for interfacing with Serpent >= 2.1.30

    Does not provide all methods needed by the
    :class:`hydep.lib.HighFidelitySolver` other than
    :attr:`features`, :meth:`setHooks`. :meth:`beforeMain`
    is partially implemented, but requires a helper method for
    writing solver-specific input files. Other methods for
    directly interacting with Serpent are left to concrete
    classes like :class:`SerpentSolver` and
    :class:`CoupledSerpentSolver`.

    Parameters
    ----------
    writer : hydep.serpent.BaseWriter
        Passed to :attr:`writer`
    runner : hydep.serpent.BaseRunner
        Passed to :attr:`runner`
    processor : hydep.serpent.SerpentProcessor, optional
        If not provided, use a :class:`hydep.serpent.SerpentProcessor`.
        Passed to :attr:`processor`

    Attributes
    ----------
    writer : hydep.serpent.BaseWriter
        Instance responsible for writing input files
    runner : hydep.serpent.BaseRunner
        Instance responsible for controlling Serpent execution
    processor : hydep.serpent.SerpentProcessor
        Instance responsible for pulling data from output files
    features : hydep.internal.features.FeatureCollection
        A non-exhaustive list of features contained in Serpent >= 2.1.30
        that are useful / necessary for this framework
    hooks : hydep.internal.features.FeatureCollection
        Collection of physics and cross sections needed by other
        aspects of the framework
    basedir : pathlib.Path or None
        Path where results will be saved and auxillary files may be saved.
        Configured in :meth:`self.beforeMain`

    """

    def __init__(self, writer, runner, processor=None):
        self._writer = writer
        # Fall back to a default processor when none is supplied
        self._processor = processor or SerpentProcessor()
        self._runner = runner
        self._hooks = None
        # Column vector of burnable material volumes; filled in beforeMain
        self._volumes = None
        self._basedir = None

    @property
    def features(self):
        return _FEATURES_ATLEAST_2_1_30

    @property
    def hooks(self):
        return self._hooks

    def setHooks(self, needs):
        """Instruct the solver and helpers what physics are needed

        Parameters
        ----------
        needs : hydep.internal.features.FeatureCollection
            The needs of other solvers, e.g.
            :class:`hydep.ReducedOrderSolver`

        Raises
        ------
        TypeError
            If ``needs`` is not a
            :class:`hydep.internal.features.FeatureCollection`

        """
        # TODO Guard against hooks that aren't supported
        if not isinstance(needs, hdfeat.FeatureCollection):
            raise TypeError(
                "Hooks must be {}, not {}".format(
                    hdfeat.FeatureCollection.__name__, type(needs)
                )
            )

        self._hooks = needs
        # Propagate the needs so the writer emits the matching cards
        self._writer.hooks = needs

    @property
    def basedir(self):
        return self._basedir

    @property
    def writer(self) -> BaseWriter:
        return self._writer

    @property
    def runner(self) -> BaseRunner:
        return self._runner

    @property
    def processor(self) -> SerpentProcessor:
        return self._processor

    def _process(self, basefile, index=0):
        """Build a TransportResult from the Serpent output files.

        Parameters
        ----------
        basefile : str
            Path of the Serpent input file without the output suffix;
            ``_det{index}.m``, ``_res.m``, etc. are appended here.
        index : int, optional
            Index of the solution of interest inside the output files.

        Returns
        -------
        hydep.internal.TransportResult

        """
        # Detector tallies are per material; divide by volumes to get
        # volume-normalized fluxes.
        fluxes = self.processor.processDetectorFluxes(
            basefile + f"_det{index}.m",
            "flux",
        ) / self._volumes

        if self.hooks is not None and self.hooks.macroXS:
            resbundle = self.processor.processResult(
                basefile + "_res.m",
                self.hooks.macroXS,
                index=index,
            )
            res = TransportResult(fluxes, resbundle.keff, macroXS=resbundle.macroXS)
        else:
            keff = self.processor.getKeff(basefile + "_res.m", index=index)
            res = TransportResult(fluxes, keff)

        if not self.hooks:
            return res

        # Attach the optional physics requested through the hooks
        for feature in self.hooks.features:
            if feature is hdfeat.FISSION_MATRIX:
                res.fmtx = self.processor.processFmtx(basefile + f"_fmtx{index}.m")
            elif feature is hdfeat.MICRO_REACTION_XS:
                res.microXS = self.processor.processMicroXS(basefile + f"_mdx{index}.m")
            elif feature is hdfeat.FISSION_YIELDS:
                res.fissionYields = self.processor.processFissionYields(
                    basefile + f"_det{index}.m"
                )

        return res

    def beforeMain(self, model, manager, settings):
        """Prepare the base input file

        Parameters
        ----------
        model : hydep.Model
            Geometry information to be written once
        manager : hydep.Manager
            Depletion information
        settings : hydep.Settings
            Shared settings

        Raises
        ------
        AttributeError
            If ``settings.serpent.acelib`` is not configured
        ValueError
            If ``settings.serpent.fpyMode`` is not a supported mode

        """
        self.runner.configure(settings.serpent)
        self._basedir = settings.basedir
        assert manager.burnable is not None
        orderedBumat = manager.burnable

        # Cache material ids and volumes in the manager's ordering
        matids = []
        self._volumes = numpy.empty((len(orderedBumat), 1))
        for ix, m in enumerate(orderedBumat):
            matids.append(str(m.id))
            self._volumes[ix] = m.volume

        self.writer.model = model
        self.writer.burnable = orderedBumat
        acelib = settings.serpent.acelib
        if acelib is None:
            raise AttributeError(
                f"Serpent acelib setting not configured on {settings}"
            )
        self.writer.updateProblemIsotopes((iso.triplet for iso in manager.chain), acelib)

        __logger__.info("Writing base Serpent input file")
        mainfile = self._writeMainFile(model, manager, settings)

        self.processor.burnable = matids

        # Not super pretty, as this interacts both with the writer's roles
        # and the processors roles
        # NOTE(review): assumes setHooks was called before beforeMain;
        # self.hooks is None otherwise — confirm against the framework.
        if hdfeat.FISSION_YIELDS in self.hooks.features:
            mode = settings.serpent.fpyMode
            if mode == "weighted":
                fyproc = WeightedFPYHelper(matids, manager.chain)
            elif mode == "constant":
                fyproc = ConstantFPYHelper(
                    matids, manager.chain, settings.serpent.constantFPYSpectrum
                )
            else:
                raise ValueError(f"FPY mode {mode} unsupported")
            detlines = fyproc.makeDetectors()
            if detlines:
                with mainfile.open("a") as s:
                    s.write("\n".join(detlines))
            self.processor.fyHelper = fyproc

        if hdfeat.MICRO_REACTION_XS in self.hooks.features:
            self.processor.reactionIndex = manager.chain.reactionIndex

        __logger__.info("Done.")

    @abstractmethod
    def _writeMainFile(self, model, manager, settings) -> pathlib.Path:
        """Write the primary input file before transport solutions.

        Signature matches the call in :meth:`beforeMain` (the previous
        abstract declaration omitted ``settings``). Must return the path
        of the written file so :meth:`beforeMain` can append detectors.
        """
        pass
class SerpentSolver(BaseSolver):
    """Primary entry point for using Serpent as high fidelity solver

    Attributes
    ----------
    writer : SerpentWriter
        Writes the Serpent input files
    runner : SerpentRunner
        Launches Serpent on the generated inputs
    processor : SerpentProcessor
        Extracts data from the Serpent output files
    features : hydep.internal.features.FeatureCollection
        A non-exhaustive list of features contained in Serpent >= 2.1.30
        that are useful / necessary for this framework
    hooks : hydep.internal.features.FeatureCollection
        Collection of physics and cross sections needed by other
        aspects of the framework

    """

    def __init__(self):
        super().__init__(writer=SerpentWriter(), runner=SerpentRunner())
        self._curfile = None
        self._tmpdir = None
        self._tmpFile = None

    def bosSolve(self, compositions, timestep, power):
        """Create and solve the BOS problem with updated compositions

        Parameters
        ----------
        compositions : hydep.internal.CompBundle
            New compositions for this point in time such that
            ``compositions.densities[i][j]`` is the updated
            atom density for ``compositions.zai[j]`` for material ``i``
        timestep : hydep.internal.TimeStep
            Current point in calendar time for the beginning
            of this coarse step
        power : float
            Current reactor power [W]

        Returns
        -------
        hydep.internal.TransportResult
            Transport result with fluxes, multiplication factor,
            run time, and other data needed in the framework

        """
        return self._solve(compositions, timestep, power, final=False)

    def eolSolve(self, compositions, timestep, power):
        """Create and solve the EOL problem with updated compositions

        Identical to :meth:`bosSolve` except that burnable materials
        are not marked with ``burn 1``. Takes and returns the same
        arguments and result as :meth:`bosSolve`.

        """
        return self._solve(compositions, timestep, power, final=True)

    def _solve(self, compositions, timestep, power, final=False):
        # One steady-state input per coarse step; ``final`` controls
        # whether materials are flagged as burnable.
        inputfile = self.writer.writeSteadyStateFile(
            f"./serpent-s{timestep.coarse}", compositions, timestep, power,
            final=final)

        # Time only the transport solution itself
        tic = time.time()
        self.runner(inputfile)
        toc = time.time()

        result = self._process(str(inputfile), index=0)
        result.runTime = toc - tic
        return result

    def _writeMainFile(self, model, manager, settings):
        """Write the shared base input file and return its path."""
        base = pathlib.Path.cwd() / "serpent-base.sss"
        self.writer.writeBaseFile(base, settings, manager.chain)
        return base
class CoupledSerpentSolver(BaseSolver):
"""Utilize the external depletion interface
Attributes
----------
writer : hydep.serpent.ExtDepWriter
Instance that writes the main input file and new compositions
runner : hydep.serpent.ExtDepRunner
Instance that communicates with a Serpent process. Controls how
Serpent walks through time
processor : hydep.serpent.SerpentProcessor
Responsible for pulling information from output files
features : hydep.internal.features.FeatureCollection
A non-exhaustive list of features contained in Serpent >= 2.1.30
that are useful / necessary for this framework
hooks : hydep.internal.features.FeatureCollection
Collection of physics and cross sections needed by other
aspects of the framework
"""
def __init__(self):
    # Pair the external-depletion writer with its matching runner.
    super().__init__(writer=ExtDepWriter(), runner=ExtDepRunner())
    self._cstep = 0  # index of the current coarse step
    self._fp = None  # base path of the main coupling input file
def _writeMainFile(self, model, manager, settings):
    """Write the external-depletion coupling file and remember its path."""
    self._fp = pathlib.Path.cwd() / "serpent-extdep"
    self.writer.writeCouplingFile(self._fp, settings, manager)
    return self._fp
def bosSolve(self, compositions, timestep, _power):
    """Solve the BOS problem

    For the first timestep, where ``timestep.coarse == 0``,
    the :attr:`runner` will initiate the Serpent process
    and solve the first point in time. Otherwise, the compositions
    are written to the restart file, which are read into Serpent
    using the external depletion interface.

    Parameters
    ----------
    compositions : hydep.internal.CompBundle
        Material compositions for this point in time.
    timestep : hydep.internal.TimeStep
        Current time step
    _power : float
        Current reactor power [W]. Not needed since the
        powers are already written to the full input file

    Returns
    -------
    hydep.internal.TransportResult
        Transport result with fluxes, multiplication factor,
        run time, and other information needed by the framework.

    """
    if timestep.coarse != 0:
        # Subsequent steps: push updated compositions through the
        # restart file, then let the running Serpent process advance.
        self.writer.updateComps(compositions, timestep, threshold=0)
        start = time.time()
        self.runner.solveNext()
        end = time.time()
    else:
        # First step: launch the Serpent process on the coupling file.
        start = time.time()
        self.runner.start(self._fp, self._fp.with_suffix(".log"))
        end = time.time()
        # NOTE(review): presumably syncs the writer with the restart
        # file Serpent just created so later updates line up — confirm.
        self.writer.updateFromRestart()
    res = self._process(str(self._fp), timestep.coarse)
    res.runTime = end - start
    return res
def eolSolve(self, compositions, timestep, _power):
"""Solve the EOL problem
The only difference between this and :meth:`bosSolve`
is that that :meth:`ExtDepRunner.solveEOL` is used to
solve and then terminate the Serpent process.
Parameters
----------
compositions : hydep.internal.CompBundle
Material compositions for this point in time.
timestep : hydep.internal.TimeStep
Current time step
_power : float
Current reactor power [W]. Not needed since the
powers are already written to the full | |
(_('Select All'), _('Ctrl+A')))
# Separator
self.menuedit.addSeparator()
self.menueditpreferences = self.menuedit.addAction(
QtGui.QIcon(self.app.resource_location + '/pref.png'),
'%s\t%s' % (_('Preferences'), _('Shift+P')))
# ########################################################################
# ########################## OPTIONS # ###################################
# ########################################################################
self.menuoptions = self.menu.addMenu(_('Options'))
self.menuoptions_transform_rotate = self.menuoptions.addAction(
QtGui.QIcon(self.app.resource_location + '/rotate.png'),
'%s\t%s' % (_("Rotate Selection"), _('Shift+(R)')))
# Separator
self.menuoptions.addSeparator()
self.menuoptions_transform_skewx = self.menuoptions.addAction(
QtGui.QIcon(self.app.resource_location + '/skewX.png'),
'%s\t%s' % (_("Skew on X axis"), _('Shift+X')))
self.menuoptions_transform_skewy = self.menuoptions.addAction(
QtGui.QIcon(self.app.resource_location + '/skewY.png'),
'%s\t%s' % (_("Skew on Y axis"), _('Shift+Y')))
# Separator
self.menuoptions.addSeparator()
self.menuoptions_transform_flipx = self.menuoptions.addAction(
QtGui.QIcon(self.app.resource_location + '/flipx.png'),
'%s\t%s' % (_("Flip on X axis"), _('X')))
self.menuoptions_transform_flipy = self.menuoptions.addAction(
QtGui.QIcon(self.app.resource_location + '/flipy.png'),
'%s\t%s' % (_("Flip on Y axis"), _('Y')))
# Separator
self.menuoptions.addSeparator()
self.menuoptions_view_source = self.menuoptions.addAction(
QtGui.QIcon(self.app.resource_location + '/source32.png'),
'%s\t%s' % (_("View source"), _('Alt+S')))
self.menuoptions_tools_db = self.menuoptions.addAction(
QtGui.QIcon(self.app.resource_location + '/database32.png'),
'%s\t%s' % (_("Tools Database"), _('Ctrl+D')))
# Separator
self.menuoptions.addSeparator()
# ########################################################################
# ########################## View # ######################################
# ########################################################################
self.menuview = self.menu.addMenu(_('View'))
self.menuviewenable = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/replot16.png'),
'%s\t%s' % (_('Enable all'), _('Alt+1')))
self.menuviewdisableall = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/clear_plot16.png'),
'%s\t%s' % (_('Disable all'), _('Alt+2')))
self.menuviewenableother = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/replot16.png'),
'%s\t%s' % (_('Enable non-selected'), _('Alt+3')))
self.menuviewdisableother = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/clear_plot16.png'),
'%s\t%s' % (_('Disable non-selected'), _('Alt+4')))
# Separator
self.menuview.addSeparator()
self.menuview_zoom_fit = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/zoom_fit32.png'),
'%s\t%s' % (_("Zoom Fit"), _('V')))
self.menuview_zoom_in = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/zoom_in32.png'),
'%s\t%s' % (_("Zoom In"), _('=')))
self.menuview_zoom_out = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/zoom_out32.png'),
'%s\t%s' % (_("Zoom Out"), _('-')))
self.menuview.addSeparator()
# Replot all
self.menuview_replot = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/replot32.png'),
'%s\t%s' % (_("Redraw All"), _('F5')))
self.menuview.addSeparator()
self.menuview_toggle_code_editor = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/code_editor32.png'),
'%s\t%s' % (_('Toggle Code Editor'), _('Shift+E')))
self.menuview.addSeparator()
self.menuview_toggle_fscreen = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/fscreen32.png'),
'%s\t%s' % (_("Toggle FullScreen"), _('Alt+F10')))
self.menuview_toggle_parea = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/plot32.png'),
'%s\t%s' % (_("Toggle Plot Area"), _('Ctrl+F10')))
self.menuview_toggle_notebook = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/notebook32.png'),
'%s\t%s' % (_("Toggle Project/Properties/Tool"), _('`')))
self.menuview.addSeparator()
self.menuview_toggle_grid = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/grid32.png'),
'%s\t%s' % (_("Toggle Grid Snap"), _('G')))
self.menuview_toggle_grid_lines = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/grid_lines32.png'),
'%s\t%s' % (_("Toggle Grid Lines"), _('Shift+G')))
self.menuview_toggle_axis = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/axis32.png'),
'%s\t%s' % (_("Toggle Axis"), _('Shift+A')))
self.menuview_toggle_workspace = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/workspace24.png'),
'%s\t%s' % (_("Toggle Workspace"), _('Shift+W')))
self.menuview_toggle_hud = self.menuview.addAction(
QtGui.QIcon(self.app.resource_location + '/hud_32.png'),
'%s\t%s' % (_("Toggle HUD"), _('Shift+H')))
# ########################################################################
# ########################## Objects # ###################################
# ########################################################################
self.menuobjects = self.menu.addMenu(_('Objects'))
self.menuobjects.addSeparator()
self.menuobjects_selall = self.menuobjects.addAction(
QtGui.QIcon(self.app.resource_location + '/select_all.png'),
'%s\t%s' % (_('Select All'), ''))
self.menuobjects_unselall = self.menuobjects.addAction(
QtGui.QIcon(self.app.resource_location + '/deselect_all32.png'),
'%s\t%s' % (_('Deselect All'), ''))
# ########################################################################
# ########################## Tool # ######################################
# ########################################################################
self.menutool = QtWidgets.QMenu(_('Tool'))
self.menutoolaction = self.menu.addMenu(self.menutool)
self.menutoolshell = self.menutool.addAction(
QtGui.QIcon(self.app.resource_location + '/shell16.png'),
'%s\t%s' % (_('Command Line'), _('S')))
# ########################################################################
# ########################## Help # ######################################
# ########################################################################
self.menuhelp = self.menu.addMenu(_('Help'))
self.menuhelp_manual = self.menuhelp.addAction(
QtGui.QIcon(self.app.resource_location + '/globe16.png'),
'%s\t%s' % (_('Online Help'), _('F1')))
self.menuhelp_bookmarks = self.menuhelp.addMenu(
QtGui.QIcon(self.app.resource_location + '/bookmarks16.png'), _('Bookmarks'))
self.menuhelp_bookmarks.addSeparator()
self.menuhelp_bookmarks_manager = self.menuhelp_bookmarks.addAction(
QtGui.QIcon(self.app.resource_location + '/bookmarks16.png'),
'%s\t%s' % (_('Bookmarks Manager'), ''))
self.menuhelp.addSeparator()
self.menuhelp_report_bug = self.menuhelp.addAction(
QtGui.QIcon(self.app.resource_location + '/bug16.png'),
'%s\t%s' % (_('Report a bug'), ''))
self.menuhelp.addSeparator()
self.menuhelp_exc_spec = self.menuhelp.addAction(
QtGui.QIcon(self.app.resource_location + '/pdf_link16.png'),
'%s\t%s' % (_('Excellon Specification'), ''))
self.menuhelp_gerber_spec = self.menuhelp.addAction(
QtGui.QIcon(self.app.resource_location + '/pdf_link16.png'),
'%s\t%s' % (_('Gerber Specification'), ''))
self.menuhelp.addSeparator()
self.menuhelp_shortcut_list = self.menuhelp.addAction(
QtGui.QIcon(self.app.resource_location + '/shortcuts24.png'),
'%s\t%s' % (_('Shortcuts List'), _('F3')))
self.menuhelp_videohelp = self.menuhelp.addAction(
QtGui.QIcon(self.app.resource_location + '/youtube32.png'),
'%s\t%s' % (_('YouTube Channel'), _('F4')))
self.menuhelp.addSeparator()
self.menuhelp_readme = self.menuhelp.addAction(
QtGui.QIcon(self.app.resource_location + '/warning.png'),
'%s\t%s' % (_("How To"), ''))
self.menuhelp_about = self.menuhelp.addAction(
QtGui.QIcon(self.app.resource_location + '/about32.png'),
'%s\t%s' % (_('About'), ''))
# ########################################################################
# ########################## GEOMETRY EDITOR # ###########################
# ########################################################################
self.geo_editor_menu = QtWidgets.QMenu('>%s<' % _('Geo Editor'))
self.menu.addMenu(self.geo_editor_menu)
self.geo_add_circle_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/circle32.png'),
'%s\t%s' % (_('Add Circle'), _('O'))
)
self.geo_add_arc_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/arc16.png'),
'%s\t%s' % (_('Add Arc'), _('A')))
self.geo_editor_menu.addSeparator()
self.geo_add_rectangle_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/rectangle32.png'),
'%s\t%s' % (_('Add Rectangle'), _('R'))
)
self.geo_add_polygon_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/polygon32.png'),
'%s\t%s' % (_('Add Polygon'), _('N'))
)
self.geo_add_path_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/path32.png'),
'%s\t%s' % (_('Add Path'), _('P')))
self.geo_editor_menu.addSeparator()
self.geo_add_text_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/text32.png'),
'%s\t%s' % (_('Add Text'), _('T')))
self.geo_editor_menu.addSeparator()
self.geo_union_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/union16.png'),
'%s\t%s' % (_('Polygon Union'), _('U')))
self.geo_intersection_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/intersection16.png'),
'%s\t%s' % (_('Polygon Intersection'), _('E')))
self.geo_subtract_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/subtract16.png'),
'%s\t%s' % (_('Polygon Subtraction'), _('S'))
)
self.geo_editor_menu.addSeparator()
self.geo_cutpath_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/cutpath16.png'),
'%s\t%s' % (_('Cut Path'), _('X')))
# self.move_menuitem = self.menu.addAction(
# QtGui.QIcon(self.app.resource_location + '/move16.png'), "Move Objects 'm'")
self.geo_copy_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/copy16.png'),
'%s\t%s' % (_("Copy Geom"), _('C')))
self.geo_delete_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/deleteshape16.png'),
'%s\t%s' % (_("Delete Shape"), _('DEL'))
)
self.geo_editor_menu.addSeparator()
self.geo_move_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/move32.png'),
'%s\t%s' % (_("Move"), _('M')))
self.geo_buffer_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/buffer16.png'),
'%s\t%s' % (_("Buffer Tool"), _('B'))
)
self.geo_paint_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/paint16.png'),
'%s\t%s' % (_("Paint Tool"), _('I'))
)
self.geo_transform_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/transform.png'),
'%s\t%s' % (_("Transform Tool"), _('Alt+R'))
)
self.geo_editor_menu.addSeparator()
self.geo_cornersnap_menuitem = self.geo_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/corner32.png'),
'%s\t%s' % (_("Toggle Corner Snap"), _('K'))
)
# ########################################################################
# ########################## EXCELLON Editor # ###########################
# ########################################################################
self.exc_editor_menu = QtWidgets.QMenu('>%s<' % _('Excellon Editor'))
self.menu.addMenu(self.exc_editor_menu)
self.exc_add_array_drill_menuitem = self.exc_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/rectangle32.png'),
'%s\t%s' % (_('Add Drill Array'), _('A')))
self.exc_add_drill_menuitem = self.exc_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/plus16.png'),
'%s\t%s' % (_('Add Drill'), _('D')))
self.exc_editor_menu.addSeparator()
self.exc_add_array_slot_menuitem = self.exc_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/slot_array26.png'),
'%s\t%s' % (_('Add Slot Array'), _('Q')))
self.exc_add_slot_menuitem = self.exc_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/slot26.png'),
'%s\t%s' % (_('Add Slot'), _('W')))
self.exc_editor_menu.addSeparator()
self.exc_resize_drill_menuitem = self.exc_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/resize16.png'),
'%s\t%s' % (_('Resize Drill(S)'), _('R'))
)
self.exc_copy_drill_menuitem = self.exc_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/copy32.png'),
'%s\t%s' % (_('Copy'), _('C')))
self.exc_delete_drill_menuitem = self.exc_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/deleteshape32.png'),
'%s\t%s' % (_('Delete'), _('DEL'))
)
self.exc_editor_menu.addSeparator()
self.exc_move_drill_menuitem = self.exc_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/move32.png'),
'%s\t%s' % (_('Move Drill'), _('M')))
# ########################################################################
# ########################## GERBER Editor # #############################
# ########################################################################
self.grb_editor_menu = QtWidgets.QMenu('>%s<' % _('Gerber Editor'))
self.menu.addMenu(self.grb_editor_menu)
self.grb_add_pad_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/aperture16.png'),
'%s\t%s' % (_('Add Pad'), _('P')))
self.grb_add_pad_array_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/padarray32.png'),
'%s\t%s' % (_('Add Pad Array'), _('A')))
self.grb_add_track_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/track32.png'),
'%s\t%s' % (_('Add Track'), _('T')))
self.grb_add_region_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/rectangle32.png'),
'%s\t%s' % (_('Add Region'), _('N')))
self.grb_editor_menu.addSeparator()
self.grb_convert_poly_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/poligonize32.png'),
'%s\t%s' % (_("Poligonize"), _('Alt+N')))
self.grb_add_semidisc_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/semidisc32.png'),
'%s\t%s' % (_("Add SemiDisc"), _('E')))
self.grb_add_disc_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/disc32.png'),
'%s\t%s' % (_("Add Disc"), _('D')))
self.grb_add_buffer_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/buffer16-2.png'),
'%s\t%s' % (_('Buffer'), _('B')))
self.grb_add_scale_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/scale32.png'),
'%s\t%s' % (_('Scale'), _('S')))
self.grb_add_markarea_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/markarea32.png'),
'%s\t%s' % (_('Mark Area'), _('Alt+A')))
self.grb_add_eraser_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/eraser26.png'),
'%s\t%s' % (_('Eraser'), _('Ctrl+E')))
self.grb_transform_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/transform.png'),
'%s\t%s' % (_("Transform"), _('Alt+R')))
self.grb_editor_menu.addSeparator()
self.grb_copy_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/copy32.png'),
'%s\t%s' % (_('Copy'), _('C')))
self.grb_delete_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/deleteshape32.png'),
'%s\t%s' % (_('Delete'), _('DEL')))
self.grb_editor_menu.addSeparator()
self.grb_move_menuitem = self.grb_editor_menu.addAction(
QtGui.QIcon(self.app.resource_location + '/move32.png'),
'%s\t%s' % (_('Move'), _('M')))
self.grb_editor_menu.menuAction().setVisible(False)
self.grb_editor_menu.setDisabled(True)
self.geo_editor_menu.menuAction().setVisible(False)
self.geo_editor_menu.setDisabled(True)
self.exc_editor_menu.menuAction().setVisible(False)
self.exc_editor_menu.setDisabled(True)
# ########################################################################
# ########################## Project Tab Context Menu # ##################
# ########################################################################
self.menuproject = QtWidgets.QMenu()
self.menuprojectenable = self.menuproject.addAction(
QtGui.QIcon(self.app.resource_location + '/replot32.png'), _('Enable Plot'))
self.menuprojectdisable = self.menuproject.addAction(
QtGui.QIcon(self.app.resource_location + '/clear_plot32.png'), _('Disable Plot'))
self.menuproject.addSeparator()
self.menuprojectcolor = self.menuproject.addMenu(
QtGui.QIcon(self.app.resource_location + '/set_color32.png'), _('Set Color'))
self.menuproject_red = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/red32.png'), _('Red'))
self.menuproject_blue = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/blue32.png'), _('Blue'))
self.menuproject_yellow = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/yellow32.png'), _('Yellow'))
self.menuproject_green = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/green32.png'), _('Green'))
self.menuproject_purple = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/violet32.png'), _('Purple'))
self.menuproject_brown = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/brown32.png'), _('Brown'))
self.menuproject_brown = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/white32.png'), _('White'))
self.menuproject_brown = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/black32.png'), _('Black'))
self.menuprojectcolor.addSeparator()
self.menuproject_custom = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/set_color32.png'), _('Custom'))
self.menuprojectcolor.addSeparator()
self.menuproject_custom = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/set_color32.png'), _('Opacity'))
self.menuproject_custom = self.menuprojectcolor.addAction(
QtGui.QIcon(self.app.resource_location + '/set_color32.png'), _('Default'))
self.menuproject.addSeparator()
self.menuprojectgeneratecnc = self.menuproject.addAction(
QtGui.QIcon(self.app.resource_location + '/cnc32.png'), _('Create CNCJob'))
self.menuprojectviewsource = self.menuproject.addAction(
QtGui.QIcon(self.app.resource_location + '/source32.png'), _('View Source'))
self.menuprojectedit = self.menuproject.addAction(
QtGui.QIcon(self.app.resource_location + '/edit_ok32.png'), _('Edit'))
self.menuprojectcopy = self.menuproject.addAction(
QtGui.QIcon(self.app.resource_location + '/copy32.png'), _('Copy'))
self.menuprojectdelete = self.menuproject.addAction(
QtGui.QIcon(self.app.resource_location + '/delete32.png'), _('Delete'))
self.menuprojectsave = self.menuproject.addAction(
QtGui.QIcon(self.app.resource_location + '/save_as.png'), _('Save'))
self.menuproject.addSeparator()
self.menuprojectproperties = self.menuproject.addAction(
QtGui.QIcon(self.app.resource_location + '/properties32.png'), _('Properties'))
# ########################################################################
# ####################### Central Widget -> Splitter # ##################
# ########################################################################
# IMPORTANT #
# The order: SPLITTER -> NOTEBOOK -> SNAP TOOLBAR is important and without it the GUI will not be initialized as
# desired.
self.splitter = QtWidgets.QSplitter()
self.setCentralWidget(self.splitter)
# self.notebook = QtWidgets.QTabWidget()
self.notebook = FCDetachableTab(protect=True, parent=self)
self.notebook.setTabsClosable(False)
self.notebook.useOldIndex(True)
self.splitter.addWidget(self.notebook)
self.splitter_left = QtWidgets.QSplitter(Qt.Vertical)
self.splitter.addWidget(self.splitter_left)
self.splitter_left.addWidget(self.notebook)
self.splitter_left.setHandleWidth(0)
# ########################################################################
# | |
# repo: Engine-B/CDM
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
from typing import cast, TYPE_CHECKING
from cdm.enums import CdmAttributeContextType
from .applier_state import ApplierState
from .attribute_resolution_applier import AttributeResolutionApplier
from .attribute_resolution_directive_set import AttributeResolutionDirectiveSet
from .attribute_context_parameters import AttributeContextParameters
if TYPE_CHECKING:
from cdm.objectmodel import CdmAttributeContext, CdmAttributeDefinition, CdmTraitDefinition, CdmTraitReference
from cdm.objectmodel.cdm_attribute_resolution_guidance_def import CdmAttributeResolutionGuidanceDefinition
from . import ApplierContext, ResolveOptions
def _create_child_under(res_opt: 'ResolveOptions', acp: 'AttributeContextParameters') -> 'CdmAttributeContext':
    """Create a child attribute context described by *acp* under its parent.

    The objectmodel import happens at call time to avoid a circular import
    between this module and cdm.objectmodel.
    """
    from cdm.objectmodel.cdm_attribute_context import CdmAttributeContext as _AttCtx
    return _AttCtx._create_child_under(res_opt, acp)
def _is_removed_builder():
    """Build the applier for the 'is.removed' trait: matched attributes are always dropped."""
    def _always_remove(on_step: 'ApplierContext') -> bool:  # pylint: disable=unused-argument
        # The trait unconditionally marks the attribute for removal.
        return True

    applier = AttributeResolutionApplier()
    applier._match_name = 'is.removed'
    applier._priority = 10
    applier._overrides_base = False
    applier._will_remove = _always_remove
    return applier


_is_removed = _is_removed_builder()
def _does_reference_entity_builder():
    """Build the applier for the 'does.referenceEntity' resolution guidance.

    In a round-add it substitutes the guidance's designated foreign-key
    attribute for the referenced entity's attributes, tags it with the
    'is.linkedEntity.identifier' trait, and creates an ADDED_ATTRIBUTE_IDENTITY
    context named '_foreignKey' for it.
    """
    def will_remove(app_ctx: 'ApplierContext') -> bool:  # pylint: disable=unused-argument
        # Return always false for the time being.
        # visible = True
        # if app_ctx.res_att_source:
        #     # All others go away.
        #     visible = False
        #     if app_ctx.res_att_source.target == app_ctx.res_guide.entity_by_reference.foreign_key_attribute:
        #         visible = True
        return False

    def will_round_add(app_ctx: 'ApplierContext') -> bool:  # pylint: disable=unused-argument
        return True

    def do_round_add(app_ctx: 'ApplierContext') -> None:
        # Get the added attribute and applied trait.
        sub = cast('CdmAttributeDefinition', app_ctx.res_guide.entity_by_reference.foreign_key_attribute)
        app_ctx.res_att_new.target = sub
        # Use the default name.
        app_ctx.res_att_new.resolved_name = sub.name
        # Add the trait that tells them what this means.
        if not sub.applied_traits or not next(filter(lambda atr: atr.fetch_object_definition_name() == 'is.linkedEntity.identifier', sub.applied_traits), False):
            sub.applied_traits.append('is.linkedEntity.identifier', True)
        # Get the resolved traits from attribute.
        app_ctx.res_att_new.resolved_traits = sub._fetch_resolved_traits(app_ctx.res_opt)
        app_ctx.res_guide_new = sub.resolution_guidance
        if app_ctx.res_att_new.resolved_traits:
            # Work on a private copy so later mutations don't leak into the shared trait set.
            app_ctx.res_att_new.resolved_traits = app_ctx.res_att_new.resolved_traits.deep_copy()

    def will_create_context(app_ctx: 'ApplierContext') -> bool:  # pylint: disable=unused-argument
        return True

    def do_create_context(app_ctx: 'ApplierContext') -> None:
        # Make a new attribute context to differentiate this supporting att.
        acp = AttributeContextParameters()
        acp._under = app_ctx.att_ctx
        acp._type = CdmAttributeContextType.ADDED_ATTRIBUTE_IDENTITY
        acp._name = '_foreignKey'
        app_ctx.att_ctx = _create_child_under(app_ctx.res_opt, acp)

    _does_reference_entity = AttributeResolutionApplier()
    _does_reference_entity._match_name = 'does.referenceEntity'
    _does_reference_entity._priority = 4
    _does_reference_entity._overrides_base = True
    _does_reference_entity._will_remove = will_remove
    _does_reference_entity._will_round_add = will_round_add
    _does_reference_entity._do_round_add = do_round_add
    _does_reference_entity._will_create_context = will_create_context
    _does_reference_entity._do_create_context = do_create_context
    return _does_reference_entity


_does_reference_entity = _does_reference_entity_builder()
def _does_add_supporting_attribute_builder():
    """Build the applier for the 'does.addSupportingAttribute' resolution guidance.

    Adds a copy of the guidance's supporting attribute, applies the
    'is.addedInSupportOf' trait (recording which attribute it supports via the
    'inSupportOf' argument), and creates an ADDED_ATTRIBUTE_SUPPORTING context
    for it.
    """
    def will_attribute_add(app_ctx: 'ApplierContext') -> bool:  # pylint: disable=unused-argument
        return True

    def do_attribute_add(app_ctx: 'ApplierContext') -> None:
        # Get the added attribute and applied trait.
        sub = cast('CdmAttributeDefinition', app_ctx.res_guide.add_supporting_attribute)
        # Copy so the guidance's attribute definition itself is not mutated below.
        sub = cast('CdmAttributeDefinition', sub.copy(app_ctx.res_opt))
        # Use the default name.
        app_ctx.res_att_new.resolved_name = sub.name
        # Add a supporting trait to this attribute.
        sup_trait_ref = cast('CdmTraitReference', sub.applied_traits.append('is.addedInSupportOf', False))
        sup_trait_def = cast('CdmTraitDefinition', sup_trait_ref.fetch_object_definition(app_ctx.res_opt))
        # Get the resolved traits from attribute.
        app_ctx.res_att_new.resolved_traits = sub._fetch_resolved_traits(app_ctx.res_opt)
        # Assumes some things, like the argument name. Probably a dumb design should just take the name and assume the
        # trait too. That simplifies the source docs.
        supporting = '(unspecified)'
        if app_ctx.res_att_source:
            supporting = app_ctx.res_att_source.resolved_name
        app_ctx.res_att_new.resolved_traits = app_ctx.res_att_new.resolved_traits.set_trait_parameter_value(
            app_ctx.res_opt, sup_trait_def, 'inSupportOf', supporting)
        app_ctx.res_att_new.target = sub
        app_ctx.res_guide_new = sub.resolution_guidance

    def will_create_context(app_ctx: 'ApplierContext') -> bool:  # pylint: disable=unused-argument
        return True

    def do_create_context(app_ctx: 'ApplierContext') -> None:
        # Make a new attribute context to differentiate this supporting att.
        acp = AttributeContextParameters()
        acp._under = app_ctx.att_ctx
        acp._type = CdmAttributeContextType.ADDED_ATTRIBUTE_SUPPORTING
        acp._name = 'supporting_' + app_ctx.res_att_source.resolved_name
        acp._regarding = cast('CdmAttributeDefinition', app_ctx.res_att_source.target)
        app_ctx.att_ctx = _create_child_under(app_ctx.res_opt, acp)

    _does_add_supporting_attribute = AttributeResolutionApplier()
    _does_add_supporting_attribute._match_name = 'does.addSupportingAttribute'
    _does_add_supporting_attribute._priority = 8
    _does_add_supporting_attribute._overrides_base = True
    _does_add_supporting_attribute._will_attribute_add = will_attribute_add
    _does_add_supporting_attribute._do_attribute_add = do_attribute_add
    _does_add_supporting_attribute._will_create_context = will_create_context
    _does_add_supporting_attribute._do_create_context = do_create_context
    return _does_add_supporting_attribute


_does_add_supporting_attribute = _does_add_supporting_attribute_builder()
def _does_impose_directives_builder():
    """Build the applier for 'does.imposeDirectives': merge the guidance's
    imposed directives into the resolve options."""
    def will_alter_directives(res_opt: 'ResolveOptions', res_guide: 'CdmAttributeResolutionGuidanceDefinition') -> bool:  # pylint: disable=unused-argument
        return True

    def do_alter_directives(res_opt: 'ResolveOptions', res_guide: 'CdmAttributeResolutionGuidanceDefinition') -> None:  # pylint: disable=unused-argument
        to_impose = res_guide.imposed_directives
        if not to_impose or not res_opt.directives:
            return
        # Copy-on-write: never mutate a directive set shared with the caller.
        updated = res_opt.directives.copy()
        for directive in to_impose:
            updated.add(directive)
        res_opt.directives = updated

    applier = AttributeResolutionApplier()
    applier._match_name = 'does.imposeDirectives'
    applier._priority = 1
    applier._overrides_base = True
    applier._will_alter_directives = will_alter_directives
    applier._do_alter_directives = do_alter_directives
    return applier


_does_impose_directives = _does_impose_directives_builder()
def _does_remove_directives_builder():
    """Build the applier for 'does.removeDirectives': strip the guidance's
    removed directives from the resolve options."""
    def will_alter_directives(res_opt: 'ResolveOptions', res_guide: 'CdmAttributeResolutionGuidanceDefinition') -> bool:  # pylint: disable=unused-argument
        return True

    def do_alter_directives(res_opt: 'ResolveOptions', res_guide: 'CdmAttributeResolutionGuidanceDefinition') -> None:  # pylint: disable=unused-argument
        to_remove = res_guide.removed_directives
        if not to_remove or not res_opt.directives:
            return
        # Copy-on-write: never mutate a directive set shared with the caller.
        updated = res_opt.directives.copy()
        for directive in to_remove:
            updated.delete(directive)
        res_opt.directives = updated

    applier = AttributeResolutionApplier()
    applier._match_name = 'does.removeDirectives'
    applier._priority = 2
    applier._overrides_base = True
    applier._will_alter_directives = will_alter_directives
    applier._do_alter_directives = do_alter_directives
    return applier


_does_remove_directives = _does_remove_directives_builder()
def _does_select_attributes_builder():
    """Build the applier for 'does.selectAttributes' guidance (selects == 'one').

    Adds the 'selectOne' directive, and — unless output is 'structured' — adds
    the guidance's selected-type attribute (tagged 'is.linkedEntity.name') in an
    ADDED_ATTRIBUTE_SELECTED_TYPE context named '_selectedEntityName'.
    """
    def will_alter_directives(res_opt: 'ResolveOptions', res_guide: 'CdmAttributeResolutionGuidanceDefinition') -> bool:  # pylint: disable=unused-argument
        return res_guide.selects_sub_attribute.selects == 'one'

    def do_alter_directives(res_opt: 'ResolveOptions', res_guide: 'CdmAttributeResolutionGuidanceDefinition') -> None:  # pylint: disable=unused-argument
        # Copy before mutating so shared directive sets are not altered in place.
        res_opt.directives = res_opt.directives.copy() if res_opt.directives else AttributeResolutionDirectiveSet()
        res_opt.directives.add('selectOne')

    def will_round_add(app_ctx: 'ApplierContext') -> bool:
        dirs = app_ctx.res_opt.directives
        selects_one = dirs is not None and dirs.has('selectOne')
        structured = dirs is not None and dirs.has('structured')
        # When one class is being pulled from a list of them, add the class attribute unless this is a structured
        # output (assumes they know the class).
        return selects_one and not structured

    def do_round_add(app_ctx: 'ApplierContext') -> None:
        # Get the added attribute and applied trait.
        sub = cast('CdmAttributeDefinition', app_ctx.res_guide.selects_sub_attribute.selected_type_attribute)
        app_ctx.res_att_new.target = sub
        app_ctx.res_att_new.applier_state._flex_remove = False
        # Use the default name.
        app_ctx.res_att_new.resolved_name = sub.name
        # Add the trait that tells them what this means.
        if not sub.applied_traits or not next(filter(lambda atr: atr.fetch_object_definition_name() == 'is.linkedEntity.name', sub.applied_traits), False):
            sub.applied_traits.append('is.linkedEntity.name', True)
        # Get the resolved traits from attribute.
        app_ctx.res_att_new.resolved_traits = sub._fetch_resolved_traits(app_ctx.res_opt)
        app_ctx.res_guide_new = sub.resolution_guidance
        # make this code create a context for any copy of this attribute that gets repeated in an array
        # (note: resolves via closure to the applier built at the bottom of this function)
        app_ctx.res_att_new.applier_state._array_specialized_context = _does_select_attributes._do_create_context

    def will_create_context(app_ctx: 'ApplierContext') -> bool:
        dirs = app_ctx.res_opt.directives
        selects_one = dirs is not None and dirs.has('selectOne')
        structured = dirs is not None and dirs.has('structured')
        return selects_one and not structured

    def do_create_context(app_ctx: 'ApplierContext') -> None:
        # Make a new attributeContext to differentiate this supporting att.
        acp = AttributeContextParameters()
        acp._under = app_ctx.att_ctx
        acp._type = CdmAttributeContextType.ADDED_ATTRIBUTE_SELECTED_TYPE
        acp._name = '_selectedEntityName'
        app_ctx.att_ctx = _create_child_under(app_ctx.res_opt, acp)

    _does_select_attributes = AttributeResolutionApplier()
    _does_select_attributes._match_name = 'does.selectAttributes'
    _does_select_attributes._priority = 4
    _does_select_attributes._overrides_base = False
    _does_select_attributes._will_alter_directives = will_alter_directives
    _does_select_attributes._do_alter_directives = do_alter_directives
    _does_select_attributes._will_round_add = will_round_add
    _does_select_attributes._do_round_add = do_round_add
    _does_select_attributes._will_create_context = will_create_context
    _does_select_attributes._do_create_context = do_create_context
    return _does_select_attributes


_does_select_attributes = _does_select_attributes_builder()
def _does_disambiguate_names_builder():
    """Build the applier for 'does.disambiguateNames' guidance.

    Renames a resolved attribute using the guidance's rename_format, where
    '{a}' is the previous resolved name ('{A}' capitalises its first letter)
    and '{o}' is the attribute's current ordinal.
    """
    def will_attribute_modify(app_ctx: 'ApplierContext') -> bool:
        return app_ctx.res_att_source is not None and not app_ctx.res_opt.directives.has('structured')

    def do_attribute_modify(app_ctx: 'ApplierContext') -> None:
        if app_ctx.res_att_source:
            ren_format = app_ctx.res_guide.rename_format
            state = app_ctx.res_att_source.applier_state
            ordinal = str(state._flex_current_ordinal) if state and state._flex_current_ordinal is not None else ''
            format_length = len(ren_format)
            if not ren_format or not format_length:
                return
            # Parse the format looking for positions of {a} and {o} and text chunks around them there are only 5 possibilies.
            idx_a = ren_format.find('{a}')
            upper = False
            if idx_a < 0:
                idx_a = ren_format.find('{A}')
                upper = True
            idx_o = ren_format.find('{o}')

            def replace(start: int, at: int, length: int, value: str) -> str:
                # Splice *value* into ren_format[start:length] over the 3-char placeholder at *at*;
                # capitalise the first character of value when the '{A}' form was used.
                new_value = value[0].upper() + value[1:] if upper and value else value
                return ren_format[start:at] + new_value + ren_format[at+3:length]

            src_name = app_ctx.res_att_source.previous_resolved_name
            # The 5 cases: neither placeholder, only {o}, only {a}/{A}, {a} before {o}, {o} before {a}.
            if idx_a < 0 and idx_o < 0:
                result = ren_format
            elif idx_a < 0:
                result = replace(0, idx_o, format_length, ordinal)
            elif idx_o < 0:
                result = replace(0, idx_a, format_length, src_name)
            elif idx_a < idx_o:
                result = replace(0, idx_a, idx_o, src_name) + replace(idx_o, idx_o, format_length, ordinal)
            else:
                result = replace(0, idx_o, idx_a, ordinal) + replace(idx_a, idx_a, format_length, src_name)
            app_ctx.res_att_source.resolved_name = result

    _does_disambiguate_names = AttributeResolutionApplier()
    _does_disambiguate_names._match_name = 'does.disambiguateNames'
    _does_disambiguate_names._priority = 9
    _does_disambiguate_names._overrides_base = True
    _does_disambiguate_names._will_attribute_modify = will_attribute_modify
    _does_disambiguate_names._do_attribute_modify = do_attribute_modify
    return _does_disambiguate_names


_does_disambiguate_names = _does_disambiguate_names_builder()
def _does_reference_entity_via_builder():
def will_remove(app_ctx: 'ApplierContext') -> bool:
dirs = app_ctx.res_opt.directives
is_norm = dirs is not None and dirs.has('normalized')
is_array = dirs is not None and dirs.has('isArray')
is_ref_only = dirs is not None and dirs.has('referenceOnly')
always_add = app_ctx.res_guide.entity_by_reference.always_include_foreign_key
do_fk = (always_add or is_ref_only) and (not is_norm or not is_array)
visible = True
if do_fk and app_ctx.res_att_source:
# If in reference only mode, then remove everything that isn't marked to retain.
visible = always_add or (app_ctx.res_att_source.applier_state and not app_ctx.res_att_source.applier_state._flex_remove)
return not visible
def will_round_add(app_ctx: 'ApplierContext') -> bool:
dirs = app_ctx.res_opt.directives
is_norm = dirs is not None and dirs.has('normalized')
is_array = dirs is not None and dirs.has('isArray')
is_ref_only = dirs is not None and dirs.has('referenceOnly')
always_add = app_ctx.res_guide.entity_by_reference.always_include_foreign_key is True
# Add a foreign key and remove everything else when asked to do so. However, avoid doing this for normalized
# arrays, since they remove all atts anyway.
return (is_ref_only or always_add) and (not is_norm or not is_array)
def do_round_add(app_ctx: 'ApplierContext') -> None:
# Get the added attribute and applied trait.
sub = cast('CdmAttributeDefinition', app_ctx.res_guide.entity_by_reference.foreign_key_attribute)
app_ctx.res_att_new.target = sub
app_ctx.res_att_new.applier_state._flex_remove = False
# Use the default name.
app_ctx.res_att_new.resolved_name = sub.name
# Add the trait that tells them | |
if not mean:
avg = np.nanmedian(data)
else:
avg = np.nanmean(data)
# Generate boostrap resamples, and take averages of them
samples = np.array([np.random.choice(data,size=len(data),replace=True) for i in range(n_samples)])
if not mean:
samples_avgs = np.nanmedian(samples, axis=1)
else:
samples_avgs = np.nanmean(samples, axis=1)
# Find uncertainty on sample averages
if percentile == False:
avg_unc = np.std(samples_avgs)
else:
avg_unc = np.nanpercentile(np.abs(samples_avgs-avg), percentile)
# Return result
return avg_unc
# Function to find the uncertainty on a median (or mean) of a dataset containing a selection of subsets by bootstrapping
# Args: 2D array of data in question [n_subsets X n_datapoints]; (int of number of boostrap resamples to use; boolean of whether to use mean instead of median; boolean False or quantile to use if using percentile instead of standard deviation)
# Returns: Computed uncertainty on average
def SubsetAvgUncBootstrap(data, n_samples=100, mean=False, percentile=False):
    # Take average of the full dataset
    if not mean:
        avg = np.nanmedian(data)
    else:
        avg = np.nanmean(data)
    # Generate bootstrap resamples, by drawing a random sample of the subsets (rows) with replacement
    samples = np.nan * np.zeros([data.shape[0], data.shape[1], n_samples])
    for b in range(n_samples):
        indxs = np.random.choice(np.arange(data.shape[0]), size=data.shape[0], replace=True)
        samples[:,:,b] = data[indxs,:]
    # Take averages of resampled data; collapse over the subset and datapoint axes (0,1) so that
    # one average is produced per bootstrap resample. (The previous mean branch reduced over
    # axis=2, which averaged across resamples instead and measured the wrong scatter.)
    if not mean:
        samples_avgs = np.nanmedian(samples, axis=(0,1))
    else:
        samples_avgs = np.nanmean(samples, axis=(0,1))
    # Find uncertainty on sample averages
    if percentile == False:
        avg_unc = np.std(samples_avgs)
    else:
        avg_unc = np.nanpercentile(np.abs(samples_avgs-avg), percentile)
    # Return result
    return avg_unc
# Function to trim an array to a given size
# Args: Array to be trimmed, i & j coordinates of centre of trimmed region, width of trimmed region
# Returns: Trimmed array, and [i, j] position of the requested centre within it
def Trim(data, i_centre, j_centre, width):
    half = int(round(float(width) / 2.0))
    # Clamp the cutout to the array bounds
    i_lo = max([0, i_centre - half])
    i_hi = min([data.shape[0], i_centre + half])
    j_lo = max([0, j_centre - half])
    j_hi = min([data.shape[1], j_centre + half])
    trimmed = data[int(round(i_lo)):int(round(i_hi)), int(round(j_lo)):int(round(j_hi))]
    # Work out where the requested centre now sits inside the cutout
    i_centre = (int(round(float(trimmed.shape[0]) / 2.0)) + 1) + ((i_centre - half) - int(round(i_lo)))
    j_centre = (int(round(float(trimmed.shape[1]) / 2.0)) + 1) + ((j_centre - half) - int(round(j_lo)))
    return trimmed, [i_centre, j_centre]
# Wrapper around Scipy grid interpolation function, to impute NaN pixels in 2D data
# Args: Image to have NaN pixels imputed over
# Returns: Image with NaN pixels imputed
def ImputeImage(img_in):
    nan_mask = np.isnan(img_in)
    bad_coords = np.where(nan_mask)
    good_coords = np.where(~nan_mask)
    # Cubic interpolation from the valid pixels onto the NaN positions
    filled = scipy.interpolate.griddata(good_coords, img_in[good_coords], bad_coords, method='cubic')
    img_out = img_in.copy()
    img_out[bad_coords] = filled
    return img_out
# Function that uses Driver & Robotham (2010) formula to give percentage cosmic variance
# Args: Survey volume (in Mpc^3, assuming H0=70 km s^-1 Mpc^-1), number of survey fields, survey field aspect ratio
# Returns: Percentage cosmic variance
def CosmicVariance(v, n, x):
    v, n, x = float(v), float(n), float(x)
    log_v = np.log10(v)
    # Aspect-ratio correction term
    aspect_term = 1.00 - (0.03 * np.sqrt(x - 1.0))
    # Volume / field-count term
    volume_term = (219.7 - (52.4 * log_v) + (3.21 * log_v ** 2.0)) / n ** 0.5
    return aspect_term * volume_term
# Function to convert the bin-edge output of np.histogram to be bin-centred (and thus of same dimensions as bin totals)
# Args: Array of bin edges
# Returns: Array of bin centres
def HistBinMerge(bin_edges):
    bin_edges = np.array(bin_edges)
    # Each centre is the midpoint of a pair of adjacent edges; vectorised instead of
    # the previous explicit loop (which also crashed on an empty edge array)
    return 0.5 * (bin_edges[:-1] + bin_edges[1:])
# Function to perform gaussian fit to data
# Args: Array of data to be fit, number of histogram bins to use
# Returns: Mean of fit, standard deviation of fit
def GaussFit(data, n_bins=50):
    def _gauss(x, *params):
        amp, mu, sigma = params
        return amp * np.exp((-((x - mu) ** 2.0)) / (2.0 * sigma ** 2.0))

    # Histogram the data, then fit the gaussian profile to the bin totals
    hist_tots, hist_edges = np.histogram(data, bins=n_bins)
    hist_centres = HistBinMerge(hist_edges)
    guesses = [np.max(hist_tots), np.mean(data), np.std(data)]
    fit = scipy.optimize.curve_fit(_gauss, hist_centres, hist_tots, p0=guesses)
    # Return |mu| and |sigma| of the fitted profile
    return abs(fit[0][1]), abs(fit[0][2])
# Function to convert an observed brightness into a luminosity
# Args: Flux (Jy), distance (pc), frequency or boolean False for nuSnu luminosity (Hz or False), boolean to switch flux input to AB magnitudes
# Returns: Luminosity in bolometric solar luminosities
def FluxToLum(flux, dist, freq=False, mags=False):
    # Convert AB magnitudes to flux density in Jy if requested
    if mags == True:
        flux = ABMagsToJy(flux)
    # Inverse-square law; distance converted pc -> m via 1 pc = 3.26 ly, 1 ly = 9.5E15 m
    watts_per_hz = 10.0**-26.0 * flux * 4.0 * np.pi * (dist * 3.26 * 9.5E15)**2.0
    if freq == False:
        watts = watts_per_hz
    elif freq > 0:
        watts = watts_per_hz * freq
    else:
        # Previously this path fell through and raised a NameError on 'watts'; fail explicitly instead
        raise ValueError('freq must be False (for a per-Hz luminosity) or a positive frequency in Hz')
    # Convert watts to bolometric solar luminosities
    Lsol = watts / 3.846E26
    return Lsol
# Function to convert SDSS-III "nanomaggies" (nMgy) into pogson magnitudes
# Args: Value to be converted (nanomaggies)
# Returns: Pogson magnitudes (mags; duh)
def nMaggiesToMags(nMaggies):
    # A source of 1 nMgy sits at the 22.5 mag zeropoint
    return 22.5 - (2.5 * np.log10(nMaggies))
# Function to convert GAMA data units into AB pogson magnitudes
# Args: Value to be converted (data units)
# Returns: AB pogson magnitudes (mags; duh)
def GAMACountsToMags(GAMA):
    # GAMA count scale has a 30.0 mag zeropoint
    return 30.0 - (2.5 * np.log10(GAMA))
# Function to convert from AB pogson magnitudes into GAMA data units
# Args: Value to be converted (mags)
# Returns: GAMA data units
def GAMAMagsToCounts(mag):
    # Inverse of GAMACountsToMags (30.0 mag zeropoint)
    return 10.0 ** ((30.0 - mag) / 2.5)
# Function to convert from AB pogson magnitudes into flux in janskys
# Args: Value to be converted (mags)
# Returns: Source flux density (Jy)
def ABMagsToJy(mag):
    # 23.9 mag corresponds to 1 uJy; scale to Jy
    return 1E-6 * 10.0 ** ((23.9 - mag) / 2.5)
# Function to convert from flux in janskys to AB pogson magnitudes
# Args: Value to be converted (Jy)
# Returns: AB pogson magnitudes (mags)
def JyToABMags(Jy):
    # Inverse of ABMagsToJy: 1 uJy corresponds to 23.9 mag
    return 23.9 - (2.5 * np.log10(Jy * 10 ** 6.0))
# Function to convert an uncertainty in AB pogson magnitudes to an uncertainty in janskys
# Args: Uncertainty to be converted (mags), and its associated measurement (mags)
# Returns: Uncertainty in flux density (Jy)
def ErrABMagsToJy(err, mag):
    """Convert a magnitude uncertainty to a flux-density uncertainty (Jy).

    The upper and lower magnitude bounds are converted separately (flux is
    non-linear in magnitude) and the two half-intervals are averaged.
    """
    centre = ABMagsToJy(mag)
    faint_side = centre - ABMagsToJy(mag + err)
    bright_side = ABMagsToJy(mag - err) - centre
    return (faint_side + bright_side) / 2.0
# Function to convery absolute AB pogson magnitudes into solar luminosities
# Args: Absolute AB pogson magnitude (Mags)
# Returns: Luminosity (Lsol):
def ABAbsToLsol(Mag):
    """Convert an absolute AB Pogson magnitude to bolometric solar luminosities."""
    return 10.0 ** ((4.58 - Mag) / 2.51)
# Function to convert an RMS deviation in relative linear flux to magnitudes
# Args: Relative RMS deviation in flux
# Returns: RMS deviation in mangitude
def RMSFluxToMags(S_rms):
    """Convert an RMS deviation in relative linear flux to magnitudes."""
    return abs(2.5 * np.log10(1.0 - S_rms))
# Function to convert an RMS deviation in magnitude to relative linear flux
# Args: RMS deviation in magnitude
# Returns: RMS deviation in relative flux
def RMSMagsToFlux(m_rms):
    """Convert an RMS deviation in magnitudes to relative linear flux (inverse of RMSFluxToMags)."""
    return 1.0 - abs(10.0 ** (m_rms / -2.5))
# New function to convert an uncertainty to log space
# Args: Value, uncertainty
# Returns: Logarithmic uncertainty
def LogError(value, error):
    """Convert a linear uncertainty to log10 space.

    The fractional error is applied multiplicatively up and down, and the
    two logarithmic half-intervals are averaged.
    """
    value = np.array(value)
    error = np.array(error)
    frac = 1.0 + (error / value)
    log_value = np.log10(value)
    half_up = np.abs(np.log10(value * frac) - log_value)
    half_down = np.abs(log_value - np.log10(value / frac))
    return 0.5 * (half_up + half_down)
# Function to convert a logarithmic uncertainty to linear space
# Args: Logarithmic value, logarithmic uncertainty, boolean of whether average unlogged errors or return them asymetrically
# Returns: Linear uncertainty
def UnlogError(log_value, log_error, bounds=False):
    """Convert a log10 uncertainty back to linear space.

    Args:
        log_value: Logarithmic value.
        log_error: Logarithmic uncertainty.
        bounds: If False, return a single symmetrised linear uncertainty;
            if True, return the [upper, lower] signed error bounds.
    """
    if bounds==False:
        value = 10**log_value
        upper = 10**(log_value + log_error)
        lower = 10**(log_value - log_error)
        # Average the multiplicative deviation above with the reciprocal of
        # the one below, then express the result as an absolute uncertainty.
        frac = (0.5 * (upper / value + (lower / value)**-1)) - 1.0
        return frac * value
    elif bounds==True:
        return [10.0**(log_value + log_error) - 10.0**log_value,
                10.0**(log_value - log_error) - 10.0**log_value]
# Function to generate appropriate dimensions plot divisions of a figure in along a given direction
# Args: Index of plot element in question, total number of plot elements, dimension of figure, x or y axis,
# Returns: Starting position of plot, dimension of plot
def GridPos(n_plot, n_tot, img_dim, axis='y', nonstandard=False, gaps=False):
    """Compute the normalised start position and extent of one plot panel.

    Args:
        n_plot: Index (0-based) of the plot element in question.
        n_tot: Total number of plot elements along this axis.
        img_dim: Figure dimension along this axis.
        axis: 'y' or 'x'; selects the standard reference base dimension.
        nonstandard: If > 0, use this as the reference base dimension
            instead of the standard 6.0 (y) / 8.0 (x).
        gaps: If > 0, fraction of each panel reserved as an inter-panel gap.

    Returns:
        Tuple (plot_start, plot_dim) as fractions of the figure dimension.

    Raises:
        ValueError: If no base dimension can be determined.
    """
    if nonstandard > 0:
        base = nonstandard
    elif axis == 'y':
        base = 6.0
    elif axis == 'x':
        base = 8.0
    else:
        # Previously `base` was left unbound here, raising NameError below.
        raise ValueError("axis must be 'x' or 'y' when nonstandard is not set")
    n_plot, n_tot, img_dim = float(n_plot), float(n_tot), float(img_dim)
    # Fixed margins, scaled relative to the standard figure size.
    margin_start = 0.125 * (base / img_dim)
    margin_end = (1.0 - 0.95) * (base / img_dim)
    fig_start = margin_start
    fig_end = 1.0 - margin_end
    fig_dim = fig_end - fig_start
    plot_dim = fig_dim / n_tot
    plot_start = fig_start + (n_plot * plot_dim)
    if gaps > 0:
        # Shift into the panel and shrink it to leave the requested gap.
        plot_start += (0.5 * gaps) * plot_dim
        plot_dim *= 1.0 - (0.5 * gaps)
    return plot_start, plot_dim
# Function to find the Sheather-Jones bandwidth estimator (<NAME>, 1991), adapted from: https://github.com/Neojume/pythonABC
# Args: Array of values of which bandwidth will be found
# Returns: Sheather-Jones bandwidth of array
def SheatherJonesBW(x, weights=None):
# Define Equation 12 from Sheather & Jones (1991)
def sj12(x, h):
phi6 = lambda x: (x ** 6 - 15 * x ** 4 + 45 * x ** 2 - 15) * scipy.stats.norm.pdf(x, loc=0.0, scale=1.0)
phi4 = lambda | |
'24608596',
'24608211', '24604616']},
{'id': 'vrvW_sr8_1en', 'name': 'Kristianstad', 'type': 'municipality', 'code': '1290', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 36,
'ad_ids': ['24649118', '24646547', '24645051', '24642902', '24639033', '24636394', '24636028', '24633571',
'24631796', '24623098']},
{'id': 'dLxo_EpC_oPe', 'name': 'Simrishamn', 'type': 'municipality', 'code': '1291', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 5, 'ad_ids': ['24636522', '24601452', '24550006', '24554806', '24502004']},
{'id': 'pCuv_P5A_9oh', 'name': 'Ängelholm', 'type': 'municipality', 'code': '1292', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 29,
'ad_ids': ['24648062', '24646052', '24645263', '24644897', '24643870', '24643102', '24642941', '24641238',
'24641268', '24635578']},
{'id': 'bP5q_53x_aqJ', 'name': 'Hässleholm', 'type': 'municipality', 'code': '1293', 'region_code': '12',
'region_name': 'Skåne län', 'hits': 20,
'ad_ids': ['24645166', '24639356', '24625756', '24621467', '24614517', '24615762', '24610880', '24605726',
'24588549', '24597762']},
{'id': 'ocMw_Rz5_B1L', 'name': 'Kil', 'type': 'municipality', 'code': '1715', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 3, 'ad_ids': ['24649278', '24649261', '24492496']},
{'id': 'N5HQ_hfp_7Rm', 'name': 'Eda', 'type': 'municipality', 'code': '1730', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 8,
'ad_ids': ['24650359', '24649389', '24641459', '24638309', '24625958', '24625946', '24621089', '24601242']},
{'id': 'hQdb_zn9_Sok', 'name': 'Torsby', 'type': 'municipality', 'code': '1737', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 6,
'ad_ids': ['24649740', '24649672', '24641489', '24622563', '24547171', '24539768']},
{'id': 'mPt5_3QD_LTM', 'name': 'Storfors', 'type': 'municipality', 'code': '1760', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 2, 'ad_ids': ['24605489', '24586914']},
{'id': 'x5qW_BXr_aut', 'name': 'Hammarö', 'type': 'municipality', 'code': '1761', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 2, 'ad_ids': ['24612858', '24606451']},
{'id': 'x73h_7rW_mXN', 'name': 'Munkfors', 'type': 'municipality', 'code': '1762', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 1, 'ad_ids': ['24546455']},
{'id': 'xnEt_JN3_GkA', 'name': 'Forshaga', 'type': 'municipality', 'code': '1763', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 2, 'ad_ids': ['24612399', '24602480']},
{'id': 'PSNt_P95_x6q', 'name': 'Grums', 'type': 'municipality', 'code': '1764', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 2, 'ad_ids': ['24539560', '24566552']},
{'id': 'ymBu_aFc_QJA', 'name': 'Årjäng', 'type': 'municipality', 'code': '1765', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 1, 'ad_ids': ['24621703']},
{'id': 'oqNH_cnJ_Tdi', 'name': 'Sunne', 'type': 'municipality', 'code': '1766', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 4, 'ad_ids': ['24642867', '24622363', '24601636', '24590484']},
{'id': 'hRDj_PoV_sFU', 'name': 'Karlstad', 'type': 'municipality', 'code': '1780', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 49,
'ad_ids': ['24650860', '24645012', '24642294', '24641834', '24641215', '24641140', '24639997', '24637408',
'24635991', '24635497']},
{'id': 'SVQS_uwJ_m2B', 'name': 'Kristinehamn', 'type': 'municipality', 'code': '1781', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 5, 'ad_ids': ['24649351', '24646057', '24617982', '24610610', '24535779']},
{'id': 'UXir_vKD_FuW', 'name': 'Filipstad', 'type': 'municipality', 'code': '1782', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 0, 'ad_ids': []},
{'id': 'qk9a_g5U_sAH', 'name': 'Hagfors', 'type': 'municipality', 'code': '1783', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 3, 'ad_ids': ['24632958', '24614330', '24504796']},
{'id': 'yGue_F32_wev', 'name': 'Arvika', 'type': 'municipality', 'code': '1784', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 5, 'ad_ids': ['24626919', '24595432', '24594812', '24591333', '24532373']},
{'id': 'wmxQ_Guc_dsy', 'name': 'Säffle', 'type': 'municipality', 'code': '1785', 'region_code': '17',
'region_name': 'Värmlands län', 'hits': 5, 'ad_ids': ['24649317', '24612041', '24574208', '24558595', '24381921']},
{'id': '4eS9_HX1_M7V', 'name': 'Vansbro', 'type': 'municipality', 'code': '2021', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 2, 'ad_ids': ['24602964', '24464433']},
{'id': 'FPCd_poj_3tq', 'name': 'Malung-Sälen', 'type': 'municipality', 'code': '2023', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 5, 'ad_ids': ['24649158', '24617319', '24608124', '24606239', '24544803']},
{'id': 'Nn7p_W3Z_y68', 'name': 'Gagnef', 'type': 'municipality', 'code': '2026', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 1, 'ad_ids': ['24649209']},
{'id': '7Zsu_ant_gcn', 'name': 'Leksand', 'type': 'municipality', 'code': '2029', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 3, 'ad_ids': ['24640824', '24626128', '24571716']},
{'id': 'Jy3D_2ux_dg8', 'name': 'Rättvik', 'type': 'municipality', 'code': '2031', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 4, 'ad_ids': ['24647031', '24647028', '24621580', '24595880']},
{'id': 'CRyF_5Jg_4ht', 'name': 'Orsa', 'type': 'municipality', 'code': '2034', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 6,
'ad_ids': ['24629334', '24608617', '24566875', '24561183', '24523938', '24488375']},
{'id': 'cZtt_qGo_oBr', 'name': 'Älvdalen', 'type': 'municipality', 'code': '2039', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 4, 'ad_ids': ['24626713', '24621302', '24576229', '24576225']},
{'id': '5zZX_8FH_Sbq', 'name': 'Smedjebacken', 'type': 'municipality', 'code': '2061', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 3, 'ad_ids': ['24645686', '24645204', '24593349']},
{'id': 'UGcC_kYx_fTs', 'name': 'Mora', 'type': 'municipality', 'code': '2062', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 12,
'ad_ids': ['24648097', '24624498', '24623037', '24623017', '24593694', '24587438', '24585960', '24572253',
'24548037', '24539727']},
{'id': 'N1wJ_Cuu_7Cs', 'name': 'Falun', 'type': 'municipality', 'code': '2080', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 33,
'ad_ids': ['24649195', '24646230', '24642403', '24640180', '24639093', '24637700', '24633983', '24628486',
'24622858', '24621668']},
{'id': 'cpya_jJg_pGp', 'name': 'Borlänge', 'type': 'municipality', 'code': '2081', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 22,
'ad_ids': ['24648735', '24645716', '24643955', '24640978', '24638705', '24634803', '24627930', '24624426',
'24620908', '24615413']},
{'id': 'c3Zx_jBf_CqF', 'name': 'Säter', 'type': 'municipality', 'code': '2082', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 3, 'ad_ids': ['24646152', '24564510', '24638537']},
{'id': 'DE9u_V4K_Z1S', 'name': 'Hedemora', 'type': 'municipality', 'code': '2083', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 5, 'ad_ids': ['24617875', '24596329', '24595146', '24577346', '24518006']},
{'id': 'Szbq_2fg_ydQ', 'name': 'Avesta', 'type': 'municipality', 'code': '2084', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 4, 'ad_ids': ['24643237', '24616778', '24612778', '24596510']},
{'id': 'Ny2b_2bo_7EL', 'name': 'Ludvika', 'type': 'municipality', 'code': '2085', 'region_code': '20',
'region_name': 'Dalarnas län', 'hits': 15,
'ad_ids': ['24641952', '24640038', '24636562', '24636403', '24636399', '24636392', '24627279', '24618666',
'24608534', '24607134']},
{'id': 'GEvW_wKy_A9H', 'name': 'Ockelbo', 'type': 'municipality', 'code': '2101', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 3, 'ad_ids': ['24647677', '24644885', '24636701']},
{'id': 'yuNd_3bg_ttc', 'name': 'Hofors', 'type': 'municipality', 'code': '2104', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 2, 'ad_ids': ['24643718', '24638172']},
{'id': 'JPSe_mUQ_NDs', 'name': 'Ovanåker', 'type': 'municipality', 'code': '2121', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 3, 'ad_ids': ['24648796', '24626141', '24588647']},
{'id': 'fFeF_RCz_Tm5', 'name': 'Nordanstig', 'type': 'municipality', 'code': '2132', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 0, 'ad_ids': []},
{'id': '63iQ_V6F_REB', 'name': 'Ljusdal', 'type': 'municipality', 'code': '2161', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 3, 'ad_ids': ['24624560', '24621604', '24604812']},
{'id': 'qk8Y_2b6_82D', 'name': 'Gävle', 'type': 'municipality', 'code': '2180', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 61,
'ad_ids': ['24648286', '24647457', '24645453', '24643119', '24641947', '24641752', '24641744', '24639606',
'24639443', '24638181']},
{'id': 'BbdN_xLB_k6s', 'name': 'Sandviken', 'type': 'municipality', 'code': '2181', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 11,
'ad_ids': ['24640410', '24639185', '24630586', '24610267', '24602729', '24587145', '24586302', '24578542',
'24576851', '24558652']},
{'id': 'JauG_nz5_7mu', 'name': 'Söderhamn', 'type': 'municipality', 'code': '2182', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 6,
'ad_ids': ['24640054', '24616079', '24614547', '24595502', '24503253', '24488845']},
{'id': 'KxjG_ig5_exF', 'name': 'Bollnäs', 'type': 'municipality', 'code': '2183', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 12,
'ad_ids': ['24647491', '24623857', '24623859', '24632941', '24631240', '24613810', '24612003', '24590238',
'24590045', '24548369']},
{'id': 'Utks_mwF_axY', 'name': 'Hudiksvall', 'type': 'municipality', 'code': '2184', 'region_code': '21',
'region_name': 'Gävleborgs län', 'hits': 26,
'ad_ids': ['24650607', '24650499', '24649045', '24648589', '24641095', '24641643', '24641698', '24647095',
'24646916', '24645190']},
{'id': 'swVa_cyS_EMN', 'name': 'Ånge', 'type': 'municipality', 'code': '2260', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 4, 'ad_ids': ['24639288', '24628389', '24610700', '24460553']},
{'id': 'oJ8D_rq6_kjt', 'name': 'Timrå', 'type': 'municipality', 'code': '2262', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 5,
'ad_ids': ['24649312', '24628388', '24620973', '24579351', '24504810']},
{'id': 'uYRx_AdM_r4A', 'name': 'Härnösand', 'type': 'municipality', 'code': '2280', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 14,
'ad_ids': ['24649670', '24634810', '24626434', '24626359', '24610521', '24604584', '24599753', '24595015',
'24588000', '24568790']},
{'id': 'dJbx_FWY_tK6', 'name': 'Sundsvall', 'type': 'municipality', 'code': '2281', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 58,
'ad_ids': ['24650752', '24650176', '24650130', '24650080', '24649995', '24649952', '24648358', '24646387',
'24645570', '24645032']},
{'id': 'yR8g_7Jz_HBZ', 'name': 'Kramfors', 'type': 'municipality', 'code': '2282', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 8,
'ad_ids': ['24649705', '24633992', '24633462', '24627834', '24587306', '24582328', '24574236', '24550420']},
{'id': 'v5y4_YPe_TMZ', 'name': 'Sollefteå', 'type': 'municipality', 'code': '2283', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 9,
'ad_ids': ['24649400', '24649380', '24642982', '24642980', '24634683', '24605190', '24588189', '24540108',
'24455320']},
{'id': 'zBmE_n6s_MnQ', 'name': 'Örnsköldsvik', 'type': 'municipality', 'code': '2284', 'region_code': '22',
'region_name': 'Västernorrlands län', 'hits': 23,
'ad_ids': ['24650185', '24649663', '24648830', '24648370', '24646067', '24643411', '24641851', '24634399',
'24632450', '24624920']},
{'id': 'Voto_egJ_FZP', 'name': 'Ragunda', 'type': 'municipality', 'code': '2303', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 0, 'ad_ids': []},
{'id': 'eNSc_Nj1_CDP', 'name': 'Bräcke', 'type': 'municipality', 'code': '2305', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 1, 'ad_ids': ['24615354']},
{'id': 'yurW_aLE_4ga', 'name': 'Krokom', 'type': 'municipality', 'code': '2309', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 0, 'ad_ids': []},
{'id': 'ppjq_Eci_Wz9', 'name': 'Strömsund', 'type': 'municipality', 'code': '2313', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 6,
'ad_ids': ['24646613', '24635521', '24634425', '24611237', '24608422', '24566029']},
{'id': 'D7ax_CXP_6r1', 'name': 'Åre', 'type': 'municipality', 'code': '2321', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 2, 'ad_ids': ['24587180', '24572426']},
{'id': 'gRNJ_hVW_Gpg', 'name': 'Berg', 'type': 'municipality', 'code': '2326', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 1, 'ad_ids': ['24626189']},
{'id': 'j35Q_VKL_NiM', 'name': 'Härjedalen', 'type': 'municipality', 'code': '2361', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 8,
'ad_ids': ['24650648', '24649337', '24648475', '24626268', '24615961', '24600435', '24565037', '24560583']},
{'id': 'Vt7P_856_WZS', 'name': 'Östersund', 'type': 'municipality', 'code': '2380', 'region_code': '23',
'region_name': 'Jämtlands län', 'hits': 36,
'ad_ids': ['24650954', '24650862', '24648805', '24647118', '24640165', '24637613', '24634928', '24634409',
| |
<reponame>bobofei/Mohou_Box-master
#Copyright (c) 2015 3D Control Systems LTD
#3DPrinterOS client is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#3DPrinterOS client is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#You should have received a copy of the GNU Affero General Public License
#along with 3DPrinterOS client. If not, see <http://www.gnu.org/licenses/>.
# Author: <NAME> <<EMAIL>>
import re
import time
import logging
import threading
import os
# import makerbot_driver
# import serial
# import serial.serialutil
import datetime
# X = [0][0] | 0.1 = 8
# Y = [0][1] | 0.1 = 8
# Z = [0][2] | 0.1 = 4
# E = [0][3]
#import log
from printer import BasePrinter
from printer import BreakPointThread
import gpx
import Queue
#import config
class Printer(BasePrinter):
PAUSE_STEP_TIME = 0.5
BUFFER_OVERFLOW_WAIT = 0.01
IDLE_WAITING_STEP = 0.1
TEMP_UPDATE_PERIOD = 5
GODES_BETWEEN_READ_STATE = 100
BUFFER_OVERFLOWS_BETWEEN_STATE_UPDATE = 20
MAX_RETRY_BEFORE_ERROR = 100
    def __init__(self, profile, usb_info):
        """Create a Makerbot printer wrapper and open the connection.

        Args:
            profile: Printer profile dict; must contain 'type', 'gpx_verbose'
                and 'baudrate' (list of baudrates to try).
            usb_info: Dict describing the USB device; must contain 'COM'.

        Calls self.init() at the end, which raises RuntimeError if no
        connection can be established.
        """
        BasePrinter.__init__(self, profile, usb_info)
        self.logger = logging.getLogger(__name__)
        self.logger.info('Makerbot printer created')
        # gpx log and per-printer-type profile live next to the package.
        self.gpx_logfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "logs", "gpx.log")
        self.gpx_profile = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "config", "gpx_profiles", profile['type']+'.ini')
        self.gpx_verbose = profile['gpx_verbose']
        # self.init_target_temp_regexps()
        self.execution_lock = threading.Lock()
        self.buffer_lock = threading.Lock()
        self.parser = None
        #self.finished_flag = False
        #self.correct_baudrate = None
        # NOTE(review): this local is never used here; init() references an
        # undefined `retries` of its own -- confirm intended scope.
        retries = 5
        self.sending_thread = None
        self.monitor_thread = None
        self.monitor2_thread = None
        self.breakpoint_thread = None
        self.response_queue = Queue.Queue()
        self.connect_flag = False
        self.reinit_flag = False
        self.printer_state = 1  # 1 = created / closed
        self.start_time = None
        self.print_time_escape = "00:00:00"
        self.print_time_remain = "00:00:00"
        self.print_total_time = None
        self.current_tool = None
        self.temp = {}  # per-extruder (current, target) temperatures
        self.bed_temp = None  # (current, target) bed temperature
        self.init()
def init(self):
if not self.usb_info.get('COM'):
raise RuntimeError("No serial port detected for serial printer")
for baudrate in self.profile['baudrate']:
while not self.sending_thread:
try:
self.printer_state = 2
connect_result = gpx.connect(self.usb_info.get('COM'), baudrate, self.gpx_profile, self.gpx_logfile, self.gpx_verbose)
self.logger.info("connect result: " + str(connect_result))
if 'start' in connect_result:
time.sleep(0.1)
start_result = gpx.start()
self.logger.info("connect result: " + str(start_result))
self.correct_baudrate = baudrate
else:
break
except Exception as e:
if retries > 0:
retries -= 1
self.logger.warning("Error connecting to printer %s\n%s" % (str(profile), str(e)))
time.sleep(1)
else:
break
else:
self.stop_flag = False
self.pause_flag = False
self.printing_flag = False
self.cancel_flag = False
self.current_tool = gpx.get_current_tool()
self.sending_thread = threading.Thread(target=self.send_gcodes, name="send_gcodes")
self.monitor_thread = threading.Thread(target=self.monitor, name="monitor")
self.monitor2_thread = threading.Thread(target=self.monitor2, name="monitor2")
self.breakpoint_thread = BreakPointThread(self, name="breakpoint_thread")
self.sending_thread.start()
self.monitor_thread.start()
self.monitor2_thread.start()
self.breakpoint_thread.start()
if self.sending_thread.is_alive():
self.breakpoint_index = 0 # 不支持断电续打
if self.breakpoint_index > 0:
self.unbuffered_gcodes("\n".join(self.outage_gcodes))
self.printer_state = 10
else:
self.unbuffered_gcodes("\n".join(self.profile["end_gcodes"]))
break
if self.sending_thread:
break
if not self.sending_thread:
self.printer_state = 130
raise RuntimeError("Error connecting to printer %s\n%s" % (str(profile), str(e)))
    def reinit(self):
        """Tear down and re-establish the gpx connection, resetting print state.

        Used after a cancel/abort; reconnects at the baudrate that worked
        during init() and clears all per-print bookkeeping.
        """
        self.reinit_flag = True  # tells the worker loops to back off while reconnecting
        gpx.disconnect()
        time.sleep(0.2)
        connect_res = gpx.connect(self.usb_info.get('COM'), self.correct_baudrate, self.gpx_profile, self.gpx_logfile, self.gpx_verbose)
        self.reinit_flag = False
        time.sleep(0.5)
        start_res = gpx.start()
        self.logger.info("reinit: connect result %s, start result %s" % (str(connect_res), str(start_res)));
        # Reset timing and print bookkeeping for the next job.
        self.start_time = None
        self.print_time_escape = "00:00:00"
        self.print_time_remain = "00:00:00"
        self.stop_flag = False
        self.pause_flag = False
        self.printing_flag = False
        self.cancel_flag = False
        self.current_tool = gpx.get_current_tool()
        self.current_print_file = None
        self.initBreakPoint()
        self.breakpoint_index = 0
        self.breakpoint_print_time = 0
    def append_position_and_lift_extruder(self):
        """Lift the extruder away from the print and queue a move back.

        Queues a 'G1 Z<current>' at the FRONT of the buffer so the head
        returns to its height on resume, then immediately lifts the head
        30 units (capped at Z=160 -- presumably the machine's Z limit;
        TODO confirm against the printer profile).
        """
        position = gpx.get_current_position()
        if position:
            with self.buffer_lock:
                # Restore move runs first when printing resumes.
                self.buffer.appendleft('G1 Z' + str(position["z"]))
            z = min(160, position["z"] + 30)
            self.write('G1 Z' + str(z))
def _append(self, s):
if (s is not None and s != ''):
if '\n' in s:
for item in s.split('\n'):
self.response_queue.put(item)
else:
self.response_queue.put(s)
# length argument is used for unification with Printrun. DON'T REMOVE IT!
    def set_total_gcodes(self, length):
        """Begin a new build: record the expected gcode count and send the build header.

        Args:
            length: Total number of gcodes in the job (kept for Printrun
                interface compatibility -- see comment above).
        """
        #self.reinit()
        self.total_gcodes = length
        # Announce the build to the firmware ('M136' starts build notification).
        self.write('(@build "Mohou3D")')
        self.write("M136 (Mohou3D)")
        self.current_line_number = 0
        self.logger.info('Begin of GCodes')
        self.printing_flag = False
        #self.execute(lambda: self.parser.s3g.set_RGB_LED(255, 255, 255, 0))
    def load_gcodes(self, gcodes):
        """Load the first part of a job into the send buffer and start printing.

        Args:
            gcodes: Raw gcode text (newline separated).

        Returns:
            True when the gcodes were queued, False for empty input or when
            a previous print failed (state 0x87).
        """
        if gcodes is None or gcodes == "":
            self.logger.info("load_gcodes(): Empty gcodes.")
            return False
        if self.printer_state == 0x87:
            self.logger.info("load_gcodes(): previous print failed.")
            return False
        gcode_new = self.remove_comments(gcodes)
        self.logger.info("printer.total_gcodes: " + str(self.total_gcodes))
        self.total_gcodes_part1 = len(gcode_new)
        #self.total_gcodes += 99
        self.set_total_gcodes(self.total_gcodes)
        if self.breakpoint_index > 0:
            # Resume a little before the recorded breakpoint line.
            # NOTE(review): the -11 offset is unexplained -- confirm it matches
            # the breakpoint bookkeeping in BreakPointThread.
            self.current_line_number = self.breakpoint_index - 11
        else:
            self.current_line_number = 0
        with self.buffer_lock:
            for code in gcode_new:
                self.buffer.append(code)
        self.printing_flag = True
        self.start_time = None
        self.print_time_escape = "00:00:00"
        self.print_time_remain = "00:00:00"
        return True
def append_gcodes(self, gcodes):
if gcodes is None or gcodes == "":
self.logger.info("append_gcodes(): Empty gcodes.")
return False
if self.printer_state == 0x87:
self.logger.info("load_gcodes(): previous print failed.")
return False
gcode_new = self.remove_comments(gcodes)
self.total_gcodes_part2 = len(gcode_new)
self.total_gcodes = self.total_gcodes_part1 + self.total_gcodes_part2
with self.buffer_lock:
for code in gcode_new:
self.buffer.append(code)
return True
def remove_comments(self, gcodes):
gcodes = self.preprocess_gcodes(gcodes)
gcode_new = []
#remove comments start
for gcode in gcodes:
if ";" in gcode:
line = gcode[0:gcode.find(";")]
line = line.strip()
if (len(line) != 0):
gcode_new.append(line)
line2 = gcode[gcode.find(";"):]
if line2.find(";Print time: ") == 0:
if self.print_total_time is None:
print_time = self.getGcodePrintTotalTime(line2);
if print_time > 0:
self.print_total_time = print_time
else:
gcode_new.append(gcode)
#end
return gcode_new
    def cancel(self, go_home=True):
        """Abort the current print (or download) and reinitialise the connection.

        Args:
            go_home: Kept for interface compatibility; not used in this path.
        """
        if self.cancel_download():
            # A file download was in progress; cancelling it is enough.
            return
        try:
            gpx.abort()
        except Exception as ex:
            # NOTE(review): ex.message is Python-2 only -- confirm runtime.
            self.logger.error("gpx.abort() Exception: %s." % ex.message)
        self.pause_flag = False
        self.cancel_flag = True
        self.printing_flag = False
        with self.buffer_lock:
            self.buffer.clear()
        time.sleep(1)
        self.reinit()
        #gpx.disconnect()
        #self.init()
    def canceled(self):
        """Handle a cancel reported by the firmware: clear buffers and print state.

        Unlike cancel(), this does not call gpx.abort() or reinit() -- it is
        the reaction to a gpx.CancelBuild raised during write().
        """
        with self.buffer_lock:
            self.buffer.clear()
        self.pause_flag = False
        self.cancel_flag = True
        self.printing_flag = False
        self.start_time = None
        self.print_time_escape = "00:00:00"
        self.print_time_remain = "00:00:00"
        self.current_print_file = None
        self.initBreakPoint()
        self.breakpoint_index = 0
        self.breakpoint_print_time = 0
def pause(self):
if not self.pause_flag and not self.cancel_flag:
self.pause_flag = True
time.sleep(0.1)
self.append_position_and_lift_extruder()
return True
else:
return False
def unpause(self):
if self.breakpoint_index > 0:
self.breakStart()
self.printer_state = 7
elif self.pause_flag and not self.cancel_flag:
self.pause_flag = False
return True
else:
return False
    def emergency_stop(self):
        """Immediately abort the print (no homing) and forget the current file."""
        self.cancel(False)
        self.current_print_file = None
# def immediate_pause(self):
# gpx.pause_resume()
    def close(self):
        """Stop all worker threads, disconnect gpx and reset printer state.

        Safe to call from a worker thread: the joins are skipped when called
        from the sending thread itself to avoid self-join deadlock.
        """
        self.logger.info("Makerbot printer is closing...")
        self.stop_flag = True  # signals all worker loops to exit
        if threading.current_thread() != self.sending_thread:
            # NOTE: isAlive() is the Python-2 spelling (removed in Python 3.9).
            self.sending_thread.join(10)
            if self.sending_thread.isAlive():
                self.logger.error("Failed to join sending thread in makerbot_printer.")
            self.monitor_thread.join(10)
            if self.monitor_thread.isAlive():
                self.logger.error("Failed to join monitor thread in makerbot_printer.")
            self.monitor2_thread.join(10)
            if self.monitor2_thread.isAlive():
                self.logger.error("Failed to join monitor2 thread in makerbot_printer.")
            self.breakpoint_thread.join(10)
            if self.breakpoint_thread.isAlive():
                self.logger.error("Failed to join break thread in makerbot_printer.")
        self.sending_thread = None
        self.monitor_thread = None
        self.monitor2_thread = None
        self.breakpoint_thread = None
        #gpx.stop()
        try:
            gpx.disconnect()
        except Exception as ex:
            # Best effort: the port may already be gone.
            pass
        self.pause_flag = False
        self.printing_flag = False
        self.cancel_flag = False
        self.printer_state = 1  # back to 'created / closed'
        self.current_print_file = None
        self.initBreakPoint()
        self.breakpoint_index = 0
        self.breakpoint_print_time = 0
        self.logger.info("...done closing makerbot printer.")
def unbuffered_gcodes(self, gcodes):
self.logger.info("Gcodes for unbuffered execution: " + str(gcodes))
if self.printing_flag or self.pause_flag:
self.logger.warning("Can't execute gcodes - wrong mode")
return False
else:
# if not self.parser.state.values.get("build_name"):
# self.parser.state.values["build_name"] = 'Mohou3D'
for gcode in self.preprocess_gcodes(gcodes):
result = self.write(gcode)
if result:
#self.request_position_from_printer()
self.logger.info("Printers answer: " + result)
self.logger.info("Gcodes were sent to printer")
return True
    def write(self, command):
        """Send one gcode line through gpx, retrying on overflow and timeout.

        Blocks until gpx accepts the command, the printer is stopped, or the
        command is cancelled/reinitialised away. Buffer overflows are retried
        indefinitely (recording a breakpoint each time); timeouts are retried
        up to 5 times before re-raising gpx.Timeout. A gpx.CancelBuild from
        the firmware is translated into canceled().

        Returns:
            The raw gpx response string, or None if nothing was written.
        """
        res = None
        try:
            command = command.strip()
            try:
                #reprapSave = gpx.reprap_flavor(True)
                timeout_retries = 0
                bo_retries = 0
                while not self.stop_flag:
                    if self.cancel_flag or self.reinit_flag:
                        #self.cancel_flag = False
                        break
                    try:
                        #self.printing_flag = True
                        #self.execution_lock.acquire()
                        res = gpx.write("%s" % command)
                    except gpx.BufferOverflow:
                        # Firmware buffer full: record progress for outage
                        # recovery, then wait and retry the same command.
                        #self.execution_lock.release()
                        bo_retries += 1
                        try:
                            if gpx.build_paused():
                                if bo_retries == 1:
                                    time.sleep(1) # extra settle time on the first overflow while paused
                            elif bo_retries == 1:
                                self.logger.info('Makerbot BufferOverflow on ' + command)
                        except IOError:
                            pass
                        if self.start_time is None:
                            self.setBreakPrintTime(0)
                        else:
                            self.setBreakPrintTime(time.time() - self.start_time)
                        self.setBreakLineNumber(self.current_line_number + 1)
                        time.sleep(self.BUFFER_OVERFLOW_WAIT) # see BUFFER_OVERFLOW_WAIT (0.01 s)
                    except gpx.Timeout:
                        #self.execution_lock.release()
                        time.sleep(1)
                        timeout_retries += 1
                        if (timeout_retries >= 5):
                            # Give up and propagate the timeout to the caller.
                            raise
                    else:
                        # Command accepted; stop retrying.
                        #self.execution_lock.release()
                        break
            finally:
                #gpx.reprap_flavor(reprapSave)
                pass
        except gpx.CancelBuild:
            self.canceled()
            self.logger.info("Write: print is canceled.")
        return res
def read_state(self):
if self.is_operational():
try:
platform_temp = self.bed_temp[0]
except Exception as ex:
platform_temp = 0
try:
platform_ttemp = self.bed_temp[1]
except Exception as ex:
platform_ttemp = 0
try:
head_temp1 = self.temp[0][0]
except Exception as ex:
head_temp1 = 0
try:
head_ttemp1 = self.temp[0][1]
except Exception as ex:
head_ttemp1 = 0
try:
head_temp2 = self.temp[1][0]
except Exception as ex:
head_temp2 = 0
try:
head_ttemp2 = self.temp[1][1]
except Exception as ex:
head_ttemp2 = 0
else:
platform_temp = 0
platform_ttemp = 0
head_temp1 = 0
head_temp2 = 0
head_ttemp1 = 0
head_ttemp2 = 0
self.temps = [platform_temp, head_temp1, head_temp2]
self.target_temps = [platform_ttemp, head_ttemp1, head_ttemp2]
if self.printer_state == 10: #pause(outage)
pass
elif self.printer_state > 0x80:
pass
elif self.is_paused():
self.printer_state = 8
elif self.is_printing():
self.printer_state = 7
elif self.is_operational():
self.printer_state = 3
self.print_progress = self.get_percent()
self.fan_speed = 0
self.print_speed = 100
self.extruder_amount = 2
if self.finished_flag:
self.printer_state | |
we don't have a fixed top row then we recalculate it at every resize
if self.auto_scroll and height > 0 and len(self.content) > height:
self.top_row = len(self.content) - height
self.refresh()
# end cc_window
# Record describing one hyperlink in the rendered document (location span,
# associated command and optional restyler for when it is selected).
link_data = zlx.record.make('tui.link_data', 'index command selection_restyler start_row start_col end_row end_col')
# One styled run of text within a rendered row (text, style, start column,
# optional owning link).
doc_strip = zlx.record.make('tui.doc_strip', 'text style_name col link')
#* simple_doc_window ********************************************************
class simple_doc_window (window):
# simple_doc_window.__init__()
    def __init__ (self,
            wid = None,
            styles = 'default',
            active_styles = None,
            doc_fmt = '',
            doc_kwargs = {},
            can_have_focus = True,
            default_selection_style = 'selected'):
        """Create a window that renders a styled, formatted document.

        Args:
            wid, styles, active_styles, can_have_focus: passed to window.__init__.
            doc_fmt: document text with str.format-style placeholders.
            doc_kwargs: values substituted into doc_fmt.
                NOTE(review): mutable default argument -- shared across
                instances if ever mutated; confirm it is treated read-only.
            default_selection_style: style name used for selected links.
        """
        window.__init__(self,
                wid = wid,
                styles = styles,
                active_styles = active_styles,
                can_have_focus = can_have_focus)
        self.set_doc(doc_fmt, **doc_kwargs)
        self.display_top_row = 0
        # Map each named style to its inline marker string for use in format().
        self.content_styles_ = { k: ''.join((STYLE_BEGIN, k, STYLE_END)) for k in self.inactive_style_markers if isinstance(k, str) }
        self.default_selection_style = default_selection_style
        self.selected_link = None  # link_data of the currently selected link, if any
# simple_doc_window._reset_content()
def _reset_content (self):
self.content_ = []
self.last_row_ = []
self.last_row_width_ = 0
self.content_.append(self.last_row_)
self.links_ = [] # list of link_data
# simple_doc_window._fill_to_eol()
def _fill_to_eol (self, fill_char = ' '):
if self.last_row_width_ < self.width:
default_style_name = self.inactive_style_markers[()]
self.last_row_.append(
doc_strip(fill_char * (self.width - self.last_row_width_),
default_style_name, self.last_row_width_))
self.last_row_width_ = self.width
# simple_doc_window._new_row()
def _new_row (self, fill_char = ' '):
self._fill_to_eol(fill_char)
self.last_row_ = []
self.last_row_width_ = 0
self.content_.append(self.last_row_)
# simple_doc_window._add_text()
    def _add_text (self, text, style = None, link = None):
        """Append text to the current row, merging with the last strip when
        its style and link match.

        NOTE(review): when style is None and the current row is empty,
        last_row_[-1] raises IndexError -- callers appear to always pass an
        explicit style for the first strip of a row; confirm.
        """
        if style is None:
            # Inherit the style of the previous strip on this row.
            style = self.last_row_[-1].style_name
        if self.last_row_ and self.last_row_[-1].style_name == style and self.last_row_[-1].link == link:
            self.last_row_[-1].text += text
        else:
            self.last_row_.append(doc_strip(text, style, self.last_row_width_, link))
        self.last_row_width_ += compute_text_width(text)
# simple_doc_window._justify_last_row()
    def _justify_last_row (self):
        """Stretch the current row to the window width by inserting spaces.

        Repeatedly walks the row's strips, duplicating an existing space
        (never a leading one) for each missing column, until the row reaches
        full width or no more spaces can be inserted.
        """
        #dmsg('enter justifying: {!r}', self.last_row_)
        #dmsg('width={}', self.last_row_width_)
        while self.last_row_width_ < self.width:
            skip_start_ws = True  # never widen leading whitespace
            col = 0
            n = self.width - self.last_row_width_  # spaces still needed
            for s in self.last_row_:
                #dmsg('justifying: {!r}, n={}', s.text, n)
                s.col = col
                tl = []
                for ch in s.text:
                    tl.append(ch)
                    if ch.isspace():
                        if skip_start_ws: continue
                        if n:
                            #dmsg('insert space')
                            tl.append(' ')
                            n -= 1
                        skip_start_ws = True
                    else:
                        #dmsg('skip_ws off')
                        skip_start_ws = False
                s.text = ''.join(tl)
                #dmsg('justified: {!r}', s.text)
                col += compute_text_width(s.text)
                #dmsg('new width: {}', col)
            if col == self.last_row_width_:
                # could not insert any space, give up
                break
            self.last_row_width_ = col
        #dmsg('exit justifying: {!r}', self.last_row_)
        return
# simple_doc_window.STYLE_CMDS
# Inline markup commands available to doc format strings; each alias
# expands to a STYLE_BEGIN/STYLE_END wrapped style name ('code' and
# 'tab' are aliases of 'verbatim' and 'indent').
STYLE_CMDS = {
    alias: ''.join((STYLE_BEGIN, style_name, STYLE_END))
    for alias, style_name in (
        ('par', 'paragraph'),
        ('br', 'br'),
        ('hr', 'hr'),
        ('cpar', 'continue-paragraph'),
        ('justify', 'justify'),
        ('no_justify', 'no_justify'),
        ('verbatim', 'verbatim'),
        ('code', 'verbatim'),
        ('pre', 'pre'),
        ('indent', 'indent'),
        ('tab', 'indent'),
        ('wrap_indent', 'wrap_indent'),
        ('sp', 'space'),
        ('link', 'link'),
        ('end_link', 'end_link'),
    )
}
def selection_restyler (self, strip_):
    """Default link restyler: the strip is ignored and every selected
    link is drawn with the window's default selection style."""
    return self.default_selection_style
# simple_doc_window._render()
def _render (self):
    """Re-flow self.doc_fmt into rows of styled strips.

    Interprets the inline commands declared in STYLE_CMDS (verbatim /
    paragraph modes, breaks, rules, indentation, justification and
    link tracking) and rebuilds self.content_ and self.links_.  The
    previous link selection is restored by index when that index is
    still valid.

    Fixes:
    - hard-split path passed `link` as the style argument of
      _add_text (now passes current_style and link);
    - `len(text_width)` referenced an undefined name (now
      `len(text_chunk)`);
    - restoring the selection now guards against a shrunken link list.
    """
    default_style_name = self.inactive_style_markers[()]
    dmsg('default_style_name: {}', default_style_name)
    if self.width < 1: return
    self.empty_row_strips_ = [strip(' ' * self.width, default_style_name, 0)]
    prev_selection_index = self.selected_link.index if self.selected_link else None
    self._reset_content()
    doc = self.doc_fmt.format(**self.STYLE_CMDS, **self.doc_kwargs, **self.content_styles_)
    mode = 'verbatim'
    current_style = default_style_name
    wrap_indent = 0
    justify = False
    link = None
    for style, text in styled_text_chunks(doc, default_style_name):
        # ---- layout commands ----
        if style == 'verbatim':
            mode = 'verbatim'
        elif style == 'pre':
            mode = 'verbatim'
            self._new_row()
        elif style == 'continue-paragraph':
            mode = 'paragraph'
        elif style == 'paragraph':
            mode = 'paragraph'
            wrap_indent = 0
            justify = False
            self._new_row()
        elif style == 'br':
            self._new_row()
            mode = 'paragraph'
        elif style == 'hr':
            if self.last_row_width_ > 0:
                self._new_row()
            self._new_row('-')
        elif style == 'wrap_indent':
            wrap_indent = int(text)
            continue
        elif style == 'justify':
            justify = True
        elif style == 'no_justify':
            justify = False
        elif style == 'space':
            self._add_text(' ', current_style, link)
        elif style == 'indent':
            # advance to the next multiple of n columns (tab stop)
            n = int(text)
            m = (self.last_row_width_ + n) // n * n
            if m >= self.width:
                self._new_row()
                o = n
            else:
                o = m - self.last_row_width_
            self._add_text(' ' * o, current_style, link)
            continue
        elif style == 'link':
            # '#restyler#command' selects a custom restyler method
            if text.startswith('#'):
                restyler, command = text.split('#', 3)[1:]
            else:
                restyler = 'selection_restyler'
                command = text
            link = link_data(
                index = len(self.links_),
                command = text,
                selection_restyler = getattr(self, restyler),
                start_row = len(self.content_) - 1,
                start_col = self.last_row_width_)
            continue
        elif style == 'end_link':
            link.end_row = len(self.content_) - 1
            link.end_col = self.last_row_width_
            self.links_.append(link)
            dmsg('recording link: {!r}', link)
            link = None
        else:
            current_style = style
        # ---- text layout ----
        if mode == 'verbatim':
            while text:
                if '\n' in text:
                    t, r = text.split('\n', 1)
                    self._add_text(t, current_style, link)
                    self._new_row()
                    text = r
                else:
                    self._add_text(text, current_style, link)
                    break
        elif mode == 'paragraph':
            first_para = True
            text += '\x01'   # sentinel keeps a trailing empty paragraph visible
            for paragraph in text.split('\n\n'):
                first = True
                if first_para:
                    first_para = False
                else:
                    self._new_row()
                for text_chunk in paragraph.split():
                    if text_chunk.endswith('\x01'):
                        text_chunk = text_chunk[:-1]
                    tw = compute_text_width(text_chunk)
                    spc = 0 if first else 1
                    first = False
                    if self.last_row_width_ + tw + spc <= self.width:
                        # word fits on the current row
                        if spc:
                            self._add_text(' ', current_style, link)
                        self._add_text(text_chunk, current_style, link)
                        continue
                    if wrap_indent + tw <= self.width:
                        # wrap the whole word to the next row
                        if justify:
                            self._justify_last_row()
                        self._new_row()
                        if wrap_indent:
                            self._add_text(' ' * wrap_indent, current_style, link)
                        self._add_text(text_chunk, current_style, link)
                        continue
                    # word longer than a row: hard-split it
                    if spc:
                        if self.last_row_width_ and self.last_row_width_ + 1 + spc <= self.width:
                            # bug fix: `link` was passed as the style argument
                            self._add_text(' ', current_style, link)
                        else:
                            self._new_row()
                    while text_chunk:
                        i = compute_index_of_column(text_chunk, self.width - self.last_row_width_)
                        # bug fix: was len(text_width) -- an undefined name
                        if i is None: i = len(text_chunk)
                        self._add_text(text_chunk[:i], current_style, link)
                        text_chunk = text_chunk[i:]
                        if self.last_row_width_ == self.width:
                            self._new_row()
    self._fill_to_eol()
    # restore selection by index, guarding against a shrunken link list
    if prev_selection_index is not None and prev_selection_index < len(self.links_):
        self.selected_link = self.links_[prev_selection_index]
    else:
        self.selected_link = None
    dmsg('render: {!r}', self.content_)
# simple_doc_window.on_resize()
def on_resize (self, width = None, height = None):
    """Re-render and repaint after a size change.

    width/height are accepted for the resize-callback signature but
    are not used here; _render reads self.width directly.
    """
    self._render()
    self.refresh()
## simple_doc_window.on_focus_enter()
# def on_focus_enter (self):
# self.select_theme('active', do_refresh = False)
# self._render()
# self.refresh()
#
## simple_doc_window.on_focus_leave()
# def on_focus_leave (self):
# self.select_theme('inactive')
# self._render()
# self.refresh()
# simple_doc_window.set_doc()
def set_doc (self, fmt, **kwargs):
    """Set the document format string and its substitution values,
    then re-render immediately."""
    self.doc_fmt = fmt
    self.doc_kwargs = kwargs
    self._render()
# simple_doc_window._prepare_strip()
def _prepare_strip (self, s):
    """Return a display strip for s, swapping in the link's selection
    style when s belongs to the currently selected link.

    NOTE(review): the [1:-1] slice drops the first and last character
    of the stored style marker -- presumably the STYLE_BEGIN /
    STYLE_END delimiters; confirm against how style_markers is built.
    """
    sty = s.style_name
    if s.link and s.link is self.selected_link:
        sty = self.selected_link.selection_restyler(s)
    return strip(s.text, self.style_markers[sty][1:-1], s.col)
# simple_doc_window.refresh_strip()
def refresh_strip (self, row, col, width):
    """Paint one display row: scrolled content when the row exists,
    a blank fill in the default style otherwise."""
    r = self.display_top_row + row
    if r >= 0 and r < len(self.content_):
        self._write_updates(row,
            trim_strips(self.content_[r], col, width,
                transform_strip = self._prepare_strip))
    else:
        self._write(row, col, self.default_style_name, ' ' * width)
def _next_link (self):
if self.selected_link is None:
return self.links_[0] if self.links_ else None
if self.selected_link.index + 1 < len(self.links_):
return self.links_[self.selected_link.index + 1]
return None
def _prev_link (self):
if self.selected_link is None: return None
if self.selected_link.index - 1 >= 0:
return self.links_[self.selected_link.index - 1]
return None
def _next_lower_link (self):
if self.selected_link is None:
current_row = -1
start_index = 0
else:
current_row = self.selected_link.end_row
start_index = self.selected_link.index + 1
for i in range(start_index, len(self.links_)):
if self.links_[i].start_row > current_row:
return self.links_[i]
return None
def _prev_upper_link (self):
if self.selected_link is None: return None
current_row = self.selected_link.start_row
start_index = self.selected_link.index - 1
for i in range(start_index, -1, -1):
if self.links_[i].start_row < current_row:
return self.links_[i]
return None
def _move (self, link, row_delta):
    """Select `link` when its first row is on screen; otherwise
    scroll the window by row_delta (bounded by the content).

    link      candidate link to select (may be None).
    row_delta +1 / -1 scroll direction used when no link is selected.
    """
    if link and link.start_row >= self.display_top_row and link.start_row < self.display_top_row + self.height:
        prev_link = self.selected_link
        self.selected_link = link
        if prev_link:
            # Repaint the rows the previous selection occupied.
            # Bug fix: `height` was prev_link.end_row + 1 (an absolute
            # row number), not a row count as in the refresh below.
            self.refresh(start_row = prev_link.start_row - self.display_top_row,
                height = prev_link.end_row - prev_link.start_row + 1)
        dmsg('selecting link: {!r}', link)
        self.refresh(start_row = link.start_row - self.display_top_row,
            height = link.end_row - link.start_row + 1)
    elif row_delta > 0 and self.display_top_row + self.height < len(self.content_):
        self.display_top_row += row_delta
        dmsg('scrolling {}', row_delta)
        self.refresh()
    elif row_delta < 0 and self.display_top_row + row_delta >= 0:
        self.display_top_row += row_delta
        dmsg('scrolling {}', row_delta)
        self.refresh()
def move_up (self):
    # select the nearest link on an upper row, else scroll up one row
    self._move(self._prev_upper_link(), -1)
def move_down (self):
    # select the nearest link on a lower row, else scroll down one row
    self._move(self._next_lower_link(), +1)
def move_left (self):
    # select the previous link in document order
    self._move(self._prev_link(), -1)
def move_right (self):
    # select the next link in document order
    self._move(self._next_link(), +1)
def run_command (self, cmd):
    '''Hook invoked when a selected link is activated; derive to
    perform some action.  The default implementation only logs cmd.'''
    dmsg('running command: {!r}', cmd)
def on_key (self, key):
    """Handle navigation keys (vi-style hjkl or arrow keys) and Enter,
    which runs the selected link's command."""
    if key in ('j', 'Down'): self.move_down()
    elif key in ('k', 'Up'): self.move_up()
    elif key in ('h', 'Left'): self.move_left()
    elif key in ('l', 'Right'): self.move_right()
    elif key == 'Enter':
        # run only when the whole selected link is on screen
        if self.selected_link and self.selected_link.start_row >= self.display_top_row and self.selected_link.end_row < self.display_top_row + self.height:
            self.run_command(self.selected_link.command)
#* input_line ***************************************************************
class input_line (window):
# input_line.__init__()
def __init__ (self,
        styles,
        active_styles = None,
        accept_text_func = lambda text: text,
        cancel_text_func = lambda : None,
        cursor_mode = CM_NORMAL):
    """Single-line text input widget.

    styles / active_styles   passed through to window.__init__().
    accept_text_func         called with the edited text on accept.
    cancel_text_func         called with no arguments on cancel.
    cursor_mode              cursor rendering mode (default CM_NORMAL).
    """
    window.__init__(self,
        styles = styles,
        active_styles = active_styles,
        can_have_focus = True)
    self.text = ''   # current edit buffer
    self.pos = 0     # cursor position within self.text
    self.accept_text_func = accept_text_func
    self.cancel_text_func = cancel_text_func
    # bug fix: the cursor_mode argument was previously ignored and
    # always overwritten with CM_NORMAL
    self.cursor_mode = cursor_mode
# input_line.refresh_strip()
def refresh_strip (self, row, col, width):
if row != 0:
window.refresh_strip(self, row, col, width)
return
t = self.text
self.put(row, 0, t, clip_col = col, clip_width = width)
self.put(row, len(t), | |
properties
bar
Set the appearance of the gauge's value
bgcolor
Sets the gauge background color.
bordercolor
Sets the color of the border enclosing the gauge.
borderwidth
Sets the width (in px) of the border enclosing the
gauge.
shape
Set the shape of the gauge
steps
A tuple of
:class:`plotly.graph_objects.indicator.gauge.Step`
instances or dicts with compatible properties
stepdefaults
When used in a template (as
layout.template.data.indicator.gauge.stepdefaults),
sets the default property values to use for elements of
indicator.gauge.steps
threshold
:class:`plotly.graph_objects.indicator.gauge.Threshold`
instance or dict with compatible properties
Returns
-------
Gauge
"""
super(Gauge, self).__init__("gauge")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.indicator.Gauge
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.Gauge`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.indicator import gauge as v_gauge
# Initialize validators
# ---------------------
self._validators["axis"] = v_gauge.AxisValidator()
self._validators["bar"] = v_gauge.BarValidator()
self._validators["bgcolor"] = v_gauge.BgcolorValidator()
self._validators["bordercolor"] = v_gauge.BordercolorValidator()
self._validators["borderwidth"] = v_gauge.BorderwidthValidator()
self._validators["shape"] = v_gauge.ShapeValidator()
self._validators["steps"] = v_gauge.StepsValidator()
self._validators["stepdefaults"] = v_gauge.StepValidator()
self._validators["threshold"] = v_gauge.ThresholdValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("axis", None)
self["axis"] = axis if axis is not None else _v
_v = arg.pop("bar", None)
self["bar"] = bar if bar is not None else _v
_v = arg.pop("bgcolor", None)
self["bgcolor"] = bgcolor if bgcolor is not None else _v
_v = arg.pop("bordercolor", None)
self["bordercolor"] = bordercolor if bordercolor is not None else _v
_v = arg.pop("borderwidth", None)
self["borderwidth"] = borderwidth if borderwidth is not None else _v
_v = arg.pop("shape", None)
self["shape"] = shape if shape is not None else _v
_v = arg.pop("steps", None)
self["steps"] = steps if steps is not None else _v
_v = arg.pop("stepdefaults", None)
self["stepdefaults"] = stepdefaults if stepdefaults is not None else _v
_v = arg.pop("threshold", None)
self["threshold"] = threshold if threshold is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Domain(_BaseTraceHierarchyType):
    """Domain of an indicator trace inside an optional layout grid."""

    # -- grid placement --------------------------------------------------
    @property
    def column(self):
        """
        If there is a layout grid, use the domain for this column in
        the grid for this indicator trace .

        The 'column' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [0, 9223372036854775807]

        Returns
        -------
        int
        """
        return self["column"]

    @column.setter
    def column(self, val):
        self["column"] = val

    @property
    def row(self):
        """
        If there is a layout grid, use the domain for this row in the
        grid for this indicator trace .

        The 'row' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [0, 9223372036854775807]

        Returns
        -------
        int
        """
        return self["row"]

    @row.setter
    def row(self, val):
        self["row"] = val

    # -- plot-fraction extents -------------------------------------------
    @property
    def x(self):
        """
        Sets the horizontal domain of this indicator trace (in plot
        fraction).

        The 'x' property is an info array that may be specified as:
          * a list or tuple of 2 elements where:
            (0) The 'x[0]' property is a number and may be specified as:
                - An int or float in the interval [0, 1]
            (1) The 'x[1]' property is a number and may be specified as:
                - An int or float in the interval [0, 1]

        Returns
        -------
        list
        """
        return self["x"]

    @x.setter
    def x(self, val):
        self["x"] = val

    @property
    def y(self):
        """
        Sets the vertical domain of this indicator trace (in plot
        fraction).

        The 'y' property is an info array that may be specified as:
          * a list or tuple of 2 elements where:
            (0) The 'y[0]' property is a number and may be specified as:
                - An int or float in the interval [0, 1]
            (1) The 'y[1]' property is a number and may be specified as:
                - An int or float in the interval [0, 1]

        Returns
        -------
        list
        """
        return self["y"]

    @y.setter
    def y(self, val):
        self["y"] = val

    # -- plotly hierarchy metadata ---------------------------------------
    @property
    def _parent_path_str(self):
        # this object lives under the `indicator` trace
        return "indicator"

    @property
    def _prop_descriptions(self):
        return """\
        column
            If there is a layout grid, use the domain for this
            column in the grid for this indicator trace .
        row
            If there is a layout grid, use the domain for this row
            in the grid for this indicator trace .
        x
            Sets the horizontal domain of this indicator trace (in
            plot fraction).
        y
            Sets the vertical domain of this indicator trace (in
            plot fraction).
        """

    def __init__(self, arg=None, column=None, row=None, x=None, y=None, **kwargs):
        """
        Construct a new Domain object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.indicator.Domain`
        column
            If there is a layout grid, use the domain for this
            column in the grid for this indicator trace .
        row
            If there is a layout grid, use the domain for this row
            in the grid for this indicator trace .
        x
            Sets the horizontal domain of this indicator trace (in
            plot fraction).
        y
            Sets the vertical domain of this indicator trace (in
            plot fraction).

        Returns
        -------
        Domain
        """
        super(Domain, self).__init__("domain")

        # Normalize `arg` into a private dict we are free to pop from
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.indicator.Domain
constructor must be a dict or
an instance of :class:`plotly.graph_objs.indicator.Domain`"""
            )

        # Honour skip_invalid for the duration of construction only
        self._skip_invalid = kwargs.pop("skip_invalid", False)

        # Register per-property validators
        from plotly.validators.indicator import domain as v_domain

        self._validators["column"] = v_domain.ColumnValidator()
        self._validators["row"] = v_domain.RowValidator()
        self._validators["x"] = v_domain.XValidator()
        self._validators["y"] = v_domain.YValidator()

        # Populate properties: explicit arguments win over `arg` entries
        for _prop, _given in (("column", column), ("row", row), ("x", x), ("y", y)):
            _v = arg.pop(_prop, None)
            self[_prop] = _given if _given is not None else _v

        # Anything left in `arg` plus extra kwargs goes through the
        # unknown-kwarg machinery of the base class
        self._process_kwargs(**dict(arg, **kwargs))

        self._skip_invalid = False
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Delta(_BaseTraceHierarchyType):
# decreasing
# ----------
@property
def decreasing(self):
    """
    The 'decreasing' property is an instance of Decreasing
    that may be specified as:
      - An instance of :class:`plotly.graph_objs.indicator.delta.Decreasing`
      - A dict of string/value properties that will be passed
        to the Decreasing constructor

        Supported dict properties:

            color
                Sets the color for decreasing value.
            symbol
                Sets the symbol to display for decreasing value

    Returns
    -------
    plotly.graph_objs.indicator.delta.Decreasing
    """
    return self["decreasing"]

@decreasing.setter
def decreasing(self, val):
    self["decreasing"] = val
# font
# ----
@property
def font(self):
"""
Set the font used to display the delta
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.indicator.delta.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times | |
# repository: mcguenther/ipme
from ..interfaces.cell import Cell
from ..utils.functions import *
from ..utils.stats import kde, pmf, find_x_range
from ..utils.js_code import HOVER_CODE
from ..utils.constants import COLORS, BORDER_COLORS, PLOT_HEIGHT, PLOT_WIDTH, SIZING_MODE, RUG_DIST_RATIO, RUG_SIZE
from functools import partial
import threading
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, BoxSelectTool, HoverTool, CustomJS
from bokeh import events
class VariableCell(Cell):
def __init__(self, name, mode, inter_contr):
    """
    Parameters:
    --------
        name            A String within the set {"<variableName>"}.
        mode            A String in {"i","s"}, "i":interactive, "s":static.
        inter_contr     Interaction controller shared by the cells.
    Sets:
    --------
        _source         per-space cds holding the pdf/pmf curve
        _selection      per-space cds of the user-selected region
        _reconstructed  per-space cds of the restricted-sample curve
        _samples        per-space cds of the sample rug
    """
    self._source = {}
    self._selection = {}
    self._reconstructed = {}
    self._samples = {}
    self._sel_samples = {}      # rug of samples surviving the selection
    self._clear_selection = {}  # cds backing the clear-selection "x" button
    self._all_samples = {}      # raw MCMC samples per space
    self._x_range = {}          # plotted x-range per space
    Cell.__init__(self, name, mode, inter_contr)
def _get_samples(self, space):
    """
    Retrieves MCMC samples of <space> into a numpy.ndarray and
    sets an entry into self._all_samples Dict; also records the
    plotted x-range for the space.
    """
    sample_space = space
    # observed variables are sampled from the *_predictive space when
    # that space is available
    if self._data.get_var_type(self._name) == "observed":
        predictive = space + "_predictive"
        if space in ("posterior", "prior") and predictive in self._data.get_spaces():
            sample_space = predictive
    self._all_samples[space] = self._data.get_samples(self._name, sample_space).T
    self._x_range[space] = find_x_range(self._all_samples[space])
def _get_data_for_cur_idx_dims_values(self, space):
    """
    Returns a numpy.ndarray of the MCMC samples of the <name>
    parameter for the current index-dimension values.

    Raises:
    --------
        ValueError when no samples have been retrieved for <space>
        (previously a bare `raise ValueError` with no message).

    Returns:
    --------
        A numpy.ndarray.
    """
    try:
        data = self._all_samples[space]
    except KeyError:
        raise ValueError(
            "no samples retrieved for space {!r}; call _get_samples first".format(space))
    # successively index one array dimension per current index-dim value
    # (the dimension names themselves are not needed here)
    for dim_value in self._cur_idx_dims_values.values():
        data = data[dim_value]
    return np.squeeze(data).T
def initialize_fig(self,space):
    """Create the bokeh figure for <space>; in interactive mode also
    attach the tap / box-select events and the listener for shared
    sample-index updates."""
    self._plot[space]=figure( x_range = self._x_range[space], tools="wheel_zoom,reset,box_zoom", toolbar_location='right',
                              plot_width=PLOT_WIDTH, plot_height=PLOT_HEIGHT, sizing_mode=SIZING_MODE)
    self._plot[space].border_fill_color = BORDER_COLORS[0]
    self._plot[space].xaxis.axis_label = ""
    self._plot[space].yaxis.visible = False
    self._plot[space].toolbar.logo = None
    self._plot[space].xaxis[0].ticker.desired_num_ticks = 3
    if self._mode == "i":
        ##Events
        # Tap clears the selection; SelectionGeometry is the box select
        self._plot[space].on_event(events.Tap, partial(self._clear_selection_callback,space))
        self._plot[space].on_event(events.SelectionGeometry, partial(self._selectionbox_callback,space))
        ##on_change
        self._ic._sample_inds_update[space].on_change('data',partial(self._sample_inds_callback, space))
def initialize_cds(self,space):
    """Create the ColumnDataSources for <space>: the pmf/kde curve,
    the sample rug (continuous only), the selection / reconstructed
    overlays, the clear-selection button and the shared x-range."""
    samples = self._get_data_for_cur_idx_dims_values(space)
    if self._type == "Discrete":
        # pmf stems: y0 anchors the vertical segments at zero
        self._source[space] = ColumnDataSource(data = pmf(samples))
        self._samples[space] = ColumnDataSource(data = dict(x=samples))
        self._selection[space] = ColumnDataSource(data=dict(x=np.array([]), y=np.array([]), y0=np.array([])))
        self._reconstructed[space] = ColumnDataSource(data=dict(x=np.array([]), y=np.array([]), y0=np.array([])))
    else:
        self._source[space] = ColumnDataSource(data = kde(samples))
        # rug drawn below the axis at a fixed fraction of the peak density
        max_v = self._source[space].data['y'].max()
        self._samples[space] = ColumnDataSource(data = dict(x=samples, y=np.asarray([-max_v/RUG_DIST_RATIO]*len(samples)),\
                                                            size=np.asarray([RUG_SIZE]*len(samples))))
        self._sel_samples[space] = ColumnDataSource(data = dict(x=np.array([]), y=np.array([]),\
                                                                size=np.array([])))
        self._selection[space] = ColumnDataSource(data=dict(x=np.array([]),y=np.array([])))
        self._reconstructed[space] = ColumnDataSource(data=dict(x=np.array([]),y=np.array([])))
    self._clear_selection[space] = ColumnDataSource(data=dict(x=[],y=[],isIn=[]))
    self._ic._var_x_range[(space,self._name)] = ColumnDataSource(data=dict(xmin=np.array([]),xmax=np.array([])))
def initialize_glyphs(self,space):
    """Draw the glyphs for <space>, dispatching on the variable type;
    interactive mode additionally gets the clear-selection button."""
    draw = (self.initialize_glyphs_discrete
            if self._type == "Discrete"
            else self.initialize_glyphs_continuous)
    draw(space)
    if self._mode == "i":
        self.initialize_glyphs_x_button(space)
def initialize_glyphs_discrete(self,space):
so_seg=self._plot[space].segment(x0 = 'x', y0 ='y0', x1='x', y1='y', source=self._source[space], \
line_alpha=1.0, color = COLORS[0], line_width=1, selection_color=COLORS[0],
nonselection_color=COLORS[0], nonselection_line_alpha=1.0)
so_scat=self._plot[space].scatter('x', 'y', source=self._source[space], size=4, fill_color=COLORS[0], \
fill_alpha=1.0, line_color=COLORS[0], selection_fill_color=COLORS[0], \
nonselection_fill_color=COLORS[0], nonselection_fill_alpha=1.0, \
nonselection_line_color=COLORS[0])
self._plot[space].segment(x0 = 'x', y0 ='y0', x1='x', y1='y', source=self._selection[space], \
line_alpha=0.7, color = COLORS[2], line_width=1)
self._plot[space].scatter('x', 'y', source=self._selection[space], size=4, fill_color=COLORS[2], \
fill_alpha=0.7, line_color=COLORS[2])
self._plot[space].segment(x0 = 'x', y0 ='y0', x1='x', y1='y', source=self._reconstructed[space], \
line_alpha=0.5, color = COLORS[1], line_width=1)
self._plot[space].scatter('x', 'y', source=self._reconstructed[space], size=4, fill_color=COLORS[1], \
fill_alpha=0.5, line_color=COLORS[1])
if self._mode == "i":
##Add BoxSelectTool
self._plot[space].add_tools(BoxSelectTool(dimensions='width',renderers=[so_seg,so_scat]))
def initialize_glyphs_continuous(self,space):
    """Draw the kde curves (base, reconstructed, selection) and the
    sample rugs; interactive mode adds box select and hover tools."""
    so=self._plot[space].line('x', 'y', line_color = COLORS[0], line_width = 2, source=self._source[space])
    re=self._plot[space].line('x', 'y', line_color = COLORS[1], line_width = 2, source=self._reconstructed[space])
    self._plot[space].line('x', 'y', line_color = COLORS[2], line_width = 2, source=self._selection[space])
    # rugs: all samples in COLORS[0], selected subset in COLORS[1]
    da=self._plot[space].dash('x','y', size='size',angle=90.0, angle_units='deg', line_color = COLORS[0], \
                              source=self._samples[space])
    self._plot[space].dash('x','y', size='size',angle=90.0, angle_units='deg', line_color = COLORS[1], \
                           source=self._sel_samples[space])
    if self._mode == "i":
        ##Add BoxSelectTool
        self._plot[space].add_tools(BoxSelectTool(dimensions='width',renderers=[so]))
        TOOLTIPS = [
            ("x", "@x"),
            ("y","@y"),
        ]
        # hover reports coordinates on the base and reconstructed curves
        hover = HoverTool( tooltips=TOOLTIPS,renderers=[so,re], mode='mouse')
        self._plot[space].tools.append(hover)
def initialize_glyphs_x_button(self,space):
    """Draw the square-x "clear selection" marker with its hover tool;
    HOVER_CODE presumably maintains the 'isIn' hover flag read by
    _clear_selection_callback -- confirm against utils.js_code."""
    ## x-button to clear selection
    sq_x=self._plot[space].scatter('x', 'y', marker="square_x", size=10, fill_color="grey", hover_fill_color="firebrick", \
                                   fill_alpha=0.5, hover_alpha=1.0, line_color="grey", hover_line_color="white", \
                                   source=self._clear_selection[space], name='clear_selection')
    ## Add HoverTool for x-button
    self._plot[space].add_tools(HoverTool(tooltips="Clear Selection", renderers=[sq_x], mode='mouse', show_arrow=False,
                                          callback=CustomJS(args=dict(source=self._clear_selection[space]), code=HOVER_CODE)))
def _initialize_plot(self):
    """Build every sample space's plot: fetch samples, then create the
    data sources, the figure and its glyphs, in that order."""
    steps = (self._get_samples,
             self.initialize_cds,
             self.initialize_fig,
             self.initialize_glyphs)
    for space in self._spaces:
        for step in steps:
            step(space)
def _widget_callback(self, attr, old, new, w_title, space):
    """
    Callback called when an indexing dimension is set to
    a new coordinate (e.g through indexing dimensions widgets).

    attr/old/new follow the bokeh on_change signature; w_title is the
    widget title, space the sample space.  The work is queued on the
    interaction controller's widget-thread queue.
    """
    if old == new:
        return
    self._ic._add_widget_threads(threading.Thread(target=partial(self._widget_callback_thread, new, w_title, space), daemon=True))
    self._ic._widget_lock_event.set()
def _widget_callback_thread(self, new, w_title, space):
    """Resolve the widget's new value into index positions, update
    self._cur_idx_dims_values and refresh this cell's data sources.

    Uses the controller's widget mappings: w1->w2 for hierarchically
    dependent widgets, w2->w1 to fetch value lists that depend on the
    parent widget's current value.
    """
    inds = -1
    w2_title = ""
    values = []
    w1_w2_idx_mapping = self._ic._get_w1_w2_idx_mapping()
    w2_w1_val_mapping = self._ic._get_w2_w1_val_mapping()
    w2_w1_idx_mapping = self._ic._get_w2_w1_idx_mapping()
    widgets = self._widgets[space]
    if space in w1_w2_idx_mapping and \
            w_title in w1_w2_idx_mapping[space]:
        # parent widget: its index dim may be named "<w1>_idx_<w2>"
        for w2_title in w1_w2_idx_mapping[space][w_title]:
            name = w_title+"_idx_"+w2_title
            if name in self._idx_dims:
                values = self._idx_dims[name].values
            elif w_title in self._idx_dims:
                values = self._idx_dims[w_title].values
    elif space in w2_w1_idx_mapping and \
            w_title in w2_w1_idx_mapping[space]:
        # child widget: its valid values depend on the parent's value
        for w1_idx in w2_w1_idx_mapping[space][w_title]:
            w1_value = widgets[w1_idx].value
            values = w2_w1_val_mapping[space][w_title][w1_value]
    inds = [i for i,v in enumerate(values) if v == new]
    if inds == -1 or len(inds) == 0:
        return
    self._cur_idx_dims_values[w_title] = inds
    if w2_title and w2_title in self._cur_idx_dims_values:
        # reset the dependent widget's coordinate to the first entry
        self._cur_idx_dims_values[w2_title] = [0]
    if self._mode == 'i':
        self._update_source_cds(space)
        self._ic._set_global_update(True)
        self._update_cds_interactive(space)
    elif self._mode == 's':
        self._update_cds_static(space)
def _clear_selection_callback(self,space,event):
    """
    Callback called when clear selection glyph is clicked.

    Only acts when the pointer is over the button ('isIn' flag set by
    the hover CustomJS); clears the recorded x-range and re-applies
    the (now empty) selection in every space.
    """
    isIn = self._clear_selection[space].data['isIn']
    if 1 in isIn:
        self._ic._set_var_x_range(space,self._name,dict(xmin=np.array([]),xmax=np.array([])))
        self._ic._delete_sel_var_idx_dims_values(self._name)
        for sp in self._spaces:
            self._ic._add_space_threads(threading.Thread(target=partial(self._clear_selection_thread,sp), daemon=True))
        self._ic._space_threads_join()
def _clear_selection_thread(self,space):
    """Redraw or empty the selection overlay for <space>, drop this
    variable's selected indices and recompute the intersection."""
    x_range = self._ic._get_var_x_range(space,self._name)
    xmin_list = x_range['xmin']
    xmax_list = x_range['xmax']
    if len(xmin_list):
        # another restriction still applies in this space
        self._update_selection_cds(space, xmin_list[0], xmax_list[0])
    else:
        if self._type == "Discrete":
            self._selection[space].data=dict(x=np.array([]),y=np.array([]),y0=np.array([]))
        else:
            self._selection[space].data=dict(x=np.array([]),y=np.array([]))
    self._ic._delete_sel_var_inds(space,self._name)
    self._compute_intersection_of_samples(space)
    self._ic._selection_threads_join(space)
def _update_cds_interactive(self,space):
    """
    Updates interaction-related ColumnDataSources (cds).

    On a global update the selection overlay is redrawn only in the
    cell/space that owns the active selection (same variable, space
    and index-dim values) and cleared elsewhere; the reconstructed
    curve and clear-button cds are refreshed afterwards.
    """
    sel_var_idx_dims_values = self._ic._get_sel_var_idx_dims_values()
    sel_space = self._ic._get_sel_space()
    var_x_range = self._ic._get_var_x_range()
    global_update = self._ic._get_global_update()
    if(global_update):
        if (self._name in sel_var_idx_dims_values and space == sel_space and
                self._cur_idx_dims_values == sel_var_idx_dims_values[self._name]):
            self._update_selection_cds(space, var_x_range[(space,self._name)].data['xmin'][0],\
                                       var_x_range[(space,self._name)].data['xmax'][0])
        else:
            if self._type == "Discrete":
                self._selection[space].data=dict(x=np.array([]),y=np.array([]),y0=np.array([]))
            else:
                self._selection[space].data=dict(x=np.array([]),y=np.array([]))
    self._update_reconstructed_cds(space)
    self._update_clear_selection_cds(space)
def _sample_inds_callback(self, space, attr, old, new):
    """
    Updates cds when indices of selected samples -- Cell._sample_inds--
    are updated.

    The actual update runs on the controller's per-space selection
    thread queue.
    """
    self._ic._add_selection_threads(space,threading.Thread(target=self._sample_inds_thread, args=(space,), daemon=True))
    self._ic._sel_lock_event.set()
def _sample_inds_thread(self,space):
    """Refresh this cell's data sources after the shared sample
    indices changed, dispatching on interactive vs static mode."""
    handlers = {"i": self._update_cds_interactive,
                "s": self._update_cds_static}
    handler = handlers.get(self._mode)
    if handler is not None:
        handler(space)
def _update_cds_static(self,space):
    """
    Update source & samples cds in the static mode

    When a sample selection exists, the curve (and, for continuous
    variables, the rug) is rebuilt from the selected subset only;
    otherwise from all samples.  Discrete pmf sources carry no rug
    columns -- see initialize_cds.
    """
    samples = self._get_data_for_cur_idx_dims_values(space)
    inds = self._ic._get_sample_inds(space)
    if len(inds):
        sel_sample = samples[inds]
        if self._type == "Discrete":
            self._source[space].data = pmf(sel_sample)
        else:
            self._source[space].data = kde(sel_sample)
            max_v = self._get_max_prob(space)
            self._samples[space].data = dict(x=sel_sample,y=np.asarray([-max_v/RUG_DIST_RATIO]*len(sel_sample)),\
                                             size=np.asarray([RUG_SIZE]*len(sel_sample)))
    else:
        if self._type == "Discrete":
            self._source[space].data = pmf(samples)
        else:
            self._source[space].data = kde(samples)
            max_v = self._get_max_prob(space)
            self._samples[space].data = dict(x=samples,y=np.asarray([-max_v/RUG_DIST_RATIO]*len(samples)),\
                                             size=np.asarray([RUG_SIZE]*len(samples)))
def set_stratum(self, space, stratum = 0):
    """
    Sets selection by spliting the ordered sample set
    in 4 equal-sized subsets.

    stratum: index of the stratum to select.
    Returns the (xmin, xmax) range of the chosen stratum.
    """
    samples = self._get_data_for_cur_idx_dims_values(space)
    xmin,xmax = get_stratum_range(samples,stratum)
    if self._mode == 'i':
        self._ic._sel_space=space
        self._ic._var_x_range[(space,self._name)].data=dict(xmin=np.asarray([xmin]),xmax=np.asarray([xmax]))
        self._ic._sel_var_idx_dims_values[self._name]=dict(self._cur_idx_dims_values)
    inds = find_indices(samples, lambda e: e >= xmin and e<= xmax,xmin,xmax)
    # NOTE(review): other selection paths mutate state through
    # self._ic._set_sel_var_inds (see _selectionbox_space_thread);
    # confirm the Cell base class really provides _set_sel_var_inds.
    self._set_sel_var_inds(space, self._name, inds)
    self._compute_intersection_of_samples(space)
    return (xmin,xmax)
def _selectionbox_callback(self, space, event):
    """
    Callback called when selection box is drawn.

    Records the selected x-interval and this cell's index-dim values
    on the interaction controller, then re-applies the selection in
    every sample space on worker threads.
    """
    xmin=event.geometry['x0']
    xmax=event.geometry['x1']
    self._ic._set_sel_space(space)
    self._ic._set_var_x_range(space,self._name,dict(xmin=np.asarray([xmin]),xmax=np.asarray([xmax])))
    self._ic._set_sel_var_idx_dims_values(self._name,dict(self._cur_idx_dims_values))
    for sp in self._spaces:
        samples = self._samples[sp].data['x']
        self._ic._add_space_threads(threading.Thread(target=partial(self._selectionbox_space_thread,sp,samples, xmin, xmax), daemon=True))
    self._ic._space_threads_join()
def _selectionbox_space_thread(self, space, samples, xmin, xmax):
    """Apply the [xmin, xmax] selection in <space>: redraw (or clear)
    the selection overlay, record the matching sample indices and
    recompute the intersection of all per-variable restrictions."""
    x_range = self._ic._get_var_x_range(space,self._name)
    xmin_list = x_range['xmin']
    xmax_list = x_range['xmax']
    if len(xmin_list):
        # only the space owning the selection has a recorded range
        self._update_selection_cds(space, xmin_list[0], xmax_list[0])
    else:
        if self._type == "Discrete":
            self._selection[space].data=dict(x=np.array([]),y=np.array([]),y0=np.array([]))
        else:
            self._selection[space].data=dict(x=np.array([]),y=np.array([]))
    inds = find_indices(samples, lambda e: e >= xmin and e<= xmax,xmin,xmax)
    self._ic._set_sel_var_inds(space, self._name, inds)
    self._compute_intersection_of_samples(space)
    self._ic._selection_threads_join(space)
def _compute_intersection_of_samples(self,space):
    """
    Computes intersection of sample points based on user's
    restrictions per parameter, and publishes the resulting index
    list on the interaction controller.
    """
    sel_var_inds = self._ic._get_sel_var_inds()
    space_keys = [key for key in sel_var_inds if key[0] == space]
    if not space_keys:
        self._ic._set_sample_inds(space, dict(inds=[]))
    elif len(space_keys) == 1:
        # single restriction: pass its index list through unchanged
        self._ic._set_sample_inds(space, dict(inds=sel_var_inds[space_keys[0]]))
    else:
        ind_sets = [set(sel_var_inds[key]) for key in space_keys]
        # intersect starting from the smallest set
        common = set.intersection(*sorted(ind_sets, key=len))
        self._ic._set_sample_inds(space, dict(inds=list(common)))
def _update_source_cds(self,space):
"""
Updates source ColumnDataSource (cds).
"""
samples = self._get_data_for_cur_idx_dims_values(space)
if self._type == "Discrete":
self._source[space].data = pmf(samples)
else:
self._source[space].data = kde(samples)
max_v = self._get_max_prob(space)
self._samples[space].data = dict(x=samples,y=np.asarray([-max_v/RUG_DIST_RATIO]*len(samples)),\
size=np.asarray([RUG_SIZE]*len(samples)))
    def _update_selection_cds(self,space,xmin,xmax):
        """
        Updates selection ColumnDataSource (cds).

        Rebuilds the highlighted portion of the pdf/pmf curve of *space*
        restricted to [xmin, xmax]: keeps the source curve points inside the
        range, linearly interpolates the curve value exactly at xmin and xmax,
        and (continuous case) pads both ends with y=0 so the filled selection
        patch closes down to the baseline.
        """
        # Get kde points within [xmin,xmax]
        data={}
        data['x'] = np.array([])
        data['y'] = np.array([])
        kde_indices = find_indices(self._source[space].data['x'], lambda e: e >= xmin and e<= xmax,xmin,xmax)
        if len(kde_indices) == 0:
            # No curve points fall inside the range: clear the selection.
            if self._type == "Discrete":
                self._selection[space].data = dict(x=np.array([]),y=np.array([]),y0=np.array([]))
            else:
                self._selection[space].data = dict(x=np.array([]),y=np.array([]))
            return
        data['x'] = self._source[space].data['x'][kde_indices]
        data['y'] = self._source[space].data['y'][kde_indices]
        if self._type == "Discrete":
            # Discrete case: stems run from y0=0 up to the pmf value.
            data['y0'] = np.asarray(len(data['x'])*[0])
        else:
            # Add interpolated points at xmin, xmax
            # (-1 in the index pair means xmin/xmax lies outside the curve;
            # then no boundary point is inserted on that side).
            xmin_inds = find_inds_before_after(self._source[space].data['x'], xmin)
            if -1 not in xmin_inds:
                # Linear interpolation of the curve at x = xmin.
                xmin_l=self._source[space].data['x'][xmin_inds[0]]
                xmin_h=self._source[space].data['x'][xmin_inds[1]]
                ymin_l=self._source[space].data['y'][xmin_inds[0]]
                ymin_h=self._source[space].data['y'][xmin_inds[1]]
                ymin = ((ymin_h-ymin_l)/(xmin_h-xmin_l))*(xmin-xmin_l) + ymin_l
                data['x'] = np.insert(data['x'],0,xmin)
                data['y'] = np.insert(data['y'],0,ymin)
            xmax_inds = find_inds_before_after(self._source[space].data['x'], xmax)
            if -1 not in xmax_inds:
                # Linear interpolation of the curve at x = xmax.
                xmax_l=self._source[space].data['x'][xmax_inds[0]]
                xmax_h=self._source[space].data['x'][xmax_inds[1]]
                ymax_l=self._source[space].data['y'][xmax_inds[0]]
                ymax_h=self._source[space].data['y'][xmax_inds[1]]
                ymax= ((ymax_h-ymax_l)/(xmax_h-xmax_l))*(xmax-xmax_l) + ymax_l
                data['x'] = np.append(data['x'],xmax)
                data['y'] = np.append(data['y'],ymax)
            # Append and prepend zeros
            # (duplicate the end x-values at y=0 so the patch closes cleanly).
            data['y'] = np.insert(data['y'],0,0)
            data['y'] = np.append(data['y'],0)
            data['x'] = np.insert(data['x'],0,data['x'][0])
            data['x'] = np.append(data['x'],data['x'][-1])
| |
import os,sys,glob,time
import obspy
import scipy
import pycwt
import pyasdf
import datetime
import numpy as np
import pandas as pd
from obspy.signal.invsim import cosine_taper
from obspy.signal.regression import linear_regression
from scipy.fftpack import fft,ifft,next_fast_len
from seisgo import stacking as stack
from seisgo.types import CorrData, FFTData
from seisgo import utils
#####
########################################################
################ CROSS-CORRELATE FUNCTIONS ##################
########################################################
def cc_memory(inc_hours,sps,nsta,ncomp,cc_len,cc_step):
    """
    Estimates the memory usage with given correlation parameters, assuming float 32.

    Parameters
    ----------
    inc_hours : float
        Length of one processing chunk in hours.
    sps : float
        Sampling rate in samples per second.
    nsta : int
        Number of stations.
    ncomp : int
        Number of components per station.
    cc_len : float
        Length of each correlation window in seconds.
    cc_step : float
        Step between successive windows in seconds.

    Returns
    -------
    float
        Estimated memory footprint in GB (4 bytes per float32 sample).
    """
    # Number of sliding windows that fit in one chunk.
    nseg_chunk = int(np.floor((3600*inc_hours-cc_len)/cc_step))+1
    # Samples held per station-component for the whole chunk.
    npts_chunk = int(nseg_chunk*cc_len*sps)
    # BUGFIX: ncomp multiplies the data volume (one buffer per component) and
    # the divisor is 1024**3 (bytes -> GB). The old expression divided by
    # 1024**ncomp, grossly underestimating memory for ncomp > 1.
    memory_size = nsta*npts_chunk*4*ncomp/1024/1024/1024
    return memory_size
def compute_fft(trace,win_len,step,stainv=None,
                freqmin=None,freqmax=None,time_norm='no',freq_norm='no',
                smooth=20,smooth_spec=None,misc=None,taper_frac=0.05,df=None):
    """
    Call FFTData to build the object. This is an alternative of directly call FFTData().
    The motivation of this function is to provide an user interface to build FFTData object.

    Parameters mirror FFTData(); see that class for details.

    Returns
    -------
    FFTData
        The assembled FFTData object.
    """
    # BUGFIX: the default used to be the mutable ``misc=dict()``, shared
    # across all calls; a fresh dict is now created per call instead.
    if misc is None:
        misc = dict()
    return FFTData(trace=trace,win_len=win_len,step=step,
                   stainv=stainv,freqmin=freqmin,freqmax=freqmax,time_norm=time_norm,
                   freq_norm=freq_norm,smooth=smooth,smooth_spec=smooth_spec,misc=misc,
                   taper_frac=taper_frac,df=df)
#assemble FFT with given asdf file name
def assemble_fft(sfile,win_len,step,freqmin=None,freqmax=None,
                time_norm='no',freq_norm='no',smooth=20,smooth_spec=20,
                taper_frac=0.05,df=None,exclude_chan=[None],v=True):
    """
    Assemble FFTData objects for every station/trace stored in an ASDF file.

    Loops over all stations (skipping any without a StationXML inventory) and
    all waveform tags, computes the FFT of each trace via FFTData, and returns
    the list of FFTData objects whose data could be computed.

    Parameters
    ----------
    sfile : str
        Path to the ASDF file.
    win_len, step : float
        FFT window length and step in seconds (passed to FFTData).
    exclude_chan : list
        Channel codes to skip.
    v : bool
        Verbose flag.

    Returns
    -------
    list of FFTData
    """
    #only deal with ASDF format for now.

    # retrieve station information
    ds=pyasdf.ASDFDataSet(sfile,mpi=False,mode='r')
    sta_list = ds.waveforms.list()
    nsta=len(sta_list)
    print('found %d stations in total'%nsta)

    fftdata_all=[]
    if nsta==0:
        print('no data in %s'%sfile);
        return fftdata_all

    # loop through all stations
    print('working on file: '+sfile.split('/')[-1])

    for ista in sta_list:
        # get station and inventory
        try:
            inv1 = ds.waveforms[ista]['StationXML']
        except Exception as e:
            print('abort! no stationxml for %s in file %s'%(ista,sfile))
            continue

        # get days information: works better than just list the tags
        all_tags = ds.waveforms[ista].get_waveform_tags()
        if len(all_tags)==0:continue

        #----loop through each stream----
        for itag in all_tags:
            if v:print("FFT for station %s and trace %s" % (ista,itag))

            # read waveform data
            source = ds.waveforms[ista][itag]
            if len(source)==0:continue

            # channel info
            comp = source[0].stats.channel
            # BUGFIX: str.replace returns a new string; the old code discarded
            # the result, so 'U' channels were never renamed to 'Z'.
            if comp[-1] =='U': comp = comp.replace('U','Z')

            #exclude some channels in the exclude_chan list.
            if comp in exclude_chan:
                print(comp+" is in the exclude_chan list. Skip it!")
                continue

            fftdata=FFTData(source,win_len,step,stainv=inv1,
                            time_norm=time_norm,freq_norm=freq_norm,
                            smooth=smooth,freqmin=freqmin,freqmax=freqmax,
                            smooth_spec=smooth_spec,taper_frac=taper_frac,df=df)
            if fftdata.data is not None:
                fftdata_all.append(fftdata)

    return fftdata_all
def smooth_source_spect(fft1,cc_method,sn):
    '''
    this function smoothes amplitude spectrum of the 2D spectral matrix. (used in S1)
    PARAMETERS:
    ---------------------
    fft1: source spectrum matrix (nwin x nfreq)
    cc_method: 'deconv', 'coherency' or 'xcorr'
    sn: half-window length for the running-mean spectral smoothing
    RETURNS:
    ---------------------
    sfft1: complex numpy array with normalized spectrum, same shape as fft1
    '''
    smoothspect_N = sn #cc_para['smoothspect_N']
    nwin, nfreq = fft1.shape
    # Work on a flattened view; reshaped back just before returning.
    flat = fft1.reshape(fft1.size)

    if cc_method == 'xcorr':
        # Plain cross-correlation: spectrum is used as-is.
        smoothed = flat
    elif cc_method == 'deconv':
        # Deconvolution: whiten by the squared smoothed amplitude.
        amp = utils.moving_ave(np.abs(flat), smoothspect_N)
        try:
            smoothed = flat / amp**2
        except Exception:
            raise ValueError('smoothed spectrum has zero values')
    elif cc_method == 'coherency':
        # Coherency: normalize by the smoothed amplitude.
        amp = utils.moving_ave(np.abs(flat), smoothspect_N)
        try:
            smoothed = flat / amp
        except Exception:
            raise ValueError('smoothed spectrum has zero values')
    else:
        raise ValueError('no correction correlation method is selected at L59')

    return smoothed.reshape(nwin, nfreq)
#
def do_correlation(sfile,win_len,step,maxlag,cc_method='xcorr',acorr_only=False,
                    xcorr_only=False,substack=False,substack_len=None,smoothspect_N=20,
                    maxstd=10,freqmin=None,freqmax=None,time_norm='no',freq_norm='no',
                    smooth_N=20,exclude_chan=[None],outdir='.',v=True):
    """
    Wrapper for computing correlation functions. It includes two key steps: 1) compute and assemble
    the FFT of all data in the sfile, into a list of FFTData objects; 2) loop through the FFTData object
    list and do correlation (auto or xcorr) for each source-receiver pair.

    A '<name>.tmp' marker file in outdir tracks completion: if its last line is
    'done', the chunk is skipped (returns 0); otherwise any stale tmp/output
    files are removed and the chunk is reprocessed.

    ====RETURNS====
    ndata: the number of station-component pairs in the sfile, that have been processed.
    """
    if win_len in [1,2,3]:
        print("!!!WARNING: you may call do_correlation() in the old way with the 2nd argument as the ncomp info.")
        print(" This may cause errors with arguments getting the wrong values. In this version and later,")
        print(" ncomp is deprecated. No change for other arguments. This warning will be removed in")
        print(" versions v0.7.x and later.")

    if acorr_only and xcorr_only:
        raise ValueError('acorr_only and xcorr_only CAN NOT all be True.')
    tname = sfile.split('/')[-1]
    tmpfile = os.path.join(outdir,tname.split('.')[0]+'.tmp')
    if not os.path.isdir(outdir):os.makedirs(outdir)
    #file to store CC results.
    outfile=os.path.join(outdir,tname)
    # check whether time chunk been processed or not
    if os.path.isfile(tmpfile):
        # BUGFIX: read via a context manager; the old code returned early on an
        # already-finished chunk without ever closing the open handle.
        with open(tmpfile,'r') as ftemp:
            alines = ftemp.readlines()
        if len(alines) and alines[-1] == 'done':
            return 0
        else:
            # Stale marker: remove it and any partial output, then redo.
            os.remove(tmpfile)
            if os.path.isfile(outfile): os.remove(outfile)

    ftmp = open(tmpfile,'w')

    ##############compute FFT#############
    fftdata=assemble_fft(sfile,win_len,step,freqmin=freqmin,freqmax=freqmax,
                        time_norm=time_norm,freq_norm=freq_norm,smooth=smooth_N,exclude_chan=exclude_chan)
    ndata=len(fftdata)

    #############PERFORM CROSS-CORRELATION##################
    if v: print(tname)
    iend=ndata
    for iiS in range(ndata):
        # get index right for auto/cross correlation
        istart=iiS;
        src=fftdata[iiS].net+"."+fftdata[iiS].sta
        # if acorr_only:iend=np.minimum(iiS+ncomp,ndata)
        # if xcorr_only:istart=np.minimum(iiS+ncomp,ndata)
        #-----------now loop III for each receiver B----------
        for iiR in range(istart,iend):
            # if v:print('receiver: %s %s' % (fftdata[iiR].net,fftdata[iiR].sta))
            rcv=fftdata[iiR].net+"."+fftdata[iiR].sta
            # Pair filter: keep autocorrelations, cross-correlations, or both.
            if (acorr_only and src==rcv) or (xcorr_only and src != rcv) or (not acorr_only and not xcorr_only):
                if fftdata[iiS].data is not None and fftdata[iiR].data is not None:
                    if v:print('receiver: %s %s' % (fftdata[iiR].net,fftdata[iiR].sta))
                    corrdata=correlate(fftdata[iiS],fftdata[iiR],maxlag,method=cc_method,substack=substack,
                                       smoothspect_N=smoothspect_N,substack_len=substack_len,
                                       maxstd=maxstd)
                    if corrdata.data is not None: corrdata.to_asdf(file=outfile)

    # create a stamp to show time chunk being done
    ftmp.write('done')
    ftmp.close()

    return ndata
def correlate(fftdata1,fftdata2,maxlag,method='xcorr',substack=False,
                substack_len=None,smoothspect_N=20,maxstd=10,terror=0.01):
    '''
    this function does the cross-correlation in freq domain and has the option to keep sub-stacks of
    the cross-correlation if needed. it takes advantage of the linear relationship of ifft, so that
    stacking is performed in spectrum domain first to reduce the total number of ifft.
    PARAMETERS:
    ---------------------
    fftdata1: FFTData for the source station
    fftdata2: FFTData of the receiver station
    maxlag:  maximum lags to keep in the cross correlation
    method:  cross-correlation methods selected by the user
    terror: 0-1 fraction of timing error in searching for overlapping. The timing error =
                    terror*dt
    RETURNS:
    ---------------------
    corrdata: CorrData object of cross-correlation functions in time domain
    '''
    corrdata=CorrData()

    #check overlapping timestamps before any other processing
    #this step is required when there are gaps in the data.
    ind1,ind2=utils.check_overlap(fftdata1.time,fftdata2.time,error=terror*fftdata1.dt)
    if not len(ind1):
        print('no overlapped timestamps in the data.')
        return corrdata

    #---------- check the existence of earthquakes by std of the data.----------
    source_std = fftdata1.std[ind1]
    sou_ind = np.where((source_std<maxstd)&(source_std>0)&(np.isnan(source_std)==0))[0]
    if not len(sou_ind): return corrdata

    receiver_std = fftdata2.std[ind2]
    rec_ind = np.where((receiver_std<maxstd)&(receiver_std>0)&(np.isnan(receiver_std)==0))[0]
    if not len(rec_ind): return corrdata
    # Windows that pass the quality check on BOTH stations.
    bb=np.intersect1d(sou_ind,rec_ind)
    if len(bb)==0:return corrdata

    bb_data1=[ind1[i] for i in bb]
    bb_data2=[ind2[i] for i in bb]

    #----load paramters----
    dt = fftdata1.dt
    cc_len = fftdata1.win_len
    cc_step = fftdata1.step
    if substack_len is None: substack_len=cc_len

    Nfft = fftdata1.Nfft
    Nfft2 = Nfft//2

    fft1=np.conj(fftdata1.data[bb_data1,:Nfft2]) #get the conjugate of fft1
    nwin = fft1.shape[0]
    fft2=fftdata2.data[bb_data2,:Nfft2]

    timestamp=fftdata1.time[bb_data1]

    if method != "xcorr":
        fft1 = smooth_source_spect(fft1,method,smoothspect_N)

    #------convert all 2D arrays into 1D to speed up--------
    # (removed a dead np.zeros allocation that was immediately overwritten)
    corr = fft1.reshape(fft1.size,)*fft2.reshape(fft2.size,)

    if method == "coherency":
        temp = utils.moving_ave(np.abs(fft2.reshape(fft2.size,)),smoothspect_N)
        corr /= temp
    corr = corr.reshape(nwin,Nfft2)

    if substack:
        if substack_len == cc_len:
            # choose to keep all fft data for a day
            s_corr = np.zeros(shape=(nwin,Nfft),dtype=np.float32)   # stacked correlation
            ampmax = np.zeros(nwin,dtype=np.float32)
            n_corr = np.zeros(nwin,dtype=np.int16)                  # number of correlations for each substack
            t_corr = timestamp                                      # timestamp
            crap = np.zeros(Nfft,dtype=np.complex64)
            for i in range(nwin):
                n_corr[i]= 1
                crap[:Nfft2] = corr[i,:]
                crap[:Nfft2] = crap[:Nfft2]-np.mean(crap[:Nfft2])   # remove the mean in freq domain (spike at t=0)
                crap[-(Nfft2)+1:] = np.flip(np.conj(crap[1:(Nfft2)]),axis=0)
                crap[0]=complex(0,0)
                s_corr[i,:] = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0)))

            # remove abnormal data
            ampmax = np.max(s_corr,axis=1)
            tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0]
            s_corr = s_corr[tindx,:]
            t_corr = t_corr[tindx]
            n_corr = n_corr[tindx]

        else:
            # get time information
            Ttotal = timestamp[-1]-timestamp[0]             # total duration of what we have now
            tstart = timestamp[0]

            nstack = int(np.round(Ttotal/substack_len))
            ampmax = np.zeros(nstack,dtype=np.float32)
            s_corr = np.zeros(shape=(nstack,Nfft),dtype=np.float32)
            # BUGFIX: np.int / np.float were deprecated aliases of the builtins
            # and are removed in NumPy >= 1.24; use the builtin types instead.
            n_corr = np.zeros(nstack,dtype=int)
            t_corr = np.zeros(nstack,dtype=float)
            crap = np.zeros(Nfft,dtype=np.complex64)

            for istack in range(nstack):
                # find the indexes of all of the windows that start or end within
                itime = np.where( (timestamp >= tstart) & (timestamp < tstart+substack_len) )[0]
                if len(itime)==0:tstart+=substack_len;continue

                crap[:Nfft2] = np.mean(corr[itime,:],axis=0)        # linear average of the correlation
                crap[:Nfft2] = crap[:Nfft2]-np.mean(crap[:Nfft2])   # remove the mean in freq domain (spike at t=0)
                crap[-(Nfft2)+1:]=np.flip(np.conj(crap[1:(Nfft2)]),axis=0)
                crap[0]=complex(0,0)
                s_corr[istack,:] = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0)))
                n_corr[istack] = len(itime)                         # number of windows stacks
                t_corr[istack] = tstart                             # save the time stamps
                tstart += substack_len
                #print('correlation done and stacked at time %s' % str(t_corr[istack]))

            # remove abnormal data
            ampmax = np.max(s_corr,axis=1)
            tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0]
            s_corr = s_corr[tindx,:]
            t_corr = t_corr[tindx]
            n_corr = n_corr[tindx]

    else:
        # average daily cross correlation functions
        ampmax = np.max(corr,axis=1)
        tindx = np.where( (ampmax<20*np.median(ampmax)) & (ampmax>0))[0]
        n_corr = nwin
        s_corr = np.zeros(Nfft,dtype=np.float32)
        t_corr = timestamp[0]
        crap = np.zeros(Nfft,dtype=np.complex64)
        crap[:Nfft2] = np.mean(corr[tindx],axis=0)
        crap[:Nfft2] = crap[:Nfft2]-np.mean(crap[:Nfft2],axis=0)
        crap[-(Nfft2)+1:]=np.flip(np.conj(crap[1:(Nfft2)]),axis=0)
        s_corr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(crap, Nfft, axis=0)))

    # trim the CCFs in [-maxlag maxlag]
    t = np.arange(-Nfft2+1, Nfft2)*dt
    ind = np.where(np.abs(t) <= maxlag)[0]
    if s_corr.ndim==1:
        s_corr = s_corr[ind]
    elif s_corr.ndim==2:
        s_corr = s_corr[:,ind]

    ### call CorrData to build the object
    cc_comp= fftdata1.chan[-1]+fftdata2.chan[-1]
    dist,azi,baz = obspy.geodetics.base.gps2dist_azimuth(fftdata1.lat,fftdata1.lon,fftdata2.lat,fftdata2.lon)
    corrdata=CorrData(net=[fftdata1.net,fftdata2.net],sta=[fftdata1.sta,fftdata2.sta],\
                    loc=[fftdata1.loc,fftdata2.loc],chan=[fftdata1.chan,fftdata2.chan],\
                    lon=[fftdata1.lon,fftdata2.lon],lat=[fftdata1.lat,fftdata2.lat],\
                    ele=[fftdata1.ele,fftdata2.ele],cc_comp=cc_comp,lag=maxlag,\
                    dt=fftdata1.dt,cc_len=cc_len,cc_step=cc_step,dist=dist/1000,az=azi,\
                    baz=baz,time=t_corr,data=s_corr,substack=substack,\
                    side="A",misc={"cc_method":method,"dist_unit":"km"})

    return corrdata
def do_stacking(ccfiles,pairlist=None,outdir='./STACK',method=['linear'],
rotation=False,correctionfile=None,flag=False,keep_substack=False,
to_egf=False):
# source folder
if pairlist is None:
pairlist,netsta_all=get_stationpairs(ccfiles,False)
if len(ccfiles)==0:
raise IOError('Abort! no available CCF data for stacking')
for s in netsta_all:
tmp = os.path.join(outdir,s)
if not os.path.isdir(tmp):os.mkdir(tmp)
if isinstance(pairlist,str):pairlist=[pairlist]
if not os.path.isdir(outdir):os.makedirs(outdir)
if rotation:
enz_system = ['EE','EN','EZ','NE','NN','NZ','ZE','ZN','ZZ']
rtz_components = | |
#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-06-11 12:40:47.360445
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.system_interfaces.setting_controller import SettingController
class UnitsSheetSettingAtt(SettingController):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| System.SettingController
| UnitsSheetSettingAtt
|
| The interface to access a CATIAUnitsSheetSettingAtt.
| This interface may be used to read or modify in the CATIA/Tools/Option the
| settings values of Units sheet.
"""
    def __init__(self, com_object):
        """
        :param com_object: the CATIA ``UnitsSheetSettingAtt`` COM object to wrap.
        """
        super().__init__(com_object)
        # Typed alias of the COM handle used by the accessors below.
        self.units_sheet_setting_att = com_object
@property
def display_trailing_zeros(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property DisplayTrailingZeros() As short
|
| Returns or sets the DisplayTrailingZeros parameter.
| Role:Return or Set the DisplayTrailingZeros parameter if it is possible in
| the current administrative context. In user mode this method will always return
| E_FAIL.
|
| Parameters:
|
| oDisplayTrailingZeros
| Legal values:
| 0 : to not display trailing zeros
| 1 : to display trailing zeros.
:return: int
:rtype: int
"""
return self.units_sheet_setting_att.DisplayTrailingZeros
@display_trailing_zeros.setter
def display_trailing_zeros(self, value: int):
"""
:param int value:
"""
self.units_sheet_setting_att.DisplayTrailingZeros = value
@property
def exp_notation_values_greater(self) -> float:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property ExpNotationValuesGreater() As double
|
| Returns or sets the ExpNotationValuesGreater parameter.
| Role:Return or Set the ExpNotationValuesGreater parameter if it is possible
| in the current administrative context. In user mode this method will always
| return E_FAIL.
|
| Parameters:
|
| oExpNotationValuesGreater
| The minimum value for exponential notation values.
:return: float
:rtype: float
"""
return self.units_sheet_setting_att.ExpNotationValuesGreater
@exp_notation_values_greater.setter
def exp_notation_values_greater(self, value: float):
"""
:param float value:
"""
self.units_sheet_setting_att.ExpNotationValuesGreater = value
@property
def exp_notation_values_lower(self) -> float:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property ExpNotationValuesLower() As double
|
| Returns or sets the ExpNotationValuesLower parameter.
| Role:Return or Set the ExpNotationValuesGreater parameter if it is possible
| in the current administrative context. In user mode this method will always
| return E_FAIL.
|
| Parameters:
|
| oExpNotationValuesLower
| The maximum value for exponential notation values.
:return: float
:rtype: float
"""
return self.units_sheet_setting_att.ExpNotationValuesLower
@exp_notation_values_lower.setter
def exp_notation_values_lower(self, value: float):
"""
:param float value:
"""
self.units_sheet_setting_att.ExpNotationValuesLower = value
@property
def list_of_magnitudes(self) -> tuple:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property ListOfMagnitudes() As CATSafeArrayVariant (Read
| Only)
|
| Returns or sets the ListOfMagnitudes parameter.
|
| Ensure consistency with the C++ interface to which the work is delegated.
:return: tuple
:rtype: tuple
"""
return self.units_sheet_setting_att.ListOfMagnitudes
@property
def list_of_magnitudes_size(self) -> float:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property ListOfMagnitudesSize() As double (Read Only)
|
| Returns or sets the ListOfMagnitudesSize parameter.
|
| Ensure consistency with the C++ interface to which the work is delegated.
:return: float
:rtype: float
"""
return self.units_sheet_setting_att.ListOfMagnitudesSize
@property
def same_display(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445)
| o Property SameDisplay() As short
|
| Returns or sets the SameDisplay parameter.
| Role:Return or Set the SameDisplay parameter if it is possible in the
| current administrative context. In user mode this method will always return
| E_FAIL.
|
| Parameters:
|
| oSameDisplay
| Legal values:
| 0 : to not display same display
| 1 : to display same display.
:return: int
:rtype: int
"""
return self.units_sheet_setting_att.SameDisplay
@same_display.setter
def same_display(self, value: int):
"""
:param int value:
"""
self.units_sheet_setting_att.SameDisplay = value
def commit_for_units(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub CommitForUnits()
|
| Implements a function from an interface.
|
| See also:
| UnitsSheetSettingAtt.CommitForUnits
:return: None
:rtype: None
"""
return self.units_sheet_setting_att.CommitForUnits()
def get_decimal_read_only(self, i_magnitude_name: str, o_decimal_place_read_only: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub GetDecimalReadOnly(CATBSTR iMagnitudeName,
| double oDecimalPlaceReadOnly)
|
| Returns the number of decimals for ReadOnly number.
:param str i_magnitude_name:
:param float o_decimal_place_read_only:
:return: None
:rtype: None
"""
return self.units_sheet_setting_att.GetDecimalReadOnly(i_magnitude_name, o_decimal_place_read_only)
def get_decimal_read_write(self, i_magnitude_name: str, o_decimal_place_read_write: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Sub GetDecimalReadWrite(CATBSTR iMagnitudeName,
| double oDecimalPlaceReadWrite)
|
| Returns the number of decimals for ReadWrite number.
:param str i_magnitude_name:
:param float o_decimal_place_read_write:
:return: None
:rtype: None
"""
return self.units_sheet_setting_att.GetDecimalReadWrite(i_magnitude_name, o_decimal_place_read_write)
def get_dimensions_display_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetDimensionsDisplayInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves information about the DimensionsDisplay setting
| parameter.
| Refer to SettingController for a detailed description.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.units_sheet_setting_att.GetDimensionsDisplayInfo(io_admin_level, io_locked)
def get_display_trailing_zeros_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetDisplayTrailingZerosInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves environment informations for the DisplayTrailingZeros
| parameter.
| Role:Retrieves the state of the DisplayTrailingZeros parameter in the
| current environment.
|
| Parameters:
|
| ioAdminLevel
|
| If the parameter is locked, AdminLevel gives the administration
| level that imposes the value of the parameter.
| If the parameter is not locked, AdminLevel gives the administration
| level that will give the value of the parameter after a reset.
|
| ioLocked
| Indicates if the parameter has been locked.
|
| Returns:
| Indicates if the parameter has been explicitly modified or remain to
| the administrated value.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.units_sheet_setting_att.GetDisplayTrailingZerosInfo(io_admin_level, io_locked)
def get_exp_notation_values_greater_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetExpNotationValuesGreaterInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves environment informations for the ExpNotationValuesGreater
| parameter.
| Role:Retrieves the state of the ExpNotationValuesGreater parameter in the
| current environment.
|
| Parameters:
|
| ioAdminLevel
|
| If the parameter is locked, AdminLevel gives the administration
| level that imposes the value of the parameter.
| If the parameter is not locked, AdminLevel gives the administration
| level that will give the value of the parameter after a reset.
|
| ioLocked
| Indicates if the parameter has been locked.
|
| Returns:
| Indicates if the parameter has been explicitly modified or remain to
| the administrated value.
:param str io_admin_level:
:param str io_locked:
:return: bool
:rtype: bool
"""
return self.units_sheet_setting_att.GetExpNotationValuesGreaterInfo(io_admin_level, io_locked)
def get_exp_notation_values_lower_info(self, io_admin_level: str, io_locked: str) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-06-11 12:40:47.360445))
| o Func GetExpNotationValuesLowerInfo(CATBSTR ioAdminLevel,
| CATBSTR ioLocked) As boolean
|
| Retrieves environment informations for the ExpNotationValuesLower
| parameter.
| Role:Retrieves the state of the ExpNotationValuesLower parameter in the
| current environment.
|
| Parameters:
|
| ioAdminLevel
|
| If the parameter is locked, AdminLevel gives the administration
| level that imposes the value of the parameter.
| If the parameter is not locked, AdminLevel gives the administration
| level that will give the value of the parameter after a reset.
|
| ioLocked
| Indicates if the parameter has been locked.
|
| | |
0:
data['summary_writer'] = summary_writer
data['summary_writer_step'] = step
#######################################################################
# Loss is the MSE between predicted 4pDelta and ground truth 4pDelta
# Loss is L1 loss, in which case we have to do additional postprocessing
if (type(loss_fn) == torch.nn.MSELoss or type(loss_fn) == torch.nn.L1Loss or
type(loss_fn) == torch.nn.SmoothL1Loss):
ground_truth, network_output, delta_gt, delta_hat = model(data)
loss = loss_fn(ground_truth, network_output)
# Triple loss scenario
elif type(loss_fn) == str and loss_fn == 'CosineDistance':
ground_truth, network_output, delta_gt, delta_hat = model(data)
loss = torch.sum(1 - torch.cosine_similarity(ground_truth, network_output, dim=1))
# Triple loss scenario
elif type(loss_fn) == str and (loss_fn == 'TripletLoss' or loss_fn == 'iHomE' or loss_fn == 'biHomE'):
# # Fix fext
# model[0].feature_extractor.freeze(True)
#
# # Calc loss
# loss, delta_gt, delta_hat = model(data)
# print('freezed', loss)
#
# # Calc gradients
# loss.backward()
#
# # Retrieve gradients
# gradient_freezed = {}
# for name, param in model[0].feature_extractor.named_parameters():
# if param.grad is not None:
# param_norm = param.grad.data
# gradient_freezed[name] = param_norm
# print(gradient_freezed['layer1.0.weight'])
#
# # zero the parameter gradients
# #optimizer.zero_grad()
#
# # Unfix fext
# model[0].feature_extractor.freeze(False)
# Calc loss
loss, delta_gt, delta_hat = model(data)
# print('unfreezed', loss)
#
# # Calc gradients
# loss.backward()
#
# # Retrieve gradients
# gradient_unfreezed = {}
# for name, param in model[0].feature_extractor.named_parameters():
# if param.grad is not None:
# param_norm = param.grad.data
# gradient_unfreezed[name] = param_norm
# print(gradient_unfreezed['layer1.0.weight'])
#
# print('OK')
# exit()
else:
assert False, "Do not know the loss: " + str(type(loss_fn))
#######################################################################
# calc gradients
loss.backward()
# Clip gradients if needed
if gradient_clip > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
# Optimize
optimizer.step()
scheduler.step()
# Log
if step % log_step == 0:
# Calc norm of gradients
total_norm = 0
for p in model.parameters():
if p.grad is not None:
param_norm = p.grad.data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm ** (1. / 2)
# Calc Mean Average Corner Error
if self_supervised:
mace = np.mean(np.linalg.norm(delta_gt.detach().cpu().numpy().reshape(-1, 2) -
delta_hat.detach().cpu().numpy().reshape(-1, 2), axis=-1))
summary_writer.add_scalars('mace', {'train': mace}, step)
# # Get feature extractor weigths
# fext_weights = model[0].feature_extractor.retrieve_weights()
# for key in fext_weights:
# summary_writer.add_histogram(key, fext_weights[key].reshape(1, -1), global_step=step)
#
# # Manual save
# fpath = os.path.join(summary_writer.get_logdir(), key + '.txt')
# with open(fpath, 'a') as f:
# weight_str = ','.join([str(e) for e in fext_weights[key].reshape(-1).tolist()])
# f.write(str(step) + ',' + weight_str + '\n')
# Save stats
summary_writer.add_scalars('loss', {'train': loss.item()}, step)
summary_writer.add_scalars('lr', {'value': scheduler.get_last_lr()[0]}, step)
summary_writer.add_scalars('g_norm', {'value': total_norm}, step)
summary_writer.flush()
# verbose
if log_verbose:
print('Epoch: {} iter: {}/{} loss: {}'.format(epoch, iter_no+1, steps_per_epoch, loss.item()))
# Save state
checkpoint_arguments['step'] = step
checkpointer.save("model_{:06d}".format(step), **checkpoint_arguments)
def eval_one_epoch(model: torch.nn.Sequential,
                   test_dataloader: torch.utils.data.DataLoader,
                   loss_fn: torch.nn.modules.loss._Loss,
                   epoch: int, steps_per_epoch: int, batch_size: int, device: str,
                   summary_writer: torch.utils.tensorboard.SummaryWriter,
                   self_supervised=False, log_verbose=False):
    """
    Evaluate the model for one epoch over the test set.

    Runs in eval mode under torch.no_grad(), dispatching on loss_fn exactly
    like the training loop: a torch loss module (MSE/L1/SmoothL1) compares
    ground truth against network output, the string 'CosineDistance' uses a
    cosine-similarity loss, and the strings 'TripletLoss'/'iHomE'/'biHomE'
    let the model compute the loss itself. The epoch-mean loss (and, when
    self_supervised, the Mean Average Corner Error) is logged to TensorBoard
    under the 'test' tag at global step (epoch + 1) * steps_per_epoch.
    """
    # Evaluation phase (eval mode: disables dropout, freezes batch-norm stats).
    model.eval()

    # Loop for the whole epoch
    batched_loss = []
    batched_mace = []
    with torch.no_grad():
        for iter_no, data in tqdm(enumerate(test_dataloader), total=len(test_dataloader)):

            # move data to device
            for key in data:
                data[key] = data[key].to(device, dtype=torch.float)

            #######################################################################
            # Loss is the MSE between predicted 4pDelta and ground truth 4pDelta
            # Loss is L1 loss, in which case we have to do additional postprocessing
            if (type(loss_fn) == torch.nn.MSELoss or type(loss_fn) == torch.nn.L1Loss or
                    type(loss_fn) == torch.nn.SmoothL1Loss):
                ground_truth, network_output, delta_gt, delta_hat = model(data)
                loss = loss_fn(ground_truth, network_output)

            # Triple loss scenario
            elif type(loss_fn) == str and loss_fn == 'CosineDistance':
                ground_truth, network_output, delta_gt, delta_hat = model(data)
                loss = torch.sum(1 - torch.cosine_similarity(ground_truth, network_output, dim=1))

            # Triple loss scenario
            elif type(loss_fn) == str and (loss_fn == 'TripletLoss' or loss_fn == 'iHomE' or loss_fn == 'biHomE'):
                loss, delta_gt, delta_hat = model(data)

            else:
                assert False, "Do not know the loss: " + str(type(loss_fn))
            #######################################################################

            # Remember loss
            batched_loss.append(loss.item())

            # Calc Mean Average Corner Error
            # (Euclidean distance between predicted and true corner deltas).
            if self_supervised:
                mace = np.mean(np.linalg.norm(delta_gt.detach().cpu().numpy().reshape(-1, 2) -
                                              delta_hat.detach().cpu().numpy().reshape(-1, 2), axis=-1))
                batched_mace.append(mace)

            # verbose
            if log_verbose:
                print('Epoch: {} iter: {}/{} loss: {}'.format(epoch, iter_no+1, steps_per_epoch, loss.item()))

    # Save state
    # NOTE(review): an empty test_dataloader yields np.mean([]) == nan here.
    summary_writer.add_scalars('loss', {'test': np.mean(batched_loss)}, (epoch + 1) * steps_per_epoch)
    if self_supervised:
        summary_writer.add_scalars('mace', {'test': np.mean(batched_mace)}, (epoch + 1) * steps_per_epoch)
    summary_writer.flush()
def do_train(model: torch.nn.Sequential,
             train_dataloader: torch.utils.data.DataLoader,
             test_dataloader: torch.utils.data.DataLoader,
             optimizer: torch.optim.Optimizer,
             gradient_clip: float,
             scheduler: torch.optim.lr_scheduler._LRScheduler,
             loss_fn: torch.nn.modules.loss._Loss,
             epochs: int, steps_per_epoch: int, batch_size: int, device: str,
             checkpointer: CheckPointer, checkpoint_arguments: dict, log_dir='logs', log_step=1,
             self_supervised=False, log_verbose=False):
    """Run the full train/eval loop, resuming from the checkpointed step.

    A single TensorBoard writer is shared by the train and test phases.
    The starting epoch is derived from the global step stored in
    checkpoint_arguments, so an interrupted run continues where it left
    off. Evaluation runs only when test_dataloader is provided.
    """
    # TensorBoard writer shared across all epochs and phases.
    summary_writer = SummaryWriter(log_dir)
    # Device placement, wrapping in DataParallel when several GPUs exist.
    if device == 'cuda' and torch.cuda.is_available():
        n_gpus = torch.cuda.device_count()
        if n_gpus > 1:
            print('Using {} GPUs for training'.format(n_gpus))
            print('Multiple GPUs detected. Using DataParallel mode.')
            model = torch.nn.DataParallel(model)
        model.to(device)
        print('Model device: {}'.format(device))
    # Resume at the epoch implied by the checkpointed step counter.
    start_epoch = checkpoint_arguments['step'] // steps_per_epoch
    for epoch in range(start_epoch, epochs):
        # Train part
        print('Training epoch: {}'.format(epoch))
        train_one_epoch(model=model, train_dataloader=train_dataloader, optimizer=optimizer,
                        gradient_clip=gradient_clip, scheduler=scheduler, loss_fn=loss_fn, epoch=epoch,
                        steps_per_epoch=steps_per_epoch, batch_size=batch_size, device=device,
                        checkpointer=checkpointer, checkpoint_arguments=checkpoint_arguments, log_step=log_step,
                        summary_writer=summary_writer, self_supervised=self_supervised, log_verbose=log_verbose)
        # Test part
        if test_dataloader is not None:
            print('Testing epoch: {}'.format(epoch))
            eval_one_epoch(model=model, test_dataloader=test_dataloader, loss_fn=loss_fn, epoch=epoch,
                           steps_per_epoch=steps_per_epoch, batch_size=batch_size, device=device,
                           summary_writer=summary_writer, self_supervised=self_supervised, log_verbose=log_verbose)
def main(config_file_path: str):
# Load yaml config file
with open(config_file_path, 'r') as file:
config = yaml.full_load(file)
###########################################################################
# Make train/test data loaders
###########################################################################
# Dataset fn
if 'oxford' in config['DATA']['NAME']:
make_dataloader_fn = make_oxford_dataloader
elif 'coco' in config['DATA']['NAME']:
make_dataloader_fn = make_coco_dataloader
elif 'clevr_change' in config['DATA']['NAME']:
make_dataloader_fn = make_clevr_change_dataloader
elif 'flir_adas' in config['DATA']['NAME']:
make_dataloader_fn = make_flir_adas_dataloader
else:
assert False, 'I dont know this dataset yet.'
# Camera models root
camera_models_root = (os.path.join(BASE_DIR, config['DATA']['CAMERA_MODELS_ROOT']) if 'CAMERA_MODELS_ROOT' in
config['DATA'] is not None else None)
# Train/test cache
train_cache = config['DATA']['DATASET_TRAIN_CACHE'] if 'DATASET_TRAIN_CACHE' in config['DATA'] is not None else None
test_cache = config['DATA']['DATASET_TEST_CACHE'] if 'DATASET_TEST_CACHE' in config['DATA'] is not None else None
# Collator
collator_blob_porosity = config['DATA']['AUGMENT_BLOB_POROSITY'] if 'AUGMENT_BLOB_POROSITY' in config[
'DATA'] else None
collator_blobiness = config['DATA']['AUGMENT_BLOBINESS'] if 'AUGMENT_BLOBINESS' in config['DATA'] else None
# Data sampler mode
data_sampler_mode = config['DATA']['SAMPLER']['MODE'] if 'MODE' in config['DATA']['SAMPLER'] else None
data_sampler_frame_dist = config['DATA']['SAMPLER']['PAIR_MAX_FRAME_DIST'] if 'PAIR_MAX_FRAME_DIST'\
in config['DATA']['SAMPLER'] else None
# Train dataloader
train_dataloader = make_dataloader_fn(dataset_name=config['DATA']['NAME'],
dataset_root=os.path.join(BASE_DIR, config['DATA']['DATASET_ROOT']),
camera_models_root=camera_models_root,
split=os.path.join(BASE_DIR, config['DATA']['TRAIN_SPLIT']),
transforms=config['DATA']['TRANSFORMS'],
batch_size=config['DATA']['SAMPLER']['BATCH_SIZE'],
samples_per_epoch=config['DATA']['SAMPLER']['TRAIN_SAMPLES_PER_EPOCH'],
mode=data_sampler_mode,
pair_max_frame_dist=data_sampler_frame_dist,
num_workers=config['DATA']['NUM_WORKERS'],
random_seed=config['DATA']['SAMPLER']['TRAIN_SEED'],
cache_path=train_cache,
collator_patch_1=config['MODEL']['BACKBONE']['PATCH_KEYS'][0],
collator_patch_2=config['MODEL']['BACKBONE']['PATCH_KEYS'][1],
collator_blob_porosity=collator_blob_porosity,
collator_blobiness=collator_blobiness)
# Test dataloader
test_dataloader = None
if "TEST_SPLIT" in config['DATA']:
test_dataloader = make_dataloader_fn(dataset_name=config['DATA']['NAME'],
dataset_root=os.path.join(BASE_DIR, config['DATA']['DATASET_ROOT']),
camera_models_root=camera_models_root,
split=os.path.join(BASE_DIR, config['DATA']['TEST_SPLIT']),
transforms=config['DATA']['TRANSFORMS'],
batch_size=config['DATA']['SAMPLER']['BATCH_SIZE'],
samples_per_epoch=config['DATA']['SAMPLER']['TEST_SAMPLES_PER_EPOCH'],
mode=data_sampler_mode,
pair_max_frame_dist=data_sampler_frame_dist,
num_workers=config['DATA']['NUM_WORKERS'],
random_seed=config['DATA']['SAMPLER']['TEST_SEED'],
cache_path=test_cache,
collator_patch_1=config['MODEL']['BACKBONE']['PATCH_KEYS'][0],
collator_patch_2=config['MODEL']['BACKBONE']['PATCH_KEYS'][1],
collator_blob_porosity=collator_blob_porosity,
collator_blobiness=collator_blobiness)
###########################################################################
# Data loaders pickling (for faster debugging)
###########################################################################
# with open('train_dataloader.pkl', 'wb') as f:
# pickle.dump(train_dataloader, f)
# with open('test_dataloader.pkl', 'wb') as f:
# pickle.dump(test_dataloader, f)
# exit()
# with open('train_dataloader.pkl', 'rb') as f:
# train_dataloader = pickle.load(f)
# with open('test_dataloader.pkl', 'rb') as f:
# test_dataloader = pickle.load(f)
###########################################################################
# DATA LOADERS TEST
###########################################################################
# import numpy as np
# import matplotlib.pyplot as plt
# for i_batch, sample_batched in enumerate(train_dataloader):
# images = sample_batched[0][0][0]
#
# patch_1, patch_2 = np.split(images.numpy(), 2, axis=0)
# target = sample_batched[1][0][0].numpy()
#
# fig, (ax1, ax2, ax3, ax4) = plt.subplots(1, 4, figsize=(4, 10))
# ax1.imshow(np.tile(patch_1.transpose((1, 2, 0)), (1, 1, 3)))
# ax1.set_title('patch_1')
#
# import cv2
# mat = cv2.getPerspectiveTransform(np.float32([[0, 0], [128, 0], [128, 128], [0, 128]]),
# np.float32([[0, 0], [128, 0], [128, 128], [0, 128]]) + np.float32(target))
# inv_mat = np.linalg.inv(mat)
# patch_1_w = np.expand_dims(cv2.warpPerspective(patch_1.transpose((1, 2, 0)), inv_mat, dsize=(128, 128)), axis=-1)
# ax2.imshow(np.tile(patch_1_w, (1, 1, 3)))
# ax2.set_title('patch_1 warped')
#
# ax3.imshow(np.tile(patch_2.transpose((1, 2, 0)), (1, 1, 3)))
# ax3.set_title('patch_2')
#
# patch_2_w = np.expand_dims(cv2.warpPerspective(patch_2.transpose((1, 2, 0)), mat, dsize=(128, 128)), axis=-1)
# ax4.imshow(np.tile(patch_2_w, (1, 1, 3)))
# ax4.set_title('patch_2 warped')
#
# plt.show()
###########################################################################
# Import and create the backbone
###########################################################################
# Import backbone
backbone_module = importlib.import_module('src.backbones.{}'.format(config['MODEL']['BACKBONE']['NAME']))
backbone_class_to_call = getattr(backbone_module, 'Model')
# Create backbone class
backbone = backbone_class_to_call(**config['MODEL']['BACKBONE'])
###########################################################################
# Import and create the head
###########################################################################
# Import backbone
head_module = importlib.import_module('src.heads.{}'.format(config['MODEL']['HEAD']['NAME']))
head_class_to_call = getattr(head_module, 'Model')
# Create backbone class
head = head_class_to_call(backbone, **config['MODEL']['HEAD'])
###########################################################################
# Import and create the head
###########################################################################
model = torch.nn.Sequential(backbone, head)
###########################################################################
# Create training elements
###########################################################################
# Training elements
if config['SOLVER']['OPTIMIZER'] == 'Adam':
l2_reg = float(config['SOLVER']['L2_WEIGHT_DECAY']) if 'L2_WEIGHT_DECAY' in config['SOLVER'] is not None else 0
optimizer = torch.optim.Adam(model.parameters(), lr=config['SOLVER']['LR'],
betas=(config['SOLVER']['MOMENTUM_1'], config['SOLVER']['MOMENTUM_2']),
weight_decay=l2_reg)
else:
assert False, 'I do not have this solver implemented yet.'
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=config['SOLVER']['MILESTONES'],
gamma=config['SOLVER']['LR_DECAY'])
try:
loss_fn = getattr(torch.nn, config['SOLVER']['LOSS'])()
except:
loss_fn = config['SOLVER']['LOSS']
###########################################################################
# Checkpoint
###########################################################################
arguments = {"step": | |
traffic in a blue/green deployment.
actionOnTimeout (string) --Information about when to reroute traffic from an original environment to a replacement environment in a blue/green deployment.
CONTINUE_DEPLOYMENT: Register new instances with the load balancer immediately after the new application revision is installed on the instances in the replacement environment.
STOP_DEPLOYMENT: Do not register new instances with load balancer unless traffic is rerouted manually. If traffic is not rerouted manually before the end of the specified wait period, the deployment status is changed to Stopped.
waitTimeInMinutes (integer) --The number of minutes to wait before the status of a blue/green deployment changed to Stopped if rerouting is not started manually. Applies only to the STOP_DEPLOYMENT option for actionOnTimeout
greenFleetProvisioningOption (dict) --Information about how instances are provisioned for a replacement environment in a blue/green deployment.
action (string) --The method used to add instances to a replacement environment.
DISCOVER_EXISTING: Use instances that already exist or will be created manually.
COPY_AUTO_SCALING_GROUP: Use settings from a specified Auto Scaling group to define and create instances in a new Auto Scaling group.
:type loadBalancerInfo: dict
:param loadBalancerInfo: Information about the load balancer used in a deployment.
elbInfoList (list) --An array containing information about the load balancer in Elastic Load Balancing to use in a deployment.
(dict) --Information about a load balancer in Elastic Load Balancing to use in a deployment.
name (string) --For blue/green deployments, the name of the load balancer that will be used to route traffic from original instances to replacement instances in a blue/green deployment. For in-place deployments, the name of the load balancer that instances are deregistered from so they are not serving traffic during a deployment, and then re-registered with after the deployment completes.
:rtype: dict
:return: {
'deploymentGroupId': 'string'
}
"""
pass
def delete_application(applicationName=None):
    """Delete an AWS CodeDeploy application.

    See also: AWS API Documentation

    :example: response = client.delete_application(
        applicationName='string'
    )
    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.
    """
    pass
def delete_deployment_config(deploymentConfigName=None):
    """Delete a deployment configuration.

    See also: AWS API Documentation

    :example: response = client.delete_deployment_config(
        deploymentConfigName='string'
    )
    :type deploymentConfigName: string
    :param deploymentConfigName: [REQUIRED] The name of a deployment
        configuration associated with the applicable IAM user or AWS account.
    """
    pass
def delete_deployment_group(applicationName=None, deploymentGroupName=None):
    """Delete a deployment group.

    See also: AWS API Documentation

    :example: response = client.delete_deployment_group(
        applicationName='string',
        deploymentGroupName='string'
    )
    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.
    :type deploymentGroupName: string
    :param deploymentGroupName: [REQUIRED] The name of an existing deployment
        group for the specified application.
    :rtype: dict
    :return: {
        'hooksNotCleanedUp': [
            {
                'name': 'string',
                'hook': 'string'
            },
        ]
    }
    """
    pass
def deregister_on_premises_instance(instanceName=None):
    """Deregister an on-premises instance.

    See also: AWS API Documentation

    :example: response = client.deregister_on_premises_instance(
        instanceName='string'
    )
    :type instanceName: string
    :param instanceName: [REQUIRED] The name of the on-premises instance
        to deregister.
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL given a client, its method, and arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ClientMethod.
    :type ExpiresIn: int
    :param ExpiresIn: The number of seconds the presigned URL is valid for.
        By default it expires in an hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, the HTTP method is whatever is used in the method's model.
    """
    pass
def get_application(applicationName=None):
    """Get information about an application.

    See also: AWS API Documentation

    :example: response = client.get_application(
        applicationName='string'
    )
    :type applicationName: string
    :param applicationName: [REQUIRED] The name of an AWS CodeDeploy
        application associated with the applicable IAM user or AWS account.
    :rtype: dict
    :return: {
        'application': {
            'applicationId': 'string',
            'applicationName': 'string',
            'createTime': datetime(2015, 1, 1),
            'linkedToGitHub': True|False,
            'gitHubAccountName': 'string'
        }
    }
    """
    pass
def get_application_revision(applicationName=None, revision=None):
    """Get information about an application revision.

    See also: AWS API Documentation

    :example: response = client.get_application_revision(
        applicationName='string',
        revision={
            'revisionType': 'S3'|'GitHub',
            's3Location': {
                'bucket': 'string',
                'key': 'string',
                'bundleType': 'tar'|'tgz'|'zip',
                'version': 'string',
                'eTag': 'string'
            },
            'gitHubLocation': {
                'repository': 'string',
                'commitId': 'string'
            }
        }
    )
    :type applicationName: string
    :param applicationName: [REQUIRED] The name of the application that
        corresponds to the revision.
    :type revision: dict
    :param revision: [REQUIRED] Information about the application revision
        to get, including type and location.

        revisionType (string) -- The type of application revision:
            S3: An application revision stored in Amazon S3.
            GitHub: An application revision stored in GitHub.
        s3Location (dict) -- Location of application artifacts in Amazon S3.
            bucket (string) -- The name of the Amazon S3 bucket where the
                application revision is stored.
            key (string) -- The name of the Amazon S3 object that represents
                the bundled artifacts for the application revision.
            bundleType (string) -- The file type of the application revision.
                Must be one of: tar (a tar archive file), tgz (a compressed
                tar archive file), or zip (a zip archive file).
            version (string) -- A specific version of the Amazon S3 object
                that represents the bundled artifacts. If not specified, the
                system uses the most recent version by default.
            eTag (string) -- The ETag of the Amazon S3 object. If not
                specified, ETag validation of the object is skipped.
        gitHubLocation (dict) -- Location of application artifacts in GitHub.
            repository (string) -- The GitHub account and repository pair
                that stores a reference to the commit representing the
                bundled artifacts. Specified as account/repository.
            commitId (string) -- The SHA1 commit ID of the GitHub commit
                that represents the bundled artifacts.
    :rtype: dict
    :return: {
        'applicationName': 'string',
        'revision': {
            'revisionType': 'S3'|'GitHub',
            's3Location': {
                'bucket': 'string',
                'key': 'string',
                'bundleType': 'tar'|'tgz'|'zip',
                'version': 'string',
                'eTag': 'string'
            },
            'gitHubLocation': {
                'repository': 'string',
                'commitId': 'string'
            }
        },
        'revisionInfo': {
            'description': 'string',
            'deploymentGroups': [
                'string',
            ],
            'firstUsedTime': datetime(2015, 1, 1),
            'lastUsedTime': datetime(2015, 1, 1),
            'registerTime': datetime(2015, 1, 1)
        }
    }
    :returns:
        S3: An application revision stored in Amazon S3.
        GitHub: An application revision stored in GitHub.
    """
    pass
def get_deployment(deploymentId=None):
"""
Gets information about a deployment.
See also: AWS API Documentation
:example: response = client.get_deployment(
deploymentId='string'
)
:type deploymentId: string
:param deploymentId: [REQUIRED]
A deployment ID associated with the applicable IAM user or AWS account.
:rtype: dict
:return: {
'deploymentInfo': {
'applicationName': 'string',
'deploymentGroupName': 'string',
'deploymentConfigName': 'string',
'deploymentId': 'string',
'previousRevision': {
'revisionType': 'S3'|'GitHub',
's3Location': {
'bucket': 'string',
'key': 'string',
'bundleType': 'tar'|'tgz'|'zip',
'version': 'string',
'eTag': 'string'
},
'gitHubLocation': {
'repository': 'string',
'commitId': 'string'
}
},
'revision': {
'revisionType': 'S3'|'GitHub',
's3Location': {
'bucket': 'string',
'key': 'string',
'bundleType': 'tar'|'tgz'|'zip',
'version': 'string',
'eTag': 'string'
},
'gitHubLocation': {
'repository': 'string',
'commitId': 'string'
}
},
'status': 'Created'|'Queued'|'InProgress'|'Succeeded'|'Failed'|'Stopped'|'Ready',
'errorInformation': {
'code': 'DEPLOYMENT_GROUP_MISSING'|'APPLICATION_MISSING'|'REVISION_MISSING'|'IAM_ROLE_MISSING'|'IAM_ROLE_PERMISSIONS'|'NO_EC2_SUBSCRIPTION'|'OVER_MAX_INSTANCES'|'NO_INSTANCES'|'TIMEOUT'|'HEALTH_CONSTRAINTS_INVALID'|'HEALTH_CONSTRAINTS'|'INTERNAL_ERROR'|'THROTTLED'|'ALARM_ACTIVE'|'AGENT_ISSUE'|'AUTO_SCALING_IAM_ROLE_PERMISSIONS'|'AUTO_SCALING_CONFIGURATION'|'MANUAL_STOP',
'message': 'string'
},
'createTime': datetime(2015, 1, 1),
'startTime': datetime(2015, 1, 1),
'completeTime': datetime(2015, 1, 1),
'deploymentOverview': {
'Pending': 123,
'InProgress': 123,
'Succeeded': 123,
'Failed': 123,
'Skipped': 123,
'Ready': 123
},
'description': 'string',
'creator': 'user'|'autoscaling'|'codeDeployRollback',
'ignoreApplicationStopFailures': True|False,
'autoRollbackConfiguration': {
'enabled': True|False,
'events': [
'DEPLOYMENT_FAILURE'|'DEPLOYMENT_STOP_ON_ALARM'|'DEPLOYMENT_STOP_ON_REQUEST',
]
},
'updateOutdatedInstancesOnly': True|False,
'rollbackInfo': {
'rollbackDeploymentId': 'string',
'rollbackTriggeringDeploymentId': 'string',
'rollbackMessage': 'string'
},
'deploymentStyle': {
'deploymentType': 'IN_PLACE'|'BLUE_GREEN',
'deploymentOption': 'WITH_TRAFFIC_CONTROL'|'WITHOUT_TRAFFIC_CONTROL'
},
'targetInstances': {
'tagFilters': [
{
'Key': 'string',
'Value': 'string',
'Type': 'KEY_ONLY'|'VALUE_ONLY'|'KEY_AND_VALUE'
},
],
'autoScalingGroups': [
'string',
]
},
'instanceTerminationWaitTimeStarted': True|False,
'blueGreenDeploymentConfiguration': {
'terminateBlueInstancesOnDeploymentSuccess': {
'action': 'TERMINATE'|'KEEP_ALIVE',
'terminationWaitTimeInMinutes': 123
},
'deploymentReadyOption': {
'actionOnTimeout': 'CONTINUE_DEPLOYMENT'|'STOP_DEPLOYMENT',
'waitTimeInMinutes': 123
},
'greenFleetProvisioningOption': {
'action': 'DISCOVER_EXISTING'|'COPY_AUTO_SCALING_GROUP'
}
},
'loadBalancerInfo': {
'elbInfoList': [
{
'name': 'string'
},
]
},
| |
# redcap2mysql.py
#!/usr/bin/python
# Export data from a REDCap project and send to a MySQL database.
# Track changes to transferred data files in local git repository.
#
# This is just a *rough* prototype in the *early* stages of development.
#
# It has been tested on Windows Server 2008 R2 with ActivePython 2.7 (64-bit).
# It has been tested on Windows Server 2008 R2 with Anaconda 4.3.0 2.7 (64-bit).
# It has been tested on Ubuntu 16 with the vendor-supplied Python 2.7 (64-bit).
#
# You need to have a REDCap project and a MySQL database. Access to the MySQL
# database will be over SSL, so you will need to supply SSL key and certs.
#
# Requires Python 2.7, a config file, git, mysql, a DSN, and these packages:
#
# python -m pip install pandas
# python -m pip install sqlalchemy
# python -m pip install ConfigParser
# python -m pip install pycurl
# python -m pip install logging
# python -m pip install datetime
# python -m pip install gitpython
# python -m pip install git+https://github.com/alorenzo175/mylogin.git#egg=mylogin
# python -m pip install certifi
#
# For use with ODBC database connections, you will also want to install pyodbc:
#
# python -m pip install pyodbc
#
# Or, alternatively, for use with the MySQL Connector driver written in Python:
#
# python -m pip install mysql-connector
#
# On Windows, you will also need Microsoft Visual C++ Compiler for Python 2.7.
# https://www.microsoft.com/en-us/download/details.aspx?id=44266
# You will also need the MySQL ODBC Connector (32-bit or 64-bit to match Python).
# https://dev.mysql.com/downloads/connector/odbc/
#
# Usage: python redcap2mysql.py [Project] [...]
#
# ... Where Project contains letters, numbers, and underscore characters. More
# than one project may be listed, with spaces separating the project names.
#
# This script can be automated with a utility such as cron. Here is an example
# crontab entry which runs the script every day at 8:55 PM:
#
# 55 20 * * * (cd /path/to/folder; /usr/bin/python ./redcap2mysql.py)
#
# Todo:
#
# 1. Add input data validation for all configuration parameters.
# 2. Try to conform to Python coding styles, conventions, and best practices.
# ---------------------------------------------------------------------------
# --------------------------- SETUP -----------------------------------------
# Use Python 3 style print statements.
from __future__ import print_function
# Import packages
import configparser
from sqlalchemy import *
from sys import exit
import os
import sys
from pandas.io import sql
import getpass
import pandas as pd
import certifi
import pycurl
from urllib.parse import urlencode
import hashlib
import logging
import socket
from io import StringIO
import pytz
from datetime import datetime
import re
import git
import traceback
import numpy as np
# -----------------------
# Read configuration file
# -----------------------
config_file = 'conf/redcap2mysql.cfg'  # See conf/redcap2mysql.cfg.example
# Defaults used for any key not present in the config file.
config = configparser.ConfigParser(
    {'data_path': 'data', 'log_file': 'redcap2mysql.log',
     'log_timestamp_format': '%Y-%m-%d %H:%M:%S %Z',
     'mysql_dsn': '', 'mysql_pwd': '', 'mysql_host': '',
     'mysql_port': '3306', 'mysql_conn_type': 'pyodbc', 'mysql_user': '',
     'redcap_url': 'https://localhost/API/', 'redcap_key': '<KEY>',
     'redcap_event_name_maxlen': '100'})
if os.path.isfile(config_file):
    config.read(config_file)
else:
    print("Can't find config file: " + config_file)
    exit(1)
# --------------------------
# Parse configuration object
# --------------------------
# Pull the settings used throughout the script into module-level names.
data_path = config['global'].get('data_path')
log_timestamp_format = config['global'].get('log_timestamp_format')
log_file = config['global'].get('log_file')
mysql_host = config['mysql'].get('mysql_host')
mysql_db = config['mysql'].get('mysql_db')
mysql_user = config['mysql'].get('mysql_user')
redcap_url = config['redcap'].get('redcap_url')
redcap_key = config['redcap'].get('redcap_key')
# Config values are strings; convert the length limit once here.
redcap_event_name_maxlen = int(
    config['redcap'].get('redcap_event_name_maxlen'))
# -----------------
# Configure logging
# -----------------
log_level = logging.INFO  # Set to logging.DEBUG or logging.INFO
# Set log level and timestamp format. Use the configured log_level
# (previously this hard-coded logging.DEBUG, silently ignoring log_level).
logging.basicConfig(filename=log_file, level=log_level,
    format='%(asctime)s %(message)s', datefmt=log_timestamp_format)
# ------------------------
# Configure local git repo
# ------------------------
# Create a local git repository for downloading and archiving data.
try:
    repo = git.Repo.init(data_path)
except Exception:
    # Catch Exception rather than using a bare except so SystemExit and
    # KeyboardInterrupt still propagate normally.
    message = "Error: Can't create git repo (%s)! Check config!" % (data_path)
    logging.error(message)
    raise OSError(message)
# ---------------------------
# Configure local data folder
# ---------------------------
# Create data folder. Should already exist if git repo created without error.
if not os.path.exists(data_path):
    try:
        os.makedirs(data_path)
    except Exception:
        message = "Error: Can't create folder (%s)! Check config!" % (data_path)
        logging.critical(message)
        raise OSError(message)
# -------------------------
# Configure MySQL user name
# -------------------------
# Get username from the operating system, if it is blank (default).
if mysql_user == '':
    mysql_user = getpass.getuser()
# ------------------------------------
# Define database connection functions
# ------------------------------------
def get_mysql_pwd(config):
    """Return the MySQL password from the config file or a console prompt.

    Two sources are supported, tried in this order:

    1. A clear-text password in the config file (least secure; protect
       the file with restrictive permissions).
    2. A password entered at an interactive prompt (most secure, but
       unsuitable for unattended runs).
    """
    password = config['mysql'].get('mysql_pwd')
    if password == '':
        # Config did not supply one; fall back to an interactive prompt.
        password = getpass.getpass()
    return password
def get_mysql_conn(config):
    """Build and return a SQLAlchemy engine for the MySQL database.

    Two connection types are supported, selected by the ``mysql_conn_type``
    config setting:

    * ``pyodbc``: connect through an ODBC DSN; the password comes from
      get_mysql_pwd() (config file or interactive prompt).
    * anything else: use the pure-Python MySQL Connector over SSL; the
      password may come from the config file, an encrypted login file
      read via the mylogin module (``mysql_path``), or a prompt.

    :param config: parsed ConfigParser with [mysql] and [mysql-ssl] sections.
    :return: SQLAlchemy engine for the configured database.
    :raises OSError: if a configured SSL key/cert file does not exist.
    """
    mysql_conn_type = config['mysql'].get('mysql_conn_type')
    mysql_user = config['mysql'].get('mysql_user')
    mysql_pwd = config['mysql'].get('mysql_pwd')
    if mysql_user == '':
        # Default to the operating-system account name.
        mysql_user = getpass.getuser()
    if mysql_conn_type == 'pyodbc':
        mysql_pwd = get_mysql_pwd(config)
        mysql_dsn = config['mysql'].get('mysql_dsn')
        # Create database connection through the ODBC DSN.
        # Bug fix: the password placeholder was previously invalid syntax;
        # pass the resolved mysql_pwd.
        import pyodbc
        DB_URI = "mysql+pyodbc://{user}:{password}@{dsn}"
        conn = create_engine(
            DB_URI.format(user=mysql_user, password=mysql_pwd, dsn=mysql_dsn))
        return conn
    else:
        # Try to read encrypted MySQL password from ~/.mylogin.cnf via mysql_path.
        mysql_path = config['mysql'].get('mysql_path')
        if mysql_pwd == '':
            if mysql_path != '':
                # Read encrypted password and decrypt it with the mylogin module.
                # While better than clear-text, be careful about securing the
                # password file. It is the best option for unattended use.
                try:
                    # Get encrypted password. This requires the mylogin module.
                    # NOTE(review): if "import mylogin" itself fails, the except
                    # clause below raises NameError; behavior kept as-is.
                    import mylogin
                    mysql_host = config['mysql'].get('mysql_host')
                    login = mylogin.get_login_info(mysql_path, host=mysql_host)
                    mysql_pwd = login['passwd']
                except mylogin.exception.UtilError as err:
                    print(("mylogin error: {0}".format(err)))
            else:
                mysql_pwd = get_mysql_pwd(config)
        # Import the pure-Python connector packages.
        import mysql.connector
        from mysql.connector.constants import ClientFlag
        # Get SSL settings (file paths to SSL keys and certs).
        ssl_ca = config['mysql-ssl'].get('ssl_ca')
        ssl_cert = config['mysql-ssl'].get('ssl_cert')
        ssl_key = config['mysql-ssl'].get('ssl_key')
        # Fail fast if any configured SSL file is missing.
        for file_path in (ssl_ca, ssl_cert, ssl_key):
            if not os.path.exists(file_path):
                message = "Error: Can't find: %s! Check config!" % (file_path)
                logging.critical(message)
                raise OSError(message)
        # Create a dict of SSL settings to pass to create_engine().
        ssl_args = {
            'client_flags': [ClientFlag.SSL],
            'ssl_ca': ssl_ca,
            'ssl_cert': ssl_cert,
            'ssl_key': ssl_key,
        }
        # Create database connection over SSL.
        mysql_host = config['mysql'].get('mysql_host')
        mysql_port = config['mysql'].get('mysql_port')
        mysql_db = config['mysql'].get('mysql_db')
        DB_URI = "mysql+mysqlconnector://{user}:{password}@{host}:{port}/{db}"
        conn = create_engine(
            DB_URI.format(user=mysql_user, password=mysql_pwd, host=mysql_host,
                port=mysql_port, db=mysql_db), connect_args=ssl_args)
        return conn
# -------------------
# Connect to Database
# -------------------
# Create a MySQL connection object based on the configured connection type.
# This engine is also used as the module-level default for get_prev_hash().
conn = get_mysql_conn(config)
# ------------------------- END SETUP ---------------------------------------
# ----------------
# Define functions
# ----------------
def get_data(csv_file, redcap_key, redcap_url, content):
    """Download one REDCap export as CSV and write it to csv_file.

    POSTs to the REDCap API requesting a flat, raw-coded CSV export
    (including survey fields) of the given content type, streaming the
    response body straight into csv_file. On a pycurl error the problem
    is printed and logged, and the script exits with status 2.
    """
    with open(csv_file, 'wb') as outfile:
        curl = pycurl.Curl()
        curl.setopt(pycurl.CAINFO, certifi.where())
        curl.setopt(curl.URL, redcap_url)
        curl.setopt(curl.FOLLOWLOCATION, True)
        post_fields = urlencode({
            'token': redcap_key,
            'content': content,
            'rawOrLabel': 'raw',
            'type': 'flat',
            'format': 'csv',
            'exportSurveyFields': 'True',
        })
        curl.setopt(curl.POSTFIELDS, post_fields)
        curl.setopt(curl.WRITEDATA, outfile)
        try:
            curl.perform()
            curl.close()
        except pycurl.error as err:
            curl.close()
            message = "Error: Can't fetch data. Check config: " + config_file
            print(message)
            logging.critical(message)
            exit(2)
def get_prev_hash(project, mysql_table, log_table, conn = conn):
    """Get the sha1 hash of the previously uploaded data for a table.

    project: project name (not used in this lookup; kept for a uniform
        call signature across the transfer helpers).
    mysql_table: name of the data table whose last-upload hash is wanted.
    log_table: name of the REDCap transfer-log table to consult.
    conn: SQLAlchemy engine/connection; defaults to the module-level
        conn bound at import time.
    Returns the stored sha1 hex digest string, or '' when the log table
    or a matching row does not exist.
    """
    # See if the database contains the log_table (REDCap transfer log) table.
    # NOTE(review): table names are interpolated directly into the SQL text;
    # they come from local config, not user input, but verify before reuse.
    rs = sql.execute('SHOW TABLES LIKE "' + log_table + '";', conn)
    row0 = rs.fetchone()
    res = ''
    if (row0 is not None) and (len(row0) != 0):
        res = row0[0]
    # If the table is found, find the most recent hash for the table data.
    prev_hash = ''
    if res == log_table:
        sql_cmd = 'SELECT sha1_hash FROM %s ' % (log_table) + \
            'WHERE table_name = "%s" ' % (mysql_table) + \
            'ORDER BY timestamp_utc DESC ' + \
            'LIMIT 1;'
        rs = sql.execute(sql_cmd, conn)
        row0 = rs.fetchone()
        if (row0 is not None) and (len(row0) != 0):
            prev_hash = row0[0]
    return(prev_hash)
def parse_csv(csv_file):
"""Parse a CSV file with Pandas, with basic checks and error handling."""
if os.path.isfile(csv_file) == True:
num_lines = sum(1 for line in open(csv_file))
if num_lines > 1:
try:
data = | |
linehatched90(_hatch_base*math.sqrt(4))
# Predefined hatching pattern instances. The capitalization of the attribute
# name encodes the spacing: each step from SMALL up to LARGE scales the base
# spacing by sqrt(2), so "normal" sits in the middle of the range.
linehatched90.LArge = linehatched90(_hatch_base*math.sqrt(8))
linehatched90.LARge = linehatched90(_hatch_base*math.sqrt(16))
linehatched90.LARGe = linehatched90(_hatch_base*math.sqrt(32))
linehatched90.LARGE = linehatched90(_hatch_base*math.sqrt(64))
# Hatching with lines at 135 degrees.
linehatched135 = linehatched(_hatch_base, 135)
linehatched135.SMALL = linehatched135(_hatch_base/math.sqrt(64))
linehatched135.SMALl = linehatched135(_hatch_base/math.sqrt(32))
linehatched135.SMAll = linehatched135(_hatch_base/math.sqrt(16))
linehatched135.SMall = linehatched135(_hatch_base/math.sqrt(8))
linehatched135.Small = linehatched135(_hatch_base/math.sqrt(4))
linehatched135.small = linehatched135(_hatch_base/math.sqrt(2))
linehatched135.normal = linehatched135(_hatch_base)
linehatched135.large = linehatched135(_hatch_base*math.sqrt(2))
linehatched135.Large = linehatched135(_hatch_base*math.sqrt(4))
linehatched135.LArge = linehatched135(_hatch_base*math.sqrt(8))
linehatched135.LARge = linehatched135(_hatch_base*math.sqrt(16))
linehatched135.LARGe = linehatched135(_hatch_base*math.sqrt(32))
linehatched135.LARGE = linehatched135(_hatch_base*math.sqrt(64))
# Cross-hatching (two perpendicular line sets) at 0 degrees.
crosslinehatched0 = linehatched(_hatch_base, 0, cross=1)
crosslinehatched0.SMALL = crosslinehatched0(_hatch_base/math.sqrt(64))
crosslinehatched0.SMALl = crosslinehatched0(_hatch_base/math.sqrt(32))
crosslinehatched0.SMAll = crosslinehatched0(_hatch_base/math.sqrt(16))
crosslinehatched0.SMall = crosslinehatched0(_hatch_base/math.sqrt(8))
crosslinehatched0.Small = crosslinehatched0(_hatch_base/math.sqrt(4))
crosslinehatched0.small = crosslinehatched0(_hatch_base/math.sqrt(2))
# NOTE: here "normal" is the base instance itself, not a new instance.
crosslinehatched0.normal = crosslinehatched0
crosslinehatched0.large = crosslinehatched0(_hatch_base*math.sqrt(2))
crosslinehatched0.Large = crosslinehatched0(_hatch_base*math.sqrt(4))
crosslinehatched0.LArge = crosslinehatched0(_hatch_base*math.sqrt(8))
crosslinehatched0.LARge = crosslinehatched0(_hatch_base*math.sqrt(16))
crosslinehatched0.LARGe = crosslinehatched0(_hatch_base*math.sqrt(32))
crosslinehatched0.LARGE = crosslinehatched0(_hatch_base*math.sqrt(64))
# Cross-hatching at 45 degrees.
crosslinehatched45 = linehatched(_hatch_base, 45, cross=1)
crosslinehatched45.SMALL = crosslinehatched45(_hatch_base/math.sqrt(64))
crosslinehatched45.SMALl = crosslinehatched45(_hatch_base/math.sqrt(32))
crosslinehatched45.SMAll = crosslinehatched45(_hatch_base/math.sqrt(16))
crosslinehatched45.SMall = crosslinehatched45(_hatch_base/math.sqrt(8))
crosslinehatched45.Small = crosslinehatched45(_hatch_base/math.sqrt(4))
crosslinehatched45.small = crosslinehatched45(_hatch_base/math.sqrt(2))
crosslinehatched45.normal = crosslinehatched45
crosslinehatched45.large = crosslinehatched45(_hatch_base*math.sqrt(2))
crosslinehatched45.Large = crosslinehatched45(_hatch_base*math.sqrt(4))
crosslinehatched45.LArge = crosslinehatched45(_hatch_base*math.sqrt(8))
crosslinehatched45.LARge = crosslinehatched45(_hatch_base*math.sqrt(16))
crosslinehatched45.LARGe = crosslinehatched45(_hatch_base*math.sqrt(32))
crosslinehatched45.LARGE = crosslinehatched45(_hatch_base*math.sqrt(64))
class colorgradient(deco, attr.attr):
    """Strokes successive pieces of a path in colors taken from a gradient."""

    def __init__(self, grad, attrs=[], steps=20):
        self.attrs = attrs
        self.grad = grad
        self.steps = steps

    def decorate(self, dp, texrunner):
        dp.ensurenormpath()
        total_len = dp.path.arclen()
        # One gradient color per piece; both lists are reversed so that the
        # gradient runs from the end of the path back towards its start.
        colors = [self.grad.select(i, self.steps) for i in range(self.steps)]
        colors.reverse()
        params = dp.path.arclentoparam([total_len*i/float(self.steps) for i in range(self.steps)])
        params.reverse()
        pieces = canvas.canvas()
        # The first and last pieces are clipped at only one end and are
        # therefore stroked outside of the loop.
        pieces.stroke(dp.path.split(params[1])[1], attr.mergeattrs([colors[0]] + self.attrs))
        for i in range(1, self.steps-1):
            pieces.stroke(dp.path.split([params[i-1], params[i+1]])[1],
                          attr.mergeattrs([colors[i]] + self.attrs))
        pieces.stroke(dp.path.split(params[-2])[0], attr.mergeattrs([colors[-1]] + self.attrs))
        dp.ornaments.insert(pieces)
class brace(deco, attr.attr):
    r"""draws a nicely curled brace

    In most cases, the original line is not wanted; use canvas.canvas.draw(..) for it.

    Geometrical parameters:

                 inner /\ strokes
        ____________/    \__________
       /   bar            bar       \ outer
      /                              \ strokes

    totalheight                distance from the jaws to the middle cap
    barthickness               thickness of the main bars
    innerstrokesthickness      thickness of the two ending strokes
    outerstrokesthickness      thickness of the inner strokes at the middle cap
    innerstrokesrelheight      height of the inner/outer strokes, relative to the total height
    outerstrokesrelheight      this determines the angle of the main bars!
                               should be around 0.5
    Note: if innerstrokesrelheight + outerstrokesrelheight == 1 then the main bars
          will be aligned parallel to the connecting line between the endpoints
    outerstrokesangle          angle of the two ending strokes
    innerstrokesangle          angle between the inner strokes at the middle cap
    slantstrokesangle          extra slanting of the inner/outer strokes
    innerstrokessmoothness     smoothing parameter for the inner + outer strokes
    outerstrokessmoothness     should be around 1 (allowed: [0,infty))
    middlerelpos               position of the middle cap (0 == left, 1 == right)
    """
    # This code is experimental because it is unclear
    # how the brace fits into the concepts of PyX
    #
    # Some thoughts:
    # - a brace needs to be decoratable with text
    #   it needs stroking and filling attributes
    # - the brace is not really a box:
    #   it has two "anchor" points that are important for aligning it to other things
    #   and one "anchor" point (plus direction) for aligning other things
    # - a brace is not a deformer:
    #   it does not look at anything else than begin/endpoint of a path
    # - a brace might be a connector (which is to be dissolved into the box concept later?)
    def __init__(self, reverse=1, stretch=None, dist=None, fillattrs=[],
                 totalheight=12*unit.x_pt,
                 barthickness=0.5*unit.x_pt, innerstrokesthickness=0.25*unit.x_pt, outerstrokesthickness=0.25*unit.x_pt,
                 innerstrokesrelheight=0.6, outerstrokesrelheight=0.7,
                 innerstrokesangle=30, outerstrokesangle=25, slantstrokesangle=5,
                 innerstrokessmoothness=2.0, outerstrokessmoothness=2.5,
                 middlerelpos=0.5):
        """Store the brace parameters; see the class docstring for their meaning.

        reverse flips the side of the path the brace is drawn on; stretch and
        dist scale/offset the brace relative to the decorated path; fillattrs
        are the attributes used to fill the brace outline.
        """
        self.fillattrs = fillattrs
        self.reverse = reverse
        self.stretch = stretch
        self.dist = dist
        self.totalheight = totalheight
        self.barthickness = barthickness
        self.innerstrokesthickness = innerstrokesthickness
        self.outerstrokesthickness = outerstrokesthickness
        self.innerstrokesrelheight = innerstrokesrelheight
        self.outerstrokesrelheight = outerstrokesrelheight
        self.innerstrokesangle = innerstrokesangle
        self.outerstrokesangle = outerstrokesangle
        self.slantstrokesangle = slantstrokesangle
        self.innerstrokessmoothness = innerstrokessmoothness
        self.outerstrokessmoothness = outerstrokessmoothness
        self.middlerelpos = middlerelpos
def __call__(self, **kwargs):
for name in ["reverse", "stretch", "dist", "fillattrs",
"totalheight", "barthickness", "innerstrokesthickness", "outerstrokesthickness",
"innerstrokesrelheight", "outerstrokesrelheight", "innerstrokesangle", "outerstrokesangle", "slantstrokesangle",
"innerstrokessmoothness", "outerstrokessmoothness", "middlerelpos"]:
if not kwargs.has_key(name):
kwargs[name] = self.__dict__[name]
return brace(**kwargs)
    def _halfbracepath_pt(self, length_pt, height_pt, ilength_pt, olength_pt, # <<<
        ithick_pt, othick_pt, bthick_pt, cos_iangle, sin_iangle, cos_oangle,
        sin_oangle, cos_slangle, sin_slangle):
        """Construct one half of the brace outline as a closed path.

        NOTE(review): all *_pt arguments are assumed to be plain floats in
        PostScript points with the angles pre-split into sin/cos pairs --
        confirm against the caller, which is outside this view.
        """
        ismooth = self.innerstrokessmoothness
        osmooth = self.outerstrokessmoothness
        # these two parameters are not important enough to be seen outside
        inner_cap_param = 1.5
        outer_cap_param = 2.5
        outerextracurved = 0.6 # in (0, 1]
        # 1.0 will lead to F=G, the outer strokes will not be curved at their ends.
        # The smaller, the more curvature
        # build an orientation path (three straight lines)
        #
        #      \q1
        #    /  \
        #   /    \
        # _/      \______________________________________q5
        # q2      q3                     q4              \
        #                                                 \
        #                                                  \
        #                                                   \q6
        #
        # get the points for that:
        q1 = (0, height_pt - inner_cap_param * ithick_pt + 0.5*ithick_pt/sin_iangle)
        q2 = (q1[0] + ilength_pt * sin_iangle,
              q1[1] - ilength_pt * cos_iangle)
        q6 = (length_pt, 0)
        q5 = (q6[0] - olength_pt * sin_oangle,
              q6[1] + olength_pt * cos_oangle)
        bardir = (q5[0] - q2[0], q5[1] - q2[1])
        bardirnorm = math.hypot(*bardir)
        bardir = (bardir[0]/bardirnorm, bardir[1]/bardirnorm)
        ismoothlength_pt = ilength_pt * ismooth
        osmoothlength_pt = olength_pt * osmooth
        # If both smoothing zones do not fit on the bar, shrink them proportionally.
        # NOTE(review): ismoothlength_pt is rescaled before it is used in the
        # denominator of the following line, so the two scalings are not
        # symmetric -- looks unintended; confirm before changing.
        if bardirnorm < ismoothlength_pt + osmoothlength_pt:
            ismoothlength_pt = bardirnorm * ismoothlength_pt / (ismoothlength_pt + osmoothlength_pt)
            osmoothlength_pt = bardirnorm * osmoothlength_pt / (ismoothlength_pt + osmoothlength_pt)
        q3 = (q2[0] + ismoothlength_pt * bardir[0],
              q2[1] + ismoothlength_pt * bardir[1])
        q4 = (q5[0] - osmoothlength_pt * bardir[0],
              q5[1] - osmoothlength_pt * bardir[1])
        #
        #        P _O
        #       / | \A2
        #      / A1\ \
        #     /     \ \B2C2________D2___________E2_______F2___G2
        #     \      \______________________________________   \
        #      \      B1,C1         D1           E1      F1 G1   \
        #       \                                                 \
        #        \                                                 \H2
        #         H1\_/I2
        #           I1
        #
        # the halfbraces meet in P and A1:
        P = (0, height_pt)
        A1 = (0, height_pt - inner_cap_param * ithick_pt)
        # A2 is A1, shifted by the inner thickness
        A2 = (A1[0] + ithick_pt * cos_iangle,
              A1[1] + ithick_pt * sin_iangle)
        s, t = deformer.intersection(P, A2, (cos_slangle, sin_slangle), (sin_iangle, -cos_iangle))
        O = (P[0] + s * cos_slangle,
             P[1] + s * sin_slangle)
        # from D1 to E1 is the straight part of the brace
        # also back from E2 to D1
        D1 = (q3[0] + bthick_pt * bardir[1],
              q3[1] - bthick_pt * bardir[0])
        D2 = (q3[0] - bthick_pt * bardir[1],
              q3[1] + bthick_pt * bardir[0])
        E1 = (q4[0] + bthick_pt * bardir[1],
              q4[1] - bthick_pt * bardir[0])
        E2 = (q4[0] - bthick_pt * bardir[1],
              q4[1] + bthick_pt * bardir[0])
        # I1, I2 are the control points at the outer stroke
        I1 = (q6[0] - 0.5 * othick_pt * cos_oangle,
              q6[1] - 0.5 * othick_pt * sin_oangle)
        I2 = (q6[0] + 0.5 * othick_pt * cos_oangle,
              q6[1] + 0.5 * othick_pt * sin_oangle)
        # get the control points for the curved parts of the brace
        s, t = deformer.intersection(A1, D1, (sin_iangle, -cos_iangle), bardir)
        B1 = (D1[0] + t * bardir[0],
              D1[1] + t * bardir[1])
        s, t = deformer.intersection(A2, D2, (sin_iangle, -cos_iangle), bardir)
        B2 = (D2[0] + t * bardir[0],
              D2[1] + t * bardir[1])
        s, t = deformer.intersection(E1, I1, bardir, (-sin_oangle, cos_oangle))
        G1 = (E1[0] + s * bardir[0],
              E1[1] + s * bardir[1])
        s, t = deformer.intersection(E2, I2, bardir, (-sin_oangle, cos_oangle))
        G2 = (E2[0] + s * bardir[0],
              E2[1] + s * bardir[1])
        # at the inner strokes: use curvature zero at both ends
        C1 = B1
        C2 = B2
        # at the outer strokes: use curvature zero only at the connection to
        # the straight part
        F1 = (outerextracurved * G1[0] + (1 - outerextracurved) * E1[0],
              outerextracurved * G1[1] + (1 - outerextracurved) * E1[1])
        F2 = (outerextracurved * G2[0] + (1 - outerextracurved) * E2[0],
              outerextracurved * G2[1] + (1 - outerextracurved) * E2[1])
        # the tip of the outer stroke, endpoints of the bezier curve
        H1 = (I1[0] - outer_cap_param * othick_pt * sin_oangle,
              I1[1] + outer_cap_param * othick_pt * cos_oangle)
        H2 = (I2[0] - outer_cap_param * othick_pt * sin_oangle,
              I2[1] + outer_cap_param * othick_pt * cos_oangle)
        #for qq in [A1,B1,C1,D1,E1,F1,G1,H1,I1,
        #           A2,B2,C2,D2,E2,F2,G2,H2,I2,
        #           O,P
        #           ]:
        #    cc.fill(path.circle(qq[0], qq[1], 0.5), [color.rgb.green])
        # now build the right halfbrace
        bracepath = path.path(path.moveto_pt(*A1))
        bracepath.append(path.curveto_pt(B1[0], B1[1], C1[0], C1[1], D1[0], D1[1]))
        bracepath.append(path.lineto_pt(E1[0], E1[1]))
        bracepath.append(path.curveto_pt(F1[0], F1[1], G1[0], G1[1], H1[0], H1[1]))
        # the tip of the right halfbrace
        bracepath.append(path.curveto_pt(I1[0], I1[1], I2[0], I2[1], H2[0], H2[1]))
        # the rest of the right halfbrace
        bracepath.append(path.curveto_pt(G2[0], G2[1], F2[0], F2[1], E2[0], E2[1]))
        bracepath.append(path.lineto_pt(D2[0], D2[1]))
        bracepath.append(path.curveto_pt(C2[0], C2[1], B2[0], B2[1], A2[0], A2[1]))
        # the tip in the middle of the brace
        bracepath.append(path.curveto_pt(O[0], O[1], O[0], O[1], P[0], P[1]))
        return bracepath
    # >>>
def _bracepath(self, | |
Constraint(expr= m.x6513 - 490*m.b6953 <= 0)
# Machine-generated Pyomo model constraints (do not edit by hand).
# m.c4849-m.c4860: big-M upper bounds linking each x to its indicator b (x <= 490*b).
m.c4849 = Constraint(expr= m.x6515 - 490*m.b6955 <= 0)
m.c4850 = Constraint(expr= m.x6517 - 490*m.b6957 <= 0)
m.c4851 = Constraint(expr= m.x6519 - 490*m.b6959 <= 0)
m.c4852 = Constraint(expr= m.x6521 - 490*m.b6961 <= 0)
m.c4853 = Constraint(expr= m.x6523 - 490*m.b6963 <= 0)
m.c4854 = Constraint(expr= m.x6525 - 490*m.b6965 <= 0)
m.c4855 = Constraint(expr= m.x6527 - 490*m.b6967 <= 0)
m.c4856 = Constraint(expr= m.x6529 - 490*m.b6969 <= 0)
m.c4857 = Constraint(expr= m.x6563 - 490*m.b7003 <= 0)
m.c4858 = Constraint(expr= m.x6565 - 490*m.b7005 <= 0)
m.c4859 = Constraint(expr= m.x6567 - 490*m.b7007 <= 0)
m.c4860 = Constraint(expr= m.x6569 - 490*m.b7009 <= 0)
# m.c4861-m.c4900: minimum activity when the indicator is on (x >= 40*b or 20*b).
m.c4861 = Constraint(expr= m.x6499 - 40*m.b6939 >= 0)
m.c4862 = Constraint(expr= m.x6501 - 40*m.b6941 >= 0)
m.c4863 = Constraint(expr= m.x6503 - 40*m.b6943 >= 0)
m.c4864 = Constraint(expr= m.x6505 - 40*m.b6945 >= 0)
m.c4865 = Constraint(expr= m.x6507 - 40*m.b6947 >= 0)
m.c4866 = Constraint(expr= m.x6509 - 40*m.b6949 >= 0)
m.c4867 = Constraint(expr= m.x6511 - 40*m.b6951 >= 0)
m.c4868 = Constraint(expr= m.x6513 - 40*m.b6953 >= 0)
m.c4869 = Constraint(expr= m.x6515 - 40*m.b6955 >= 0)
m.c4870 = Constraint(expr= m.x6517 - 40*m.b6957 >= 0)
m.c4871 = Constraint(expr= m.x6519 - 40*m.b6959 >= 0)
m.c4872 = Constraint(expr= m.x6521 - 40*m.b6961 >= 0)
m.c4873 = Constraint(expr= m.x6523 - 40*m.b6963 >= 0)
m.c4874 = Constraint(expr= m.x6525 - 40*m.b6965 >= 0)
m.c4875 = Constraint(expr= m.x6527 - 40*m.b6967 >= 0)
m.c4876 = Constraint(expr= m.x6529 - 40*m.b6969 >= 0)
m.c4877 = Constraint(expr= m.x6531 - 40*m.b6971 >= 0)
m.c4878 = Constraint(expr= m.x6533 - 40*m.b6973 >= 0)
m.c4879 = Constraint(expr= m.x6535 - 40*m.b6975 >= 0)
m.c4880 = Constraint(expr= m.x6537 - 40*m.b6977 >= 0)
m.c4881 = Constraint(expr= m.x6539 - 40*m.b6979 >= 0)
m.c4882 = Constraint(expr= m.x6541 - 40*m.b6981 >= 0)
m.c4883 = Constraint(expr= m.x6543 - 40*m.b6983 >= 0)
m.c4884 = Constraint(expr= m.x6545 - 40*m.b6985 >= 0)
m.c4885 = Constraint(expr= m.x6547 - 40*m.b6987 >= 0)
m.c4886 = Constraint(expr= m.x6549 - 40*m.b6989 >= 0)
m.c4887 = Constraint(expr= m.x6551 - 40*m.b6991 >= 0)
m.c4888 = Constraint(expr= m.x6553 - 40*m.b6993 >= 0)
m.c4889 = Constraint(expr= m.x6555 - 40*m.b6995 >= 0)
m.c4890 = Constraint(expr= m.x6557 - 40*m.b6997 >= 0)
m.c4891 = Constraint(expr= m.x6559 - 40*m.b6999 >= 0)
m.c4892 = Constraint(expr= m.x6561 - 40*m.b7001 >= 0)
m.c4893 = Constraint(expr= m.x6563 - 40*m.b7003 >= 0)
m.c4894 = Constraint(expr= m.x6565 - 40*m.b7005 >= 0)
m.c4895 = Constraint(expr= m.x6567 - 40*m.b7007 >= 0)
m.c4896 = Constraint(expr= m.x6569 - 40*m.b7009 >= 0)
m.c4897 = Constraint(expr= m.x6571 - 20*m.b7011 >= 0)
m.c4898 = Constraint(expr= m.x6573 - 20*m.b7013 >= 0)
m.c4899 = Constraint(expr= m.x6575 - 20*m.b7015 >= 0)
m.c4900 = Constraint(expr= m.x6577 - 20*m.b7017 >= 0)
# m.c4901-m.c4940: each selector b splits into exactly two alternative binaries.
m.c4901 = Constraint(expr= - m.b6898 + m.b6938 + m.b6939 == 0)
m.c4902 = Constraint(expr= - m.b6899 + m.b6940 + m.b6941 == 0)
m.c4903 = Constraint(expr= - m.b6900 + m.b6942 + m.b6943 == 0)
m.c4904 = Constraint(expr= - m.b6901 + m.b6944 + m.b6945 == 0)
m.c4905 = Constraint(expr= - m.b6902 + m.b6946 + m.b6947 == 0)
m.c4906 = Constraint(expr= - m.b6903 + m.b6948 + m.b6949 == 0)
m.c4907 = Constraint(expr= - m.b6904 + m.b6950 + m.b6951 == 0)
m.c4908 = Constraint(expr= - m.b6905 + m.b6952 + m.b6953 == 0)
m.c4909 = Constraint(expr= - m.b6906 + m.b6954 + m.b6955 == 0)
m.c4910 = Constraint(expr= - m.b6907 + m.b6956 + m.b6957 == 0)
m.c4911 = Constraint(expr= - m.b6908 + m.b6958 + m.b6959 == 0)
m.c4912 = Constraint(expr= - m.b6909 + m.b6960 + m.b6961 == 0)
m.c4913 = Constraint(expr= - m.b6910 + m.b6962 + m.b6963 == 0)
m.c4914 = Constraint(expr= - m.b6911 + m.b6964 + m.b6965 == 0)
m.c4915 = Constraint(expr= - m.b6912 + m.b6966 + m.b6967 == 0)
m.c4916 = Constraint(expr= - m.b6913 + m.b6968 + m.b6969 == 0)
m.c4917 = Constraint(expr= - m.b6914 + m.b6970 + m.b6971 == 0)
m.c4918 = Constraint(expr= - m.b6915 + m.b6972 + m.b6973 == 0)
m.c4919 = Constraint(expr= - m.b6916 + m.b6974 + m.b6975 == 0)
m.c4920 = Constraint(expr= - m.b6917 + m.b6976 + m.b6977 == 0)
m.c4921 = Constraint(expr= - m.b6918 + m.b6978 + m.b6979 == 0)
m.c4922 = Constraint(expr= - m.b6919 + m.b6980 + m.b6981 == 0)
m.c4923 = Constraint(expr= - m.b6920 + m.b6982 + m.b6983 == 0)
m.c4924 = Constraint(expr= - m.b6921 + m.b6984 + m.b6985 == 0)
m.c4925 = Constraint(expr= - m.b6922 + m.b6986 + m.b6987 == 0)
m.c4926 = Constraint(expr= - m.b6923 + m.b6988 + m.b6989 == 0)
m.c4927 = Constraint(expr= - m.b6924 + m.b6990 + m.b6991 == 0)
m.c4928 = Constraint(expr= - m.b6925 + m.b6992 + m.b6993 == 0)
m.c4929 = Constraint(expr= - m.b6926 + m.b6994 + m.b6995 == 0)
m.c4930 = Constraint(expr= - m.b6927 + m.b6996 + m.b6997 == 0)
m.c4931 = Constraint(expr= - m.b6928 + m.b6998 + m.b6999 == 0)
m.c4932 = Constraint(expr= - m.b6929 + m.b7000 + m.b7001 == 0)
m.c4933 = Constraint(expr= - m.b6930 + m.b7002 + m.b7003 == 0)
m.c4934 = Constraint(expr= - m.b6931 + m.b7004 + m.b7005 == 0)
m.c4935 = Constraint(expr= - m.b6932 + m.b7006 + m.b7007 == 0)
m.c4936 = Constraint(expr= - m.b6933 + m.b7008 + m.b7009 == 0)
m.c4937 = Constraint(expr= - m.b6934 + m.b7010 + m.b7011 == 0)
m.c4938 = Constraint(expr= - m.b6935 + m.b7012 + m.b7013 == 0)
m.c4939 = Constraint(expr= - m.b6936 + m.b7014 + m.b7015 == 0)
m.c4940 = Constraint(expr= - m.b6937 + m.b7016 + m.b7017 == 0)
# m.c4941-m.c4979: weighted balance equations tying aggregate x variables to
# their disaggregated components (coefficients 2 / 1.2 / 1 and 1 / 0.6 / 0.5).
m.c4941 = Constraint(expr= m.x6140 - 2*m.x6618 == 0)
m.c4942 = Constraint(expr= m.x6143 - 1.2*m.x6619 - 2*m.x6620 - 1.2*m.x6621 == 0)
m.c4943 = Constraint(expr= m.x6146 - m.x6622 - 1.2*m.x6623 - m.x6624 - 2*m.x6625 - 1.2*m.x6626 - m.x6627 == 0)
m.c4944 = Constraint(expr= m.x6149 - m.x6628 - 1.2*m.x6629 - m.x6630 - 2*m.x6631 - 1.2*m.x6632 - m.x6633 == 0)
m.c4945 = Constraint(expr= m.x6152 - 2*m.x6634 == 0)
m.c4946 = Constraint(expr= m.x6155 - 1.2*m.x6635 - 2*m.x6636 - 1.2*m.x6637 == 0)
m.c4947 = Constraint(expr= m.x6158 - m.x6638 - 1.2*m.x6639 - m.x6640 - 2*m.x6641 - 1.2*m.x6642 - m.x6643 == 0)
m.c4948 = Constraint(expr= m.x6161 - m.x6644 - 1.2*m.x6645 - m.x6646 - 2*m.x6647 - 1.2*m.x6648 - m.x6649 == 0)
m.c4949 = Constraint(expr= m.x6164 - 2*m.x6650 == 0)
m.c4950 = Constraint(expr= m.x6167 - 1.2*m.x6651 - 2*m.x6652 - 1.2*m.x6653 == 0)
m.c4951 = Constraint(expr= m.x6170 - m.x6654 - 1.2*m.x6655 - m.x6656 - 2*m.x6657 - 1.2*m.x6658 - m.x6659 == 0)
m.c4952 = Constraint(expr= m.x6173 - m.x6660 - 1.2*m.x6661 - m.x6662 - 2*m.x6663 - 1.2*m.x6664 - m.x6665 == 0)
m.c4953 = Constraint(expr= m.x6176 - 2*m.x6666 == 0)
m.c4954 = Constraint(expr= m.x6179 - 1.2*m.x6667 - 2*m.x6668 - 1.2*m.x6669 == 0)
m.c4955 = Constraint(expr= m.x6182 - m.x6670 - 1.2*m.x6671 - m.x6672 - 2*m.x6673 - 1.2*m.x6674 - m.x6675 == 0)
m.c4956 = Constraint(expr= m.x6185 - m.x6676 - 1.2*m.x6677 - m.x6678 - 2*m.x6679 - 1.2*m.x6680 - m.x6681 == 0)
m.c4957 = Constraint(expr= m.x6188 - 2*m.x6682 == 0)
m.c4958 = Constraint(expr= m.x6191 - 1.2*m.x6683 - 2*m.x6684 - 1.2*m.x6685 == 0)
m.c4959 = Constraint(expr= m.x6194 - m.x6686 - 1.2*m.x6687 - m.x6688 - 2*m.x6689 - 1.2*m.x6690 - m.x6691 == 0)
m.c4960 = Constraint(expr= m.x6197 - m.x6692 - 1.2*m.x6693 - m.x6694 - 2*m.x6695 - 1.2*m.x6696 - m.x6697 == 0)
m.c4961 = Constraint(expr= m.x6200 - 2*m.x6698 == 0)
m.c4962 = Constraint(expr= m.x6203 - 1.2*m.x6699 - 2*m.x6700 - 1.2*m.x6701 == 0)
m.c4963 = Constraint(expr= m.x6206 - m.x6702 - 1.2*m.x6703 - m.x6704 - 2*m.x6705 - 1.2*m.x6706 - m.x6707 == 0)
m.c4964 = Constraint(expr= m.x6209 - m.x6708 - 1.2*m.x6709 - m.x6710 - 2*m.x6711 - 1.2*m.x6712 - m.x6713 == 0)
m.c4965 = Constraint(expr= m.x6212 - 2*m.x6714 == 0)
m.c4966 = Constraint(expr= m.x6215 - 1.2*m.x6715 - 2*m.x6716 - 1.2*m.x6717 == 0)
m.c4967 = Constraint(expr= m.x6218 - m.x6718 - 1.2*m.x6719 - m.x6720 - 2*m.x6721 - 1.2*m.x6722 - m.x6723 == 0)
m.c4968 = Constraint(expr= m.x6221 - m.x6724 - 1.2*m.x6725 - m.x6726 - 2*m.x6727 - 1.2*m.x6728 - m.x6729 == 0)
m.c4969 = Constraint(expr= m.x6224 - 2*m.x6730 == 0)
m.c4970 = Constraint(expr= m.x6227 - 1.2*m.x6731 - 2*m.x6732 - 1.2*m.x6733 == 0)
m.c4971 = Constraint(expr= m.x6230 - m.x6734 - 1.2*m.x6735 - m.x6736 - 2*m.x6737 - 1.2*m.x6738 - m.x6739 == 0)
m.c4972 = Constraint(expr= m.x6233 - m.x6740 - 1.2*m.x6741 - m.x6742 - 2*m.x6743 - 1.2*m.x6744 - m.x6745 == 0)
m.c4973 = Constraint(expr= m.x6236 - 2*m.x6746 == 0)
m.c4974 = Constraint(expr= m.x6239 - 1.2*m.x6747 - 2*m.x6748 - 1.2*m.x6749 == 0)
m.c4975 = Constraint(expr= m.x6242 - m.x6750 - 1.2*m.x6751 - m.x6752 - 2*m.x6753 - 1.2*m.x6754 - m.x6755 == 0)
m.c4976 = Constraint(expr= m.x6245 - m.x6756 - 1.2*m.x6757 - m.x6758 - 2*m.x6759 - 1.2*m.x6760 - m.x6761 == 0)
m.c4977 = Constraint(expr= m.x6248 - m.x6762 == 0)
m.c4978 = Constraint(expr= m.x6251 - 0.6*m.x6763 - m.x6764 - 0.6*m.x6765 == 0)
m.c4979 = Constraint(expr= m.x6254 - 0.5*m.x6766 - 0.6*m.x6767 - 0.5*m.x6768 - m.x6769 - 0.6*m.x6770 - 0.5*m.x6771
                           == 0)
m.c4980 = Constraint(expr= m.x6257 - 0.5*m.x6772 - 0.6*m.x6773 | |
# Repository: Autodesk/nanodesign
# Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module is used to read, execute and generate commands for visualization.
Visualization commands are used to perform all visualization
operations. Commands are generated by the popup menu and executed here.
Commands are space-separated name-value pairs with the format:
<entity> <name1>=<value1> <name2>=<value2> ... <nameN>=<valueN>
where <entity> = domain | graphics | helix | strand
Operations performed using the popup menu are written to the file
'vis.cmd'. These commands may be read in at the start of the visualization
session or included on the command line.
"""
import logging
import os
import re
from .menu import VisMenuEntity
import model
class VisCommandEntity:
    """ Defines the types of commands recognized by the visualizer. """
    ATOMIC_STRUCTURE = 'atomic_structure'  # atomic-structure representation commands
    COMMENT = '#'                          # comment line in a command file; ignored
    DOMAIN = 'domain'                      # domain commands (no parser registered here)
    GRAPHICS = 'graphics'                  # graphics-state commands (e.g. view center)
    HELIX = 'helix'                        # virtual-helix representation commands
    MODEL = 'model'                        # whole-model representation commands
    STRAND = 'strand'                      # strand representation commands
class VisCommand(object):
    """ This class implements command processing.

    Commands are space-separated name=value pairs following an entity keyword
    (see VisCommandEntity). Executed commands are also appended to 'vis.cmd'.
    """
    # Special selection names used by the generate_*_cmd() methods.
    NONE = 'None'
    ALL = 'All'
    # Separator between multiple commands given as one command-line string.
    COMMAND_DELIM = ';'
    # printf-style templates for generated commands.
    SHOW_ATOMIC_STRUCTURE = 'atomic_structure name=%s rep=%s show=%s'
    SHOW_HELIX = 'helix name=%s rep=%s show=%s'
    SHOW_STRAND = 'strand name=%s rep=%s show=%s'
    SHOW_MODEL_REP = 'model rep=%s show=%s'
def __init__(self, model, cmd_file_name, commands):
"""
Initialize VisCommand object.
Arguments:
model (VisModel): The visualization model object used to interface with the DNA design structure
and manage the visualization of all representions.
cmd_file_name (String): The name of the file to read commands from.
commands(string): The string of commands given on the command line.
"""
self.model = model
#self.update_menu = False
self.update_menu = True
self._logger = logging.getLogger(__name__)
# Open command logging file.
path = os.getcwd()
self.file_name = os.path.join(path, "vis.cmd")
self.file = open(self.file_name, 'w')
self.cmd_file_name = cmd_file_name
self.commands = commands
# The dict used to map command entities to their parsing functions.
self.parsing_map = {
VisCommandEntity.ATOMIC_STRUCTURE : self.proc_atomic_struct_cmd,
VisCommandEntity.COMMENT : self.proc_comment,
VisCommandEntity.GRAPHICS : self.proc_graphics_cmd,
VisCommandEntity.HELIX : self.proc_helix_cmd,
VisCommandEntity.MODEL : self.proc_model_cmd,
VisCommandEntity.STRAND : self.proc_strand_cmd
}
def write_cmd(self, cmd):
""" Write a command to a file. """
self.file.write(cmd + "\n")
def execute_file_cmds(self):
""" Execute the commands in a file. """
if not self.cmd_file_name:
return
self._logger.info("Reading commands from \'%s\' " % self.cmd_file_name)
self.update_menu = True
with open(self.cmd_file_name) as cmd_file:
for line in cmd_file:
self._logger.info("Command \'%s\'" % line)
cmd = line.rstrip()
self.execute_cmd(cmd)
self.write_cmd(cmd)
#__for line in cmd_file
#__with open(self.cmd_file_name) as cmd_file
def execute_cmds(self, commands=None):
""" Execute the commands from a string. """
if not (self.commands or commands):
return
if self.commands:
self._logger.info("Execute commands from command line.")
cmd_list = self.commands.split(VisCommand.COMMAND_DELIM)
else:
cmd_list = commands.split(VisCommand.COMMAND_DELIM)
self.update_menu = True
for cmd in cmd_list:
self._logger.info("Command \'%s\'" % cmd)
self.execute_cmd(cmd)
self.write_cmd(cmd)
def execute_cmd(self, cmd):
""" Execute a command. """
tokens = cmd.split()
if not tokens:
return
entity = tokens[0]
if entity in self.parsing_map:
self.parsing_map[entity](cmd, tokens[1:])
else:
self._logger.error("Unknown entity \'%s\' " % entity)
    def proc_comment(self, cmd, tokens):
        """ Process a comment ('#') command: comments are ignored. """
        pass
def proc_helix_cmd(self, cmd, tokens):
""" Process a 'helix' command. """
args = [ 'color', 'name', 'names', 'rep', 'show' ]
name = None
rep = None
attributes = []
names = []
for token in tokens:
arg_name,arg_value = token.split('=')
if arg_name not in args:
self._logger.error("Unknown helix argument \'%s\' " % arg_name)
return
if arg_name == 'name':
names.append(arg_value)
elif arg_name == 'names':
names = self.parse_helix_ids(arg_value)
elif arg_name == 'rep':
rep = arg_value
elif arg_name == 'show':
show = (arg_value == 'true')
attributes.append(('show',show))
elif arg_name == 'color':
color = self.parse_color(arg_value)
attributes.append(('color',color))
#__for token in tokens
for name in names:
self.model.show_helix(name, rep, attributes)
# Update the menu but after graphics is up and fully initialized (delayed=True).
if (show and self.update_menu):
delay = True
self.model.menu.update_submenu_selection(VisMenuEntity.HELIX, name, rep, delay)
def generate_helix_cmd(self, helix_name, helix_rep, show):
""" Generate a helix command. """
if helix_name == VisCommand.NONE:
cmd = VisCommand.SHOW_HELIX % ('All', helix_rep, 'false')
elif helix_name == VisCommand.ALL:
cmd = VisCommand.SHOW_HELIX % (helix_name, helix_rep, 'true')
else:
if show:
show_str = 'true'
else:
show_str = 'false'
cmd = VisCommand.SHOW_HELIX % (helix_name, helix_rep, show_str)
self.write_cmd(cmd)
self.execute_cmd(cmd)
def generate_model_cmd(self, rep, show):
""" Generate a model command. """
SHOW_MODEL_REP = 'model rep=%s show=%s'
if show:
show_str = 'true'
else:
show_str = 'false'
rep = rep.replace(" ", "_")
cmd = VisCommand.SHOW_MODEL_REP % (rep, show_str)
self.write_cmd(cmd)
self.execute_cmd(cmd)
def generate_strand_cmd(self, strand_name, strand_rep, show):
""" Generate a strand command. """
if strand_name == VisCommand.NONE:
cmd = VisCommand.SHOW_STRAND % ('All', strand_rep, 'false')
elif strand_name == VisCommand.ALL:
cmd = VisCommand.SHOW_STRAND % (strand_name, strand_rep, 'true')
else:
if show:
show_str = 'true'
else:
show_str = 'false'
cmd = VisCommand.SHOW_STRAND % (strand_name, strand_rep, show_str)
self.write_cmd(cmd)
self.execute_cmd(cmd)
def generate_atomic_struct_cmd(self, atom_struct_name, atom_struct_rep, show):
""" Generate an atomic struct command. """
if atom_struct_name == VisCommand.NONE:
cmd = VisCommand.SHOW_ATOMIC_STRUCTURE % ('All', atom_struct_rep, 'false')
elif atom_struct_name == VisCommand.ALL:
cmd = VisCommand.SHOW_ATOMIC_STRUCTURE % (atom_struct_name, atom_struct_rep, 'true')
else:
if show:
show_str = 'true'
else:
show_str = 'false'
cmd = VisCommand.SHOW_ATOMIC_STRUCTURE % (atom_struct_name, atom_struct_rep, show_str)
self.write_cmd(cmd)
self.execute_cmd(cmd)
def proc_model_cmd(self, cmd, tokens):
""" Process a 'model' command. """
args = [ 'rep', 'show' ]
bbox = None
for token in tokens:
arg_name,arg_value = token.split('=')
if arg_name not in args:
self._logger.error("Unknown model argument \'%s\' " % arg_name)
return
if arg_name == 'rep':
rep = arg_value
elif arg_name == 'show':
show = (arg_value == 'true')
#__for token in tokens
rep = rep.replace("_", " ")
if rep == model.VisModelRepType.BOUNDING_BOX:
self.model.show_bounding_box(show)
elif rep == model.VisModelRepType.GEOMETRY:
self.model.show_structure_geometry(show)
elif rep == model.VisModelRepType.HELIX_NUMBERS:
self.model.show_helix_numbers(show)
elif rep == model.VisModelRepType.HELIX_PROJECTION:
self.model.show_helix_projections(show)
else:
self._logger.error("Unknown model rep \'%s\' " % rep)
return
if (show and self.update_menu):
delay = True
self.model.menu.update_selection(VisMenuEntity.MODEL, rep, delay)
def proc_strand_cmd(self, cmd, tokens):
""" Process a 'strand' command. """
args = [ 'color', 'line_width', 'name', 'names', 'rep', 'show' ]
name = None
rep = None
attributes = []
names = []
for token in tokens:
arg_name,arg_value = token.split('=')
if arg_name not in args:
self._logger.error("Unknown strand argument \'%s\' " % arg_name)
return
if arg_name == 'name':
names.append(arg_value)
elif arg_name == 'names':
names = self.parse_strand_ids(arg_value)
elif arg_name == 'rep':
rep = arg_value
elif arg_name == 'show':
show = (arg_value == 'true')
attributes.append(('show',show))
elif arg_name == 'color':
color = self.parse_color(arg_value)
attributes.append(('color',color))
elif arg_name == 'line_width':
line_width = float(arg_value)
attributes.append(('line_width',line_width))
#__for token in tokens
for name in names:
self.model.show_strand(name, rep, attributes)
# Update the menu but after graphics is up and fully initialized (delayed=True).
if (show and self.update_menu):
delay = True
self.model.menu.update_submenu_selection(VisMenuEntity.STRAND, name, rep, delay)
def proc_atomic_struct_cmd(self, cmd, tokens):
""" Process a 'atomic_structure' command. """
args = [ 'name', 'rep', 'show' ]
name = None
rep = None
show = True
for token in tokens:
arg_name,arg_value = token.split('=')
if arg_name not in args:
self._logger.error("Unknown atomic structure argument \'%s\' " % arg_name)
return
if arg_name == 'name':
name = arg_value
elif arg_name == 'rep':
rep = arg_value
elif arg_name == 'show':
show = (arg_value == 'true')
#__for token in tokens
self.model.show_atomic_struct(name, rep, show)
# Update the menu but after graphics is up and fully initialized (delayed=True).
if (show and self.update_menu):
delay = True
self.model.menu.update_submenu_selection(VisMenuEntity.ATOMIC_STRUCTURE, name, rep, delay)
#__def proc_atomic_struct_cmd(self, tokens)
def generate_graphics_cmd(self, name, value):
""" Generate a graphic command. """
if name == "center":
cmd = "graphics center=(%g,%g,%g)" % (value[0],value[1],value[2])
self.write_cmd(cmd)
def proc_graphics_cmd(self, cmd, tokens):
    """ Process a 'graphics' command.

    Parses 'key=value' tokens; the only recognized key is 'center',
    whose value is a parenthesized, comma-separated coordinate tuple,
    e.g. center=(1.0,2.0,3.0).
    """
    args = [ 'center' ]
    #print("proc_graphics_cmd tokens %s" % str(tokens))
    #s = ''.join(tokens)
    #print("proc_graphics_cmd s %s" % s)
    for token in tokens:
        arg_name,arg_value = token.split('=')
        if arg_name not in args:
            # Unknown keys abort the whole command.
            self._logger.error("Unknown graphics argument \'%s\' " % arg_name)
            return
        if arg_name == 'center':
            #print("proc_graphics_cmd value %s" % arg_value)
            # Extract the text between '(' and ')' and split on commas,
            # e.g. "(1,2,3)" -> ["1", "2", "3"].
            values = arg_value[arg_value.find("(")+1:arg_value.find(")")].split(",")
            # NOTE(review): this view of the file ends here; 'values' is
            # presumably converted to numbers and applied to the model
            # further down -- confirm against the full source.
| |
# Copyright (C) 2013 by <NAME>.
# This file is part of m209, the M-209 simulation.
# m209 is released under the MIT License (see LICENSE.txt).
"""This module contains data used in the generation of key lists."""
# This data was obtained from:
# TM-11-380, War Department, Technical Manual, Converter M-209, M-209-A,
# M-209-B (cipher) 17 March, 1944.
# Appendix II (pages 76-79):
# http://www.ilord.com/m209manual.html
# http://www.ilord.com/1944manual/page-76.JPG
# http://www.ilord.com/1944manual/page-78.JPG
#
# The overlap values from the tables were omitted to save memory since they can
# be computed from the set of 6 numbers: overlap = sum(first 6 values) - 27.
#
# It should be noted that I think I found a typo in the Group B data. The
# overlap value does not jibe with the 6 numbers in one case. See the comment marked
# TYPO?, below. Should I tell the War Department? :P
# I changed it to a sequence where the overlap matched the lines above and below
# it. I suppose it could have been intentional.
#
# Finally there were 5 selections in group B that our heuristic algorithm could
# not find a solution for, even when allowed to iterate 100,000 times. At this
# time I am not certain there is a solution for these values. For the time
# being I have commented them out. I would imagine that if our (albeit simple)
# algorithm could not come up with a solution then a soldier would have
# difficulty finding a solution as well, assuming they generated the key lists
# by hand. If they used a computer or algorithm, I'd sure like to see it!
# Group A lug-count selections from the manual's Appendix II table.
# Each row lists six per-wheel lug counts in non-decreasing order; the
# implied overlap for a row is sum(row) - 27 (see header comment above).
# NOTE(review): the rows appear to be grouped by increasing overlap
# (the first rows sum to 28, the last to 39) -- ordering preserved from
# the manual, do not re-sort.
GROUP_A = [
[1, 2, 3, 4, 8, 10],
[1, 2, 3, 4, 7, 11],
[1, 2, 3, 4, 6, 12],
[1, 2, 3, 4, 5, 13],
[1, 2, 3, 5, 8, 9],
[1, 2, 3, 5, 7, 10],
[1, 2, 3, 5, 6, 11],
[1, 2, 3, 6, 7, 9],
[1, 2, 4, 5, 7, 9],
[1, 2, 4, 5, 6, 10],
[1, 2, 3, 4, 9, 10],
[1, 2, 3, 4, 8, 11],
[1, 2, 3, 4, 7, 12],
[1, 2, 3, 4, 6, 13],
[1, 2, 3, 5, 8, 10],
[1, 2, 3, 5, 7, 11],
[1, 2, 3, 5, 6, 12],
[1, 2, 3, 6, 8, 9],
[1, 2, 3, 6, 7, 10],
[1, 2, 4, 5, 8, 9],
[1, 2, 4, 5, 7, 10],
[1, 2, 4, 5, 6, 11],
[1, 2, 4, 6, 7, 9],
[1, 2, 3, 4, 9, 11],
[1, 2, 3, 4, 8, 12],
[1, 2, 3, 4, 7, 13],
[1, 2, 3, 5, 9, 10],
[1, 2, 3, 5, 8, 11],
[1, 2, 3, 5, 7, 12],
[1, 2, 3, 5, 6, 13],
[1, 2, 3, 6, 8, 10],
[1, 2, 3, 6, 7, 11],
[1, 2, 3, 7, 8, 9],
[1, 2, 4, 5, 8, 10],
[1, 2, 4, 5, 7, 11],
[1, 2, 4, 5, 6, 12],
[1, 2, 4, 6, 8, 9],
[1, 2, 4, 6, 7, 10],
[1, 2, 3, 4, 10, 11],
[1, 2, 3, 4, 9, 12],
[1, 2, 3, 4, 8, 13],
[1, 2, 3, 5, 9, 11],
[1, 2, 3, 5, 8, 12],
[1, 2, 3, 5, 7, 13],
[1, 2, 3, 6, 9, 10],
[1, 2, 3, 6, 8, 11],
[1, 2, 3, 6, 7, 12],
[1, 2, 3, 7, 8, 10],
[1, 2, 4, 5, 9, 10],
[1, 2, 4, 5, 8, 11],
[1, 2, 4, 5, 7, 12],
[1, 2, 4, 5, 6, 13],
[1, 2, 4, 6, 7, 11],
[1, 2, 4, 6, 8, 10],
[1, 2, 4, 7, 8, 9],
[1, 2, 3, 4, 10, 12],
[1, 2, 3, 4, 9, 13],
[1, 2, 3, 5, 10, 11],
[1, 2, 3, 5, 9, 12],
[1, 2, 3, 5, 8, 13],
[1, 2, 3, 6, 9, 11],
[1, 2, 3, 6, 8, 12],
[1, 2, 3, 6, 7, 13],
[1, 2, 3, 7, 9, 10],
[1, 2, 3, 7, 8, 11],
[1, 2, 4, 5, 9, 11],
[1, 2, 4, 5, 8, 12],
[1, 2, 4, 5, 7, 13],
[1, 2, 4, 6, 9, 10],
[1, 2, 4, 6, 8, 11],
[1, 2, 4, 6, 7, 12],
[1, 2, 4, 7, 8, 10],
[1, 2, 3, 4, 11, 12],
[1, 2, 3, 4, 10, 13],
[1, 2, 3, 5, 10, 12],
[1, 2, 3, 5, 9, 13],
[1, 2, 3, 6, 10, 11],
[1, 2, 3, 6, 9, 12],
[1, 2, 3, 6, 8, 13],
[1, 2, 3, 7, 9, 11],
[1, 2, 3, 7, 8, 12],
[1, 2, 4, 5, 10, 11],
[1, 2, 4, 5, 9, 12],
[1, 2, 4, 5, 8, 13],
[1, 2, 4, 6, 8, 12],
[1, 2, 4, 6, 9, 11],
[1, 2, 4, 6, 7, 13],
[1, 2, 4, 7, 9, 10],
[1, 2, 4, 7, 8, 11],
[1, 2, 3, 4, 11, 13],
[1, 2, 3, 5, 11, 12],
[1, 2, 3, 5, 10, 13],
[1, 2, 3, 6, 10, 12],
[1, 2, 3, 6, 9, 13],
[1, 2, 3, 7, 10, 11],
[1, 2, 3, 7, 9, 12],
[1, 2, 3, 7, 8, 13],
[1, 2, 4, 5, 10, 12],
[1, 2, 4, 5, 9, 13],
[1, 2, 4, 6, 8, 13],
[1, 2, 4, 6, 9, 12],
[1, 2, 4, 6, 10, 11],
[1, 2, 4, 7, 9, 11],
[1, 2, 4, 7, 8, 12],
[1, 2, 4, 8, 9, 10],
[1, 2, 3, 5, 11, 13],
[1, 2, 3, 6, 11, 12],
[1, 2, 3, 6, 10, 13],
[1, 2, 3, 7, 10, 12],
[1, 2, 3, 7, 9, 13],
[1, 2, 4, 5, 11, 12],
[1, 2, 4, 5, 10, 13],
[1, 2, 4, 6, 9, 13],
[1, 2, 4, 6, 10, 12],
[1, 2, 4, 7, 10, 11],
[1, 2, 4, 7, 9, 12],
[1, 2, 4, 7, 8, 13],
[1, 2, 4, 8, 9, 11],
[1, 2, 3, 5, 12, 13],
[1, 2, 3, 6, 11, 13],
[1, 2, 3, 7, 11, 12],
[1, 2, 3, 7, 10, 13],
[1, 2, 4, 5, 11, 13],
[1, 2, 4, 6, 10, 13],
[1, 2, 4, 6, 11, 12],
[1, 2, 4, 7, 10, 12],
[1, 2, 4, 7, 9, 13],
[1, 2, 4, 8, 10, 11],
[1, 2, 4, 8, 9, 12],
[1, 2, 3, 6, 12, 13],
[1, 2, 3, 7, 11, 13],
[1, 2, 4, 5, 12, 13],
[1, 2, 4, 6, 11, 13],
[1, 2, 4, 7, 11, 12],
[1, 2, 4, 7, 10, 13],
[1, 2, 4, 8, 9, 13],
[1, 2, 4, 8, 10, 12],
[1, 2, 3, 7, 12, 13],
[1, 2, 4, 6, 12, 13],
[1, 2, 4, 7, 11, 13],
[1, 2, 4, 8, 11, 12],
[1, 2, 4, 8, 10, 13],
[1, 2, 4, 7, 12, 13],
[1, 2, 4, 8, 11, 13],
]
GROUP_B = [
[1, 1, 2, 3, 8, 13],
[1, 1, 2, 4, 9, 11],
[1, 1, 2, 4, 8, 12],
[1, 1, 2, 4, 7, 13],
[1, 1, 2, 5, 9, 10],
[1, 1, 2, 5, 8, 11],
[1, 1, 2, 5, 7, 12],
[1, 1, 2, 5, 6, 13],
[1, 1, 3, 4, 9, 10],
[1, 1, 3, 4, 8, 11],
[1, 1, 3, 4, 7, 12],
[1, 1, 3, 4, 6, 13],
[1, 1, 3, 5, 8, 10],
[1, 1, 3, 5, 7, 11],
[1, 1, 3, 5, 6, 12],
[1, 1, 3, 6, 8, 9],
[1, 1, 3, 6, 7, 10],
[1, 2, 2, 3, 9, 11],
[1, 2, 2, 3, 8, 12],
[1, 2, 2, 3, 7, 13],
[1, 2, 2, 4, 8, 11],
[1, 2, 2, 4, 7, 12],
[1, 2, 2, 4, 6, 13],
[1, 2, 2, 5, 8, 10],
[1, 2, 2, 5, 7, 11],
[1, 2, 2, 5, 6, 12],
[1, 2, 2, 6, 8, 9],
[1, 2, | |
<filename>gbpservice/neutron/tests/unit/services/grouppolicy/test_extension_driver_api.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from neutron.common import config as neutron_config # noqa
from neutron.db import model_base
import sqlalchemy as sa
from gbpservice.neutron.services.grouppolicy import (
group_policy_driver_api as api)
from gbpservice.neutron.services.grouppolicy import config
from gbpservice.neutron.tests.unit import common as cm
from gbpservice.neutron.tests.unit.services.grouppolicy import (
extensions as test_ext)
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_grouppolicy_plugin as test_plugin)
from gbpservice.neutron.tests.unit.services.grouppolicy.extensions import (
test_extension as test_extension)
class ExtensionDriverTestBase(test_plugin.GroupPolicyPluginTestCase):
    """Base fixture that registers the 'test' group-policy extension
    driver and its API extension path before plugin setup."""
    # Extension drivers to load into the group_policy service plugin.
    _extension_drivers = ['test']
    # Directory holding the test API extension definitions.
    _extension_path = os.path.dirname(os.path.abspath(test_ext.__file__))
    def setUp(self, policy_drivers=None, core_plugin=None,
              ml2_options=None, sc_plugin=None):
        # NOTE(review): 'policy_drivers' is accepted but never used or
        # forwarded to the superclass here -- confirm whether subclasses
        # rely on it or it should be passed to super().setUp().
        config.cfg.CONF.set_override('extension_drivers',
                                     self._extension_drivers,
                                     group='group_policy')
        if self._extension_path:
            config.cfg.CONF.set_override(
                'api_extensions_path', self._extension_path)
        super(ExtensionDriverTestBase, self).setUp(
            core_plugin=core_plugin, ml2_options=ml2_options,
            sc_plugin=sc_plugin)
class ExtensionDriverTestCase(ExtensionDriverTestBase):
    """Verify the 'test' extension driver's '<acronym>_extension'
    attribute on every group-policy resource type.

    For each resource the same five things are checked: the default
    (None) on create, the value in show and list responses, an explicit
    value on create, and an update.  The per-resource tests previously
    duplicated this whole sequence inline; they now all delegate to the
    generic _test_attr() helper, which keeps one copy of the pattern.
    """

    def _test_attr(self, type, acronim=None, **create_kwargs):
        """Exercise the '<acronim>_extension' attribute of *type*.

        :param type: singular resource name, e.g. 'policy_target'.
        :param acronim: attribute prefix; when None it is derived with
            _acronim(), preserving the original behavior for callers
            that pass only the resource name.
        :param create_kwargs: extra arguments required to create the
            resource (e.g. a policy rule needs a classifier id).
        """
        if acronim is None:
            acronim = _acronim(type)
        attr = acronim + '_extension'
        plural = cm.get_resource_plural(type)
        create = getattr(self, 'create_%s' % type)
        # Test create with default value.
        obj = create(**create_kwargs)
        id = obj[type]['id']
        self.assertIsNone(obj[type][attr])
        req = self.new_show_request(plural, id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertIsNone(res[type][attr])
        # Test list.
        res = self._list(plural)
        self.assertIsNone(res[plural][0][attr])
        # Test create with explicit value.
        obj = create(**dict(create_kwargs, **{attr: "abc"}))
        id = obj[type]['id']
        self.assertEqual("abc", obj[type][attr])
        req = self.new_show_request(plural, id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual("abc", res[type][attr])
        # Test update, checking both the update and a subsequent show.
        data = {type: {attr: "def"}}
        req = self.new_update_request(plural, data, id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual("def", res[type][attr])
        req = self.new_show_request(plural, id)
        res = self.deserialize(self.fmt, req.get_response(self.ext_api))
        self.assertEqual("def", res[type][attr])

    def test_pt_attr(self):
        self._test_attr('policy_target', 'pt')

    def test_ptg_attr(self):
        self._test_attr('policy_target_group', 'ptg')

    def test_l2p_attr(self):
        self._test_attr('l2_policy', 'l2p')

    def test_l3p_attr(self):
        self._test_attr('l3_policy', 'l3p')

    def test_pc_attr(self):
        self._test_attr('policy_classifier', 'pc')

    def test_pa_attr(self):
        self._test_attr('policy_action', 'pa')

    def test_pr_attr(self):
        # A policy rule requires an existing classifier.
        classifier = self.create_policy_classifier(
            name="class1", protocol="tcp", direction="out",
            port_range="50:100")
        self._test_attr(
            'policy_rule', 'pr',
            policy_classifier_id=classifier['policy_classifier']['id'])

    def test_prs_attr(self):
        # A policy rule set is created with an (empty) rule list.
        self._test_attr('policy_rule_set', 'prs', policy_rules=[])

    def test_nsp_attr(self):
        self._test_attr('network_service_policy', 'nsp')

    def test_es_attr(self):
        self._test_attr('external_segment')

    def test_ep_attr(self):
        self._test_attr('external_policy')

    def test_np_attr(self):
        self._test_attr('nat_pool')
class TestPolicyTargetExtension(model_base.BASEV2):
    """SQLAlchemy model persisting the test 'pt_extension' attribute,
    one row per policy target."""
    __tablename__ = 'test_policy_target_extension'
    # FK to the policy target; the row is removed with its target.
    policy_target_id = sa.Column(sa.String(36),
                                 sa.ForeignKey('gp_policy_targets.id',
                                               ondelete="CASCADE"),
                                 primary_key=True)
    # The extension attribute value exercised by ExtensionDriverTestCase.
    pt_extension = sa.Column(sa.String(64))
class TestPolicyTargetGroupExtension(model_base.BASEV2):
__tablename__ = 'test_policy_target_group_extension'
policy_target_group_id | |
Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Anforderung_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Load_Rampdown, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Netzparallelbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Inselbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.IGN_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NetzStörung)
# End Select
# End If
# Catch ex As Exception
# BGWCalcState.ReportProgress(-1, "Error Check_Action_Betrieb " & ex.Message + " in row " + row.ToString + " Date: " + AMM_List(row).MsgDate.ToString)
# End Try
# End Sub
# Private Sub Check_Action_Rampdown(ByRef A_Action As EngineAction, ByRef MSGNR As Integer, ByRef Message_Time As Date, ByRef row As Integer)
# Try
# If A_Action.Trip_List.Count > 0 Then
# A_Action.Action_To = A_Action.Trip_List(0).MsgDate
# Store_Action(Engine_Action.Forced_Outage, row, Delay_Ckeck.NetzStörung)
# Else
# Select Case MSGNR
# Case MSG_Trigger.GS_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Engine_Cooldown, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Bereit_Automatic_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Load_Rampdown, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Netzstoerung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Mains_Failure, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Anforderung_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Operation, row, Delay_Ckeck.Betrieb_NetzOrInsel)
# Case MSG_Trigger.Bereit_Automatic_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Operation, row, Delay_Ckeck.Betrieb_NetzOrInsel)
# Case MSG_Trigger.Netzparallelbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Inselbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.IGN_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NetzStörung)
# End Select
# End If
# Catch ex As Exception
# BGWCalcState.ReportProgress(-1, "Error Check_Action_Rampdown " & ex.Message + " in row " + row.ToString + " Date: " + AMM_List(row).MsgDate.ToString)
# End Try
# End Sub
# Private Sub Check_Action_Cooldown(ByRef A_Action As EngineAction, ByRef MSGNR As Integer, ByRef Message_Time As Date, ByRef row As Integer)
# Try
# If A_Action.Trip_List.Count > 0 Then
# A_Action.Action_To = A_Action.Trip_List(0).MsgDate
# Store_Action(Engine_Action.Forced_Outage, row, Delay_Ckeck.NetzStörung)
# Else
# Select Case MSGNR
# Case MSG_Trigger.IGN_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Anforderung_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Start_Preparation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Synchronisieranforderung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Synchronisation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Bereit_Automatic_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Engine_Cooldown, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Netzstoerung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Mains_Failure, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Bereit_Automatic_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Netzparallelbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Inselbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.NoCheck)
# End Select
# End If
# Catch ex As Exception
# BGWCalcState.ReportProgress(-1, "Error Check_Action_Cooldown " & ex.Message + " in row " + row.ToString + " Date: " + AMM_List(row).MsgDate.ToString)
# End Try
# End Sub
# Private Sub Check_Action_Start(ByRef A_Action As EngineAction, ByRef MSGNR As Integer, ByRef Message_Time As Date, ByRef row As Integer)
# Try
# If A_Action.Trip_List.Count > 0 Then
# A_Action.Action_To = A_Action.Trip_List(0).MsgDate
# Store_Action(Engine_Action.Forced_Outage, row, Delay_Ckeck.NetzStörung)
# Else
# Select Case MSGNR
# Case MSG_Trigger.Idle
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Idle, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Synchronisieranforderung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Synchronisation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Anforderung_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Bereit_Automatic_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Start, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Netzstoerung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Mains_Failure, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Netzparallelbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Inselbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.NoCheck)
# End Select
# End If
# Catch ex As Exception
# BGWCalcState.ReportProgress(-1, "Error Check_Action_Start " & ex.Message + " in row " + row.ToString + " Date: " + AMM_List(row).MsgDate.ToString)
# End Try
# End Sub
# Private Sub Check_Action_Idle(ByRef A_Action As EngineAction, ByRef MSGNR As Integer, ByRef Message_Time As Date, ByRef row As Integer)
# Try
# If A_Action.Trip_List.Count > 0 Then
# A_Action.Action_To = A_Action.Trip_List(0).MsgDate
# Store_Action(Engine_Action.Forced_Outage, row, Delay_Ckeck.NetzStörung)
# Else
# Select Case MSGNR
# Case MSG_Trigger.Synchronisieranforderung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Synchronisation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Anforderung_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Engine_Cooldown, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.IGN_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Bereit_Automatic_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Idle, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Netzstoerung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Mains_Failure, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Netzparallelbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Inselbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.NoCheck)
# End Select
# End If
# Catch ex As Exception
# BGWCalcState.ReportProgress(-1, "Error Check_Action_Idle " & ex.Message + " in row " + row.ToString + " Date: " + AMM_List(row).MsgDate.ToString)
# End Try
# End Sub
# Private Sub Check_Action_Synch(ByRef A_Action As EngineAction, ByRef MSGNR As Integer, ByRef Message_Time As Date, ByRef row As Integer)
# Try
# If A_Action.Trip_List.Count > 0 Then
# A_Action.Action_To = A_Action.Trip_List(0).MsgDate
# Store_Action(Engine_Action.Forced_Outage, row, Delay_Ckeck.NoCheck)
# Else
# Select Case MSGNR
# Case MSG_Trigger.Netzparallelbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Inselbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Anforderung_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Engine_Cooldown, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.IGN_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Bereit_Automatic_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Synchronisation, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Netzstoerung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Mains_Failure, row, Delay_Ckeck.NoCheck)
# End Select
# End If
# Catch ex As Exception
# BGWCalcState.ReportProgress(-1, "Error Check_Action_Synch " & ex.Message + " in row " + row.ToString + " Date: " + AMM_List(row).MsgDate.ToString)
# End Try
# End Sub
# Private Sub Check_Action_Ready(ByRef A_Action As EngineAction, ByRef MSGNR As Integer, ByRef Message_Time As Date, ByRef row As Integer)
# Try
# If A_Action.Trip_List.Count > 0 Then
# A_Action.Action_To = A_Action.Trip_List(0).MsgDate
# Store_Action(Engine_Action.Forced_Outage, row, Delay_Ckeck.NoCheck)
# Else
# Select Case MSGNR
# Case MSG_Trigger.Anforderung_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Start_Preparation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Starter_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Start, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Idle
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Idle, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Synchronisieranforderung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Synchronisation, row, Delay_Ckeck.NoCheck)
# 'Case MSG_Trigger.Anforderung_Aus
# ' A_Action.Action_To = Message_Time
# ' Store_Action(Engine_Action.Ready, row, Delay_Ckeck.AlarmInNextSecond)
# Case MSG_Trigger.Bereit_Automatic_Aus
# If Not CheckBWSNotAuto(row) Then
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Not_Ready, row, Delay_Ckeck.NetzStörung)
# End If
# Case MSG_Trigger.Netzstoerung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Mains_Failure, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Netzparallelbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Inselbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.NetzStörung)
# End Select
# End If
# Catch ex As Exception
# BGWCalcState.ReportProgress(-1, "Error Check_Action_Ready " & ex.Message + " in row " + row.ToString + " Date: " + AMM_List(row).MsgDate.ToString)
# End Try
# End Sub
# Private Sub Check_Action_Not_Ready(ByRef A_Action As EngineAction, ByRef MSGNR As Integer, ByRef Message_Time As Date, ByRef row As Integer)
# Try
# If A_Action.Trip_List.Count > 0 Then
# A_Action.Action_To = A_Action.Trip_List(0).MsgDate
# Store_Action(Engine_Action.Forced_Outage, row, Delay_Ckeck.NetzStörung)
# Else
# Select Case MSGNR
# Case MSG_Trigger.Anforderung_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Start_Preparation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Synchronisieranforderung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Synchronisation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Bereit_Automatic_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Netzparallelbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Inselbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Starter_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Start, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.GS_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Engine_Cooldown, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Netzstoerung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Mains_Failure, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.GS_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Operation, row, Delay_Ckeck.Betrieb_NetzOrInsel)
# Case MSG_Trigger.Idle
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Idle, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.IGN_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NetzStörung)
# End Select
# End If
# Catch ex As Exception
# BGWCalcState.ReportProgress(-1, "Error Check_Action_Not_Ready " & ex.Message + " in row " + row.ToString + " Date: " + AMM_List(row).MsgDate.ToString)
# End Try
# End Sub
# Private Sub Check_Action_GAP(ByRef A_Action As EngineAction, ByRef MSGNR As Integer, ByRef Message_Time As Date, ByRef row As Integer)
# Try
# If A_Action.Trip_List.Count > 0 Then
# A_Action.Action_To = A_Action.Trip_List(0).MsgDate
# Store_Action(Engine_Action.Forced_Outage, row, Delay_Ckeck.NetzStörung)
# Else
# Select Case MSGNR
# Case MSG_Trigger.Anforderung_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Start_Preparation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Anforderung_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Load_Rampdown, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Synchronisieranforderung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Synchronisation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Bereit_Automatic_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Not_Ready, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Bereit_Automatic_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Netzparallelbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Inselbetrieb
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Starter_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Start, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.GS_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Engine_Cooldown, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.GS_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Operation, row, Delay_Ckeck.Betrieb_NetzOrInsel)
# Case MSG_Trigger.Idle
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Idle, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.IGN_Aus
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.NetzStörung)
# Case MSG_Trigger.Netzstoerung
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Mains_Failure, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Power_reduction_In_isolated_operation
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Island_Operation, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.BWS_Hand, MSG_Trigger.BWS_AUS
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Troubleshooting, row, Delay_Ckeck.NoCheck)
# Case Else
# For Each tstEnum As Powerreduction In GetType(Powerreduction).GetEnumValues
# If tstEnum = MSGNR Then
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Operation, row, Delay_Ckeck.Betrieb_NetzOrInsel)
# Exit Select
# End If
# Next
# If row > 0 AndAlso ((Message_Time - AMM_List(row - 1).MsgDate).TotalSeconds < GapMaxLength) And E_Action.Action_Actual <> Engine_Action.Undefinded Then
# E_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Undefinded, row, Delay_Ckeck.NoCheck)
# End If
# End Select
# End If
# Catch ex As Exception
# BGWCalcState.ReportProgress(-1, "Error Check_Action_Gap " & ex.Message + " in row " + row.ToString + " Date: " + AMM_List(row).MsgDate.ToString)
# End Try
# End Sub
# Private Sub Check_Action_Forcedoutage(ByRef A_Action As EngineAction, ByRef MSGNR As Integer, ByRef Message_Time As Date, ByRef row As Integer)
# Try
# Select Case MSGNR
# Case MSG_Trigger.BWS_Hand, MSG_Trigger.BWS_AUS, MSG_Trigger.BWS_Auto
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Troubleshooting, row, Delay_Ckeck.NoCheck)
# Case MSG_Trigger.Bereit_Automatic_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Ready, row, Delay_Ckeck.RemoteReset)
# 'Case MSG_Trigger.Netzparallelbetrieb
# ' A_Action.Action_To = Message_Time
# ' Store_Action(Engine_Action.RampUp_Mains_Parallel_Operation, row, Delay_Ckeck.Betrieb_NetzOrInsel)
# 'Case MSG_Trigger.Inselbetrieb
# ' A_Action.Action_To = Message_Time
# ' Store_Action(Engine_Action.RampUp_Island_Operation, row, Delay_Ckeck.Betrieb_NetzOrInsel)
# Case MSG_Trigger.Anforderung_Ein
# A_Action.Action_To = Message_Time
# Store_Action(Engine_Action.Start_Preparation, row, Delay_Ckeck.NoCheck)
# Case | |
vim.VirtualMachine, ['name', 'runtime.powerState'])
    #pylint: disable=no-self-argument, not-callable
    def _vsphere_session(f):
        """
        Decorator that guarantees a live vSphere connection for the wrapped method.

        If self.vsphereConnection is already set, the existing connection is
        reused (this lets decorated methods call each other without opening a
        second session).  Otherwise a new VMwareConnection is opened just for
        the duration of the call.
        """
        @functools.wraps(f)
        def wrapped(self, *args, **kwargs):
            if self.vsphereConnection:
                self.log.debug2("VirtualMachineVMware reusing vsphere connection")
                return f(self, *args, **kwargs)
            else:
                self.log.debug2("VirtualMachineVMware creating new vsphere connection")
                try:
                    # The context manager closes the session; the finally block
                    # additionally clears the attribute so a later call does not
                    # try to reuse a closed connection.
                    with VMwareConnection(self.hostServer, self.hostUsername, self.hostPassword) as self.vsphereConnection:
                        return f(self, *args, **kwargs)
                finally:
                    self.vsphereConnection = None
        return wrapped
    #pylint: enable=no-self-argument, not-callable
@_vsphere_session
def PowerOn(self):
"""
Power On this VM
"""
vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ['name', 'runtime.powerState'])
self.log.debug2("{} runtime.powerState={}".format(self.vmName, vm.runtime.powerState))
if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
return
self.log.info("Waiting for {} to turn on".format(self.vmName))
task = vm.PowerOn()
VMwareWaitForTasks(self.vsphereConnection, [task])
@_vsphere_session
def PowerOff(self):
"""
Power Off this VM
"""
vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ['name', 'runtime.powerState'])
self.log.debug2("{} runtime.powerState={}".format(self.vmName, vm.runtime.powerState))
if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOff:
return True
self.log.info("Waiting for {} to turn off".format(self.vmName))
task = vm.PowerOff()
VMwareWaitForTasks(self.vsphereConnection, [task])
@_vsphere_session
def GetPowerState(self):
"""
Get the current power state of this VM
Returns:
A string containing 'on' or 'off' (str)
"""
vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ['name', 'runtime.powerState'])
self.log.debug2("{} runtime.powerState={}".format(self.vmName, vm.runtime.powerState))
if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
return "on"
else:
return "off"
@_vsphere_session
def GetPXEMacAddress(self):
"""
Get the MAC address of the VM to use when PXE booting
Returns:
A string containing the MAC address in 00:00:00:00:00 format (str)
"""
vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ['name', 'config.hardware'])
macs = []
for dev in vm.config.hardware.device:
if isinstance(dev, vim.vm.device.VirtualEthernetCard):
macs.append(dev.macAddress)
if not macs:
raise SolidFireError("Could not find any ethernet devices in VM {}".format(self.vmName))
if len(macs) == 1:
idx = 0
elif len(macs) == 2:
idx = 0
else: # 4 NICs, or any other config we do not recognize
idx = 2
self.log.debug2("Getting MAC address from NIC {} ({})".format(idx, macs[idx]))
return macs[idx]
@_vsphere_session
def SetPXEBoot(self):
"""
Set the boot order of this VM to PXE boot first
"""
vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ['name', 'config.hardware'])
disks = []
nics = []
cdrom_present = False
for device in vm.config.hardware.device:
if isinstance(device, vim.vm.device.VirtualDisk):
disks.append(device.key)
elif isinstance(device, vim.vm.device.VirtualEthernetCard):
nics.append(device.key)
elif isinstance(device, vim.vm.device.VirtualCdrom):
cdrom_present = True
nics = sorted(nics)
boot_disk = vim.vm.BootOptions.BootableDiskDevice()
boot_disk.deviceKey = sorted(disks)[0]
boot_nic = vim.vm.BootOptions.BootableEthernetDevice()
if len(nics) == 1:
idx = 0
elif len(nics) == 2:
idx = 0
else: # 4 NICs, or any other config we do not recognize
idx = 2
self.log.debug2("Picking NIC {} to PXE boot from ({})".format(idx, nics[idx]))
boot_nic.deviceKey = nics[idx]
if cdrom_present:
boot_devices = [boot_nic, vim.vm.BootOptions.BootableCdromDevice(), boot_disk]
else:
boot_devices = [boot_nic, boot_disk]
config = vim.vm.ConfigSpec()
config.bootOptions = vim.vm.BootOptions(bootOrder=boot_devices)
task = vm.ReconfigVM_Task(config)
VMwareWaitForTasks(self.vsphereConnection, [task])
    @_vsphere_session
    def WaitForUp(self, timeout=300):
        """
        Wait for this VM to be powered on and the guest OS booted up

        Boot is detected in three sequential phases: power state on, VMware
        tools running, then guest heartbeat green.  If tools are not installed
        in the guest, the last two phases cannot be observed and the method
        returns right after power-on with a warning.

        Args:
            timeout: overall seconds allowed across all phases; a value <= 0
                     disables the timeout (int)

        Raises:
            SFTimeoutError: if any phase has not completed within the timeout
        """
        start_time = time.time()
        # Wait for VM to be powered on
        while True:
            vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ["name", "runtime.powerState"])
            self.log.debug2("{} runtime.powerState={}".format(self.vmName, vm.runtime.powerState))
            if vm.runtime.powerState == vim.VirtualMachinePowerState.poweredOn:
                self.log.info("VM is powered on")
                break
            if timeout > 0 and time.time() - start_time > timeout:
                raise SFTimeoutError("Timeout waiting for VM to power on")
            time.sleep(2)
        self.log.info("Waiting for VMware tools")
        # Wait for VMware tools to be running
        while True:
            vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ["name", "guest.toolsRunningStatus", "guest.toolsStatus"])
            self.log.debug2("{} guest.toolsRunningStatus={}".format(self.vmName, vm.guest.toolsRunningStatus))
            if vm.guest.toolsRunningStatus == vim.VirtualMachineToolsStatus.toolsNotInstalled:
                # Without tools there is no visibility into the guest, so give
                # up on boot/health detection rather than spin until timeout
                self.log.warning("VMware tools are not installed in this VM; cannot detect VM boot/health")
                return
            if vm.guest.toolsStatus == vim.VirtualMachineToolsStatus.toolsOk:
                self.log.info("VMware tools are running")
                break
            if timeout > 0 and time.time() - start_time > timeout:
                raise SFTimeoutError("Timeout waiting for VMware tools to start")
            time.sleep(2)
        # Wait for VM heartbeat to be green
        while True:
            vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ["name", "guestHeartbeatStatus"])
            self.log.debug2("{} guestHeartbeatStatus={}".format(self.vmName, vm.guestHeartbeatStatus))
            if vm.guestHeartbeatStatus == vim.ManagedEntityStatus.green:
                self.log.info("VM guest heartbeat is green")
                break
            if timeout > 0 and time.time() - start_time > timeout:
                raise SFTimeoutError("Timeout waiting for guest heartbeat")
            time.sleep(2)
@_vsphere_session
def Delete(self):
"""
Delete this virtual machine
"""
vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ['name'])
task = vm.Destroy_Task()
VMwareWaitForTasks(self.vsphereConnection, [task])
@_vsphere_session
def SetVMXProperty(self, name, value):
"""
Set a advanced property of this VM. These are stored as key-value pairs in the VMX file.
Arg:
name: the name (key) of the property to set
value: the value to set
"""
vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ['name'])
self.log.debug("Setting property {}={} on VM {}".format(name, value, self.vmName))
option = vim.option.OptionValue(key=name, value=value)
config = vim.vm.ConfigSpec()
config.extraConfig = [option]
task = vm.ReconfigVM_Task(config)
VMwareWaitForTasks(self.vsphereConnection, [task])
@_vsphere_session
def AddNetworkAdapter(self, networkName):
"""
Add a new NIC to this VM
Args:
networkName: the name of the port group to connect the NIC to (str)
"""
vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ['name'])
network = VMwareFindObjectGetProperties(self.vsphereConnection, networkName, vim.Network, [])
nic_spec = vim.vm.device.VirtualDeviceSpec()
nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
nic_spec.device = vim.vm.device.VirtualVmxnet3()
nic_spec.device.backing = vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
nic_spec.device.backing.network = network
nic_spec.device.backing.deviceName = networkName
nic_spec.device.connectable = vim.vm.device.VirtualDevice.ConnectInfo()
nic_spec.device.connectable.startConnected = True
nic_spec.device.connectable.startConnected = True
nic_spec.device.connectable.allowGuestControl = True
nic_spec.device.connectable.connected = False
nic_spec.device.connectable.status = 'untried'
nic_spec.device.wakeOnLanEnabled = True
nic_spec.device.addressType = 'assigned'
config = vim.vm.ConfigSpec()
config.deviceChange = [nic_spec]
task = vm.ReconfigVM_Task(spec=config)
VMwareWaitForTasks(self.vsphereConnection, [task])
    @_vsphere_session
    def AddDisk(self, sizeGB, datastoreName, thinProvision=True):
        """
        Add a new virtual disk to this VM

        The disk is attached to the VM's SCSI controller on the lowest free
        unit number, backed by a newly created VMDK in the given datastore.

        Args:
            sizeGB: the size of the disk, in GB (int)
            datastoreName: the name of the datastore to put the disk in (str)
            thinProvision: make this disk thinly provisioned

        Raises:
            VirtualizationError: if no SCSI controller exists or no unit number is free
        """
        vm = VMwareFindObjectGetProperties(self.vsphereConnection, self.vmName, vim.VirtualMachine, ['name', 'config'])
        ds = VMwareFindObjectGetProperties(self.vsphereConnection, datastoreName, vim.Datastore, ["name"])
        # Find the SCSI controller and current LUNs
        controller = None
        # Unit 7 is pre-marked as used: it is reserved for the controller itself
        used_luns = set([7])
        for dev in vm.config.hardware.device:
            if isinstance(dev, vim.vm.device.VirtualDisk):
                used_luns.add(dev.unitNumber)
            if isinstance(dev, vim.vm.device.VirtualSCSIController):
                controller = dev
        if not controller:
            raise VirtualizationError("Could not find a SCSI controller to attach the disk to")
        # Pick the lowest free unit number.
        # NOTE(review): SCSI unit numbers normally run 0-15 (with 7 reserved),
        # but range(17) also permits unit 16 -- confirm the controller type
        # actually supports that before relying on it.
        available_lun = None
        for lun in range(17):
            if lun not in used_luns:
                available_lun = lun
                break
        if available_lun is None:
            raise VirtualizationError("There are no free LUNs on the SCSI controller")
        self.log.debug("Adding LUN {} in datastore {}".format(available_lun, ds.name))
        disk_spec = vim.vm.device.VirtualDeviceSpec()
        # "create" tells vSphere to create the backing VMDK file as well
        disk_spec.fileOperation = "create"
        disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.add
        disk_spec.device = vim.vm.device.VirtualDisk()
        disk_spec.device.backing = vim.vm.device.VirtualDisk.FlatVer2BackingInfo()
        disk_spec.device.backing.thinProvisioned = thinProvision
        disk_spec.device.backing.diskMode = 'persistent'
        disk_spec.device.backing.fileName = "[{}] {}-{}.vmdk".format(ds.name, self.vmName, available_lun)
        disk_spec.device.backing.datastore = ds
        disk_spec.device.unitNumber = available_lun
        disk_spec.device.capacityInKB = sizeGB * 1024 * 1024
        disk_spec.device.controllerKey = controller.key
        config = vim.vm.ConfigSpec()
        config.deviceChange = [disk_spec]
        task = vm.ReconfigVM_Task(spec=config)
        VMwareWaitForTasks(self.vsphereConnection, [task])
class VMHostVMware(VMHost):
"""
VMware implementation of VMHost class
"""
    def __init__(self, vmhostName, mgmtServer, mgmtUsername, mgmtPassword):
        """
        Args:
            vmhostName: name of the hypervisor host to manage (str)
            mgmtServer: management (vSphere) server to connect to (str)
            mgmtUsername: username for the management server (str)
            mgmtPassword: password for the management server (str)

        The constructor eagerly opens a connection and looks up the host, so a
        bad address/credentials or an unknown host fails here rather than on
        first use (presumably VMwareFindObjectGetProperties raises when the
        host is not found -- verify against its implementation).
        """
        super(VMHostVMware, self).__init__(vmhostName, mgmtServer, mgmtUsername, mgmtPassword)
        self.hostType = "VMware"
        # Test the connection and make sure the host exists
        with VMwareConnection(self.mgmtServer, self.mgmtUsername, self.mgmtPassword) as vsphere:
            VMwareFindObjectGetProperties(vsphere, self.vmhostName, vim.HostSystem, ["name"])
def CreateDatastores(self, includeInternalDrives=False, includeSlotDrives=False):
"""
Create/attach datastores on any volumes currently connected to this host
Args:
includeInternalDrives: include disks attached to internal SATA/AHCI bus
includeSlotDrives: include external disks in slots
"""
with VMwareConnection(self.mgmtServer, self.mgmtUsername, self.mgmtPassword) as vsphere:
host = VMwareFindObjectGetProperties(vsphere, self.vmhostName, vim.HostSystem, ["name", "configManager"])
storage_manager = host.configManager.storageSystem
datastore_manager = host.configManager.datastoreSystem
self.log.info("Querying connected disk devices on {}".format(self.vmhostName))
# Go through each HBA and make a reference from LUN => datastore name
lun2name = {}
for adapter in storage_manager.storageDeviceInfo.hostBusAdapter:
if adapter.driver == "nvme":
self.log.debug("Skipping NVRAM device")
continue
elif type(adapter) == vim.host.InternetScsiHba:
# iSCSI, either software or HBA
# volumeName.volumeID naming
for target in [hba for hba in storage_manager.storageDeviceInfo.scsiTopology.adapter if hba.adapter == adapter.key][0].target:
for lun in target.lun:
name = ".".join(target.transport.iScsiName.split(".")[-2:])
if lun.lun != 0:
name += "-lun{}".format(lun.lun)
lun2name[lun.scsiLun] = name
#elif type(adapter) == vim.host.BlockHba:
elif adapter.driver == "ahci" or adapter.driver == "vmw_ahci":
# Internal SATA adapter
# "sdimmX" naming
if not includeInternalDrives:
self.log.info("Skipping adapter {} for internal drives".format(adapter.device))
continue
for target in [hba for hba in storage_manager.storageDeviceInfo.scsiTopology.adapter if hba.adapter == adapter.key][0].target:
for lun in target.lun:
lun2name[lun.scsiLun] = "{}-sdimm{}".format(self.vmhostName, target.target)
elif adapter.driver == "mpt2sas" or adapter.driver == "mpt3sas":
# SAS adapter for slot drives on the front of the chassis
# "slotX" naming
if not includeSlotDrives:
self.log.info("Skipping adapter {} for slot drives".format(adapter.device))
continue
for target in [hba for hba in storage_manager.storageDeviceInfo.scsiTopology.adapter if hba.adapter == adapter.key][0].target:
for lun in target.lun:
lun2name[lun.scsiLun] = "{}-slot{}".format(self.vmhostName, target.target)
elif adapter.driver == "vmkusb":
# Skip USB drives
continue
else:
self.log.warning("Skipping unknown HBA {}".format(adapter.device))
self.log.debug("adapter = {}".format(adapter))
# Go through the list of connected LUNs and make a reference from device => datastore name
device2name = {}
for disk in storage_manager.storageDeviceInfo.scsiLun:
if disk.key in lun2name:
device2name[disk.deviceName] = lun2name[disk.key]
self.log.debug("{} => {}".format(disk.deviceName, lun2name[disk.key]))
# Get a list of available disks and create datastores on them
available_devices = | |
# <gh_stars>100-1000  (dataset extraction artifact; commented out so the file parses)
import hashlib
import logging
from flask import g, make_response, request
from Pegasus.service import cache
from Pegasus.service._serialize import jsonify
from Pegasus.service.base import OrderedDict
from Pegasus.service.monitoring import monitoring as blueprint
from Pegasus.service.monitoring.queries import (
MasterWorkflowQueries,
StampedeWorkflowQueries,
)
log = logging.getLogger(__name__)
JSON_HEADER = {"Content-Type": "application/json"}
@blueprint.url_value_preprocessor
def pull_m_wf_id(endpoint, values):
    """Stash the m_wf_id URL variable, when present, into flask.g.m_wf_id."""
    if not values:
        return
    if "m_wf_id" in values:
        g.m_wf_id = values["m_wf_id"]
@blueprint.url_value_preprocessor
def pull_url_context(endpoint, values):
    """
    Create a context which can be used when generating url in link section of the responses.

    Collects the known identifier URL variables into a dict and, if any were
    found, stores it in g.url_context.
    """
    url_context = {}
    keys = ["wf_id", "job_id", "task_id", "job_instance_id", "host_id", "instance_id"]
    if values:
        for key in keys:
            if key in values:
                url_context[key] = values[key]
        # The original used a for/else here; since the loop contains no break,
        # the else clause always ran after the loop, so this plain post-loop
        # statement is equivalent and far less misleading.
        if url_context:
            g.url_context = url_context
@blueprint.before_request
def compute_stampede_db_url():
    """
    If the requested endpoint requires connecting to a STAMPEDE database, then determine STAMPEDE DB URL and store it
    in g.stampede_db_url. Also, set g.m_wf_id to be the root workflow's uuid
    """
    # Only workflow endpoints that carry an m_wf_id need the stampede DB
    if "/workflow" not in request.path or "m_wf_id" not in g:
        return
    # Cache keys are prefixed with the md5 of the master DB URL so entries for
    # different master databases cannot collide
    md5sum = hashlib.md5()
    md5sum.update(g.master_db_url.encode("utf-8"))
    m_wf_id = g.m_wf_id
    def _get_cache_key(key_suffix):
        # "<md5-of-master-db-url>.<wf id or uuid>"
        return "{}.{}".format(md5sum.hexdigest(), key_suffix)
    cache_key = _get_cache_key(m_wf_id)
    if cache.get(cache_key):
        # NOTE(review): cache.get is called twice (test + fetch); a concurrent
        # expiry between the two calls would yield root_workflow = None --
        # presumably acceptable for this cache, but worth confirming.
        log.debug("Cache Hit: compute_stampede_db_url %s" % cache_key)
        root_workflow = cache.get(cache_key)
    else:
        log.debug("Cache Miss: compute_stampede_db_url %s" % cache_key)
        queries = MasterWorkflowQueries(g.master_db_url)
        root_workflow = queries.get_root_workflow(m_wf_id)
        queries.close()
        # Cache under both the numeric id and the uuid so either form of
        # m_wf_id in the URL hits the cache next time
        cache.set(_get_cache_key(root_workflow.wf_id), root_workflow, timeout=600)
        cache.set(_get_cache_key(root_workflow.wf_uuid), root_workflow, timeout=600)
    g.url_m_wf_id = root_workflow.wf_id
    g.m_wf_id = root_workflow.wf_uuid
    g.stampede_db_url = root_workflow.db_url
@blueprint.before_request
def get_query_args():
    """
    Parse the supported query-string arguments into g.query_args.

    Recognized arguments (pretty-print, start-index, max-results, query,
    order) are cast to their expected types; hyphens in the names are replaced
    with underscores so the values can be passed as keyword arguments to the
    query layer.

    Raises:
        ValueError: tagged with e.codes == ("INVALID_QUERY_ARGUMENT", 400)
            when a value cannot be cast to the expected type.
    """
    g.query_args = {}
    def to_int(q_arg, value):
        """Cast value to int, or raise a 400-tagged ValueError."""
        try:
            return int(value)
        except ValueError as e:
            log.exception(
                "Query Argument {} = {} is not a valid int".format(q_arg, value)
            )
            e = ValueError(
                "Expecting integer for argument {}, found {!r}".format(
                    q_arg, str(value)
                )
            )
            e.codes = ("INVALID_QUERY_ARGUMENT", 400)
            raise e from None
    def to_str(q_arg, value):
        """Pass string values through unchanged."""
        return value
    def to_bool(q_arg, value):
        """Cast '1'/'true'/'0'/'false' (any case) to bool, or raise."""
        value = value.strip().lower()
        if value in {"1", "true"}:
            return True
        elif value in {"0", "false"}:
            return False
        else:
            # log.error, not log.exception: there is no active exception at
            # this point, so log.exception would append a bogus
            # "NoneType: None" traceback to the log record.
            log.error(
                "Query Argument {} = {} is not a valid boolean".format(q_arg, value)
            )
            e = ValueError(
                "Expecting boolean for argument {}, found {!r}".format(
                    q_arg, str(value)
                )
            )
            e.codes = ("INVALID_QUERY_ARGUMENT", 400)
            raise e
    query_args = OrderedDict(
        [
            ("pretty-print", to_bool),
            ("start-index", to_int),
            ("max-results", to_int),
            ("query", to_str),
            ("order", to_str),
        ]
    )
    for arg, cast in query_args.items():
        if arg in request.args:
            g.query_args[arg.replace("-", "_")] = cast(arg, request.args.get(arg))
"""
Root Workflow
{
"wf_id" : int:wf_id,
"wf_uuid" : string:wf_uuid,
"submit_hostname" : string:submit_hostname,
"submit_dir" : string:submit_dir,
"planner_arguments" : string:planner_arguments,
"planner_version" : string:planner_version,
"user" : string:user,
"grid_dn" : string:grid_dn,
"dax_label" : string:dax_label,
"dax_version" : string:dax_version,
"dax_file" : string:dax_file,
"dag_file_name" : string:dag_file_name,
"timestamp" : int:timestamp,
"workflow_state" : object:workflow_state,
"_links" : {
"workflow" : href:workflow
}
}
"""
@blueprint.route("/root")
def get_root_workflows(username):
    """
    Returns a collection of root level workflows.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response.

    :statuscode 200: OK
    :statuscode 204: No content; when no workflows found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Root Workflow
    """
    queries = MasterWorkflowQueries(g.master_db_url)
    paged_response = queries.get_root_workflows(**g.query_args)
    if not paged_response.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)
    return make_response(jsonify(paged_response), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>")
def get_root_workflow(username, m_wf_id):
    """
    Returns root level workflow identified by m_wf_id.

    :query boolean pretty-print: Return formatted JSON response.

    :statuscode 200: OK
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure
    :statuscode 404: Not found

    :return type: Record
    :return resource: Root Workflow
    """
    record = MasterWorkflowQueries(g.master_db_url).get_root_workflow(m_wf_id)
    return make_response(jsonify(record), 200, JSON_HEADER)
"""
Workflow
{
"wf_id" : int:wf_id,
"root_wf_id" : int:root_wf_id,
"parent_wf_id" : int:parent_wf_id,
"wf_uuid" : string:wf_uuid,
"submit_hostname" : string:submit_hostname,
"submit_dir" : string:submit_dir,
"planner_arguments" : string:planner_arguments,
"planner_version" : string:planner_version,
"user" : string:user,
"grid_dn" : string:grid_dn,
"dax_label" : string:dax_label,
"dax_version" : string:dax_version,
"dax_file" : string:dax_file,
"dag_file_name" : string:dag_file_name,
"timestamp" : int:timestamp,
"_links" : {
"workflow_meta" : href:workflow_meta,
"workflow_state" : href:workflow_state,
"job" : href:job,
"task" : href:task,
"host" : href:host,
"invocation" : href:invocation
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow")
def get_workflows(username, m_wf_id):
    """
    Returns a collection of workflows under the root workflow m_wf_id.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no workflows found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: Workflow
    """
    queries = StampedeWorkflowQueries(g.stampede_db_url)
    paged_response = queries.get_workflows(g.m_wf_id, **g.query_args)
    if not paged_response.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)
    return make_response(jsonify(paged_response), 200, JSON_HEADER)
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>")
def get_workflow(username, m_wf_id, wf_id):
    """
    Returns workflow identified by m_wf_id, wf_id.

    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure
    :statuscode 404: Not found

    :return type: Record
    :return resource: Workflow
    """
    record = StampedeWorkflowQueries(g.stampede_db_url).get_workflow(wf_id)
    return make_response(jsonify(record), 200, JSON_HEADER)
"""
Workflow Meta
{
"key" : string:key,
"value" : string:value,
"_links" : {
"workflow" : <href:workflow>
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/meta")
def get_workflow_meta(username, m_wf_id, wf_id):
    """
    Returns a collection of the workflow's metadata.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no workflow metadata found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: WorkflowMeta
    """
    queries = StampedeWorkflowQueries(g.stampede_db_url)
    # Query metadata for the workflow named in the URL (wf_id).  Previously
    # this passed g.m_wf_id (the root workflow uuid) and ignored the wf_id
    # route variable entirely, so the endpoint always returned the root
    # workflow's metadata -- compare get_workflow_state, which passes wf_id.
    paged_response = queries.get_workflow_meta(wf_id, **g.query_args)
    if paged_response.total_records == 0:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)
    response_json = jsonify(paged_response)
    return make_response(response_json, 200, JSON_HEADER)
"""
Workflow Files
{
"wf_id" : int:wf_id,
"lfn_id" : string:lfn_id,
"lfn" : string:lfn,
"pfns" : [
{
"pfn_id" : <int:pfn_id>
"pfn" : <string:pfn>
"site" : <string:site>
}
],
"meta" : [
{
"meta_id" : <int:meta_id>
"key" : <string:key>
"value" : <string:value>
}
],
"_links" : {
"workflow" : <href:workflow>
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/files")
def get_workflow_files(username, m_wf_id, wf_id):
    """
    Returns a collection of the workflow's files.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no workflow files found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: WorkflowFiles
    """
    queries = StampedeWorkflowQueries(g.stampede_db_url)
    # Query files for the workflow named in the URL (wf_id).  Previously this
    # passed g.m_wf_id (the root workflow uuid) and ignored the wf_id route
    # variable, so the endpoint always returned the root workflow's files --
    # compare get_workflow_state, which passes wf_id.
    paged_response = queries.get_workflow_files(wf_id, **g.query_args)
    if paged_response.total_records == 0:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)
    response_json = jsonify(paged_response)
    return make_response(response_json, 200, JSON_HEADER)
"""
Workflow State
{
"wf_id" : int:wf_id,
"state" : string:state,
"status" : int:status,
"restart_count" : int:restart_count,
"timestamp" : datetime:timestamp,
"_links" : {
"workflow" : <href:workflow>
}
}
"""
@blueprint.route("/root/<string:m_wf_id>/workflow/<string:wf_id>/state")
def get_workflow_state(username, m_wf_id, wf_id):
    """
    Returns a collection of Workflow States.

    :query int start-index: Return results starting from record <start-index> (0 indexed)
    :query int max-results: Return a maximum of <max-results> records
    :query string query: Search criteria
    :query string order: Sorting criteria
    :query boolean pretty-print: Return formatted JSON response

    :statuscode 200: OK
    :statuscode 204: No content; when no workflowstates found.
    :statuscode 400: Bad request
    :statuscode 401: Authentication failure
    :statuscode 403: Authorization failure

    :return type: Collection
    :return resource: WorkflowState
    """
    queries = StampedeWorkflowQueries(g.stampede_db_url)
    paged_response = queries.get_workflow_state(wf_id, **g.query_args)
    if not paged_response.total_records:
        log.debug("Total records is 0; returning HTTP 204 No content")
        return make_response("", 204, JSON_HEADER)
    return make_response(jsonify(paged_response), 200, JSON_HEADER)
"""
Job
{
"job_id" : int: job_id,
"exec_job_id" : string: exec_job_id,
"submit_file" : string: submit_file,
"type_desc" : string: type_desc,
"max_retries" : int: max_retries,
"clustered" : bool: clustered,
"task_count" : int: task_count,
"executable" : string: executable,
"argv" : string: argv,
"task_count" : int:task_count,
"_links" : {
"workflow" : href:workflow,
"task" | |
# Source file: kinto/tests/core/test_storage.py (extraction artifact converted to comment)
# -*- coding: utf-8 -*-
import time
import mock
import redis
from pyramid import testing
from kinto.core.utils import sqlalchemy
from kinto.core import utils
from kinto.core.storage import (
exceptions, Filter, generators, memory,
redis as redisbackend, postgresql,
Sort, StorageBase, heartbeat
)
from .support import (unittest, ThreadMixin, DummyRequest,
skip_if_travis, skip_if_no_postgresql)
RECORD_ID = '472be9ec-26fe-461b-8282-9c4e4b207ab3'
class GeneratorTest(unittest.TestCase):
    """Behaviour of the record-id generators."""

    def test_generic_has_mandatory_override(self):
        # The abstract base generator refuses direct instantiation.
        self.assertRaises(NotImplementedError, generators.Generator)

    def test_id_generator_must_respect_storage_backends(self):
        class Dumb(generators.Generator):
            def __call__(self):
                return '*' * 80

        # Ids longer than the backends support are rejected at construction.
        self.assertRaises(AssertionError, Dumb)

    def test_default_generator_allow_underscores_dash_alphabet(self):
        class Dumb(generators.Generator):
            def __call__(self):
                return '1234'

        dumb = Dumb()
        # Alphanumerics, underscores and dashes are accepted ...
        self.assertTrue(dumb.match('1_2_3-abc'))
        self.assertTrue(dumb.match('abc_123'))
        # ... but not as a leading character.
        self.assertFalse(dumb.match('-1_2_3-abc'))
        self.assertFalse(dumb.match('_1_2_3-abc'))

    def test_uuid_generator_pattern_allows_uuid_only(self):
        uuid_gen = generators.UUID4()
        self.assertFalse(uuid_gen.match('XXX-00000000-0000-5000-a000-000000000000'))

    def test_uuid_generator_pattern_is_not_restricted_to_uuid4(self):
        uuid_gen = generators.UUID4()
        self.assertTrue(uuid_gen.match(RECORD_ID))
        # A uuid3-shaped value also matches.
        self.assertTrue(uuid_gen.match('fd800e8d-e8e9-3cac-f502-816cbed9bb6c'))
        # Version / variant fields outside the strict uuid4 spec still match.
        self.assertTrue(uuid_gen.match('00000000-0000-5000-a000-000000000000'))
        self.assertTrue(uuid_gen.match('00000000-0000-4000-e000-000000000000'))
class StorageBaseTest(unittest.TestCase):
    """The abstract storage base class and its error types."""

    def setUp(self):
        self.storage = StorageBase()

    def test_mandatory_overrides(self):
        # Every method of the abstract API must be overridden by concrete
        # backends; invoking any of them on the base raises.
        abstract_calls = (
            (self.storage.initialize_schema,),
            (self.storage.flush,),
            (self.storage.collection_timestamp, '', ''),
            (self.storage.create, '', '', {}),
            (self.storage.get, '', '', ''),
            (self.storage.update, '', '', '', {}),
            (self.storage.delete, '', '', ''),
            (self.storage.delete_all, '', ''),
            (self.storage.purge_deleted, '', ''),
            (self.storage.get_all, '', ''),
        )
        for call in abstract_calls:
            self.assertRaises(NotImplementedError, *call)

    def test_backend_error_message_provides_given_message_if_defined(self):
        error = exceptions.BackendError(message="Connection Error")
        self.assertEqual(str(error), "Connection Error")

    def test_backenderror_message_default_to_original_exception_message(self):
        # Without an explicit message, the wrapped exception is rendered.
        error = exceptions.BackendError(ValueError("Pool Error"))
        self.assertEqual(str(error), "ValueError: Pool Error")
class BaseTestStorage(object):
    """Backend-agnostic test mixin exercising the whole storage API.

    Concrete test classes set:
      * ``backend`` — a storage module exposing ``load_from_config()``;
      * ``settings`` — backend settings fed into the mock Pyramid config;
      * ``client_error_patcher`` — assigned in their own ``setUp`` (left as
        ``None`` here); starting it makes the underlying client raise, which
        is how the BackendError tests below simulate outages.
    """
    backend = None
    settings = {}

    def setUp(self):
        super(BaseTestStorage, self).setUp()
        self.storage = self.backend.load_from_config(self._get_config())
        self.storage.initialize_schema()
        self.id_field = 'id'
        self.modified_field = 'last_modified'
        self.client_error_patcher = None
        # Default record and storage keyword arguments shared by most tests.
        self.record = {'foo': 'bar'}
        self.storage_kw = {
            'collection_id': 'test',
            'parent_id': '1234',
            'auth': 'Basic bWF0OjI='
        }
        self.other_parent_id = '5678'
        self.other_auth = 'Basic bWF0OjE='

    def _get_config(self, settings=None):
        """Mock Pyramid config object.
        """
        if settings is None:
            settings = self.settings
        config = testing.setUp()
        config.add_settings(settings)
        return config

    def tearDown(self):
        # Stop any client-error patches first, so flush() hits the real
        # backend and clears the data created by the test.
        mock.patch.stopall()
        super(BaseTestStorage, self).tearDown()
        self.storage.flush()

    def create_record(self, record=None, id_generator=None,
                      unique_fields=None, **kwargs):
        # Convenience wrapper around storage.create() using the default
        # record and storage_kw; kwargs override individual keywords.
        record = record or self.record
        kw = self.storage_kw.copy()
        kw.update(**kwargs)
        return self.storage.create(record=record,
                                   id_generator=id_generator,
                                   unique_fields=unique_fields,
                                   **kw)

    def test_raises_backend_error_if_error_occurs_on_client(self):
        self.client_error_patcher.start()
        self.assertRaises(exceptions.BackendError,
                          self.storage.get_all,
                          **self.storage_kw)

    def test_backend_error_provides_original_exception(self):
        self.client_error_patcher.start()
        try:
            self.storage.get_all(**self.storage_kw)
        except exceptions.BackendError as e:
            error = e
        # NOTE(review): if get_all() unexpectedly succeeds, `error` is
        # unbound and this line fails with NameError instead of an
        # assertion error — acceptable for a test, but worth knowing.
        self.assertTrue(isinstance(error.original, Exception))

    def test_backend_error_is_raised_anywhere(self):
        # Every API entry point must translate client failures into
        # BackendError, not leak the raw client exception.
        self.client_error_patcher.start()
        calls = [
            (self.storage.collection_timestamp, {}),
            (self.storage.create, dict(record={})),
            (self.storage.get, dict(object_id={})),
            (self.storage.update, dict(object_id='', record={})),
            (self.storage.delete, dict(object_id='')),
            (self.storage.delete_all, {}),
            (self.storage.purge_deleted, {}),
            (self.storage.get_all, {}),
        ]
        for call, kwargs in calls:
            kwargs.update(**self.storage_kw)
            self.assertRaises(exceptions.BackendError, call, **kwargs)
        self.assertRaises(exceptions.BackendError,
                          self.storage.flush,
                          auth=self.other_auth)

    def test_ping_returns_false_if_unavailable(self):
        # The heartbeat alternates between a read and a write probe based
        # on random.random(); both paths must report failure.
        request = DummyRequest()
        request.headers['Authorization'] = self.storage_kw['auth']
        request.registry.settings = {'readonly': 'false'}
        ping = heartbeat(self.storage)
        with mock.patch('kinto.core.storage.random.random', return_value=0.7):
            ping(request)
        self.client_error_patcher.start()
        with mock.patch('kinto.core.storage.random.random', return_value=0.7):
            self.assertFalse(ping(request))
        with mock.patch('kinto.core.storage.random.random', return_value=0.5):
            self.assertFalse(ping(request))

    def test_ping_returns_true_when_working(self):
        request = DummyRequest()
        request.headers['Authorization'] = 'Basic bWF0OjI='
        ping = heartbeat(self.storage)
        with mock.patch('kinto.core.storage.random.random', return_value=0.7):
            self.assertTrue(ping(request))
        with mock.patch('kinto.core.storage.random.random', return_value=0.5):
            self.assertTrue(ping(request))

    def test_ping_returns_true_when_working_in_readonly_mode(self):
        request = DummyRequest()
        request.headers['Authorization'] = 'Basic bWF0OjI='
        request.registry.settings = {'readonly': 'true'}
        ping = heartbeat(self.storage)
        self.assertTrue(ping(request))

    def test_ping_returns_false_if_unavailable_in_readonly_mode(self):
        request = DummyRequest()
        request.headers['Authorization'] = 'Basic bWF0OjI='
        request.registry.settings = {'readonly': 'true'}
        ping = heartbeat(self.storage)
        # In readonly mode only the read probe runs; make it fail.
        with mock.patch.object(self.storage, 'get_all',
                               side_effect=exceptions.BackendError("Boom!")):
            self.assertFalse(ping(request))

    def test_ping_logs_error_if_unavailable(self):
        request = DummyRequest()
        self.client_error_patcher.start()
        ping = heartbeat(self.storage)
        with mock.patch('kinto.core.storage.logger.exception') as exc_handler:
            self.assertFalse(ping(request))
        self.assertTrue(exc_handler.called)

    def test_create_adds_the_record_id(self):
        record = self.create_record()
        self.assertIsNotNone(record['id'])

    def test_create_works_as_expected(self):
        stored = self.create_record()
        retrieved = self.storage.get(object_id=stored['id'], **self.storage_kw)
        self.assertEquals(retrieved, stored)

    def test_create_copies_the_record_before_modifying_it(self):
        # The caller-provided dict must not be mutated (no id injected).
        self.create_record()
        self.assertEquals(self.record.get('id'), None)

    def test_create_uses_the_resource_id_generator(self):
        record = self.create_record(id_generator=lambda: RECORD_ID)
        self.assertEquals(record['id'], RECORD_ID)

    def test_create_supports_unicode_for_parent_and_id(self):
        unicode_id = u'Rémy'
        self.create_record(parent_id=unicode_id, collection_id=unicode_id)

    def test_create_does_not_overwrite_the_provided_id(self):
        record = self.record.copy()
        record[self.id_field] = RECORD_ID
        stored = self.create_record(record=record)
        self.assertEqual(stored[self.id_field], RECORD_ID)

    def test_create_raise_unicity_error_if_provided_id_exists(self):
        record = self.record.copy()
        record[self.id_field] = RECORD_ID
        self.create_record(record=record)
        record = self.record.copy()
        record[self.id_field] = RECORD_ID
        self.assertRaises(exceptions.UnicityError,
                          self.create_record,
                          record=record)

    def test_create_does_generate_a_new_last_modified_field(self):
        record = self.record.copy()
        self.assertNotIn(self.modified_field, record)
        created = self.create_record(record=record)
        self.assertIn(self.modified_field, created)

    def test_get_raise_on_record_not_found(self):
        self.assertRaises(
            exceptions.RecordNotFoundError,
            self.storage.get,
            object_id=RECORD_ID,
            **self.storage_kw
        )

    def test_update_creates_a_new_record_when_needed(self):
        # update() acts as an upsert: it must succeed for an unknown id.
        self.assertRaises(
            exceptions.RecordNotFoundError,
            self.storage.get,
            object_id=RECORD_ID,
            **self.storage_kw
        )
        record = self.storage.update(object_id=RECORD_ID,
                                     record=self.record,
                                     **self.storage_kw)
        retrieved = self.storage.get(object_id=RECORD_ID,
                                     **self.storage_kw)
        self.assertEquals(retrieved, record)

    def test_update_overwrites_record_id(self):
        stored = self.create_record()
        record_id = stored[self.id_field]
        self.record[self.id_field] = 'this-will-be-ignored'
        self.storage.update(object_id=record_id, record=self.record,
                            **self.storage_kw)
        retrieved = self.storage.get(object_id=record_id, **self.storage_kw)
        self.assertEquals(retrieved[self.id_field], record_id)

    def test_update_generates_a_new_last_modified_field_if_not_present(self):
        stored = self.create_record()
        record_id = stored[self.id_field]
        self.assertNotIn(self.modified_field, self.record)
        self.storage.update(object_id=record_id, record=self.record,
                            **self.storage_kw)
        retrieved = self.storage.get(object_id=record_id, **self.storage_kw)
        self.assertIn(self.modified_field, retrieved)
        self.assertGreater(retrieved[self.modified_field],
                           stored[self.modified_field])

    def test_delete_works_properly(self):
        stored = self.create_record()
        self.storage.delete(object_id=stored['id'], **self.storage_kw)
        self.assertRaises(  # Shouldn't exist.
            exceptions.RecordNotFoundError,
            self.storage.get,
            object_id=stored['id'],
            **self.storage_kw
        )

    def test_delete_can_specify_the_last_modified(self):
        stored = self.create_record()
        last_modified = stored[self.modified_field] + 10
        self.storage.delete(
            object_id=stored['id'],
            last_modified=last_modified,
            **self.storage_kw)
        # The tombstone keeps the caller-specified timestamp.
        records, count = self.storage.get_all(
            include_deleted=True, **self.storage_kw)
        self.assertEquals(records[0][self.modified_field], last_modified)

    def test_delete_raise_when_unknown(self):
        self.assertRaises(
            exceptions.RecordNotFoundError,
            self.storage.delete,
            object_id=RECORD_ID,
            **self.storage_kw
        )

    def test_get_all_return_all_values(self):
        for x in range(10):
            record = dict(self.record)
            record["number"] = x
            self.create_record(record)
        records, total_records = self.storage.get_all(**self.storage_kw)
        self.assertEquals(len(records), 10)
        self.assertEquals(len(records), total_records)

    def test_get_all_handle_limit(self):
        for x in range(10):
            record = dict(self.record)
            record["number"] = x
            self.create_record(record)
        # total_records reports the full count even when limit truncates.
        records, total_records = self.storage.get_all(include_deleted=True,
                                                      limit=2,
                                                      **self.storage_kw)
        self.assertEqual(total_records, 10)
        self.assertEqual(len(records), 2)

    def test_get_all_handle_sorting_on_id(self):
        for x in range(3):
            self.create_record()
        sorting = [Sort('id', 1)]
        records, _ = self.storage.get_all(sorting=sorting,
                                          **self.storage_kw)
        self.assertTrue(records[0]['id'] < records[-1]['id'])

    def test_get_all_can_filter_with_list_of_values(self):
        for l in ['a', 'b', 'c']:
            self.create_record({'code': l})
        filters = [Filter('code', ['a', 'b'], utils.COMPARISON.IN)]
        records, _ = self.storage.get_all(filters=filters,
                                          **self.storage_kw)
        self.assertEqual(len(records), 2)

    def test_get_all_can_filter_with_numeric_values(self):
        for l in [1, 10, 6, 46]:
            self.create_record({'code': l})
        sorting = [Sort('code', 1)]
        filters = [Filter('code', 10, utils.COMPARISON.MAX)]
        records, _ = self.storage.get_all(sorting=sorting, filters=filters,
                                          **self.storage_kw)
        self.assertEqual(records[0]['code'], 1)
        self.assertEqual(records[1]['code'], 6)
        self.assertEqual(records[2]['code'], 10)
        self.assertEqual(len(records), 3)

    def test_get_all_can_filter_with_numeric_strings(self):
        # Digit-only strings must be compared as strings, not numbers.
        for l in ["0566199093", "0781566199"]:
            self.create_record({'phone': l})
        filters = [Filter('phone', "0566199093", utils.COMPARISON.EQ)]
        records, _ = self.storage.get_all(filters=filters,
                                          **self.storage_kw)
        self.assertEqual(len(records), 1)

    def test_get_all_can_filter_with_float_values(self):
        for l in [10, 11.5, 8.5, 6, 7.5]:
            self.create_record({'note': l})
        filters = [Filter('note', 9.5, utils.COMPARISON.LT)]
        records, _ = self.storage.get_all(filters=filters,
                                          **self.storage_kw)
        self.assertEqual(len(records), 3)

    def test_get_all_can_filter_with_strings(self):
        for l in ["Rémy", "Alexis", "Marie"]:
            self.create_record({'name': l})
        sorting = [Sort('name', 1)]
        filters = [Filter('name', "Mathieu", utils.COMPARISON.LT)]
        records, _ = self.storage.get_all(sorting=sorting, filters=filters,
                                          **self.storage_kw)
        self.assertEqual(records[0]['name'], "Alexis")
        self.assertEqual(records[1]['name'], "Marie")
        self.assertEqual(len(records), 2)

    def test_get_all_can_filter_with_list_of_values_on_id(self):
        record1 = self.create_record({'code': 'a'})
        record2 = self.create_record({'code': 'b'})
        filters = [Filter('id', [record1['id'], record2['id']],
                          utils.COMPARISON.IN)]
        records, _ = self.storage.get_all(filters=filters,
                                          **self.storage_kw)
        self.assertEqual(len(records), 2)

    def test_get_all_returns_empty_when_including_list_of_empty_values(self):
        self.create_record({'code': 'a'})
        self.create_record({'code': 'b'})
        filters = [Filter('id', [], utils.COMPARISON.IN)]
        records, _ = self.storage.get_all(filters=filters, **self.storage_kw)
        self.assertEqual(len(records), 0)

    def test_get_all_can_filter_with_list_of_excluded_values(self):
        for l in ['a', 'b', 'c']:
            self.create_record({'code': l})
        filters = [Filter('code', ('a', 'b'), utils.COMPARISON.EXCLUDE)]
        records, _ = self.storage.get_all(filters=filters,
                                          **self.storage_kw)
        self.assertEqual(len(records), 1)

    def test_get_all_handle_a_pagination_rules(self):
        for x in range(10):
            record = dict(self.record)
            record["number"] = x % 3
            self.create_record(record)
        records, total_records = self.storage.get_all(
            limit=5,
            pagination_rules=[
                [Filter('number', 1, utils.COMPARISON.GT)]
            ], **self.storage_kw)
        self.assertEqual(total_records, 10)
        self.assertEqual(len(records), 3)

    def test_get_all_handle_all_pagination_rules(self):
        for x in range(10):
            record = dict(self.record)
            record["number"] = x % 3
            last_record = self.create_record(record)
        # Multiple pagination rules are OR-ed together.
        records, total_records = self.storage.get_all(
            limit=5, pagination_rules=[
                [Filter('number', 1, utils.COMPARISON.GT)],
                [Filter('id', last_record['id'], utils.COMPARISON.EQ)],
            ], **self.storage_kw)
        self.assertEqual(total_records, 10)
        self.assertEqual(len(records), 4)
class TimestampsTest(object):
def test_timestamp_are_incremented_on_create(self):
self.create_record() # init
before = self.storage.collection_timestamp(**self.storage_kw)
self.create_record()
after = self.storage.collection_timestamp(**self.storage_kw)
self.assertTrue(before < after)
def test_timestamp_are_incremented_on_update(self):
stored = self.create_record()
_id = stored['id']
before = self.storage.collection_timestamp(**self.storage_kw)
self.storage.update(object_id=_id, record={'bar': 'foo'},
**self.storage_kw)
after = self.storage.collection_timestamp(**self.storage_kw)
self.assertTrue(before < after)
def test_timestamp_are_incremented_on_delete(self):
stored = self.create_record()
_id = stored['id']
before = self.storage.collection_timestamp(**self.storage_kw)
self.storage.delete(object_id=_id, **self.storage_kw)
after = self.storage.collection_timestamp(**self.storage_kw)
self.assertTrue(before < after)
@skip_if_travis
def test_timestamps_are_unique(self):
obtained = []
def create_item():
for i in range(100):
record = self.create_record()
obtained.append((record['last_modified'], record['id']))
thread1 = self._create_thread(target=create_item)
thread2 = self._create_thread(target=create_item)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
# With CPython (GIL), list appending is thread-safe
self.assertEqual(len(obtained), 200)
# No duplicated timestamps
self.assertEqual(len(set(obtained)), len(obtained))
def test_the_timestamp_is_not_updated_when_collection_remains_empty(self):
# Get timestamp once.
first = self.storage.collection_timestamp(**self.storage_kw)
time.sleep(0.002) # wait some time.
# Check that second time returns the same value.
second = self.storage.collection_timestamp(**self.storage_kw)
self.assertEqual(first, second)
def test_the_timestamp_are_based_on_real_time_milliseconds(self):
before = utils.msec_time()
time.sleep(0.002) # 2 msec
record = self.create_record()
now = record['last_modified']
time.sleep(0.002) # 2 msec
after = utils.msec_time()
self.assertTrue(before < now < after,
'%s < %s < %s' % (before, now, after))
def test_timestamp_are_always_incremented_above_existing_value(self):
# Create a record with normal clock
record = self.create_record()
current = record['last_modified']
# Patch the clock to return a time in the past, before the big bang
with mock.patch('kinto.core.utils.msec_time') as time_mocked:
time_mocked.return_value = -1
record = self.create_record()
after = record['last_modified']
# Expect the last one to be based on the highest value
self.assertTrue(0 < current < after,
'0 < %s < %s' % (current, after))
def test_create_uses_specified_last_modified_if_collection_empty(self):
# Collection is empty, create a new record with a specified timestamp.
last_modified = 1448881675541
record = self.record.copy()
record[self.id_field] = RECORD_ID
record[self.modified_field] = last_modified
self.create_record(record=record)
# Check that the record was assigned the specified timestamp.
retrieved = self.storage.get(object_id=RECORD_ID, **self.storage_kw)
self.assertEquals(retrieved[self.modified_field], last_modified)
# Collection timestamp is now the same as its only record.
collection_ts = self.storage.collection_timestamp(**self.storage_kw)
self.assertEquals(collection_ts, last_modified)
def test_create_ignores_specified_last_modified_if_in_the_past(self):
# Create a first record, and get the timestamp.
first_record = self.create_record()
timestamp_before = first_record[self.modified_field]
# Create a new record with its timestamp in the past.
record = self.record.copy()
record[self.id_field] = RECORD_ID
record[self.modified_field] = timestamp_before - 10
self.create_record(record=record)
# Check that record timestamp is the one specified.
retrieved = self.storage.get(object_id=RECORD_ID, **self.storage_kw)
self.assertLess(retrieved[self.modified_field], | |
= self._matchStatus(status)
if not match:
return None
notification = {
'title': status.user.screen_name,
'icon': status.user.profile_image_url,
'uri': ('https://twitter.com/%s/statuses/%d' %
(status.user.screen_name.encode('utf-8'),
status.id)),
}
# Twitter Entities on retweets have incorrect indices. Use the
# retweeted status for rendering the plain text and html.
if getattr(status, 'retweeted_status', None):
notification['subtitle'] = status.retweeted_status.text
else:
notification['subtitle'] = status.text
urls.sort(key=lambda url: url.indices.start, reverse=True)
notification['html'] = notification['subtitle']
for url in urls:
if getattr(url, 'display_url'):
head = notification['subtitle'][0:url.indices.start]
tail = notification['subtitle'][url.indices.end:]
text = u''.join([head, url.display_url, tail])
notification['subtitle'] = text
headHTML = notification['html'][0:url.indices.start]
tailHTML = notification['html'][url.indices.end:]
linkHRef = escapeToXml(url.url, isattrib=1)
linkText = escapeToXml(url.display_url)
link = u"<a href='%s'>%s</a>" % (linkHRef, linkText)
html = u''.join([headHTML, link, tailHTML])
notification['html'] = html
# Prefix the retweeted status explicitly.
if getattr(status, 'retweeted_status', None):
notification['subtitle'] = 'RT @%s: %s' % (
status.retweeted_status.user.screen_name,
notification['subtitle'])
notification['html'] = 'RT @%s: %s' % (
status.retweeted_status.user.screen_name,
notification['html'])
if getattr(status, 'image_url', None):
notification['picture'] = status.image_url
self._addVia(notification)
return notification
def renderTitle(self):
return "%s (%d terms, %d users)" % (self.title,
len(self.terms or []),
len(self.userIDs or []))
class RegDeskSource(PubSubSourceMixin, item.Item):
    """
    Notification source announcing arrivals reported by the registration
    desk of an event, with a randomly chosen localized arrival phrase.
    """
    title = "Registration desk"
    feed = attributes.reference()
    enabled = attributes.boolean()
    via = attributes.text()
    subscription = attributes.reference()
    event = attributes.reference("""
    Reference to the event.
    """)

    TEXTS_NL = {
        'via': 'Registratiebalie',
        'regdesk': [u'is binnen',
                    u'is er nu ook',
                    u'is net binnengekomen',
                    u'is gearriveerd'],
    }
    TEXTS_EN = {
        'via': 'Registration Desk',
        'regdesk': [u'just arrived',
                    u'showed up at the entrance',
                    u'received a badge',
                    u'has entered the building',
                    ],
    }

    def format_payload(self, payload):
        # Draw the random phrase first so the RNG is consumed exactly once
        # per payload, even when no person element is present.
        phrases = self.texts[self.feed.language]['regdesk']
        subtitle = random.choice(phrases)
        if not payload.person:
            return None
        return {'title': unicode(payload.person.title),
                'subtitle': subtitle,
                'icon': unicode(payload.person.image)}

    def renderTitle(self):
        eventTitle = (self.event and self.event.title) or "?"
        return "%s for %s" % (self.title, eventTitle)

    def getNode(self):
        # No event configured means there is no pubsub node to watch.
        if self.event is None:
            return None
        service = getPubSubService(self.event.uri)
        nodeIdentifier = unicode('regdesk/by_event/' + getThingID(self.event.uri))
        return (service, nodeIdentifier)
class RaceSource(PubSubSourceMixin, item.Item):
    """
    Notification source announcing race (alleycat) finishes with a
    localized 'finished the X in Y' subtitle.
    """
    title = "Race events"
    feed = attributes.reference()
    enabled = attributes.boolean()
    via = attributes.text()
    subscription = attributes.reference()
    race = attributes.reference("""
    Reference to the thing representing the race.
    """)

    TEXTS_NL = {
        'via': 'Alleycat',
        'race_finish': u'finishte de %s in %s.',
    }
    TEXTS_EN = {
        'via': 'Alleycat',
        'race_finish': u'finished the %s in %s.',
    }

    def format_payload(self, payload):
        # Fill the localized finish template with the event name and time.
        template = self.texts[self.feed.language]['race_finish']
        subtitle = template % (unicode(payload.event), unicode(payload.time))
        return {
            'title': unicode(payload.person.title),
            'subtitle': subtitle,
            'icon': unicode(payload.person.image),
        }

    def renderTitle(self):
        raceTitle = (self.race and self.race.title) or "?"
        return "%s for the race %s" % (self.title, raceTitle)

    def getNode(self):
        # No race configured means there is no pubsub node to watch.
        if self.race is None:
            return None
        service = getPubSubService(self.race.uri)
        return (service, unicode('race/' + getThingID(self.race.uri)))
class ActivityStreamSourceMixin(PubSubSourceMixin):
    """
    Common code for Activity Stream via XMPP Publish-Subscribe sources.
    The text labels under the C{'activity_verbs'} key are all templates
    where the titles of activity object and target can be used as variables.
    A subclass of this mixin will define the list of supported activity streams
    verbs it will render into notifications. Notifications with other verbs
    are dropped.
    Upon receiving a new notification, this list will be processed in order,
    checking if the verb is used in the notification. If it is, a matching text
    template is looked up in the text labels. If that label is C{None}, the
    notification will be dropped. This allows for ignoring certain verbs that
    are a subverb of a supported verb. E.g. when the status-update verb is
    derived from the post verb, but we don't want to render the status-update
    verb at all, we put the status-update verb in C{'supportedVerbs'}, but then
    assign C{None} as its text label.
    Processing of the list of supported verbs will stop at the first verb that
    is found in the notification. Notifications with verbs that derive from
    other verbs will have all the superverbs also mentioned in the
    notification. Make sure that the list of supported verbs is ordered from
    most to least specific, so that the most specific verb for a notification
    is found first.
    @ivar supportedVerbs: The verbs supported by this instance as a tuple of
        verb URIs.
    @type supportedVerbs: C{tuple}.
    """
    # Verb URI -> subtitle template, per language. A None template means
    # "recognized but deliberately not rendered" (see class docstring).
    TEXTS_NL = {
        'activity_verbs': {
            NS_ACTIVITY_SCHEMA + 'post': 'plaatste %(object)s',
            NS_ACTIVITY_SCHEMA + 'like': u'is ge\u00efntresseerd in %(object)s',
            NS_ACTIVITY_SCHEMA + 'tag': 'wees %(object)s aan in %(target)s',
            NS_ACTIVITY_SCHEMA + 'share': 'deelde %(object)s op %(target)s',
            NS_ACTIVITY_SCHEMA + 'make-friend': 'werd vrienden met %(object)s',
            NS_ACTIVITY_SCHEMA + 'update': 'paste %(object)s aan',
            NS_ACTIVITY_SCHEMA + 'rsvp-yes': 'komt naar %(object)s',
            NS_ACTIVITY_SCHEMA + 'checkin': 'was bij %(object)s',
            NS_ANYMETA_ACTIVITY + 'link-to': 'linkte naar %(object)s vanaf %(target)s',
            NS_ANYMETA_ACTIVITY + 'status-update': None,
            NS_ANYMETA_ACTIVITY + 'iktag': 'koppelde een ikTag',
            NS_ANYMETA_ACTIVITY + 'facebook-connect': 'koppelde aan Facebook',
            ACTIVITY_COMMIT: 'committe %(object)s op %(target)s',
        }
    }
    TEXTS_EN = {
        'activity_verbs': {
            NS_ACTIVITY_SCHEMA + 'post': 'posted %(object)s',
            NS_ACTIVITY_SCHEMA + 'like': 'liked %(object)s',
            NS_ACTIVITY_SCHEMA + 'tag': 'tagged %(object)s in %(target)s',
            NS_ACTIVITY_SCHEMA + 'share': 'shared %(object)s on %(target)s',
            NS_ACTIVITY_SCHEMA + 'make-friend': 'friended %(object)s',
            NS_ACTIVITY_SCHEMA + 'update': 'updated %(object)s',
            NS_ACTIVITY_SCHEMA + 'rsvp-yes': 'will attend %(object)s',
            NS_ACTIVITY_SCHEMA + 'checkin': 'was at %(object)s',
            NS_ANYMETA_ACTIVITY + 'link-to': 'linked to %(object)s from %(target)s',
            NS_ANYMETA_ACTIVITY + 'status-update': None,
            NS_ANYMETA_ACTIVITY + 'iktag': 'linked an ikTag',
            NS_ANYMETA_ACTIVITY + 'facebook-connect': 'connected to Facebook',
            ACTIVITY_COMMIT: 'committed %(object)s on %(target)s',
        }
    }
    # Subclasses override these; see class docstring for ordering rules.
    supportedVerbs = ()
    # Verbs that are still rendered when the payload carries an agent
    # element (agent-originated notifications are otherwise dropped).
    agentVerbs = frozenset()

    def format_payload(self, payload):
        """
        Render the payload into a notification.
        If available, this uses the anyMeta specific 'figure' links to point to
        scaled-and-cropped versions of the image used for the actor (icon) or
        the object (picture).
        """
        # Collect all verb URIs present in the payload.
        verbs = set([unicode(element)
                     for element in payload.elements(NS_ACTIVITY_SPEC, 'verb')])
        template = None
        # First supported verb found wins; `verb` intentionally leaks out
        # of this loop and is reused in the agent check below.
        for verb in self.supportedVerbs:
            if verb in verbs:
                template = self.texts[self.feed.language]['activity_verbs'][verb]
                break
        # No supported verb, or a verb whose label is None: drop.
        if template is None:
            return None
        # Agent-originated notifications are only kept for agentVerbs.
        if payload.agent and verb not in self.agentVerbs:
            return None
        # Python 2 idioms throughout (unicode(), iterator .next()).
        from twisted.words.xish.domish import generateElementsNamed
        actorTitle = unicode(generateElementsNamed(payload.author.elements(),
                                                   'name').next())
        # anyMeta 'figure' link on the author becomes the notification icon.
        figureURI = None
        for element in payload.author.elements(NS_ATOM, 'link'):
            if element.getAttribute('rel', 'alternate') == 'figure':
                figureURI = element.getAttribute('href')
                break
        if figureURI:
            figureURI += '?width=80&height=80&filter=crop'
        # For attachment objects, the object's figure becomes the picture.
        # NOTE: `element` is reused by the nested loop, and only the inner
        # loop breaks — the outer loop keeps scanning object-type elements.
        pictureURI = None
        for element in payload.object.elements(NS_ACTIVITY_SPEC,
                                               'object-type'):
            if unicode(element) == TYPE_ATTACHMENT:
                for element in payload.object.elements(NS_ATOM, 'link'):
                    if element.getAttribute('rel', 'alternate') == 'figure':
                        pictureURI = element.getAttribute('href')
                        break
        if pictureURI:
            pictureURI += '?width=480'
        # Template variables; note this local shadows the builtin vars().
        vars = {}
        if payload.object and payload.object.title:
            vars['object'] = unicode(payload.object.title)
        if payload.target and payload.target.title:
            vars['target'] = unicode(payload.target.title)
        subtitle = template % vars
        notification = {
            'title': actorTitle,
            'subtitle': subtitle,
            'via': self.getVia()
        }
        if figureURI:
            notification['icon'] = figureURI
        if pictureURI:
            notification['picture'] = pictureURI
        return notification
class ActivityStreamSource(ActivityStreamSourceMixin, item.Item):
    """
    Generic anyMeta Activity Streams source, tied to a single site.
    """
    title = "Activity Stream"
    feed = attributes.reference()
    enabled = attributes.boolean()
    via = attributes.text()
    subscription = attributes.reference()
    site = attributes.reference("""
    Reference to the site representing where activities occur.
    """)
    actor = attributes.reference("""
    Reference to the thing representing the actor of the activities.
    """)

    # Ordered most-specific first; see ActivityStreamSourceMixin.
    supportedVerbs = (
        NS_ANYMETA_ACTIVITY + 'status-update',
        NS_ACTIVITY_SCHEMA + 'post',
        NS_ACTIVITY_SCHEMA + 'like',
        NS_ACTIVITY_SCHEMA + 'tag',
        NS_ACTIVITY_SCHEMA + 'share',
        NS_ACTIVITY_SCHEMA + 'make-friend',
        NS_ACTIVITY_SCHEMA + 'update',
        #NS_ACTIVITY_SCHEMA + 'rsvp-yes',
        #NS_ANYMETA_ACTIVITY + 'link-to',
        NS_ANYMETA_ACTIVITY + 'iktag',
        NS_ANYMETA_ACTIVITY + 'facebook-connect',
    )
    # Verbs still rendered when the payload carries an agent element.
    agentVerbs = frozenset((
        NS_ACTIVITY_SCHEMA + 'like',
        NS_ANYMETA_ACTIVITY + 'iktag',
        NS_ANYMETA_ACTIVITY + 'facebook-connect',
    ))

    def getNode(self):
        # Without a configured site there is no pubsub node to watch.
        if self.site is None:
            return None
        return (getPubSubService(self.site.uri), u'activity')

    def renderTitle(self):
        siteTitle = (self.site and self.site.title) or "?"
        return "%s from %s" % (self.title, siteTitle)

    def getVia(self):
        return self.site.title
class IkCamSource(ActivityStreamSourceMixin, item.Item):
title = "IkCam pictures"
feed = attributes.reference()
enabled = attributes.boolean()
subscription = attributes.reference()
via = attributes.text()
event = attributes.reference("""
Reference to the event the pictures were taken at.
""")
creator = attributes.reference("""
Reference to the creator of the pictures.
""")
ikCamVerb = NS_ANYMETA_ACTIVITY + 'ikcam'
TEXTS_NL = {
'via': 'ikCam',
'ikcam_picture_singular': u'ging op de foto',
'ikcam_picture_plural': u'gingen op de foto',
'ikcam_event': u' bij %s',
}
TEXTS_EN = {
'via': 'ikCam',
'ikcam_picture_singular': u'took a self-portrait',
'ikcam_picture_plural': u'took a group portrait',
'ikcam_event': u' at %s',
}
def format_payload(self, payload):
"""
Render the payload into a notification.
If available, this uses the anyMeta specific 'figure' links to point to
scaled-and-cropped versions of the image used for the actor (icon) or
the object (picture).
"""
texts = self.texts[self.feed.language]
verbs = set([unicode(element)
for element in payload.elements(NS_ACTIVITY_SPEC, 'verb')])
if self.ikCamVerb not in verbs:
return None
# filter out ikcam notifications from other agents
if payload.agent and self.creator and unicode(payload.agent.id) != self.creator.uri:
return None
# filter out ikcam notifications from other events
if payload.target and self.event and unicode(payload.target.id) != self.event.uri:
return None
from twisted.words.xish.domish import generateElementsQNamed
actors = generateElementsQNamed(payload.elements(), 'author', NS_ATOM)
names = reduce(lambda x, y: | |
# Source file: zoomrec.py (extraction artifact converted to comment)
import csv
import logging
import os
import psutil
import pyautogui
import random
import schedule
import signal
import subprocess
import threading
import time
import atexit
from datetime import datetime, timedelta
global ONGOING_MEETING
global VIDEO_PANEL_HIDED
logging.basicConfig(
format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
# Turn DEBUG on:
# - screenshot on error
# - record joining
# - do not exit container on error
DEBUG = True if os.getenv('DEBUG') == 'True' else False
# Disable failsafe
pyautogui.FAILSAFE = False
# Get vars
BASE_PATH = os.getenv('HOME')
CSV_PATH = os.path.join(BASE_PATH, "meetings.csv")
IMG_PATH = os.path.join(BASE_PATH, "img")
REC_PATH = os.path.join(BASE_PATH, "recordings")
DEBUG_PATH = os.path.join(REC_PATH, "screenshots")
NAME_LIST = [
'iPhone',
'iPad',
'Macbook',
'Desktop',
'Huawei',
'Mobile',
'PC',
'Windows',
'Home',
'MyPC',
'Computer',
'Android'
]
TIME_FORMAT = "%Y-%m-%d_%H-%M-%S"
CSV_DELIMITER = ';'
ONGOING_MEETING = False
VIDEO_PANEL_HIDED = False
class BackgroundThread:
def __init__(self, interval=10):
# Sleep interval between
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
global ONGOING_MEETING
ONGOING_MEETING = True
logging.debug("Check continuously if meeting has ended..")
while ONGOING_MEETING:
# Check if recording
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'warn_meeting_recording.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("This meeting is being recorded..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'accept_recording.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Accepted recording..")
except TypeError:
logging.error("Could not accept recording!")
# Check if ended
if (pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'meeting_ended_by_host_1.png'),
confidence=0.9) is not None or pyautogui.locateOnScreen(
os.path.join(IMG_PATH, 'meeting_ended_by_host_2.png'), confidence=0.9) is not None):
ONGOING_MEETING = False
logging.info("Meeting ended by host..")
time.sleep(self.interval)
class HideViewOptionsThread:
def __init__(self, interval=10):
# Sleep interval between
self.interval = interval
thread = threading.Thread(target=self.run, args=())
thread.daemon = True # Daemonize thread
thread.start() # Start the execution
def run(self):
global VIDEO_PANEL_HIDED
logging.debug("Check continuously if screensharing is active..")
while ONGOING_MEETING:
# Check if host is sharing poll results
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'host_is_sharing_poll_results.png'),
confidence=0.9,
minSearchTime=2) is not None):
logging.info("Host is sharing poll results..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'host_is_sharing_poll_results.png'), confidence=0.9)
pyautogui.click(x, y)
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'exit.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Closed poll results window..")
except TypeError:
logging.error("Could not exit poll results window!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_close_poll_results_error.png")
except TypeError:
logging.error("Could not find poll results window anymore!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_find_poll_results_error.png")
# Check if view options available
if pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'view_options.png'), confidence=0.9) is not None:
if not VIDEO_PANEL_HIDED:
logging.info("Screensharing active..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'view_options.png'), confidence=0.9)
pyautogui.click(x, y)
time.sleep(1)
# Hide video panel
if pyautogui.locateOnScreen(os.path.join(IMG_PATH, 'show_video_panel.png'),
confidence=0.9) is not None:
# Leave 'Show video panel' and move mouse from screen
pyautogui.moveTo(0, 0)
pyautogui.click(0, 0)
VIDEO_PANEL_HIDED = True
else:
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'hide_video_panel.png'), confidence=0.9)
pyautogui.click(x, y)
# Move mouse from screen
pyautogui.moveTo(0, 0)
VIDEO_PANEL_HIDED = True
except TypeError:
logging.error("Could not hide video panel!")
except TypeError:
logging.error("Could not find view options!")
else:
VIDEO_PANEL_HIDED = False
time.sleep(self.interval)
def check_connecting(zoom_pid, start_date, duration):
# Check if connecting
check_periods = 0
connecting = False
# Check if connecting
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'connecting.png'), confidence=0.9) is not None:
connecting = True
logging.info("Connecting..")
# Wait while connecting
# Exit when meeting ends after time
while connecting:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom_pid), signal.SIGQUIT)
return
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'connecting.png'), confidence=0.9) is None:
logging.info("Maybe not connecting anymore..")
check_periods += 1
if check_periods >= 2:
connecting = False
logging.info("Not connecting anymore..")
return
time.sleep(2)
def join_meeting(meet_id):
logging.info("Join a meeting..")
found_join_meeting = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_meeting.png'), minSearchTime=2, confidence=0.9)
pyautogui.click(x, y)
found_join_meeting = True
except TypeError:
pass
if not found_join_meeting:
logging.error("Could not find 'Join Meeting' on screen!")
return False
time.sleep(2)
# Insert meeting id
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.write(meet_id, interval=0.1)
# Insert name
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.hotkey('ctrl', 'a')
pyautogui.write(random.choice(NAME_LIST), interval=0.1)
# Configure
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('space')
pyautogui.press('tab')
pyautogui.press('tab')
pyautogui.press('space')
time.sleep(2)
# Sometimes invalid id error is displayed
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'invalid_meeting_id.png'), confidence=0.9) is not None:
logging.error("Maybe a invalid meeting id was inserted..")
left = False
try:
x, y = pyautogui.locateCenterOnScreen(
os.path.join(IMG_PATH, 'leave.png'), confidence=0.9)
pyautogui.click(x, y)
left = True
except TypeError:
pass
# Valid id
if left:
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_meeting.png'), confidence=0.9) is not None:
logging.error("Invalid meeting id!")
return False
else:
return True
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'authorized_attendees_only.png'), confidence=0.9) is not None:
logging.error("This meeting is for authorized attendees only!")
return False
return True
def find_process_id_by_name(process_name):
list_of_process_objects = []
# Iterate over the all the running process
for proc in psutil.process_iter():
try:
pinfo = proc.as_dict(attrs=['pid', 'name'])
# Check if process name contains the given name string.
if process_name.lower() in pinfo['name'].lower():
list_of_process_objects.append(pinfo)
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
return list_of_process_objects
def show_toolbars():
# Mouse move to show toolbar
width, height = pyautogui.size()
y = (height / 2)
pyautogui.moveTo(0, y, duration=0.5)
pyautogui.moveTo(width - 1, y, duration=0.5)
def join_audio(description):
audio_joined = False
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_with_computer_audio.png'), confidence=0.9)
logging.info("Join with computer audio..")
pyautogui.click(x, y)
audio_joined = True
return True
except TypeError:
logging.error("Could not join with computer audio!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_join_with_computer_audio_error.png")
time.sleep(1)
if not audio_joined:
try:
show_toolbars()
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'join_audio.png'), confidence=0.9)
pyautogui.click(x, y)
join_audio(description)
except TypeError:
logging.error("Could not join audio!")
if DEBUG:
pyautogui.screenshot(os.path.join(DEBUG_PATH, time.strftime(
TIME_FORMAT) + "-" + description) + "_join_audio_error.png")
return False
def join(meet_id, meet_pw, duration, description):
global VIDEO_PANEL_HIDED
ffmpeg_debug = None
logging.info("Join meeting: " + description)
if DEBUG:
# Start recording
width, height = pyautogui.size()
resolution = str(width) + 'x' + str(height)
disp = os.getenv('DISPLAY')
logging.info("Start recording..")
filename = os.path.join(
REC_PATH, time.strftime(TIME_FORMAT)) + "-" + description + "-JOIN.mkv"
command = "ffmpeg -nostats -loglevel quiet -f pulse -ac 2 -i 1 -f x11grab -r 30 -s " + resolution + " -i " + \
disp + " -acodec pcm_s16le -vcodec libx264rgb -preset ultrafast -crf 0 -threads 0 -async 1 -vsync 1 " + filename
ffmpeg_debug = subprocess.Popen(
command, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
atexit.register(os.killpg, os.getpgid(
ffmpeg_debug.pid), signal.SIGQUIT)
# Exit Zoom if running
exit_process_by_name("zoom")
# Start Zoom
zoom = subprocess.Popen("zoom", stdout=subprocess.PIPE,
shell=True, preexec_fn=os.setsid)
# Wait while zoom process is there
list_of_process_ids = find_process_id_by_name('zoom')
while len(list_of_process_ids) <= 0:
logging.info("No Running Zoom Process found!")
list_of_process_ids = find_process_id_by_name('zoom')
time.sleep(1)
# Wait for zoom is started
while pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'join_meeting.png'), confidence=0.9) is None:
logging.info("Zoom not ready yet!")
time.sleep(1)
logging.info("Zoom started!")
start_date = datetime.now()
joined = join_meeting(meet_id)
if not joined:
logging.error("Failed to join meeting!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG and ffmpeg_debug is not None:
# closing ffmpeg
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
pyautogui.write(meet_pw, interval=0.2)
pyautogui.press('tab')
pyautogui.press('space')
# Joined meeting
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
# Check if meeting is started by host
check_periods = 0
meeting_started = True
time.sleep(2)
# Check if waiting for host
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'wait_for_host.png'), confidence=0.9, minSearchTime=3) is not None:
meeting_started = False
logging.info("Please wait for the host to start this meeting.")
# Wait for the host to start this meeting
# Exit when meeting ends after time
while not meeting_started:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'wait_for_host.png'), confidence=0.9) is None:
logging.info("Maybe meeting was started now.")
check_periods += 1
if check_periods >= 2:
meeting_started = True
logging.info("Meeting started by host.")
break
time.sleep(2)
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
# Check if in waiting room
check_periods = 0
in_waitingroom = False
time.sleep(2)
# Check if joined into waiting room
if pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'waiting_room.png'), confidence=0.9,
minSearchTime=3) is not None:
in_waitingroom = True
logging.info("Please wait, the meeting host will let you in soon..")
# Wait while host will let you in
# Exit when meeting ends after time
while in_waitingroom:
if (datetime.now() - start_date).total_seconds() > duration:
logging.info("Meeting ended after time!")
logging.info("Exit Zoom!")
os.killpg(os.getpgid(zoom.pid), signal.SIGQUIT)
if DEBUG:
os.killpg(os.getpgid(ffmpeg_debug.pid), signal.SIGQUIT)
atexit.unregister(os.killpg)
return
if pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'waiting_room.png'), confidence=0.9) is None:
logging.info("Maybe no longer in the waiting room..")
check_periods += 1
if check_periods == 2:
logging.info("No longer in the waiting room..")
break
time.sleep(2)
# Meeting joined
# Check if connecting
check_connecting(zoom.pid, start_date, duration)
logging.info("Joined meeting..")
# Check if recording warning is shown at the beginning
if (pyautogui.locateCenterOnScreen(os.path.join(IMG_PATH, 'warn_meeting_recording.png'), confidence=0.9,
minSearchTime=2) is not None):
logging.info("This meeting is being recorded..")
try:
x, y = pyautogui.locateCenterOnScreen(os.path.join(
IMG_PATH, 'accept_recording.png'), confidence=0.9)
pyautogui.click(x, y)
logging.info("Accepted recording..")
except TypeError:
logging.error("Could not accept recording!")
# Check if host is sharing poll results at the beginning
| |
JOIN (
SELECT
"Animal_2".uuid AS uuid,
array_agg("Location_1".name) AS fold_output_name
FROM schema_1."Animal" AS "Animal_2"
JOIN schema_1."Location" AS "Location_1" ON "Animal_2".lives_in = "Location_1".uuid
GROUP BY
"Animal_2".uuid
) AS folded_subquery_1
ON "Animal_1".uuid = folded_subquery_1.uuid
"""
expected_mssql = """
SELECT
[Animal_1].name AS animal_name,
folded_subquery_1.fold_output_name AS homes_list
FROM
db_1.schema_1.[Animal] AS [Animal_1]
JOIN(
SELECT
[Animal_2].uuid AS uuid,
coalesce((
SELECT
'|' + coalesce(
REPLACE(
REPLACE(
REPLACE([Location_1].name, '^', '^e'),
'~',
'^n'),
'|',
'^d'),
'~')
FROM
db_1.schema_1.[Location] AS [Location_1]
WHERE
[Animal_2].lives_in = [Location_1].uuid FOR XML PATH('')
), '') AS fold_output_name
FROM
db_1.schema_1.[Animal] AS [Animal_2]
) AS folded_subquery_1 ON [Animal_1].uuid = folded_subquery_1.uuid
"""
expected_match = SKIP_TEST
expected_gremlin = SKIP_TEST
expected_cypher = SKIP_TEST
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
def test_fold_after_recurse(self) -> None:
# This is a regression test, checking that:
# - the fold subquery picks the right column of the recursive cte to join to
# - the recursive CTE exposes the columns needed to perform the folded traverse
#
# Testing in any of the SQL backends is sufficient.
test_data = test_input_data.fold_after_recurse()
expected_postgresql = SKIP_TEST
expected_mssql = """
WITH anon_1(lives_in, parent, uuid, __cte_key, __cte_depth) AS (
SELECT
[Animal_2].lives_in AS lives_in,
[Animal_2].parent AS parent,
[Animal_2].uuid AS uuid,
[Animal_2].uuid AS __cte_key,
0 AS __cte_depth
FROM
db_1.schema_1.[Animal] AS [Animal_2]
UNION ALL
SELECT
[Animal_3].lives_in AS lives_in,
[Animal_3].parent AS parent,
[Animal_3].uuid AS uuid,
anon_1.__cte_key AS __cte_key,
anon_1.__cte_depth + 1 AS __cte_depth
FROM
anon_1
JOIN db_1.schema_1.[Animal] AS [Animal_3] ON
anon_1.uuid = [Animal_3].parent
WHERE
anon_1.__cte_depth < 3)
SELECT
[Animal_1].name AS animal_name,
folded_subquery_1.fold_output_name AS homes_list
FROM
db_1.schema_1.[Animal] AS [Animal_1]
JOIN anon_1
ON [Animal_1].uuid = anon_1.__cte_key
JOIN (
SELECT
anon_2.uuid AS uuid,
coalesce((
SELECT
'|' + coalesce(
REPLACE(
REPLACE(
REPLACE([Location_1].name, '^', '^e'),
'~',
'^n'),
'|',
'^d'),
'~')
FROM
db_1.schema_1.[Location] AS [Location_1]
WHERE
anon_2.lives_in = [Location_1].uuid FOR XML PATH ('')),
'') AS fold_output_name
FROM anon_1 AS anon_2
) AS folded_subquery_1
ON anon_1.uuid = folded_subquery_1.uuid
"""
expected_match = SKIP_TEST
expected_gremlin = SKIP_TEST
expected_cypher = SKIP_TEST
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
def test_fold_on_two_output_variables(self) -> None:
test_data = test_input_data.fold_on_two_output_variables()
expected_postgresql = """
SELECT
"Animal_1".name AS animal_name,
coalesce(folded_subquery_1.fold_output_color, ARRAY[]::VARCHAR[]) AS child_color_list,
coalesce(
folded_subquery_1.fold_output_name,
ARRAY [] :: VARCHAR []
) AS child_names_list
FROM
schema_1."Animal" AS "Animal_1"
JOIN (
SELECT
"Animal_2".uuid AS uuid,
array_agg("Animal_3".name) AS fold_output_name,
array_agg("Animal_3".color) AS fold_output_color
FROM schema_1."Animal" AS "Animal_2"
JOIN schema_1."Animal" AS "Animal_3" ON "Animal_2".uuid = "Animal_3".parent
GROUP BY
"Animal_2".uuid
) AS folded_subquery_1
ON "Animal_1".uuid = folded_subquery_1.uuid
"""
expected_mssql = NotImplementedError
expected_match = SKIP_TEST
expected_gremlin = SKIP_TEST
expected_cypher = SKIP_TEST
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
def test_fold_same_edge_type_in_different_locations(self) -> None:
test_data = test_input_data.fold_same_edge_type_in_different_locations()
expected_postgresql = """
SELECT
"Animal_1".name AS animal_name,
coalesce(folded_subquery_1.fold_output_name, ARRAY[]::VARCHAR[])
AS child_names_list,
coalesce(folded_subquery_2.fold_output_name, ARRAY[]::VARCHAR[])
AS sibling_and_self_names_list
FROM
schema_1."Animal" AS "Animal_1"
JOIN (
SELECT
"Animal_2".uuid AS uuid,
array_agg("Animal_3".name) AS fold_output_name
FROM
schema_1."Animal" AS "Animal_2"
JOIN schema_1."Animal" AS "Animal_3" ON "Animal_2".uuid = "Animal_3".parent
GROUP BY
"Animal_2".uuid
) AS folded_subquery_1 ON "Animal_1".uuid = folded_subquery_1.uuid
JOIN schema_1."Animal" AS "Animal_4" ON "Animal_1".parent = "Animal_4".uuid
JOIN (
SELECT
"Animal_5".uuid AS uuid,
array_agg("Animal_6".name) AS fold_output_name
FROM
schema_1."Animal" AS "Animal_5"
JOIN schema_1."Animal" AS "Animal_6" ON "Animal_5".uuid = "Animal_6".parent
GROUP BY
"Animal_5".uuid
) AS folded_subquery_2 ON "Animal_4".uuid = folded_subquery_2.uuid
"""
expected_mssql = """
SELECT
[Animal_1].name AS animal_name,
folded_subquery_1.fold_output_name AS child_names_list,
folded_subquery_2.fold_output_name AS sibling_and_self_names_list
FROM
db_1.schema_1.[Animal] AS [Animal_1]
JOIN(
SELECT
[Animal_2].uuid AS uuid,
coalesce((
SELECT
'|' + coalesce(
REPLACE(
REPLACE(
REPLACE([Animal_3].name, '^', '^e'),
'~',
'^n'),
'|',
'^d'),
'~')
FROM
db_1.schema_1.[Animal] AS [Animal_3]
WHERE
[Animal_2].uuid = [Animal_3].parent FOR XML PATH('')),
'') AS fold_output_name
FROM
db_1.schema_1.[Animal] AS [Animal_2]
) AS folded_subquery_1 ON [Animal_1].uuid = folded_subquery_1.uuid
JOIN db_1.schema_1.[Animal] AS [Animal_4] ON [Animal_1].parent = [Animal_4].uuid
JOIN(
SELECT
[Animal_5].uuid AS uuid,
coalesce((
SELECT
'|' + coalesce(
REPLACE(
REPLACE(
REPLACE([Animal_6].name, '^', '^e'),
'~',
'^n'),
'|',
'^d'),
'~')
FROM
db_1.schema_1.[Animal] AS [Animal_6]
WHERE
[Animal_5].uuid = [Animal_6].parent FOR XML PATH('')),
'') AS fold_output_name
FROM
db_1.schema_1.[Animal] AS [Animal_5]
) AS folded_subquery_2 ON [Animal_4].uuid = folded_subquery_2.uuid
"""
expected_match = SKIP_TEST
expected_gremlin = SKIP_TEST
expected_cypher = SKIP_TEST
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
def test_fold_after_traverse(self) -> None:
test_data = test_input_data.fold_after_traverse()
expected_match = """
SELECT
Animal___1.name AS `animal_name`,
$Animal__in_Animal_ParentOf___1___out_Animal_ParentOf.name
AS `sibling_and_self_names_list`
FROM (
MATCH {{
class: Animal,
as: Animal___1
}}.in('Animal_ParentOf') {{
class: Animal,
as: Animal__in_Animal_ParentOf___1
}}
RETURN $matches
) LET
$Animal__in_Animal_ParentOf___1___out_Animal_ParentOf =
Animal__in_Animal_ParentOf___1.out("Animal_ParentOf").asList()
"""
expected_gremlin = """
g.V('@class', 'Animal')
.as('Animal___1')
.in('Animal_ParentOf')
.as('Animal__in_Animal_ParentOf___1')
.back('Animal___1')
.transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
animal_name: m.Animal___1.name,
sibling_and_self_names_list: (
(m.Animal__in_Animal_ParentOf___1.out_Animal_ParentOf == null) ? [] : (
m.Animal__in_Animal_ParentOf___1.out_Animal_ParentOf.collect{
entry -> entry.inV.next().name
}
)
)
])}
"""
expected_postgresql = """
SELECT
"Animal_1".name AS animal_name,
coalesce(folded_subquery_1.fold_output_name, ARRAY[]::VARCHAR[])
AS sibling_and_self_names_list
FROM
schema_1."Animal" AS "Animal_1"
JOIN schema_1."Animal" AS "Animal_2"
ON "Animal_1".parent = "Animal_2".uuid
JOIN(
SELECT
"Animal_3".uuid AS uuid,
array_agg("Animal_4".name) AS fold_output_name
FROM schema_1."Animal" AS "Animal_3"
JOIN schema_1."Animal" AS "Animal_4"
ON "Animal_3".uuid = "Animal_4".parent
GROUP BY "Animal_3".uuid
) AS folded_subquery_1
ON "Animal_2".uuid = folded_subquery_1.uuid
"""
expected_cypher = """
MATCH (Animal___1:Animal)
MATCH (Animal___1)<-[:Animal_ParentOf]-(Animal__in_Animal_ParentOf___1:Animal)
OPTIONAL MATCH
(Animal__in_Animal_ParentOf___1)-[:Animal_ParentOf]->
(Animal__in_Animal_ParentOf__out_Animal_ParentOf___1:Animal)
WITH
Animal___1 AS Animal___1,
Animal__in_Animal_ParentOf___1 AS Animal__in_Animal_ParentOf___1,
collect(Animal__in_Animal_ParentOf__out_Animal_ParentOf___1) AS
collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf___1
RETURN
Animal___1.name AS `animal_name`,
[x IN collected_Animal__in_Animal_ParentOf__out_Animal_ParentOf___1 | x.name] AS
`sibling_and_self_names_list`
"""
expected_mssql = """
SELECT
[Animal_1].name AS animal_name,
folded_subquery_1.fold_output_name AS sibling_and_self_names_list
FROM
db_1.schema_1.[Animal] AS [Animal_1]
JOIN db_1.schema_1.[Animal] AS [Animal_2] ON [Animal_1].parent = [Animal_2].uuid
JOIN(
SELECT
[Animal_3].uuid AS uuid,
coalesce((
SELECT
'|' + coalesce(
REPLACE(
REPLACE(
REPLACE([Animal_4].name, '^', '^e'),
'~',
'^n'),
'|',
'^d'),
'~')
FROM
db_1.schema_1.[Animal] AS [Animal_4]
WHERE
[Animal_3].uuid = [Animal_4].parent FOR XML PATH('')),
'') AS fold_output_name
FROM
db_1.schema_1.[Animal] AS [Animal_3]
) AS folded_subquery_1 ON [Animal_2].uuid = folded_subquery_1.uuid
"""
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
def test_fold_after_traverse_different_types(self) -> None:
test_data = test_input_data.fold_after_traverse_different_types()
expected_mssql = """
SELECT
[Animal_1].name AS animal_name,
folded_subquery_1.fold_output_name AS neighbor_and_self_names_list
FROM
db_1.schema_1.[Animal] AS [Animal_1]
JOIN db_1.schema_1.[Location] AS [Location_1] ON [Animal_1].lives_in = [Location_1].uuid
JOIN(
SELECT
[Location_2].uuid AS uuid,
coalesce((
SELECT
'|' + coalesce(
REPLACE(
REPLACE(
REPLACE([Animal_2].name, '^', '^e'),
'~',
'^n'),
'|',
'^d'),
'~')
FROM
db_1.schema_1.[Animal] AS [Animal_2]
WHERE
[Location_2].uuid = [Animal_2].lives_in FOR XML PATH('')),
'') AS fold_output_name
FROM
db_1.schema_1.[Location] AS [Location_2]
) AS folded_subquery_1 ON [Location_1].uuid = folded_subquery_1.uuid
"""
expected_postgresql = """
SELECT
"Animal_1".name AS animal_name,
coalesce(folded_subquery_1.fold_output_name, ARRAY[]::VARCHAR[])
AS neighbor_and_self_names_list
FROM schema_1."Animal" AS "Animal_1"
JOIN schema_1."Location" AS "Location_1"
ON "Animal_1".lives_in = "Location_1".uuid
JOIN (
SELECT
"Location_2".uuid AS uuid,
array_agg("Animal_2".name) AS fold_output_name
FROM schema_1."Location" AS "Location_2"
JOIN schema_1."Animal" AS "Animal_2"
ON "Location_2".uuid = "Animal_2".lives_in
GROUP BY "Location_2".uuid
) AS folded_subquery_1
ON "Location_1".uuid = folded_subquery_1.uuid
"""
expected_match = SKIP_TEST
expected_gremlin = SKIP_TEST
expected_cypher = SKIP_TEST
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
def test_fold_after_traverse_no_output_on_root(self) -> None:
test_data = test_input_data.fold_after_traverse_no_output_on_root()
expected_postgresql = """
SELECT
"Location_1".name AS location_name,
coalesce(folded_subquery_1.fold_output_name, ARRAY[]::VARCHAR[])
AS neighbor_and_self_names_list
FROM schema_1."Animal" AS "Animal_1"
JOIN schema_1."Location" AS "Location_1"
ON "Animal_1".lives_in = "Location_1".uuid
JOIN (
SELECT
"Location_2".uuid AS uuid,
array_agg("Animal_2".name) AS fold_output_name
FROM schema_1."Location" AS "Location_2"
JOIN schema_1."Animal" AS "Animal_2"
ON "Location_2".uuid = "Animal_2".lives_in
GROUP BY "Location_2".uuid
) AS folded_subquery_1
ON "Location_1".uuid = folded_subquery_1.uuid
"""
expected_mssql = """
SELECT
[Location_1].name AS location_name,
folded_subquery_1.fold_output_name AS neighbor_and_self_names_list
FROM
db_1.schema_1.[Animal] AS [Animal_1]
JOIN db_1.schema_1.[Location] AS [Location_1] ON [Animal_1].lives_in = [Location_1].uuid
JOIN(
SELECT
[Location_2].uuid AS uuid,
coalesce((
SELECT
'|' + coalesce(
REPLACE(
REPLACE(
REPLACE([Animal_2].name, '^', '^e'),
'~',
'^n'),
'|',
'^d'),
'~')
FROM
db_1.schema_1.[Animal] AS [Animal_2]
WHERE
[Location_2].uuid = [Animal_2].lives_in FOR XML PATH('')),
'') AS fold_output_name
FROM
db_1.schema_1.[Location] AS [Location_2]
) AS folded_subquery_1 ON [Location_1].uuid = folded_subquery_1.uuid
"""
expected_match = SKIP_TEST
expected_gremlin = SKIP_TEST
expected_cypher = SKIP_TEST
check_test_data(
self,
test_data,
expected_match,
expected_gremlin,
expected_mssql,
expected_cypher,
expected_postgresql,
)
def test_fold_and_traverse(self) -> None:
test_data = test_input_data.fold_and_traverse()
expected_match = """
SELECT
Animal___1.name AS `animal_name`,
$Animal___1___in_Animal_ParentOf.name
AS `sibling_and_self_names_list`
FROM (
MATCH {{
class: Animal,
as: Animal___1
}}
RETURN $matches
) LET
$Animal___1___in_Animal_ParentOf =
Animal___1.in("Animal_ParentOf").out("Animal_ParentOf").asList()
"""
expected_gremlin = """
g.V('@class', 'Animal')
.as('Animal___1')
.transform{it, m -> new com.orientechnologies.orient.core.record.impl.ODocument([
animal_name: m.Animal___1.name,
sibling_and_self_names_list: (
(m.Animal___1.in_Animal_ParentOf == null) ? [] : (
m.Animal___1.in_Animal_ParentOf
.collect{
entry -> entry.outV.next()
}
.collectMany{
entry -> entry.out_Animal_ParentOf
.collect{
edge -> edge.inV.next()
}
}
.collect{entry -> entry.name}
))
])}
"""
expected_mssql = """
SELECT
[Animal_1].name AS animal_name,
folded_subquery_1.fold_output_name AS sibling_and_self_names_list
FROM db_1.schema_1.[Animal] AS [Animal_1]
JOIN (
SELECT
[Animal_2].uuid AS uuid,
coalesce((
SELECT
'|' + coalesce(
REPLACE(
REPLACE(
REPLACE([Animal_3].name, '^', '^e'),
'~', | |
import numpy as np
import os
import tensorflow as tf
import tensorflow_addons as tfa
from sklearn.metrics import precision_recall_fscore_support, classification_report, confusion_matrix
from .data_utils import minibatches, pad_sequences, get_chunks, PAD
from .general_utils import Progbar
from .base_model import BaseModel
tf.compat.v1.disable_eager_execution()
class HANNModel(BaseModel):
"""Specialized class of Model for NER"""
def __init__(self, config):
super(HANNModel, self).__init__(config)
self.idx_to_tag = {idx: tag for tag, idx in
list(self.config.vocab_tags.items())}
self.initializer = tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")
self.regularizer = tf.keras.regularizers.l2(l=0.5 * (self.config.l2_reg_lambda))
self.config = config
def add_placeholders(self):
"""Define placeholders = entries to computational graph"""
# shape = (batch size)
self.document_lengths = tf.compat.v1.placeholder(tf.int32, shape=[None],
name="document_lengths")
# shape = (batch size, max length of documents in batch (how many sentences in one abstract), max length of sentence in batch)
self.word_ids = tf.compat.v1.placeholder(tf.int32, shape=[None, None, None],
name="word_ids")
# shape = (batch_size, max_length of sentence)
self.sentence_lengths = tf.compat.v1.placeholder(tf.int32, shape=[None, None],
name="word_lengths")
# shape = (batch size, max length of documents, max length of sentence, max length of word)
self.char_ids = tf.compat.v1.placeholder(tf.int32, shape=[None, None, None, None],
name="char_ids")
# shape = (batch_size, max_length of sentence)
self.word_lengths = tf.compat.v1.placeholder(tf.int32, shape=[None, None, None],
name="word_lengths")
# shape = (batch size, max length of sentence in batch)
self.labels = tf.compat.v1.placeholder(tf.int32, shape=[None, None],
name="labels")
# hyper parameters
self.dropout = tf.compat.v1.placeholder(dtype=tf.float32, shape=[],
name="dropout")
self.lr = tf.compat.v1.placeholder(dtype=tf.float32, shape=[],
name="lr")
def get_feed_dict(self, words, labels=None, lr=None, dropout=None, pad_tok=0):
"""Given some data, pad it and build a feed dictionary
Args:
words: list of sentences. A sentence is a list of ids of a list of
words. A word is a list of ids
labels: list of ids
lr: (float) learning rate
dropout: (float) keep prob
Returns:
dict {placeholder: value}
"""
# perform padding of the given data
if self.config.use_chars:
char_ids, word_ids = [], []
for abstract in words:
char_ids_abstract, word_ids_abstract = [], []
for sent in abstract:
char_id, word_id = list(zip(*sent))
char_ids_abstract += [list(char_id)]
word_ids_abstract += [list(word_id)]
char_ids += [char_ids_abstract]
word_ids += [word_ids_abstract]
_, document_lengths = pad_sequences(word_ids, pad_tok=pad_tok, nlevels=1)
word_ids, sentence_lengths = pad_sequences(word_ids, pad_tok=pad_tok, nlevels=2)
char_ids, word_lengths = pad_sequences(char_ids, pad_tok=pad_tok, nlevels=3)
else:
_, document_lengths = pad_sequences(words, pad_tok=pad_tok, nlevels=1)
word_ids, sentence_lengths = pad_sequences(words, pad_tok=pad_tok, nlevels=2)
# build feed dictionary
feed = {
self.word_ids: word_ids,
self.document_lengths: document_lengths,
self.sentence_lengths: sentence_lengths
}
if self.config.use_chars:
feed[self.char_ids] = char_ids
feed[self.word_lengths] = word_lengths
if labels is not None:
labels, _ = pad_sequences(labels, 0, nlevels=1)
feed[self.labels] = labels
if lr is not None:
feed[self.lr] = lr
if dropout is not None:
feed[self.dropout] = dropout
return feed, document_lengths
def add_word_embeddings_op(self, word_ids, word_lengths, char_ids, dropout):
"""Defines self.word_embeddings
If self.config.embeddings is not None and is a np array initialized
with pre-trained word vectors, the word embeddings is just a look-up
and we don't train the vectors. Otherwise, a random matrix with
the correct shape is initialized.
"""
with tf.compat.v1.variable_scope("words", reuse=tf.compat.v1.AUTO_REUSE):
if self.config.embeddings is None:
self.logger.info("WARNING: randomly initializing word vectors")
_word_embeddings = tf.compat.v1.get_variable(
name="_word_embeddings",
dtype=tf.float32,
shape=[self.config.nwords, self.config.dim_word])
else:
_word_embeddings = tf.Variable(
self.config.embeddings,
name="_word_embeddings",
dtype=tf.float32,
trainable=self.config.train_embeddings)
word_embeddings = tf.nn.embedding_lookup(params=_word_embeddings,
ids=word_ids, name="word_embeddings")
if self.config.use_chars:
with tf.compat.v1.variable_scope("chars", reuse=tf.compat.v1.AUTO_REUSE):
# get char embeddings matrix
_char_embeddings = tf.compat.v1.get_variable(
name="_char_embeddings",
dtype=tf.float32,
shape=[self.config.nchars, self.config.dim_char])
char_embeddings = tf.nn.embedding_lookup(params=_char_embeddings,
ids=char_ids, name="char_embeddings")
# put the time dimension on axis=1
s = tf.shape(input=char_embeddings)
char_embeddings = tf.reshape(char_embeddings,
shape=[s[0]*s[1]*s[2], s[-2], self.config.dim_char])
word_lengths = tf.reshape(word_lengths, shape=[s[0]*s[1]*s[2]])
# bi lstm on chars
cell_fw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
cell_bw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_char,
state_is_tuple=True)
_output = tf.compat.v1.nn.bidirectional_dynamic_rnn(
cell_fw, cell_bw, char_embeddings,
sequence_length=word_lengths, dtype=tf.float32)
# read and concat output
_, ((_, output_fw), (_, output_bw)) = _output
output = tf.concat([output_fw, output_bw], axis=-1)
# shape = (batch size, max sentence length, char hidden size)
output = tf.reshape(output,
shape=[s[0], s[1], s[2], 2*self.config.hidden_size_char])
word_embeddings = tf.concat([word_embeddings, output], axis=-1)
word_embeddings = tf.nn.dropout(word_embeddings, 1 - (dropout))
return word_embeddings
    def add_logits_op(self, word_embeddings, sentence_lengths, document_lengths, dropout):
        """Defines self.logits
        For each word in each sentence of the batch, it corresponds to a vector
        of scores, of dimension equal to the number of tags.

        Two sentence encoders are selected by config flags: a multi-width CNN
        (optionally followed by a bi-LSTM over the conv features or by
        attention pooling) when ``config.use_cnn`` is set, otherwise a
        bi-LSTM/bi-GRU (optionally with attention pooling). An optional
        document-level bi-LSTM can be stacked on top when
        ``config.use_document_level`` is set.

        :param word_embeddings: embedding tensor from add_word_embeddings_op;
            indexing below uses s[0], s[1], s[2] and s[-2], so rank >= 3 is
            assumed — presumably (docs, sentences, words, dim); TODO confirm
            against the caller.
        :param sentence_lengths: word counts, flattened below to one entry
            per (doc, sentence) pair.
        :param document_lengths: sentence counts per document; only used by
            the document-level bi-LSTM.
        :param dropout: dropout control; applied as
            ``tf.nn.dropout(x, 1 - dropout)``, so it is treated as a keep
            probability here — TODO confirm callers pass it that way.
        :return: score tensor of shape [-1, s[1], ntags].
        """
        # Dynamic shape of the input; s[0]*s[1] merges the doc and sentence
        # axes so the sentence encoders see a flat batch of sentences.
        s = tf.shape(input=word_embeddings)
        # Per-word feature size depends on whether char-level features were
        # concatenated in add_word_embeddings_op.
        if self.config.use_chars:
            word_embeddings_dim = self.config.dim_word + 2 * self.config.hidden_size_char
        else:
            word_embeddings_dim = self.config.dim_word
        sentence_lengths = tf.reshape(sentence_lengths, shape=[s[0]*s[1]])
        if self.config.use_cnn:
            # --- CNN sentence encoder ---
            # Add a trailing channel axis for conv2d: (batch, words, dim, 1).
            word_embeddings = tf.reshape(word_embeddings,
                    shape=[s[0]*s[1], s[-2], word_embeddings_dim, 1])
            if self.config.use_attention:
                # Attention parameters shared across all filter widths.
                with tf.compat.v1.variable_scope("conv-attention", reuse=tf.compat.v1.AUTO_REUSE):
                    W_word = tf.compat.v1.get_variable("weight", dtype=tf.float32,
                            initializer=self.initializer, regularizer=self.regularizer,
                            shape=[self.config.cnn_filter_num, self.config.attention_size])
                    b_word = tf.compat.v1.get_variable("bias", shape=[self.config.attention_size],
                            dtype=tf.float32, initializer=tf.compat.v1.zeros_initializer())
                    # "U-noreg": deliberately not regularized, unlike W_word.
                    U_word = tf.compat.v1.get_variable("U-noreg", dtype=tf.float32,
                            initializer=self.initializer,
                            shape=[self.config.attention_size, self.config.attention_hop])
            if self.config.use_cnn_rnn:
                # One cell pair reused across all conv branches below.
                with tf.compat.v1.variable_scope("cnn-rnn"):
                    cell_fw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_lstm_sentence)
                    cell_bw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_lstm_sentence)
            pooled_outputs = []
            # One conv branch per filter width; each produces one pooled
            # vector per sentence, concatenated at the end.
            for i, size in enumerate(self.config.cnn_filter_sizes):
                with tf.compat.v1.variable_scope("conv-%d" % size, reuse=tf.compat.v1.AUTO_REUSE):# , reuse=False
                    W_conv = tf.compat.v1.get_variable(name='weight', initializer=self.initializer,
                            shape=[size, word_embeddings_dim, 1, self.config.cnn_filter_num],
                            regularizer=self.regularizer)
                    b_conv = tf.compat.v1.get_variable(name='bias', initializer=tf.compat.v1.zeros_initializer(),
                            shape=[self.config.cnn_filter_num])
                    # Stride word_embeddings_dim over the feature axis, so the
                    # conv slides over words only.
                    conv = tf.nn.conv2d(input=word_embeddings, filters=W_conv, strides=[1, 1, word_embeddings_dim, 1],
                            padding="SAME")
                    h = tf.nn.tanh(tf.nn.bias_add(conv, b_conv), name="h") # bz, n, 1, dc
                    # h = tf.nn.max_pool(h,
                    #         ksize=[1, 2, 1, 1],
                    #         strides=[1, 2, 1, 1],
                    #         padding="SAME")
                    h = tf.squeeze(h, axis=2) # bz, n, dc
                    if self.config.use_cnn_rnn:
                        # Pool via the final states of a bi-LSTM over conv
                        # features.
                        _, ((_, output_fw), (_, output_bw)) = tf.compat.v1.nn.bidirectional_dynamic_rnn(
                                cell_fw, cell_bw, h,
                                sequence_length=sentence_lengths, dtype=tf.float32)
                        pooled = tf.concat([output_fw, output_bw], axis=-1) # bz, dc
                    else:
                        if self.config.use_attention:
                            # Multi-hop self-attention pooling over words.
                            U_sent = tf.tanh(tf.matmul(tf.reshape(h, shape=[-1, self.config.cnn_filter_num]),
                                    W_word) + b_word) # (bz*len, attn_size)
                            A = tf.transpose(a=tf.reshape(tf.matmul(U_sent, U_word), shape=[-1, s[2],
                                    self.config.attention_hop]), perm=[0, 2, 1]) # (bz, attn_hop, len)
                            # Mask trick: padded positions get a large negative
                            # score (mask-1 is -1 there) so softmax ~ 0.
                            A += 100000. * (tf.tile(tf.expand_dims(tf.cast(tf.sequence_mask(sentence_lengths), tf.float32), axis=1),
                                    [1, self.config.attention_hop, 1]) - 1)
                            self.A = tf.nn.softmax(A) # (bz, attn_hop, len)
                            pooled = tf.reshape(tf.einsum('aij,ajk->aik', self.A, h), shape=[-1,
                                    self.config.attention_hop*self.config.cnn_filter_num])
                        else:
                            # max pooling
                            pooled = tf.reduce_max(input_tensor=h, axis=1) # bz, dc
                    pooled_outputs.append(pooled)
            output = tf.concat(pooled_outputs, axis=-1)
            # dropout
            output = tf.nn.dropout(output, 1 - (dropout))
            # Total sentence-vector width for the projection layer.
            # NOTE(review): the non-RNN branch multiplies by attention_hop
            # even when use_attention is off — only correct then if
            # attention_hop == 1; confirm config invariants.
            if self.config.use_cnn_rnn:
                cnn_filter_tot_num = (2 * self.config.hidden_size_lstm_sentence) * len(self.config.cnn_filter_sizes)
            else:
                cnn_filter_tot_num = len(self.config.cnn_filter_sizes) * self.config.cnn_filter_num * self.config.attention_hop
            if self.config.use_document_level == True:
                # Restore the (doc, sentence, features) layout for the
                # document-level bi-LSTM.
                output = tf.reshape(output,
                        shape=[-1, s[1], cnn_filter_tot_num])
        else:
            # --- bi-RNN sentence encoder ---
            word_embeddings = tf.reshape(word_embeddings,
                    shape=[s[0]*s[1], s[-2], word_embeddings_dim])
            if self.config.use_attention:
                with tf.compat.v1.variable_scope("bi-lstm-sentence", reuse=tf.compat.v1.AUTO_REUSE):
                    if self.config.use_gru:
                        cell_fw = tf.compat.v1.nn.rnn_cell.GRUCell(self.config.hidden_size_lstm_sentence)
                        cell_bw = tf.compat.v1.nn.rnn_cell.GRUCell(self.config.hidden_size_lstm_sentence)
                    else:
                        cell_fw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_lstm_sentence)
                        cell_bw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_lstm_sentence)
                    # Full per-word outputs (not just final states) are needed
                    # for attention pooling.
                    (output_fw, output_bw), _ = tf.compat.v1.nn.bidirectional_dynamic_rnn(
                            cell_fw, cell_bw, word_embeddings,
                            sequence_length=sentence_lengths, dtype=tf.float32)
                    output = tf.concat([output_fw, output_bw], axis=-1)
                    W_word = tf.compat.v1.get_variable("weight", dtype=tf.float32,
                            initializer=self.initializer, regularizer=self.regularizer,
                            shape=[2*self.config.hidden_size_lstm_sentence, self.config.attention_size])
                    b_word = tf.compat.v1.get_variable("bias", shape=[self.config.attention_size],
                            dtype=tf.float32, initializer=tf.compat.v1.zeros_initializer())
                    U_word = tf.compat.v1.get_variable("U-noreg", dtype=tf.float32,
                            initializer=self.initializer,
                            shape=[self.config.attention_size, self.config.attention_hop])
                    output = tf.reshape(output, shape=[-1, 2*self.config.hidden_size_lstm_sentence])
                    U_sent = tf.tanh(tf.matmul(output, W_word) + b_word) # (bz*len, attn_size)
                    A = tf.transpose(a=tf.reshape(tf.matmul(U_sent, U_word), shape=[-1, s[2], self.config.attention_hop]), perm=[0, 2, 1]) # (bz, attn_hop, len)
                    # Same masking trick as the CNN-attention branch: padded
                    # positions driven to large negatives before softmax.
                    A += 100000. * (tf.tile(tf.expand_dims(tf.cast(tf.sequence_mask(sentence_lengths), tf.float32), axis=1),
                            [1, self.config.attention_hop, 1]) - 1)
                    self.A = tf.nn.softmax(A) # (bz, attn_hop, len)
                    output = tf.reshape(output, shape=[-1, s[2], 2*self.config.hidden_size_lstm_sentence]) # (bz, len, hidden_size)
                    output = tf.reshape(tf.einsum('aij,ajk->aik', self.A, output), shape=[-1,
                            self.config.attention_hop*2*self.config.hidden_size_lstm_sentence])
            else:
                with tf.compat.v1.variable_scope("bi-lstm-sentence", reuse=tf.compat.v1.AUTO_REUSE):
                    if self.config.use_gru:
                        cell_fw = tf.compat.v1.nn.rnn_cell.GRUCell(self.config.hidden_size_lstm_sentence)
                        cell_bw = tf.compat.v1.nn.rnn_cell.GRUCell(self.config.hidden_size_lstm_sentence)
                    else:
                        cell_fw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_lstm_sentence)
                        cell_bw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_lstm_sentence)
                    # Without attention only the final states are kept as the
                    # sentence vector.
                    _, ((_, output_fw), (_, output_bw)) = tf.compat.v1.nn.bidirectional_dynamic_rnn(
                            cell_fw, cell_bw, word_embeddings,
                            sequence_length=sentence_lengths, dtype=tf.float32)
                    output = tf.concat([output_fw, output_bw], axis=-1)
            # dropout
            output = tf.nn.dropout(output, 1 - (dropout))
            if self.config.use_document_level == True:
                # NOTE(review): this width assumes the attention branch (or
                # attention_hop == 1); the plain bi-RNN branch produces
                # 2*hidden_size_lstm_sentence features — verify configs.
                output = tf.reshape(output, [-1, s[1], self.config.attention_hop*2*self.config.hidden_size_lstm_sentence])
        if self.config.use_document_level == True:
            # --- document-level encoder over sentence vectors ---
            with tf.compat.v1.variable_scope("bi-lstm-document", reuse=tf.compat.v1.AUTO_REUSE):
                cell_fw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_lstm_document)
                cell_bw = tf.compat.v1.nn.rnn_cell.LSTMCell(self.config.hidden_size_lstm_document)
                (output_fw, output_bw), _ = tf.compat.v1.nn.bidirectional_dynamic_rnn(
                        cell_fw, cell_bw, output,
                        sequence_length=document_lengths, dtype=tf.float32)
                output = tf.concat([output_fw, output_bw], axis=-1)
                # dropout
                output = tf.nn.dropout(output, 1 - (dropout))
                output = tf.reshape(output, shape=[-1, 2*self.config.hidden_size_lstm_document])
        # Width of the vector fed to the final projection, mirroring the
        # branch taken above.
        if self.config.use_document_level == True:
            hidden_size = 2 * self.config.hidden_size_lstm_document
        else:
            if self.config.use_cnn:
                hidden_size = cnn_filter_tot_num
            else:
                hidden_size = self.config.attention_hop * 2 * self.config.hidden_size_lstm_sentence
        # Final linear projection to per-tag scores.
        with tf.compat.v1.variable_scope("proj", reuse=tf.compat.v1.AUTO_REUSE):
            W_infer = tf.compat.v1.get_variable("weight", dtype=tf.float32,
                    initializer=self.initializer, regularizer=self.regularizer,
                    shape=[hidden_size, self.config.ntags])
            b_infer = tf.compat.v1.get_variable("bias", shape=[self.config.ntags],
                    dtype=tf.float32, initializer=tf.compat.v1.zeros_initializer())
            pred = tf.matmul(output, W_infer) + b_infer
        logits = tf.reshape(pred, [-1, s[1], self.config.ntags])
        return logits
def forward(self, word_ids, char_ids, word_lengths, sentence_lengths, document_lengths, dropout):
word_embeddings = self.add_word_embeddings_op(word_ids, word_lengths, char_ids, dropout)
logits = self.add_logits_op(word_embeddings, sentence_lengths, document_lengths, dropout)
return logits
def add_pred_op(self):
"""Defines self.labels_pred
This op is defined only in the case where we don't use a CRF since in
that case we can make the prediction "in the graph" (thanks to tf
functions in other words). With theCRF, as the inference is coded
in python and not in pure tensroflow, we have to make the prediciton
outside the graph.
"""
if not self.config.use_crf:
self.labels_pred = tf.cast(tf.argmax(input=self.logits, axis=-1),
tf.int32)
def Frobenius(self, tensor):
# print(tf.rank(tensor), tf.shape(tensor))
# if tf.rank(tensor) == 3: # batched matrix
return tf.reduce_mean(input_tensor=(tf.squeeze(tf.reduce_sum(input_tensor=tensor**2, axis=[1, 2])) + 1e-10) ** 0.5)
| |
blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is no effect and one is full effect. See
common.can_mask for details.
:return: An array that contains the values of the blended arrays.
:rtype: np.ndarray
"""
m = b != 0
ab = np.zeros_like(a)
ab[m] = 1 - (1 - a[m]) / b[m]
ab[~m] = 0
return ab
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def linear_burn(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Darken by summing the two layers and subtracting one.

    Similar to multiply, but is darker, produces less saturated
    colors than color burn, and produces more contrast in the shadows.

    :param a: The existing values. This is like the bottom layer in
        a photo editing tool.
    :param b: The values to blend. This is like the top layer in a
        photo editing tool.
    :param colorize: (Optional). Whether to ensure the two images have
        the same number of color channels.
    :param fade: (Optional.) The amount the blended values should
        affect the existing values, a float between zero (no effect)
        and one (full effect). See common.can_fade for more details.
    :param mask: (Optional.) An image mask controlling how strongly
        the blend affects each existing value: a :class:`numpy.ndarray`
        of floats between zero and one. See common.can_mask for details.
    :return: An array that contains the values of the blended arrays.
    :rtype: np.ndarray
    """
    summed = a + b
    return summed - 1
# Lighter/dodge blends.
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def lighter(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Keep, per element, whichever of the two layers is lighter.

    Replaces values in the existing image with values from the
    blending image wherever the blending image's value is greater.

    :param a: The existing values. This is like the bottom layer in
        a photo editing tool.
    :param b: The values to blend. This is like the top layer in a
        photo editing tool.
    :param colorize: (Optional). Whether to ensure the two images have
        the same number of color channels.
    :param fade: (Optional.) The amount the blended values should
        affect the existing values, a float between zero (no effect)
        and one (full effect). See common.can_fade for more details.
    :param mask: (Optional.) An image mask controlling how strongly
        the blend affects each existing value: a :class:`numpy.ndarray`
        of floats between zero and one. See common.can_mask for details.
    :return: An array that contains the values of the blended arrays.
    :rtype: np.ndarray
    """
    result = a.copy()
    brighter = b > a
    result[brighter] = b[brighter]
    return result
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def screen(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Invert both layers, multiply them, and invert the result.

    This leads to overall brighter colors and is the opposite of
    multiply.

    :param a: The existing values. This is like the bottom layer in
        a photo editing tool.
    :param b: The values to blend. This is like the top layer in a
        photo editing tool.
    :param colorize: (Optional). Whether to ensure the two images have
        the same number of color channels.
    :param fade: (Optional.) The amount the blended values should
        affect the existing values, a float between zero (no effect)
        and one (full effect). See common.can_fade for more details.
    :param mask: (Optional.) An image mask controlling how strongly
        the blend affects each existing value: a :class:`numpy.ndarray`
        of floats between zero and one. See common.can_mask for details.
    :return: An array that contains the values of the blended arrays.
    :rtype: np.ndarray
    """
    inverted_product = (1 - a) * (1 - b)
    return 1 - inverted_product
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def color_dodge(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Brighten by dividing the bottom layer by the inverted top layer.

    Similar to screen, but brighter and decreases the contrast. Where
    the top layer equals one, the result saturates at one rather than
    dividing by zero.

    :param a: The existing values. This is like the bottom layer in
        a photo editing tool.
    :param b: The values to blend. This is like the top layer in a
        photo editing tool.
    :param colorize: (Optional). Whether to ensure the two images have
        the same number of color channels.
    :param fade: (Optional.) The amount the blended values should
        affect the existing values, a float between zero (no effect)
        and one (full effect). See common.can_fade for more details.
    :param mask: (Optional.) An image mask controlling how strongly
        the blend affects each existing value: a :class:`numpy.ndarray`
        of floats between zero and one. See common.can_mask for details.
    :return: An array that contains the values of the blended arrays.
    :rtype: np.ndarray
    """
    # Compute the divide-safe mask once instead of three times.
    safe = b != 1
    result = np.ones_like(a)
    result[safe] = a[safe] / (1 - b[safe])
    return result
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def linear_dodge(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Brighten by summing the two layers.

    Similar to screen but produces stronger results.

    :param a: The existing values. This is like the bottom layer in
        a photo editing tool.
    :param b: The values to blend. This is like the top layer in a
        photo editing tool.
    :param colorize: (Optional). Whether to ensure the two images have
        the same number of color channels.
    :param fade: (Optional.) The amount the blended values should
        affect the existing values, a float between zero (no effect)
        and one (full effect). See common.can_fade for more details.
    :param mask: (Optional.) An image mask controlling how strongly
        the blend affects each existing value: a :class:`numpy.ndarray`
        of floats between zero and one. See common.can_mask for details.
    :return: An array that contains the values of the blended arrays.
    :rtype: np.ndarray
    """
    return np.add(a, b)
# Inversion blends.
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def difference(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """Take the absolute value of the difference of the two layers.

    This is often useful in creating complex patterns or when
    aligning two images.

    :param a: The existing values. This is like the bottom layer in
        a photo editing tool.
    :param b: The values to blend. This is like the top layer in a
        photo editing tool.
    :param colorize: (Optional). Whether to ensure the two images have
        the same number of color channels.
    :param fade: (Optional.) The amount the blended values should
        affect the existing values, a float between zero (no effect)
        and one (full effect). See common.can_fade for more details.
    :param mask: (Optional.) An image mask controlling how strongly
        the blend affects each existing value: a :class:`numpy.ndarray`
        of floats between zero and one. See common.can_mask for details.
    :return: An array that contains the values of the blended arrays.
    :rtype: np.ndarray
    """
    delta = a - b
    return np.abs(delta)
@will_clip
@can_mask
@can_fade
@will_match_size
@will_colorize
def exclusion(a: np.ndarray, b: np.ndarray) -> np.ndarray:
"""Similar to difference, with the result tending to gray
rather than black.
:param a: The existing values. This is like the bottom layer in
a photo editing tool.
:param b: The values to blend. This is like the top layer in a
photo editing tool.
:param colorize: (Optional). Whether to ensure the two images have
the same number of color channels.
:param fade: (Optional.) The amount the blended values should
affect the existing values. This is a float between zero
and one, where zero is no effect and one is full effect.
See common.can_fade for more details.
:param mask: (Optional.) An image mask that is used to determine
the effect the blend should have on the existing values.
This is a :class:numpy.ndarray of floats between zero and
one, where zero is |