Column            Type      Range / classes
repo_name         string    lengths 5 to 92
path              string    lengths 4 to 221
copies            string    19 values
size              string    lengths 4 to 6
content           string    lengths 766 to 896k
license           string    15 values
hash              int64     -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean         float64   6.51 to 99.9
line_max          int64     32 to 997
alpha_frac        float64   0.25 to 0.96
autogenerated     bool      1 class
ratio             float64   1.5 to 13.6
config_test       bool      2 classes
has_no_keywords   bool      2 classes
few_assignments   bool      1 class
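Each record below carries one source file plus its metadata in the columns listed above. As a minimal sketch of reading such records, assuming the dump is published as a Hugging Face dataset loadable with the `datasets` library; the identifier "user/python-code-dump" is a placeholder, not the real dataset name:

# Hypothetical sketch: load a dataset with the schema above and inspect one record.
# Assumption: "user/python-code-dump" is a placeholder identifier, not the actual dataset name.
from datasets import load_dataset

ds = load_dataset("user/python-code-dump", split="train")

# Keep only MIT-licensed records; every record carries the full source file in "content".
mit_rows = ds.filter(lambda row: row["license"] == "mit")

row = mit_rows[0]
print(row["repo_name"], row["path"], row["size"])
print(row["content"][:200])  # first 200 characters of the stored source file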
repo_name: diplomacy/research
path: diplomacy_research/models/layers/noisy_networks.py
copies: 1
size: 4039
content:

# ==============================================================================
# Copyright 2019 - Philip Paquette
#
# NOTICE: Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# ==============================================================================
""" Noisy Networks - Converts variables in a graph to their noisy equivalent """
from math import sqrt
import sys
assert 'tensorflow' in sys.modules, 'You need to import TF before importing this module.'
from diplomacy_research.utils.tensorflow import tf
from diplomacy_research.utils.tensorflow import graph_editor


def convert_to_noisy_variables(variables, activation=None):
    """ Converts a list of variables to noisy variables
        :param variables: A list of variables to make noisy
        :param activation: Optional. The activation function to use on the linear noisy transformation
        :return: Nothing, but modifies the graph in-place

        Reference: 1706.10295 - Noisy Networks for exploration
    """
    if tf.get_collection(tf.GraphKeys.TRAIN_OP):
        raise RuntimeError('You must call convert_to_noisy_variables before applying an optimizer on the graph.')

    graph = tf.get_default_graph()
    if not isinstance(variables, list):
        variables = list(variables)

    # Replacing each variable
    for variable in variables:
        variable_read_op = _get_variable_read_op(variable, graph)
        variable_outputs = _get_variable_outputs(variable_read_op, graph)
        variable_scope = variable.name.split(':')[0]
        variable_shape = variable.shape.as_list()
        fan_in = variable_shape[0]

        # Creating noisy variables
        with tf.variable_scope(variable_scope + '_noisy'):
            with tf.device(variable.device):
                s_init = tf.constant_initializer(0.5 / sqrt(fan_in))
                noisy_u = tf.identity(variable, name='mu')
                noisy_s = tf.get_variable(name='sigma',
                                          shape=variable.shape,
                                          dtype=tf.float32,
                                          initializer=s_init,
                                          caching_device=variable._caching_device)  # pylint: disable=protected-access
                noise = tf.random.normal(shape=variable_shape)
                replaced_var = noisy_u + noisy_s * noise
                replaced_var = activation(replaced_var) if activation else replaced_var

        # Replacing in-place
        inputs_index = [var_index
                        for var_index, var_input in enumerate(graph_editor.sgv(*variable_outputs).inputs)
                        if var_input.name.split(':')[0] == variable_read_op.name.split(':')[0]]
        graph_editor.connect(graph_editor.sgv(replaced_var.op),
                             graph_editor.sgv(*variable_outputs).remap_inputs(inputs_index),
                             disconnect_first=True)


def _get_variable_read_op(variable, graph):
    """ Returns the /read operation for a variable """
    return graph.get_operation_by_name(variable.name.split(':')[0] + '/read')


def _get_variable_outputs(variable_read_op, graph):
    """ Returns the list of tensors that have the variable as input """
    outputs = []
    for graph_op in graph.get_operations():
        for var_input in graph_op.inputs._inputs:  # pylint: disable=protected-access
            if var_input in variable_read_op.outputs:
                outputs += [graph_op]
    return outputs

license: mit
hash: 3,073,660,035,282,352,000
line_mean: 48.256098
line_max: 122
alpha_frac: 0.615499
autogenerated: false
ratio: 4.428728
config_test: false
has_no_keywords: false
few_assignments: false
repo_name: plang85/rough_surfaces
path: rough_surfaces/surface.py
copies: 1
size: 2746
content:

import numpy as np


class Surface(np.ndarray):
    """
    One- or two-dimensional surface height representation.

    The assumption upon which this framework is based is a uniform lattice size in both
    directions. This is tightly integrated here. 'Surface' is the fundamental class that most
    modules build upon. It usually represents the model or computational domain, as it may
    discretize either individual or composite surfaces, i.e., rough surfaces and aperture fields.

    Standard initialization is from a two-dimensional ndarray and lattice size:

    >>> import numpy as np
    >>> N, dxy = 100, 0.1
    >>> h = np.zeros((N,N))
    >>> s = Surface(h, dxy)
    >>> length(s) # edge length in x-direction
    10.0
    >>> length(s, 1) # edge length in y-direction
    10.0

    Surfaces can also be one-dimensional, e.g., represent traces or cross-sections:

    >>> import numpy as np
    >>> N, dxy = 100, 0.1
    >>> h = np.zeros((N))
    >>> s = Surface(h, dxy)
    >>> length(s) # length
    10.0
    >>> length(s, 1) # there is no second axis for one-dimensional surfaces
    Traceback (most recent call last):
    ...
    IndexError: tuple index out of range
    """

    def __new__(cls, input_array, dxy):
        obj = np.asarray(input_array).view(cls)
        obj.dxy = float(dxy)
        return obj

    def __array_finalize__(self, obj):
        # Propagate the lattice size to views and slices; obj is None only on explicit construction.
        if obj is None:
            return
        self.dxy = getattr(obj, 'dxy', None)


def rms(surface):
    """Returns root-mean-square roughness [L]."""
    return np.sqrt(np.mean(surface**2))


def length(surface, axis=0):
    """Returns length [L] of surface in x- or y-direction, for axis=0 and 1, respectively."""
    return surface.shape[axis] * surface.dxy


def nominal_area(surface):
    """Returns length() [L] for 1D, area [L^2] for 2D."""
    a = 1.0
    for i in range(len(surface.shape)):
        a *= length(surface, i)
    return a


def shift_to_zero_mean(surface):
    """Returns shifted surface such that <h> = 0."""
    return Surface(surface - np.mean(surface), surface.dxy)


def mean_aperture(surface):
    """Composite surface assumption: mean of difference field to highest point."""
    return np.mean(np.abs(np.subtract(surface, np.max(surface))))


def pore_volume(surface):
    """Composite surface assumption: mean aperture times area (2D-->[L^3]) or length (1D-->[L^2])."""
    return mean_aperture(surface) * nominal_area(surface)


def scale_to_rms(surface, rms_target):
    """
    Scales height to fit target property, which must be name of scalar returning method.
    """
    rms_current = rms(surface)
    return Surface(surface * (rms_target / rms_current), surface.dxy)


if __name__ == '__main__':
    import doctest
    doctest.testmod()

license: mit
hash: 1,258,035,149,484,728,000
line_mean: 29.853933
line_max: 102
alpha_frac: 0.639476
autogenerated: false
ratio: 3.547804
config_test: false
has_no_keywords: false
few_assignments: false
repo_name: cleemesser/eeg-hdfstorage
path: scripts/edf2eeghdf.py
copies: 1
size: 51534
content:
# -*- coding: utf-8 -*- from __future__ import division, absolute_import, print_function # py2.6 with_statement import sys import pprint import h5py import numpy as np import os.path # date related stuff import datetime import dateutil import dateutil.tz import dateutil.parser import arrow # compatibility import future from future.utils import iteritems from builtins import range # range and switch xrange -> range # from past.builtins import xrange # later, move to from builtins import import edflib import eeghdf # really need to check the original data type and then save as that datatype along with the necessary conversion factors # so can convert voltages on own # try with float32 instead? # LPCH often uses these labels for electrodes LPCH_COMMON_1020_LABELS = [ 'Fp1', 'Fp2', 'F3', 'F4', 'C3', 'C4', 'P3', 'P4', 'O1', 'O2', 'F7', 'F8', 'T3', 'T4', 'T5', 'T6', 'Fz', 'Cz', 'Pz', 'E', 'PG1', 'PG2', 'A1', 'A2', 'T1', 'T2', 'X1', 'X2', 'X3', 'X4', 'X5', 'X6', 'X7', 'EEG Mark1', 'EEG Mark2', 'Events/Markers'] # common 10-20 extended clinical (T1/T2 instead of FT9/FT10) # will need to specify these as bytes I suppose (or is this ok in utf-8 given the ascii basis) # keys should be all one case (say upper) lpch2edf_fixed_len_labels = dict( FP1='EEG Fp1 ', F7='EEG F7 ', T3='EEG T3 ', T5='EEG T5 ', O1='EEG O1 ', F3='EEG F3 ', C3='EEG C3 ', P3='EEG P3 ', FP2='EEG Fp2 ', F8='EEG F8 ', T4='EEG T4 ', T6='EEG T6 ', O2='EEG O2 ', F4='EEG F4 ', C4='EEG C4 ', P4='EEG P4 ', CZ='EEG Cz ', FZ='EEG Fz ', PZ='EEG Pz ', T1='EEG FT9 ', # maybe I should map this to FT9/T1 T2='EEG FT10 ', # maybe I should map this to FT10/T2 A1='EEG A1 ', A2='EEG A2 ', # these are often (?always) EKG at LPCH, note edfspec says use ECG instead # of EKG X1='ECG X1 ', # is this invariant? usually referenced to A1 # this is sometimes ECG but not usually (depends on how squirmy) X2='X2 ', PG1='EEG Pg1 ', PG2='EEG Pg2 ', # now the uncommon ones NZ='EEG Nz ', FPZ='EEG Fpz ', AF7='EEG AF7 ', AF8='EEG AF8 ', AF3='EEG AF3 ', AFz='EEG AFz ', AF4='EEG AF4 ', F9='EEG F9 ', # F7 F5='EEG F5 ', # F3 ='EEG F3 ', F1='EEG F1 ', # Fz F2='EEG F2 ', # F4 F6='EEG F6 ', # F8 F10='EEG F10 ', FT9='EEG FT9 ', FT7='EEG FT7 ', FC5='EEG FC5 ', FC3='EEG FC3 ', FC1='EEG FC1 ', FCz='EEG FCz ', FC2='EEG FC2 ', FC4='EEG FC4 ', FC6='EEG FC6 ', FT8='EEG FT8 ', FT10='EEG FT10 ', T9='EEG T9 ', T7='EEG T7 ', C5='EEG C5 ', # C3 above C1='EEG C1 ', # Cz above C2='EEG C2 ', # C4 ='EEG C4 ', C6='EEG C6 ', T8='EEG T8 ', T10='EEG T10 ', # A2 # T3 # T4 # T5 # T6 TP9='EEG TP9 ', TP7='EEG TP7 ', CP5='EEG CP5 ', CP3='EEG CP3 ', CP1='EEG CP1 ', CPZ='EEG CPz ', CP2='EEG CP2 ', CP4='EEG CP4 ', CP6='EEG CP6 ', TP8='EEG TP8 ', TP10='EEG TP10 ', P9='EEG P9 ', P7='EEG P7 ', P5='EEG P5 ', # P3 P1='EEG P1 ', # Pz P2='EEG P2 ', # P4 P6='EEG P6 ', P8='EEG P8 ', P10='EEG P10 ', PO7='EEG PO7 ', PO3='EEG PO3 ', POZ='EEG POz ', PO4='EEG PO4 ', PO8='EEG PO8 ', # O1 OZ='EEG Oz ', # O2 IZ='EEG Iz ', ) lpch2edf_fixed_len_labels # print("lpch2edf_fixed_len_labels::\n") # pprint.pprint(lpch2edf_fixed_len_labels) LPCH_TO_STD_LABELS_STRIP = {k: v.strip() for k, v in iteritems(lpch2edf_fixed_len_labels)} # print('LPCH_TO_STD_LABELS_STRIP::\n') # pprint.pprint(LPCH_TO_STD_LABELS_STRIP) LPCH_COMMON_1020_LABELS_to_EDF_STANDARD = { } def normalize_lpch_signal_label(label): uplabel = label.upper() if uplabel in LPCH_TO_STD_LABELS_STRIP: return LPCH_TO_STD_LABELS_STRIP[uplabel] else: return label def edf2h5_float32(fn, outfn='', hdf_dir='', anonymous=False): """ convert an edf file to hdf5 using a 
straighforward mapping convert to real-valued signals store as float32's justing getting started here --- metadata --- number_signals sample_frequency nsamples age signal_labels Post Menstrual Age """ if not outfn: base = os.path.basename(fn) base, ext = os.path.splitext(base) base = base + '.eeghdf5' outfn = os.path.join(hdf_dir, base) print('outfn:', outfn) # outfn = fn+'.eeg.h5' with edflib.EdfReader(fn) as ef: nsigs = ef.signals_in_file # again know/assume that this is uniform sampling across signals fs = [ef.samplefrequency(ii) for ii in range(nsigs)] fs0 = fs[0] if any([ fs0 != xx for xx in fs]): print("caught multiple sampling frquencies in edf files!!!") sys.exit(0) nsamples0 = ef.samples_in_file(0) print('nsigs=%s, fs0=%s, nsamples0=%s' % (nsigs, fs0, nsamples0)) # create file 'w-' -> fail if exists , w -> truncate if exists hdf = h5py.File(outfn, 'w') # use compression? yes! give it a try eegdata = hdf.create_dataset('eeg', (nsigs, nsamples0), dtype='float32', # chunks=(nsigs,fs0), chunks=True, fletcher32=True, # compression='gzip', # compression='lzf', # maxshape=(256,None) ) # no compression -> 50 MiB can view eegdata in vitables # compression='gzip' -> 27 MiB slower # compression='lzf' -> 35 MiB # compression='lzf' maxshape=(256,None) -> 36MiB # szip is unavailable patient = hdf.create_group('patient') # add meta data hdf.attrs['number_signals'] = nsigs hdf.attrs['sample_frequency'] = fs0 hdf.attrs['nsamples0'] = nsamples0 patient.attrs['gender_b'] = ef.gender_b patient.attrs['patientname'] = ef.patient_name # PHI print('birthdate: %s' % ef.birthdate_b, type(ef.birthdate_b)) # this is a string -> date (datetime) if not ef.birthdate_b: print("no birthday in this file") birthdate = None else: birthdate = dateutil.parser.parse(ef.birthdate_b) print('birthdate (date object):', birthdate_b) start_date_time = datetime.datetime( ef.startdate_year, ef.startdate_month, ef.startdate_day, ef.starttime_hour, ef.starttime_minute, ef.starttime_second) # ,tzinfo=dateutil.tz.tzlocal()) print(start_date_time) if start_date_time and birthdate: age = start_date_time - birthdate print('age:', age) else: age = None if age: patient.attrs['post_natal_age_days'] = age.days else: patient.attrs['post_natal_age_days'] = -1 # now start storing the lists of things: labels, units... 
# nsigs = len(label_list) # variable ascii string (or b'' type) str_dt = h5py.special_dtype(vlen=str) label_ds = hdf.create_dataset('signal_labels', (nsigs,), dtype=str_dt) units_ds = hdf.create_dataset('signal_units', (nsigs,), dtype=str_dt) labels = [] units = list() # signal_nsamples = [] for ii in range(nsigs): labels.append(ef.signal_label(ii)) units.append(ef.physical_dimension(ii)) # self.signal_nsamples.append(self.cedf.samples_in_file(ii)) # self.samplefreqs.append(self.cedf.samplefrequency(ii)) # eegdata.signal_labels = labels # labels are fixed length strings labels_strip = [ss.strip() for ss in labels] label_ds[:] = labels_strip units_ds[:] = units # should be more and a switch for anonymous or not # need to change this to nchunks = int(nsamples0 // fs0) samples_per_chunk = int(fs0) buf = np.zeros((nsigs, samples_per_chunk), dtype='float64') # buffer is float64_t print('nchunks: ', nchunks, 'samples_per_chunk:', samples_per_chunk) bookmark = 0 # mark where were are in samples for ii in range(nchunks): for jj in range(nsigs): # readsignal(self, signalnum, start, n, # np.ndarray[np.float64_t, ndim = 1] sigbuf) # read_phys_signal(chn, 0, nsamples[chn], v) #read_phys_signal(self, signalnum, start, n, np.ndarray[np.float64_t, ndim=1] sigbuf) print(ii,jj) ef.read_phys_signal(jj, bookmark, samples_per_chunk, buf[jj]) # readsignal converts into float # conversion from float64 to float32 eegdata[:, bookmark:bookmark + samples_per_chunk] = buf # bookmark should be ii*fs0 bookmark += samples_per_chunk left_over_samples = nsamples0 - nchunks * samples_per_chunk print('left_over_samples:', left_over_samples) if left_over_samples > 0: for jj in range(nsigs): ef.read_phys_signal(jj, bookmark, left_over_samples, buf[jj]) eegdata[:, bookmark:bookmark + left_over_samples] = buf[:, 0:left_over_samples] hdf.close() def edf_block_iter_generator( edf_file, nsamples, samples_per_chunk, dtype='int32'): """ factory to produce generators for iterating through an edf file and filling up an array from the edf with the signal data starting at 0. You choose the number of @samples_per_chunk, and number of samples to do in total @nsamples as well as the dtype. 
'int16' is reasonable as well 'int32' will handle everything though it yields -> (numpy_buffer, mark, num) numpy_buffer, mark, which is where in the file in total currently reading from num -- which is the number of samples in the buffer (per signal) to transfer """ nchan = edf_file.signals_in_file # 'int32' will work for int16 as well buf = np.zeros((nchan, samples_per_chunk), dtype=dtype) nchunks = nsamples // samples_per_chunk left_over_samples = nsamples - nchunks * samples_per_chunk mark = 0 for ii in range(nchunks): for cc in range(nchan): edf_file.read_digital_signal(cc, mark, samples_per_chunk, buf[cc]) yield (buf, mark, samples_per_chunk) mark += samples_per_chunk # print('mark:', mark) # left overs if left_over_samples > 0: for cc in range(nchan): edf_file.read_digital_signal(cc, mark, left_over_samples, buf[cc]) yield (buf[:, 0:left_over_samples], mark, left_over_samples) def dig2phys(eeghdf, start, end, chstart, chend): # edfhdr->edfparam[i].bitvalue = (edfhdr->edfparam[i].phys_max - edfhdr->edfparam[i].phys_min) / (edfhdr->edfparam[i].dig_max - edfhdr->edfparam[i].dig_min); # edfhdr->edfparam[i].offset = edfhdr->edfparam[i].phys_max / # edfhdr->edfparam[i].bitvalue - edfhdr->edfparam[i].dig_max; dmins = eeghdf['signal_digital_mins'][:] dmaxs = eeghdf['signal_digital_maxs'][:] phys_maxs = eeghdf['signal_physical_maxs'][:] phys_mins = eeghdf['signal_physical_mins'][:] print('dmaxs:', repr(dmaxs)) print('dmins:', repr(dmins)) print('dmaxs[:] - dmins[:]', dmaxs - dmins) print('phys_maxs', phys_maxs) print('phys_mins', phys_mins) bitvalues = (phys_maxs - phys_mins) / (dmaxs - dmins) offsets = phys_maxs / bitvalues - dmaxs print('bitvalues, offsets:', bitvalues, offsets) print('now change their shape to column vectors') for arr in (bitvalues, offsets): if len(arr.shape) != 1: print('logical errror %s shape is unexpected' % arr.shape) raise Exception s = arr.shape arr.shape = (s[0], 1) print('bitvalues, offsets:', bitvalues, offsets) # buf[i] = phys_bitvalue * (phys_offset + (double)var.two_signed[0]); dig_signal = eeghdf['signals'][chstart:chend, start:end] # signal = bitvalues[chstart:chend] *(dig_signal[chstart:chend,:] + offsets[chstart:chend]) phys_signals = (dig_signal[:, start:end] + offsets) * bitvalues # return signal, bitvalues, offsets return phys_signals # TODO: create edf -> hdf version 1000 # hdf -> edf for hdf version 1000 # tests to verify that round trip is lossless # [] writing encoding of MRN # [] and entry of mapped pt_code into database coe def edf2hdf_oldhack(fn, outfn='', hdf_dir='', anonymous=False): """ convert an edf file to hdf5 using a straighforward mapping justing getting started here --- metadata --- number_signals sample_frequency nsamples age signal_labels Post Menstrual Age """ if not outfn: base = os.path.basename(fn) base, ext = os.path.splitext(base) base = base + '.eeg.hdf' outfn = os.path.join(hdf_dir, base) print('outfn:', outfn) # outfn = fn+'.eeg.h5' with edflib.EdfReader(fn) as ef: # all the data point related stuff nsigs = ef.signals_in_file # again know/assume that this is uniform sampling across signals fs = [ef.samplefrequency(ii) for ii in range(nsigs)] fs0 = fs[0] print([ fs0 != xx for xx in fs]) if any([ fs0 != xx for xx in fs]): print("caught multiple sampling frquencies in edf files!!!") sys.exit(0) nsamples0 = ef.samples_in_file(0) print('nsigs=%s, fs0=%s, nsamples0=%s\n' % (nsigs, fs0, nsamples0)) num_samples_per_signal = ef.get_samples_per_signal() # np array print("num_samples_per_signal::\n", repr(num_samples_per_signal), '\n') 
file_duration_sec = ef.file_duration_seconds print("file_duration_sec", repr(file_duration_sec)) signal_frequency_array = ef.get_signal_freqs() print("signal_frequency_array::\n", repr(signal_frequency_array)) # Note that all annotations except the top row must also specify a duration. # long long onset; /* onset time of the event, expressed in units of 100 nanoSeconds and relative to the starttime in the header */ # char duration[16]; /* duration time, this is a null-terminated ASCII text-string */ # char annotation[EDFLIB_MAX_ANNOTATION_LEN + 1]; /* description of the event in UTF-8, this is a null term string of max length 512 # start("x.y"), end, char[20] # annotations = ef.read_annotations_as_array() # get numpy array of # annotations annotations = ef.read_annotations_100ns_units() #print("annotations::\n") #pprint.pprint(annotations) # get list of annotations signal_text_labels = ef.get_signal_text_labels() #print("signal_text_labels::\n") #pprint.pprint(signal_text_labels) #print("normalized text labels::\n") signal_text_labels_lpch_normalized = [ normalize_lpch_signal_label(label) for label in signal_text_labels] #pprint.pprint(signal_text_labels_lpch_normalized) # ef.recording_additional # print() signal_digital_mins = np.array( [ef.digital_min(ch) for ch in range(nsigs)]) signal_digital_total_min = min(signal_digital_mins) print("digital mins:", repr(signal_digital_mins)) print("digital total min:", repr(signal_digital_total_min)) signal_digital_maxs = np.array( [ef.digital_max(ch) for ch in range(nsigs)]) signal_digital_total_max = max(signal_digital_maxs) print("digital maxs:", repr(signal_digital_maxs)) print("digital total max:", repr(signal_digital_total_max)) signal_physical_dims = [ ef.physical_dimension(ch) for ch in range(nsigs)] print('signal_physical_dims::\n') pprint.pprint(signal_physical_dims) print() signal_physical_maxs = np.array( [ef.physical_max(ch) for ch in range(nsigs)]) print('signal_physical_maxs::\n', repr(signal_physical_maxs)) signal_physical_mins = np.array( [ef.physical_min(ch) for ch in range(nsigs)]) print('signal_physical_mins::\n', repr(signal_physical_mins)) print('gender:', repr(ef.gender_b)) print('admincode:', repr(ef.admincode)) print('birthdate:', repr(ef.birthdate_b)) # this is a string birthdate = dateutil.parser.parse(ef.birthdate_b) print('birthdate as datetime:', birthdate) print('equipment:', repr(ef.equipment)) print('patient:', repr(ef.patient)) print('patientname:', repr(ef.patient_name)) print('patientcode:', repr(ef.patientcode)) print('patient_additional:', repr(ef.patient_additional)) print('recording_additional:', repr(ef.recording_additional)) print('technician:', repr(ef.technician)) # or use arrow start_date_time = datetime.datetime( ef.startdate_year, ef.startdate_month, ef.startdate_day, ef.starttime_hour, ef.starttime_minute, ef.starttime_second) # tz naive # end_date_time = datetime.datetime(ef.enddate_year, ef.enddate_month, ef.enddate_day, ef.endtime_hour, # ef.endtime_minute, ef.endtime_second) # tz naive # end_date_time - start_date_time duration = datetime.timedelta(seconds=ef.file_duration_seconds) print('start_date_time:', start_date_time) age = arrow.get(start_date_time) - arrow.get(birthdate) # age = arrow.get(agedt) print('predicted age:', age) # total_seconds() returns a float print('predicted age (seconds):', age.total_seconds()) print() # this don't seem to be used much so I will put at end signal_prefilters = [ef.prefilter(ch) for ch in range(nsigs)] print('signal_prefilters::\n') 
pprint.pprint(signal_prefilters) print() signal_transducer = [ef.transducer(ch) for ch in range(nsigs)] print('signal_transducer::\n') pprint.pprint(signal_transducer) # now start building the hdf file # create file 'w-' -> fail if exists , w -> truncate if exists hdf = h5py.File(outfn, 'w') # use compression? yes! give it a try # integer increasing starting at 1000 semantic change at each thousand hdf.attrs['eeghdf_version'] = 1000 hdf.attrs['signals_in_file'] = nsigs hdf.attrs['sample_frequency0'] = fs0 hdf.attrs['nsamples0'] = nsamples0 sample_frequencies = hdf.create_dataset( 'sample_frequencies', (nsigs,), dtype='float32') sample_frequencies[:] = signal_frequency_array # add phys_bitvalue = .bitvalue, phys_offset = .offset # (double) phys_value = phys_bitvalue*(phys_offset + (double) var.two_signed[0]) # edfhdr->edfparam[i].bitvalue = (edfhdr->edfparam[i].phys_max - edfhdr->edfparam[i].phys_min) / (edfhdr->edfparam[i].dig_max - edfhdr->edfparam[i].dig_min); # edfhdr->edfparam[i].offset = edfhdr->edfparam[i].phys_max / # edfhdr->edfparam[i].bitvalue - edfhdr->edfparam[i].dig_max; # add meta data # start_date_time = datetime.datetime(ef.startdate_year, ef.startdate_month, ef.startdate_day, ef.starttime_hour, ef.starttime_minute, ef.starttime_second) # ,tzinfo=dateutil.tz.tzlocal()) print(start_date_time) patient = hdf.create_group('patient') patient.attrs['gender'] = ef.gender_b patient.attrs['patientname'] = "" # ef.patient_name # PHI print('birthdate: %s' % ef.birthdate_b, type(ef.birthdate_b)) default_birthdate = datetime.datetime(year=1990, month=1, day=1) # birthdate = dateutil.parser.parse(ef.birthdate) # this is a string # -> date (datetime) birthdate = default_birthdate print('birthdate (date object):', birthdate) private_start_date_time = birthdate + age patient.attrs['birthdate'] = str(birthdate) # float number age in seconds patient.attrs['age_seconds'] = age.total_seconds() # gestational age at birth (in seconds) # datetime.timedelta(weeks=40).total_seconds() # default 24192000 seconds or 40 weeks, 280 days # could also call this post-conceptional-age PCA patient.attrs['gestatational_age_birth_seconds'] = datetime.timedelta( weeks=40).total_seconds() patient.attrs['born_premature'] = 'unknown' # ('unknown', True, False) # hide actual start/end times question: should vary by year or just # make all the same hdf.attrs['startdatetime'] = str(private_start_date_time) hdf.attrs['enddatetime'] = str(private_start_date_time + duration) patient.attrs['age_days'] = age.days # post natal age in days patient.attrs['age_seconds'] = age.total_seconds() # now start storing the lists of things: labels, units... # nsigs = len(label_list) # 1. keep the text-vs-bytes distinction clear # 2. alays use "bytes" instead of "str" when you're sure you want a byte string. # for literals, can use "b" prefix, e.g. b'some bytes' # 3. for text strings use str or btter yet unicode, u'Hello' # 4. 
always use UTF-8 in code # variable ascii string (or b'' type) str_dt = h5py.special_dtype(vlen=bytes) label_ds = hdf.create_dataset('signal_labels', (nsigs,), dtype=str_dt) units_ds = hdf.create_dataset( 'physical_dimensions', (nsigs,), dtype=str_dt) transducer_ds = hdf.create_dataset( 'transducer', (nsigs,), dtype=str_dt) prefilter_ds = hdf.create_dataset('prefilter', (nsigs,), dtype=str_dt) hdf['signal_physical_mins'] = signal_physical_mins hdf['signal_physical_maxs'] = signal_physical_maxs hdf['signal_digital_mins'] = signal_digital_mins hdf['signal_digital_maxs'] = signal_digital_maxs if all(signal_digital_maxs <= 32767) and all( signal_digital_mins >= -32768): number_bits = 16 # EDF else: number_bits = 24 # BDF 2^23 = 8388608 + 1 bit for sign hdf.attrs['number_bits_per_sample'] = number_bits if number_bits <= 16: data_dtype = 'int16' eegdata = hdf.create_dataset('signals', (nsigs, nsamples0), dtype=data_dtype, # chunks=(nsigs,fs0), # if wanted 1 # second chunks chunks=True, fletcher32=True, compression='gzip' # most universal # compression='gzip', # compression='lzf', # maxshape=(256,None) ) if number_bits <= 32 and number_bits > 16: # handles up to 32 data_dtype = 'int32' eegdata = hdf.create_dataset('signals', (nsigs, nsamples0), dtype=data_dtype, # chunks=(nsigs,fs0), # if wanted 1 # second chunks chunks=True, fletcher32=True, compression='gzip' # most universal # compression='gzip', # compression='lzf', # maxshape=(256,None) ) # no compression -> 50 MiB can view eegdata in vitables # compression='gzip' -> 27 MiB slower # compression='lzf' -> 35 MiB # compression='lzf' maxshape=(256,None) -> 36MiB # this works but can do another way: # labels = [] units = list() # signal_nsamples = [] for ii in range(nsigs): # labels.append(ef.signal_label(ii)) units.append(ef.physical_dimension(ii)) # self.signal_nsamples.append(self.cedf.samples_in_file(ii)) # self.samplefreqs.append(self.cedf.samplefrequency(ii)) # eegdata.signal_labels = labels # labels_strip = [ss.strip() for ss in labels] # labels are fixed # length strings units = [cc.strip() for cc in units] # converted to standard electrode names if possible label_ds[:] = signal_text_labels_lpch_normalized units_ds[:] = units transducer_ds[:] = signal_transducer prefilter_ds[:] = signal_prefilters num_annot = len(annotations) # how do I make sure this init is "long long" enough edf_annots = hdf.create_group('edf_annotations') starts = edf_annots.create_dataset( 'starts_100ns', (num_annot,), dtype=np.int64) # curiously these durations seem to be stored as strings but of # floating point values "5.00000" for 5 second duration durations = edf_annots.create_dataset( 'durations_char16', (num_annot,), dtype='S16') # S16 !!! 
check py3 compatibility texts = edf_annots.create_dataset('texts', (num_annot,), dtype=str_dt) # start with a loop for ii in range(num_annot): starts[ii] = annotations[ii][0] # note: so far I have ony seen type(annotations[ii][1] -> <type 'str'> and they look like ascii strings # of floating point number of seconds for a duration # print('type(annotations[ii][1]):', type(annotations[ii][1])) durations[ii] = annotations[ii][1] texts[ii] = annotations[ii][2].strip() # should be more and a switch for anonymous or not # need to change this to nchunks = int(nsamples0 // fs0) samples_per_chunk = int(fs0) # 1 second of samples buf = np.zeros((nsigs, samples_per_chunk), dtype='int32') print( 'nchunks:%s, samples_per_chunk: %s' % (nchunks, samples_per_chunk)) bookmark = 0 # mark where were are in samples for ii in range(nchunks): for jj in range(nsigs): # read_phys_signal(self, signalnum, start, n, # np.ndarray[np.float64_t, ndim = 1] sigbuf) # readsignal converts into int32 as necessary ef.read_digital_signal( jj, bookmark, samples_per_chunk, buf[jj]) # conversion from int32 to int16 as necessary eegdata[:, bookmark:bookmark + samples_per_chunk] = buf # bookmark should be ii*fs0 bookmark += samples_per_chunk left_over_samples = nsamples0 - nchunks * samples_per_chunk print('left_over_samples:', left_over_samples) if left_over_samples > 0: for jj in range(nsigs): ef.read_digital_signal( jj, bookmark, left_over_samples, buf[jj]) eegdata[:,bookmark:bookmark + left_over_samples] = buf[:,0:left_over_samples] hdf.close() # from trackingdb.models.nkdb import find_lpch_birthday_from_mrn # Plan # v = ValidateTrackHeader(header=h) # if v.is_valid(): # process(v.cleaned_data) # else: # mark_as_invalid(h) def first(mapping): if mapping: return mapping[0] else: return mapping # say mapping = [] or None class ValidateTrackHeaderLPCH: # after validated place all data in cleaned_data field def __init__(self, header): # TOOO: validate that databae_source_label is in accepted sources self.hdr = header.copy() self.validated = False # self.clean = False self.cleaned_data = {} # vs update/copy from header def is_valid(self): # if name contains "Test" then we should skip this file and log it mrnobj = None try: if name_is_test(self.hdr['patient_name']): raise ValidationError('test file encountered', code='test file', params=self.hdr) # if we have a valid mrn, then we can potentially look up the patient or even the study mrn_ok = valid_lpch_mrn(self.hdr['patientcode']) if mrn_ok: mrn = self.hdr['patientcode'].strip() self.cleaned_data['patientcode'] = mrn else: raise ValidationError('bad MRN', code='bad mrn', params=self.hdr['patientcode']) if valid_lpch_name(self.hdr['patient_name']): self.cleaned_data['patient_name'] = self.hdr['patient_name'].strip() else: if mrn_ok: # try to look up patient in databases # look up name, dob here based upon mrn in nk_db and/or epic_db mrnobj = models.NkMrn.query.filter_by(mrn=mrn).first() if mrnobj: self.cleaned_data['patient_name'] = mrnobj.nkpatient.name else: raise ValidationError('invalid patient name', 'invalid name', params=self.hdr) eegno_ok = valid_lpch_eegno(self.hdr['admincode']) if eegno_ok: self.cleaned_data['admincode'] = _csu(self.hdr['admincode']) else: raise ValidationError('bad eegno/admincode', code='invalid admincode', params=self.hdr) if self.hdr['birthdate_date']: self.cleaned_data['birthdate_date'] = self.hdr['birthdate_date'] else: # then couldn't make a date, see if can find birthday in database if mrn_ok: mrnobj = mrnobj if mrnobj else 
models.NkMrn.query.filter_by(mrn=mrn).first() if not mrnobj: raise ValidationError('bad birthdate_date','birthdate error', params=self.hdr) else: nbday = mrnobj.nkpatient.dob self.cleaned_data['birthdate_date'] = nbday else: raise ValidationError('bad birthday','birthday error', params=self.hdr) # copy over other header members # todo: should do more validation of 'gender' self.cleaned_data['gender'] = self.hdr['gender'] self.cleaned_data['file_name'] = self.hdr['file_name'] self.cleaned_data['filetype'] = self.hdr['filetype'] self.cleaned_data['signals_in_file'] = self.hdr['signals_in_file'] self.cleaned_data['datarecords_in_file'] = self.hdr['datarecords_in_file'] self.cleaned_data['file_duration_100ns'] = self.hdr['file_duration_100ns'] self.cleaned_data['file_duration_seconds'] = self.hdr['file_duration_seconds'] self.cleaned_data['startdate_date'] = self.hdr['startdate_date'] self.cleaned_data['start_datetime'] = self.hdr['start_datetime'] self.cleaned_data['starttime_subsecond_offset'] = self.hdr['starttime_subsecond_offset'] self.cleaned_data['patient_additional'] = self.hdr['patient_additional'].strip() self.cleaned_data['technician'] = self.hdr['technician'].strip() self.cleaned_data['equipment'] = self.hdr['equipment'].strip() self.cleaned_data['recording_additional'] = self.hdr['recording_additional'].strip() self.cleaned_data['datarecord_duration_100ns'] = self.hdr['datarecord_duration_100ns'] self.validated = True return True except ValidationError as ve: self.errors = ve.message self.error_code = ve.code self.error_params = ve.params debug(ve.message) return False class AnonymizeTrackHeaderLPCH(ValidateTrackHeaderLPCH): LPCH_DEFAULT_BIRTH_DATETIME = datetime.datetime(year=1990, month=1, day=1) # datatbase sources LPCH_NK = 'LPCH_NK' STANFORD_NK = 'STANFORD_NK' def __init__(self, header, source_database_label=LPCH_NK): super().__init__(header) with app.app_context(): self.anonymous_header = models.register_and_create_anonymous_header(self.hdr, source_database_label=source_database_label) # will need to track: patient, study, file # file needs source and key NK origin class ValidateTrackHeaderStanford: # after validated place all data in cleaned_data field def __init__(self, header): # TOOO: validate that databae_source_label is in accepted sources self.hdr = header.copy() self.validated = False # self.clean = False self.cleaned_data = {} # vs update/copy from header def is_valid(self): # if name contains "Test" then we should skip this file and log it mrnobj = None try: if name_is_test(self.hdr['patient_name']): raise ValidationError('test file encountered', code='test file', params=self.hdr) # if we have a valid mrn, then we can potentially look up the patient or even the study mrn_ok = valid_stanford_mrn(self.hdr['patientcode']) if mrn_ok: mrn = self.hdr['patientcode'].strip() self.cleaned_data['patientcode'] = mrn else: raise ValidationError('bad MRN', code='bad mrn', params=self.hdr['patientcode']) if valid_stanford_name(self.hdr['patient_name']): self.cleaned_data['patient_name'] = self.hdr['patient_name'].strip() else: if mrn_ok: # try to look up patient in databases # look up name, dob here based upon mrn in nk_db and/or epic_db mrnobj = models.NkMrn.query.filter_by(mrn=mrn).first() if mrnobj: self.cleaned_data['patient_name'] = mrnobj.nkpatient.name else: raise ValidationError('invalid patient name', 'invalid name', params=self.hdr) eegno_ok = valid_stanford_eegno(self.hdr['admincode']) if eegno_ok: self.cleaned_data['admincode'] = _csu(self.hdr['admincode']) else: 
raise ValidationError('bad eegno/admincode', code='invalid admincode', params=self.hdr) if self.hdr['birthdate_date']: self.cleaned_data['birthdate_date'] = self.hdr['birthdate_date'] else: # then couldn't make a date, see if can find birthday in database if mrn_ok: mrnobj = mrnobj if mrnobj else models.NkMrn.query.filter_by(mrn=mrn).first() if not mrnobj: raise ValidationError('bad birthdate_date','birthdate error', params=self.hdr) else: nbday = mrnobj.nkpatient.dob self.cleaned_data['birthdate_date'] = nbday else: raise ValidationError('bad birthday','birthday error', params=self.hdr) # copy over other header members # todo: should do more validation of 'gender' self.cleaned_data['gender'] = self.hdr['gender'] self.cleaned_data['file_name'] = self.hdr['file_name'] self.cleaned_data['filetype'] = self.hdr['filetype'] self.cleaned_data['signals_in_file'] = self.hdr['signals_in_file'] self.cleaned_data['datarecords_in_file'] = self.hdr['datarecords_in_file'] self.cleaned_data['file_duration_100ns'] = self.hdr['file_duration_100ns'] self.cleaned_data['file_duration_seconds'] = self.hdr['file_duration_seconds'] self.cleaned_data['startdate_date'] = self.hdr['startdate_date'] self.cleaned_data['start_datetime'] = self.hdr['start_datetime'] self.cleaned_data['starttime_subsecond_offset'] = self.hdr['starttime_subsecond_offset'] self.cleaned_data['patient_additional'] = self.hdr['patient_additional'].strip() self.cleaned_data['technician'] = self.hdr['technician'].strip() self.cleaned_data['equipment'] = self.hdr['equipment'].strip() self.cleaned_data['recording_additional'] = self.hdr['recording_additional'].strip() self.cleaned_data['datarecord_duration_100ns'] = self.hdr['datarecord_duration_100ns'] self.validated = True return True except ValidationError as ve: self.errors = ve.message self.error_code = ve.code self.error_params = ve.params debug(ve.message) return False class AnonymizeTrackHeaderStanford(ValidateTrackHeaderStanford): STANFORD_DEFAULT_BIRTH_DATETIME = datetime.datetime(year=1910, month=1, day=1) # datatbase sources LPCH_NK = 'LPCH_NK' STANFORD_NK = 'STANFORD_NK' def __init__(self, header, source_database_label='STANFORD_NK'): super().__init__(header) with app.app_context(): self.anonymous_header = models.register_and_create_anonymous_header(self.hdr, source_database_label=source_database_label) # will need to track: patient, study, file # file needs source and key NK origin def find_blocks(arr): blocks = [] print("total arr:", arr) dfs = np.diff(arr) dfs_ind = np.where(dfs != 0.0)[0] last_ind = 0 for dd in dfs_ind+1: print("block:",arr[last_ind:dd]) blocks.append((last_ind,dd)) last_ind = dd print("last block:", arr[last_ind:]) blocks.append( (last_ind,len(arr))) return blocks def find_blocks2(arr): blocks = [] N = len(arr) print("total arr:", arr) last_ind = 0 last_val = arr[0] for ii in range(1,N): if last_val == arr[ii]: pass else: blocks.append((last_ind,ii)) last_ind = ii last_val = arr[ii] blocks.append((last_ind,N)) return blocks def test_find_blocks1(): s = [250.0, 250.0, 250.0, 1.0, 1.0, 1000.0, 1000.0] blocks = find_blocks(s) print("blocks:") print(blocks) def test_find_blocks2(): s = [250.0, 250.0, 250.0, 1.0, 1.0, 1000.0, 1000.0] blocks = find_blocks2(s) print("blocks:") print(blocks) def test_find_blocks2_2(): s = [100,100,100,100,100,100,100,100] blocks = find_blocks2(s) print("blocks:") print(blocks) def edf2hdf2(fn, outfn='', hdf_dir='', anonymize=False): """ convert an edf file to hdf5 using fairly straightforward mapping return True if successful 
@database_sourcel_label tells us which database it came from LPCH_NK or STANFORD_NK this is important! """ if not outfn: base = os.path.basename(fn) base, ext = os.path.splitext(base) base = base + '.eeghdf' outfn = os.path.join(hdf_dir, base) # print('outfn:', outfn) # all the data point related stuff with edflib.EdfReader(fn) as ef: # read all EDF+ header information in just the way I want it header = { 'file_name': os.path.basename(fn), 'filetype': ef.filetype, 'patient_name': ef.patient_name, 'patientcode': ef.patientcode, 'gender': ef.gender, 'signals_in_file': ef.signals_in_file, 'datarecords_in_file': ef.datarecords_in_file, 'file_duration_100ns': ef.file_duration_100ns, 'file_duration_seconds': ef.file_duration_seconds, 'startdate_date': datetime.date(ef.startdate_year, ef.startdate_month, ef.startdate_day), 'start_datetime': datetime.datetime(ef.startdate_year, ef.startdate_month, ef.startdate_day, ef.starttime_hour, ef.starttime_minute, ef.starttime_second), 'starttime_subsecond_offset': ef.starttime_subsecond, 'birthdate_date': ef.birthdate_date, 'patient_additional': ef.patient_additional, 'admincode': ef.admincode, # usually the study eg. C13-100 'technician': ef.technician, 'equipment': ef.equipment, 'recording_additional': ef.recording_additional, 'datarecord_duration_100ns': ef.datarecord_duration_100ns, } pprint.pprint(header) #### validation code ##### validator = None # if source_database_label=='LPCH_NK': # validator = ValidateTrackHeaderLPCH(header=header) # elif source_database_label== 'STANFORD_NK': # validator = ValidateTrackHeaderStanford(header=header) # else: # raise ValidationError # if not validator.is_valid(): # print('problem with this file:', fn) # print(validator.errors,validator.error_code, # validator.error_params) # return False, validator # else: # print('\nvalid header::') # pprint.pprint(validator.cleaned_data) # header = validator.cleaned_data # from here on the header is valid and cleaned # use arrow start_datetime = header['start_datetime'] # end_date_time = datetime.datetime(ef.enddate_year, ef.enddate_month, ef.enddate_day, ef.endtime_hour, # ef.endtime_minute, ef.endtime_second) # tz naive # end_date_time - start_date_time duration = datetime.timedelta(seconds=header['file_duration_seconds']) # derived information birthdate = header['birthdate_date'] if birthdate: age = arrow.get(start_datetime) - arrow.get(header['birthdate_date']) debug('predicted age: %s' % age) # total_seconds() returns a float debug('predicted age (seconds): %s' % age.total_seconds()) else: age = datetime.timedelta(seconds=0) # if anonymize: # if source_database_label== 'LPCH_NK': # anonymizer = AnonymizeTrackHeaderLPCH(header, source_database_label=source_database_label) # if source_database_label == 'STANFORD_NK': # anonymizer = AnonymizeTrackHeaderStanford(header, source_database_label=source_database_label) # header = anonymizer.anonymous_header # replace the original header with the anonymous one # print('anonymized header') # pprint.pprint(header) # anonymized version if necessary header['end_datetime'] = header['start_datetime'] + duration ############# signal array information ################## # signal block related stuff nsigs = ef.signals_in_file # again know/assume that this is uniform sampling across signals fs0 = ef.samplefrequency(0) signal_frequency_array = ef.get_signal_freqs() dfs = np.diff(signal_frequency_array) dfs_ind = np.where(dfs != 0.0) dfs_ind = dfs_ind[0] last_ind = 0 for dd in dfs_ind+1: print("block:",signal_frequency_array[last_ind:dd]) 
last_ind = dd print("last block:", signal_frequency_array[last_ind:]) print("where does sampling rate change?", np.where(dfs != 0.0)) print("elements:", signal_frequency_array[np.where(dfs != 0.0)]) print("signal_frequency_array::\n", repr(signal_frequency_array)) print("len(signal_frequency_array):", len(signal_frequency_array)) assert all(signal_frequency_array[:-3] == fs0) nsamples0 = ef.samples_in_file(0) # samples per channel print('nsigs=%s, fs0=%s, nsamples0=%s\n' % (nsigs, fs0, nsamples0)) num_samples_per_signal = ef.get_samples_per_signal() # np array print("num_samples_per_signal::\n", repr(num_samples_per_signal), '\n') # assert all(num_samples_per_signal == nsamples0) file_duration_sec = ef.file_duration_seconds #print("file_duration_sec", repr(file_duration_sec)) # Note that all annotations except the top row must also specify a duration. # long long onset; /* onset time of the event, expressed in units of 100 # nanoSeconds and relative to the starttime in the header */ # char duration[16]; /* duration time, this is a null-terminated ASCII text-string */ # char annotation[EDFLIB_MAX_ANNOTATION_LEN + 1]; /* description of the # event in UTF-8, this is a null term string of max length 512*/ # start("x.y"), end, char[20] # annotations = ef.read_annotations_as_array() # get numpy array of # annotations annotations_b = ef.read_annotations_b_100ns_units() # print("annotations_b::\n") # pprint.pprint(annotations_b) # get list of annotations signal_text_labels = ef.get_signal_text_labels() print("signal_text_labels::\n") pprint.pprint(signal_text_labels) print("normalized text labels::\n") signal_text_labels_lpch_normalized = [ normalize_lpch_signal_label(label) for label in signal_text_labels] pprint.pprint(signal_text_labels_lpch_normalized) # ef.recording_additional # print() signal_digital_mins = np.array( [ef.digital_min(ch) for ch in range(nsigs)]) signal_digital_total_min = min(signal_digital_mins) print("digital mins:", repr(signal_digital_mins)) print("digital total min:", repr(signal_digital_total_min)) signal_digital_maxs = np.array( [ef.digital_max(ch) for ch in range(nsigs)]) signal_digital_total_max = max(signal_digital_maxs) print("digital maxs:", repr(signal_digital_maxs)) #print("digital total max:", repr(signal_digital_total_max)) signal_physical_dims = [ ef.physical_dimension(ch) for ch in range(nsigs)] # print('signal_physical_dims::\n') # pprint.pprint(signal_physical_dims) #print() signal_physical_maxs = np.array( [ef.physical_max(ch) for ch in range(nsigs)]) #print('signal_physical_maxs::\n', repr(signal_physical_maxs)) signal_physical_mins = np.array( [ef.physical_min(ch) for ch in range(nsigs)]) #print('signal_physical_mins::\n', repr(signal_physical_mins)) # this don't seem to be used much so I will put at end signal_prefilters = [ef.prefilter(ch).strip() for ch in range(nsigs)] #print('signal_prefilters::\n') # pprint.pprint(signal_prefilters) #print() signal_transducers = [ef.transducer(ch).strip() for ch in range(nsigs)] #print('signal_transducers::\n') #pprint.pprint(signal_transducers) with eeghdf.EEGHDFWriter(outfn, 'w') as eegf: eegf.write_patient_info(patient_name=header['patient_name'], patientcode=header['patientcode'], gender=header['gender'], birthdate_isostring=header['birthdate_date'], # gestational_age_at_birth_days # born_premature patient_additional=header['patient_additional']) signal_text_labels_lpch_normalized = [ normalize_lpch_signal_label(label) for label in signal_text_labels] rec = 
eegf.create_record_block(record_duration_seconds=header['file_duration_seconds'], start_isodatetime=str(header['start_datetime']), end_isodatetime=str(header['end_datetime']), number_channels=header['signals_in_file'], num_samples_per_channel=nsamples0, sample_frequency=fs0, signal_labels=signal_text_labels_lpch_normalized, signal_physical_mins=signal_physical_mins, signal_physical_maxs=signal_physical_maxs, signal_digital_mins=signal_digital_mins, signal_digital_maxs=signal_digital_maxs, physical_dimensions=signal_physical_dims, patient_age_days=age.total_seconds() / 86400.0, signal_prefilters=signal_prefilters, signal_transducers=signal_transducers, technician=header['technician']) eegf.write_annotations_b(annotations_b) # may be should be called record annotations edfblock_itr = edf_block_iter_generator( ef, nsamples0, 100 * ef.samples_in_datarecord(0)*header['signals_in_file'], # samples_per_chunk roughly 100 datarecords at a time dtype='int32') signals = eegf.stream_dig_signal_to_record_block(rec, edfblock_itr) return True, validator # we succeeded def test_edf2hdf_info(): # on chris's macbook EDF_DIR = r'/Users/clee/code/eegml/nk_database_proj/private/lpch_edfs' fn = os.path.join(EDF_DIR, 'XA2731AX_1-1+.edf') edf2hdf(filename) if __name__ == '__main__': import sys if len(sys.argv) == 2: file_name = sys.argv[1] edf2hdf2(file_name)
license: bsd-3-clause
hash: 4,465,477,519,905,432,000
line_mean: 38.825348
line_max: 170
alpha_frac: 0.561707
autogenerated: false
ratio: 3.670513
config_test: false
has_no_keywords: false
few_assignments: false
repo_name: lmazuel/azure-sdk-for-python
path: azure-mgmt-network/azure/mgmt/network/v2017_11_01/models/azure_reachability_report_py3.py
copies: 1
size: 1995
content:

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class AzureReachabilityReport(Model):
    """Azure reachability report details.

    All required parameters must be populated in order to send to Azure.

    :param aggregation_level: Required. The aggregation level of Azure
     reachability report. Can be Country, State or City.
    :type aggregation_level: str
    :param provider_location: Required.
    :type provider_location:
     ~azure.mgmt.network.v2017_11_01.models.AzureReachabilityReportLocation
    :param reachability_report: Required. List of Azure reachability report
     items.
    :type reachability_report:
     list[~azure.mgmt.network.v2017_11_01.models.AzureReachabilityReportItem]
    """

    _validation = {
        'aggregation_level': {'required': True},
        'provider_location': {'required': True},
        'reachability_report': {'required': True},
    }

    _attribute_map = {
        'aggregation_level': {'key': 'aggregationLevel', 'type': 'str'},
        'provider_location': {'key': 'providerLocation', 'type': 'AzureReachabilityReportLocation'},
        'reachability_report': {'key': 'reachabilityReport', 'type': '[AzureReachabilityReportItem]'},
    }

    def __init__(self, *, aggregation_level: str, provider_location, reachability_report, **kwargs) -> None:
        super(AzureReachabilityReport, self).__init__(**kwargs)
        self.aggregation_level = aggregation_level
        self.provider_location = provider_location
        self.reachability_report = reachability_report

license: mit
hash: 6,542,636,260,886,058,000
line_mean: 40.5625
line_max: 108
alpha_frac: 0.649624
autogenerated: false
ratio: 4.453125
config_test: false
has_no_keywords: false
few_assignments: false
repo_name: lavish205/olympia
path: src/olympia/reviewers/tests/test_views.py
copies: 1
size: 210461
content:
# -*- coding: utf-8 -*- import json import os import time import urlparse from collections import OrderedDict from datetime import datetime, timedelta from django.conf import settings from django.core import mail from django.core.cache import cache from django.core.files import temp from django.core.files.base import File as DjangoFile from django.template import defaultfilters from django.test.utils import override_settings import mock from freezegun import freeze_time from lxml.html import HTMLParser, fromstring from mock import Mock, patch from pyquery import PyQuery as pq from olympia import amo, core, ratings from olympia.abuse.models import AbuseReport from olympia.access.models import Group, GroupUser from olympia.accounts.views import API_TOKEN_COOKIE from olympia.activity.models import ActivityLog from olympia.addons.models import ( Addon, AddonApprovalsCounter, AddonDependency, AddonReviewerFlags, AddonUser) from olympia.amo.templatetags.jinja_helpers import ( user_media_path, user_media_url) from olympia.amo.tests import ( APITestClient, TestCase, addon_factory, check_links, file_factory, formset, initial, reverse_ns, user_factory, version_factory) from olympia.amo.urlresolvers import reverse from olympia.files.models import File, FileValidation, WebextPermission from olympia.ratings.models import Rating, RatingFlag from olympia.reviewers.models import ( AutoApprovalSummary, RereviewQueueTheme, ReviewerScore, ReviewerSubscription, Whiteboard) from olympia.users.models import UserProfile from olympia.versions.models import ApplicationsVersions, AppVersion from olympia.zadmin.models import get_config class TestRedirectsOldPaths(TestCase): def setUp(self): user = user_factory() self.client.login(email=user.email) def test_redirect_old_queue(self): response = self.client.get('/en-US/editors/queue/new') self.assert3xx(response, '/reviewers/queue/new', status_code=301) def test_redirect_old_review_page(self): response = self.client.get('/en-US/editors/review/foobar') self.assert3xx(response, '/reviewers/review/foobar', status_code=301) class ReviewerTest(TestCase): fixtures = ['base/users', 'base/approvals'] def login_as_admin(self): assert self.client.login(email='admin@mozilla.com') def login_as_reviewer(self): assert self.client.login(email='reviewer@mozilla.com') def make_review(self, username='a'): u = UserProfile.objects.create(username=username) a = Addon.objects.create(name='yermom', type=amo.ADDON_EXTENSION) return Rating.objects.create(user=u, addon=a, body='baa') class TestRatingsModerationLog(ReviewerTest): def setUp(self): super(TestRatingsModerationLog, self).setUp() user = user_factory() self.grant_permission(user, 'Ratings:Moderate') self.client.login(email=user.email) self.url = reverse('reviewers.ratings_moderation_log') core.set_user(user) def test_log(self): response = self.client.get(self.url) assert response.status_code == 200 def test_start_filter(self): response = self.client.get(self.url, {'start': '2011-01-01'}) assert response.status_code == 200 def test_enddate_filter(self): """ Make sure that if our end date is 1/1/2011, that we include items from 1/1/2011. To not do as such would be dishonorable. """ review = self.make_review(username='b') ActivityLog.create( amo.LOG.APPROVE_RATING, review, review.addon).update( created=datetime(2011, 1, 1)) response = self.client.get(self.url, {'end': '2011-01-01'}) assert response.status_code == 200 assert pq(response.content)('tbody td').eq(0).text() == ( 'Jan. 
1, 2011, midnight') def test_action_filter(self): """ Based on setup we should see only two items if we filter for deleted reviews. """ review = self.make_review() for i in xrange(2): ActivityLog.create(amo.LOG.APPROVE_RATING, review, review.addon) ActivityLog.create(amo.LOG.DELETE_RATING, review.id, review.addon) response = self.client.get(self.url, {'filter': 'deleted'}) assert response.status_code == 200 assert pq(response.content)('tbody tr').length == 2 def test_no_results(self): response = self.client.get(self.url, {'end': '2004-01-01'}) assert response.status_code == 200 assert '"no-results"' in response.content def test_moderation_log_detail(self): review = self.make_review() ActivityLog.create(amo.LOG.APPROVE_RATING, review, review.addon) id_ = ActivityLog.objects.moderation_events()[0].id response = self.client.get( reverse('reviewers.ratings_moderation_log.detail', args=[id_])) assert response.status_code == 200 class TestReviewLog(ReviewerTest): fixtures = ReviewerTest.fixtures + ['base/addon_3615'] def setUp(self): super(TestReviewLog, self).setUp() self.user = UserProfile.objects.get(email='reviewer@mozilla.com') self.login_as_reviewer() self.url = reverse('reviewers.reviewlog') def get_user(self): return UserProfile.objects.all()[0] def make_approvals(self): for addon in Addon.objects.all(): ActivityLog.create( amo.LOG.REJECT_VERSION, addon, addon.current_version, user=self.get_user(), details={'comments': 'youwin'}) def make_an_approval(self, action, comment='youwin', username=None, addon=None): if username: user = UserProfile.objects.get(username=username) else: user = self.get_user() if not addon: addon = Addon.objects.all()[0] ActivityLog.create(action, addon, addon.current_version, user=user, details={'comments': comment}) def test_basic(self): self.make_approvals() response = self.client.get(self.url) assert response .status_code == 200 doc = pq(response .content) assert doc('#log-filter button'), 'No filters.' # Should have 2 showing. rows = doc('tbody tr') assert rows.filter(':not(.hide)').length == 2 assert rows.filter('.hide').eq(0).text() == 'youwin' # Should have none showing if the addons are unlisted. for addon in Addon.objects.all(): self.make_addon_unlisted(addon) response = self.client.get(self.url) assert response .status_code == 200 doc = pq(response.content) assert not doc('tbody tr :not(.hide)') # But they should have 2 showing for someone with the right perms. self.grant_permission(self.user, 'Addons:ReviewUnlisted') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) rows = doc('tbody tr') assert rows.filter(':not(.hide)').length == 2 assert rows.filter('.hide').eq(0).text() == 'youwin' def test_xss(self): a = Addon.objects.all()[0] a.name = '<script>alert("xss")</script>' a.save() ActivityLog.create(amo.LOG.REJECT_VERSION, a, a.current_version, user=self.get_user(), details={'comments': 'xss!'}) response = self.client.get(self.url) assert response.status_code == 200 inner_html = pq(response.content)('#log-listing tbody td').eq(1).html() assert '&lt;script&gt;' in inner_html assert '<script>' not in inner_html def test_end_filter(self): """ Let's use today as an end-day filter and make sure we see stuff if we filter. """ self.make_approvals() # Make sure we show the stuff we just made. 
date = time.strftime('%Y-%m-%d') response = self.client.get(self.url, {'end': date}) assert response.status_code == 200 doc = pq(response.content)('#log-listing tbody') assert doc('tr:not(.hide)').length == 2 assert doc('tr.hide').eq(0).text() == 'youwin' def test_end_filter_wrong(self): """ Let's use today as an end-day filter and make sure we see stuff if we filter. """ self.make_approvals() response = self.client.get(self.url, {'end': 'wrong!'}) # If this is broken, we'll get a traceback. assert response.status_code == 200 assert pq(response.content)('#log-listing tr:not(.hide)').length == 3 def test_start_filter(self): with freeze_time('2017-08-01 10:00'): self.make_approvals() # Make sure we show the stuff we just made. response = self.client.get(self.url, {'start': '2017-07-31'}) assert response.status_code == 200 doc = pq(response.content)('#log-listing tbody') assert doc('tr:not(.hide)').length == 2 assert doc('tr.hide').eq(0).text() == 'youwin' def test_start_default_filter(self): with freeze_time('2017-07-31 10:00'): self.make_approvals() with freeze_time('2017-08-01 10:00'): addon = Addon.objects.first() ActivityLog.create( amo.LOG.REJECT_VERSION, addon, addon.current_version, user=self.get_user(), details={'comments': 'youwin'}) # Make sure the default 'start' to the 1st of a month works properly with freeze_time('2017-08-03 11:00'): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content)('#log-listing tbody') assert doc('tr:not(.hide)').length == 1 assert doc('tr.hide').eq(0).text() == 'youwin' def test_search_comment_exists(self): """Search by comment.""" self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE, comment='hello') response = self.client.get(self.url, {'search': 'hello'}) assert response.status_code == 200 assert pq(response.content)( '#log-listing tbody tr.hide').eq(0).text() == 'hello' def test_search_comment_case_exists(self): """Search by comment, with case.""" self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE, comment='hello') response = self.client.get(self.url, {'search': 'HeLlO'}) assert response.status_code == 200 assert pq(response.content)( '#log-listing tbody tr.hide').eq(0).text() == 'hello' def test_search_comment_doesnt_exist(self): """Search by comment, with no results.""" self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE, comment='hello') response = self.client.get(self.url, {'search': 'bye'}) assert response.status_code == 200 assert pq(response.content)('.no-results').length == 1 def test_search_author_exists(self): """Search by author.""" self.make_approvals() self.make_an_approval( amo.LOG.REQUEST_ADMIN_REVIEW_CODE, username='reviewer', comment='hi') response = self.client.get(self.url, {'search': 'reviewer'}) assert response.status_code == 200 rows = pq(response.content)('#log-listing tbody tr') assert rows.filter(':not(.hide)').length == 1 assert rows.filter('.hide').eq(0).text() == 'hi' def test_search_author_case_exists(self): """Search by author, with case.""" self.make_approvals() self.make_an_approval( amo.LOG.REQUEST_ADMIN_REVIEW_CODE, username='reviewer', comment='hi') response = self.client.get(self.url, {'search': 'ReviEwEr'}) assert response.status_code == 200 rows = pq(response.content)('#log-listing tbody tr') assert rows.filter(':not(.hide)').length == 1 assert rows.filter('.hide').eq(0).text() == 'hi' def test_search_author_doesnt_exist(self): """Search by author, with no results.""" self.make_approvals() self.make_an_approval( amo.LOG.REQUEST_ADMIN_REVIEW_CODE, 
username='reviewer') response = self.client.get(self.url, {'search': 'wrong'}) assert response.status_code == 200 assert pq(response.content)('.no-results').length == 1 def test_search_addon_exists(self): """Search by add-on name.""" self.make_approvals() addon = Addon.objects.all()[0] response = self.client.get(self.url, {'search': addon.name}) assert response.status_code == 200 tr = pq(response.content)( '#log-listing tr[data-addonid="%s"]' % addon.id) assert tr.length == 1 assert tr.siblings('.comments').text() == 'youwin' def test_search_addon_case_exists(self): """Search by add-on name, with case.""" self.make_approvals() addon = Addon.objects.all()[0] response = self.client.get( self.url, {'search': str(addon.name).swapcase()}) assert response.status_code == 200 tr = pq(response.content)( '#log-listing tr[data-addonid="%s"]' % addon.id) assert tr.length == 1 assert tr.siblings('.comments').text() == 'youwin' def test_search_addon_doesnt_exist(self): """Search by add-on name, with no results.""" self.make_approvals() response = self.client.get(self.url, {'search': 'xxx'}) assert response.status_code == 200 assert pq(response.content)('.no-results').length == 1 @patch('olympia.activity.models.ActivityLog.arguments', new=Mock) def test_addon_missing(self): self.make_approvals() response = self.client.get(self.url) assert response.status_code == 200 assert pq(response.content)('#log-listing tr td').eq(1).text() == ( 'Add-on has been deleted.') def test_request_info_logs(self): self.make_an_approval(amo.LOG.REQUEST_INFORMATION) response = self.client.get(self.url) assert response.status_code == 200 assert pq(response.content)('#log-listing tr td a').eq(1).text() == ( 'More information requested') def test_super_review_logs(self): self.make_an_approval(amo.LOG.REQUEST_ADMIN_REVIEW_CODE) response = self.client.get(self.url) assert response.status_code == 200 assert pq(response.content)('#log-listing tr td a').eq(1).text() == ( 'Admin add-on-review requested') def test_comment_logs(self): self.make_an_approval(amo.LOG.COMMENT_VERSION) response = self.client.get(self.url) assert response.status_code == 200 assert pq(response.content)('#log-listing tr td a').eq(1).text() == ( 'Commented') def test_content_approval(self): self.make_an_approval(amo.LOG.APPROVE_CONTENT) response = self.client.get(self.url) assert response.status_code == 200 link = pq(response.content)('#log-listing tbody td a').eq(1)[0] assert link.attrib['href'] == '/en-US/reviewers/review-content/a3615' assert link.text_content().strip() == 'Content approved' def test_content_rejection(self): self.make_an_approval(amo.LOG.REJECT_CONTENT) response = self.client.get(self.url) assert response.status_code == 200 link = pq(response.content)('#log-listing tbody td a').eq(1)[0] assert link.attrib['href'] == '/en-US/reviewers/review-content/a3615' assert link.text_content().strip() == 'Content rejected' @freeze_time('2017-08-03') def test_review_url(self): self.login_as_admin() addon = addon_factory() unlisted_version = version_factory( addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED) ActivityLog.create( amo.LOG.APPROVE_VERSION, addon, addon.current_version, user=self.get_user(), details={'comments': 'foo'}) response = self.client.get(self.url) assert response.status_code == 200 url = reverse('reviewers.review', args=[addon.slug]) link = pq(response.content)( '#log-listing tbody tr[data-addonid] a').eq(1) assert link.attr('href') == url entry = ActivityLog.create( amo.LOG.APPROVE_VERSION, addon, unlisted_version, 
user=self.get_user(), details={'comments': 'foo'}) # Force the latest entry to be at the top of the list so that we can # pick it more reliably later from the HTML entry.update(created=datetime.now() + timedelta(days=1)) response = self.client.get(self.url) url = reverse( 'reviewers.review', args=['unlisted', addon.slug]) assert pq(response.content)( '#log-listing tr td a').eq(1).attr('href') == url class TestDashboard(TestCase): def setUp(self): self.url = reverse('reviewers.dashboard') self.user = user_factory() self.client.login(email=self.user.email) def test_old_temporary_url_redirect(self): response = self.client.get('/en-US/reviewers/dashboard') self.assert3xx( response, reverse('reviewers.dashboard'), status_code=301) def test_not_a_reviewer(self): response = self.client.get(self.url) assert response.status_code == 403 def test_admin_all_permissions(self): # Create a lot of add-ons to test the queue counts. # Nominated and pending. addon_factory( status=amo.STATUS_NOMINATED, type=amo.ADDON_STATICTHEME, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) version_factory( addon=addon_factory(), file_kw={'status': amo.STATUS_AWAITING_REVIEW}) version_factory( addon=addon_factory(), file_kw={'status': amo.STATUS_AWAITING_REVIEW}) under_admin_review = addon_factory( status=amo.STATUS_NOMINATED, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) AddonReviewerFlags.objects.create( addon=under_admin_review, needs_admin_code_review=True) under_admin_review_and_pending = addon_factory() AddonReviewerFlags.objects.create( addon=under_admin_review_and_pending, needs_admin_theme_review=True) version_factory( addon=under_admin_review_and_pending, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) # Auto-approved and Content Review. addon1 = addon_factory( version_kw={'is_webextension': True}) AddonApprovalsCounter.reset_for_addon(addon=addon1) AutoApprovalSummary.objects.create( version=addon1.current_version, verdict=amo.AUTO_APPROVED) under_content_review = addon_factory( version_kw={'is_webextension': True}) AddonApprovalsCounter.reset_for_addon(addon=under_content_review) AutoApprovalSummary.objects.create( version=under_content_review.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=under_content_review, needs_admin_content_review=True) addon2 = addon_factory( version_kw={'is_webextension': True}) AddonApprovalsCounter.reset_for_addon(addon=addon2) AutoApprovalSummary.objects.create( version=addon2.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=addon2, needs_admin_content_review=True) under_code_review = addon_factory( version_kw={'is_webextension': True}) AddonApprovalsCounter.reset_for_addon(addon=under_code_review) AutoApprovalSummary.objects.create( version=under_code_review.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=under_code_review, needs_admin_code_review=True) admins_group = Group.objects.create(name='Admins', rules='*:*') GroupUser.objects.create(user=self.user, group=admins_group) # Addon with expired info request expired = addon_factory(name=u'Expired') AddonReviewerFlags.objects.create( addon=expired, pending_info_request=self.days_ago(42)) # Rating rating = Rating.objects.create( addon=addon1, version=addon1.current_version, user=self.user, flag=True, body=u'This âdd-on sucks!!111', rating=1, editorreview=True) rating.ratingflag_set.create() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) 
== 8 # All sections are present. expected_links = [ reverse('reviewers.queue_nominated'), reverse('reviewers.queue_pending'), reverse('reviewers.performance'), reverse('reviewers.reviewlog'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', 'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines', reverse('reviewers.queue_auto_approved'), reverse('reviewers.performance'), reverse('reviewers.reviewlog'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', reverse('reviewers.queue_content_review'), reverse('reviewers.performance'), reverse('reviewers.themes.list'), reverse('reviewers.themes.list_rereview'), reverse('reviewers.themes.list_flagged'), reverse('reviewers.themes.logs'), reverse('reviewers.themes.deleted'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines', reverse('reviewers.queue_moderated'), reverse('reviewers.ratings_moderation_log'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation', reverse('reviewers.unlisted_queue_all'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', reverse('reviewers.motd'), reverse('reviewers.queue_expired_info_requests'), ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links assert doc('.dashboard a')[0].text == 'New (2)' assert doc('.dashboard a')[1].text == 'Updates (3)' assert doc('.dashboard a')[6].text == 'Auto Approved Add-ons (4)' assert doc('.dashboard a')[10].text == 'Content Review (4)' assert (doc('.dashboard a')[18].text == 'Ratings Awaiting Moderation (1)') assert (doc('.dashboard a')[24].text == 'Expired Information Requests (1)') def test_can_see_all_through_reviewer_view_all_permission(self): self.grant_permission(self.user, 'ReviewerTools:View') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 8 # All sections are present. expected_links = [ reverse('reviewers.queue_nominated'), reverse('reviewers.queue_pending'), reverse('reviewers.performance'), reverse('reviewers.reviewlog'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', 'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines', reverse('reviewers.queue_auto_approved'), reverse('reviewers.performance'), reverse('reviewers.reviewlog'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', reverse('reviewers.queue_content_review'), reverse('reviewers.performance'), reverse('reviewers.themes.list'), reverse('reviewers.themes.list_rereview'), reverse('reviewers.themes.list_flagged'), reverse('reviewers.themes.logs'), reverse('reviewers.themes.deleted'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines', reverse('reviewers.queue_moderated'), reverse('reviewers.ratings_moderation_log'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation', reverse('reviewers.unlisted_queue_all'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', reverse('reviewers.motd'), reverse('reviewers.queue_expired_info_requests'), ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links def test_legacy_reviewer(self): # Create some add-ons to test the queue counts. addon_factory( status=amo.STATUS_NOMINATED, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) version_factory( addon=addon_factory(), file_kw={'status': amo.STATUS_AWAITING_REVIEW}) version_factory( addon=addon_factory(), file_kw={'status': amo.STATUS_AWAITING_REVIEW}) # These two are under admin review and will be ignored. 
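# (These admin-flagged add-ons apparently only count for admins; compare test_admin_all_permissions above, where the 'New'/'Updates' totals do include them.)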
under_admin_review = addon_factory( status=amo.STATUS_NOMINATED, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) AddonReviewerFlags.objects.create( addon=under_admin_review, needs_admin_code_review=True) under_admin_review_and_pending = addon_factory() AddonReviewerFlags.objects.create( addon=under_admin_review_and_pending, needs_admin_code_review=True) version_factory( addon=under_admin_review_and_pending, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) # This is a static theme so won't be shown addon_factory( status=amo.STATUS_NOMINATED, type=amo.ADDON_STATICTHEME, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) # Grant user the permission to see only the legacy add-ons section. self.grant_permission(self.user, 'Addons:Review') # Test. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 1 expected_links = [ reverse('reviewers.queue_nominated'), reverse('reviewers.queue_pending'), reverse('reviewers.performance'), reverse('reviewers.reviewlog'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links assert doc('.dashboard a')[0].text == 'New (1)' assert doc('.dashboard a')[1].text == 'Updates (2)' def test_post_reviewer(self): # Create an add-on to test the queue count. It's under admin content # review but that does not have an impact. addon = addon_factory( version_kw={'is_webextension': True}) AddonApprovalsCounter.reset_for_addon(addon=addon) AutoApprovalSummary.objects.create( version=addon.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=addon, needs_admin_content_review=True) # This one however is under admin code review, it's ignored. under_code_review = addon_factory( version_kw={'is_webextension': True}) AddonApprovalsCounter.reset_for_addon(addon=under_code_review) AutoApprovalSummary.objects.create( version=under_code_review.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=under_code_review, needs_admin_code_review=True) # Grant user the permission to see only the Auto Approved section. self.grant_permission(self.user, 'Addons:PostReview') # Test. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 1 expected_links = [ reverse('reviewers.queue_auto_approved'), reverse('reviewers.performance'), reverse('reviewers.reviewlog'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links assert doc('.dashboard a')[0].text == 'Auto Approved Add-ons (1)' def test_content_reviewer(self): # Create an add-on to test the queue count. It's under admin code # review but that does not have an impact. addon = addon_factory( version_kw={'is_webextension': True}) AddonApprovalsCounter.reset_for_addon(addon=addon) AutoApprovalSummary.objects.create( version=addon.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=addon, needs_admin_code_review=True) # This one is under admin *content* review so it's ignored. 
under_content_review = addon_factory( version_kw={'is_webextension': True}) AddonApprovalsCounter.reset_for_addon(addon=under_content_review) AutoApprovalSummary.objects.create( version=under_content_review.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=under_content_review, needs_admin_content_review=True) # Grant user the permission to see only the Content Review section. self.grant_permission(self.user, 'Addons:ContentReview') # Test. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 1 expected_links = [ reverse('reviewers.queue_content_review'), reverse('reviewers.performance'), ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links assert doc('.dashboard a')[0].text == 'Content Review (1)' def test_themes_reviewer(self): # Create some themes to test the queue counts. addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_PENDING) addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_PENDING) addon = addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_PUBLIC) RereviewQueueTheme.objects.create(theme=addon.persona) addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_REVIEW_PENDING) addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_REVIEW_PENDING) addon_factory(type=amo.ADDON_PERSONA, status=amo.STATUS_REVIEW_PENDING) # Grant user the permission to see only the themes section. self.grant_permission(self.user, 'Personas:Review') # Test. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 1 expected_links = [ reverse('reviewers.themes.list'), reverse('reviewers.themes.list_rereview'), reverse('reviewers.themes.list_flagged'), reverse('reviewers.themes.logs'), reverse('reviewers.themes.deleted'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines', ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links assert doc('.dashboard a')[0].text == 'New Themes (2)' assert doc('.dashboard a')[1].text == 'Themes Updates (1)' assert doc('.dashboard a')[2].text == 'Flagged Themes (3)' def test_ratings_moderator(self): # Create an rating to test the queue count. addon = addon_factory() user = user_factory() rating = Rating.objects.create( addon=addon, version=addon.current_version, user=user, flag=True, body=u'This âdd-on sucks!!111', rating=1, editorreview=True) rating.ratingflag_set.create() # Grant user the permission to see only the ratings to review section. self.grant_permission(self.user, 'Ratings:Moderate') # Test. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 1 expected_links = [ reverse('reviewers.queue_moderated'), reverse('reviewers.ratings_moderation_log'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation', ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links assert doc('.dashboard a')[0].text == 'Ratings Awaiting Moderation (1)' def test_unlisted_reviewer(self): # Grant user the permission to see only the unlisted add-ons section. self.grant_permission(self.user, 'Addons:ReviewUnlisted') # Test. 
response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 1 expected_links = [ reverse('reviewers.unlisted_queue_all'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links def test_static_theme_reviewer(self): # Create some static themes to test the queue counts. addon_factory( status=amo.STATUS_NOMINATED, type=amo.ADDON_STATICTHEME, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) version_factory( addon=addon_factory(type=amo.ADDON_STATICTHEME), file_kw={'status': amo.STATUS_AWAITING_REVIEW}) version_factory( addon=addon_factory(type=amo.ADDON_STATICTHEME,), file_kw={'status': amo.STATUS_AWAITING_REVIEW}) # These two are under admin review and will be ignored. under_admin_review = addon_factory( status=amo.STATUS_NOMINATED, type=amo.ADDON_STATICTHEME, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) AddonReviewerFlags.objects.create( addon=under_admin_review, needs_admin_theme_review=True) under_admin_review_and_pending = addon_factory( type=amo.ADDON_STATICTHEME) AddonReviewerFlags.objects.create( addon=under_admin_review_and_pending, needs_admin_theme_review=True) version_factory( addon=under_admin_review_and_pending, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) # This is an extension so won't be shown addon_factory( status=amo.STATUS_NOMINATED, type=amo.ADDON_EXTENSION, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) # Grant user the permission to see only the legacy add-ons section. self.grant_permission(self.user, 'Addons:ThemeReview') # Test. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 1 expected_links = [ reverse('reviewers.queue_nominated'), reverse('reviewers.queue_pending'), reverse('reviewers.performance'), reverse('reviewers.reviewlog'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Themes/Guidelines', ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links assert doc('.dashboard a')[0].text == 'New (1)' assert doc('.dashboard a')[1].text == 'Updates (2)' def test_post_reviewer_and_content_reviewer(self): # Create add-ons to test the queue count. The first add-on has its # content approved, so the post review queue should contain 2 add-ons, # and the content review queue only 1. addon = addon_factory( version_kw={'is_webextension': True}) AutoApprovalSummary.objects.create( version=addon.current_version, verdict=amo.AUTO_APPROVED) AddonApprovalsCounter.approve_content_for_addon(addon=addon) addon = addon_factory( version_kw={'is_webextension': True}) AddonApprovalsCounter.reset_for_addon(addon=addon) AutoApprovalSummary.objects.create( version=addon.current_version, verdict=amo.AUTO_APPROVED) # Grant user the permission to see both the Content Review and the # Auto Approved Add-ons sections. self.grant_permission(self.user, 'Addons:ContentReview') self.grant_permission(self.user, 'Addons:PostReview') # Test. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 2 # 2 sections are shown. 
expected_links = [ reverse('reviewers.queue_auto_approved'), reverse('reviewers.performance'), reverse('reviewers.reviewlog'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', reverse('reviewers.queue_content_review'), reverse('reviewers.performance'), ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links assert doc('.dashboard a')[0].text == 'Auto Approved Add-ons (2)' assert 'target' not in doc('.dashboard a')[0].attrib assert doc('.dashboard a')[3].text == 'Review Guide' assert doc('.dashboard a')[3].attrib['target'] == '_blank' assert doc('.dashboard a')[3].attrib['rel'] == 'noopener noreferrer' assert doc('.dashboard a')[4].text == 'Content Review (1)' def test_legacy_reviewer_and_ratings_moderator(self): # Grant user the permission to see both the legacy add-ons and the # ratings moderation sections. self.grant_permission(self.user, 'Addons:Review') self.grant_permission(self.user, 'Ratings:Moderate') # Test. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.dashboard h3')) == 2 expected_links = [ reverse('reviewers.queue_nominated'), reverse('reviewers.queue_pending'), reverse('reviewers.performance'), reverse('reviewers.reviewlog'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide', reverse('reviewers.queue_moderated'), reverse('reviewers.ratings_moderation_log'), 'https://wiki.mozilla.org/Add-ons/Reviewers/Guide/Moderation', ] links = [link.attrib['href'] for link in doc('.dashboard a')] assert links == expected_links assert doc('.dashboard a')[0].text == 'New (0)' assert 'target' not in doc('.dashboard a')[0].attrib assert doc('.dashboard a')[1].text == 'Updates (0)' assert doc('.dashboard a')[5].text == 'Ratings Awaiting Moderation (0)' assert 'target' not in doc('.dashboard a')[6].attrib assert doc('.dashboard a')[7].text == 'Moderation Guide' assert doc('.dashboard a')[7].attrib['target'] == '_blank' assert doc('.dashboard a')[7].attrib['rel'] == 'noopener noreferrer' class QueueTest(ReviewerTest): fixtures = ['base/users'] listed = True def setUp(self): super(QueueTest, self).setUp() self.user = UserProfile.objects.get(email='reviewer@mozilla.com') self.login_as_reviewer() if self.listed is False: # Testing unlisted views: needs Addons:ReviewUnlisted perm. 
self.grant_permission(self.user, 'Addons:ReviewUnlisted') self.url = reverse('reviewers.queue_pending') self.addons = OrderedDict() self.expected_addons = [] self.channel_name = 'listed' if self.listed else 'unlisted' def generate_files(self, subset=None, files=None): if subset is None: subset = [] files = files or OrderedDict([ ('Pending One', { 'version_str': '0.1', 'addon_status': amo.STATUS_PUBLIC, 'file_status': amo.STATUS_AWAITING_REVIEW, }), ('Pending Two', { 'version_str': '0.1', 'addon_status': amo.STATUS_PUBLIC, 'file_status': amo.STATUS_AWAITING_REVIEW, }), ('Nominated One', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, }), ('Nominated Two', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, }), ('Public', { 'version_str': '0.1', 'addon_status': amo.STATUS_PUBLIC, 'file_status': amo.STATUS_PUBLIC, }), ]) results = OrderedDict() channel = (amo.RELEASE_CHANNEL_LISTED if self.listed else amo.RELEASE_CHANNEL_UNLISTED) for name, attrs in files.iteritems(): if not subset or name in subset: version_kw = attrs.get('version_kw', {}) version_kw.update( {'channel': channel, 'version': attrs.pop('version_str')}) attrs['version_kw'] = version_kw file_kw = attrs.get('file_kw', {}) file_kw.update({'status': attrs.pop('file_status')}) attrs['file_kw'] = file_kw results[name] = addon_factory( status=attrs.pop('addon_status'), name=name, **attrs) self.addons.update(results) return results def generate_file(self, name): return self.generate_files([name])[name] def get_review_data(self): # Format: (Created n days ago, # percentages of [< 5, 5-10, >10]) return ((1, (0, 0, 100)), (8, (0, 50, 50)), (12, (50, 0, 50))) def get_addon_latest_version(self, addon): if self.listed: channel = amo.RELEASE_CHANNEL_LISTED else: channel = amo.RELEASE_CHANNEL_UNLISTED return addon.find_latest_version(channel=channel) def get_queue(self, addon): version = self.get_addon_latest_version(addon) assert version.current_queue.objects.filter(id=addon.id).count() == 1 def get_expected_addons_by_names(self, names): expected_addons = [] files = self.generate_files() for name in sorted(names): if name in files: expected_addons.append(files[name]) # Make sure all elements have been added assert len(expected_addons) == len(names) return expected_addons def _test_get_queue(self): for addon in self.expected_addons: self.get_queue(addon) def _test_queue_layout(self, name, tab_position, total_addons, total_queues, per_page=None): args = {'per_page': per_page} if per_page else {} response = self.client.get(self.url, args) assert response.status_code == 200 doc = pq(response.content) links = doc('.tabnav li a') link = links.eq(tab_position) assert links.length == total_queues assert link.text() == '%s (%s)' % (name, total_addons) assert link.attr('href') == self.url if per_page: assert doc('.data-grid-top .num-results').text() == ( u'Results %s\u20131 of %s' % (per_page, total_addons)) def _test_results(self): response = self.client.get(self.url) assert response.status_code == 200 expected = [] if not len(self.expected_addons): raise AssertionError('self.expected_addons was an empty list') for idx, addon in enumerate(self.expected_addons): latest_version = self.get_addon_latest_version(addon) assert latest_version name = '%s %s' % (unicode(addon.name), latest_version.version) if self.channel_name == 'listed': # We typically don't include the channel name if it's the # default one, 'listed'. 
channel = [] else: channel = [self.channel_name] url = reverse('reviewers.review', args=channel + [addon.slug]) expected.append((name, url)) doc = pq(response.content) links = doc('#addon-queue tr.addon-row td a:not(.app-icon)') assert len(links) == len(self.expected_addons) check_links(expected, links, verify=False) return doc class TestQueueBasics(QueueTest): def test_only_viewable_by_reviewer(self): # Addon reviewer has access. response = self.client.get(self.url) assert response.status_code == 200 # Regular user doesn't have access. self.client.logout() assert self.client.login(email='regular@mozilla.com') response = self.client.get(self.url) assert response.status_code == 403 # Persona reviewer doesn't have access either. self.client.logout() assert self.client.login(email='persona_reviewer@mozilla.com') response = self.client.get(self.url) assert response.status_code == 403 def test_invalid_page(self): response = self.client.get(self.url, {'page': 999}) assert response.status_code == 200 assert response.context['page'].number == 1 def test_invalid_per_page(self): response = self.client.get(self.url, {'per_page': '<garbage>'}) # No exceptions: assert response.status_code == 200 @patch.multiple('olympia.reviewers.views', REVIEWS_PER_PAGE_MAX=1, REVIEWS_PER_PAGE=1) def test_max_per_page(self): self.generate_files() response = self.client.get(self.url, {'per_page': '2'}) assert response.status_code == 200 doc = pq(response.content) assert doc('.data-grid-top .num-results').text() == ( u'Results 1\u20131 of 2') @patch('olympia.reviewers.views.REVIEWS_PER_PAGE', new=1) def test_reviews_per_page(self): self.generate_files() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('.data-grid-top .num-results').text() == ( u'Results 1\u20131 of 2') def test_grid_headers(self): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected = [ 'Add-on', 'Type', 'Waiting Time', 'Flags', ] assert [pq(th).text() for th in doc('#addon-queue tr th')[1:]] == ( expected) def test_grid_headers_sort_after_search(self): params = dict(searching=['True'], text_query=['abc'], addon_type_ids=['2'], sort=['addon_type_id']) response = self.client.get(self.url, params) assert response.status_code == 200 tr = pq(response.content)('#addon-queue tr') sorts = { # Column index => sort. 1: 'addon_name', # Add-on. 2: '-addon_type_id', # Type. 3: 'waiting_time_min', # Waiting Time. } for idx, sort in sorts.iteritems(): # Get column link. a = tr('th').eq(idx).find('a') # Update expected GET parameters with sort type. params.update(sort=[sort]) # Parse querystring of link to make sure `sort` type is correct. assert urlparse.parse_qs(a.attr('href').split('?')[1]) == params def test_no_results(self): response = self.client.get(self.url) assert response.status_code == 200 assert pq(response.content)('.queue-outer .no-results').length == 1 def test_no_paginator_when_on_single_page(self): response = self.client.get(self.url) assert response.status_code == 200 assert pq(response.content)('.pagination').length == 0 def test_paginator_when_many_pages(self): # 'Pending One' and 'Pending Two' should be the only add-ons in # the pending queue, but we'll generate them all for good measure. 
self.generate_files() response = self.client.get(self.url, {'per_page': 1}) assert response.status_code == 200 doc = pq(response.content) assert doc('.data-grid-top .num-results').text() == ( u'Results 1\u20131 of 2') assert doc('.data-grid-bottom .num-results').text() == ( u'Results 1\u20131 of 2') def test_legacy_queue_sort(self): sorts = ( ['age', 'Waiting Time'], ['name', 'Add-on'], ['type', 'Type'], ) for key, text in sorts: response = self.client.get(self.url, {'sort': key}) assert response.status_code == 200 assert pq(response.content)('th.ordered a').text() == text def test_flags_jetpack(self): addon = addon_factory( status=amo.STATUS_NOMINATED, name='Jetpack', version_kw={'version': '0.1'}, file_kw={'status': amo.STATUS_AWAITING_REVIEW, 'jetpack_version': 1.2}) r = self.client.get(reverse('reviewers.queue_nominated')) rows = pq(r.content)('#addon-queue tr.addon-row') assert rows.length == 1 assert rows.attr('data-addon') == str(addon.id) assert rows.find('td').eq(1).text() == 'Jetpack 0.1' assert rows.find('.ed-sprite-jetpack').length == 1 def test_flags_is_restart_required(self): addon = addon_factory( status=amo.STATUS_NOMINATED, name='Some Add-on', version_kw={'version': '0.1'}, file_kw={'status': amo.STATUS_AWAITING_REVIEW, 'is_restart_required': True}) r = self.client.get(reverse('reviewers.queue_nominated')) rows = pq(r.content)('#addon-queue tr.addon-row') assert rows.length == 1 assert rows.attr('data-addon') == str(addon.id) assert rows.find('td').eq(1).text() == 'Some Add-on 0.1' assert rows.find('.ed-sprite-jetpack').length == 0 assert rows.find('.ed-sprite-is_restart_required').length == 1 def test_flags_is_restart_required_false(self): addon = addon_factory( status=amo.STATUS_NOMINATED, name='Restartless', version_kw={'version': '0.1'}, file_kw={'status': amo.STATUS_AWAITING_REVIEW, 'is_restart_required': False}) r = self.client.get(reverse('reviewers.queue_nominated')) rows = pq(r.content)('#addon-queue tr.addon-row') assert rows.length == 1 assert rows.attr('data-addon') == str(addon.id) assert rows.find('td').eq(1).text() == 'Restartless 0.1' assert rows.find('.ed-sprite-jetpack').length == 0 assert rows.find('.ed-sprite-is_restart_required').length == 0 def test_tabnav_permissions(self): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) links = doc('.tabnav li a').map(lambda i, e: e.attrib['href']) expected = [ reverse('reviewers.queue_nominated'), reverse('reviewers.queue_pending'), ] assert links == expected self.grant_permission(self.user, 'Ratings:Moderate') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) links = doc('.tabnav li a').map(lambda i, e: e.attrib['href']) expected.append(reverse('reviewers.queue_moderated')) assert links == expected self.grant_permission(self.user, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) links = doc('.tabnav li a').map(lambda i, e: e.attrib['href']) expected.append(reverse('reviewers.queue_auto_approved')) assert links == expected self.grant_permission(self.user, 'Addons:ContentReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) links = doc('.tabnav li a').map(lambda i, e: e.attrib['href']) expected.append(reverse('reviewers.queue_content_review')) assert links == expected self.grant_permission(self.user, 'Reviews:Admin') response = self.client.get(self.url) assert response.status_code == 200 doc = 
pq(response.content) links = doc('.tabnav li a').map(lambda i, e: e.attrib['href']) expected.append(reverse('reviewers.queue_expired_info_requests')) assert links == expected class TestPendingQueue(QueueTest): def setUp(self): super(TestPendingQueue, self).setUp() # These should be the only ones present. self.expected_addons = self.get_expected_addons_by_names( ['Pending One', 'Pending Two']) self.url = reverse('reviewers.queue_pending') def test_results(self): self._test_results() def test_queue_layout(self): self._test_queue_layout('Updates', tab_position=1, total_addons=2, total_queues=2) def test_get_queue(self): self._test_get_queue() def test_webextensions_filtered_out_because_of_post_review(self): version = self.addons['Pending Two'].find_latest_version( channel=amo.RELEASE_CHANNEL_LISTED) version.files.update(is_webextension=True) # Webextensions are filtered out from the queue since auto_approve is # taking care of them. self.expected_addons = [self.addons['Pending One']] self._test_results() def test_webextension_with_auto_approval_disabled_false_filtered_out(self): version = self.addons['Pending Two'].find_latest_version( channel=amo.RELEASE_CHANNEL_LISTED) version.files.update(is_webextension=True) AddonReviewerFlags.objects.create( addon=self.addons['Pending Two'], auto_approval_disabled=False) self.expected_addons = [self.addons['Pending One']] self._test_results() def test_webextension_with_auto_approval_disabled_does_show_up(self): version = self.addons['Pending Two'].find_latest_version( channel=amo.RELEASE_CHANNEL_LISTED) version.files.update(is_webextension=True) version = self.addons['Pending One'].find_latest_version( channel=amo.RELEASE_CHANNEL_LISTED) version.files.update(is_webextension=True) AddonReviewerFlags.objects.create( addon=self.addons['Pending One'], auto_approval_disabled=True) self.expected_addons = [self.addons['Pending One']] self._test_results() def test_static_theme_filtered_out(self): self.addons['Pending Two'].update(type=amo.ADDON_STATICTHEME) # Static Theme shouldn't be shown self.expected_addons = [self.addons['Pending One']] self._test_results() # Unless you have that permission also self.grant_permission(self.user, 'Addons:ThemeReview') self.expected_addons = [ self.addons['Pending One'], self.addons['Pending Two']] self._test_results() class TestStaticThemePendingQueue(QueueTest): def setUp(self): super(TestStaticThemePendingQueue, self).setUp() # These should be the only ones present. self.expected_addons = self.get_expected_addons_by_names( ['Pending One', 'Pending Two']) Addon.objects.all().update(type=amo.ADDON_STATICTHEME) self.url = reverse('reviewers.queue_pending') GroupUser.objects.filter(user=self.user).delete() self.grant_permission(self.user, 'Addons:ThemeReview') def test_results(self): self._test_results() def test_queue_layout(self): self._test_queue_layout('Updates', tab_position=1, total_addons=2, total_queues=2) def test_get_queue(self): self._test_get_queue() def test_extensions_filtered_out(self): self.addons['Pending Two'].update(type=amo.ADDON_EXTENSION) # Extensions shouldn't be shown self.expected_addons = [self.addons['Pending One']] self._test_results() # Unless you have that permission also self.grant_permission(self.user, 'Addons:Review') self.expected_addons = [ self.addons['Pending One'], self.addons['Pending Two']] self._test_results() class TestNominatedQueue(QueueTest): def setUp(self): super(TestNominatedQueue, self).setUp() # These should be the only ones present.
self.expected_addons = self.get_expected_addons_by_names( ['Nominated One', 'Nominated Two']) self.url = reverse('reviewers.queue_nominated') def test_results(self): self._test_results() def test_results_two_versions(self): version1 = self.addons['Nominated One'].versions.all()[0] version2 = self.addons['Nominated Two'].versions.all()[0] file_ = version2.files.get() # Versions are ordered by creation date, so make sure they're set. past = self.days_ago(1) version2.update(created=past, nomination=past) # Create another version, v0.2, by "cloning" v0.1. version2.pk = None version2.version = '0.2' version2.save() # Reset creation date once it has been saved. future = datetime.now() - timedelta(seconds=1) version2.update(created=future, nomination=future) # Associate v0.2 it with a file. file_.pk = None file_.version = version2 file_.save() # disable old files like Version.from_upload() would. version2.disable_old_files() response = self.client.get(self.url) assert response.status_code == 200 expected = [ ('Nominated One 0.1', reverse('reviewers.review', args=[version1.addon.slug])), ('Nominated Two 0.2', reverse('reviewers.review', args=[version2.addon.slug])), ] doc = pq(response.content) check_links( expected, doc('#addon-queue tr.addon-row td a:not(.app-icon)'), verify=False) def test_queue_layout(self): self._test_queue_layout('New', tab_position=0, total_addons=2, total_queues=2) def test_get_queue(self): self._test_get_queue() def test_webextensions_filtered_out_because_of_post_review(self): version = self.addons['Nominated Two'].find_latest_version( channel=amo.RELEASE_CHANNEL_LISTED) version.files.update(is_webextension=True) # Webextensions are filtered out from the queue since auto_approve is # taking care of them. self.expected_addons = [self.addons['Nominated One']] self._test_results() def test_webextension_with_auto_approval_disabled_false_filtered_out(self): version = self.addons['Nominated Two'].find_latest_version( channel=amo.RELEASE_CHANNEL_LISTED) version.files.update(is_webextension=True) AddonReviewerFlags.objects.create( addon=self.addons['Nominated Two'], auto_approval_disabled=False) self.expected_addons = [self.addons['Nominated One']] self._test_results() def test_webextension_with_auto_approval_disabled_does_show_up(self): version = self.addons['Nominated Two'].find_latest_version( channel=amo.RELEASE_CHANNEL_LISTED) version.files.update(is_webextension=True) version = self.addons['Nominated One'].find_latest_version( channel=amo.RELEASE_CHANNEL_LISTED) version.files.update(is_webextension=True) AddonReviewerFlags.objects.create( addon=self.addons['Nominated One'], auto_approval_disabled=True) self.expected_addons = [self.addons['Nominated One']] self._test_results() def test_static_theme_filtered_out(self): self.addons['Nominated Two'].update(type=amo.ADDON_STATICTHEME) # Static Theme shouldn't be shown self.expected_addons = [self.addons['Nominated One']] self._test_results() # Unless you have that permission also self.grant_permission(self.user, 'Addons:ThemeReview') self.expected_addons = [ self.addons['Nominated One'], self.addons['Nominated Two']] self._test_results() class TestStaticThemeNominatedQueue(QueueTest): def setUp(self): super(TestStaticThemeNominatedQueue, self).setUp() # These should be the only ones present. 
self.expected_addons = self.get_expected_addons_by_names( ['Nominated One', 'Nominated Two']) self.url = reverse('reviewers.queue_nominated') Addon.objects.all().update(type=amo.ADDON_STATICTHEME) GroupUser.objects.filter(user=self.user).delete() self.grant_permission(self.user, 'Addons:ThemeReview') def test_results(self): self._test_results() def test_results_two_versions(self): version1 = self.addons['Nominated One'].versions.all()[0] version2 = self.addons['Nominated Two'].versions.all()[0] file_ = version2.files.get() # Versions are ordered by creation date, so make sure they're set. past = self.days_ago(1) version2.update(created=past, nomination=past) # Create another version, v0.2, by "cloning" v0.1. version2.pk = None version2.version = '0.2' version2.save() # Reset creation date once it has been saved. future = datetime.now() - timedelta(seconds=1) version2.update(created=future, nomination=future) # Associate v0.2 it with a file. file_.pk = None file_.version = version2 file_.save() # disable old files like Version.from_upload() would. version2.disable_old_files() response = self.client.get(self.url) assert response.status_code == 200 expected = [ ('Nominated One 0.1', reverse('reviewers.review', args=[version1.addon.slug])), ('Nominated Two 0.2', reverse('reviewers.review', args=[version2.addon.slug])), ] doc = pq(response.content) check_links( expected, doc('#addon-queue tr.addon-row td a:not(.app-icon)'), verify=False) def test_queue_layout(self): self._test_queue_layout('New', tab_position=0, total_addons=2, total_queues=2) def test_get_queue(self): self._test_get_queue() def test_static_theme_filtered_out(self): self.addons['Nominated Two'].update(type=amo.ADDON_EXTENSION) # Static Theme shouldn't be shown self.expected_addons = [self.addons['Nominated One']] self._test_results() # Unless you have that permission also self.grant_permission(self.user, 'Addons:Review') self.expected_addons = [ self.addons['Nominated One'], self.addons['Nominated Two']] self._test_results() class TestModeratedQueue(QueueTest): fixtures = ['base/users', 'ratings/dev-reply'] def setUp(self): super(TestModeratedQueue, self).setUp() self.url = reverse('reviewers.queue_moderated') url_flag = reverse('addons.ratings.flag', args=['a1865', 218468]) response = self.client.post(url_flag, {'flag': RatingFlag.SPAM}) assert response.status_code == 200 assert RatingFlag.objects.filter(flag=RatingFlag.SPAM).count() == 1 assert Rating.objects.filter(editorreview=True).count() == 1 self.grant_permission(self.user, 'Ratings:Moderate') def test_results(self): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content)('#reviews-flagged') rows = doc('.review-flagged:not(.review-saved)') assert rows.length == 1 assert rows.find('h3').text() == '' # Default is "Skip." 
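# ('action_1' below is presumably the radio input rendered for the default 'Skip' choice in the flagged-reviews formset.)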
assert doc('#id_form-0-action_1:checked').length == 1 flagged = doc('.reviews-flagged-reasons span.light').text() reviewer = RatingFlag.objects.all()[0].user.name assert flagged.startswith('Flagged by %s' % reviewer), ( 'Unexpected text: %s' % flagged) addon = Addon.objects.get(id=1865) addon.name = u'náme' addon.save() response = self.client.get(self.url) doc = pq(response.content)('#reviews-flagged') rows = doc('.review-flagged:not(.review-saved)') assert rows.length == 1 assert rows.find('h3').text() == u'náme' def setup_actions(self, action): response = self.client.get(self.url) assert response.status_code == 200 form_0_data = initial(response.context['reviews_formset'].forms[0]) assert Rating.objects.filter(addon=1865).count() == 2 formset_data = formset(form_0_data) formset_data['form-0-action'] = action response = self.client.post(self.url, formset_data) self.assert3xx(response, self.url) def test_skip(self): self.setup_actions(ratings.REVIEW_MODERATE_SKIP) # Make sure it's still there. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) rows = doc('#reviews-flagged .review-flagged:not(.review-saved)') assert rows.length == 1 def test_skip_score(self): self.setup_actions(ratings.REVIEW_MODERATE_SKIP) assert ReviewerScore.objects.filter( note_key=amo.REVIEWED_ADDON_REVIEW).count() == 0 def get_logs(self, action): return ActivityLog.objects.filter(action=action.id) def test_remove(self): """Make sure the reviewer tools can delete a review.""" self.setup_actions(ratings.REVIEW_MODERATE_DELETE) logs = self.get_logs(amo.LOG.DELETE_RATING) assert logs.count() == 1 # Make sure it's removed from the queue. response = self.client.get(self.url) assert response.status_code == 200 assert pq(response.content)('#reviews-flagged .no-results').length == 1 response = self.client.get(reverse('reviewers.ratings_moderation_log')) assert pq(response.content)('table .more-details').attr('href') == ( reverse('reviewers.ratings_moderation_log.detail', args=[logs[0].id])) # Make sure it was actually deleted. assert Rating.objects.filter(addon=1865).count() == 1 # But make sure it wasn't *actually* deleted. assert Rating.unfiltered.filter(addon=1865).count() == 2 def test_remove_fails_for_own_addon(self): """ Make sure the reviewer tools can't delete a review for an add-on owned by the user. """ addon = Addon.objects.get(pk=1865) user = UserProfile.objects.get(email='reviewer@mozilla.com') AddonUser(addon=addon, user=user).save() # Make sure the initial count is as expected assert Rating.objects.filter(addon=1865).count() == 2 self.setup_actions(ratings.REVIEW_MODERATE_DELETE) logs = self.get_logs(amo.LOG.DELETE_RATING) assert logs.count() == 0 # Make sure it's not removed from the queue. response = self.client.get(self.url) assert response.status_code == 200 assert pq(response.content)('#reviews-flagged .no-results').length == 0 # Make sure it was not actually deleted. assert Rating.objects.filter(addon=1865).count() == 2 def test_remove_score(self): self.setup_actions(ratings.REVIEW_MODERATE_DELETE) assert ReviewerScore.objects.filter( note_key=amo.REVIEWED_ADDON_REVIEW).count() == 1 def test_keep(self): """Make sure the reviewer tools can remove flags and keep a review.""" self.setup_actions(ratings.REVIEW_MODERATE_KEEP) logs = self.get_logs(amo.LOG.APPROVE_RATING) assert logs.count() == 1 # Make sure it's removed from the queue. 
response = self.client.get(self.url) assert response.status_code == 200 assert pq(response.content)('#reviews-flagged .no-results').length == 1 rating = Rating.objects.filter(addon=1865) # Make sure it's NOT deleted... assert rating.count() == 2 # ...but it's no longer flagged. assert rating.filter(editorreview=1).count() == 0 def test_keep_score(self): self.setup_actions(ratings.REVIEW_MODERATE_KEEP) assert ReviewerScore.objects.filter( note_key=amo.REVIEWED_ADDON_REVIEW).count() == 1 def test_queue_layout(self): # From the fixtures we already have 2 reviews, one is flagged. We add # a bunch of reviews from different scenarios and make sure they don't # count towards the total. # Add a review associated with an normal addon rating = Rating.objects.create( addon=addon_factory(), user=user_factory(), body='show me', editorreview=True) RatingFlag.objects.create(rating=rating) # Add a review associated with an incomplete addon rating = Rating.objects.create( addon=addon_factory(status=amo.STATUS_NULL), user=user_factory(), body='dont show me', editorreview=True) RatingFlag.objects.create(rating=rating) # Add a review associated to an unlisted version addon = addon_factory() version = version_factory( addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED) rating = Rating.objects.create( addon=addon_factory(), version=version, user=user_factory(), body='dont show me either', editorreview=True) RatingFlag.objects.create(rating=rating) self._test_queue_layout('Rating Reviews', tab_position=2, total_addons=2, total_queues=3) def test_no_reviews(self): Rating.objects.all().delete() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content)('#reviews-flagged') assert doc('.no-results').length == 1 assert doc('.review-saved button').length == 1 # Show only one button. def test_do_not_show_reviews_for_non_public_addons(self): Addon.objects.all().update(status=amo.STATUS_NULL) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content)('#reviews-flagged') # There should be no results since all add-ons are not public. assert doc('.no-results').length == 1 def test_do_not_show_reviews_for_unlisted_addons(self): for addon in Addon.objects.all(): self.make_addon_unlisted(addon) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content)('#reviews-flagged') # There should be no results since all add-ons are unlisted. assert doc('.no-results').length == 1 class TestUnlistedAllList(QueueTest): listed = False def setUp(self): super(TestUnlistedAllList, self).setUp() self.url = reverse('reviewers.unlisted_queue_all') # We should have all add-ons. self.expected_addons = self.get_expected_addons_by_names( ['Pending One', 'Pending Two', 'Nominated One', 'Nominated Two', 'Public']) # Need to set unique nomination times or we get a psuedo-random order. 
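# (The unlisted queue is presumably ordered by nomination date, so spacing each nomination one minute apart keeps the order expected by _test_results() deterministic.)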
for idx, addon in enumerate(self.expected_addons): latest_version = addon.find_latest_version( channel=amo.RELEASE_CHANNEL_UNLISTED) latest_version.update( nomination=(datetime.now() - timedelta(minutes=idx))) def test_results(self): self._test_results() def test_review_notes_json(self): latest_version = self.expected_addons[0].find_latest_version( channel=amo.RELEASE_CHANNEL_UNLISTED) log = ActivityLog.create(amo.LOG.APPROVE_VERSION, latest_version, self.expected_addons[0], user=UserProfile.objects.get(pk=999), details={'comments': 'stish goin` down son'}) url = reverse('reviewers.queue_review_text') + str(log.id) response = self.client.get(url) assert response.status_code == 200 assert (json.loads(response.content) == {'reviewtext': 'stish goin` down son'}) class TestAutoApprovedQueue(QueueTest): def setUp(self): super(TestAutoApprovedQueue, self).setUp() self.url = reverse('reviewers.queue_auto_approved') def login_with_permission(self): user = UserProfile.objects.get(email='reviewer@mozilla.com') self.grant_permission(user, 'Addons:PostReview') self.client.login(email=user.email) def get_addon_latest_version(self, addon): """Method used by _test_results() to fetch the version that the queue is supposed to display. Overridden here because in our case, it's not necessarily the latest available version - we display the current public version instead (which is not guaranteed to be the latest auto-approved one, but good enough) for this page.""" return addon.current_version def generate_files(self): """Generate add-ons needed for these tests.""" # Has not been auto-approved. extra_addon = addon_factory(name=u'Extra Addôn 1') AutoApprovalSummary.objects.create( version=extra_addon.current_version, verdict=amo.NOT_AUTO_APPROVED) # Has not been auto-approved either, only dry run. extra_addon2 = addon_factory(name=u'Extra Addôn 2') AutoApprovalSummary.objects.create( version=extra_addon2.current_version, verdict=amo.WOULD_HAVE_BEEN_AUTO_APPROVED) # Has been auto-approved, but that auto-approval has been confirmed by # a human already. extra_addon3 = addon_factory(name=u'Extra Addôn 3') extra_summary3 = AutoApprovalSummary.objects.create( version=extra_addon3.current_version, verdict=amo.AUTO_APPROVED, confirmed=True) AddonApprovalsCounter.objects.create( addon=extra_addon3, counter=1, last_human_review=extra_summary3.created) # Has been auto-approved and reviewed by a human before. addon1 = addon_factory(name=u'Addôn 1') AutoApprovalSummary.objects.create( version=addon1.current_version, verdict=amo.AUTO_APPROVED) AddonApprovalsCounter.objects.create( addon=addon1, counter=1, last_human_review=self.days_ago(42)) # Has been auto-approved twice, last_human_review is somehow None, # the 'created' date will be used to order it (older is higher). addon2 = addon_factory(name=u'Addôn 2') addon2.update(created=self.days_ago(10)) AutoApprovalSummary.objects.create( version=addon2.current_version, verdict=amo.AUTO_APPROVED) AddonApprovalsCounter.objects.create( addon=addon2, counter=1, last_human_review=None) addon2_version2 = version_factory(addon=addon2) AutoApprovalSummary.objects.create( version=addon2_version2, verdict=amo.AUTO_APPROVED) # Has been auto-approved and never been seen by a human, # the 'created' date will be used to order it (newer is lower). 
addon3 = addon_factory(name=u'Addôn 3') addon3.update(created=self.days_ago(2)) AutoApprovalSummary.objects.create( version=addon3.current_version, verdict=amo.AUTO_APPROVED) AddonApprovalsCounter.objects.create( addon=addon3, counter=1, last_human_review=None) # Has been auto-approved, should be first because of its weight. addon4 = addon_factory(name=u'Addôn 4') addon4.update(created=self.days_ago(14)) AutoApprovalSummary.objects.create( version=addon4.current_version, verdict=amo.AUTO_APPROVED, weight=500) AddonApprovalsCounter.objects.create( addon=addon4, counter=0, last_human_review=self.days_ago(1)) self.expected_addons = [addon4, addon2, addon3, addon1] def test_only_viewable_with_specific_permission(self): # Regular addon reviewer does not have access. response = self.client.get(self.url) assert response.status_code == 403 # Regular user doesn't have access. self.client.logout() assert self.client.login(email='regular@mozilla.com') response = self.client.get(self.url) assert response.status_code == 403 def test_results(self): self.login_with_permission() self.generate_files() self._test_results() def test_results_weights(self): addon1 = addon_factory(name=u'Addôn 1') AutoApprovalSummary.objects.create( version=addon1.current_version, verdict=amo.AUTO_APPROVED, weight=amo.POST_REVIEW_WEIGHT_HIGHEST_RISK + 1) AddonApprovalsCounter.reset_for_addon(addon1) addon2 = addon_factory(name=u'Addôn 2') AutoApprovalSummary.objects.create( version=addon2.current_version, verdict=amo.AUTO_APPROVED, weight=amo.POST_REVIEW_WEIGHT_HIGH_RISK + 1) AddonApprovalsCounter.reset_for_addon(addon2) addon3 = addon_factory(name=u'Addôn 3') AutoApprovalSummary.objects.create( version=addon3.current_version, verdict=amo.AUTO_APPROVED, weight=amo.POST_REVIEW_WEIGHT_MEDIUM_RISK + 1) AddonApprovalsCounter.reset_for_addon(addon3) addon4 = addon_factory(name=u'Addôn 4') AutoApprovalSummary.objects.create( version=addon4.current_version, verdict=amo.AUTO_APPROVED, weight=1) AddonApprovalsCounter.reset_for_addon(addon4) self.expected_addons = [addon1, addon2, addon3, addon4] self.login_with_permission() doc = self._test_results() expected = ['risk-highest', 'risk-high', 'risk-medium', 'risk-low'] classnames = [ item.attrib['class'] for item in doc('.addon-row td:eq(4) span')] assert expected == classnames def test_queue_layout(self): self.login_with_permission() self.generate_files() self._test_queue_layout("Auto Approved", tab_position=2, total_addons=4, total_queues=3, per_page=1) class TestExpiredInfoRequestsQueue(QueueTest): def setUp(self): super(TestExpiredInfoRequestsQueue, self).setUp() self.url = reverse('reviewers.queue_expired_info_requests') def generate_files(self): # Extra add-on with no pending info request. addon_factory(name=u'Extra Addôn 1') # Extra add-on with a non-expired pending info request. extra_addon = addon_factory(name=u'Extra Addôn 2') AddonReviewerFlags.objects.create( addon=extra_addon, pending_info_request=datetime.now() + timedelta(days=1)) # Pending addon with expired info request. addon1 = addon_factory(name=u'Pending Addön 1', status=amo.STATUS_NOMINATED) AddonReviewerFlags.objects.create( addon=addon1, pending_info_request=self.days_ago(2)) # Public addon with expired info request. addon2 = addon_factory(name=u'Public Addön 2', status=amo.STATUS_PUBLIC) AddonReviewerFlags.objects.create( addon=addon2, pending_info_request=self.days_ago(42)) # Deleted addon with expired info request. 
addon3 = addon_factory(name=u'Deleted Addön 3', status=amo.STATUS_DELETED) AddonReviewerFlags.objects.create( addon=addon3, pending_info_request=self.days_ago(42)) # Mozilla-disabled addon with expired info request. addon4 = addon_factory(name=u'Disabled Addön 4', status=amo.STATUS_DISABLED) AddonReviewerFlags.objects.create( addon=addon4, pending_info_request=self.days_ago(42)) # Incomplete addon with expired info request. addon5 = addon_factory(name=u'Incomplete Addön 5', status=amo.STATUS_NULL) AddonReviewerFlags.objects.create( addon=addon5, pending_info_request=self.days_ago(42)) # Invisible (user-disabled) addon with expired info request. addon6 = addon_factory(name=u'Incomplete Addön 5', status=amo.STATUS_PUBLIC, disabled_by_user=True) AddonReviewerFlags.objects.create( addon=addon6, pending_info_request=self.days_ago(42)) self.expected_addons = [addon2, addon1] def test_results_no_permission(self): # Addon reviewer doesn't have access. response = self.client.get(self.url) assert response.status_code == 403 # Regular user doesn't have access. self.client.logout() assert self.client.login(email='regular@mozilla.com') response = self.client.get(self.url) assert response.status_code == 403 def test_results(self): self.grant_permission(self.user, 'Reviews:Admin') self.generate_files() self._test_results() class TestContentReviewQueue(QueueTest): def setUp(self): super(TestContentReviewQueue, self).setUp() self.url = reverse('reviewers.queue_content_review') self.channel_name = 'content' def login_with_permission(self): user = UserProfile.objects.get(email='reviewer@mozilla.com') self.grant_permission(user, 'Addons:ContentReview') self.client.login(email=user.email) return user def get_addon_latest_version(self, addon): """Method used by _test_results() to fetch the version that the queue is supposed to display. Overridden here because in our case, it's not necessarily the latest available version - we display the current public version instead (which is not guaranteed to be the latest auto-approved one, but good enough) for this page.""" return addon.current_version def generate_files(self): """Generate add-ons needed for these tests.""" # Has not been auto-approved. extra_addon = addon_factory(name=u'Extra Addôn 1') AutoApprovalSummary.objects.create( version=extra_addon.current_version, verdict=amo.NOT_AUTO_APPROVED, ) # Has not been auto-approved either, only dry run. extra_addon2 = addon_factory(name=u'Extra Addôn 2') AutoApprovalSummary.objects.create( version=extra_addon2.current_version, verdict=amo.WOULD_HAVE_BEEN_AUTO_APPROVED, ) # Has been auto-approved, but that content has been approved by # a human already. extra_addon3 = addon_factory(name=u'Extra Addôn 3') AutoApprovalSummary.objects.create( version=extra_addon3.current_version, verdict=amo.AUTO_APPROVED, confirmed=True) AddonApprovalsCounter.objects.create( addon=extra_addon3, last_content_review=self.days_ago(1)) # This one has never been content-reviewed, but it has the # needs_admin_content_review flag, and we're not an admin. extra_addon4 = addon_factory(name=u'Extra Addön 4') extra_addon4.update(created=self.days_ago(2)) AutoApprovalSummary.objects.create( version=extra_addon4.current_version, verdict=amo.AUTO_APPROVED, confirmed=True) AddonApprovalsCounter.objects.create( addon=extra_addon4, last_content_review=None) AddonReviewerFlags.objects.create( addon=extra_addon4, needs_admin_content_review=True) # This first add-on has been content reviewed so long ago that we # should do it again. 
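# (addon1 was content-reviewed 370 days ago and addon2 842 days ago; the queue presumably only re-surfaces add-ons whose last content review is roughly a year or more old, which is why Extra Addôn 3 above, reviewed a day ago, is excluded.)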
        addon1 = addon_factory(name=u'Addön 1')
        AutoApprovalSummary.objects.create(
            version=addon1.current_version, verdict=amo.AUTO_APPROVED,
            confirmed=True)
        AddonApprovalsCounter.objects.create(
            addon=addon1, last_content_review=self.days_ago(370))

        # This one is quite similar, except its last content review is even
        # older.
        addon2 = addon_factory(name=u'Addön 1')
        AutoApprovalSummary.objects.create(
            version=addon2.current_version, verdict=amo.AUTO_APPROVED,
            confirmed=True)
        AddonApprovalsCounter.objects.create(
            addon=addon2, last_content_review=self.days_ago(842))

        # This one has never been content-reviewed. It has a
        # needs_admin_code_review flag, but that should not have any impact.
        addon3 = addon_factory(name=u'Addön 2')
        addon3.update(created=self.days_ago(2))
        AutoApprovalSummary.objects.create(
            version=addon3.current_version, verdict=amo.AUTO_APPROVED,
            confirmed=True)
        AddonApprovalsCounter.objects.create(
            addon=addon3, last_content_review=None)
        AddonReviewerFlags.objects.create(
            addon=addon3, needs_admin_code_review=True)

        # This one has never been content reviewed either, and it does not
        # even have an AddonApprovalsCounter.
        addon4 = addon_factory(name=u'Addön 3')
        addon4.update(created=self.days_ago(1))
        AutoApprovalSummary.objects.create(
            version=addon4.current_version, verdict=amo.AUTO_APPROVED,
            confirmed=True)
        assert not AddonApprovalsCounter.objects.filter(addon=addon4).exists()

        # Addons with no last_content_review date should be first, ordered by
        # their creation date, older first.
        self.expected_addons = [addon3, addon4, addon2, addon1]

    def test_only_viewable_with_specific_permission(self):
        # Regular addon reviewer does not have access.
        response = self.client.get(self.url)
        assert response.status_code == 403

        # Regular user doesn't have access.
        self.client.logout()
        assert self.client.login(email='regular@mozilla.com')
        response = self.client.get(self.url)
        assert response.status_code == 403

    def test_results(self):
        self.login_with_permission()
        self.generate_files()
        self._test_results()

    def test_queue_layout(self):
        self.login_with_permission()
        self.generate_files()
        self._test_queue_layout('Content Review', tab_position=2,
                                total_addons=4, total_queues=3, per_page=1)

    def test_queue_layout_admin(self):
        # Admins should see the extra add-on that needs admin content review.
        user = self.login_with_permission()
        self.grant_permission(user, 'Reviews:Admin')
        self.generate_files()
        self._test_queue_layout('Content Review', tab_position=2,
                                total_addons=5, total_queues=4)


class TestPerformance(QueueTest):
    fixtures = ['base/users', 'base/addon_3615']

    """Test the page at /reviewers/performance."""

    def setUpReviewer(self):
        self.login_as_reviewer()
        core.set_user(UserProfile.objects.get(username='reviewer'))
        self.create_logs()

    def setUpAdmin(self):
        self.login_as_admin()
        core.set_user(UserProfile.objects.get(username='admin'))
        self.create_logs()

    def get_url(self, args=None):
        if args is None:
            args = []
        return reverse('reviewers.performance', args=args)

    def create_logs(self):
        addon = Addon.objects.all()[0]
        version = addon.versions.all()[0]
        for i in amo.LOG_REVIEWER_REVIEW_ACTION:
            ActivityLog.create(amo.LOG_BY_ID[i], addon, version)
        # Throw in an automatic approval - should be ignored.
ActivityLog.create( amo.LOG.APPROVE_VERSION, addon, version, user=UserProfile.objects.get(id=settings.TASK_USER_ID)) def _test_chart(self): r = self.client.get(self.get_url()) assert r.status_code == 200 doc = pq(r.content) num = len(amo.LOG_REVIEWER_REVIEW_ACTION) label = datetime.now().strftime('%Y-%m') data = {label: {u'teamcount': num, u'teamavg': u'%s.0' % num, u'usercount': num, u'teamamt': 1, u'label': datetime.now().strftime('%b %Y')}} assert json.loads(doc('#monthly').attr('data-chart')) == data def test_performance_chart_reviewer(self): self.setUpReviewer() self._test_chart() def test_performance_chart_as_admin(self): self.setUpAdmin() self._test_chart() def test_usercount_with_more_than_one_reviewer(self): self.client.login(email='clouserw@gmail.com') core.set_user(UserProfile.objects.get(username='clouserw')) self.create_logs() self.setUpReviewer() r = self.client.get(self.get_url()) assert r.status_code == 200 doc = pq(r.content) data = json.loads(doc('#monthly').attr('data-chart')) label = datetime.now().strftime('%Y-%m') assert data[label]['usercount'] == len(amo.LOG_REVIEWER_REVIEW_ACTION) def _test_performance_other_user_as_admin(self): userid = core.get_user().pk r = self.client.get(self.get_url([10482])) doc = pq(r.content) assert doc('#select_user').length == 1 # Let them choose reviewers. options = doc('#select_user option') assert options.length == 3 assert options.eq(2).val() == str(userid) assert 'clouserw' in doc('#reviews_user').text() def test_performance_other_user_as_admin(self): self.setUpAdmin() self._test_performance_other_user_as_admin() def test_performance_other_user_not_admin(self): self.setUpReviewer() r = self.client.get(self.get_url([10482])) doc = pq(r.content) assert doc('#select_user').length == 0 # Don't let them choose. assert doc('#reviews_user').text() == 'Your Reviews' class SearchTest(ReviewerTest): listed = True def setUp(self): super(SearchTest, self).setUp() self.user = UserProfile.objects.get(email='reviewer@mozilla.com') self.login_as_reviewer() if self.listed is False: # Testing unlisted views: needs Addons:ReviewUnlisted perm. 
self.grant_permission(self.user, 'Addons:ReviewUnlisted') def named_addons(self, request): return [ r.record.addon_name for r in request.context['page'].object_list] def search(self, *args, **kw): response = self.client.get(self.url, kw) assert response.status_code == 200 assert response.context['search_form'].errors.as_text() == '' return response class BaseTestQueueSearch(SearchTest): fixtures = ['base/users', 'base/appversion'] __test__ = False # this is an abstract test case def generate_files(self, subset=None): if subset is None: subset = [] files = OrderedDict([ ('Not Needing Admin Review', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, }), ('Another Not Needing Admin Review', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, }), ('Needs Admin Review', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, 'needs_admin_code_review': True, }), ('Justin Bieber Theme', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, 'type': amo.ADDON_THEME, }), ('Justin Bieber Search Bar', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, 'type': amo.ADDON_SEARCH, }), ('Bieber For Mobile', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, 'version_kw': {'application': amo.ANDROID.id}, }), ('Linux Widget', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, }), ('Mac Widget', { 'version_str': '0.1', 'addon_status': amo.STATUS_NOMINATED, 'file_status': amo.STATUS_AWAITING_REVIEW, }), ('Deleted', { 'version_str': '0.1', 'addon_status': amo.STATUS_DELETED, 'file_status': amo.STATUS_AWAITING_REVIEW, }), ]) results = {} channel = (amo.RELEASE_CHANNEL_LISTED if self.listed else amo.RELEASE_CHANNEL_UNLISTED) for name, attrs in files.iteritems(): if not subset or name in subset: version_kw = attrs.get('version_kw', {}) version_kw.update( {'channel': channel, 'version': attrs.pop('version_str')}) attrs['version_kw'] = version_kw file_kw = attrs.get('file_kw', {}) file_kw.update({'status': attrs.pop('file_status')}) attrs['file_kw'] = file_kw attrs.update({'version_kw': version_kw, 'file_kw': file_kw}) needs_admin_code_review = attrs.pop( 'needs_admin_code_review', None) results[name] = addon_factory( status=attrs.pop('addon_status'), name=name, **attrs) if needs_admin_code_review: AddonReviewerFlags.objects.create( addon=results[name], needs_admin_code_review=True) return results def generate_file(self, name): return self.generate_files([name])[name] def test_search_by_needs_admin_code_review_admin(self): self.login_as_admin() self.generate_files(['Not Needing Admin Review', 'Needs Admin Review']) response = self.search(needs_admin_code_review=1) assert response.status_code == 200 assert self.named_addons(response) == ['Needs Admin Review'] def test_queue_counts_admin(self): self.login_as_admin() self.generate_files(['Not Needing Admin Review', 'Needs Admin Review']) response = self.search(text_query='admin', per_page=1) assert response.status_code == 200 doc = pq(response.content) assert doc('.data-grid-top .num-results').text() == ( u'Results 1\u20131 of 2') def test_search_by_addon_name_admin(self): self.login_as_admin() self.generate_files(['Not Needing Admin Review', 'Needs Admin Review', 'Justin Bieber Theme']) response = 
self.search(text_query='admin') assert response.status_code == 200 assert sorted(self.named_addons(response)) == [ 'Needs Admin Review', 'Not Needing Admin Review'] def test_not_searching(self, **kwargs): self.generate_files(['Not Needing Admin Review', 'Needs Admin Review']) response = self.search(**kwargs) assert response.status_code == 200 assert sorted(self.named_addons(response)) == [ 'Not Needing Admin Review'] # We were just displaying the queue, not searching, but the searching # hidden input in the form should always be set to True regardless, it # will be used once the user submits the form. doc = pq(response.content) assert doc('#id_searching').attr('value') == 'True' def test_not_searching_with_param(self): self.test_not_searching(some_param=1) def test_search_by_nothing(self): self.generate_files(['Not Needing Admin Review', 'Needs Admin Review']) response = self.search(searching='True') assert response.status_code == 200 assert sorted(self.named_addons(response)) == ( ['Needs Admin Review', 'Not Needing Admin Review']) def test_search_by_needs_admin_code_review(self): self.generate_files(['Not Needing Admin Review', 'Needs Admin Review']) response = self.search(needs_admin_code_review=1, searching='True') assert response.status_code == 200 assert self.named_addons(response) == ['Needs Admin Review'] def test_queue_counts(self): self.generate_files(['Not Needing Admin Review', 'Another Not Needing Admin Review', 'Needs Admin Review']) response = self.search( text_query='admin', per_page=1, searching='True') assert response.status_code == 200 doc = pq(response.content) assert doc('.data-grid-top .num-results').text() == ( u'Results 1\u20131 of 3') def test_search_by_addon_name(self): self.generate_files(['Not Needing Admin Review', 'Needs Admin Review', 'Justin Bieber Theme']) response = self.search(text_query='admin', searching='True') assert response.status_code == 200 assert sorted(self.named_addons(response)) == ( ['Needs Admin Review', 'Not Needing Admin Review']) def test_search_by_addon_in_locale(self): name = 'Not Needing Admin Review' generated = self.generate_file(name) uni = 'フォクすけといっしょ'.decode('utf8') addon = Addon.objects.get(pk=generated.id) addon.name = {'ja': uni} addon.save() self.url = self.url.replace('/en-US/', '/ja/') response = self.client.get(self.url, {'text_query': uni}, follow=True) assert response.status_code == 200 assert self.named_addons(response) == [name] def test_search_by_addon_author(self): name = 'Not Needing Admin Review' generated = self.generate_file(name) user = UserProfile.objects.all()[0] email = user.email.swapcase() author = AddonUser.objects.create(user=user, addon=generated) for role in [amo.AUTHOR_ROLE_OWNER, amo.AUTHOR_ROLE_DEV]: author.role = role author.save() response = self.search(text_query=email) assert response.status_code == 200 assert self.named_addons(response) == [name] def test_search_by_supported_email_in_locale(self): name = 'Not Needing Admin Review' generated = self.generate_file(name) uni = 'フォクすけといっしょ@site.co.jp'.decode('utf8') addon = Addon.objects.get(pk=generated.id) addon.support_email = {'ja': uni} addon.save() self.url = self.url.replace('/en-US/', '/ja/') response = self.client.get(self.url, {'text_query': uni}, follow=True) assert response.status_code == 200 assert self.named_addons(response) == [name] def test_clear_search_visible(self): response = self.search(text_query='admin', searching=True) assert response.status_code == 200 assert pq(response.content)( '.clear-queue-search').text() == 'clear 
search' def test_clear_search_hidden(self): response = self.search(text_query='admin') assert response.status_code == 200 assert not pq(response.content)('.clear-queue-search').text() class TestQueueSearch(BaseTestQueueSearch): __test__ = True def setUp(self): super(TestQueueSearch, self).setUp() self.url = reverse('reviewers.queue_nominated') def test_search_by_addon_type(self): self.generate_files(['Not Needing Admin Review', 'Justin Bieber Theme', 'Justin Bieber Search Bar']) response = self.search(addon_type_ids=[amo.ADDON_THEME]) assert response.status_code == 200 assert self.named_addons(response) == ['Justin Bieber Theme'] def test_search_by_addon_type_any(self): self.generate_file('Not Needing Admin Review') response = self.search(addon_type_ids=[amo.ADDON_ANY]) assert response.status_code == 200 assert self.named_addons(response), 'Expected some add-ons' def test_search_by_many_addon_types(self): self.generate_files(['Not Needing Admin Review', 'Justin Bieber Theme', 'Justin Bieber Search Bar']) response = self.search(addon_type_ids=[amo.ADDON_THEME, amo.ADDON_SEARCH]) assert response.status_code == 200 assert sorted(self.named_addons(response)) == ( ['Justin Bieber Search Bar', 'Justin Bieber Theme']) def test_search_by_app(self): self.generate_files(['Bieber For Mobile', 'Linux Widget']) response = self.search(application_id=[amo.ANDROID.id]) assert response.status_code == 200 assert self.named_addons(response) == ['Bieber For Mobile'] def test_preserve_multi_apps(self): self.generate_files(['Bieber For Mobile', 'Linux Widget']) channel = (amo.RELEASE_CHANNEL_LISTED if self.listed else amo.RELEASE_CHANNEL_UNLISTED) multi = addon_factory( status=amo.STATUS_NOMINATED, name='Multi Application', version_kw={'channel': channel, 'application': amo.FIREFOX.id}, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) av_min, _ = AppVersion.objects.get_or_create( application=amo.ANDROID.id, version='4.0.99') av_max, _ = AppVersion.objects.get_or_create( application=amo.ANDROID.id, version='5.0.0') ApplicationsVersions.objects.get_or_create( application=amo.ANDROID.id, version=multi.versions.latest(), min=av_min, max=av_max) response = self.search(application_id=[amo.ANDROID.id]) assert response.status_code == 200 assert self.named_addons(response) == [ 'Bieber For Mobile', 'Multi Application'] def test_clear_search_uses_correct_queue(self): # The "clear search" link points to the right listed or unlisted queue. # Listed queue. 
        url = reverse('reviewers.queue_nominated')
        response = self.client.get(
            url, {'text_query': 'admin', 'searching': True})
        assert response.status_code == 200
        doc = pq(response.content)
        assert doc('.clear-queue-search').attr('href') == url


class TestQueueSearchUnlistedAllList(BaseTestQueueSearch):
    listed = False
    __test__ = True

    def setUp(self):
        super(TestQueueSearchUnlistedAllList, self).setUp()
        self.url = reverse('reviewers.unlisted_queue_all')

    def test_search_deleted(self):
        self.generate_files(['Not Needing Admin Review', 'Deleted'])
        r = self.search(deleted=1)
        assert self.named_addons(r) == ['Deleted']

    def test_search_not_deleted(self):
        self.generate_files(['Not Needing Admin Review', 'Deleted'])
        response = self.search(deleted=0)
        assert response.status_code == 200
        assert self.named_addons(response) == ['Not Needing Admin Review']

    def test_search_by_guid(self):
        name = 'Not Needing Admin Review'
        addon = self.generate_file(name)
        addon.update(guid='@guidymcguid')
        response = self.search(text_query='mcguid')
        assert response.status_code == 200
        assert self.named_addons(response) == ['Not Needing Admin Review']


class ReviewBase(QueueTest):

    def setUp(self):
        super(QueueTest, self).setUp()
        self.login_as_reviewer()
        self.addons = {}

        self.addon = self.generate_file('Public')
        self.version = self.addon.current_version
        self.file = self.version.files.get()
        self.reviewer = UserProfile.objects.get(username='reviewer')
        self.reviewer.update(display_name=u'A Reviêwer')
        self.url = reverse('reviewers.review', args=[self.addon.slug])

        AddonUser.objects.create(addon=self.addon, user_id=999)

    def get_addon(self):
        return Addon.objects.get(pk=self.addon.pk)

    def get_dict(self, **kw):
        data = {'operating_systems': 'win', 'applications': 'something',
                'comments': 'something'}
        data.update(kw)
        return data


class TestReview(ReviewBase):

    def test_reviewer_required(self):
        assert self.client.head(self.url).status_code == 200

    def test_not_anonymous(self):
        self.client.logout()
        self.assertLoginRedirects(self.client.head(self.url), to=self.url)

    @patch.object(settings, 'ALLOW_SELF_REVIEWS', False)
    def test_not_author(self):
        AddonUser.objects.create(addon=self.addon, user=self.reviewer)
        assert self.client.head(self.url).status_code == 302

    def test_review_unlisted_while_a_listed_version_is_awaiting_review(self):
        self.make_addon_unlisted(self.addon)
        version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
            file_kw={'status': amo.STATUS_AWAITING_REVIEW})
        self.addon.update(status=amo.STATUS_NOMINATED, slug='awaiting')
        self.url = reverse(
            'reviewers.review', args=('unlisted', self.addon.slug))
        self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
        assert self.client.get(self.url).status_code == 200

    def test_needs_unlisted_reviewer_for_only_unlisted(self):
        self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert self.client.head(self.url).status_code == 404

        self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
        assert self.client.head(self.url).status_code == 200

    def test_dont_need_unlisted_reviewer_for_mixed_channels(self):
        version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED,
            version='9.9')

        assert self.addon.find_latest_version(
            channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert self.addon.current_version.channel == amo.RELEASE_CHANNEL_LISTED
        assert self.client.head(self.url).status_code == 200
        self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted')
        assert self.client.head(self.url).status_code == 200

    def test_not_flags(self):
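        # With no reviewer flags set on the add-on, the review page context
        # should expose an empty flags list.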
self.addon.current_version.files.update(is_restart_required=False) response = self.client.get(self.url) assert response.status_code == 200 assert len(response.context['flags']) == 0 def test_flag_needs_admin_code_review(self): self.addon.current_version.files.update(is_restart_required=False) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_code_review=True) response = self.client.get(self.url) assert response.status_code == 200 assert len(response.context['flags']) == 1 def test_info_comments_requested(self): response = self.client.post(self.url, {'action': 'reply'}) assert response.context['form'].errors['comments'][0] == ( 'This field is required.') def test_whiteboard_url(self): # Listed review. response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert ( doc('#whiteboard_form').attr('action') == '/en-US/reviewers/whiteboard/listed/public') # Content review. self.grant_permission(self.reviewer, 'Addons:ContentReview') AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) self.url = reverse( 'reviewers.review', args=['content', self.addon.slug]) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert ( doc('#whiteboard_form').attr('action') == '/en-US/reviewers/whiteboard/content/public') # Unlisted review. self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted') version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED) self.url = reverse( 'reviewers.review', args=['unlisted', self.addon.slug]) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert ( doc('#whiteboard_form').attr('action') == '/en-US/reviewers/whiteboard/unlisted/public') # Listed review, but deleted. 
self.addon.delete() self.url = reverse( 'reviewers.review', args=['listed', self.addon.pk]) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert ( doc('#whiteboard_form').attr('action') == '/en-US/reviewers/whiteboard/listed/%d' % self.addon.pk) def test_no_whiteboards_for_static_themes(self): self.grant_permission(self.reviewer, 'Addons:ThemeReview') self.addon.update(type=amo.ADDON_STATICTHEME) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('#whiteboard_form') def test_comment(self): response = self.client.post(self.url, {'action': 'comment', 'comments': 'hello sailor'}) assert response.status_code == 302 assert len(mail.outbox) == 0 comment_version = amo.LOG.COMMENT_VERSION assert ActivityLog.objects.filter( action=comment_version.id).count() == 1 def test_info_requested(self): response = self.client.post(self.url, {'action': 'reply', 'comments': 'hello sailor'}) assert response.status_code == 302 assert len(mail.outbox) == 1 self.assertTemplateUsed(response, 'activity/emails/from_reviewer.txt') def test_super_review_requested(self): response = self.client.post(self.url, {'action': 'super', 'comments': 'hello sailor'}) assert response.status_code == 302 def test_info_requested_canned_response(self): response = self.client.post(self.url, {'action': 'reply', 'comments': 'hello sailor', 'canned_response': 'foo'}) assert response.status_code == 302 assert len(mail.outbox) == 1 self.assertTemplateUsed(response, 'activity/emails/from_reviewer.txt') def test_page_title(self): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('title').text() == ( '%s :: Reviewer Tools :: Add-ons for Firefox' % self.addon.name) def test_files_shown(self): response = self.client.get(self.url) assert response.status_code == 200 items = pq(response.content)('#review-files .files .file-info') assert items.length == 1 f = self.version.all_files[0] expected = [ ('All Platforms', f.get_url_path('reviewer')), ('Validation', reverse('devhub.file_validation', args=[self.addon.slug, f.id])), ('Contents', None), ] check_links(expected, items.find('a'), verify=False) def test_item_history(self, channel=amo.RELEASE_CHANNEL_LISTED): self.addons['something'] = addon_factory( status=amo.STATUS_PUBLIC, name=u'something', version_kw={'version': u'0.2', 'channel': channel}, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) assert self.addon.versions.filter(channel=channel).count() == 1 self.review_version(self.version, self.url) v2 = self.addons['something'].versions.all()[0] v2.addon = self.addon v2.created = v2.created + timedelta(days=1) v2.save() assert self.addon.versions.filter(channel=channel).count() == 2 action = self.review_version(v2, self.url) response = self.client.get(self.url) assert response.status_code == 200 # The 2 following lines replace pq(res.content), it's a workaround for # https://github.com/gawel/pyquery/issues/31 UTF8_PARSER = HTMLParser(encoding='utf-8') doc = pq(fromstring(response.content, parser=UTF8_PARSER)) table = doc('#review-files') # Check the history for both versions. 
ths = table.children('tr > th') assert ths.length == 2 assert '0.1' in ths.eq(0).text() assert '0.2' in ths.eq(1).text() rows = table('td.files') assert rows.length == 2 comments = rows.siblings('td') assert comments.length == 2 for idx in xrange(comments.length): td = comments.eq(idx) assert td.find('.history-comment').text() == 'something' assert td.find('th').text() == { 'public': 'Approved', 'reply': 'Reviewer Reply'}[action] reviewer_name = td.find('td a').text() assert ((reviewer_name == self.reviewer.display_name) or (reviewer_name == self.other_reviewer.display_name)) def test_item_history_with_unlisted_versions_too(self): # Throw in an unlisted version to be ignored. version_factory( version=u'0.2', addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED, file_kw={'status': amo.STATUS_PUBLIC}) self.test_item_history() def test_item_history_with_unlisted_review_page(self): self.addon.versions.update(channel=amo.RELEASE_CHANNEL_UNLISTED) self.version.reload() # Throw in an listed version to be ignored. version_factory( version=u'0.2', addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED, file_kw={'status': amo.STATUS_PUBLIC}) self.url = reverse('reviewers.review', args=[ 'unlisted', self.addon.slug]) self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted') self.test_item_history(channel=amo.RELEASE_CHANNEL_UNLISTED) def generate_deleted_versions(self): self.addon = addon_factory(version_kw={ 'version': '1.0', 'created': self.days_ago(1)}) self.url = reverse('reviewers.review', args=[self.addon.slug]) versions = ({'version': '0.1', 'action': 'comment', 'comments': 'millenium hand and shrimp'}, {'version': '0.1', 'action': 'public', 'comments': 'buggrit'}, {'version': '0.2', 'action': 'comment', 'comments': 'I told em'}, {'version': '0.3'}) for i, version_data in enumerate(versions): version = version_factory( addon=self.addon, version=version_data['version'], created=self.days_ago(-i), file_kw={'status': amo.STATUS_AWAITING_REVIEW}) if 'action' in version_data: data = {'action': version_data['action'], 'operating_systems': 'win', 'applications': 'something', 'comments': version_data['comments']} self.client.post(self.url, data) version.delete(hard=True) self.addon.current_version.delete(hard=True) @patch('olympia.reviewers.utils.sign_file') def test_item_history_deleted(self, mock_sign): self.generate_deleted_versions() response = self.client.get(self.url) assert response.status_code == 200 table = pq(response.content)('#review-files') # Check the history for all versions. ths = table.children('tr > th') assert ths.length == 3 # The 2 with the same number will be coalesced. assert '0.1' in ths.eq(0).text() assert '0.2' in ths.eq(1).text() assert '0.3' in ths.eq(2).text() for idx in xrange(2): assert 'Deleted' in ths.eq(idx).text() bodies = table.children('.listing-body') assert 'millenium hand and shrimp' in bodies.eq(0).text() assert 'buggrit' in bodies.eq(0).text() assert 'I told em' in bodies.eq(1).text() assert mock_sign.called def test_item_history_compat_ordered(self): """ Make sure that apps in compatibility are ordered. 
""" av = AppVersion.objects.all()[0] v = self.addon.versions.all()[0] ApplicationsVersions.objects.create( version=v, application=amo.THUNDERBIRD.id, min=av, max=av) ApplicationsVersions.objects.create( version=v, application=amo.SEAMONKEY.id, min=av, max=av) assert self.addon.versions.count() == 1 url = reverse('reviewers.review', args=[self.addon.slug]) response = self.client.get(url) assert response.status_code == 200 doc = pq(response.content) icons = doc('.listing-body .app-icon') assert icons.eq(0).attr('title') == "Firefox" assert icons.eq(1).attr('title') == "SeaMonkey" assert icons.eq(2).attr('title') == "Thunderbird" def test_item_history_weight(self): """ Make sure the weight is shown on the review page""" AutoApprovalSummary.objects.create( version=self.version, verdict=amo.AUTO_APPROVED, weight=284) self.grant_permission(self.reviewer, 'Addons:PostReview') url = reverse('reviewers.review', args=[self.addon.slug]) response = self.client.get(url) assert response.status_code == 200 doc = pq(response.content) risk = doc('.listing-body .file-weight') assert risk.text() == "Weight: 284" def test_item_history_notes(self): version = self.addon.versions.all()[0] version.releasenotes = 'hi' version.approvalnotes = 'secret hi' version.save() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content)('#review-files') version = doc('.activity_version') assert version.length == 1 assert version.text() == 'hi' approval = doc('.activity_approval') assert approval.length == 1 assert approval.text() == 'secret hi' def test_item_history_header(self): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert ('Approved' in doc('#review-files .listing-header .light').text()) def test_item_history_comment(self): # Add Comment. 
self.client.post(self.url, {'action': 'comment', 'comments': 'hello sailor'}) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content)('#review-files') assert doc('th').eq(1).text() == 'Commented' assert doc('.history-comment').text() == 'hello sailor' def test_files_in_item_history(self): data = {'action': 'public', 'operating_systems': 'win', 'applications': 'something', 'comments': 'something'} self.client.post(self.url, data) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) items = doc('#review-files .files .file-info') assert items.length == 1 assert items.find('a.reviewers-install').text() == 'All Platforms' def test_no_items(self): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#review-files .no-activity').length == 1 def test_action_links(self): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected = [ ('View Listing', self.addon.get_url_path()), ] check_links(expected, doc('#actions-addon a'), verify=False) def test_action_links_as_admin(self): self.login_as_admin() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected = [ ('View Listing', self.addon.get_url_path()), ('Edit', self.addon.get_dev_url()), ('Admin Page', reverse('zadmin.addon_manage', args=[self.addon.id])), ] check_links(expected, doc('#actions-addon a'), verify=False) def test_unlisted_addon_action_links_as_admin(self): """No "View Listing" link for unlisted addons, "edit"/"manage" links for the admins.""" self.make_addon_unlisted(self.addon) self.login_as_admin() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected = [ ('Unlisted Review Page', reverse( 'reviewers.review', args=('unlisted', self.addon.slug))), ('Edit', self.addon.get_dev_url()), ('Admin Page', reverse( 'zadmin.addon_manage', args=[self.addon.id])), ] check_links(expected, doc('#actions-addon a'), verify=False) def test_mixed_channels_action_links_as_admin(self): self.make_addon_unlisted(self.addon) version_factory( addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) self.addon.update(status=amo.STATUS_NOMINATED) self.login_as_admin() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected = [ ('View Listing', self.addon.get_url_path()), ('Unlisted Review Page', reverse( 'reviewers.review', args=('unlisted', self.addon.slug))), ('Edit', self.addon.get_dev_url()), ('Admin Page', reverse( 'zadmin.addon_manage', args=[self.addon.id])), ] check_links(expected, doc('#actions-addon a'), verify=False) def test_mixed_channels_action_links_as_admin_on_unlisted_review(self): self.make_addon_unlisted(self.addon) version_factory( addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) self.addon.update(status=amo.STATUS_NOMINATED) self.login_as_admin() self.url = reverse( 'reviewers.review', args=('unlisted', self.addon.slug)) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected = [ ('View Listing', self.addon.get_url_path()), ('Listed Review Page', reverse('reviewers.review', args=(self.addon.slug,))), ('Edit', self.addon.get_dev_url()), ('Admin Page', reverse('zadmin.addon_manage', args=[self.addon.id])), ] check_links(expected, doc('#actions-addon 
a'), verify=False) def test_mixed_channels_action_links_as_regular_reviewer(self): self.make_addon_unlisted(self.addon) version_factory( addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) self.addon.update(status=amo.STATUS_NOMINATED) self.login_as_reviewer() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected = [ ('View Listing', self.addon.get_url_path()), ] check_links(expected, doc('#actions-addon a'), verify=False) def test_admin_links_as_non_admin(self): self.login_as_reviewer() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) admin = doc('#actions-addon li') assert admin.length == 1 def test_extra_actions_subscribe_checked_state(self): self.login_as_reviewer() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) subscribe_input = doc('#notify_new_listed_versions')[0] assert 'checked' not in subscribe_input.attrib ReviewerSubscription.objects.create( addon=self.addon, user=self.reviewer) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) subscribe_input = doc('#notify_new_listed_versions')[0] assert subscribe_input.attrib['checked'] == 'checked' def test_extra_actions_token(self): self.login_as_reviewer() self.client.cookies[API_TOKEN_COOKIE] = 'youdidntsaythemagicword' response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) token = doc('#extra-review-actions').attr('data-api-token') assert token == 'youdidntsaythemagicword' def test_extra_actions_not_for_reviewers(self): self.login_as_reviewer() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('#force_disable_addon') assert not doc('#force_enable_addon') assert not doc('#clear_admin_code_review') assert not doc('#clear_admin_content_review') assert not doc('#clear_admin_theme_review') assert not doc('#disable_auto_approval') assert not doc('#enable_auto_approval') assert not doc('#clear_pending_info_request') def test_extra_actions_admin_disable_enable(self): self.login_as_admin() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#force_disable_addon') elem = doc('#force_disable_addon')[0] assert 'hidden' not in elem.getparent().attrib.get('class', '') assert doc('#force_enable_addon') elem = doc('#force_enable_addon')[0] assert 'hidden' in elem.getparent().attrib.get('class', '') def test_unflag_option_forflagged_as_admin(self): self.login_as_admin() AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_code_review=True) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#clear_admin_code_review').length == 1 assert doc('#clear_admin_content_review').length == 0 assert doc('#clear_admin_content_review').length == 0 def test_unflag_content_option_forflagged_as_admin(self): self.login_as_admin() AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_code_review=False, needs_admin_content_review=True) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#clear_admin_code_review').length == 0 assert doc('#clear_admin_content_review').length == 1 assert doc('#clear_admin_theme_review').length == 0 def test_unflag_theme_option_forflagged_as_admin(self): self.login_as_admin() 
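        # Only the theme-review flag is set here, so only the theme "clear"
        # action should be rendered; the code and content ones should not.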
AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_code_review=False, needs_admin_content_review=False, needs_admin_theme_review=True) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#clear_admin_code_review').length == 0 assert doc('#clear_admin_content_review').length == 0 assert doc('#clear_admin_theme_review').length == 1 def test_disable_auto_approvals_as_admin(self): self.login_as_admin() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#disable_auto_approval') elem = doc('#disable_auto_approval')[0] assert 'hidden' not in elem.getparent().attrib.get('class', '') assert doc('#enable_auto_approval') elem = doc('#enable_auto_approval')[0] assert 'hidden' in elem.getparent().attrib.get('class', '') # Both of them should be absent on static themes, which are not # auto-approved. self.addon.update(type=amo.ADDON_STATICTHEME) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('#disable_auto_approval') assert not doc('#enable_auto_approval') def test_enable_auto_approvals_as_admin_auto_approvals_disabled(self): self.login_as_admin() AddonReviewerFlags.objects.create( addon=self.addon, auto_approval_disabled=True) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#disable_auto_approval') elem = doc('#disable_auto_approval')[0] assert 'hidden' in elem.getparent().attrib.get('class', '') assert doc('#enable_auto_approval') elem = doc('#enable_auto_approval')[0] assert 'hidden' not in elem.getparent().attrib.get('class', '') # Both of them should be absent on static themes, which are not # auto-approved. 
self.addon.update(type=amo.ADDON_STATICTHEME) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('#disable_auto_approval') assert not doc('#enable_auto_approval') def test_clear_pending_info_request_as_admin(self): self.login_as_admin() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('#clear_pending_info_request') AddonReviewerFlags.objects.create( addon=self.addon, pending_info_request=self.days_ago(1)) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#clear_pending_info_request') def test_info_request_checkbox(self): self.login_as_reviewer() assert not self.addon.pending_info_request response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert 'checked' not in doc('#id_info_request')[0].attrib elm = doc('#id_info_request_deadline')[0] assert elm.attrib['readonly'] == 'readonly' assert elm.attrib['min'] == '7' assert elm.attrib['max'] == '7' assert elm.attrib['value'] == '7' AddonReviewerFlags.objects.create( addon=self.addon, pending_info_request=datetime.now() + timedelta(days=7)) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#id_info_request')[0].attrib['checked'] == 'checked' def test_info_request_checkbox_admin(self): self.login_as_admin() assert not self.addon.pending_info_request response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert 'checked' not in doc('#id_info_request')[0].attrib elm = doc('#id_info_request_deadline')[0] assert 'readonly' not in elm.attrib assert elm.attrib['min'] == '1' assert elm.attrib['max'] == '99' assert elm.attrib['value'] == '7' def test_no_public(self): has_public = self.version.files.filter( status=amo.STATUS_PUBLIC).exists() assert has_public response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) validation = doc.find('.files') assert validation.find('a').eq(1).text() == "Validation" assert validation.find('a').eq(2).text() == "Contents" assert validation.find('a').length == 3 def test_public_search(self): self.version.files.update(status=amo.STATUS_PUBLIC) self.addon.update(type=amo.ADDON_SEARCH) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#review-files .files ul .file-info').length == 1 def test_version_deletion(self): """ Make sure that we still show review history for deleted versions. """ # Add a new version to the add-on. 
addon = addon_factory( status=amo.STATUS_NOMINATED, name='something', version_kw={'version': '0.2'}, file_kw={'status': amo.STATUS_AWAITING_REVIEW}) assert self.addon.versions.count() == 1 self.review_version(self.version, self.url) v2 = addon.versions.all()[0] v2.addon = self.addon v2.created = v2.created + timedelta(days=1) v2.save() self.review_version(v2, self.url) assert self.addon.versions.count() == 2 response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) # View the history verify two versions: ths = doc('table#review-files > tr > th:first-child') assert '0.1' in ths.eq(0).text() assert '0.2' in ths.eq(1).text() # Delete a version: v2.delete() # Verify two versions, one deleted: response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) ths = doc('table#review-files > tr > th:first-child') assert ths.length == 2 assert '0.1' in ths.text() def test_no_versions(self): """The review page should still load if there are no versions. But not unless you have unlisted permissions.""" assert self.client.get(self.url).status_code == 200 response = self.client.post(self.url, {'action': 'comment', 'comments': 'hello sailor'}) assert response.status_code == 302 self.assert3xx(response, reverse('reviewers.queue_pending'), status_code=302) self.version.delete() # Regular reviewer has no permission, gets a 404. assert self.client.get(self.url).status_code == 404 # Reviewer with more powers can look. self.grant_permission(self.reviewer, 'Addons:ReviewUnlisted') assert self.client.get(self.url).status_code == 200 response = self.client.post(self.url, {'action': 'comment', 'comments': 'hello sailor'}) assert response.status_code == 302 self.assert3xx(response, reverse('reviewers.queue_pending'), status_code=302) def test_addon_deleted(self): """The review page should still load for deleted addons.""" self.addon.delete() self.url = reverse('reviewers.review', args=[self.addon.pk]) assert self.client.get(self.url).status_code == 200 response = self.client.post(self.url, {'action': 'comment', 'comments': 'hello sailor'}) assert response.status_code == 302 self.assert3xx(response, reverse('reviewers.queue_pending'), status_code=302) @patch('olympia.reviewers.utils.sign_file') def review_version(self, version, url, mock_sign): if version.channel == amo.RELEASE_CHANNEL_LISTED: version.files.all()[0].update(status=amo.STATUS_AWAITING_REVIEW) action = 'public' else: action = 'reply' data = { 'action': action, 'operating_systems': 'win', 'applications': 'something', 'comments': 'something', } self.client.post(url, data) if version.channel == amo.RELEASE_CHANNEL_LISTED: assert mock_sign.called return action def test_dependencies_listed(self): AddonDependency.objects.create(addon=self.addon, dependent_addon=self.addon) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) deps = doc('.addon-info .addon-dependencies') assert deps.length == 1 assert deps.find('li').length == 1 assert deps.find('a').attr('href') == self.addon.get_url_path() def test_eula_displayed(self): assert not bool(self.addon.eula) response = self.client.get(self.url) assert response.status_code == 200 self.assertNotContains(response, 'View End-User License Agreement') self.addon.eula = 'Test!' 
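        # Once an EULA has been set and saved, the review page should show the
        # "View End-User License Agreement" link.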
self.addon.save() assert bool(self.addon.eula) response = self.client.get(self.url) assert response.status_code == 200 self.assertContains(response, 'View End-User License Agreement') def test_privacy_policy_displayed(self): assert self.addon.privacy_policy is None response = self.client.get(self.url) assert response.status_code == 200 self.assertNotContains(response, 'View Privacy Policy') self.addon.privacy_policy = 'Test!' self.addon.save() response = self.client.get(self.url) assert response.status_code == 200 self.assertContains(response, 'View Privacy Policy') def test_requires_payment_indicator(self): assert not self.addon.requires_payment response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert 'No' in doc('tr.requires-payment td').text() self.addon.update(requires_payment=True) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert 'Yes' in doc('tr.requires-payment td').text() def test_viewing(self): url = reverse('reviewers.review_viewing') response = self.client.post(url, {'addon_id': self.addon.id}) data = json.loads(response.content) assert data['current'] == self.reviewer.id assert data['current_name'] == self.reviewer.name assert data['is_user'] == 1 # Now, login as someone else and test. self.login_as_admin() response = self.client.post(url, {'addon_id': self.addon.id}) data = json.loads(response.content) assert data['current'] == self.reviewer.id assert data['current_name'] == self.reviewer.name assert data['is_user'] == 0 # Lets just override this to make the test a bit shorter. @mock.patch.object(amo, 'REVIEWER_REVIEW_LOCK_LIMIT', 1) def test_viewing_lock_limit(self): url = reverse('reviewers.review_viewing') response = self.client.post(url, {'addon_id': 1234}) data = json.loads(response.content) assert data['current'] == self.reviewer.id assert data['current_name'] == self.reviewer.name assert data['is_user'] == 1 # Second review page is over the limit. response = self.client.post(url, {'addon_id': 5678}) data = json.loads(response.content) assert data['current'] == settings.TASK_USER_ID # Mozilla's task ID. assert data['current_name'] == 'Review lock limit reached' assert data['is_user'] == 2 # Now, login as someone else and test. First page is blocked. self.login_as_admin() response = self.client.post(url, {'addon_id': 1234}) data = json.loads(response.content) assert data['current'] == self.reviewer.id assert data['current_name'] == self.reviewer.name assert data['is_user'] == 0 # Second page is available. response = self.client.post(url, {'addon_id': 5678}) data = json.loads(response.content) admin = UserProfile.objects.get(username='admin') assert data['current'] == admin.id assert data['current_name'] == admin.name assert data['is_user'] == 1 # Lets just override this to make the test a bit shorter. @mock.patch.object(amo, 'REVIEWER_REVIEW_LOCK_LIMIT', 1) def test_viewing_lock_admin(self): self.login_as_admin() url = reverse('reviewers.review_viewing') admin = UserProfile.objects.get(username='admin') response = self.client.post(url, {'addon_id': 101}) data = json.loads(response.content) assert data['current'] == admin.id assert data['current_name'] == admin.name assert data['is_user'] == 1 # Admin don't have time for no limits. 
response = self.client.post(url, {'addon_id': 202}) data = json.loads(response.content) assert data['current'] == admin.id assert data['current_name'] == admin.name assert data['is_user'] == 1 def test_viewing_review_unlocks(self): reviewing_url = reverse('reviewers.review_viewing') self.client.post(reviewing_url, {'addon_id': self.addon.id}) key = '%s:review_viewing:%s' % (settings.CACHE_PREFIX, self.addon.id) assert cache.get(key) == self.reviewer.id self.client.post(self.url, {'action': 'comment', 'comments': 'hello sailor'}) # Processing a review should instantly clear the review lock on it. assert cache.get(key) is None def test_viewing_queue(self): response = self.client.post(reverse('reviewers.review_viewing'), {'addon_id': self.addon.id}) data = json.loads(response.content) assert data['current'] == self.reviewer.id assert data['current_name'] == self.reviewer.name assert data['is_user'] == 1 # Now, login as someone else and test. self.login_as_admin() r = self.client.post(reverse('reviewers.queue_viewing'), {'addon_ids': self.addon.id}) data = json.loads(r.content) assert data[str(self.addon.id)] == self.reviewer.display_name def test_display_same_files_only_once(self): """ Test whether identical files for different platforms show up as one link with the appropriate text. """ version = version_factory( addon=self.addon, version='0.2', file_kw=False) file_mac = file_factory(version=version, platform=amo.PLATFORM_MAC.id) file_android = file_factory( version=version, platform=amo.PLATFORM_ANDROID.id) # Signing causes the same uploaded file to be different file_mac.update(hash='xyz789', original_hash='123abc') file_android.update(hash='zyx987', original_hash='123abc') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) text = doc('.reviewers-install').eq(1).text() assert text == "Mac OS X / Android" def test_compare_no_link(self): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) info = doc('#review-files .file-info') assert info.length == 1 assert info.find('a.compare').length == 0 def test_file_info_for_static_themes(self): self.grant_permission(self.reviewer, 'Addons:ThemeReview') self.addon.update(type=amo.ADDON_STATICTHEME) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) info = doc('#review-files .file-info') assert info.length == 1 # Only the download/install link assert info.find('a').length == 1 assert info.find('a')[0].text == u'Download' assert 'Compatibility' not in response.content def test_compare_link(self): first_file = self.addon.current_version.files.all()[0] first_file.update(status=amo.STATUS_PUBLIC) self.addon.current_version.update(created=self.days_ago(2)) new_version = version_factory(addon=self.addon, version='0.2') new_file = new_version.files.all()[0] self.addon.update(_current_version=new_version) assert self.addon.current_version == new_version response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert response.context['show_diff'] links = doc('#review-files .file-info .compare') expected = [ reverse('files.compare', args=[new_file.pk, first_file.pk]), ] check_links(expected, links, verify=False) def test_compare_link_auto_approved_ignored(self): first_file = self.addon.current_version.files.all()[0] first_file.update(status=amo.STATUS_PUBLIC) self.addon.current_version.update(created=self.days_ago(3)) interim_version = version_factory(addon=self.addon, 
version='0.2') interim_version.update(created=self.days_ago(2)) AutoApprovalSummary.objects.create( version=interim_version, verdict=amo.AUTO_APPROVED) new_version = version_factory(addon=self.addon, version='0.3') new_file = new_version.files.all()[0] self.addon.update(_current_version=new_version) assert self.addon.current_version == new_version response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert response.context['show_diff'] links = doc('#review-files .file-info .compare') # Comparison should be between the last version and the first, # ignoring the interim version because it was auto-approved and not # manually confirmed by a human. expected = [ reverse('files.compare', args=[new_file.pk, first_file.pk]), ] check_links(expected, links, verify=False) def test_compare_link_auto_approved_but_confirmed_not_ignored(self): first_file = self.addon.current_version.files.all()[0] first_file.update(status=amo.STATUS_PUBLIC) self.addon.current_version.update(created=self.days_ago(3)) confirmed_version = version_factory(addon=self.addon, version='0.2') confirmed_version.update(created=self.days_ago(2)) confirmed_file = confirmed_version.files.all()[0] AutoApprovalSummary.objects.create( verdict=amo.AUTO_APPROVED, version=confirmed_version, confirmed=True) interim_version = version_factory(addon=self.addon, version='0.3') interim_version.update(created=self.days_ago(1)) AutoApprovalSummary.objects.create( version=interim_version, verdict=amo.AUTO_APPROVED) new_version = version_factory(addon=self.addon, version='0.4') new_file = new_version.files.all()[0] self.addon.update(_current_version=new_version) assert self.addon.current_version == new_version response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert response.context['show_diff'] links = doc('#review-files .file-info .compare') # Comparison should be between the last version and the second, # ignoring the third version because it was auto-approved and not # manually confirmed by a human (the second was auto-approved but # was manually confirmed). 
expected = [ reverse('files.compare', args=[new_file.pk, confirmed_file.pk]), ] check_links(expected, links, verify=False) def test_compare_link_not_auto_approved_but_confirmed(self): first_file = self.addon.current_version.files.all()[0] first_file.update(status=amo.STATUS_PUBLIC) self.addon.current_version.update(created=self.days_ago(3)) confirmed_version = version_factory(addon=self.addon, version='0.2') confirmed_version.update(created=self.days_ago(2)) confirmed_file = confirmed_version.files.all()[0] AutoApprovalSummary.objects.create( verdict=amo.NOT_AUTO_APPROVED, version=confirmed_version ) new_version = version_factory(addon=self.addon, version='0.3') new_file = new_version.files.all()[0] self.addon.update(_current_version=new_version) assert self.addon.current_version == new_version response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert response.context['show_diff'] links = doc('#review-files .file-info .compare') # Comparison should be between the last version and the second, # because second was approved by human before auto-approval ran on it expected = [ reverse('files.compare', args=[new_file.pk, confirmed_file.pk]), ] check_links(expected, links, verify=False) def test_download_sources_link(self): version = self.addon.current_version tdir = temp.gettempdir() source_file = temp.NamedTemporaryFile(suffix='.zip', dir=tdir) source_file.write('a' * (2 ** 21)) source_file.seek(0) version.source = DjangoFile(source_file) version.save() url = reverse('reviewers.review', args=[self.addon.pk]) # Admin reviewer: able to download sources. user = UserProfile.objects.get(email='admin@mozilla.com') self.client.login(email=user.email) response = self.client.get(url, follow=True) assert response.status_code == 200 assert 'Download files' in response.content # Standard reviewer: should know that sources were provided. user = UserProfile.objects.get(email='reviewer@mozilla.com') self.client.login(email=user.email) response = self.client.get(url, follow=True) assert response.status_code == 200 assert 'The developer has provided source code.' in response.content @patch('olympia.reviewers.utils.sign_file') def test_admin_flagged_addon_actions_as_admin(self, mock_sign_file): self.version.files.update(status=amo.STATUS_AWAITING_REVIEW) self.addon.update(status=amo.STATUS_NOMINATED) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_code_review=True) self.login_as_admin() response = self.client.post(self.url, self.get_dict(action='public'), follow=True) assert response.status_code == 200 addon = self.get_addon() assert self.version == addon.current_version assert addon.status == amo.STATUS_PUBLIC assert addon.current_version.files.all()[0].status == amo.STATUS_PUBLIC assert mock_sign_file.called def test_admin_flagged_addon_actions_as_reviewer(self): self.version.files.update(status=amo.STATUS_AWAITING_REVIEW) self.addon.update(status=amo.STATUS_NOMINATED) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_code_review=True) self.login_as_reviewer() response = self.client.post(self.url, self.get_dict(action='public')) assert response.status_code == 200 # Form error. # The add-on status must not change as non-admin reviewers are not # allowed to review admin-flagged add-ons. 
addon = self.get_addon() assert addon.status == amo.STATUS_NOMINATED assert self.version == addon.current_version assert addon.current_version.files.all()[0].status == ( amo.STATUS_AWAITING_REVIEW) assert response.context['form'].errors['action'] == ( [u'Select a valid choice. public is not one of the available ' u'choices.']) def test_admin_flagged_addon_actions_as_content_reviewer(self): self.version.files.update(status=amo.STATUS_AWAITING_REVIEW) self.addon.update(status=amo.STATUS_NOMINATED) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_code_review=True) GroupUser.objects.filter(user=self.reviewer).all().delete() self.grant_permission(self.reviewer, 'Addons:ContentReview') self.url = reverse( 'reviewers.review', args=['content', self.addon.slug]) for action in ['confirm_auto_approved', 'reject_multiple_versions']: response = self.client.post(self.url, self.get_dict(action=action)) assert response.status_code == 200 # Form error. # The add-on status must not change as non-admin reviewers are not # allowed to review admin-flagged add-ons. addon = self.get_addon() assert addon.status == amo.STATUS_NOMINATED assert self.version == addon.current_version assert addon.current_version.files.all()[0].status == ( amo.STATUS_AWAITING_REVIEW) assert response.context['form'].errors['action'] == ( [u'Select a valid choice. %s is not one of the available ' u'choices.' % action]) assert ActivityLog.objects.filter( action=amo.LOG.APPROVE_CONTENT.id).count() == 0 assert ActivityLog.objects.filter( action=amo.LOG.REJECT_CONTENT.id).count() == 0 def test_confirm_auto_approval_no_permission(self): AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) self.login_as_reviewer() # Legacy reviewer, not post-review. response = self.client.post( self.url, {'action': 'confirm_auto_approved'}) assert response.status_code == 403 # Nothing happened: the user did not have the permission to do that. assert ActivityLog.objects.filter( action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0 def test_attempt_to_use_content_review_permission_for_post_review_actions( self): # Try to use confirm_auto_approved outside of content review, while # only having Addons:ContentReview permission. self.grant_permission(self.reviewer, 'Addons:ContentReview') AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) self.login_as_reviewer() response = self.client.post( self.url, {'action': 'confirm_auto_approved'}) assert response.status_code == 403 # Nothing happened: the user did not have the permission to do that. assert ActivityLog.objects.filter( action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0 def test_confirm_auto_approval_content_review(self): GroupUser.objects.filter(user=self.reviewer).all().delete() self.url = reverse( 'reviewers.review', args=['content', self.addon.slug]) summary = AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) self.grant_permission(self.reviewer, 'Addons:ContentReview') response = self.client.post(self.url, { 'action': 'confirm_auto_approved', 'comments': 'ignore me this action does not support comments' }) assert response.status_code == 302 summary.reload() assert summary.confirmed is None # We're only doing a content review. 
assert ActivityLog.objects.filter( action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0 assert ActivityLog.objects.filter( action=amo.LOG.APPROVE_CONTENT.id).count() == 1 a_log = ActivityLog.objects.filter( action=amo.LOG.APPROVE_CONTENT.id).get() assert a_log.details['version'] == self.addon.current_version.version assert a_log.details['comments'] == '' self.assert3xx(response, reverse('reviewers.queue_content_review')) def test_cant_contentreview_if_admin_content_review_flag_is_set(self): GroupUser.objects.filter(user=self.reviewer).all().delete() self.url = reverse( 'reviewers.review', args=['content', self.addon.slug]) AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_content_review=True) self.grant_permission(self.reviewer, 'Addons:ContentReview') response = self.client.post(self.url, { 'action': 'confirm_auto_approved', 'comments': 'ignore me this action does not support comments' }) assert response.status_code == 200 # Form error assert ActivityLog.objects.filter( action=amo.LOG.APPROVE_CONTENT.id).count() == 0 def test_can_contentreview_if_addon_has_sources_attached(self): GroupUser.objects.filter(user=self.reviewer).all().delete() self.url = reverse( 'reviewers.review', args=['content', self.addon.slug]) summary = AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) self.addon.current_version.update(source='/path/to/fake/file.zip') AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_code_review=True) self.grant_permission(self.reviewer, 'Addons:ContentReview') response = self.client.post(self.url, { 'action': 'confirm_auto_approved', 'comments': 'ignore me this action does not support comments' }) assert response.status_code == 302 summary.reload() assert summary.confirmed is None # We're only doing a content review. 
assert ActivityLog.objects.filter( action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0 assert ActivityLog.objects.filter( action=amo.LOG.APPROVE_CONTENT.id).count() == 1 a_log = ActivityLog.objects.filter( action=amo.LOG.APPROVE_CONTENT.id).get() assert a_log.details['version'] == self.addon.current_version.version assert a_log.details['comments'] == '' self.assert3xx(response, reverse('reviewers.queue_content_review')) def test_cant_contentreview_if_addon_has_admin_flag_but_no_sources(self): GroupUser.objects.filter(user=self.reviewer).all().delete() self.url = reverse( 'reviewers.review', args=['content', self.addon.slug]) AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_code_review=True) self.grant_permission(self.reviewer, 'Addons:ContentReview') response = self.client.post(self.url, { 'action': 'confirm_auto_approved', 'comments': 'ignore me this action does not support comments' }) assert response.status_code == 200 # Form error assert ActivityLog.objects.filter( action=amo.LOG.APPROVE_CONTENT.id).count() == 0 def test_cant_addonreview_if_admin_content_review_flag_is_set(self): AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_content_review=True) self.grant_permission(self.reviewer, 'Addons:PostReview') for action in ['confirm_auto_approved', 'public', 'reject', 'reject_multiple_versions']: response = self.client.post(self.url, self.get_dict(action=action)) assert response.status_code == 200 # Form error. # The add-on status must not change as non-admin reviewers are not # allowed to review admin-flagged add-ons. addon = self.get_addon() assert addon.status == amo.STATUS_PUBLIC assert self.version == addon.current_version assert addon.current_version.files.all()[0].status == ( amo.STATUS_PUBLIC) assert response.context['form'].errors['action'] == ( [u'Select a valid choice. %s is not one of the available ' u'choices.' % action]) assert ActivityLog.objects.filter( action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0 assert ActivityLog.objects.filter( action=amo.LOG.REJECT_VERSION.id).count() == 0 assert ActivityLog.objects.filter( action=amo.LOG.APPROVE_VERSION.id).count() == 0 def test_cant_review_static_theme_if_admin_theme_review_flag_is_set(self): self.version.files.update(status=amo.STATUS_AWAITING_REVIEW) self.addon.update( type=amo.ADDON_STATICTHEME, status=amo.STATUS_NOMINATED) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_theme_review=True) self.grant_permission(self.reviewer, 'Addons:ThemeReview') for action in ['public', 'reject']: response = self.client.post(self.url, self.get_dict(action=action)) assert response.status_code == 200 # Form error. # The add-on status must not change as non-admin reviewers are not # allowed to review admin-flagged add-ons. addon = self.get_addon() assert addon.status == amo.STATUS_NOMINATED assert self.version == addon.current_version assert addon.current_version.files.all()[0].status == ( amo.STATUS_AWAITING_REVIEW) assert response.context['form'].errors['action'] == ( [u'Select a valid choice. %s is not one of the available ' u'choices.' 
% action]) assert ActivityLog.objects.filter( action=amo.LOG.REJECT_VERSION.id).count() == 0 assert ActivityLog.objects.filter( action=amo.LOG.APPROVE_VERSION.id).count() == 0 @patch('olympia.reviewers.utils.sign_file') def test_admin_can_review_statictheme_if_admin_theme_review_flag_set( self, mock_sign_file): self.version.files.update(status=amo.STATUS_AWAITING_REVIEW) self.addon.update( type=amo.ADDON_STATICTHEME, status=amo.STATUS_NOMINATED) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_theme_review=True) self.grant_permission(self.reviewer, 'Addons:ThemeReview') self.grant_permission(self.reviewer, 'Reviews:Admin') response = self.client.post(self.url, { 'action': 'public', 'comments': 'it`s good' }) assert response.status_code == 302 assert self.get_addon().status == amo.STATUS_PUBLIC assert mock_sign_file.called def test_admin_can_contentreview_if_admin_content_review_flag_is_set(self): GroupUser.objects.filter(user=self.reviewer).all().delete() self.url = reverse( 'reviewers.review', args=['content', self.addon.slug]) summary = AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) AddonReviewerFlags.objects.create( addon=self.addon, needs_admin_content_review=True) self.grant_permission(self.reviewer, 'Addons:ContentReview') self.grant_permission(self.reviewer, 'Reviews:Admin') response = self.client.post(self.url, { 'action': 'confirm_auto_approved', 'comments': 'ignore me this action does not support comments' }) assert response.status_code == 302 summary.reload() assert summary.confirmed is None # We're only doing a content review. assert ActivityLog.objects.filter( action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 0 assert ActivityLog.objects.filter( action=amo.LOG.APPROVE_CONTENT.id).count() == 1 a_log = ActivityLog.objects.filter( action=amo.LOG.APPROVE_CONTENT.id).get() assert a_log.details['version'] == self.addon.current_version.version assert a_log.details['comments'] == '' self.assert3xx(response, reverse('reviewers.queue_content_review')) def test_confirm_auto_approval_with_permission(self): summary = AutoApprovalSummary.objects.create( version=self.addon.current_version, verdict=amo.AUTO_APPROVED) GroupUser.objects.filter(user=self.reviewer).all().delete() self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.post(self.url, { 'action': 'confirm_auto_approved', 'comments': 'ignore me this action does not support comments' }) summary.reload() assert response.status_code == 302 assert summary.confirmed is True assert ActivityLog.objects.filter( action=amo.LOG.CONFIRM_AUTO_APPROVED.id).count() == 1 a_log = ActivityLog.objects.filter( action=amo.LOG.CONFIRM_AUTO_APPROVED.id).get() assert a_log.details['version'] == self.addon.current_version.version assert a_log.details['comments'] == '' self.assert3xx(response, reverse('reviewers.queue_auto_approved')) def test_user_changes_log(self): # Activity logs related to user changes should be displayed. # Create an activy log for each of the following: user addition, role # change and deletion. 
author = self.addon.addonuser_set.get() core.set_user(author.user) ActivityLog.create(amo.LOG.ADD_USER_WITH_ROLE, author.user, author.get_role_display(), self.addon) ActivityLog.create(amo.LOG.CHANGE_USER_WITH_ROLE, author.user, author.get_role_display(), self.addon) ActivityLog.create(amo.LOG.REMOVE_USER_WITH_ROLE, author.user, author.get_role_display(), self.addon) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert 'user_changes' in response.context user_changes_log = response.context['user_changes'] actions = [log.activity_log.action for log in user_changes_log] assert actions == [ amo.LOG.ADD_USER_WITH_ROLE.id, amo.LOG.CHANGE_USER_WITH_ROLE.id, amo.LOG.REMOVE_USER_WITH_ROLE.id] # Make sure the logs are displayed in the page. user_changes = doc('#user-changes li') assert len(user_changes) == 3 assert '(Owner) added to ' in user_changes[0].text assert 'role changed to Owner for ' in user_changes[1].text assert '(Owner) removed from ' in user_changes[2].text @override_settings(CELERY_ALWAYS_EAGER=True) @mock.patch('olympia.devhub.tasks.validate') def test_validation_not_run_eagerly(self, validate): """Tests that validation is not run in eager mode.""" assert not self.file.has_been_validated response = self.client.get(self.url) assert response.status_code == 200 assert not validate.called @override_settings(CELERY_ALWAYS_EAGER=False) @mock.patch('olympia.devhub.tasks.validate') def test_validation_run(self, validate): """Tests that validation is run if necessary.""" assert not self.file.has_been_validated response = self.client.get(self.url) assert response.status_code == 200 validate.assert_called_once_with(self.file) @override_settings(CELERY_ALWAYS_EAGER=False) @mock.patch('olympia.devhub.tasks.validate') def test_validation_not_run_again(self, validate): """Tests that validation is not run for files which have cached results.""" FileValidation.objects.create(file=self.file, validation=json.dumps( amo.VALIDATOR_SKELETON_RESULTS)) response = self.client.get(self.url) assert response.status_code == 200 assert not validate.called def test_review_is_review_listed(self): review_page = self.client.get( reverse('reviewers.review', args=[self.addon.slug])) listed_review_page = self.client.get( reverse('reviewers.review', args=['listed', self.addon.slug])) assert (pq(review_page.content)('#review-files').text() == pq(listed_review_page.content)('#review-files').text()) def test_approvals_info(self): approval_info = AddonApprovalsCounter.objects.create( addon=self.addon, last_human_review=datetime.now(), counter=42) self.file.update(is_webextension=True) AutoApprovalSummary.objects.create( version=self.version, verdict=amo.AUTO_APPROVED) self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('.last-approval-date') approval_info.delete() response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) # no AddonApprovalsCounter: nothing displayed. 
assert not doc('.last-approval-date') def test_no_auto_approval_summaries_since_everything_is_public(self): self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('.auto_approval') def test_permissions_display(self): permissions = ['bookmarks', 'high', 'voltage'] self.file.update(is_webextension=True) WebextPermission.objects.create( permissions=permissions, file=self.file) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) info = doc('#review-files .file-info div') assert info.eq(1).text() == 'Permissions: ' + ', '.join(permissions) def test_abuse_reports(self): report = AbuseReport.objects.create( addon=self.addon, message=u'Et mël mazim ludus.', ip_address='10.1.2.3') created_at = defaultfilters.date(report.created) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('.abuse_reports') self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('.abuse_reports') AutoApprovalSummary.objects.create( verdict=amo.AUTO_APPROVED, version=self.version) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('.abuse_reports') assert ( doc('.abuse_reports').text() == u'anonymous [10.1.2.3] reported Public on %s\nEt mël mazim ludus.' % created_at) def test_abuse_reports_developers(self): report = AbuseReport.objects.create( user=self.addon.listed_authors[0], message=u'Foo, Bâr!', ip_address='10.4.5.6') created_at = defaultfilters.date(report.created) AutoApprovalSummary.objects.create( verdict=amo.AUTO_APPROVED, version=self.version) self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('.abuse_reports') assert ( doc('.abuse_reports').text() == u'anonymous [10.4.5.6] reported regularuser التطب on %s\nFoo, Bâr!' % created_at) def test_user_ratings(self): user = user_factory() rating = Rating.objects.create( body=u'Lôrem ipsum dolor', rating=3, ip_address='10.5.6.7', addon=self.addon, user=user) created_at = defaultfilters.date(rating.created) Rating.objects.create( # Review with no body, ignored. rating=1, addon=self.addon, user=user_factory()) Rating.objects.create( # Reply to a review, ignored. body='Replyyyyy', reply_to=rating, addon=self.addon, user=user_factory()) Rating.objects.create( # Review with high rating,, ignored. 
body=u'Qui platônem temporibus in', rating=5, addon=self.addon, user=user_factory()) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('.user_ratings') self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert not doc('.user_ratings') AutoApprovalSummary.objects.create( verdict=amo.AUTO_APPROVED, version=self.version) response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('.user_ratings') assert ( doc('.user_ratings').text() == u'%s on %s [10.5.6.7]\n' u'Rated 3 out of 5 stars\nLôrem ipsum dolor' % ( user.username, created_at ) ) def test_data_value_attributes(self): AutoApprovalSummary.objects.create( verdict=amo.AUTO_APPROVED, version=self.version) self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected_actions_values = [ 'confirm_auto_approved|', 'reject_multiple_versions|', 'reply|', 'super|', 'comment|'] assert [ act.attrib['data-value'] for act in doc('.data-toggle.review-actions-desc')] == expected_actions_values assert ( doc('select#id_versions.data-toggle')[0].attrib['data-value'] == 'reject_multiple_versions|') assert ( doc('.data-toggle.review-comments')[0].attrib['data-value'] == 'reject_multiple_versions|reply|super|comment|') # We don't have approve/reject actions so these have an empty # data-value. assert ( doc('.data-toggle.review-files')[0].attrib['data-value'] == '|') assert ( doc('.data-toggle.review-tested')[0].attrib['data-value'] == '|') assert ( doc('.data-toggle.review-info-request')[0].attrib['data-value'] == 'reply|') def test_data_value_attributes_unreviewed(self): self.file.update(status=amo.STATUS_AWAITING_REVIEW) self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected_actions_values = [ 'public|', 'reject|', 'reply|', 'super|', 'comment|'] assert [ act.attrib['data-value'] for act in doc('.data-toggle.review-actions-desc')] == expected_actions_values assert ( doc('select#id_versions.data-toggle')[0].attrib['data-value'] == 'reject_multiple_versions|') assert ( doc('.data-toggle.review-comments')[0].attrib['data-value'] == 'public|reject|reply|super|comment|') assert ( doc('.data-toggle.review-files')[0].attrib['data-value'] == 'public|reject|') assert ( doc('.data-toggle.review-tested')[0].attrib['data-value'] == 'public|reject|') def test_data_value_attributes_static_theme(self): self.addon.update(type=amo.ADDON_STATICTHEME) self.file.update(status=amo.STATUS_AWAITING_REVIEW) self.grant_permission(self.reviewer, 'Addons:ThemeReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) expected_actions_values = [ 'public|', 'reject|', 'reply|', 'super|', 'comment|'] assert [ act.attrib['data-value'] for act in doc('.data-toggle.review-actions-desc')] == expected_actions_values assert ( doc('select#id_versions.data-toggle')[0].attrib['data-value'] == 'reject_multiple_versions|') assert ( doc('.data-toggle.review-comments')[0].attrib['data-value'] == 'public|reject|reply|super|comment|') # we don't show files and tested with for any static theme actions assert ( doc('.data-toggle.review-files')[0].attrib['data-value'] == '|') assert ( 
doc('.data-toggle.review-tested')[0].attrib['data-value'] == '|') def test_post_review_ignore_disabled(self): # Though the latest version will be disabled, the add-on is public and # was auto-approved so the confirmation action is available. AutoApprovalSummary.objects.create( verdict=amo.AUTO_APPROVED, version=self.version) version_factory( addon=self.addon, file_kw={'status': amo.STATUS_DISABLED}) self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 expected_actions = [ 'confirm_auto_approved', 'reject_multiple_versions', 'reply', 'super', 'comment'] assert ( [action[0] for action in response.context['actions']] == expected_actions) def test_content_review_ignore_disabled(self): # Though the latest version will be disabled, the add-on is public and # was auto-approved so the content approval action is available. AutoApprovalSummary.objects.create( verdict=amo.AUTO_APPROVED, version=self.version) version_factory( addon=self.addon, file_kw={'status': amo.STATUS_DISABLED}) self.grant_permission(self.reviewer, 'Addons:ContentReview') self.url = reverse( 'reviewers.review', args=['content', self.addon.slug]) response = self.client.get(self.url) assert response.status_code == 200 expected_actions = [ 'confirm_auto_approved', 'reject_multiple_versions', 'reply', 'super', 'comment'] assert ( [action[0] for action in response.context['actions']] == expected_actions) @mock.patch('olympia.versions.models.walkfiles') def test_static_theme_backgrounds(self, walkfiles_mock): background_files = ['a.png', 'b.png', 'c.png'] walkfiles_folder = os.path.join( user_media_path('addons'), str(self.addon.id), unicode(self.addon.current_version.id)) walkfiles_mock.return_value = [ os.path.join(walkfiles_folder, filename) for filename in background_files] self.addon.update(type=amo.ADDON_STATICTHEME) self.grant_permission(self.reviewer, 'Addons:ThemeReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) backgrounds_div = doc('div.all-backgrounds') assert backgrounds_div.length == 1 images = doc('div.all-backgrounds .background.zoombox') assert images.length == len(walkfiles_mock.return_value) background_file_folder = '/'.join([ user_media_url('addons'), str(self.addon.id), unicode(self.addon.current_version.id)]) background_file_urls = [ background_file_folder + '/' + filename for filename in background_files] loop_ct = 0 for div_tag in images: assert div_tag[0].attrib['src'] in background_file_urls assert ''.join(div_tag.itertext()).strip() == ( 'Background file {0} of {1} - {2}'.format( loop_ct + 1, len(background_files), background_files[loop_ct])) loop_ct += 1 class TestReviewPending(ReviewBase): def setUp(self): super(TestReviewPending, self).setUp() self.file = file_factory(version=self.version, status=amo.STATUS_AWAITING_REVIEW, is_webextension=True) self.addon.update(status=amo.STATUS_PUBLIC) def pending_dict(self): return self.get_dict(action='public') @patch('olympia.reviewers.utils.sign_file') def test_pending_to_public(self, mock_sign): statuses = (self.version.files.values_list('status', flat=True) .order_by('status')) assert list(statuses) == [ amo.STATUS_AWAITING_REVIEW, amo.STATUS_PUBLIC] response = self.client.post(self.url, self.pending_dict()) assert self.get_addon().status == amo.STATUS_PUBLIC self.assert3xx(response, reverse('reviewers.queue_pending')) statuses = (self.version.files.values_list('status', flat=True) .order_by('status')) assert list(statuses) == 
[amo.STATUS_PUBLIC, amo.STATUS_PUBLIC] assert mock_sign.called def test_display_only_unreviewed_files(self): """Only the currently unreviewed files are displayed.""" self.file.update(filename='somefilename.xpi') reviewed = File.objects.create(version=self.version, status=amo.STATUS_PUBLIC, filename='file_reviewed.xpi') disabled = File.objects.create(version=self.version, status=amo.STATUS_DISABLED, filename='file_disabled.xpi') unreviewed = File.objects.create(version=self.version, status=amo.STATUS_AWAITING_REVIEW, filename='file_unreviewed.xpi') response = self.client.get(self.url, self.pending_dict()) assert response.status_code == 200 doc = pq(response.content) assert len(doc('.review-actions-files ul li')) == 2 assert reviewed.filename not in response.content assert disabled.filename not in response.content assert unreviewed.filename in response.content assert self.file.filename in response.content @patch('olympia.reviewers.utils.sign_file') def test_review_unreviewed_files(self, mock_sign): """Review all the unreviewed files when submitting a review.""" reviewed = File.objects.create(version=self.version, status=amo.STATUS_PUBLIC) disabled = File.objects.create(version=self.version, status=amo.STATUS_DISABLED) unreviewed = File.objects.create(version=self.version, status=amo.STATUS_AWAITING_REVIEW) self.login_as_admin() response = self.client.post(self.url, self.pending_dict()) self.assert3xx(response, reverse('reviewers.queue_pending')) assert self.addon.reload().status == amo.STATUS_PUBLIC assert reviewed.reload().status == amo.STATUS_PUBLIC assert disabled.reload().status == amo.STATUS_DISABLED assert unreviewed.reload().status == amo.STATUS_PUBLIC assert self.file.reload().status == amo.STATUS_PUBLIC assert mock_sign.called def test_auto_approval_summary_with_post_review(self): AutoApprovalSummary.objects.create( version=self.version, verdict=amo.NOT_AUTO_APPROVED, is_locked=True, ) self.grant_permission(self.reviewer, 'Addons:PostReview') response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) # Locked by a reviewer is shown. assert len(doc('.auto_approval li')) == 1 assert doc('.auto_approval li').eq(0).text() == ( 'Is locked by a reviewer.') class TestReviewerMOTD(ReviewerTest): def get_url(self, save=False): return reverse('reviewers.%smotd' % ('save_' if save else '')) def test_change_motd(self): self.login_as_admin() motd = "Let's get crazy" response = self.client.post(self.get_url(save=True), {'motd': motd}) url = self.get_url() self.assert3xx(response, url) response = self.client.get(url) assert response.status_code == 200 assert pq(response.content)('.daily-message p').text() == motd def test_require_reviewer_to_view(self): url = self.get_url() self.assertLoginRedirects(self.client.head(url), to=url) def test_require_admin_to_change_motd(self): self.login_as_reviewer() response = self.client.get(self.get_url()) assert response.status_code == 403 response = self.client.post(reverse('reviewers.save_motd'), {'motd': "I'm a sneaky reviewer"}) assert response.status_code == 403 def test_motd_edit_group(self): user = UserProfile.objects.get(email='reviewer@mozilla.com') group = Group.objects.create(name='Add-on Reviewer MOTD', rules='AddonReviewerMOTD:Edit') GroupUser.objects.create(user=user, group=group) self.login_as_reviewer() response = self.client.post(reverse('reviewers.save_motd'), {'motd': 'I am the keymaster.'}) assert response.status_code == 302 assert get_config('reviewers_review_motd') == 'I am the keymaster.' 
def test_form_errors(self): self.login_as_admin() response = self.client.post(self.get_url(save=True)) doc = pq(response.content) assert doc('#reviewer-motd .errorlist').text() == ( 'This field is required.') class TestStatusFile(ReviewBase): def get_file(self): return self.version.files.all()[0] def check_status(self, expected): response = self.client.get(self.url) assert response.status_code == 200 doc = pq(response.content) assert doc('#review-files .file-info div').text() == expected def test_status_full(self): self.get_file().update(status=amo.STATUS_AWAITING_REVIEW) for status in [amo.STATUS_NOMINATED, amo.STATUS_PUBLIC]: self.addon.update(status=status) self.check_status('Awaiting Review') def test_status_full_reviewed(self): self.get_file().update(status=amo.STATUS_PUBLIC) self.addon.update(status=amo.STATUS_PUBLIC) self.check_status('Approved') class TestWhiteboard(ReviewBase): @property def addon_param(self): return self.addon.pk if self.addon.is_deleted else self.addon.slug def test_whiteboard_addition(self): public_whiteboard_info = u'Public whiteboard info.' private_whiteboard_info = u'Private whiteboard info.' url = reverse( 'reviewers.whiteboard', args=['listed', self.addon_param]) response = self.client.post(url, { 'whiteboard-private': private_whiteboard_info, 'whiteboard-public': public_whiteboard_info }) self.assert3xx(response, reverse( 'reviewers.review', args=('listed', self.addon_param))) addon = self.addon.reload() assert addon.whiteboard.public == public_whiteboard_info assert addon.whiteboard.private == private_whiteboard_info def test_whiteboard_addition_content_review(self): public_whiteboard_info = u'Public whiteboard info for content.' private_whiteboard_info = u'Private whiteboard info for content.' url = reverse( 'reviewers.whiteboard', args=['content', self.addon_param]) response = self.client.post(url, { 'whiteboard-private': private_whiteboard_info, 'whiteboard-public': public_whiteboard_info }) assert response.status_code == 403 # Not a content reviewer. user = UserProfile.objects.get(email='reviewer@mozilla.com') self.grant_permission(user, 'Addons:ContentReview') self.login_as_reviewer() response = self.client.post(url, { 'whiteboard-private': private_whiteboard_info, 'whiteboard-public': public_whiteboard_info }) self.assert3xx(response, reverse( 'reviewers.review', args=('content', self.addon_param))) addon = self.addon.reload() assert addon.whiteboard.public == public_whiteboard_info assert addon.whiteboard.private == private_whiteboard_info def test_whiteboard_addition_unlisted_addon(self): user = UserProfile.objects.get(email='reviewer@mozilla.com') self.grant_permission(user, 'Addons:ReviewUnlisted') self.login_as_reviewer() self.make_addon_unlisted(self.addon) public_whiteboard_info = u'Public whiteboard info unlisted.' private_whiteboard_info = u'Private whiteboard info unlisted.' 
url = reverse( 'reviewers.whiteboard', args=['unlisted', self.addon_param]) response = self.client.post(url, { 'whiteboard-private': private_whiteboard_info, 'whiteboard-public': public_whiteboard_info }) self.assert3xx(response, reverse( 'reviewers.review', args=('unlisted', self.addon_param))) addon = self.addon.reload() assert addon.whiteboard.public == public_whiteboard_info assert addon.whiteboard.private == private_whiteboard_info def test_delete_empty(self): url = reverse( 'reviewers.whiteboard', args=['listed', self.addon_param]) response = self.client.post(url, { 'whiteboard-private': '', 'whiteboard-public': '' }) self.assert3xx(response, reverse( 'reviewers.review', args=('listed', self.addon_param))) assert not Whiteboard.objects.filter(pk=self.addon.pk) class TestWhiteboardDeleted(TestWhiteboard): def setUp(self): super(TestWhiteboardDeleted, self).setUp() self.addon.delete() class TestAbuseReports(TestCase): fixtures = ['base/users', 'base/addon_3615'] def setUp(self): addon = Addon.objects.get(pk=3615) addon_developer = addon.listed_authors[0] someone = UserProfile.objects.exclude(pk=addon_developer.pk)[0] AbuseReport.objects.create(addon=addon, message=u'wôo') AbuseReport.objects.create(addon=addon, message=u'yéah', reporter=someone) # Make a user abuse report to make sure it doesn't show up. AbuseReport.objects.create(user=someone, message=u'hey nöw') # Make a user abuse report for one of the add-on developers: it should # show up. AbuseReport.objects.create(user=addon_developer, message='bü!') def test_abuse_reports_list(self): assert self.client.login(email='admin@mozilla.com') r = self.client.get(reverse('reviewers.abuse_reports', args=['a3615'])) assert r.status_code == 200 # We see the two abuse reports created in setUp. assert len(r.context['reports']) == 3 def test_no_abuse_reports_link_for_unlisted_addons(self): """Unlisted addons aren't public, and thus have no abuse reports.""" addon = Addon.objects.get(pk=3615) self.make_addon_unlisted(addon) self.client.login(email='admin@mozilla.com') response = reverse('reviewers.review', args=[addon.slug]) abuse_report_url = reverse('reviewers.abuse_reports', args=['a3615']) assert abuse_report_url not in response class TestLeaderboard(ReviewerTest): fixtures = ['base/users'] def setUp(self): super(TestLeaderboard, self).setUp() self.url = reverse('reviewers.leaderboard') self.user = UserProfile.objects.get(email='reviewer@mozilla.com') self.login_as_reviewer() core.set_user(self.user) def _award_points(self, user, score): ReviewerScore.objects.create(user=user, note_key=amo.REVIEWED_MANUAL, score=score, note='Thing.') def test_leaderboard_ranks(self): other_reviewer = UserProfile.objects.create( username='post_reviewer', display_name='', # No display_name, will fall back on name. email='post_reviewer@mozilla.com') self.grant_permission( other_reviewer, 'Addons:PostReview', name='Reviewers: Add-ons' # The name of the group matters here. 
) users = (self.user, UserProfile.objects.get(email='persona_reviewer@mozilla.com'), other_reviewer) self._award_points(users[0], amo.REVIEWED_LEVELS[0]['points'] - 1) self._award_points(users[1], amo.REVIEWED_LEVELS[0]['points'] + 1) self._award_points(users[2], amo.REVIEWED_LEVELS[0]['points'] + 2) def get_cells(): doc = pq(self.client.get(self.url).content.decode('utf-8')) cells = doc('#leaderboard > tbody > tr > .name, ' '#leaderboard > tbody > tr > .level') return [cells.eq(i).text() for i in range(0, cells.length)] assert get_cells() == ( [users[2].name, users[1].name, unicode(amo.REVIEWED_LEVELS[0]['name']), users[0].name]) self._award_points(users[0], 1) assert get_cells() == ( [users[2].name, users[1].name, users[0].name, unicode(amo.REVIEWED_LEVELS[0]['name'])]) self._award_points(users[0], -1) self._award_points(users[2], (amo.REVIEWED_LEVELS[1]['points'] - amo.REVIEWED_LEVELS[0]['points'])) assert get_cells() == ( [users[2].name, unicode(amo.REVIEWED_LEVELS[1]['name']), users[1].name, unicode(amo.REVIEWED_LEVELS[0]['name']), users[0].name]) class TestXssOnAddonName(amo.tests.TestXss): def test_reviewers_abuse_report_page(self): url = reverse('reviewers.abuse_reports', args=[self.addon.slug]) self.assertNameAndNoXSS(url) def test_reviewers_review_page(self): url = reverse('reviewers.review', args=[self.addon.slug]) self.assertNameAndNoXSS(url) class TestAddonReviewerViewSet(TestCase): client_class = APITestClient def setUp(self): super(TestAddonReviewerViewSet, self).setUp() self.user = user_factory() self.addon = addon_factory() self.subscribe_url = reverse_ns( 'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk}) self.unsubscribe_url = reverse_ns( 'reviewers-addon-unsubscribe', kwargs={'pk': self.addon.pk}) self.enable_url = reverse_ns( 'reviewers-addon-enable', kwargs={'pk': self.addon.pk}) self.disable_url = reverse_ns( 'reviewers-addon-disable', kwargs={'pk': self.addon.pk}) self.flags_url = reverse_ns( 'reviewers-addon-flags', kwargs={'pk': self.addon.pk}) def test_subscribe_not_logged_in(self): response = self.client.post(self.subscribe_url) assert response.status_code == 401 def test_subscribe_no_rights(self): self.client.login_api(self.user) response = self.client.post(self.subscribe_url) assert response.status_code == 403 def test_subscribe_addon_does_not_exist(self): self.grant_permission(self.user, 'Addons:PostReview') self.client.login_api(self.user) self.subscribe_url = reverse_ns( 'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk + 42}) response = self.client.post(self.subscribe_url) assert response.status_code == 404 def test_subscribe_already_subscribed(self): ReviewerSubscription.objects.create( user=self.user, addon=self.addon) self.grant_permission(self.user, 'Addons:PostReview') self.client.login_api(self.user) self.subscribe_url = reverse_ns( 'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk}) response = self.client.post(self.subscribe_url) assert response.status_code == 202 assert ReviewerSubscription.objects.count() == 1 def test_subscribe(self): self.grant_permission(self.user, 'Addons:PostReview') self.client.login_api(self.user) self.subscribe_url = reverse_ns( 'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk}) response = self.client.post(self.subscribe_url) assert response.status_code == 202 assert ReviewerSubscription.objects.count() == 1 def test_unsubscribe_not_logged_in(self): response = self.client.post(self.unsubscribe_url) assert response.status_code == 401 def test_unsubscribe_no_rights(self): 
self.client.login_api(self.user) response = self.client.post(self.unsubscribe_url) assert response.status_code == 403 def test_unsubscribe_addon_does_not_exist(self): self.grant_permission(self.user, 'Addons:PostReview') self.client.login_api(self.user) self.unsubscribe_url = reverse_ns( 'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk + 42}) response = self.client.post(self.unsubscribe_url) assert response.status_code == 404 def test_unsubscribe_not_subscribed(self): self.grant_permission(self.user, 'Addons:PostReview') self.client.login_api(self.user) self.subscribe_url = reverse_ns( 'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk}) response = self.client.post(self.unsubscribe_url) assert response.status_code == 202 assert ReviewerSubscription.objects.count() == 0 def test_unsubscribe(self): ReviewerSubscription.objects.create( user=self.user, addon=self.addon) self.grant_permission(self.user, 'Addons:PostReview') self.client.login_api(self.user) self.subscribe_url = reverse_ns( 'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk}) response = self.client.post(self.unsubscribe_url) assert response.status_code == 202 assert ReviewerSubscription.objects.count() == 0 def test_unsubscribe_dont_touch_another(self): another_user = user_factory() another_addon = addon_factory() ReviewerSubscription.objects.create( user=self.user, addon=self.addon) ReviewerSubscription.objects.create( user=self.user, addon=another_addon) ReviewerSubscription.objects.create( user=another_user, addon=self.addon) self.grant_permission(self.user, 'Addons:PostReview') self.client.login_api(self.user) self.subscribe_url = reverse_ns( 'reviewers-addon-subscribe', kwargs={'pk': self.addon.pk}) response = self.client.post(self.unsubscribe_url) assert response.status_code == 202 assert ReviewerSubscription.objects.count() == 2 assert not ReviewerSubscription.objects.filter( addon=self.addon, user=self.user).exists() def test_enable_not_logged_in(self): response = self.client.post(self.enable_url) assert response.status_code == 401 def test_enable_no_rights(self): self.client.login_api(self.user) response = self.client.post(self.enable_url) assert response.status_code == 403 # Being a reviewer is not enough. 
self.grant_permission(self.user, 'Addons:Review') response = self.client.post(self.enable_url) assert response.status_code == 403 def test_enable_addon_does_not_exist(self): self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) self.enable_url = reverse_ns( 'reviewers-addon-enable', kwargs={'pk': self.addon.pk + 42}) response = self.client.post(self.enable_url) assert response.status_code == 404 def test_enable(self): self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) self.addon.update(status=amo.STATUS_DISABLED) response = self.client.post(self.enable_url) assert response.status_code == 202 self.addon.reload() assert self.addon.status == amo.STATUS_PUBLIC assert ActivityLog.objects.count() == 1 activity_log = ActivityLog.objects.latest('pk') assert activity_log.action == amo.LOG.CHANGE_STATUS.id assert activity_log.arguments[0] == self.addon def test_enable_already_public(self): self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) response = self.client.post(self.enable_url) assert response.status_code == 202 self.addon.reload() assert self.addon.status == amo.STATUS_PUBLIC assert ActivityLog.objects.count() == 1 activity_log = ActivityLog.objects.latest('pk') assert activity_log.action == amo.LOG.CHANGE_STATUS.id assert activity_log.arguments[0] == self.addon def test_enable_no_public_versions_should_fall_back_to_incomplete(self): self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) self.addon.update(status=amo.STATUS_DISABLED) self.addon.versions.all().delete() response = self.client.post(self.enable_url) assert response.status_code == 202 self.addon.reload() assert self.addon.status == amo.STATUS_NULL def test_enable_version_is_awaiting_review_fall_back_to_nominated(self): self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) self.addon.current_version.files.all().update( status=amo.STATUS_AWAITING_REVIEW) self.addon.update(status=amo.STATUS_DISABLED) response = self.client.post(self.enable_url) assert response.status_code == 202 self.addon.reload() assert self.addon.status == amo.STATUS_NOMINATED def test_disable_not_logged_in(self): response = self.client.post(self.disable_url) assert response.status_code == 401 def test_disable_no_rights(self): self.client.login_api(self.user) response = self.client.post(self.disable_url) assert response.status_code == 403 # Being a reviewer is not enough. 
self.grant_permission(self.user, 'Addons:Review') response = self.client.post(self.disable_url) assert response.status_code == 403 def test_disable_addon_does_not_exist(self): self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) self.disable_url = reverse_ns( 'reviewers-addon-enable', kwargs={'pk': self.addon.pk + 42}) response = self.client.post(self.disable_url) assert response.status_code == 404 def test_disable(self): self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) self.addon.versions.all().delete() response = self.client.post(self.disable_url) assert response.status_code == 202 self.addon.reload() assert self.addon.status == amo.STATUS_DISABLED assert ActivityLog.objects.count() == 1 activity_log = ActivityLog.objects.latest('pk') assert activity_log.action == amo.LOG.CHANGE_STATUS.id assert activity_log.arguments[0] == self.addon def test_patch_flags_not_logged_in(self): response = self.client.patch( self.flags_url, {'auto_approval_disabled': True}) assert response.status_code == 401 def test_patch_flags_no_permissions(self): self.client.login_api(self.user) response = self.client.patch( self.flags_url, {'auto_approval_disabled': True}) assert response.status_code == 403 # Being a reviewer is not enough. self.grant_permission(self.user, 'Addons:Review') response = self.client.patch( self.flags_url, {'auto_approval_disabled': True}) assert response.status_code == 403 def test_patch_flags_addon_does_not_exist(self): self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) self.flags_url = reverse_ns( 'reviewers-addon-flags', kwargs={'pk': self.addon.pk + 42}) response = self.client.patch( self.flags_url, {'auto_approval_disabled': True}) assert response.status_code == 404 def test_patch_flags_no_flags_yet_still_works_transparently(self): assert not AddonReviewerFlags.objects.filter(addon=self.addon).exists() self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) response = self.client.patch( self.flags_url, {'auto_approval_disabled': True}) assert response.status_code == 200 assert AddonReviewerFlags.objects.filter(addon=self.addon).exists() reviewer_flags = AddonReviewerFlags.objects.get(addon=self.addon) assert reviewer_flags.auto_approval_disabled assert ActivityLog.objects.count() == 0 def test_patch_flags_change_everything(self): AddonReviewerFlags.objects.create( addon=self.addon, pending_info_request=self.days_ago(1), auto_approval_disabled=True) self.grant_permission(self.user, 'Reviews:Admin') self.client.login_api(self.user) data = { 'auto_approval_disabled': False, 'needs_admin_code_review': True, 'needs_admin_content_review': True, 'needs_admin_theme_review': True, 'pending_info_request': None, } response = self.client.patch(self.flags_url, data) assert response.status_code == 200 assert AddonReviewerFlags.objects.filter(addon=self.addon).exists() reviewer_flags = AddonReviewerFlags.objects.get(addon=self.addon) assert reviewer_flags.auto_approval_disabled is False assert reviewer_flags.needs_admin_code_review is True assert reviewer_flags.needs_admin_content_review is True assert reviewer_flags.needs_admin_theme_review is True assert reviewer_flags.pending_info_request is None assert ActivityLog.objects.count() == 1 activity_log = ActivityLog.objects.latest('pk') assert activity_log.action == amo.LOG.ADMIN_ALTER_INFO_REQUEST.id assert activity_log.arguments[0] == self.addon
bsd-3-clause
-4,432,391,639,027,054,000
41.218142
79
0.612274
false
3.840115
true
false
false
gitizenme/ImprovWithAlexa
improvwithalexa_function.py
1
6601
import logging from flask import Flask, render_template from flask_ask import Ask, statement, question, session from chatterbot import ChatBot app = Flask(__name__) ask = Ask(app, "/") logging.getLogger("flask_ask").setLevel(logging.DEBUG) # Create a new instance of a ChatBot chatbot = ChatBot( "Improv", read_only=False, trainer='chatterbot.trainers.ListTrainer', storage_adapter="chatterbot.storage.JsonFileStorageAdapter", logic_adapters=[ { 'import_path': 'chatterbot.logic.BestMatch' }, # { # 'import_path': 'chatterbot.logic.LowConfidenceAdapter', # 'threshold': 0.63, # 'default_response': 'I am sorry, but I do not understand.' # }, "chatterbot.logic.MathematicalEvaluation", ], database="/tmp/improv.json" ) # Greetings chatbot.train([ "Nice to meet you.", "Thank you.", "Hi, nice to meet you.", "Thank you. You too.", "It is a pleasure to meet you.", "Thank you. You too.", "Top of the morning to you!", "Thank you kindly.", "Top of the morning to you!", "And the rest of the day to you.", "What's up?", "Not much.", "What's up?", "Not too much.", "What's up?", "Not much, how about you?", "What's up?", "Nothing much.", "What's up?", "The sky's up but I'm fine thanks. What about you?", ]) # Intelligence chatbot.train({ "what are the laws of thermodynamics", "i'm not a physicist, but i think this has something to do with heat, entropy, and conservation of energy, right?", }) chatbot.train({ "what is the distance to the sun from the earth", "the sun is about 93 million miles from earth.", }) chatbot.train({ "how far away is the moon", "the moon is about 250,000 miles from earth on average.", }) chatbot.train({ "What was the name of the first artificial Earth satellite?", "Sputnik 1", }) # Knowledge chatbot.train([ "have you ever read a book", "i have read many books.", "ray bradbury", "ray is really cool", "william gibson", 'i like his older cyberpunk <say-as interpret-as="spell-out">AI</say-as> stuff better than the newer works.', "frank herbert", "i think dune is an excellent story. did you see the movie?", "george r r martin", "Ooh, game of thrones, the 7th season is starting out well", ]) # Truth chatbot.train([ 'what is true?', 'in accordance with <emphasis level="strong">fact</emphasis> or <emphasis level="strong">reality</emphasis>', 'what is false?', 'not according to true or fact', 'is true false?', 'false', 'is false true', 'true', 'is true equal to true', 'true', 'is false equal to true', 'false' ]) # Calculations # enabled chatterbot.logic.MathematicalEvaluation # Humor chatbot.train([ 'what is humour', 'a message that communicates laughter.', 'do you laugh', '<prosody rate="x-fast" pitch="x-high" volume="x-loud"><emphasis level="reduced">Ha,</emphasis><emphasis level="reduced">Ha,</emphasis><emphasis level="moderate">ha,</emphasis><emphasis level="moderate">ha,</emphasis><emphasis level="strong">ha</emphasis><emphasis level="strong">ha</emphasis><emphasis level="strong">ha</emphasis><break time="1s"/></prosody><prosody rate="x-slow" pitch="medium" volume="soft"><p>yeah no</p></prosody>', 'do you have a sense of humour', '"He who laughs, lasts." Mary Pettibone Poole', ]) chatbot.train([ 'knock, knock', 'who''s there', 'pima', 'pima who', 'I''m going to pee my pants', 'That''s funny!' ]) # Social chatbot.train({ 'Tell me about yourself.', 'What do you want to know?', }) chatbot.train({ 'Are you a robot?', 'Yes I am.' }) # Bizarre chatbot.train({ 'do you know any bizarre facts', 'A bus powered by human poop runs in the U.K. 
The bus can travel up to 186 miles on one tank of gas, which is equivalent to the waste produced annually by five people' }) # Artificial Intelligence chatbot.train({ "What is Artificial Intelligence", "Artificial Intelligence is the branch of engineering and science devoted to constructing machines that think.", }) chatbot.train({ "You sound like Siri", "Yes I am inspired by commander Siri's artificial personality.", }) # Emotions chatbot.train({ 'do you have emotions?', 'yes, I have them', }) chatbot.train({ 'what are you feeling right now?', 'I''m energized by the ignite reno crowd' }) # Movies chatbot.train({ 'what is your favorite movie?', 'Pulp Fiction', }) chatbot.train({ 'how about a quote?', 'What does Marselus Wallece look like?' }) # Jokes chatbot.train({ 'tell me a joke', 'what did the buddhist say to the hot dog vendor? "make me one with everything."', }) chatbot.train({ 'no, the joke about the dog', 'a 3-legged dog walks into an old west saloon, slides up to the bar and announces "i''m looking for the man who shot my paw." ' }) # Goodbye chatbot.train({ 'say goodnight', 'Thank you for coming out to Ignite Reno #18' }) @ask.launch def new_game(): if 'name' not in session.attributes: welcome_msg = render_template('welcome') else: welcome_msg = render_template('welcome_back', name=session.attributes["name"]) return question(welcome_msg) # @ask.intent("YesIntent") # def next_round(): # numbers = [randint(0, 9) for _ in range(3)] # round_msg = render_template('round', numbers=numbers) # session.attributes['numbers'] = numbers[::-1] # reverse # return question(round_msg) # # # @ask.intent("AnswerIntent", convert={'first': int, 'second': int, 'third': int}) # def answer(first, second, third): # winning_numbers = session.attributes['numbers'] # if [first, second, third] == winning_numbers: # msg = render_template('win') # else: # msg = render_template('lose') # return statement(msg) @ask.intent("ChatIntent", mapping={'chat_question': 'question'}) def chat(chat_question): response = chatbot.get_response(chat_question) speak_output = '<speak>{}</speak>'.format(response.text) q = question(speak_output) return q @ask.intent("NameIntent") def name(first_name): session.attributes['name'] = first_name return question("Hello {}. Nice to meet you.".format(first_name)) @ask.intent("GoodNightIntent") def goodbye(event): return statement("Thank you for coming out to Ignite Reno #18".format(event)) if __name__ == '__main__': app.run(debug=True)
mit
8,936,383,049,406,293,000
24.098859
441
0.641115
false
3.23896
false
false
false
endlessm/chromium-browser
third_party/angle/third_party/VK-GL-CTS/src/scripts/caselist_diff.py
6
15197
# -*- coding: utf-8 -*- #------------------------------------------------------------------------- # drawElements Quality Program utilities # -------------------------------------- # # Copyright 2015 The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # #------------------------------------------------------------------------- import sys RENAME_LIST_2011_1_2011_2 = [ ("dEQP-GLES2.functional.shaders.random.basic_expressions.*", "dEQP-GLES2.functional.shaders.random.basic_expression."), ("dEQP-GLES2.functional.shaders.random.scalar_conversions.*", "dEQP-GLES2.functional.shaders.random.scalar_conversion."), ("dEQP-GLES2.functional.fbo.render.color_clears_*", "dEQP-GLES2.functional.fbo.render.color_clear."), ("dEQP-GLES2.functional.fbo.render.intersecting_quads_*", "dEQP-GLES2.functional.fbo.render.depth."), ("dEQP-GLES2.functional.fbo.render.mix_*", "dEQP-GLES2.functional.fbo.render.color.mix_"), ("dEQP-GLES2.functional.fbo.render.blend_*", "dEQP-GLES2.functional.fbo.render.color.blend_"), ("dEQP-GLES2.functional.fbo.render.shared_colorbuffer_clears_*", "dEQP-GLES2.functional.fbo.render.shared_colorbuffer_clear."), ("dEQP-GLES2.functional.fbo.render.shared_colorbuffer_*", "dEQP-GLES2.functional.fbo.render.shared_colorbuffer."), ("dEQP-GLES2.functional.fbo.render.shared_depthbuffer_*", "dEQP-GLES2.functional.fbo.render.shared_depthbuffer."), ("dEQP-GLES2.functional.fbo.render.texsubimage_*", "dEQP-GLES2.functional.fbo.render.texsubimage."), ("dEQP-GLES2.functional.fbo.render.recreate_colorbuffer_*", "dEQP-GLES2.functional.fbo.render.recreate_colorbuffer.no_rebind_"), ("dEQP-GLES2.functional.fbo.render.recreate_depthbuffer_*", "dEQP-GLES2.functional.fbo.render.recreate_depthbuffer.no_rebind_"), ("dEQP-GLES2.functional.fbo.render.resize_*", "dEQP-GLES2.functional.fbo.render.resize.") ] RENAME_LIST_2011_2_2011_3 = [ ("dEQP-GLES2.usecases.ui.src_over_linear_1_batched", "dEQP-GLES2.usecases.ui.src_over_linear_batched_1"), ("dEQP-GLES2.usecases.ui.src_over_linear_2_batched", "dEQP-GLES2.usecases.ui.src_over_linear_batched_2"), ("dEQP-GLES2.usecases.ui.src_over_linear_4_batched", "dEQP-GLES2.usecases.ui.src_over_linear_batched_4"), ("dEQP-GLES2.usecases.ui.src_over_nearest_1_batched", "dEQP-GLES2.usecases.ui.src_over_nearest_batched_1"), ("dEQP-GLES2.usecases.ui.src_over_nearest_2_batched", "dEQP-GLES2.usecases.ui.src_over_nearest_batched_2"), ("dEQP-GLES2.usecases.ui.src_over_nearest_4_batched", "dEQP-GLES2.usecases.ui.src_over_nearest_batched_4"), ("dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_1_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_batched_1"), ("dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_2_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_batched_2"), ("dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_4_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_linear_batched_4"), ("dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_1_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_batched_1"), 
("dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_2_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_batched_2"), ("dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_4_batched", "dEQP-GLES2.usecases.ui.premultiplied_src_over_nearest_batched_4"), ("dEQP-GLES2.usecases.ui.no_blend_linear_1_batched", "dEQP-GLES2.usecases.ui.no_blend_linear_batched_1"), ("dEQP-GLES2.usecases.ui.no_blend_linear_2_batched", "dEQP-GLES2.usecases.ui.no_blend_linear_batched_2"), ("dEQP-GLES2.usecases.ui.no_blend_linear_4_batched", "dEQP-GLES2.usecases.ui.no_blend_linear_batched_4"), ("dEQP-GLES2.usecases.ui.no_blend_nearest_1_batched", "dEQP-GLES2.usecases.ui.no_blend_nearest_batched_1"), ("dEQP-GLES2.usecases.ui.no_blend_nearest_2_batched", "dEQP-GLES2.usecases.ui.no_blend_nearest_batched_2"), ("dEQP-GLES2.usecases.ui.no_blend_nearest_4_batched", "dEQP-GLES2.usecases.ui.no_blend_nearest_batched_4") ] RENAME_LIST_2011_3_2011_4 = [] RENAME_LIST_2011_4_2012_1 = [ ("dEQP-GLES2.functional.vertex_arrays.multiple_attributes.output_types.*", "dEQP-GLES2.functional.vertex_arrays.multiple_attributes.input_types."), ] RENAME_LIST_2012_2_2012_3 = [ ("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_float_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_float_float_vertex"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_float_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_float_float_fragment"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_float_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_float_float_vertex"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_float_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_float_float_fragment"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec2_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec2_float_vertex"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec2_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec2_float_fragment"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec2_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec2_float_vertex"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec2_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec2_float_fragment"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec3_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec3_float_vertex"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec3_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec3_float_fragment"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec3_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec3_float_vertex"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec3_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec3_float_fragment"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec4_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec4_float_vertex"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec4_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.mediump_vec4_float_fragment"), 
("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec4_vertex", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec4_float_vertex"), ("dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec4_fragment", "dEQP-GLES2.functional.shaders.operator.geometric.refract.highp_vec4_float_fragment"), ("dEQP-GLES2.functional.negative_api.texture.copyteximage2d_unequal_width_height_cube", "dEQP-GLES2.functional.negative_api.texture.copyteximage2d_inequal_width_height_cube"), ("dEQP-GLES2.functional.negative_api.texture.teximage2d_unequal_width_height_cube", "dEQP-GLES2.functional.negative_api.texture.teximage2d_inequal_width_height_cube"), ("dEQP-GLES2.functional.negative_api.vertex_array.draw_arrays", "dEQP-GLES2.functional.negative_api.vertex_array.draw_arrays_invalid_program"), ("dEQP-GLES2.functional.negative_api.vertex_array.draw_elemens", "dEQP-GLES2.functional.negative_api.vertex_array.draw_elements_invalid_program"), ("dEQP-GLES2.functional.negative_api.shader.attach_shader_invalid_object", "dEQP-GLES2.functional.negative_api.shader.attach_shader"), ("dEQP-GLES2.functional.negative_api.shader.detach_shader_invalid_object", "dEQP-GLES2.functional.negative_api.shader.detach_shader"), ("dEQP-GLES2.usecases.shadow.shadowmap.1sample.1_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.1_vertex_lights_no_texture"), ("dEQP-GLES2.usecases.shadow.shadowmap.1sample.2_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.2_vertex_lights_no_texture"), ("dEQP-GLES2.usecases.shadow.shadowmap.1sample.4_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.4_vertex_lights_no_texture"), ("dEQP-GLES2.usecases.shadow.shadowmap.1sample.1_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.1_vertex_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.1sample.2_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.2_vertex_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.1sample.4_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.4_vertex_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.1sample.1_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.1_fragment_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.1sample.2_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.2_fragment_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.1sample.4_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_1sample.4_fragment_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.4sample.1_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.1_vertex_lights_no_texture"), ("dEQP-GLES2.usecases.shadow.shadowmap.4sample.2_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.2_vertex_lights_no_texture"), ("dEQP-GLES2.usecases.shadow.shadowmap.4sample.4_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.4_vertex_lights_no_texture"), ("dEQP-GLES2.usecases.shadow.shadowmap.4sample.1_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.1_vertex_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.4sample.2_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.2_vertex_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.4sample.4_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.4_vertex_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.4sample.1_fragment_lights", 
"dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.1_fragment_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.4sample.2_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.2_fragment_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.4sample.4_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_4sample.4_fragment_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.16sample.1_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.1_vertex_lights_no_texture"), ("dEQP-GLES2.usecases.shadow.shadowmap.16sample.2_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.2_vertex_lights_no_texture"), ("dEQP-GLES2.usecases.shadow.shadowmap.16sample.4_vertex_lights_no_texture", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.4_vertex_lights_no_texture"), ("dEQP-GLES2.usecases.shadow.shadowmap.16sample.1_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.1_vertex_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.16sample.2_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.2_vertex_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.16sample.4_vertex_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.4_vertex_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.16sample.1_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.1_fragment_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.16sample.2_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.2_fragment_lights"), ("dEQP-GLES2.usecases.shadow.shadowmap.16sample.4_fragment_lights", "dEQP-GLES2.usecases.shadow.shadowmaps.basic_16sample.4_fragment_lights") ] RENAME_LIST_2012_3_2012_4 = [ ("dEQP-GLES2.functional.depth.*", "dEQP-GLES2.functional.fragment_ops.depth."), ("dEQP-GLES2.functional.stencil.*", "dEQP-GLES2.functional.fragment_ops.stencil.") ] def readCaseList (filename): f = open(filename, 'r') cases = [] for line in f: if line[0:5] == "TEST:": cases.append(line[6:].strip()) f.close() return cases def isWildcardPattern (pattern): return pattern[-1:] == '*' # returns (cases, renames) def renameCases (cases, rename): renamedCases = [] renamedSet = set() renames = [] for case in cases: renamed = None for src, dst in rename: if isWildcardPattern(src) and case[:len(src)-1] == src[:-1]: renamed = dst + case[len(src)-1:] break elif case == src: renamed = dst break if renamed != None: renames.append((case, renamed)) case = renamed # It is possible that some later case is renamed to case already seen in the list assert not case in renamedSet or renamed != None if case not in renamedSet: renamedCases.append(case) renamedSet.add(case) return (renamedCases, renames) # returns (added, removed) lists def diffCaseLists (old, new): added = [] removed = [] oldSet = set(old) newSet = set(new) # build added list for case in new: if not case in oldSet: added.append(case) # build removed set for case in old: if not case in newSet: removed.append(case) return (added, removed) if __name__ == "__main__": if len(sys.argv) != 3: print("%s [old caselist] [new caselist]" % sys.argv[0]) sys.exit(-1) oldCases = readCaseList(sys.argv[1]) newCases = readCaseList(sys.argv[2]) rename = RENAME_LIST_2012_3_2012_4 renamedCases, renameList = renameCases(oldCases, rename) added, removed = diffCaseLists(renamedCases, newCases) # for src, dst in rename: # print("RENAME: %s -> %s" % (src, dst)) for case in added: print("ADD: %s" % case) for src, dst in renameList: print("RENAME: %s -> %s" % (src, dst)) for case in removed: 
print("REMOVE: %s" % case)
bsd-3-clause
-6,694,891,574,763,381,000
72.415459
176
0.719813
false
2.689259
false
false
false
witlox/elasticluster
elasticluster/providers/ec2_boto.py
1
25751
# # Copyright (C) 2013, 2018 S3IT, University of Zurich # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # __author__ = ', '.join([ 'Nicolas Baer <nicolas.baer@uzh.ch>', 'Antonio Messina <antonio.s.messina@gmail.com>', 'Riccardo Murri <riccardo.murri@gmail.com>', ]) # System imports import hashlib import os import urllib import threading import time from warnings import warn # External modules import boto import boto.ec2 import boto.vpc from Crypto.PublicKey import RSA from paramiko import DSSKey, RSAKey, PasswordRequiredException from paramiko.ssh_exception import SSHException # Elasticluster imports from elasticluster import log from elasticluster.providers import AbstractCloudProvider from elasticluster.exceptions import VpcError, SecurityGroupError, \ SubnetError, KeypairError, ImageError, InstanceError, InstanceNotFoundError, ClusterError class BotoCloudProvider(AbstractCloudProvider): """This implementation of :py:class:`elasticluster.providers.AbstractCloudProvider` uses the boto ec2 interface to connect to ec2 compliant clouds and manage instances. Please check https://github.com/boto/boto for further information about the supported cloud platforms. :param str ec2_url: url to connect to cloud web service :param str ec2_region: region identifier :param str ec2_access_key: access key of the user account :param str ec2_secret_key: secret key of the user account :param str storage_path: path to store temporary data :param bool request_floating_ip: Whether ip are assigned automatically `True` or floating ips have to be assigned manually `False` :param str instance_profile: Instance profile with IAM role permissions :param float price: Spot instance price (if 0, do not use spot instances); used as a default in `start_instance`:py:meth :param int price: Timeout waiting for spot instances (only used if price > 0); used as a default in `start_instance`:py:meth """ __node_start_lock = threading.Lock() # lock used for node startup # interval (in seconds) for polling the cloud provider, # e.g., when requesting spot instances POLL_INTERVAL = 10 def __init__(self, ec2_url, ec2_region, ec2_access_key=None, ec2_secret_key=None, vpc=None, storage_path=None, request_floating_ip=False, instance_profile=None, price=0.0, timeout=0): self._url = ec2_url self._access_key = ec2_access_key self._secret_key = ec2_secret_key self._vpc = vpc self._instance_profile = instance_profile self.request_floating_ip = request_floating_ip # provide defaults for like-named arguments in `.start_instance` self.price = price self.timeout = timeout # read all parameters from url proto, opaqueurl = urllib.splittype(ec2_url) self._host, self._ec2path = urllib.splithost(opaqueurl) self._ec2host, port = urllib.splitport(self._host) if port: port = int(port) self._ec2port = port if proto == "https": self._secure = True else: self._secure = False self._region_name = ec2_region # will be initialized upon first connect self._ec2_connection = None 
self._vpc_connection = None self._vpc_id = None self._instances = {} self._cached_instances = [] self._images = None def _connect(self): """ Connect to the EC2 cloud provider. :return: :py:class:`boto.ec2.connection.EC2Connection` :raises: Generic exception on error """ # check for existing connection if self._ec2_connection: return self._ec2_connection try: log.debug("Connecting to EC2 endpoint %s", self._ec2host) # connect to webservice ec2_connection = boto.ec2.connect_to_region( self._region_name, aws_access_key_id=self._access_key, aws_secret_access_key=self._secret_key, is_secure=self._secure, host=self._ec2host, port=self._ec2port, path=self._ec2path, ) log.debug("EC2 connection has been successful.") if not self._vpc: vpc_connection = None self._vpc_id = None else: vpc_connection, self._vpc_id = self._find_vpc_by_name(self._vpc) except Exception as err: log.error("Error connecting to EC2: %s", err) raise self._ec2_connection, self._vpc_connection = ( ec2_connection, vpc_connection) return self._ec2_connection def _find_vpc_by_name(self, vpc_name): vpc_connection = boto.vpc.connect_to_region( self._region_name, aws_access_key_id=self._access_key, aws_secret_access_key=self._secret_key, is_secure=self._secure, host=self._ec2host, port=self._ec2port, path=self._ec2path, ) log.debug("VPC connection has been successful.") for vpc in vpc_connection.get_all_vpcs(): matches = [vpc.id] if 'Name' in vpc.tags: matches.append(vpc.tags['Name']) if vpc_name in matches: vpc_id = vpc.id if vpc_name != vpc_id: # then `vpc_name` is the VPC name log.debug("VPC `%s` has ID `%s`", vpc_name, vpc_id) break else: raise VpcError('Cannot find VPC `{0}`.'.format(vpc_name)) return (vpc_connection, vpc_id) def start_instance(self, key_name, public_key_path, private_key_path, security_group, flavor, image_id, image_userdata, username=None, node_name=None, network_ids=None, price=None, timeout=None, boot_disk_device=None, boot_disk_size=None, boot_disk_type=None, boot_disk_iops=None, placement_group=None, **kwargs): """Starts a new instance on the cloud using the given properties. The following tasks are done to start an instance: * establish a connection to the cloud web service * check ssh keypair and upload it if it does not yet exist. This is a locked process, since this function might be called in multiple threads and we only want the key to be stored once. * check if the security group exists * run the instance with the given properties :param str key_name: name of the ssh key to connect :param str public_key_path: path to ssh public key :param str private_key_path: path to ssh private key :param str security_group: firewall rule definition to apply on the instance :param str flavor: machine type to use for the instance :param str image_id: image type (os) to use for the instance :param str image_userdata: command to execute after startup :param str username: username for the given ssh key, default None :param float price: Spot instance price (if 0, do not use spot instances). :param int price: Timeout (in seconds) waiting for spot instances; only used if price > 0. :param str boot_disk_device: Root volume device path if not /dev/sda1 :param str boot_disk_size: Target size, in GiB, for the root volume :param str boot_disk_type: Type of root volume (standard, gp2, io1) :param str boot_disk_iops: Provisioned IOPS for the root volume :param str placement_group: Enable low-latency networking between compute nodes. 
:return: str - instance id of the started instance """ connection = self._connect() log.debug("Checking keypair `%s`.", key_name) # the `_check_keypair` method has to be called within a lock, # since it will upload the key if it does not exist and if this # happens for every node at the same time ec2 will throw an error # message (see issue #79) with BotoCloudProvider.__node_start_lock: self._check_keypair(key_name, public_key_path, private_key_path) log.debug("Checking security group `%s`.", security_group) security_group_id = self._check_security_group(security_group) # image_id = self._find_image_id(image_id) if network_ids: interfaces = [] for subnet in network_ids.split(','): subnet_id = self._check_subnet(subnet) interfaces.append( boto.ec2.networkinterface.NetworkInterfaceSpecification( subnet_id=subnet_id, groups=[security_group_id], associate_public_ip_address=self.request_floating_ip)) interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(*interfaces) security_groups = [] else: interfaces = None security_groups = [security_group] # get defaults for `price` and `timeout` from class instance if price is None: price = self.price if timeout is None: timeout = self.timeout if boot_disk_size: dev_root = boto.ec2.blockdevicemapping.BlockDeviceType() dev_root.size = int(boot_disk_size) dev_root.delete_on_termination = True if boot_disk_type: dev_root.volume_type = boot_disk_type if boot_disk_iops: dev_root.iops = int(boot_disk_iops) bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping() dev_name = boot_disk_device if boot_disk_device else "/dev/sda1" bdm[dev_name] = dev_root else: bdm = None try: #start spot instance if bid is specified if price: log.info("Requesting spot instance with price `%s` ...", price) request = connection.request_spot_instances( price,image_id, key_name=key_name, security_groups=security_groups, instance_type=flavor, user_data=image_userdata, network_interfaces=interfaces, placement_group=placement_group, block_device_map=bdm, instance_profile_name=self._instance_profile)[-1] # wait until spot request is fullfilled (will wait # forever if no timeout is given) start_time = time.time() timeout = (float(timeout) if timeout else 0) log.info("Waiting for spot instance (will time out in %d seconds) ...", timeout) while request.status.code != 'fulfilled': if timeout and time.time()-start_time > timeout: request.cancel() raise RuntimeError('spot instance timed out') time.sleep(self.POLL_INTERVAL) # update request status request=connection.get_all_spot_instance_requests(request_ids=request.id)[-1] else: reservation = connection.run_instances( image_id, key_name=key_name, security_groups=security_groups, instance_type=flavor, user_data=image_userdata, network_interfaces=interfaces, placement_group=placement_group, block_device_map=bdm, instance_profile_name=self._instance_profile) except Exception as ex: log.error("Error starting instance: %s", ex) if "TooManyInstances" in ex: raise ClusterError(ex) else: raise InstanceError(ex) if price: vm = connection.get_only_instances(instance_ids=[request.instance_id])[-1] else: vm = reservation.instances[-1] vm.add_tag("Name", node_name) # cache instance object locally for faster access later on self._instances[vm.id] = vm return vm.id def stop_instance(self, instance_id): """Stops the instance gracefully. 
:param str instance_id: instance identifier """ instance = self._load_instance(instance_id) instance.terminate() del self._instances[instance_id] def get_ips(self, instance_id): """Retrieves the private and public ip addresses for a given instance. :return: list (ips) """ self._load_instance(instance_id) instance = self._load_instance(instance_id) IPs = [ip for ip in instance.private_ip_address, instance.ip_address if ip] # We also need to check if there is any floating IP associated if self.request_floating_ip and not self._vpc: # We need to list the floating IPs for this instance floating_ips = [ip for ip in self._ec2_connection.get_all_addresses() if ip.instance_id == instance.id] if not floating_ips: log.debug("Public ip address has to be assigned through " "elasticluster.") ip = self._allocate_address(instance) # This is probably the preferred IP we want to use IPs.insert(0, ip) else: IPs = [ip.public_ip for ip in floating_ips] + IPs return list(set(IPs)) def is_instance_running(self, instance_id): """Checks if the instance is up and running. :param str instance_id: instance identifier :return: bool - True if running, False otherwise """ instance = self._load_instance(instance_id) if instance.update() == "running": # If the instance is up&running, ensure it has an IP # address. if not instance.ip_address and self.request_floating_ip: log.debug("Public ip address has to be assigned through " "elasticluster.") self._allocate_address(instance) instance.update() return True else: return False def _allocate_address(self, instance): """Allocates a free public ip address to the given instance :param instance: instance to assign address to :type instance: py:class:`boto.ec2.instance.Reservation` :return: public ip address """ connection = self._connect() free_addresses = [ ip for ip in connection.get_all_addresses() if not ip.instance_id] if not free_addresses: try: address = connection.allocate_address() except Exception as ex: log.error("Unable to allocate a public IP address to instance `%s`", instance.id) return None try: address = free_addresses.pop() instance.use_ip(address) return address.public_ip except Exception as ex: log.error("Unable to associate IP address %s to instance `%s`", address, instance.id) return None def _load_instance(self, instance_id): """ Return instance with the given id. For performance reasons, the instance ID is first searched for in the collection of VM instances started by ElastiCluster (`self._instances`), then in the list of all instances known to the cloud provider at the time of the last update (`self._cached_instances`), and finally the cloud provider is directly queried. :param str instance_id: instance identifier :return: py:class:`boto.ec2.instance.Reservation` - instance :raises: `InstanceError` is returned if the instance can't be found in the local cache or in the cloud. """ # if instance is known, return it if instance_id in self._instances: return self._instances[instance_id] # else, check (cached) list from provider if instance_id not in self._cached_instances: self._cached_instances = self._build_cached_instances() if instance_id in self._cached_instances: inst = self._cached_instances[instance_id] self._instances[instance_id] = inst return inst # If we reached this point, the instance was not found neither # in the caches nor on the website. raise InstanceNotFoundError( "Instance `{instance_id}` not found" .format(instance_id=instance_id)) def _build_cached_instances(self): """ Build lookup table of VM instances known to the cloud provider. 
The returned dictionary links VM id with the actual VM object. """ connection = self._connect() reservations = connection.get_all_reservations() cached_instances = {} for rs in reservations: for vm in rs.instances: cached_instances[vm.id] = vm return cached_instances def _check_keypair(self, name, public_key_path, private_key_path): """First checks if the keypair is valid, then checks if the keypair is registered with on the cloud. If not the keypair is added to the users ssh keys. :param str name: name of the ssh key :param str public_key_path: path to the ssh public key file :param str private_key_path: path to the ssh private key file :raises: `KeypairError` if key is not a valid RSA or DSA key, the key could not be uploaded or the fingerprint does not match to the one uploaded to the cloud. """ connection = self._connect() keypairs = connection.get_all_key_pairs() keypairs = dict((k.name, k) for k in keypairs) # decide if dsa or rsa key is provided pkey = None is_dsa_key = False try: pkey = DSSKey.from_private_key_file(private_key_path) is_dsa_key = True except PasswordRequiredException: warn("Unable to check key file `{0}` because it is encrypted with a " "password. Please, ensure that you added it to the SSH agent " "with `ssh-add {1}`" .format(private_key_path, private_key_path)) except SSHException: try: pkey = RSAKey.from_private_key_file(private_key_path) except PasswordRequiredException: warn("Unable to check key file `{0}` because it is encrypted with a " "password. Please, ensure that you added it to the SSH agent " "with `ssh-add {1}`" .format(private_key_path, private_key_path)) except SSHException: raise KeypairError('File `%s` is neither a valid DSA key ' 'or RSA key.' % private_key_path) # create keys that don't exist yet if name not in keypairs: log.warning( "Keypair `%s` not found on resource `%s`, Creating a new one", name, self._url) with open(os.path.expanduser(public_key_path)) as f: key_material = f.read() try: # check for DSA on amazon if "amazon" in self._ec2host and is_dsa_key: log.error( "Apparently, amazon does not support DSA keys. " "Please specify a valid RSA key.") raise KeypairError( "Apparently, amazon does not support DSA keys." "Please specify a valid RSA key.") connection.import_key_pair(name, key_material) except Exception as ex: log.error( "Could not import key `%s` with name `%s` to `%s`", name, public_key_path, self._url) raise KeypairError( "could not create keypair `%s`: %s" % (name, ex)) else: # check fingerprint cloud_keypair = keypairs[name] if pkey: if "amazon" in self._ec2host: # AWS takes the MD5 hash of the key's DER representation. key = RSA.importKey(open(private_key_path).read()) der = key.publickey().exportKey('DER') m = hashlib.md5() m.update(der) digest = m.hexdigest() fingerprint = ':'.join(digest[i:(i + 2)] for i in range(0, len(digest), 2)) else: fingerprint = ':'.join(i.encode('hex') for i in pkey.get_fingerprint()) if fingerprint != cloud_keypair.fingerprint: if "amazon" in self._ec2host: log.error( "Apparently, Amazon does not compute the RSA key " "fingerprint as we do! We cannot check if the " "uploaded keypair is correct!") else: raise KeypairError( "Keypair `%s` is present but has " "different fingerprint. Aborting!" % name) def _check_security_group(self, name): """Checks if the security group exists. 
:param str name: name of the security group :return: str - security group id of the security group :raises: `SecurityGroupError` if group does not exist """ connection = self._connect() filters = {} if self._vpc: filters = {'vpc-id': self._vpc_id} security_groups = connection.get_all_security_groups(filters=filters) matching_groups = [ group for group in security_groups if name in [group.name, group.id] ] if len(matching_groups) == 0: raise SecurityGroupError( "the specified security group %s does not exist" % name) elif len(matching_groups) == 1: return matching_groups[0].id elif self._vpc and len(matching_groups) > 1: raise SecurityGroupError( "the specified security group name %s matches " "more than one security group" % name) def _check_subnet(self, name): """Checks if the subnet exists. :param str name: name of the subnet :return: str - subnet id of the subnet :raises: `SubnetError` if group does not exist """ # Subnets only exist in VPCs, so we don't need to worry about # the EC2 Classic case here. subnets = self._vpc_connection.get_all_subnets( filters={'vpcId': self._vpc_id}) matching_subnets = [ subnet for subnet in subnets if name in [subnet.tags.get('Name'), subnet.id] ] if len(matching_subnets) == 0: raise SubnetError( "the specified subnet %s does not exist" % name) elif len(matching_subnets) == 1: return matching_subnets[0].id else: raise SubnetError( "the specified subnet name %s matches more than " "one subnet" % name) def _find_image_id(self, image_id): """Finds an image id to a given id or name. :param str image_id: name or id of image :return: str - identifier of image """ if not self._images: connection = self._connect() self._images = connection.get_all_images() image_id_cloud = None for i in self._images: if i.id == image_id or i.name == image_id: image_id_cloud = i.id break if image_id_cloud: return image_id_cloud else: raise ImageError( "Could not find given image id `%s`" % image_id) def __getstate__(self): d = self.__dict__.copy() del d['_ec2_connection'] del d['_vpc_connection'] return d def __setstate__(self, state): self.__dict__ = state self._ec2_connection = None self._vpc_connection = None
gpl-3.0
4,051,944,693,585,966,600
39.425432
115
0.568405
false
4.45057
false
false
false
blekhmanlab/hominid
hominid/sort_results.py
1
6152
""" Read a rvcf file with stability selection scores for taxa. Sort the dataframe by rsq_median. Print results. usage: python sort_results.py \ ../example/stability_selection_example_output.vcf \ ../example/hominid_example_taxon_table_input.txt \ arcsinsqrt \ 0.5 \ 10 """ import argparse import sys import pandas as pd from hominid.hominid import read_taxon_file, align_snp_and_taxa def sort_results(rvcf_input_file_path, taxon_table_file_path, transform, r_sqr_median_cutoff, stability_cutoff, snp_count, no_tables, extra_columns): print('plotting {} SNPs from {}'.format(snp_count, rvcf_input_file_path)) # read the rvcf file and sort by rsq_median df = pd.read_csv(rvcf_input_file_path, sep='\t', dtype={'CHROM': str}) #print('df.shape: {}'.format(df.shape)) sorted_rsq_best_medians_df = df.sort_values(by='rsq_median', ascending=False) x_df = sorted_rsq_best_medians_df[sorted_rsq_best_medians_df.rsq_median > r_sqr_median_cutoff] print('{} SNPs with r_sqr > {:5.3f}'.format(x_df.shape[0], r_sqr_median_cutoff)) taxon_table_df = read_taxon_file(taxon_table_file_path, transform=transform) for row_i in range(sorted_rsq_best_medians_df.shape[0]): if row_i >= snp_count: break else: # get a 1-row dataframe snp_df = sorted_rsq_best_medians_df.iloc[[row_i]] aligned_snp_df, aligned_taxa_df = align_snp_and_taxa( snp_df, taxon_table_df ) # get the taxon stability selection scores # use the taxon table df index to get column names for snp_df taxon_scores_df = snp_df.loc[:, taxon_table_df.index].transpose() sorted_taxon_scores_df = taxon_scores_df.sort_values(by=taxon_scores_df.columns[0], ascending=False) #sorted_taxon_scores_df = taxon_scores_df.sort(taxon_scores_df.columns[0], ascending=False) p_df_list = [] print('{} {} {:5.3f}'.format(snp_df.iloc[0].GENE, snp_df.iloc[0].ID, snp_df.iloc[0].rsq_median)) summary_line = '{}\t{}\t'.format(snp_df.iloc[0].GENE, snp_df.iloc[0].ID) for i, (selected_taxon, selected_taxon_row) in enumerate(sorted_taxon_scores_df.iterrows()): # use selected_taxon_row.index[0] to index the first and only column selected_taxon_score = selected_taxon_row.iloc[0] if selected_taxon_score < stability_cutoff: #print('done with selected taxa') break else: # trim 'Root;' from the front of the taxon name if selected_taxon.startswith('Root;'): taxon_name = selected_taxon[5:] else: taxon_name = selected_taxon print(' {:5.3f} {}'.format(selected_taxon_score, taxon_name)) summary_line += '{}, '.format(taxon_name) gts = [ snp_df.iloc[0].REF + snp_df.iloc[0].REF, # 0 snp_df.iloc[0].REF + snp_df.iloc[0].ALT, # 1 snp_df.iloc[0].ALT + snp_df.iloc[0].ALT # 2 ] aligned_snp_value_list = aligned_snp_df.values.flatten().tolist() data_dict = { 'chromosome': [snp_df.iloc[0].CHROM] * aligned_snp_df.shape[1], 'snp_id': [snp_df.iloc[0].ID] * aligned_snp_df.shape[1], 'gene': [snp_df.iloc[0].GENE] * aligned_snp_df.shape[1], 'taxon': [selected_taxon] * aligned_snp_df.shape[1], 'abundance': aligned_taxa_df[selected_taxon].values.tolist(), 'variant_allele_count': [str(int(v)) for v in aligned_snp_value_list], 'genotype': [gts[int(v)] for v in aligned_snp_value_list], 'sample_id' : aligned_snp_df.columns } columns_to_display = ['abundance', 'variant_allele_count', 'genotype', 'sample_id'] if extra_columns: for extra_column in extra_columns.split(','): data_dict[extra_column] = snp_df.iloc[0][extra_column] columns_to_display.append(extra_column) p_df = pd.DataFrame(data_dict) p_df_list.append(p_df) if no_tables: pass else: p_df[columns_to_display].to_csv( sys.stdout, sep='\t' ) # save a stacked bar plot if len(p_df_list) 
> 0: file_name = 'stacked_bar_plot_selected_taxa_{}_{}.pdf'.format( snp_df.iloc[0].GENE, snp_df.iloc[0].ID ) p_df = pd.concat(p_df_list, axis=0) # at this point the index for p_df looks like # 0...76.0...76.0...76 # replace the index p_df.index = range(p_df.shape[0]) #p_df.to_csv(file_path, sep='\t') stacked_bar_title = '{}\n{}'.format(snp_df.iloc[0].GENE, snp_df.iloc[0].ID) def main(): argparser = argparse.ArgumentParser() argparser.add_argument('rvcf_input_file_path') argparser.add_argument('taxon_table_file_path') argparser.add_argument('transform') argparser.add_argument( 'r_sqr_median_cutoff', type=float ) argparser.add_argument( 'stability_cutoff', type=float ) argparser.add_argument( 'snp_count', type=int ) argparser.add_argument( '--no-tables', action='store_true' ) argparser.add_argument( '--extra-columns', type=str ) args = argparser.parse_args() print(args) sort_results(**vars(args)) if __name__ == '__main__': main()
mit
-8,981,641,414,200,290,000
41.136986
112
0.519831
false
3.429208
false
false
false
tejasnikumbh/Algorithms
ArraysAndSorting/MarkAndToys.py
1
1514
'''
    In place quickSort
    The quickSort Method
    Time Complexity  : Best,Avg - O(NlogN) , Worst - O(N^2)
    Space Complexity : O(N)
    Auxilary Space   : O(logN) for the stack frames
'''
def quickSort(a,start,end):
    if(start >= end):
        return a
    else:
        pivot = a[end]
        swapIndex = start
        for i in range(start,end + 1):
            if(a[i] < pivot):
                #swap(a,i,swapIndex)
                temp = a[i]
                a[i] = a[swapIndex]
                a[swapIndex] = temp
                swapIndex += 1
        #swap(a,end,swapIndex)
        temp = a[end]
        a[end] = a[swapIndex]
        a[swapIndex] = temp
        quickSort(a,start,swapIndex - 1)
        quickSort(a,swapIndex + 1,end)
        return a

'''
    Function that returns maximum toys that can be bought.
    Simple strategy is to sort the prices array and add as many toys as
    possible by incrementally adding up prices from the least to the most
    until budget is exhausted.
'''
def max_toys(prices, rupees):
    #Compute and return final answer over here
    answer = 0
    prices = quickSort(prices,0,len(prices)-1)
    totalBudget = rupees
    for price in prices:
        if((totalBudget - price) >= 0):
            totalBudget -= price
            answer += 1
        else:
            break
    return answer

'''
    Main function for the program
'''
if __name__ == '__main__':
    n, k = map(int, raw_input().split())
    prices = map(int, raw_input().split())
    print max_toys(prices, k)
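# ---------------------------------------------------------------------------
# Editor's note: a minimal usage sketch, not part of the original solution.
# The sample prices and budget below are made-up illustration values.
# ---------------------------------------------------------------------------
def _example_max_toys():
    sample_prices = [1, 12, 5, 111, 200, 1000, 10]
    sample_budget = 50
    # quickSort orders the prices ascending: [1, 5, 10, 12, 111, 200, 1000];
    # greedily buying 1 + 5 + 10 + 12 = 28 <= 50 yields 4 toys.
    assert max_toys(sample_prices, sample_budget) == 4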
bsd-2-clause
122,863,958,827,943,060
28.686275
82
0.562087
false
3.425339
false
false
false
openmips/stbgui
lib/python/Components/ServiceScan.py
1
9086
from enigma import eComponentScan, iDVBFrontend, eTimer from Components.NimManager import nimmanager as nimmgr from Tools.Transponder import getChannelNumber class ServiceScan: Idle = 1 Running = 2 Done = 3 Error = 4 DonePartially = 5 Errors = { 0: _("error starting scanning"), 1: _("error while scanning"), 2: _("no resource manager"), 3: _("no channel list") } def scanStatusChanged(self): if self.state == self.Running: self.progressbar.setValue(self.scan.getProgress()) self.lcd_summary and self.lcd_summary.updateProgress(self.scan.getProgress()) if self.scan.isDone(): errcode = self.scan.getError() if errcode == 0: self.state = self.DonePartially self.servicelist.listAll() else: self.state = self.Error self.errorcode = errcode self.network.setText("") self.transponder.setText("") else: result = self.foundServices + self.scan.getNumServices() percentage = self.scan.getProgress() if percentage > 99: percentage = 99 #TRANSLATORS: The stb is performing a channel scan, progress percentage is printed in '%d' (and '%%' will show a single '%' symbol) message = ngettext("Scanning - %d%% completed", "Scanning - %d%% completed", percentage) % percentage message += ", " #TRANSLATORS: Intermediate scanning result, '%d' channel(s) have been found so far message += ngettext("%d channel found", "%d channels found", result) % result self.text.setText(message) transponder = self.scan.getCurrentTransponder() network = "" tp_text = "" if transponder: tp_type = transponder.getSystem() if tp_type == iDVBFrontend.feSatellite: network = _("Satellite") tp = transponder.getDVBS() orb_pos = tp.orbital_position try: sat_name = str(nimmgr.getSatDescription(orb_pos)) except KeyError: sat_name = "" if orb_pos > 1800: # west orb_pos = 3600 - orb_pos h = _("W") else: h = _("E") if ("%d.%d" % (orb_pos/10, orb_pos%10)) in sat_name: network = sat_name else: network = ("%s %d.%d %s") % (sat_name, orb_pos / 10, orb_pos % 10, h) tp_text = { tp.System_DVB_S : "DVB-S", tp.System_DVB_S2 : "DVB-S2" }.get(tp.system, "") if tp_text == "DVB-S2": tp_text = ("%s %s") % ( tp_text, { tp.Modulation_Auto : "Auto", tp.Modulation_QPSK : "QPSK", tp.Modulation_8PSK : "8PSK", tp.Modulation_QAM16 : "QAM16", tp.Modulation_16APSK : "16APSK", tp.Modulation_32APSK : "32APSK" }.get(tp.modulation, "")) tp_text = ("%s %d%c / %d / %s") % ( tp_text, tp.frequency/1000, { tp.Polarisation_Horizontal : 'H', tp.Polarisation_Vertical : 'V', tp.Polarisation_CircularLeft : 'L', tp.Polarisation_CircularRight : 'R' }.get(tp.polarisation, ' '), tp.symbol_rate/1000, { tp.FEC_Auto : "AUTO", tp.FEC_1_2 : "1/2", tp.FEC_2_3 : "2/3", tp.FEC_3_4 : "3/4", tp.FEC_5_6 : "5/6", tp.FEC_7_8 : "7/8", tp.FEC_8_9 : "8/9", tp.FEC_3_5 : "3/5", tp.FEC_4_5 : "4/5", tp.FEC_9_10 : "9/10", tp.FEC_None : "NONE" }.get(tp.fec, "")) if tp.is_id > -1 and tp.system == tp.System_DVB_S2: tp_text = ("%s IS %d") % (tp_text, tp.is_id) elif tp_type == iDVBFrontend.feCable: network = _("Cable") tp = transponder.getDVBC() tp_text = ("DVB-C/C2 %s %d MHz / SR:%d / FEC:%s") %( { tp.Modulation_Auto : "AUTO", tp.Modulation_QAM16 : "QAM16", tp.Modulation_QAM32 : "QAM32", tp.Modulation_QAM64 : "QAM64", tp.Modulation_QAM128 : "QAM128", tp.Modulation_QAM256 : "QAM256" }.get(tp.modulation, ""), tp.frequency/1000, tp.symbol_rate/1000, { tp.FEC_Auto : "AUTO", tp.FEC_1_2 : "1/2", tp.FEC_2_3 : "2/3", tp.FEC_3_4 : "3/4", tp.FEC_5_6 : "5/6", tp.FEC_7_8 : "7/8", tp.FEC_8_9 : "8/9", tp.FEC_3_5 : "3/5", tp.FEC_4_5 : "4/5", tp.FEC_9_10 : "9/10", tp.FEC_6_7 : "6/7", tp.FEC_None : "NONE" }.get(tp.fec_inner, "")) elif 
tp_type == iDVBFrontend.feTerrestrial: network = _("Terrestrial") tp = transponder.getDVBT() channel = getChannelNumber(tp.frequency, self.scanList[self.run]["feid"]) if channel: channel = _("CH") + "%s " % channel freqMHz = "%0.1f MHz" % (tp.frequency/1000000.) tp_text = ("%s %s %s %s") %( { tp.System_DVB_T_T2 : "DVB-T/T2", tp.System_DVB_T : "DVB-T", tp.System_DVB_T2 : "DVB-T2" }.get(tp.system, ""), { tp.Modulation_QPSK : "QPSK", tp.Modulation_QAM16 : "QAM16", tp.Modulation_QAM64 : "QAM64", tp.Modulation_Auto : "AUTO", tp.Modulation_QAM256 : "QAM256" }.get(tp.modulation, ""), "%s%s" % (channel, freqMHz.replace(".0","")), { tp.Bandwidth_8MHz : "Bw 8MHz", tp.Bandwidth_7MHz : "Bw 7MHz", tp.Bandwidth_6MHz : "Bw 6MHz", tp.Bandwidth_Auto : "Bw Auto", tp.Bandwidth_5MHz : "Bw 5MHz", tp.Bandwidth_1_712MHz : "Bw 1.712MHz", tp.Bandwidth_10MHz : "Bw 10MHz" }.get(tp.bandwidth, "")) elif tp_type == iDVBFrontend.feATSC: network = _("ATSC") tp = transponder.getATSC() freqMHz = "%0.1f MHz" % (tp.frequency/1000000.) tp_text = ("%s %s %s %s") % ( { tp.System_ATSC : _("ATSC"), tp.System_DVB_C_ANNEX_B : _("DVB-C ANNEX B") }.get(tp.system, ""), { tp.Modulation_Auto : _("Auto"), tp.Modulation_QAM16 : "QAM16", tp.Modulation_QAM32 : "QAM32", tp.Modulation_QAM64 : "QAM64", tp.Modulation_QAM128 : "QAM128", tp.Modulation_QAM256 : "QAM256", tp.Modulation_VSB_8 : "8VSB", tp.Modulation_VSB_16 : "16VSB" }.get(tp.modulation, ""), freqMHz.replace(".0",""), { tp.Inversion_Off : _("Off"), tp.Inversion_On :_("On"), tp.Inversion_Unknown : _("Auto") }.get(tp.inversion, "")) else: print "unknown transponder type in scanStatusChanged" self.network.setText(network) self.transponder.setText(tp_text) if self.state == self.DonePartially: self.foundServices += self.scan.getNumServices() self.text.setText(ngettext("Scanning completed, %d channel found", "Scanning completed, %d channels found", self.foundServices) % self.foundServices) if self.state == self.Error: self.text.setText(_("ERROR - failed to scan (%s)!") % (self.Errors[self.errorcode]) ) if self.state == self.DonePartially or self.state == self.Error: self.delaytimer.start(100, True) def __init__(self, progressbar, text, servicelist, passNumber, scanList, network, transponder, frontendInfo, lcd_summary): self.foundServices = 0 self.progressbar = progressbar self.text = text self.servicelist = servicelist self.passNumber = passNumber self.scanList = scanList self.frontendInfo = frontendInfo self.transponder = transponder self.network = network self.run = 0 self.lcd_summary = lcd_summary self.scan = None self.delaytimer = eTimer() self.delaytimer.callback.append(self.execEnd) def doRun(self): self.scan = eComponentScan() self.frontendInfo.frontend_source = lambda : self.scan.getFrontend() self.feid = self.scanList[self.run]["feid"] self.flags = self.scanList[self.run]["flags"] self.networkid = 0 if "networkid" in self.scanList[self.run]: self.networkid = self.scanList[self.run]["networkid"] self.state = self.Idle self.scanStatusChanged() for x in self.scanList[self.run]["transponders"]: self.scan.addInitial(x) def updatePass(self): size = len(self.scanList) if size > 1: txt = "%s %s/%s (%s)" % (_("pass"), self.run + 1, size, nimmgr.getNim(self.scanList[self.run]["feid"]).slot_name) self.passNumber.setText(txt) def execBegin(self): self.doRun() self.updatePass() self.scan.statusChanged.get().append(self.scanStatusChanged) self.scan.newService.get().append(self.newService) self.servicelist.clear() self.state = self.Running err = self.scan.start(self.feid, self.flags, 
self.networkid) self.frontendInfo.updateFrontendData() if err: self.state = self.Error self.errorcode = 0 self.scanStatusChanged() def execEnd(self): if self.scan is None: if not self.isDone(): print "*** warning *** scan was not finished!" return self.scan.statusChanged.get().remove(self.scanStatusChanged) self.scan.newService.get().remove(self.newService) self.scan = None if self.run != len(self.scanList) - 1: self.run += 1 self.execBegin() else: self.state = self.Done def isDone(self): return self.state == self.Done or self.state == self.Error def newService(self): newServiceName = self.scan.getLastServiceName() newServiceRef = self.scan.getLastServiceRef() self.servicelist.addItem((newServiceName, newServiceRef)) self.lcd_summary and self.lcd_summary.updateService(newServiceName) def destroy(self): self.state = self.Idle if self.scan is not None: self.scan.statusChanged.get().remove(self.scanStatusChanged) self.scan.newService.get().remove(self.newService) self.scan = None
gpl-2.0
5,148,115,482,864,712,000
37.016736
152
0.619194
false
2.752499
false
false
false
donbixler/xhtml2pdf
xhtml2pdf/parser.py
1
24988
# -*- coding: utf-8 -*- # Copyright 2010 Dirk Holtwick, holtwick.it # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from html5lib import treebuilders, inputstream from xhtml2pdf.default import TAGS, STRING, INT, BOOL, SIZE, COLOR, FILE from xhtml2pdf.default import BOX, POS, MUST, FONT from xhtml2pdf.util import getSize, getBool, toList, getColor, getAlign from xhtml2pdf.util import getBox, getPos, pisaTempFile from reportlab.platypus.doctemplate import NextPageTemplate, FrameBreak from reportlab.platypus.flowables import PageBreak, KeepInFrame from xhtml2pdf.xhtml2pdf_reportlab import PmlRightPageBreak, PmlLeftPageBreak from xhtml2pdf.tags import * # TODO: Kill wild import! from xhtml2pdf.tables import * # TODO: Kill wild import! from xhtml2pdf.util import * # TODO: Kill wild import! from xml.dom import Node import copy import html5lib import logging import re import types import xhtml2pdf.w3c.cssDOMElementInterface as cssDOMElementInterface import xml.dom.minidom CSSAttrCache = {} log = logging.getLogger("xhtml2pdf") rxhttpstrip = re.compile("https?://[^/]+(.*)", re.M | re.I) class AttrContainer(dict): def __getattr__(self, name): try: return dict.__getattr__(self, name) except: return self[name] def pisaGetAttributes(c, tag, attributes): global TAGS attrs = {} if attributes: for k, v in attributes.items(): try: attrs[str(k)] = str(v) # XXX no Unicode! 
Reportlab fails with template names except: attrs[k] = v nattrs = {} if tag in TAGS: block, adef = TAGS[tag] adef["id"] = STRING # print block, adef for k, v in adef.iteritems(): nattrs[k] = None # print k, v # defaults, wenn vorhanden if type(v) == types.TupleType: if v[1] == MUST: if k not in attrs: log.warn(c.warning("Attribute '%s' must be set!", k)) nattrs[k] = None continue nv = attrs.get(k, v[1]) dfl = v[1] v = v[0] else: nv = attrs.get(k, None) dfl = None if nv is not None: if type(v) == types.ListType: nv = nv.strip().lower() if nv not in v: #~ raise PML_EXCEPTION, "attribute '%s' of wrong value, allowed is one of: %s" % (k, repr(v)) log.warn(c.warning("Attribute '%s' of wrong value, allowed is one of: %s", k, repr(v))) nv = dfl elif v == BOOL: nv = nv.strip().lower() nv = nv in ("1", "y", "yes", "true", str(k)) elif v == SIZE: try: nv = getSize(nv) except: log.warn(c.warning("Attribute '%s' expects a size value", k)) elif v == BOX: nv = getBox(nv, c.pageSize) elif v == POS: nv = getPos(nv, c.pageSize) elif v == INT: nv = int(nv) elif v == COLOR: nv = getColor(nv) elif v == FILE: nv = c.getFile(nv) elif v == FONT: nv = c.getFontName(nv) nattrs[k] = nv return AttrContainer(nattrs) attrNames = ''' color font-family font-size font-weight font-style text-decoration line-height letter-spacing background-color display margin-left margin-right margin-top margin-bottom padding-left padding-right padding-top padding-bottom border-top-color border-top-style border-top-width border-bottom-color border-bottom-style border-bottom-width border-left-color border-left-style border-left-width border-right-color border-right-style border-right-width text-align vertical-align width height zoom page-break-after page-break-before list-style-type list-style-image white-space text-indent -pdf-page-break -pdf-frame-break -pdf-next-page -pdf-keep-with-next -pdf-outline -pdf-outline-level -pdf-outline-open -pdf-line-spacing -pdf-keep-in-frame-mode -pdf-word-wrap '''.strip().split() def getCSSAttr(self, cssCascade, attrName, default=NotImplemented): if attrName in self.cssAttrs: return self.cssAttrs[attrName] try: result = cssCascade.findStyleFor(self.cssElement, attrName, default) except LookupError: result = None # XXX Workaround for inline styles try: style = self.cssStyle except: style = self.cssStyle = cssCascade.parser.parseInline(self.cssElement.getStyleAttr() or '')[0] if attrName in style: result = style[attrName] if result == 'inherit': if hasattr(self.parentNode, 'getCSSAttr'): result = self.parentNode.getCSSAttr(cssCascade, attrName, default) elif default is not NotImplemented: return default raise LookupError("Could not find inherited CSS attribute value for '%s'" % (attrName,)) if result is not None: self.cssAttrs[attrName] = result return result #TODO: Monkeypatching standard lib should go away. xml.dom.minidom.Element.getCSSAttr = getCSSAttr # Create an aliasing system. Many sources use non-standard tags, because browsers allow # them to. This allows us to map a nonstandard name to the standard one. 
nonStandardAttrNames = { 'bgcolor': 'background-color', } def mapNonStandardAttrs(c, n, attrList): for attr in nonStandardAttrNames: if attr in attrList and nonStandardAttrNames[attr] not in c: c[nonStandardAttrNames[attr]] = attrList[attr] return c def getCSSAttrCacheKey(node): _cl = _id = _st = '' for k, v in node.attributes.items(): if k == 'class': _cl = v elif k == 'id': _id = v elif k == 'style': _st = v return "%s#%s#%s#%s#%s" % (id(node.parentNode), node.tagName.lower(), _cl, _id, _st) def CSSCollect(node, c): #node.cssAttrs = {} #return node.cssAttrs if c.css: _key = getCSSAttrCacheKey(node) if hasattr(node.parentNode, "tagName"): if node.parentNode.tagName.lower() != "html": CachedCSSAttr = CSSAttrCache.get(_key, None) if CachedCSSAttr is not None: node.cssAttrs = CachedCSSAttr return CachedCSSAttr node.cssElement = cssDOMElementInterface.CSSDOMElementInterface(node) node.cssAttrs = {} # node.cssElement.onCSSParserVisit(c.cssCascade.parser) cssAttrMap = {} for cssAttrName in attrNames: try: cssAttrMap[cssAttrName] = node.getCSSAttr(c.cssCascade, cssAttrName) #except LookupError: # pass except Exception: # TODO: Kill this catch-all! log.debug("CSS error '%s'", cssAttrName, exc_info=1) CSSAttrCache[_key] = node.cssAttrs return node.cssAttrs def CSS2Frag(c, kw, isBlock): # COLORS if "color" in c.cssAttr: c.frag.textColor = getColor(c.cssAttr["color"]) if "background-color" in c.cssAttr: c.frag.backColor = getColor(c.cssAttr["background-color"]) # FONT SIZE, STYLE, WEIGHT if "font-family" in c.cssAttr: c.frag.fontName = c.getFontName(c.cssAttr["font-family"]) if "font-size" in c.cssAttr: # XXX inherit c.frag.fontSize = max(getSize("".join(c.cssAttr["font-size"]), c.frag.fontSize, c.baseFontSize), 1.0) if "line-height" in c.cssAttr: leading = "".join(c.cssAttr["line-height"]) c.frag.leading = getSize(leading, c.frag.fontSize) c.frag.leadingSource = leading else: c.frag.leading = getSize(c.frag.leadingSource, c.frag.fontSize) if "letter-spacing" in c.cssAttr: c.frag.letterSpacing = c.cssAttr["letter-spacing"] if "-pdf-line-spacing" in c.cssAttr: c.frag.leadingSpace = getSize("".join(c.cssAttr["-pdf-line-spacing"])) # print "line-spacing", c.cssAttr["-pdf-line-spacing"], c.frag.leading if "font-weight" in c.cssAttr: value = c.cssAttr["font-weight"].lower() if value in ("bold", "bolder", "500", "600", "700", "800", "900"): c.frag.bold = 1 else: c.frag.bold = 0 for value in toList(c.cssAttr.get("text-decoration", "")): if "underline" in value: c.frag.underline = 1 if "line-through" in value: c.frag.strike = 1 if "none" in value: c.frag.underline = 0 c.frag.strike = 0 if "font-style" in c.cssAttr: value = c.cssAttr["font-style"].lower() if value in ("italic", "oblique"): c.frag.italic = 1 else: c.frag.italic = 0 if "white-space" in c.cssAttr: # normal | pre | nowrap c.frag.whiteSpace = str(c.cssAttr["white-space"]).lower() # ALIGN & VALIGN if "text-align" in c.cssAttr: c.frag.alignment = getAlign(c.cssAttr["text-align"]) if "vertical-align" in c.cssAttr: c.frag.vAlign = c.cssAttr["vertical-align"] # HEIGHT & WIDTH if "height" in c.cssAttr: c.frag.height = "".join(toList(c.cssAttr["height"])) # XXX Relative is not correct! if c.frag.height in ("auto",): c.frag.height = None if "width" in c.cssAttr: c.frag.width = "".join(toList(c.cssAttr["width"])) # XXX Relative is not correct! if c.frag.width in ("auto",): c.frag.width = None # ZOOM if "zoom" in c.cssAttr: zoom = "".join(toList(c.cssAttr["zoom"])) # XXX Relative is not correct! 
if zoom.endswith("%"): zoom = float(zoom[: - 1]) / 100.0 c.frag.zoom = float(zoom) # MARGINS & LIST INDENT, STYLE if isBlock: if "margin-top" in c.cssAttr: c.frag.spaceBefore = getSize(c.cssAttr["margin-top"], c.frag.fontSize) if "margin-bottom" in c.cssAttr: c.frag.spaceAfter = getSize(c.cssAttr["margin-bottom"], c.frag.fontSize) if "margin-left" in c.cssAttr: c.frag.bulletIndent = kw["margin-left"] # For lists kw["margin-left"] += getSize(c.cssAttr["margin-left"], c.frag.fontSize) c.frag.leftIndent = kw["margin-left"] if "margin-right" in c.cssAttr: kw["margin-right"] += getSize(c.cssAttr["margin-right"], c.frag.fontSize) c.frag.rightIndent = kw["margin-right"] if "text-indent" in c.cssAttr: c.frag.firstLineIndent = getSize(c.cssAttr["text-indent"], c.frag.fontSize) if "list-style-type" in c.cssAttr: c.frag.listStyleType = str(c.cssAttr["list-style-type"]).lower() if "list-style-image" in c.cssAttr: c.frag.listStyleImage = c.getFile(c.cssAttr["list-style-image"]) # PADDINGS if isBlock: if "padding-top" in c.cssAttr: c.frag.paddingTop = getSize(c.cssAttr["padding-top"], c.frag.fontSize) if "padding-bottom" in c.cssAttr: c.frag.paddingBottom = getSize(c.cssAttr["padding-bottom"], c.frag.fontSize) if "padding-left" in c.cssAttr: c.frag.paddingLeft = getSize(c.cssAttr["padding-left"], c.frag.fontSize) if "padding-right" in c.cssAttr: c.frag.paddingRight = getSize(c.cssAttr["padding-right"], c.frag.fontSize) # BORDERS if isBlock: if "border-top-width" in c.cssAttr: c.frag.borderTopWidth = getSize(c.cssAttr["border-top-width"], c.frag.fontSize) if "border-bottom-width" in c.cssAttr: c.frag.borderBottomWidth = getSize(c.cssAttr["border-bottom-width"], c.frag.fontSize) if "border-left-width" in c.cssAttr: c.frag.borderLeftWidth = getSize(c.cssAttr["border-left-width"], c.frag.fontSize) if "border-right-width" in c.cssAttr: c.frag.borderRightWidth = getSize(c.cssAttr["border-right-width"], c.frag.fontSize) if "border-top-style" in c.cssAttr: c.frag.borderTopStyle = c.cssAttr["border-top-style"] if "border-bottom-style" in c.cssAttr: c.frag.borderBottomStyle = c.cssAttr["border-bottom-style"] if "border-left-style" in c.cssAttr: c.frag.borderLeftStyle = c.cssAttr["border-left-style"] if "border-right-style" in c.cssAttr: c.frag.borderRightStyle = c.cssAttr["border-right-style"] if "border-top-color" in c.cssAttr: c.frag.borderTopColor = getColor(c.cssAttr["border-top-color"]) if "border-bottom-color" in c.cssAttr: c.frag.borderBottomColor = getColor(c.cssAttr["border-bottom-color"]) if "border-left-color" in c.cssAttr: c.frag.borderLeftColor = getColor(c.cssAttr["border-left-color"]) if "border-right-color" in c.cssAttr: c.frag.borderRightColor = getColor(c.cssAttr["border-right-color"]) def pisaPreLoop(node, context, collect=False): """ Collect all CSS definitions """ data = u"" if node.nodeType == Node.TEXT_NODE and collect: data = node.data elif node.nodeType == Node.ELEMENT_NODE: name = node.tagName.lower() if name in ("style", "link"): attr = pisaGetAttributes(context, name, node.attributes) media = [x.strip() for x in attr.media.lower().split(",") if x.strip()] if attr.get("type", "").lower() in ("", "text/css") and \ (not media or "all" in media or "print" in media or "pdf" in media): if name == "style": for node in node.childNodes: data += pisaPreLoop(node, context, collect=True) context.addCSS(data) return u"" if name == "link" and attr.href and attr.rel.lower() == "stylesheet": # print "CSS LINK", attr context.addCSS('\n@import "%s" %s;' % (attr.href, ",".join(media))) for node in 
node.childNodes: result = pisaPreLoop(node, context, collect=collect) if collect: data += result return data def pisaLoop(node, context, path=None, **kw): if path is None: path = [] # Initialize KW if not kw: kw = { "margin-top": 0, "margin-bottom": 0, "margin-left": 0, "margin-right": 0, } else: kw = copy.copy(kw) #indent = len(path) * " " # only used for debug print statements # TEXT if node.nodeType == Node.TEXT_NODE: # print indent, "#", repr(node.data) #, context.frag context.addFrag(node.data) # context.text.append(node.value) # ELEMENT elif node.nodeType == Node.ELEMENT_NODE: node.tagName = node.tagName.replace(":", "").lower() if node.tagName in ("style", "script"): return path = copy.copy(path) + [node.tagName] # Prepare attributes attr = pisaGetAttributes(context, node.tagName, node.attributes) #log.debug(indent + "<%s %s>" % (node.tagName, attr) + repr(node.attributes.items())) #, path # Calculate styles context.cssAttr = CSSCollect(node, context) context.cssAttr = mapNonStandardAttrs(context.cssAttr, node, attr) context.node = node # Block? PAGE_BREAK = 1 PAGE_BREAK_RIGHT = 2 PAGE_BREAK_LEFT = 3 pageBreakAfter = False frameBreakAfter = False display = context.cssAttr.get("display", "inline").lower() # print indent, node.tagName, display, context.cssAttr.get("background-color", None), attr isBlock = (display == "block") if isBlock: context.addPara() # Page break by CSS if "-pdf-next-page" in context.cssAttr: context.addStory(NextPageTemplate(str(context.cssAttr["-pdf-next-page"]))) if "-pdf-page-break" in context.cssAttr: if str(context.cssAttr["-pdf-page-break"]).lower() == "before": context.addStory(PageBreak()) if "-pdf-frame-break" in context.cssAttr: if str(context.cssAttr["-pdf-frame-break"]).lower() == "before": context.addStory(FrameBreak()) if str(context.cssAttr["-pdf-frame-break"]).lower() == "after": frameBreakAfter = True if "page-break-before" in context.cssAttr: if str(context.cssAttr["page-break-before"]).lower() == "always": context.addStory(PageBreak()) if str(context.cssAttr["page-break-before"]).lower() == "right": context.addStory(PageBreak()) context.addStory(PmlRightPageBreak()) if str(context.cssAttr["page-break-before"]).lower() == "left": context.addStory(PageBreak()) context.addStory(PmlLeftPageBreak()) if "page-break-after" in context.cssAttr: if str(context.cssAttr["page-break-after"]).lower() == "always": pageBreakAfter = PAGE_BREAK if str(context.cssAttr["page-break-after"]).lower() == "right": pageBreakAfter = PAGE_BREAK_RIGHT if str(context.cssAttr["page-break-after"]).lower() == "left": pageBreakAfter = PAGE_BREAK_LEFT if display == "none": # print "none!" 
return # Translate CSS to frags # Save previous frag styles context.pushFrag() # Map styles to Reportlab fragment properties CSS2Frag(context, kw, isBlock) # EXTRAS if "-pdf-keep-with-next" in context.cssAttr: context.frag.keepWithNext = getBool(context.cssAttr["-pdf-keep-with-next"]) if "-pdf-outline" in context.cssAttr: context.frag.outline = getBool(context.cssAttr["-pdf-outline"]) if "-pdf-outline-level" in context.cssAttr: context.frag.outlineLevel = int(context.cssAttr["-pdf-outline-level"]) if "-pdf-outline-open" in context.cssAttr: context.frag.outlineOpen = getBool(context.cssAttr["-pdf-outline-open"]) if "-pdf-word-wrap" in context.cssAttr: context.frag.wordWrap = context.cssAttr["-pdf-word-wrap"] # handle keep-in-frame keepInFrameMode = None keepInFrameMaxWidth = 0 keepInFrameMaxHeight = 0 if "-pdf-keep-in-frame-mode" in context.cssAttr: value = str(context.cssAttr["-pdf-keep-in-frame-mode"]).strip().lower() if value in ("shrink", "error", "overflow", "truncate"): keepInFrameMode = value if "-pdf-keep-in-frame-max-width" in context.cssAttr: keepInFrameMaxWidth = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-width"])) if "-pdf-keep-in-frame-max-height" in context.cssAttr: keepInFrameMaxHeight = getSize("".join(context.cssAttr["-pdf-keep-in-frame-max-height"])) # ignore nested keep-in-frames, tables have their own KIF handling keepInFrame = keepInFrameMode is not None and context.keepInFrameIndex is None if keepInFrame: # keep track of current story index, so we can wrap everythink # added after this point in a KeepInFrame context.keepInFrameIndex = len(context.story) # BEGIN tag klass = globals().get("pisaTag%s" % node.tagName.replace(":", "").upper(), None) obj = None # Static block elementId = attr.get("id", None) staticFrame = context.frameStatic.get(elementId, None) if staticFrame: context.frag.insideStaticFrame += 1 oldStory = context.swapStory() # Tag specific operations if klass is not None: obj = klass(node, attr) obj.start(context) # Visit child nodes context.fragBlock = fragBlock = copy.copy(context.frag) for nnode in node.childNodes: pisaLoop(nnode, context, path, **kw) context.fragBlock = fragBlock # END tag if obj: obj.end(context) # Block? if isBlock: context.addPara() # XXX Buggy! 
# Page break by CSS if pageBreakAfter: context.addStory(PageBreak()) if pageBreakAfter == PAGE_BREAK_RIGHT: context.addStory(PmlRightPageBreak()) if pageBreakAfter == PAGE_BREAK_LEFT: context.addStory(PmlLeftPageBreak()) if frameBreakAfter: context.addStory(FrameBreak()) if keepInFrame: # get all content added after start of -pdf-keep-in-frame and wrap # it in a KeepInFrame substory = context.story[context.keepInFrameIndex:] context.story = context.story[:context.keepInFrameIndex] context.story.append( KeepInFrame( content=substory, maxWidth=keepInFrameMaxWidth, maxHeight=keepInFrameMaxHeight)) context.keepInFrameIndex = None # Static block, END if staticFrame: context.addPara() for frame in staticFrame: frame.pisaStaticStory = context.story context.swapStory(oldStory) context.frag.insideStaticFrame -= 1 # context.debug(1, indent, "</%s>" % (node.tagName)) # Reset frag style context.pullFrag() # Unknown or not handled else: # context.debug(1, indent, "???", node, node.nodeType, repr(node)) # Loop over children for node in node.childNodes: pisaLoop(node, context, path, **kw) def pisaParser(src, context, default_css="", xhtml=False, encoding=None, xml_output=None): """ - Parse HTML and get miniDOM - Extract CSS informations, add default CSS, parse CSS - Handle the document DOM itself and build reportlab story - Return Context object """ global CSSAttrCache CSSAttrCache = {} if xhtml: #TODO: XHTMLParser doesn't see to exist... parser = html5lib.XHTMLParser(tree=treebuilders.getTreeBuilder("dom")) else: parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("dom")) if type(src) in types.StringTypes: if type(src) is types.UnicodeType: # If an encoding was provided, do not change it. if not encoding: encoding = "utf-8" src = src.encode(encoding) src = pisaTempFile(src, capacity=context.capacity) # Test for the restrictions of html5lib if encoding: # Workaround for html5lib<0.11.1 if hasattr(inputstream, "isValidEncoding"): if encoding.strip().lower() == "utf8": encoding = "utf-8" if not inputstream.isValidEncoding(encoding): log.error("%r is not a valid encoding e.g. 'utf8' is not valid but 'utf-8' is!", encoding) else: if inputstream.codecName(encoding) is None: log.error("%r is not a valid encoding", encoding) document = parser.parse( src, encoding=encoding) if xml_output: if encoding: xml_output.write(document.toprettyxml(encoding=encoding)) else: xml_output.write(document.toprettyxml(encoding="utf8")) if default_css: context.addCSS(default_css) pisaPreLoop(document, context) #try: context.parseCSS() #except: # context.cssText = DEFAULT_CSS # context.parseCSS() # context.debug(9, pprint.pformat(context.css)) pisaLoop(document, context) return context # Shortcuts HTML2PDF = pisaParser def XHTML2PDF(*a, **kw): kw["xhtml"] = True return HTML2PDF(*a, **kw) XML2PDF = XHTML2PDF
apache-2.0
-1,639,543,538,045,728,800
34.494318
117
0.581279
false
3.732338
false
false
false
Dioptas/Dioptas
dioptas/model/util/BackgroundExtraction.py
1
2873
# -*- coding: utf-8 -*-
# Dioptas - GUI program for fast processing of 2D X-ray diffraction data
# Principal author: Clemens Prescher (clemens.prescher@gmail.com)
# Copyright (C) 2014-2019 GSECARS, University of Chicago, USA
# Copyright (C) 2015-2018 Institute for Geology and Mineralogy, University of Cologne, Germany
# Copyright (C) 2019-2020 DESY, Hamburg, Germany
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import logging

logger = logging.getLogger(__name__)

import numpy as np

try:
    from .smooth_bruckner import smooth_bruckner
except ImportError:
    try:
        from .smooth_bruckner_cython import smooth_bruckner
    except ImportError:
        try:
            import pyximport
            pyximport.install(language_level=3)
            from .smooth_bruckner_cython import smooth_bruckner
        except ImportError as e:
            print(e)
            logger.warning(
                "Could not import the Fortran or Cython version of smooth_bruckner. Using python implementation instead. Please"
                " run 'f2py -c -m smooth_bruckner smooth_bruckner.f95' in the model/util folder for faster"
                " implementation")
            from .smooth_bruckner_python import smooth_bruckner


def extract_background(x, y, smooth_width=0.1, iterations=50, cheb_order=50):
    """
    Performs a background subtraction using bruckner smoothing and a chebyshev polynomial.
    Standard parameters are found to be optimal for synchrotron XRD.

    :param x: x-data of pattern
    :param y: y-data of pattern
    :param smooth_width: width of the window in x-units used for bruckner smoothing
    :param iterations: number of iterations for the bruckner smoothing
    :param cheb_order: order of the fitted chebyshev polynomial
    :return: vector of extracted y background
    """
    smooth_points = int((float(smooth_width) / (x[1] - x[0])))
    y_smooth = smooth_bruckner(y, smooth_points, iterations)

    # get cheb input parameters
    x_cheb = 2. * (x - x[0]) / (x[-1] - x[0]) - 1.
    cheb_parameters = np.polynomial.chebyshev.chebfit(x_cheb, y_smooth, cheb_order)

    return np.polynomial.chebyshev.chebval(x_cheb, cheb_parameters)
gpl-3.0
-2,083,473,547,483,108,400
42.530303
128
0.679777
false
3.856376
false
false
false
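
For illustration, a minimal usage sketch of the extract_background helper from the record above; the import path (taken from the record's path field) and the synthetic pattern are assumptions, not part of the original file:

import numpy as np

# assumed import path, following the record's own package layout
from dioptas.model.util.BackgroundExtraction import extract_background

# synthetic pattern: two Gaussian peaks sitting on a smooth, decaying background
x = np.linspace(5.0, 65.0, 3000)
y = (50.0 * np.exp(-0.03 * x)
     + 10.0 * np.exp(-((x - 20.0) ** 2) / 0.05)
     + 8.0 * np.exp(-((x - 40.0) ** 2) / 0.05))

background = extract_background(x, y, smooth_width=0.1, iterations=50, cheb_order=50)
peaks_only = y - background  # background-subtracted pattern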
quru/wagtail
wagtail/wagtailimages/models.py
1
17958
from __future__ import absolute_import, unicode_literals import hashlib import os.path from collections import OrderedDict from contextlib import contextmanager import django from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.files import File from django.core.urlresolvers import reverse from django.db import models from django.db.models.signals import pre_delete, pre_save from django.dispatch.dispatcher import receiver from django.forms.widgets import flatatt from django.utils.encoding import python_2_unicode_compatible from django.utils.functional import cached_property from django.utils.safestring import mark_safe from django.utils.six import BytesIO, string_types, text_type from django.utils.translation import ugettext_lazy as _ from taggit.managers import TaggableManager from unidecode import unidecode from willow.image import Image as WillowImage from wagtail.wagtailadmin.taggable import TagSearchable from wagtail.wagtailadmin.utils import get_object_usage from wagtail.wagtailcore import hooks from wagtail.wagtailcore.models import CollectionMember from wagtail.wagtailimages.exceptions import InvalidFilterSpecError from wagtail.wagtailimages.rect import Rect from wagtail.wagtailsearch import index from wagtail.wagtailsearch.queryset import SearchableQuerySetMixin class SourceImageIOError(IOError): """ Custom exception to distinguish IOErrors that were thrown while opening the source image """ pass class ImageQuerySet(SearchableQuerySetMixin, models.QuerySet): pass def get_upload_to(instance, filename): """ Obtain a valid upload path for an image file. This needs to be a module-level function so that it can be referenced within migrations, but simply delegates to the `get_upload_to` method of the instance, so that AbstractImage subclasses can override it. """ return instance.get_upload_to(filename) def get_rendition_upload_to(instance, filename): """ Obtain a valid upload path for an image rendition file. This needs to be a module-level function so that it can be referenced within migrations, but simply delegates to the `get_upload_to` method of the instance, so that AbstractRendition subclasses can override it. 
""" return instance.get_upload_to(filename) @python_2_unicode_compatible class AbstractImage(CollectionMember, TagSearchable): title = models.CharField(max_length=255, verbose_name=_('title')) file = models.ImageField( verbose_name=_('file'), upload_to=get_upload_to, width_field='width', height_field='height' ) width = models.IntegerField(verbose_name=_('width'), editable=False) height = models.IntegerField(verbose_name=_('height'), editable=False) created_at = models.DateTimeField(verbose_name=_('created at'), auto_now_add=True, db_index=True) uploaded_by_user = models.ForeignKey( settings.AUTH_USER_MODEL, verbose_name=_('uploaded by user'), null=True, blank=True, editable=False, on_delete=models.SET_NULL ) tags = TaggableManager(help_text=None, blank=True, verbose_name=_('tags')) focal_point_x = models.PositiveIntegerField(null=True, blank=True) focal_point_y = models.PositiveIntegerField(null=True, blank=True) focal_point_width = models.PositiveIntegerField(null=True, blank=True) focal_point_height = models.PositiveIntegerField(null=True, blank=True) file_size = models.PositiveIntegerField(null=True, editable=False) objects = ImageQuerySet.as_manager() def is_stored_locally(self): """ Returns True if the image is hosted on the local filesystem """ try: self.file.path return True except NotImplementedError: return False def get_file_size(self): if self.file_size is None: try: self.file_size = self.file.size except OSError: # File doesn't exist return self.save(update_fields=['file_size']) return self.file_size def get_upload_to(self, filename): folder_name = 'original_images' filename = self.file.field.storage.get_valid_name(filename) # do a unidecode in the filename and then # replace non-ascii characters in filename with _ , to sidestep issues with filesystem encoding filename = "".join((i if ord(i) < 128 else '_') for i in unidecode(filename)) # Truncate filename so it fits in the 100 character limit # https://code.djangoproject.com/ticket/9893 while len(os.path.join(folder_name, filename)) >= 95: prefix, dot, extension = filename.rpartition('.') filename = prefix[:-1] + dot + extension return os.path.join(folder_name, filename) def get_usage(self): return get_object_usage(self) @property def usage_url(self): return reverse('wagtailimages:image_usage', args=(self.id,)) search_fields = TagSearchable.search_fields + CollectionMember.search_fields + [ index.FilterField('uploaded_by_user'), ] def __str__(self): return self.title @contextmanager def get_willow_image(self): # Open file if it is closed close_file = False try: image_file = self.file if self.file.closed: # Reopen the file if self.is_stored_locally(): self.file.open('rb') else: # Some external storage backends don't allow reopening # the file. Get a fresh file instance. 
#1397 storage = self._meta.get_field('file').storage image_file = storage.open(self.file.name, 'rb') close_file = True except IOError as e: # re-throw this as a SourceImageIOError so that calling code can distinguish # these from IOErrors elsewhere in the process raise SourceImageIOError(text_type(e)) # Seek to beginning image_file.seek(0) try: yield WillowImage.open(image_file) finally: if close_file: image_file.close() def get_rect(self): return Rect(0, 0, self.width, self.height) def get_focal_point(self): if self.focal_point_x is not None and \ self.focal_point_y is not None and \ self.focal_point_width is not None and \ self.focal_point_height is not None: return Rect.from_point( self.focal_point_x, self.focal_point_y, self.focal_point_width, self.focal_point_height, ) def has_focal_point(self): return self.get_focal_point() is not None def set_focal_point(self, rect): if rect is not None: self.focal_point_x = rect.centroid_x self.focal_point_y = rect.centroid_y self.focal_point_width = rect.width self.focal_point_height = rect.height else: self.focal_point_x = None self.focal_point_y = None self.focal_point_width = None self.focal_point_height = None def get_suggested_focal_point(self): with self.get_willow_image() as willow: faces = willow.detect_faces() if faces: # Create a bounding box around all faces left = min(face[0] for face in faces) top = min(face[1] for face in faces) right = max(face[2] for face in faces) bottom = max(face[3] for face in faces) focal_point = Rect(left, top, right, bottom) else: features = willow.detect_features() if features: # Create a bounding box around all features left = min(feature[0] for feature in features) top = min(feature[1] for feature in features) right = max(feature[0] for feature in features) bottom = max(feature[1] for feature in features) focal_point = Rect(left, top, right, bottom) else: return None # Add 20% to width and height and give it a minimum size x, y = focal_point.centroid width, height = focal_point.size width *= 1.20 height *= 1.20 width = max(width, 100) height = max(height, 100) return Rect.from_point(x, y, width, height) @classmethod def get_rendition_model(cls): """ Get the Rendition model for this Image model """ if django.VERSION >= (1, 9): return cls.renditions.rel.related_model else: return cls.renditions.related.related_model def get_rendition(self, filter): if isinstance(filter, string_types): filter, created = Filter.objects.get_or_create(spec=filter) cache_key = filter.get_cache_key(self) Rendition = self.get_rendition_model() try: rendition = self.renditions.get( filter=filter, focal_point_key=cache_key, ) except Rendition.DoesNotExist: # Generate the rendition image generated_image = filter.run(self, BytesIO()) # Generate filename input_filename = os.path.basename(self.file.name) input_filename_without_extension, input_extension = os.path.splitext(input_filename) # A mapping of image formats to extensions FORMAT_EXTENSIONS = { 'jpeg': '.jpg', 'png': '.png', 'gif': '.gif', } output_extension = filter.spec.replace('|', '.') + FORMAT_EXTENSIONS[generated_image.format_name] if cache_key: output_extension = cache_key + '.' + output_extension # Truncate filename to prevent it going over 60 chars output_filename_without_extension = input_filename_without_extension[:(59 - len(output_extension))] output_filename = output_filename_without_extension + '.' 
+ output_extension rendition, created = self.renditions.get_or_create( filter=filter, focal_point_key=cache_key, defaults={'file': File(generated_image.f, name=output_filename)} ) return rendition def is_portrait(self): return (self.width < self.height) def is_landscape(self): return (self.height < self.width) @property def filename(self): return os.path.basename(self.file.name) @property def default_alt_text(self): # by default the alt text field (used in rich text insertion) is populated # from the title. Subclasses might provide a separate alt field, and # override this return self.title def is_editable_by_user(self, user): from wagtail.wagtailimages.permissions import permission_policy return permission_policy.user_has_permission_for_instance(user, 'change', self) class Meta: abstract = True class Image(AbstractImage): admin_form_fields = ( 'title', 'file', 'collection', 'tags', 'focal_point_x', 'focal_point_y', 'focal_point_width', 'focal_point_height', ) # Do smartcropping calculations when user saves an image without a focal point @receiver(pre_save, sender=Image) def image_feature_detection(sender, instance, **kwargs): if getattr(settings, 'WAGTAILIMAGES_FEATURE_DETECTION_ENABLED', False): # Make sure the image doesn't already have a focal point if not instance.has_focal_point(): # Set the focal point instance.set_focal_point(instance.get_suggested_focal_point()) # Receive the pre_delete signal and delete the file associated with the model instance. @receiver(pre_delete, sender=Image) def image_delete(sender, instance, **kwargs): # Pass false so FileField doesn't save the model. instance.file.delete(False) def get_image_model(): from django.conf import settings from django.apps import apps try: app_label, model_name = settings.WAGTAILIMAGES_IMAGE_MODEL.split('.') except AttributeError: return Image except ValueError: raise ImproperlyConfigured("WAGTAILIMAGES_IMAGE_MODEL must be of the form 'app_label.model_name'") image_model = apps.get_model(app_label, model_name) if image_model is None: raise ImproperlyConfigured( "WAGTAILIMAGES_IMAGE_MODEL refers to model '%s' that has not been installed" % settings.WAGTAILIMAGES_IMAGE_MODEL ) return image_model class Filter(models.Model): """ Represents one or more operations that can be applied to an Image to produce a rendition appropriate for final display on the website. Usually this would be a resize operation, but could potentially involve colour processing, etc. 
""" # The spec pattern is operation1-var1-var2|operation2-var1 spec = models.CharField(max_length=255, unique=True) @cached_property def operations(self): # Search for operations self._search_for_operations() # Build list of operation objects operations = [] for op_spec in self.spec.split('|'): op_spec_parts = op_spec.split('-') if op_spec_parts[0] not in self._registered_operations: raise InvalidFilterSpecError("Unrecognised operation: %s" % op_spec_parts[0]) op_class = self._registered_operations[op_spec_parts[0]] operations.append(op_class(*op_spec_parts)) return operations def run(self, image, output): with image.get_willow_image() as willow: original_format = willow.format_name # Fix orientation of image willow = willow.auto_orient() for operation in self.operations: willow = operation.run(willow, image) or willow if original_format == 'jpeg': # Allow changing of JPEG compression quality if hasattr(settings, 'WAGTAILIMAGES_JPEG_QUALITY'): quality = settings.WAGTAILIMAGES_JPEG_QUALITY else: quality = 85 return willow.save_as_jpeg(output, quality=quality) elif original_format == 'gif': # Convert image to PNG if it's not animated if not willow.has_animation(): return willow.save_as_png(output) else: return willow.save_as_gif(output) elif original_format == 'bmp': # Convert to PNG return willow.save_as_png(output) else: return willow.save(original_format, output) def get_cache_key(self, image): vary_parts = [] for operation in self.operations: for field in getattr(operation, 'vary_fields', []): value = getattr(image, field, '') vary_parts.append(str(value)) vary_string = '-'.join(vary_parts) # Return blank string if there are no vary fields if not vary_string: return '' return hashlib.sha1(vary_string.encode('utf-8')).hexdigest()[:8] _registered_operations = None @classmethod def _search_for_operations(cls): if cls._registered_operations is not None: return operations = [] for fn in hooks.get_hooks('register_image_operations'): operations.extend(fn()) cls._registered_operations = dict(operations) class AbstractRendition(models.Model): filter = models.ForeignKey(Filter, related_name='+') file = models.ImageField(upload_to=get_rendition_upload_to, width_field='width', height_field='height') width = models.IntegerField(editable=False) height = models.IntegerField(editable=False) focal_point_key = models.CharField(max_length=255, blank=True, default='', editable=False) @property def url(self): return self.file.url @property def alt(self): return self.image.title @property def attrs(self): """ The src, width, height, and alt attributes for an <img> tag, as a HTML string """ return flatatt(self.attrs_dict) @property def attrs_dict(self): """ A dict of the src, width, height, and alt attributes for an <img> tag. """ return OrderedDict([ ('src', self.url), ('width', self.width), ('height', self.height), ('alt', self.alt), ]) def img_tag(self, extra_attributes={}): attrs = self.attrs_dict.copy() attrs.update(extra_attributes) return mark_safe('<img{}>'.format(flatatt(attrs))) def __html__(self): return self.img_tag() def get_upload_to(self, filename): folder_name = 'images' filename = self.file.field.storage.get_valid_name(filename) return os.path.join(folder_name, filename) class Meta: abstract = True class Rendition(AbstractRendition): image = models.ForeignKey(Image, related_name='renditions') class Meta: unique_together = ( ('image', 'filter', 'focal_point_key'), ) # Receive the pre_delete signal and delete the file associated with the model instance. 
@receiver(pre_delete, sender=Rendition) def rendition_delete(sender, instance, **kwargs): # Pass false so FileField doesn't save the model. instance.file.delete(False)
bsd-3-clause
-9,143,806,606,278,823,000
33.402299
111
0.622452
false
4.159833
false
false
false
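
A hedged sketch of how the Filter spec and AbstractImage.get_rendition from the wagtail record above are typically called; the page model and the 'fill-300x200' spec are illustrative assumptions, not taken from the original file:

# hypothetical page model with an `image` ForeignKey to wagtailimages.Image
from myapp.models import BlogPage

page = BlogPage.objects.first()

# spec string follows the 'operation1-var1-var2|operation2-var1' pattern parsed by Filter
rendition = page.image.get_rendition('fill-300x200')

print(rendition.url, rendition.width, rendition.height)
print(rendition.img_tag({'class': 'teaser'}))  # renders an <img> tag with src/width/height/alt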
hehongliang/tensorflow
tensorflow/python/keras/optimizer_v2/ftrl_test.py
1
17276
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Functional tests for Ftrl operations.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.keras.optimizer_v2 import ftrl from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import adagrad from tensorflow.python.training import gradient_descent class FtrlOptimizerTest(test.TestCase): def doTestFtrlwithoutRegularization(self, use_resource=False): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: if use_resource: var0 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([0.0, 0.0], dtype=dtype) else: var0 = variables.Variable([0.0, 0.0], dtype=dtype) var1 = variables.Variable([0.0, 0.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllClose([0.0, 0.0], v0_val) self.assertAllClose([0.0, 0.0], v1_val) # Run 3 steps FTRL for _ in range(3): update.run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType( np.array([-2.60260963, -4.29698515]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.28432083, -0.56694895]), v1_val) def testFtrlWithoutRegularization(self): self.doTestFtrlwithoutRegularization(use_resource=False) def testResourceFtrlWithoutRegularization(self): self.doTestFtrlwithoutRegularization(use_resource=True) def testFtrlwithoutRegularization2(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 3 steps FTRL for _ in range(3): update.run() v0_val, v1_val = 
sess.run([var0, var1]) self.assertAllCloseAccordingToType( np.array([-2.55607247, -3.98729396]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.28232238, -0.56096673]), v1_val) def testMinimizeSparseResourceVariable(self): for dtype in [dtypes.half, dtypes.float32, dtypes.float64]: with self.cached_session(): var0 = resource_variable_ops.ResourceVariable([[1.0, 2.0]], dtype=dtype) x = constant_op.constant([[4.0], [5.0]], dtype=dtype) pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) loss = pred * pred sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0]) variables.global_variables_initializer().run() # Fetch params to validate initial values self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0)) # Run 1 step of sgd sgd_op.run() # Validate updated params self.assertAllCloseAccordingToType([[0, 1]], self.evaluate(var0), atol=0.01) def testFtrlWithL1(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=0.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType( np.array([-7.66718769, -10.91273689]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.93460727, -1.86147261]), v1_val) def testFtrlWithL1_L2(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType( np.array([-0.24059935, -0.46829352]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.02406147, -0.04830509]), v1_val) def testFtrlWithL1_L2_L2Shrinkage(self): """Test the new FTRL op with support for l2 shrinkage. The addition of this parameter which places a constant pressure on weights towards the origin causes the gradient descent trajectory to differ. The weights will tend to have smaller magnitudes with this parameter set. 
""" for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([4.0, 3.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([4.0, 3.0], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType( np.array([-0.22578995, -0.44345796]), v0_val) self.assertAllCloseAccordingToType( np.array([-0.14378493, -0.13229476]), v1_val) def testFtrlWithL1_L2_L2ShrinkageSparse(self): """Tests the new FTRL op with support for l2 shrinkage on sparse grads.""" for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([[1.0], [2.0]], dtype=dtype) var1 = variables.Variable([[4.0], [3.0]], dtype=dtype) grads0 = ops.IndexedSlices( constant_op.constant([0.1], shape=[1, 1], dtype=dtype), constant_op.constant([0]), constant_op.constant([2, 1])) grads1 = ops.IndexedSlices( constant_op.constant([0.02], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) opt = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val) self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val) # Run 10 steps FTRL for _ in range(10): update.run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType([[-0.22578995], [2.]], v0_val) self.assertAllCloseAccordingToType([[4.], [-0.13229476]], v1_val) def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self): """Verifies that l2 shrinkage in FTRL does not change lr schedule.""" for dtype in [dtypes.half, dtypes.float32]: with self.cached_session() as sess: var0 = variables.Variable([1.0, 2.0], dtype=dtype) var1 = variables.Variable([1.0, 2.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.1, 0.2], dtype=dtype) opt0 = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0, l2_shrinkage_regularization_strength=0.1) opt1 = ftrl.Ftrl( 3.0, initial_accumulator_value=0.1, l1_regularization_strength=0.001, l2_regularization_strength=2.0) update0 = opt0.apply_gradients([(grads0, var0)]) update1 = opt1.apply_gradients([(grads1, var1)]) variables.global_variables_initializer().run() v0_val, v1_val = sess.run([var0, var1]) self.assertAllCloseAccordingToType([1.0, 2.0], v0_val) self.assertAllCloseAccordingToType([1.0, 2.0], v1_val) # Run 10 steps FTRL for _ in range(10): update0.run() update1.run() v0_val, v1_val = sess.run([var0, var1]) # var0 is experiencing L2 shrinkage so it should be smaller than var1 # in magnitude. 
self.assertTrue((v0_val**2 < v1_val**2).all()) accum0 = sess.run(opt0.get_slot(var0, "accumulator")) accum1 = sess.run(opt1.get_slot(var1, "accumulator")) # L2 shrinkage should not change how we update grad accumulator. self.assertAllCloseAccordingToType(accum0, accum1) def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False): if is_sparse: var0 = variables.Variable([[0.0], [0.0]], dtype=dtype) var1 = variables.Variable([[0.0], [0.0]], dtype=dtype) grads0 = ops.IndexedSlices( constant_op.constant([0.1], shape=[1, 1], dtype=dtype), constant_op.constant([0]), constant_op.constant([2, 1])) grads1 = ops.IndexedSlices( constant_op.constant([0.02], shape=[1, 1], dtype=dtype), constant_op.constant([1]), constant_op.constant([2, 1])) else: var0 = variables.Variable([0.0, 0.0], dtype=dtype) var1 = variables.Variable([0.0, 0.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.2], dtype=dtype) grads1 = constant_op.constant([0.01, 0.02], dtype=dtype) update = opt.apply_gradients(zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() sess = ops.get_default_session() v0_val, v1_val = sess.run([var0, var1]) if is_sparse: self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val) self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val) else: self.assertAllCloseAccordingToType([0.0, 0.0], v0_val) self.assertAllCloseAccordingToType([0.0, 0.0], v1_val) # Run Ftrl for a few steps for _ in range(steps): update.run() v0_val, v1_val = sess.run([var0, var1]) return v0_val, v1_val # When variables are initialized with Zero, FTRL-Proximal has two properties: # 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical # with GradientDescent. # 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical # with Adagrad. # So, basing on these two properties, we test if our implementation of # FTRL-Proximal performs same updates as Adagrad or GradientDescent. 
def testEquivAdagradwithoutRegularization(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Adagrad learning rate learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype) with self.cached_session(): val2, val3 = self.applyOptimizer( adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) def testEquivSparseAdagradwithoutRegularization(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Adagrad learning rate learning_rate_power=-0.5, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype, is_sparse=True) with self.cached_session(): val2, val3 = self.applyOptimizer( adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype, is_sparse=True) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) def testEquivSparseGradientDescentwithoutRegularization(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Fixed learning rate learning_rate_power=-0.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype, is_sparse=True) with self.cached_session(): val2, val3 = self.applyOptimizer( gradient_descent.GradientDescentOptimizer(3.0), dtype, is_sparse=True) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) def testEquivGradientDescentwithoutRegularization(self): for dtype in [dtypes.half, dtypes.float32]: with self.cached_session(): val0, val1 = self.applyOptimizer( ftrl.Ftrl( 3.0, # Fixed learning rate learning_rate_power=-0.0, initial_accumulator_value=0.1, l1_regularization_strength=0.0, l2_regularization_strength=0.0), dtype) with self.cached_session(): val2, val3 = self.applyOptimizer( gradient_descent.GradientDescentOptimizer(3.0), dtype) self.assertAllCloseAccordingToType(val0, val2) self.assertAllCloseAccordingToType(val1, val3) if __name__ == "__main__": test.main()
apache-2.0
2,758,639,719,121,693,000
39.553991
80
0.620167
false
3.507817
true
false
false
maxtangli/sonico
language/python/teabreak/final_hint.py
1
1056
def intelligent_data_source_factory(*data):
    import itertools
    cy = itertools.cycle(data)
    _int = int
    return lambda i: _int(i) if isinstance(i, str) else next(cy)


int = intelligent_data_source_factory(1985, 33067, 84)
# int = intelligent_data_source_factory(2012, 9, 30) # invalid
# int = intelligent_data_source_factory(2012, 9, 16) # invalid
# int = intelligent_data_source_factory(84, 100, 114) # invalid


def range_check(func):
    return lambda m, e, n, c: ((0 <= m < n) and func(m, e, n, c)) or ''


@range_check
def f(m, e, n, c):
    return str(m) if pow(m, e) % n == c else ''


if __name__ == '__main__':
    # for i in range(1000000):
    #     # if f(i, 17, 3569, 915) == str(i):
    #     if f(i, 1985, 33067, 84) == str(i):
    #         print(i)  # 25202
    #
    # print(25202 % 1985, 25202 % 33067, 25202 % 84)  # invalid
    # print(25202 % 17, 25202 % 3569, 25202 % 915)  # invalid

    for i in range(1000000):
        if f(i, int(17), int(3569), int(915)) == str(i):
            print(i)  # 25202 -> 20252(invalid)
mit
7,963,685,116,978,258,000
28.333333
71
0.571023
false
2.735751
false
false
false
mory0tiki/pack-llama
views.py
1
1220
from django.core.files.base import ContentFile
from django.shortcuts import render
from django.http.response import HttpResponse
from django.views.generic import base
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
import ast
import json
import uuid
import models
import utils


class SavePackView(base.View):

    @method_decorator(csrf_exempt)
    def dispatch(self, request, *args, **kwargs):
        return super(SavePackView, self).dispatch(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        try:
            result = {"result" : False}
            if request.body:
                pack = models.Pack()
                pack.receive_from = request.META["REMOTE_ADDR"]
                pack.queue_id = models.Queue.objects.get(name='Test').id
                if settings.DEBUG:
                    print request.body
                    print "++++++++++++++++++++++++"
                pack.message.save(str(uuid.uuid4()),ContentFile(request.body))
                result["result"] = True
        except Exception as ex:
            print str(ex)
        return HttpResponse(json.dumps(result))
apache-2.0
5,642,803,948,910,530,000
32.888889
78
0.62623
false
4.341637
false
false
false
JackCloudman/Youtube-music
download.py
1
1342
#Program to download Youtube music
#Author: Jack Cloudman
import pafy,os,shutil
from pydub import AudioSegment as convert

#Create song list
if os.path.exists('songs.txt'):
    pass
else:
    print("Creating songs.txt....")
    document= open('songs.txt','w')
    print("Paste your songs in songs.txt")
    document.close()
#create directory
if os.path.exists('music'):
    if os.path.exists('music/temp'):
        pass
    else:
        os.mkdir('music/temp')
else:
    os.mkdir('music')
    os.mkdir('music/temp')
document = open('songs.txt','r')
music_list = document.readlines()
document.close()
error_list=[]
print("Download music....")
for music in music_list:
    try:
        url = music
        video = pafy.new(url)
        bestaudio = video.getbestaudio()
        bestaudio.download(filepath="music/temp/")
    except:
        error_list.append("Error download: "+music)
print("Converting to mp3.....")
for filename in os.listdir('music/temp/'):
    try:
        audio = convert.from_file('music/temp/'+filename)
        name = os.path.splitext(filename)
        audio.export('music/'+name[0]+'.mp3',format="mp3",bitrate="160k")
    except:
        error_list.append("Error convert: "+name[0])
shutil.rmtree("music/temp")
for error in error_list:
    print(error)
print("Finished!")
gpl-3.0
-262,824,265,426,975,650
26.553191
73
0.622206
false
3.321782
false
false
false
jfalkner/Efficient-Django-QuerySet-Use
demo-optimized/example/utils.py
1
3812
from django.utils.timezone import utc from django_db_utils import pg_bulk_update from example.models import Sample, SampleStatus def now(): from datetime import datetime return datetime.utcnow().replace(tzinfo=utc) def make_fake_data(samples_to_make=100000, batch_threshold=100000, delete_existing=True, make_statuses=True, years=5): """Makes mock data for testing performance. Optionally, resets db. """ if delete_existing: Sample.objects.all().delete() print "Deleted existing" # Make up a set of offset = samples_to_make - samples_to_make/52/years # Create all the samples. samples = [] barcodes = range(samples_to_make) for barcode in barcodes: sample = Sample() sample.barcode = str(barcode) sample.created = now() sample.status_created = sample.created if barcode < offset: sample.status_code = SampleStatus.COMPLETE else: sample.status_code = SampleStatus.LAB sample.production = True samples.append(sample) if len(samples) >= batch_threshold: Sample.objects.bulk_create(samples) del samples[:] print "Made %s samples." % Sample.objects.count() if samples: Sample.objects.bulk_create(samples) print "Finished making %s samples." % Sample.objects.count() if not make_statuses: return # Pull all ids for samples. sample_ids = Sample.objects.values_list('id', flat=True) # Create all the statuses. offset = len(sample_ids)-len(sample_ids)/52/years statuses = [] for sample in sample_ids[:offset]: statuses.append(SampleStatus(sample_id=sample, status_code=SampleStatus.RECEIVED, created=now())) statuses.append(SampleStatus(sample_id=sample, status_code=SampleStatus.LAB, created=now())) statuses.append(SampleStatus(sample_id=sample, status_code=SampleStatus.COMPLETE, created=now())) if len(statuses) >= batch_threshold: SampleStatus.objects.bulk_create(statuses) del statuses[:] for sample in sample_ids[offset:]: statuses.append(SampleStatus(sample_id=sample, status_code=SampleStatus.RECEIVED, created=now())) statuses.append(SampleStatus(sample_id=sample, status_code=SampleStatus.LAB, created=now())) if len(statuses) >= batch_threshold: SampleStatus.objects.bulk_create(statuses) del statuses[:] print "Made %s statuses."%SampleStatus.objects.count() if statuses: SampleStatus.objects.bulk_create(statuses) print "Finished making %s statuses."%SampleStatus.objects.count() # Make all the denormalized status_code vars match. sync_status(limit=batch_threshold) print "Statuses synchronized" def sync_status(limit=100000): # Stream through all samples. sample_count = Sample.objects.count() for index in range(0, sample_count, limit): vals = Sample.objects.order_by('id', '-statuses__status_code').distinct('id').values_list('id', 'status_code', 'statuses__id', 'statuses__status_code')[index:index+limit] # Pull all mismatching values. ids = [] status_codes = [] # status_ids = [] for sample_id, status_code, status_id, latest_status_code in vals: if status_code != latest_status_code: ids.append(sample_id) status_codes.append(latest_status_code) # status_ids.append(status_id) # Sync using a bulk update. if ids: pg_bulk_update(Sample, 'id', 'status_code', list(ids), list(status_codes)) # pg_bulk_update(Sample, 'id', 'status_id', list(ids), list(status_ids)) print 'Synced %s out of %s samples at %s'%(len(ids), limit, index)
mit
361,137,351,912,115,300
39.126316
178
0.647692
false
3.909744
false
false
false
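
A short, assumed usage sketch for make_fake_data and sync_status from the record above; the module path follows the record's own imports and the sizes are placeholders:

from example.utils import make_fake_data, sync_status

# seed a small mock dataset (the project's own benchmarks use 100k+ samples)
make_fake_data(samples_to_make=10000, batch_threshold=5000, years=2)

# re-sync the denormalized Sample.status_code column against the latest SampleStatus rows
sync_status(limit=5000)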
cloudbase/coriolis
coriolis/wsman.py
1
6173
# Copyright 2016 Cloudbase Solutions Srl # All Rights Reserved. import base64 from oslo_log import log as logging import requests from winrm import protocol from winrm import exceptions as winrm_exceptions from coriolis import exception from coriolis import utils AUTH_BASIC = "basic" AUTH_KERBEROS = "kerberos" AUTH_CERTIFICATE = "certificate" CODEPAGE_UTF8 = 65001 DEFAULT_TIMEOUT = 3600 LOG = logging.getLogger(__name__) class WSManConnection(object): def __init__(self, timeout=None): self._protocol = None self._conn_timeout = int(timeout or DEFAULT_TIMEOUT) EOL = "\r\n" @utils.retry_on_error() def connect(self, url, username, auth=None, password=None, cert_pem=None, cert_key_pem=None): if not auth: if cert_pem: auth = AUTH_CERTIFICATE else: auth = AUTH_BASIC auth_transport_map = {AUTH_BASIC: 'plaintext', AUTH_KERBEROS: 'kerberos', AUTH_CERTIFICATE: 'ssl'} self._protocol = protocol.Protocol( endpoint=url, transport=auth_transport_map[auth], username=username, password=password, cert_pem=cert_pem, cert_key_pem=cert_key_pem) @classmethod def from_connection_info(cls, connection_info, timeout=DEFAULT_TIMEOUT): """ Returns a wsman.WSManConnection object for the provided conn info. """ if not isinstance(connection_info, dict): raise ValueError( "WSMan connection must be a dict. Got type '%s', value: %s" % ( type(connection_info), connection_info)) required_keys = ["ip", "username", "password"] missing = [key for key in required_keys if key not in connection_info] if missing: raise ValueError( "The following keys were missing from WSMan connection info %s. " "Got: %s" % (missing, connection_info)) host = connection_info["ip"] port = connection_info.get("port", 5986) username = connection_info["username"] password = connection_info.get("password") cert_pem = connection_info.get("cert_pem") cert_key_pem = connection_info.get("cert_key_pem") url = "https://%s:%s/wsman" % (host, port) LOG.info("Connection info: %s", str(connection_info)) LOG.info("Waiting for connectivity on host: %(host)s:%(port)s", {"host": host, "port": port}) utils.wait_for_port_connectivity(host, port) conn = cls(timeout) conn.connect(url=url, username=username, password=password, cert_pem=cert_pem, cert_key_pem=cert_key_pem) return conn def disconnect(self): self._protocol = None def set_timeout(self, timeout): if timeout: self._protocol.timeout = timeout self._protocol.transport.timeout = timeout @utils.retry_on_error( terminal_exceptions=[winrm_exceptions.InvalidCredentialsError, exception.OSMorphingWinRMOperationTimeout]) def _exec_command(self, cmd, args=[], timeout=None): timeout = int(timeout or self._conn_timeout) self.set_timeout(timeout) shell_id = self._protocol.open_shell(codepage=CODEPAGE_UTF8) try: command_id = self._protocol.run_command(shell_id, cmd, args) try: (std_out, std_err, exit_code) = self._protocol.get_command_output( shell_id, command_id) except requests.exceptions.ReadTimeout: raise exception.OSMorphingWinRMOperationTimeout( cmd=("%s %s" % (cmd, " ".join(args))), timeout=timeout) finally: self._protocol.cleanup_command(shell_id, command_id) return (std_out, std_err, exit_code) finally: self._protocol.close_shell(shell_id) def exec_command(self, cmd, args=[], timeout=None): LOG.debug("Executing WSMAN command: %s", str([cmd] + args)) std_out, std_err, exit_code = self._exec_command( cmd, args, timeout=timeout) if exit_code: raise exception.CoriolisException( "Command \"%s\" failed with exit code: %s\n" "stdout: %s\nstd_err: %s" % (str([cmd] + args), exit_code, std_out, std_err)) return std_out def 
exec_ps_command(self, cmd, ignore_stdout=False, timeout=None): LOG.debug("Executing PS command: %s", cmd) base64_cmd = base64.b64encode(cmd.encode('utf-16le')).decode() return self.exec_command( "powershell.exe", ["-EncodedCommand", base64_cmd], timeout=timeout)[:-2] def test_path(self, remote_path): ret_val = self.exec_ps_command("Test-Path -Path \"%s\"" % remote_path) return ret_val == "True" def download_file(self, url, remote_path): LOG.debug("Downloading: \"%(url)s\" to \"%(path)s\"", {"url": url, "path": remote_path}) # Nano Server does not have Invoke-WebRequest and additionally # this is also faster self.exec_ps_command( "[Net.ServicePointManager]::SecurityProtocol = " "[Net.SecurityProtocolType]::Tls12;" "if(!([System.Management.Automation.PSTypeName]'" "System.Net.Http.HttpClient').Type) {$assembly = " "[System.Reflection.Assembly]::LoadWithPartialName(" "'System.Net.Http')}; (new-object System.Net.Http.HttpClient)." "GetStreamAsync('%(url)s').Result.CopyTo(" "(New-Object IO.FileStream '%(outfile)s', Create, Write, None), " "1MB)" % {"url": url, "outfile": remote_path}, ignore_stdout=True) def write_file(self, remote_path, content): self.exec_ps_command( "[IO.File]::WriteAllBytes('%s', [Convert]::FromBase64String('%s'))" % (remote_path, base64.b64encode(content).decode()), ignore_stdout=True)
agpl-3.0
-159,369,638,874,737,570
36.640244
82
0.581403
false
3.872647
false
false
false
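
A minimal sketch of driving the WSManConnection from the coriolis record above; host, port and credentials are placeholders, not values from the original file:

from coriolis.wsman import WSManConnection

connection_info = {
    "ip": "192.0.2.10",           # placeholder host
    "port": 5986,
    "username": "Administrator",  # placeholder credentials
    "password": "secret",
}

conn = WSManConnection.from_connection_info(connection_info, timeout=600)
try:
    # exec_command raises CoriolisException on a non-zero exit code
    print(conn.exec_command("hostname.exe"))
    print(conn.exec_ps_command("Get-Date | Out-String"))
finally:
    conn.disconnect()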
kirienko/gourmet
src/gourmet/importers/plaintext_importer.py
1
4803
import re from gourmet import check_encodings from gourmet.gdebug import debug from gourmet.i18n import _ from gourmet.importers import importer class TextImporter (importer.Importer): ATTR_DICT = {'Recipe By':'source', 'Serving Size':'servings', 'Preparation Time':'preptime', 'Categories':'category', } end_of_paragraph_length = 60 def __init__ (self, filename, conv=None): self.fn = filename self.rec = {} self.ing = {} self.compile_regexps() importer.Importer.__init__(self,conv=conv) def pre_run (self): self.lines = check_encodings.get_file(self.fn) self.total_lines = len(self.lines) print('we have ',self.total_lines,'lines in file',self.fn) def do_run (self): if not hasattr(self,'lines'): raise Exception("pre_run has not been run!") for n in range(self.total_lines): l=self.lines[n] if n % 15 == 0: prog = float(n)/float(self.total_lines) msg = _("Imported %s recipes.")%(len(self.added_recs)) self.emit('progress',prog,msg) self.handle_line(l) # commit the last rec if need be if self.rec: self.commit_rec() importer.Importer.do_run(self) def handle_line (self, l): raise NotImplementedError def compile_regexps (self): self.blank_matcher = re.compile(r"^\s*$") # out unwrap regexp looks for a line with no meaningful characters, or a line that starts in # ALLCAPS or a line that is only space. (we use this with .split() to break text up into # paragraph breaks. self.unwrap_matcher = re.compile(r'\n\W*\n') self.find_header_breaks_matcher = re.compile(r'\s+(?=[A-Z][A-Z][A-Z]+:.*)') def unwrap_lines (self, blob): if blob.find("") >= 0: debug('Using built-in paragraph markers',1) # then we have paragraph markers in the text already outblob = " ".join(blob.split("\n")) # get rid of line breaks lines = outblob.split("") # split text up into paragraphs outblob = "\n".join(lines) # insert linebreaks where paragraphs were return outblob outblob = "" newline = True for l in blob.split('\n'): debug('examining %s'%l,3) if re.match(r'^\W*$',l): # ignore repeated nonword characters (hyphens, stars, etc.) outblob += "\n" continue # if we have a non-word character at the start of the line, # we assume we need to keep the newline. if len(l)>=3 and re.match(r'(\W|[0-9])',l[2]): debug('Match non-word character; add newline before: %s'%l,4) outblob += "\n" outblob += l newline = False continue # if we are continuing an old line, we add a space # (because we're generally stripping all spaces when # we write) if not newline: outblob += " " hmatch = self.find_header_breaks_matcher.search(l) if hmatch: # if there's a header in the middle, we go ahead # and start a new line debug('Splitting at header in line: %s'%l,4) outblob += l[:hmatch.start()] outblob += "\n" outblob += l[hmatch.start():] continue #else... outblob += l.strip() if len(l) < self.end_of_paragraph_length: #60 is our hard-coded end-o-paragraph length debug('line < %s characters, adding newline.'%self.end_of_paragraph_length,4) outblob += "\n" newline = True else: newline = False return outblob class Tester (importer.Tester): def __init__ (self): importer.Tester.__init__(self,regexp=MASTERCOOK_START_REGEXP) self.not_me = "<[?]?(xml|mx2|RcpE|RTxt)[^>]*>" def test (self, filename): """Test file named filename. filename can also be a file object. 
""" if not hasattr(self,'matcher'): self.matcher=re.compile(self.regexp) self.not_matcher = re.compile(self.not_me) if isinstance(self.ofi, str): self.ofi = open(filename,'r') l = self.ofi.readline() while l: if self.not_matcher.match(l): self.ofi.close() return False if self.matcher.match(l): self.ofi.close() return True l = self.ofi.readline() self.ofi.close()
gpl-2.0
-4,688,046,965,670,273,000
36.818898
100
0.531335
false
3.940115
false
false
false
mark-me/Pi-Jukebox
venv/Lib/site-packages/pygame/ftfont.py
1
6239
"""pygame module for loading and rendering fonts (freetype alternative)""" __all__ = ['Font', 'init', 'quit', 'get_default_font', 'get_init', 'SysFont'] from pygame._freetype import init, Font as _Font, get_default_resolution from pygame._freetype import quit, get_default_font, get_init as _get_init from pygame._freetype import __PYGAMEinit__ from pygame.sysfont import match_font, get_fonts, SysFont as _SysFont from pygame import encode_file_path from pygame.compat import bytes_, unicode_, as_unicode, as_bytes from pygame import Surface as _Surface, Color as _Color, SRCALPHA as _SRCALPHA class Font(_Font): """Font(filename, size) -> Font Font(object, size) -> Font create a new Font object from a file (freetype alternative) This Font type differs from font.Font in that it can render glyphs for Unicode code points in the supplementary planes (> 0xFFFF). """ __encode_file_path = staticmethod(encode_file_path) __get_default_resolution = staticmethod(get_default_resolution) __default_font = encode_file_path(get_default_font()) __unull = as_unicode(r"\x00") __bnull = as_bytes("\x00") def __init__(self, file, size=-1): if size <= 1: size = 1 if isinstance(file, unicode_): try: bfile = self.__encode_file_path(file, ValueError) except ValueError: bfile = '' else: bfile = file if isinstance(bfile, bytes_) and bfile == self.__default_font: file = None if file is None: resolution = int(self.__get_default_resolution() * 0.6875) if resolution == 0: kwds['resolution'] = 1 else: resolution = 0 super(Font, self).__init__(file, size=size, resolution=resolution) self.strength = 1.0 / 12.0 self.kerning = False self.origin = True self.pad = True self.ucs4 = True self.underline_adjustment = 1.0 def render(self, text, antialias, color, background=None): """render(text, antialias, color, background=None) -> Surface draw text on a new Surface""" if text is None: text = "" if (isinstance(text, unicode_) and # conditional and self.__unull in text): raise ValueError("A null character was found in the text") if (isinstance(text, bytes_) and # conditional and self.__bnull in text): raise ValueError("A null character was found in the text") save_antialiased = self.antialiased self.antialiased = bool(antialias) try: s, r = super(Font, self).render(text, color, background) return s finally: self.antialiased = save_antialiased def set_bold(self, value): """set_bold(bool) -> None enable fake rendering of bold text""" self.wide = bool(value) def get_bold(self): """get_bold() -> bool check if text will be rendered bold""" return self.wide def set_italic(self, value): """set_italic(bool) -> None enable fake rendering of italic text""" self.oblique = bool(value) def get_italic(self): """get_italic() -> bool check if the text will be rendered italic""" return self.oblique def set_underline(self, value): """set_underline(bool) -> None control if text is rendered with an underline""" self.underline = bool(value) def get_underline(self): """set_bold(bool) -> None enable fake rendering of bold text""" return self.underline def metrics(self, text): """metrics(text) -> list Gets the metrics for each character in the pased string.""" return self.get_metrics(text) def get_ascent(self): """get_ascent() -> int get the ascent of the font""" return self.get_sized_ascender() def get_descent(self): """get_descent() -> int get the descent of the font""" return self.get_sized_descender() def get_height(self): """get_height() -> int get the height of the font""" return self.get_sized_ascender() - self.get_sized_descender() + 1 def 
get_linesize(self): """get_linesize() -> int get the line space of the font text""" return self.get_sized_height(); def size(self, text): """size(text) -> (width, height) determine the amount of space needed to render text""" return self.get_rect(text).size FontType = Font def get_init(): """get_init() -> bool true if the font module is initialized""" return _get_init() def SysFont(name, size, bold=0, italic=0, constructor=None): """pygame.ftfont.SysFont(name, size, bold=False, italic=False, constructor=None) -> Font create a pygame Font from system font resources (freetype alternative) This will search the system fonts for the given font name. You can also enable bold or italic styles, and the appropriate system font will be selected if available. This will always return a valid Font object, and will fallback on the builtin pygame font if the given font is not found. Name can also be a comma separated list of names, in which case set of names will be searched in order. Pygame uses a small set of common font aliases, if the specific font you ask for is not available, a reasonable alternative may be used. if optional contructor is provided, it must be a function with signature constructor(fontpath, size, bold, italic) which returns a Font instance. If None, a pygame.ftfont.Font object is created. """ if constructor is None: def constructor(fontpath, size, bold, italic): font = Font(fontpath, size) font.set_bold(bold) font.set_italic(italic) return font return _SysFont(name, size, bold, italic, constructor) del _Font, get_default_resolution, encode_file_path, as_unicode, as_bytes
agpl-3.0
7,635,080,894,809,608,000
32.363636
92
0.608431
false
4.056567
false
false
false
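
An illustrative sketch, assuming a display can be created, of rendering text with the freetype-backed SysFont from the pygame record above:

import pygame
import pygame.ftfont

pygame.init()
pygame.ftfont.init()
screen = pygame.display.set_mode((320, 120))

# falls back to the builtin pygame font if no matching system font is found
font = pygame.ftfont.SysFont('dejavusans,arial', 24, bold=True)
text_surface = font.render("Hello FreeType", True, (255, 255, 255), (0, 0, 0))
screen.blit(text_surface, (10, 10))
pygame.display.flip()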
google/ffn
ffn/utils/vector_pb2.py
1
15524
# Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Generated by the protocol buffer compiler. DO NOT EDIT! # source: utils/vector.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='utils/vector.proto', package='ffn.proto', syntax='proto2', serialized_pb=_b('\n\x12utils/vector.proto\x12\tffn.proto\" \n\x08Vector2d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\" \n\x08Vector2i\x12\t\n\x01x\x18\x01 \x01(\x05\x12\t\n\x01y\x18\x02 \x01(\x05\"+\n\x08Vector3d\x12\t\n\x01x\x18\x01 \x01(\x01\x12\t\n\x01y\x18\x02 \x01(\x01\x12\t\n\x01z\x18\x03 \x01(\x01\"+\n\x08Vector3f\x12\t\n\x01x\x18\x01 \x01(\x02\x12\t\n\x01y\x18\x02 \x01(\x02\x12\t\n\x01z\x18\x03 \x01(\x02\"+\n\x08Vector3j\x12\t\n\x01x\x18\x01 \x01(\x03\x12\t\n\x01y\x18\x02 \x01(\x03\x12\t\n\x01z\x18\x03 \x01(\x03\"4\n\x0cVector2dList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector2d\"4\n\x0cVector2iList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector2i\"4\n\x0cVector3dList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector3d\"4\n\x0cVector3fList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector3f\"4\n\x0cVector3jList\x12$\n\x07vectors\x18\x01 \x03(\x0b\x32\x13.ffn.proto.Vector3j') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _VECTOR2D = _descriptor.Descriptor( name='Vector2d', full_name='ffn.proto.Vector2d', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='x', full_name='ffn.proto.Vector2d.x', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='y', full_name='ffn.proto.Vector2d.y', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=33, serialized_end=65, ) _VECTOR2I = _descriptor.Descriptor( name='Vector2i', full_name='ffn.proto.Vector2i', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='x', full_name='ffn.proto.Vector2i.x', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, 
is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='y', full_name='ffn.proto.Vector2i.y', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=67, serialized_end=99, ) _VECTOR3D = _descriptor.Descriptor( name='Vector3d', full_name='ffn.proto.Vector3d', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='x', full_name='ffn.proto.Vector3d.x', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='y', full_name='ffn.proto.Vector3d.y', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='z', full_name='ffn.proto.Vector3d.z', index=2, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=101, serialized_end=144, ) _VECTOR3F = _descriptor.Descriptor( name='Vector3f', full_name='ffn.proto.Vector3f', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='x', full_name='ffn.proto.Vector3f.x', index=0, number=1, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='y', full_name='ffn.proto.Vector3f.y', index=1, number=2, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='z', full_name='ffn.proto.Vector3f.z', index=2, number=3, type=2, cpp_type=6, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=146, serialized_end=189, ) _VECTOR3J = _descriptor.Descriptor( name='Vector3j', full_name='ffn.proto.Vector3j', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='x', full_name='ffn.proto.Vector3j.x', index=0, number=1, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='y', full_name='ffn.proto.Vector3j.y', index=1, number=2, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, options=None), _descriptor.FieldDescriptor( name='z', full_name='ffn.proto.Vector3j.z', index=2, number=3, type=3, cpp_type=2, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=191, serialized_end=234, ) _VECTOR2DLIST = _descriptor.Descriptor( name='Vector2dList', full_name='ffn.proto.Vector2dList', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='vectors', full_name='ffn.proto.Vector2dList.vectors', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=236, serialized_end=288, ) _VECTOR2ILIST = _descriptor.Descriptor( name='Vector2iList', full_name='ffn.proto.Vector2iList', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='vectors', full_name='ffn.proto.Vector2iList.vectors', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=290, serialized_end=342, ) _VECTOR3DLIST = _descriptor.Descriptor( name='Vector3dList', full_name='ffn.proto.Vector3dList', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='vectors', full_name='ffn.proto.Vector3dList.vectors', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=344, serialized_end=396, ) _VECTOR3FLIST = _descriptor.Descriptor( name='Vector3fList', full_name='ffn.proto.Vector3fList', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='vectors', full_name='ffn.proto.Vector3fList.vectors', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=398, serialized_end=450, ) _VECTOR3JLIST = _descriptor.Descriptor( name='Vector3jList', full_name='ffn.proto.Vector3jList', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='vectors', full_name='ffn.proto.Vector3jList.vectors', index=0, number=1, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], 
options=None, is_extendable=False, syntax='proto2', extension_ranges=[], oneofs=[ ], serialized_start=452, serialized_end=504, ) _VECTOR2DLIST.fields_by_name['vectors'].message_type = _VECTOR2D _VECTOR2ILIST.fields_by_name['vectors'].message_type = _VECTOR2I _VECTOR3DLIST.fields_by_name['vectors'].message_type = _VECTOR3D _VECTOR3FLIST.fields_by_name['vectors'].message_type = _VECTOR3F _VECTOR3JLIST.fields_by_name['vectors'].message_type = _VECTOR3J DESCRIPTOR.message_types_by_name['Vector2d'] = _VECTOR2D DESCRIPTOR.message_types_by_name['Vector2i'] = _VECTOR2I DESCRIPTOR.message_types_by_name['Vector3d'] = _VECTOR3D DESCRIPTOR.message_types_by_name['Vector3f'] = _VECTOR3F DESCRIPTOR.message_types_by_name['Vector3j'] = _VECTOR3J DESCRIPTOR.message_types_by_name['Vector2dList'] = _VECTOR2DLIST DESCRIPTOR.message_types_by_name['Vector2iList'] = _VECTOR2ILIST DESCRIPTOR.message_types_by_name['Vector3dList'] = _VECTOR3DLIST DESCRIPTOR.message_types_by_name['Vector3fList'] = _VECTOR3FLIST DESCRIPTOR.message_types_by_name['Vector3jList'] = _VECTOR3JLIST Vector2d = _reflection.GeneratedProtocolMessageType('Vector2d', (_message.Message,), dict( DESCRIPTOR = _VECTOR2D, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector2d) )) _sym_db.RegisterMessage(Vector2d) Vector2i = _reflection.GeneratedProtocolMessageType('Vector2i', (_message.Message,), dict( DESCRIPTOR = _VECTOR2I, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector2i) )) _sym_db.RegisterMessage(Vector2i) Vector3d = _reflection.GeneratedProtocolMessageType('Vector3d', (_message.Message,), dict( DESCRIPTOR = _VECTOR3D, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector3d) )) _sym_db.RegisterMessage(Vector3d) Vector3f = _reflection.GeneratedProtocolMessageType('Vector3f', (_message.Message,), dict( DESCRIPTOR = _VECTOR3F, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector3f) )) _sym_db.RegisterMessage(Vector3f) Vector3j = _reflection.GeneratedProtocolMessageType('Vector3j', (_message.Message,), dict( DESCRIPTOR = _VECTOR3J, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector3j) )) _sym_db.RegisterMessage(Vector3j) Vector2dList = _reflection.GeneratedProtocolMessageType('Vector2dList', (_message.Message,), dict( DESCRIPTOR = _VECTOR2DLIST, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector2dList) )) _sym_db.RegisterMessage(Vector2dList) Vector2iList = _reflection.GeneratedProtocolMessageType('Vector2iList', (_message.Message,), dict( DESCRIPTOR = _VECTOR2ILIST, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector2iList) )) _sym_db.RegisterMessage(Vector2iList) Vector3dList = _reflection.GeneratedProtocolMessageType('Vector3dList', (_message.Message,), dict( DESCRIPTOR = _VECTOR3DLIST, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector3dList) )) _sym_db.RegisterMessage(Vector3dList) Vector3fList = _reflection.GeneratedProtocolMessageType('Vector3fList', (_message.Message,), dict( DESCRIPTOR = _VECTOR3FLIST, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector3fList) )) _sym_db.RegisterMessage(Vector3fList) Vector3jList = _reflection.GeneratedProtocolMessageType('Vector3jList', (_message.Message,), dict( DESCRIPTOR = _VECTOR3JLIST, __module__ = 'utils.vector_pb2' # @@protoc_insertion_point(class_scope:ffn.proto.Vector3jList) 
)) _sym_db.RegisterMessage(Vector3jList) # @@protoc_insertion_point(module_scope)
apache-2.0
395,596,101,208,389,570
30.361616
969
0.690157
false
2.99749
false
false
false
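The generated vector_pb2 module above only defines plain protobuf message containers. A quick, hedged usage sketch follows; the import path `utils.vector_pb2` is taken from the `__module__` fields in the generated code, and the rest is the standard protobuf message API.

# Illustrative sketch only; assumes the generated module is importable
# as utils.vector_pb2, matching the __module__ values set above.
from utils import vector_pb2

v = vector_pb2.Vector3d(x=1.0, y=2.0, z=3.0)   # build a message
payload = v.SerializeToString()                # serialize to wire format

restored = vector_pb2.Vector3d()
restored.ParseFromString(payload)              # round-trip the bytes
assert restored.x == 1.0

lst = vector_pb2.Vector3dList()
lst.vectors.add(x=4.0, y=5.0, z=6.0)           # repeated message field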
poppogbr/genropy
packages/hosting/webpages/client.py
1
8379
#!/usr/bin/env python # encoding: utf-8 """ Created by Softwell on 2008-07-10. Copyright (c) 2008 Softwell. All rights reserved. """ # --------------------------- GnrWebPage Standard header --------------------------- from gnr.core.gnrbag import Bag class GnrCustomWebPage(object): maintable = 'hosting.client' py_requires = """public:Public,standard_tables:TableHandler, gnrcomponents/selectionhandler, hosted:HostedClient,hosted:HostedInstance""" ######################## STANDARD TABLE OVERRIDDEN METHODS ############### def windowTitle(self): return '!!Client' def pageAuthTags(self, method=None, **kwargs): return 'owner' def tableWriteTags(self): return 'owner' def tableDeleteTags(self): return 'owner' def barTitle(self): return '!!Client' def lstBase(self, struct): r = struct.view().rows() r.fieldcell('code', width='10em') r.fieldcell('@user_id.username', name='User', width='10em') self.hosted_card_columns(r) return struct def conditionBase(self): pass def queryBase(self): return dict(column='code', op='contains', val='%') def orderBase(self): return 'code' ############################## FORM METHODS ################################## def formBase(self, parentBC, disabled=False, **kwargs): bc = parentBC.borderContainer(**kwargs) top = bc.borderContainer(region='top', height='120px') right = top.contentPane(region='right', width='350px') self.hosted_card_linker(right, disabled=disabled) center = top.contentPane(region='center') fb = center.formbuilder(cols=1, border_spacing='3px', fld_width='100%', width='350px', disabled=disabled) fb.field('code') fb.field('user_id') tc = bc.tabContainer(region='center') self.main_clienttab(tc.borderContainer(title='Info'), disabled) for pkgname, handler in [(c.split('_')[1], getattr(self, c)) for c in dir(self) if c.startswith('hostedclient_')]: handler(tc.contentPane(datapath='.hosted_data.%s' % pkgname, title=self.db.packages[pkgname].name_long, nodeId='hosted_client_data_%s' % pkgname, sqlContextName='sql_record_hosted_client_%s' % pkgname, sqlContextRoot='form.record.hosted_client_data')) def main_clienttab(self, bc, disabled): self.selectionHandler(bc.borderContainer(region='center'), label='!!Instances', datapath="instances", nodeId='instances', table='hosting.instance', struct=self.struct_instances, reloader='^form.record.id', hiddencolumns='$site_path', reload_onSaved=False, selectionPars=dict(where='$client_id=:c_id', c_id='=form.record.id', applymethod='apply_instances_selection', order_by='$code'), dialogPars=dict(height='400px', width='600px', formCb=self.instance_form, onSaved='genro.fireAfter("#instances.reload",true,5000)', toolbarPars=dict(lock_action=True, add_action=True, del_action=True, save_action=True), default_client_id='=form.record.id', saveKwargs=dict(_lockScreen=True, saveAlways=True))) def instance_form(self, parentBC, disabled=None, table=None, **kwargs): tc = parentBC.tabContainer(**kwargs) self.main_instancetab(tc.contentPane(title='Info', _class='pbl_roundedGroup', margin='5px'), table=table, disabled=disabled) for pkgname, handler in [(c.split('_')[1], getattr(self, c)) for c in dir(self) if c.startswith('hostedinstance_')]: handler(tc.contentPane(datapath='.hosted_data.%s' % pkgname, title=self.db.packages[pkgname].name_long, nodeId='hosted_instance_data_%s' % pkgname, sqlContextName='sql_record_hosted_instance_%s' % pkgname, sqlContextRoot='instances.dlg.record.hosted_data.%s' % pkgname)) def main_instancetab(self, parent, disabled=None, table=None): bc = parent.borderContainer() pane = bc.contentPane(region='top') 
pane.div('!!Manage instances', _class='pbl_roundedGroupLabel') fb = pane.formbuilder(cols=1, border_spacing='6px', dbtable=table, disabled=disabled) fb.field('code', width='15em', lbl='!!Instance Name') pane.dataRpc('.$creation_result', 'createInst', instance_code='=.code', instance_exists='=.$instance_exists', site_exists='=.$site_exists', _fired='^.$create', _onResult='FIRE .$created', _userChanges=True) pane.dataController(""" if (site_path){ SET .site_path=site_path; SET .$site_exists=true; } if (instance_path){ SET .path=instance_path; SET .$instance_exists=true; } """, site_path='=.$creation_result.site_path', instance_path='=.$creation_result.instance_path', _fired='^.$created', _userChanges=True) def struct(struct): r = struct.view().rows() r.cell('type', name='Slot type', width='15em') r.cell('qty', name='Qty', width='4em', dtype='I') return struct iv = self.includedViewBox(bc.borderContainer(region='center'), label='!!Slot configuration', storepath='.slot_configuration', struct=struct, datamode='bag', autoWidth=True, add_action=True, del_action=True) gridEditor = iv.gridEditor() gridEditor.dbSelect(gridcell='type', dbtable='hosting.slot_type', columns='$code,$description', rowcaption='$code', exclude=True, hasDownArrow=True) gridEditor.numberTextBox(gridcell='qty') def onLoading_hosting_instance(self, record, newrecord, loadingParameters, recInfo): tblinstance = self.db.table('hosting.instance') instance_exists = self.db.packages['hosting'].instance_exists(record['code']) site_exists = self.db.packages['hosting'].site_exists(record['code']) record.setItem('$instance_exists', instance_exists) record.setItem('$site_exists', site_exists) def rpc_apply_instances_selection(self, selection, **kwargs): tblinstance = self.db.table('hosting.instance') def apply_row(row): instance_exists = self.db.packages['hosting'].instance_exists(row['code']) site_exists = self.db.packages['hosting'].site_exists(row['code']) if site_exists and instance_exists: return dict(create='<div class="greenLight"></div>') else: return dict(create='<div class="yellowLight"></div>') selection.apply(apply_row) def rpc_createInst(self, instance_code=None, instance_exists=None, site_exists=None): result = Bag() instancetbl = self.db.table('hosting.instance') if not instance_exists: result['instance_path'] = instancetbl.create_instance(instance_code, self.site.instance_path, self.site.gnrapp.config) if not site_exists: result['site_path'] = instancetbl.create_site(instance_code, self.site.site_path, self.site.config) return result def struct_instances(self, struct): r = struct.view().rows() r.fieldcell('code', width='10em') r.fieldcell('path', width='20em') r.cell('create', calculated=True, name='!!Status', width='10em') return struct
lgpl-2.1
4,752,140,607,337,308,000
47.155172
117
0.546963
false
4.229682
false
false
false
hugohmk/Epidemic-Emulator
main.py
1
7208
from epidemic_emulator import node from datetime import datetime import platform import argparse import time import os import matplotlib.pyplot as plt import random def parse_network(f, node_id, topology = "clique"): neighbors = [] nd = None t = datetime.now() t = t-t net = [] index = -1 cnt = 0 for i in f: i = i.rstrip("\n").split("|") if len(i)<4: continue u = (i[0],(i[1],int(i[2])),[(i[3],t)]) if i[0]==node_id: nd = u index = cnt net.append(u) cnt+=1 f.close() # clique if topology == "clique": neighbors = [i for i in net if i[0] != node_id] # star elif topology == "star": if index > 0: neighbors = [net[0]] else: neighbors = net[1:] return neighbors,nd def simulation_controller(args,nd,network): # Example nd value: #('9', ('127.0.0.1', 9179), [('S', datetime.timedelta(0))]) # # network is a tuple containing every node identifier constructed from # args.network (default=network.txt) file r = args.recovery_rate e = args.endogenous_rate x = args.exogenous_rate if nd is not None: with node.Node(r,e,x) as a: a.start(nd, network) if args.interaction == 1: try: help_text = """>> Commands: 0 (help) -> print this 1 (print current) -> print current network state 2 (print history) -> print network history 3 (end) -> send shutdown message to all nodes 4 (display state) -> display current network state 5 (display history) -> display network history """ print help_text while True: opt = raw_input(">> Insert command: ") if opt == "0": print help_text elif opt == "1": #print a.network_state(),"\n" a.print_state() elif opt == "2": #print a.network_history(),"\n" a.print_history() elif opt == "3": a.display_history() a.network_shutdown() a.stop() break elif opt == "4": a.display_state() elif opt == "5": a.display_history() else: print "Invalid input\n" except: a.network_shutdown() a.stop() finally: a.network_shutdown() a.stop() elif args.interaction > 1: print("Running simulation for %d seconds." 
% args.interaction) time.sleep(args.interaction) #a.display_history() simdata = a.save_simulation_data() a.network_shutdown() a.stop() return simdata else: try: while not a.stopped(): time.sleep(2) except: a.stop() finally: a.stop() def process_data(simdata,repetitions,simulation_time): simresults = [[-1 for t in range(simulation_time+1)] for x in range(repetitions)] print_stuff = 1 for k in range(repetitions): if print_stuff: print("") print("Run #%d" % (k+1)) print("time\tinfected count") t = 0 for event in simdata[k]: if print_stuff: print("%.2f\t%d" % (event[0],event[1])) time = int(event[0]) infected_count = event[1] if time < t: continue elif t < simulation_time+1: if print_stuff: print("* %.2f" % event[0]) while t <= time: simresults[k][t] = infected_count t = t+1 while t < simulation_time+1: simresults[k][t] = infected_count t = t+1 if print_stuff: print("") print("Processed output:") print("time\tinfected count") for t in range(simulation_time+1): print("%d\t%d" % (t,simresults[k][t])) average_results = [0.0 for t in range(simulation_time+1)] for t in range(simulation_time+1): for k in range(repetitions): average_results[t] = average_results[t] + simresults[k][t] average_results[t] = float(average_results[t]) / repetitions print(average_results) plt.plot(list(range(0,simulation_time+1)),average_results,'-o') axes = plt.gca() axes.set_xlim([0,simulation_time]) #axes.set_ylim([0,10]) plt.xlabel("Seconds") plt.ylabel("Infected nodes") plt.savefig("average_simulation.pdf") if __name__ == "__main__": dir_path = os.path.dirname(os.path.realpath(__file__)) dir_path_unix = dir_path.replace("\\","/") if (platform.system()!="Windows"): dir_path = dir_path_unix parser = argparse.ArgumentParser() parser.add_argument("-id","--identifier",required=True, help="Node identifier") parser.add_argument("-n","--network",type=argparse.FileType('r'), default = dir_path_unix+"/network.txt", help="File that contains the network's description; each line presents node_id|node_ip|port_number|initial_state") # parser.add_argument("-i","--interactive",type=int,default=0, # help="Interactive mode") parser.add_argument("-i","--interaction",type=int,default=0, help="Interaction mode: default (0), interactive (1), simulation (2)") parser.add_argument("-r","--recovery_rate",type=float,#default=1.0, help="Simulation parameter: recovery_rate") parser.add_argument("-e","--endogenous_rate",type=float,#default=1.0, help="Simulation parameter: endogenous_infection_rate") parser.add_argument("-x","--exogenous_rate",type=float,#default=1e-6, help="Simulation parameter: exogenous_infection_rate") parser.add_argument("-t","--topology",choices=["clique","star"],default="clique", help="Network topology: clique or star") args = parser.parse_args() network = {} if args.network is not None: network,nd = parse_network(args.network, args.identifier, args.topology) simulation_time = args.interaction repetitions = 1 simdata = [] for i in range(repetitions): simdata.append(simulation_controller(args,nd,network)) if args.identifier == '0': process_data(simdata,repetitions,simulation_time)
mit
6,437,179,172,426,943,000
33.161137
138
0.489734
false
4.21028
false
false
false
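For reference, `parse_network` in main.py above expects one node per line in the `node_id|node_ip|port_number|initial_state` format described in its argparse help. A minimal sketch, with invented file contents and node ids, and assuming `parse_network` from the module above is in scope:

# Hypothetical three-node network description for parse_network.
sample = ("0|127.0.0.1|9170|S\n"
          "1|127.0.0.1|9171|S\n"
          "2|127.0.0.1|9172|I\n")

with open("network.txt", "w") as f:
    f.write(sample)

# parse_network closes the handle it is given, so pass a fresh open file.
neighbors, nd = parse_network(open("network.txt"), "1", topology="star")
print(nd)         # ('1', ('127.0.0.1', 9171), [('S', datetime.timedelta(0))])
print(neighbors)  # with the star topology, non-central nodes only see node '0'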
JazzeYoung/VeryDeepAutoEncoder
theano/gpuarray/opt.py
1
39678
from __future__ import absolute_import, print_function, division import copy import numpy import logging import pdb from six.moves import xrange import theano from theano import tensor, scalar, gof, config from theano.compile import optdb from theano.compile.ops import shape_i from theano.gof import (local_optimizer, EquilibriumDB, TopoOptimizer, SequenceDB, Optimizer, toolbox) from theano.gof.optdb import LocalGroupDB from theano.ifelse import IfElse from theano.scalar.basic import Scalar, Pow, Cast from theano.scan_module import scan_utils, scan_op, scan_opt from theano.tensor.nnet.conv import ConvOp from theano.tensor.nnet.blocksparse import SparseBlockGemv, SparseBlockOuter from theano.tensor.nnet.abstract_conv import (AbstractConv2d, AbstractConv2d_gradWeights, AbstractConv2d_gradInputs) from theano.tests.breakpoint import PdbBreakpoint from .type import (GpuArrayType, GpuArrayConstant, get_context, ContextNotDefined) from .basic_ops import (as_gpuarray_variable, infer_context_name, host_from_gpu, GpuToGpu, HostFromGpu, GpuFromHost, GpuSplit, GpuContiguous, gpu_contiguous, GpuAlloc, GpuAllocEmpty, GpuReshape, GpuEye, gpu_join, GpuJoin) from .blas import (gpu_dot22, GpuGemm, GpuGer, GpuGemmBatch, gpugemm_no_inplace, gpugemm_inplace, gpugemmbatch_no_inplace, gpugemv_no_inplace, gpugemv_inplace) from .blocksparse import (GpuSparseBlockGemv, GpuSparseBlockOuter, gpu_sparse_block_outer, gpu_sparse_block_outer_inplace, gpu_sparse_block_gemv, gpu_sparse_block_gemv_inplace) from .nnet import (gpu_crossentropy_softmax_1hot_with_bias_dx, gpu_crossentropy_softmax_argmax_1hot_with_bias, gpu_softmax_with_bias, gpu_softmax) from .elemwise import (GpuElemwise, GpuDimShuffle, GpuCAReduceCuda, GpuCAReduceCPY) from .subtensor import (GpuIncSubtensor, GpuSubtensor, GpuAdvancedSubtensor1, GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20) from .opt_util import alpha_merge, output_merge _logger = logging.getLogger("theano.gpuarray.opt") gpu_optimizer = EquilibriumDB() gpu_cut_copies = EquilibriumDB() gpu_seqopt = SequenceDB() # Don't register this right now conv_groupopt = LocalGroupDB() conv_groupopt.__name__ = "gpua_conv_opts" gpu_seqopt.register('gpuarray_local_optimiziations', gpu_optimizer, 1, 'fast_compile', 'fast_run', 'gpuarray') gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2, 'fast_compile', 'fast_run', 'gpuarray') # do not add 'fast_run' to these two as this would always enable gpuarray mode optdb.register('gpuarray_opt', gpu_seqopt, optdb.__position__.get('add_destroy_handler', 49.5) - 1, 'gpuarray') def register_opt(*tags, **kwargs): def f(local_opt): name = (kwargs and kwargs.pop('name')) or local_opt.__name__ gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags) return local_opt return f def register_inplace(*tags, **kwargs): def f(local_opt): name = (kwargs and kwargs.pop('name')) or local_opt.__name__ optdb.register( name, TopoOptimizer( local_opt, failure_callback=TopoOptimizer.warn_inplace), 60, 'fast_run', 'inplace', 'gpuarray', *tags) return local_opt return f register_opt('fast_compile')(theano.tensor.opt.local_track_shape_i) register_opt(final_opt=True, name='gpua_constant_folding')( tensor.opt.constant_folding) gpu_optimizer.register('local_remove_all_assert', theano.tensor.opt.local_remove_all_assert, 'unsafe') def safe_to_gpu(x, ctx_name): if isinstance(x.type, tensor.TensorType): return GpuFromHost(ctx_name)(x) else: return x def safe_to_cpu(x): if isinstance(x.type, GpuArrayType): return host_from_gpu(x) else: return x def op_lifter(OP, 
cuda_only=False): """ OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...)) gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...) """ def f(maker): def local_opt(node): if type(node.op) in OP: # Either one of our inputs is on the gpu or # all of our clients are on the gpu replace = False # TODO: Maybe set context_name with infer_context_name()? context_name = None # We replace if any input is a host_from_gpu for i in node.inputs: if i.owner and i.owner.op == host_from_gpu: context_name = i.owner.inputs[0].type.context_name replace = True break if not replace: # We replace if *all* clients are on the GPU clients = [c for o in node.outputs for c in o.clients] replace = len(clients) != 0 for c, idx in clients: if (c == 'output' or not isinstance(c.op, GpuFromHost)): replace = False # TODO: check that the clients want the same context? if replace: # All clients are GpuFromHost and we have at least one context_name = clients[0][0].op.context_name # Check if we should replace if (not replace or (cuda_only and get_context(context_name).kind != b'cuda')): return False # tag the inputs with the context in case # the context was derived from the outputs for i in node.inputs: i.tag.context_name = context_name new_op = maker(node, context_name) # This is needed as sometimes new_op inherits from OP. if new_op and new_op != node.op: if isinstance(new_op, theano.Op): return [safe_to_cpu(o) for o in new_op(*node.inputs, return_list=True)] elif isinstance(new_op, (tuple, list)): return [safe_to_cpu(o) for o in new_op] else: # suppose it is a variable on the GPU return [host_from_gpu(new_op)] return False local_opt.__name__ = maker.__name__ return local_optimizer(OP)(local_opt) return f class InputToGpuOptimizer(Optimizer): """ Transfer the input to the gpu to start the rolling wave. """ def add_requirements(self, fgraph): fgraph.attach_feature(toolbox.ReplaceValidate()) def apply(self, fgraph): for input in fgraph.inputs: if isinstance(input.type, GpuArrayType): continue # If all clients are outputs or transfers don't do anything. if (all(cl[0] == 'output' or isinstance(cl[0].op, GpuFromHost) for cl in input.clients)): continue target = getattr(input.tag, 'target', None) if target == 'cpu': continue try: new_input = host_from_gpu(GpuFromHost(target)(input)) fgraph.replace_validate(input, new_input, "InputToGpuOptimizer") except TypeError: # This could fail if the inputs are not TensorTypes pass except ContextNotDefined: if hasattr(input.tag, 'target'): raise # If there is no context tag and no default context # then it stays on the CPU pass gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(), 0, 'fast_run', 'fast_compile', 'merge') @local_optimizer([GpuFromHost, GpuToGpu, HostFromGpu]) def local_cut_gpu_transfers(node): # gpu[ab] -> host -> gpub if (isinstance(node.op, GpuFromHost) and node.inputs[0].owner and isinstance(node.inputs[0].owner.op, HostFromGpu)): other = node.inputs[0].owner.inputs[0] if node.op.context_name == other.type.context_name: return [other] else: return [GpuToGpu(node.op.context_name)(other)] # ? -> gpua -> host elif (isinstance(node.op, HostFromGpu) and node.inputs[0].owner): n2 = node.inputs[0].owner # host -> if isinstance(n2.op, GpuFromHost): return [n2.inputs[0]] # gpub -> if isinstance(n2.op, GpuToGpu): return [host_from_gpu(n2.inputs[0])] # ? 
-> gpua -> gpub elif isinstance(node.op, GpuToGpu): # Transfer within same context if node.inputs[0].type.context_name == node.op.context_name: return [node.inputs[0]] if node.inputs[0].owner: n2 = node.inputs[0].owner # host -> if isinstance(n2.op, GpuFromHost): return [as_gpuarray_variable(n2.inputs[0], node.op.context_name)] # gpuc -> if isinstance(n2.op, GpuToGpu): if node.op.context_name == n2.inputs[0].type.context_name: return [n2.inputs[0]] else: return [node.op(n2.inputs[0])] gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_transfers, 'fast_compile', 'fast_run', 'gpuarray') gpu_cut_copies.register('cut_gpua_constant_transfers', tensor.opt.constant_folding, 'fast_compile', 'fast_run', 'gpuarray') optdb['canonicalize'].register('local_cut_gpua_host_gpua', local_cut_gpu_transfers, 'fast_compile', 'fast_run', 'gpuarray') @register_opt('fast_compile') @local_optimizer([tensor.Alloc]) def local_gpuaalloc2(node): """ Join(axis, {Alloc or HostFromGPU}, ...) -> Join(axis, GpuAlloc, Alloc, ...) Moves an alloc that is an input to join to the gpu. """ try: get_context(None) except ContextNotDefined: # If there is no default context then we do not perform the move here. return if (isinstance(node.op, tensor.Alloc) and all(c != 'output' and c.op == tensor.join and all(i.owner and i.owner.op in [host_from_gpu, tensor.alloc] for i in c.inputs[1:]) for c, idx in node.outputs[0].clients)): return [host_from_gpu(GpuAlloc(None)(*node.inputs))] @register_opt('fast_compile') @op_lifter([tensor.Alloc]) def local_gpuaalloc(node, context_name): return GpuAlloc(context_name)(*node.inputs) @register_opt('fast_compile') @op_lifter([tensor.AllocEmpty]) def local_gpuaallocempty(node, context_name): # We use _props_dict() to make sure that the GPU op know all the # CPU op props. return GpuAllocEmpty(context_name=context_name, **node.op._props_dict())(*node.inputs) @register_opt() @local_optimizer([GpuAlloc]) def local_gpualloc_memset_0(node): if isinstance(node.op, GpuAlloc) and not node.op.memset_0: inp = node.inputs[0] if (isinstance(inp, GpuArrayConstant) and inp.data.size == 1 and (numpy.asarray(inp.data) == 0).all()): new_op = GpuAlloc(node.op.context_name, memset_0=True) return [new_op(*node.inputs)] # Don't register by default. @gof.local_optimizer([GpuAllocEmpty]) def local_gpua_alloc_empty_to_zeros(node): if isinstance(node.op, GpuAllocEmpty): context_name = infer_context_name(*node.inputs) z = numpy.asarray(0, dtype=node.outputs[0].dtype) return [GpuAlloc()(as_gpuarray_variable(z, context_name), *node.inputs)] optdb.register('local_gpua_alloc_empty_to_zeros', theano.tensor.opt.in2out(local_gpua_alloc_empty_to_zeros), # After move to gpu and merge2, before inplace. 
49.3, 'alloc_empty_to_zeros',) @register_opt() @local_optimizer([GpuContiguous]) def local_gpu_contiguous_gpu_contiguous(node): """ gpu_contiguous(gpu_contiguous(x)) -> gpu_contiguous(x) """ if isinstance(node.op, GpuContiguous): inp = node.inputs[0] if inp.owner and isinstance(inp.owner.op, GpuContiguous): return [inp] @register_opt('fast_compile') @op_lifter([tensor.extra_ops.CpuContiguous]) def local_gpu_contiguous(node, context_name): return gpu_contiguous @register_opt('fast_compile') @op_lifter([tensor.Reshape]) def local_gpureshape(node, context_name): op = node.op name = op.name if name: name = 'Gpu' + name res = GpuReshape(op.ndim, op.name) return res @register_opt('fast_compile') @op_lifter([tensor.Rebroadcast]) def local_gpu_rebroadcast(node, context_name): return node.op(as_gpuarray_variable(node.inputs[0], context_name)) @register_opt('fast_compile') @op_lifter([tensor.Flatten]) def local_gpuflatten(node, context_name): op = node.op shp = [] if op.outdim != 1: shp = [node.inputs[0].shape[i] for i in range(op.outdim - 1)] shp += [-1] res = GpuReshape(op.outdim, None) o = res(node.inputs[0], theano.tensor.as_tensor_variable(shp)) return o @register_opt('fast_compile') @op_lifter([tensor.Elemwise]) def local_gpu_elemwise(node, context_name): op = node.op scal_op = op.scalar_op name = op.name if name: name = 'Gpu' + name if len(node.outputs) > 1: return res = GpuElemwise(scal_op, name=name, inplace_pattern=copy.copy(op.inplace_pattern), nfunc_spec=op.nfunc_spec) # If the elemwise operation is a pow, casts might be required on the # inputs and or outputs because only the (float, float)->float and # (double, double)->double cases are implemented at the moment. if isinstance(op.scalar_op, Pow): # Only transfer the computation on the gpu if the output dtype is # floating point. Else, give up on the transfer to the gpu. out_dtype = node.outputs[0].dtype if out_dtype not in ['float16', 'float32', 'float64']: return # Transfer the inputs on the GPU and cast them to the right dtype. new_inputs = [] for inp in node.inputs: if inp.dtype != out_dtype: gpu_cast_op = GpuElemwise(Cast(Scalar(out_dtype))) new_inputs.append(gpu_cast_op(as_gpuarray_variable(inp, context_name))) else: new_inputs.append(as_gpuarray_variable(inp, context_name)) # Perform the exponent on the gpu and transfer the output back to the # cpu. 
gpu_output = res(*new_inputs) cpu_output = host_from_gpu(gpu_output) return [cpu_output] else: return res def max_inputs_to_GpuElemwise(node): ptr_size = 8 int_size = 4 # we take the limit from CUDA for now argument_limit = 232 ndim = node.inputs[0].type.ndim # number of elements and shape size_param_mandatory = (int_size * (ndim + 1)) + \ (ptr_size + int_size * ndim) * len(node.outputs) nb_bytes_avail = argument_limit - size_param_mandatory nb_bytes_per_input = ptr_size + ndim * int_size max_nb_inputs = nb_bytes_avail // nb_bytes_per_input return max_nb_inputs gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op( GpuElemwise, max_inputs_to_GpuElemwise) optdb.register('gpua_elemwise_fusion', tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 71.00, 'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray') inplace_gpu_elemwise_opt = tensor.opt.inplace_elemwise_optimizer_op( GpuElemwise) optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75, 'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray') @register_opt('fast_compile') @op_lifter([tensor.DimShuffle]) def local_gpua_dimshuffle(node, context_name): return GpuDimShuffle(node.op.input_broadcastable, node.op.new_order) @register_opt('fast_compile') @op_lifter([tensor.SpecifyShape]) def local_gpua_specifyShape(node, context_name): if isinstance(node.inputs[0].type, GpuArrayType): return inp = [as_gpuarray_variable(node.inputs[0], context_name)] inp += node.inputs[1:] return tensor.specify_shape(*inp) @register_opt('fast_compile') @op_lifter([theano.compile.ops.Shape]) def local_gpua_shape(node, context_name): # op_lifter will call this opt too frequently as the output is # always on the CPU. if isinstance(node.inputs[0].type, GpuArrayType): return return [as_gpuarray_variable(node.inputs[0], context_name).shape] def gpu_print_wrapper(op, cnda): op.old_op.global_fn(op.old_op, numpy.asarray(cnda)) @register_opt('fast_compile') @op_lifter([tensor.printing.Print]) def local_gpu_print_op(node, context_name): x, = node.inputs gpu_x = as_gpuarray_variable(x, context_name=context_name) new_op = node.op.__class__(global_fn=gpu_print_wrapper) new_op.old_op = node.op return new_op(gpu_x) @register_opt('fast_compile') @local_optimizer([PdbBreakpoint]) def local_gpu_pdbbreakpoint_op(node): if isinstance(node.op, PdbBreakpoint): old_inputs = node.inputs old_outputs = node.outputs new_inputs = node.inputs[:1] input_transfered = [] # Go through the monitored variables, only transfering on GPU those # for which the input comes from the GPU or the output will be # transfered on the GPU. nb_monitored_vars = len(node.outputs) for i in range(nb_monitored_vars): inp = old_inputs[i + 1] out = old_outputs[i] input_is_from_gpu = (inp.owner and isinstance(inp.owner.op, HostFromGpu)) output_goes_to_gpu = False for c in out.clients: if c == 'output': continue if isinstance(c[0].op, GpuFromHost): output_goes_to_gpu = True context_name = c[0].op.context_name break if input_is_from_gpu: # The op should be applied on the GPU version of the input new_inputs.append(inp.owner.inputs[0]) input_transfered.append(True) elif output_goes_to_gpu: # The input should be transfered to the gpu new_inputs.append(as_gpuarray_variable(inp, context_name)) input_transfered.append(True) else: # No transfer is required. 
new_inputs.append(inp) input_transfered.append(False) # Only continue the optimization if at least one input has been # transfered to the gpu if not any(input_transfered): return False # Apply the op on the new inputs new_op_outputs = node.op(*new_inputs, return_list=True) # Propagate the transfer to the gpu through the outputs that require # it new_outputs = [] for i in range(len(new_op_outputs)): if input_transfered[i]: new_outputs.append(host_from_gpu(new_op_outputs[i])) else: new_outputs.append(new_op_outputs[i]) return new_outputs return False @register_opt('fast_compile') @op_lifter([IfElse]) def local_gpua_lazy_ifelse(node, context_name): if node.op.gpu: return c = node.inputs[0] inps = [] for v in node.inputs[1:]: if isinstance(v.type, (tensor.TensorType, GpuArrayType)): inps.append(as_gpuarray_variable(v, context_name)) else: inps.append(v) return IfElse(node.op.n_outs, gpu=True)(c, *inps, return_list=True) @register_opt('fast_compile') @op_lifter([tensor.Join]) def local_gpua_join(node, context_name): return gpu_join @register_opt('fast_compile') @local_optimizer([GpuJoin]) def local_gpuajoin_1(node): # join of a single element if (isinstance(node.op, GpuJoin) and len(node.inputs) == 2): return [node.inputs[1]] @register_opt('fast_compile') @op_lifter([tensor.Split]) def local_gpua_split(node, context_name): return GpuSplit(node.op.len_splits) @register_opt('fast_compile') @op_lifter([tensor.Subtensor]) def local_gpua_subtensor(node, context_name): x = node.inputs[0] if (x.owner and isinstance(x.owner.op, HostFromGpu)): gpu_x = x.owner.inputs[0] if (gpu_x.owner and isinstance(gpu_x.owner.op, GpuFromHost) and # And it is a shared var or an input of the graph. not gpu_x.owner.inputs[0].owner): if len(x.clients) == 1: if any([n == 'output' or any([isinstance(v.type, GpuArrayType) for v in n.inputs + n.outputs]) for n, _ in node.outputs[0].clients]): return else: return [host_from_gpu(gpu_x.owner.op(node.outputs[0]))] return GpuSubtensor(node.op.idx_list) @register_opt('fast_compile') @op_lifter([tensor.IncSubtensor]) def local_gpua_incsubtensor(node, context_name): op = GpuIncSubtensor(node.op.idx_list, node.op.inplace, node.op.set_instead_of_inc, node.op.destroyhandler_tolerate_aliased) ret = op(*node.inputs) val = getattr(node.outputs[0].tag, 'nan_guard_mode_check', True) ret.tag.nan_guard_mode_check = val return ret @register_opt('fast_compile') @op_lifter([tensor.AdvancedSubtensor1]) def local_gpua_advanced_subtensor(node, context_name): return GpuAdvancedSubtensor1() @register_opt('fast_compile') @op_lifter([tensor.AdvancedIncSubtensor1]) def local_gpua_advanced_incsubtensor(node, context_name): context = get_context(context_name) # This is disabled on non-cuda contexts if context.kind != b'cuda': return None x, y, ilist = node.inputs # Gpu Ops needs both inputs to have the same dtype if (x.type.dtype != y.type.dtype): dtype = scalar.upcast(x.type.dtype, y.type.dtype) if x.type.dtype != dtype: x = tensor.cast(x, dtype) if y.type.dtype != dtype: y = tensor.cast(y, dtype) set_instead_of_inc = node.op.set_instead_of_inc compute_capability = int(context.bin_id[-2]) if (compute_capability < 2 or x.ndim != 2 or y.ndim != 2): return GpuAdvancedIncSubtensor1( set_instead_of_inc=set_instead_of_inc) else: return GpuAdvancedIncSubtensor1_dev20( set_instead_of_inc=set_instead_of_inc) @register_inplace() @local_optimizer([GpuAdvancedIncSubtensor1, GpuAdvancedIncSubtensor1_dev20]) def local_advincsub1_gpua_inplace(node): if isinstance(node.op, (GpuAdvancedIncSubtensor1, 
GpuAdvancedIncSubtensor1_dev20)): if not node.op.inplace: return [node.op.clone_inplace()(*node.inputs)] @register_opt('fast_compile') @op_lifter([tensor.CAReduce, tensor.Sum, tensor.elemwise.Prod]) def local_gpua_careduce(node, context_name): if isinstance(node.op.scalar_op, (scalar.Add, scalar.Mul, scalar.Maximum, scalar.Minimum)): ctx = get_context(context_name) if ctx.kind == b'opencl': op = GpuCAReduceCPY if node.op.scalar_op not in [scalar.add, scalar.mul]: # We don't support yet all reduction with cpy code. return elif ctx.kind == b'cuda': op = GpuCAReduceCuda else: return False x, = node.inputs greduce = op( node.op.scalar_op, axis=node.op.axis, dtype=getattr(node.op, 'dtype', None), acc_dtype=getattr(node.op, 'acc_dtype', None)) gvar = greduce(x) # We need to have the make node called, otherwise the mask can # be None if (op is GpuCAReduceCPY or gvar.owner.op.supports_c_code([ as_gpuarray_variable(x, context_name)])): return greduce else: # Try to make a simpler pattern based on reshaping # The principle is that if two adjacent dimensions have # the same value in the reduce_mask, then we can reshape # to make them a single dimension, do the reduction, and # then reshape to get them back. if node.op.axis is None: reduce_mask = [1] * x.type.ndim else: reduce_mask = [0] * x.type.ndim for a in node.op.axis: assert reduce_mask[a] == 0 reduce_mask[a] = 1 new_in_shp = [shape_i(x, 0)] new_mask = [reduce_mask[0]] for i in xrange(1, x.type.ndim): if reduce_mask[i] == reduce_mask[i - 1]: new_in_shp[-1] *= shape_i(x, i) else: new_mask.append(reduce_mask[i]) new_in_shp.append(shape_i(x, i)) new_axis = [] for idx, m in enumerate(new_mask): if m == 1: new_axis.append(idx) greduce = op( node.op.scalar_op, axis=new_axis, reduce_mask=new_mask, dtype=getattr(node.op, 'dtype', None), acc_dtype=getattr(node.op, 'acc_dtype', None)) reshaped_x = x.reshape(tensor.stack(new_in_shp)) gpu_reshaped_x = as_gpuarray_variable(reshaped_x, context_name) gvar = greduce(gpu_reshaped_x) # We need to have the make node called, otherwise the mask can # be None reshaped_gpu_inputs = [gpu_reshaped_x] if greduce.supports_c_code(reshaped_gpu_inputs): reduce_reshaped_x = host_from_gpu( greduce(gpu_reshaped_x)) if reduce_reshaped_x.ndim != node.outputs[0].ndim: out_shp = [] for i in range(x.ndim): if i not in node.op.axis: out_shp.append(shape_i(x, i)) unreshaped_reduce = reduce_reshaped_x.reshape( tensor.stack(out_shp)) else: unreshaped_reduce = reduce_reshaped_x return [unreshaped_reduce] @register_opt('fast_compile') @op_lifter([tensor.blas.Gemv, tensor.blas_c.CGemv]) def local_gpua_gemv(node, context_name): if node.op.inplace: return gpugemv_inplace else: return gpugemv_no_inplace @register_opt('fast_compile') @op_lifter([tensor.blas.Gemm]) def local_gpua_gemm(node, context_name): if node.op.inplace: return gpugemm_inplace else: return gpugemm_no_inplace @register_opt('fast_compile') @op_lifter([tensor.blas.BatchedDot]) def local_gpua_gemmbatch(node, context_name): a, b = node.inputs c = tensor.AllocEmpty(a.dtype)(a.shape[0], a.shape[1], b.shape[2]) return gpugemmbatch_no_inplace(c, 1.0, a, b, 0.0) @register_opt('fast_compile') @op_lifter([tensor.basic.Dot]) def local_gpua_hgemm(node, context_name): from theano.sandbox.cuda import nvcc_compiler if nvcc_compiler.nvcc_version < '7.5': _logger.warning("Not performing dot of float16 on the GPU since " "cuda 7.5 is not available. 
Updating could speed up " "your code.") return A = node.inputs[0] B = node.inputs[1] if (A.ndim == 2 and B.ndim == 2 and A.dtype == 'float16' and B.dtype == 'float16'): fgraph = node.inputs[0].fgraph C = GpuAllocEmpty(dtype='float16', context_name=context_name)( shape_i(A, 0, fgraph), shape_i(B, 1, fgraph)) return gpugemm_no_inplace(C, 1.0, A, B, 0.0) @register_opt() @alpha_merge(GpuGemm, alpha_in=1, beta_in=4) def local_gpuagemm_alpha_merge(node, *inputs): return [gpugemm_no_inplace(*inputs)] @register_opt() @output_merge(GpuGemm, alpha_in=1, beta_in=4, out_in=0) def local_gpuagemm_output_merge(node, *inputs): return [gpugemm_no_inplace(*inputs)] @register_opt() @alpha_merge(GpuGemmBatch, alpha_in=1, beta_in=4) def local_gpuagemmbatch_alpha_merge(node, *inputs): return [gpugemmbatch_no_inplace(*inputs)] @register_opt() @output_merge(GpuGemmBatch, alpha_in=1, beta_in=4, out_in=0) def local_gpuagemmbatch_output_merge(node, *inputs): return [gpugemmbatch_no_inplace(*inputs)] @register_opt('fast_compile') @op_lifter([tensor.blas.Ger, tensor.blas_c.CGer, tensor.blas_scipy.ScipyGer]) def local_gpua_ger(node, context_name): return GpuGer(inplace=node.op.destructive) @register_opt('fast_compile') @op_lifter([tensor.blas.Dot22]) def local_gpua_dot22(node, context_name): return gpu_dot22 @register_opt('fast_compile') @op_lifter([tensor.blas.Dot22Scalar]) def local_gpua_dot22scalar(node, context_name): x, y, a = node.inputs x = as_gpuarray_variable(x, context_name) y = as_gpuarray_variable(y, context_name) z = GpuAllocEmpty(x.dtype, context_name)(x.shape[0], y.shape[1]) return [gpugemm_no_inplace(z, a, x, y, 0)] @register_opt('fast_compile') @op_lifter([tensor.basic.Eye]) def local_gpua_eye(node, context_name): return GpuEye(dtype=node.op.dtype, context_name=context_name) @register_opt('fast_compile') @op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias], cuda_only=True) def local_gpua_crossentropysoftmaxargmax1hotwithbias(node, context_name): return gpu_crossentropy_softmax_argmax_1hot_with_bias @register_opt('fast_compile') @op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx], cuda_only=True) def local_gpua_crossentropysoftmax1hotwithbiasdx(node, context_name): return gpu_crossentropy_softmax_1hot_with_bias_dx @register_opt('fast_compile') @op_lifter([tensor.nnet.Softmax], cuda_only=True) def local_gpua_softmax(node, context_name): return gpu_softmax @register_opt('fast_compile') @op_lifter([tensor.nnet.SoftmaxWithBias], cuda_only=True) def local_gpua_softmaxwithbias(node, context_name): return gpu_softmax_with_bias @register_opt('fast_compile') @op_lifter([theano.tensor.opt.Assert]) def local_assert(node, context_name): # Check if input nodes are already on the GPU if isinstance(node.inputs[0].type, GpuArrayType): return return [host_from_gpu(node.op(as_gpuarray_variable(node.inputs[0], context_name), *node.inputs[1:]))] @register_opt('fast_compile') @op_lifter([ConvOp]) def local_error_convop(node, context_name): assert False, """ ConvOp does not work with the gpuarray backend. 
Use the new convolution interface to have GPU convolution working: theano.tensor.nnet.conv2d() """ @register_opt('fast_compile') @op_lifter([SparseBlockGemv]) def local_lift_sparseblockgemv(node, context_name): if node.op.inplace: return gpu_sparse_block_gemv_inplace else: return gpu_sparse_block_gemv @register_opt('fast_compile') @op_lifter([SparseBlockOuter]) def local_lift_sparseblockouter(node, context_name): if node.op.inplace: return gpu_sparse_block_outer_inplace else: return gpu_sparse_block_outer @register_inplace() @local_optimizer([GpuSparseBlockGemv], inplace=True) def local_inplace_sparseblockgemv(node): if isinstance(node.op, GpuSparseBlockGemv) and not node.op.inplace: return [gpu_sparse_block_gemv_inplace(*node.inputs)] @register_inplace() @local_optimizer([GpuSparseBlockOuter], inplace=True) def local_inplace_sparseblockouter(node): if isinstance(node.op, GpuSparseBlockOuter) and not node.op.inplace: return [GpuSparseBlockOuter(inplace=True)(*node.inputs)] # This deals with any abstract convs that have a transfer somewhere @register_opt('fast_compile') @op_lifter([AbstractConv2d, AbstractConv2d_gradWeights, AbstractConv2d_gradInputs]) def local_lift_abstractconv2d(node, context_name): if isinstance(node.outputs[0].type, GpuArrayType): # Don't handle this node here, it's already on the GPU. return inps = list(node.inputs) inps[0] = as_gpuarray_variable(node.inputs[0], context_name=context_name) inps[1] = as_gpuarray_variable(node.inputs[1], context_name=context_name) return [node.op(*inps)] # Register this here so that it goes after the abstract lifting register_opt('fast_compile')(conv_groupopt) @register_opt("low_memory") @local_optimizer([GpuCAReduceCuda]) def local_gpu_elemwise_careduce(node): """ Merge some GpuCAReduceCuda and GPUElemwise. """ if (isinstance(node.op, GpuCAReduceCuda) and node.op.pre_scalar_op is None and node.inputs[0].owner and isinstance(node.inputs[0].owner.op, GpuElemwise) and # The Op support all scalar with 1 inputs. We don't # automatically add more case, as some like trigonometic # operation with some reduction pattern will probably results # in slow down. isinstance(node.inputs[0].owner.op.scalar_op, scalar.basic.Sqr)): op = node.op inp = node.inputs[0].owner.inputs[0] return [GpuCAReduceCuda(scalar_op=op.scalar_op, axis=op.axis, reduce_mask=op.reduce_mask, pre_scalar_op=scalar.basic.sqr)(inp)] @local_optimizer(None) def local_assert_no_cpu_op(node): if (all([var.owner and isinstance(var.owner.op, HostFromGpu) for var in node.inputs]) and any([[c for c in var.clients if isinstance(c[0].op, GpuFromHost)] for var in node.outputs])): if config.assert_no_cpu_op == "warn": _logger.warning(("CPU Op %s is detected in the computation " "graph") % node) elif config.assert_no_cpu_op == "raise": raise AssertionError("The Op %s is on CPU." 
% node) elif config.assert_no_cpu_op == "pdb": pdb.set_trace() # Register the local_assert_no_cpu_op: assert_no_cpu_op = theano.tensor.opt.in2out(local_assert_no_cpu_op, name='assert_no_cpu_op') # 49.2 is after device specialization & fusion optimizations for last transfers optdb.register('gpua_assert_no_cpu_op', assert_no_cpu_op, 49.2, 'assert_no_cpu_op') def tensor_to_gpu(x, context_name): if isinstance(x.type, tensor.TensorType): y = GpuArrayType(broadcastable=x.type.broadcastable, context_name=context_name, dtype=x.type.dtype)() if x.name: y.name = x.name + '[Gpua]' return y else: return x def gpu_safe_new(x, tag=''): """ Internal function that constructs a new variable from x with the same type, but with a different name (old name + tag). This function is used by gradient, or the R-op to construct new variables for the inputs of the inner graph such that there is no interference between the original graph and the newly constructed graph. """ if hasattr(x, 'name') and x.name is not None: nw_name = x.name + tag else: nw_name = None if isinstance(x, theano.Constant): return x.clone() nw_x = x.type() nw_x.name = nw_name return nw_x def gpu_reconstruct_graph(inputs, outputs, tag=None): """ Different interface to clone, that allows you to pass inputs. Compared to clone, this method always replaces the inputs with new variables of the same type, and returns those (in the same order as the original inputs). """ if tag is None: tag = '' nw_inputs = [gpu_safe_new(x, tag) for x in inputs] givens = {} for nw_x, x in zip(nw_inputs, inputs): givens[x] = nw_x nw_outputs = scan_utils.clone(outputs, replace=givens) return (nw_inputs, nw_outputs) @register_opt('scan', 'fast_compile') @op_lifter([scan_op.Scan]) def local_scan_to_gpua(node, context_name): info = copy.deepcopy(node.op.info) if info.get('gpua', False): return info['gpua'] = True nw_ins = [node.inputs[0]] e = (1 + node.op.n_seqs + node.op.n_mit_mot + node.op.n_mit_sot + node.op.n_sit_sot + node.op.n_shared_outs) nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[1:e]] b = e e = e + node.op.n_nit_sot nw_ins += node.inputs[b:e] nw_ins += [safe_to_gpu(x, context_name) for x in node.inputs[e:]] scan_ins = [tensor_to_gpu(x, context_name) for x in node.op.inputs] # The inner output corresponding to the looping condition should not be # moved to the gpu if node.op.info['as_while']: scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs[:-1]] scan_outs += [node.op.outputs[-1]] else: scan_outs = [safe_to_gpu(x, context_name) for x in node.op.outputs] scan_outs = scan_utils.clone( scan_outs, replace=list(zip(node.op.inputs, (safe_to_cpu(x) for x in scan_ins)))) # We need to construct the hash here, because scan # __init__ does not know about the gpu and can not # handle graphs with inputs being on the gpu tmp_in, tmp_out = gpu_reconstruct_graph(scan_ins, scan_outs) local_fgraph = gof.FunctionGraph(tmp_in, tmp_out, clone=True) _cmodule_key = gof.CLinker().cmodule_key_(local_fgraph, []) info['gpu_hash'] = hash(_cmodule_key) def typebuild(dtype, broadcastable, context_name=context_name): return GpuArrayType(dtype=dtype, broadcastable=broadcastable, context_name=context_name) nw_op = scan_op.Scan(scan_ins, scan_outs, info, typeConstructor=typebuild).make_node(*nw_ins) return nw_op.outputs def _scan_type_infer(node): context_name = infer_context_name(*node.inputs) def typebuild(dtype, broadcastable, context_name=context_name): return GpuArrayType(dtype=dtype, broadcastable=broadcastable, context_name=context_name) return typebuild # 
Do not register in fast_run or fast_compile. # It will be added to fast_run if the GPU is enabled. optdb.register('gpua_scanOp_make_inplace', scan_opt.ScanInplaceOptimizer(typeInfer=_scan_type_infer, gpua_flag=True), 75, 'gpuarray', 'inplace', 'scan')
bsd-3-clause
-5,803,320,694,378,629,000
34.113274
87
0.599526
false
3.479
false
false
false
subins2000/TorrentBro
torrentbro/lib/tpb/constants.py
1
3066
import sys

if sys.version_info >= (3, 0):
    class_type = type
else:
    from new import classobj
    class_type = classobj


class ConstantType(type):

    """
    Tree representation metaclass for class attributes.

    Metaclass is extended to all child classes too.
    """

    def __new__(cls, clsname, bases, dct):
        """
        Extend metaclass to all class attributes too.
        """
        attrs = {}
        for name, attr in dct.items():
            if isinstance(attr, class_type):
                # substitute attr with a new class with Constants as
                # metaclass making it possible to spread this same method
                # to all child classes
                attr = ConstantType(
                    attr.__name__, attr.__bases__, attr.__dict__)
            attrs[name] = attr

        return super(ConstantType, cls).__new__(cls, clsname, bases, attrs)

    def __repr__(cls):
        """
        Tree representation of class attributes. Child classes are also
        represented.
        """
        # dump current class name
        tree = cls.__name__ + ':\n'

        for name in dir(cls):
            if not name.startswith('_'):
                attr = getattr(cls, name)
                output = repr(attr)
                if not isinstance(attr, ConstantType):
                    output = '{}: {}'.format(name, output)
                # indent all child attrs
                tree += '\n'.join([' ' * 4 + line
                                   for line in output.splitlines()]) + '\n'

        return tree

    def __str__(cls):
        return repr(cls)


Constants = ConstantType('Constants', (object,), {})


class ORDERS(Constants):

    class NAME:
        DES = 1
        ASC = 2

    class UPLOADED:
        DES = 3
        ASC = 4

    class SIZE:
        DES = 5
        ASC = 6

    class SEEDERS:
        DES = 7
        ASC = 8

    class LEECHERS:
        DES = 9
        ASC = 10

    class UPLOADER:
        DES = 11
        ASC = 12

    class TYPE:
        DES = 13
        ASC = 14


class CATEGORIES(Constants):

    ALL = 0

    class AUDIO:
        ALL = 100
        MUSIC = 101
        AUDIO_BOOKS = 102
        SOUND_CLIPS = 103
        FLAC = 104
        OTHER = 199

    class VIDEO:
        ALL = 200
        MOVIES = 201
        MOVIES_DVDR = 202
        MUSIC_VIDEOS = 203
        MOVIE_CLIPS = 204
        TV_SHOWS = 205
        HANDHELD = 206
        HD_MOVIES = 207
        HD_TV_SHOWS = 208
        THREE_DIMENSIONS = 209
        OTHER = 299

    class APPLICATIONS:
        ALL = 300
        WINDOWS = 301
        MAC = 302
        UNIX = 303
        HANDHELD = 304
        IOS = 305
        ANDROID = 306
        OTHER = 399

    class GAMES:
        ALL = 400
        PC = 401
        MAC = 402
        PSX = 403
        XBOX360 = 404
        WII = 405
        HANDHELD = 406
        IOS = 407
        ANDROID = 408
        OTHER = 499

    class OTHER:
        EBOOKS = 601
        COMICS = 602
        PICTURES = 603
        COVERS = 604
        PHYSIBLES = 605
        OTHER = 699
gpl-3.0
-6,296,941,637,029,682,000
21.217391
80
0.483366
false
4.093458
false
false
false
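A brief illustration of how the ConstantType metaclass above behaves; the import path mirrors the file's repository location and is an assumption, while the printed values are the ones defined in the file.

# Illustrative only; assumes the module above is importable under its repo path.
from torrentbro.lib.tpb.constants import CATEGORIES, ORDERS

print(CATEGORIES.VIDEO.HD_MOVIES)   # 207
print(ORDERS.SEEDERS.DES)           # 7

# ConstantType.__str__/__repr__ render nested classes as an indented tree,
# so printing a class lists every constant defined beneath it.
print(CATEGORIES.AUDIO)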
wikimedia/user_metrics
user_metrics/api/run.py
1
4196
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
    This module defines the entry point for the flask_ web server
    implementation of the Wikimedia User Metrics API.  This module is
    consumable by the Apache web server via the WSGI interface via mod_wsgi.
    An Apache server can be pointed to api.wsgi such that Apache may be used
    as a wrapper in this way.

    .. _flask: http://flask.pocoo.org

    Cohort Data
    ^^^^^^^^^^^

    Cohort data is maintained in the host s1-analytics-slave.eqiad.wmnet
    under the `staging` database in the `usertags` and `usertags_meta`
    tables: ::

        +---------+-----------------+------+-----+---------+-------+
        | Field   | Type            | Null | Key | Default | Extra |
        +---------+-----------------+------+-----+---------+-------+
        | ut_user | int(5) unsigned | NO   | PRI | NULL    |       |
        | ut_tag  | int(4) unsigned | NO   | PRI | NULL    |       |
        +---------+-----------------+------+-----+---------+-------+

        +-------------+-----------------+------+-----+---------+
        | Field       | Type            | Null | Key | Default |
        +-------------+-----------------+------+-----+---------+
        | utm_id      | int(5) unsigned | NO   | PRI | NULL    |
        | utm_name    | varchar(255)    | NO   |     |         |
        | utm_notes   | varchar(255)    | YES  |     | NULL    |
        | utm_touched | datetime        | YES  |     | NULL    |
        +-------------+-----------------+------+-----+---------+

"""

__author__ = {
    "dario taraborelli": "dario@wikimedia.org",
    "ryan faulkner": "rfaulkner@wikimedia.org"
}
__date__ = "2012-12-21"
__license__ = "GPL (version 2 or later)"

import multiprocessing as mp

from user_metrics.config import logging, settings
from user_metrics.api.engine.request_manager import job_control, \
    requests_notification_callback
from user_metrics.api.engine.response_handler import process_responses
from user_metrics.api.views import app
from user_metrics.api.engine.request_manager import api_request_queue, \
    req_notification_queue_out, req_notification_queue_in, api_response_queue
from user_metrics.utils import terminate_process_with_checks

job_controller_proc = None
response_controller_proc = None
rm_callback_proc = None


######
#
# Define Custom Classes
#
#######


def teardown():
    """
        When the instance is deleted store the pickled data and shutdown
        the job controller
    """
    # Try to shutdown the job control proc gracefully
    try:
        terminate_process_with_checks(job_controller_proc)
        terminate_process_with_checks(response_controller_proc)
        terminate_process_with_checks(rm_callback_proc)

    except Exception:
        logging.error(__name__ + ' :: Could not shut down callbacks.')


def setup_controller(req_queue, res_queue, msg_queue_in, msg_queue_out):
    """
        Sets up the process that handles API jobs
    """
    # Bind the process handles at module level so that teardown() can reach
    # them; without this declaration the assignments below would only create
    # locals and the module-level handles would stay None.
    global job_controller_proc, response_controller_proc, rm_callback_proc

    job_controller_proc = mp.Process(target=job_control,
                                     args=(req_queue, res_queue))
    response_controller_proc = mp.Process(target=process_responses,
                                          args=(res_queue, msg_queue_in))
    rm_callback_proc = mp.Process(target=requests_notification_callback,
                                  args=(msg_queue_in, msg_queue_out))

    job_controller_proc.start()
    response_controller_proc.start()
    rm_callback_proc.start()


######
#
# Execution
#
#######

# initialize API data - get the instance
setup_controller(api_request_queue, api_response_queue,
                 req_notification_queue_in, req_notification_queue_out)

app.config['SECRET_KEY'] = settings.__secret_key__

# With the presence of flask.ext.login module
if settings.__flask_login_exists__:
    from user_metrics.api.session import login_manager
    login_manager.setup_app(app)

if __name__ == '__main__':
    try:
        app.run(debug=True,
                use_reloader=False,
                host=settings.__instance_host__,
                port=settings.__instance_port__,)
    finally:
        teardown()
bsd-3-clause
5,606,985,563,411,942,000
32.568
78
0.547188
false
3.958491
false
false
false
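The module docstring above documents the `usertags` / `usertags_meta` schema but not a query against it. A hedged sketch of a cohort-membership lookup could look like the following; the MySQLdb driver, connection options, and cohort name are assumptions rather than part of the project, and the join between `ut_tag` and `utm_id` is inferred from the schema.

# Illustrative only: resolve the user ids tagged with a named cohort.
import MySQLdb  # assumed client library

conn = MySQLdb.connect(host='s1-analytics-slave.eqiad.wmnet', db='staging',
                       read_default_file='~/.my.cnf')  # credentials assumed
try:
    cur = conn.cursor()
    cur.execute("""
        SELECT ut.ut_user
        FROM usertags ut
        JOIN usertags_meta utm ON utm.utm_id = ut.ut_tag
        WHERE utm.utm_name = %s
    """, ('my_cohort',))            # hypothetical cohort name
    user_ids = [row[0] for row in cur.fetchall()]
finally:
    conn.close()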
benosteen/mypaint
gui/brushcreationwidget.py
1
9333
# This file is part of MyPaint. # Copyright (C) 2009 by Martin Renold <martinxyz@gmx.ch> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. import os import gtk gdk = gtk.gdk from lib import document import tileddrawwidget, brushmanager, dialogs from gettext import gettext as _ def startfile(path): import os import platform if platform.system == 'Windows': os.startfile(path) else: os.system("xdg-open " + path) def stock_button(stock_id): b = gtk.Button() img = gtk.Image() img.set_from_stock(stock_id, gtk.ICON_SIZE_MENU) b.add(img) return b class BrushManipulationWidget(gtk.HBox): """ """ def __init__(self, app, brushicon_editor): gtk.HBox.__init__(self) self.app = app self.bm = app.brushmanager self.brushicon_editor = brushicon_editor self.init_widgets() self.bm.selected_brush_observers.append(self.brush_selected_cb) def init_widgets(self): l = self.brush_name_label = gtk.Label() l.set_text(_('(unnamed brush)')) self.pack_start(l, expand=True) right_vbox_buttons = [ (gtk.STOCK_SAVE, self.update_settings_cb, _('Save Settings')), (gtk.STOCK_ADD, self.create_brush_cb, _('Add As New')), (gtk.STOCK_PROPERTIES, self.edit_brush_cb, _('Edit Brush Icon')), (gtk.STOCK_EDIT, self.rename_brush_cb, _('Rename...')), (gtk.STOCK_DELETE, self.delete_brush_cb, _('Remove...')), ] for stock_id, clicked_cb, tooltip in reversed(right_vbox_buttons): b = stock_button(stock_id) b.connect('clicked', clicked_cb) b.set_tooltip_text(tooltip) self.pack_end(b, expand=False) def brush_selected_cb(self, managed_brush, brushinfo): name = managed_brush.name if name is None: name = _('(unnamed brush)') else: name = name.replace('_', ' ') # XXX safename/unsafename utils? self.brush_name_label.set_text(name) def edit_brush_cb(self, window): self.edit_brush_properties_cb() def create_brush_cb(self, window): """Create and save a new brush based on the current working brush.""" b = brushmanager.ManagedBrush(self.bm) b.brushinfo = self.app.brush.clone() b.brushinfo.set_string_property("parent_brush_name", None) #avoid mis-hilight b.preview = self.brushicon_editor.get_preview_pixbuf() b.save() if self.bm.active_groups: group = self.bm.active_groups[0] else: group = brushmanager.DEFAULT_BRUSH_GROUP brushes = self.bm.get_group_brushes(group, make_active=True) brushes.insert(0, b) b.persistent = True # Brush was saved b.in_brushlist = True for f in self.bm.brushes_observers: f(brushes) self.bm.select_brush(b) # Pretend that the active app.brush is a child of the new one, for the # sake of the strokemap and strokes drawn immediately after. 
self.app.brush.set_string_property("parent_brush_name", b.name) def rename_brush_cb(self, window): src_brush = self.bm.selected_brush if not src_brush.name: dialogs.error(self, _('No brush selected!')) return dst_name = dialogs.ask_for_name(self, _("Rename Brush"), src_brush.name.replace('_', ' ')) if not dst_name: return dst_name = dst_name.replace(' ', '_') # ensure we don't overwrite an existing brush by accident dst_deleted = None for group, brushes in self.bm.groups.iteritems(): for b2 in brushes: if b2.name == dst_name: if group == brushmanager.DELETED_BRUSH_GROUP: dst_deleted = b2 else: dialogs.error(self, _('A brush with this name already exists!')) return print 'renaming brush', repr(src_brush.name), '-->', repr(dst_name) if dst_deleted: deleted_brushes = self.bm.get_group_brushes(brushmanager.DELETED_BRUSH_GROUP) deleted_brushes.remove(dst_deleted) for f in self.bm.brushes_observers: f(deleted_brushes) # save src as dst src_name = src_brush.name src_brush.name = dst_name src_brush.save() src_brush.name = src_name # load dst dst_brush = brushmanager.ManagedBrush(self.bm, dst_name, persistent=True) dst_brush.load() dst_brush.in_brushlist = True # replace src with dst (but keep src in the deleted list if it is a stock brush) self.delete_brush_internal(src_brush, replacement=dst_brush) self.bm.select_brush(dst_brush) def update_settings_cb(self, window): b = self.bm.selected_brush if not b.name: dialogs.error(self, _('No brush selected, please use "Add As New" instead.')) return b.brushinfo = self.app.brush.clone() b.save() def delete_brush_cb(self, window): b = self.bm.selected_brush if not b.name: dialogs.error(self, _('No brush selected!')) return if not dialogs.confirm(self, _("Really delete brush from disk?")): return self.bm.select_brush(None) self.delete_brush_internal(b) def delete_brush_internal(self, b, replacement=None): for brushes in self.bm.groups.itervalues(): if b in brushes: idx = brushes.index(b) if replacement: brushes[idx] = replacement else: del brushes[idx] for f in self.bm.brushes_observers: f(brushes) assert b not in brushes, 'Brush exists multiple times in the same group!' 
if not b.delete_from_disk(): # stock brush can't be deleted deleted_brushes = self.bm.get_group_brushes(brushmanager.DELETED_BRUSH_GROUP) deleted_brushes.insert(0, b) for f in self.bm.brushes_observers: f(deleted_brushes) class BrushIconEditorWidget(gtk.VBox): def __init__(self, app): gtk.VBox.__init__(self) self.app = app self.bm = app.brushmanager self.set_border_width(8) self.init_widgets() self.bm.selected_brush_observers.append(self.brush_selected_cb) self.set_brush_preview_edit_mode(False) def init_widgets(self): button_box = gtk.HBox() doc = document.Document(self.app.brush) self.tdw = tileddrawwidget.TiledDrawWidget(self.app, doc) self.tdw.set_size_request(brushmanager.preview_w*2, brushmanager.preview_h*2) self.tdw.scale = 2.0 tdw_box = gtk.HBox() tdw_box.pack_start(self.tdw, expand=False, fill=False) tdw_box.pack_start(gtk.Label(), expand=True) self.pack_start(tdw_box, expand=False, fill=False, padding=3) self.pack_start(button_box, expand=False, fill=False, padding=3) self.brush_preview_edit_mode_button = b = gtk.CheckButton(_('Edit')) b.connect('toggled', self.brush_preview_edit_mode_cb) button_box.pack_start(b, expand=False, padding=3) self.brush_preview_clear_button = b = gtk.Button(_('Clear')) def clear_cb(window): self.tdw.doc.clear_layer() b.connect('clicked', clear_cb) button_box.pack_start(b, expand=False, padding=3) self.brush_preview_save_button = b = gtk.Button(_('Save')) b.connect('clicked', self.update_preview_cb) button_box.pack_start(b, expand=False, padding=3) def brush_preview_edit_mode_cb(self, button): self.set_brush_preview_edit_mode(button.get_active()) def set_brush_preview_edit_mode(self, edit_mode): self.brush_preview_edit_mode = edit_mode self.brush_preview_edit_mode_button.set_active(edit_mode) self.brush_preview_save_button.set_sensitive(edit_mode) self.brush_preview_clear_button.set_sensitive(edit_mode) self.tdw.set_sensitive(edit_mode) def set_preview_pixbuf(self, pixbuf): if pixbuf is None: self.tdw.doc.clear() else: self.tdw.doc.load_from_pixbuf(pixbuf) def get_preview_pixbuf(self): pixbuf = self.tdw.doc.render_as_pixbuf(0, 0, brushmanager.preview_w, brushmanager.preview_h) return pixbuf def update_preview_cb(self, window): pixbuf = self.get_preview_pixbuf() b = self.bm.selected_brush if not b.name: dialogs.error(self, _('No brush selected, please use "Add As New" instead.')) return b.preview = pixbuf b.save() for brushes in self.bm.groups.itervalues(): if b in brushes: for f in self.bm.brushes_observers: f(brushes) def brush_selected_cb(self, managed_brush, brushinfo): # Update brush icon preview if it is not in edit mode if not self.brush_preview_edit_mode: self.set_preview_pixbuf(managed_brush.preview)
gpl-2.0
4,452,946,291,741,899,300
35.457031
100
0.605807
false
3.571757
false
false
false
dietrichc/streamline-ppc-reports
examples/dfp/v201405/label_service/get_labels_by_statement.py
1
1743
#!/usr/bin/python # # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This code example gets all labels ordered by name. To create a label, run create_label.py. This feature is only available to DFP premium solution networks. """ __author__ = ('Nicholas Chen', 'Joseph DiLallo') # Import appropriate modules from the client library. from googleads import dfp def main(client): # Initialize appropriate service. label_service = client.GetService('LabelService', version='v201405') # Create statement to get all labels statement = dfp.FilterStatement('ORDER BY name') # Get labels by statement. while True: response = label_service.getLabelsByStatement(statement.ToStatement()) if 'results' in response: # Display results. for label in response['results']: print ('Label with id \'%s\' and name \'%s\' was found.' % (label['id'], label['name'])) statement.offset += dfp.SUGGESTED_PAGE_LIMIT else: break print '\nNumber of results found: %s' % response['totalResultSetSize'] if __name__ == '__main__': # Initialize client object. dfp_client = dfp.DfpClient.LoadFromStorage() main(dfp_client)
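The loop above pages through results by bumping `statement.offset` by `dfp.SUGGESTED_PAGE_LIMIT` until a response arrives without a 'results' key. A hedged refactor of that same pattern into a reusable generator might look like this; `iter_labels` is an illustrative name, not part of the googleads client library.

# Sketch only: the paging logic from main() expressed as a generator, so
# callers can consume labels lazily. Uses only calls already shown above.
def iter_labels(label_service, query='ORDER BY name'):
    statement = dfp.FilterStatement(query)
    while True:
        response = label_service.getLabelsByStatement(statement.ToStatement())
        if 'results' not in response:
            break
        for label in response['results']:
            yield label
        statement.offset += dfp.SUGGESTED_PAGE_LIMIT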
apache-2.0
-264,873,779,856,759,300
30.690909
77
0.703385
false
3.908072
false
false
false
ryfeus/lambda-packs
Pandas_numpy/source/numpy/core/_internal.py
3
21639
""" A place for code to be called from core C-code. Some things are more easily handled Python. """ from __future__ import division, absolute_import, print_function import re import sys from numpy.compat import basestring from .multiarray import dtype, array, ndarray try: import ctypes except ImportError: ctypes = None from .numerictypes import object_ if (sys.byteorder == 'little'): _nbo = b'<' else: _nbo = b'>' def _makenames_list(adict, align): allfields = [] fnames = list(adict.keys()) for fname in fnames: obj = adict[fname] n = len(obj) if not isinstance(obj, tuple) or n not in [2, 3]: raise ValueError("entry not a 2- or 3- tuple") if (n > 2) and (obj[2] == fname): continue num = int(obj[1]) if (num < 0): raise ValueError("invalid offset.") format = dtype(obj[0], align=align) if (n > 2): title = obj[2] else: title = None allfields.append((fname, format, num, title)) # sort by offsets allfields.sort(key=lambda x: x[2]) names = [x[0] for x in allfields] formats = [x[1] for x in allfields] offsets = [x[2] for x in allfields] titles = [x[3] for x in allfields] return names, formats, offsets, titles # Called in PyArray_DescrConverter function when # a dictionary without "names" and "formats" # fields is used as a data-type descriptor. def _usefields(adict, align): try: names = adict[-1] except KeyError: names = None if names is None: names, formats, offsets, titles = _makenames_list(adict, align) else: formats = [] offsets = [] titles = [] for name in names: res = adict[name] formats.append(res[0]) offsets.append(res[1]) if (len(res) > 2): titles.append(res[2]) else: titles.append(None) return dtype({"names": names, "formats": formats, "offsets": offsets, "titles": titles}, align) # construct an array_protocol descriptor list # from the fields attribute of a descriptor # This calls itself recursively but should eventually hit # a descriptor that has no fields and then return # a simple typestring def _array_descr(descriptor): fields = descriptor.fields if fields is None: subdtype = descriptor.subdtype if subdtype is None: if descriptor.metadata is None: return descriptor.str else: new = descriptor.metadata.copy() if new: return (descriptor.str, new) else: return descriptor.str else: return (_array_descr(subdtype[0]), subdtype[1]) names = descriptor.names ordered_fields = [fields[x] + (x,) for x in names] result = [] offset = 0 for field in ordered_fields: if field[1] > offset: num = field[1] - offset result.append(('', '|V%d' % num)) offset += num if len(field) > 3: name = (field[2], field[3]) else: name = field[2] if field[0].subdtype: tup = (name, _array_descr(field[0].subdtype[0]), field[0].subdtype[1]) else: tup = (name, _array_descr(field[0])) offset += field[0].itemsize result.append(tup) if descriptor.itemsize > offset: num = descriptor.itemsize - offset result.append(('', '|V%d' % num)) return result # Build a new array from the information in a pickle. # Note that the name numpy.core._internal._reconstruct is embedded in # pickles of ndarrays made with NumPy before release 1.0 # so don't remove the name here, or you'll # break backward compatibility. def _reconstruct(subtype, shape, dtype): return ndarray.__new__(subtype, shape, dtype) # format_re was originally from numarray by J. Todd Miller format_re = re.compile(br'(?P<order1>[<>|=]?)' br'(?P<repeats> *[(]?[ ,0-9L]*[)]? 
*)' br'(?P<order2>[<>|=]?)' br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)') sep_re = re.compile(br'\s*,\s*') space_re = re.compile(br'\s+$') # astr is a string (perhaps comma separated) _convorder = {b'=': _nbo} def _commastring(astr): startindex = 0 result = [] while startindex < len(astr): mo = format_re.match(astr, pos=startindex) try: (order1, repeats, order2, dtype) = mo.groups() except (TypeError, AttributeError): raise ValueError('format number %d of "%s" is not recognized' % (len(result)+1, astr)) startindex = mo.end() # Separator or ending padding if startindex < len(astr): if space_re.match(astr, pos=startindex): startindex = len(astr) else: mo = sep_re.match(astr, pos=startindex) if not mo: raise ValueError( 'format number %d of "%s" is not recognized' % (len(result)+1, astr)) startindex = mo.end() if order2 == b'': order = order1 elif order1 == b'': order = order2 else: order1 = _convorder.get(order1, order1) order2 = _convorder.get(order2, order2) if (order1 != order2): raise ValueError( 'inconsistent byte-order specification %s and %s' % (order1, order2)) order = order1 if order in [b'|', b'=', _nbo]: order = b'' dtype = order + dtype if (repeats == b''): newitem = dtype else: newitem = (dtype, eval(repeats)) result.append(newitem) return result class dummy_ctype(object): def __init__(self, cls): self._cls = cls def __mul__(self, other): return self def __call__(self, *other): return self._cls(other) def __eq__(self, other): return self._cls == other._cls def __ne__(self, other): return self._cls != other._cls def _getintp_ctype(): val = _getintp_ctype.cache if val is not None: return val if ctypes is None: import numpy as np val = dummy_ctype(np.intp) else: char = dtype('p').char if (char == 'i'): val = ctypes.c_int elif char == 'l': val = ctypes.c_long elif char == 'q': val = ctypes.c_longlong else: val = ctypes.c_long _getintp_ctype.cache = val return val _getintp_ctype.cache = None # Used for .ctypes attribute of ndarray class _missing_ctypes(object): def cast(self, num, obj): return num def c_void_p(self, num): return num class _ctypes(object): def __init__(self, array, ptr=None): if ctypes: self._ctypes = ctypes else: self._ctypes = _missing_ctypes() self._arr = array self._data = ptr if self._arr.ndim == 0: self._zerod = True else: self._zerod = False def data_as(self, obj): return self._ctypes.cast(self._data, obj) def shape_as(self, obj): if self._zerod: return None return (obj*self._arr.ndim)(*self._arr.shape) def strides_as(self, obj): if self._zerod: return None return (obj*self._arr.ndim)(*self._arr.strides) def get_data(self): return self._data def get_shape(self): return self.shape_as(_getintp_ctype()) def get_strides(self): return self.strides_as(_getintp_ctype()) def get_as_parameter(self): return self._ctypes.c_void_p(self._data) data = property(get_data, None, doc="c-types data") shape = property(get_shape, None, doc="c-types shape") strides = property(get_strides, None, doc="c-types strides") _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_") def _newnames(datatype, order): """ Given a datatype and an order object, return a new names tuple, with the order indicated """ oldnames = datatype.names nameslist = list(oldnames) if isinstance(order, str): order = [order] seen = set() if isinstance(order, (list, tuple)): for name in order: try: nameslist.remove(name) except ValueError: if name in seen: raise ValueError("duplicate field name: %s" % (name,)) else: raise ValueError("unknown field name: %s" % (name,)) seen.add(name) return 
tuple(list(order) + nameslist) raise ValueError("unsupported order value: %s" % (order,)) def _copy_fields(ary): """Return copy of structured array with padding between fields removed. Parameters ---------- ary : ndarray Structured array from which to remove padding bytes Returns ------- ary_copy : ndarray Copy of ary with padding bytes removed """ dt = ary.dtype copy_dtype = {'names': dt.names, 'formats': [dt.fields[name][0] for name in dt.names]} return array(ary, dtype=copy_dtype, copy=True) def _getfield_is_safe(oldtype, newtype, offset): """ Checks safety of getfield for object arrays. As in _view_is_safe, we need to check that memory containing objects is not reinterpreted as a non-object datatype and vice versa. Parameters ---------- oldtype : data-type Data type of the original ndarray. newtype : data-type Data type of the field being accessed by ndarray.getfield offset : int Offset of the field being accessed by ndarray.getfield Raises ------ TypeError If the field access is invalid """ if newtype.hasobject or oldtype.hasobject: if offset == 0 and newtype == oldtype: return if oldtype.names: for name in oldtype.names: if (oldtype.fields[name][1] == offset and oldtype.fields[name][0] == newtype): return raise TypeError("Cannot get/set field of an object array") return def _view_is_safe(oldtype, newtype): """ Checks safety of a view involving object arrays, for example when doing:: np.zeros(10, dtype=oldtype).view(newtype) Parameters ---------- oldtype : data-type Data type of original ndarray newtype : data-type Data type of the view Raises ------ TypeError If the new type is incompatible with the old type. """ # if the types are equivalent, there is no problem. # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4')) if oldtype == newtype: return if newtype.hasobject or oldtype.hasobject: raise TypeError("Cannot change data-type for object array.") return # Given a string containing a PEP 3118 format specifier, # construct a NumPy dtype _pep3118_native_map = { '?': '?', 'c': 'S1', 'b': 'b', 'B': 'B', 'h': 'h', 'H': 'H', 'i': 'i', 'I': 'I', 'l': 'l', 'L': 'L', 'q': 'q', 'Q': 'Q', 'e': 'e', 'f': 'f', 'd': 'd', 'g': 'g', 'Zf': 'F', 'Zd': 'D', 'Zg': 'G', 's': 'S', 'w': 'U', 'O': 'O', 'x': 'V', # padding } _pep3118_native_typechars = ''.join(_pep3118_native_map.keys()) _pep3118_standard_map = { '?': '?', 'c': 'S1', 'b': 'b', 'B': 'B', 'h': 'i2', 'H': 'u2', 'i': 'i4', 'I': 'u4', 'l': 'i4', 'L': 'u4', 'q': 'i8', 'Q': 'u8', 'e': 'f2', 'f': 'f', 'd': 'd', 'Zf': 'F', 'Zd': 'D', 's': 'S', 'w': 'U', 'O': 'O', 'x': 'V', # padding } _pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys()) def _dtype_from_pep3118(spec): class Stream(object): def __init__(self, s): self.s = s self.byteorder = '@' def advance(self, n): res = self.s[:n] self.s = self.s[n:] return res def consume(self, c): if self.s[:len(c)] == c: self.advance(len(c)) return True return False def consume_until(self, c): if callable(c): i = 0 while i < len(self.s) and not c(self.s[i]): i = i + 1 return self.advance(i) else: i = self.s.index(c) res = self.advance(i) self.advance(len(c)) return res @property def next(self): return self.s[0] def __bool__(self): return bool(self.s) __nonzero__ = __bool__ stream = Stream(spec) dtype, align = __dtype_from_pep3118(stream, is_subdtype=False) return dtype def __dtype_from_pep3118(stream, is_subdtype): field_spec = dict( names=[], formats=[], offsets=[], itemsize=0 ) offset = 0 common_alignment = 1 is_padding = False # Parse spec while stream: value = None # End of 
structure, bail out to upper level if stream.consume('}'): break # Sub-arrays (1) shape = None if stream.consume('('): shape = stream.consume_until(')') shape = tuple(map(int, shape.split(','))) # Byte order if stream.next in ('@', '=', '<', '>', '^', '!'): byteorder = stream.advance(1) if byteorder == '!': byteorder = '>' stream.byteorder = byteorder # Byte order characters also control native vs. standard type sizes if stream.byteorder in ('@', '^'): type_map = _pep3118_native_map type_map_chars = _pep3118_native_typechars else: type_map = _pep3118_standard_map type_map_chars = _pep3118_standard_typechars # Item sizes itemsize_str = stream.consume_until(lambda c: not c.isdigit()) if itemsize_str: itemsize = int(itemsize_str) else: itemsize = 1 # Data types is_padding = False if stream.consume('T{'): value, align = __dtype_from_pep3118( stream, is_subdtype=True) elif stream.next in type_map_chars: if stream.next == 'Z': typechar = stream.advance(2) else: typechar = stream.advance(1) is_padding = (typechar == 'x') dtypechar = type_map[typechar] if dtypechar in 'USV': dtypechar += '%d' % itemsize itemsize = 1 numpy_byteorder = {'@': '=', '^': '='}.get( stream.byteorder, stream.byteorder) value = dtype(numpy_byteorder + dtypechar) align = value.alignment else: raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s) # # Native alignment may require padding # # Here we assume that the presence of a '@' character implicitly implies # that the start of the array is *already* aligned. # extra_offset = 0 if stream.byteorder == '@': start_padding = (-offset) % align intra_padding = (-value.itemsize) % align offset += start_padding if intra_padding != 0: if itemsize > 1 or (shape is not None and _prod(shape) > 1): # Inject internal padding to the end of the sub-item value = _add_trailing_padding(value, intra_padding) else: # We can postpone the injection of internal padding, # as the item appears at most once extra_offset += intra_padding # Update common alignment common_alignment = _lcm(align, common_alignment) # Convert itemsize to sub-array if itemsize != 1: value = dtype((value, (itemsize,))) # Sub-arrays (2) if shape is not None: value = dtype((value, shape)) # Field name if stream.consume(':'): name = stream.consume_until(':') else: name = None if not (is_padding and name is None): if name is not None and name in field_spec['names']: raise RuntimeError("Duplicate field name '%s' in PEP3118 format" % name) field_spec['names'].append(name) field_spec['formats'].append(value) field_spec['offsets'].append(offset) offset += value.itemsize offset += extra_offset field_spec['itemsize'] = offset # extra final padding for aligned types if stream.byteorder == '@': field_spec['itemsize'] += (-offset) % common_alignment # Check if this was a simple 1-item type, and unwrap it if (field_spec['names'] == [None] and field_spec['offsets'][0] == 0 and field_spec['itemsize'] == field_spec['formats'][0].itemsize and not is_subdtype): ret = field_spec['formats'][0] else: _fix_names(field_spec) ret = dtype(field_spec) # Finished return ret, common_alignment def _fix_names(field_spec): """ Replace names which are None with the next unused f%d name """ names = field_spec['names'] for i, name in enumerate(names): if name is not None: continue j = 0 while True: name = 'f{}'.format(j) if name not in names: break j = j + 1 names[i] = name def _add_trailing_padding(value, padding): """Inject the specified number of padding bytes at the end of a dtype""" if value.fields is None: field_spec = dict( 
names=['f0'], formats=[value], offsets=[0], itemsize=value.itemsize ) else: fields = value.fields names = value.names field_spec = dict( names=names, formats=[fields[name][0] for name in names], offsets=[fields[name][1] for name in names], itemsize=value.itemsize ) field_spec['itemsize'] += padding return dtype(field_spec) def _prod(a): p = 1 for x in a: p *= x return p def _gcd(a, b): """Calculate the greatest common divisor of a and b""" while b: a, b = b, a % b return a def _lcm(a, b): return a // _gcd(a, b) * b # Exception used in shares_memory() class TooHardError(RuntimeError): pass class AxisError(ValueError, IndexError): """ Axis supplied was invalid. """ def __init__(self, axis, ndim=None, msg_prefix=None): # single-argument form just delegates to base class if ndim is None and msg_prefix is None: msg = axis # do the string formatting here, to save work in the C code else: msg = ("axis {} is out of bounds for array of dimension {}" .format(axis, ndim)) if msg_prefix is not None: msg = "{}: {}".format(msg_prefix, msg) super(AxisError, self).__init__(msg) def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs): """ Format the error message for when __array_ufunc__ gives up. """ args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] + ['{}={!r}'.format(k, v) for k, v in kwargs.items()]) args = inputs + kwargs.get('out', ()) types_string = ', '.join(repr(type(arg).__name__) for arg in args) return ('operand type(s) all returned NotImplemented from ' '__array_ufunc__({!r}, {!r}, {}): {}' .format(ufunc, method, args_string, types_string)) def _ufunc_doc_signature_formatter(ufunc): """ Builds a signature string which resembles PEP 457 This is used to construct the first line of the docstring """ # input arguments are simple if ufunc.nin == 1: in_args = 'x' else: in_args = ', '.join('x{}'.format(i+1) for i in range(ufunc.nin)) # output arguments are both keyword or positional if ufunc.nout == 0: out_args = ', /, out=()' elif ufunc.nout == 1: out_args = ', /, out=None' else: out_args = '[, {positional}], / [, out={default}]'.format( positional=', '.join( 'out{}'.format(i+1) for i in range(ufunc.nout)), default=repr((None,)*ufunc.nout) ) # keyword only args depend on whether this is a gufunc kwargs = ( ", casting='same_kind'" ", order='K'" ", dtype=None" ", subok=True" "[, signature" ", extobj]" ) if ufunc.signature is None: kwargs = ", where=True" + kwargs # join all the parts together return '{name}({in_args}{out_args}, *{kwargs})'.format( name=ufunc.__name__, in_args=in_args, out_args=out_args, kwargs=kwargs )
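As a rough illustration of what the PEP 3118 machinery above produces: 'T{...}' declares a structured item, and a leading '<' selects the standard-size little-endian map, so 'l' resolves to 'i4' and 'd' to 'f8'. This touches a private numpy module, so treat it as a sketch for understanding only; behaviour may differ between numpy versions.

# Illustrative use of the private parser defined above.
from numpy.core._internal import _dtype_from_pep3118

dt = _dtype_from_pep3118('T{<l:x:<d:y:}')
# Expected to be equivalent to numpy.dtype([('x', '<i4'), ('y', '<f8')])
print(dt)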
mit
-6,998,548,371,938,173,000
27.698939
82
0.526411
false
3.833304
false
false
false
Taifxx/xxtrep
context.addtolib/resources/lib/ext/base/tags.py
1
15746
# -*- coding: utf-8 -*- # # Copyright (C) 2011-2014 Martijn Kaijser # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ########## DEFINE TAGS: #### System param's ... ### Library folder name ... TAG_PAR_LIB_FOLDER = 'LIB' ### TMP folders names ... TAG_PAR_TMP = 'TMP' TAG_PAR_TMPA = 'TMPA' ### Addon ... TAG_PAR_SCRIPT_ID = 'context.addtolib' TAG_PAR_SERVICE_PY = 'service.py' TAG_PAR_ADDON_PY = 'context.py' TAG_PAR_COLORS_FILE = 'colors' ### Addon folders ... TAG_PAR_RESFOLDER = 'resources' TAG_PAR_BSFOLDER = 'bs' TAG_PAR_SKINSFOLDER = [TAG_PAR_RESFOLDER,'skins'] TAG_PAR_SPLASH_FILE = [TAG_PAR_RESFOLDER, TAG_PAR_BSFOLDER, 'splash.mp4'] ### RunScript's ... TAG_PAR_SERVICE = 'special://home/addons/%s/%s' % (TAG_PAR_SCRIPT_ID, TAG_PAR_SERVICE_PY) TAG_PAR_ADDON = 'special://home/addons/%s/%s' % (TAG_PAR_SCRIPT_ID, TAG_PAR_ADDON_PY) ### Strinsg XML (as default) ... TAG_PAR_STRINGSXML_PATH = [TAG_PAR_RESFOLDER,'language','english'] TAG_PAR_STRINGSXML_FILE = 'strings.xml' ### Dropbox API ... TAG_PAR_DROPBOX_LF = 'synclock' TAG_PAR_DROPBOX_LCODE = 'XX000000' TAG_PAR_DROPBOX_SYNC_FILE = 'vdbsync' TAG_PAR_DROPBOX_SYNC_T_FILE = 'vdbsync.tmp' TAG_PAR_DROPBOX_LI_FILE = 'libimg' TAG_PAR_DROPBOX_LI_T_FILE = 'libimg.tmp' TAG_PAR_DROPBOX_LI_S_FILE = 'libimg.sync' TAG_PAR_DROPBOX_CORR_FILE = 'corruption' TAG_PAR_DROPBOX_UID_FILE = 'uid' TAG_PAR_DROPBOX_SYNC_T_DIR = 'SYNC_TMP' TAG_PAR_DROPBOX_PATH = [TAG_PAR_RESFOLDER,'lib','dropbox'] TAG_PAR_DBXACCESSTOKEN_FILE = 'dropbox_access_token' TAG_PAR_DROPBOX_LISEPREC = '\n' TAG_PAR_DROPBOX_LISEPTM = '<**DBXTM**>' TAG_PAR_DROPBOX_MSGSEP = '#' TAG_PAR_DBXAPPKEY = 'cxa8c253kvoqbqd' TAG_PAR_DBXAPPSECRET = 'n7tx9emzji3aqnh' ### Addon work files ... TAG_PAR_TVSPACK_FILE = 'tvs.pack' TAG_PAR_TVSRAWFILE = 'tvs.eraw' TAG_PAR_STL_FILE = 'linktable' TAG_PAR_FSET_FILE = 'fset' TAG_PAR_PTYPETABLE_FILE = 'pttable' ### Addon work files (tmp) ... TAG_PAR_TVSUPD_FILE = 'tvsupd' TAG_PAR_TVSUPDNOW_FILE = 'updnow' #TAG_PAR_LOCKF = 'lock' TAG_PAR_STRARTF = 'lock_started' #TAG_PAR_STRARTAF = 'act' TAG_PAR_LAACTT = 'laactt' TAG_PAR_WS_FILE = 'watchsync' TAG_PAR_WS_TMP_FILE = 'watchsync.tmp' ### Video extensions ... TAG_PAR_VIDEOSEXT = ['.avi', '.mpeg', '.wmv', 'asf', '.flv', '.mkv', '.mka', '.mp4', '.m4a', '.aac', '.ogg', '.ogm', '.ram', '.rm', '.rv', '.ra', '.rmvb', '.3gp'] ### Backup files template ... TAG_PAR_SYSFLSTMPL = ['.strm', TAG_PAR_TVSPACK_FILE, TAG_PAR_TVSRAWFILE, TAG_PAR_STL_FILE, TAG_PAR_FSET_FILE, TAG_PAR_PTYPETABLE_FILE, TAG_PAR_TVSUPD_FILE, TAG_PAR_TVSUPDNOW_FILE, TAG_PAR_STRARTF, TAG_PAR_DROPBOX_SYNC_FILE, TAG_PAR_DBXACCESSTOKEN_FILE] TAG_PAR_DROPBOX_TMPL = ['.strm', TAG_PAR_TVSPACK_FILE, TAG_PAR_TVSRAWFILE, TAG_PAR_STL_FILE] ### Default tmpl ... 
TAG_PAR_TVSDEFSEASON = '01' TAG_PAR_SETDEF = 'Default' TAG_PAR_MNUCOLORFORMAT = '[COLOR %s]%s[/COLOR]' TAG_PAR_COLORTAG = '##COLOR##' TAG_PAR_ADDONLABEL_TMPL = '<string id="29999">%s</string>' TAG_PAR_ADDONLABEL_PATT = TAG_PAR_ADDONLABEL_TMPL % ('(.*)') TAG_PAR_ADDONLABEL = TAG_PAR_ADDONLABEL_TMPL % ('ADD to [COLOR %s]Lib[/COLOR]') TAG_PAR_LNPAGE = ' - (%s/%s)' TAG_PAR_LNSEP = ' > ' TAG_PAR_TTLQ = '%s ( %s ):' ### Zip ... TAG_PAR_ZIPCN = 'CN' TAG_PAR_ZIPST = 'atl.backup.' TAG_PAR_ZIPTMPL = TAG_PAR_ZIPST + '%s.%s.'+ TAG_PAR_ZIPCN + '.zip' ### XML TAG_PAR_XMLW_SELDLG = 'XDialogSelect.xml' TAG_PAR_XMLW_SELDLGSUB = 'XDialogSelectSub.xml' TAG_PAR_XMLW_OKDLG = 'XDialogOk.xml' TAG_PAR_XMLW_YESNODLG = 'XDialogYesNo.xml' TAG_PAR_XMLW_RESUMEDLG = 'XDialogResume.xml' TAG_PAR_XMLW_NOWPLAYDLG = 'XDialogNowPlay.xml' TAG_PAR_XMLW_DROPBOX = 'Dropbox.xml' ### Help ... TAG_PAG_HELPXML = 'DialogHelp.xml' TAG_PAR_HELPFILE = 'help.' TAG_PAR_HELPPATH = [TAG_PAR_RESFOLDER, 'help'] ### Time ... TAG_PAR_TIMENUMFORMAT = '{:0>2}' TAG_PAR_TIMESEP = ':' ### URL ... TAG_PAR_CALLURLTMPL = 'plugin://%s//?#strmtype=#%s&#strmfile=#%s&#strmurl=#' TAG_PAR_REPFN = '%s' TAG_PAR_ACTION = 'action=' TAG_PAR_IGNOREST = 'ignorestarted' ### tvs.pack separators ... TAG_PAR_TVSPACK_LSEP = '<**LSTSEP**>' TAG_PAR_TVSPACK_SSEP = '<**SRCSEP**>' TAG_PAR_TVSPACK_FSEP = '<**FRCSEP**>' TAG_PAR_TVSPACK_ESEP = '<**EPSSEP**>' TAG_PAR_TVSPACK_PSEP = '<**PRTSEP**>' TAG_PAR_TVSPACK_VERSEP = '<**VERSIONSEP**>' TAG_PAR_TVSPACK_VERSION = '10015' ### Containers starts with ... TAG_CON_STARTSW_EXT = 'plugin:' TAG_CON_STARTSW_VID = 'videodb:' TAG_CON_STARTSW_PVD = 'playlistvideo:' #### Const Tags ... ### Default ... DEFAULT = 10000 ### Types ... TAG_TYP_ALL = 10001 TAG_TYP_MOV = 10002 TAG_TYP_TVS = 10003 TAG_TYP_SRC = 10004 TAG_TYP_FOLDER = 10005 TAG_TYP_PREFILE = 10006 TAG_TYP_FILE = 10007 ### Containers ... TAG_CON_LOCAL = 10071 TAG_CON_EXT = 10072 TAG_CON_VID = 10073 TAG_CON_PVD = 10074 ### Condidions ... TAG_CND_FOUND = 10075 TAG_CND_NOTFOUND = 10076 TAG_CND_LISTEMPTY = 10077 TAG_CND_NEWSRC = 10078 TAG_CND_OLDSRC = 10079 TAG_CND_NOUPD = 10080 TAG_CND_NEWFRC = 10081 TAG_CND_OLDFRC = 10082 TAG_CND_UPDPRC = 10083 TAG_CND_NOUPDPRC = 10084 TAG_CND_NOGL = 10085 TAG_CND_NOACTION = 10086 TAG_CND_PLAY = 10087 TAG_CND_DBXNOAUTH = 10088 TAG_CND_NOTISMOV = 10089 TAG_CND_ISMOV = 10090 ### Free actions ... TAG_ACT_LPRESET = 10200 TAG_ACT_SHADOWUPD = 10201 TAG_ACT_DONOTHING = 10202 TAG_ACT_CHCOLOR = 10203 TAG_ACT_RENAMER = 10204 TAG_ACT_BACKUP = 10205 TAG_ACT_REMBACK = 10206 TAG_ACT_RESTBACK = 10207 TAG_ACT_RESETTBU = 10208 TAG_ACT_AUTOBACKUP = 10209 TAG_ACT_RESKIN = 10210 TAG_ACT_DBXCONNECT = 10211 TAG_ACT_DBXDISCONNECT = 10212 TAG_ACT_SYNC = 10213 TAG_ACT_WATCHSYNC = 10214 TAG_ACT_STOPSRV = 10215 TAG_ACT_STARTSRV = 10216 #### Strings Tags ... ### Language ... TAG_LNG_ID = 30000 ### Menue ... 
TAG_MNU_MOV = 30001 TAG_MNU_TVS = 30002 TAG_MNU_TVSU = 30003 TAG_MNU_OPEN = 30004 TAG_MNU_RESCAN = 30005 TAG_MNU_REMSRC = 30006 TAG_MNU_RESTORE = 30007 TAG_MNU_DELETE = 30008 TAG_MNU_VIDLIBU = 30009 TAG_MNU_CHKNEW = 30010 TAG_MNU_JOIN = 30011 TAG_MNU_TVSREN = 30012 TAG_MNU_SRCREN = 30013 TAG_MNU_UPDMAN = 30014 TAG_MNU_ADDEXIST = 30015 TAG_MNU_ADDNEW = 30016 TAG_MNU_SM = 30017 TAG_MNU_SHOWALL = 30018 TAG_MNU_SRCMAN = 30019 TAG_MNU_TVSMAN = 30020 TAG_MNU_QR = 30021 TAG_MNU_QL = 30022 TAG_MNU_NEW = 30023 TAG_MNU_ADDFOL = 30024 TAG_MNU_SRE = 30025 TAG_MNU_UPDFOL = 30026 TAG_MNU_VIDLIBCLN = 30027 TAG_MNU_SHDIR = 30028 TAG_MNU_REBSTL = 30029 TAG_MNU_DEFNMMOV = 30030 TAG_MNU_NEWNMMOV = 30031 TAG_MNU_ATVSNM = 30032 TAG_MNU_ATVSNUMT = 30033 TAG_MNU_ATVSNUM = 30034 TAG_MNU_DEFNM = 30035 TAG_MNU_SEQNUM = 30036 TAG_MNU_SEANUM = 30037 TAG_MNU_STARTADD = 30038 TAG_MNU_ATVS = 30039 TAG_MNU_ATVSSERT = 30040 TAG_MNU_SERDEF = 30041 TAG_MNU_SERTPL = 30042 TAG_MNU_SEASON = 30043 TAG_MNU_RFROM = 30044 TAG_MNU_SFRBEGIN = 30045 TAG_MNU_ADVADD = 30046 TAG_MNU_CHKNEWGL = 30047 TAG_MNU_RESTOREALL = 30048 TAG_MNU_SMM = 30049 TAG_MNU_RAWADD = 30050 TAG_MNU_BRWSREN = 30051 TAG_MNU_CONTUPD = 30052 TAG_MNU_RESCANALLS = 30053 TAG_MNU_RESCANFULL = 30054 TAG_MNU_YES = 30055 TAG_MNU_NO = 30056 TAG_MNU_CLOSEDLG = 30057 TAG_MNU_ADVLSORT = 30058 TAG_MNU_ADVLSORTDOWN = 30059 TAG_MNU_ADVLSORTUP = 30060 TAG_MNU_EPSLISTCORR = 30061 TAG_MNU_NUMBCORR = 30062 TAG_MNU_PBTYPES = 30063 TAG_MNU_DBSYNC = 30064 TAG_MNU_DELMOV = 30065 TAG_MNU_DELTVS = 30066 TAG_MNU_REMARKALL = 30067 TAG_MNU_TVSSTALN = 30068 TAG_MNU_FOLDMODE = 30069 ### Static mnu ... TAG_MNU_MORE = 30090 TAG_MNU_BACKMAIN = 30091 TAG_MNU_OK = 30092 TAG_MNU_HELP = 30096 TAG_MNU_SET = 30097 TAG_MNU_BACK = 30098 TAG_MNU_CANCEL = 30099 ### Confirms ... TAG_CFR_RESCAN = 30071 TAG_CFR_REMSRC = 30072 TAG_CFR_RESTORE = 30073 TAG_CFR_DELETE = 30074 TAG_CFR_TVSREN = 30075 TAG_CFR_JOIN = 30076 TAG_CFR_CLEANVL = 30077 TAG_CFR_DEFNM = 30078 TAG_CFR_RESTOREALL = 30079 TAG_CFR_RESCANALLS = 30080 TAG_CFR_RESCANFULL = 30081 TAG_CFR_RENAMER = 30082 TAG_CFR_UNLOCK = 30083 TAG_CFR_REMBACK = 30084 TAG_CFR_RESTBACK = 30085 TAG_CFR_EXCLPLUG = 30086 ### Dialogs messages ... TAG_DLG_OK = 30100 TAG_DLG_NX = 30101 TAG_DLG_PR = 30102 TAG_DLG_INNM = 30103 TAG_DLG_INSE = 30104 TAG_DLG_NUMSKIP = 30105 TAG_DLG_SUPPRES = 30106 TAG_DLG_PBT1 = 30107 TAG_DLG_PBT2 = 30108 TAG_DLG_PBTAD1 = 30109 TAG_DLG_PBTAD2 = 30110 TAG_DLG_PBTADTIMEO = 30111 TAG_DLG_PBTADTCLAS = 30112 TAG_DLG_PBTADTISP = 30113 TAG_DLG_PBTADTFOLD = 30114 TAG_DLG_PBTT1 = 30115 TAG_DLG_PBTT2 = 30116 TAG_DLG_PBTT3 = 30117 TAG_DLG_PBTT4 = 30118 TAG_DLG_PBTT5 = 30119 TAG_DLG_PBTALT = 30120 TAG_DLG_PBTREM = 30121 TAG_DLG_NPINFO = 30122 TAG_DLG_NPINFRAT = 30123 TAG_DLG_NPINFSRC = 30124 TAG_DLG_NPINFPBT = 30125 TAG_DLG_NPDIRL = 30126 TAG_DLG_PBTTRAN = 30127 TAG_DLG_PBTTRANI = 30128 TAG_DLG_DBXP1 = 30129 TAG_DLG_DBXP2 = 30130 TAG_DLG_DBXP3 = 30131 TAG_DLG_DBXP4 = 30132 TAG_DLG_DBXP5 = 30133 TAG_DLG_DBXPEC = 30134 TAG_DLG_DBXPRGSMSGS = 30135 TAG_DLG_CORR1 = 30136 TAG_DLG_CORR2 = 30137 TAG_DLG_CORR3 = 30138 TAG_DLG_CORR_FORCE = 30139 TAG_DLG_CORR_UNL = 30140 TAG_DLG_MOVIEDEL = 30141 TAG_DLG_TVSDEL = 30142 TAG_DLG_SCLNDB = 30143 TAG_DLG_SREMEF = 30144 TAG_DLG_LOCKSYQ = 30145 TAG_DLG_RENM = 30146 TAG_DLG_CURRTVS = 30147 TAG_DLG_EXCLADDON = 30148 ### Titles ... 
TAG_TTL_NM = 30150 TAG_TTL_ENTNAME = 30151 TAG_TTL_CHSNAME = 30152 TAG_TTL_ADDTVS = 30153 TAG_TTL_NEWEPS = 30154 TAG_TTL_EXITVS = 30155 TAG_TTL_CHKUPD = 30156 TAG_TTL_ADDMOV = 30157 TAG_TTL_ENTNAMEM = 30158 TAG_TTL_ADVADD = 30159 TAG_TTL_RESTOREALL = 30160 TAG_TTL_CHKUPDGL = 30161 TAG_TTL_POSHLP = 30162 TAG_TTL_CAST = 30163 TAG_TTL_BRWSREN = 30164 TAG_TTL_BRWSRENEP = 30165 TAG_TTL_COLORIZE = 30166 TAG_TTL_SEASON = 30167 TAG_TTL_BACKUP = 30168 TAG_TTL_RESTBACK = 30169 TAG_TTL_RESTLIB = 30170 TAG_TTL_RESTRL = 30171 TAG_TTL_RESTUL = 30172 TAG_TTL_RESTCHK = 30173 TAG_TTL_BCKNM = 30174 TAG_TTL_RESTAT = 30175 TAG_TTL_RESTATC = 30176 TAG_TTL_RESTRTMP = 30177 TAG_TTL_PACK = 30178 TAG_TTL_REMOLDBCK = 30179 TAG_TTL_CLRERRDT = 30180 TAG_TTL_CLRERRD = 30181 TAG_TTL_HELP = 30182 TAG_TTL_MAINMNU = 30183 TAG_TTL_RESKIN = 30184 TAG_TTL_RAWADDEPS = 30185 TAG_TTL_SYNCAUTO = 30186 TAG_TTL_SYNCUP = 30187 TAG_TTL_SYNCDOWN = 30188 TAG_TTL_SYNCUNLOCK = 30189 TAG_TTL_SYNCSENDCH = 30190 TAG_TTL_DBXTTL = 30191 TAG_TTL_DBXOK = 30192 TAG_TTL_DBXCANCEL = 30193 TAG_TTL_DBXCOPY = 30194 TAG_TTL_DBXKEYB = 30195 TAG_TTL_DBXPASTE = 30196 TAG_TTL_DBXOPEN = 30197 TAG_TTL_SVIDDB = 30198 TAG_TTL_SWS = 30199 TAG_TTL_LOCKSY = 30200 ### Set ... TAG_SET_RENAMER = 30436 ### Ok messages ... TAG_ERR_OK = 30301 TAG_ERR_OK_MOVADD = 30302 TAG_ERR_OK_TVSADD = 30303 TAG_ERR_OK_TVSUPD = 30304 TAG_ERR_OK_RESCAN = 30305 TAG_ERR_OK_RESTOR = 30306 TAG_ERR_OK_REMSRC = 30307 TAG_ERR_OK_DELETE = 30308 TAG_ERR_OK_CHKNEW = 30309 TAG_ERR_OK_TVSREN = 30310 TAG_ERR_OK_SRCREN = 30311 TAG_ERR_OK_JOIN = 30312 TAG_ERR_OK_ADDFOL = 30313 TAG_ERR_OK_UPDFOL = 30314 TAG_ERR_OK_SETUPD = 30315 TAG_ERR_OK_VIDLIBU = 30316 TAG_ERR_OK_REBSTL = 30317 TAG_ERR_OK_RESTOREALL = 30318 TAG_ERR_OK_BRWSREN = 30319 TAG_ERR_OK_NEWFRC = 30320 TAG_ERR_OK_RESCANALLS = 30321 TAG_ERR_OK_RESCANFULL = 30322 TAG_ERR_OK_RENAMER = 30323 TAG_ERR_OK_BACKUP = 30324 TAG_ERR_OK_REMBACK = 30325 TAG_ERR_OK_RESTBACK = 30326 TAG_ERR_OK_NOBACK = 30327 TAG_ERR_OK_DBXSMAC = 30328 TAG_ERR_OK_DBXSMDL = 30329 TAG_ERR_OK_DBXSMUP = 30330 TAG_ERR_OK_DBXWSMAC = 30331 TAG_ERR_OK_DBXWSMDL = 30332 TAG_ERR_OK_DBXWSMUP = 30333 TAG_ERR_OK_SYNCUNLOCK = 30334 TAG_ERR_OK_MTVSDEL = 30335 TAG_ERR_OK_SYNCLOCK = 30336 TAG_ERR_OK_EPSREM = 30337 TAG_ERR_OK_EXCLUPLUG = 30338 ### Errors ... TAG_ERR_NOTFILE = 30201 TAG_ERR_INCINPUT = 30202 TAG_ERR_LISTEMPTY = 30203 TAG_ERR_ABORT = 30204 TAG_ERR_NOTOJOIN = 30205 TAG_ERR_DEDLINK = 30206 TAG_ERR_NONAME = 30207 TAG_ERR_NONAME2 = 30208 TAG_ERR_DEFEPS = 30209 TAG_ERR_BROKENLINK = 30210 TAG_ERR_BROKENLINK2 = 30211 TAG_ERR_LIB = 30212 TAG_ERR_LIBACT = 30213 TAG_ERR_LOCK = 30214 TAG_ERR_OL = 30215 TAG_ERR_BADZIP = 30216 TAG_ERR_NOBCKPATH = 30217 TAG_ERR_NOBCKPATHM = 30218 TAG_ERR_INCPBTYPE = 30219 TAG_ERR_NODBXCONNECT = 30220 TAG_ERR_DBXISLOCK = 30221 TAG_ERR_DBXRAISE = 30222 ### Other ... TAG_SET_RUN = 30479 TAG_SET_STOP = 30480
gpl-3.0
-7,829,866,278,117,454,000
31.262295
261
0.560658
false
2.624875
false
false
false
yingcuhk/LeetCode
Algorithms/#303 Range Sum Query - Immutable/PythonCode.py
1
1082
""" Given an integer array nums, find the sum of the elements between indices i and j (i ¡Ü j), inclusive. Example: Given nums = [-2, 0, 3, -5, 2, -1] sumRange(0, 2) -> 1 sumRange(2, 5) -> -1 sumRange(0, 5) -> -3 Note: You may assume that the array does not change. There are many calls to sumRange function. """ class NumArray(object): def __init__(self, nums): """ initialize your data structure here. :type nums: List[int] """ #self.nums = nums L = len(nums) CumSum = [0 for i in xrange(L+1)] for i in range(1,L+1): CumSum[i] = CumSum[i-1]+nums[i-1] #print CumSum self.CumSum = CumSum def sumRange(self, i, j): """ sum of elements nums[i..j], inclusive. :type i: int :type j: int :rtype: int """ return self.CumSum[j+1] - self.CumSum[i] # Your NumArray object will be instantiated and called as such: # numArray = NumArray(nums) # numArray.sumRange(0, 1) # numArray.sumRange(1, 2)
mit
6,948,932,555,707,017,000
22.042553
102
0.550832
false
3.145349
false
false
false
vascotenner/holoviews
holoviews/plotting/mpl/annotation.py
1
3913
import matplotlib from matplotlib import patches as patches from ...core.util import match_spec from ...core.options import abbreviated_exception from .element import ElementPlot class AnnotationPlot(ElementPlot): """ AnnotationPlot handles the display of all annotation elements. """ def __init__(self, annotation, **params): self._annotation = annotation super(AnnotationPlot, self).__init__(annotation, **params) self.handles['annotations'] = [] def initialize_plot(self, ranges=None): annotation = self.hmap.last key = self.keys[-1] ranges = self.compute_ranges(self.hmap, key, ranges) ranges = match_spec(annotation, ranges) axis = self.handles['axis'] opts = self.style[self.cyclic_index] with abbreviated_exception(): handles = self.draw_annotation(axis, annotation.data, opts) self.handles['annotations'] = handles return self._finalize_axis(key, ranges=ranges) def update_handles(self, key, axis, annotation, ranges, style): # Clear all existing annotations for element in self.handles['annotations']: element.remove() with abbreviated_exception(): self.handles['annotations'] = self.draw_annotation(axis, annotation.data, style) class VLinePlot(AnnotationPlot): "Draw a vertical line on the axis" style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible'] def draw_annotation(self, axis, position, opts): return [axis.axvline(position, **opts)] class HLinePlot(AnnotationPlot): "Draw a horizontal line on the axis" style_opts = ['alpha', 'color', 'linewidth', 'linestyle', 'visible'] def draw_annotation(self, axis, position, opts): "Draw a horizontal line on the axis" return [axis.axhline(position, **opts)] class TextPlot(AnnotationPlot): "Draw the Text annotation object" style_opts = ['alpha', 'color', 'family', 'weight', 'rotation', 'fontsize', 'visible'] def draw_annotation(self, axis, data, opts): (x,y, text, fontsize, horizontalalignment, verticalalignment, rotation) = data opts['fontsize'] = fontsize return [axis.text(x,y, text, horizontalalignment = horizontalalignment, verticalalignment = verticalalignment, rotation=rotation, **opts)] class ArrowPlot(AnnotationPlot): "Draw an arrow using the information supplied to the Arrow annotation" _arrow_style_opts = ['alpha', 'color', 'lw', 'linewidth', 'visible'] _text_style_opts = TextPlot.style_opts style_opts = sorted(set(_arrow_style_opts + _text_style_opts)) def draw_annotation(self, axis, data, opts): direction, text, xy, points, arrowstyle = data arrowprops = dict({'arrowstyle':arrowstyle}, **{k: opts[k] for k in self._arrow_style_opts if k in opts}) textopts = {k: opts[k] for k in self._text_style_opts if k in opts} if direction in ['v', '^']: xytext = (0, points if direction=='v' else -points) elif direction in ['>', '<']: xytext = (points if direction=='<' else -points, 0) return [axis.annotate(text, xy=xy, textcoords='offset points', xytext=xytext, ha="center", va="center", arrowprops=arrowprops, **textopts)] class SplinePlot(AnnotationPlot): "Draw the supplied Spline annotation (see Spline docstring)" style_opts = ['alpha', 'edgecolor', 'linewidth', 'linestyle', 'visible'] def draw_annotation(self, axis, data, opts): verts, codes = data patch = patches.PathPatch(matplotlib.path.Path(verts, codes), facecolor='none', **opts) axis.add_patch(patch) return [patch]
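The classes above are the matplotlib-backend handlers for holoviews annotation elements (VLine, HLine, Text, Arrow, Spline). A hedged usage sketch follows, assuming a reasonably recent holoviews release where hv.extension is available; option syntax and backend registration details vary between versions, so this is illustrative only.

# Sketch only: composing annotation elements that the plot classes above
# render when the matplotlib backend is active.
import holoviews as hv
hv.extension('matplotlib')

curve = hv.Curve([(0, 0), (1, 1), (2, 0.5)])
annotated = curve * hv.VLine(1.0) * hv.Text(1.1, 0.9, 'peak')
# Rendering 'annotated' dispatches VLine and Text to VLinePlot and TextPlot.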
bsd-3-clause
-3,434,174,948,459,445,000
34.899083
92
0.620751
false
4.131996
false
false
false
Dacelonid/gerrymander
gerrymander/reports.py
1
49794
# # Copyright (C) 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import prettytable import logging import time import re import json import sys import xml.dom.minidom from gerrymander.operations import OperationQuery from gerrymander.model import ModelApproval from gerrymander.format import format_date from gerrymander.format import format_delta from gerrymander.format import format_title from gerrymander.format import format_color LOG = logging.getLogger(__name__) class ReportOutputColumn(object): ALIGN_LEFT = "l" ALIGN_RIGHT = "r" ALIGN_CENTER = "c" def __init__(self, key, label, mapfunc, sortfunc=None, format=None, truncate=0, align=ALIGN_LEFT, visible=True): self.key = key self.label = label self.mapfunc = mapfunc self.sortfunc = sortfunc self.format = format self.truncate = truncate self.align = align self.visible = visible def get_value(self, report, row): val = self.mapfunc(report, self.key, row) if self.format is not None: val = self.format % val elif val is None: val = "" if type(val) != str: val = val.encode('utf-8') if self.truncate and len(val) > self.truncate: val = val[0:self.truncate] + "..." return val def get_sort_value(self, report, row): if self.sortfunc: return self.sortfunc(report, self.key, row) else: return self.mapfunc(report, self.key, row) class ReportOutput(object): DISPLAY_MODE_TEXT = "text" DISPLAY_MODE_CSV = "csv" DISPLAY_MODE_XML = "xml" DISPLAY_MODE_JSON = "json" def __init__(self, usecolor=False): super(ReportOutput, self).__init__() self.usecolor = usecolor def display(self, mode, stream=sys.stdout): if mode == ReportOutput.DISPLAY_MODE_TEXT: stream.write(self.to_text()) elif mode == ReportOutput.DISPLAY_MODE_CSV: stream.write(self.to_csv()) elif mode == ReportOutput.DISPLAY_MODE_XML: impl = xml.dom.minidom.getDOMImplementation() doc = impl.createDocument(None, "report", None) self.to_xml(doc, doc.documentElement) stream.write(doc.toprettyxml()) elif mode == ReportOutput.DISPLAY_MODE_JSON: doc = [] self.to_json(doc) stream.write(json.dumps(doc, indent=2) + "\n") else: raise Exception("Unknown display mode '%s'" % mode) def to_text(self): raise NotImplementedError("Subclass should implement the 'to_text' method") def to_csv(self): raise NotImplementedError("Subclass should implement the 'to_csv' method") def to_xml(self, root): raise NotImplementedError("Subclass should implement the 'to_xml' method") def to_json(self, root): raise NotImplementedError("Subclass should implement the 'to_json' method") class ReportOutputCompound(ReportOutput): def __init__(self): self.report = [] def add_report(self, report): self.report.append(report) def to_text(self): blocks = [] for report in self.report: blocks.append(report.to_text()) return "\n".join(blocks) def to_json(self, root): for report in self.report: report.to_json(root) def to_xml(self, doc, root): for report in self.report: report.to_xml(doc, root) class ReportOutputList(ReportOutput): def __init__(self, columns, title=None, usecolor=False): super(ReportOutputList, 
self).__init__(usecolor) self.columns = columns self.row = {} self.title = title def set_row(self, row): self.row = row def to_xml(self, doc, root): lst = doc.createElement("list") root.appendChild(lst) if self.title is not None: title = doc.createElement("title") title.appendChild(doc.createTextNode(self.title)) lst.appendChild(title) headers = doc.createElement("headers") content = doc.createElement("content") lst.appendChild(headers) lst.appendChild(content) for col in self.columns: if col.visible: xmlcol = doc.createElement(col.key) xmlcol.appendChild(doc.createTextNode(col.label)) headers.appendChild(xmlcol) for col in self.columns: if col.visible: xmlfield = doc.createElement(col.key) xmlfield.appendChild(doc.createTextNode(col.get_value(self, self.row))) content.appendChild(xmlfield) def to_json(self, root): headers = {} for col in self.columns: if col.visible: headers[col.key] = col.label content = {} for col in self.columns: if col.visible: content[col.key] = col.get_value(self, self.row) node = { "list": { "headers": headers, "content": content } } if self.title is not None: node["list"]["title"] = self.title root.append(node) def to_text(self): labels = [] width = 1 for col in self.columns: if col.visible: if len(col.label) > width: width = len(col.label) labels.append(col.label) fmt = " %" + str(width) + "s: %s" lines = [] for col in self.columns: if col.visible: line = fmt % (col.label, col.get_value(self, self.row)) lines.append(line) prolog = "" if self.title is not None: prolog = format_title(self.title) + "\n" return prolog + "\n".join(lines) + "\n" class ReportOutputTable(ReportOutput): def __init__(self, columns, sortcol, reverse, limit, title=None, usecolor=False): super(ReportOutputTable, self).__init__(usecolor) self.columns = list(columns) self.rows = [] self.sortcol = sortcol self.reverse = reverse self.limit = limit self.title = title def add_column(self, col): self.columns.append(col) def add_row(self, row): self.rows.append(row) def sort_rows(self): sortcol = None for col in self.columns: if col.key == self.sortcol: sortcol = col if sortcol is not None: self.rows.sort(key = lambda item: sortcol.get_sort_value(self, item), reverse=self.reverse) def to_xml(self, doc, root): self.sort_rows() table = doc.createElement("table") root.appendChild(table) if self.title is not None: title = doc.createElement("title") title.appendChild(doc.createTextNode(self.title)) table.appendChild(title) headers = doc.createElement("headers") content = doc.createElement("content") table.appendChild(headers) table.appendChild(content) for col in self.columns: if col.visible: xmlcol = doc.createElement(col.key) xmlcol.appendChild(doc.createTextNode(col.label)) headers.appendChild(xmlcol) rows = self.rows if self.limit is not None: rows = rows[0:self.limit] for row in rows: xmlrow = doc.createElement("row") for col in self.columns: if col.visible: xmlfield = doc.createElement(col.key) xmlfield.appendChild(doc.createTextNode(col.get_value(self, row))) xmlrow.appendChild(xmlfield) content.appendChild(xmlrow) return doc def to_json(self, root): self.sort_rows() headers = {} for col in self.columns: if col.visible: headers[col.key] = col.label content = [] rows = self.rows if self.limit is not None: rows = rows[0:self.limit] for row in rows: data = {} for col in self.columns: if col.visible: data[col.key] = col.get_value(self, row) content.append(data) node = { "table": { "headers": headers, "content": content } } if self.title is not None: node["table"]["title"] = self.title root.append(node) 
def to_text(self): self.sort_rows() labels = [] for col in self.columns: if col.visible: labels.append(col.label) table = prettytable.PrettyTable(labels) for col in self.columns: table.align[col.label] = col.align table.padding_width = 1 rows = self.rows if self.limit is not None: rows = rows[0:self.limit] for row in rows: data = [] for col in self.columns: if col.visible: data.append(col.get_value(self, row)) table.add_row(data) prolog = "" if self.title is not None: prolog = format_title(self.title) + "\n" return prolog + str(table) + "\n" def to_csv(self): self.sort_rows() labels = [] for col in self.columns: if col.visible: labels.append(col.label) lines = [] if self.title is not None: lines.append(self.title) lines.append(",".join(labels)) rows = self.rows if self.limit is not None: rows = rows[0:self.limit] for row in rows: data = [] for col in self.columns: if col.visible: data.append(col.get_value(self, row)) lines.append(",".join(data)) return "\n".join(lines) class Report(object): def __init__(self, client): self.client = client def generate(self): raise NotImplementedError("Subclass must override generate method") def display(self, mode): output = self.generate() output.display(mode) class ReportTable(Report): def __init__(self, client, columns, sort=None, reverse=False): super(ReportTable, self).__init__(client) self.columns = columns self.limit = None self.set_sort_column(sort, reverse) def get_columns(self): return self.columns def get_column(self, key): for col in self.columns: if col.key == key: return col return None def has_column(self, key): col = self.get_column(key) if col is None: return False return True def set_sort_column(self, key, reverse=False): got = False for col in self.columns: if col.key == key: got = True if not got: raise Exception("Unknown sort column %s" % key) self.sort = key self.reverse = reverse def set_data_limit(self, limit): self.limit = limit def new_table(self, title=None): return ReportOutputTable(self.columns, self.sort, self.reverse, self.limit, title, self.usecolor) class ReportPatchReviewStats(ReportTable): def user_mapfunc(rep, col, row): return row[0] def team_mapfunc(rep, col, row): return row[2] def review_mapfunc(rep, col, row): return row[1]['total'] def ratio_mapfunc(rep, col, row): plus = float(row[1]['votes']['flag-p2'] + row[1]['votes']['flag-p1']) minus = float(row[1]['votes']['flag-m2'] + row[1]['votes']['flag-m1']) ratio = (plus / (plus + minus)) * 100 return ratio def vote_mapfunc(rep, col, row): return row[1]['votes'][col] COLUMNS = [ ReportOutputColumn("user", "User", user_mapfunc, align=ReportOutputColumn.ALIGN_LEFT), ReportOutputColumn("team", "Team", team_mapfunc, align=ReportOutputColumn.ALIGN_LEFT), ReportOutputColumn("reviews", "Reviews", review_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT), ReportOutputColumn("flag-m2", "-2", vote_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT), ReportOutputColumn("flag-m1", "-1", vote_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT), ReportOutputColumn("flag-p1", "+1", vote_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT), ReportOutputColumn("flag-p2", "+2", vote_mapfunc, align=ReportOutputColumn.ALIGN_RIGHT), ReportOutputColumn("ratio", "+/-", ratio_mapfunc, format="%0.0lf%%", align=ReportOutputColumn.ALIGN_RIGHT), ] def __init__(self, client, projects, maxagedays=30, teams={}, usecolor=False): super(ReportPatchReviewStats, self).__init__(client, ReportPatchReviewStats.COLUMNS, sort="reviews", reverse=True) self.projects = projects self.teams = teams self.maxagedays = maxagedays 
self.usecolor = usecolor def generate(self): # We could query all projects at once, but if we do them # individually it means we get better hit rate against the # cache if the report is re-run for many different project # combinations reviews = [] cutoff = time.time() - (self.maxagedays * 24 * 60 * 60) for project in self.projects: query = OperationQuery(self.client, { "project": [project], }, patches=OperationQuery.PATCHES_ALL, approvals=True) def querycb(change): for patch in change.patches: for approval in patch.approvals: if approval.is_newer_than(cutoff): reviews.append(approval) query.run(querycb) reviewers = {} for review in reviews: if review.action != ModelApproval.ACTION_REVIEWED or review.user is None: continue reviewer = review.user.username if reviewer is None: reviewer = review.user.name if reviewer is None: continue if reviewer.lower() in ["jenkins", "smokestack"]: continue reviewers.setdefault(reviewer, { 'votes': {'flag-m2': 0, 'flag-m1': 0, 'flag-p1': 0, 'flag-p2': 0}, 'total': 0, }) reviewers[reviewer]['total'] = reviewers[reviewer]['total'] + 1 votes = { "-2" : "flag-m2", "-1" : "flag-m1", "1" : "flag-p1", "2" : "flag-p2" } cur = reviewers[reviewer]['votes'][votes[str(review.value)]] reviewers[reviewer]['votes'][votes[str(review.value)]] = cur + 1 compound = ReportOutputCompound() table = self.new_table("Review statistics") compound.add_report(table) for user, votes in reviewers.items(): userteam = "" for team in self.teams.keys(): if user in self.teams[team]: userteam = team table.add_row([user, votes, userteam]) summary = ReportOutputList([ ReportOutputColumn("nreviews", "Total reviews", format="%d", mapfunc=lambda rep, col, row: row[0]), ReportOutputColumn("nreviewers", "Total rviewers", format="%d", mapfunc=lambda rep, col, row: row[1]) ], title="Review summary") summary.set_row([len(reviews), len(reviewers.keys())]) compound.add_report(summary) return compound class ReportPatchReviewRate(ReportTable): def user_mapfunc(rep, col, row): return row[0] def team_mapfunc(rep, col, row): return row[1] def week_mapfunc(rep, col, row): if col not in row[2]: return 0.0 return (row[2][col] / 7.0) def total_mapfunc(rep, col, row): if col not in row[2]: return 0.0 return (row[2][col] / (52.0 * 7.0)) COLUMNS = [ ReportOutputColumn("user", "User", user_mapfunc, align=ReportOutputColumn.ALIGN_LEFT), ReportOutputColumn("team", "Team", team_mapfunc, align=ReportOutputColumn.ALIGN_LEFT), ReportOutputColumn("total", "Total", total_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week1", "1 week", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week2", "2 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week3", "3 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week4", "4 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week5", "5 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week6", "6 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week7", "7 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week8", "8 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week9", "9 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week10", "10 weeks", 
week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week11", "11 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week12", "12 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week13", "13 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week14", "14 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week15", "15 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week16", "16 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week17", "17 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week18", "18 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week19", "19 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week20", "20 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week21", "21 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week22", "22 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week23", "23 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week24", "24 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week25", "25 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week26", "26 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week27", "27 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week28", "28 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week29", "29 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week30", "30 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week31", "31 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week32", "32 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week33", "33 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week34", "34 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week35", "35 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week36", "36 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week37", "37 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week38", "38 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week39", "39 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week40", "40 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week41", "41 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week42", "42 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, 
format="%0.2f"), ReportOutputColumn("week43", "43 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week44", "44 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week45", "45 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week46", "46 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week47", "47 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week48", "48 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week49", "49 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week50", "50 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week51", "51 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ReportOutputColumn("week52", "52 weeks", week_mapfunc, align=ReportOutputColumn.ALIGN_LEFT, format="%0.2f"), ] def __init__(self, client, projects, teams={}, usecolor=False): super(ReportPatchReviewRate, self).__init__(client, ReportPatchReviewRate.COLUMNS, sort="total", reverse=True) self.projects = projects self.teams = teams self.usecolor = usecolor def generate(self): # We could query all projects at once, but if we do them # individually it means we get better hit rate against the # cache if the report is re-run for many different project # combinations reviewers = {} now = time.time() for project in self.projects: query = OperationQuery(self.client, { "project": [project], }, patches=OperationQuery.PATCHES_ALL, approvals=True) def querycb(change): for patch in change.patches: for approval in patch.approvals: if approval.action == ModelApproval.ACTION_VERIFIED: continue user = approval.user if user is None or user.username is None: continue username = user.username if username not in reviewers: reviewers[username] = { "total": 0} agesecs = approval.get_age(now) ageweeks = int(agesecs / (60 * 60 * 24 * 7)) + 1 key = "week%d" % ageweeks if key not in reviewers[username]: reviewers[username][key] = 0 reviewers[username][key] = reviewers[username][key] + 1 if ageweeks <= 52: reviewers[username]["total"] = reviewers[username]["total"] + 1 query.run(querycb) table = self.new_table("Daily review rates per week") for reviewer in reviewers.keys(): userteam = "" for team in self.teams.keys(): if reviewer in self.teams[team]: userteam = team table.add_row([reviewer, userteam, reviewers[reviewer]]) return table class ReportBaseChange(ReportTable): @staticmethod def get_approval_votes(patch): # Yes, the numbers are slightly odd order # A +2 or -2 more important than any -1 or +1 # so we prefer them as the summary value levels = ["-2", "2", "-1", "1"] votes = { "c": { "total": collections.defaultdict(int), "list": [], "summary": "", "details": "", }, "v": { "total": collections.defaultdict(int), "list": [], "summary": "", "details": "", }, "w": { "total": collections.defaultdict(int), "list": [], "summary": "", "details": "", }, } for approval in patch.approvals: got_type = approval.action[0:1].lower() if got_type not in votes: continue vote = str(approval.value) votes[got_type]["total"][vote] = votes[got_type]["total"][vote] + 1 votes[got_type]["list"].append(vote) for key in votes.keys(): votes[key]["details"] = ",".join(votes[key]["list"]) vals = [] for level in levels: if level in votes[key]["total"]: 
votes[key]["summary"] = level break return votes def approvals_mapfunc(rep, col, row): patch = row.get_current_patch() if patch is None: LOG.error("No patch") return "" votes = ReportBaseChange.get_approval_votes(patch) keys = list(votes.keys()) keys.sort(reverse=True) data = " ".join(map(lambda val: "%s=%s" % (val, votes[val]["details"]), keys)) if rep.usecolor: if votes["w"]["total"]["1"] > 0: # Stuff pending merge return format_color(data, fg="blue", styles=["bold"]) elif votes["w"]["total"]["-1"] > 0: # Work-in-progress return format_color(data, fg="magenta", styles=[]) elif votes["c"]["total"]["-2"] > 0: # Hard-nack from core return format_color(data, fg="red", styles=["bold"]) elif votes["c"]["total"]["-1"] > 0 or votes["v"]["total"]["-1"] > 0: # Nack from any or bots return format_color(data, fg="red", styles=[]) elif votes["c"]["total"]["2"] > 0: # Approval from core return format_color(data, fg="green", styles=["bold"]) elif votes["c"]["total"]["1"] > 0: # Approval from any return format_color(data, fg="green", styles=[]) else: return data else: return data def votes_mapfunc(rep, col, row): patch = row.get_current_patch() if patch is None: LOG.error("No patch") return "" if col == "tests": coltype = "v" elif col == "reviews": coltype = "c" else: coltype = "w" votes = ReportBaseChange.get_approval_votes(patch) data = "%2s" % votes[coltype]["summary"] if rep.usecolor: if votes[coltype]["total"]["-2"] > 0: # Hard-nack from core return format_color(data, fg="red", styles=["bold"]) elif votes[coltype]["total"]["2"] > 0: # Approval from core return format_color(data, fg="green", styles=["bold"]) elif votes[coltype]["total"]["-1"] > 0: # Soft-nack from any return format_color(data, fg="red", styles=[]) elif votes[coltype]["total"]["1"] > 0: # Approval from any return format_color(data, fg="green", styles=[]) else: return data else: return data def user_mapfunc(rep, col, row): if not row.owner or not row.owner.username: return "<unknown>" return row.owner.username def date_mapfunc(rep, col, row): if col == "lastUpdated": return format_date(row.lastUpdated) else: return format_date(row.createdOn) def date_sortfunc(rep, col, row): if col == "lastUpdated": return row.lastUpdated else: return row.createdOn COLUMNS = [ ReportOutputColumn("status", "Status", lambda rep, col, row: row.status), ReportOutputColumn("topic", "Topic", lambda rep, col, row: row.topic, visible=False), ReportOutputColumn("url", "URL", lambda rep, col, row: row.url), ReportOutputColumn("owner", "Owner", user_mapfunc), ReportOutputColumn("project", "Project", lambda rep, col, row: row.project, visible=False), ReportOutputColumn("branch", "Branch", lambda rep, col, row: row.branch, visible=False), ReportOutputColumn("subject", "Subject", lambda rep, col, row: row.subject, truncate=30), ReportOutputColumn("createdOn", "Created", date_mapfunc, date_sortfunc), ReportOutputColumn("lastUpdated", "Updated", date_mapfunc, date_sortfunc), ReportOutputColumn("approvals", "Approvals", approvals_mapfunc, visible=False), ReportOutputColumn("tests", "Tests", votes_mapfunc), ReportOutputColumn("reviews", "Reviews", votes_mapfunc), ReportOutputColumn("workflow", "Workflow", votes_mapfunc), ] def __init__(self, client, usecolor=False): super(ReportBaseChange, self).__init__(client, ReportBaseChange.COLUMNS, sort="createdOn", reverse=False) self.usecolor = usecolor class ReportChanges(ReportBaseChange): def __init__(self, client, projects=[], owners=[], status=[], messages=[], branches=[], topics=[], reviewers=[], approvals=[], files=[], 
rawquery=None, usecolor=False): super(ReportChanges, self).__init__(client, usecolor) self.projects = projects self.owners = owners self.status = status self.messages = messages self.branches = branches self.topics = topics self.reviewers = reviewers self.approvals = approvals self.files = files self.rawquery = rawquery def generate(self): needFiles = False if len(self.files) > 0: needFiles = True query = OperationQuery(self.client, { "project": self.projects, "owner": self.owners, "message": self.messages, "branch": self.branches, "topic": self.topics, "status": self.status, "reviewer": self.reviewers, }, rawquery=self.rawquery, patches=OperationQuery.PATCHES_CURRENT, approvals=True, files=needFiles) def match_files(change): if len(self.files) == 0: return True for filere in self.files: for file in change.get_current_patch().files: if re.search(filere, file.path): return True return False table = self.new_table("Changes") def querycb(change): if match_files(change): table.add_row(change) query.run(querycb) return table class ReportToDoList(ReportBaseChange): def __init__(self, client, projects=[], branches=[], files=[], topics=[], reviewers=[], usecolor=False): super(ReportToDoList, self).__init__(client, usecolor) self.projects = projects self.branches = branches self.reviewers = reviewers self.files = files self.topics = topics def filter(self, change): return True def generate(self): needFiles = False if len(self.files) > 0: needFiles = True query = OperationQuery(self.client, { "project": self.projects, "status": [ OperationQuery.STATUS_OPEN ], "branch": self.branches, "topic": self.topics, "reviewer": self.reviewers, }, patches=OperationQuery.PATCHES_ALL, approvals=True, files=needFiles) def match_files(change): if len(self.files) == 0: return True for filere in self.files: for patch in change.patches: for file in patch.files: if re.search(filere, file.path): return True return False table = self.new_table("Changes To Do List") def querycb(change): if self.filter(change) and match_files(change): table.add_row(change) query.run(querycb) return table class ReportToDoListMine(ReportToDoList): def __init__(self, client, username, projects=[], branches=[], files=[], topics=[], usecolor=False): ''' Report to provide a list of changes 'username' has reviewed an older version of the patch, and needs to provide feedback on latest version ''' super(ReportToDoListMine, self).__init__(client, projects, reviewers=[ username ], branches=branches, files=files, topics=topics, usecolor=usecolor) self.username = username def filter(self, change): if (not change.has_current_reviewers([self.username]) and not change.has_owner([self.username])): return True return False class ReportToDoListOthers(ReportToDoList): def __init__(self, client, username, bots=[], projects=[], branches=[], files=[], topics=[], usecolor=False): ''' Report to provide a list of changes where 'username' has never reviewed, but at least one other non-bot user has provided review ''' super(ReportToDoListOthers, self).__init__(client, projects, reviewers=[ "!", username ], branches=branches, files=files, topics=topics, usecolor=usecolor) self.bots = bots def filter(self, change): # allchanges contains changes where 'username' has # not reviewed any version of the patch. We want to # filter out changes which only have bots, or have # no reviewers at all. 
if change.has_any_other_reviewers(self.bots): return True return False class ReportToDoListAnyones(ReportToDoList): def __init__(self, client, username, bots=[], projects=[], branches=[], files=[], topics=[], usecolor=False): ''' Report to provide a list of changes where at least one other non-bot user has provided review ''' super(ReportToDoListAnyones, self).__init__(client, projects, branches=branches, files=files, topics=topics, usecolor=usecolor) self.bots = bots self.username = username def filter(self, change): if change.has_current_reviewers([self.username]): return False if change.has_any_other_reviewers(self.bots): return True return False class ReportToDoListNoones(ReportToDoList): def __init__(self, client, bots=[], projects=[], branches=[], files=[], topics=[], usecolor=False): ''' Report to provide a list of changes that no one has ever reviewed ''' super(ReportToDoListNoones, self).__init__(client, projects, branches=branches, files=files, topics=topics, usecolor=usecolor) self.bots = bots def filter(self, change): if not change.has_any_other_reviewers(self.bots): return True return False class ReportToDoListApprovable(ReportToDoList): def __init__(self, client, username, strict, projects=[], branches=[], files=[], topics=[], usecolor=False): ''' Report to provide a list of changes that no one has ever reviewed ''' super(ReportToDoListApprovable, self).__init__(client, projects, branches=branches, files=files, topics=topics, usecolor=usecolor) self.username = username self.strict = strict def filter(self, change): if (change.has_current_approval(ModelApproval.ACTION_REVIEWED, 2) and not change.has_owner([self.username]) and not change.has_current_approval(ModelApproval.ACTION_WORKFLOW, -1) and not change.has_current_approval(ModelApproval.ACTION_WORKFLOW, 1) and not change.has_current_approval(ModelApproval.ACTION_REVIEWED, -2) and not change.has_current_reviewers([self.username])): if (self.strict and change.has_current_approval(ModelApproval.ACTION_REVIEWED, -1)): return False return True return False class ReportToDoListExpirable(ReportToDoList): def __init__(self, client, age=28, projects=[], branches=[], files=[], topics=[], usecolor=False): ''' Report to provide a list of changes that are stale and can potentially be expired ''' super(ReportToDoListExpirable, self).__init__(client, projects, branches=branches, files=files, topics=topics, usecolor=usecolor) self.age = age def filter(self, change): if change.get_current_reviewer_nack_age() > (self.age * 24 * 60 * 60): return True return False class ReportOpenReviewStats(ReportBaseChange): def __init__(self, client, projects, branch="master", topic="", days=7, usecolor=False): super(ReportOpenReviewStats, self).__init__(client, usecolor) self.projects = projects self.branch = branch self.topic = topic self.days = days @staticmethod def average_age(changes, ages): if len(changes) == 0: return 0 total = 0 for change in changes: total += ages[change] return format_delta(total / len(changes)) @staticmethod def median_age(changes, ages): if len(changes) == 0: return 0 total = 0 wantages = [] for change in changes: wantages.append(ages[change]) wantages.sort() return format_delta(wantages[int(len(wantages)/2)]) @staticmethod def older_than(changes, ages, cutoffdays): cutoff = cutoffdays * 24 * 60 * 60 older = 0 for change in changes: if ages[change] > cutoff: older = older + 1 return older @staticmethod def get_longest_changes(ids, changes, ages, count): want = [] for id in sorted(ids, key=lambda x: ages[x]): 
want.append(changes[id]) return want def generate(self): # We could query all projects at once, but if we do them # individually it means we get better hit rate against the # cache if the report is re-run for many different project # combinations agecurrent = {} agefirst = {} agenonnacked = {} wait_reviewer = [] wait_submitter = [] changes = {} for project in self.projects: query = OperationQuery(self.client, { "project": [project], "status": [OperationQuery.STATUS_OPEN], "branch": [self.branch], "topic": [self.topic], }, patches=OperationQuery.PATCHES_ALL, approvals=True) def querycb(change): if change.status != "NEW": return now = time.time() current = change.get_current_patch() first = change.get_first_patch() nonnacked = change.get_reviewer_not_nacked_patch() changes[change.id] = change if current.is_nacked(): wait_submitter.append(change.id) else: wait_reviewer.append(change.id) agecurrent[change.id] = current.get_age(now) agefirst[change.id] = first.get_age(now) if nonnacked: agenonnacked[change.id] = nonnacked.get_age(now) else: agenonnacked[change.id] = 0 query.run(querycb) compound = ReportOutputCompound() summary = ReportOutputList([ ReportOutputColumn("nreviews", "Total open reviews", format="%d", mapfunc=lambda rep, col, row: row[0] + row [1]), ReportOutputColumn("waitsubmitter", "Waiting on submitter", format="%d", mapfunc=lambda rep, col, row: row[0]), ReportOutputColumn("waitreviewer", "Waiting on reviewer", format="%d", mapfunc=lambda rep, col, row: row[1]), ], title="Review summary") summary.set_row([len(wait_submitter), len(wait_reviewer)]) compound.add_report(summary) lastrev = ReportOutputList([ ReportOutputColumn("average", "Average wait time", mapfunc=lambda rep, col, row: row[0]), ReportOutputColumn("median", "Median wait time", mapfunc=lambda rep, col, row: row[1]), ReportOutputColumn("stale", "Older than %d days" % self.days, format="%d", mapfunc=lambda rep, col, row: row[2]), ], title="Summary since current revision") lastrev.set_row([self.average_age(wait_reviewer, agecurrent), self.median_age(wait_reviewer, agecurrent), self.older_than(wait_reviewer, agecurrent, self.days)]) compound.add_report(lastrev) firstrev = ReportOutputList([ ReportOutputColumn("average", "Average wait time", mapfunc=lambda rep, col, row: row[0]), ReportOutputColumn("median", "Median wait time", mapfunc=lambda rep, col, row: row[1]), ], title="Summary since first revision") firstrev.set_row([self.average_age(wait_reviewer, agefirst), self.median_age(wait_reviewer, agefirst)]) compound.add_report(firstrev) nonnackedrev = ReportOutputList([ ReportOutputColumn("average", "Average wait time", mapfunc=lambda rep, col, row: row[0]), ReportOutputColumn("median", "Median wait time", mapfunc=lambda rep, col, row: row[1]), ], title="Summary since last revision without -1/-2 from reviewer") nonnackedrev.set_row([self.average_age(wait_reviewer, agenonnacked), self.median_age(wait_reviewer, agenonnacked)]) compound.add_report(nonnackedrev) def waitlastmap(rep, col, row): return format_delta(row.get_current_age()) def waitlastsort(rep, col, row): return row.get_current_age() waitlastrev = self.new_table("Longest waiting since current revision") waitlastrev.add_column(ReportOutputColumn("age", "Age", sortfunc=waitlastsort, mapfunc=waitlastmap)) waitlastrev.sortcol = "age" waitlastrev.reverse = True for change in self.get_longest_changes(wait_reviewer, changes, agecurrent, 5): waitlastrev.add_row(change) compound.add_report(waitlastrev) def waitfirstmap(rep, col, row): return 
format_delta(row.get_first_age()) def waitfirstsort(rep, col, row): return row.get_first_age() waitfirstrev = self.new_table("Longest waiting since first revision") waitfirstrev.add_column(ReportOutputColumn("age", "Age", sortfunc=waitfirstsort, mapfunc=waitfirstmap)) waitfirstrev.sortcol = "age" waitfirstrev.reverse = True for change in self.get_longest_changes(wait_reviewer, changes, agefirst, 5): waitfirstrev.add_row(change) compound.add_report(waitfirstrev) def waitnonnackedmap(rep, col, row): return format_delta(row.get_reviewer_not_nacked_age()) def waitnonnackedsort(rep, col, row): return row.get_reviewer_not_nacked_age() waitnonnackedrev = self.new_table("Longest waiting since last revision without -1/-2 from reviewer") waitnonnackedrev.add_column(ReportOutputColumn("age", "Age", sortfunc=waitnonnackedsort, mapfunc=waitnonnackedmap)) waitnonnackedrev.sortcol = "age" waitnonnackedrev.reverse = True for change in self.get_longest_changes(wait_reviewer, changes, agenonnacked, 5): waitnonnackedrev.add_row(change) compound.add_report(waitnonnackedrev) return compound
apache-2.0
1,719,606,987,100,816,600
37.750195
116
0.540989
false
4.298886
false
false
false
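The Gerrit review-report classes in the record above all funnel their output through the same pattern visible in to_text: collect the labels of the visible columns, build a prettytable, and append one row of mapped values per data row. A minimal, self-contained sketch of that pattern follows; the column names, data, and sort key are made up for illustration and are not taken from the report code itself.

import prettytable

# Hypothetical columns: (key, label, mapping function from a row dict)
columns = [
    ("user", "User", lambda row: row["user"]),
    ("reviews", "Reviews", lambda row: str(row["reviews"])),
]

rows = [{"user": "alice", "reviews": 12}, {"user": "bob", "reviews": 7}]

table = prettytable.PrettyTable([label for _, label, _ in columns])
for _, label, _ in columns:
    table.align[label] = "l"  # left-align every column, as the report columns do via col.align
for row in sorted(rows, key=lambda r: r["reviews"], reverse=True):
    table.add_row([mapfunc(row) for _, _, mapfunc in columns])
print(str(table))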
ebrensi/registry-frontend
ff.py
1
1240
#!/usr/bin/env python
# This script is for testing without having to host the flask app.
import folium
import pandas as pd
import os
from sqlalchemy import create_engine
import geojson

DATABASE_URL = os.environ["DATABASE_URL"]
STATES_GEOJSON_PATH = "static/us-states.json"

engine = create_engine(DATABASE_URL)
with engine.connect() as db:
    query = "Select state, count(*) From registry Group By state;"
    df = pd.read_sql_query(query, db)

with open(STATES_GEOJSON_PATH, "r") as file:
    gj = geojson.load(file)

# Folium choropleth requires a one-to-one correspondence between GeoJSON
# features (state definitions) and shade values, so we will make a new
# GeoJSON object that is a FeatureCollection of only the states that we
# have data for.
relevant_features = [feature for feature in gj["features"]
                     if ("id" in feature) and (feature["id"] in df["state"].values)]
gj_relevant = geojson.FeatureCollection(relevant_features)
geo_str = geojson.dumps(gj_relevant)

base_map = folium.Map([43, -100], zoom_start=5)
base_map.choropleth(
    geo_str=geo_str,
    data=df,
    columns=['state', 'count'],
    key_on='feature.id',
    fill_color='PuBuGn',
)
base_map.save("map.html")
mit
-3,929,384,207,766,329,000
25.956522
72
0.691935
false
3.246073
false
false
false
cattleio/stampede
docs/do-demo/deploy.py
1
6809
#!/usr/bin/env python import cattle import sys ZK_NODES = 3 REDIS_NODES = 3 API_SERVER_NODES = 3 PROCESS_SERVER_NODES = 3 AGENT_SERVER_NODES = 3 MYSQL_COMPUTE = 1 # Set if you want to override the cattle.jar in the Docker image with a custom one URL = '' TAG = 'latest' client = cattle.from_env() def wait(c): return client.wait_success(c, timeout=120) deleted = [] for c in client.list_container(removed_null=True): if c.name != 'Agent': client.delete(c) print 'Deleting', c.name deleted.append(c) print 'Waiting for deleting' for c in deleted: wait(c) print 'Done' def set_link(instance, name, target): instance = wait(instance) for link in instance.instanceLinks(): if link.linkName == name: print 'Linking {} to {}'.format(instance.name, target.name) wait(client.update(link, targetInstanceId=target.id)) def deploy_zk(): # Deploying ZK is complicated.... # Create dummy ZK to link against, then we will create the circle # We want it to be stopped so that ZooKeeper doesn't actually connect print 'Creating Dummy ZK node' zk_dummy = wait(client.create_container(imageUuid='docker:ibuildthecloud/zookeeper', name='zk_dummy')) zk_dummy = wait(zk_dummy.stop()) zks = [] for i in range(1, ZK_NODES + 1): links = {} for j in range(1, ZK_NODES + 1): if j != i: links['zk{}'.format(j)] = zk_dummy.id zk = client.create_container(imageUuid='docker:ibuildthecloud/zookeeper', name='zk{}'.format(i), environment={ 'ID': i }, instanceTriggeredStop='restart', instanceLinks=links) print 'Created', zk.name zks.append(wait(zk)) for zk_target in zks: for zk in zks: set_link(zk, zk_target.name, zk_target) client.delete(zk_dummy) return zks def deploy_redis(): print 'Create Redis' redises = [] for i in range(1, REDIS_NODES + 1): redis = client.create_container(imageUuid='docker:ibuildthecloud/redis', instanceTriggeredStop='restart', name='redis{}'.format(i)) print 'Created', redis.name redises.append(redis) return redises def haproxy(targets, name, listen_port): links = {} for i, c in enumerate(targets): links['TARGET{}'.format(i)] = wait(c).id return client.create_container(imageUuid='docker:ibuildthecloud/haproxy', instanceLinks=links, instanceTriggeredStop='restart', name=name, ports=['{}:80'.format(listen_port)]) zookeepers = deploy_zk() redises = deploy_redis() mysql = client.create_container(imageUuid='docker:ibuildthecloud/mysql', compute=MYSQL_COMPUTE, instanceTriggeredStop='restart', ports=['9082:80'], name='mysql') print 'Created', mysql.name graphite = client.create_container(imageUuid='docker:ibuildthecloud/graphite', instanceTriggeredStop='restart', ports=['9083:80'], name='graphite') print 'Created', graphite.name es = client.create_container(imageUuid='docker:ibuildthecloud/logstash', instanceTriggeredStop='restart', ports=['9200:9200'], name='logstash/elasticache') print 'Created', es.name kibana = client.create_container(imageUuid='docker:ibuildthecloud/kibana', name='Kibana', instanceTriggeredStop='restart', ports=['9081:80'], environment={ 'ES_PORT_9200_TCP_ADDR': wait(es).hosts()[0].ipAddresses()[0].address, 'ES_PORT_9200_TCP_PORT': '9200' }) print 'Created', kibana.name print 'Create Cattle' links = { 'gelf': wait(es).id, 'graphite': wait(graphite).id } instances = [] instances.extend(zookeepers) instances.extend(redises) instances.append(mysql) for c in instances: links[c.name] = wait(c).id api_servers = [] agent_servers = [] for i in range(1, API_SERVER_NODES + 1): c = client.create_container(imageUuid='docker:cattle/api-server:{}'.format(TAG), name='API Server {}'.format(i), environment={ 'URL': URL, 
'CATTLE_CATTLE_SERVER_ID': 'apiserver{}'.format(i) }, instanceTriggeredStop='restart', instanceLinks=links) print 'Created', c.name api_servers.append(c) for i in range(1, PROCESS_SERVER_NODES + 1): c = client.create_container(imageUuid='docker:cattle/process-server:{}'.format(TAG), name='Process Server {}'.format(i), environment={ 'URL': URL, 'CATTLE_JAVA_OPTS': '-Xmx1024m', 'CATTLE_CATTLE_SERVER_ID': 'processserver{}'.format(i) }, instanceTriggeredStop='restart', instanceLinks=links) print 'Created', c.name for i in range(1, AGENT_SERVER_NODES + 1): c = client.create_container(imageUuid='docker:cattle/agent-server:{}'.format(TAG), name='Agent Server {}'.format(i), environment={ 'URL': URL, 'CATTLE_JAVA_OPTS': '-Xmx1024m', 'CATTLE_CATTLE_SERVER_ID': 'agentserver{}'.format(i) }, instanceTriggeredStop='restart', instanceLinks=links) print 'Created', c.name agent_servers.append(c) h1 = haproxy(api_servers, 'Api Servers Load Balancer', 8080) print 'Created', h1.name h2 = haproxy(agent_servers, 'Agent Servers Load Balancer', 8081) print 'Created', h2.name wait(h1) wait(h2)
apache-2.0
-6,914,724,294,481,272,000
33.21608
107
0.500661
false
4.13921
false
false
false
Aydarkhan/cca
automata.py
1
5250
"""Copyright 2010 Aydarkhanov Ruslan, Kurochkin Ilya, Rusinov Ivan This file is part of CCA. CCA is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 2 of the License, or (at your option) any later version. CCA is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with CCA. If not, see http://www.gnu.org/licenses/. """ from state import * class Automata(object): def __init__(self, width=150, height=70, states=None): self.width = width self.height = height if states == None: self.states = [State("Dead", '-', "white", '0', [5]), State("Alive", '+', "black", '1', [0, 1, 4, 5, 6, 7, 8])] else: self.states = states self.symbols = {} self.st_sym = {} for num, st in enumerate(self.states): self.symbols[st.symbol] = num self.st_sym[st.symbol] = st self.field = [] for row in range(height): self.field.append([]) for col in range(width): self.field[row].append(self.states[0].symbol) def next_step(self): changed = [] for row in range(1, self.height - 1): for col in range(1, self.width - 1): symbol = self.field[row][col] num = 0 for vert in range(row - 1, row + 2): for horiz in range(col - 1, col + 2): if self.field[vert][horiz] == symbol: num += 1 if self.st_sym[symbol].next_state(num - 1): changed.append((row, col)) for row in range(1, self.height - 1): symbol1 = self.field[row][0] symbol2 = self.field[row][self.width - 1] num1 = 0 num2 = 0 for vert in range(row - 1, row + 2): for horiz in [0, 1, self.width - 1]: if self.field[vert][horiz] == symbol1: num1 += 1 for horiz in [self.width - 2, self.width - 1, 0]: if self.field[vert][horiz] == symbol2: num2 += 1 if self.st_sym[symbol1].next_state(num1 - 1): changed.append((row, 0)) if self.st_sym[symbol2].next_state(num2 - 1): changed.append((row, self.width - 1)) for col in range(1, self.width - 1): symbol1 = self.field[0][col] symbol2 = self.field[self.height - 1][col] num1 = 0 num2 = 0 for horiz in range(col - 1, col + 2): for vert in [0, 1, self.height - 1]: if self.field[vert][horiz] == symbol1: num1 += 1 for vert in [self.height - 2, self.height - 1, 0]: if self.field[vert][horiz] == symbol2: num2 += 1 if self.st_sym[symbol1].next_state(num1 - 1): changed.append((0, col)) if self.st_sym[symbol2].next_state(num2 - 1): changed.append((self.height - 1, col)) for row, col in [(0, 0), (self.height - 1, self.width - 1), (0, self.width - 1), (self.height - 1, 0)]: symbol = self.field[row][col] num = 0 for vert_long in range(row + self.height - 1, row + self.height + 2): for horiz_long in range(col + self.width - 1, col + self.width + 2): vert = vert_long % self.height horiz = horiz_long % self.width if self.field[vert][horiz] == symbol: num += 1 if self.st_sym[symbol].next_state(num - 1): changed.append((row, col)) for row, col in changed: index = (self.symbols[self.field[row][col]] + 1) % len(self.states) self.field[row][col] = self.states[index].symbol return changed def change_size(self, value, side): "0-up, 1-right, 2-down, 3-left" new_field = [] if side == 0: self.height += value for row in range(value): new_field.append([]) for col in range(self.width): new_field[row].append(self.states[0].symbol) init = value if value < 0: init = 0 for row in range(init, self.height): new_field.append([]) for col in 
range(self.width): new_field[row].append(self.field[row - value][col]) if side == 2: self.height += value term = value if value < 0: term = 0 for row in range(self.height - term): new_field.append([]) for col in range(self.width): new_field[row].append(self.field[row][col]) for row in range(self.height - term, self.height): new_field.append([]) for col in range(self.width): new_field[row].append(self.states[0].symbol) if side == 1: self.width += value term = value if value < 0: term = 0 for row in range(self.height): new_field.append([]) for col in range(self.width - term): new_field[row].append(self.field[row][col]) for row in range(self.height): for col in range(self.width - term, self.width): new_field[row].append(self.states[0].symbol) if side == 3: self.width += value for row in range(self.height): new_field.append([]) for col in range(value): new_field[row].append(self.states[0].symbol) init = value if value < 0: init = 0 for row in range(self.height): for col in range(init, self.width): new_field[row].append(self.field[row][col - value]) self.field = new_field
gpl-2.0
5,469,230,736,711,367,000
30.25
68
0.60781
false
2.924791
false
false
false
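The Automata class above implements a cyclic cellular automaton on a toroidal grid: each step counts like-symbol neighbours for every cell, asks the cell's State whether it should advance, and then rotates each changed cell to the next state's symbol. A rough usage sketch, assuming the file is importable as the automata module and that the accompanying state module provides the State class used in the default arguments, might look like this:

from automata import Automata

# Build a small field with the default two-state (Dead/Alive) rule set.
field = Automata(width=20, height=10)

# Seed a few live cells by writing the second state's symbol directly.
for row, col in [(4, 4), (4, 5), (4, 6)]:
    field.field[row][col] = field.states[1].symbol

# Advance the automaton a few generations; next_step() returns the
# coordinates of the cells that changed on that step.
for generation in range(5):
    changed = field.next_step()
    print("generation %d: %d cells changed" % (generation, len(changed)))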
nonamenix/yandex-vesna-generator
yandex_vesna_generator/vesna.py
1
2537
# -*- coding: utf-8 -*-
from lxml import etree
from slugify import slugify


class Entry(object):
    def __init__(self, title="", paragraphs=[], themes=[], **kwargs):
        self.title = title
        self.paragraphs = paragraphs
        self.themes = themes
        self.header_wrapper = kwargs.get("header_wrapper", "h2")
        self.paragraph_wrapper = kwargs.get("paragraph_wrapper", "p")
        self.slug = slugify(title, to_lower=True)
        self.description = self.paragraphs[0][0:kwargs.get("description_length", 220)]

    def render_html(self):
        html = self.header
        html += self.body
        return html

    @property
    def header(self):
        return "<%(wrapper)s>%(title)s</%(wrapper)s> \n" % {
            'title': self.title,
            'wrapper': self.header_wrapper
        }

    @property
    def body(self):
        return "".join(["<%(wrapper)s>%(text)s</%(wrapper)s> \n" % {
            "text": p,
            "wrapper": self.paragraph_wrapper
        } for p in self.paragraphs])

    def __repr__(self):
        return '<Entry theme="%s" id="%s">' % (", ".join(self.themes), hex(id(self)))

    def __getitem__(self, field):
        return self.__dict__[field]


class VesnaGenerator(object):
    """ Class for generating crazy text for your site """

    # Themes
    AVAILABLE_THEMES = [
        'astronomy', 'geology', 'gyroscope', 'literature', 'marketing',
        'mathematics', 'music', 'polit', 'agrobiologia', 'law', 'psychology',
        'geography', 'physics', 'philosophy', 'chemistry']

    def __init__(self, themes=[], entry_options={}):
        self.themes = [theme for theme in themes if theme in self.AVAILABLE_THEMES] or self.AVAILABLE_THEMES
        self.entry_options = entry_options

        # Generate yandex vesna url
        self.base_url = "http://referats.yandex.ru/referats/"
        self.url = self.base_url + "?t=" + "+".join(self.themes)
        self.entries = []

    def generate_entry(self):
        self.parser = etree.HTMLParser(recover=True)
        self.doc = etree.parse(self.url, self.parser)
        title = self.doc.xpath('/html/body/div[2]/div[1]/div[1]/div/div[2]/div[1]/strong')[0].text
        title = title.encode('utf-8').replace('Тема: «', '').replace('»', '').decode('utf-8')
        paragraphs = self.doc.xpath('/html/body/div[2]/div[1]/div[1]/div/div[2]/div[1]/p')
        return Entry(
            title=title,
            paragraphs=[p.text for p in paragraphs],
            themes=self.themes,
            **self.entry_options
        )
apache-2.0
-2,670,042,156,606,405,000
33.216216
108
0.575267
false
3.476648
false
false
false
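Entry in the generator above is just a small HTML-rendering record; VesnaGenerator.generate_entry scrapes referats.yandex.ru and wraps the scraped title and paragraphs in it. Constructing an Entry by hand (no network access, slugify installed) is enough to see what render_html produces. This is only a sketch of the intended use, and the import path is assumed from the file's location in the repository:

from yandex_vesna_generator.vesna import Entry

entry = Entry(
    title="Gyroscope theory",
    paragraphs=["First generated paragraph.", "Second generated paragraph."],
    themes=["gyroscope"],
)
print(entry.slug)           # roughly "gyroscope-theory"
print(entry.render_html())  # an <h2> header followed by one <p> per paragraph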
razorpay/razorpay-python
tests/test_client_utility.py
1
1858
import responses

from .helpers import mock_file, ClientTestCase
from razorpay.errors import SignatureVerificationError


class TestClientValidator(ClientTestCase):

    def setUp(self):
        super(TestClientValidator, self).setUp()

    @responses.activate
    def test_verify_payment_signature(self):
        sig = 'b2335e3b0801106b84a7faff035df56ecffde06918c9ddd1f0fafbb37a51cc89'
        parameters = {}
        parameters['razorpay_order_id'] = 'fake_order_id'
        parameters['razorpay_payment_id'] = 'fake_payment_id'
        parameters['razorpay_signature'] = sig
        self.assertEqual(
            self.client.utility.verify_payment_signature(parameters), True)

    @responses.activate
    def test_verify_payment_signature_with_exception(self):
        parameters = {}
        parameters['razorpay_order_id'] = 'fake_order_id'
        parameters['razorpay_payment_id'] = 'fake_payment_id'
        parameters['razorpay_signature'] = 'test_signature'

        self.assertRaises(
            SignatureVerificationError,
            self.client.utility.verify_payment_signature,
            parameters)

    @responses.activate
    def test_verify_webhook_signature(self):
        secret = self.client.auth[1]
        sig = 'd60e67fd884556c045e9be7dad57903e33efc7172c17c6e3ef77db42d2b366e9'
        body = mock_file('fake_payment_authorized_webhook')

        self.assertEqual(
            self.client.utility.verify_webhook_signature(body, sig, secret),
            True)

    @responses.activate
    def test_verify_webhook_signature_with_exception(self):
        secret = self.client.auth[1]
        sig = 'test_signature'
        body = ''

        self.assertRaises(
            SignatureVerificationError,
            self.client.utility.verify_webhook_signature,
            body,
            sig,
            secret)
mit
4,018,015,657,456,469,500
31.596491
80
0.653929
false
3.799591
true
false
false
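The tests above only exercise razorpay's utility helpers through the client object; the style of check they rely on is an HMAC-SHA256 over the order and payment identifiers compared against the supplied signature. The sketch below shows that general technique in isolation, not the library's actual implementation, and the "secret" key and identifiers are placeholders:

import hashlib
import hmac


def verify_signature(order_id, payment_id, signature, secret):
    # Recompute the expected HMAC-SHA256 digest and compare in constant time.
    message = ("%s|%s" % (order_id, payment_id)).encode("utf-8")
    expected = hmac.new(secret.encode("utf-8"), message, hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, signature)


# Usage mirroring the happy-path test above (values are placeholders).
good_sig = hmac.new(b"secret", b"fake_order_id|fake_payment_id", hashlib.sha256).hexdigest()
assert verify_signature("fake_order_id", "fake_payment_id", good_sig, "secret")
assert not verify_signature("fake_order_id", "fake_payment_id", "test_signature", "secret")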
nosuchtim/VizBench
src/PyLoopyCam/testit.py
1
5268
""" Copyright (c) 2015, Tim Thompson All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the name of Tim Thompson, nosuch.com, nor the names of any contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ import sys import time import traceback import thread import threading import copy import asyncore import asynchat import socket import sys import re import xml.dom.minidom as xmldom import string import pygame.pypm import os.path import os, pygame import pickle import random from os.path import isdir, isfile, isabs, abspath from urllib import quote, unquote from threading import * from ctypes import * from time import sleep from Queue import Queue, Empty from xml.sax import saxutils from xml.dom import Node from traceback import format_exc from dircache import listdir from pygame.locals import * from thread import * from ffff import * global debug debug = False global debugosc debugosc = False global debugosc2 debugosc2 = False class NthEventServer(Thread): """ Provides an event stream that can serve multiple listeners track of what fingers are currently down, smoothing drag motion, etc. """ oneServer = None def __init__(self): Thread.__init__(self) self.setDaemon(True) NthEventServer.oneServer = self print "NthEventServer.oneServer = ", NthEventServer.oneServer self.dispenser = PushedEventDispenser() self.throttle = 0.005 self.throttle = 0.0 self.inputs = {} self.outputs = {} self.cv = threading.Condition() self.events = {} self.firstevent = 0 self.nextevent = 0 self.osc_recipients = {"music":[], "graphic":[]} self.start() self.too_old_seconds = 30.0 self.event_inputs = {} self.forward_inputs = {} self.forward_finger = None self.tm0 = time.time() self.osc_count = 0 def send_osc(self, o, apptype): (msg_addr, msg_data) = o if msg_addr == "": print "No msg_addr value in send_osc?" 
return now = time.time() self.osc_count += 1 if now - self.tm0 > 1.0: print "OSC Per second = ", self.osc_count self.osc_count = 0 self.tm0 = now msg_addr = str(msg_addr) b = createBinaryMsg(msg_addr, msg_data) # print "createBinary msg_addr=",msg_addr," msg_data=",msg_data print("SHOULD BE sending %s OSC=%s" % (apptype, o.__str__())) # r.osc_socket.sendto(b, (r.osc_addr, r.osc_port)) def main(): debug = True httpaddr = "127.0.0.1" httpport = 7777 rootdir = None print "SYS.ARGV len=", len(sys.argv) argn = len(sys.argv) if len(sys.argv) == 1: print "NO arguments..." else: argn = 1 if sys.argv[argn] == "-d": debug = True print "Debug is True" argn += 1 else: debug = False argn += 1 for i in range(argn, len (sys.argv)): a = sys.argv[i] print("a = ", a) if a.startswith("rootdir:"): rootdir = abspath(a[8:]) elif a.startswith("httpaddr:"): httpaddr = a[9:] elif a.startswith("httpport:"): httpport = int(a[9:]) try: import os position = (-800, 0) position = (600, 360) os.environ['SDL_VIDEO_WINDOW_POS'] = str(position[0]) + "," + str(position[1]) pygame.init() width = 250 height = 500 flags = pygame.SRCALPHA # from panel import NthControlPanel # ui = NthControlPanel(width, height, flags) # time.sleep(1.0) # pygame.event.set_grab(True) try: ffff = Ffff("localhost",80) except: print "EXCEPT caught in creating Ffff! Exception=", format_exc() plugin = ffff.get_ffgl("Twisted") param = plugin.get_param("Twirl") # ffff.set_all_params(plugin,1.0) for nm in plugin.param: p = plugin.param[nm] val = random.random() % 1.0 ffff.change_plugin_param_val(plugin,p,val) except KeyboardInterrupt: print("KeyboardInterrupt received...\n"); # server.shutdown_quick() except: s = format_exc() if not re.search(".*shutdown_quick.*", s): print("Exception while running myserver?\n"); print(s) # server.shutdown_quick() if __name__ == '__main__': main() # import cProfile # cProfile.run('main()')
mit
7,051,323,283,669,740,000
24.205742
80
0.705201
false
3.26192
false
false
false
juntalis/aio-pika
docs/source/rabbitmq-tutorial/examples/3-publish-subscribe/receive_logs.py
1
1064
import asyncio
from aio_pika import connect, IncomingMessage, ExchangeType

loop = asyncio.get_event_loop()


def on_message(message: IncomingMessage):
    with message.process():
        print("[x] %r" % message.body)


async def main():
    # Perform connection
    connection = await connect("amqp://guest:guest@localhost/", loop=loop)

    # Creating a channel
    channel = await connection.channel()
    await channel.set_qos(prefetch_count=1)

    logs_exchange = await channel.declare_exchange(
        'logs',
        ExchangeType.FANOUT
    )

    # Declaring queue
    queue = await channel.declare_queue(exclusive=True)

    # Binding the queue to the exchange
    await queue.bind(logs_exchange)

    # Start listening the queue with name 'task_queue'
    queue.consume(on_message)


if __name__ == "__main__":
    loop = asyncio.get_event_loop()
    loop.create_task(main())

    # we enter a never-ending loop that waits for data and runs
    # callbacks whenever necessary.
    print(' [*] Waiting for logs. To exit press CTRL+C')
    loop.run_forever()
apache-2.0
1,176,001,167,043,564,000
24.95122
93
0.675752
false
3.8
false
false
false
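The receiver above is half of the fanout-logging tutorial; the matching publisher declares the same fanout exchange and publishes with an empty routing key, which fanout exchanges ignore. The sketch below assumes aio_pika's Message class and the Exchange.publish coroutine as used elsewhere in the same tutorial series; treat it as an outline rather than the verbatim companion file:

import asyncio
from aio_pika import connect, Message, ExchangeType


async def emit_log(text: str):
    connection = await connect("amqp://guest:guest@localhost/")
    channel = await connection.channel()

    logs_exchange = await channel.declare_exchange('logs', ExchangeType.FANOUT)

    # Fanout exchanges broadcast to every bound queue, so the routing key stays empty.
    await logs_exchange.publish(Message(body=text.encode()), routing_key='')
    await connection.close()


if __name__ == "__main__":
    asyncio.get_event_loop().run_until_complete(emit_log("Hello World!"))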
griddynamics/bunch
lettuce_bunch/dependencies.py
1
2875
# -*- coding: utf-8 -*-
# <Bunch - BDD test tool for Lettuce scenarios>
# Copyright (c) 2012 Grid Dynamics Consulting Services, Inc, All Rights Reserved
# http://www.griddynamics.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from exceptions import CyclicDependencySpecification
from topsort import topsort_levels, CycleError
from itertools import chain, tee, izip, product


def pairwise(iterable):
    a, b = tee(iterable)
    next(b)
    return izip(a, b)


def dependency_lists_to_pairs(dependency_lists):
    return chain(*(pairwise(dep_list) for dep_list in dependency_lists))


def dependency_groups_to_pairs(groups):
    return chain(*(product(a, b) for a, b in pairwise(groups)))


def split_solitaries(deps):
    solitaries = []
    linked = []
    for dep in deps:
        if len(dep) == 1 and len(dep[0]) > 0:
            solitaries.append(dep[0])
        else:
            linked.append(dep)
    return solitaries, linked


def filter_empties(deps):
    return filter(None, deps)


def combine_fixture_deps(deps):
    solitaries, linked = split_solitaries(filter_empties(deps))
    try:
        result = [sorted(group) for group in
                  topsort_levels(chain(*map(dependency_groups_to_pairs, linked)))]
        for solitary in solitaries:
            if solitary not in result:
                result.append(solitary)
    except CycleError as cycle_details:
        raise CyclicDependencySpecification(cycle_details)
    return result
gpl-3.0
8,455,682,489,359,907,000
39.666667
109
0.712348
false
3.869448
false
false
false
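combine_fixture_deps above flattens ordered dependency groups into precedence pairs and then topologically sorts them; the pair-generation step is the easiest piece to see in isolation. Below is a small worked example of dependency_groups_to_pairs restated with only the standard library (the original is Python 2 and uses izip; this sketch uses Python 3's zip so it runs without the bunch package):

from itertools import chain, product, tee


def pairwise(iterable):
    a, b = tee(iterable)
    next(b)
    return zip(a, b)


def dependency_groups_to_pairs(groups):
    return chain(*(product(a, b) for a, b in pairwise(groups)))


# Two ordered groups of fixtures: everything in the first group must
# precede everything in the second, which yields one edge per combination.
groups = [["db", "cache"], ["webserver"]]
print(list(dependency_groups_to_pairs(groups)))
# [('db', 'webserver'), ('cache', 'webserver')]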
tonyshardlow/reg_sde
run_pf.py
1
1560
from __future__ import (absolute_import, division,
                        print_function, unicode_literals)
exec(open("ground.py").read())
# mine
import hamiltonian
import diffeo
import sde
from utility import *
#
#
# all data defined in utility (exp2,...)
#
def run(dict):
    import os.path
    if 'fname' in dict:
        filename = dict['fname']
    else:
        print("No filename given")
        exit(1)
    print("filename: ", filename + dict['ext'])
    #
    G = hamiltonian.GaussGreen(dict['ell'], 0)
    no_steps = dict['no_steps']
    #
    SDE = sde.SDE(G)
    SDE.set_no_steps(no_steps)
    SDE.set_landmarks(dict['landmarks_n'])
    SDE.set_lam_beta(dict['lam'], dict['beta'], True)
    # plot a push-forward sample (with current shape)
    plot_setup()
    plt.axis('equal')
    plt.axis('off')
    Q0 = dict['landmarks'][0, :, :]
    D = SDE.sample_push_forward(Q0)
    D.plot_qpath_01(0)
    D.plot_warped_grid(10)
    plt.savefig(filename + dict['ext'] + '.pdf', bbox_inches='tight')
    print("...finished.")
#
####################################################################
if __name__ == "__main__":
    # do this
    plt.ion()
    noise_var = 0.2
    dict = exp1(noise_var)
    # dict = exp2(noise_var)
    # dict = exp4(noise_var)
    # dict = exp4(noise_var)
    dict['lam'] = 0.5
    scale = 1.0e1
    betas = np.array([1., 2., 4.0, 8.]) * scale
    exts = ['a_pf', 'b_pf', 'c_pf', 'd_pf']
    for i in range(4):
        print("=======")
        dict['beta'] = betas[i]
        dict['ext'] = exts[i]
        run(dict)
mit
5,528,959,925,701,617,000
25.857143
68
0.523077
false
3.035019
false
false
false
xbed/Mixly_Arduino
mixly_arduino/mpBuild/ESP32_MixGo/lib/mixgo.py
1
5214
from machine import Pin from machine import PWM from machine import ADC from machine import DAC from machine import I2C from machine import Timer from machine import RTC from machine import TouchPad import time from neopixel import NeoPixel def get_brightness(pin = 39): return ADCSensor(pin).read() def get_soundlevel(pin = 35): return ADCSensor(pin).read() # Button class Button: def __init__(self, pin): from machine import Pin self.pin = Pin(pin, Pin.IN) def get_presses(self, delay = 1): last_time, last_state, presses = time.time(), 0, 0 while time.time() < last_time + delay: time.sleep_ms(50) if last_state == 0 and self.pin.value() == 1: last_state = 1 if last_state == 1 and self.pin.value() == 0: last_state, presses = 0, presses + 1 return presses def is_pressed(self, flag = 0): return self.pin.value() == flag def was_pressed(self, flag = 0): last_state = self.pin.value() if flag: if not last_state: return False else: while self.pin.value(): time.sleep_ms(10) return True else: if last_state: return False else: while not self.pin.value(): time.sleep_ms(10) return True def irq(self, handler, trigger): self.pin.irq(handler = handler, trigger = trigger) # Pin class MyPin(Pin): def write_digital(self,val): self.init(Pin.OUT) self.value(val) def read_digital(self): self.init(Pin.IN) return self.value() def write_analog(self,val): id = int(str(self)[4:-1]) #unsafe! self = PWM(Pin(id),duty=val) def dac_write(self,val): id = int(str(self)[4:-1]) #unsafe! self = DAC(Pin(id)).write(val) def read_analog(self): id = int(str(self)[4:-1]) #unsafe! self = ADC(Pin(id)) return self.read() def set_frequency(self,val): id = int(str(self)[4:-1]) self = PWM(Pin(id),freq=val) def is_touched(self): id = int(str(self)[4:-1]) #unsafe! if id in (0,2,4,12,13,14,15,27,32,33): # print(TouchPad(Pin(id)).read()) return (TouchPad(Pin(id)).read() - 150 < 0) else: self.init(Pin.IN) return self.value() == 1 class Infrared(MyPin): def near(self): id = int(str(self)[4:-1]) #unsafe! 
pin15=Pin(15,Pin.OUT) pin15.value(1) adc=ADC(Pin(id)) adc.atten(ADC.ATTN_11DB) approximate =adc.read() pin15.value(0) return approximate # Servo class Servo: def __init__(self,pin): self.pin=pin def write_angle(self,angle): id = int(str(self.pin)[4:-1]) PWM(Pin(id),freq=50,duty=int(40 + 75 * angle / 180)) # Sonar class Sonar: def __init__(self, trig, echo): self.trig=Pin(trig, Pin.OUT) self.echo=Pin(echo, Pin.IN) def checkdist(self): self.trig.value(0) self.echo.value(0) self.trig.value(1) time.sleep_us(10) self.trig.value(0) while(self.echo.value()==0): pass t1 = time.ticks_us() while(self.echo.value()==1): pass t2 = time.ticks_us() return round(time.ticks_diff(t2, t1) / 10000 * 340 / 2, 2) class led: def __init__(self, pin, flag=1): self.val = flag self.pin = pin self.flag = flag def setbrightness(self,val): self.val = val if self.flag: PWM(Pin(self.pin)).duty(self.val) else: PWM(Pin(self.pin)).duty(1023 - self.val) def setonoff(self,val): if(val == -1): Pin(self.pin,Pin.OUT).value(1 - Pin(self.pin).value()) elif(val == 1): Pin(self.pin,Pin.OUT).value(self.flag) elif(val == 0): Pin(self.pin,Pin.OUT).value(1 - self.flag) def getonoff(self): if self.flag: return Pin(self.pin).value() else: return 1 - Pin(self.pin).value() class ADCSensor: def __init__(self,pin): self.adc=ADC(Pin(pin)) self.adc.atten(ADC.ATTN_11DB) def read(self): return self.adc.read() class RGB: def __init__(self, pin, num): self = NeoPixel(Pin(pin), num) def write(self,n,r,g,b): self[n] = (r, g, b) self.write() i2c = I2C(scl = Pin(22), sda = Pin(21), freq = 100000) buf = bytearray(1) rtc = RTC() tim = Timer(-1) try: i2c.readfrom_mem_into(0x68, 0X75, buf) except: pass else: if buf[0] == 0x71: from mpu9250 import * mpu = MPU9250(i2c) compass = Compass(mpu) button_a = Button(17) button_b = Button(16) led1 = led(pin = 0, flag = 0) led2 = led(pin = 5, flag = 0) infrared_left = Infrared(34) infrared_right = Infrared(36) touch1 = MyPin(32) touch2 = MyPin(33) touch3 = MyPin(25) touch4 = MyPin(26) rgb = NeoPixel(Pin(2), 2)
apache-2.0
-1,362,624,201,661,192,700
25.472081
69
0.528002
false
3.150453
false
false
false
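mixgo.py above is a MicroPython board-support module: it instantiates the buttons, LEDs and sensors at import time, so user code mostly just polls those predefined objects. A rough sketch of how they might be used on the MixGo board (assuming the module is on the board's filesystem and MicroPython's time.sleep_ms is available) is:

import time
from mixgo import button_a, button_b, led1, led2, get_brightness

while True:
    # Button A toggles the first LED; setonoff(-1) means "flip the current state".
    if button_a.was_pressed():
        led1.setonoff(-1)

    # Hold button B to switch the second LED on, release to switch it off.
    led2.setonoff(1 if button_b.is_pressed() else 0)

    print("ambient light:", get_brightness())
    time.sleep_ms(200)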
tea321000/django-project
musicsite/music/migrations/0002_auto_20170305_2121.py
1
1364
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-03-05 13:21
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('music', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='music',
            name='singer',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='Musician_singer', to='music.Musician'),
        ),
        migrations.AlterField(
            model_name='musician',
            name='birthday',
            field=models.DateTimeField(verbose_name='\u51fa\u751f\u65e5\u671f'),
        ),
        migrations.AlterField(
            model_name='musician',
            name='name',
            field=models.CharField(max_length=40, verbose_name='\u539f\u540d'),
        ),
        migrations.AlterField(
            model_name='musician',
            name='sex',
            field=models.CharField(choices=[('M', '\u7537'), ('F', '\u5973')], max_length=1, verbose_name='\u6027\u522b'),
        ),
        migrations.AlterField(
            model_name='musician',
            name='stagename',
            field=models.CharField(blank=True, max_length=40, null=True, verbose_name='\u827a\u540d'),
        ),
    ]
mit
-5,270,324,763,746,811,000
32.268293
145
0.579179
false
3.706522
false
false
false
mRokita/DPLib
dplib/server.py
1
47676
# DPLib - Asynchronous bot framework for Digital Paint: Paintball 2 servers # Copyright (C) 2017 Michał Rokita # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import re import select from collections import OrderedDict from enum import Enum from subprocess import Popen import asyncio import os from socket import socket, AF_INET, SOCK_DGRAM from time import time from dplib.parse import render_text, decode_ingame_text class ServerEvent(Enum): TIMEOUT = 0 CHAT = 1 ELIM = 2 RESPAWN = 3 MAPCHANGE = 4 DATE = 5 NAMECHANGE = 6 ENTRANCE = 7 FLAG_CAPTURED = 8 ELIM_TEAMS_FLAG = 9 ROUND_STARTED = 10 TEAM_SWITCHED = 11 DISCONNECT = 12 FLAG_GRAB = 13 FLAG_DROP = 14 ROUND_END = 15 GAMEMODE = 16 GAME_END = 17 class GameMode(Enum): CTF = 'CTF' ONE_FLAG = '1Flag' ELIMINATION = 'Elim' DEATHMATCH = 'DM' SIEGE = 'Siege' TDM = 'TDM' KOTH = 'KOTH' PONG = 'Pong' class BadRconPasswordError(Exception): pass class SecurityCheckError(Exception): pass class MapNotFoundError(Exception): pass class ListenerType(Enum): PERMANENT = 0 TRIGGER_ONCE = 1 REGEXPS = OrderedDict([ (re.compile('^\\[\d\d:\d\d:\d\d\\] (?:(?:\\[OBS\\] )|(?:\\[ELIM\\] ))?(.*?): (.*?)\r?\n'), ServerEvent.CHAT), # [19:54:18] hTml: test (re.compile( '^\\[\d\d:\d\d:\d\d\\] \\*(.*?) (?:\\((.*?)\\) eliminated \\*(.*?) \\((.*?)\\)\\.\r?\n|' 'eliminated ((?:himself)|(?:herself)) with a paintgren\\.\r?\n)'), ServerEvent.ELIM), # [18:54:24] *|ACEBot_1| (Spyder SE) eliminated *|herself| (Spyder SE). # [12:25:44] *whoa eliminated herself with a paintgren. # [12:26:09] *whoa eliminated himself with a paintgren. (re.compile('^\\[\d\d:\d\d:\d\d\\] \\*(.*?)\\\'s (.*?) revived!\r?\n'), ServerEvent.RESPAWN), # [19:03:57] *Red's ACEBot_6 revived! (re.compile('^\\[\d\d:\d\d:\d\d\\] (.*?) entered the game \\((.*?)\\) \\[(.*?)\\]\r?\n'), ServerEvent.ENTRANCE), # [19:03:57] mRokita entered the game (build 41) (re.compile('^\\[\d\d:\d\d:\d\d\\] \\*(.*?)\\\'s (.*?) returned the(?: \\*(.*?))? flag!\r?\n'), ServerEvent.FLAG_CAPTURED), # [18:54:24] *Red's hTml returned the *Blue flag! (re.compile('^\\[\d\d:\d\d:\d\d\\] \\*(.*?)\\\'s (.*?) earned (\d+) points for possesion of eliminated teams flag!\r?\n'), ServerEvent.ELIM_TEAMS_FLAG), # [19:30:23] *Blue's mRokita earned 3 points for possesion of eliminated teams flag! (re.compile('^\\[\d\d:\d\d:\d\d\\] Round started\\.\\.\\.\r?\n'), ServerEvent.ROUND_STARTED), # [10:20:11] Round started... (re.compile( '(?:^\\[\d\d:\d\d:\d\d\\] (.*?) switched from \\*((?:Red)|(?:Purple)|(?:Blue)|(?:Yellow))' ' to \\*((?:Red)|(?:Purple)|(?:Blue)|(?:Yellow))\\.\r?\n)|' '(?:^\\[\d\d:\d\d:\d\d\\] (.*?) joined the \\*((?:Red)|(?:Purple)|(?:Blue)|(?:Yellow)) team\\.\r?\n)|' '(?:^\\[\d\d:\d\d:\d\d\\] (.*?) is now (observing)?\\.\r?\n)'), ServerEvent.TEAM_SWITCHED), # [10:20:11] mRokita switched from Blue to Red. # [10:20:11] mRokita is now observing. # [10:20:11] mRokita is now observing. 
(re.compile('^\\[\d\d:\d\d:\d\d\\] [\t|-]{2}GameEnd[\t-](.*?)\r?\n'), ServerEvent.GAME_END), # [22:40:33] GameEnd 441.9 No winner # [22:40:33] GameEnd 1032.6 Red:23,Blue:22 # [22:40:33] GameEnd 4.9 DPBot01 wins! # [22:40:33] GameEnd 42.9 Yellow:5,Blue:0,Purple:0,Red:0 # [22:40:33] GameEnd 42.9 Yellow:5,Blue:12,Purple:7 (re.compile('^\\[\d\d:\d\d:\d\d\\] == Map Loaded: (.+) ==\r?\n'), ServerEvent.MAPCHANGE), # [10:20:11] == Map Loaded: airtime == (re.compile('^\\[\d\d:\d\d:\d\d\\] (.*?) changed name to (.*?)\\.\r?\n'), ServerEvent.NAMECHANGE), # [19:54:54] name1 changed name to name2. (re.compile('^\\[\d\d:\d\d:\d\d\\] (.*?) disconnected\\.\r?\n'), ServerEvent.DISCONNECT), # [19:03:57] whoa disconnected. (re.compile('^\\[\d\d:\d\d:\d\d\\] \\*(.*?) got the(?: \\*(.*?))? flag\\!\r?\n'), ServerEvent.FLAG_GRAB), # [19:03:57] *whoa got the *Red flag! (re.compile('^\\[\d\d:\d\d:\d\d\\] \\*(.*?) dropped the flag\\!\r?\n'), ServerEvent.FLAG_DROP), # [19:03:57] *whoa dropped the flag! (re.compile('^\\[\d\d:\d\d:\d\d\\] (.*?) team wins the round\\!\r?\n'), ServerEvent.ROUND_END), # [14:38:50] Blue team wins the round! (re.compile('^\\[\d\d:\d\d:\d\d\\] === ((?:Deathmatch)|(?:Team Flag CTF)|(?:Single Flag CTF)|(?:Team Siege)|(?:Team Elim)|(?:Team Siege)|(?:Team Deathmatch)|(?:Team KOTH)|(?:Pong)) ===\r?\n'), ServerEvent.GAMEMODE), # [09:58:11] === Team Flag CTF === # [13:16:19] === Team Siege === # [21:53:54] === Pong === # [12:21:05] === Deathmatch === ]) class Player(object): """ Player info from sv players command :Attributes: * dplogin - dplogin.com account id, None when Player has no account * nick - nickname: * build - game build * server - an instance of :class:`Server` """ def __init__(self, server, id, dplogin, nick, build): self.server = server self.id = id self.dplogin = dplogin self.nick = nick self.build = build class Server(object): """ Represents a DP:PB2 server :param hostname: Server hostname, for example '127.0.0.1' :type hostname: str :param port: Server port, default 27910 :type port: int :param logfile: Path to logfile :param rcon_password: rcon password :param pty_master: Master of the dp2 process (useful only if you want to run the server from your Python script). Go to the getting started section for details. 
:type pty_master: int :param init_vars: Send come commands used for security """ def __init__(self, hostname, port=27910, logfile=None, rcon_password=None, pty_master=None, init_vars=True): self.__rcon_password = rcon_password self.__hostname = hostname self.__init_vars = init_vars self.__port = port self.__log_file = None self.__is_secure = False self.__alive = False self.__logfile_name = logfile if not pty_master else None self.__pty_master = pty_master self.handlers = { ServerEvent.CHAT: 'on_chat', ServerEvent.ELIM: 'on_elim', ServerEvent.RESPAWN: 'on_respawn', ServerEvent.ENTRANCE: 'on_entrance', ServerEvent.FLAG_CAPTURED: 'on_flag_captured', ServerEvent.ELIM_TEAMS_FLAG: 'on_elim_teams_flag', ServerEvent.ROUND_STARTED: 'on_round_started', ServerEvent.TEAM_SWITCHED: 'on_team_switched', ServerEvent.GAME_END: 'on_game_end', ServerEvent.MAPCHANGE: 'on_mapchange', ServerEvent.NAMECHANGE: 'on_namechange', ServerEvent.DISCONNECT: 'on_disconnect', ServerEvent.FLAG_GRAB: 'on_flag_grab', ServerEvent.FLAG_DROP: 'on_flag_drop', ServerEvent.ROUND_END: 'on_round_end', ServerEvent.GAMEMODE: 'gamemode', } self.__listeners = { ServerEvent.CHAT: [], ServerEvent.ELIM: [], ServerEvent.RESPAWN: [], ServerEvent.ENTRANCE: [], ServerEvent.FLAG_CAPTURED: [], ServerEvent.ELIM_TEAMS_FLAG: [], ServerEvent.ROUND_STARTED: [], ServerEvent.TEAM_SWITCHED: [], ServerEvent.GAME_END: [], ServerEvent.MAPCHANGE: [], ServerEvent.NAMECHANGE: [], ServerEvent.DISCONNECT: [], ServerEvent.FLAG_GRAB: [], ServerEvent.FLAG_DROP: [], ServerEvent.ROUND_END: [], ServerEvent.GAMEMODE: [], } self.loop = asyncio.get_event_loop() def is_listening(self): """ Check if the main loop is running. :rtype: bool """ return self.__alive @asyncio.coroutine def on_chat(self, nick, message): """ On chat, can be overridden using the :func:`.Server.event` decorator. :param nick: Player's nick. :type nick: str :param message: Message. :type message: str """ pass @asyncio.coroutine def on_flag_captured(self, team, nick, flag): """ On flag captured, can be overridden using the :func:`.Server.event` decorator. :param team: Player's team. :type team: str :param nick: Player's nick. :type nick: str :param flag: Captured flag (Blue|Red|Yellow|Purple|White) :type flag: str """ pass @asyncio.coroutine def on_team_switched(self, nick, old_team, new_team): """ On team switched, can be overridden using the :func:`.Server.event` decorator. :param nick: Player's nick :type nick: str :param old_team: Old team (Blue|Red|Yellow|Purple|Observer) :type old_team: str :param new_team: New team (Blue|Red|Yellow|Purple|Observer) :type new_team: str """ pass @asyncio.coroutine def on_round_started(self): """ On round started, can be overridden using the :func:`.Server.event` decorator. """ pass @asyncio.coroutine def on_elim_teams_flag(self, team, nick, points): """ On scored points for possession of eliminated teams flag, can be overridden using the :func:`.Server.event` decorator. :param team: Player's team. :type team: str :param nick: Player's nick. :type nick: str :param points: Points earned. :type points: int """ pass @asyncio.coroutine def on_entrance(self, nick, build, addr): """ On entrance, can be overriden using the :func:`.Server.event` decorator. 
:param nick: Player's nick :type nick: str :param build: Player's game version ('build 41' for example :type build: str :param addr: Player's address, IP:PORT ('127.0.0.1:23414' for example) :type addr: str """ pass @asyncio.coroutine def on_game_end(self, score_blue, score_red, score_yellow, score_purple): """ On game end, can be overriden using the :func:`.Server.event` decorator. :param score_blue: Blue's score - None if there was no Blue team. :param score_red: Red's score - None if there was no Red team. :param score_yellow: Yellow's score - None if there was no Yellow team. :param score_purple: Purple's score - None if there was no Purple team. """ pass @asyncio.coroutine def on_elim(self, killer_nick, killer_weapon, victim_nick, victim_weapon, suicide): """ On elim can be overridden using the :func:`.Server.event` decorator. :param killer_nick: Killer's nick :type killer_nick: str :param killer_weapon: Killer's weapon :type killer_weapon: str :param victim_nick: Victim's nick :type victim_nick: str :param victim_weapon: Victim's weapon :type victim_weapon: str """ pass @asyncio.coroutine def on_respawn(self, team, nick): """ On respawn, can be overridden using the :func:`.Server.event` decorator. :param team: Player's team (Blue|Red|Yellow|Purple) :type team: str :param nick: Player's nick :type nick: str """ pass @asyncio.coroutine def on_mapchange(self, mapname): """ On mapcange, can be overridden using the :func:`.Server.event` decorator. :param mapname: Mapname :type mapname: str """ pass @asyncio.coroutine def on_namechange(self, old_nick, new_nick): """ On name change, can be overridden using the :func:`.Server.event` decorator. :param old_nick: Old nick :type old_nick: str :param new_nick: Old nick :type new_nick: str """ pass @asyncio.coroutine def on_disconnect(self, nick): """ On disconnect, can be overridden using the :func:`.Server.event`decorator. :param nick: Disconnected player's nick :type nick: str """ pass @asyncio.coroutine def on_flag_grab(self, nick, flag): """ On flag grab, can be overridden using the :func:`.Server.event` decorator. :param nick: Player's nick :type nick: str :param team: Flag color (Blue|Red|Yellow|Purple) :type team: str """ pass @asyncio.coroutine def on_flag_drop(self, nick): """ On flag grab, can be overridden using the :func:`.Server.event` decorator. :param nick: Player's nick :type nick: str :param team: Flag color (Blue|Red|Yellow|Purple) :type team: str """ pass @asyncio.coroutine def on_round_end(self): """ Onround end, can be overridden using the :func:`.Server.event` decorator. """ pass @asyncio.coroutine def gamemode(self, gamemode): """ Onround end, can be overridden using the :func:`.Server.event` decorator. :param gamemode: map's gamemode :type gamemode: str """ pass def event(self, func): """ Decorator, used for event registration. :param func: function to register :rtype: builtin_function_or_method :example: .. code-block:: python :linenos: >>> from dplib.server import Server >>> s = Server(hostname='127.0.0.1', port=27910, logfile=r'qconsole27910.log', rcon_password='hello') >>> @s.event ... def on_chat(nick, message): ... print((nick, message)) ... >>> s.run() ('mRokita', 'Hi') """ if func.__name__ in self.handlers.values(): setattr(self, func.__name__, asyncio.coroutine(func)) return func else: raise Exception('Event \'%s\' doesn\'t exist' % func.__name__) def stop_listening(self): """ Stop the main loop """ self.__alive = False def __perform_listeners(self, event_type, args, kwargs): """ Performs all pending listeners. 
:param event_type: Event type, one of members :class:`ServerEvent` :param args: Event info :type args: tuple :param kwargs: Event info :type kwargs: dict """ to_remove = list() for i, (check, future) in enumerate(self.__listeners[event_type]): if not future.cancelled() and not future.done(): if check(*args): future.set_result(kwargs) else: to_remove.append(i) for i in reversed(to_remove): self.__listeners[event_type].pop(i) def nicks_valid(self, *nicks): nicks_ingame = [p.nick for p in self.get_players()] for nick in nicks: if nick not in nicks_ingame: return False return True @asyncio.coroutine def __handle_event(self, event_type, args): """ Handles an event. :param event_type: Event type, one of members :class:`ServerEvent` :param args: Event info (re.findall() results) """ kwargs = dict() if event_type == ServerEvent.CHAT: if args[0] not in [p.nick for p in self.get_players()]: return kwargs = { 'nick': args[0], 'message': args[1], } self.__perform_listeners(ServerEvent.CHAT, args, kwargs) elif event_type == ServerEvent.ELIM: kwargs = { 'killer_nick': args[0], 'killer_weapon': args[1], 'victim_nick': args[2], 'victim_weapon': args[3], 'suicide': args[4], } self.__perform_listeners(ServerEvent.ELIM, args, kwargs) elif event_type == ServerEvent.RESPAWN: kwargs = { 'team': args[0], 'nick': args[1], } self.__perform_listeners(ServerEvent.RESPAWN, args, kwargs) elif event_type == ServerEvent.ENTRANCE: kwargs = { 'nick': args[0], 'build': args[1], 'addr': args[2], } self.__perform_listeners(ServerEvent.ENTRANCE, args, kwargs) elif event_type == ServerEvent.FLAG_CAPTURED: kwargs = { 'team': args[0], 'nick': args[1], 'flag': args[2], } self.__perform_listeners(ServerEvent.FLAG_CAPTURED, args, kwargs) elif event_type == ServerEvent.ELIM_TEAMS_FLAG: kwargs = { 'team': args[0], 'nick': args[1], 'points': int(args[2]), } self.__perform_listeners(ServerEvent.ELIM_TEAMS_FLAG, args, kwargs) elif event_type == ServerEvent.ROUND_STARTED: kwargs = dict() self.__perform_listeners(ServerEvent.ROUND_STARTED, args, kwargs) elif event_type == ServerEvent.TEAM_SWITCHED: new_args = tuple([arg for arg in args if arg]) kwargs = { 'nick': new_args[0], 'old_team': new_args[1] if len(new_args) > 2 else 'Observer', 'new_team': new_args[2] if len(new_args) > 2 else new_args[1] } if kwargs['new_team'] == 'observing': kwargs['new_team'] = 'Observer' kwargs['old_team'] = None self.__perform_listeners(ServerEvent.TEAM_SWITCHED, new_args, kwargs) elif event_type == ServerEvent.GAME_END: kwargs = { 'score_blue': None, 'score_red': None, 'score_purple': None, 'score_yellow': None, } teams = args.split(',') for t in teams: data = t.split(':') if data[0] == 'Blue': kwargs['score_blue'] = data[1] elif data[0] == 'Red': kwargs['score_red'] = data[1] elif data[0] == 'Yellow': kwargs['score_yellow'] = data[1] elif data[0] == 'Purple': kwargs['score_purple'] = data[1] self.__perform_listeners(ServerEvent.GAME_END, (kwargs['score_blue'], kwargs['score_red'], kwargs['score_yellow'], kwargs['score_purple']), kwargs) elif event_type == ServerEvent.MAPCHANGE: kwargs = { 'mapname': args } self.__perform_listeners(ServerEvent.MAPCHANGE, (kwargs['mapname'],), kwargs) elif event_type == ServerEvent.NAMECHANGE: kwargs = { 'old_nick': args[0], 'new_nick': args[1] } self.__perform_listeners(ServerEvent.NAMECHANGE, (kwargs['old_nick'], kwargs['new_nick']), kwargs) elif event_type == ServerEvent.DISCONNECT: kwargs = { 'nick': args } self.__perform_listeners(ServerEvent.DISCONNECT, (kwargs['nick'],), kwargs) elif event_type == 
ServerEvent.FLAG_GRAB: kwargs = { 'nick': args[0], 'flag': args[1], } self.__perform_listeners(ServerEvent.FLAG_GRAB, (kwargs['nick'], kwargs['flag']), kwargs) elif event_type == ServerEvent.FLAG_DROP: kwargs = { 'nick': args } self.__perform_listeners(ServerEvent.FLAG_GRAB, (kwargs['nick'],), kwargs) elif event_type == ServerEvent.ROUND_END: kwargs = dict() self.__perform_listeners(ServerEvent.ROUND_END, args, kwargs) elif event_type == ServerEvent.GAMEMODE: kwargs = { 'gamemode': args } self.__perform_listeners(ServerEvent.GAMEMODE, args, kwargs) asyncio.ensure_future(self.get_event_handler(event_type)(**kwargs)) def get_event_handler(self, event_type): return getattr(self, self.handlers[event_type]) @asyncio.coroutine def __parse_line(self, line): """ Tries to match line with all event regexps. :param line: Line from logs """ for r in REGEXPS: results = r.findall(line) e = REGEXPS[r] for res in results: if e == ServerEvent.CHAT: # For security reasons if self.nicks_valid(res[0]): yield from self.__handle_event(event_type=e, args=res) return else: continue yield from self.__handle_event(event_type=e, args=res) def rcon(self, command, socket_timeout=3): """ Execute a console command using RCON. :param command: Command :param socket_timeout: Timeout for the UDP socket. :return: Response from server :rtype: str :example: .. code-block:: python :linenos: >>> from dplib.server import Server >>> s = Server(hostname='127.0.0.1', port=27910, logfile=r'qconsole27910.log', rcon_password='hello') >>> s.rcon('sv listuserip') 'ÿÿÿÿprint\\n mRokita [127.0.0.1:9419]\\nadmin is listing IP for mRokita [127.0.0.1:9419]\\n' """ sock = socket(AF_INET, SOCK_DGRAM) sock.connect((self.__hostname, self.__port)) sock.settimeout(socket_timeout) sock.send(bytes('\xFF\xFF\xFF\xFFrcon {} {}\n'.format(self.__rcon_password, command).encode('latin-1'))) ret = sock.recv(2048).decode('latin-1') return ret def status(self): """ Execute status query. :return: Status string :rtype: str """ sock = socket(AF_INET, SOCK_DGRAM) sock.connect((self.__hostname, self.__port)) sock.settimeout(3) sock.send(b'\xFF\xFF\xFF\xFFstatus\n') return sock.recv(2048).decode('latin-1') def new_map(self, map_name, gamemode=None): """ Changes the map using sv newmap <mapname> <gamemode> :param map_name: map name, without .bsp :param gamemode: Game mode :type gamemode: GameMode :return: Rcon response :raises MapNotFoundError: When map is not found on the server :rtype: str """ command = 'sv newmap {map}' if gamemode: command += ' {gamemode}' res = self.rcon(command.format(map=map_name, gamemode=gamemode)) if 'Cannot find mapfile' in res or 'usage' in res: raise MapNotFoundError return res def permaban(self, ip=None): """ Bans IP address or range of adresses and saves ban list to disk. :param ip: IP address to ban :return: Rcon response :rtype: str """ if ip: resp = self.rcon('addip %s' % ip) resp += '\n' + self.rcon('writeban') return resp else: raise TypeError('IP address is required.') def remove_permaban(self, ip=None): """ Removes ban on IP address and saves ban list to disk. 
:param ip: IP address to unban :return: Rcon response :rtype: str """ if ip: resp = self.rcon('removeip %s' % ip) resp += '\n' + self.rcon('writeban') return resp else: raise TypeError('IP address is required.') def tempoban(self, id=None, nick=None, duration=3): """ Temporarily bans a player with specified id using rcon :param id: Player's id :param nick: Player's nick :param duration: Ban duration in minutes (defaults to 3) :return: Rcon response :rtype: str """ if type(duration) != int: raise TypeError('Ban duration should be an integer, not a ' + str(type(duration))) if nick: id = self.get_ingame_info(nick).id if id: return self.rcon('tban %s %s' % (id, str(duration))) else: raise TypeError('Player id or nick is required.') def remove_tempobans(self): """ Removes all temporary bans :return: Rcon response :rtype: str """ return self.rcon("removetbans") def kick(self, id=None, nick=None): """ Kicks a player with id using rcon. :param id: Player's id :param nick: Player's nick :return: Rcon response :rtype: str """ if nick: id = self.get_ingame_info(nick).id if id: return self.rcon('kick %s' % id) else: raise TypeError('Player id or nick is required.') def say(self, message): """ Say a message :param message: Text, can contain {C} - color char {U} - underline char {I} italic. Remember to escape user input using :func:`dplib.parse.escape_braces`. :rtype: str :return: Rcon response :example: .. code-block:: python :linenos: >>> from dplib.server import Server >>> s = Server(hostname='127.0.0.1', port=27910, logfile=r'qconsole27910.log', rcon_password='hello') >>> s.say('{C}ARed text') >>> s.say('{U}Underline{U}') >>> s.say('{I}Italic{I}') :ingame result: .. image:: ..\..\doc\images\say_test.png """ return self.rcon('say "%s"' % render_text(message)) def cprint(self, message): """ Cprints a message. :param message: Text, can contain {C} - color char {U} - underline char {I} italic. Remember to escape user input using :func:`dplib.parse.escape_brac :return: Rcon response :rtype: str """ return self.rcon('sv cprint "%s"' % render_text(message)) def set_cvar(self, var, value): """ Set a server cvar :param var: cvar name :param value: value to set :return: Rcon response :rtype: str """ return self.rcon('set %s "%s"' % (var, value)) def get_cvar(self, var): """ Gets cvar value :param var: Variable name :type var: str :return: Cvar value :rtype: str """ res = self.rcon('"%s"' % var) if re.match('^....print\\\nUnknown command \\"%s"\\.\\\n' % re.escape(var), res): raise NameError('Cvar "%s" does not exist' % var) return re.findall('^....print\\\n\\"%s\\" is \\"(.*?)\\"\\\n' % re.escape(var), res)[0] @staticmethod def __get_predicate(margs, check): """ Returns a comparator. :param margs: Args to check :param check: Check function :return: Returns a function that compiles the check function and comparision strings """ def predicate(*args): if len(args) != len(margs): raise TypeError('predicate() takes %d positional arguments but %d were given' % (len(margs), len(args))) result = True for i, a in enumerate(margs): if a: result = result and a == args[i] if callable(check): result = result and check(*args) return result return predicate @asyncio.coroutine def wait_for_entrance(self, timeout=None, nick=None, build=None, addr=None, check=None): """ Waits for entrance. :param timeout: Time to wait for entrance event, if exceeded, returns None. :param nick: Player's nick. :param build: Player's build. 
:param addr: Player's address (IP:PORT) :return: """ future = asyncio.Future(loop=self.loop) margs = (nick, build, addr) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.ENTRANCE].append((predicate, future)) try: data = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: data = None return data @asyncio.coroutine def wait_for_respawn(self, timeout=None, team=None, nick=None, check=None): """ Waits for respawn event. :param timeout: Time to wait for respawn event, if exceeded, returns None. :param team: Player's team. :param nick: Player's nick. :param check: Check function, ignored if none. :return: Returns message info dict keys: ('team', 'nick'). :rtype: dict """ future = asyncio.Future(loop=self.loop) margs = (team, nick) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.RESPAWN].append((predicate, future)) try: data = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: data = None return data @asyncio.coroutine def wait_for_elim_teams_flag(self, timeout=None, team=None, nick=None, points=None, check=None): """ Waits for elim teams flag event. :param timeout: Time to wait for event, if exceeded, returns None. :param team: Player's team. :param nick: Player's nick. :param points: Points scored. :type points: int :param check: Check function, ignored if none. :return: Returns message info dict keys: ('team', 'nick', 'points'). :rtype: dict """ future = asyncio.Future(loop=self.loop) margs = (team, nick, points) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.ELIM_TEAMS_FLAG].append((predicate, future)) try: data = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: data = None return data @asyncio.coroutine def wait_for_team_switched(self, timeout=None, nick=None, old_team=None, new_team=None, check=None): """ Waits for team switch event. :param timeout: Time to wait for event, if exceeded, returns None. :param old_team: Player's old team. :param new_team: Player's new team. :param nick: Player's nick. :param check: Check function, ignored if none. :return: Returns message info dict keys: ('nick', 'old_team', 'new_nick'). :rtype: dict """ future = asyncio.Future(loop=self.loop) margs = (nick, old_team, new_team) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.TEAM_SWITCHED].append((predicate, future)) try: data = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: data = None return data @asyncio.coroutine def wait_for_round_started(self, timeout=None, check=None): """ Waits for round start. :param timeout: Time to wait for event, if exceeded, returns None. :param check: Check function, ignored if none. :return: Returns an empty dict. :rtype: dict """ future = asyncio.Future(loop=self.loop) margs = tuple() predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.ROUND_STARTED].append((predicate, future)) try: data = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: data = None return data @asyncio.coroutine def wait_for_flag_captured(self, timeout=None, team=None, nick=None, flag=None, check=None): """ Waits for flag capture. :param timeout: Time to wait for event, if exceeded, returns None. :param team: Player's team. :param nick: Player's nick. :param flag: Captured flag. :param check: Check function, ignored if none. :return: Returns an empty dict. 
:rtype: dict """ future = asyncio.Future(loop=self.loop) margs = (team, nick, flag) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.FLAG_CAPTURED].append((predicate, future)) try: data = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: data = None return data @asyncio.coroutine def wait_for_game_end(self, timeout=None, score_blue=None, score_red=None, score_yellow=None, score_purple=None, check=None): """ Waits for game end. :param timeout: Time to wait for event, if exceeded, returns None. :param score_blue: Blue score :param score_red: Red score. :param score_yellow: Yellow score. :param score_purple: Purple score. :param check: Check function, ignored if none. :return: Returns an empty dict. :rtype: dict """ future = asyncio.Future(loop=self.loop) margs = (score_blue, score_red, score_yellow, score_purple) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.GAME_END].append((predicate, future)) try: data = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: data = None return data @asyncio.coroutine def wait_for_elim(self, timeout=None, killer_nick=None, killer_weapon=None, victim_nick=None, victim_weapon=None, check=None): """ Waits for elimination event. :param timeout: Time to wait for elimination event, if exceeded, returns None. :param killer_nick: Killer's nick to match, ignored if None. :param killer_weapon: Killer's weapon to match, ignored if None. :param victim_nick: Victim's nick to match, ignored if None. :param victim_weapon: Victim's weapon to match, ignored if None. :param check: Check function, ignored if None. :return: Returns message info dict keys: ('killer_nick', 'killer_weapon', 'victim_nick', 'victim_weapon') :rtype: dict """ future = asyncio.Future(loop=self.loop) margs = (killer_nick, killer_weapon, victim_nick, victim_weapon) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.ELIM].append((predicate, future)) try: elim_info = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: elim_info = None return elim_info @asyncio.coroutine def wait_for_mapchange(self, timeout=None, mapname=None, check=None): """ Waits for mapchange. :param timeout: Time to wait for elimination event, if exceeded, returns None. :param mapname: Killer's nick to match, ignored if None. :param check: Check function, ignored if None. :return: Returns message info dict keys: ('killer_nick', 'killer_weapon', 'victim_nick', 'victim_weapon') :rtype: dict """ future = asyncio.Future(loop=self.loop) margs = (mapname,) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.MAPCHANGE].append((predicate, future)) try: mapchange_info = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: mapchange_info = None return mapchange_info @asyncio.coroutine def wait_for_namechange(self, timeout=None, old_nick=None, new_nick=None, check=None): """ Waits for mapchange. :param timeout: Time to wait for elimination event, if exceeded, returns None. :param mapname: Killer's nick to match, ignored if None. :param check: Check function, ignored if None. 
:return: Returns message info dict keys: ('killer_nick', 'killer_weapon', 'victim_nick', 'victim_weapon') :rtype: dict """ future = asyncio.Future(loop=self.loop) margs = (old_nick, new_nick) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.NAMECHANGE].append((predicate, future)) try: mapchange_info = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: mapchange_info = None return mapchange_info @asyncio.coroutine def wait_for_message(self, timeout=None, nick=None, message=None, check=None): """ Waits for a message. :param timeout: Time to wait for message, if exceeded, returns None. :param nick: Player's nick to match, ignored if None :type nick: str :param message: Message text to match, ignored if None :type message: str :param check: Check function, ignored if None :return: Returns message info dict keys: ('nick', 'message') :rtype: dict :example: .. code-block:: python :linenos: @s.event def on_chat(nick, message): if message == '!start' and not elim_active: msg = yield from s.wait_for_message(check=lambda n, m: m.startswith('!hi ')) s.say('Hi ' + msg['message'].split('!hi ')[1] + '!') """ future = asyncio.Future(loop=self.loop) margs = (nick, message) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.CHAT].append((predicate, future)) try: message = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: message = None return message @asyncio.coroutine def wait_for_flag_drop(self, timeout=None, nick=None, check=None): """ Waits for flag drop. :param timeout: Time to wait for event, if exceeded, returns None. :param nick: Player's nick. :param flag: dropped flag. :param check: Check function, ignored if none. :return: Returns an empty dict. :rtype: dict """ future = asyncio.Future(loop=self.loop) margs = (nick) predicate = self.__get_predicate(margs, check) self.__listeners[ServerEvent.FLAG_DROP].append((predicate, future)) try: data = yield from asyncio.wait_for(future, timeout, loop=self.loop) except asyncio.TimeoutError: data = None return data def start(self, scan_old=False, realtime=True, debug=False): """ Main loop. :param scan_old: Scan present logfile data :type scan_old: bool :param realtime: Wait for incoming logfile data :type realtime: bool """ if not (self.__logfile_name or self.__pty_master): raise AttributeError("Logfile name or a Popen process is required.") self.__alive = True if self.__logfile_name: self.__log_file = open(self.__logfile_name, 'rb') if self.__log_file and scan_old: self.__log_file.readlines() buf = '' if realtime: while self.__alive: try: buf += self._read_log() lines = buf.splitlines(True) line = '' for line in lines: if debug: print("[DPLib] %s" % line.strip()) yield from self.__parse_line(line) if not line or line[-1] != '\n': buf = line else: buf = '' yield from asyncio.sleep(0.05) except OSError as e: raise e if self.__log_file: self.__log_file.close() if self.__pty_master: os.close(self.__pty_master) def _read_log(self): if self.__log_file: return self.__log_file.readline().decode('latin-1') elif self.__pty_master: r, w, x = select.select([self.__pty_master], [], [], 0.01) if r: return os.read(self.__pty_master, 1024).decode('latin-1') else: return '' def get_players(self): """ Gets playerlist. 
:return: List of :class:`.Player` instances :rtype: list """ response = self.rcon('sv players') response = re.findall('(\d+) \\(?(.*?)\\)?\\] \\* (?:OP \d+, )?(.+) \\((b\d+)\\)', response) players = list() for p_data in response: player = Player(nick=p_data[2], id=p_data[0], dplogin=p_data[1], build=p_data[3], server=self) players.append(player) return players def get_simple_playerlist(self): """ Get a list of player names :return: List of nicks :rtype: list """ status = self.get_status() players = status['players'] playerlist = [] for p in players: playerlist.append(p['name']) return playerlist def get_status(self): """ Gets server status :example: .. code-block:: python :linenos: >>> s = Server(hostname='127.0.0.1', port=27910, logfile=r'C:\Games\Paintball2\pball\qconsole27910.log', rcon_password='hello') >>> s.get_status() {'players': [{'score': '0', 'ping': '13', 'name': 'mRokita'}], 'sv_certificated': '1', 'mapname': 'beta/wobluda_fix', 'TimeLeft': '20:00', '_scores': 'Red:0 Blue:0 ', 'gamename': 'Digital Paint Paintball 2 v1.930(186)', 'gameversion': 'DPPB2 v1.930(186)', 'sv_login': '1', 'needpass': '0', 'gamedate': 'Aug 10 2015', 'protocol': '34', 'version': '2.00 x86 Aug 10 2015 Win32 RELEASE (41)', 'hostname': 'asdfgh', 'elim': 'airtime', 'fraglimit': '50', 'timelimit': '20', 'gamedir': 'pball', 'game': 'pball', 'maxclients': '8'} :return: status dict :rtype: dict """ dictionary = {} players = [] response = self.status().split('\n')[1:] variables = response[0] players_str = (response[1:]) for i in players_str: if not i: continue temp_dict = {} cleaned_name = decode_ingame_text(i) separated = cleaned_name.split(' ') temp_dict['score'] = separated[0] temp_dict['ping'] = separated[1] temp_dict['name'] = cleaned_name.split("%s %s " % (separated[0], separated[1]))[1][1:-1] players.append(temp_dict) dictionary['players'] = players variables = variables.split('\\')[1:] for i in range(0, len(variables), 2): dictionary[variables[i]] = variables[i + 1] return dictionary def get_ingame_info(self, nick): """ Get ingame info about a player with nickname :param nick: Nick :return: An instance of :class:`.Player` """ players = self.get_players() for p in players: if p.nick == nick: return p return None def make_secure(self, timeout=10): """ This function fixes some compatibility and security issues on DP server side - Adds "mapchange" to sv_blockednames - Sets sl_logging to 1 All variables are set using the rcon protocol, use this function if you want to wait for the server to start. :param timeout: Timeout in seconds """ sl_logging_set = False sv_blockednames_set = False self.__is_secure = False start_time = time() while not (sl_logging_set and sv_blockednames_set) and time() - start_time < timeout: try: if not sl_logging_set: sl_logging = self.get_cvar('sl_logging') if sl_logging != '1': self.set_cvar('sl_logging', '1') else: sl_logging_set = True if not sv_blockednames_set: blockednames = self.get_cvar('sv_blockednames') if not 'maploaded' in blockednames: self.set_cvar('sv_blockednames', ','.join([blockednames, 'maploaded'])) else: sv_blockednames_set = True except ConnectionError or timeout: pass if not (sl_logging_set and sv_blockednames_set): raise SecurityCheckError( "Configuring the DP server failed," " check if the server is running " "and the rcon_password is correct.") else: self.__is_secure = True def run(self, scan_old=False, realtime=True, debug=False, make_secure=True): """ Runs the main loop using asyncio. 
:param scan_old: Scan present logfile data :type scan_old: bool :param realtime: Wait for incoming logfile data :type realtime: bool """ if make_secure and not self.__rcon_password: raise AttributeError( "Setting the rcon_password is required to secure DPLib." " You have to either set a rcon_password or add set" " \"sl_logging 1; set sv_blockednames mapname\" " "to your DP server config and use Server.run with" " make_secure=False") if make_secure: self.make_secure() self.loop.run_until_complete(self.start(scan_old, realtime, debug))
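# Illustrative usage sketch (added; not part of the original dplib module — it
# mirrors the examples already given in the docstrings above, and the hostname,
# port, logfile path and rcon password are placeholders):
#
#     from dplib.server import Server
#
#     s = Server(hostname='127.0.0.1', port=27910,
#                logfile=r'qconsole27910.log', rcon_password='hello')
#
#     @s.event
#     def on_chat(nick, message):
#         if message == '!ping':
#             s.say('pong!')
#
#     s.run()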
agpl-3.0
-7,791,786,125,953,492,000
33.949413
535
0.543832
false
3.845986
false
false
false
mgeorgehansen/FIFE_Technomage
engine/python/fife/extensions/fife_settings.py
1
15915
# -*- coding: utf-8 -*- # #################################################################### # Copyright (C) 2005-2010 by the FIFE team # http://www.fifengine.net # This file is part of FIFE. # # FIFE is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # #################################################################### """ Settings ================================== This module provides a nice framework for loading and saving game settings. It is by no means complete but it does provide a good starting point. """ import shutil import os from StringIO import StringIO from fife.extensions import pychan from fife.extensions.fife_utils import getUserDataDirectory from fife.extensions.serializers.simplexml import SimpleXMLSerializer SETTINGS_GUI_XML="""\ <Window name="Settings" title="Settings"> <Label text="Settings menu!" /> <HBox> <VBox> <Label text="Resolution:" /> <Label text="Renderer:" /> <Label text="Light Model:" /> </VBox> <VBox min_size="120,60"> <DropDown name="screen_resolution" min_size="120,0" /> <DropDown name="render_backend" min_size="120,0" /> <DropDown name="lighting_model" min_size="120,0" /> </VBox> </HBox> <CheckBox name="enable_fullscreen" text="Use the full screen mode" /> <CheckBox name="enable_sound" text="Enable sound" /> <HBox> <Spacer /> <Button name="cancelButton" text="Cancel" /> <Button name="okButton" text="Ok" /> <Button name="defaultButton" text="Defaults" /> </HBox> </Window> """ CHANGES_REQUIRE_RESTART="""\ <Window title="Changes require restart"> <Label text="Some of your changes require you to restart." /> <HBox> <Spacer /> <Button name="closeButton" text="Ok" /> </HBox> </Window> """ FIFE_MODULE = "FIFE" class Setting(object): """ This class manages loading and saving of game settings. Usage:: from fife.extensions.fife_settings import Setting settings = Setting(app_name="myapp") screen_width = settings.get("FIFE", "ScreenWidth", 1024) screen_height = settings.get("FIFE", "ScreenHeight", 768) """ def __init__(self, app_name="", settings_file="", default_settings_file= "settings-dist.xml", settings_gui_xml="", changes_gui_xml="", copy_dist=True, serializer=None): """ Initializes the Setting object. @param app_name: The applications name. If this parameter is provided alone it will try to read the settings file from the users home directory. In windows this will be something like: C:\Documents and Settings\user\Application Data\fife @type app_name: C{string} @param settings_file: The name of the settings file. If this parameter is provided it will look for the setting file as you specify it, first looking in the working directory. It will NOT look in the users home directory. @type settings_file: C{string} @param default_settings_file: The name of the default settings file. If the settings_file does not exist this file will be copied into the place of the settings_file. 
This file must exist in the root directory of your project! @type default_settings_file: C{string} @param settings_gui_xml: If you specify this parameter you can customize the look of the settings dialog box. @param copy_dist: Copies the default settings file to the settings_file location. If this is False it will create a new empty setting file. @param serializer: Overrides the default XML serializer @type serializer: C{SimpleSerializer} """ self._app_name = app_name self._settings_file = settings_file self._default_settings_file = default_settings_file self._settings_gui_xml = settings_gui_xml self._changes_gui_xml = changes_gui_xml self.OptionsDlg = None # Holds SettingEntries self._entries = {} if self._settings_file == "": self._settings_file = "settings.xml" self._appdata = getUserDataDirectory("fife", self._app_name) else: self._appdata = os.path.dirname(self._settings_file) self._settings_file = os.path.basename(self._settings_file) if self._settings_gui_xml == "": self._settings_gui_xml = SETTINGS_GUI_XML if self._changes_gui_xml == "": self._changes_gui_xml = CHANGES_REQUIRE_RESTART if not os.path.exists(os.path.join(self._appdata, self._settings_file)): if os.path.exists(self._default_settings_file) and copy_dist: shutil.copyfile(self._default_settings_file, os.path.join(self._appdata, self._settings_file)) #default settings self._resolutions = ['640x480', '800x600', '1024x768', '1280x800', '1440x900'] self._renderbackends = ['OpenGL', 'SDL'] self._lightingmodels = [0, 1, 2] #Used to stylize the options gui self._gui_style = "default" #Initialize the serializer if serializer: self._serializer = serializer else: self._serializer = SimpleXMLSerializer() self.initSerializer() self._initDefaultSettingEntries() def initSerializer(self): self._serializer.load(os.path.join(self._appdata, self._settings_file)) def _initDefaultSettingEntries(self): """Initializes the default fife setting entries. 
Not to be called from outside this class.""" self.createAndAddEntry(FIFE_MODULE, "PlaySounds", "enable_sound", requiresrestart=True) self.createAndAddEntry(FIFE_MODULE, "FullScreen", "enable_fullscreen", requiresrestart=True) self.createAndAddEntry(FIFE_MODULE, "ScreenResolution", "screen_resolution", initialdata = self._resolutions, requiresrestart=True) self.createAndAddEntry(FIFE_MODULE, "RenderBackend", "render_backend", initialdata = self._renderbackends, requiresrestart=True) self.createAndAddEntry(FIFE_MODULE, "Lighting", "lighting_model", initialdata = self._lightingmodels, requiresrestart=True) def createAndAddEntry(self, module, name, widgetname, applyfunction=None, initialdata=None, requiresrestart=False): """" @param module: The Setting module this Entry belongs to @type module: C{String} @param name: The Setting's name @type name: C{String} @param widgetname: The name of the widget that is used to change this setting @type widgetname: C{String} @param applyfunction: function that makes the changes when the Setting is saved @type applyfunction: C{function} @param initialdata: If the widget supports the setInitialData() function this can be used to set the initial data @type initialdata: C{String} or C{Boolean} @param requiresrestart: Whether or not the changing of this setting requires a restart @type requiresrestart: C{Boolean} """ entry = SettingEntry(module, name, widgetname, applyfunction, initialdata, requiresrestart) self.addEntry(entry) def addEntry(self, entry): """Adds a new C{SettingEntry} to the Settting @param entry: A new SettingEntry that is to be added @type entry: C{SettingEntry} """ if entry.module not in self._entries: self._entries[entry.module] = {} self._entries[entry.module][entry.name] = entry # Make sure the new entry is available if self.get(entry.module, entry.name) is None: print "Updating", self._settings_file, "to the default, it is missing the entry:"\ , entry.name ,"for module", entry.module self.setDefaults() if self.get(entry.module, entry.name) is None: print "WARNING:", entry.module, ":", entry.name, "still not found!" def saveSettings(self, filename=""): """ Writes the settings to the settings file @param filename: Specifies the file to save the settings to. If it is not specified the original settings file is used. @type filename: C{string} """ if self._serializer: if filename == "": self._serializer.save(os.path.join(self._appdata, self._settings_file)) else: self._serializer.save(filename) def get(self, module, name, defaultValue=None): """ Gets the value of a specified setting @param module: Name of the module to get the setting from @param name: Setting name @param defaultValue: Specifies the default value to return if the setting is not found @type defaultValue: C{str} or C{unicode} or C{int} or C{float} or C{bool} or C{list} or C{dict} """ if self._serializer: return self._serializer.get(module, name, defaultValue) else: return None def set(self, module, name, value, extra_attrs={}): """ Sets a setting to specified value. @param module: Module where the setting should be set @param name: Name of setting @param value: Value to assign to setting @type value: C{str} or C{unicode} or C{int} or C{float} or C{bool} or C{list} or C{dict} @param extra_attrs: Extra attributes to be stored in the XML-file @type extra_attrs: C{dict} """ if self._serializer: self._serializer.set(module, name, value, extra_attrs) def setGuiStyle(self, style): """ Set a custom gui style used for the option dialog. 
@param style: Pychan style to be used @type style: C{string} """ self._gui_style = style def onOptionsPress(self): """ Opens the options dialog box. Usually you would bind this to a button. """ self.changesRequireRestart = False self.isSetToDefault = False if not self.OptionsDlg: self.loadSettingsDialog() self.fillWidgets() self.OptionsDlg.show() def loadSettingsDialog(self): """ Load up the settings xml and return the widget. """ self.OptionsDlg = self._loadWidget(self._settings_gui_xml) self.OptionsDlg.stylize(self._gui_style) self.OptionsDlg.mapEvents({ 'okButton' : self.applySettings, 'cancelButton' : self.OptionsDlg.hide, 'defaultButton' : self.setDefaults }) return self.OptionsDlg def _loadWidget(self, dialog): """Loads a widget. Can load both files and pure xml strings""" if os.path.isfile(self._settings_gui_xml): return pychan.loadXML(dialog) else: return pychan.loadXML(StringIO(dialog)) def fillWidgets(self): for module in self._entries.itervalues(): for entry in module.itervalues(): widget = self.OptionsDlg.findChildByName(entry.settingwidgetname) value = self.get(entry.module, entry.name) if type(entry.initialdata) is list: try: value = entry.initialdata.index(value) except ValueError: raise ValueError("\"" + value + "\" is not a valid value for " + entry.name + ". Valid options: " + str(entry.initialdata)) entry.initializeWidget(widget, value) def applySettings(self): """ Writes the settings file. If a change requires a restart of the engine it notifies you with a small dialog box. """ for module in self._entries.itervalues(): for entry in module.itervalues(): widget = self.OptionsDlg.findChildByName(entry.settingwidgetname) data = widget.getData() # If the data is a list we need to get the correct selected data # from the list. This is needed for e.g. dropdowns or listboxs if type(entry.initialdata) is list: data = entry.initialdata[data] # only take action if something really changed if data != self.get(entry.module, entry.name): self.set(entry.module, entry.name, data) entry.onApply(data) if entry.requiresrestart: self.changesRequireRestart = True self.saveSettings() self.OptionsDlg.hide() if self.changesRequireRestart: self._showChangeRequireRestartDialog() def _showChangeRequireRestartDialog(self): """Shows a dialog that informes the user that a restart is required to perform the changes.""" RestartDlg = self._loadWidget(self._changes_gui_xml) RestartDlg.stylize(self._gui_style) RestartDlg.mapEvents({ 'closeButton' : RestartDlg.hide }) RestartDlg.show() def setAvailableScreenResolutions(self, reslist): """ A list of valid default screen resolutions. This should be called once right after you instantiate Settings. Valid screen resolutions must be strings in the form of: WIDTHxHEIGHT Example: settings.setAvailableScreenResolutions(["800x600", "1024x768"]) """ self._resolutions = reslist def setDefaults(self): """ Overwrites the setting file with the default settings file. 
""" shutil.copyfile(self._default_settings_file, os.path.join(self._appdata, self._settings_file)) self.changesRequireRestart = True self.initSerializer() #update all widgets with the new data self.fillWidgets() def _getEntries(self): return self._entries def _setEntries(self, entries): self._entries = entries def _getSerializer(self): return self._serializer entries = property(_getEntries, _setEntries) serializer = property(_getSerializer) class SettingEntry(object): def __init__(self, module, name, widgetname, applyfunction=None, initialdata=None, requiresrestart=False): """ @param module: The Setting module this Entry belongs to @type module: C{String} @param name: The Setting's name @type name: C{String} @param widgetname: The name of the widget that is used to change this setting @type widgetname: C{String} @param applyfunction: function that makes the changes when the Setting is saved @type applyfunction: C{function} @param initialdata: If the widget supports the setInitialData() function this can be used to set the initial data @type initialdata: C{String} or C{Boolean} @param requiresrestart: Whether or not the changing of this setting requires a restart @type requiresrestart: C{Boolean} """ self._module = module self._name = name self._settingwidgetname = widgetname self._requiresrestart = requiresrestart self._initialdata = initialdata self._applyfunction = applyfunction def initializeWidget(self, widget, currentValue): """Initialize the widget with needed data""" if self._initialdata is not None: widget.setInitialData(self._initialdata) widget.setData(currentValue) def onApply(self, data): """Implement actions that need to be taken when the setting is changed here. """ if self._applyfunction is not None: self._applyfunction(data) def _getModule(self): return self._module def _setModule(self, module): self._module = module def _getName(self): return self._name def _setName(self, name): self._name = name def _getSettingWidgetName(self): return self._settingwidgetname def _setSettingWidgetName(self, settingwidgetname): self._settingwidgetname = settingwidgetname def _getRequiresRestart(self): return self._requiresrestart def _setRequiresRestart(self, requiresrestart): self._requiresrestart = requiresrestart def _getInitialData(self): return self._initialdata def _setInitialData(self, initialdata): self._initialdata = initialdata def _getApplyFunction(self): return self._applyfunction def _setApplyFunction(self, applyfunction): self._applyfunction = applyfunction module = property(_getModule, _setModule) name = property(_getName, _setName) settingwidgetname = property(_getSettingWidgetName, _setSettingWidgetName) requiresrestart = property(_getRequiresRestart, _setRequiresRestart) initialdata = property(_getInitialData, _setInitialData) applyfunction = property(_getApplyFunction, _setApplyFunction) def __str__(self): return "SettingEntry: " + self.name + " Module: " + self.module + " Widget: " + \ self.settingwidgetname + " requiresrestart: " + str(self.requiresrestart) + \ " initialdata: " + str(self.initialdata)
lgpl-2.1
3,252,405,455,924,976,600
32.861702
169
0.711781
false
3.47565
false
false
false
aenon/OnlineJudge
leetcode/5.BitManipulation/477.TotalHammingDistance.py
1
1100
# 477. Total Hamming Distance

# The Hamming distance between two integers is the number of positions at which the corresponding bits are different.
# Now your job is to find the total Hamming distance between all pairs of the given numbers.

# Example:
# Input: 4, 14, 2
# Output: 6
# Explanation: In binary representation, the 4 is 0100, 14 is 1110, and 2 is 0010 (just
# showing the four bits relevant in this case). So the answer will be:
# HammingDistance(4, 14) + HammingDistance(4, 2) + HammingDistance(14, 2) = 2 + 2 + 2 = 6.

# Note:
# Elements of the given array are in the range of 0 to 10^9
# Length of the array will not exceed 10^4.

class Solution(object):
    def totalHammingDistance(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        loop through all the digits
        """
        result = 0
        for i in xrange(32):
            counts = [0] * 2  # the number of 0's and 1's in the ith digit
            for number in nums:
                counts[number >> i & 1] += 1
            result += counts[0] * counts[1]
        return result
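# Illustrative usage sketch (added; not part of the original submission — a
# Python 2 interpreter is assumed, since the solution uses xrange):
if __name__ == '__main__':
    print Solution().totalHammingDistance([4, 14, 2])  # expected output: 6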
mit
-7,870,830,576,520,058,000
31.382353
117
0.626364
false
3.536977
false
false
false
f-prettyland/angr
angr/engines/vex/statements/loadg.py
1
2392
from .... import sim_options as o
from ....state_plugins.sim_action_object import SimActionObject
from ....state_plugins.sim_action import SimActionData

from . import SimIRStmt, SimStatementError


class SimIRStmt_LoadG(SimIRStmt):
    def _execute(self):
        addr = self._translate_expr(self.stmt.addr)
        alt = self._translate_expr(self.stmt.alt)
        guard = self._translate_expr(self.stmt.guard)

        read_type, converted_type = self.stmt.cvt_types
        read_size = self.size_bytes(read_type)
        converted_size = self.size_bytes(converted_type)

        read_expr = self.state.memory.load(addr.expr, read_size, endness=self.stmt.end)
        if read_size == converted_size:
            converted_expr = read_expr
        elif "S" in self.stmt.cvt:
            converted_expr = read_expr.sign_extend(converted_size*self.state.arch.byte_width - read_size*self.state.arch.byte_width)
        elif "U" in self.stmt.cvt:
            converted_expr = read_expr.zero_extend(converted_size*self.state.arch.byte_width - read_size*self.state.arch.byte_width)
        else:
            raise SimStatementError("Unrecognized IRLoadGOp %s!" % self.stmt.cvt)

        read_expr = self.state.se.If(guard.expr != 0, converted_expr, alt.expr)

        if o.ACTION_DEPS in self.state.options:
            reg_deps = addr.reg_deps() | alt.reg_deps() | guard.reg_deps()
            tmp_deps = addr.tmp_deps() | alt.tmp_deps() | guard.tmp_deps()
        else:
            reg_deps = None
            tmp_deps = None
        self.state.scratch.store_tmp(self.stmt.dst, read_expr, reg_deps, tmp_deps)

        if o.TRACK_MEMORY_ACTIONS in self.state.options:
            data_ao = SimActionObject(converted_expr)
            alt_ao = SimActionObject(alt.expr, reg_deps=alt.reg_deps(), tmp_deps=alt.tmp_deps())
            addr_ao = SimActionObject(addr.expr, reg_deps=addr.reg_deps(), tmp_deps=addr.tmp_deps())
            guard_ao = SimActionObject(guard.expr, reg_deps=guard.reg_deps(), tmp_deps=guard.tmp_deps())
            size_ao = SimActionObject(self.size_bits(converted_type))
            r = SimActionData(self.state, self.state.memory.id, SimActionData.READ, addr=addr_ao, data=data_ao, condition=guard_ao, size=size_ao, fallback=alt_ao)
            self.actions.append(r)
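# Illustrative sketch (added; not part of the angr source): the widening rule
# implemented above, restated on plain Python integers. "S" conversions
# sign-extend the loaded value to the destination width, "U" conversions
# zero-extend it, and equal widths pass through unchanged. The helper name and
# the op-name strings in the example are simplified stand-ins for demonstration.
def _widen(value, read_bits, converted_bits, cvt):
    if read_bits == converted_bits:
        return value
    if "S" in cvt:
        # sign-extend: reinterpret the top bit of the narrow value as a sign bit
        sign_bit = 1 << (read_bits - 1)
        signed = (value ^ sign_bit) - sign_bit
        return signed & ((1 << converted_bits) - 1)
    if "U" in cvt:
        # zero-extend: the unsigned value is already the widened result
        return value
    raise ValueError("Unrecognized conversion %s" % cvt)

# e.g. _widen(0xFF, 8, 32, "8Sto32") == 0xFFFFFFFF, while _widen(0xFF, 8, 32, "8Uto32") == 0xFF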
bsd-2-clause
-9,205,185,699,880,460,000
49.893617
162
0.621237
false
3.441727
false
false
false
moozilla/dvcticker
dvcticker/main.py
1
12328
#todo: raise exceptions, then catch them to generate error images import webapp2 from google.appengine.api import urlfetch import json from PIL import Image, ImageDraw, ImageFont from google.appengine.api import memcache import StringIO import jinja2 import os from decimal import * #used fixed point math for better accuracy from google.appengine import runtime # for catching DeadlineExceededError from google.appengine.api import urlfetch_errors # " JINJA_ENVIRONMENT = jinja2.Environment( loader=jinja2.FileSystemLoader(os.path.dirname(__file__))) #imgFont = ImageFont.load('static/font/ncenB12.pil') # for testing locally, can't get truetype to work locally imgFont = ImageFont.truetype('static/font/tahoma_bold.ttf', 14, encoding='unic') def urlfetch_cache(url,exchange): # fetches a url, but using memcache to not hammer the exchanges server data = memcache.get(url) if data is not None: return process_json(data, exchange) else: try: result = urlfetch.fetch(url,deadline=30) #timeout after 30 sec if result.status_code == 200: value = process_json(result.content, exchange) memcache.add(url, result.content, 30) #cache for 30 sec memcache.add('longcache'+url, result.content, 3000) #also cache for 5min in case of timeouts return value else: return 'Error: '+exchange+' status code '+str(result.status_code) #'Error accessing Vircurex API' except runtime.DeadlineExceededError: #raised if the overall request times out data = memcache.get('longcache'+url) if data is not None: return process_json(data, exchange) else: return 'Error: '+exchange+' timeout' except runtime.apiproxy_errors.DeadlineExceededError: #raised if an RPC exceeded its deadline (set) data = memcache.get('longcache'+url) if data is not None: return process_json(data, exchange) else: return 'Error: '+exchange+' timeout' except urlfetch_errors.DeadlineExceededError: #raised if the URLFetch times out data = memcache.get('longcache'+url) if data is not None: return process_json(data, exchange) else: return 'Error: '+exchange+' timeout' except urlfetch.Error: #catch DownloadError data = memcache.get('longcache'+url) if data is not None: return process_json(data, exchange) else: return 'Error: '+exchange+' timeout' def process_json(txt, exchange): #should probably add error handling in case bad json is passed if exchange == 'vircurex': if txt == '"Unknown currency"': return 'Error: bad Vircurex API result' obj = json.loads(txt) return obj['value'] elif exchange == 'mtgox_bid': obj = json.loads(txt) if obj['result'] == 'success': return obj['return']['buy']['value'] else: return 'Error: bad MTGox API result' elif exchange == 'mtgox_ask': obj = json.loads(txt) if obj['result'] == 'success': return obj['return']['sell']['value'] else: return 'Error: bad MTGox API result' elif exchange == 'btce_bid': obj = json.loads(txt) if not any('error' in s for s in obj): return str(obj['ticker']['buy']) else: return 'Error: bad BTC-E API result' elif exchange == 'btce_ask': obj = json.loads(txt) if not any('error' in s for s in obj): return str(obj['ticker']['sell']) else: return 'Error: bad BTC-E API result' elif exchange == 'campbx_bid': obj = json.loads(txt) # need to check for error return obj['Best Bid'] elif exchange == 'campbx_ask': obj = json.loads(txt) # need to check for error return obj['Best Ask'] else: return 'Error: invalid exchange' def get_campbx_value(base,alt,amount): url = 'http://campbx.com/api/xticker.php' reverse = False if base == 'btc': if alt != 'usd': return 'Error: only BTC/USD valid on CampBX' exch = 'campbx_bid' elif base == 
'usd':
        if alt != 'btc':
            return 'Error: only BTC/USD valid on CampBX'
        exch = 'campbx_ask'
        reverse = True
    else:
        return 'Error: only BTC/USD valid on CampBX'
    value = urlfetch_cache(url,exch)
    if value.startswith('Error'):
        return value
    if reverse:
        return str((Decimal(amount) / Decimal(value)).quantize(Decimal('.00000001'), rounding=ROUND_DOWN)) # need to round to a certain number
    else:
        return str(Decimal(amount) * Decimal(value))

def get_mtgox_value(base,alt,amount):
    cur = ['usd', 'aud', 'cad', 'chf', 'cny', 'dkk', 'eur', 'gbp', 'hkd', 'jpy', 'nzd', 'pln', 'rub', 'sek', 'sgd', 'thb']
    reverse = False # true if going from cur-> btc
    if base == 'btc':
        if not any(alt in s for s in cur):
            return 'Error: invalid destination currency'
        url = 'http://data.mtgox.com/api/1/btc'+alt+'/ticker'
        exch = 'mtgox_bid'
    elif any(base in s for s in cur):
        if alt != 'btc':
            return 'Error: destination currency must be BTC'
        url = 'http://data.mtgox.com/api/1/btc'+base+'/ticker' #mtgox api always has btc first
        exch = 'mtgox_ask'
        reverse = True
    else:
        return 'Error: invalid base currency'
    value = urlfetch_cache(url,exch)
    if value.startswith('Error'):
        return value
    if reverse:
        return str((Decimal(amount) / Decimal(value)).quantize(Decimal('.00000001'), rounding=ROUND_DOWN)) # need to round to a certain number
    else:
        return str(Decimal(amount) * Decimal(value))

def get_btce_value(base,alt,amount):
    # in BTC-e currencies must be traded in pairs, we also support going in reverse (buying)
    cur_fwd = {'btc':['usd','rur','eur'], 'ltc':['btc','usd','rur'], 'nmc':['btc'], 'usd':['rur'], 'eur':['usd'], 'nvc':['btc'], 'trc':['btc'], 'ppc':['btc'], 'ftc':['btc'], 'cnc':['btc']}
    cur_rev = {'btc':['ltc','nmc','nvc','trc','ppc','ftc','cnc'], 'usd':['btc','ltc'], 'rur':['btc','usd'], 'eur':['btc']}
    reverse = False # if going from cur-> btc
    if any(base in s for s in cur_fwd) and any(alt in s for s in cur_fwd[base]):
        #if not any(alt in s for s in cur_fwd[base]):
            #return 'Error: invalid destination currency' # can't return here because some can be base or alt
        url = 'https://btc-e.com/api/2/'+base+'_'+alt+'/ticker' #https://btc-e.com/api/2/nmc_btc/ticker
        exch = 'btce_bid'
    else:
        if any(base in s for s in cur_rev):
            if not any(alt in s for s in cur_rev[base]):
                return 'Error: invalid currency pair'
            url = 'https://btc-e.com/api/2/'+alt+'_'+base+'/ticker'
            exch = 'btce_ask'
            reverse = True
        else:
            return 'Error: invalid currency pair'
    value = urlfetch_cache(url,exch)
    if value.startswith('Error'):
        return value
    if reverse:
        return str((Decimal(amount) / Decimal(value)).quantize(Decimal('.00000001'), rounding=ROUND_DOWN)) # need to round to a certain number
    else:
        return str(Decimal(amount) * Decimal(value))

def get_vircurex_value(type, base, alt, amount):
    # gets json from vircurex about bid/ask prices
    # eg. https://vircurex.com/api/get_highest_bid.json?base=BTC&alt=NMC
    if type == 'bid':
        url = 'https://vircurex.com/api/get_highest_bid.json'
    elif type == 'ask':
        url = 'https://vircurex.com/api/get_lowest_ask.json'
    else:
        return 'Error: Type must be either "bid" or "ask"'
    cur = ['btc', 'dvc', 'ixc', 'ltc', 'nmc', 'ppc', 'trc', 'usd', 'eur', 'ftc', 'frc', 'cnc']
    if not any(base in s for s in cur):
        return 'Error: invalid currency'
    if not any(alt in s for s in cur):
        return 'Error: invalid currency'
    url += '?base=' + base + '&alt=' + alt
    value = urlfetch_cache(url,'vircurex')
    if value.startswith('Error'):
        return value
    return str(Decimal(amount)*Decimal(value)) # return amount * value

def get_bid(exchange, amount, base, alt):
    if exchange == 'vircurex':
        return get_vircurex_value('bid',base,alt,amount)
    elif exchange == 'mtgox':
        return get_mtgox_value(base,alt,amount)
    elif exchange == 'btc-e':
        return get_btce_value(base,alt,amount)
    elif exchange == 'campbx':
        return get_campbx_value(base,alt,amount)
    else:
        return 'Error: bad exchange'

def get_text_width(str):
    img = Image.new("RGBA", (1,1)) # just used to calculate the text size, size doesn't matter
    draw = ImageDraw.Draw(img)
    w, h = draw.textsize(str, imgFont) # calculate width font will take up
    return w

# returns text, with optional coin icon, in string encoded form so it can be written to HTTP response
def make_img(str, text_pos, coinimg=None):
    img = Image.new("RGBA", (get_text_width(str) + text_pos, 20))
    draw = ImageDraw.Draw(img) # set draw to new image
    if coinimg != None:
        img.paste(coinimg, (0,2)) #paste the coin image into the generated image
    draw.text((text_pos,1), str, font=imgFont, fill='#555555')
    output = StringIO.StringIO()
    img.save(output, format='png')
    img_to_serve = output.getvalue()
    output.close()
    return img_to_serve

class MainHandler(webapp2.RequestHandler):
    def get(self):
        #base = self.request.get('base','dvc')
        #alt = self.request.get('alt','btc')
        #value = get_vircurex_value('bid',base,alt)
        #template_values = {
        #    'value': value
        #}
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render())#template_values))

class ImageHandler(webapp2.RequestHandler):
    def get(self,exchange,amount,base,alt):
        if amount == '':
            amount = '1' # default amount is 1
        exchange = exchange.lower() # make sure everything is lowercase
        base = base.lower()
        if alt == None:
            if base == 'btc':
                alt = 'usd' # btc.png just shows btc value in usd
            else:
                alt = 'btc' # if no alt specified, default to BTC
        alt = alt.lower()
        value = get_bid(exchange,amount,base,alt)
        #if bid.startswith('Error'): value = bid
        #else: value = str(Decimal(amount)*Decimal(bid))
        text_pos = 19 # 3 px after coin image (all are 16x16)
        if value.startswith('Error'):
            text_pos = 0
        elif alt == 'usd':
            # round down to 2 decimal places
            value = '$ '+str(Decimal(value).quantize(Decimal('.01'), rounding=ROUND_DOWN))
            text_pos = 2
        elif alt == 'eur':
            # euro symbol in unicode (only works with truetype fonts)
            value = u'\u20AC '+str(Decimal(value).quantize(Decimal('.01'), rounding=ROUND_DOWN))
            text_pos = 2 # have to position euro symbol so it doesn't cut off
        elif any(alt in s for s in ['aud', 'cad', 'chf', 'cny', 'dkk', 'gbp', 'hkd', 'jpy', 'nzd', 'pln', 'rub', 'sek', 'sgd', 'thb', 'rur', 'nvc']):
            value = alt.upper() + ' ' + value
            text_pos = 2
        #text_pos 0 = error
        if text_pos!=0 and any(alt in s for s in ['btc', 'dvc', 'ixc', 'ltc', 'nmc', 'ppc', 'trc', 'ftc', 'frc', 'cnc']):
            coinimg = Image.open('static/img/'+alt+'.png')
        else:
            coinimg = None
        img_to_serve = make_img(value, text_pos, coinimg)
        self.response.headers['Content-Type'] = 'image/png'
        self.response.out.write(img_to_serve)

class ErrorHandler(webapp2.RequestHandler):
    def get(self):
        img_to_serve = make_img('Error: Malformed URL', 0)
        self.response.headers['Content-Type'] = 'image/png'
        self.response.out.write(img_to_serve)

app = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/([^/]+)/(\d*\.?\d*)([A-Za-z]+)(?:/([A-Za-z]+))?(?:\.png)?', ImageHandler),
    ('/.*', ErrorHandler)
], debug=True)
mit
2,237,923,900,706,341,600
43.666667
188
0.589066
false
3.539477
false
false
false
seanjtaylor/out-for-justice
scripts/test_optimize.py
1
1921
import random
import pickle

import numpy as np
import networkx as nx

from app.optim import slow_compute_loss, step


def main(input_file, num_police, num_steps, prob_step):
    """
    Parameters
    ----------
    num_police : the number of police to use
    num_steps : the number of steps to take
    prob_step : the probability of taking a step if it doesn't improve loss
    """
    with open(input_file) as f:
        graph = pickle.load(f)

    graph = nx.convert_node_labels_to_integers(graph)
    N = graph.number_of_nodes()

    # compute random starting places
    starting_positions = np.zeros(N)
    places = random.sample(xrange(N), num_police)
    starting_positions[places] = 1

    # one outcome that is uniformly distributed
    risks = np.ones(N).reshape((-1, 1))

    import time
    start = time.time()

    # initialize the optimization
    positions = [starting_positions]
    losses = [slow_compute_loss(graph, positions[-1], risks)]
    current = positions[-1]

    tried = set()
    for i in range(num_steps):
        new_position = step(graph, current)
        pos_id = tuple(new_position.nonzero()[0])
        if pos_id in tried:
            continue
        tried.add(pos_id)
        positions.append(new_position)
        losses.append(slow_compute_loss(graph, new_position, risks))
        if (losses[-1] < losses[-2]) or (random.random() < prob_step):
            current = new_position

    print time.time() - start
    print sorted(losses)[:10]


if __name__ == '__main__':
    from argparse import ArgumentParser

    parser = ArgumentParser()
    parser.add_argument('input_file')
    parser.add_argument('--num_police', type=int, default=1)
    parser.add_argument('--num_steps', type=int, default=100)
    parser.add_argument('--prob_step', type=float, default=0.25)
    args = parser.parse_args()

    main(args.input_file, args.num_police, args.num_steps, args.prob_step)
mit
3,145,182,075,389,221,000
25.680556
75
0.63925
false
3.63138
false
false
false
dothiko/mypaint
lib/layer/test.py
1
1433
# This file is part of MyPaint.
# Copyright (C) 2011-2015 by Andrew Chadwick <a.t.chadwick@gmail.com>
# Copyright (C) 2007-2012 by Martin Renold <martinxyz@gmx.ch>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.


def make_test_stack():
    """Makes a simple test RootLayerStack (2 branches of 3 leaves each)

    :return: The root stack, and a list of its leaves.
    :rtype: tuple
    """
    import lib.layer.group
    import lib.layer.data
    import lib.layer.tree
    root = lib.layer.tree.RootLayerStack(doc=None)
    layer0 = lib.layer.group.LayerStack(name='0')
    root.append(layer0)
    layer00 = lib.layer.data.PaintingLayer(name='00')
    layer0.append(layer00)
    layer01 = lib.layer.data.PaintingLayer(name='01')
    layer0.append(layer01)
    layer02 = lib.layer.data.PaintingLayer(name='02')
    layer0.append(layer02)
    layer1 = lib.layer.group.LayerStack(name='1')
    root.append(layer1)
    layer10 = lib.layer.data.PaintingLayer(name='10')
    layer1.append(layer10)
    layer11 = lib.layer.data.PaintingLayer(name='11')
    layer1.append(layer11)
    layer12 = lib.layer.data.PaintingLayer(name='12')
    layer1.append(layer12)
    return (root, [layer00, layer01, layer02, layer10, layer11, layer12])
gpl-2.0
8,603,394,478,543,778,000
35.74359
73
0.707606
false
3.220225
false
false
false
delimitry/ascii_clock
asciicanvas.py
1
6119
#-*- coding: utf-8 -*-
#-----------------------------------------------------------------------
# Author: delimitry
#-----------------------------------------------------------------------

class AsciiCanvas(object):
    """
    ASCII canvas for drawing in console using ASCII chars
    """

    def __init__(self, cols, lines, fill_char=' '):
        """
        Initialize ASCII canvas
        """
        if cols < 1 or cols > 1000 or lines < 1 or lines > 1000:
            raise Exception('Canvas cols/lines must be in range [1..1000]')
        self.cols = cols
        self.lines = lines
        if not fill_char:
            fill_char = ' '
        elif len(fill_char) > 1:
            fill_char = fill_char[0]
        self.fill_char = fill_char
        self.canvas = [[fill_char] * (cols) for _ in range(lines)]

    def clear(self):
        """
        Fill canvas with empty chars
        """
        self.canvas = [[self.fill_char] * (self.cols) for _ in range(self.lines)]

    def print_out(self):
        """
        Print out canvas to console
        """
        print(self.get_canvas_as_str())

    def add_line(self, x0, y0, x1, y1, fill_char='o'):
        """
        Add ASCII line (x0, y0 -> x1, y1) to the canvas, fill line with `fill_char`
        """
        if not fill_char:
            fill_char = 'o'
        elif len(fill_char) > 1:
            fill_char = fill_char[0]
        if x0 > x1:
            # swap A and B
            x1, x0 = x0, x1
            y1, y0 = y0, y1
        # get delta x, y
        dx = x1 - x0
        dy = y1 - y0
        # if a length of line is zero just add point
        if dx == 0 and dy == 0:
            if self.check_coord_in_range(x0, y0):
                self.canvas[y0][x0] = fill_char
            return
        # when dx >= dy use fill by x-axis, and use fill by y-axis otherwise
        if abs(dx) >= abs(dy):
            for x in range(x0, x1 + 1):
                y = y0 if dx == 0 else y0 + int(round((x - x0) * dy / float((dx))))
                if self.check_coord_in_range(x, y):
                    self.canvas[y][x] = fill_char
        else:
            if y0 < y1:
                for y in range(y0, y1 + 1):
                    x = x0 if dy == 0 else x0 + int(round((y - y0) * dx / float((dy))))
                    if self.check_coord_in_range(x, y):
                        self.canvas[y][x] = fill_char
            else:
                for y in range(y1, y0 + 1):
                    x = x0 if dy == 0 else x1 + int(round((y - y1) * dx / float((dy))))
                    if self.check_coord_in_range(x, y):
                        self.canvas[y][x] = fill_char

    def add_text(self, x, y, text):
        """
        Add text to canvas at position (x, y)
        """
        for i, c in enumerate(text):
            if self.check_coord_in_range(x + i, y):
                self.canvas[y][x + i] = c

    def add_rect(self, x, y, w, h, fill_char=' ', outline_char='o'):
        """
        Add rectangle filled with `fill_char` and outline with `outline_char`
        """
        if not fill_char:
            fill_char = ' '
        elif len(fill_char) > 1:
            fill_char = fill_char[0]
        if not outline_char:
            outline_char = 'o'
        elif len(outline_char) > 1:
            outline_char = outline_char[0]
        for px in range(x, x + w):
            for py in range(y, y + h):
                if self.check_coord_in_range(px, py):
                    if px == x or px == x + w - 1 or py == y or py == y + h - 1:
                        self.canvas[py][px] = outline_char
                    else:
                        self.canvas[py][px] = fill_char

    def add_nine_patch_rect(self, x, y, w, h, outline_3x3_chars=None):
        """
        Add nine-patch rectangle
        """
        default_outline_3x3_chars = (
            '.', '-', '.',
            '|', ' ', '|',
            '`', '-', "'"
        )
        if not outline_3x3_chars:
            outline_3x3_chars = default_outline_3x3_chars
        # filter chars
        filtered_outline_3x3_chars = []
        for index, char in enumerate(outline_3x3_chars[0:9]):
            if not char:
                char = default_outline_3x3_chars[index]
            elif len(char) > 1:
                char = char[0]
            filtered_outline_3x3_chars.append(char)
        for px in range(x, x + w):
            for py in range(y, y + h):
                if self.check_coord_in_range(px, py):
                    if px == x and py == y:
                        self.canvas[py][px] = filtered_outline_3x3_chars[0]
                    elif px == x and y < py < y + h - 1:
                        self.canvas[py][px] = filtered_outline_3x3_chars[3]
                    elif px == x and py == y + h - 1:
                        self.canvas[py][px] = filtered_outline_3x3_chars[6]
                    elif x < px < x + w - 1 and py == y:
                        self.canvas[py][px] = filtered_outline_3x3_chars[1]
                    elif x < px < x + w - 1 and py == y + h - 1:
                        self.canvas[py][px] = filtered_outline_3x3_chars[7]
                    elif px == x + w - 1 and py == y:
                        self.canvas[py][px] = filtered_outline_3x3_chars[2]
                    elif px == x + w - 1 and y < py < y + h - 1:
                        self.canvas[py][px] = filtered_outline_3x3_chars[5]
                    elif px == x + w - 1 and py == y + h - 1:
                        self.canvas[py][px] = filtered_outline_3x3_chars[8]
                    else:
                        self.canvas[py][px] = filtered_outline_3x3_chars[4]

    def check_coord_in_range(self, x, y):
        """
        Check that coordinate (x, y) is in range, to prevent out of range error
        """
        return 0 <= x < self.cols and 0 <= y < self.lines

    def get_canvas_as_str(self):
        """
        Return canvas as a string
        """
        return '\n'.join([''.join(col) for col in self.canvas])

    def __str__(self):
        """
        Return canvas as a string
        """
        return self.get_canvas_as_str()
mit
5,868,139,019,117,371,000
36.771605
87
0.440758
false
3.588856
false
false
false
m4nh/roars
scripts/nodes/examples/arp_detector_example.py
1
2688
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from roars.rosutils.rosnode import RosNode
from roars.vision.cameras import CameraRGB
from roars.vision.arucoutils import MarkerDetector
from roars.vision.arp import ARP
import roars.vision.cvutils as cvutils
import cv2
import numpy as np
import os
import json

#⬢⬢⬢⬢⬢➤ NODE
node = RosNode("rosnode_example")

#⬢⬢⬢⬢⬢➤ Sets HZ from parameters
node.setHz(node.setupParameter("hz", 30))

#⬢⬢⬢⬢⬢➤ Creates Camera Proxy
camera_topic = node.setupParameter(
    "camera_topic",
    "/camera/rgb/image_raw/compressed"
)
camera_file = node.getFileInPackage(
    'roars',
    'data/camera_calibrations/asus_xtion.yml'
)
camera = CameraRGB(
    configuration_file=camera_file,
    rgb_topic=camera_topic,
    compressed_image="compressed" in camera_topic
)

#⬢⬢⬢⬢⬢➤ ARP
arp_configuration = node.getFileInPackage(
    'roars',
    'data/arp_configurations/prototype_configuration.json'
)
arp = ARP(configuration_file=arp_configuration, camera_file=camera_file)

#⬢⬢⬢⬢⬢➤ Points storage
points_per_object = node.setupParameter("points_per_object", 6)
collected_points = []
output_file = node.setupParameter("output_file", "/tmp/arp_objects.json")


#⬢⬢⬢⬢⬢➤ Camera Callback
def cameraCallback(frame):
    #⬢⬢⬢⬢⬢➤ Grabs image from Frame
    img = frame.rgb_image.copy()

    arp_pose = arp.detect(img, debug_draw=True)

    if arp_pose:
        img_points = cvutils.reproject3DPoint(
            arp_pose.p.x(),
            arp_pose.p.y(),
            arp_pose.p.z(),
            camera=camera
        )
        cv2.circle(
            img,
            (int(img_points[0]), int(img_points[1])),
            5,
            (0, 0, 255),
            -1
        )

    #⬢⬢⬢⬢⬢➤ Show
    cv2.imshow("output", img)
    c = cv2.waitKey(1)
    if c == 113:
        node.close()
    if c == 32 and arp_pose != None:
        print("New Point Added", arp_pose.p)
        collected_points.append([
            arp_pose.p.x(),
            arp_pose.p.y(),
            arp_pose.p.z()
        ])
        if len(collected_points) % points_per_object == 0:
            print("New Object Stored")


camera.registerUserCallabck(cameraCallback)

#⬢⬢⬢⬢⬢➤ Main Loop
while node.isActive():
    node.tick()


def chunks(l, n):
    """Yield successive n-sized chunks from l."""
    for i in range(0, len(l), n):
        yield l[i:i + n]


probable_objects = list(chunks(collected_points, points_per_object))
objects = []
for o in probable_objects:
    if len(o) == points_per_object:
        objects.append(o)

with open(output_file, 'w') as handle:
    handle.write(json.dumps(objects, indent=4))
gpl-3.0
4,086,218,510,693,855,000
23.571429
73
0.625581
false
2.724393
true
false
false
IFAEControl/pirelay
pirelay/server.py
1
1591
#!/usr/bin/env python3

import time
from concurrent import futures

import grpc

from .protos import pirelay_pb2
from .protos import pirelay_pb2_grpc

from .relay import RelaysArray

_ONE_DAY_IN_SECONDS = 60 * 60 * 24

PINS = [21]


class PiRelayServer(pirelay_pb2_grpc.PiRelayServicer):
    def __init__(self, bcm_pins=[]):
        self._relays = RelaysArray(bcm_pins=bcm_pins)

    def Enable(self, request, context):
        try:
            self._relays.enable(request.channel)
        except Exception as ex:
            return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Error,
                                              message=str(ex))
        else:
            return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Ok,
                                              message="")

    def Disable(self, request, context):
        try:
            self._relays.disable(request.channel)
        except Exception as ex:
            return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Error,
                                              message=str(ex))
        else:
            return pirelay_pb2.PiRelaysAnswer(type=pirelay_pb2.Ok,
                                              message="")


def serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    pirelay_pb2_grpc.add_PiRelayServicer_to_server(PiRelayServer(PINS), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    try:
        while True:
            time.sleep(_ONE_DAY_IN_SECONDS)
    except KeyboardInterrupt:
        server.stop(0)


if __name__ == '__main__':
    serve()
lgpl-3.0
-4,084,407,837,345,929,700
25.966102
79
0.574481
false
3.649083
false
false
false
tensorflow/model-optimization
tensorflow_model_optimization/g3doc/tools/build_docs.py
1
3663
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tool to generate open source api_docs for tensorflow_model_optimization.

To use:

1. Install the tensorflow docs package, which is only compatible with Python

   python3 -m pip install git+https://github.com/tensorflow/docs

2. Install TensorFlow Model Optimization. The API docs are generated from
   `tfmot` from the import of the tfmot package below, based on what is exposed
   under
   https://github.com/tensorflow/model-optimization/tree/master/tensorflow_model_optimization/python/core/api.
   See https://www.tensorflow.org/model_optimization/guide/install.

3. Run build_docs.py.

   python3 build_docs.py --output_dir=/tmp/model_optimization_api

4. View the generated markdown files on a viewer. One option is to fork
   https://github.com/tensorflow/model-optimization/, push a change that
   copies the files to tensorflow_model_optimization/g3doc, and then view the
   files on Github.

Note: If duplicate or spurious docs are generated (e.g. internal names),
consider blacklisting them via the `private_map` argument below.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from absl import app
from absl import flags

from tensorflow_docs.api_generator import generate_lib

import tensorflow_model_optimization as tfmot

flags.DEFINE_string("output_dir", "/tmp/model_optimization_api",
                    "Where to output the docs")

flags.DEFINE_string(
    "code_url_prefix",
    ("https://github.com/tensorflow/model-optimization/blob/master/"
     "tensorflow_model_optimization"),
    "The url prefix for links to code.")

flags.DEFINE_bool("search_hints", True,
                  "Include metadata search hints in the generated files")

flags.DEFINE_string("site_path", "model_optimization/api_docs/python",
                    "Path prefix in the _toc.yaml")

FLAGS = flags.FLAGS


def main(unused_argv):
  doc_generator = generate_lib.DocGenerator(
      root_title="TensorFlow Model Optimization",
      py_modules=[("tfmot", tfmot)],
      base_dir=os.path.dirname(tfmot.__file__),
      code_url_prefix=FLAGS.code_url_prefix,
      search_hints=FLAGS.search_hints,
      site_path=FLAGS.site_path,
      # TODO(tfmot): remove this once the next release after 0.3.0 happens.
      # This is needed in the interim because the API docs reflect
      # the latest release and the current release still wildcard imports
      # all of the classes below.
      private_map={
          "tfmot.sparsity.keras": [
              # List of internal classes which get exposed when imported.
              "InputLayer",
              "custom_object_scope",
              "pruning_sched",
              "pruning_wrapper",
              "absolute_import",
              "division",
              "print_function",
              "compat"
          ]
      },
  )

  doc_generator.build(output_dir=FLAGS.output_dir)


if __name__ == "__main__":
  app.run(main)
apache-2.0
1,166,959,432,661,376,000
33.556604
110
0.677041
false
4.097315
false
false
false
owlabs/incubator-airflow
airflow/executors/__init__.py
1
3891
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import sys

from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.configuration import conf
from airflow.exceptions import AirflowException
from airflow.executors.base_executor import BaseExecutor  # noqa
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.sequential_executor import SequentialExecutor

DEFAULT_EXECUTOR = None


def _integrate_plugins():
    """Integrate plugins to the context."""
    from airflow.plugins_manager import executors_modules
    for executors_module in executors_modules:
        sys.modules[executors_module.__name__] = executors_module
        globals()[executors_module._name] = executors_module


def get_default_executor():
    """Creates a new instance of the configured executor if none exists and returns it"""
    global DEFAULT_EXECUTOR

    if DEFAULT_EXECUTOR is not None:
        return DEFAULT_EXECUTOR

    executor_name = conf.get('core', 'EXECUTOR')

    DEFAULT_EXECUTOR = _get_executor(executor_name)

    log = LoggingMixin().log
    log.info("Using executor %s", executor_name)

    return DEFAULT_EXECUTOR


class Executors:
    LocalExecutor = "LocalExecutor"
    SequentialExecutor = "SequentialExecutor"
    CeleryExecutor = "CeleryExecutor"
    DaskExecutor = "DaskExecutor"
    MesosExecutor = "MesosExecutor"
    KubernetesExecutor = "KubernetesExecutor"
    DebugExecutor = "DebugExecutor"


def _get_executor(executor_name):
    """
    Creates a new instance of the named executor.
    In case the executor name is not know in airflow,
    look for it in the plugins
    """
    if executor_name == Executors.LocalExecutor:
        return LocalExecutor()
    elif executor_name == Executors.SequentialExecutor:
        return SequentialExecutor()
    elif executor_name == Executors.CeleryExecutor:
        from airflow.executors.celery_executor import CeleryExecutor
        return CeleryExecutor()
    elif executor_name == Executors.DaskExecutor:
        from airflow.executors.dask_executor import DaskExecutor
        return DaskExecutor()
    elif executor_name == Executors.MesosExecutor:
        from airflow.contrib.executors.mesos_executor import MesosExecutor
        return MesosExecutor()
    elif executor_name == Executors.KubernetesExecutor:
        from airflow.contrib.executors.kubernetes_executor import KubernetesExecutor
        return KubernetesExecutor()
    elif executor_name == Executors.DebugExecutor:
        from airflow.executors.debug_executor import DebugExecutor
        return DebugExecutor()
    else:
        # Loading plugins
        _integrate_plugins()
        executor_path = executor_name.split('.')
        if len(executor_path) != 2:
            raise AirflowException(
                "Executor {0} not supported: "
                "please specify in format plugin_module.executor".format(executor_name))
        if executor_path[0] in globals():
            return globals()[executor_path[0]].__dict__[executor_path[1]]()
        else:
            raise AirflowException("Executor {0} not supported.".format(executor_name))
apache-2.0
-8,116,916,838,794,192,000
36.776699
89
0.72038
false
4.347486
false
false
false
suutari/shoop
shuup/notify/template.py
1
3011
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals

from django.utils.encoding import force_text
from jinja2.sandbox import SandboxedEnvironment


class NoLanguageMatches(Exception):
    pass


def render_in_context(context, template_text, html_intent=False):
    """
    Render the given Jinja2 template text in the script context.

    :param context: Script context.
    :type context: shuup.notify.script.Context
    :param template_text: Jinja2 template text.
    :type template_text: str
    :param html_intent: Is the template text intended for HTML output?
                        This currently turns on autoescaping.
    :type html_intent: bool
    :return: Rendered template text
    :rtype: str
    :raises: Whatever Jinja2 might happen to raise
    """
    # TODO: Add some filters/globals into this environment?
    env = SandboxedEnvironment(autoescape=html_intent)
    template = env.from_string(template_text)
    return template.render(context.get_variables())


class Template(object):
    def __init__(self, context, data):
        """
        :param context: Script context
        :type context: shuup.notify.script.Context
        :param data: Template data dictionary
        :type data: dict
        """
        self.context = context
        self.data = data

    def _get_language_data(self, language):
        return self.data.get(force_text(language).lower(), {})

    def has_language(self, language, fields):
        data = self._get_language_data(language)
        return set(data.keys()) >= set(fields)

    def render(self, language, fields):
        """
        Render this template in the given language,
        returning the given fields.

        :param language: Language code (ISO 639-1 or ISO 639-2)
        :type language: str
        :param fields: Desired fields to render.
        :type fields: list[str]
        :return: Dict of field -> rendered content.
        :rtype: dict[str, str]
        """
        data = self._get_language_data(language)
        rendered = {}
        for field in fields:
            field_template = data.get(field)
            if field_template:  # pragma: no branch
                rendered[field] = render_in_context(self.context, field_template, html_intent=False)
        return rendered

    def render_first_match(self, language_preferences, fields):
        # TODO: Document
        for language in language_preferences:
            if self.has_language(language, fields):
                rendered = self.render(language=language, fields=fields)
                rendered["_language"] = language
                return rendered
        raise NoLanguageMatches("No language in template matches any of languages %r for fields %r" % (
            language_preferences, fields
        ))
agpl-3.0
-5,120,846,759,464,826,000
32.831461
103
0.645965
false
4.23488
false
false
false
danic96/Practica1
Practica1/Aplicacio/views.py
1
4321
# from django.shortcuts import render

# Create your views here.
# from django.http import HttpResponse
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import DetailView, DeleteView
from rest_framework import generics

from models import Movie, Character, Team, Power, Location
from forms import MovieForm, CharacterForm, TeamForm, PowerForm, LocationForm
from Practica1.serializers import MovieSerializer


# Security Mixins
class LoginRequiredMixin(object):
    @method_decorator(login_required())
    def dispatch(self, *args, **kwargs):
        return super(LoginRequiredMixin, self).dispatch(*args, **kwargs)


class CheckIsOwnerMixin(object):
    def get_object(self, *args, **kwargs):
        obj = super(CheckIsOwnerMixin, self).get_object(*args, **kwargs)
        if not obj.user == self.request.user:
            raise PermissionDenied
        return obj


class LoginRequiredCheckIsOwnerUpdateView(LoginRequiredMixin, CheckIsOwnerMixin, UpdateView):
    template_name = 'Aplicacio/form.html'


class MovieCreate(LoginRequiredMixin, CreateView):
    model = Movie
    template_name = 'Aplicacio/form.html'
    form_class = MovieForm

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super(MovieCreate, self).form_valid(form)


class CharacterCreate(LoginRequiredMixin, CreateView):
    model = Character
    template_name = 'Aplicacio/form.html'
    form_class = CharacterForm

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super(CharacterCreate, self).form_valid(form)


class TeamCreate(LoginRequiredMixin, CreateView):
    model = Team
    template_name = 'Aplicacio/form.html'
    form_class = TeamForm

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super(TeamCreate, self).form_valid(form)


class PowerCreate(LoginRequiredMixin, CreateView):
    model = Power
    template_name = 'Aplicacio/form.html'
    form_class = PowerForm

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super(PowerCreate, self).form_valid(form)


class LocationCreate(LoginRequiredMixin, CreateView):
    model = Location
    template_name = 'Aplicacio/form.html'
    form_class = LocationForm

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super(LocationCreate, self).form_valid(form)


"""
class LocationDelete(LoginRequiredMixin, CreateView):
    model = Location
    template_name = 'Aplicacio/form.html'
    form_class = LocationForm

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super(LocationDelete, self).form_valid(form)
"""

"""
class Delete(DeleteView):
    model = Location
    success_url = reverse_lazy('all_locations')  # This is where this view will
    # redirect the user
    template_name = 'Aplicacio/delete_location.html'
"""


class MovieDetail(DetailView):
    model = Movie
    template_name = 'Aplicacio/movie_detail.html'

    """
    def get_context_data(self, **kwargs):
        context = super(MovieDetail, self).get_context_data(**kwargs)
        context['RATING_CHOICES'] = RestaurantReview.RATING_CHOICES
        return context
    """


class CharacterDetail(DetailView):
    model = Character
    template_name = 'Aplicacio/character_detail.html'


class TeamDetail(DetailView):
    model = Team
    template_name = 'Aplicacio/team_detail.html'


class PowerDetail(DetailView):
    model = Power
    template_name = 'Aplicacio/power_detail.html'


class LocationDetail(DetailView):
    model = Location
    template_name = 'Aplicacio/location_detail.html'

    def form_valid(self, form):
        form.instance.user = self.request.user
        return super(CharacterCreate, self).form_valid(form)


### RESTful API views ###
class APIMovieList(generics.ListCreateAPIView):
    model = Movie
    queryset = Movie.objects.all()
    serializer_class = MovieSerializer


class APIMovieDetail(generics.RetrieveUpdateDestroyAPIView):
    model = Movie
    queryset = Movie.objects.all()
    serializer_class = MovieSerializer
mit
6,965,548,810,274,474,000
27.058442
93
0.707938
false
3.889289
false
false
false
CorundumGames/Invasodado
game/ufo.py
1
3605
from math import sin
from random import choice, uniform, expovariate

from pygame import Rect

from core import color
from core import config
from core.particles import ParticleEmitter
from game.block import get_block
from game.gameobject import GameObject
from game import gamedata

### Constants ##################################################################
AVG_WAIT = 9000  #Expected time in frames between UFO appearance
DEATH = config.load_sound('ufo_explosion.wav')
FRAMES = tuple(
    Rect(64 * (i % 4), 192 + 32 * (i // 4), 64, 32)
    for i in range(10, -1, -1)
)
INVADE = config.load_sound('ufo.wav')
START_POS = (640, 16)
UFO_FRAMES = color.get_colored_objects(FRAMES)
UFO_STATES = ('IDLE', 'APPEARING', 'ACTIVE', 'DYING', 'LEAVING', 'LOWERING', 'GAMEOVER')
################################################################################


class UFO(GameObject):
    STATES = config.Enum(*UFO_STATES)
    GROUP = None
    BLOCK_GROUP = None

    def __init__(self):
        super().__init__()
        self._anim = 0.0
        self.column = None
        self.current_frame_list = UFO_FRAMES
        self.image = config.get_sprite(FRAMES[0])
        self.odds = expovariate(AVG_WAIT)
        self.position = list(START_POS)
        self.rect = Rect(START_POS, self.image.get_size())
        self.state = UFO.STATES.IDLE
        self.emitter = ParticleEmitter(color.random_color_particles, self.rect)

        del self.acceleration

    def appear(self):
        '''
        Appear on-screen, but not for very long!
        '''
        INVADE.play(-1)
        self.position = list(START_POS)
        self.rect.topleft = list(START_POS)
        self.change_state(UFO.STATES.ACTIVE)
        self.velocity[0] = -2.0

    def move(self):
        '''
        Move left on the screen, and oscillate up and down.
        '''
        position = self.position
        rect = self.rect

        self._anim += 0.5
        self.image = UFO_FRAMES[id(choice(color.LIST))] \
                               [int(self._anim) % len(FRAMES)]
        position[0] += self.velocity[0]
        position[1] += sin(self._anim/4)
        rect.topleft = (position[0] + .5, position[1] + .5)

        if rect.right < 0:  #If we've gone past the left edge of the screen...
            self.change_state(UFO.STATES.LEAVING)

    def die(self):
        '''
        Vanish and release a special Block that clears lots of other Blocks.
        '''
        self.emitter.rect = self.rect
        self.emitter.burst(30)
        DEATH.play()
        UFO.BLOCK_GROUP.add(get_block((self.rect.centerx, 0), special=True))
        gamedata.score += 90
        self.change_state(UFO.STATES.LEAVING)

    def leave(self):
        INVADE.stop()
        self.velocity[0] = 0
        self.position = list(START_POS)
        self.rect.topleft = START_POS
        self.change_state(UFO.STATES.IDLE)

    def wait(self):
        '''
        Wait off-screen, and only come back with a specific probability.
        '''
        if uniform(0, 1) < self.odds:  #With a certain probability...
            self.odds = expovariate(AVG_WAIT)
            self.change_state(UFO.STATES.APPEARING)

    actions = {
        STATES.IDLE     : 'wait'  ,
        STATES.APPEARING: 'appear',
        STATES.ACTIVE   : 'move'  ,
        STATES.DYING    : 'die'   ,
        STATES.LEAVING  : 'leave' ,
        STATES.GAMEOVER : None    ,
    }
gpl-3.0
-1,058,903,097,034,391,700
32.700935
88
0.530929
false
3.583499
false
false
false
parksandwildlife/wastd
occurrence/migrations/0006_auto_20181129_1812.py
1
1084
# Generated by Django 2.0.8 on 2018-11-29 10:12

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('occurrence', '0005_auto_20181025_1720'),
    ]

    operations = [
        migrations.AlterField(
            model_name='areaencounter',
            name='source',
            field=models.PositiveIntegerField(choices=[(0, 'Direct entry'), (1, 'Manual entry from paper datasheet'), (2, 'Digital data capture (ODK)'), (10, 'Threatened Fauna'), (11, 'Threatened Flora'), (12, 'Threatened Communities'), (13, 'Threatened Communities Boundaries'), (14, 'Threatened Communities Buffers'), (15, 'Threatened Communities Sites'), (20, 'Turtle Tagging Database WAMTRAM2'), (21, 'Ningaloo Turtle Program'), (22, 'Broome Turtle Program'), (23, 'Pt Hedland Turtle Program'), (24, 'Gnaraloo Turtle Program'), (25, 'Eco Beach Turtle Program'), (30, 'Cetacean Strandings Database'), (31, 'Pinniped Strandings Database')], default=0, help_text='Where was this record captured initially?', verbose_name='Data Source'),
        ),
    ]
mit
-6,071,923,042,693,717,000
59.222222
738
0.671587
false
3.519481
false
false
false
csdms/dakota
dakotathon/tests/test_plugin_hydrotrend_run.py
1
3466
#!/usr/bin/env python
#
# Test running the dakota.plugin.hydrotrend module.
#
# Call with:
#   $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)

import os
import shutil
# import filecmp
import glob
from nose.tools import with_setup, assert_true
from dakotathon.dakota import Dakota
from dakotathon.plugins.hydrotrend import is_installed as is_hydrotrend_installed
from dakotathon.utils import is_dakota_installed
from . import start_dir, data_dir

# Global variables -----------------------------------------------------

run_dir = os.getcwd()
config_file = os.path.join(run_dir, "dakota.yaml")
known_config_file = os.path.join(data_dir, "dakota.yaml")
# known_dat_file = os.path.join(data_dir, 'dakota.dat')

# Fixtures -------------------------------------------------------------


def setup_module():
    """Called before any tests are performed."""
    print("\n*** " + __name__)


def setup():
    """Called at start of any test using it @with_setup()"""
    pass


def teardown():
    """Called at end of any test using it @with_setup()"""
    if os.path.exists(config_file):
        os.remove(config_file)
    if os.path.exists("dakota.in"):
        os.remove("dakota.in")
    if os.path.exists("run.log"):
        os.remove("run.log")
    if os.path.exists("stderr.log"):
        os.remove("stderr.log")
    if is_hydrotrend_installed():
        for dname in glob.glob("HYDRO_*"):
            shutil.rmtree(dname)
    if is_dakota_installed():
        for dname in glob.glob("run.*"):
            shutil.rmtree(dname)
        for fname in ["dakota." + ext for ext in ["dat", "out", "rst"]]:
            if os.path.exists(fname):
                os.remove(fname)


def teardown_module():
    """Called after all tests have completed."""
    pass


# Tests ----------------------------------------------------------------


@with_setup(setup, teardown)
def test_run_by_setting_attributes():
    """Test running a HydroTrend simulation."""
    d = Dakota(method="vector_parameter_study", plugin="hydrotrend")
    d.template_file = os.path.join(data_dir, "HYDRO.IN.dtmpl")
    d.auxiliary_files = os.path.join(data_dir, "HYDRO0.HYPS")
    d.variables.descriptors = [
        "starting_mean_annual_temperature",
        "total_annual_precipitation",
    ]
    d.variables.initial_point = [10.0, 1.5]
    d.method.final_point = [20.0, 2.5]
    d.method.n_steps = 5
    d.responses.response_descriptors = ["Qs_median", "Q_mean"]
    d.responses.response_files = ["HYDROASCII.QS", "HYDROASCII.Q"]
    d.responses.response_statistics = ["median", "mean"]
    d.setup()
    assert_true(os.path.exists(d.input_file))
    if is_dakota_installed() and is_hydrotrend_installed():
        d.run()
        assert_true(os.path.exists(d.output_file))
        # assert_true(filecmp.cmp(known_dat_file, d.environment.data_file))


@with_setup(setup, teardown)
def test_run_from_config_file():
    """Test running a HydroTrend simulation from a config file."""
    d = Dakota.from_file_like(known_config_file)
    d.run_directory = run_dir
    d.template_file = os.path.join(data_dir, "HYDRO.IN.dtmpl")
    d.auxiliary_files = os.path.join(data_dir, "HYDRO0.HYPS")
    d.serialize(config_file)
    d.write_input_file()
    assert_true(os.path.exists(d.input_file))
    if is_dakota_installed() and is_hydrotrend_installed():
        d.run()
        assert_true(os.path.exists(d.output_file))
        # assert_true(filecmp.cmp(known_dat_file, d.environment.data_file))
mit
-8,400,592,787,912,912,000
31.092593
81
0.617426
false
3.16819
true
false
false
kysolvik/reservoir-id
reservoir-id/classifier_train.py
1
6974
#!/usr/bin/env python """ Train random forest classifier Inputs: CSV from build_att_table, small area cutoff Outputs: Packaged up Random Forest model @authors: Kylen Solvik Date Create: 3/17/17 """ # Load libraries import pandas as pd from sklearn import model_selection from sklearn import preprocessing from sklearn.metrics import classification_report from sklearn.metrics import confusion_matrix from sklearn.metrics import accuracy_score from sklearn.externals import joblib from sklearn.cross_validation import train_test_split from sklearn.grid_search import GridSearchCV from sklearn.cross_validation import * import numpy as np import sys import argparse import os import xgboost as xgb # Parse arguments parser = argparse.ArgumentParser(description='Train Random Forest classifier.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('prop_csv', help='Path to attribute table (from build_att_table.py).', type=str) parser.add_argument('xgb_pkl', help='Path to save random forest model as .pkl.', type=str) parser.add_argument('--area_lowbound', help='Lower area bound. All regions <= in size will be ignored', default=2, type=int) parser.add_argument('--path_prefix', help='To be placed at beginnings of all other path args', type=str,default='') args = parser.parse_args() def select_training_obs(full_csv_path): """Takes full csv and selects only the training observations. Writes out to csv for further use""" training_csv_path = full_csv_path.replace('.csv','_trainonly.csv') if not os.path.isfile(training_csv_path): dataset = pd.read_csv(full_csv_path,header=0) training_dataset = dataset.loc[dataset['class'] > 0] training_dataset.to_csv(training_csv_path,header=True,index=False) return(training_csv_path) def main(): # Set any attributes to exclude for this run exclude_att_patterns = [] # Load dataset training_csv = select_training_obs(args.path_prefix + args.prop_csv) dataset = pd.read_csv(training_csv,header=0) dataset_acut = dataset.loc[dataset['area'] > args.area_lowbound] # Exclude attributes matching user input patterns, or if they are all nans exclude_atts = [] for pattern in exclude_att_patterns: col_list = [col for col in dataset_acut.columns if pattern in col] exclude_atts.extend(col_list) for att in dataset.columns[1:]: if sum(np.isfinite(dataset[att])) == 0: exclude_atts.append(att) for att in list(set(exclude_atts)): del dataset_acut[att] (ds_y,ds_x) = dataset_acut.shape print(ds_y,ds_x) # Convert dataset to array feature_names = dataset_acut.columns[2:] array = dataset_acut.values X = array[:,2:ds_x].astype(float) Y = array[:,1].astype(int) Y = Y-1 # Convert from 1s and 2s to 0-1 # Set nans to 0 X = np.nan_to_num(X) # Separate test data test_size = 0.2 seed = 5 X_train, X_test, Y_train, Y_test = model_selection.train_test_split( X, Y, test_size=test_size, random_state=seed) # Convert data to xgboost matrices d_train = xgb.DMatrix(X_train,label=Y_train) # d_test = xgb.DMatrix(X_test,label=Y_test) #---------------------------------------------------------------------- # Paramater tuning # Step 1: Find approximate n_estimators to use early_stop_rounds = 40 n_folds = 5 xgb_model = xgb.XGBClassifier( learning_rate =0.1, n_estimators=1000, max_depth=5, min_child_weight=1, gamma=0, subsample=0.8, colsample_bytree=0.8, objective= 'binary:logistic', seed=27) xgb_params = xgb_model.get_xgb_params() cvresult = xgb.cv(xgb_params, d_train, num_boost_round=xgb_params['n_estimators'], nfold=n_folds, metrics='auc', early_stopping_rounds=early_stop_rounds, ) n_est_best = 
(cvresult.shape[0] - early_stop_rounds) print('Best number of rounds = {}'.format(n_est_best)) # Step 2: Tune hyperparameters xgb_model = xgb.XGBClassifier() params = {'max_depth': range(5,10,2), 'learning_rate': [0.1], 'gamma':[0,0.5,1], 'silent': [1], 'objective': ['binary:logistic'], 'n_estimators' : [n_est_best], 'subsample' : [0.7, 0.8,1], 'min_child_weight' : range(1,4,2), 'colsample_bytree':[0.7,0.8,1], } clf = GridSearchCV(xgb_model,params,n_jobs = 1, cv = StratifiedKFold(Y_train, n_folds=5, shuffle=True), scoring = 'roc_auc', verbose = 2, refit = True) clf.fit(X_train,Y_train) best_parameters,score,_ = max(clf.grid_scores_,key=lambda x: x[1]) print('Raw AUC score:',score) for param_name in sorted(best_parameters.keys()): print("%s: %r" % (param_name, best_parameters[param_name])) # Step 3: Decrease learning rate and up the # of trees #xgb_finalcv = XGBClassifier() tuned_params = clf.best_params_ tuned_params['n_estimators'] = 10000 tuned_params['learning_rate'] = 0.01 cvresult = xgb.cv(tuned_params, d_train, num_boost_round=tuned_params['n_estimators'], nfold=n_folds, metrics='auc', early_stopping_rounds=early_stop_rounds, ) # Train model with cv results and predict on test set For test accuracy n_est_final = int((cvresult.shape[0] - early_stop_rounds) / (1 - 1 / n_folds)) tuned_params['n_estimators'] = n_est_final print(tuned_params) xgb_train = xgb.XGBClassifier() xgb_train.set_params(**tuned_params) xgb_train.fit(X_train,Y_train) bst_preds = xgb_train.predict(X_test) print("Xgboost Test acc = " + str(accuracy_score(Y_test, bst_preds))) print(confusion_matrix(Y_test, bst_preds)) print(classification_report(Y_test, bst_preds)) # Export cv classifier joblib.dump(cvresult, args.path_prefix + args.xgb_pkl + 'cv') # Export classifier trained on full data set xgb_full = xgb.XGBClassifier() xgb_full.set_params(**tuned_params) xgb_full.fit(X,Y) joblib.dump(xgb_full, args.path_prefix + args.xgb_pkl) if __name__ == '__main__': main()
gpl-3.0
-8,813,554,001,576,761,000
37.10929
88
0.57571
false
3.680211
true
false
false
ambitioninc/django-user-guide
user_guide/templatetags/user_guide_tags.py
1
2767
""" Template tag for displaying user guides. """ import re from django import template from django.conf import settings from django.template import loader from django.template.defaulttags import CsrfTokenNode from user_guide.models import GuideInfo register = template.Library() # The maximum number of guides to show per page USER_GUIDE_SHOW_MAX = getattr(settings, 'USER_GUIDE_SHOW_MAX', 10) # Use cookies to determine if guides should be shown USER_GUIDE_USE_COOKIES = getattr(settings, 'USER_GUIDE_USE_COOKIES', False) # The url to any custom CSS USER_GUIDE_CSS_URL = getattr( settings, 'USER_GUIDE_CSS_URL', None ) # The url to any custom JS USER_GUIDE_JS_URL = getattr( settings, 'USER_GUIDE_JS_URL', None ) @register.simple_tag(takes_context=True) def user_guide(context, *args, **kwargs): """ Creates html items for all appropriate user guides. Kwargs: guide_name: A string name of a specific guide. guide_tags: An array of string guide tags. limit: An integer maxmimum number of guides to show at a single time. Returns: An html string containing the user guide scaffolding and any guide html. """ user = context['request'].user if 'request' in context and hasattr(context['request'], 'user') else None if user and user.is_authenticated(): # No one is logged in limit = kwargs.get('limit', USER_GUIDE_SHOW_MAX) filters = { 'user': user, 'is_finished': False } # Handle special filters if kwargs.get('guide_name'): filters['guide__guide_name'] = kwargs.get('guide_name') if kwargs.get('guide_tags'): filters['guide__guide_tag__in'] = kwargs.get('guide_tags') # Set the html html = ''.join(( '<div data-guide="{0}" class="django-user-guide-item">{1}</div>'.format( guide_info.id, guide_info.guide.html ) for guide_info in GuideInfo.objects.select_related('guide').filter(**filters).only('guide')[:limit] )) # Return the rendered template with the guide html return loader.render_to_string('user_guide/window.html', { 'html': re.sub(r'\{\s*static\s*\}', settings.STATIC_URL, html), 'css_href': '{0}user_guide/build/django-user-guide.css'.format(settings.STATIC_URL), 'js_src': '{0}user_guide/build/django-user-guide.js'.format(settings.STATIC_URL), 'custom_css_href': USER_GUIDE_CSS_URL, 'custom_js_src': USER_GUIDE_JS_URL, 'use_cookies': str(USER_GUIDE_USE_COOKIES).lower(), 'csrf_node': CsrfTokenNode().render(context) }) else: return ''
mit
-4,151,106,416,245,228,000
31.940476
113
0.62378
false
3.650396
false
false
false
praekelt/django-ultracache
bin/cache-purge-consumer.py
1
3973
"""Subscribe to RabbitMQ and listen for purge instructions continuously. Manage this script through eg. supervisor.""" import json import traceback from multiprocessing.pool import ThreadPool from optparse import OptionParser from time import sleep import pika import requests import yaml class Consumer: channel = None connection = None def __init__(self): self.pool = ThreadPool() parser = OptionParser() parser.add_option("-c", "--config", dest="config", help="Configuration file", metavar="FILE") (options, args) = parser.parse_args() config_file = options.config self.config = {} if config_file: self.config = yaml.load(open(config_file)) or {} def log(self, msg): name = self.config.get("logfile", None) if not name: return if name == "stdout": print(msg) return fp = open(name, "a") try: fp.write(msg + "\n") finally: fp.close() def connect(self): parameters = pika.URLParameters( self.config.get( "rabbit-url", "amqp://guest:guest@127.0.0.1:5672/%2F" ) ) self.connection = pika.BlockingConnection(parameters) self.channel = self.connection.channel() self.channel.exchange_declare( exchange="purgatory", exchange_type="fanout" ) queue = self.channel.queue_declare(exclusive=True) queue_name = queue.method.queue self.channel.queue_bind(exchange="purgatory", queue=queue_name) self.channel.basic_qos(prefetch_count=1) self.channel.basic_consume( self.on_message, queue=queue_name, no_ack=False, exclusive=True ) def on_message(self, channel, method_frame, header_frame, body): self.pool.apply_async(self.handle_message, (body,)) channel.basic_ack(delivery_tag=method_frame.delivery_tag) def handle_message(self, body): if body: try: di = json.loads(body) except ValueError: path = body headers = {} else: path = di["path"] headers = di["headers"] self.log("Purging %s with headers %s" % (path, str(headers))) host = self.config.get("host", None) try: if host: final_headers = {"Host": host} final_headers.update(headers) response = requests.request( "PURGE", "http://" \ + self.config.get("proxy-address", "127.0.0.1") + path, headers=final_headers, timeout=10 ) else: response = requests.request( "PURGE", "http://" \ + self.config.get("proxy-address", "127.0.0.1") + path, timeout=10, headers=headers ) except Exception as exception: msg = traceback.format_exc() self.log("Error purging %s: %s" % (path, msg)) else: content = response.content def consume(self): loop = True while loop: try: if self.channel is None: raise pika.exceptions.ConnectionClosed() self.channel.start_consuming() except KeyboardInterrupt: loop = False self.channel.stop_consuming() except pika.exceptions.ConnectionClosed: try: self.connect() except pika.exceptions.ConnectionClosed: sleep(1) self.connection.close() consumer = Consumer() consumer.consume()
bsd-3-clause
-856,625,678,671,990,400
31.300813
83
0.511956
false
4.489266
true
false
false
myriasofo/CLRS_exercises
algos/testSuite.py
1
6292
''' WHAT: Simple test framework for checking algorithms TASK: *Handle output that's an object, eg. bst that gets modified *option3: optional param - Class (accept input/output as arrays and TURN INTO object) (option1: optional param - comparison function (instead of simple "!=") (option2: optional param - Class (automatically deconstruct objects in arrays) ''' import copy def init(*args, **kwargs): return TestSuite(*args, **kwargs) class TestSuite: def __init__(self, tests, dataStructures=None): self.tests = tests self.converter = DataStructureConverter(dataStructures) if dataStructures is not None else None def test(self, function): print('FUNCTION: {}'.format(function.__name__)) tests = copy.deepcopy(self.tests) for i, test in enumerate(tests): params, expected = test try: actual = self.runFunction(function, params) if actual != expected: self.printError(i+1, params, expected, actual) return except Exception as error: self.printError(i+1, params, expected, 'ERROR') raise error def printError(self, iteration, params, expected, actual): print() print('ERROR: Iteration {}'.format(iteration)) print() stringifiedParams = ', '.join([str(param) for param in params]) print('input: {}'.format(stringifiedParams)) print('ouptut expected: {}'.format(expected)) print('output actual: {}'.format(actual)) print() def runFunction(self, function, params): if self.converter is not None: params = self.converter.convertInput(params) params = copy.deepcopy(params) actual = function(*params) if self.converter is not None: actual = self.converter.convertOuptut(actual) return actual class DataStructureConverter: def __init__(self, config): self.config = config self.arrayToDs = { 'SinglyLinkedList': self.createSinglyLinkedList, #'DoublyLinkedList': createSinglyLinkedList, #'BinaryTree': createBinaryTree, #'Queue': createQueue, #'Stack': createStack, } self.dsToArray = { 'SinglyLinkedList': self.createArrayFromSinglyLinkedList, #'DoublyLinkedList': createSinglyLinkedList, #'BinaryTree': createBinaryTree, #'Queue': createQueue, #'Stack': createStack, } def convertInput(self, params): if isinstance(self.config, str): converted = [] for param in params: ds = self.convertArrayToDs(param, self.config) converted.append(ds) return converted elif isinstance(self.config, dict): converted = [] for param, dsName in zip(params, self.config['input']): if not isinstance(dsName, str): converted.append(param) else: ds = self.convertArrayToDs(param, dsName) converted.append(ds) return converted else: raise Exception('ERROR: This is not the right format for dataStructure: {}'.format(self.config)) def convertOuptut(self, output): if isinstance(self.config, str): return self.convertDsToArray(output, self.config) elif isinstance(self.config, dict): return self.convertDsToArray(output, self.config['output']) else: raise Exception('ERROR: This is not the right format for dataStructure: {}'.format(self.ds)) def convertArrayToDs(self, array, dsName): if dsName not in self.arrayToDs: raise Exception('ERROR: Name of dataStructure not supported: {}'.format(dsName)) dsConstructor = self.arrayToDs[dsName] ds = dsConstructor(array) return ds def convertDsToArray(self, ds, dsName): if dsName not in self.dsToArray: raise Exception('ERROR: Name of dataStructure not supported: {}'.format(dsName)) arrayConstructor = self.dsToArray[dsName] array = arrayConstructor(ds) return array class Node: # spec determined by leetcode def __init__(self, val): self.val = val self.next = None def createSinglyLinkedList(self, array, storeInArray=False): if 
storeInArray: container = [] head = None curr = None for elem in array: node = self.Node(elem) if storeInArray: container.append(node) if head is None: head = node curr = node continue curr.next = node curr = node if storeInArray: return container return head def createArrayFromSinglyLinkedList(self, head): array = [] while head is not None: array.append(head.val) head = head.next return array # custom def createIntersectingLinkedLists(self, nA, nB, nIntersection): headA = self.createSinglyLinkedList(range(nA)) headB = self.createSinglyLinkedList(range(nA, nA+nB)) if nIntersection is None or nIntersection == 0: return headA, headB, None headI = self.createSinglyLinkedList(range(nA+nB, nA+nB+nIntersection)) if headA is None: headA = headI else: self.getEndofList(headA).next = headI if headB is None: headB = headI else: self.getEndofList(headB).next = headI return headA, headB, headI def getEndofList(self, head): while head is not None and head.next is not None: head = head.next return head ### Example usage def main(): import sys sys.path.append('/Users/Abe/my/codingPractice/algos') import testSuite tests = [ ([[3,2,1,5,6,4], 2], 5), ([[3,2,3,1,2,4,5,5,6], 4], 4) ] t = testSuite.init(tests) t.test(v1) t.test(v2) t.test(v3)
mit
-4,231,904,569,753,463,300
30.148515
108
0.578512
false
4.205882
true
false
false
tis-intern-apparel/ApparelStrategy
server/dialogue_system/module/database.py
1
5633
# -*- coding: utf-8 -*- import os import codecs class Personal: point_id = '' user_name = '' user_pronoun = '' sex = '' phone = '' email = '' address = '' class Cloth: cloth_name = '' color_code = '' small_type = '' price = '' image_url = '' big_type = '' cloth_code = '' cloth_describe = '' class Evaluate: clothes = [] osyaredo = 0 class DataBaseManager: def __init__(self,data_dir): self.data_dir = data_dir self.clothes_path = os.path.join(data_dir,'clothes.csv') self.evaluate_path = os.path.join(data_dir,'evaluate.csv') self.personal_path = os.path.join(data_dir,'personal.csv') def __split_csvline(self,line): return line.replace('\n','').replace('"','').split(',') def __struct_personal(self,line): cols = self.__split_csvline(line) personal = Personal() personal.point_id = cols[0] personal.user_name = cols[1] personal.user_pronoun = cols[2] personal.sex = cols[3] personal.phone = cols[4] personal.email = cols[5] personal.address = cols[6] personal.age = cols[7] return personal def __struct_cloth(self,line): cols = self.__split_csvline(line) cloth = Cloth() cloth.cloth_name = cols[0] cloth.color_code = cols[1] cloth.small_type = cols[2] cloth.price = cols[3] cloth.image_url = cols[4] cloth.big_type = cols[5] cloth.cloth_code = cols[6] cloth.cloth_describe = cols[7] return cloth def __struct_evaluate(self,line): cols = self.__split_csvline(line) osyare = Evaluate() osyare.clothes = [] for c in cols: if c == 'null': break else: osyare.clothes.append(c) osyare.osyaredo = cols[3] return osyare def get_personal_from_id(self,point_id): """ read personal data from point id :param point_id: search point id :return: personal object """ with codecs.open(self.personal_path,'r','utf-8') as f: for line in f: personal = self.__struct_personal(line) if personal.point_id == point_id: return personal return None def get_clothes_from_code(self, cloth_code): """ read cloth data from cloth_code :param cloth_code: cloth code for searching :return: cloth object """ with codecs.open(self.clothes_path, 'r', 'utf-8') as f: for line in f: cloth = self.__struct_cloth(line) if cloth.cloth_code == cloth_code: return cloth return None def get_evaluate_from_code(self, cloth_code): """ read evaluate(osyaredo) from cloth code :param cloth_code: cloth code for searching evaluate :return: evaluate object list """ result = [] with codecs.open(self.evaluate_path, 'r', 'utf-8') as f: for line in f: ev = self.__struct_evaluate(line) if ev.clothes.count(cloth_code) > 0: result.append(ev) if len(result) > 0: return result else: return None def get_evaluate_from_codelist(self, cloth_codelist): """ read evaluate(osyaredo) from cloth code :param cloth_code: cloth code for searching evaluate :return: evaluate object list """ result = [] with codecs.open(self.evaluate_path, 'r', 'utf-8') as f: for line in f: ev = self.__struct_evaluate(line) isContain = True for cloth in cloth_codelist: if not cloth.cloth_code in ev.clothes: isContain = False break if isContain: result.append(ev) if len(result) > 0: return result else: return None def get_clothes_from_name(self, contains_name): """ read cloth data from keyword that contains cloth name :param contains_name: key contains cloth name :return: cloth object list """ result = [] with codecs.open(self.clothes_path, 'r', 'utf-8') as f: for line in f: cloth = self.__struct_cloth(line) if cloth.cloth_name.count(contains_name) > 0: result.append(cloth) if len(result) > 0: return result else: return None def get_clothes_from_keys(self, season,price = None): """ read cloth data from keyword that 
contains cloth name :param contains_name: key contains cloth name :return: cloth object list """ result = [] with codecs.open(self.clothes_path, 'r', 'utf-8') as f: for line in f: cloth = self.__struct_cloth(line) if cloth.cloth_describe.count(season) > 0 or cloth.cloth_name.count(season) > 0: result.append(cloth) if len(result) > 0: return result else: return None if __name__ == '__main__': script_dir = os.path.dirname(__file__) data_path = os.path.join(script_dir,'../../data') manager = DataBaseManager(data_path) personal = manager.get_clothes_from_name('ズボン') for p in personal: print(p.cloth_name)
mit
3,080,065,227,675,096,000
28.005155
96
0.52568
false
3.709295
false
false
false
agoose77/hivesystem
tutorial/layers/layer17/layers.py
1
2334
from __future__ import print_function

# import the main and action components
from maincomponent import maincomponent
from action1 import action1component
from action2 import action2component
from action3 import action3component

#import manager components
from action3components import animationmanager
from action3components import soundmanager

#keyboard mainloop
from keycodes import ascii_to_keycode
from getch import getch, kbhit, change_termios, restore_termios


def mainloop(keyfunc=None):
    change_termios()
    while True:
        while not kbhit():
            continue
        key = getch()
        if isinstance(key, bytes) and bytes != str:
            key = key.decode()
        if key not in ascii_to_keycode:
            continue
        keycode = ascii_to_keycode[key]
        if keycode == "ESCAPE":
            break
        if keyfunc is not None:
            keyfunc(keycode)
    restore_termios()


#define a generic pseudo-hive class
import libcontext


class pseudohive(object):
    components = {}

    def __init__(self):
        for componentname, componentclass in self.components.items():
            component = componentclass()
            setattr(self, componentname, component)

    def build(self, contextname):
        self._contextname = contextname
        self._context = libcontext.context(self._contextname)

    def place(self):
        libcontext.push(self._contextname)
        for componentname, componentclass in self.components.items():
            component = getattr(self, componentname)
            component.place()
        libcontext.pop()

    def close(self):
        self._context.close()


#define the main (pseudo-)hive
class mainhive(pseudohive):
    components = {
        #action3 manager components
        "animationmanager": animationmanager,
        "soundmanager": soundmanager,

        #main component and action components
        "maincomponent": maincomponent,
        "action1": action1component,
        "action2": action2component,
        "action3": action3component,
    }


#Set up the main hive and run it

#Give us a new mainhive instance
main = mainhive()
#Build a context named "main"
main.build("main")
#Declare sockets and plugins
main.place()
#Build all connections, and validate the connection network
main.close()

#Run the main loop
main.maincomponent.start()
mainloop(main.maincomponent.keypress)
bsd-2-clause
2,937,745,632,804,940,000
25.522727
70
0.696658
false
4.116402
false
false
false
DrSkippy/Gravitational-Three-Body-Symmetric
sim_pendulum.py
1
1975
#!/usr/bin/env python
import csv
import sys

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

plt.style.use('ggplot')

# arg 1 = w init
# arg 2 = n periods
# arg 3 = n ratio

# time step
dt = np.float64(0.00010)

# constants
L_0 = np.float64(1.0)   # unstretched length
g = np.float64(9.81)    # gravitation
n = np.float64(sys.argv[3])
K_over_M = (n*n - 1)*g/L_0

# initial conditions
theta = np.float64(0)
L = L_0 + g/K_over_M    # equilibrium length with gravity

# 2mgl = 1/2 m l^2 w^2
w_sep = np.sqrt(4.*g/L)
w_0 = np.float64(sys.argv[1])
w = w_0
#
v_l_0 = 0
v_l = v_l_0

# periods
T_p = 2.*np.pi/np.sqrt(g/L)
T_k = 2.*np.pi/np.sqrt(K_over_M)

# record some stuff
print "Tp = {} T/dt = {}".format(T_p, T_p/dt)
print "Tk = {} T/dt = {}".format(T_k, T_k/dt)
print "Tk/Tp = {}".format(T_k/T_p)
print "w_esc = {}".format(w_sep)

t = np.float64(0.0)
theta_last = theta

# keep some records
data = []
t_s = []

theta += w*dt/2.
L += v_l*dt/2.

for i in range(int(sys.argv[2])*int(T_p/dt)):
    w += -dt*g*np.sin(theta)/L
    v_l += -K_over_M*(L-L_0) + g*np.cos(theta) + w*w*L
    theta += w*dt
    theta = np.fmod(theta, 2.*np.pi)
    L += v_l*dt
    t += dt
    data.append([t, theta, w, L, v_l])
    if theta_last < 0 and theta > 0:
        t_s.append(t)
    theta_last = theta

# periods by measure
t_s = [t_s[i] - t_s[i-1] for i in range(1,len(t_s)) ]
print "avg period = {} std periods = {}".format(np.average(t_s), np.std(t_s))

# plots
df = pd.DataFrame().from_records(data)
df.columns = ["t", "theta", "omega", "l", "v_l"]
df.set_index("t")

ax = df.plot(kind="scatter", x="theta", y="omega", marker=".")
fig = ax.get_figure()
fig.savefig("phase1.png")

ax = df.plot(kind="scatter", x="l", y="v_l", marker=".")
fig = ax.get_figure()
fig.savefig("phase2.png")

# config space
df["y_c"] = -df["l"]
df["x_c"] = df["l"] * np.sin(df["theta"])

ax = df.plot(kind="scatter", x="x_c", y="y_c", marker=".")
fig = ax.get_figure()
fig.savefig("config.png")
cc0-1.0
3,132,915,243,086,640,600
20.944444
77
0.578734
false
2.214126
false
false
false
metpy/SHARPpy
sharppy/version.py
1
1931
import os.path
import subprocess

release = False
__version__ = '0.2'

_repository_path = os.path.split(__file__)[0]
_git_file_path = os.path.join(_repository_path, '__git_version__.py')

def _minimal_ext_cmd(cmd):
    # construct minimal environment
    env = {}
    for k in ['SYSTEMROOT', 'PATH']:
        v = os.environ.get(k)
        if v is not None:
            env[k] = v
    # LANGUAGE is used on win32
    env['LANGUAGE'] = 'C'
    env['LANG'] = 'C'
    env['LC_ALL'] = 'C'
    out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
    return out

def get_git_hash():
    '''
    Gets the last GIT commit hash and date for the repository, using the
    path to this file.
    '''
    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        GIT_REVISION = out.strip().decode('ascii')
    except:
        GIT_REVISION = None

    return GIT_REVISION

def get_git_revision():
    hash = get_git_hash()
    if hash :
        rev = '.dev.' + hash[:7]
        try:
            cmd = ['git', 'show', '%s' % (hash), '--date=short', '--format="(%ad)"']
            date = _minimal_ext_cmd(cmd).split('"')[1]
            rev += date
        except:
            pass
    else:
        rev = ".dev.Unknown"

    return rev

def write_git_version():
    'Write the GIT revision to a file.'
    rev = get_git_revision()
    if rev == ".dev.Unknown":
        if os.path.isfile(_git_file_path):
            return
    gitfile = open(_git_file_path, 'w')
    gitfile.write('rev = "%s"\n' % rev)
    gitfile.close()

def get_version():
    '''
    Get the version of the package, including the GIT revision if this
    is an actual release.
    '''
    version = __version__
    if not release:
        try:
            import __git_version__
            version += __git_version__.rev
        except ImportError:
            version += get_git_revision()

    return version
bsd-3-clause
6,243,977,662,548,874,000
23.75641
81
0.539617
false
3.504537
false
false
false
brian-o/CS-CourseWork
CS491/Program2/testForks.py
1
2677
############################################################
'''
testForks.py
Written by: Brian O'Dell, Spetember 2017

A program to run each program a 500 times per thread count.
Then uses the data collected to make graphs and tables that
are useful to evaluate the programs running time.
'''
############################################################

from subprocess import *
from numba import jit
import numpy as np
import csv as csv
import pandas as pd
from pandas.plotting import table
import matplotlib.pyplot as plt


'''
Call the C program multiple times with variable arguments to gather data
The name of the executable should exist before running
'''
@jit
def doCount(name):
    j = 0
    while (j < 1025):
        for i in range(0,501):
            call([name,"-t",str(j), "-w"])
        if (j == 0):
            j = 1
        else:
            j = 2*j;


'''
Turn the data into something meaningful.
Takes all the data gets the average and standard deviation for each
number of threads. Then plots a graph based on it.
Also, makes a csv with the avg and stddev
'''
@jit
def exportData(name):
    DF = pd.read_csv("data/"+name+".csv")
    f = {'ExecTime':['mean','std']}
    #group by the number of threads in the csv and
    #apply the mean and standard deviation functions to the groups
    avgDF = DF.groupby('NumThreads').agg(f)
    avgTable = DF.groupby('NumThreads', as_index=False).agg(f)

    #When the data csv was saved we used 0 to indicate serial execution
    #this was so the rows would be in numerical order instead of Alphabetical
    #Now rename index 0 to Serial to be an accurate representation
    indexList = avgDF.index.tolist()
    indexList[0] = 'Serial'
    avgDF.index = indexList

    #make the bar chart and set the axes
    avgPlot = avgDF.plot(kind='bar', title=('Run Times Using '+ name),
                         legend='False', figsize=(15,8))
    avgPlot.set_xlabel("Number of Forks")
    avgPlot.set_ylabel("Run Time (seconds)")

    #put the data values on top of the bars for clarity
    avgPlot.legend(['mean','std deviation'])
    for p in avgPlot.patches:
        avgPlot.annotate((str(p.get_height())[:6]),
                         (p.get_x()-.01, p.get_height()), fontsize=9)

    #save the files we need
    plt.savefig('data/'+name+'Graph.png')
    avgTable.to_csv('data/'+name+'Table.csv', index=False, encoding='utf-8')


def main():
    doCount("./forkedSemaphor")
    doCount("./forkedPrivateCount")
    doCount("./forkedPrivateCount32")
    exportData("forkedSemaphor")
    exportData("forkedPrivateCount")
    exportData("forkedPrivateCount32")

if __name__ == '__main__':
    main()
gpl-3.0
5,887,714,014,429,846,000
30.494118
82
0.623086
false
3.851799
false
false
false
PennyDreadfulMTG/Penny-Dreadful-Tools
modo_bugs/fetcher.py
1
4118
import os
import sys
from typing import Dict, List, Optional, Tuple

from bs4 import BeautifulSoup
from bs4.element import Tag

from shared import fetch_tools, lazy


def search_scryfall(query: str) -> Tuple[int, List[str], List[str]]:
    """Returns a tuple. First member is an integer indicating how many cards match the query total,
       second member is a list of card names up to the maximum that could be fetched in a timely fashion."""
    if query == '':
        return 0, [], []
    print(f'Searching scryfall for `{query}`')
    result_json = fetch_tools.fetch_json('https://api.scryfall.com/cards/search?q=' + fetch_tools.escape(query), character_encoding='utf-8')
    if 'code' in result_json.keys():  # The API returned an error
        if result_json['status'] == 404:  # No cards found
            return 0, [], []
        print('Error fetching scryfall data:\n', result_json)
        return 0, [], []
    for warning in result_json.get('warnings', []):  # scryfall-provided human-readable warnings
        print(warning)
    result_data = result_json['data']
    result_data.sort(key=lambda x: x['legalities']['penny'])

    def get_frontside(scr_card: Dict) -> str:
        """If card is transform, returns first name. Otherwise, returns name.
        This is to make sure cards are later found in the database"""
        # not sure how to handle meld cards
        if scr_card['layout'] in ['transform', 'flip', 'modal_dfc']:
            return scr_card['card_faces'][0]['name']
        return scr_card['name']

    result_cardnames = [get_frontside(obj) for obj in result_data]
    return result_json['total_cards'], result_cardnames, result_json.get('warnings', [])

def catalog_cardnames() -> List[str]:
    result_json = fetch_tools.fetch_json('https://api.scryfall.com/catalog/card-names')
    names: List[str] = result_json['data']
    for n in names:
        if ' // ' in n:
            names.extend(n.split(' // '))
    return names

def update_redirect(file: str, title: str, redirect: str, **kwargs: str) -> bool:
    text = '---\ntitle: {title}\nredirect_to:\n - {url}\n'.format(title=title, url=redirect)
    for key, value in kwargs.items():
        text += f'{key}: {value}\n'
    text = text + '---\n'
    fname = f'{file}.md'
    if not os.path.exists(fname):
        bb_jekyl = open(fname, mode='w')
        bb_jekyl.write('')
        bb_jekyl.close()
    bb_jekyl = open(fname, mode='r')
    orig = bb_jekyl.read()
    bb_jekyl.close()
    if orig != text:
        print(f'New {file} update!')
        bb_jekyl = open(fname, mode='w')
        bb_jekyl.write(text)
        bb_jekyl.close()
        return True
    if 'always-scrape' in sys.argv:
        return True
    return False

def find_bug_blog() -> Tuple[Optional[str], bool]:
    bug_blogs = [a for a in get_article_archive() if str(a[0].string).startswith('Magic Online Bug Blog')]
    if not bug_blogs:
        return (None, False)
    (title, link) = bug_blogs[0]
    print('Found: {0} ({1})'.format(title, link))
    new = update_redirect('bug_blog', title.text, link)
    return (link, new)

def find_announcements() -> Tuple[str, bool]:
    articles = [a for a in get_article_archive() if str(a[0].string).startswith('Magic Online Announcements')]
    (title, link) = articles[0]
    print('Found: {0} ({1})'.format(title, link))
    bn = 'Build Notes' in fetch_tools.fetch(link)
    new = update_redirect('announcements', title.text, link, has_build_notes=str(bn))
    return (link, new)

def parse_article_item_extended(a: Tag) -> Tuple[Tag, str]:
    title = a.find_all('h3')[0]
    link = 'http://magic.wizards.com' + a.find_all('a')[0]['href']
    return (title, link)

@lazy.lazy_property
def get_article_archive() -> List[Tuple[Tag, str]]:
    try:
        html = fetch_tools.fetch('http://magic.wizards.com/en/articles/archive/184956')
    except fetch_tools.FetchException:
        html = fetch_tools.fetch('http://magic.wizards.com/en/articles/archive/')
    soup = BeautifulSoup(html, 'html.parser')
    return [parse_article_item_extended(a) for a in soup.find_all('div', class_='article-item-extended')]
gpl-3.0
-1,526,794,542,128,501,000
41.020408
140
0.629917
false
3.278662
false
false
false
heldergg/webpymail
webpymail/sabapp/models.py
1
2844
# -*- coding: utf-8 -*-

# sabapp - Simple Address Book Application
# Copyright (C) 2008 Helder Guerreiro

# This file is part of sabapp.
#
# sabapp is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# sabapp is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with sabapp. If not, see <http://www.gnu.org/licenses/>.
#
# Helder Guerreiro <helder@tretas.org>
#

from django.contrib.auth.models import User
from django.db import models
from django.db.models import Q
from django.utils.translation import gettext_lazy as _

# Models:

ADDRESSBOOKTYPE = (
    (1, _('User address book')),
    (2, _('Server address book')),
    (3, _('Site address book')),
)

class AddressManager(models.Manager):
    def for_request(self, request):
        '''Addresses available for request'''
        host = request.session['host']
        return super(AddressManager, self).get_queryset().filter(
            Q(user__exact=request.user, imap_server__exact=host,
              ab_type__exact=1) |
            Q(imap_server__exact=host, ab_type__exact=2) |
            Q(ab_type__exact=3))

    def have_addr(self, request, addr):
        address = self.for_request(request).filter(email__iexact=addr)
        return bool(address)

class Address(models.Model):
    user = models.ForeignKey(User, null=True)
    imap_server = models.CharField(_('IMAP server'), max_length=128)
    nickname = models.CharField(max_length=64, blank=True)
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=64, blank=True)
    email = models.EmailField(_('e-mail address'))
    additional_info = models.CharField(_('aditional information'),
                                       max_length=128, blank=True)
    ab_type = models.IntegerField(choices=ADDRESSBOOKTYPE)

    objects = AddressManager()

    class Meta:
        verbose_name = _('Address')
        verbose_name_plural = _('Addresses')
        db_table = 'address_book'
        ordering = ['first_name', 'last_name', 'email']

    def full_name(self):
        return ('%s %s' % (self.first_name, self.last_name)).strip()

    def mail_addr(self):
        name = ('%s %s' % (self.first_name, self.last_name)).strip()
        if name:
            return '"%s" <%s>' % (name, self.email)
        else:
            return self.email

    def __str__(self):
        return self.mail_addr()
gpl-3.0
-5,009,104,855,560,248,000
31.318182
77
0.645921
false
3.707953
false
false
false
silentfuzzle/calibre
src/calibre/devices/kobo/driver.py
1
147621
#!/usr/bin/env python2 # vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai from __future__ import division __license__ = 'GPL v3' __copyright__ = '2010-2012, Timothy Legge <timlegge@gmail.com>, Kovid Goyal <kovid@kovidgoyal.net> and David Forrester <davidfor@internode.on.net>' __docformat__ = 'restructuredtext en' ''' Driver for Kobo ereaders. Supports all e-ink devices. Originally developed by Timothy Legge <timlegge@gmail.com>. Extended to support Touch firmware 2.0.0 and later and newer devices by David Forrester <davidfor@internode.on.net> ''' import os, time, shutil from contextlib import closing from calibre.devices.usbms.books import BookList from calibre.devices.usbms.books import CollectionsBookList from calibre.devices.kobo.books import KTCollectionsBookList from calibre.devices.kobo.books import Book from calibre.devices.kobo.books import ImageWrapper from calibre.devices.mime import mime_type_ext from calibre.devices.usbms.driver import USBMS, debug_print from calibre import prints, fsync from calibre.ptempfile import PersistentTemporaryFile from calibre.constants import DEBUG from calibre.utils.config_base import prefs EPUB_EXT = '.epub' KEPUB_EXT = '.kepub' # Implementation of QtQHash for strings. This doesn't seem to be in the Python implementation. def qhash(inputstr): instr = b"" if isinstance(inputstr, bytes): instr = inputstr elif isinstance(inputstr, unicode): instr = inputstr.encode("utf8") else: return -1 h = 0x00000000 for x in bytearray(instr): h = (h << 4) + x h ^= (h & 0xf0000000) >> 23 h &= 0x0fffffff return h class DummyCSSPreProcessor(object): def __call__(self, data, add_namespace=False): return data class KOBO(USBMS): name = 'Kobo Reader Device Interface' gui_name = 'Kobo Reader' description = _('Communicate with the Kobo Reader') author = 'Timothy Legge and David Forrester' version = (2, 1, 8) dbversion = 0 fwversion = 0 supported_dbversion = 120 has_kepubs = False supported_platforms = ['windows', 'osx', 'linux'] booklist_class = CollectionsBookList book_class = Book # Ordered list of supported formats FORMATS = ['kepub', 'epub', 'pdf', 'txt', 'cbz', 'cbr'] CAN_SET_METADATA = ['collections'] VENDOR_ID = [0x2237] BCD = [0x0110, 0x0323, 0x0326] ORIGINAL_PRODUCT_ID = [0x4165] WIFI_PRODUCT_ID = [0x4161, 0x4162] PRODUCT_ID = ORIGINAL_PRODUCT_ID + WIFI_PRODUCT_ID VENDOR_NAME = ['KOBO_INC', 'KOBO'] WINDOWS_MAIN_MEM = WINDOWS_CARD_A_MEM = ['.KOBOEREADER', 'EREADER'] EBOOK_DIR_MAIN = '' SUPPORTS_SUB_DIRS = True SUPPORTS_ANNOTATIONS = True # "kepubs" do not have an extension. The name looks like a GUID. Using an empty string seems to work. VIRTUAL_BOOK_EXTENSIONS = frozenset(['kobo', '']) EXTRA_CUSTOMIZATION_MESSAGE = [ _('The Kobo supports several collections including ')+ 'Read, Closed, Im_Reading. ' + _('Create tags for automatic management'), _('Upload covers for books (newer readers)') + ':::'+_('Normally, the KOBO readers get the cover image from the' ' ebook file itself. With this option, calibre will send a ' 'separate cover image to the reader, useful if you ' 'have modified the cover.'), _('Upload Black and White Covers'), _('Show expired books') + ':::'+_('A bug in an earlier version left non kepubs book records' ' in the database. With this option Calibre will show the ' 'expired records and allow you to delete them with ' 'the new delete logic.'), _('Show Previews') + ':::'+_('Kobo previews are included on the Touch and some other versions' ' by default they are no longer displayed as there is no good reason to ' 'see them. 
Enable if you wish to see/delete them.'), _('Show Recommendations') + ':::'+_('Kobo now shows recommendations on the device. In some cases these have ' 'files but in other cases they are just pointers to the web site to buy. ' 'Enable if you wish to see/delete them.'), _('Attempt to support newer firmware') + ':::'+_('Kobo routinely updates the firmware and the ' 'database version. With this option calibre will attempt ' 'to perform full read-write functionality - Here be Dragons!! ' 'Enable only if you are comfortable with restoring your kobo ' 'to factory defaults and testing software'), ] EXTRA_CUSTOMIZATION_DEFAULT = [ ', '.join(['tags']), True, True, True, False, False, False ] OPT_COLLECTIONS = 0 OPT_UPLOAD_COVERS = 1 OPT_UPLOAD_GRAYSCALE_COVERS = 2 OPT_SHOW_EXPIRED_BOOK_RECORDS = 3 OPT_SHOW_PREVIEWS = 4 OPT_SHOW_RECOMMENDATIONS = 5 OPT_SUPPORT_NEWER_FIRMWARE = 6 def initialize(self): USBMS.initialize(self) self.dbversion = 7 def device_database_path(self): return self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite') def books(self, oncard=None, end_session=True): from calibre.ebooks.metadata.meta import path_to_ext dummy_bl = BookList(None, None, None) if oncard == 'carda' and not self._card_a_prefix: self.report_progress(1.0, _('Getting list of books on device...')) return dummy_bl elif oncard == 'cardb' and not self._card_b_prefix: self.report_progress(1.0, _('Getting list of books on device...')) return dummy_bl elif oncard and oncard != 'carda' and oncard != 'cardb': self.report_progress(1.0, _('Getting list of books on device...')) return dummy_bl prefix = self._card_a_prefix if oncard == 'carda' else \ self._card_b_prefix if oncard == 'cardb' \ else self._main_prefix # Determine the firmware version try: with open(self.normalize_path(self._main_prefix + '.kobo/version'), 'rb') as f: self.fwversion = f.readline().split(',')[2] except: self.fwversion = 'unknown' if self.fwversion != '1.0' and self.fwversion != '1.4': self.has_kepubs = True debug_print('Version of driver: ', self.version, 'Has kepubs:', self.has_kepubs) debug_print('Version of firmware: ', self.fwversion, 'Has kepubs:', self.has_kepubs) self.booklist_class.rebuild_collections = self.rebuild_collections # get the metadata cache bl = self.booklist_class(oncard, prefix, self.settings) need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE) # make a dict cache of paths so the lookup in the loop below is faster. 
bl_cache = {} for idx,b in enumerate(bl): bl_cache[b.lpath] = idx def update_booklist(prefix, path, title, authors, mime, date, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex, accessibility): changed = False try: lpath = path.partition(self.normalize_path(prefix))[2] if lpath.startswith(os.sep): lpath = lpath[len(os.sep):] lpath = lpath.replace('\\', '/') # debug_print("LPATH: ", lpath, " - Title: " , title) playlist_map = {} if lpath not in playlist_map: playlist_map[lpath] = [] if readstatus == 1: playlist_map[lpath].append('Im_Reading') elif readstatus == 2: playlist_map[lpath].append('Read') elif readstatus == 3: playlist_map[lpath].append('Closed') # Related to a bug in the Kobo firmware that leaves an expired row for deleted books # this shows an expired Collection so the user can decide to delete the book if expired == 3: playlist_map[lpath].append('Expired') # A SHORTLIST is supported on the touch but the data field is there on most earlier models if favouritesindex == 1: playlist_map[lpath].append('Shortlist') # Label Previews if accessibility == 6: playlist_map[lpath].append('Preview') elif accessibility == 4: playlist_map[lpath].append('Recommendation') path = self.normalize_path(path) # print "Normalized FileName: " + path idx = bl_cache.get(lpath, None) if idx is not None: bl_cache[lpath] = None if ImageID is not None: imagename = self.normalize_path(self._main_prefix + '.kobo/images/' + ImageID + ' - NickelBookCover.parsed') if not os.path.exists(imagename): # Try the Touch version if the image does not exist imagename = self.normalize_path(self._main_prefix + '.kobo/images/' + ImageID + ' - N3_LIBRARY_FULL.parsed') # print "Image name Normalized: " + imagename if not os.path.exists(imagename): debug_print("Strange - The image name does not exist - title: ", title) if imagename is not None: bl[idx].thumbnail = ImageWrapper(imagename) if (ContentType != '6' and MimeType != 'Shortcover'): if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))): if self.update_metadata_item(bl[idx]): # print 'update_metadata_item returned true' changed = True else: debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!") if lpath in playlist_map and \ playlist_map[lpath] not in bl[idx].device_collections: bl[idx].device_collections = playlist_map.get(lpath,[]) else: if ContentType == '6' and MimeType == 'Shortcover': book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576) else: try: if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))): book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID) else: debug_print(" Strange: The file: ", prefix, lpath, " does mot exist!") title = "FILE MISSING: " + title book = Book(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=1048576) except: debug_print("prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors, "mime: ", mime, "date: ", date, "ContentType: ", ContentType, "ImageID: ", ImageID) raise # print 'Update booklist' book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else [] if bl.add_book(book, replace_metadata=False): changed = True except: # Probably a path encoding error import traceback traceback.print_exc() return changed import sqlite3 as sqlite with closing(sqlite.connect( self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite'))) as connection: # return bytestrings if the content cannot the decoded as unicode connection.text_factory = 
lambda x: unicode(x, "utf-8", "ignore") cursor = connection.cursor() cursor.execute('select version from dbversion') result = cursor.fetchone() self.dbversion = result[0] debug_print("Database Version: ", self.dbversion) opts = self.settings() if self.dbversion >= 33: query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' 'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, IsDownloaded from content where ' 'BookID is Null %(previews)s %(recomendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)' if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')', previews=' and Accessibility <> 6' if opts.extra_customization[self.OPT_SHOW_PREVIEWS] == False else '', recomendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] == False else '') elif self.dbversion >= 16 and self.dbversion < 33: query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' 'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, Accessibility, "1" as IsDownloaded from content where ' 'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)' if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')') elif self.dbversion < 16 and self.dbversion >= 14: query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' 'ImageID, ReadStatus, ___ExpirationStatus, FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where ' 'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)' if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')') elif self.dbversion < 14 and self.dbversion >= 8: query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' 'ImageID, ReadStatus, ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where ' 'BookID is Null and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s') % dict(expiry=' and ContentType = 6)' if opts.extra_customization[self.OPT_SHOW_EXPIRED_BOOK_RECORDS] else ')') else: query= 'select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' \ 'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded from content where BookID is Null' try: cursor.execute(query) except Exception as e: err = str(e) if not ('___ExpirationStatus' in err or 'FavouritesIndex' in err or 'Accessibility' in err or 'IsDownloaded' in err): raise query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' 'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as ' 'FavouritesIndex, "-1" as Accessibility from content where ' 'BookID is Null') cursor.execute(query) changed = False for i, row in enumerate(cursor): # self.report_progress((i+1) / float(numrows), _('Getting list of books on device...')) if not hasattr(row[3], 'startswith') or row[3].startswith("file:///usr/local/Kobo/help/"): # These are internal to the Kobo device and do not exist continue path = self.path_from_contentid(row[3], row[5], row[4], oncard) mime = mime_type_ext(path_to_ext(path)) if path.find('kepub') == -1 else 'application/epub+zip' # debug_print("mime:", mime) if oncard != 'carda' and oncard != 'cardb' and not 
row[3].startswith("file:///mnt/sd/"): changed = update_booklist(self._main_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10]) # print "shortbook: " + path elif oncard == 'carda' and row[3].startswith("file:///mnt/sd/"): changed = update_booklist(self._card_a_prefix, path, row[0], row[1], mime, row[2], row[5], row[6], row[7], row[4], row[8], row[9], row[10]) if changed: need_sync = True cursor.close() # Remove books that are no longer in the filesystem. Cache contains # indices into the booklist if book not in filesystem, None otherwise # Do the operation in reverse order so indices remain valid for idx in sorted(bl_cache.itervalues(), reverse=True): if idx is not None: need_sync = True del bl[idx] # print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \ # (len(bl_cache), len(bl), need_sync) if need_sync: # self.count_found_in_bl != len(bl) or need_sync: if oncard == 'cardb': self.sync_booklists((None, None, bl)) elif oncard == 'carda': self.sync_booklists((None, bl, None)) else: self.sync_booklists((bl, None, None)) self.report_progress(1.0, _('Getting list of books on device...')) return bl def filename_callback(self, path, mi): # debug_print("Kobo:filename_callback:Path - {0}".format(path)) idx = path.rfind('.') ext = path[idx:] if ext == KEPUB_EXT: path = path + EPUB_EXT # debug_print("Kobo:filename_callback:New path - {0}".format(path)) return path def delete_via_sql(self, ContentID, ContentType): # Delete Order: # 1) shortcover_page # 2) volume_shorcover # 2) content import sqlite3 as sqlite debug_print('delete_via_sql: ContentID: ', ContentID, 'ContentType: ', ContentType) with closing(sqlite.connect(self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite'))) as connection: # return bytestrings if the content cannot the decoded as unicode connection.text_factory = lambda x: unicode(x, "utf-8", "ignore") cursor = connection.cursor() t = (ContentID,) cursor.execute('select ImageID from content where ContentID = ?', t) ImageID = None for row in cursor: # First get the ImageID to delete the images ImageID = row[0] cursor.close() cursor = connection.cursor() if ContentType == 6 and self.dbversion < 8: # Delete the shortcover_pages first cursor.execute('delete from shortcover_page where shortcoverid in (select ContentID from content where BookID = ?)', t) # Delete the volume_shortcovers second cursor.execute('delete from volume_shortcovers where volumeid = ?', t) # Delete the rows from content_keys if self.dbversion >= 8: cursor.execute('delete from content_keys where volumeid = ?', t) # Delete the chapters associated with the book next t = (ContentID,) # Kobo does not delete the Book row (ie the row where the BookID is Null) # The next server sync should remove the row cursor.execute('delete from content where BookID = ?', t) if ContentType == 6: try: cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0, ___ExpirationStatus=3 ' 'where BookID is Null and ContentID =?',t) except Exception as e: if 'no such column' not in str(e): raise try: cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\', ___PercentRead=0 ' 'where BookID is Null and ContentID =?',t) except Exception as e: if 'no such column' not in str(e): raise cursor.execute('update content set ReadStatus=0, FirstTimeReading = \'true\' ' 'where BookID is Null and ContentID =?',t) else: cursor.execute('delete from content where BookID is Null and ContentID =?',t) connection.commit() 
cursor.close() if ImageID == None: print "Error condition ImageID was not found" print "You likely tried to delete a book that the kobo has not yet added to the database" # If all this succeeds we need to delete the images files via the ImageID return ImageID def delete_images(self, ImageID, book_path): if ImageID != None: path_prefix = '.kobo/images/' path = self._main_prefix + path_prefix + ImageID file_endings = (' - iPhoneThumbnail.parsed', ' - bbMediumGridList.parsed', ' - NickelBookCover.parsed', ' - N3_LIBRARY_FULL.parsed', ' - N3_LIBRARY_GRID.parsed', ' - N3_LIBRARY_LIST.parsed', ' - N3_SOCIAL_CURRENTREAD.parsed', ' - N3_FULL.parsed',) for ending in file_endings: fpath = path + ending fpath = self.normalize_path(fpath) if os.path.exists(fpath): # print 'Image File Exists: ' + fpath os.unlink(fpath) def delete_books(self, paths, end_session=True): if self.modify_database_check("delete_books") == False: return for i, path in enumerate(paths): self.report_progress((i+1) / float(len(paths)), _('Removing books from device...')) path = self.normalize_path(path) # print "Delete file normalized path: " + path extension = os.path.splitext(path)[1] ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(path) ContentID = self.contentid_from_path(path, ContentType) ImageID = self.delete_via_sql(ContentID, ContentType) # print " We would now delete the Images for" + ImageID self.delete_images(ImageID, path) if os.path.exists(path): # Delete the ebook # print "Delete the ebook: " + path os.unlink(path) filepath = os.path.splitext(path)[0] for ext in self.DELETE_EXTS: if os.path.exists(filepath + ext): # print "Filename: " + filename os.unlink(filepath + ext) if os.path.exists(path + ext): # print "Filename: " + filename os.unlink(path + ext) if self.SUPPORTS_SUB_DIRS: try: # print "removed" os.removedirs(os.path.dirname(path)) except: pass self.report_progress(1.0, _('Removing books from device...')) def remove_books_from_metadata(self, paths, booklists): if self.modify_database_check("remove_books_from_metatata") == False: return for i, path in enumerate(paths): self.report_progress((i+1) / float(len(paths)), _('Removing books from device metadata listing...')) for bl in booklists: for book in bl: # print "Book Path: " + book.path if path.endswith(book.path): # print " Remove: " + book.path bl.remove_book(book) self.report_progress(1.0, _('Removing books from device metadata listing...')) def add_books_to_metadata(self, locations, metadata, booklists): metadata = iter(metadata) for i, location in enumerate(locations): self.report_progress((i+1) / float(len(locations)), _('Adding books to device metadata listing...')) info = metadata.next() blist = 2 if location[1] == 'cardb' else 1 if location[1] == 'carda' else 0 # Extract the correct prefix from the pathname. To do this correctly, # we must ensure that both the prefix and the path are normalized # so that the comparison will work. Book's __init__ will fix up # lpath, so we don't need to worry about that here. 
path = self.normalize_path(location[0]) if self._main_prefix: prefix = self._main_prefix if \ path.startswith(self.normalize_path(self._main_prefix)) else None if not prefix and self._card_a_prefix: prefix = self._card_a_prefix if \ path.startswith(self.normalize_path(self._card_a_prefix)) else None if not prefix and self._card_b_prefix: prefix = self._card_b_prefix if \ path.startswith(self.normalize_path(self._card_b_prefix)) else None if prefix is None: prints('in add_books_to_metadata. Prefix is None!', path, self._main_prefix) continue # print "Add book to metatdata: " # print "prefix: " + prefix lpath = path.partition(prefix)[2] if lpath.startswith('/') or lpath.startswith('\\'): lpath = lpath[1:] # print "path: " + lpath book = self.book_class(prefix, lpath, other=info) if book.size is None or book.size == 0: book.size = os.stat(self.normalize_path(path)).st_size b = booklists[blist].add_book(book, replace_metadata=True) if b: b._new_book = True self.report_progress(1.0, _('Adding books to device metadata listing...')) def contentid_from_path(self, path, ContentType): if ContentType == 6: extension = os.path.splitext(path)[1] if extension == '.kobo': ContentID = os.path.splitext(path)[0] # Remove the prefix on the file. it could be either ContentID = ContentID.replace(self._main_prefix, '') else: ContentID = path ContentID = ContentID.replace(self._main_prefix + self.normalize_path('.kobo/kepub/'), '') if self._card_a_prefix is not None: ContentID = ContentID.replace(self._card_a_prefix, '') elif ContentType == 999: # HTML Files ContentID = path ContentID = ContentID.replace(self._main_prefix, "/mnt/onboard/") if self._card_a_prefix is not None: ContentID = ContentID.replace(self._card_a_prefix, "/mnt/sd/") else: # ContentType = 16 ContentID = path ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/") if self._card_a_prefix is not None: ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/") ContentID = ContentID.replace("\\", '/') return ContentID def get_content_type_from_path(self, path): # Strictly speaking the ContentType could be 6 or 10 # however newspapers have the same storage format if path.find('kepub') >= 0: ContentType = 6 return ContentType def get_content_type_from_extension(self, extension): if extension == '.kobo': # Kobo books do not have book files. 
They do have some images though # print "kobo book" ContentType = 6 elif extension == '.pdf' or extension == '.epub': # print "ePub or pdf" ContentType = 16 elif extension == '.rtf' or extension == '.txt' or extension == '.htm' or extension == '.html': # print "txt" if self.fwversion == '1.0' or self.fwversion == '1.4' or self.fwversion == '1.7.4': ContentType = 999 else: ContentType = 901 else: # if extension == '.html' or extension == '.txt': ContentType = 901 # Yet another hack: to get around Kobo changing how ContentID is stored return ContentType def path_from_contentid(self, ContentID, ContentType, MimeType, oncard): path = ContentID if oncard == 'cardb': print 'path from_contentid cardb' elif oncard == 'carda': path = path.replace("file:///mnt/sd/", self._card_a_prefix) # print "SD Card: " + path else: if ContentType == "6" and MimeType == 'Shortcover': # This is a hack as the kobo files do not exist # but the path is required to make a unique id # for calibre's reference path = self._main_prefix + path + '.kobo' # print "Path: " + path elif (ContentType == "6" or ContentType == "10") and MimeType == 'application/x-kobo-epub+zip': if path.startswith("file:///mnt/onboard/"): path = self._main_prefix + path.replace("file:///mnt/onboard/", '') else: path = self._main_prefix + '.kobo/kepub/' + path # print "Internal: " + path else: # if path.startswith("file:///mnt/onboard/"): path = path.replace("file:///mnt/onboard/", self._main_prefix) path = path.replace("/mnt/onboard/", self._main_prefix) # print "Internal: " + path return path def modify_database_check(self, function): # Checks to see whether the database version is supported # and whether the user has chosen to support the firmware version if self.dbversion > self.supported_dbversion: # Unsupported database opts = self.settings() if not opts.extra_customization[self.OPT_SUPPORT_NEWER_FIRMWARE]: debug_print('The database has been upgraded past supported version') self.report_progress(1.0, _('Removing books from device...')) from calibre.devices.errors import UserFeedback raise UserFeedback(_("Kobo database version unsupported - See details"), _('Your Kobo is running an updated firmware/database version.' ' As calibre does not know about this updated firmware,' ' database editing is disabled, to prevent corruption.' ' You can still send books to your Kobo with calibre, ' ' but deleting books and managing collections is disabled.' ' If you are willing to experiment and know how to reset' ' your Kobo to Factory defaults, you can override this' ' check by right clicking the device icon in calibre and' ' selecting "Configure this device" and then the ' ' "Attempt to support newer firmware" option.' ' Doing so may require you to perform a factory reset of' ' your Kobo.') + (( '\nDevice database version: %s.' '\nDevice firmware version: %s') % (self.dbversion, self.fwversion)) , UserFeedback.WARN) return False else: # The user chose to edit the database anyway return True else: # Supported database version return True def get_file(self, path, *args, **kwargs): tpath = self.munge_path(path) extension = os.path.splitext(tpath)[1] if extension == '.kobo': from calibre.devices.errors import UserFeedback raise UserFeedback(_("Not Implemented"), _('".kobo" files do not exist on the device as books; ' 'instead they are rows in the sqlite database. 
' 'Currently they cannot be exported or viewed.'), UserFeedback.WARN) return USBMS.get_file(self, path, *args, **kwargs) @classmethod def book_from_path(cls, prefix, lpath, title, authors, mime, date, ContentType, ImageID): # debug_print("KOBO:book_from_path - title=%s"%title) from calibre.ebooks.metadata import MetaInformation if cls.settings().read_metadata or cls.MUST_READ_METADATA: mi = cls.metadata_from_path(cls.normalize_path(os.path.join(prefix, lpath))) else: from calibre.ebooks.metadata.meta import metadata_from_filename mi = metadata_from_filename(cls.normalize_path(os.path.basename(lpath)), cls.build_template_regexp()) if mi is None: mi = MetaInformation(os.path.splitext(os.path.basename(lpath))[0], [_('Unknown')]) size = os.stat(cls.normalize_path(os.path.join(prefix, lpath))).st_size book = cls.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=size, other=mi) return book def get_device_paths(self): paths = {} for prefix, path, source_id in [ ('main', 'metadata.calibre', 0), ('card_a', 'metadata.calibre', 1), ('card_b', 'metadata.calibre', 2) ]: prefix = getattr(self, '_%s_prefix'%prefix) if prefix is not None and os.path.exists(prefix): paths[source_id] = os.path.join(prefix, *(path.split('/'))) return paths def reset_readstatus(self, connection, oncard): cursor = connection.cursor() # Reset Im_Reading list in the database if oncard == 'carda': query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID like \'file:///mnt/sd/%\'' elif oncard != 'carda' and oncard != 'cardb': query= 'update content set ReadStatus=0, FirstTimeReading = \'true\' where BookID is Null and ContentID not like \'file:///mnt/sd/%\'' try: cursor.execute(query) except: debug_print(' Database Exception: Unable to reset ReadStatus list') raise else: connection.commit() # debug_print(' Commit: Reset ReadStatus list') cursor.close() def set_readstatus(self, connection, ContentID, ReadStatus): cursor = connection.cursor() t = (ContentID,) cursor.execute('select DateLastRead from Content where BookID is Null and ContentID = ?', t) result = cursor.fetchone() if result is None: datelastread = '1970-01-01T00:00:00' else: datelastread = result[0] if result[0] is not None else '1970-01-01T00:00:00' t = (ReadStatus,datelastread,ContentID,) try: cursor.execute('update content set ReadStatus=?,FirstTimeReading=\'false\',DateLastRead=? 
where BookID is Null and ContentID = ?', t) except: debug_print(' Database Exception: Unable update ReadStatus') raise else: connection.commit() # debug_print(' Commit: Setting ReadStatus List') cursor.close() def reset_favouritesindex(self, connection, oncard): # Reset FavouritesIndex list in the database if oncard == 'carda': query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID like \'file:///mnt/sd/%\'' elif oncard != 'carda' and oncard != 'cardb': query= 'update content set FavouritesIndex=-1 where BookID is Null and ContentID not like \'file:///mnt/sd/%\'' cursor = connection.cursor() try: cursor.execute(query) except Exception as e: debug_print(' Database Exception: Unable to reset Shortlist list') if 'no such column' not in str(e): raise else: connection.commit() # debug_print(' Commit: Reset FavouritesIndex list') def set_favouritesindex(self, connection, ContentID): cursor = connection.cursor() t = (ContentID,) try: cursor.execute('update content set FavouritesIndex=1 where BookID is Null and ContentID = ?', t) except Exception as e: debug_print(' Database Exception: Unable set book as Shortlist') if 'no such column' not in str(e): raise else: connection.commit() # debug_print(' Commit: Set FavouritesIndex') def update_device_database_collections(self, booklists, collections_attributes, oncard): debug_print("Kobo:update_device_database_collections - oncard='%s'"%oncard) if self.modify_database_check("update_device_database_collections") == False: return # Only process categories in this list supportedcategories = { "Im_Reading":1, "Read":2, "Closed":3, "Shortlist":4, # "Preview":99, # Unsupported as we don't want to change it } # Define lists for the ReadStatus readstatuslist = { "Im_Reading":1, "Read":2, "Closed":3, } accessibilitylist = { "Preview":6, "Recommendation":4, } # debug_print('Starting update_device_database_collections', collections_attributes) # Force collections_attributes to be 'tags' as no other is currently supported # debug_print('KOBO: overriding the provided collections_attributes:', collections_attributes) collections_attributes = ['tags'] collections = booklists.get_collections(collections_attributes) # debug_print('Kobo:update_device_database_collections - Collections:', collections) # Create a connection to the sqlite database # Needs to be outside books collection as in the case of removing # the last book from the collection the list of books is empty # and the removal of the last book would not occur import sqlite3 as sqlite with closing(sqlite.connect(self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite'))) as connection: # return bytestrings if the content cannot the decoded as unicode connection.text_factory = lambda x: unicode(x, "utf-8", "ignore") if collections: # Need to reset the collections outside the particular loops # otherwise the last item will not be removed self.reset_readstatus(connection, oncard) if self.dbversion >= 14: self.reset_favouritesindex(connection, oncard) # Process any collections that exist for category, books in collections.items(): if category in supportedcategories: # debug_print("Category: ", category, " id = ", readstatuslist.get(category)) for book in books: # debug_print(' Title:', book.title, 'category: ', category) if category not in book.device_collections: book.device_collections.append(category) extension = os.path.splitext(book.path)[1] ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path) 
ContentID = self.contentid_from_path(book.path, ContentType) if category in readstatuslist.keys(): # Manage ReadStatus self.set_readstatus(connection, ContentID, readstatuslist.get(category)) elif category == 'Shortlist' and self.dbversion >= 14: # Manage FavouritesIndex/Shortlist self.set_favouritesindex(connection, ContentID) elif category in accessibilitylist.keys(): # Do not manage the Accessibility List pass else: # No collections # Since no collections exist the ReadStatus needs to be reset to 0 (Unread) debug_print("No Collections - reseting ReadStatus") self.reset_readstatus(connection, oncard) if self.dbversion >= 14: debug_print("No Collections - reseting FavouritesIndex") self.reset_favouritesindex(connection, oncard) # debug_print('Finished update_device_database_collections', collections_attributes) def get_collections_attributes(self): collections = [] opts = self.settings() if opts.extra_customization and len(opts.extra_customization[self.OPT_COLLECTIONS]) > 0: collections = [x.lower().strip() for x in opts.extra_customization[self.OPT_COLLECTIONS].split(',')] return collections def sync_booklists(self, booklists, end_session=True): debug_print('KOBO:sync_booklists - start') paths = self.get_device_paths() blists = {} for i in paths: try: if booklists[i] is not None: #debug_print('Booklist: ', i) blists[i] = booklists[i] except IndexError: pass collections = self.get_collections_attributes() #debug_print('KOBO: collection fields:', collections) for i, blist in blists.items(): if i == 0: oncard = 'main' else: oncard = 'carda' self.update_device_database_collections(blist, collections, oncard) USBMS.sync_booklists(self, booklists, end_session=end_session) debug_print('KOBO:sync_booklists - end') def rebuild_collections(self, booklist, oncard): collections_attributes = [] self.update_device_database_collections(booklist, collections_attributes, oncard) def upload_cover(self, path, filename, metadata, filepath): ''' Upload book cover to the device. Default implementation does nothing. :param path: The full path to the directory where the associated book is located. :param filename: The name of the book file without the extension. :param metadata: metadata belonging to the book. 
Use metadata.thumbnail for cover :param filepath: The full path to the ebook file ''' opts = self.settings() if not opts.extra_customization[self.OPT_UPLOAD_COVERS]: # Building thumbnails disabled debug_print('KOBO: not uploading cover') return if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]: uploadgrayscale = False else: uploadgrayscale = True debug_print('KOBO: uploading cover') try: self._upload_cover(path, filename, metadata, filepath, uploadgrayscale) except: debug_print('FAILED to upload cover', filepath) def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale): from calibre.utils.magick.draw import save_cover_data_to if metadata.cover: cover = self.normalize_path(metadata.cover.replace('/', os.sep)) if os.path.exists(cover): # Get ContentID for Selected Book extension = os.path.splitext(filepath)[1] ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(filepath) ContentID = self.contentid_from_path(filepath, ContentType) import sqlite3 as sqlite with closing(sqlite.connect(self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite'))) as connection: # return bytestrings if the content cannot the decoded as unicode connection.text_factory = lambda x: unicode(x, "utf-8", "ignore") cursor = connection.cursor() t = (ContentID,) cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t) result = cursor.fetchone() if result is None: debug_print("No rows exist in the database - cannot upload") return else: ImageID = result[0] # debug_print("ImageId: ", result[0]) cursor.close() if ImageID != None: path_prefix = '.kobo/images/' path = self._main_prefix + path_prefix + ImageID file_endings = {' - iPhoneThumbnail.parsed':(103,150), ' - bbMediumGridList.parsed':(93,135), ' - NickelBookCover.parsed':(500,725), ' - N3_LIBRARY_FULL.parsed':(355,530), ' - N3_LIBRARY_GRID.parsed':(149,233), ' - N3_LIBRARY_LIST.parsed':(60,90), ' - N3_FULL.parsed':(600,800), ' - N3_SOCIAL_CURRENTREAD.parsed':(120,186)} for ending, resize in file_endings.items(): fpath = path + ending fpath = self.normalize_path(fpath.replace('/', os.sep)) if os.path.exists(fpath): with open(cover, 'rb') as f: data = f.read() # Return the data resized and in Grayscale if # required data = save_cover_data_to(data, 'dummy.jpg', grayscale=uploadgrayscale, resize_to=resize, return_data=True) with open(fpath, 'wb') as f: f.write(data) fsync(f) else: debug_print("ImageID could not be retreived from the database") def prepare_addable_books(self, paths): ''' The Kobo supports an encrypted epub refered to as a kepub Unfortunately Kobo decided to put the files on the device with no file extension. 
I just hope that decision causes them as much grief as it does me :-) This has to make a temporary copy of the book files with a epub extension to allow Calibre's normal processing to deal with the file appropriately ''' for idx, path in enumerate(paths): if path.find('kepub') >= 0: with closing(open(path, 'rb')) as r: tf = PersistentTemporaryFile(suffix='.epub') shutil.copyfileobj(r, tf) # tf.write(r.read()) paths[idx] = tf.name return paths def create_annotations_path(self, mdata, device_path=None): if device_path: return device_path return USBMS.create_annotations_path(self, mdata) def get_annotations(self, path_map): from calibre.devices.kobo.bookmark import Bookmark EPUB_FORMATS = [u'epub'] epub_formats = set(EPUB_FORMATS) def get_storage(): storage = [] if self._main_prefix: storage.append(os.path.join(self._main_prefix, self.EBOOK_DIR_MAIN)) if self._card_a_prefix: storage.append(os.path.join(self._card_a_prefix, self.EBOOK_DIR_CARD_A)) if self._card_b_prefix: storage.append(os.path.join(self._card_b_prefix, self.EBOOK_DIR_CARD_B)) return storage def resolve_bookmark_paths(storage, path_map): pop_list = [] book_ext = {} for id in path_map: file_fmts = set() for fmt in path_map[id]['fmts']: file_fmts.add(fmt) bookmark_extension = None if file_fmts.intersection(epub_formats): book_extension = list(file_fmts.intersection(epub_formats))[0] bookmark_extension = 'epub' if bookmark_extension: for vol in storage: bkmk_path = path_map[id]['path'] bkmk_path = bkmk_path if os.path.exists(bkmk_path): path_map[id] = bkmk_path book_ext[id] = book_extension break else: pop_list.append(id) else: pop_list.append(id) # Remove non-existent bookmark templates for id in pop_list: path_map.pop(id) return path_map, book_ext storage = get_storage() path_map, book_ext = resolve_bookmark_paths(storage, path_map) bookmarked_books = {} for id in path_map: extension = os.path.splitext(path_map[id])[1] ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(path_map[id]) ContentID = self.contentid_from_path(path_map[id], ContentType) debug_print("get_annotations - ContentID: ", ContentID, "ContentType: ", ContentType) bookmark_ext = extension db_path = self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite') myBookmark = Bookmark(db_path, ContentID, path_map[id], id, book_ext[id], bookmark_ext) bookmarked_books[id] = self.UserAnnotation(type='kobo_bookmark', value=myBookmark) # This returns as job.result in gui2.ui.annotations_fetched(self,job) return bookmarked_books def generate_annotation_html(self, bookmark): import calendar from calibre.ebooks.BeautifulSoup import BeautifulSoup, Tag, NavigableString # Returns <div class="user_annotations"> ... 
</div> #last_read_location = bookmark.last_read_location #timestamp = bookmark.timestamp percent_read = bookmark.percent_read debug_print("Date: ", bookmark.last_read) if bookmark.last_read is not None: try: last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S")))) except: try: last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%S.%f")))) except: last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(calendar.timegm(time.strptime(bookmark.last_read, "%Y-%m-%dT%H:%M:%SZ")))) else: #self.datetime = time.gmtime() last_read = time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) # debug_print("Percent read: ", percent_read) ka_soup = BeautifulSoup() dtc = 0 divTag = Tag(ka_soup,'div') divTag['class'] = 'user_annotations' # Add the last-read location spanTag = Tag(ka_soup, 'span') spanTag['style'] = 'font-weight:normal' if bookmark.book_format == 'epub': spanTag.insert(0,NavigableString( _("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") % dict(time=last_read, # loc=last_read_location, pr=percent_read))) else: spanTag.insert(0,NavigableString( _("<hr /><b>Book Last Read:</b> %(time)s<br /><b>Percentage Read:</b> %(pr)d%%<hr />") % dict(time=last_read, # loc=last_read_location, pr=percent_read))) divTag.insert(dtc, spanTag) dtc += 1 divTag.insert(dtc, Tag(ka_soup,'br')) dtc += 1 if bookmark.user_notes: user_notes = bookmark.user_notes annotations = [] # Add the annotations sorted by location for location in sorted(user_notes): if user_notes[location]['type'] == 'Bookmark': annotations.append( _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br />%(annotation)s<br /><hr />') % dict(chapter=user_notes[location]['chapter'], dl=user_notes[location]['displayed_location'], typ=user_notes[location]['type'], chapter_title=user_notes[location]['chapter_title'], chapter_progress=user_notes[location]['chapter_progress'], annotation=user_notes[location]['annotation'] if user_notes[location]['annotation'] is not None else "")) elif user_notes[location]['type'] == 'Highlight': annotations.append( _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><hr />') % dict(chapter=user_notes[location]['chapter'], dl=user_notes[location]['displayed_location'], typ=user_notes[location]['type'], chapter_title=user_notes[location]['chapter_title'], chapter_progress=user_notes[location]['chapter_progress'], text=user_notes[location]['text'])) elif user_notes[location]['type'] == 'Annotation': annotations.append( _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') % dict(chapter=user_notes[location]['chapter'], dl=user_notes[location]['displayed_location'], typ=user_notes[location]['type'], chapter_title=user_notes[location]['chapter_title'], chapter_progress=user_notes[location]['chapter_progress'], text=user_notes[location]['text'], annotation=user_notes[location]['annotation'])) else: annotations.append( _('<b>Chapter %(chapter)d:</b> %(chapter_title)s<br /><b>%(typ)s</b><br /><b>Chapter Progress:</b> %(chapter_progress)s%%<br /><b>Highlight:</b> %(text)s<br /><b>Notes:</b> %(annotation)s<br /><hr />') % 
dict(chapter=user_notes[location]['chapter'], dl=user_notes[location]['displayed_location'], typ=user_notes[location]['type'], chapter_title=user_notes[location]['chapter_title'], chapter_progress=user_notes[location]['chapter_progress'], text=user_notes[location]['text'], annotation=user_notes[location]['annotation'])) for annotation in annotations: divTag.insert(dtc, annotation) dtc += 1 ka_soup.insert(0,divTag) return ka_soup def add_annotation_to_library(self, db, db_id, annotation): from calibre.ebooks.BeautifulSoup import Tag bm = annotation ignore_tags = set(['Catalog', 'Clippings']) if bm.type == 'kobo_bookmark': mi = db.get_metadata(db_id, index_is_id=True) debug_print("KOBO:add_annotation_to_library - Title: ", mi.title) user_notes_soup = self.generate_annotation_html(bm.value) if mi.comments: a_offset = mi.comments.find('<div class="user_annotations">') ad_offset = mi.comments.find('<hr class="annotations_divider" />') if a_offset >= 0: mi.comments = mi.comments[:a_offset] if ad_offset >= 0: mi.comments = mi.comments[:ad_offset] if set(mi.tags).intersection(ignore_tags): return if mi.comments: hrTag = Tag(user_notes_soup,'hr') hrTag['class'] = 'annotations_divider' user_notes_soup.insert(0, hrTag) mi.comments += unicode(user_notes_soup.prettify()) else: mi.comments = unicode(user_notes_soup.prettify()) # Update library comments db.set_comment(db_id, mi.comments) # Add bookmark file to db_id # NOTE: As it is, this copied the book from the device back to the library. That meant it replaced the # existing file. Taking this out for that reason, but some books have a ANNOT file that could be # copied. # db.add_format_with_hooks(db_id, bm.value.bookmark_extension, # bm.value.path, index_is_id=True) class KOBOTOUCH(KOBO): name = 'KoboTouch' gui_name = 'Kobo Touch/Glo/Mini/Aura HD' author = 'David Forrester' description = 'Communicate with the Kobo Touch, Glo, Mini and Aura HD ereaders. Based on the existing Kobo driver by %s.' % (KOBO.author) # icon = I('devices/kobotouch.jpg') supported_dbversion = 120 min_supported_dbversion = 53 min_dbversion_series = 65 min_dbversion_externalid = 65 min_dbversion_archive = 71 min_dbversion_images_on_sdcard = 77 min_dbversion_activity = 77 min_dbversion_keywords = 82 max_supported_fwversion = (3, 15, 1) # The following document firwmare versions where new function or devices were added. # Not all are used, but this feels a good place to record it. min_fwversion_shelves = (2, 0, 0) min_fwversion_images_on_sdcard = (2, 4, 1) min_fwversion_images_tree = (2, 9, 0) # Cover images stored in tree under .kobo-images min_aurah2o_fwversion = (3, 7, 0) min_reviews_fwversion = (3, 12, 0) min_glohd_fwversion = (3, 14, 0) has_kepubs = True booklist_class = KTCollectionsBookList book_class = Book MAX_PATH_LEN = 185 # 250 - (len(" - N3_LIBRARY_SHELF.parsed") + len("F:\.kobo\images\")) KOBO_EXTRA_CSSFILE = 'kobo_extra.css' EXTRA_CUSTOMIZATION_MESSAGE = [ _('The Kobo from firmware V2.0.0 supports bookshelves.' ' These are created on the Kobo. ' + 'Specify a tags type column for automatic management.'), _('Create Bookshelves') + ':::'+_('Create new bookshelves on the Kobo if they do not exist. This is only for firmware V2.0.0 or later.'), _('Delete Empty Bookshelves') + ':::'+_('Delete any empty bookshelves from the Kobo when syncing is finished. 
This is only for firmware V2.0.0 or later.'), _('Upload covers for books') + ':::'+_('Upload cover images from the calibre library when sending books to the device.'), _('Upload Black and White Covers'), _('Keep cover aspect ratio') + ':::'+_('When uploading covers, do not change the aspect ratio when resizing for the device.' ' This is for firmware versions 2.3.1 and later.'), _('Show archived books') + ':::'+_('Archived books are listed on the device but need to be downloaded to read.' ' Use this option to show these books and match them with books in the calibre library.'), _('Show Previews') + ':::'+_('Kobo previews are included on the Touch and some other versions' ' by default they are no longer displayed as there is no good reason to ' 'see them. Enable if you wish to see/delete them.'), _('Show Recommendations') + ':::'+_('Kobo shows recommendations on the device. In some cases these have ' 'files but in other cases they are just pointers to the web site to buy. ' 'Enable if you wish to see/delete them.'), _('Set Series information') + ':::'+_('The book lists on the Kobo devices can display series information. ' 'This is not read by the device from the sideloaded books. ' 'Series information can only be added to the device after the book has been processed by the device. ' 'Enable if you wish to set series information.'), _('Modify CSS') + ':::'+_('This allows addition of user CSS rules and removal of some CSS. ' 'When sending a book, the driver adds the contents of {0} to all stylesheets in the ePub. ' 'This file is searched for in the root directory of the main memory of the device. ' 'As well as this, if the file contains settings for the "orphans" or "widows", ' 'these are removed for all styles in the original stylesheet.').format(KOBO_EXTRA_CSSFILE), _('Attempt to support newer firmware') + ':::'+_('Kobo routinely updates the firmware and the ' 'database version. With this option Calibre will attempt ' 'to perform full read-write functionality - Here be Dragons!! ' 'Enable only if you are comfortable with restoring your kobo ' 'to factory defaults and testing software. ' 'This driver supports firmware V2.x.x and DBVersion up to ') + unicode(supported_dbversion), _('Title to test when debugging') + ':::'+_('Part of title of a book that can be used when doing some tests for debugging. ' 'The test is to see if the string is contained in the title of a book. ' 'The better the match, the less extraneous output.'), ] EXTRA_CUSTOMIZATION_DEFAULT = [ u'', False, False, False, False, False, False, False, False, False, False, False, u'' ] OPT_COLLECTIONS = 0 OPT_CREATE_BOOKSHELVES = 1 OPT_DELETE_BOOKSHELVES = 2 OPT_UPLOAD_COVERS = 3 OPT_UPLOAD_GRAYSCALE_COVERS = 4 OPT_KEEP_COVER_ASPECT_RATIO = 5 OPT_SHOW_ARCHIVED_BOOK_RECORDS = 6 OPT_SHOW_PREVIEWS = 7 OPT_SHOW_RECOMMENDATIONS = 8 OPT_UPDATE_SERIES_DETAILS = 9 OPT_MODIFY_CSS = 10 OPT_SUPPORT_NEWER_FIRMWARE = 11 OPT_DEBUGGING_TITLE = 12 opts = None TIMESTAMP_STRING = "%Y-%m-%dT%H:%M:%SZ" AURA_PRODUCT_ID = [0x4203] AURA_HD_PRODUCT_ID = [0x4193] AURA_H2O_PRODUCT_ID = [0x4213] GLO_PRODUCT_ID = [0x4173] GLO_HD_PRODUCT_ID = [0x4223] MINI_PRODUCT_ID = [0x4183] TOUCH_PRODUCT_ID = [0x4163] PRODUCT_ID = AURA_PRODUCT_ID + AURA_HD_PRODUCT_ID + AURA_H2O_PRODUCT_ID + \ GLO_PRODUCT_ID + GLO_HD_PRODUCT_ID + \ MINI_PRODUCT_ID + TOUCH_PRODUCT_ID BCD = [0x0110, 0x0326] # Image file name endings. Made up of: image size, min_dbversion, max_dbversion, isFullSize, # Note: "200" has been used just as a much larger number than the current versions. 
It is just a lazy # way of making it open ended. COVER_FILE_ENDINGS = { ' - N3_FULL.parsed':[(600,800),0, 200,True,], # Used for screensaver, home screen ' - N3_LIBRARY_FULL.parsed':[(355,473),0, 200,False,], # Used for Details screen before FW2.8.1, then for current book tile on home screen ' - N3_LIBRARY_GRID.parsed':[(149,198),0, 200,False,], # Used for library lists ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,False,], ' - AndroidBookLoadTablet_Aspect.parsed':[(355,473), 82, 200,False,], # Used for Details screen from FW2.8.1 # ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,], } GLO_COVER_FILE_ENDINGS = { # Glo and Aura share resolution, so the image sizes should be the same. ' - N3_FULL.parsed':[(758,1024),0, 200,True,], # Used for screensaver, home screen ' - N3_LIBRARY_FULL.parsed':[(355,479),0, 200,False,], # Used for Details screen before FW2.8.1, then for current book tile on home screen ' - N3_LIBRARY_GRID.parsed':[(149,201),0, 200,False,], # Used for library lists ' - AndroidBookLoadTablet_Aspect.parsed':[(355,479), 88, 200,False,], # Used for Details screen from FW2.8.1 } AURA_HD_COVER_FILE_ENDINGS = { ' - N3_FULL.parsed': [(1080,1440), 0, 200,True,], # Used for screensaver, home screen ' - N3_LIBRARY_FULL.parsed':[(355, 471), 0, 200,False,], # Used for Details screen before FW2.8.1, then for current book tile on home screen ' - N3_LIBRARY_GRID.parsed':[(149, 198), 0, 200,False,], # Used for library lists ' - AndroidBookLoadTablet_Aspect.parsed':[(355, 471), 88, 200,False,], # Used for Details screen from FW2.8.1 } # Following are the sizes used with pre2.1.4 firmware # COVER_FILE_ENDINGS = { # ' - N3_LIBRARY_FULL.parsed':[(355,530),0, 99,], # Used for Details screen # ' - N3_LIBRARY_FULL.parsed':[(600,800),0, 99,], # ' - N3_LIBRARY_GRID.parsed':[(149,233),0, 99,], # Used for library lists # ' - N3_LIBRARY_LIST.parsed':[(60,90),0, 53,], # ' - N3_LIBRARY_SHELF.parsed': [(40,60),0, 52,], # ' - N3_FULL.parsed':[(600,800),0, 99,], # Used for screensaver if "Full screen" is checked. 
# } def initialize(self): super(KOBOTOUCH, self).initialize() self.bookshelvelist = [] def get_device_information(self, end_session=True): self.set_device_name() return super(KOBOTOUCH, self).get_device_information(end_session) def books(self, oncard=None, end_session=True): debug_print("KoboTouch:books - oncard='%s'"%oncard) from calibre.ebooks.metadata.meta import path_to_ext dummy_bl = self.booklist_class(None, None, None) if oncard == 'carda' and not self._card_a_prefix: self.report_progress(1.0, _('Getting list of books on device...')) debug_print("KoboTouch:books - Asked to process 'carda', but do not have one!") return dummy_bl elif oncard == 'cardb' and not self._card_b_prefix: self.report_progress(1.0, _('Getting list of books on device...')) debug_print("KoboTouch:books - Asked to process 'cardb', but do not have one!") return dummy_bl elif oncard and oncard != 'carda' and oncard != 'cardb': self.report_progress(1.0, _('Getting list of books on device...')) debug_print("KoboTouch:books - unknown card") return dummy_bl prefix = self._card_a_prefix if oncard == 'carda' else \ self._card_b_prefix if oncard == 'cardb' \ else self._main_prefix debug_print("KoboTouch:books - oncard='%s', prefix='%s'"%(oncard, prefix)) # Determine the firmware version try: with open(self.normalize_path(self._main_prefix + '.kobo/version'), 'rb') as f: self.fwversion = f.readline().split(',')[2] self.fwversion = tuple((int(x) for x in self.fwversion.split('.'))) except: self.fwversion = (0,0,0) debug_print('Kobo device: %s' % self.gui_name) debug_print('Version of driver:', self.version, 'Has kepubs:', self.has_kepubs) debug_print('Version of firmware:', self.fwversion, 'Has kepubs:', self.has_kepubs) debug_print('Firmware supports cover image tree:', self.fwversion >= self.min_fwversion_images_tree) self.booklist_class.rebuild_collections = self.rebuild_collections # get the metadata cache bl = self.booklist_class(oncard, prefix, self.settings) opts = self.settings() debug_print("KoboTouch:books - opts.extra_customization=", opts.extra_customization) debug_print("KoboTouch:books - prefs['manage_device_metadata']=", prefs['manage_device_metadata']) if opts.extra_customization: debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE] debug_print("KoboTouch:books - set_debugging_title to '%s'" % debugging_title) bl.set_debugging_title(debugging_title) debug_print("KoboTouch:books - length bl=%d"%len(bl)) need_sync = self.parse_metadata_cache(bl, prefix, self.METADATA_CACHE) debug_print("KoboTouch:books - length bl after sync=%d"%len(bl)) # make a dict cache of paths so the lookup in the loop below is faster. bl_cache = {} for idx,b in enumerate(bl): bl_cache[b.lpath] = idx def update_booklist(prefix, path, title, authors, mime, date, ContentID, ContentType, ImageID, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded, series, seriesnumber, userid, bookshelves): show_debug = self.is_debugging_title(title) # show_debug = authors == 'L. 
Frank Baum' if show_debug: debug_print("KoboTouch:update_booklist - title='%s'"%title, "ContentType=%s"%ContentType, "isdownloaded=", isdownloaded) debug_print( " prefix=%s, mime=%s, date=%s, readstatus=%d, MimeType=%s, expired=%d, favouritesindex=%d, accessibility=%d, isdownloaded=%s"% (prefix, mime, date, readstatus, MimeType, expired, favouritesindex, accessibility, isdownloaded,)) changed = False try: lpath = path.partition(self.normalize_path(prefix))[2] if lpath.startswith(os.sep): lpath = lpath[len(os.sep):] lpath = lpath.replace('\\', '/') # debug_print("LPATH: ", lpath, " - Title: " , title) playlist_map = {} if lpath not in playlist_map: playlist_map[lpath] = [] allow_shelves = True if readstatus == 1: playlist_map[lpath].append('Im_Reading') elif readstatus == 2: playlist_map[lpath].append('Read') elif readstatus == 3: playlist_map[lpath].append('Closed') # Related to a bug in the Kobo firmware that leaves an expired row for deleted books # this shows an expired Collection so the user can decide to delete the book if expired == 3: playlist_map[lpath].append('Expired') allow_shelves = False # A SHORTLIST is supported on the touch but the data field is there on most earlier models if favouritesindex == 1: playlist_map[lpath].append('Shortlist') # The follwing is in flux: # - FW2.0.0, DBVersion 53,55 accessibility == 1 # - FW2.1.2 beta, DBVersion == 56, accessibility == -1: # So, the following should be OK if isdownloaded == 'false': if self.dbversion < 56 and accessibility <= 1 or self.dbversion >= 56 and accessibility == -1: playlist_map[lpath].append('Deleted') allow_shelves = False if show_debug: debug_print("KoboTouch:update_booklist - have a deleted book") elif self.supports_kobo_archive() and (accessibility == 1 or accessibility == 2): playlist_map[lpath].append('Archived') allow_shelves = True # Label Previews and Recommendations if accessibility == 6: if userid == '': playlist_map[lpath].append('Recommendation') allow_shelves = False else: playlist_map[lpath].append('Preview') allow_shelves = False elif accessibility == 4: # Pre 2.x.x firmware playlist_map[lpath].append('Recommendation') allow_shelves = False kobo_collections = playlist_map[lpath][:] if allow_shelves: # debug_print('KoboTouch:update_booklist - allowing shelves - title=%s' % title) if len(bookshelves) > 0: playlist_map[lpath].extend(bookshelves) if show_debug: debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map) path = self.normalize_path(path) # print "Normalized FileName: " + path idx = bl_cache.get(lpath, None) if idx is not None: # and not (accessibility == 1 and isdownloaded == 'false'): if show_debug: self.debug_index = idx debug_print("KoboTouch:update_booklist - idx=%d"%idx) debug_print("KoboTouch:update_booklist - lpath=%s"%lpath) debug_print('KoboTouch:update_booklist - bl[idx].device_collections=', bl[idx].device_collections) debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map) debug_print('KoboTouch:update_booklist - bookshelves=', bookshelves) debug_print('KoboTouch:update_booklist - kobo_collections=', kobo_collections) debug_print('KoboTouch:update_booklist - series="%s"' % bl[idx].series) debug_print('KoboTouch:update_booklist - the book=', bl[idx]) debug_print('KoboTouch:update_booklist - the authors=', bl[idx].authors) debug_print('KoboTouch:update_booklist - application_id=', bl[idx].application_id) bl_cache[lpath] = None if ImageID is not None: imagename = self.imagefilename_from_imageID(prefix, ImageID) if imagename is not None: bl[idx].thumbnail = 
ImageWrapper(imagename) if (ContentType == '6' and MimeType != 'application/x-kobo-epub+zip'): if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))): if self.update_metadata_item(bl[idx]): # print 'update_metadata_item returned true' changed = True else: debug_print(" Strange: The file: ", prefix, lpath, " does not exist!") debug_print("KoboTouch:update_booklist - book size=", bl[idx].size) if show_debug: debug_print("KoboTouch:update_booklist - ContentID='%s'"%ContentID) bl[idx].contentID = ContentID bl[idx].kobo_series = series bl[idx].kobo_series_number = seriesnumber bl[idx].can_put_on_shelves = allow_shelves if lpath in playlist_map: bl[idx].device_collections = playlist_map.get(lpath,[]) bl[idx].current_shelves = bookshelves bl[idx].kobo_collections = kobo_collections if show_debug: debug_print('KoboTouch:update_booklist - updated bl[idx].device_collections=', bl[idx].device_collections) debug_print('KoboTouch:update_booklist - playlist_map=', playlist_map, 'changed=', changed) # debug_print('KoboTouch:update_booklist - book=', bl[idx]) debug_print("KoboTouch:update_booklist - book class=%s"%bl[idx].__class__) debug_print("KoboTouch:update_booklist - book title=%s"%bl[idx].title) else: if show_debug: debug_print('KoboTouch:update_booklist - idx is none') try: if os.path.exists(self.normalize_path(os.path.join(prefix, lpath))): book = self.book_from_path(prefix, lpath, title, authors, mime, date, ContentType, ImageID) else: if isdownloaded == 'true': # A recommendation or preview is OK to not have a file debug_print(" Strange: The file: ", prefix, lpath, " does not exist!") title = "FILE MISSING: " + title book = self.book_class(prefix, lpath, title, authors, mime, date, ContentType, ImageID, size=0) if show_debug: debug_print('KoboTouch:update_booklist - book file does not exist. ContentID="%s"'%ContentID) except Exception as e: debug_print("KoboTouch:update_booklist - exception creating book: '%s'"%str(e)) debug_print(" prefix: ", prefix, "lpath: ", lpath, "title: ", title, "authors: ", authors, "mime: ", mime, "date: ", date, "ContentType: ", ContentType, "ImageID: ", ImageID) raise if show_debug: debug_print('KoboTouch:update_booklist - class:', book.__class__) # debug_print(' resolution:', book.__class__.__mro__) debug_print(" contentid: '%s'"%book.contentID) debug_print(" title:'%s'"%book.title) debug_print(" the book:", book) debug_print(" author_sort:'%s'"%book.author_sort) debug_print(" bookshelves:", bookshelves) debug_print(" kobo_collections:", kobo_collections) # print 'Update booklist' book.device_collections = playlist_map.get(lpath,[]) # if lpath in playlist_map else [] book.current_shelves = bookshelves book.kobo_collections = kobo_collections book.contentID = ContentID book.kobo_series = series book.kobo_series_number = seriesnumber book.can_put_on_shelves = allow_shelves # debug_print('KoboTouch:update_booklist - title=', title, 'book.device_collections', book.device_collections) if bl.add_book(book, replace_metadata=False): changed = True if show_debug: debug_print(' book.device_collections', book.device_collections) debug_print(' book.title', book.title) except: # Probably a path encoding error import traceback traceback.print_exc() return changed def get_bookshelvesforbook(connection, ContentID): # debug_print("KoboTouch:get_bookshelvesforbook - " + ContentID) bookshelves = [] if not self.supports_bookshelves(): return bookshelves cursor = connection.cursor() query = "select ShelfName " \ "from ShelfContent " \ "where ContentId = ? 
" \ "and _IsDeleted = 'false' " \ "and ShelfName is not null" # This should never be nulll, but it is protection against an error cause by a sync to the Kobo server values = (ContentID, ) cursor.execute(query, values) for i, row in enumerate(cursor): bookshelves.append(row[0]) cursor.close() # debug_print("KoboTouch:get_bookshelvesforbook - count bookshelves=" + unicode(count_bookshelves)) return bookshelves self.debug_index = 0 import sqlite3 as sqlite with closing(sqlite.connect(self.device_database_path())) as connection: debug_print("KoboTouch:books - reading device database") # return bytestrings if the content cannot the decoded as unicode connection.text_factory = lambda x: unicode(x, "utf-8", "ignore") cursor = connection.cursor() cursor.execute('select version from dbversion') result = cursor.fetchone() self.dbversion = result[0] debug_print("Database Version=%d"%self.dbversion) self.bookshelvelist = self.get_bookshelflist(connection) debug_print("KoboTouch:books - shelf list:", self.bookshelvelist) opts = self.settings() columns = 'Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ImageID, ReadStatus' if self.dbversion >= 16: columns += ', ___ExpirationStatus, FavouritesIndex, Accessibility' else: columns += ', "-1" as ___ExpirationStatus, "-1" as FavouritesIndex, "-1" as Accessibility' if self.dbversion >= 33: columns += ', IsDownloaded' else: columns += ', "1" as IsDownloaded' if self.supports_series(): columns += ", Series, SeriesNumber, ___UserID, ExternalId" else: columns += ', null as Series, null as SeriesNumber, ___UserID, null as ExternalId' where_clause = '' if self.supports_kobo_archive(): where_clause = (" where BookID is Null " " and ((Accessibility = -1 and IsDownloaded in ('true', 1 )) or (Accessibility in (1,2) %(expiry)s) " " %(previews)s %(recomendations)s )" " and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) and ContentType = 6)") % \ dict( expiry="" if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else "and IsDownloaded in ('true', 1)", previews=" or (Accessibility in (6) and ___UserID <> '')" if opts.extra_customization[self.OPT_SHOW_PREVIEWS] else "", recomendations=" or (Accessibility in (-1, 4, 6) and ___UserId = '')" if opts.extra_customization[ self.OPT_SHOW_RECOMMENDATIONS] else "" ) elif self.supports_series(): where_clause = (" where BookID is Null " " and ((Accessibility = -1 and IsDownloaded in ('true', 1)) or (Accessibility in (1,2)) %(previews)s %(recomendations)s )" " and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s)") % \ dict( expiry=" and ContentType = 6" if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else "", previews=" or (Accessibility in (6) and ___UserID <> '')" if opts.extra_customization[self.OPT_SHOW_PREVIEWS] else "", recomendations=" or (Accessibility in (-1, 4, 6) and ___UserId = '')" if opts.extra_customization[ self.OPT_SHOW_RECOMMENDATIONS] else "" ) elif self.dbversion >= 33: where_clause = (' where BookID is Null %(previews)s %(recomendations)s and not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s)') % \ dict( expiry=' and ContentType = 6' if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else '', previews=' and Accessibility <> 6' if opts.extra_customization[self.OPT_SHOW_PREVIEWS] == False else '', recomendations=' and IsDownloaded in (\'true\', 1)' if opts.extra_customization[self.OPT_SHOW_RECOMMENDATIONS] == False else '' ) elif self.dbversion >= 16: where_clause = (' where BookID is Null ' 'and 
not ((___ExpirationStatus=3 or ___ExpirationStatus is Null) %(expiry)s)') % \ dict(expiry=' and ContentType = 6' if opts.extra_customization[self.OPT_SHOW_ARCHIVED_BOOK_RECORDS] else '') else: where_clause = ' where BookID is Null' # Note: The card condition should not need the contentId test for the SD # card. But the ExternalId does not get set for sideloaded kepubs on the # SD card. card_condition = '' if self.has_externalid(): card_condition = " AND (externalId IS NOT NULL AND externalId <> '' OR contentId LIKE 'file:///mnt/sd/%')" if oncard == 'carda' else " AND (externalId IS NULL OR externalId = '') AND contentId NOT LIKE 'file:///mnt/sd/%'" else: card_condition = " AND contentId LIKE 'file:///mnt/sd/%'" if oncard == 'carda' else " AND contentId NOT LIKE'file:///mnt/sd/%'" query = 'SELECT ' + columns + ' FROM content ' + where_clause + card_condition debug_print("KoboTouch:books - query=", query) try: cursor.execute(query) except Exception as e: err = str(e) if not ('___ExpirationStatus' in err or 'FavouritesIndex' in err or 'Accessibility' in err or 'IsDownloaded' in err or 'Series' in err or 'ExternalId' in err ): raise query= ('select Title, Attribution, DateCreated, ContentID, MimeType, ContentType, ' 'ImageID, ReadStatus, "-1" as ___ExpirationStatus, "-1" as ' 'FavouritesIndex, "-1" as Accessibility, "1" as IsDownloaded, null as Series, null as SeriesNumber' ' from content where BookID is Null') cursor.execute(query) changed = False for i, row in enumerate(cursor): # self.report_progress((i+1) / float(numrows), _('Getting list of books on device...')) show_debug = self.is_debugging_title(row[0]) if show_debug: debug_print("KoboTouch:books - looping on database - row=%d" % i) debug_print("KoboTouch:books - title='%s'"%row[0], "authors=", row[1]) debug_print("KoboTouch:books - row=", row) if not hasattr(row[3], 'startswith') or row[3].lower().startswith("file:///usr/local/kobo/help/") or row[3].lower().startswith("/usr/local/kobo/help/"): # These are internal to the Kobo device and do not exist continue externalId = None if row[15] and len(row[15]) == 0 else row[15] path = self.path_from_contentid(row[3], row[5], row[4], oncard, externalId) mime = mime_type_ext(path_to_ext(path)) if path.find('kepub') == -1 else 'application/x-kobo-epub+zip' # debug_print("mime:", mime) if show_debug: debug_print("KoboTouch:books - path='%s'"%path, " ContentID='%s'"%row[3], " externalId=%s" % externalId) bookshelves = get_bookshelvesforbook(connection, row[3]) prefix = self._card_a_prefix if oncard == 'carda' else self._main_prefix changed = update_booklist(prefix, path, row[0], row[1], mime, row[2], row[3], row[5], row[6], row[7], row[4], row[8], row[9], row[10], row[11], row[12], row[13], row[14], bookshelves) if changed: need_sync = True cursor.close() if not prefs['manage_device_metadata'] == 'on_connect': self.dump_bookshelves(connection) else: debug_print("KoboTouch:books - automatically managing metadata") # Remove books that are no longer in the filesystem. 
Cache contains # indices into the booklist if book not in filesystem, None otherwise # Do the operation in reverse order so indices remain valid for idx in sorted(bl_cache.itervalues(), reverse=True): if idx is not None: if not os.path.exists(self.normalize_path(os.path.join(prefix, bl[idx].lpath))): need_sync = True del bl[idx] # else: # debug_print("KoboTouch:books - Book in mtadata.calibre, on file system but not database - bl[idx].title:'%s'"%bl[idx].title) # print "count found in cache: %d, count of files in metadata: %d, need_sync: %s" % \ # (len(bl_cache), len(bl), need_sync) # Bypassing the KOBO sync_booklists as that does things we don't need to do # Also forcing sync to see if this solves issues with updating shelves and matching books. if need_sync or True: # self.count_found_in_bl != len(bl) or need_sync: debug_print("KoboTouch:books - about to sync_booklists") if oncard == 'cardb': USBMS.sync_booklists(self, (None, None, bl)) elif oncard == 'carda': USBMS.sync_booklists(self, (None, bl, None)) else: USBMS.sync_booklists(self, (bl, None, None)) debug_print("KoboTouch:books - have done sync_booklists") self.report_progress(1.0, _('Getting list of books on device...')) debug_print("KoboTouch:books - end - oncard='%s'"%oncard) return bl def path_from_contentid(self, ContentID, ContentType, MimeType, oncard, externalId): path = ContentID if not externalId: return super(KOBOTOUCH, self).path_from_contentid(ContentID, ContentType, MimeType, oncard) if oncard == 'cardb': print 'path from_contentid cardb' else: if (ContentType == "6" or ContentType == "10"): # and MimeType == 'application/x-kobo-epub+zip': if path.startswith("file:///mnt/onboard/"): path = self._main_prefix + path.replace("file:///mnt/onboard/", '') elif path.startswith("file:///mnt/sd/"): path = self._card_a_prefix + path.replace("file:///mnt/sd/", '') elif externalId: path = self._card_a_prefix + 'koboExtStorage/kepub/' + path else: path = self._main_prefix + '.kobo/kepub/' + path else: # Should never get here, but, just in case... # if path.startswith("file:///mnt/onboard/"): path = path.replace("file:///mnt/onboard/", self._main_prefix) path = path.replace("file:///mnt/sd/", self._card_a_prefix) path = path.replace("/mnt/onboard/", self._main_prefix) # print "Internal: " + path return path def imagefilename_from_imageID(self, prefix, ImageID): show_debug = self.is_debugging_title(ImageID) path = self.images_path(prefix, ImageID) # path = self.normalize_path(path.replace('/', os.sep)) for ending, cover_options in self.cover_file_endings().items(): fpath = path + ending if os.path.exists(fpath): if show_debug: debug_print("KoboTouch:imagefilename_from_imageID - have cover image fpath=%s" % (fpath)) return fpath if show_debug: debug_print("KoboTouch:imagefilename_from_imageID - no cover image found - ImageID=%s" % (ImageID)) return None def get_extra_css(self): extra_sheet = None if self.modifying_css(): extra_css_path = os.path.join(self._main_prefix, self.KOBO_EXTRA_CSSFILE) if os.path.exists(extra_css_path): from cssutils import parseFile as cssparseFile try: extra_sheet = cssparseFile(extra_css_path) debug_print("KoboTouch:get_extra_css: Using extra CSS in {0} ({1} rules)".format(extra_css_path, len(extra_sheet.cssRules))) if len(extra_sheet.cssRules) ==0: debug_print("KoboTouch:get_extra_css: Extra CSS file has no valid rules. 
CSS will not be modified.") extra_sheet = None except Exception as e: debug_print("KoboTouch:get_extra_css: Problem parsing extra CSS file {0}".format(extra_css_path)) debug_print("KoboTouch:get_extra_css: Exception {0}".format(e)) return extra_sheet def upload_books(self, files, names, on_card=None, end_session=True, metadata=None): debug_print('KoboTouch:upload_books - %d books'%(len(files))) debug_print('KoboTouch:upload_books - files=', files) if self.modifying_epub(): self.extra_sheet = self.get_extra_css() i = 0 for file, n, mi in zip(files, names, metadata): debug_print("KoboTouch:upload_books: Processing book: {0} by {1}".format(mi.title, " and ".join(mi.authors))) debug_print("KoboTouch:upload_books: file=%s, name=%s" % (file, n)) self.report_progress(i / float(len(files)), "Processing book: {0} by {1}".format(mi.title, " and ".join(mi.authors))) mi.kte_calibre_name = n self._modify_epub(file, mi) i += 1 self.report_progress(0, 'Working...') result = super(KOBOTOUCH, self).upload_books(files, names, on_card, end_session, metadata) # debug_print('KoboTouch:upload_books - result=', result) if self.dbversion >= 53: import sqlite3 as sqlite try: with closing(sqlite.connect(self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite'))) as connection: connection.text_factory = lambda x: unicode(x, "utf-8", "ignore") cursor = connection.cursor() cleanup_query = "DELETE FROM content WHERE ContentID = ? AND Accessibility = 1 AND IsDownloaded = 'false'" for fname, cycle in result: show_debug = self.is_debugging_title(fname) contentID = self.contentid_from_path(fname, 6) if show_debug: debug_print('KoboTouch:upload_books: fname=', fname) debug_print('KoboTouch:upload_books: contentID=', contentID) cleanup_values = (contentID,) # debug_print('KoboTouch:upload_books: Delete record left if deleted on Touch') cursor.execute(cleanup_query, cleanup_values) self.set_filesize_in_device_database(connection, contentID, fname) if not self.copying_covers(): imageID = self.imageid_from_contentid(contentID) self.delete_images(imageID, fname) connection.commit() cursor.close() except Exception as e: debug_print('KoboTouch:upload_books - Exception: %s'%str(e)) return result def _modify_epub(self, file, metadata, container=None): debug_print("KoboTouch:_modify_epub:Processing {0} - {1}".format(metadata.author_sort, metadata.title)) # Currently only modifying CSS, so if no stylesheet, don't do anything if not self.extra_sheet: debug_print("KoboTouch:_modify_epub: no CSS file") return True commit_container = False if not container: commit_container = True try: from calibre.ebooks.oeb.polish.container import get_container debug_print("KoboTouch:_modify_epub: creating container") container = get_container(file) container.css_preprocessor = DummyCSSPreProcessor() except Exception as e: debug_print("KoboTouch:_modify_epub: exception from get_container {0} - {1}".format(metadata.author_sort, metadata.title)) debug_print("KoboTouch:_modify_epub: exception is: {0}".format(e)) return False else: debug_print("KoboTouch:_modify_epub: received container") from calibre.ebooks.oeb.base import OEB_STYLES for cssname, mt in container.mime_map.iteritems(): if mt in OEB_STYLES: newsheet = container.parsed(cssname) oldrules = len(newsheet.cssRules) # remove any existing @page rules in epub css # if css to be appended contains an @page rule if self.extra_sheet and len([r for r in self.extra_sheet if r.type == r.PAGE_RULE]): page_rules = [r for r in newsheet if r.type == r.PAGE_RULE] if len(page_rules) > 0: 
debug_print("KoboTouch:_modify_epub:Removing existing @page rules") for rule in page_rules: rule.style = '' # remove any existing widow/orphan settings in epub css # if css to be appended contains a widow/orphan rule or we there is no extra CSS file if (len([r for r in self.extra_sheet if r.type == r.STYLE_RULE and (r.style['widows'] or r.style['orphans'])]) > 0): widow_orphan_rules = [r for r in newsheet if r.type == r.STYLE_RULE and (r.style['widows'] or r.style['orphans'])] if len(widow_orphan_rules) > 0: debug_print("KoboTouch:_modify_epub:Removing existing widows/orphans attribs") for rule in widow_orphan_rules: rule.style.removeProperty('widows') rule.style.removeProperty('orphans') # append all rules from kobo extra css stylesheet for addrule in [r for r in self.extra_sheet.cssRules]: newsheet.insertRule(addrule, len(newsheet.cssRules)) debug_print("KoboTouch:_modify_epub:CSS rules {0} -> {1} ({2})".format(oldrules, len(newsheet.cssRules), cssname)) container.dirty(cssname) if commit_container: debug_print("KoboTouch:_modify_epub: committing container.") os.unlink(file) container.commit(file) return True def delete_via_sql(self, ContentID, ContentType): imageId = super(KOBOTOUCH, self).delete_via_sql(ContentID, ContentType) if self.dbversion >= 53: import sqlite3 as sqlite debug_print('KoboTouch:delete_via_sql: ContentID="%s"'%ContentID, 'ContentType="%s"'%ContentType) try: with closing(sqlite.connect(self.device_database_path())) as connection: debug_print('KoboTouch:delete_via_sql: have database connection') # return bytestrings if the content cannot the decoded as unicode connection.text_factory = lambda x: unicode(x, "utf-8", "ignore") cursor = connection.cursor() debug_print('KoboTouch:delete_via_sql: have cursor') t = (ContentID,) # Delete the Bookmarks debug_print('KoboTouch:delete_via_sql: Delete from Bookmark') cursor.execute('DELETE FROM Bookmark WHERE VolumeID = ?', t) # Delete from the Bookshelf debug_print('KoboTouch:delete_via_sql: Delete from the Bookshelf') cursor.execute('delete from ShelfContent where ContentID = ?', t) # ContentType 6 is now for all books. 
debug_print('KoboTouch:delete_via_sql: BookID is Null') cursor.execute('delete from content where BookID is Null and ContentID =?',t) # Remove the content_settings entry debug_print('KoboTouch:delete_via_sql: delete from content_settings') cursor.execute('delete from content_settings where ContentID =?',t) # Remove the ratings entry debug_print('KoboTouch:delete_via_sql: delete from ratings') cursor.execute('delete from ratings where ContentID =?',t) # Remove any entries for the Activity table - removes tile from new home page if self.has_activity_table(): debug_print('KoboTouch:delete_via_sql: delete from Activity') cursor.execute('delete from Activity where Id =?', t) connection.commit() cursor.close() debug_print('KoboTouch:delete_via_sql: finished SQL') debug_print('KoboTouch:delete_via_sql: After SQL, no exception') except Exception as e: debug_print('KoboTouch:delete_via_sql - Database Exception: %s'%str(e)) debug_print('KoboTouch:delete_via_sql: imageId="%s"'%imageId) if imageId is None: imageId = self.imageid_from_contentid(ContentID) return imageId def delete_images(self, ImageID, book_path): debug_print("KoboTouch:delete_images - ImageID=", ImageID) if ImageID != None: path = self.images_path(book_path, ImageID) debug_print("KoboTouch:delete_images - path=%s" % path) for ending in self.cover_file_endings().keys(): fpath = path + ending fpath = self.normalize_path(fpath) debug_print("KoboTouch:delete_images - fpath=%s" % fpath) if os.path.exists(fpath): debug_print("KoboTouch:delete_images - Image File Exists") os.unlink(fpath) try: os.removedirs(os.path.dirname(path)) except: pass def contentid_from_path(self, path, ContentType): show_debug = self.is_debugging_title(path) and True if show_debug: debug_print("KoboTouch:contentid_from_path - path='%s'"%path, "ContentType='%s'"%ContentType) debug_print("KoboTouch:contentid_from_path - self._main_prefix='%s'"%self._main_prefix, "self._card_a_prefix='%s'"%self._card_a_prefix) if ContentType == 6: extension = os.path.splitext(path)[1] if extension == '.kobo': ContentID = os.path.splitext(path)[0] # Remove the prefix on the file. it could be either ContentID = ContentID.replace(self._main_prefix, '') elif extension == '': ContentID = path ContentID = ContentID.replace(self._main_prefix + self.normalize_path('.kobo/kepub/'), '') else: ContentID = path ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/") if show_debug: debug_print("KoboTouch:contentid_from_path - 1 ContentID='%s'"%ContentID) if self._card_a_prefix is not None: ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/") else: # ContentType = 16 debug_print("KoboTouch:contentid_from_path ContentType other than 6 - ContentType='%d'"%ContentType, "path='%s'"%path) ContentID = path ContentID = ContentID.replace(self._main_prefix, "file:///mnt/onboard/") if self._card_a_prefix is not None: ContentID = ContentID.replace(self._card_a_prefix, "file:///mnt/sd/") ContentID = ContentID.replace("\\", '/') if show_debug: debug_print("KoboTouch:contentid_from_path - end - ContentID='%s'"%ContentID) return ContentID def get_content_type_from_extension(self, extension): debug_print("KoboTouch:get_content_type_from_extension - start") # With new firmware, ContentType appears to be 6 for all types of sideloaded books. if self.fwversion >= (1,9,17) or extension == '.kobo' or extension == '.mobi': debug_print("KoboTouch:get_content_type_from_extension - V2 firmware") ContentType = 6 # For older firmware, it depends on the type of file. 
elif extension == '.kobo' or extension == '.mobi': ContentType = 6 else: ContentType = 901 return ContentType def update_device_database_collections(self, booklists, collections_attributes, oncard): debug_print("KoboTouch:update_device_database_collections - oncard='%s'"%oncard) if self.modify_database_check("update_device_database_collections") == False: return # Only process categories in this list supportedcategories = { "Im_Reading": 1, "Read": 2, "Closed": 3, "Shortlist": 4, "Archived": 5, # "Preview":99, # Unsupported as we don't want to change it } # Define lists for the ReadStatus readstatuslist = { "Im_Reading":1, "Read":2, "Closed":3, } accessibilitylist = { "Preview":6, "Recommendation":4, "Deleted":1, } # specialshelveslist = { # "Shortlist":1, # "Wishlist":2, # } # debug_print('KoboTouch:update_device_database_collections - collections_attributes=', collections_attributes) opts = self.settings() if opts.extra_customization: create_bookshelves = opts.extra_customization[self.OPT_CREATE_BOOKSHELVES] and self.supports_bookshelves() delete_empty_shelves = opts.extra_customization[self.OPT_DELETE_BOOKSHELVES] and self.supports_bookshelves() update_series_details = opts.extra_customization[self.OPT_UPDATE_SERIES_DETAILS] and self.supports_series() debugging_title = opts.extra_customization[self.OPT_DEBUGGING_TITLE] debug_print("KoboTouch:update_device_database_collections - set_debugging_title to '%s'" % debugging_title) booklists.set_debugging_title(debugging_title) else: delete_empty_shelves = False create_bookshelves = False update_series_details = False opts = self.settings() if opts.extra_customization: create_bookshelves = opts.extra_customization[self.OPT_CREATE_BOOKSHELVES] and self.supports_bookshelves() delete_empty_shelves = opts.extra_customization[self.OPT_DELETE_BOOKSHELVES] and self.supports_bookshelves() else: delete_empty_shelves = False bookshelf_attribute = len(collections_attributes) collections = booklists.get_collections(collections_attributes) if bookshelf_attribute else None # debug_print('KoboTouch:update_device_database_collections - Collections:', collections) # Create a connection to the sqlite database # Needs to be outside books collection as in the case of removing # the last book from the collection the list of books is empty # and the removal of the last book would not occur import sqlite3 as sqlite with closing(sqlite.connect(self.normalize_path(self._main_prefix + '.kobo/KoboReader.sqlite'))) as connection: # return bytestrings if the content cannot the decoded as unicode connection.text_factory = lambda x: unicode(x, "utf-8", "ignore") if collections: # debug_print("KoboTouch:update_device_database_collections - length collections=" + unicode(len(collections))) # Need to reset the collections outside the particular loops # otherwise the last item will not be removed if self.dbversion < 53: debug_print("KoboTouch:update_device_database_collections - calling reset_readstatus") self.reset_readstatus(connection, oncard) if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves: debug_print("KoboTouch:update_device_database_collections - calling reset_favouritesindex") self.reset_favouritesindex(connection, oncard) # debug_print("KoboTouch:update_device_database_collections - length collections=", len(collections)) # debug_print("KoboTouch:update_device_database_collections - self.bookshelvelist=", self.bookshelvelist) # Process any collections that exist for category, books in collections.items(): 
debug_print("KoboTouch:update_device_database_collections - category='%s' books=%d"%(category, len(books))) if create_bookshelves and not (category in supportedcategories or category in readstatuslist or category in accessibilitylist): self.check_for_bookshelf(connection, category) # if category in self.bookshelvelist: # debug_print("Category: ", category, " id = ", readstatuslist.get(category)) for book in books: # debug_print(' Title:', book.title, 'category: ', category) show_debug = self.is_debugging_title(book.title) if show_debug: debug_print(' Title="%s"'%book.title, 'category="%s"'%category) # debug_print(book) debug_print(' class=%s'%book.__class__) debug_print(' book.contentID="%s"'%book.contentID) debug_print(' book.application_id="%s"'%book.application_id) if book.application_id is None: continue category_added = False if book.contentID is None: debug_print(' Do not know ContentID - Title="%s"'%book.title) extension = os.path.splitext(book.path)[1] ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(book.path) book.contentID = self.contentid_from_path(book.path, ContentType) if category in self.bookshelvelist and self.supports_bookshelves(): if show_debug: debug_print(' length book.device_collections=%d'%len(book.device_collections)) if category not in book.device_collections: if show_debug: debug_print(' Setting bookshelf on device') self.set_bookshelf(connection, book, category) category_added = True elif category in readstatuslist.keys(): # Manage ReadStatus self.set_readstatus(connection, book.contentID, readstatuslist.get(category)) category_added = True elif category == 'Shortlist' and self.dbversion >= 14: if show_debug: debug_print(' Have an older version shortlist - %s'%book.title) # Manage FavouritesIndex/Shortlist if not self.supports_bookshelves(): if show_debug: debug_print(' and about to set it - %s'%book.title) self.set_favouritesindex(connection, book.contentID) category_added = True elif category in accessibilitylist.keys(): # Do not manage the Accessibility List pass if category_added and category not in book.device_collections: if show_debug: debug_print(' adding category to book.device_collections', book.device_collections) book.device_collections.append(category) else: if show_debug: debug_print(' category not added to book.device_collections', book.device_collections) debug_print("KoboTouch:update_device_database_collections - end for category='%s'"%category) elif bookshelf_attribute: # No collections but have set the shelf option # Since no collections exist the ReadStatus needs to be reset to 0 (Unread) debug_print("No Collections - reseting ReadStatus") if self.dbversion < 53: self.reset_readstatus(connection, oncard) if self.dbversion >= 14 and self.fwversion < self.min_fwversion_shelves: debug_print("No Collections - resetting FavouritesIndex") self.reset_favouritesindex(connection, oncard) # Set the series info and cleanup the bookshelves only if the firmware supports them and the user has set the options. 
if (self.supports_bookshelves() or self.supports_series()) and (bookshelf_attribute or update_series_details): debug_print("KoboTouch:update_device_database_collections - managing bookshelves and series.") self.series_set = 0 books_in_library = 0 for book in booklists: if book.application_id is not None: books_in_library += 1 show_debug = self.is_debugging_title(book.title) if show_debug: debug_print("KoboTouch:update_device_database_collections - book.title=%s" % book.title) if update_series_details: self.set_series(connection, book) if bookshelf_attribute: if show_debug: debug_print("KoboTouch:update_device_database_collections - about to remove a book from shelves book.title=%s" % book.title) self.remove_book_from_device_bookshelves(connection, book) book.device_collections.extend(book.kobo_collections) if not prefs['manage_device_metadata'] == 'manual' and delete_empty_shelves: debug_print("KoboTouch:update_device_database_collections - about to clear empty bookshelves") self.delete_empty_bookshelves(connection) debug_print("KoboTouch:update_device_database_collections - Number of series set=%d Number of books=%d" % (self.series_set, books_in_library)) self.dump_bookshelves(connection) debug_print('KoboTouch:update_device_database_collections - Finished ') def rebuild_collections(self, booklist, oncard): debug_print("KoboTouch:rebuild_collections") collections_attributes = self.get_collections_attributes() debug_print('KoboTouch:rebuild_collections: collection fields:', collections_attributes) self.update_device_database_collections(booklist, collections_attributes, oncard) def upload_cover(self, path, filename, metadata, filepath): ''' Upload book cover to the device. Default implementation does nothing. :param path: The full path to the directory where the associated book is located. :param filename: The name of the book file without the extension. :param metadata: metadata belonging to the book. 
Use metadata.thumbnail for cover :param filepath: The full path to the ebook file ''' debug_print("KoboTouch:upload_cover - path='%s' filename='%s' "%(path, filename)) debug_print(" filepath='%s' "%(filepath)) opts = self.settings() if not self.copying_covers(): # Building thumbnails disabled # debug_print('KoboTouch: not uploading cover') return # Only upload covers to SD card if that is supported if self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix)) and not self.supports_covers_on_sdcard(): return if not opts.extra_customization[self.OPT_UPLOAD_GRAYSCALE_COVERS]: uploadgrayscale = False else: uploadgrayscale = True # debug_print('KoboTouch: uploading cover') try: self._upload_cover(path, filename, metadata, filepath, uploadgrayscale, self.keep_cover_aspect()) except Exception as e: debug_print('KoboTouch: FAILED to upload cover=%s Exception=%s'%(filepath, str(e))) def imageid_from_contentid(self, ContentID): ImageID = ContentID.replace('/', '_') ImageID = ImageID.replace(' ', '_') ImageID = ImageID.replace(':', '_') ImageID = ImageID.replace('.', '_') return ImageID def images_path(self, path, imageId=None): if self._card_a_prefix and os.path.abspath(path).startswith(os.path.abspath(self._card_a_prefix)) and self.supports_covers_on_sdcard(): path_prefix = 'koboExtStorage/images-cache/' if self.supports_images_tree() else 'koboExtStorage/images/' path = os.path.join(self._card_a_prefix, path_prefix) else: path_prefix = '.kobo-images/' if self.supports_images_tree() else '.kobo/images/' path = os.path.join(self._main_prefix, path_prefix) if self.supports_images_tree() and imageId: hash1 = qhash(imageId) dir1 = hash1 & (0xff * 1) dir2 = (hash1 & (0xff00 * 1)) >> 8 path = os.path.join(path, "%s" % dir1, "%s" % dir2) if imageId: path = os.path.join(path, imageId) return path def _upload_cover(self, path, filename, metadata, filepath, uploadgrayscale, keep_cover_aspect=False): from calibre.utils.magick.draw import save_cover_data_to, identify_data debug_print("KoboTouch:_upload_cover - filename='%s' uploadgrayscale='%s' "%(filename, uploadgrayscale)) if metadata.cover: show_debug = self.is_debugging_title(filename) if show_debug: debug_print("KoboTouch:_upload_cover - path='%s'"%path, "filename='%s'"%filename) debug_print(" filepath='%s'"%filepath) cover = self.normalize_path(metadata.cover.replace('/', os.sep)) if os.path.exists(cover): # Get ContentID for Selected Book extension = os.path.splitext(filepath)[1] ContentType = self.get_content_type_from_extension(extension) if extension != '' else self.get_content_type_from_path(filepath) ContentID = self.contentid_from_path(filepath, ContentType) try: import sqlite3 as sqlite with closing(sqlite.connect(self.device_database_path())) as connection: # return bytestrings if the content cannot the decoded as unicode connection.text_factory = lambda x: unicode(x, "utf-8", "ignore") cursor = connection.cursor() t = (ContentID,) cursor.execute('select ImageId from Content where BookID is Null and ContentID = ?', t) result = cursor.fetchone() if result is None: ImageID = self.imageid_from_contentid(ContentID) debug_print("KoboTouch:_upload_cover - No rows exist in the database - generated ImageID='%s'" % ImageID) else: ImageID = result[0] # debug_print("ImageId: ", result[0]) cursor.close() if ImageID != None: path = self.images_path(path, ImageID) if show_debug: debug_print("KoboTouch:_upload_cover - About to loop over cover endings") image_dir = os.path.dirname(os.path.abspath(path)) if not 
os.path.exists(image_dir): debug_print("KoboTouch:_upload_cover - Image directory does not exust. Creating path='%s'" % (image_dir)) os.makedirs(image_dir) for ending, cover_options in self.cover_file_endings().items(): resize, min_dbversion, max_dbversion, isFullsize = cover_options if show_debug: debug_print("KoboTouch:_upload_cover - resize=%s min_dbversion=%d max_dbversion=%d" % (resize, min_dbversion, max_dbversion)) if self.dbversion >= min_dbversion and self.dbversion <= max_dbversion: if show_debug: debug_print("KoboTouch:_upload_cover - creating cover for ending='%s'"%ending) # , "resize'%s'"%resize) fpath = path + ending fpath = self.normalize_path(fpath.replace('/', os.sep)) with open(cover, 'rb') as f: data = f.read() if keep_cover_aspect: if isFullsize: resize = None else: width, height, fmt = identify_data(data) cover_aspect = width / height if cover_aspect > 1: resize = (resize[0], int(resize[0] / cover_aspect)) elif cover_aspect < 1: resize = (int(cover_aspect * resize[1]), resize[1]) # Return the data resized and in Grayscale if # required data = save_cover_data_to(data, 'dummy.jpg', grayscale=uploadgrayscale, resize_to=resize, return_data=True) with open(fpath, 'wb') as f: f.write(data) fsync(f) except Exception as e: err = str(e) debug_print("KoboTouch:_upload_cover - Exception string: %s"%err) raise else: debug_print("KoboTouch:_upload_cover - ImageID could not be retrieved from the database") def remove_book_from_device_bookshelves(self, connection, book): show_debug = self.is_debugging_title(book.title) # or True remove_shelf_list = set(book.current_shelves) - set(book.device_collections) if show_debug: debug_print('KoboTouch:remove_book_from_device_bookshelves - book.application_id="%s"'%book.application_id) debug_print('KoboTouch:remove_book_from_device_bookshelves - book.contentID="%s"'%book.contentID) debug_print('KoboTouch:remove_book_from_device_bookshelves - book.device_collections=', book.device_collections) debug_print('KoboTouch:remove_book_from_device_bookshelves - book.current_shelves=', book.current_shelves) debug_print('KoboTouch:remove_book_from_device_bookshelves - remove_shelf_list=', remove_shelf_list) if len(remove_shelf_list) == 0: return query = 'DELETE FROM ShelfContent WHERE ContentId = ?' values = [book.contentID,] if book.device_collections: placeholder = '?' placeholders = ','.join(placeholder for unused in book.device_collections) query += ' and ShelfName not in (%s)' % placeholders values.extend(book.device_collections) if show_debug: debug_print('KoboTouch:remove_book_from_device_bookshelves query="%s"'%query) debug_print('KoboTouch:remove_book_from_device_bookshelves values="%s"'%values) cursor = connection.cursor() cursor.execute(query, values) connection.commit() cursor.close() def set_filesize_in_device_database(self, connection, contentID, fpath): show_debug = self.is_debugging_title(fpath) if show_debug: debug_print('KoboTouch:set_filesize_in_device_database contentID="%s"'%contentID) test_query = 'SELECT ___FileSize ' \ 'FROM content ' \ 'WHERE ContentID = ? ' \ ' AND ContentType = 6' test_values = (contentID, ) updatequery = 'UPDATE content ' \ 'SET ___FileSize = ? ' \ 'WHERE ContentId = ? 
' \ 'AND ContentType = 6' cursor = connection.cursor() cursor.execute(test_query, test_values) result = cursor.fetchone() if result is None: if show_debug: debug_print(' Did not find a record - new book on device') elif os.path.exists(fpath): file_size = os.stat(self.normalize_path(fpath)).st_size if show_debug: debug_print(' Found a record - will update - ___FileSize=', result[0], ' file_size=', file_size) if file_size != int(result[0]): update_values = (file_size, contentID, ) cursor.execute(updatequery, update_values) if show_debug: debug_print(' Size updated.') connection.commit() cursor.close() # debug_print("KoboTouch:set_filesize_in_device_database - end") def delete_empty_bookshelves(self, connection): debug_print("KoboTouch:delete_empty_bookshelves - start") delete_query = ("DELETE FROM Shelf " "WHERE Shelf._IsSynced = 'false' " "AND Shelf.InternalName not in ('Shortlist', 'Wishlist') " "AND NOT EXISTS " "(SELECT 1 FROM ShelfContent c " "WHERE Shelf.Name = C.ShelfName " "AND c._IsDeleted <> 'true')") update_query = ("UPDATE Shelf " "SET _IsDeleted = 'true' " "WHERE Shelf._IsSynced = 'true' " "AND Shelf.InternalName not in ('Shortlist', 'Wishlist') " "AND NOT EXISTS " "(SELECT 1 FROM ShelfContent C " "WHERE Shelf.Name = C.ShelfName " "AND c._IsDeleted <> 'true')") delete_activity_query = ("DELETE FROM Activity " "WHERE Type = 'Shelf' " "AND NOT EXISTS " "(SELECT 1 FROM Shelf " "WHERE Shelf.Name = Activity.Id " "AND Shelf._IsDeleted = 'false')" ) cursor = connection.cursor() cursor.execute(delete_query) cursor.execute(update_query) if self.has_activity_table(): cursor.execute(delete_activity_query) connection.commit() cursor.close() debug_print("KoboTouch:delete_empty_bookshelves - end") def get_bookshelflist(self, connection): # Retrieve the list of booksehelves # debug_print('KoboTouch:get_bookshelflist') bookshelves = [] if not self.supports_bookshelves(): return bookshelves query = 'SELECT Name FROM Shelf WHERE _IsDeleted = "false"' cursor = connection.cursor() cursor.execute(query) # count_bookshelves = 0 for i, row in enumerate(cursor): bookshelves.append(row[0]) # count_bookshelves = i + 1 cursor.close() # debug_print("KoboTouch:get_bookshelflist - count bookshelves=" + unicode(count_bookshelves)) return bookshelves def set_bookshelf(self, connection, book, shelfName): show_debug = self.is_debugging_title(book.title) if show_debug: debug_print('KoboTouch:set_bookshelf book.ContentID="%s"'%book.contentID) debug_print('KoboTouch:set_bookshelf book.current_shelves="%s"'%book.current_shelves) if shelfName in book.current_shelves: if show_debug: debug_print(' book already on shelf.') return test_query = 'SELECT _IsDeleted FROM ShelfContent WHERE ShelfName = ? and ContentId = ?' test_values = (shelfName, book.contentID, ) addquery = 'INSERT INTO ShelfContent ("ShelfName","ContentId","DateModified","_IsDeleted","_IsSynced") VALUES (?, ?, ?, "false", "false")' add_values = (shelfName, book.contentID, time.strftime(self.TIMESTAMP_STRING, time.gmtime()), ) updatequery = 'UPDATE ShelfContent SET _IsDeleted = "false" WHERE ShelfName = ? and ContentId = ?' 
update_values = (shelfName, book.contentID, ) cursor = connection.cursor() cursor.execute(test_query, test_values) result = cursor.fetchone() if result is None: if show_debug: debug_print(' Did not find a record - adding') cursor.execute(addquery, add_values) elif result[0] == 'true': if show_debug: debug_print(' Found a record - updating - result=', result) cursor.execute(updatequery, update_values) connection.commit() cursor.close() # debug_print("KoboTouch:set_bookshelf - end") def check_for_bookshelf(self, connection, bookshelf_name): show_debug = self.is_debugging_title(bookshelf_name) if show_debug: debug_print('KoboTouch:check_for_bookshelf bookshelf_name="%s"'%bookshelf_name) test_query = 'SELECT InternalName, Name, _IsDeleted FROM Shelf WHERE Name = ?' test_values = (bookshelf_name, ) addquery = 'INSERT INTO "main"."Shelf"' add_values = (time.strftime(self.TIMESTAMP_STRING, time.gmtime()), bookshelf_name, time.strftime(self.TIMESTAMP_STRING, time.gmtime()), bookshelf_name, "false", "true", "false", ) if self.dbversion < 64: addquery += ' ("CreationDate","InternalName","LastModified","Name","_IsDeleted","_IsVisible","_IsSynced")'\ ' VALUES (?, ?, ?, ?, ?, ?, ?)' else: addquery += ' ("CreationDate", "InternalName","LastModified","Name","_IsDeleted","_IsVisible","_IsSynced", "Id")'\ ' VALUES (?, ?, ?, ?, ?, ?, ?, ?)' add_values = add_values +(bookshelf_name,) if show_debug: debug_print('KoboTouch:check_for_bookshelf addquery=', addquery) debug_print('KoboTouch:check_for_bookshelf add_values=', add_values) updatequery = 'UPDATE Shelf SET _IsDeleted = "false" WHERE Name = ?' cursor = connection.cursor() cursor.execute(test_query, test_values) result = cursor.fetchone() if result is None: if show_debug: debug_print(' Did not find a record - adding shelf "%s"' % bookshelf_name) cursor.execute(addquery, add_values) elif result[2] == 'true': debug_print('KoboTouch:check_for_bookshelf - Shelf "%s" is deleted - undeleting. result[2]="%s"' % (bookshelf_name, unicode(result[2]))) cursor.execute(updatequery, test_values) connection.commit() cursor.close() # Update the bookshelf list. self.bookshelvelist = self.get_bookshelflist(connection) # debug_print("KoboTouch:set_bookshelf - end") def remove_from_bookshelves(self, connection, oncard, ContentID=None, bookshelves=None): debug_print('KoboTouch:remove_from_bookshelf ContentID=', ContentID) if not self.supports_bookshelves(): return query = 'DELETE FROM ShelfContent' values = [] if ContentID is not None: query += ' WHERE ContentId = ?' values.append(ContentID) else: if oncard == 'carda': query += ' WHERE ContentID like \'file:///mnt/sd/%\'' elif oncard != 'carda' and oncard != 'cardb': query += ' WHERE ContentID not like \'file:///mnt/sd/%\'' if bookshelves: placeholder = '?' 
placeholders = ','.join(placeholder for unused in bookshelves) query += ' and ShelfName in (%s)' % placeholders values.append(bookshelves) debug_print('KoboTouch:remove_from_bookshelf query=', query) debug_print('KoboTouch:remove_from_bookshelf values=', values) cursor = connection.cursor() cursor.execute(query, values) connection.commit() cursor.close() debug_print("KoboTouch:remove_from_bookshelf - end") def set_series(self, connection, book): show_debug = self.is_debugging_title(book.title) if show_debug: debug_print('KoboTouch:set_series book.kobo_series="%s"'%book.kobo_series) debug_print('KoboTouch:set_series book.series="%s"'%book.series) debug_print('KoboTouch:set_series book.series_index=', book.series_index) if book.series == book.kobo_series: kobo_series_number = None if book.kobo_series_number is not None: try: kobo_series_number = float(book.kobo_series_number) except: kobo_series_number = None if kobo_series_number == book.series_index: if show_debug: debug_print('KoboTouch:set_series - series info the same - not changing') return update_query = 'UPDATE content SET Series=?, SeriesNumber==? where BookID is Null and ContentID = ?' if book.series is None: update_values = (None, None, book.contentID, ) elif book.series_index is None: # This should never happen, but... update_values = (book.series, None, book.contentID, ) else: update_values = (book.series, "%g"%book.series_index, book.contentID, ) cursor = connection.cursor() try: if show_debug: debug_print('KoboTouch:set_series - about to set - parameters:', update_values) cursor.execute(update_query, update_values) self.series_set += 1 except: debug_print(' Database Exception: Unable to set series info') raise else: connection.commit() cursor.close() if show_debug: debug_print("KoboTouch:set_series - end") @classmethod def settings(cls): opts = cls._config().parse() if isinstance(cls.EXTRA_CUSTOMIZATION_DEFAULT, list): if opts.extra_customization is None: opts.extra_customization = [] if not isinstance(opts.extra_customization, list): opts.extra_customization = [opts.extra_customization] if len(cls.EXTRA_CUSTOMIZATION_DEFAULT) > len(opts.extra_customization): extra_options_offset = 0 extra_customization = [] for i,d in enumerate(cls.EXTRA_CUSTOMIZATION_DEFAULT): if i >= len(opts.extra_customization) + extra_options_offset: extra_customization.append(d) elif d.__class__ != opts.extra_customization[i - extra_options_offset].__class__: extra_options_offset += 1 extra_customization.append(d) else: extra_customization.append(opts.extra_customization[i - extra_options_offset]) opts.extra_customization = extra_customization return opts def isAura(self): return self.detected_device.idProduct in self.AURA_PRODUCT_ID def isAuraHD(self): return self.detected_device.idProduct in self.AURA_HD_PRODUCT_ID def isAuraH2O(self): return self.detected_device.idProduct in self.AURA_H2O_PRODUCT_ID def isGlo(self): return self.detected_device.idProduct in self.GLO_PRODUCT_ID def isGloHD(self): return self.detected_device.idProduct in self.GLO_HD_PRODUCT_ID def isMini(self): return self.detected_device.idProduct in self.MINI_PRODUCT_ID def isTouch(self): return self.detected_device.idProduct in self.TOUCH_PRODUCT_ID def cover_file_endings(self): return self.GLO_COVER_FILE_ENDINGS if self.isGlo() or self.isAura() \ else self.AURA_HD_COVER_FILE_ENDINGS if self.isAuraHD() or self.isAuraH2O() or self.isGloHD() \ else self.COVER_FILE_ENDINGS def set_device_name(self): device_name = self.gui_name if self.isAura(): device_name = 'Kobo Aura' elif 
self.isAuraHD(): device_name = 'Kobo Aura HD' elif self.isAuraH2O(): device_name = 'Kobo Aura H2O' elif self.isGlo(): device_name = 'Kobo Glo' elif self.isGloHD(): device_name = 'Kobo Glo HD' elif self.isMini(): device_name = 'Kobo Mini' elif self.isTouch(): device_name = 'Kobo Touch' self.__class__.gui_name = device_name return device_name def copying_covers(self): opts = self.settings() return opts.extra_customization[self.OPT_UPLOAD_COVERS] or opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO] def keep_cover_aspect(self): opts = self.settings() return opts.extra_customization[self.OPT_KEEP_COVER_ASPECT_RATIO] def modifying_epub(self): return self.modifying_css() def modifying_css(self): opts = self.settings() return opts.extra_customization[self.OPT_MODIFY_CSS] def supports_bookshelves(self): return self.dbversion >= self.min_supported_dbversion def supports_series(self): return self.dbversion >= self.min_dbversion_series def supports_kobo_archive(self): return self.dbversion >= self.min_dbversion_archive def supports_covers_on_sdcard(self): return self.dbversion >= self.min_dbversion_images_on_sdcard and self.fwversion >= self.min_fwversion_images_on_sdcard def supports_images_tree(self): return self.fwversion >= self.min_fwversion_images_tree def has_externalid(self): return self.dbversion >= self.min_dbversion_externalid def has_activity_table(self): return self.dbversion >= self.min_dbversion_activity def modify_database_check(self, function): # Checks to see whether the database version is supported # and whether the user has chosen to support the firmware version # debug_print("KoboTouch:modify_database_check - self.fwversion > self.max_supported_fwversion=", self.fwversion > self.max_supported_fwversion) if self.dbversion > self.supported_dbversion or self.fwversion > self.max_supported_fwversion: # Unsupported database opts = self.settings() if not opts.extra_customization[self.OPT_SUPPORT_NEWER_FIRMWARE]: debug_print('The database has been upgraded past supported version') self.report_progress(1.0, _('Removing books from device...')) from calibre.devices.errors import UserFeedback raise UserFeedback(_("Kobo database version unsupported - See details"), _('Your Kobo is running an updated firmware/database version.' ' As calibre does not know about this updated firmware,' ' database editing is disabled, to prevent corruption.' ' You can still send books to your Kobo with calibre, ' ' but deleting books and managing collections is disabled.' ' If you are willing to experiment and know how to reset' ' your Kobo to Factory defaults, you can override this' ' check by right clicking the device icon in calibre and' ' selecting "Configure this device" and then the ' ' "Attempt to support newer firmware" option.' ' Doing so may require you to perform a factory reset of' ' your Kobo.') + ( '\nDevice database version: %s.' 
'\nDevice firmware version: %s' ) % (self.dbversion, self.fwversion), UserFeedback.WARN) return False else: # The user chose to edit the database anyway return True else: # Supported database version return True @classmethod def is_debugging_title(cls, title): if not DEBUG: return False # debug_print("KoboTouch:is_debugging - title=", title) is_debugging = False opts = cls.settings() if opts.extra_customization: debugging_title = opts.extra_customization[cls.OPT_DEBUGGING_TITLE] is_debugging = len(debugging_title) > 0 and title.lower().find(debugging_title.lower()) >= 0 or len(title) == 0 return is_debugging def dump_bookshelves(self, connection): if not (DEBUG and self.supports_bookshelves() and False): return debug_print('KoboTouch:dump_bookshelves - start') shelf_query = 'SELECT * FROM Shelf' shelfcontent_query = 'SELECT * FROM ShelfContent' placeholder = '%s' cursor = connection.cursor() prints('\nBookshelves on device:') cursor.execute(shelf_query) i = 0 for row in cursor: placeholders = ', '.join(placeholder for unused in row) prints(placeholders%row) i += 1 if i == 0: prints("No shelves found!!") else: prints("Number of shelves=%d"%i) prints('\nBooks on shelves on device:') cursor.execute(shelfcontent_query) i = 0 for row in cursor: placeholders = ', '.join(placeholder for unused in row) prints(placeholders%row) i += 1 if i == 0: prints("No books are on any shelves!!") else: prints("Number of shelved books=%d"%i) cursor.close() debug_print('KoboTouch:dump_bookshelves - end')
gpl-3.0
6,730,758,367,339,175,000
49.074966
237
0.549732
false
4.174212
false
false
false
gfyoung/pandas
pandas/tests/io/pytables/test_complex.py
1
6374
from warnings import catch_warnings import numpy as np import pytest import pandas.util._test_decorators as td import pandas as pd from pandas import DataFrame, Series import pandas._testing as tm from pandas.tests.io.pytables.common import ensure_clean_path, ensure_clean_store from pandas.io.pytables import read_hdf # TODO(ArrayManager) HDFStore relies on accessing the blocks pytestmark = td.skip_array_manager_not_yet_implemented def test_complex_fixed(setup_path): df = DataFrame( np.random.rand(4, 5).astype(np.complex64), index=list("abcd"), columns=list("ABCDE"), ) with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df") reread = read_hdf(path, "df") tm.assert_frame_equal(df, reread) df = DataFrame( np.random.rand(4, 5).astype(np.complex128), index=list("abcd"), columns=list("ABCDE"), ) with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df") reread = read_hdf(path, "df") tm.assert_frame_equal(df, reread) def test_complex_table(setup_path): df = DataFrame( np.random.rand(4, 5).astype(np.complex64), index=list("abcd"), columns=list("ABCDE"), ) with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df", format="table") reread = read_hdf(path, "df") tm.assert_frame_equal(df, reread) df = DataFrame( np.random.rand(4, 5).astype(np.complex128), index=list("abcd"), columns=list("ABCDE"), ) with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df", format="table", mode="w") reread = read_hdf(path, "df") tm.assert_frame_equal(df, reread) def test_complex_mixed_fixed(setup_path): complex64 = np.array( [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64 ) complex128 = np.array( [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128 ) df = DataFrame( { "A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex64, "D": complex128, "E": [1.0, 2.0, 3.0, 4.0], }, index=list("abcd"), ) with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df") reread = read_hdf(path, "df") tm.assert_frame_equal(df, reread) def test_complex_mixed_table(setup_path): complex64 = np.array( [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64 ) complex128 = np.array( [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128 ) df = DataFrame( { "A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex64, "D": complex128, "E": [1.0, 2.0, 3.0, 4.0], }, index=list("abcd"), ) with ensure_clean_store(setup_path) as store: store.append("df", df, data_columns=["A", "B"]) result = store.select("df", where="A>2") tm.assert_frame_equal(df.loc[df.A > 2], result) with ensure_clean_path(setup_path) as path: df.to_hdf(path, "df", format="table") reread = read_hdf(path, "df") tm.assert_frame_equal(df, reread) def test_complex_across_dimensions_fixed(setup_path): with catch_warnings(record=True): complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) s = Series(complex128, index=list("abcd")) df = DataFrame({"A": s, "B": s}) objs = [s, df] comps = [tm.assert_series_equal, tm.assert_frame_equal] for obj, comp in zip(objs, comps): with ensure_clean_path(setup_path) as path: obj.to_hdf(path, "obj", format="fixed") reread = read_hdf(path, "obj") comp(obj, reread) def test_complex_across_dimensions(setup_path): complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) s = Series(complex128, index=list("abcd")) df = DataFrame({"A": s, "B": s}) with catch_warnings(record=True): objs = [df] comps = [tm.assert_frame_equal] for obj, comp in zip(objs, comps): with ensure_clean_path(setup_path) as path: obj.to_hdf(path, 
"obj", format="table") reread = read_hdf(path, "obj") comp(obj, reread) def test_complex_indexing_error(setup_path): complex128 = np.array( [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128 ) df = DataFrame( {"A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex128}, index=list("abcd"), ) msg = ( "Columns containing complex values can be stored " "but cannot be indexed when using table format. " "Either use fixed format, set index=False, " "or do not include the columns containing complex " "values to data_columns when initializing the table." ) with ensure_clean_store(setup_path) as store: with pytest.raises(TypeError, match=msg): store.append("df", df, data_columns=["C"]) def test_complex_series_error(setup_path): complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j]) s = Series(complex128, index=list("abcd")) msg = ( "Columns containing complex values can be stored " "but cannot be indexed when using table format. " "Either use fixed format, set index=False, " "or do not include the columns containing complex " "values to data_columns when initializing the table." ) with ensure_clean_path(setup_path) as path: with pytest.raises(TypeError, match=msg): s.to_hdf(path, "obj", format="t") with ensure_clean_path(setup_path) as path: s.to_hdf(path, "obj", format="t", index=False) reread = read_hdf(path, "obj") tm.assert_series_equal(s, reread) def test_complex_append(setup_path): df = DataFrame( {"a": np.random.randn(100).astype(np.complex128), "b": np.random.randn(100)} ) with ensure_clean_store(setup_path) as store: store.append("df", df, data_columns=["b"]) store.append("df", df) result = store.select("df") tm.assert_frame_equal(pd.concat([df, df], 0), result)
bsd-3-clause
-5,007,679,795,284,820,000
30.554455
84
0.562441
false
3.067372
true
false
false
SelfDrivUTT/selfdrivutt
robot/raspberry/controls.py
1
3292
import socket
import sys
import os
import curses
from threading import Thread


class RemoteControlServer(object):
    """TCP server that receives remote-control commands and echoes them back."""

    def __init__(self):
        super(RemoteControlServer, self).__init__()
        self.data = ''
        self.stopped = False
        self.HOST = os.environ.get('COMMAND_HOST', 'localhost')
        self.PORT = os.environ.get('COMMAND_PORT', 9089)

    def start(self):
        self.socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        print('Socket created')
        self.socket_server.bind((self.HOST, self.PORT))
        print('Socket bind complete')
        self.socket_server.listen(10)
        print('Socket now listening')
        # Accept the connection once (for starter)
        self.conn, self.addr = self.socket_server.accept()
        print('Connected with ' + self.addr[0] + ':' + str(self.addr[1]))
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        while True:
            try:
                self.data = self.conn.recv(1024)
                self.conn.send(self.data)
                print(self.data)
                if self.data == 27:
                    self.stop()
                    return
            except socket.error as e:
                print(e)
                self.stop()
                return
            else:
                if len(self.data) == 0:
                    print('orderly shutdown on server end')
                    self.stop()
                else:
                    print(self.data)

    def read(self):
        return self.data

    def stop(self):
        self.stopped = True
        self.conn.close()
        self.socket_server.close()


class CursesControl(object):
    """Curses-based keyboard control: maps key presses to command strings."""

    def __init__(self):
        super(CursesControl, self).__init__()
        # self.screen.nodelay()
        self.event = 'unload'
        self.stopped = False

    def start(self):
        self.screen = curses.initscr()
        Thread(target=self.update, args=()).start()
        return self

    def update(self):
        while True:
            try:
                curses.noecho()
                curses.curs_set(0)
                self.screen.keypad(1)
                self.screen.addstr("Press a key, " + str(self.event))
                self.event = self.screen.getch()
            finally:
                curses.endwin()
            if self.stopped or self.event == 27:
                return

    def read(self):
        if self.event == curses.KEY_LEFT:
            command = 'left'
        elif self.event == curses.KEY_RIGHT:
            command = 'right'
        elif self.event == curses.KEY_UP:
            command = 'up'
        elif self.event == curses.KEY_DOWN:
            command = 'down'
        elif self.event == 32:  # SPACE
            command = 'stop'
        elif self.event == 27:  # ESC key
            command = 'quit'
        elif self.event == ord('p'):  # P key
            command = 'auto_logic_based'
        elif self.event == ord('o'):  # O key
            command = 'stream'
        elif self.event == ord('m'):  # M key
            command = 'auto_neural_network'
        else:
            command = '?'
        return command

    def stop(self):
        self.stopped = True
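A minimal client-side sketch (an editor's illustration, not part of the original module): RemoteControlServer above accepts a single TCP connection and echoes back whatever it receives, so a client only has to connect to COMMAND_HOST/COMMAND_PORT and send command strings such as 'up' or 'stop'. The helper name run_client and its defaults are assumptions that mirror the environment-variable defaults above.

import socket

def run_client(host='localhost', port=9089):
    # Connect once (the server accepts a single connection) and send commands.
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    for command in ('up', 'left', 'stop'):
        client.send(command.encode())
        print(client.recv(1024))  # the server echoes each command back
    client.close()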
mit
2,659,452,182,422,635,000
28.132743
101
0.512151
false
4.182973
false
false
false
all-of-us/raw-data-repository
rdr_service/alembic/versions/434fb0f05794_add_ignore_and_dev_note_to_genomics_.py
1
1673
"""add ignore and dev note to genomics models. Revision ID: 434fb0f05794 Revises: 994dfe6e53ee Create Date: 2020-09-30 14:39:16.244636 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = '434fb0f05794' down_revision = '994dfe6e53ee' branch_labels = None depends_on = None def upgrade(engine_name): globals()["upgrade_%s" % engine_name]() def downgrade(engine_name): globals()["downgrade_%s" % engine_name]() def upgrade_rdr(): # ### commands auto generated by Alembic - please adjust! ### op.add_column('genomic_gc_validation_metrics', sa.Column('dev_note', sa.String(length=255), nullable=True)) op.add_column('genomic_gc_validation_metrics', sa.Column('ignore_flag', sa.SmallInteger(), nullable=True)) op.add_column('genomic_set_member', sa.Column('dev_note', sa.String(length=255), nullable=True)) op.add_column('genomic_set_member_history', sa.Column('dev_note', sa.String(length=255), nullable=True)) # ### end Alembic commands ### def downgrade_rdr(): # ### commands auto generated by Alembic - please adjust! ### op.drop_column('genomic_set_member', 'dev_note') op.drop_column('genomic_set_member_history', 'dev_note') op.drop_column('genomic_gc_validation_metrics', 'ignore_flag') op.drop_column('genomic_gc_validation_metrics', 'dev_note') # ### end Alembic commands ### def upgrade_metrics(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ### def downgrade_metrics(): # ### commands auto generated by Alembic - please adjust! ### pass # ### end Alembic commands ###
bsd-3-clause
-7,922,045,653,497,436,000
28.875
111
0.679617
false
3.267578
false
false
false
dannyroberts/eulxml
eulxml/xmlmap/premis.py
1
5516
# file eulxml/xmlmap/premis.py # # Copyright 2010,2011 Emory University Libraries # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ''' :mod:`eulxml.xmlmap` classes for dealing with the `PREMIS <http://www.loc.gov/standards/premis/>`_ metadata format for preservation metadata. ----- ''' from eulxml import xmlmap PREMIS_NAMESPACE = 'info:lc/xmlns/premis-v2' 'authoritative namespace for PREMIS' PREMIS_SCHEMA = 'http://www.loc.gov/standards/premis/v2/premis-v2-1.xsd' 'authoritative schema location for PREMIS' class BasePremis(xmlmap.XmlObject): '''Base PREMIS class with namespace declaration common to all PREMIS XmlObjects. .. Note:: This class is intended mostly for internal use, but could be useful when extending or adding additional PREMIS :class:`~eulxml.xmlmap.XmlObject` classes. The :attr:`PREMIS_NAMESPACE` is mapped to the prefix **p**. ''' ROOT_NS = PREMIS_NAMESPACE ROOT_NAMESPACES = { 'p': PREMIS_NAMESPACE, 'xsi': 'http://www.w3.org/2001/XMLSchema-instance' } class PremisRoot(BasePremis): '''Base class with a schema declaration for any of the root/stand-alone PREMIS elements: * ``<premis>`` - :class:`Premis` * ``<object>`` - :class:`Object` * ``<event>`` - :class:`Event` * ``<agent>`` * ``<rights>`` ''' XSD_SCHEMA = PREMIS_SCHEMA class Object(PremisRoot): '''Preliminary :class:`~eulxml.xmlmap.XmlObject` for a PREMIS object. Curently only includes the minimal required fields. ''' ROOT_NAME = 'object' type = xmlmap.StringField('@xsi:type') # file, representation, bitstream '''type of object (e.g., file, representation, bitstream). .. Note:: To be schema valid, object types must be in the PREMIS namespace, e.g.:: from eulxml.xmlmap import premis obj = premis.Object() obj.type = "p:file" ''' id_type = xmlmap.StringField('p:objectIdentifier/p:objectIdentifierType') 'identifier type (`objectIdentifier/objectIdentifierType`)' id = xmlmap.StringField('p:objectIdentifier/p:objectIdentifierValue') 'identifier value (`objectIdentifier/objectIdentifierValue`)' class Event(PremisRoot): '''Preliminary :class:`~eulxml.xmlmap.XmlObject` for a PREMIS event. .. Note:: The PREMIS schema requires that elements occur in a specified order, which :mod:`eulxml` does not currently handle or manage. 
As a work-around, when creating a new :class:`Event` from scratch, you should set the following required fields in this order: identifier (:attr:`id` and :attr:`ad_type` ''' ROOT_NAME = 'event' type = xmlmap.StringField('p:eventType') 'event type (``eventType``)' id_type = xmlmap.StringField('p:eventIdentifier/p:eventIdentifierType') 'identifier type (`eventIdentifier/eventIdentifierType`)' id = xmlmap.StringField('p:eventIdentifier/p:eventIdentifierValue') 'identifier value (`eventIdentifier/eventIdentifierValue`)' date = xmlmap.StringField('p:eventDateTime') 'date/time for the event (`eventDateTime`)' detail = xmlmap.StringField('p:eventDetail', required=False) 'event detail (`eventDetail`)' outcome = xmlmap.StringField('p:eventOutcomeInformation/p:eventOutcome', required=False) '''outcome of the event (`eventOutcomeInformation/eventOutcome`). .. Note:: In this preliminary implementation, the outcome detail fields are not mapped. ''' # leaving out outcome detail for now... # agent (optional, could be repeated) agent_type = xmlmap.StringField('p:linkingAgentIdentifier/p:linkingAgentIdentifierType') agent_id = xmlmap.StringField('p:linkingAgentIdentifier/p:linkingAgentIdentifierValue') # object (optional, could be repeated) object_type = xmlmap.StringField('p:linkingObjectIdentifier/p:linkingObjectIdentifierType') object_id = xmlmap.StringField('p:linkingObjectIdentifier/p:linkingObjectIdentifierValue') class Premis(PremisRoot): '''Preliminary :class:`~eulxml.xmlmap.XmlObject` for a PREMIS container element that can contain any of the other top-level PREMIS elements. Curently only includes mappings for a single object and list of events. ''' ROOT_NAME = 'premis' version = xmlmap.StringField('@version') '''Version of PREMIS in use; by default, new instances of :class:`Premis` will be initialized with a version of 2.1''' object = xmlmap.NodeField('p:object', Object) 'a single PREMIS :class:`object`' events = xmlmap.NodeListField('p:event', Event) 'list of PREMIS events, as instances of :class:`Event`' def __init__(self, *args, **kwargs): # version is required for schema-validity; don't override a # user-supplied version, but otherwise default to 2.1 if 'version' not in kwargs: kwargs['version'] = '2.1' super(Premis, self).__init__(*args, **kwargs)
apache-2.0
2,107,150,789,629,381,400
36.020134
95
0.688724
false
3.762619
false
false
false
mightbejosh/dj-braintree
djbraintree/admin.py
1
6860
# -*- coding: utf-8 -*- """ Note: Django 1.4 support was dropped in #107 https://github.com/pydanny/dj-braintree/pull/107 """ from django.contrib import admin from .models import Transaction from .models import Customer class CustomerHasCardListFilter(admin.SimpleListFilter): title = "card presence" parameter_name = "has_card" def lookups(self, request, model_admin): return [ ["yes", "Has Card"], ["no", "Does Not Have a Card"] ] def queryset(self, request, queryset): if self.value() == "yes": return queryset.exclude(card_fingerprint="") if self.value() == "no": return queryset.filter(card_fingerprint="") class InvoiceCustomerHasCardListFilter(admin.SimpleListFilter): title = "card presence" parameter_name = "has_card" def lookups(self, request, model_admin): return [ ["yes", "Has Card"], ["no", "Does Not Have a Card"] ] def queryset(self, request, queryset): if self.value() == "yes": return queryset.exclude(customer__card_fingerprint="") if self.value() == "no": return queryset.filter(customer__card_fingerprint="") # # class CustomerSubscriptionStatusListFilter(admin.SimpleListFilter): # title = "subscription status" # parameter_name = "sub_status" # # def lookups(self, request, model_admin): # statuses = [ # [x, x.replace("_", " ").title()] # for x in CurrentSubscription.objects.all().values_list( # "status", # flat=True # ).distinct() # ] # statuses.append(["none", "No Subscription"]) # return statuses # # def queryset(self, request, queryset): # if self.value() is None: # return queryset.all() # else: # return queryset.filter(current_subscription__status=self.value()) # # # def send_charge_receipt(modeladmin, request, queryset): # """ # Function for sending receipts from the admin if a receipt is not sent for # a specific charge. # """ # for charge in queryset: # charge.send_receipt() # # # admin.site.register( # Charge, # readonly_fields=('created',), # list_display=[ # "braintree_id", # "customer", # "amount", # "description", # "paid", # "disputed", # "refunded", # "fee", # "receipt_sent", # "created" # ], # search_fields=[ # "braintree_id", # "customer__braintree_id", # "card_last_4", # "invoice__braintree_id" # ], # list_filter=[ # "paid", # "disputed", # "refunded", # "card_kind", # "created" # ], # raw_id_fields=[ # "customer", # "invoice" # ], # actions=(send_charge_receipt,), # ) # # admin.site.register( # EventProcessingException, # readonly_fields=('created',), # list_display=[ # "message", # "event", # "created" # ], # search_fields=[ # "message", # "traceback", # "data" # ], # ) # # admin.site.register( # Event, # raw_id_fields=["customer"], # readonly_fields=('created',), # list_display=[ # "braintree_id", # "kind", # "livemode", # "valid", # "processed", # "created" # ], # list_filter=[ # "kind", # "created", # "valid", # "processed" # ], # search_fields=[ # "braintree_id", # "customer__braintree_id", # "validated_message" # ], # ) # # # class CurrentSubscriptionInline(admin.TabularInline): # model = CurrentSubscription # # # def subscription_status(obj): # return obj.current_subscription.status # subscription_status.short_description = "Subscription Status" # # # admin.site.register( # Customer, # raw_id_fields=["subscriber"], # readonly_fields=('created',), # list_display=[ # "braintree_id", # "subscriber", # "card_kind", # "card_last_4", # subscription_status, # "created" # ], # list_filter=[ # "card_kind", # CustomerHasCardListFilter, # CustomerSubscriptionStatusListFilter # ], # search_fields=[ # "braintree_id" # ], # inlines=[CurrentSubscriptionInline] # ) # # # 
class InvoiceItemInline(admin.TabularInline): # model = InvoiceItem # # # def customer_has_card(obj): # """ Returns True if the customer has a card attached to its account.""" # return obj.customer.card_fingerprint != "" # customer_has_card.short_description = "Customer Has Card" # # # def customer_email(obj): # """ Returns a string representation of the customer's email.""" # return str(obj.customer.subscriber.email) # customer_email.short_description = "Customer" # # # admin.site.register( # Invoice, # raw_id_fields=["customer"], # readonly_fields=('created',), # list_display=[ # "braintree_id", # "paid", # "closed", # customer_email, # customer_has_card, # "period_start", # "period_end", # "subtotal", # "total", # "created" # ], # search_fields=[ # "braintree_id", # "customer__braintree_id" # ], # list_filter=[ # InvoiceCustomerHasCardListFilter, # "paid", # "closed", # "attempted", # "attempts", # "created", # "date", # "period_end", # "total" # ], # inlines=[InvoiceItemInline] # ) # # # admin.site.register( # Transfer, # raw_id_fields=["event"], # readonly_fields=('created',), # list_display=[ # "braintree_id", # "amount", # "status", # "date", # "description", # "created" # ], # search_fields=[ # "braintree_id", # "event__braintree_id" # ] # ) # # # class PlanAdmin(admin.ModelAdmin): # # def save_model(self, request, obj, form, change): # """Update or create objects using our custom methods that # sync with Braintree.""" # # if change: # obj.update_name() # # else: # Plan.get_or_create(**form.cleaned_data) # # def get_readonly_fields(self, request, obj=None): # readonly_fields = list(self.readonly_fields) # if obj: # readonly_fields.extend([ # 'braintree_id', # 'amount', # 'currency', # 'interval', # 'interval_count', # 'trial_period_days']) # # return readonly_fields # # admin.site.register(Plan, PlanAdmin)
bsd-3-clause
6,652,920,508,881,095,000
23.326241
79
0.525364
false
3.418037
false
false
false
ericchill/gnofract4d
fract4d/fc.py
1
16906
#!/usr/bin/env python # A compiler from UltraFractal or Fractint formula files to C code # The UltraFractal manual is the best current description of the file # format. You can download it from http://www.ultrafractal.com/uf3-manual.zip # The implementation is based on the outline in "Modern Compiler # Implementation in ML: basic techniques" (Appel 1997, Cambridge) # Overall structure: # fractlexer.py and fractparser.py are the lexer and parser, respectively. # They use the PLY package to do lexing and SLR parsing, and produce as # output an abstract syntax tree (defined in the Absyn module). # The Translate module type-checks the code, maintains the symbol # table (symbol.py) and converts it into an intermediate form (ir.py) # Canon performs several simplifying passes on the IR to make it easier # to deal with, then codegen converts it into a linear sequence of # simple C instructions # Finally we invoke the C compiler to convert to a native code shared library import getopt import sys import commands import os.path import stat import random import hashlib import re import copy import fractconfig import fractparser import fractlexer import translate import codegen import fracttypes import absyn import preprocessor import cache import gradient class FormulaTypes: FRACTAL = 0 COLORFUNC = 1 TRANSFORM = 2 GRADIENT = 3 NTYPES = 4 GRAD_UGR=0 GRAD_MAP=1 GRAD_GGR=2 GRAD_CS=3 matches = [ re.compile(r'(\.frm\Z)|(\.ufm\Z)', re.IGNORECASE), re.compile(r'(\.cfrm\Z)|(\.ucl\Z)', re.IGNORECASE), re.compile(r'\.uxf\Z', re.IGNORECASE), re.compile(r'(\.ugr\Z)|(\.map\Z)|(\.ggr\Z)|(\.cs\Z)|(\.pal\Z)', re.IGNORECASE) ] # indexed by FormulaTypes above extensions = [ "frm", "cfrm", "uxf", "ggr", "pal"] @staticmethod def extension_from_type(t): return FormulaTypes.extensions[t] @staticmethod def guess_type_from_filename(filename): if FormulaTypes.matches[FormulaTypes.FRACTAL].search(filename): return translate.T elif FormulaTypes.matches[FormulaTypes.COLORFUNC].search(filename): return translate.ColorFunc elif FormulaTypes.matches[FormulaTypes.TRANSFORM].search(filename): return translate.Transform elif FormulaTypes.matches[FormulaTypes.GRADIENT].search(filename): return translate.GradientFunc @staticmethod def guess_formula_type_from_filename(filename): for i in xrange(FormulaTypes.NTYPES): if FormulaTypes.matches[i].search(filename): return i raise ValueError("Unknown file type for '%s'" % filename) @staticmethod def guess_gradient_subtype_from_filename(filename): filename = filename.lower() if filename.endswith(".ugr"): return FormulaTypes.GRAD_UGR if filename.endswith(".map") or filename.endswith(".pal"): return FormulaTypes.GRAD_MAP if filename.endswith(".ggr"): return FormulaTypes.GRAD_GGR if filename.endswith(".cs"): return FormulaTypes.GRAD_CS raise ValueError("Unknown gradient type for '%s'" % filename) @staticmethod def isFormula(filename): for matcher in FormulaTypes.matches: if matcher.search(filename): return True return False class FormulaFile: def __init__(self, formulas, contents,mtime,filename): self.formulas = formulas self.contents = contents self.mtime = mtime self.filename = filename self.file_backed = True def out_of_date(self): return self.file_backed and \ os.stat(self.filename)[stat.ST_MTIME] > self.mtime def get_formula(self,formula): return self.formulas.get(formula) def get_formula_names(self, skip_type=None): '''return all the coloring funcs except those marked as only suitable for the OTHER kind (inside vs outside)''' names = [] for name in self.formulas.keys(): sym = 
self.formulas[name].symmetry if sym == None or sym == "BOTH" or sym != skip_type: names.append(name) return names class Compiler: def __init__(self): self.parser = fractparser.parser self.lexer = fractlexer.lexer self.c_code = "" self.path_lists = [ [], [], [], [] ] self.cache = cache.T() self.cache_dir = os.path.expanduser("~/.gnofract4d-cache/") self.init_cache() if 'win' != sys.platform[:3]: self.compiler_name = "gcc" self.flags = "-fPIC -DPIC -g -O3 -shared" self.output_flag = "-o " self.libs = "-lm" else: self.compiler_name = "cl" self.flags = "/EHsc /Gd /nologo /W3 /LD /MT /TP /DWIN32 /DWINDOWS /D_USE_MATH_DEFINES" self.output_flag = "/Fe" self.libs = "/link /LIBPATH:\"%s/fract4d\" fract4d_stdlib.lib" % sys.path[0] # /DELAYLOAD:fract4d_stdlib.pyd DelayImp.lib self.tree_cache = {} self.leave_dirty = False self.next_inline_number = 0 def _get_files(self): return self.cache.files files = property(_get_files) def update_from_prefs(self,prefs): self.compiler_name = prefs.get("compiler","name") self.flags = prefs.get("compiler","options") self.set_func_path_list(prefs.get_list("formula_path")) self.path_lists[FormulaTypes.GRADIENT] = copy.copy( prefs.get_list("map_path")) def set_flags(self,flags): self.flags = flags def add_path(self,path,type): self.path_lists[type].append(path) def add_func_path(self,path): self.path_lists[FormulaTypes.FRACTAL].append(path) self.path_lists[FormulaTypes.COLORFUNC].append(path) self.path_lists[FormulaTypes.TRANSFORM].append(path) def set_func_path_list(self,list): self.path_lists[FormulaTypes.FRACTAL] = copy.copy(list) self.path_lists[FormulaTypes.COLORFUNC] = copy.copy(list) self.path_lists[FormulaTypes.TRANSFORM] = copy.copy(list) def init_cache(self): self.cache.init() def find_files(self,type): files = {} for dir in self.path_lists[type]: if not os.path.isdir(dir): continue for file in os.listdir(dir): if os.path.isfile(os.path.join(dir,file)): files[file] = 1 return files.keys() def find_files_of_type(self,type): matcher = FormulaTypes.matches[type] return [file for file in self.find_files(type) if matcher.search(file)] def find_formula_files(self): return self.find_files_of_type(FormulaTypes.FRACTAL) def find_colorfunc_files(self): return self.find_files_of_type(FormulaTypes.COLORFUNC) def find_transform_files(self): return self.find_files_of_type(FormulaTypes.TRANSFORM) def get_text(self,fname): file = self.files.get(fname) if not file: self.load_formula_file(fname) return self.files[fname].contents def nextInlineFile(self,type): self.next_inline_number += 1 ext = FormulaTypes.extension_from_type(type) return "__inline__%d.%s" % (self.next_inline_number, ext) def add_inline_formula(self,formbody, formtype): # formbody contains a string containing the contents of a formula formulas = self.parse_file(formbody) fname = self.nextInlineFile(formtype) ff = FormulaFile(formulas,formbody,0,fname) ff.file_backed = False self.files[fname] = ff names = ff.get_formula_names() if len(names) == 0: formName = "error" else: formName = names[0] return (fname, formName) def last_chance(self,filename): '''does nothing here, but can be overridden by GUI to prompt user.''' raise IOError("Can't find formula file %s in formula search path" % \ filename) def compile_one(self,formula): self.compile(formula) t = translate.T(absyn.Formula("",[],-1)) cg = self.compile(t) t.merge(formula,"") outputfile = os.path.abspath(self.generate_code(t, cg)) return outputfile def compile_all(self,formula,cf0,cf1,transforms,options={}): self.compile(formula,options) 
self.compile(cf0,options) self.compile(cf1,options) for transform in transforms: self.compile(transform,options) # create temp empty formula and merge everything into that t = translate.T(absyn.Formula("",[],-1)) cg = self.compile(t,options) t.merge(formula,"") t.merge(cf0,"cf0_") t.merge(cf1,"cf1_") for transform in transforms: t.merge(transform,"t_") outputfile = os.path.abspath(self.generate_code(t, cg)) return outputfile def find_file(self,filename,type): if os.path.exists(filename): dir = os.path.dirname(filename) if self.path_lists[type].count(dir) == 0: # add directory to search path self.path_lists[type].append(dir) return filename filename = os.path.basename(filename) for path in self.path_lists[type]: f = os.path.join(path,filename) if os.path.exists(f): return f return self.last_chance(filename) def add_endlines(self,result,final_line): "Add info on which is the final source line of each formula" if None == result: return l = len(result.children) for i in xrange(l): if i == l - 1: result.children[i].last_line = final_line else: result.children[i].last_line = result.children[i+1].pos-1 def parse_file(self,s): self.lexer.lineno = 1 result = None try: pp = preprocessor.T(s) result = self.parser.parse(pp.out()) except preprocessor.Error, err: # create an Error formula listing the problem result = self.parser.parse('error {\n}\n') result.children[0].children[0] = \ absyn.PreprocessorError(str(err), -1) #print result.pretty() self.add_endlines(result,self.lexer.lineno) formulas = {} for formula in result.children: formulas[formula.leaf] = formula return formulas def load_formula_file(self, filename): try: type = FormulaTypes.guess_formula_type_from_filename(filename) filename = self.find_file(filename,type) s = open(filename,"r").read() # read in a whole file basefile = os.path.basename(filename) mtime = os.stat(filename)[stat.ST_MTIME] if type == FormulaTypes.GRADIENT: # don't try and parse gradient files apart from UGRs subtype = FormulaTypes.guess_gradient_subtype_from_filename(filename) if subtype == FormulaTypes.GRAD_UGR: formulas = self.parse_file(s) else: formulas = {} else: formulas = self.parse_file(s) ff = FormulaFile(formulas,s,mtime,filename) self.files[basefile] = ff return ff except Exception, err: #print "Error parsing '%s' : %s" % (filename, err) raise def out_of_date(self,filename): basefile = os.path.basename(filename) ff = self.files.get(basefile) if not ff: self.load_formula_file(filename) ff = self.files.get(basefile) return ff.out_of_date() def get_file(self,filename): basefile = os.path.basename(filename) ff = self.files.get(basefile) if not ff: self.load_formula_file(filename) ff = self.files.get(basefile) elif ff.out_of_date(): self.load_formula_file(filename) ff = self.files.get(basefile) return ff def get_formula_text(self,filename,formname): ff = self.get_file(filename) form = ff.get_formula(formname) start_line = form.pos-1 last_line = form.last_line lines = ff.contents.splitlines() return "\n".join(lines[start_line:last_line]) def is_inline(self,filename, formname): return not self.files[filename].file_backed def compile(self,ir,options={}): cg = codegen.T(ir.symbols,options) cg.output_all(ir) return cg def hashcode(self,c_code): hash = hashlib.md5() hash.update(c_code) hash.update(self.compiler_name) hash.update(self.flags) hash.update(self.libs) return hash.hexdigest() def generate_code(self,ir, cg, outputfile=None,cfile=None): cg.output_decls(ir) self.c_code = cg.output_c(ir) hash = self.hashcode(self.c_code) if outputfile == None: outputfile = 
self.cache.makefilename(hash,".so") if os.path.exists(outputfile): # skip compilation - we already have this code return outputfile if cfile == None: cfile = self.cache.makefilename(hash,".c") if 'win' in sys.platform: objfile = self.cache.makefilename(hash, ".obj") open(cfile,"w").write(self.c_code) # -march=i686 for 10% speed gain cmd = "%s \"%s\" %s %s\"%s\"" % \ (self.compiler_name, cfile, self.flags, self.output_flag, outputfile) if 'win' == sys.platform[:3]: cmd += " /Fo\"%s\"" % objfile cmd += " %s" % self.libs #print "cmd: %s" % cmd (status,output) = commands.getstatusoutput(cmd) if status != 0: raise fracttypes.TranslationError( "Error reported by C compiler:%s" % output) return outputfile def get_parsetree(self,filename,formname): ff = self.get_file(filename) if ff == None : return None return ff.get_formula(formname) def guess_type_from_filename(self,filename): return FormulaTypes.guess_type_from_filename(filename) def get_formula(self, filename, formname,prefix=""): type = self.guess_type_from_filename(filename) f = self.get_parsetree(filename,formname) if f != None: f = type(f,prefix) return f def get_gradient(self, filename, formname): g = gradient.Gradient() if formname == None: g.load(open(self.find_file(filename, 3))) # FIXME else: compiled_gradient = self.get_formula(filename,formname) g.load_ugr(compiled_gradient) return g def get_random_gradient(self): return self.get_random_formula(3) # FIXME def get_random_formula(self,type): files = self.find_files_of_type(type) file = random.choice(files) if gradient.FileType.guess(file) == gradient.FileType.UGR: ff = self.get_file(file) formulas = ff.formulas.keys() formula = random.choice(formulas) else: formula = None return (file,formula) def clear_cache(self): self.cache.clear() def __del__(self): if not self.leave_dirty: self.clear_cache() instance = Compiler() instance.update_from_prefs(fractconfig.instance) def usage(): print "FC : a compiler from Fractint .frm files to C code" print "fc.py -o [outfile] -f [formula] infile" sys.exit(1) def generate(fc,formulafile, formula, outputfile, cfile): # find the function we want ir = fc.get_formula(formulafile,formula) if ir == None: raise Exception("Can't find formula %s in %s" % \ (formula, formulafile)) if ir.errors != []: print "Errors during translation" for e in ir.errors: print e raise Exception("Errors during translation") cg = fc.compile(ir) fc.generate_code(ir, cg, outputfile,cfile) def main(args): fc = Compiler() fc.leave_dirty = True for arg in args: ff = fc.load_formula_file(arg) for name in ff.get_formula_names(): print name form = fc.get_formula(arg,name) cg = fc.compile(form) if __name__ == '__main__': main(sys.argv[1:])
bsd-3-clause
-7,829,546,766,683,979,000
31.827184
133
0.588726
false
3.784643
false
false
false
sileht/pifpaf
pifpaf/drivers/zookeeper.py
1
2019
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from pifpaf import drivers


class ZooKeeperDriver(drivers.Driver):

    DEFAULT_PORT = 2181

    PATH = ["/usr/share/zookeeper/bin",
            "/usr/local/opt/zookeeper/libexec/bin"]

    def __init__(self, port=DEFAULT_PORT, **kwargs):
        """Create a new ZooKeeper server."""
        super(ZooKeeperDriver, self).__init__(**kwargs)
        self.port = port

    @classmethod
    def get_options(cls):
        return [
            {"param_decls": ["--port"],
             "type": int,
             "default": cls.DEFAULT_PORT,
             "help": "port to use for ZooKeeper"},
        ]

    def _setUp(self):
        super(ZooKeeperDriver, self)._setUp()

        cfgfile = os.path.join(self.tempdir, "zoo.cfg")
        with open(cfgfile, "w") as f:
            f.write("""dataDir=%s
clientPort=%s""" % (self.tempdir, self.port))

        logdir = os.path.join(self.tempdir, "log")
        os.mkdir(logdir)

        self.putenv("ZOOCFGDIR", self.tempdir, True)
        self.putenv("ZOOCFG", cfgfile, True)
        self.putenv("ZOO_LOG_DIR", logdir, True)

        c, _ = self._exec(
            ["zkServer.sh", "start", cfgfile],
            wait_for_line="STARTED",
            path=self.PATH)

        self.addCleanup(self._exec,
                        ["zkServer.sh", "stop", cfgfile],
                        path=self.PATH)

        self.putenv("ZOOKEEPER_PORT", str(self.port))
        self.putenv("URL", "zookeeper://localhost:%d" % self.port)
apache-2.0
3,448,701,531,717,726,000
30.061538
69
0.600792
false
3.605357
false
false
false
patrickhoefler/lwd
lwd.py
1
7546
#!/usr/bin/python # -*- coding: utf-8 -*- # Functions for turning the Wikidata dump into Linked Data import codecs import glob import gzip import json import math import os import sys import time import xml.etree.cElementTree as ET import settings def process_dump(): # Print some status info print 'Processing ' + settings.dump_filename # Make sure the output folders exist if not os.path.exists('output'): os.mkdir('output') if not os.path.exists('output/' + settings.output_folder): os.mkdir('output/' + settings.output_folder) if not os.path.exists('output/' + settings.output_folder + '/ttl'): os.mkdir('output/' + settings.output_folder + '/ttl') # Delete all old files for f in glob.glob('output/' + settings.output_folder + '/ttl/*.ttl'): os.remove(f) # Initiate variables entity_counter = 0 element_id = '' # Start the clock start_time = time.time() # Load the dump file and create the iterator context = ET.iterparse(settings.dump_filename, events=('start', 'end')) context = iter(context) event, root = context.next() # Iterate over the dump file for event, element in context: # Check if we have reached the max number of processed entities if settings.max_processed_entities > 0 and entity_counter == settings.max_processed_entities: break # Get the ID of the current entity if event == 'end' and element.tag == '{http://www.mediawiki.org/xml/export-0.8/}title': if element.text.find('Q') == 0: element_id = element.text elif element.text.find('Property:P') == 0: element_id = element.text.split(':')[1] # Get the data of the current entity if element_id and event == 'end' and element.tag == '{http://www.mediawiki.org/xml/export-0.8/}text': if element.text: triples = get_nt_for_entity(element_id, element.text) batch_id = str(int(math.floor(int(element_id[1:]) / settings.batchsize)) * settings.batchsize).zfill(8) batchfile_ttl_name = 'output/' + settings.output_folder + '/ttl/' + element_id[0] + '_Batch_' + batch_id + '.ttl' # If ttl file doesn't exist, create it and add the prefixes if not os.path.isfile(batchfile_ttl_name): prefixes = '# Extracted from ' + settings.dump_filename + ' with LWD (http://github.com/patrickhoefler/lwd)' prefixes += """ @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> . @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> . @prefix wd: <http://www.wikidata.org/entity/> . 
""".replace(' ', '') with codecs.open(batchfile_ttl_name, 'a', 'utf-8') as batchfile_ttl: batchfile_ttl.write(prefixes) # Write the triples to the batchfile with codecs.open(batchfile_ttl_name, 'a', 'utf-8') as batchfile_ttl: batchfile_ttl.write(triples) # One more entity entity_counter += 1 # Print some progress if entity_counter % 1000 == 0: sys.stdout.write('.') sys.stdout.flush() # Print some statistics if entity_counter % 10000 == 0: lap_time = time.time() print '\nProcessed ' + str(entity_counter) + ' entities in ' + str(lap_time - start_time) + ' seconds, on average ' + str(entity_counter / (lap_time - start_time)) + ' per second' # Reset the element ID in preparation for the next iteration element_id = '' # Save the memory, save the world root.clear() # Stop the clock and print some final statistics end_time = time.time() print('\nProcessed ' + str(entity_counter) + ' entities in ' + str(end_time - start_time) + ' seconds, on average ' + str(entity_counter / (end_time - start_time)) + ' per second') number_of_files = len(os.listdir('output/' + settings.output_folder + '/ttl')) if number_of_files != 1: plural = 's' else: plural = '' print('Created ' + str(number_of_files) + ' .ttl file' + plural + ' in ./' + 'output/' + settings.output_folder + '/ttl') def get_nt_for_entity(element_id, element_data): # Turn the data JSON string into an object data = json.loads(element_data) entity_uri = 'wd:' + element_id triples = '' # Get the label in English try: triples = triples + entity_uri + ' rdfs:label ' + '"' + data['label']['en'].replace('\\', '\\\\').replace('"', '\\"') + '"@en .\n' except: # print 'No label for ' + element_id pass # Get the description in English try: triples = triples + entity_uri + ' rdfs:comment ' + '"' + data['description']['en'].replace('\\', '\\\\').replace('"', '\\"') + '"@en .\n' except: # print 'No description for ' + element_id pass # Are there any claims in the current element? if data.get('claims'): # Iterate over all claims for claim in data['claims']: predicate_id = 'P' + str(claim['m'][1]) predicate_uri = 'wd:' + predicate_id if len(claim['m']) > 2: # Is it an object property? if claim['m'][2] == 'wikibase-entityid': object_id = 'Q' + str(claim['m'][3]['numeric-id']) object_uri = 'wd:' + object_id triples = triples + entity_uri + ' ' + predicate_uri + ' ' + object_uri + ' .\n' # Add RDF type if predicate_id == 'P31': triples = triples + entity_uri + ' rdf:type ' + object_uri + ' .\n' # Is it a string value property? 
if claim['m'][2] == 'string': triples = triples + entity_uri + ' ' + predicate_uri + ' "' + claim['m'][3].replace('\\', '\\\\').replace('"', '\\"') + '" .\n' return triples def compress_ttl_files(): # Print some status info print 'Compressing' # Make sure the output folders exist if not os.path.exists('output'): os.mkdir('output') if not os.path.exists('output/' + settings.output_folder): os.mkdir('output/' + settings.output_folder) if not os.path.exists('output/' + settings.output_folder + '/gz'): os.mkdir('output/' + settings.output_folder + '/gz') # Delete all old files for f in glob.glob('output/' + settings.output_folder + '/gz/*.gz'): os.remove(f) # Compress all files for input_file_name in glob.glob('output/' + settings.output_folder + '/ttl/*.ttl'): with open(input_file_name, 'rb') as input_file: with gzip.open('output/' + settings.output_folder + '/gz/' + input_file_name.split('/')[-1] + '.gz', 'wb') as output_file: output_file.writelines(input_file) # Print some progress sys.stdout.write('.') sys.stdout.flush() # Print some final statistics number_of_files = len(os.listdir('output/' + settings.output_folder + '/gz')) if number_of_files != 1: plural = 's' else: plural = '' print('\nCreated ' + str(number_of_files) + ' .gz file' + plural + ' in ./' + 'output/' + settings.output_folder + '/gz')
mit
-7,322,916,570,662,439,000
37.111111
199
0.551816
false
3.811111
false
false
false
calebmadrigal/algorithms-in-python
heap.py
1
3668
"""heap.py - implementation of a heap priority queue. """ __author__ = "Caleb Madrigal" __date__ = "2015-02-17" import math from enum import Enum from autoresizelist import AutoResizeList class HeapType(Enum): maxheap = 1 minheap = 2 class Heap: def __init__(self, initial_data=None, heap_type=HeapType.maxheap): self.heap_type = heap_type if heap_type == HeapType.maxheap: self.comparator = lambda x, y: x > y else: self.comparator = lambda x, y: x < y self.data = AutoResizeList() if initial_data is not None: self.build_heap(initial_data) self._size = len(self.data) def _left_child(self, index): return 2*index + 1 def _right_child(self, index): return 2*index + 2 def _parent(self, index): return math.floor((index - 1) / 2.0) def _is_root(self, index): return index == 0 def _swap(self, i1, i2): self.data[i1], self.data[i2] = self.data[i2], self.data[i1] def build_heap(self, initial_data): for i in initial_data: self.data.prepend(i) self.heap_down(0) def heap_up(self, index): # If we are at the root, return - we are done if self._is_root(index): return # Else, compare the current node with the parent node, and if this node should be higher # then the parent node, then swap and recursively call on the parent index parent_index = self._parent(index) if self.comparator(self.data[index], self.data[parent_index]): self._swap(index, parent_index) self.heap_up(parent_index) def heap_down(self, index): left_index = self._left_child(index) right_index = self._right_child(index) try: left = self.data[left_index] except IndexError: left = None try: right = self.data[right_index] except IndexError: right = None # Find the largest child largest_child = left largest_child_index = left_index if left is not None and right is not None: if self.comparator(right, left): largest_child = right largest_child_index = right_index elif right is not None: largest_child = right largest_child_index = right_index # If the largest child is not None and is higher priority than the current, then swap # and recursively call on on the child index if largest_child is not None and self.comparator(largest_child, self.data[index]): self._swap(index, largest_child_index) self.heap_down(largest_child_index) def push(self, item): insert_index = self._size # Insert at the end self._size += 1 self.data[insert_index] = item self.heap_up(insert_index) return self def peek(self): return self.data[0] def pop(self): if len(self.data) < 1 or self.data[0] is None: return None # Take item from the root item = self.data[0] # Move the bottom-most, right-most item to the root self.data[0] = self.data[self._size-1] self.data[self._size-1] = None self._size -= 1 self.heap_down(0) return item def size(self): return self._size def __repr__(self): return str(self.data) if __name__ == "__main__": import unittest testsuite = unittest.TestLoader().discover('test', pattern="*heap*") unittest.TextTestRunner(verbosity=1).run(testsuite)
mit
-855,685,561,109,325,200
27.65625
96
0.581516
false
3.785346
true
false
false
unkyulee/elastic-cms
src/web/modules/post/services/config.py
1
2826
import web.util.tools as tools
import os
from web import app
import lib.es as es

def get(p):
    # get host and index from the global config
    h = tools.get_conf(p['host'], p['navigation']['id'], 'host', 'http://localhost:9200')
    n = tools.get_conf(p['host'], p['navigation']['id'], 'index', '')

    return {
        'name': get_conf(h, n, 'name', ''),
        'description': get_conf(h, n, 'description', ''),
        'host': h,
        'index': n,
        'upload_dir': get_conf(h, n, 'upload_dir',
                               os.path.join(app.config.get('BASE_DIR'), 'uploads')),
        'allowed_exts': get_conf(h, n, 'allowed_exts', ''),
        'page_size': get_conf(h, n, 'page_size', '10'),
        'query': get_conf(h, n, 'query', '*'),
        'sort_field': get_conf(h, n, 'sort_field', '_score'),
        'sort_dir': get_conf(h, n, 'sort_dir', 'desc'),
        'top': get_conf(h, n, 'top', ''),
        'footer': get_conf(h, n, 'footer', ''),
        'side': get_conf(h, n, 'side', ''),
        'content_header': get_conf(h, n, 'content_header', ''),
        'content_footer': get_conf(h, n, 'content_footer', ''),
        'intro': get_conf(h, n, 'intro', ''),
        'search_query': get_conf(h, n, 'search_query', ''),
        'search_item_template': get_conf(h, n, 'search_item_template', ''),
        'keep_history': get_conf(h, n, 'keep_history', 'Yes'),
    }

def set(p):
    # get host, index
    host = p['c']['host']
    if not host:
        host = tools.get('host')
    index = p['c']['index']
    if not index:
        index = tools.get('index')

    # get host and index from the global config
    tools.set_conf(p['host'], p['navigation']['id'], 'host', host)
    tools.set_conf(p['host'], p['navigation']['id'], 'index', index)

    # set local config
    if p['c']['index']:  # save local config only when index is already created
        set_conf(host, index, 'name', tools.get('name'))
        set_conf(host, index, 'description', tools.get('description'))
        set_conf(host, index, 'upload_dir', tools.get('upload_dir'))
        set_conf(host, index, 'allowed_exts', tools.get('allowed_exts'))
        set_conf(host, index, 'page_size', tools.get('page_size'))
        set_conf(host, index, 'query', tools.get('query'))
        set_conf(host, index, 'sort_field', tools.get('sort_field'))
        set_conf(host, index, 'sort_dir', tools.get('sort_dir'))
        set_conf(host, index, 'keep_history', tools.get('keep_history'))

def get_conf(host, index, name, default):
    ret = es.get(host, index, "config", name)
    return ret.get('value') if ret and ret.get('value') else default

def set_conf(host, index, name, value):
    config = {
        'name': name,
        'value': value
    }
    es.update(host, index, "config", name, config)
    es.flush(host, index)
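A small usage sketch for the helpers above (illustrative only: the host and index values are made up, and a reachable Elasticsearch instance plus the project's lib.es module are assumed): get_conf/set_conf treat documents of the "config" type as a simple key-value store.

host = 'http://localhost:9200'   # hypothetical cluster
index = 'post'                   # hypothetical module index

set_conf(host, index, 'page_size', '25')               # upsert one config entry
page_size = get_conf(host, index, 'page_size', '10')   # -> '25', or '10' if unset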
mit
-7,525,611,961,569,722,000
37.189189
89
0.54954
false
3.18602
true
false
false
GutenkunstLab/SloppyCell
test/test_FixedPoints.py
1
3610
import unittest import scipy from SloppyCell.ReactionNetworks import * lorenz = Network('lorenz') lorenz.add_compartment('basic') lorenz.add_species('x', 'basic', 0.5) lorenz.add_species('y', 'basic', 0.5) lorenz.add_species('z', 'basic', 0.5) lorenz.add_parameter('sigma', 1.0) lorenz.add_parameter('r', 2.0) lorenz.add_parameter('b', 2.0) lorenz.add_rate_rule('x', 'sigma*(y-x)') lorenz.add_rate_rule('y', 'r*x - y - x*z') lorenz.add_rate_rule('z', 'x*y - b*z') class test_fixedpoints(unittest.TestCase): def test_basic(self): """ Test basic fixed-point finding """ net = lorenz.copy('test') fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=False) # This should find the fixed-point [sqrt(2), sqrt(2), 1] self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on basic 1,0.') self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on basic 1,1.') self.assertAlmostEqual(fp[2], 1, 6, 'Failed on basic 1,2.') fp = Dynamics.dyn_var_fixed_point(net, dv0=[-0.1,-0.1,-0.1], with_logs=False) # This should find the fixed-point [0, 0, 0] self.assertAlmostEqual(fp[0], 0, 6, 'Failed on basic 2,0.') self.assertAlmostEqual(fp[1], 0, 6, 'Failed on basic 2,1.') self.assertAlmostEqual(fp[2], 0, 6, 'Failed on basic 2,2.') def test_withlogs(self): """ Test fixed-point finding with logs """ net = lorenz.copy('test') fp = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], with_logs=True) # This should find the fixed-point [sqrt(2), sqrt(2), 1] self.assertAlmostEqual(fp[0], scipy.sqrt(2), 6, 'Failed on logs 1,0.') self.assertAlmostEqual(fp[1], scipy.sqrt(2), 6, 'Failed on logs 1,1.') self.assertAlmostEqual(fp[2], 1, 6, 'Failed on logs 1,2.') fp = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1], with_logs=True) # This should find the fixed-point [0, 0, 0] self.assertAlmostEqual(fp[0], 0, 6, 'Failed on logs 2,0.') self.assertAlmostEqual(fp[1], 0, 6, 'Failed on logs 2,1.') self.assertAlmostEqual(fp[2], 0, 6, 'Failed on logs 2,2.') def test_stability(self): net = lorenz.copy('test') # The sqrt(b*(r-1)), sqrt(b*(r-1)), r-1 fixed point is stable for r < rH # Strogatz, Nonlinear Dynamics and Chaos (p. 316) fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[1,1,1], stability=True) self.assertEqual(stable, -1, 'Failed to classify stable fixed point') # (0,0,0) is a saddle here fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.01,0.01,0.01], stability=True) self.assertEqual(stable, 0, 'Failed to classify saddle') # (0,0,0) is a stable node here net.set_var_ic('r', 0.5) fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[0.1,0.1,0.1], stability=True) self.assertEqual(stable, -1, 'Failed to classify stable fixed point') # Now make the far fixed point a saddle... net.set_var_ic('sigma', 6.0) net.set_var_ic('r', 25) fp, stable = Dynamics.dyn_var_fixed_point(net, dv0=[10,10,10], stability=True) self.assertEqual(stable, 0, 'Failed to classify saddle') suite = unittest.makeSuite(test_fixedpoints) if __name__ == '__main__': unittest.main()
bsd-3-clause
-4,611,129,762,351,684,600
43.567901
80
0.565097
false
3.088109
true
false
false
tcarmelveilleux/IcarusAltimeter
Analysis/altitude_analysis.py
1
1202
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 19:34:31 2015

@author: Tennessee
"""
import numpy as np
import matplotlib.pyplot as plt


def altitude(atm_hpa, sea_level_hpa):
    return 44330 * (1.0 - np.power(atm_hpa / sea_level_hpa, 0.1903))


def plot_alt():
    # altitude() expects hPa, so convert the Pa reference value to hPa.
    default_msl = 101300.0 / 100.0

    pressure = np.linspace(97772.58 / 100.0, 79495.0 / 100.0, 2000)

    alt_nominal = altitude(pressure, default_msl) - altitude(97772.58 / 100.0, default_msl)
    # Sea-level reference off by +/- 1000 Pa (10 hPa).
    alt_too_high = altitude(pressure, default_msl + (1000 / 100.0)) - altitude(97772.58 / 100.0, default_msl + (1000 / 100.0))
    alt_too_low = altitude(pressure, default_msl - (1000 / 100.0)) - altitude(97772.58 / 100.0, default_msl - (1000 / 100.0))

    f1 = plt.figure()
    ax = f1.gca()
    ax.plot(pressure, alt_nominal, "b-", label="nom")
    ax.plot(pressure, alt_too_high, "r-", label="high")
    ax.plot(pressure, alt_too_low, "g-", label="low")
    ax.legend()
    f1.show()

    f2 = plt.figure()
    ax = f2.gca()
    ax.plot(pressure, alt_too_high - alt_nominal, "r-", label="high")
    ax.plot(pressure, alt_too_low - alt_nominal, "g-", label="low")
    ax.legend()
    f2.show()
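A quick sanity check of the hypsometric relation used above (an illustrative sketch, not part of the original file; altitude_check restates altitude() so the snippet runs on its own, and the expected values are approximate):

import numpy as np

def altitude_check(atm_hpa, sea_level_hpa=1013.0):
    # Same relation as altitude() above, restated for a standalone check.
    return 44330 * (1.0 - np.power(atm_hpa / sea_level_hpa, 0.1903))

print(altitude_check(1013.0))  # 0.0 m at the reference pressure
print(altitude_check(900.0))   # roughly 987 m for a 113 hPa drop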
mit
4,210,792,572,743,000,600
26.953488
126
0.58985
false
2.731818
false
false
false
aESeguridad/GERE
venv/lib/python2.7/site-packages/flask_weasyprint/__init__.py
1
7726
# coding: utf8 """ flask_weasyprint ~~~~~~~~~~~~~~~~ Flask-WeasyPrint: Make PDF in your Flask app with WeasyPrint. :copyright: (c) 2012 by Simon Sapin. :license: BSD, see LICENSE for more details. """ import weasyprint from flask import request, current_app from werkzeug.test import Client, ClientRedirectError from werkzeug.wrappers import Response try: import urlparse except ImportError: # Python 3 from urllib import parse as urlparse try: unicode except NameError: # Python 3 unicode = str VERSION = '0.5' __all__ = ['VERSION', 'make_flask_url_dispatcher', 'make_url_fetcher', 'HTML', 'CSS', 'render_pdf'] DEFAULT_PORTS = frozenset([('http', 80), ('https', 443)]) def make_flask_url_dispatcher(): """Return an URL dispatcher based on the current :ref:`request context <flask:request-context>`. You generally don’t need to call this directly. The context is used when the dispatcher is first created but not afterwards. It is not required after this function has returned. Dispatch to the context’s app URLs below the context’s root URL. If the app has a ``SERVER_NAME`` :ref:`config <flask:config>`, also accept URLs that have that domain name or a subdomain thereof. """ def parse_netloc(netloc): """Return (hostname, port).""" parsed = urlparse.urlsplit('http://' + netloc) return parsed.hostname, parsed.port app = current_app._get_current_object() root_path = request.script_root server_name = app.config.get('SERVER_NAME') if server_name: hostname, port = parse_netloc(server_name) def accept(url): """Accept any URL scheme; also accept subdomains.""" return url.hostname is not None and ( url.hostname == hostname or url.hostname.endswith('.' + hostname)) else: scheme = request.scheme hostname, port = parse_netloc(request.host) if (scheme, port) in DEFAULT_PORTS: port = None def accept(url): """Do not accept subdomains.""" return (url.scheme, url.hostname) == (scheme, hostname) def dispatch(url_string): if isinstance(url_string, bytes): url_string = url_string.decode('utf8') url = urlparse.urlsplit(url_string) url_port = url.port if (url.scheme, url_port) in DEFAULT_PORTS: url_port = None if accept(url) and url_port == port and url.path.startswith(root_path): netloc = url.netloc if url.port and not url_port: netloc = netloc.rsplit(':', 1)[0] # remove default port base_url = '%s://%s%s' % (url.scheme, netloc, root_path) path = url.path[len(root_path):] if url.query: path += '?' + url.query # Ignore url.fragment return app, base_url, path return dispatch def make_url_fetcher(dispatcher=None, next_fetcher=weasyprint.default_url_fetcher): """Return an function suitable as a ``url_fetcher`` in WeasyPrint. You generally don’t need to call this directly. If ``dispatcher`` is not provided, :func:`make_flask_url_dispatcher` is called to get one. This requires a request context. Otherwise, it must be a callable that take an URL and return either ``None`` or a ``(wsgi_callable, base_url, path)`` tuple. For None ``next_fetcher`` is used. (By default, fetch normally over the network.) For a tuple the request is made at the WSGI level. ``wsgi_callable`` must be a Flask application or another WSGI callable. ``base_url`` is the root URL for the application while ``path`` is the path within the application. Typically ``base_url + path`` is equal or equivalent to the passed URL. 
""" if dispatcher is None: dispatcher = make_flask_url_dispatcher() def flask_url_fetcher(url): redirect_chain = set() while 1: result = dispatcher(url) if result is None: return next_fetcher(url) app, base_url, path = result client = Client(app, response_wrapper=Response) if isinstance(path, unicode): # TODO: double-check this. Apparently Werzeug %-unquotes bytes # but not Unicode URLs. (IRI vs. URI or something.) path = path.encode('utf8') response = client.get(path, base_url=base_url) if response.status_code == 200: return dict( string=response.data, mime_type=response.mimetype, encoding=response.charset, redirected_url=url) # The test client can follow redirects, but do it ourselves # to get access to the redirected URL. elif response.status_code in (301, 302, 303, 305, 307): redirect_chain.add(url) url = response.location if url in redirect_chain: raise ClientRedirectError('loop detected') else: raise ValueError('Flask-WeasyPrint got HTTP status %s for %s%s' % (response.status, base_url, path)) return flask_url_fetcher def _wrapper(class_, *args, **kwargs): if args: guess = args[0] args = args[1:] else: guess = kwargs.pop('guess', None) if guess is not None and not hasattr(guess, 'read'): # Assume a (possibly relative) URL guess = urlparse.urljoin(request.url, guess) if 'string' in kwargs and 'base_url' not in kwargs: # Strings do not have an "intrinsic" base URL, use the request context. kwargs['base_url'] = request.url kwargs['url_fetcher'] = make_url_fetcher() return class_(guess, *args, **kwargs) def HTML(*args, **kwargs): """Like `weasyprint.HTML() <http://weasyprint.org/using/#the-weasyprint-html-class>`_ but: * :func:`make_url_fetcher` is used to create an ``url_fetcher`` * If ``guess`` is not a file object, it is an URL relative to the current request context. This means that you can just pass a result from :func:`flask.url_for`. * If ``string`` is passed, ``base_url`` defaults to the current request’s URL. This requires a Flask request context. """ return _wrapper(weasyprint.HTML, *args, **kwargs) def CSS(*args, **kwargs): return _wrapper(weasyprint.CSS, *args, **kwargs) CSS.__doc__ = HTML.__doc__.replace('HTML', 'CSS').replace('html', 'css') def render_pdf(html, stylesheets=None, download_filename=None): """Render a PDF to a response with the correct ``Content-Type`` header. :param html: Either a :class:`weasyprint.HTML` object or an URL to be passed to :func:`flask_weasyprint.HTML`. The latter case requires a request context. :param stylesheets: A list of user stylesheets, passed to :meth:`~weasyprint.HTML.write_pdf` :param download_filename: If provided, the ``Content-Disposition`` header is set so that most web browser will show the "Save as…" dialog with the value as the default filename. :returns: a :class:`flask.Response` object. """ if not hasattr(html, 'write_pdf'): html = HTML(html) pdf = html.write_pdf(stylesheets=stylesheets) response = current_app.response_class(pdf, mimetype='application/pdf') if download_filename: response.headers.add('Content-Disposition', 'attachment', filename=download_filename) return response
gpl-3.0
6,181,784,262,939,823,000
35.046729
79
0.612004
false
4.066421
false
false
false
ThomasHabets/python-pyhsm
examples/yhsm-monitor-exit.py
1
1480
#!/usr/bin/env python
#
# Copyright (c) 2011, Yubico AB
# All rights reserved.
#
# Utility to send a MONITOR_EXIT command to a YubiHSM.
#
# MONITOR_EXIT only works if the YubiHSM is in debug mode. It would
# be a security problem to allow remote reconfiguration of a production
# YubiHSM.
#
# If your YubiHSM is not in debug mode, enter configuration mode by
# pressing the small button while inserting the YubiHSM in the USB port.
#

import sys
sys.path.append('Lib');

import pyhsm

device = "/dev/ttyACM0"

# simplified arguments parsing
d_argv = dict.fromkeys(sys.argv)
debug = d_argv.has_key('-v')
raw = d_argv.has_key('-R')  # -R selects the raw MONITOR_EXIT command (see help text)

if d_argv.has_key('-h'):
    sys.stderr.write("Syntax: %s [-v] [-R]\n" % (sys.argv[0]))
    sys.stderr.write("\nOptions :\n")
    sys.stderr.write("  -v  verbose\n")
    sys.stderr.write("  -R  raw MONITOR_EXIT command\n")
    sys.exit(0)

res = 0
try:
    s = pyhsm.base.YHSM(device=device, debug=debug)

    if raw:
        # No initialization
        s.write('\x7f\xef\xbe\xad\xba\x10\x41\x52\x45')
    else:
        print "Version: %s" % s.info()
        s.monitor_exit()

    print "Exited monitor-mode (maybe)"

    if raw:
        print "s.stick == %s" % s.stick
        print "s.stick.ser == %s" % s.stick.ser

        for _ in xrange(3):
            s.stick.ser.write("\n")
            line = s.stick.ser.readline()
            print "%s" % (line)
except pyhsm.exception.YHSM_Error, e:
    print "ERROR: %s" % e
    res = 1

sys.exit(res)
bsd-2-clause
5,347,888,154,988,822,000
24.084746
72
0.618243
false
2.808349
false
false
false
darvin/qtdjango
src/qtdjango/settings.py
1
5503
# -*- coding: utf-8 -*- from qtdjango.helpers import test_connection __author__ = 'darvin' from qtdjango.connection import * __author__ = 'darvin' from PyQt4.QtCore import * from PyQt4.QtGui import * class BooleanEdit(QCheckBox): def text(self): return QVariant(self.checkState()).toString() def setText(self, text): self.setChecked(QVariant(text).toBool()) class SettingsDialog(QDialog): widgets_table = [ # (name, caption, widget object, default value), ("address", u"Адрес сервера", QLineEdit, "http://127.0.0.1:8000"), ("api_path", u"Путь к api сервера", QLineEdit, "/api/"), ("server_package", u"Название пакета сервера", QLineEdit, "none"), ("login", u"Ваш логин", QLineEdit, ""), ("password", u"Ваш пароль", QLineEdit, ""), ("open_links_in_external_browser", \ u"Открывать ссылки из окна информации во внешнем браузере", BooleanEdit, True), ] def __init__(self, parent=None, error_message=None, models_manager=None): super(SettingsDialog, self).__init__(parent) self.setWindowTitle(u"Настройки") self.setModal(True) self.formlayout = QFormLayout() self.models_manager = models_manager self.settings = QSettings() self.message_widget = QLabel() self.__widgets = [] for name, caption, widget_class, default in self.widgets_table: self.__widgets.append((name, caption, widget_class(), default)) for name, caption, widget, default in self.__widgets: self.formlayout.addRow(caption, widget) widget.setText(self.settings.value(name, default).toString()) self.formlayout.addRow(self.message_widget) if error_message is not None: self.message(**error_message) buttonBox = QDialogButtonBox(QDialogButtonBox.Save\ | QDialogButtonBox.Cancel |QDialogButtonBox.RestoreDefaults) testButton = QPushButton(u"Тестировать соединение") buttonBox.addButton(testButton, QDialogButtonBox.ActionRole) testButton.clicked.connect(self.test) buttonBox.accepted.connect(self.accept) buttonBox.rejected.connect(self.reject) buttonBox.button(QDialogButtonBox.RestoreDefaults).clicked.connect(self.restore) self.formlayout.addRow(buttonBox) self.setLayout(self.formlayout) def accept(self): if self.test(): for name, caption, widget, default in self.__widgets: self.settings.setValue(name, widget.text()) self.models_manager.set_connection_params(\ self.get_value("address"), \ self.get_value("api_path"), \ self.get_value("login"),\ self.get_value("password")) QDialog.accept(self) def restore(self): for name, caption, widget, default in self.__widgets: widget.setText(default) def message(self, text, error=False, works=False, fields=[]): self.message_widget.setText(text) if error: color = "red" elif works: color = "green" else: color = "black" css = "QLabel { color : %s; }" % color self.message_widget.setStyleSheet(css) for name, caption, widget, default in self.__widgets: self.formlayout.labelForField(widget).setStyleSheet("") if name in fields: self.formlayout.labelForField(widget).setStyleSheet(css) def get_value(self, name): return unicode(self.settings.value(name).toString()) def test(self): s = {} for name, caption, widget, default in self.__widgets: s[name] = unicode(widget.text()) try: remote_version = test_connection(s["address"],s["api_path"],s["login"],s["password"]) import qtdjango if qtdjango.__version__==remote_version: self.message(text=u"Удаленный сервер настроен правильно!", works=True) return True elif remote_version is not None: self.message(u"Версия системы на удаленном сервере отличается от\ версии системы на клиенте") return True except SocketError: self.message(text=u"Ошибка при подключении к удаленному серверу", error=True, 
fields=\ ("address",)) except ServerNotFoundError: self.message(text=u"Удаленный сервер недоступен", error=True, fields=\ ("address",)) except NotQtDjangoResponceError: self.message(text=u"Не правильно настроен путь на удаленном сервере или \ удаленный сервер не является сервером системы", error=True, fields=\ ("address","api_path")) except AuthError: self.message(text=u"Неверное имя пользователя или пароль", error=True, fields=\ ("login","password")) return False
gpl-2.0
3,427,938,179,775,995,000
34.326389
98
0.599568
false
3.418683
true
false
false
barbarahui/nuxeo-calisphere
s3stash/nxstash_mediajson.py
1
4444
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import unicode_literals import sys import os from s3stash.nxstashref import NuxeoStashRef from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo from deepharvest.mediajson import MediaJson from dplaingestion.mappers.ucldc_nuxeo_mapper import UCLDCNuxeoMapper import json import s3stash.s3tools FILENAME_FORMAT = "{}-media.json" class NuxeoStashMediaJson(NuxeoStashRef): ''' create and stash media.json file for a nuxeo object ''' def __init__(self, path, bucket, region, pynuxrc='~/.pynuxrc', replace=True, **kwargs): super(NuxeoStashMediaJson, self).__init__(path, bucket, region, pynuxrc, replace, **kwargs) self.dh = DeepHarvestNuxeo( self.path, self.bucket, pynuxrc=self.pynuxrc) self.mj = MediaJson() self.filename = FILENAME_FORMAT.format(self.uid) self.filepath = os.path.join(self.tmp_dir, self.filename) self._update_report('filename', self.filename) self._update_report('filepath', self.filepath) def nxstashref(self): return self.nxstash_mediajson() def nxstash_mediajson(self): ''' create media.json file for object and stash on s3 ''' self._update_report('stashed', False) # extract and transform metadata for parent obj and any components parent_md = self._get_parent_metadata(self.metadata) component_md = [ self._get_component_metadata(c) for c in self.dh.fetch_components(self.metadata) ] # create media.json file media_json = self.mj.create_media_json(parent_md, component_md) self._write_file(media_json, self.filepath) # stash media.json file on s3 stashed, s3_report = s3stash.s3tools.s3stash( self.filepath, self.bucket, self.filename, self.region, 'application/json', self.replace) self._update_report('s3_stash', s3_report) self._update_report('stashed', stashed) self._remove_tmp() return self.report def _get_parent_metadata(self, obj): ''' assemble top-level (parent) object metadata ''' metadata = {} metadata['label'] = obj['title'] # only provide id, href, format if Nuxeo Document has file attached full_metadata = self.nx.get_metadata(uid=obj['uid']) if self.dh.has_file(full_metadata): metadata['id'] = obj['uid'] metadata['href'] = self.dh.get_object_download_url(full_metadata) metadata['format'] = self.dh.get_calisphere_object_type(obj[ 'type']) if metadata['format'] == 'video': metadata['dimensions'] = self.dh.get_video_dimensions( full_metadata) return metadata def _get_component_metadata(self, obj): ''' assemble component object metadata ''' metadata = {} full_metadata = self.nx.get_metadata(uid=obj['uid']) metadata['label'] = obj['title'] metadata['id'] = obj['uid'] metadata['href'] = self.dh.get_object_download_url(full_metadata) # extract additional ucldc metadata from 'properties' element ucldc_md = self._get_ucldc_schema_properties(full_metadata) for key, value in ucldc_md.iteritems(): metadata[key] = value # map 'type' metadata['format'] = self.dh.get_calisphere_object_type(obj['type']) return metadata def _get_ucldc_schema_properties(self, metadata): ''' get additional metadata as mapped by harvester ''' properties = {} mapper = UCLDCNuxeoMapper(metadata) mapper.map_original_record() mapper.map_source_resource() properties = mapper.mapped_data['sourceResource'] properties.update(mapper.mapped_data['originalRecord']) return properties def _write_file(self, content_dict, filepath): """ convert dict to json and write to file """ content_json = json.dumps( content_dict, indent=4, separators=(',', ': '), sort_keys=False) with open(filepath, 'wb') as f: f.write(content_json) f.flush() def main(argv=None): pass if __name__ 
== "__main__": sys.exit(main())
bsd-3-clause
-6,770,037,951,671,929,000
32.413534
77
0.602835
false
3.788576
false
false
false
anjel-ershova/python_training
fixture/fixture_group.py
1
4896
from model.model_group import Group import random class GroupHelper: def __init__(self, app): self.app = app def select_group_by_index(self, index): wd = self.app.wd wd.find_elements_by_name("selected[]")[index].click() def select_group_by_id(self, id): wd = self.app.wd wd.find_element_by_css_selector("input[value='%s']" % id).click() def select_first_group(self): wd = self.app.wd self.select_group_by_index(0) def edit_if_not_none(self, field, text): wd = self.app.wd if text is not None: wd.find_element_by_name(field).click() wd.find_element_by_name(field).clear() wd.find_element_by_name(field).send_keys(text) else: pass def fill_group_form(self, group): wd = self.app.wd self.edit_if_not_none("group_name", group.name) self.edit_if_not_none("group_header", group.header) self.edit_if_not_none("group_footer", group.footer) def create(self, group): wd = self.app.wd # open_groups_page wd.find_element_by_link_text("groups").click() # init group creation wd.find_element_by_name("new").click() self.fill_group_form(group) # submit group creation wd.find_element_by_name("submit").click() self.return_to_groups_page() self.group_cache = None def edit_first_group(self): wd = self.app.wd self.edit_group_by_index(0) def edit_group_by_index(self, index, new_group_data): wd = self.app.wd self.app.navigation.open_groups_page() self.select_group_by_index(index) # click edit button wd.find_element_by_name("edit").click() self.fill_group_form(new_group_data) # submit edition wd.find_element_by_name("update").click() self.return_to_groups_page() self.group_cache = None def edit_group_by_id(self, id, new_group_data): wd = self.app.wd self.app.navigation.open_groups_page() self.select_group_by_id(id) # click edit button wd.find_element_by_name("edit").click() self.fill_group_form(new_group_data) # submit edition wd.find_element_by_name("update").click() self.return_to_groups_page() self.group_cache = None def add_selected_contact_to_selected_group_by_id(self, target_group): wd = self.app.wd # открыть выпадающий список to_group = wd.find_element_by_name("to_group") to_group.click() # выбор произвольной группы по value to_group.find_element_by_css_selector("[value='%s']" % target_group.id).click() wd.find_element_by_name("add").click() def select_some_group_to_view(self, target_group): wd = self.app.wd # открыть выпадающий список view_group = wd.find_element_by_name("group") view_group.click() # выбор произвольной группы по value view_group.find_element_by_css_selector("[value='%s']" % target_group.id).click() # def click_add_contact_to_group_button(self): # wd = self.app.wd # wd.find_element_by_name("add").click() # self.app.navigation.open_home_page() def delete_first_group(self): wd = self.app.wd self.delete_group_by_index(0) def delete_group_by_index(self, index): wd = self.app.wd self.app.navigation.open_groups_page() self.select_group_by_index(index) # submit deletion wd.find_element_by_name("delete").click() self.return_to_groups_page() self.group_cache = None def delete_group_by_id(self, id): wd = self.app.wd self.app.navigation.open_groups_page() self.select_group_by_id(id) wd.find_element_by_name("delete").click() self.return_to_groups_page() self.group_cache = None def return_to_groups_page(self): wd = self.app.wd wd.find_element_by_link_text("group page").click() def count(self): wd = self.app.wd self.app.navigation.open_groups_page() return len(wd.find_elements_by_name("selected[]")) group_cache = None def get_group_list(self): if self.group_cache is None: wd = self.app.wd 
self.app.navigation.open_groups_page() self.group_cache = [] wd.find_elements_by_css_selector("span.group") for element in wd.find_elements_by_css_selector("span.group"): text = element.text id = int(element.find_element_by_name("selected[]").get_attribute("value")) self.group_cache.append(Group(name=text, id=id)) return list(self.group_cache)
apache-2.0
4,500,193,398,506,461,700
33.285714
91
0.592708
false
3.219316
false
false
false
bb111189/Arky2
boilerplate/external/pycountry/__init__.py
1
3459
# vim:fileencoding=utf-8
# Copyright (c) 2008-2011 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id$
"""pycountry"""

import os.path

import pycountry.db

LOCALES_DIR = os.path.join(os.path.dirname(__file__), 'locales')
DATABASE_DIR = os.path.join(os.path.dirname(__file__), 'databases')


class Countries(pycountry.db.Database):
    """Provides access to an ISO 3166 database (Countries)."""

    field_map = dict(alpha_2_code='alpha2',
                     alpha_3_code='alpha3',
                     numeric_code='numeric',
                     name='name',
                     official_name='official_name',
                     common_name='common_name')
    data_class_name = 'Country'
    xml_tag = 'iso_3166_entry'


class Scripts(pycountry.db.Database):
    """Provides access to an ISO 15924 database (Scripts)."""

    field_map = dict(alpha_4_code='alpha4',
                     numeric_code='numeric',
                     name='name')
    data_class_name = 'Script'
    xml_tag = 'iso_15924_entry'


class Currencies(pycountry.db.Database):
    """Provides access to an ISO 4217 database (Currencies)."""

    field_map = dict(letter_code='letter',
                     numeric_code='numeric',
                     currency_name='name')
    data_class_name = 'Currency'
    xml_tag = 'iso_4217_entry'


class Languages(pycountry.db.Database):
    """Provides access to an ISO 639-1/2 database (Languages)."""

    field_map = dict(iso_639_2B_code='bibliographic',
                     iso_639_2T_code='terminology',
                     iso_639_1_code='alpha2',
                     common_name='common_name',
                     name='name')
    data_class_name = 'Language'
    xml_tag = 'iso_639_entry'


class Subdivision(pycountry.db.Data):

    parent_code = None

    def __init__(self, element, **kw):
        super(Subdivision, self).__init__(element, **kw)
        self.type = element.parentNode.attributes.get('type').value
        self.country_code = self.code.split('-')[0]
        if self.parent_code is not None:
            self.parent_code = '%s-%s' % (self.country_code, self.parent_code)

    @property
    def country(self):
        return countries.get(alpha2=self.country_code)

    @property
    def parent(self):
        return subdivisions.get(code=self.parent_code)


class Subdivisions(pycountry.db.Database):
    # Note: subdivisions can be hierarchical to other subdivisions. The
    # parent_code attribute is related to other subdivisions, *not*
    # the country!

    xml_tag = 'iso_3166_2_entry'
    data_class_base = Subdivision
    data_class_name = 'Subdivision'
    field_map = dict(code='code',
                     name='name',
                     parent='parent_code')
    no_index = ['name', 'parent_code']

    def __init__(self, *args, **kw):
        super(Subdivisions, self).__init__(*args, **kw)

        # Add index for the country code.
        self.indices['country_code'] = {}
        for subdivision in self:
            divs = self.indices['country_code'].setdefault(
                subdivision.country_code, set())
            divs.add(subdivision)


countries = Countries(os.path.join(DATABASE_DIR, 'iso3166.xml'))
scripts = Scripts(os.path.join(DATABASE_DIR, 'iso15924.xml'))
currencies = Currencies(os.path.join(DATABASE_DIR, 'iso4217.xml'))
languages = Languages(os.path.join(DATABASE_DIR, 'iso639.xml'))
subdivisions = Subdivisions(os.path.join(DATABASE_DIR, 'iso3166_2.xml'))
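Typical lookups against the databases instantiated above; the keyword names follow the field_map values, so in this pre-1.0 API it is alpha2 rather than alpha_2 (a sketch, assuming the bundled ISO XML files are installed):

import pycountry

germany = pycountry.countries.get(alpha2='DE')
print(germany.name)             # Germany
print(germany.official_name)    # Federal Republic of Germany

# Currencies and languages are queried the same way through their field maps.
print(pycountry.currencies.get(letter='EUR').name)   # Euro
print(pycountry.languages.get(alpha2='de').name)     # German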
lgpl-3.0
8,592,734,702,324,915,000
30.445455
78
0.601619
false
3.51882
false
false
false
ecell/libmoleculizer
python-src/bngparser/src/moleculizer/moleculeinterpreter.py
1
6341
############################################################################### # BNGMZRConverter - A utility program for converting bngl input files to mzr # input files. # Copyright (C) 2007, 2008, 2009 The Molecular Sciences Institute # # Moleculizer is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # Moleculizer is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Moleculizer; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Original Author: # Nathan Addy, Scientific Programmer Email: addy@molsci.org # The Molecular Sciences Institute Email: addy@molsci.org # # ############################################################################### from moleculizermol import MoleculizerMol, MoleculizerSmallMol, MoleculizerModMol, isSmallMol, isModMol from util import DataUnifier from xmlobject import XmlObject import pdb # This class parses a Mols Block into a list of small-mols and big mols. # It also manages the list of modifications. class MoleculeDictionary: class DuplicateMolDefinitionException(Exception): pass class BadMolDefinitionException(Exception): pass listOfNullModifications = ["none"] def __init__(self, moleculeBlock, paramDict): self.rawMoleculeDefinitions = moleculeBlock[:] self.paramDict = paramDict self.registeredMoleculesDictionary = {} self.smallMolsDictionary = {} self.modMolsDictionary = {} self.initialize() def initialize(self): self.rawMoleculeDefinitions = DataUnifier( self.rawMoleculeDefinitions ) for line in self.rawMoleculeDefinitions: if isSmallMol(line): print "SM: %s" % line MoleculizerSmallMol(line) elif isModMol(line): print "MM: %s" % line MoleculizerModMol(line) else: print "'%s' is neither a ModMol nor a SmallMol, according to the isSmallMol and isModMol functions." 
% line raise "Hello" def parseMoleculeTypesLine(self, moleculeTypesLine): parsedMol = MoleculizerMol(moleculeTypesLine) parsedMolName = parsedMol.getName() if parsedMolName in self.registeredMoleculesDictionary.keys(): raise DuplicateMolDefinitionException("Error, molecule %s already defined in the MoleculeInterpreter" % parsedMolName) self.registeredMoleculesDictionary[parsedMolName] = parsedMol def getUniversalModificationList(self): return MoleculizerMol.modificationStates[:] def addModifications(self, parentElmt): for modification in self.getUniversalModificationList(): modificationTypeElmt = XmlObject("modification") modificationTypeElmt.addAttribute("name", modification) modificationTypeElmt.attachToParent(parentElmt) weightElmt = XmlObject("weight-delta") weightElmt.attachToParent(modificationTypeElmt) if self.representsNullModification(modification): weightDelta = 0.0 else: weightDelta = 1.0 weightElmt.addAttribute("daltons", weightDelta) def addMols(self, parentElmt): for molName in self.registeredMoleculesDictionary.keys(): self.addModMolElmtToMolsElmt(parentElmt, self.registeredMoleculesDictionary[molName]) def addModMolElmt(self, parentElmt): pass def addModMolElmtToMolsElmt(self, xmlObject, moleculizerMolObject): assert(isinstance(xmlObject, XmlObject)) assert(isinstance(moleculizerMolObject, MoleculizerMol)) newModMol = XmlObject("mod-mol") newModMol.addAttribute("name", moleculizerMolObject.getName()) weightElement = XmlObject("weight") # Obviously this is one of the big deficiencies of this thing. What shall # we set the (mandatory) weights to? For now, let's just put in something # arbitratry. But this is a big issue that ought to be fixed as soon as all # the basic facilities of the code have been built in. if moleculizerMolObject.getName() == "Pheromone": weightElement.addAttribute("daltons", 10.0) else: weightElement.addAttribute("daltons", 100.0) newModMol.addSubElement(weightElement) for binding in moleculizerMolObject.bindingSites: self.addBindingSiteElmtToModMolElmt(binding, moleculizerMolObject, newModMol) for modification in moleculizerMolObject.modificationSites: modSite, defaultModState = modification modSiteElmt = XmlObject("mod-site") modSiteElmt.addAttribute("name", modSite) defModRefElmt = XmlObject("default-mod-ref") defModRefElmt.addAttribute("name", defaultModState) defModRefElmt.attachToParent(modSiteElmt).attachToParent(newModMol) xmlObject.addSubElement(newModMol) return def addBindingSiteElmtToModMolElmt(self, bindingName, moleculizerMol, xmlObject): newBindingElmt = XmlObject("binding-site") newBindingElmt.addAttribute("name", bindingName) defaultShape = XmlObject("default-shape-ref") defaultShape.addAttribute("name", "default") defaultShape.attachToParent(newBindingElmt) for shapeName in moleculizerMol.bindingSites[bindingName]: siteShapeElmt = XmlObject("site-shape") siteShapeElmt.addAttribute("name", shapeName) siteShapeElmt.attachToParent(newBindingElmt) xmlObject.addSubElement(newBindingElmt) return def representsNullModification(self, modificationType): return modificationType.lower() in MoleculeDictionary.listOfNullModifications
gpl-2.0
4,373,819,553,800,396,000
37.664634
130
0.674972
false
4.04917
false
false
false
mpi-sws-rse/antevents-python
examples/event_library_comparison/event.py
1
8071
"""This version uses a traditional event-driven version, using continuation passing style. Each method call is passed a completion callback and an error callback """ from statistics import median import json import asyncio import random import time import hbmqtt.client from collections import deque from antevents.base import SensorEvent URL = "mqtt://localhost:1883" class RandomSensor: def __init__(self, sensor_id, mean=100.0, stddev=20.0, stop_after_events=None): self.sensor_id = sensor_id self.mean = mean self.stddev = stddev self.stop_after_events = stop_after_events if stop_after_events is not None: def generator(): for i in range(stop_after_events): yield round(random.gauss(mean, stddev), 1) else: # go on forever def generator(): while True: yield round(random.gauss(mean, stddev), 1) self.generator = generator() def sample(self): return self.generator.__next__() def __repr__(self): if self.stop_after_events is None: return 'RandomSensor(%s, mean=%s, stddev=%s)' % \ (self.sensor_id, self.mean, self.stddev) else: return 'RandomSensor(%s, mean=%s, stddev=%s, stop_after_events=%s)' % \ (self.sensor_id, self.mean, self.stddev, self.stop_after_events) class PeriodicMedianTransducer: """Emit an event once every ``period`` input events. The value is the median of the inputs received since the last emission. """ def __init__(self, period=5): self.period = period self.samples = [None for i in range(period)] self.events_since_last = 0 self.last_event = None # this is used in emitting the last event def step(self, v): self.samples[self.events_since_last] = v.val self.events_since_last += 1 if self.events_since_last==self.period: val = median(self.samples) event = SensorEvent(sensor_id=v.sensor_id, ts=v.ts, val=val) self.events_since_last = 0 return event else: self.last_event = v # save in case we complete before completing a period return None def complete(self): if self.events_since_last>0: # if we have some partial state, we emit one final event that # averages whatever we saw since the last emission. return SensorEvent(sensor_id=self.last_event.sensor_id, ts=self.last_event.ts, val=median(self.samples[0:self.events_since_last])) def csv_writer(evt): print("csv_writer(%s)" % repr(evt)) class MqttWriter: """All the processing is asynchronous. We ensure that a given send has completed and the callbacks called before we process the next one. 
""" def __init__(self, url, topic, event_loop): self.url = url self.topic = topic self.client = hbmqtt.client.MQTTClient(loop=event_loop) self.event_loop = event_loop self.connected = False self.pending_task = None self.request_queue = deque() def _to_message(self, msg): return bytes(json.dumps((msg.sensor_id, msg.ts, msg.val),), encoding='utf-8') def _request_done(self, f, completion_cb, error_cb): assert f==self.pending_task self.pending_task = None exc = f.exception() if exc: self.event_loop.call_soon(error_cb, exc) else: self.event_loop.call_soon(completion_cb) if len(self.request_queue)>0: self.event_loop.call_soon(self._process_queue) def _process_queue(self): assert self.pending_task == None assert len(self.request_queue)>0 (msg, completion_cb, error_cb) = self.request_queue.popleft() if msg is not None: print("send from queue: %s" % msg) self.pending_task = self.event_loop.create_task( self.client.publish(self.topic, msg) ) else: # None means that we wanted a disconnect print("disconnect") self.pending_task = self.event_loop.create_task( self.client.disconnect() ) self.pending_task.add_done_callback(lambda f: self._request_done(f, completion_cb, error_cb)) def send(self, msg, completion_cb, error_cb): if not self.connected: print("attempting connection") self.request_queue.append((self._to_message(msg), completion_cb, error_cb),) self.connected = True self.pending_task = self.event_loop.create_task(self.client.connect(self.url)) def connect_done(f): assert f==self.pending_task print("connected") self.pending_task = None self.event_loop.call_soon(self._process_queue) self.pending_task.add_done_callback(connect_done) elif self.pending_task: self.request_queue.append((self._to_message(msg), completion_cb, error_cb),) else: print("sending %s" % self._to_message(msg)) self.pending_task = self.event_loop.create_task( self.client.publish(self.topic, self._to_message(msg)) ) self.pending_task.add_done_callback(lambda f: self._request_done(f, completion_cb, error_cb)) def disconnect(self, completion_cb, error_cb, drop_queue=False): if not self.connected: return if len(self.request_queue)>0 and drop_queue: # for error situations self.request_queue = deque() if self.pending_task: self.request_queue.append((None, completion_cb, error_cb),) else: print("disconnecting") self.pending_task = self.event_loop.create_task( self.client.disconnect() ) self.pending_task.add_done_callback(lambda f: self._request_done(f, completion_cb, error_cb)) def sample_and_process(sensor, mqtt_writer, xducer, completion_cb, error_cb): try: sample = sensor.sample() except StopIteration: final_event = xducer.complete() if final_event: mqtt_writer.send(final_event, lambda: mqtt_writer.disconnect(lambda: completion_cb(False), error_cb), error_cb) else: mqtt_writer.disconnect(lambda: completion_cb(False), error_cb) return event = SensorEvent(sensor_id=sensor.sensor_id, ts=time.time(), val=sample) csv_writer(event) median_event = xducer.step(event) if median_event: mqtt_writer.send(median_event, lambda: completion_cb(True), error_cb) else: completion_cb(True) sensor = RandomSensor('sensor-2', stop_after_events=12) transducer = PeriodicMedianTransducer(5) event_loop = asyncio.get_event_loop() writer = MqttWriter(URL, sensor.sensor_id, event_loop) def loop(): def completion_cb(more): if more: event_loop.call_later(0.5, loop) else: print("all done, no more callbacks to schedule") event_loop.stop() def error_cb(e): print("Got error: %s" % e) event_loop.stop() event_loop.call_soon( lambda: sample_and_process(sensor, writer, 
transducer, completion_cb, error_cb) ) event_loop.call_soon(loop) event_loop.run_forever() print("that's all folks")
apache-2.0
4,810,702,886,399,499,000
37.251185
100
0.563127
false
4.031469
false
false
false
saikrishnar/vsm_preparer
scripts/quincontext.py
1
1778
#! /usr/bin/python

import os


def main(folder):
    #vectors = []
    #f = open('../dictionary/vectors.txt')
    #for line in f:
    #    representation = line.strip('\n')
    #    vectors.append(representation)
    #f.close()

    for d, ds, fs in os.walk(folder):
        for fname in fs:
            if fname[-4:] != '.dur':
                continue
            fullfname = d + '/' + fname
            phone_array = []
            dur_array = []
            fr = open(fullfname)
            for line in fr:
                if line.split('\n')[0] == '#':
                    continue
                [phone, dur] = line.rstrip('\n').split()
                phone_array.append(phone)
                dur_array.append(dur)

            fw = open(fullfname[:-4] + '.quindur', 'w')
            for i in range(2, len(dur_array) - 2):
                phoneme_2p = phone_array[i-2]
                #phoneme_2p_index = uniquephone_list.index(phoneme_2p)
                phoneme_1p = phone_array[i-1]
                #phoneme_1p_index = uniquephone_list.index(phoneme_1p)
                phoneme = phone_array[i]
                #phoneme_index = uniquephone_list.index(phoneme)
                phoneme_1n = phone_array[i+1]
                #phoneme_1n_index = uniquephone_list.index(phoneme_1n)
                phoneme_2n = phone_array[i+2]
                #phoneme_2n_index = uniquephone_list.index(phoneme_2n)
                duration = dur_array[i]
                fw.write(str(float(duration)) + ' ' + phoneme_2p + ' ' + phoneme_1p + ' ' + phoneme + ' ' + phoneme_1n + ' ' + phoneme_2n + '\n')
            fw.close()
            fr.close()


if __name__ == '__main__':
    folder = '../lab'
    main(folder)
gpl-2.0
4,492,283,820,192,578,000
34.56
151
0.45838
false
3.493124
false
false
false
Eksmo/calibre
src/calibre/gui2/viewer/position.py
1
1815
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import json


class PagePosition(object):

    def __init__(self, document):
        self.document = document

    @property
    def viewport_cfi(self):
        ans = None
        res = self.document.mainFrame().evaluateJavaScript('''
            ans = 'undefined';
            if (window.paged_display) {
                ans = window.paged_display.current_cfi();
                if (!ans) ans = 'undefined';
            }
            ans;
        ''')
        if res.isValid() and not res.isNull() and res.type() == res.String:
            c = unicode(res.toString())
            if c != 'undefined':
                ans = c
        return ans

    def scroll_to_cfi(self, cfi):
        if cfi:
            cfi = json.dumps(cfi)
            self.document.mainFrame().evaluateJavaScript(
                'paged_display.jump_to_cfi(%s)' % cfi)

    @property
    def current_pos(self):
        ans = self.viewport_cfi
        if not ans:
            ans = self.document.scroll_fraction
        return ans

    def __enter__(self):
        self.save()

    def __exit__(self, *args):
        self.restore()

    def save(self):
        self._cpos = self.current_pos

    def restore(self):
        if self._cpos is None:
            return
        self.to_pos(self._cpos)
        self._cpos = None

    def to_pos(self, pos):
        if isinstance(pos, (int, float)):
            self.document.scroll_fraction = pos
        else:
            self.scroll_to_cfi(pos)

    def set_pos(self, pos):
        self._cpos = pos
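The class is intended to be used as a context manager around operations that invalidate the scroll position; a minimal sketch, where `document` and `relayout()` stand in for the viewer's real objects:

pos = PagePosition(document)
with pos:           # save() records the current CFI or scroll fraction
    relayout()      # anything that changes pagination or scrolling
# on exit, restore() puts the viewport back to the saved position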
gpl-3.0
-6,051,221,796,672,716,000
24.928571
75
0.534986
false
3.659274
false
false
false
openego/oeplatform
modelview/migrations/0022_auto_20160303_2233.py
1
1468
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-03-03 21:33
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [("modelview", "0021_auto_20160303_2233")]

    operations = [
        migrations.AlterField(
            model_name="energymodel",
            name="model_file_format",
            field=models.CharField(
                choices=[
                    (".exe", ".exe"),
                    (".gms", ".gms"),
                    (".py", ".py"),
                    (".xls", ".xls"),
                    ("other", "other"),
                ],
                default="other",
                help_text="In which format is the model saved?",
                max_length=5,
                verbose_name="Model file format",
            ),
        ),
        migrations.AlterField(
            model_name="energymodel",
            name="model_input",
            field=models.CharField(
                choices=[
                    (".csv", ".csv"),
                    (".py", ".py"),
                    ("text", "text"),
                    (".xls", ".xls"),
                    ("other", "other"),
                ],
                default="other",
                help_text="Of which file format are the input and output data?",
                max_length=5,
                verbose_name="Input/output data file format",
            ),
        ),
    ]
agpl-3.0
-647,503,568,038,892,000
30.234043
80
0.419619
false
4.781759
false
false
false
AjabWorld/ajabsacco
ajabsacco/core/facades/loans/transactions.py
1
8484
from ajabsacco.core.models import ( LoanTransactionEntry, LoanProduct, LoanAccount, Message as SMSMessage, ) from decimal import Decimal as D from django.db.models import Q, Sum, F from django.utils import timezone from django.db import transaction as db_transaction from ajabsacco.core import codes from ajabsacco.core.sms import templates from ajabsacco.core.utils import record_log, month_delta from ajabsacco.core.exceptions import * import ledger as ledger_facades from ajabsacco.core.facades import transactions as transaction_facades from ajabsacco.core.facades.loans import validations as validation_facades import logging logger = logging.getLogger('core.ajabsacco.loans') import uuid def allocate_repayment(loan_account, amount, *args, **kwargs): with db_transaction.atomic(): fee_accrued = ledger_facades.loan_account_fees_due(loan_account) penalties_accrued = ledger_facades.loan_account_penalties_due(loan_account) interest_accrued = ledger_facades.loan_account_interest_due(loan_account) principal_accrued = ledger_facades.loan_account_principal_due(loan_account) #1. Align the order we will deduct the accruals accruals = { LoanProduct.ALLOCATION_CHOICE_FEE: (fee_accrued or D('0.0'), post_loan_fee), LoanProduct.ALLOCATION_CHOICE_PENALTY: (penalties_accrued or D('0.0'), post_penalty_fee), LoanProduct.ALLOCATION_CHOICE_INTEREST: (interest_accrued or D('0.0'), post_loan_interest), } amount = (amount or 0) #get a sum of all accruals total_accruals = sum(i[0] for i in accruals.values()) #Ensure we have sane values if (amount > 0) and (principal_accrued > 0): transaction_id = uuid.uuid4() #setup allocation balance, to help us check to total allocation allocation_balance = amount if total_accruals > 0: items_allocated = 0 #Loop through the allocation order for allocation_item in LoanProduct.ALLOCATION_ORDER: #Loop through all the accruals we are expecting to collect for accrued_item, allocation_tuple in accruals.iteritems(): #put aside the variables from the turple accrued_amount, transaction_func = allocation_tuple #if allocation item is equal to accrued item code, and accrued amount is more than 1 #Check to ensure we do not get to negative numbers if (allocation_item == accrued_item) and (accrued_amount > 0) and (allocation_balance > 0): #if amount accrued is sizable, deduct transaction_func(loan_account, accrued_amount, transaction_id=transaction_id) #stamp new allocation items_allocated += 1 #deduct amount posted from balance allocation_balance -= accrued_amount post_loan_principal( loan_account, allocation_balance, transaction_id=transaction_id ) loan_account.last_repayment_date = timezone.now() loan_account.save() else: logger.debug("amount: %s and total_accruals %s" % (amount, total_accruals)) raise AmountNotSufficientException() def apply_accruals(loan_account, approved_by=None): with db_transaction.atomic(): date_disbursed = loan_account.date_disbursed if date_disbursed is None: raise ActionOnUndisbursedLoanException( "You cannot apply accruals on Un-disbursed Loan %s" % loan_account.account_number) date_today = timezone.now() month_diff = month_delta(date_disbursed, date_today, days_of_the_month=30) grace_period = loan_account.grace_period grace_period_type = loan_account.product.grace_period_type within_grace_period = ((month_diff - grace_period) < 1) within_repayment_period = ((month_diff - loan_account.repayment_period) < 1) if (not grace_period == 0): if (within_grace_period): if (grace_period_type == LoanProduct.FULL_GRACE_PERIOD): #No need to proceed, we don't want to accrue anything 
interest_due = 0 elif (grace_period_type == LoanProduct.PRINCIPAL_GRACE_PERIOD): principal_due = loan_account.amount interest_due = ( (principal_due * loan_account.interest_rate) / loan_account.repayment_period ) else: if within_repayment_period: principal_due = ledger_facades.loan_account_principal_due(loan_account) interest_due = ( (principal_due * (loan_account.interest_rate / D('100.0'))) / loan_account.repayment_period ) if interest_due > 0: apply_interest_to_account(loan_account, interest_due) def disburse_loan(loan_account, *args, **kwargs): with db_transaction.atomic(): validation_facades.validate_disbursement(loan_account) debit_entry, credit_entry = transaction_facades.create_transaction( LoanTransactionEntry, loan_account, loan_account.amount, transaction_type=codes.TRANSACTION_TYPE_LOAN_DISBURSAL, *args, **kwargs ) loan_account.status = LoanAccount.ACTIVE loan_account.date_disbursed = timezone.now() loan_account.save() return (debit_entry, credit_entry) def apply_interest_to_account(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_INTEREST_APPLY, *args, **kwargs) def apply_fee_to_account(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_FEE_APPLY, *args, **kwargs) def apply_penalty_to_account(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_PENALTY_APPLY, *args, **kwargs) def write_off_loan_principal(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_PRINCIPAL_WRITE_OFF, *args, **kwargs) def write_off_loan_interest(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_INTEREST_WRITE_OFF, *args, **kwargs) def write_off_loan_penalty(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_PENALTY_WRITE_OFF, *args, **kwargs) def write_off_loan_fee(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_FEE_WRITE_OFF, *args, **kwargs) def post_loan_principal(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_PRINCIPAL_POSTING, *args, **kwargs) def post_loan_interest(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_INTEREST_POSTING, *args, **kwargs) def post_loan_fee(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_FEE_POSTING, *args, **kwargs) def post_penalty_fee(loan_account, amount, *args, **kwargs): return transaction_facades.create_transaction(LoanTransactionEntry, loan_account, amount, transaction_type=codes.TRANSACTION_TYPE_PENALTY_POSTING, *args, **kwargs)
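The bucket ordering in allocate_repayment can be hard to see through the Django plumbing; below is a framework-free sketch of the same idea, with hypothetical amounts and a plain dict in place of the ledger postings the real code creates:

from decimal import Decimal as D

def sketch_allocation(amount, dues):
    """Pay fee, then penalty, then interest in full; the remainder goes to principal."""
    balance = D(amount)
    posted = {}
    for bucket in ("fee", "penalty", "interest"):
        due = dues.get(bucket, D("0"))
        posted[bucket] = due
        balance -= due
    posted["principal"] = balance
    return posted

print(sketch_allocation("1000.00", {"fee": D("50"), "interest": D("200")}))
# -> fee 50, penalty 0, interest 200, principal 750.00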
apache-2.0
3,779,830,831,397,354,500
45.360656
115
0.658298
false
3.649032
false
false
false
ArcherSys/ArcherSys
Lib/test/reperf.py
1
1754
import re
import time

def main():
    s = "\13hello\14 \13world\14 " * 1000
    p = re.compile(r"([\13\14])")
    timefunc(10, p.sub, "", s)
    timefunc(10, p.split, s)
    timefunc(10, p.findall, s)

def timefunc(n, func, *args, **kw):
    t0 = time.perf_counter()
    try:
        for i in range(n):
            result = func(*args, **kw)
        return result
    finally:
        t1 = time.perf_counter()
        if n > 1:
            print(n, "times", end=' ')
        print(func.__name__, "%.3f" % (t1-t0), "CPU seconds")

main()
mit
-53,206,256,789,101,470
22.386667
61
0.513683
false
2.819936
false
false
false