Dataset columns:
    repo_name: string (length 5 to 100)
    path: string (length 4 to 251)
    copies: string (990 distinct values)
    size: string (length 4 to 7)
    content: string (length 499 to 1.05M)
    license: string (15 distinct values)
yaojingwu1992/XlsxWriter
xlsxwriter/test/comparison/test_chart_gap05.py
8
1698
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2015, John McNamara, jmcnamara@cpan.org # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.maxDiff = None filename = 'chart_gap05.xlsx' test_dir = 'xlsxwriter/test/comparison/' self.got_filename = test_dir + '_test_' + filename self.exp_filename = test_dir + 'xlsx_files/' + filename self.ignore_files = [] self.ignore_elements = {} def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({'type': 'bar'}) chart.axis_ids = [45938176, 59715584] chart.axis2_ids = [70848512, 54519680] data = [[1, 2, 3, 4, 5], [6, 8, 6, 4, 2]] worksheet.write_column('A1', data[0]) worksheet.write_column('B1', data[1]) chart.add_series({'values': '=Sheet1!$A$1:$A$5', 'gap': 51, 'overlap': 12}) chart.add_series({'values': '=Sheet1!$B$1:$B$5', 'y2_axis': 1, 'gap': 251, 'overlap': -27}) chart.set_x2_axis({'label_position': 'next_to'}) worksheet.insert_chart('E9', chart) workbook.close() self.assertExcelEqual()
bsd-2-clause
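The XlsxWriter test above exercises the per-series 'gap' and 'overlap' options on a bar chart with a secondary axis. A minimal standalone sketch of the same chart, assuming XlsxWriter is installed; the output filename demo_gap.xlsx is illustrative:

    import xlsxwriter

    workbook = xlsxwriter.Workbook('demo_gap.xlsx')
    worksheet = workbook.add_worksheet()
    chart = workbook.add_chart({'type': 'bar'})

    worksheet.write_column('A1', [1, 2, 3, 4, 5])
    worksheet.write_column('B1', [6, 8, 6, 4, 2])

    # 'gap' sets the space between bars, 'overlap' shifts series bars relative to each other
    chart.add_series({'values': '=Sheet1!$A$1:$A$5', 'gap': 51, 'overlap': 12})
    chart.add_series({'values': '=Sheet1!$B$1:$B$5', 'y2_axis': 1, 'gap': 251, 'overlap': -27})

    worksheet.insert_chart('E9', chart)
    workbook.close()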
chryswoods/SireTests
unittests/SireMove/reti_oscillator.py
1
2980
from Sire.IO import * from Sire.MM import * from Sire.Mol import * from Sire.Move import * from Sire.MM import * from Sire.System import * from Sire.CAS import * from Sire.Vol import * from Sire.Units import * import Sire.Stream protodir = "/Users/chris/Work/ProtoMS/" print("Parameterising the oscillator...") oscillator = PDB().readMolecule("test/io/oscillator.pdb") oscillator = oscillator.edit().rename("harm1t2").commit() protoms = ProtoMS("%s/protoms2" % protodir) protoms.addParameterFile( "test/io/oscillators.ff" ) oscillator = protoms.parameterise(oscillator, ProtoMS.SOLUTE) print("...parameterisation complete!") internalff = InternalFF("InternalFF") internalff.add( oscillator ) system = System() system.add( internalff ) lam = Symbol("lambda") system.setComponent(lam, 0.01) system.setComponent(system.totalComponent(), lam * internalff.components().total()) system.add( "average energy", MonitorComponent(system.totalComponent()) ) lambda_values = [ 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9 ] replicas = Replicas(system, len(lambda_values)) replicas.setLambdaComponent(lam) for i in range(0,len(lambda_values)): replicas.setLambdaValue(i, lambda_values[i]) zmatmove = ZMatMove( internalff.groups()[0] ) zmatmove.setTemperature( 298 * kelvin ) nsubmoves = 1000 replicas.setSubMoves( SameMoves(zmatmove) ) replicas.setNSubMoves(nsubmoves) # Average energy should be 1/2 kT theo_nrg = 0.5 * gasr * 298 print("Running a simulation - initial energy = %f kcal mol-1" % system.energy().to(kcal_per_mol)) repexmove = RepExMove() lambda_trajectory = [] i = -1 def printInfo(replicas): lamtraj = replicas.lambdaTrajectory() lambda_trajectory.append(lamtraj) ids = replicas.replicaIDs() for j in range(0,replicas.count()): replica = replicas[j] system = replica.subSystem() zmatmove = replica.subMoves()[0] print("Replica %d: lambda = %f: ID = %d" % (j, replica.lambdaValue(), ids[j])) print("%d : Energy = %f kcal mol-1" % ( (i+1)*replica.nSubMoves(), \ system.energy().to(kcal_per_mol) )) avg_nrg = system[MonitorName("average energy")].accumulator().average() print(" Average energy = %f kcal mol-1 : error = %f kcal mol-1" % (avg_nrg, theo_nrg-avg_nrg)) print(" Acceptance ratio = %f %%" % (100 * zmatmove.acceptanceRatio())) printInfo(replicas) #Sire.Stream.save( (replicas, repexmove), "test/SireMove/reti_oscillator.s3" ) for i in range(0,10): sim = SupraSim.run( replicas, repexmove, 1 ) replicas = sim.system() repexmove = sim.moves()[0] printInfo(replicas) print(" Replica exchange acceptance ratio: %f %%" % (100*repexmove.acceptanceRatio())) print("\nReplica trajectory") for i in range(0, len(lambda_trajectory)): print("%d " % i, end=' ') for lamval in lambda_trajectory[i]: print("%f " % lamval, end=' ') print("\n", end=' ')
gpl-2.0
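The replica-exchange script above compares the sampled average energy against the equipartition value of half kT per harmonic degree of freedom. A back-of-the-envelope sketch of that target in plain Python, without Sire; the gas-constant value below is an assumption standing in for Sire's gasr:

    R_KCAL = 1.987204e-3   # gas constant in kcal/(mol K), stands in for Sire's gasr
    T = 298.0              # simulation temperature in kelvin, as in the script

    theo_nrg = 0.5 * R_KCAL * T
    print("expected average energy = %.4f kcal mol-1" % theo_nrg)   # ~0.2961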
houzhenggang/hiwifi-openwrt-HC5661-HC5761
staging_dir/host/lib/python2.7/csv.py
174
16344
""" csv.py - read/write/investigate CSV files """ import re from functools import reduce from _csv import Error, __version__, writer, reader, register_dialect, \ unregister_dialect, get_dialect, list_dialects, \ field_size_limit, \ QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \ __doc__ from _csv import Dialect as _Dialect try: from cStringIO import StringIO except ImportError: from StringIO import StringIO __all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE", "Error", "Dialect", "__doc__", "excel", "excel_tab", "field_size_limit", "reader", "writer", "register_dialect", "get_dialect", "list_dialects", "Sniffer", "unregister_dialect", "__version__", "DictReader", "DictWriter" ] class Dialect: """Describe an Excel dialect. This must be subclassed (see csv.excel). Valid attributes are: delimiter, quotechar, escapechar, doublequote, skipinitialspace, lineterminator, quoting. """ _name = "" _valid = False # placeholders delimiter = None quotechar = None escapechar = None doublequote = None skipinitialspace = None lineterminator = None quoting = None def __init__(self): if self.__class__ != Dialect: self._valid = True self._validate() def _validate(self): try: _Dialect(self) except TypeError, e: # We do this for compatibility with py2.3 raise Error(str(e)) class excel(Dialect): """Describe the usual properties of Excel-generated CSV files.""" delimiter = ',' quotechar = '"' doublequote = True skipinitialspace = False lineterminator = '\r\n' quoting = QUOTE_MINIMAL register_dialect("excel", excel) class excel_tab(excel): """Describe the usual properties of Excel-generated TAB-delimited files.""" delimiter = '\t' register_dialect("excel-tab", excel_tab) class DictReader: def __init__(self, f, fieldnames=None, restkey=None, restval=None, dialect="excel", *args, **kwds): self._fieldnames = fieldnames # list of keys for the dict self.restkey = restkey # key to catch long rows self.restval = restval # default value for short rows self.reader = reader(f, dialect, *args, **kwds) self.dialect = dialect self.line_num = 0 def __iter__(self): return self @property def fieldnames(self): if self._fieldnames is None: try: self._fieldnames = self.reader.next() except StopIteration: pass self.line_num = self.reader.line_num return self._fieldnames @fieldnames.setter def fieldnames(self, value): self._fieldnames = value def next(self): if self.line_num == 0: # Used only for its side effect. 
self.fieldnames row = self.reader.next() self.line_num = self.reader.line_num # unlike the basic reader, we prefer not to return blanks, # because we will typically wind up with a dict full of None # values while row == []: row = self.reader.next() d = dict(zip(self.fieldnames, row)) lf = len(self.fieldnames) lr = len(row) if lf < lr: d[self.restkey] = row[lf:] elif lf > lr: for key in self.fieldnames[lr:]: d[key] = self.restval return d class DictWriter: def __init__(self, f, fieldnames, restval="", extrasaction="raise", dialect="excel", *args, **kwds): self.fieldnames = fieldnames # list of keys for the dict self.restval = restval # for writing short dicts if extrasaction.lower() not in ("raise", "ignore"): raise ValueError, \ ("extrasaction (%s) must be 'raise' or 'ignore'" % extrasaction) self.extrasaction = extrasaction self.writer = writer(f, dialect, *args, **kwds) def writeheader(self): header = dict(zip(self.fieldnames, self.fieldnames)) self.writerow(header) def _dict_to_list(self, rowdict): if self.extrasaction == "raise": wrong_fields = [k for k in rowdict if k not in self.fieldnames] if wrong_fields: raise ValueError("dict contains fields not in fieldnames: " + ", ".join(wrong_fields)) return [rowdict.get(key, self.restval) for key in self.fieldnames] def writerow(self, rowdict): return self.writer.writerow(self._dict_to_list(rowdict)) def writerows(self, rowdicts): rows = [] for rowdict in rowdicts: rows.append(self._dict_to_list(rowdict)) return self.writer.writerows(rows) # Guard Sniffer's type checking against builds that exclude complex() try: complex except NameError: complex = float class Sniffer: ''' "Sniffs" the format of a CSV file (i.e. delimiter, quotechar) Returns a Dialect object. ''' def __init__(self): # in case there is more than one possible delimiter self.preferred = [',', '\t', ';', ' ', ':'] def sniff(self, sample, delimiters=None): """ Returns a dialect (or None) corresponding to the sample """ quotechar, doublequote, delimiter, skipinitialspace = \ self._guess_quote_and_delimiter(sample, delimiters) if not delimiter: delimiter, skipinitialspace = self._guess_delimiter(sample, delimiters) if not delimiter: raise Error, "Could not determine delimiter" class dialect(Dialect): _name = "sniffed" lineterminator = '\r\n' quoting = QUOTE_MINIMAL # escapechar = '' dialect.doublequote = doublequote dialect.delimiter = delimiter # _csv.reader won't accept a quotechar of '' dialect.quotechar = quotechar or '"' dialect.skipinitialspace = skipinitialspace return dialect def _guess_quote_and_delimiter(self, data, delimiters): """ Looks for text enclosed between two identical quotes (the probable quotechar) which are preceded and followed by the same character (the probable delimiter). For example: ,'some text', The quote with the most wins, same with the delimiter. If there is no quotechar the delimiter can't be determined this way. """ matches = [] for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?", '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)', # ".*?", '(?P<delim>>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)', # ,".*?" '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'): # ".*?" 
(no delim, no space) regexp = re.compile(restr, re.DOTALL | re.MULTILINE) matches = regexp.findall(data) if matches: break if not matches: # (quotechar, doublequote, delimiter, skipinitialspace) return ('', False, None, 0) quotes = {} delims = {} spaces = 0 for m in matches: n = regexp.groupindex['quote'] - 1 key = m[n] if key: quotes[key] = quotes.get(key, 0) + 1 try: n = regexp.groupindex['delim'] - 1 key = m[n] except KeyError: continue if key and (delimiters is None or key in delimiters): delims[key] = delims.get(key, 0) + 1 try: n = regexp.groupindex['space'] - 1 except KeyError: continue if m[n]: spaces += 1 quotechar = reduce(lambda a, b, quotes = quotes: (quotes[a] > quotes[b]) and a or b, quotes.keys()) if delims: delim = reduce(lambda a, b, delims = delims: (delims[a] > delims[b]) and a or b, delims.keys()) skipinitialspace = delims[delim] == spaces if delim == '\n': # most likely a file with a single column delim = '' else: # there is *no* delimiter, it's a single column of quoted data delim = '' skipinitialspace = 0 # if we see an extra quote between delimiters, we've got a # double quoted format dq_regexp = re.compile(r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \ {'delim':delim, 'quote':quotechar}, re.MULTILINE) if dq_regexp.search(data): doublequote = True else: doublequote = False return (quotechar, doublequote, delim, skipinitialspace) def _guess_delimiter(self, data, delimiters): """ The delimiter /should/ occur the same number of times on each row. However, due to malformed data, it may not. We don't want an all or nothing approach, so we allow for small variations in this number. 1) build a table of the frequency of each character on every line. 2) build a table of frequencies of this frequency (meta-frequency?), e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows, 7 times in 2 rows' 3) use the mode of the meta-frequency to determine the /expected/ frequency for that character 4) find out how often the character actually meets that goal 5) the character that best meets its goal is the delimiter For performance reasons, the data is evaluated in chunks, so it can try and evaluate the smallest portion of the data possible, evaluating additional chunks as necessary. 
""" data = filter(None, data.split('\n')) ascii = [chr(c) for c in range(127)] # 7-bit ASCII # build frequency tables chunkLength = min(10, len(data)) iteration = 0 charFrequency = {} modes = {} delims = {} start, end = 0, min(chunkLength, len(data)) while start < len(data): iteration += 1 for line in data[start:end]: for char in ascii: metaFrequency = charFrequency.get(char, {}) # must count even if frequency is 0 freq = line.count(char) # value is the mode metaFrequency[freq] = metaFrequency.get(freq, 0) + 1 charFrequency[char] = metaFrequency for char in charFrequency.keys(): items = charFrequency[char].items() if len(items) == 1 and items[0][0] == 0: continue # get the mode of the frequencies if len(items) > 1: modes[char] = reduce(lambda a, b: a[1] > b[1] and a or b, items) # adjust the mode - subtract the sum of all # other frequencies items.remove(modes[char]) modes[char] = (modes[char][0], modes[char][1] - reduce(lambda a, b: (0, a[1] + b[1]), items)[1]) else: modes[char] = items[0] # build a list of possible delimiters modeList = modes.items() total = float(chunkLength * iteration) # (rows of consistent data) / (number of rows) = 100% consistency = 1.0 # minimum consistency threshold threshold = 0.9 while len(delims) == 0 and consistency >= threshold: for k, v in modeList: if v[0] > 0 and v[1] > 0: if ((v[1]/total) >= consistency and (delimiters is None or k in delimiters)): delims[k] = v consistency -= 0.01 if len(delims) == 1: delim = delims.keys()[0] skipinitialspace = (data[0].count(delim) == data[0].count("%c " % delim)) return (delim, skipinitialspace) # analyze another chunkLength lines start = end end += chunkLength if not delims: return ('', 0) # if there's more than one, fall back to a 'preferred' list if len(delims) > 1: for d in self.preferred: if d in delims.keys(): skipinitialspace = (data[0].count(d) == data[0].count("%c " % d)) return (d, skipinitialspace) # nothing else indicates a preference, pick the character that # dominates(?) items = [(v,k) for (k,v) in delims.items()] items.sort() delim = items[-1][1] skipinitialspace = (data[0].count(delim) == data[0].count("%c " % delim)) return (delim, skipinitialspace) def has_header(self, sample): # Creates a dictionary of types of data in each column. If any # column is of a single type (say, integers), *except* for the first # row, then the first row is presumed to be labels. If the type # can't be determined, it is assumed to be a string in which case # the length of the string is the determining factor: if all of the # rows except for the first are the same length, it's a header. # Finally, a 'vote' is taken at the end for each column, adding or # subtracting from the likelihood of the first row being a header. 
rdr = reader(StringIO(sample), self.sniff(sample)) header = rdr.next() # assume first row is header columns = len(header) columnTypes = {} for i in range(columns): columnTypes[i] = None checked = 0 for row in rdr: # arbitrary number of rows to check, to keep it sane if checked > 20: break checked += 1 if len(row) != columns: continue # skip rows that have irregular number of columns for col in columnTypes.keys(): for thisType in [int, long, float, complex]: try: thisType(row[col]) break except (ValueError, OverflowError): pass else: # fallback to length of string thisType = len(row[col]) # treat longs as ints if thisType == long: thisType = int if thisType != columnTypes[col]: if columnTypes[col] is None: # add new column type columnTypes[col] = thisType else: # type is inconsistent, remove column from # consideration del columnTypes[col] # finally, compare results against first row and "vote" # on whether it's a header hasHeader = 0 for col, colType in columnTypes.items(): if type(colType) == type(0): # it's a length if len(header[col]) != colType: hasHeader += 1 else: hasHeader -= 1 else: # attempt typecast try: colType(header[col]) except (ValueError, TypeError): hasHeader += 1 else: hasHeader -= 1 return hasHeader > 0
gpl-2.0
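The csv module above pairs a format Sniffer with DictReader/DictWriter: sniff() guesses the dialect from a sample and DictReader maps each row onto the header fields. A short usage sketch against this Python 2.7 module; the sample data is made up:

    import csv
    from StringIO import StringIO

    sample = "name,age\r\nalice,30\r\nbob,25\r\n"

    dialect = csv.Sniffer().sniff(sample)          # delimiter ',', quotechar '"'
    print csv.Sniffer().has_header(sample)         # True: first row looks like labels

    reader = csv.DictReader(StringIO(sample), dialect=dialect)
    for row in reader:
        print row['name'], row['age']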
mtesauro/postern-poc
postern-poc.py
1
13117
#!/usr/bin/env python """ Postern ~~~~~~~~ A proof of concept implementation of a key management agent for use with the barbican server (https://github.com/cloudkeep/barbican). DO NOT USE THIS IN PRODUCTION. IT IS NOT SECURE IN ANY WAY. YOU HAVE BEEN WARNED. :copyright: (c) 2013 by Matt Tesauro :license: Apache 2.0, see LICENSE for details """ import requests import json import logging import uuid import netifaces import socket import platform import time import datetime # Debugging import pprint from collections import defaultdict from errno import ENOENT from stat import S_IFDIR, S_IFLNK, S_IFREG from sys import argv, exit from fuse import FUSE, FuseOSError, Operations, LoggingMixIn from ConfigParser import SafeConfigParser if not hasattr(__builtins__, 'bytes'): bytes = str class Memory(LoggingMixIn, Operations): 'Memory filesystem for a POC for Cloud Keep' def __init__(self): self.files = {} self.data = defaultdict(bytes) self.policy = {} self.access_count = {} self.keys = {} self.fd = 0 now = time.time() self.files['/'] = dict(st_mode=(S_IFDIR | 0755), st_ctime=now, st_mtime=now, st_atime=now, st_nlink=2) # Add data from policy.json to filesystem for index in range(len(policy['policies'])): # Create files from policy new_file = '/' + \ str(policy['policies'][index]['keys'][0]['filename']) self.files[new_file] = dict(st_mode=(S_IFREG | 33056), st_nlink=1, st_size=0, st_ctime=now, st_mtime=now, st_atime=now) self.data[new_file] = \ str(policy['policies'][index]['keys'][0]['secret']) self.files[new_file]['st_size'] = len(self.data[new_file]) # Set policies dict max_access = policy['policies'][index]['max_key_accesses'] time_reboot = \ policy['policies'][index]['time_available_after_reboot'] new_policy = {'max_access': max_access, 'time_reboot': time_reboot} self.policy[new_file] = new_policy # Initialize access count to zero self.access_count[new_file] = 0 # Initialize the file keys - e.g. 
UUID for each file for API self.keys[new_file] = \ str(policy['policies'][index]['keys'][0]['uuid']) # Log to the API that policy has been downloaded msg = 'Policy being enforced for ' + new_file key = self.keys[new_file] api_log(key, msg) # Clear the policy.json dict to remove those values/secrets from memory policy.clear() print 'init() complete' def chmod(self, path, mode): # chmod is not allowed - clear data and panic if called self.clear_data() panic(self.keys[path]) return 0 def chown(self, path, uid, gid): # chown is not allowed - clear data and panic if called self.clear_data() panic(self.keys[path]) def create(self, path, mode): # Nothing but policy defined files are in this filesystem # so creating a new file is not allowed self.clear_data() panic("Create file attempt") return self.fd def getattr(self, path, fh=None): # getattr is used all the time for many operations # - no policy check needed if path not in self.files: raise FuseOSError(ENOENT) return self.files[path] def getxattr(self, path, name, position=0): # getxattr is used all the time for many operations # - no policy check needed attrs = self.files[path].get('attrs', {}) try: return attrs[name] except KeyError: return '' # Should return ENOATTR def listxattr(self, path): # lists extended attributes - not supported but also not harmful # - no policy check needed attrs = self.files[path].get('attrs', {}) return attrs.keys() def mkdir(self, path, mode): # Nothing but policy defined files are in this filesystem # so creating a new file is not allowed self.clear_data() panic("Create file attempt") def open(self, path, flags): # Reads are allowed under the constraint of the policy # However, both reading and moving (unlink) end up calling read # so policy enforcement is best handled there or max_access will be # wrongly incremented since reading a file includes an open() and a # read() call. self.fd += 1 return self.fd def read(self, path, size, offset, fh): # Since several filesystems operations end up here, this is a good # policy enforcement point. Moving or reading files ends up with a # call here so this is the best (and last) place to enforce policy # before access to the data is provided. 
# Check access against policy if not self.check_policy(path): # Violation of policy self.clear_data() panic(self.keys[path]) else: # Log to the API that policy has been downloaded msg = 'Access of ' + path + ' allowed by policy' key = self.keys[path] api_log(key, msg) return self.data[path][offset:offset + size] def readdir(self, path, fh): # readdir is needed for ls and other operations # may consider logging these outside policy in future return ['.', '..'] + [x[1:] for x in self.files if x != '/'] def readlink(self, path): # Symlinks are not supported so calling this is a violation self.clear_data() panic(self.keys[path]) def removexattr(self, path, name): # Extended attributes are not supported and this is a read-only # filesystem so panic if called self.clear_data() panic(self.keys[path]) def rename(self, old, new): # Read-only filesystem so renames are not allowed, panic if called self.clear_data() panic(self.keys[old]) def rmdir(self, path): # Read-only filesystem so renames are not allowed, panic if called self.clear_data() panic(self.keys["Remove dir attempt"]) def setxattr(self, path, name, value, options, position=0): # Read-only filesystem so renames are not allowed, panic if called self.clear_data() panic(self.keys[old]) def statfs(self, path): # Used by du and others to determine file sizes - seems harmless # - no policy check needed return dict(f_bsize=512, f_blocks=4096, f_bavail=2048) def symlink(self, target, source): # symlinks not supported - panic if called self.clear_data() panic("Attempt to create symlink") def truncate(self, path, length, fh=None): # Used for read/write file systems so panic if called self.clear_data() panic(self.keys[path]) def unlink(self, path): # Used by mv if you move a file out of the fuse mounted directory # plus this is read-only filesystem so panic if called self.clear_data() panic(self.keys[path]) def utimens(self, path, times=None): ## DEBUG print 'utimens call, path=', path, ' times=', times # Since this only modifies file access/modification times, no need # to panic if called - no data is disclosed by this now = time.time() atime, mtime = times if times else (now, now) self.files[path]['st_atime'] = atime self.files[path]['st_mtime'] = mtime def write(self, path, data, offset, fh): # Read-only filesystem - panic if called self.clear_data() panic("Write attempt on filesystem") return 0 def clear_data(self): # Clear existing memory structures self.files.clear() self.data.clear() self.policy.clear() for index in range(len(policy)): policy[index] = '' print 'PANIC:' print '\tViolation of policy. 
In memory filesystem erased.\n' def check_policy(self, path): # Check call against current access policy pass_policy = False if self.access_count[path] < self.policy[path]['max_access']: self.access_count[path] +=1 pass_policy = True else: # max_access exceeded - policy check failure return False # Check against time since reboot max_seconds = float(self.policy[path]['time_reboot']) / 60 with open('/proc/uptime', 'r') as f: uptime_seconds = float(f.readline().split()[0]) if uptime_seconds < max_seconds: print 'Within uptime restriction' # pass_policy = True else: print 'Exceeded uptime restriction' # return False return pass_policy def panic(key): "Warn the API that a violation of policy has occured" # Inform the API of the panic condition message = 'Policy violation on ' + socket.gethostname() panic_url = api_url + logging_uri # Stupid json.dumps() chokes on utcnow() so doing json manually for now # no biscuit for json.dumps() panic_data = '{"agent_id": "' + agent_guid + '", ' panic_data += '"received_on": "' + str(datetime.datetime.utcnow()) + '", ' panic_data += '"severity": "PANIC", ' panic_data += '"key_id": "' + key + '", ' panic_data += '"message": "' + message + '"}' # Send the JSON log to the API headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'Accept-Charset': 'ISO-8859-1,utf8;q=0.7,*;q=0.3', 'Accept-Encoding': 'gzip,deflate,sdch'} panic = requests.post(panic_url, data=json.dumps(panic_data), headers=headers) if panic.status_code == 200: print 'Panic log sent to API' def api_log(key, message): "Log data to the API" # Inform the API of the panic condition message += ' on host ' + socket.gethostname() log_url = api_url + logging_uri # Stupid json.dumps() chokes on utcnow() so doing json manually for now # no biscuit for json.dumps() log_data = '{"agent_id": "' + agent_guid + '", ' log_data += '"received_on": "' + str(datetime.datetime.utcnow()) + '", ' log_data += '"severity": "INFO", ' log_data += '"key_id": "' + key + '", ' log_data += '"message": "' + message + '"}' # Send the JSON log to the API headers = {'Content-Type': 'application/json', 'Accept': 'application/json', 'Accept-Charset': 'ISO-8859-1,utf8;q=0.7,*;q=0.3', 'Accept-Encoding': 'gzip,deflate,sdch'} log_call = requests.post(log_url, data=json.dumps(log_data), headers=headers) if log_call.status_code == 200: print 'INFO log sent to API' def pair_data(): "Returns the data needed to pair the agent" # Pairing requires id_guid, ip_addresses[], hostname, os/version, # agent-version, tenant-id, tags[] pair_post = '{"uuid":"' + agent_guid + '"' pair_post += ',"agent_version": "0.1"' # IP addresses pair_post += ',"ip_addresses": [{' for key in netifaces.interfaces(): pair_post += '"' + key + '": "' pair_post += netifaces.ifaddresses(key)[2][0]['addr'] + '",' pair_post = pair_post[:-1] + '}],' # hostname, os, version and tentant ID pair_post += '"hostname": "' + socket.gethostname() + '",' pair_post += '"os_version": "' + platform.platform() + '",' pair_post += '"tenant_id": "' + tenant_id + '",' # tags from config file ## currently hard coded. 
## ToDo read these from config and set them here pair_post += '"tags": [{"0": "web server", "1": "Falcon API"}]' pair_post += '}' return pair_post if __name__ == '__main__': # Set a few variables - some of this should probably be in the config config_file = '/etc/cloudkeep/postern.config' agent_version = '0.1' policy_uri = '/api/123/policies/' pair_uri = '/api/123/agents/' logging_uri = '/api/123/logs/' max_tries = 5 retry_wait = 3 # Read config for settings parser = SafeConfigParser() parser.read(config_file) # Set URL for the Barbican - aka the mothership if (parser.has_option('settings', 'api_url')): api_url = parser.get('settings', 'api_url') else: print 'ERROR:' exit('\tConfiguration file lacks a URL set for api_url\n') # And the rest of the settings if ((parser.has_option('settings', 'agent_guid')) and (len(parser.get('settings', 'agent_guid')) > 0)): agent_guid = parser.get('settings', 'agent_guid') else: agent_guid = uuid.uuid4() parser.set('settings', 'agent_guid', str(agent_guid)) with open(config_file, 'wb') as new_config: parser.write(new_config) if (parser.has_option('settings', 'tenant_id')): tenant_id = parser.get('settings', 'tenant_id') else: print 'ERROR:' exit("\tConfiguration file lacks a ID set for tenant_id\n") if ((parser.has_option('settings', 'mount_point')) and (len(parser.get('settings', 'mount_point')) > 0)): mount_point = parser.get('settings', 'mount_point') else: mount_point = '/etc/keys' #MAT# For Testing #mount_point = '/home/mtesauro/projects/keys' # Loop while pairing with API (if needed) and also downloading the policy policy = False paired = False while not policy: ## Point this at my local server for now api_url = 'http://example.com/cloudkeep/' # Download the policy file r = requests.get(api_url + policy_uri) if r.status_code == 200: policy = r.json() else: policy = False if not policy and not paired: ## Headers to remove the auto-gzip of the requests module headers = {'Content-Type': 'application/json', 'Accept': '*/*', 'Accept-Encoding': 'bogus'} pair = requests.post(api_url + pair_uri, data=pair_data(), headers=headers) if pair.status_code == 200: paired = True # Limit the number of times we'll attempt and exit if exceeded max_tries-=1 if max_tries == 0: print 'Error: \n\tUnable to pair and/or pull policy from the API' exit(1) time.sleep(retry_wait) #MAT# No logging for now ##logging.getLogger().setLevel(logging.DEBUG) fuse = FUSE(Memory(), mount_point, foreground=True)
apache-2.0
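In the Postern agent above, read() is the enforcement point: each key has a per-boot access budget and a time-since-boot window read from /proc/uptime. A standalone sketch of that check outside the FUSE class, assuming the policy's time limit is expressed in minutes (the PoC prints the uptime result but leaves it out of the final decision):

    def check_policy(access_count, max_access, time_reboot_minutes):
        """Return True if one more key read is allowed under the policy."""
        if access_count >= max_access:
            return False                          # access budget exhausted
        with open('/proc/uptime') as f:
            uptime_seconds = float(f.readline().split()[0])
        # the key is only served for a limited window after boot
        return uptime_seconds < time_reboot_minutes * 60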
cloudfoundry/php-buildpack-legacy
builds/runtimes/python-2.7.6/lib/python2.7/ctypes/test/test_strings.py
76
7172
import unittest from ctypes import * from test import test_support class StringArrayTestCase(unittest.TestCase): def test(self): BUF = c_char * 4 buf = BUF("a", "b", "c") self.assertEqual(buf.value, "abc") self.assertEqual(buf.raw, "abc\000") buf.value = "ABCD" self.assertEqual(buf.value, "ABCD") self.assertEqual(buf.raw, "ABCD") buf.value = "x" self.assertEqual(buf.value, "x") self.assertEqual(buf.raw, "x\000CD") buf[1] = "Z" self.assertEqual(buf.value, "xZCD") self.assertEqual(buf.raw, "xZCD") self.assertRaises(ValueError, setattr, buf, "value", "aaaaaaaa") self.assertRaises(TypeError, setattr, buf, "value", 42) def test_c_buffer_value(self, memoryview=memoryview): buf = c_buffer(32) buf.value = "Hello, World" self.assertEqual(buf.value, "Hello, World") self.assertRaises(TypeError, setattr, buf, "value", memoryview("Hello, World")) self.assertRaises(TypeError, setattr, buf, "value", memoryview("abc")) self.assertRaises(ValueError, setattr, buf, "raw", memoryview("x" * 100)) def test_c_buffer_raw(self, memoryview=memoryview): buf = c_buffer(32) buf.raw = memoryview("Hello, World") self.assertEqual(buf.value, "Hello, World") self.assertRaises(TypeError, setattr, buf, "value", memoryview("abc")) self.assertRaises(ValueError, setattr, buf, "raw", memoryview("x" * 100)) def test_c_buffer_deprecated(self): # Compatibility with 2.x with test_support.check_py3k_warnings(): self.test_c_buffer_value(buffer) self.test_c_buffer_raw(buffer) def test_param_1(self): BUF = c_char * 4 buf = BUF() ## print c_char_p.from_param(buf) def test_param_2(self): BUF = c_char * 4 buf = BUF() ## print BUF.from_param(c_char_p("python")) ## print BUF.from_param(BUF(*"pyth")) try: c_wchar except NameError: pass else: class WStringArrayTestCase(unittest.TestCase): def test(self): BUF = c_wchar * 4 buf = BUF(u"a", u"b", u"c") self.assertEqual(buf.value, u"abc") buf.value = u"ABCD" self.assertEqual(buf.value, u"ABCD") buf.value = u"x" self.assertEqual(buf.value, u"x") buf[1] = u"Z" self.assertEqual(buf.value, u"xZCD") class StringTestCase(unittest.TestCase): def XX_test_basic_strings(self): cs = c_string("abcdef") # Cannot call len on a c_string any longer self.assertRaises(TypeError, len, cs) self.assertEqual(sizeof(cs), 7) # The value property is the string up to the first terminating NUL. 
self.assertEqual(cs.value, "abcdef") self.assertEqual(c_string("abc\000def").value, "abc") # The raw property is the total buffer contents: self.assertEqual(cs.raw, "abcdef\000") self.assertEqual(c_string("abc\000def").raw, "abc\000def\000") # We can change the value: cs.value = "ab" self.assertEqual(cs.value, "ab") self.assertEqual(cs.raw, "ab\000\000\000\000\000") cs.raw = "XY" self.assertEqual(cs.value, "XY") self.assertEqual(cs.raw, "XY\000\000\000\000\000") self.assertRaises(TypeError, c_string, u"123") def XX_test_sized_strings(self): # New in releases later than 0.4.0: self.assertRaises(TypeError, c_string, None) # New in releases later than 0.4.0: # c_string(number) returns an empty string of size number self.assertTrue(len(c_string(32).raw) == 32) self.assertRaises(ValueError, c_string, -1) self.assertRaises(ValueError, c_string, 0) # These tests fail, because it is no longer initialized ## self.assertTrue(c_string(2).value == "") ## self.assertTrue(c_string(2).raw == "\000\000") self.assertTrue(c_string(2).raw[-1] == "\000") self.assertTrue(len(c_string(2).raw) == 2) def XX_test_initialized_strings(self): self.assertTrue(c_string("ab", 4).raw[:2] == "ab") self.assertTrue(c_string("ab", 4).raw[:2:] == "ab") self.assertTrue(c_string("ab", 4).raw[:2:-1] == "ba") self.assertTrue(c_string("ab", 4).raw[:2:2] == "a") self.assertTrue(c_string("ab", 4).raw[-1] == "\000") self.assertTrue(c_string("ab", 2).raw == "a\000") def XX_test_toolong(self): cs = c_string("abcdef") # Much too long string: self.assertRaises(ValueError, setattr, cs, "value", "123456789012345") # One char too long values: self.assertRaises(ValueError, setattr, cs, "value", "1234567") ## def test_perf(self): ## check_perf() try: c_wchar except NameError: pass else: class WStringTestCase(unittest.TestCase): def test_wchar(self): c_wchar(u"x") repr(byref(c_wchar(u"x"))) c_wchar("x") def X_test_basic_wstrings(self): cs = c_wstring(u"abcdef") # XXX This behaviour is about to change: # len returns the size of the internal buffer in bytes. # This includes the terminating NUL character. self.assertTrue(sizeof(cs) == 14) # The value property is the string up to the first terminating NUL. 
self.assertTrue(cs.value == u"abcdef") self.assertTrue(c_wstring(u"abc\000def").value == u"abc") self.assertTrue(c_wstring(u"abc\000def").value == u"abc") # The raw property is the total buffer contents: self.assertTrue(cs.raw == u"abcdef\000") self.assertTrue(c_wstring(u"abc\000def").raw == u"abc\000def\000") # We can change the value: cs.value = u"ab" self.assertTrue(cs.value == u"ab") self.assertTrue(cs.raw == u"ab\000\000\000\000\000") self.assertRaises(TypeError, c_wstring, "123") self.assertRaises(ValueError, c_wstring, 0) def X_test_toolong(self): cs = c_wstring(u"abcdef") # Much too long string: self.assertRaises(ValueError, setattr, cs, "value", u"123456789012345") # One char too long values: self.assertRaises(ValueError, setattr, cs, "value", u"1234567") def run_test(rep, msg, func, arg): items = range(rep) from time import clock start = clock() for i in items: func(arg); func(arg); func(arg); func(arg); func(arg) stop = clock() print "%20s: %.2f us" % (msg, ((stop-start)*1e6/5/rep)) def check_perf(): # Construct 5 objects REP = 200000 run_test(REP, "c_string(None)", c_string, None) run_test(REP, "c_string('abc')", c_string, 'abc') # Python 2.3 -OO, win2k, P4 700 MHz: # # c_string(None): 1.75 us # c_string('abc'): 2.74 us # Python 2.2 -OO, win2k, P4 700 MHz: # # c_string(None): 2.95 us # c_string('abc'): 3.67 us if __name__ == '__main__': ## check_perf() unittest.main()
mit
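The ctypes tests above hinge on the value/raw distinction of character buffers: value stops at the first NUL, raw returns the whole buffer. A quick Python 2 sketch using the public create_string_buffer helper (c_buffer is an older alias for it):

    from ctypes import create_string_buffer

    buf = create_string_buffer(4)      # 4-byte buffer, zero-filled
    buf.value = "abc"
    print repr(buf.value)              # 'abc'          stops at the first NUL
    print repr(buf.raw)                # 'abc\x00'      full buffer contents

    buf.value = "x"
    print repr(buf.raw)                # 'x\x00c\x00'   only bytes up to the NUL were rewritten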
fernandezcuesta/ansible
test/units/modules/network/eos/eos_module.py
38
3312
# (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json import os from ansible.compat.tests import unittest from ansible.compat.tests.mock import patch from ansible.module_utils import basic from ansible.module_utils._text import to_bytes def set_module_args(args): args = json.dumps({'ANSIBLE_MODULE_ARGS': args}) basic._ANSIBLE_ARGS = to_bytes(args) fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures') fixture_data = {} def load_fixture(name): path = os.path.join(fixture_path, name) if path in fixture_data: return fixture_data[path] with open(path) as f: data = f.read() try: data = json.loads(data) except: pass fixture_data[path] = data return data class AnsibleExitJson(Exception): pass class AnsibleFailJson(Exception): pass class TestEosModule(unittest.TestCase): def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False, transport='cli'): self.load_fixtures(commands, transport=transport) if failed: result = self.failed() self.assertTrue(result['failed'], result) else: result = self.changed(changed) self.assertEqual(result['changed'], changed, result) if commands is not None: if sort: self.assertEqual(sorted(commands), sorted(result['commands']), result['commands']) else: self.assertEqual(commands, result['commands'], result['commands']) return result def failed(self): def fail_json(*args, **kwargs): kwargs['failed'] = True raise AnsibleFailJson(kwargs) with patch.object(basic.AnsibleModule, 'fail_json', fail_json): with self.assertRaises(AnsibleFailJson) as exc: self.module.main() result = exc.exception.args[0] self.assertTrue(result['failed'], result) return result def changed(self, changed=False): def exit_json(*args, **kwargs): if 'changed' not in kwargs: kwargs['changed'] = False raise AnsibleExitJson(kwargs) with patch.object(basic.AnsibleModule, 'exit_json', exit_json): with self.assertRaises(AnsibleExitJson) as exc: self.module.main() result = exc.exception.args[0] self.assertEqual(result['changed'], changed, result) return result def load_fixtures(self, commands=None, transport='cli'): pass
gpl-3.0
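TestEosModule above is a reusable harness: a concrete test class sets self.module, patches the module's transport calls, and drives main() through execute_module(). A minimal illustration of such a subclass; eos_command is used as the example module and the fixture name is hypothetical:

    from ansible.compat.tests.mock import patch
    from ansible.modules.network.eos import eos_command
    from .eos_module import TestEosModule, load_fixture, set_module_args


    class TestEosCommandModule(TestEosModule):
        module = eos_command

        def setUp(self):
            self.mock_run_commands = patch(
                'ansible.modules.network.eos.eos_command.run_commands')
            self.run_commands = self.mock_run_commands.start()

        def tearDown(self):
            self.mock_run_commands.stop()

        def load_fixtures(self, commands=None, transport='cli'):
            # answer every command from a canned fixture instead of a device
            self.run_commands.return_value = [load_fixture('show_version')]

        def test_eos_command_simple(self):
            set_module_args(dict(commands=['show version']))
            result = self.execute_module()
            self.assertEqual(len(result['stdout']), 1)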
gloryofrobots/langs
codegen/operators_test_gen.py
2
4746
# from __future__ import division import random from itertools import chain, izip import math def pairwise(iterable): a = iter(iterable) return izip(a, a) def interleave(list_a, list_b): return list(chain.from_iterable(izip(list_a, list_b))) def integers(min, max, count): return [random.randint(min, max) for _ in range(count)] def floats(min, max, count): return [random.uniform(min, max) for _ in range(count)] def flatten(lst): return list(chain.from_iterable(lst)) NUM_TEST = 4 ARITH_TESTS = flatten([ integers(0, 10, NUM_TEST), integers(10, 100, NUM_TEST) , integers(100, 1000, NUM_TEST) , integers(1000, 10000, NUM_TEST) , integers(-10, 0, NUM_TEST) , integers(-100, -10, NUM_TEST) , integers(-1000, -100, NUM_TEST) , integers(-10000, -1000, NUM_TEST) , interleave(integers(-100, 0, NUM_TEST), integers(0, 100, NUM_TEST)), interleave(integers(-1000, -900, NUM_TEST), integers(900, 1000, NUM_TEST)), floats(0, 10, NUM_TEST), floats(10, 100, NUM_TEST) , floats(100, 1000, NUM_TEST) , floats(1000, 10000, NUM_TEST) , floats(-10, 0, NUM_TEST) , floats(-100, -10, NUM_TEST) , floats(-1000, -100, NUM_TEST) , floats(-10000, -1000, NUM_TEST) , interleave(floats(-100, 0, NUM_TEST), integers(0, 100, NUM_TEST)), interleave(floats(-1000, -900, NUM_TEST), integers(900, 1000, NUM_TEST)), interleave(floats(-10000, -9000, NUM_TEST), integers(9000, 10000, NUM_TEST)), interleave(floats(-100, 0, NUM_TEST), floats(0, 100, NUM_TEST)), interleave(floats(-1000, -900, NUM_TEST), floats(900, 1000, NUM_TEST)), ]) BIT_SHIFT_TESTS = flatten([ interleave(integers(-100, 0, NUM_TEST), integers(0, 10, NUM_TEST)), interleave(integers(-1000, -900, NUM_TEST), integers(10, 20, NUM_TEST)), interleave(integers(-10000, -9000, NUM_TEST), integers(20, 30, NUM_TEST)), interleave(integers(0, 10, NUM_TEST), integers(0, 100, NUM_TEST)), interleave(integers(10, 100, NUM_TEST), integers(0, 10, NUM_TEST)), interleave(integers(100, 1000, NUM_TEST), integers(0, 10, NUM_TEST)), interleave(integers(1000, 10000, NUM_TEST), integers(0, 100, NUM_TEST)), interleave(integers(-1000, -900, NUM_TEST), integers(40, 50, NUM_TEST)), interleave(integers(-10000, -9000, NUM_TEST), integers(0, 50, NUM_TEST)), ]) BIT_TESTS = flatten([ interleave(integers(-10000, -9000, NUM_TEST), integers(9000, 10000, NUM_TEST)), interleave(integers(-10, 10, NUM_TEST), integers(9000, 10000, NUM_TEST)), interleave(integers(-10000, -9000, NUM_TEST), integers(-10, 10, NUM_TEST)), interleave(integers(-10, 10, NUM_TEST), integers(0, 2, NUM_TEST)), ]) def format_result(test, evalstring=None): if not evalstring: evalstring = test try: value = eval(evalstring) if value is True: test_result = "True" elif value is False: test_result = "False" else: test_result = str(value) return " affirm:is_equal(%s, %s)" % (test, test_result) except Exception as e: print e return " affirm:is_throw(() -> %s, ())" % test def test_binary(py_op, arza_op, data, result): for x,y in pairwise(data): test_str = "%s %s %s" % (str(x), arza_op, str(y)) eval_str = "%s %s %s" % (str(x), py_op, str(y)) result.append(format_result(test_str, eval_str)) def test_call_binary(arza_op, fn, data, result): for x,y in pairwise(data): test_str = "%s %s %s" % (str(x), arza_op, str(y)) eval_str = "%s(%s , %s)" % (fn, str(x), str(y)) result.append(format_result(test_str, eval_str)) def test_unary(py_op, arza_op, data, result): for x in data: test_str = "%s (%s)" % (arza_op, str(x)) eval_str = "%s %s" % (py_op, str(x)) result.append(format_result(test_str, eval_str)) OP_TABLE = { "-":"negate", "<":"<", ">":">", ">=":">=", "<=":"<=", 
"==":"==", "!=":"!=", "+":"+", "**":"**", "-":"-", "%":"%", "*":"*", "/":"/", # "::":"::", # "++":"++", "<<": "`lshift`", ">>": "`rshift`", "&": "`bitand`", "^": "`bitxor`", "~": "bitnot", "%": "`mod`", } RESULT = [] for op in ['-', '+', '*', '/', '<', '>', '==', '>=', '<=', '**']: test_binary(op, OP_TABLE[op], ARITH_TESTS, RESULT) test_call_binary(OP_TABLE['%'], 'math.fmod', ARITH_TESTS, RESULT) for op in ['>>', '<<', '&', '^']: test_binary(op, OP_TABLE[op], BIT_SHIFT_TESTS, RESULT) test_unary('~', OP_TABLE["~"], BIT_SHIFT_TESTS, RESULT) F = open("test_operators.arza", "w") T =\ """ fun test() -> %s """ BODY = "\n".join(RESULT) F.write(T % BODY) F.close() # print BODY print len(RESULT) # for t in RESULT: # print t
gpl-2.0
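The generator above draws random operand pairs, evaluates each expression with Python's own operators, and writes one affirm:is_equal(...) line per pair into test_operators.arza. A small sketch of the two pairing helpers on fixed input, in the same Python 2 style as the script:

    from itertools import chain, izip

    def pairwise(iterable):
        a = iter(iterable)
        return izip(a, a)              # (1, 9, 2, 8) -> (1, 9), (2, 8)

    def interleave(list_a, list_b):
        return list(chain.from_iterable(izip(list_a, list_b)))

    print interleave([1, 2], [9, 8])   # [1, 9, 2, 8]
    print list(pairwise([1, 9, 2, 8])) # [(1, 9), (2, 8)]
    # for the pair (1, 9) and '+', the emitted test line would be:
    #   affirm:is_equal(1 + 9, 10)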
sonaht/ansible
test/units/modules/network/iosxr/test_iosxr_command.py
86
4034
# (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.compat.tests.mock import patch from ansible.modules.network.iosxr import iosxr_command from .iosxr_module import TestIosxrModule, load_fixture, set_module_args class TestIosxrCommandModule(TestIosxrModule): module = iosxr_command def setUp(self): self.mock_run_commands = patch('ansible.modules.network.iosxr.iosxr_command.run_commands') self.run_commands = self.mock_run_commands.start() def tearDown(self): self.mock_run_commands.stop() def load_fixtures(self, commands=None): def load_from_file(*args, **kwargs): module, commands = args output = list() for item in commands: try: command = item['command'] except ValueError: command = item filename = str(command).replace(' ', '_') output.append(load_fixture(filename)) return output self.run_commands.side_effect = load_from_file def test_iosxr_command_simple(self): set_module_args(dict(commands=['show version'])) result = self.execute_module() self.assertEqual(len(result['stdout']), 1) self.assertTrue(result['stdout'][0].startswith('Cisco IOS XR Software')) def test_iosxr_command_multiple(self): set_module_args(dict(commands=['show version', 'show version'])) result = self.execute_module() self.assertEqual(len(result['stdout']), 2) self.assertTrue(result['stdout'][0].startswith('Cisco IOS XR Software')) def test_iosxr_command_wait_for(self): wait_for = 'result[0] contains "Cisco IOS"' set_module_args(dict(commands=['show version'], wait_for=wait_for)) self.execute_module() def test_iosxr_command_wait_for_fails(self): wait_for = 'result[0] contains "test string"' set_module_args(dict(commands=['show version'], wait_for=wait_for)) self.execute_module(failed=True) self.assertEqual(self.run_commands.call_count, 10) def test_iosxr_command_retries(self): wait_for = 'result[0] contains "test string"' set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2)) self.execute_module(failed=True) self.assertEqual(self.run_commands.call_count, 2) def test_iosxr_command_match_any(self): wait_for = ['result[0] contains "Cisco IOS"', 'result[0] contains "test string"'] set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any')) self.execute_module() def test_iosxr_command_match_all(self): wait_for = ['result[0] contains "Cisco IOS"', 'result[0] contains "XR Software"'] set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all')) self.execute_module() def test_iosxr_command_match_all_failure(self): wait_for = ['result[0] contains "Cisco IOS"', 'result[0] contains "test string"'] commands = ['show version', 'show version'] set_module_args(dict(commands=commands, wait_for=wait_for, match='all')) self.execute_module(failed=True)
gpl-3.0
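The iosxr_command tests above answer each CLI command from a fixture file whose name is the command with spaces replaced by underscores, so 'show version' reads from show_version. A minimal sketch of that lookup outside the mock; the fixture directory name is illustrative:

    import os

    def fixture_for(command, fixture_dir='fixtures'):
        # 'show version' -> fixtures/show_version
        filename = str(command).replace(' ', '_')
        with open(os.path.join(fixture_dir, filename)) as f:
            return f.read()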
cloudfoundry/php-buildpack-legacy
builds/runtimes/python-2.7.6/lib/python2.7/xmllib.py
225
34865
"""A parser for XML, using the derived class as static DTD.""" # Author: Sjoerd Mullender. import re import string import warnings warnings.warn("The xmllib module is obsolete. Use xml.sax instead.", DeprecationWarning, 2) del warnings version = '0.3' class Error(RuntimeError): pass # Regular expressions used for parsing _S = '[ \t\r\n]+' # white space _opS = '[ \t\r\n]*' # optional white space _Name = '[a-zA-Z_:][-a-zA-Z0-9._:]*' # valid XML name _QStr = "(?:'[^']*'|\"[^\"]*\")" # quoted XML string illegal = re.compile('[^\t\r\n -\176\240-\377]') # illegal chars in content interesting = re.compile('[]&<]') amp = re.compile('&') ref = re.compile('&(' + _Name + '|#[0-9]+|#x[0-9a-fA-F]+)[^-a-zA-Z0-9._:]') entityref = re.compile('&(?P<name>' + _Name + ')[^-a-zA-Z0-9._:]') charref = re.compile('&#(?P<char>[0-9]+[^0-9]|x[0-9a-fA-F]+[^0-9a-fA-F])') space = re.compile(_S + '$') newline = re.compile('\n') attrfind = re.compile( _S + '(?P<name>' + _Name + ')' '(' + _opS + '=' + _opS + '(?P<value>'+_QStr+'|[-a-zA-Z0-9.:+*%?!\(\)_#=~]+))?') starttagopen = re.compile('<' + _Name) starttagend = re.compile(_opS + '(?P<slash>/?)>') starttagmatch = re.compile('<(?P<tagname>'+_Name+')' '(?P<attrs>(?:'+attrfind.pattern+')*)'+ starttagend.pattern) endtagopen = re.compile('</') endbracket = re.compile(_opS + '>') endbracketfind = re.compile('(?:[^>\'"]|'+_QStr+')*>') tagfind = re.compile(_Name) cdataopen = re.compile(r'<!\[CDATA\[') cdataclose = re.compile(r'\]\]>') # this matches one of the following: # SYSTEM SystemLiteral # PUBLIC PubidLiteral SystemLiteral _SystemLiteral = '(?P<%s>'+_QStr+')' _PublicLiteral = '(?P<%s>"[-\'\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*"|' \ "'[-\(\)+,./:=?;!*#@$_%% \n\ra-zA-Z0-9]*')" _ExternalId = '(?:SYSTEM|' \ 'PUBLIC'+_S+_PublicLiteral%'pubid'+ \ ')'+_S+_SystemLiteral%'syslit' doctype = re.compile('<!DOCTYPE'+_S+'(?P<name>'+_Name+')' '(?:'+_S+_ExternalId+')?'+_opS) xmldecl = re.compile('<\?xml'+_S+ 'version'+_opS+'='+_opS+'(?P<version>'+_QStr+')'+ '(?:'+_S+'encoding'+_opS+'='+_opS+ "(?P<encoding>'[A-Za-z][-A-Za-z0-9._]*'|" '"[A-Za-z][-A-Za-z0-9._]*"))?' '(?:'+_S+'standalone'+_opS+'='+_opS+ '(?P<standalone>\'(?:yes|no)\'|"(?:yes|no)"))?'+ _opS+'\?>') procopen = re.compile(r'<\?(?P<proc>' + _Name + ')' + _opS) procclose = re.compile(_opS + r'\?>') commentopen = re.compile('<!--') commentclose = re.compile('-->') doubledash = re.compile('--') attrtrans = string.maketrans(' \r\n\t', ' ') # definitions for XML namespaces _NCName = '[a-zA-Z_][-a-zA-Z0-9._]*' # XML Name, minus the ":" ncname = re.compile(_NCName + '$') qname = re.compile('(?:(?P<prefix>' + _NCName + '):)?' # optional prefix '(?P<local>' + _NCName + ')$') xmlns = re.compile('xmlns(?::(?P<ncname>'+_NCName+'))?$') # XML parser base class -- find tags and call handler functions. # Usage: p = XMLParser(); p.feed(data); ...; p.close(). # The dtd is defined by deriving a class which defines methods with # special names to handle tags: start_foo and end_foo to handle <foo> # and </foo>, respectively. The data between tags is passed to the # parser by calling self.handle_data() with some data as argument (the # data may be split up in arbitrary chunks). 
class XMLParser: attributes = {} # default, to be overridden elements = {} # default, to be overridden # parsing options, settable using keyword args in __init__ __accept_unquoted_attributes = 0 __accept_missing_endtag_name = 0 __map_case = 0 __accept_utf8 = 0 __translate_attribute_references = 1 # Interface -- initialize and reset this instance def __init__(self, **kw): self.__fixed = 0 if 'accept_unquoted_attributes' in kw: self.__accept_unquoted_attributes = kw['accept_unquoted_attributes'] if 'accept_missing_endtag_name' in kw: self.__accept_missing_endtag_name = kw['accept_missing_endtag_name'] if 'map_case' in kw: self.__map_case = kw['map_case'] if 'accept_utf8' in kw: self.__accept_utf8 = kw['accept_utf8'] if 'translate_attribute_references' in kw: self.__translate_attribute_references = kw['translate_attribute_references'] self.reset() def __fixelements(self): self.__fixed = 1 self.elements = {} self.__fixdict(self.__dict__) self.__fixclass(self.__class__) def __fixclass(self, kl): self.__fixdict(kl.__dict__) for k in kl.__bases__: self.__fixclass(k) def __fixdict(self, dict): for key in dict.keys(): if key[:6] == 'start_': tag = key[6:] start, end = self.elements.get(tag, (None, None)) if start is None: self.elements[tag] = getattr(self, key), end elif key[:4] == 'end_': tag = key[4:] start, end = self.elements.get(tag, (None, None)) if end is None: self.elements[tag] = start, getattr(self, key) # Interface -- reset this instance. Loses all unprocessed data def reset(self): self.rawdata = '' self.stack = [] self.nomoretags = 0 self.literal = 0 self.lineno = 1 self.__at_start = 1 self.__seen_doctype = None self.__seen_starttag = 0 self.__use_namespaces = 0 self.__namespaces = {'xml':None} # xml is implicitly declared # backward compatibility hack: if elements not overridden, # fill it in ourselves if self.elements is XMLParser.elements: self.__fixelements() # For derived classes only -- enter literal mode (CDATA) till EOF def setnomoretags(self): self.nomoretags = self.literal = 1 # For derived classes only -- enter literal mode (CDATA) def setliteral(self, *args): self.literal = 1 # Interface -- feed some data to the parser. Call this as # often as you want, with as little or as much text as you # want (may include '\n'). (This just saves the text, all the # processing is done by goahead().) 
def feed(self, data): self.rawdata = self.rawdata + data self.goahead(0) # Interface -- handle the remaining data def close(self): self.goahead(1) if self.__fixed: self.__fixed = 0 # remove self.elements so that we don't leak del self.elements # Interface -- translate references def translate_references(self, data, all = 1): if not self.__translate_attribute_references: return data i = 0 while 1: res = amp.search(data, i) if res is None: return data s = res.start(0) res = ref.match(data, s) if res is None: self.syntax_error("bogus `&'") i = s+1 continue i = res.end(0) str = res.group(1) rescan = 0 if str[0] == '#': if str[1] == 'x': str = chr(int(str[2:], 16)) else: str = chr(int(str[1:])) if data[i - 1] != ';': self.syntax_error("`;' missing after char reference") i = i-1 elif all: if str in self.entitydefs: str = self.entitydefs[str] rescan = 1 elif data[i - 1] != ';': self.syntax_error("bogus `&'") i = s + 1 # just past the & continue else: self.syntax_error("reference to unknown entity `&%s;'" % str) str = '&' + str + ';' elif data[i - 1] != ';': self.syntax_error("bogus `&'") i = s + 1 # just past the & continue # when we get here, str contains the translated text and i points # to the end of the string that is to be replaced data = data[:s] + str + data[i:] if rescan: i = s else: i = s + len(str) # Interface - return a dictionary of all namespaces currently valid def getnamespace(self): nsdict = {} for t, d, nst in self.stack: nsdict.update(d) return nsdict # Internal -- handle data as far as reasonable. May leave state # and data to be processed by a subsequent call. If 'end' is # true, force handling all data as if followed by EOF marker. def goahead(self, end): rawdata = self.rawdata i = 0 n = len(rawdata) while i < n: if i > 0: self.__at_start = 0 if self.nomoretags: data = rawdata[i:n] self.handle_data(data) self.lineno = self.lineno + data.count('\n') i = n break res = interesting.search(rawdata, i) if res: j = res.start(0) else: j = n if i < j: data = rawdata[i:j] if self.__at_start and space.match(data) is None: self.syntax_error('illegal data at start of file') self.__at_start = 0 if not self.stack and space.match(data) is None: self.syntax_error('data not in content') if not self.__accept_utf8 and illegal.search(data): self.syntax_error('illegal character in content') self.handle_data(data) self.lineno = self.lineno + data.count('\n') i = j if i == n: break if rawdata[i] == '<': if starttagopen.match(rawdata, i): if self.literal: data = rawdata[i] self.handle_data(data) self.lineno = self.lineno + data.count('\n') i = i+1 continue k = self.parse_starttag(i) if k < 0: break self.__seen_starttag = 1 self.lineno = self.lineno + rawdata[i:k].count('\n') i = k continue if endtagopen.match(rawdata, i): k = self.parse_endtag(i) if k < 0: break self.lineno = self.lineno + rawdata[i:k].count('\n') i = k continue if commentopen.match(rawdata, i): if self.literal: data = rawdata[i] self.handle_data(data) self.lineno = self.lineno + data.count('\n') i = i+1 continue k = self.parse_comment(i) if k < 0: break self.lineno = self.lineno + rawdata[i:k].count('\n') i = k continue if cdataopen.match(rawdata, i): k = self.parse_cdata(i) if k < 0: break self.lineno = self.lineno + rawdata[i:k].count('\n') i = k continue res = xmldecl.match(rawdata, i) if res: if not self.__at_start: self.syntax_error("<?xml?> declaration not at start of document") version, encoding, standalone = res.group('version', 'encoding', 'standalone') if version[1:-1] != '1.0': raise Error('only XML version 1.0 
supported') if encoding: encoding = encoding[1:-1] if standalone: standalone = standalone[1:-1] self.handle_xml(encoding, standalone) i = res.end(0) continue res = procopen.match(rawdata, i) if res: k = self.parse_proc(i) if k < 0: break self.lineno = self.lineno + rawdata[i:k].count('\n') i = k continue res = doctype.match(rawdata, i) if res: if self.literal: data = rawdata[i] self.handle_data(data) self.lineno = self.lineno + data.count('\n') i = i+1 continue if self.__seen_doctype: self.syntax_error('multiple DOCTYPE elements') if self.__seen_starttag: self.syntax_error('DOCTYPE not at beginning of document') k = self.parse_doctype(res) if k < 0: break self.__seen_doctype = res.group('name') if self.__map_case: self.__seen_doctype = self.__seen_doctype.lower() self.lineno = self.lineno + rawdata[i:k].count('\n') i = k continue elif rawdata[i] == '&': if self.literal: data = rawdata[i] self.handle_data(data) i = i+1 continue res = charref.match(rawdata, i) if res is not None: i = res.end(0) if rawdata[i-1] != ';': self.syntax_error("`;' missing in charref") i = i-1 if not self.stack: self.syntax_error('data not in content') self.handle_charref(res.group('char')[:-1]) self.lineno = self.lineno + res.group(0).count('\n') continue res = entityref.match(rawdata, i) if res is not None: i = res.end(0) if rawdata[i-1] != ';': self.syntax_error("`;' missing in entityref") i = i-1 name = res.group('name') if self.__map_case: name = name.lower() if name in self.entitydefs: self.rawdata = rawdata = rawdata[:res.start(0)] + self.entitydefs[name] + rawdata[i:] n = len(rawdata) i = res.start(0) else: self.unknown_entityref(name) self.lineno = self.lineno + res.group(0).count('\n') continue elif rawdata[i] == ']': if self.literal: data = rawdata[i] self.handle_data(data) i = i+1 continue if n-i < 3: break if cdataclose.match(rawdata, i): self.syntax_error("bogus `]]>'") self.handle_data(rawdata[i]) i = i+1 continue else: raise Error('neither < nor & ??') # We get here only if incomplete matches but # nothing else break # end while if i > 0: self.__at_start = 0 if end and i < n: data = rawdata[i] self.syntax_error("bogus `%s'" % data) if not self.__accept_utf8 and illegal.search(data): self.syntax_error('illegal character in content') self.handle_data(data) self.lineno = self.lineno + data.count('\n') self.rawdata = rawdata[i+1:] return self.goahead(end) self.rawdata = rawdata[i:] if end: if not self.__seen_starttag: self.syntax_error('no elements in file') if self.stack: self.syntax_error('missing end tags') while self.stack: self.finish_endtag(self.stack[-1][0]) # Internal -- parse comment, return length or -1 if not terminated def parse_comment(self, i): rawdata = self.rawdata if rawdata[i:i+4] != '<!--': raise Error('unexpected call to handle_comment') res = commentclose.search(rawdata, i+4) if res is None: return -1 if doubledash.search(rawdata, i+4, res.start(0)): self.syntax_error("`--' inside comment") if rawdata[res.start(0)-1] == '-': self.syntax_error('comment cannot end in three dashes') if not self.__accept_utf8 and \ illegal.search(rawdata, i+4, res.start(0)): self.syntax_error('illegal character in comment') self.handle_comment(rawdata[i+4: res.start(0)]) return res.end(0) # Internal -- handle DOCTYPE tag, return length or -1 if not terminated def parse_doctype(self, res): rawdata = self.rawdata n = len(rawdata) name = res.group('name') if self.__map_case: name = name.lower() pubid, syslit = res.group('pubid', 'syslit') if pubid is not None: pubid = pubid[1:-1] # remove quotes pubid = ' 
'.join(pubid.split()) # normalize if syslit is not None: syslit = syslit[1:-1] # remove quotes j = k = res.end(0) if k >= n: return -1 if rawdata[k] == '[': level = 0 k = k+1 dq = sq = 0 while k < n: c = rawdata[k] if not sq and c == '"': dq = not dq elif not dq and c == "'": sq = not sq elif sq or dq: pass elif level <= 0 and c == ']': res = endbracket.match(rawdata, k+1) if res is None: return -1 self.handle_doctype(name, pubid, syslit, rawdata[j+1:k]) return res.end(0) elif c == '<': level = level + 1 elif c == '>': level = level - 1 if level < 0: self.syntax_error("bogus `>' in DOCTYPE") k = k+1 res = endbracketfind.match(rawdata, k) if res is None: return -1 if endbracket.match(rawdata, k) is None: self.syntax_error('garbage in DOCTYPE') self.handle_doctype(name, pubid, syslit, None) return res.end(0) # Internal -- handle CDATA tag, return length or -1 if not terminated def parse_cdata(self, i): rawdata = self.rawdata if rawdata[i:i+9] != '<![CDATA[': raise Error('unexpected call to parse_cdata') res = cdataclose.search(rawdata, i+9) if res is None: return -1 if not self.__accept_utf8 and \ illegal.search(rawdata, i+9, res.start(0)): self.syntax_error('illegal character in CDATA') if not self.stack: self.syntax_error('CDATA not in content') self.handle_cdata(rawdata[i+9:res.start(0)]) return res.end(0) __xml_namespace_attributes = {'ns':None, 'src':None, 'prefix':None} # Internal -- handle a processing instruction tag def parse_proc(self, i): rawdata = self.rawdata end = procclose.search(rawdata, i) if end is None: return -1 j = end.start(0) if not self.__accept_utf8 and illegal.search(rawdata, i+2, j): self.syntax_error('illegal character in processing instruction') res = tagfind.match(rawdata, i+2) if res is None: raise Error('unexpected call to parse_proc') k = res.end(0) name = res.group(0) if self.__map_case: name = name.lower() if name == 'xml:namespace': self.syntax_error('old-fashioned namespace declaration') self.__use_namespaces = -1 # namespace declaration # this must come after the <?xml?> declaration (if any) # and before the <!DOCTYPE> (if any). 
if self.__seen_doctype or self.__seen_starttag: self.syntax_error('xml:namespace declaration too late in document') attrdict, namespace, k = self.parse_attributes(name, k, j) if namespace: self.syntax_error('namespace declaration inside namespace declaration') for attrname in attrdict.keys(): if not attrname in self.__xml_namespace_attributes: self.syntax_error("unknown attribute `%s' in xml:namespace tag" % attrname) if not 'ns' in attrdict or not 'prefix' in attrdict: self.syntax_error('xml:namespace without required attributes') prefix = attrdict.get('prefix') if ncname.match(prefix) is None: self.syntax_error('xml:namespace illegal prefix value') return end.end(0) if prefix in self.__namespaces: self.syntax_error('xml:namespace prefix not unique') self.__namespaces[prefix] = attrdict['ns'] else: if name.lower() == 'xml': self.syntax_error('illegal processing instruction target name') self.handle_proc(name, rawdata[k:j]) return end.end(0) # Internal -- parse attributes between i and j def parse_attributes(self, tag, i, j): rawdata = self.rawdata attrdict = {} namespace = {} while i < j: res = attrfind.match(rawdata, i) if res is None: break attrname, attrvalue = res.group('name', 'value') if self.__map_case: attrname = attrname.lower() i = res.end(0) if attrvalue is None: self.syntax_error("no value specified for attribute `%s'" % attrname) attrvalue = attrname elif attrvalue[:1] == "'" == attrvalue[-1:] or \ attrvalue[:1] == '"' == attrvalue[-1:]: attrvalue = attrvalue[1:-1] elif not self.__accept_unquoted_attributes: self.syntax_error("attribute `%s' value not quoted" % attrname) res = xmlns.match(attrname) if res is not None: # namespace declaration ncname = res.group('ncname') namespace[ncname or ''] = attrvalue or None if not self.__use_namespaces: self.__use_namespaces = len(self.stack)+1 continue if '<' in attrvalue: self.syntax_error("`<' illegal in attribute value") if attrname in attrdict: self.syntax_error("attribute `%s' specified twice" % attrname) attrvalue = attrvalue.translate(attrtrans) attrdict[attrname] = self.translate_references(attrvalue) return attrdict, namespace, i # Internal -- handle starttag, return length or -1 if not terminated def parse_starttag(self, i): rawdata = self.rawdata # i points to start of tag end = endbracketfind.match(rawdata, i+1) if end is None: return -1 tag = starttagmatch.match(rawdata, i) if tag is None or tag.end(0) != end.end(0): self.syntax_error('garbage in starttag') return end.end(0) nstag = tagname = tag.group('tagname') if self.__map_case: nstag = tagname = nstag.lower() if not self.__seen_starttag and self.__seen_doctype and \ tagname != self.__seen_doctype: self.syntax_error('starttag does not match DOCTYPE') if self.__seen_starttag and not self.stack: self.syntax_error('multiple elements on top level') k, j = tag.span('attrs') attrdict, nsdict, k = self.parse_attributes(tagname, k, j) self.stack.append((tagname, nsdict, nstag)) if self.__use_namespaces: res = qname.match(tagname) else: res = None if res is not None: prefix, nstag = res.group('prefix', 'local') if prefix is None: prefix = '' ns = None for t, d, nst in self.stack: if prefix in d: ns = d[prefix] if ns is None and prefix != '': ns = self.__namespaces.get(prefix) if ns is not None: nstag = ns + ' ' + nstag elif prefix != '': nstag = prefix + ':' + nstag # undo split self.stack[-1] = tagname, nsdict, nstag # translate namespace of attributes attrnamemap = {} # map from new name to old name (used for error reporting) for key in attrdict.keys(): attrnamemap[key] = key 
if self.__use_namespaces: nattrdict = {} for key, val in attrdict.items(): okey = key res = qname.match(key) if res is not None: aprefix, key = res.group('prefix', 'local') if self.__map_case: key = key.lower() if aprefix is not None: ans = None for t, d, nst in self.stack: if aprefix in d: ans = d[aprefix] if ans is None: ans = self.__namespaces.get(aprefix) if ans is not None: key = ans + ' ' + key else: key = aprefix + ':' + key nattrdict[key] = val attrnamemap[key] = okey attrdict = nattrdict attributes = self.attributes.get(nstag) if attributes is not None: for key in attrdict.keys(): if not key in attributes: self.syntax_error("unknown attribute `%s' in tag `%s'" % (attrnamemap[key], tagname)) for key, val in attributes.items(): if val is not None and not key in attrdict: attrdict[key] = val method = self.elements.get(nstag, (None, None))[0] self.finish_starttag(nstag, attrdict, method) if tag.group('slash') == '/': self.finish_endtag(tagname) return tag.end(0) # Internal -- parse endtag def parse_endtag(self, i): rawdata = self.rawdata end = endbracketfind.match(rawdata, i+1) if end is None: return -1 res = tagfind.match(rawdata, i+2) if res is None: if self.literal: self.handle_data(rawdata[i]) return i+1 if not self.__accept_missing_endtag_name: self.syntax_error('no name specified in end tag') tag = self.stack[-1][0] k = i+2 else: tag = res.group(0) if self.__map_case: tag = tag.lower() if self.literal: if not self.stack or tag != self.stack[-1][0]: self.handle_data(rawdata[i]) return i+1 k = res.end(0) if endbracket.match(rawdata, k) is None: self.syntax_error('garbage in end tag') self.finish_endtag(tag) return end.end(0) # Internal -- finish processing of start tag def finish_starttag(self, tagname, attrdict, method): if method is not None: self.handle_starttag(tagname, method, attrdict) else: self.unknown_starttag(tagname, attrdict) # Internal -- finish processing of end tag def finish_endtag(self, tag): self.literal = 0 if not tag: self.syntax_error('name-less end tag') found = len(self.stack) - 1 if found < 0: self.unknown_endtag(tag) return else: found = -1 for i in range(len(self.stack)): if tag == self.stack[i][0]: found = i if found == -1: self.syntax_error('unopened end tag') return while len(self.stack) > found: if found < len(self.stack) - 1: self.syntax_error('missing close tag for %s' % self.stack[-1][2]) nstag = self.stack[-1][2] method = self.elements.get(nstag, (None, None))[1] if method is not None: self.handle_endtag(nstag, method) else: self.unknown_endtag(nstag) if self.__use_namespaces == len(self.stack): self.__use_namespaces = 0 del self.stack[-1] # Overridable -- handle xml processing instruction def handle_xml(self, encoding, standalone): pass # Overridable -- handle DOCTYPE def handle_doctype(self, tag, pubid, syslit, data): pass # Overridable -- handle start tag def handle_starttag(self, tag, method, attrs): method(attrs) # Overridable -- handle end tag def handle_endtag(self, tag, method): method() # Example -- handle character reference, no need to override def handle_charref(self, name): try: if name[0] == 'x': n = int(name[1:], 16) else: n = int(name) except ValueError: self.unknown_charref(name) return if not 0 <= n <= 255: self.unknown_charref(name) return self.handle_data(chr(n)) # Definition of entities -- derived classes may override entitydefs = {'lt': '&#60;', # must use charref 'gt': '&#62;', 'amp': '&#38;', # must use charref 'quot': '&#34;', 'apos': '&#39;', } # Example -- handle data, should be overridden def handle_data(self, data): 
pass # Example -- handle cdata, could be overridden def handle_cdata(self, data): pass # Example -- handle comment, could be overridden def handle_comment(self, data): pass # Example -- handle processing instructions, could be overridden def handle_proc(self, name, data): pass # Example -- handle relatively harmless syntax errors, could be overridden def syntax_error(self, message): raise Error('Syntax error at line %d: %s' % (self.lineno, message)) # To be overridden -- handlers for unknown objects def unknown_starttag(self, tag, attrs): pass def unknown_endtag(self, tag): pass def unknown_charref(self, ref): pass def unknown_entityref(self, name): self.syntax_error("reference to unknown entity `&%s;'" % name) class TestXMLParser(XMLParser): def __init__(self, **kw): self.testdata = "" XMLParser.__init__(self, **kw) def handle_xml(self, encoding, standalone): self.flush() print 'xml: encoding =',encoding,'standalone =',standalone def handle_doctype(self, tag, pubid, syslit, data): self.flush() print 'DOCTYPE:',tag, repr(data) def handle_data(self, data): self.testdata = self.testdata + data if len(repr(self.testdata)) >= 70: self.flush() def flush(self): data = self.testdata if data: self.testdata = "" print 'data:', repr(data) def handle_cdata(self, data): self.flush() print 'cdata:', repr(data) def handle_proc(self, name, data): self.flush() print 'processing:',name,repr(data) def handle_comment(self, data): self.flush() r = repr(data) if len(r) > 68: r = r[:32] + '...' + r[-32:] print 'comment:', r def syntax_error(self, message): print 'error at line %d:' % self.lineno, message def unknown_starttag(self, tag, attrs): self.flush() if not attrs: print 'start tag: <' + tag + '>' else: print 'start tag: <' + tag, for name, value in attrs.items(): print name + '=' + '"' + value + '"', print '>' def unknown_endtag(self, tag): self.flush() print 'end tag: </' + tag + '>' def unknown_entityref(self, ref): self.flush() print '*** unknown entity ref: &' + ref + ';' def unknown_charref(self, ref): self.flush() print '*** unknown char ref: &#' + ref + ';' def close(self): XMLParser.close(self) self.flush() def test(args = None): import sys, getopt from time import time if not args: args = sys.argv[1:] opts, args = getopt.getopt(args, 'st') klass = TestXMLParser do_time = 0 for o, a in opts: if o == '-s': klass = XMLParser elif o == '-t': do_time = 1 if args: file = args[0] else: file = 'test.xml' if file == '-': f = sys.stdin else: try: f = open(file, 'r') except IOError, msg: print file, ":", msg sys.exit(1) data = f.read() if f is not sys.stdin: f.close() x = klass() t0 = time() try: if do_time: x.feed(data) x.close() else: for c in data: x.feed(c) x.close() except Error, msg: t1 = time() print msg if do_time: print 'total time: %g' % (t1-t0) sys.exit(1) t1 = time() if do_time: print 'total time: %g' % (t1-t0) if __name__ == '__main__': test()
mit
robotframework/robotframework
src/robot/utils/markuputils.py
2
1664
# Copyright 2008-2015 Nokia Networks # Copyright 2016- Robot Framework Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from .htmlformatters import LinkFormatter, HtmlFormatter _format_url = LinkFormatter().format_url _format_html = HtmlFormatter().format _generic_escapes = (('&', '&amp;'), ('<', '&lt;'), ('>', '&gt;')) _attribute_escapes = _generic_escapes \ + (('"', '&quot;'), ('\n', '&#10;'), ('\r', '&#13;'), ('\t', '&#09;')) _illegal_chars_in_xml = re.compile('[\x00-\x08\x0B\x0C\x0E-\x1F\uFFFE\uFFFF]') def html_escape(text, linkify=True): text = _escape(text) if linkify and '://' in text: text = _format_url(text) return text def xml_escape(text): return _illegal_chars_in_xml.sub('', _escape(text)) def html_format(text): return _format_html(_escape(text)) def attribute_escape(attr): attr = _escape(attr, _attribute_escapes) return _illegal_chars_in_xml.sub('', attr) def _escape(text, escapes=_generic_escapes): for name, value in escapes: if name in text: # performance optimization text = text.replace(name, value) return text
apache-2.0
ondra-novak/chromium.src
tools/chrome_proxy/integration_tests/chrome_proxy_metrics_unittest.py
32
8204
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import base64 import unittest from integration_tests import chrome_proxy_metrics as metrics from integration_tests import network_metrics_unittest as network_unittest from metrics import test_page_test_results # Timeline events used in tests. # An HTML not via proxy. EVENT_HTML_PROXY = network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent( url='http://test.html1', response_headers={ 'Content-Type': 'text/html', 'Content-Length': str(len(network_unittest.HTML_BODY)), }, body=network_unittest.HTML_BODY) # An HTML via proxy with the deprecated Via header. EVENT_HTML_PROXY_DEPRECATED_VIA = ( network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent( url='http://test.html2', response_headers={ 'Content-Type': 'text/html', 'Content-Encoding': 'gzip', 'X-Original-Content-Length': str(len(network_unittest.HTML_BODY)), 'Via': (metrics.CHROME_PROXY_VIA_HEADER_DEPRECATED + ',other-via'), }, body=network_unittest.HTML_BODY)) # An image via proxy with Via header and it is cached. EVENT_IMAGE_PROXY_CACHED = ( network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent( url='http://test.image', response_headers={ 'Content-Type': 'image/jpeg', 'Content-Encoding': 'gzip', 'X-Original-Content-Length': str(network_unittest.IMAGE_OCL), 'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER, }, body=base64.b64encode(network_unittest.IMAGE_BODY), base64_encoded_body=True, served_from_cache=True)) # An image fetched directly. EVENT_IMAGE_DIRECT = ( network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent( url='http://test.image', response_headers={ 'Content-Type': 'image/jpeg', 'Content-Encoding': 'gzip', }, body=base64.b64encode(network_unittest.IMAGE_BODY), base64_encoded_body=True)) # A safe-browsing malware response. EVENT_MALWARE_PROXY = ( network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent( url='http://test.malware', response_headers={ 'X-Malware-Url': '1', 'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER, 'Location': 'http://test.malware', }, status=307)) class ChromeProxyMetricTest(unittest.TestCase): _test_proxy_info = {} def _StubGetProxyInfo(self, info): def stub(unused_tab, unused_url=''): # pylint: disable=W0613 return ChromeProxyMetricTest._test_proxy_info metrics.GetProxyInfoFromNetworkInternals = stub ChromeProxyMetricTest._test_proxy_info = info def testChromeProxyResponse(self): # An https non-proxy response. 
resp = metrics.ChromeProxyResponse( network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent( url='https://test.url', response_headers={ 'Content-Type': 'text/html', 'Content-Length': str(len(network_unittest.HTML_BODY)), 'Via': 'some other via', }, body=network_unittest.HTML_BODY)) self.assertFalse(resp.ShouldHaveChromeProxyViaHeader()) self.assertFalse(resp.HasChromeProxyViaHeader()) self.assertTrue(resp.IsValidByViaHeader()) # A proxied JPEG image response resp = metrics.ChromeProxyResponse( network_unittest.NetworkMetricTest.MakeNetworkTimelineEvent( url='http://test.image', response_headers={ 'Content-Type': 'image/jpeg', 'Content-Encoding': 'gzip', 'Via': '1.1 ' + metrics.CHROME_PROXY_VIA_HEADER, 'X-Original-Content-Length': str(network_unittest.IMAGE_OCL), }, body=base64.b64encode(network_unittest.IMAGE_BODY), base64_encoded_body=True)) self.assertTrue(resp.ShouldHaveChromeProxyViaHeader()) self.assertTrue(resp.HasChromeProxyViaHeader()) self.assertTrue(resp.IsValidByViaHeader()) def testChromeProxyMetricForDataSaving(self): metric = metrics.ChromeProxyMetric() events = [ EVENT_HTML_PROXY, EVENT_HTML_PROXY_DEPRECATED_VIA, EVENT_IMAGE_PROXY_CACHED, EVENT_IMAGE_DIRECT] metric.SetEvents(events) self.assertTrue(len(events), len(list(metric.IterResponses(None)))) results = test_page_test_results.TestPageTestResults(self) metric.AddResultsForDataSaving(None, results) results.AssertHasPageSpecificScalarValue('resources_via_proxy', 'count', 2) results.AssertHasPageSpecificScalarValue('resources_from_cache', 'count', 1) results.AssertHasPageSpecificScalarValue('resources_direct', 'count', 2) def testChromeProxyMetricForHeaderValidation(self): metric = metrics.ChromeProxyMetric() metric.SetEvents([ EVENT_HTML_PROXY, EVENT_HTML_PROXY_DEPRECATED_VIA, EVENT_IMAGE_PROXY_CACHED, EVENT_IMAGE_DIRECT]) results = test_page_test_results.TestPageTestResults(self) missing_via_exception = False try: metric.AddResultsForHeaderValidation(None, results) except metrics.ChromeProxyMetricException: missing_via_exception = True # Only the HTTP image response does not have a valid Via header. self.assertTrue(missing_via_exception) # Two events with valid Via headers. metric.SetEvents([ EVENT_HTML_PROXY_DEPRECATED_VIA, EVENT_IMAGE_PROXY_CACHED]) metric.AddResultsForHeaderValidation(None, results) results.AssertHasPageSpecificScalarValue('checked_via_header', 'count', 2) def testChromeProxyMetricForBypass(self): metric = metrics.ChromeProxyMetric() metric.SetEvents([ EVENT_HTML_PROXY, EVENT_HTML_PROXY_DEPRECATED_VIA, EVENT_IMAGE_PROXY_CACHED, EVENT_IMAGE_DIRECT]) results = test_page_test_results.TestPageTestResults(self) bypass_exception = False try: metric.AddResultsForBypass(None, results) except metrics.ChromeProxyMetricException: bypass_exception = True # Two of the first three events have Via headers. self.assertTrue(bypass_exception) # Use directly fetched image only. It is treated as bypassed. 
metric.SetEvents([EVENT_IMAGE_DIRECT]) metric.AddResultsForBypass(None, results) results.AssertHasPageSpecificScalarValue('bypass', 'count', 1) def testChromeProxyMetricForHTTPFallback(self): metric = metrics.ChromeProxyMetric() metric.SetEvents([ EVENT_HTML_PROXY, EVENT_HTML_PROXY_DEPRECATED_VIA]) results = test_page_test_results.TestPageTestResults(self) fallback_exception = False info = {} info['enabled'] = False self._StubGetProxyInfo(info) try: metric.AddResultsForBypass(None, results) except metrics.ChromeProxyMetricException: fallback_exception = True self.assertTrue(fallback_exception) fallback_exception = False info['enabled'] = True info['proxies'] = [ 'something.else.com:80', metrics.PROXY_SETTING_DIRECT ] self._StubGetProxyInfo(info) try: metric.AddResultsForBypass(None, results) except metrics.ChromeProxyMetricException: fallback_exception = True self.assertTrue(fallback_exception) info['enabled'] = True info['proxies'] = [ metrics.PROXY_SETTING_HTTP, metrics.PROXY_SETTING_DIRECT ] self._StubGetProxyInfo(info) metric.AddResultsForHTTPFallback(None, results) def testChromeProxyMetricForSafebrowsing(self): metric = metrics.ChromeProxyMetric() metric.SetEvents([EVENT_MALWARE_PROXY]) results = test_page_test_results.TestPageTestResults(self) metric.AddResultsForSafebrowsing(None, results) results.AssertHasPageSpecificScalarValue('safebrowsing', 'boolean', True) # Clear results and metrics to test no response for safebrowsing results = test_page_test_results.TestPageTestResults(self) metric.SetEvents([]) metric.AddResultsForSafebrowsing(None, results) results.AssertHasPageSpecificScalarValue('safebrowsing', 'boolean', True)
bsd-3-clause
torchbox/wagtail
wagtail/documents/migrations/0011_add_choose_permissions.py
5
3579
# Generated by Django 3.1.2 on 2020-10-15 00:52 from django.db import migrations def add_choose_permission_to_admin_groups(apps, _schema_editor): ContentType = apps.get_model('contenttypes.ContentType') Permission = apps.get_model('auth.Permission') Group = apps.get_model('auth.Group') # Get document content type document_content_type, _created = ContentType.objects.get_or_create( model='document', app_label='wagtaildocs' ) # Create the Choose permission (if it doesn't already exist) choose_document_permission, _created = Permission.objects.get_or_create( content_type=document_content_type, codename='choose_document', defaults={'name': 'Can choose document'} ) # Assign it to all groups which have "Access the Wagtail admin" permission. # This emulates the previous behavior, where everyone who would access the admin # could choose any document in any Collection, because choosing wasn't permissioned. for group in Group.objects.filter(permissions__codename='access_admin'): group.permissions.add(choose_document_permission) def remove_choose_permission(apps, _schema_editor): """Reverse the above additions of permissions.""" ContentType = apps.get_model('contenttypes.ContentType') Permission = apps.get_model('auth.Permission') document_content_type = ContentType.objects.get( model='document', app_label='wagtaildocs', ) # This cascades to Group Permission.objects.filter( content_type=document_content_type, codename='choose_document' ).delete() def get_choose_permission(apps): Permission = apps.get_model('auth.Permission') ContentType = apps.get_model('contenttypes.ContentType') document_content_type, _created = ContentType.objects.get_or_create( model='document', app_label='wagtaildocs', ) return Permission.objects.filter( content_type=document_content_type, codename__in=['choose_document'] ).first() def copy_choose_permission_to_collections(apps, _schema_editor): Collection = apps.get_model('wagtailcore.Collection') Group = apps.get_model('auth.Group') GroupCollectionPermission = apps.get_model('wagtailcore.GroupCollectionPermission') root_collection = Collection.objects.get(depth=1) permission = get_choose_permission(apps) if permission: for group in Group.objects.filter(permissions=permission): GroupCollectionPermission.objects.create( group=group, collection=root_collection, permission=permission ) def remove_choose_permission_from_collections(apps, _schema_editor): GroupCollectionPermission = apps.get_model('wagtailcore.GroupCollectionPermission') choose_permission = get_choose_permission(apps) if choose_permission: GroupCollectionPermission.objects.filter(permission=choose_permission).delete() class Migration(migrations.Migration): dependencies = [ ('wagtaildocs', '0010_document_file_hash'), ] operations = [ migrations.AlterModelOptions( name='document', options={'permissions': [('choose_document', 'Can choose document')], 'verbose_name': 'document', 'verbose_name_plural': 'documents'}, ), migrations.RunPython(add_choose_permission_to_admin_groups, remove_choose_permission), migrations.RunPython(copy_choose_permission_to_collections, remove_choose_permission_from_collections), ]
bsd-3-clause
insertnamehere1/maraschino
lib/jsonrpclib/jsonrpc.py
13
17112
""" Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================ JSONRPC Library (jsonrpclib) ============================ This library is a JSON-RPC v.2 (proposed) implementation which follows the xmlrpclib API for portability between clients. It uses the same Server / ServerProxy, loads, dumps, etc. syntax, while providing features not present in XML-RPC like: * Keyword arguments * Notifications * Versioning * Batches and batch notifications Eventually, I'll add a SimpleXMLRPCServer compatible library, and other things to tie the thing off nicely. :) For a quick-start, just open a console and type the following, replacing the server address, method, and parameters appropriately. >>> import jsonrpclib >>> server = jsonrpclib.Server('http://localhost:8181') >>> server.add(5, 6) 11 >>> server._notify.add(5, 6) >>> batch = jsonrpclib.MultiCall(server) >>> batch.add(3, 50) >>> batch.add(2, 3) >>> batch._notify.add(3, 5) >>> batch() [53, 5] See http://code.google.com/p/jsonrpclib/ for more info. """ import types import sys from xmlrpclib import Transport as XMLTransport from xmlrpclib import SafeTransport as XMLSafeTransport from xmlrpclib import ServerProxy as XMLServerProxy from xmlrpclib import _Method as XML_Method import time import string import random # Library includes import jsonrpclib from jsonrpclib import config from jsonrpclib import history # JSON library importing cjson = None json = None try: import cjson except ImportError: try: import json except ImportError: try: import simplejson as json except ImportError: raise ImportError( 'You must have the cjson, json, or simplejson ' + 'module(s) available.' ) IDCHARS = string.ascii_lowercase+string.digits class UnixSocketMissing(Exception): """ Just a properly named Exception if Unix Sockets usage is attempted on a platform that doesn't support them (Windows) """ pass #JSON Abstractions def jdumps(obj, encoding='utf-8'): # Do 'serialize' test at some point for other classes global cjson if cjson: return cjson.encode(obj) else: return json.dumps(obj, encoding=encoding) def jloads(json_string): global cjson if cjson: return cjson.decode(json_string) else: return json.loads(json_string) # XMLRPClib re-implementations class ProtocolError(Exception): pass class TransportMixIn(object): """ Just extends the XMLRPC transport where necessary. 
""" user_agent = config.user_agent # for Python 2.7 support _connection = None def send_content(self, connection, request_body): connection.putheader("Content-Type", "application/json") connection.putheader("Content-Length", str(len(request_body))) connection.endheaders() if request_body: connection.send(request_body) def getparser(self): target = JSONTarget() return JSONParser(target), target class JSONParser(object): def __init__(self, target): self.target = target def feed(self, data): self.target.feed(data) def close(self): pass class JSONTarget(object): def __init__(self): self.data = [] def feed(self, data): self.data.append(data) def close(self): return ''.join(self.data) class Transport(TransportMixIn, XMLTransport): pass class SafeTransport(TransportMixIn, XMLSafeTransport): pass from httplib import HTTP, HTTPConnection from socket import socket USE_UNIX_SOCKETS = False try: from socket import AF_UNIX, SOCK_STREAM USE_UNIX_SOCKETS = True except ImportError: pass if (USE_UNIX_SOCKETS): class UnixHTTPConnection(HTTPConnection): def connect(self): self.sock = socket(AF_UNIX, SOCK_STREAM) self.sock.connect(self.host) class UnixHTTP(HTTP): _connection_class = UnixHTTPConnection class UnixTransport(TransportMixIn, XMLTransport): def make_connection(self, host): import httplib host, extra_headers, x509 = self.get_host_info(host) return UnixHTTP(host) class ServerProxy(XMLServerProxy): """ Unfortunately, much more of this class has to be copied since so much of it does the serialization. """ def __init__(self, uri, transport=None, encoding=None, verbose=0, version=None): import urllib if not version: version = config.version self.__version = version schema, uri = urllib.splittype(uri) if schema not in ('http', 'https', 'unix'): raise IOError('Unsupported JSON-RPC protocol.') if schema == 'unix': if not USE_UNIX_SOCKETS: # Don't like the "generic" Exception... raise UnixSocketMissing("Unix sockets not available.") self.__host = uri self.__handler = '/' else: self.__host, self.__handler = urllib.splithost(uri) if not self.__handler: # Not sure if this is in the JSON spec? #self.__handler = '/' self.__handler == '/' if transport is None: if schema == 'unix': transport = UnixTransport() elif schema == 'https': transport = SafeTransport() else: transport = Transport() self.__transport = transport self.__encoding = encoding self.__verbose = verbose def _request(self, methodname, params, rpcid=None): request = dumps(params, methodname, encoding=self.__encoding, rpcid=rpcid, version=self.__version) response = self._run_request(request) check_for_errors(response) return response['result'] def _request_notify(self, methodname, params, rpcid=None): request = dumps(params, methodname, encoding=self.__encoding, rpcid=rpcid, version=self.__version, notify=True) response = self._run_request(request, notify=True) check_for_errors(response) return def _run_request(self, request, notify=None): history.add_request(request) response = self.__transport.request( self.__host, self.__handler, request, verbose=self.__verbose ) # Here, the XMLRPC library translates a single list # response to the single value -- should we do the # same, and require a tuple / list to be passed to # the response object, or expect the Server to be # outputting the response appropriately? 
history.add_response(response) if not response: return None return_obj = loads(response) return return_obj def __getattr__(self, name): # Same as original, just with new _Method reference return _Method(self._request, name) @property def _notify(self): # Just like __getattr__, but with notify namespace. return _Notify(self._request_notify) class _Method(XML_Method): def __call__(self, *args, **kwargs): if len(args) > 0 and len(kwargs) > 0: raise ProtocolError('Cannot use both positional ' + 'and keyword arguments (according to JSON-RPC spec.)') if len(args) > 0: return self.__send(self.__name, args) else: return self.__send(self.__name, kwargs) def __getattr__(self, name): self.__name = '%s.%s' % (self.__name, name) return self # The old method returned a new instance, but this seemed wasteful. # The only thing that changes is the name. #return _Method(self.__send, "%s.%s" % (self.__name, name)) class _Notify(object): def __init__(self, request): self._request = request def __getattr__(self, name): return _Method(self._request, name) # Batch implementation class MultiCallMethod(object): def __init__(self, method, notify=False): self.method = method self.params = [] self.notify = notify def __call__(self, *args, **kwargs): if len(kwargs) > 0 and len(args) > 0: raise ProtocolError('JSON-RPC does not support both ' + 'positional and keyword arguments.') if len(kwargs) > 0: self.params = kwargs else: self.params = args def request(self, encoding=None, rpcid=None): return dumps(self.params, self.method, version=2.0, encoding=encoding, rpcid=rpcid, notify=self.notify) def __repr__(self): return '%s' % self.request() def __getattr__(self, method): new_method = '%s.%s' % (self.method, method) self.method = new_method return self class MultiCallNotify(object): def __init__(self, multicall): self.multicall = multicall def __getattr__(self, name): new_job = MultiCallMethod(name, notify=True) self.multicall._job_list.append(new_job) return new_job class MultiCallIterator(object): def __init__(self, results): self.results = results def __iter__(self): for i in range(0, len(self.results)): yield self[i] raise StopIteration def __getitem__(self, i): item = self.results[i] check_for_errors(item) return item['result'] def __len__(self): return len(self.results) class MultiCall(object): def __init__(self, server): self._server = server self._job_list = [] def _request(self): if len(self._job_list) < 1: # Should we alert? This /is/ pretty obvious. return request_body = '[ %s ]' % ','.join([job.request() for job in self._job_list]) responses = self._server._run_request(request_body) del self._job_list[:] if not responses: responses = [] return MultiCallIterator(responses) @property def _notify(self): return MultiCallNotify(self) def __getattr__(self, name): new_job = MultiCallMethod(name) self._job_list.append(new_job) return new_job __call__ = _request # These lines conform to xmlrpclib's "compatibility" line. # Not really sure if we should include these, but oh well. 
Server = ServerProxy class Fault(object): # JSON-RPC error class def __init__(self, code=-32000, message='Server error', rpcid=None): self.faultCode = code self.faultString = message self.rpcid = rpcid def error(self): return {'code':self.faultCode, 'message':self.faultString} def response(self, rpcid=None, version=None): if not version: version = config.version if rpcid: self.rpcid = rpcid return dumps( self, methodresponse=True, rpcid=self.rpcid, version=version ) def __repr__(self): return '<Fault %s: %s>' % (self.faultCode, self.faultString) def random_id(length=8): return_id = '' for i in range(length): return_id += random.choice(IDCHARS) return return_id class Payload(dict): def __init__(self, rpcid=None, version=None): if not version: version = config.version self.id = rpcid self.version = float(version) def request(self, method, params=[]): if type(method) not in types.StringTypes: raise ValueError('Method name must be a string.') if not self.id: self.id = random_id() request = { 'id':self.id, 'method':method } if params: request['params'] = params if self.version >= 2: request['jsonrpc'] = str(self.version) return request def notify(self, method, params=[]): request = self.request(method, params) if self.version >= 2: del request['id'] else: request['id'] = None return request def response(self, result=None): response = {'result':result, 'id':self.id} if self.version >= 2: response['jsonrpc'] = str(self.version) else: response['error'] = None return response def error(self, code=-32000, message='Server error.'): error = self.response() if self.version >= 2: del error['result'] else: error['result'] = None error['error'] = {'code':code, 'message':message} return error def dumps(params=[], methodname=None, methodresponse=None, encoding=None, rpcid=None, version=None, notify=None): """ This differs from the Python implementation in that it implements the rpcid argument since the 2.0 spec requires it for responses. """ if not version: version = config.version valid_params = (types.TupleType, types.ListType, types.DictType) if methodname in types.StringTypes and \ type(params) not in valid_params and \ not isinstance(params, Fault): """ If a method, and params are not in a listish or a Fault, error out. """ raise TypeError('Params must be a dict, list, tuple or Fault ' + 'instance.') # Begin parsing object payload = Payload(rpcid=rpcid, version=version) if not encoding: encoding = 'utf-8' if type(params) is Fault: response = payload.error(params.faultCode, params.faultString) return jdumps(response, encoding=encoding) if type(methodname) not in types.StringTypes and methodresponse != True: raise ValueError('Method name must be a string, or methodresponse '+ 'must be set to True.') if config.use_jsonclass == True: from jsonrpclib import jsonclass params = jsonclass.dump(params) if methodresponse is True: if rpcid is None: raise ValueError('A method response must have an rpcid.') response = payload.response(params) return jdumps(response, encoding=encoding) request = None if notify == True: request = payload.notify(methodname, params) else: request = payload.request(methodname, params) return jdumps(request, encoding=encoding) def loads(data): """ This differs from the Python implementation, in that it returns the request structure in Dict format instead of the method, params. It will return a list in the case of a batch request / response. 
""" if data == '': # notification return None result = jloads(data) # if the above raises an error, the implementing server code # should return something like the following: # { 'jsonrpc':'2.0', 'error': fault.error(), id: None } if config.use_jsonclass == True: from jsonrpclib import jsonclass result = jsonclass.load(result) return result def check_for_errors(result): if not result: # Notification return result if type(result) is not types.DictType: raise TypeError('Response is not a dict.') if 'jsonrpc' in result.keys() and float(result['jsonrpc']) > 2.0: raise NotImplementedError('JSON-RPC version not yet supported.') if 'result' not in result.keys() and 'error' not in result.keys(): raise ValueError('Response does not have a result or error key.') if 'error' in result.keys() and result['error'] != None: code = result['error']['code'] message = result['error']['message'] raise ProtocolError((code, message)) return result def isbatch(result): if type(result) not in (types.ListType, types.TupleType): return False if len(result) < 1: return False if type(result[0]) is not types.DictType: return False if 'jsonrpc' not in result[0].keys(): return False try: version = float(result[0]['jsonrpc']) except ValueError: raise ProtocolError('"jsonrpc" key must be a float(able) value.') if version < 2: return False return True def isnotification(request): if 'id' not in request.keys(): # 2.0 notification return True if request['id'] == None: # 1.0 notification return True return False
mit
runekaagaard/django-contrib-locking
tests/utils_tests/test_text.py
5
10262
# -*- coding: utf-8 -*- from __future__ import unicode_literals from unittest import skipUnless import warnings from django.test import SimpleTestCase, ignore_warnings from django.test.utils import reset_warning_registry from django.utils import six, text from django.utils.deprecation import RemovedInDjango19Warning from django.utils.encoding import force_text from django.utils.functional import lazy from django.utils.translation import override lazystr = lazy(force_text, six.text_type) IS_WIDE_BUILD = (len('\U0001F4A9') == 1) class TestUtilsText(SimpleTestCase): def test_get_text_list(self): self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d') self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c') self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b') self.assertEqual(text.get_text_list(['a']), 'a') self.assertEqual(text.get_text_list([]), '') with override('ar'): self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c") def test_smart_split(self): testdata = [ ('This is "a person" test.', ['This', 'is', '"a person"', 'test.']), ('This is "a person\'s" test.', ['This', 'is', '"a person\'s"', 'test.']), ('This is "a person\\"s" test.', ['This', 'is', '"a person\\"s"', 'test.']), ('"a \'one', ['"a', "'one"]), ('all friends\' tests', ['all', 'friends\'', 'tests']), ('url search_page words="something else"', ['url', 'search_page', 'words="something else"']), ("url search_page words='something else'", ['url', 'search_page', "words='something else'"]), ('url search_page words "something else"', ['url', 'search_page', 'words', '"something else"']), ('url search_page words-"something else"', ['url', 'search_page', 'words-"something else"']), ('url search_page words=hello', ['url', 'search_page', 'words=hello']), ('url search_page words="something else', ['url', 'search_page', 'words="something', 'else']), ("cut:','|cut:' '", ["cut:','|cut:' '"]), (lazystr("a b c d"), # Test for #20231 ['a', 'b', 'c', 'd']), ] for test, expected in testdata: self.assertEqual(list(text.smart_split(test)), expected) def test_truncate_chars(self): truncator = text.Truncator( 'The quick brown fox jumped over the lazy dog.' ) self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.chars(100)), self.assertEqual('The quick brown fox ...', truncator.chars(23)), self.assertEqual('The quick brown fo.....', truncator.chars(23, '.....')), # Ensure that we normalize our unicode data first nfc = text.Truncator('o\xfco\xfco\xfco\xfc') nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308') self.assertEqual('oüoüoüoü', nfc.chars(8)) self.assertEqual('oüoüoüoü', nfd.chars(8)) self.assertEqual('oü...', nfc.chars(5)) self.assertEqual('oü...', nfd.chars(5)) # Ensure the final length is calculated correctly when there are # combining characters with no precomposed form, and that combining # characters are not split up. truncator = text.Truncator('-B\u030AB\u030A----8') self.assertEqual('-B\u030A...', truncator.chars(5)) self.assertEqual('-B\u030AB\u030A-...', truncator.chars(7)) self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8)) # Ensure the length of the end text is correctly calculated when it # contains combining characters with no precomposed form. 
truncator = text.Truncator('-----') self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A')) self.assertEqual('-----', truncator.chars(5, 'B\u030A')) # Make a best effort to shorten to the desired length, but requesting # a length shorter than the ellipsis shouldn't break self.assertEqual('...', text.Truncator('asdf').chars(1)) def test_truncate_words(self): truncator = text.Truncator('The quick brown fox jumped over the lazy ' 'dog.') self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.words(10)) self.assertEqual('The quick brown fox...', truncator.words(4)) self.assertEqual('The quick brown fox[snip]', truncator.words(4, '[snip]')) def test_truncate_html_words(self): truncator = text.Truncator('<p id="par"><strong><em>The quick brown fox' ' jumped over the lazy dog.</em></strong></p>') self.assertEqual('<p id="par"><strong><em>The quick brown fox jumped over' ' the lazy dog.</em></strong></p>', truncator.words(10, html=True)) self.assertEqual('<p id="par"><strong><em>The quick brown fox...</em>' '</strong></p>', truncator.words(4, html=True)) self.assertEqual('<p id="par"><strong><em>The quick brown fox....</em>' '</strong></p>', truncator.words(4, '....', html=True)) self.assertEqual('<p id="par"><strong><em>The quick brown fox</em>' '</strong></p>', truncator.words(4, '', html=True)) # Test with new line inside tag truncator = text.Truncator('<p>The quick <a href="xyz.html"\n' 'id="mylink">brown fox</a> jumped over the lazy dog.</p>') self.assertEqual('<p>The quick <a href="xyz.html"\n' 'id="mylink">brown...</a></p>', truncator.words(3, '...', html=True)) # Test self-closing tags truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over' ' the lazy dog.') self.assertEqual('<br/>The <hr />quick brown...', truncator.words(3, '...', html=True)) truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> ' 'jumped over the lazy dog.') self.assertEqual('<br>The <hr/>quick <em>brown...</em>', truncator.words(3, '...', html=True)) # Test html entities truncator = text.Truncator('<i>Buenos d&iacute;as!' ' &#x00bf;C&oacute;mo est&aacute;?</i>') self.assertEqual('<i>Buenos d&iacute;as! &#x00bf;C&oacute;mo...</i>', truncator.words(3, '...', html=True)) truncator = text.Truncator('<p>I &lt;3 python, what about you?</p>') self.assertEqual('<p>I &lt;3 python...</p>', truncator.words(3, '...', html=True)) def test_wrap(self): digits = '1234 67 9' self.assertEqual(text.wrap(digits, 100), '1234 67 9') self.assertEqual(text.wrap(digits, 9), '1234 67 9') self.assertEqual(text.wrap(digits, 8), '1234 67\n9') self.assertEqual(text.wrap('short\na long line', 7), 'short\na long\nline') self.assertEqual(text.wrap('do-not-break-long-words please? 
ok', 8), 'do-not-break-long-words\nplease?\nok') long_word = 'l%sng' % ('o' * 20) self.assertEqual(text.wrap(long_word, 20), long_word) self.assertEqual(text.wrap('a %s word' % long_word, 10), 'a\n%s\nword' % long_word) def test_normalize_newlines(self): self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"), "abc\ndef\nghi\n") self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n") self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi") self.assertEqual(text.normalize_newlines(""), "") def test_normalize_newlines_bytes(self): """normalize_newlines should be able to handle bytes too""" normalized = text.normalize_newlines(b"abc\ndef\rghi\r\n") self.assertEqual(normalized, "abc\ndef\nghi\n") self.assertIsInstance(normalized, six.text_type) def test_slugify(self): items = ( ('Hello, World!', 'hello-world'), ('spam & eggs', 'spam-eggs'), ) for value, output in items: self.assertEqual(text.slugify(value), output) def test_unescape_entities(self): items = [ ('', ''), ('foo', 'foo'), ('&amp;', '&'), ('&#x26;', '&'), ('&#38;', '&'), ('foo &amp; bar', 'foo & bar'), ('foo & bar', 'foo & bar'), ] for value, output in items: self.assertEqual(text.unescape_entities(value), output) def test_get_valid_filename(self): filename = "^&'@{}[],$=!-#()%+~_123.txt" self.assertEqual(text.get_valid_filename(filename), "-_123.txt") @ignore_warnings(category=RemovedInDjango19Warning) def test_javascript_quote(self): input = "<script>alert('Hello \\xff.\n Welcome\there\r');</script>" output = r"<script>alert(\'Hello \\xff.\n Welcome\there\r\');<\/script>" self.assertEqual(text.javascript_quote(input), output) # Exercising quote_double_quotes keyword argument input = '"Text"' self.assertEqual(text.javascript_quote(input), '"Text"') self.assertEqual(text.javascript_quote(input, quote_double_quotes=True), '&quot;Text&quot;') @ignore_warnings(category=RemovedInDjango19Warning) @skipUnless(IS_WIDE_BUILD, 'Not running in a wide build of Python') def test_javascript_quote_unicode(self): input = "<script>alert('Hello \\xff.\n Wel𝕃come\there\r');</script>" output = r"<script>alert(\'Hello \\xff.\n Wel𝕃come\there\r\');<\/script>" self.assertEqual(text.javascript_quote(input), output) def test_deprecation(self): reset_warning_registry() with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") text.javascript_quote('thingy') self.assertEqual(len(w), 1) self.assertIn('escapejs()', repr(w[0].message))
bsd-3-clause
druuu/django
tests/generic_views/test_detail.py
281
8387
# -*- coding: utf-8 -*- from __future__ import unicode_literals import datetime from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist from django.test import TestCase, override_settings from django.test.client import RequestFactory from django.views.generic.base import View from django.views.generic.detail import SingleObjectTemplateResponseMixin from django.views.generic.edit import ModelFormMixin from .models import Artist, Author, Book, Page @override_settings(ROOT_URLCONF='generic_views.urls') class DetailViewTest(TestCase): @classmethod def setUpTestData(cls): cls.artist1 = Artist.objects.create(name='Rene Magritte') cls.author1 = Author.objects.create(name='Roberto Bolaño', slug='roberto-bolano') cls.author2 = Author.objects.create(name='Scott Rosenberg', slug='scott-rosenberg') cls.book1 = Book.objects.create(name='2066', slug='2066', pages=800, pubdate=datetime.date(2008, 10, 1)) cls.book1.authors.add(cls.author1) cls.book2 = Book.objects.create( name='Dreaming in Code', slug='dreaming-in-code', pages=300, pubdate=datetime.date(2006, 5, 1) ) cls.page1 = Page.objects.create( content='I was once bitten by a moose.', template='generic_views/page_template.html' ) def test_simple_object(self): res = self.client.get('/detail/obj/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], {'foo': 'bar'}) self.assertIsInstance(res.context['view'], View) self.assertTemplateUsed(res, 'generic_views/detail.html') def test_detail_by_pk(self): res = self.client.get('/detail/author/%s/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_missing_object(self): res = self.client.get('/detail/author/500/') self.assertEqual(res.status_code, 404) def test_detail_object_does_not_exist(self): self.assertRaises(ObjectDoesNotExist, self.client.get, '/detail/doesnotexist/1/') def test_detail_by_custom_pk(self): res = self.client.get('/detail/author/bycustompk/%s/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_slug(self): res = self.client.get('/detail/author/byslug/scott-rosenberg/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg')) self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg')) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_custom_slug(self): res = self.client.get('/detail/author/bycustomslug/scott-rosenberg/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], Author.objects.get(slug='scott-rosenberg')) self.assertEqual(res.context['author'], Author.objects.get(slug='scott-rosenberg')) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_pk_ignore_slug(self): res = self.client.get('/detail/author/bypkignoreslug/%s-roberto-bolano/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_pk_ignore_slug_mismatch(self): res = 
self.client.get('/detail/author/bypkignoreslug/%s-scott-rosenberg/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_pk_and_slug(self): res = self.client.get('/detail/author/bypkandslug/%s-roberto-bolano/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_detail_by_pk_and_slug_mismatch_404(self): res = self.client.get('/detail/author/bypkandslug/%s-scott-rosenberg/' % self.author1.pk) self.assertEqual(res.status_code, 404) def test_verbose_name(self): res = self.client.get('/detail/artist/%s/' % self.artist1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.artist1) self.assertEqual(res.context['artist'], self.artist1) self.assertTemplateUsed(res, 'generic_views/artist_detail.html') def test_template_name(self): res = self.client.get('/detail/author/%s/template_name/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/about.html') def test_template_name_suffix(self): res = self.client.get('/detail/author/%s/template_name_suffix/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['author'], self.author1) self.assertTemplateUsed(res, 'generic_views/author_view.html') def test_template_name_field(self): res = self.client.get('/detail/page/%s/field/' % self.page1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.page1) self.assertEqual(res.context['page'], self.page1) self.assertTemplateUsed(res, 'generic_views/page_template.html') def test_context_object_name(self): res = self.client.get('/detail/author/%s/context_object_name/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertEqual(res.context['thingy'], self.author1) self.assertNotIn('author', res.context) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_duplicated_context_object_name(self): res = self.client.get('/detail/author/%s/dupe_context_object_name/' % self.author1.pk) self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'], self.author1) self.assertNotIn('author', res.context) self.assertTemplateUsed(res, 'generic_views/author_detail.html') def test_deferred_queryset_template_name(self): class FormContext(SingleObjectTemplateResponseMixin): request = RequestFactory().get('/') model = Author object = Author.objects.defer('name').get(pk=self.author1.pk) self.assertEqual(FormContext().get_template_names()[0], 'generic_views/author_detail.html') def test_deferred_queryset_context_object_name(self): class FormContext(ModelFormMixin): request = RequestFactory().get('/') model = Author object = Author.objects.defer('name').get(pk=self.author1.pk) fields = ('name',) form_context_data = FormContext().get_context_data() self.assertEqual(form_context_data['object'], self.author1) self.assertEqual(form_context_data['author'], self.author1) def test_invalid_url(self): self.assertRaises(AttributeError, self.client.get, 
'/detail/author/invalid/url/') def test_invalid_queryset(self): self.assertRaises(ImproperlyConfigured, self.client.get, '/detail/author/invalid/qs/') def test_non_model_object_with_meta(self): res = self.client.get('/detail/nonmodel/1/') self.assertEqual(res.status_code, 200) self.assertEqual(res.context['object'].id, "non_model_1")
bsd-3-clause
jostep/tensorflow
tensorflow/contrib/util/loader.py
88
2003
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for loading op libraries. @@load_op_library """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import re from tensorflow.python.framework import load_library from tensorflow.python.platform import resource_loader def load_op_library(path): """Loads a contrib op library from the given path. NOTE(mrry): On Windows, we currently assume that some contrib op libraries are statically linked into the main TensorFlow Python extension DLL - use dynamically linked ops if the .so is present. Args: path: An absolute path to a shared object file. Returns: A Python module containing the Python wrappers for Ops defined in the plugin. """ if os.name == 'nt': # To avoid makeing every user_ops aware of windows, re-write # the file extension from .so to .dll. path = re.sub(r'\.so$', '.dll', path) # Currently we have only some user_ops as dlls on windows - don't try # to load them if the dll is not found. # TODO(mrry): Once we have all of them this check should be removed. if not os.path.exists(path): return None path = resource_loader.get_path_to_datafile(path) ret = load_library.load_op_library(path) assert ret, 'Could not load %s' % path return ret
apache-2.0
jostep/tensorflow
tensorflow/examples/learn/text_classification_cnn.py
29
5677
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for CNN-based text classification with DBpedia data."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import sys

import numpy as np
import pandas
from sklearn import metrics
import tensorflow as tf

FLAGS = None

MAX_DOCUMENT_LENGTH = 100
EMBEDDING_SIZE = 20
N_FILTERS = 10
WINDOW_SIZE = 20
FILTER_SHAPE1 = [WINDOW_SIZE, EMBEDDING_SIZE]
FILTER_SHAPE2 = [WINDOW_SIZE, N_FILTERS]
POOLING_WINDOW = 4
POOLING_STRIDE = 2
n_words = 0
MAX_LABEL = 15
WORDS_FEATURE = 'words'  # Name of the input words feature.


def cnn_model(features, labels, mode):
  """2 layer ConvNet to predict from sequence of words to a class."""
  # Convert indexes of words into embeddings.
  # This creates embeddings matrix of [n_words, EMBEDDING_SIZE] and then
  # maps word indexes of the sequence into [batch_size, sequence_length,
  # EMBEDDING_SIZE].
  word_vectors = tf.contrib.layers.embed_sequence(
      features[WORDS_FEATURE], vocab_size=n_words, embed_dim=EMBEDDING_SIZE)
  word_vectors = tf.expand_dims(word_vectors, 3)
  with tf.variable_scope('CNN_Layer1'):
    # Apply Convolution filtering on input sequence.
    conv1 = tf.layers.conv2d(
        word_vectors,
        filters=N_FILTERS,
        kernel_size=FILTER_SHAPE1,
        padding='VALID',
        # Add a ReLU for non linearity.
        activation=tf.nn.relu)
    # Max pooling across output of Convolution+Relu.
    pool1 = tf.layers.max_pooling2d(
        conv1,
        pool_size=POOLING_WINDOW,
        strides=POOLING_STRIDE,
        padding='SAME')
    # Transpose matrix so that n_filters from convolution becomes width.
    pool1 = tf.transpose(pool1, [0, 1, 3, 2])
  with tf.variable_scope('CNN_Layer2'):
    # Second level of convolution filtering.
    conv2 = tf.layers.conv2d(
        pool1,
        filters=N_FILTERS,
        kernel_size=FILTER_SHAPE2,
        padding='VALID')
    # Max across each filter to get useful features for classification.
    pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])

  # Apply regular WX + B and classification.
  logits = tf.layers.dense(pool2, MAX_LABEL, activation=None)

  predicted_classes = tf.argmax(logits, 1)
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions={
            'class': predicted_classes,
            'prob': tf.nn.softmax(logits)
        })

  onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
  loss = tf.losses.softmax_cross_entropy(
      onehot_labels=onehot_labels, logits=logits)
  if mode == tf.estimator.ModeKeys.TRAIN:
    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
    return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

  eval_metric_ops = {
      'accuracy': tf.metrics.accuracy(
          labels=labels, predictions=predicted_classes)
  }
  return tf.estimator.EstimatorSpec(
      mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)


def main(unused_argv):
  global n_words
  # Prepare training and testing data
  dbpedia = tf.contrib.learn.datasets.load_dataset(
      'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
  x_train = pandas.DataFrame(dbpedia.train.data)[1]
  y_train = pandas.Series(dbpedia.train.target)
  x_test = pandas.DataFrame(dbpedia.test.data)[1]
  y_test = pandas.Series(dbpedia.test.target)

  # Process vocabulary
  vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
      MAX_DOCUMENT_LENGTH)
  x_train = np.array(list(vocab_processor.fit_transform(x_train)))
  x_test = np.array(list(vocab_processor.transform(x_test)))
  n_words = len(vocab_processor.vocabulary_)
  print('Total words: %d' % n_words)

  # Build model
  classifier = tf.estimator.Estimator(model_fn=cnn_model)

  # Train.
  train_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={WORDS_FEATURE: x_train},
      y=y_train,
      batch_size=len(x_train),
      num_epochs=None,
      shuffle=True)
  classifier.train(input_fn=train_input_fn, steps=100)

  # Predict.
  test_input_fn = tf.estimator.inputs.numpy_input_fn(
      x={WORDS_FEATURE: x_test}, y=y_test, num_epochs=1, shuffle=False)
  predictions = classifier.predict(input_fn=test_input_fn)
  y_predicted = np.array(list(p['class'] for p in predictions))
  y_predicted = y_predicted.reshape(np.array(y_test).shape)

  # Score with sklearn.
  score = metrics.accuracy_score(y_test, y_predicted)
  print('Accuracy (sklearn): {0:f}'.format(score))

  # Score with tensorflow.
  scores = classifier.evaluate(input_fn=test_input_fn)
  print('Accuracy (tensorflow): {0:f}'.format(scores['accuracy']))


if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--test_with_fake_data',
      default=False,
      help='Test the example code with fake data.',
      action='store_true')
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
apache-2.0
aztec1337/microbit-menu
microbit_menu-v0.2.py
1
1053
'''
MicrobitMenu v0.2
Author: aztec1337
Github: https://github.com/aztec1337/microbit-menu/

Licensed under the GNU General Public License Version 3 (GNU GPL v3),
available at: http://www.gnu.org/licenses/gpl-3.0.txt
'''

from microbit import *
import random

menuAmount = 1  # Start at 0
menu = 0
void = 0

while True:
    if menu == 0:
        if void == 0:
            display.scroll("DICE")
            void = 1
    elif menu == 1:
        if void == 0:
            display.scroll("LETTER")
            void = 1

    if button_a.is_pressed():
        menu += 1
        if menu > menuAmount:
            menu = 0
        void = 0

    if button_b.is_pressed():
        if menu == 0:
            display.show(str(random.randint(1, 6)))  # Random int (1 - 6)
        elif menu == 1:
            letters = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j",
                       "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
                       "u", "v", "w", "x", "y", "z"]
            display.show(random.choice(letters))  # Choose random of alphabet
gpl-3.0
mne-tools/mne-python
mne/preprocessing/tests/test_ica.py
1
62908
# Author: Denis Engemann <denis.engemann@gmail.com> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # # License: BSD-3-Clause from contextlib import nullcontext import os import os.path as op import shutil import pytest import numpy as np from numpy.testing import (assert_array_almost_equal, assert_array_equal, assert_allclose, assert_equal) from scipy import stats, linalg from scipy.io import loadmat, savemat import matplotlib.pyplot as plt from mne import (Epochs, read_events, pick_types, create_info, EpochsArray, EvokedArray, Annotations, pick_channels_regexp, make_ad_hoc_cov) from mne.cov import read_cov from mne.preprocessing import (ICA as _ICA, ica_find_ecg_events, ica_find_eog_events, read_ica) from mne.preprocessing.ica import (get_score_funcs, corrmap, _sort_components, _ica_explained_variance, read_ica_eeglab) from mne.io import read_raw_fif, Info, RawArray, read_raw_ctf, read_raw_eeglab from mne.io.pick import _DATA_CH_TYPES_SPLIT, get_channel_type_constants from mne.io.eeglab.eeglab import _check_load_mat from mne.rank import _compute_rank_int from mne.utils import (catch_logging, requires_sklearn, _record_warnings, check_version) from mne.datasets import testing from mne.event import make_fixed_length_events data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data') raw_fname = op.join(data_dir, 'test_raw.fif') event_name = op.join(data_dir, 'test-eve.fif') test_cov_name = op.join(data_dir, 'test-cov.fif') test_base_dir = testing.data_path(download=False) ctf_fname = op.join(test_base_dir, 'CTF', 'testdata_ctf.ds') fif_fname = op.join(test_base_dir, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif') eeglab_fname = op.join(test_base_dir, 'EEGLAB', 'test_raw.set') eeglab_montage = op.join(test_base_dir, 'EEGLAB', 'test_chans.locs') ctf_fname2 = op.join(test_base_dir, 'CTF', 'catch-alp-good-f.ds') event_id, tmin, tmax = 1, -0.2, 0.2 # if stop is too small pca may fail in some cases, but we're okay on this file start, stop = 0, 6 score_funcs_unsuited = ['pointbiserialr', 'ansari'] pymatreader_mark = pytest.mark.skipif( not check_version('pymatreader'), reason='Requires pymatreader') def ICA(*args, **kwargs): """Fix the random state in tests.""" if 'random_state' not in kwargs: kwargs['random_state'] = 0 return _ICA(*args, **kwargs) def _skip_check_picard(method): if method == 'picard': try: import picard # noqa, analysis:ignore except Exception as exp: pytest.skip("Picard is not installed (%s)." 
% (exp,)) @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) def test_ica_full_data_recovery(method): """Test recovery of full data when no source is rejected.""" # Most basic recovery _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data() with raw.info._unlock(): raw.info['projs'] = [] events = read_events(event_name) picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[:10] epochs = Epochs(raw, events[:4], event_id, tmin, tmax, picks=picks, baseline=None, preload=True) evoked = epochs.average() n_channels = 5 data = raw._data[:n_channels].copy() data_epochs = epochs.get_data() data_evoked = evoked.data raw.set_annotations(Annotations([0.5], [0.5], ['BAD'])) methods = [method] for method in methods: stuff = [(2, n_channels, True), (2, n_channels // 2, False)] for n_components, n_pca_components, ok in stuff: ica = ICA(n_components=n_components, random_state=0, method=method, max_iter=1) kwargs = dict(exclude=[], n_pca_components=n_pca_components) picks = list(range(n_channels)) with pytest.warns(UserWarning, match=None): # sometimes warns ica.fit(raw, picks=picks) _assert_ica_attributes(ica, raw.get_data(picks)) raw2 = ica.apply(raw.copy(), **kwargs) if ok: assert_allclose(data[:n_channels], raw2._data[:n_channels], rtol=1e-10, atol=1e-15) else: diff = np.abs(data[:n_channels] - raw2._data[:n_channels]) assert (np.max(diff) > 1e-14) ica = ICA(n_components=n_components, method=method, random_state=0) with _record_warnings(): # sometimes warns ica.fit(epochs, picks=picks) _assert_ica_attributes(ica, epochs.get_data(picks)) epochs2 = ica.apply(epochs.copy(), **kwargs) data2 = epochs2.get_data()[:, :n_channels] if ok: assert_allclose(data_epochs[:, :n_channels], data2, rtol=1e-10, atol=1e-15) else: diff = np.abs(data_epochs[:, :n_channels] - data2) assert (np.max(diff) > 1e-14) evoked2 = ica.apply(evoked.copy(), **kwargs) data2 = evoked2.data[:n_channels] if ok: assert_allclose(data_evoked[:n_channels], data2, rtol=1e-10, atol=1e-15) else: diff = np.abs(evoked.data[:n_channels] - data2) assert (np.max(diff) > 1e-14) with pytest.raises(ValueError, match='Invalid value'): ICA(method='pizza-decomposision') @pytest.mark.parametrize("method", ["fastica", "picard"]) def test_ica_simple(method): """Test that ICA recovers the unmixing matrix in a simple case.""" if method == "fastica": try: import sklearn # noqa: F401 except ImportError: pytest.skip("scikit-learn not installed") _skip_check_picard(method) n_components = 3 n_samples = 1000 rng = np.random.RandomState(0) S = rng.laplace(size=(n_components, n_samples)) A = rng.randn(n_components, n_components) data = np.dot(A, S) info = create_info(data.shape[-2], 1000., 'eeg') cov = make_ad_hoc_cov(info) ica = ICA(n_components=n_components, method=method, random_state=0, noise_cov=cov) with pytest.warns(RuntimeWarning, match='No average EEG.*'): ica.fit(RawArray(data, info)) transform = ica.unmixing_matrix_ @ ica.pca_components_ @ A amari_distance = np.mean(np.sum(np.abs(transform), axis=1) / np.max(np.abs(transform), axis=1) - 1.) assert amari_distance < 0.1 def test_warnings(): """Test that ICA warns on certain input data conditions.""" raw = read_raw_fif(raw_fname).crop(0, 5).load_data() events = read_events(event_name) epochs = Epochs(raw, events=events, baseline=None, preload=True) ica = ICA(n_components=2, max_iter=1, method='infomax', random_state=0) # not high-passed with epochs.info._unlock(): epochs.info['highpass'] = 0. 
with pytest.warns(RuntimeWarning, match='should be high-pass filtered'): ica.fit(epochs) # baselined with epochs.info._unlock(): epochs.info['highpass'] = 1. epochs.baseline = (epochs.tmin, 0) with pytest.warns(RuntimeWarning, match='epochs.*were baseline-corrected'): ica.fit(epochs) # cleaning baseline-corrected data with epochs.info._unlock(): epochs.info['highpass'] = 1. epochs.baseline = None ica.fit(epochs) epochs.baseline = (epochs.tmin, 0) with pytest.warns(RuntimeWarning, match='consider baseline-correcting.*' 'again'): ica.apply(epochs) @requires_sklearn @pytest.mark.parametrize('n_components', (None, 0.9999, 8, 9, 10)) @pytest.mark.parametrize('n_pca_components', [8, 9, 0.9999, 10]) @pytest.mark.filterwarnings('ignore:FastICA did not converge.*:UserWarning') def test_ica_noop(n_components, n_pca_components, tmp_path): """Test that our ICA is stable even with a bad max_pca_components.""" data = np.random.RandomState(0).randn(10, 1000) info = create_info(10, 1000., 'eeg') raw = RawArray(data, info) raw.set_eeg_reference() with raw.info._unlock(): raw.info['highpass'] = 1.0 # fake high-pass filtering assert np.linalg.matrix_rank(raw.get_data()) == 9 kwargs = dict(n_components=n_components, verbose=True) if isinstance(n_components, int) and \ isinstance(n_pca_components, int) and \ n_components > n_pca_components: return ica = ICA(**kwargs) ica.n_pca_components = n_pca_components # backward compat if n_components == 10 and n_pca_components == 0.9999: with pytest.raises(RuntimeError, match='.*requires.*PCA.*'): ica.fit(raw) return if n_components == 10 and n_pca_components == 10: ctx = pytest.warns(RuntimeWarning, match='.*unstable.*integer <= 9') bad = True # pinv will fail elif n_components == 0.9999 and n_pca_components == 8: ctx = pytest.raises(RuntimeError, match='requires 9 PCA values.*but') bad = 'exit' else: bad = False # pinv will not fail ctx = nullcontext() with ctx: ica.fit(raw) assert ica._max_pca_components is None if bad == 'exit': return raw_new = ica.apply(raw.copy()) # 8 components is not a no-op; "bad" means our pinv has failed if n_pca_components == 8 or bad: assert ica.n_pca_components == n_pca_components assert not np.allclose(raw.get_data(), raw_new.get_data(), atol=0) return assert_allclose(raw.get_data(), raw_new.get_data(), err_msg='Id failure') _assert_ica_attributes(ica, data) # and with I/O fname = tmp_path / 'temp-ica.fif' ica.save(fname) ica_new = read_ica(fname) raw_new = ica_new.apply(raw.copy()) assert_allclose(raw.get_data(), raw_new.get_data(), err_msg='I/O failure') _assert_ica_attributes(ica_new) assert ica.reject_ == ica_new.reject_ @requires_sklearn @pytest.mark.parametrize("method, max_iter_default", [("fastica", 1000), ("infomax", 500), ("picard", 500)]) def test_ica_max_iter_(method, max_iter_default): """Test that ICA.max_iter is set to the right defaults.""" _skip_check_picard(method) # check that new defaults come out for 'auto' ica = ICA(n_components=3, method=method, max_iter='auto') assert ica.max_iter == max_iter_default # check that user input comes out unchanged ica = ICA(n_components=3, method=method, max_iter=2000) assert ica.max_iter == 2000 with pytest.raises(ValueError, match='Invalid'): ICA(max_iter='foo') with pytest.raises(TypeError, match='must be an instance'): ICA(max_iter=1.) 
@requires_sklearn @pytest.mark.parametrize("method", ["infomax", "fastica", "picard"]) def test_ica_n_iter_(method, tmp_path): """Test that ICA.n_iter_ is set after fitting.""" _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data() n_components = 3 max_iter = 1 ica = ICA(n_components=n_components, max_iter=max_iter, method=method, random_state=0) if method == 'infomax': ica.fit(raw) else: with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw) assert ica.method == method assert_equal(ica.n_iter_, max_iter) # Test I/O roundtrip. output_fname = tmp_path / 'test_ica-ica.fif' _assert_ica_attributes(ica, raw.get_data('data'), limits=(5, 110)) ica.save(output_fname) ica = read_ica(output_fname) assert ica.method == method _assert_ica_attributes(ica) assert_equal(ica.n_iter_, max_iter) @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) def test_ica_rank_reduction(method): """Test recovery ICA rank reduction.""" _skip_check_picard(method) # Most basic recovery raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data() picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[:10] n_components = 5 for n_pca_components in [6, 10]: with pytest.warns(UserWarning, match='did not converge'): ica = ICA(n_components=n_components, method=method, max_iter=1).fit(raw, picks=picks) rank_before = _compute_rank_int(raw.copy().pick(picks), proj=False) assert_equal(rank_before, len(picks)) raw_clean = ica.apply(raw.copy(), n_pca_components=n_pca_components) rank_after = _compute_rank_int(raw_clean.copy().pick(picks), proj=False) # interaction between ICA rejection and PCA components difficult # to preduct. Rank_after often seems to be 1 higher then # n_pca_components assert (n_components < n_pca_components <= rank_after <= rank_before) # This is a lot of parameters but they interact so they matter. Also they in # total take < 2 sec on a workstation. 
@pytest.mark.parametrize('n_pca_components', (None, 0.999999)) @pytest.mark.parametrize('proj', (True, False)) @pytest.mark.parametrize('cov', (False, True)) @pytest.mark.parametrize('meg', ('mag', True, False)) @pytest.mark.parametrize('eeg', (False, True)) def test_ica_projs(n_pca_components, proj, cov, meg, eeg): """Test that ICA handles projections properly.""" if cov and not proj: # proj is always done with cov return if not meg and not eeg: # no channels return raw = read_raw_fif(raw_fname).crop(0.5, stop).pick_types( meg=meg, eeg=eeg) raw.pick(np.arange(0, len(raw.ch_names), 5)) # just for speed raw.info.normalize_proj() assert 10 < len(raw.ch_names) < 75 if eeg: raw.set_eeg_reference(projection=True) raw.load_data() raw._data -= raw._data.mean(-1, keepdims=True) raw_data = raw.get_data() assert len(raw.info['projs']) > 0 assert not raw.proj raw_fit = raw.copy() kwargs = dict(atol=1e-12 if eeg else 1e-20, rtol=1e-8) if proj: raw_fit.apply_proj() fit_data = raw_fit.get_data() if proj: assert not np.allclose(raw_fit.get_data(), raw_data, **kwargs) else: assert np.allclose(raw_fit.get_data(), raw_data, **kwargs) assert raw_fit.proj == proj if cov: noise_cov = make_ad_hoc_cov(raw.info) else: noise_cov = None # infomax here just so we don't require sklearn ica = ICA(max_iter=1, noise_cov=noise_cov, method='infomax', n_components=10) with _record_warnings(): # convergence ica.fit(raw_fit) if cov: assert ica.pre_whitener_.shape == (len(raw.ch_names),) * 2 else: assert ica.pre_whitener_.shape == (len(raw.ch_names), 1) with catch_logging() as log: raw_apply = ica.apply( raw_fit.copy(), n_pca_components=n_pca_components, verbose=True) log = log.getvalue() print(log) # very useful for debugging, might as well leave it in if proj: assert 'Applying projection' in log else: assert 'Applying projection' not in log assert_allclose(raw_apply.get_data(), fit_data, **kwargs) raw_apply = ica.apply(raw.copy()) apply_data = raw_apply.get_data() assert_allclose(apply_data, fit_data, **kwargs) if proj: assert not np.allclose(apply_data, raw_data, **kwargs) else: assert_allclose(apply_data, raw_data, **kwargs) @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) def test_ica_reset(method): """Test ICA resetting.""" _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data() picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[:10] run_time_attrs = ( 'pre_whitener_', 'unmixing_matrix_', 'mixing_matrix_', 'n_components_', 'n_samples_', 'pca_components_', 'pca_explained_variance_', 'pca_mean_', 'n_iter_' ) ica = ICA(n_components=3, method=method, max_iter=1) assert ica.current_fit == 'unfitted' with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw, picks=picks) assert (all(hasattr(ica, attr) for attr in run_time_attrs)) assert ica.labels_ is not None assert ica.current_fit == 'raw' ica._reset() assert (not any(hasattr(ica, attr) for attr in run_time_attrs)) assert ica.labels_ is not None assert ica.current_fit == 'unfitted' @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) @pytest.mark.parametrize('n_components', (2, 0.6)) @pytest.mark.parametrize('noise_cov', (False, True)) @pytest.mark.parametrize('n_pca_components', [20]) def test_ica_core(method, n_components, noise_cov, n_pca_components, browser_backend): """Test ICA on raw and epochs.""" _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(0, stop).load_data() # The None cases help reveal bugs but are time consuming. 
if noise_cov: noise_cov = read_cov(test_cov_name) noise_cov['projs'] = [] # avoid warnings else: noise_cov = None events = read_events(event_name) picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[::4] raw.pick(picks[::4]) raw.del_proj() del picks epochs = Epochs(raw, events[:4], event_id, tmin, tmax, baseline=None, preload=True) # test essential core functionality # Test ICA raw ica = ICA(noise_cov=noise_cov, n_components=n_components, method=method, max_iter=1) with pytest.raises(ValueError, match='Cannot check for channels of t'): 'meg' in ica print(ica) # to test repr repr_ = ica.__repr__() repr_html_ = ica._repr_html_() assert repr_ == f'<ICA | no decomposition, method: {method}>' assert method in repr_html_ # test fit checker with pytest.raises(RuntimeError, match='No fit available'): ica.get_sources(raw) with pytest.raises(RuntimeError, match='No fit available'): ica.get_sources(epochs) # Test error upon empty epochs fitting with pytest.raises(RuntimeError, match='none were found'): ica.fit(epochs[0:0]) # test decomposition with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw) repr(ica) # to test repr repr_ = ica.__repr__() repr_html_ = ica._repr_html_() assert 'raw data decomposition' in repr_ assert f'{ica.n_components_} ICA components' in repr_ assert 'Available PCA components' in repr_html_ assert ('mag' in ica) # should now work without error # test re-fit unmixing1 = ica.unmixing_matrix_ with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw) assert_array_almost_equal(unmixing1, ica.unmixing_matrix_) raw_sources = ica.get_sources(raw) # test for #3804 assert_equal(raw_sources._filenames, [None]) print(raw_sources) # test for gh-6271 (scaling of ICA traces) fig = raw_sources.plot(clipping=None) assert len(fig.mne.traces) in (2, 6) for line in fig.mne.traces: y = line.get_ydata() assert np.ptp(y) < 15 sources = raw_sources[:, :][0] assert (sources.shape[0] == ica.n_components_) # test preload filter raw3 = raw.copy() raw3.preload = False with pytest.raises(RuntimeError, match='to be loaded'): ica.apply(raw3) ####################################################################### # test epochs decomposition ica = ICA(noise_cov=noise_cov, n_components=n_components, method=method) with _record_warnings(): # sometimes warns ica.fit(epochs) _assert_ica_attributes(ica, epochs.get_data(), limits=(0.2, 20)) data = epochs.get_data()[:, 0, :] n_samples = np.prod(data.shape) assert_equal(ica.n_samples_, n_samples) print(ica) # to test repr sources = ica.get_sources(epochs).get_data() assert (sources.shape[1] == ica.n_components_) with pytest.raises(ValueError, match='target do not have the same nu'): ica.score_sources(epochs, target=np.arange(1)) # test preload filter epochs3 = epochs.copy() epochs3.preload = False with pytest.raises(RuntimeError, match='requires epochs data to be l'): ica.apply(epochs3) # test for bug with whitener updating _pre_whitener = ica.pre_whitener_.copy() epochs._data[:, 0, 10:15] *= 1e12 ica.apply(epochs.copy()) assert_array_equal(_pre_whitener, ica.pre_whitener_) # test expl. 
var threshold leading to empty sel ica.n_components = 0.1 with pytest.raises(RuntimeError, match='One PCA component captures most'): ica.fit(epochs) offender = 1, 2, 3, with pytest.raises(ValueError, match='Data input must be of Raw'): ica.get_sources(offender) with pytest.raises(TypeError, match='must be an instance of'): ica.fit(offender) with pytest.raises(TypeError, match='must be an instance of'): ica.apply(offender) # gh-7868 ica.n_pca_components = 3 ica.n_components = None with pytest.raises(ValueError, match='pca_components.*is greater'): ica.fit(epochs, picks=[0, 1]) ica.n_pca_components = None ica.n_components = 3 with pytest.raises(ValueError, match='n_components.*cannot be greater'): ica.fit(epochs, picks=[0, 1]) @pytest.fixture def short_raw_epochs(): """Get small data.""" raw = read_raw_fif(raw_fname).crop(0, 5).load_data() raw.pick_channels(set(raw.ch_names[::10]) | set( ['EOG 061', 'MEG 1531', 'MEG 1441', 'MEG 0121'])) assert 'eog' in raw raw.del_proj() # avoid warnings raw.set_annotations(Annotations([0.5], [0.5], ['BAD'])) raw.resample(100) # XXX This breaks the tests :( # raw.info['bads'] = [raw.ch_names[1]] # Create epochs that have different channels from raw events = make_fixed_length_events(raw) picks = pick_types(raw.info, meg=True, eeg=True, eog=False)[:-1] epochs = Epochs(raw, events, None, tmin, tmax, picks=picks, baseline=(None, 0), preload=True, proj=False) assert len(epochs) == 3 epochs_eog = Epochs(raw, epochs.events, event_id, tmin, tmax, picks=('meg', 'eog'), baseline=(None, 0), preload=True) return raw, epochs, epochs_eog @requires_sklearn @pytest.mark.slowtest @pytest.mark.parametrize("method", ["picard", "fastica"]) def test_ica_additional(method, tmp_path, short_raw_epochs): """Test additional ICA functionality.""" _skip_check_picard(method) raw, epochs, epochs_eog = short_raw_epochs few_picks = np.arange(5) # test if n_components=None works ica = ICA(n_components=None, method=method, max_iter=1) with pytest.warns(UserWarning, match='did not converge'): ica.fit(epochs) _assert_ica_attributes(ica, epochs.get_data('data'), limits=(0.05, 20)) test_cov = read_cov(test_cov_name) ica = ICA(noise_cov=test_cov, n_components=3, method=method) assert (ica.info is None) with pytest.warns(RuntimeWarning, match='normalize_proj'): ica.fit(raw, picks=few_picks) _assert_ica_attributes(ica, raw.get_data(np.arange(5)), limits=(1, 90)) assert (isinstance(ica.info, Info)) assert (ica.n_components_ < 5) ica = ICA(n_components=3, method=method, max_iter=1) with pytest.raises(RuntimeError, match='No fit'): ica.save('') with pytest.warns(Warning, match='converge'): ica.fit(raw, np.arange(1, 6)) _assert_ica_attributes( ica, raw.get_data(np.arange(1, 6))) # check Kuiper index threshold assert_allclose(ica._get_ctps_threshold(), 0.5) with pytest.raises(TypeError, match='str or numeric'): ica.find_bads_ecg(raw, threshold=None) with pytest.warns(RuntimeWarning, match='is longer than the signal'): ica.find_bads_ecg(raw, threshold=0.25) # check invalid measure argument with pytest.raises(ValueError, match='Invalid value'): ica.find_bads_ecg(raw, method='correlation', measure='unknown', threshold='auto') # check passing a ch_name to find_bads_ecg with pytest.warns(RuntimeWarning, match='longer'): _, scores_1 = ica.find_bads_ecg(raw, threshold='auto') with pytest.warns(RuntimeWarning, match='longer'): _, scores_2 = ica.find_bads_ecg(raw, raw.ch_names[1], threshold='auto') assert scores_1[0] != scores_2[0] # test corrmap ica2 = ica.copy() ica3 = ica.copy() corrmap([ica, ica2], (0, 0), 
threshold='auto', label='blinks', plot=True, ch_type="mag") with pytest.raises(RuntimeError, match='No component detected'): corrmap([ica, ica2], (0, 0), threshold=2, plot=False, show=False,) corrmap([ica, ica2], (0, 0), threshold=0.5, plot=False, show=False) assert (ica.labels_["blinks"] == ica2.labels_["blinks"]) assert (0 in ica.labels_["blinks"]) # test retrieval of component maps as arrays components = ica.get_components() template = components[:, 0] EvokedArray(components, ica.info, tmin=0.).plot_topomap([0], time_unit='s') corrmap([ica, ica3], template, threshold='auto', label='blinks', plot=True, ch_type="mag") assert (ica2.labels_["blinks"] == ica3.labels_["blinks"]) plt.close('all') # No match bad_ica = ica2.copy() bad_ica.mixing_matrix_[:] = 0. with pytest.warns(RuntimeWarning, match='divide'): with catch_logging() as log: corrmap([ica, bad_ica], (0, 0), threshold=0.5, plot=False, show=False, verbose=True) log = log.getvalue() assert 'No maps selected' in log # make sure a single threshold in a list works corrmap([ica, ica3], template, threshold=[0.5], label='blinks', plot=False, ch_type="mag") ica_different_channels = ICA(n_components=2, max_iter=1) with pytest.warns(Warning, match='converge'): ica_different_channels.fit(raw, picks=[2, 3, 4, 5]) with pytest.raises(ValueError, match='Not all ICA instances have the'): corrmap([ica_different_channels, ica], (0, 0)) # test warnings on bad filenames ica_badname = tmp_path / 'test-bad-name.fif.gz' with pytest.warns(RuntimeWarning, match='-ica.fif'): ica.save(ica_badname) with pytest.warns(RuntimeWarning, match='-ica.fif'): read_ica(ica_badname) # test decim ica = ICA(n_components=3, method=method, max_iter=1) raw_ = raw.copy() for _ in range(3): raw_.append(raw_) n_samples = raw_._data.shape[1] with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw, picks=few_picks) _assert_ica_attributes(ica) assert raw_._data.shape[1] == n_samples # test expl var with pytest.raises(ValueError, match=r".*1.0 \(exclusive\).*"): ICA(n_components=1., method=method) with pytest.raises(ValueError, match="Selecting one component"): ICA(n_components=1, method=method) ica = ICA(n_components=4, method=method, max_iter=1) with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw) _assert_ica_attributes(ica) assert ica.n_components_ == 4 ica_var = _ica_explained_variance(ica, raw, normalize=True) assert (np.all(ica_var[:-1] >= ica_var[1:])) # test ica sorting ica.exclude = [0] ica.labels_ = dict(blink=[0], think=[1]) ica_sorted = _sort_components(ica, [3, 2, 1, 0], copy=True) assert_equal(ica_sorted.exclude, [3]) assert_equal(ica_sorted.labels_, dict(blink=[3], think=[2])) # epochs extraction from raw fit with pytest.warns(RuntimeWarning, match='could not be picked'), \ pytest.raises(RuntimeError, match="match fitted data"): ica.get_sources(epochs) # test filtering ica_raw = ica.get_sources(raw) d1 = ica_raw._data[0].copy() ica_raw.filter(4, 20, fir_design='firwin2') assert_equal(ica_raw.info['lowpass'], 20.) assert_equal(ica_raw.info['highpass'], 4.) 
assert ((d1 != ica_raw._data[0]).any()) d1 = ica_raw._data[0].copy() ica_raw.notch_filter([10], trans_bandwidth=10, fir_design='firwin') assert ((d1 != ica_raw._data[0]).any()) test_ica_fname = tmp_path / 'test-ica.fif' ica.n_pca_components = 2 ica.method = 'fake' ica.save(test_ica_fname) ica_read = read_ica(test_ica_fname) assert (ica.n_pca_components == ica_read.n_pca_components) assert_equal(ica.method, ica_read.method) assert_equal(ica.labels_, ica_read.labels_) # check type consistency attrs = ('mixing_matrix_ unmixing_matrix_ pca_components_ ' 'pca_explained_variance_ pre_whitener_') def f(x, y): return getattr(x, y).dtype for attr in attrs.split(): assert_equal(f(ica_read, attr), f(ica, attr)) ica.n_pca_components = 4 ica_read.n_pca_components = 4 ica.exclude = [] ica.save(test_ica_fname, overwrite=True) # also testing overwrite ica_read = read_ica(test_ica_fname) for attr in ['mixing_matrix_', 'unmixing_matrix_', 'pca_components_', 'pca_mean_', 'pca_explained_variance_', 'pre_whitener_']: assert_array_almost_equal(getattr(ica, attr), getattr(ica_read, attr)) assert (ica.ch_names == ica_read.ch_names) assert (isinstance(ica_read.info, Info)) sources = ica.get_sources(raw)[:, :][0] sources2 = ica_read.get_sources(raw)[:, :][0] assert_array_almost_equal(sources, sources2) _raw1 = ica.apply(raw.copy(), exclude=[1]) _raw2 = ica_read.apply(raw.copy(), exclude=[1]) assert_array_almost_equal(_raw1[:, :][0], _raw2[:, :][0]) ica = ICA(n_components=2, method=method, max_iter=1) with _record_warnings(): # ICA does not converge ica.fit(raw, picks=few_picks) # check score funcs for name, func in get_score_funcs().items(): if name in score_funcs_unsuited: continue scores = ica.score_sources(raw, target='EOG 061', score_func=func, start=0, stop=10) assert (ica.n_components_ == len(scores)) # check univariate stats scores = ica.score_sources(raw, start=0, stop=50, score_func=stats.skew) # check exception handling with pytest.raises(ValueError, match='Sources and target do not have'): ica.score_sources(raw, target=np.arange(1)) evoked = epochs.average() evoked_data = evoked.data.copy() raw_data = raw[:][0].copy() epochs_data = epochs.get_data().copy() with pytest.warns(RuntimeWarning, match='longer'): idx, scores = ica.find_bads_ecg(raw, method='ctps', threshold='auto', start=0, stop=raw.times.size) assert_equal(len(scores), ica.n_components_) with pytest.warns(RuntimeWarning, match='longer'): idx, scores = ica.find_bads_ecg(raw, method='correlation', threshold='auto') assert_equal(len(scores), ica.n_components_) with pytest.warns(RuntimeWarning, match='longer'): idx, scores = ica.find_bads_eog(raw) assert_equal(len(scores), ica.n_components_) with pytest.raises(ValueError, match='integer .* start and stop'): idx, scores = ica.find_bads_ecg(epochs, start=0, stop=1000) idx, scores = ica.find_bads_ecg(epochs, method='ctps', threshold='auto', start=epochs.times[0], stop=epochs.times[-1]) assert_equal(len(scores), ica.n_components_) with pytest.raises(ValueError, match='only Raw and Epochs input'): ica.find_bads_ecg(epochs.average(), method='ctps', threshold='auto') with pytest.raises(ValueError, match='Invalid value'): ica.find_bads_ecg(raw, method='crazy-coupling') with pytest.warns(RuntimeWarning, match='longer'): idx, scores = ica.find_bads_eog(raw) assert_equal(len(scores), ica.n_components_) raw.info['chs'][raw.ch_names.index('EOG 061') - 1]['kind'] = 202 with pytest.warns(RuntimeWarning, match='longer'): idx, scores = ica.find_bads_eog(raw) assert (isinstance(scores, list)) 
assert_equal(len(scores[0]), ica.n_components_) idx, scores = ica.find_bads_eog(evoked, ch_name='MEG 1441') assert_equal(len(scores), ica.n_components_) with pytest.raises(ValueError, match='integer .* start and stop'): idx, scores = ica.find_bads_ecg(evoked, start=0, stop=1000) idx, scores = ica.find_bads_ecg(evoked, method='correlation', threshold='auto') assert_equal(len(scores), ica.n_components_) assert_array_equal(raw_data, raw[:][0]) assert_array_equal(epochs_data, epochs.get_data()) assert_array_equal(evoked_data, evoked.data) # check score funcs for name, func in get_score_funcs().items(): if name in score_funcs_unsuited: continue scores = ica.score_sources(epochs_eog, target='EOG 061', score_func=func) assert (ica.n_components_ == len(scores)) # check univariate stats scores = ica.score_sources(epochs, score_func=stats.skew) # check exception handling with pytest.raises(ValueError, match='Sources and target do not have'): ica.score_sources(epochs, target=np.arange(1)) # ecg functionality ecg_scores = ica.score_sources(raw, target='MEG 1531', score_func='pearsonr') with pytest.warns(RuntimeWarning, match='longer'): ecg_events = ica_find_ecg_events( raw, sources[np.abs(ecg_scores).argmax()]) assert (ecg_events.ndim == 2) # eog functionality eog_scores = ica.score_sources(raw, target='EOG 061', score_func='pearsonr') with pytest.warns(RuntimeWarning, match='longer'): eog_events = ica_find_eog_events( raw, sources[np.abs(eog_scores).argmax()]) assert (eog_events.ndim == 2) # Test ica fiff export assert raw.last_samp - raw.first_samp + 1 == raw.n_times assert raw.n_times > 100 ica_raw = ica.get_sources(raw, start=100, stop=200) assert ica_raw.first_samp == raw.first_samp + 100 assert ica_raw.n_times == 100 assert ica_raw.last_samp - ica_raw.first_samp + 1 == 100 assert ica_raw._data.shape[1] == 100 assert_equal(len(ica_raw._filenames), 1) # API consistency ica_chans = [ch for ch in ica_raw.ch_names if 'ICA' in ch] assert (ica.n_components_ == len(ica_chans)) test_ica_fname = op.join(op.abspath(op.curdir), 'test-ica_raw.fif') ica.n_components = np.int32(ica.n_components) ica_raw.save(test_ica_fname, overwrite=True) ica_raw2 = read_raw_fif(test_ica_fname, preload=True) assert_allclose(ica_raw._data, ica_raw2._data, rtol=1e-5, atol=1e-4) ica_raw2.close() os.remove(test_ica_fname) # Test ica epochs export ica_epochs = ica.get_sources(epochs) assert (ica_epochs.events.shape == epochs.events.shape) ica_chans = [ch for ch in ica_epochs.ch_names if 'ICA' in ch] assert (ica.n_components_ == len(ica_chans)) assert (ica.n_components_ == ica_epochs.get_data().shape[1]) assert (ica_epochs._raw is None) assert (ica_epochs.preload is True) # test float n pca components ica.pca_explained_variance_ = np.array([0.2] * 5) ica.n_components_ = 0 for ncomps, expected in [[0.3, 2], [0.9, 5], [1, 1]]: ncomps_ = ica._check_n_pca_components(ncomps) assert (ncomps_ == expected) ica = ICA(method=method) with _record_warnings(): # sometimes does not converge ica.fit(raw, picks=few_picks) _assert_ica_attributes(ica, raw.get_data(few_picks)) with pytest.warns(RuntimeWarning, match='longer'): ica.find_bads_ecg(raw, threshold='auto') ica.find_bads_eog(epochs, ch_name='MEG 0121') assert_array_equal(raw_data, raw[:][0]) raw.drop_channels(raw.ch_names[:2]) with pytest.raises(RuntimeError, match='match fitted'): with pytest.warns(RuntimeWarning, match='longer'): ica.find_bads_eog(raw) with pytest.raises(RuntimeError, match='match fitted'): with pytest.warns(RuntimeWarning, match='longer'): ica.find_bads_ecg(raw, 
threshold='auto') # test passing picks including the marked bad channels raw_ = raw.copy() raw_.pick_types(eeg=True) raw_.info['bads'] = [raw_.ch_names[0]] picks = pick_types(raw_.info, eeg=True, exclude=[]) ica = ICA(n_components=0.99, max_iter='auto') ica.fit(raw_, picks=picks, reject_by_annotation=True) @requires_sklearn def test_get_explained_variance_ratio(tmp_path, short_raw_epochs): """Test ICA.get_explained_variance_ratio().""" raw, epochs, _ = short_raw_epochs ica = ICA(max_iter=1) # Unfitted ICA should raise an exception with pytest.raises(ValueError, match='ICA must be fitted first'): ica.get_explained_variance_ratio(epochs) with pytest.warns(RuntimeWarning, match='were baseline-corrected'): ica.fit(epochs) # components = int, ch_type = None explained_var_comp_0 = ica.get_explained_variance_ratio( epochs, components=0 ) # components = int, ch_type = str explained_var_comp_0_eeg = ica.get_explained_variance_ratio( epochs, components=0, ch_type='eeg' ) # components = int, ch_type = list of str explained_var_comp_0_eeg_mag = ica.get_explained_variance_ratio( epochs, components=0, ch_type=['eeg', 'mag'] ) # components = list of int, single element, ch_type = None explained_var_comp_1 = ica.get_explained_variance_ratio( epochs, components=[1] ) # components = list of int, multiple elements, ch_type = None explained_var_comps_01 = ica.get_explained_variance_ratio( epochs, components=[0, 1] ) # components = None, i.e., all components, ch_type = None explained_var_comps_all = ica.get_explained_variance_ratio( epochs, components=None ) assert 'grad' in explained_var_comp_0 assert 'mag' in explained_var_comp_0 assert 'eeg' in explained_var_comp_0 assert len(explained_var_comp_0_eeg) == 1 assert 'eeg' in explained_var_comp_0_eeg assert 'mag' in explained_var_comp_0_eeg_mag assert 'eeg' in explained_var_comp_0_eeg_mag assert 'grad' not in explained_var_comp_0_eeg_mag assert round(explained_var_comp_0['grad'], 4) == 0.1784 assert round(explained_var_comp_0['mag'], 4) == 0.0259 assert round(explained_var_comp_0['eeg'], 4) == 0.0229 assert np.isclose( explained_var_comp_0['eeg'], explained_var_comp_0_eeg['eeg'] ) assert np.isclose( explained_var_comp_0['mag'], explained_var_comp_0_eeg_mag['mag'] ) assert np.isclose( explained_var_comp_0['eeg'], explained_var_comp_0_eeg_mag['eeg'] ) assert round(explained_var_comp_1['eeg'], 4) == 0.0231 assert round(explained_var_comps_01['eeg'], 4) == 0.0459 assert ( explained_var_comps_all['grad'] == explained_var_comps_all['mag'] == explained_var_comps_all['eeg'] == 1 ) # Test Raw ica.get_explained_variance_ratio(raw) # Test Evoked evoked = epochs.average() ica.get_explained_variance_ratio(evoked) # Test Evoked without baseline correction evoked.baseline = None ica.get_explained_variance_ratio(evoked) # Test invalid ch_type with pytest.raises(ValueError, match='only the following channel types'): ica.get_explained_variance_ratio(raw, ch_type='foobar') @requires_sklearn @pytest.mark.slowtest @pytest.mark.parametrize('method, cov', [ ('picard', None), ('picard', test_cov_name), ('fastica', None), ]) def test_ica_cov(method, cov, tmp_path, short_raw_epochs): """Test ICA with cov.""" _skip_check_picard(method) raw, epochs, epochs_eog = short_raw_epochs if cov is not None: cov = read_cov(cov) # test reading and writing test_ica_fname = tmp_path / 'test-ica.fif' kwargs = dict(n_pca_components=4) ica = ICA(noise_cov=cov, n_components=2, method=method, max_iter=1) with _record_warnings(): # ICA does not converge ica.fit(raw, picks=np.arange(10)) 
_assert_ica_attributes(ica) sources = ica.get_sources(epochs).get_data() assert (ica.mixing_matrix_.shape == (2, 2)) assert (ica.unmixing_matrix_.shape == (2, 2)) assert (ica.pca_components_.shape == (10, 10)) assert (sources.shape[1] == ica.n_components_) for exclude in [[], [0], np.array([1, 2, 3])]: ica.exclude = exclude ica.labels_ = {'foo': [0]} ica.save(test_ica_fname, overwrite=True) ica_read = read_ica(test_ica_fname) assert (list(ica.exclude) == ica_read.exclude) assert_equal(ica.labels_, ica_read.labels_) ica.apply(raw.copy(), **kwargs) ica.exclude = [] ica.apply(raw.copy(), exclude=[1], **kwargs) assert (ica.exclude == []) ica.exclude = [0, 1] ica.apply(raw.copy(), exclude=[1], **kwargs) assert (ica.exclude == [0, 1]) ica_raw = ica.get_sources(raw) assert (ica.exclude == [ica_raw.ch_names.index(e) for e in ica_raw.info['bads']]) @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) def test_ica_reject_buffer(method): """Test ICA data raw buffer rejection.""" _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data() picks = pick_types(raw.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads') raw._data[2, 1000:1005] = 5e-12 ica = ICA(n_components=3, method=method) with catch_logging() as drop_log: ica.fit(raw, picks[:5], reject=dict(mag=2.5e-12), decim=2, tstep=0.01, verbose=True, reject_by_annotation=False) assert (raw._data[:5, ::2].shape[1] - 4 == ica.n_samples_) log = [line for line in drop_log.getvalue().split('\n') if 'detected' in line] assert_equal(len(log), 1) _assert_ica_attributes(ica) @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) def test_ica_twice(method): """Test running ICA twice.""" _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data() raw.pick(raw.ch_names[::10]) picks = pick_types(raw.info, meg='grad', exclude='bads') n_components = 0.99 n_pca_components = 0.9999 if method == 'fastica': ctx = _record_warnings # convergence, sometimes else: ctx = nullcontext ica1 = ICA(n_components=n_components, method=method) with ctx(): ica1.fit(raw, picks=picks, decim=3) raw_new = ica1.apply(raw, n_pca_components=n_pca_components) ica2 = ICA(n_components=n_components, method=method) with ctx(): ica2.fit(raw_new, picks=picks, decim=3) assert_equal(ica1.n_components_, ica2.n_components_) @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard", "infomax"]) def test_fit_methods(method, tmp_path): """Test fit_params for ICA.""" _skip_check_picard(method) fit_params = {} # test no side effects ICA(fit_params=fit_params, method=method) assert fit_params == {} # Test I/O roundtrip. # Only picard and infomax support the "extended" keyword, so limit the # tests to those. 
if method in ['picard', 'infomax']: tmp_path = str(tmp_path) output_fname = op.join(tmp_path, 'test_ica-ica.fif') raw = read_raw_fif(raw_fname).crop(0.5, stop).load_data() n_components = 3 max_iter = 1 fit_params = dict(extended=True) ica = ICA(fit_params=fit_params, n_components=n_components, max_iter=max_iter, method=method) fit_params_after_instantiation = ica.fit_params if method == 'infomax': ica.fit(raw) else: with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw) ica.save(output_fname) ica = read_ica(output_fname) assert ica.fit_params == fit_params_after_instantiation @pytest.mark.parametrize( ('param_name', 'param_val'), ( ('start', 0), ('stop', 500), ('reject', dict(eeg=500e-6)), ('flat', dict(eeg=1e-6)) ) ) def test_fit_params_epochs_vs_raw(param_name, param_val, tmp_path): """Check that we get a warning when passing parameters that get ignored.""" method = 'infomax' n_components = 3 max_iter = 1 raw = read_raw_fif(raw_fname).pick_types(meg=False, eeg=True) events = read_events(event_name) reject = param_val if param_name == 'reject' else None epochs = Epochs(raw, events=events, reject=reject) ica = ICA(n_components=n_components, max_iter=max_iter, method=method) fit_params = {param_name: param_val} with pytest.warns(RuntimeWarning, match='parameters.*will be ignored'): ica.fit(inst=epochs, **fit_params) assert ica.reject_ == reject _assert_ica_attributes(ica) tmp_fname = tmp_path / 'test-ica.fif' ica.save(tmp_fname) ica = read_ica(tmp_fname) assert ica.reject_ == reject _assert_ica_attributes(ica) @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) @pytest.mark.parametrize("allow_ref_meg", [True, False]) def test_bad_channels(method, allow_ref_meg): """Test exception when unsupported channels are used.""" _skip_check_picard(method) chs = list(get_channel_type_constants()) info = create_info(len(chs), 500, chs) rng = np.random.RandomState(0) data = rng.rand(len(chs), 50) raw = RawArray(data, info) data = rng.rand(100, len(chs), 50) epochs = EpochsArray(data, info) # fake high-pass filtering with raw.info._unlock(): raw.info['highpass'] = 1.0 with epochs.info._unlock(): epochs.info['highpass'] = 1.0 n_components = 0.9 data_chs = list(_DATA_CH_TYPES_SPLIT + ('eog',)) if allow_ref_meg: data_chs.append('ref_meg') chs_bad = list(set(chs) - set(data_chs)) ica = ICA(n_components=n_components, method=method, allow_ref_meg=allow_ref_meg) for inst in [raw, epochs]: for ch in chs_bad: if allow_ref_meg: # Test case for only bad channels picks_bad1 = pick_types(inst.info, meg=False, ref_meg=False, **{str(ch): True}) # Test case for good and bad channels picks_bad2 = pick_types(inst.info, meg=True, ref_meg=True, **{str(ch): True}) else: # Test case for only bad channels picks_bad1 = pick_types(inst.info, meg=False, **{str(ch): True}) # Test case for good and bad channels picks_bad2 = pick_types(inst.info, meg=True, **{str(ch): True}) with pytest.raises(ValueError, match='Invalid channel type'): ica.fit(inst, picks=picks_bad1) ica.fit(inst, picks=picks_bad2) with pytest.raises(ValueError, match='No appropriate channels found'): ica.fit(inst, picks=[]) @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) def test_eog_channel(method): """Test that EOG channel is included when performing ICA.""" _skip_check_picard(method) raw = read_raw_fif(raw_fname, preload=True) events = read_events(event_name) picks = pick_types(raw.info, meg=True, stim=True, ecg=False, eog=True, exclude='bads') epochs = Epochs(raw, events, event_id, tmin, tmax, 
picks=picks, baseline=None, preload=True, proj=False) n_components = 0.9 ica = ICA(n_components=n_components, method=method) # Test case for MEG and EOG data. Should have EOG channel for inst in [raw, epochs]: picks1a = pick_types(inst.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[:4] picks1b = pick_types(inst.info, meg=False, stim=False, ecg=False, eog=True, exclude='bads') picks1 = np.append(picks1a, picks1b) ica.fit(inst, picks=picks1) assert (any('EOG' in ch for ch in ica.ch_names)) _assert_ica_attributes(ica, inst.get_data(picks1), limits=(0.8, 600)) # Test case for MEG data. Should have no EOG channel for inst in [raw, epochs]: picks1 = pick_types(inst.info, meg=True, stim=False, ecg=False, eog=False, exclude='bads')[:5] ica.fit(inst, picks=picks1) _assert_ica_attributes(ica) assert not any('EOG' in ch for ch in ica.ch_names) @requires_sklearn @pytest.mark.parametrize("method", ["fastica", "picard"]) def test_n_components_none(method, tmp_path): """Test n_components=None.""" _skip_check_picard(method) raw = read_raw_fif(raw_fname).crop(1.5, stop).load_data() events = read_events(event_name) picks = pick_types(raw.info, eeg=True, meg=False)[::5] epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks, baseline=(None, 0), preload=True) n_components = None random_state = 12345 output_fname = tmp_path / 'test_ica-ica.fif' ica = ICA(method=method, n_components=n_components, random_state=random_state) with _record_warnings(): ica.fit(epochs) _assert_ica_attributes(ica) ica.save(output_fname) ica = read_ica(output_fname) _assert_ica_attributes(ica) assert ica.n_pca_components is None assert ica.n_components is None assert ica.n_components_ == len(picks) @pytest.mark.slowtest @requires_sklearn @testing.requires_testing_data def test_ica_ctf(): """Test run ICA computation on ctf data with/without compensation.""" method = 'fastica' raw = read_raw_ctf(ctf_fname).crop(0, 3).load_data() picks = sorted(set(range(0, len(raw.ch_names), 10)) | set(pick_types(raw.info, ref_meg=True))) raw.pick(picks) events = make_fixed_length_events(raw, 99999) for comp in [0, 1]: raw.apply_gradient_compensation(comp) epochs = Epochs(raw, events=events, tmin=-0.2, tmax=0.2, baseline=None, preload=True) evoked = epochs.average() # test fit for inst in [raw, epochs]: ica = ICA(n_components=2, max_iter=2, method=method) with _record_warnings(): # convergence sometimes ica.fit(inst) _assert_ica_attributes(ica) # test apply and get_sources for inst in [raw, epochs, evoked]: ica.apply(inst.copy()) ica.get_sources(inst) # test mixed compensation case raw.apply_gradient_compensation(0) ica = ICA(n_components=2, max_iter=2, method=method) with _record_warnings(): # convergence sometimes ica.fit(raw) _assert_ica_attributes(ica) raw.apply_gradient_compensation(1) epochs = Epochs(raw, events=events, tmin=-0.2, tmax=0.2, baseline=None, preload=True) evoked = epochs.average() for inst in [raw, epochs, evoked]: with pytest.raises(RuntimeError, match='Compensation grade of ICA'): ica.apply(inst.copy()) with pytest.raises(RuntimeError, match='Compensation grade of ICA'): ica.get_sources(inst) @requires_sklearn @testing.requires_testing_data def test_ica_labels(): """Test ICA labels.""" # The CTF data are uniquely well suited to testing the ICA.find_bads_ # methods raw = read_raw_ctf(ctf_fname, preload=True) raw.pick_channels(raw.ch_names[:300:10] + raw.ch_names[300:]) # set the appropriate EEG channels to EOG and ECG rename = {'EEG057': 'eog', 'EEG058': 'eog', 'EEG059': 'ecg'} for key in rename: assert key 
in raw.ch_names raw.set_channel_types(rename) ica = ICA(n_components=4, max_iter=2, method='fastica', allow_ref_meg=True) with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw) _assert_ica_attributes(ica) ica.find_bads_eog(raw, l_freq=None, h_freq=None) picks = list(pick_types(raw.info, meg=False, eog=True)) for idx, ch in enumerate(picks): assert '{}/{}/{}'.format('eog', idx, raw.ch_names[ch]) in ica.labels_ assert 'eog' in ica.labels_ for key in ('ecg', 'ref_meg', 'ecg/ECG-MAG'): assert key not in ica.labels_ ica.find_bads_ecg(raw, l_freq=None, h_freq=None, method='correlation', threshold='auto') picks = list(pick_types(raw.info, meg=False, ecg=True)) for idx, ch in enumerate(picks): assert '{}/{}/{}'.format('ecg', idx, raw.ch_names[ch]) in ica.labels_ for key in ('ecg', 'eog'): assert key in ica.labels_ for key in ('ref_meg', 'ecg/ECG-MAG'): assert key not in ica.labels_ # derive reference ICA components and append them to raw ica_rf = ICA(n_components=2, max_iter=2, allow_ref_meg=True) with pytest.warns(UserWarning, match='did not converge'): ica_rf.fit(raw.copy().pick_types(meg=False, ref_meg=True)) icacomps = ica_rf.get_sources(raw) # rename components so they are auto-detected by find_bads_ref icacomps.rename_channels({c: 'REF_' + c for c in icacomps.ch_names}) # and add them to raw raw.add_channels([icacomps]) ica.find_bads_ref(raw, l_freq=None, h_freq=None, method="separate") picks = pick_channels_regexp(raw.ch_names, 'REF_ICA*') for idx, ch in enumerate(picks): assert '{}/{}/{}'.format('ref_meg', idx, raw.ch_names[ch]) in ica.labels_ ica.find_bads_ref(raw, l_freq=None, h_freq=None, method="together") assert 'ref_meg' in ica.labels_ for key in ('ecg', 'eog', 'ref_meg'): assert key in ica.labels_ assert 'ecg/ECG-MAG' not in ica.labels_ ica.find_bads_ecg(raw, l_freq=None, h_freq=None, threshold='auto') for key in ('ecg', 'eog', 'ref_meg', 'ecg/ECG-MAG'): assert key in ica.labels_ scores = ica.find_bads_muscle(raw)[1] assert 'muscle' in ica.labels_ assert ica.labels_['muscle'] == [0] assert_allclose(scores, [0.56, 0.01, 0.03, 0.00], atol=0.03) events = np.array([[6000, 0, 0], [8000, 0, 0]]) epochs = Epochs(raw, events=events, baseline=None, preload=True) # move up threhsold more noise because less data scores = ica.find_bads_muscle(epochs, threshold=0.8)[1] assert 'muscle' in ica.labels_ assert ica.labels_['muscle'] == [0] assert_allclose(scores, [0.81, 0.14, 0.37, 0.05], atol=0.03) ica = ICA(n_components=4, max_iter=2, method='fastica', allow_ref_meg=True) with pytest.warns(UserWarning, match='did not converge'): ica.fit(raw, picks="eeg") ica.find_bads_muscle(raw) assert 'muscle' in ica.labels_ @requires_sklearn @testing.requires_testing_data @pytest.mark.parametrize('fname, grade', [ (fif_fname, None), pytest.param(eeglab_fname, None, marks=pymatreader_mark), (ctf_fname2, 0), (ctf_fname2, 1), ]) def test_ica_eeg(fname, grade): """Test ICA on EEG.""" method = 'fastica' if fname.endswith('.fif'): raw = read_raw_fif(fif_fname) raw.pick(raw.ch_names[::5]).load_data() raw.info.normalize_proj() elif fname.endswith('.set'): raw = read_raw_eeglab(input_fname=eeglab_fname, preload=True) else: with pytest.warns(RuntimeWarning, match='MISC channel'): raw = read_raw_ctf(ctf_fname2) raw.pick(raw.ch_names[:30] + raw.ch_names[30::10]).load_data() if grade is not None: raw.apply_gradient_compensation(grade) events = make_fixed_length_events(raw, 99999, start=0, stop=0.3, duration=0.1) picks_meg = pick_types(raw.info, meg=True, eeg=False, ref_meg=False)[:2] picks_eeg = 
pick_types(raw.info, meg=False, eeg=True)[:2] picks_all = [] picks_all.extend(picks_meg) picks_all.extend(picks_eeg) epochs = Epochs(raw, events=events, tmin=-0.1, tmax=0.1, baseline=None, preload=True, proj=False) evoked = epochs.average() for picks in [picks_meg, picks_eeg, picks_all]: if len(picks) == 0: continue # test fit for inst in [raw, epochs]: ica = ICA(n_components=2, max_iter=2, method=method) with _record_warnings(): ica.fit(inst, picks=picks, verbose=True) _assert_ica_attributes(ica) # test apply and get_sources for inst in [raw, epochs, evoked]: ica.apply(inst) ica.get_sources(inst) @pymatreader_mark @testing.requires_testing_data def test_read_ica_eeglab(): """Test read_ica_eeglab function.""" fname = op.join(test_base_dir, "EEGLAB", "test_raw.set") fname_cleaned_matlab = op.join(test_base_dir, "EEGLAB", "test_raw.cleaned.set") raw = read_raw_eeglab(fname, preload=True) raw_eeg = _check_load_mat(fname, None) raw_cleaned_matlab = read_raw_eeglab(fname_cleaned_matlab, preload=True) mark_to_remove = ["manual"] comp_info = raw_eeg.marks["comp_info"] if len(comp_info["flags"].shape) > 1: ind_comp_to_drop = [np.where(flags)[0] for flags, label in zip(comp_info["flags"], comp_info["label"]) if label in mark_to_remove] ind_comp_to_drop = np.unique(np.concatenate(ind_comp_to_drop)) else: ind_comp_to_drop = np.where(comp_info["flags"])[0] ica = read_ica_eeglab(fname) _assert_ica_attributes(ica) raw_cleaned = ica.apply(raw.copy(), exclude=ind_comp_to_drop) assert_allclose(raw_cleaned_matlab.get_data(), raw_cleaned.get_data(), rtol=1e-05, atol=1e-08) @pymatreader_mark @testing.requires_testing_data def test_read_ica_eeglab_mismatch(tmp_path): """Test read_ica_eeglab function when there is a mismatch.""" fname_orig = op.join(test_base_dir, "EEGLAB", "test_raw.set") base = op.basename(fname_orig)[:-3] shutil.copyfile(fname_orig[:-3] + 'fdt', tmp_path / (base + 'fdt')) fname = tmp_path / base data = loadmat(fname_orig) w = data['EEG']['icaweights'][0][0] w[:] = np.random.RandomState(0).randn(*w.shape) savemat(str(fname), data, appendmat=False) assert op.isfile(fname) with pytest.warns(RuntimeWarning, match='Mismatch.*removal.*icawinv.*'): ica = read_ica_eeglab(fname) _assert_ica_attributes(ica) ica_correct = read_ica_eeglab(fname_orig) attrs = [attr for attr in dir(ica_correct) if attr.endswith('_') and not attr.startswith('_')] assert 'mixing_matrix_' in attrs assert 'unmixing_matrix_' in attrs assert ica.labels_ == ica_correct.labels_ == {} attrs.pop(attrs.index('labels_')) attrs.pop(attrs.index('reject_')) for attr in attrs: a, b = getattr(ica, attr), getattr(ica_correct, attr) assert_allclose(a, b, rtol=1e-12, atol=1e-12, err_msg=attr) def _assert_ica_attributes(ica, data=None, limits=(1.0, 70)): """Assert some attributes of ICA objects.""" __tracebackhide__ = True # This tests properties, but also serves as documentation of # the shapes these arrays can obtain and how they obtain them # Pre-whitener n_ch = len(ica.ch_names) assert ica.pre_whitener_.shape == ( n_ch, n_ch if ica.noise_cov is not None else 1) # PCA n_pca = ica.pca_components_.shape[0] assert ica.pca_components_.shape == (n_pca, n_ch), 'PCA shape' assert_allclose(np.dot(ica.pca_components_, ica.pca_components_.T), np.eye(n_pca), atol=1e-6, err_msg='PCA orthogonality') assert ica.pca_mean_.shape == (n_ch,) # Mixing/unmixing assert ica.unmixing_matrix_.shape == (ica.n_components_,) * 2, \ 'Unmixing shape' assert ica.mixing_matrix_.shape == (ica.n_components_,) * 2, \ 'Mixing shape' mix_unmix = np.dot(ica.mixing_matrix_, 
ica.unmixing_matrix_) s = linalg.svdvals(ica.unmixing_matrix_) nz = len(s) - (s > s[0] * 1e-12).sum() want = np.eye(ica.n_components_) want[:nz] = 0 assert_allclose(mix_unmix, want, atol=1e-6, err_msg='Mixing as pinv') assert ica.pca_explained_variance_.shape[0] >= \ ica.unmixing_matrix_.shape[1] # our PCA components should be unit vectors (the variances get put into # the unmixing_matrix_ to make it a whitener) norms = np.linalg.norm(ica.pca_components_, axis=1) assert_allclose(norms, 1.) # let's check the whitening if data is not None: if data.ndim == 3: data = data.transpose(1, 0, 2).reshape(data.shape[1], -1) data = ica._transform_raw(RawArray(data, ica.info), 0, None) norms = np.linalg.norm(data, axis=1) # at least close to normal assert norms.min() > limits[0], 'Not roughly unity' assert norms.max() < limits[1], 'Not roughly unity' assert hasattr(ica, 'reject_') @pytest.mark.parametrize("ch_type", ["dbs", "seeg"]) def test_ica_ch_types(ch_type): """Test ica with different channel types.""" # gh-8739 data = np.random.RandomState(0).randn(10, 1000) info = create_info(10, 1000., ch_type) raw = RawArray(data, info) events = make_fixed_length_events(raw, 99999, start=0, stop=0.3, duration=0.1) epochs = Epochs(raw, events=events, tmin=-0.1, tmax=0.1, baseline=None, preload=True, proj=False) evoked = epochs.average() # test fit method = 'infomax' for inst in [raw, epochs]: ica = ICA(n_components=2, max_iter=2, method=method) with _record_warnings(): ica.fit(inst, verbose=True) _assert_ica_attributes(ica) # test apply and get_sources for inst in [raw, epochs, evoked]: ica.apply(inst) ica.get_sources(inst)
bsd-3-clause
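The _assert_ica_attributes helper in the MNE record above checks that the stored PCA components form an orthonormal basis (unit-norm rows, and the components times their transpose giving the identity). A minimal self-contained sketch of the same checks on a made-up matrix, assuming only NumPy — no MNE objects involved:

import numpy as np
from numpy.testing import assert_allclose

rng = np.random.RandomState(0)
n_pca, n_ch = 4, 10

# Orthonormal rows obtained from the SVD of a random matrix; these play the
# role of ica.pca_components_ (shape (n_pca, n_ch)).
u, s, vt = np.linalg.svd(rng.randn(n_pca, n_ch), full_matrices=False)
pca_components = vt

# Rows times their transpose give the identity, and every row has unit norm.
assert_allclose(pca_components @ pca_components.T, np.eye(n_pca), atol=1e-6)
assert_allclose(np.linalg.norm(pca_components, axis=1), 1.0)
print("orthonormality checks passed")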
dreikanter/pyke
pyke/file.py
1
2152
import imp
import inspect
import os.path

from pyke import const
from pyke.task import PykeTask


class PykeFile():
    """Pykefile representation."""

    def __init__(self, dir_path):
        """Search for pyke file in the specified directory and load it."""
        file_name = dir_path
        if not os.path.isfile(dir_path):
            for valid_name in const.PYKEFILE:
                file_name = os.path.join(dir_path, valid_name)
                if os.path.exists(file_name):
                    break
            else:
                file_name = None
        self._load(file_name)

    def __str__(self):
        pattern = "{%s filename: %s; tasks: [%s]}"
        name = self.__class__.__name__
        return pattern % (name, self._fullname, self.tasks().keys())

    def _load(self, file_name):
        """Load pykefile tasks."""
        self._fullname = os.path.abspath(file_name) if file_name else None
        self._tasks = {}  # task functions
        self.metadata = []  # task information

        if not self._fullname:
            return

        pykemod = imp.load_source(const.PYKEMOD, self._fullname)

        is_task = lambda func: inspect.isfunction(func) and \
            func.__module__ == const.PYKEMOD and \
            not func.__name__.startswith(const.UNDERSCORE)

        for member in inspect.getmembers(pykemod):
            name, func = member
            if is_task(func):
                self._tasks[name] = PykeTask(name, func)

        self.description = pykemod.__doc__

    def loaded(self):
        """Return True if pykefile was found and loaded."""
        return bool(self._fullname)

    def file_name(self):
        """Return absolute path to the pykefile or None if not loaded."""
        return self._fullname

    def tasks(self):
        """Return tasks dict or {} if pykefile was not loaded."""
        return self._tasks if self.loaded() else {}

    def execute(self, task, args):
        """Run a pyke task."""
        excludes = ['task', 'quiet', 'verbose', 'dryrun', 'help', 'file']
        task_args = {k: args[k] for k in args if k not in excludes}
        self._tasks[task].call(task_args)
mit
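PykeFile._load in the record above discovers tasks by filtering inspect.getmembers for public module-level functions. A dependency-free sketch of that filter, using a throwaway module built inline instead of a pykefile loaded from disk (the module and task names here are invented for illustration):

import inspect
import types

# Build a throwaway module in memory; pyke itself loads one from disk.
mod = types.ModuleType("pykemod")
exec(
    "def build(): return 'building'\n"
    "def clean(): return 'cleaning'\n"
    "def _helper(): return 'hidden'\n",
    mod.__dict__,
)

def is_task(func):
    # Keep only functions defined in the module whose names are public.
    return (inspect.isfunction(func)
            and func.__module__ == "pykemod"
            and not func.__name__.startswith("_"))

tasks = {name: func for name, func in inspect.getmembers(mod) if is_task(func)}
print(sorted(tasks))       # ['build', 'clean']
print(tasks["build"]())    # building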
airbnb/airflow
airflow/providers/google/cloud/_internal_client/secret_manager_client.py
3
3612
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import re from typing import Optional import google from cached_property import cached_property from google.api_core.exceptions import NotFound, PermissionDenied from google.api_core.gapic_v1.client_info import ClientInfo from google.cloud.secretmanager_v1 import SecretManagerServiceClient from airflow.utils.log.logging_mixin import LoggingMixin from airflow.version import version SECRET_ID_PATTERN = r"^[a-zA-Z0-9-_]*$" class _SecretManagerClient(LoggingMixin): """ Retrieves Secrets object from Google Cloud Secrets Manager. This is a common class reused between SecretsManager and Secrets Hook that provides the shared authentication and verification mechanisms. This class should not be used directly, use SecretsManager or SecretsHook instead :param credentials: Credentials used to authenticate to GCP :type credentials: google.auth.credentials.Credentials """ def __init__( self, credentials: google.auth.credentials.Credentials, ) -> None: super().__init__() self.credentials = credentials @staticmethod def is_valid_secret_name(secret_name: str) -> bool: """ Returns true if the secret name is valid. :param secret_name: name of the secret :type secret_name: str :return: """ return bool(re.match(SECRET_ID_PATTERN, secret_name)) @cached_property def client(self) -> SecretManagerServiceClient: """Create an authenticated KMS client""" _client = SecretManagerServiceClient( credentials=self.credentials, client_info=ClientInfo(client_library_version='airflow_v' + version) ) return _client def get_secret(self, secret_id: str, project_id: str, secret_version: str = 'latest') -> Optional[str]: """ Get secret value from the Secret Manager. :param secret_id: Secret Key :type secret_id: str :param project_id: Project id to use :type project_id: str :param secret_version: version of the secret (default is 'latest') :type secret_version: str """ name = self.client.secret_version_path(project_id, secret_id, secret_version) try: response = self.client.access_secret_version(name) value = response.payload.data.decode('UTF-8') return value except NotFound: self.log.error("Google Cloud API Call Error (NotFound): Secret ID %s not found.", secret_id) return None except PermissionDenied: self.log.error( """Google Cloud API Call Error (PermissionDenied): No access for Secret ID %s. Did you add 'secretmanager.versions.access' permission?""", secret_id, ) return None
apache-2.0
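is_valid_secret_name in the record above is just an anchored regex test. A small standalone sketch of what SECRET_ID_PATTERN does and does not admit, using only the pattern shown in the file (the sample names are invented):

import re

SECRET_ID_PATTERN = r"^[a-zA-Z0-9-_]*$"

def is_valid_secret_name(secret_name):
    # Letters, digits, dashes and underscores only; the * (rather than +)
    # also admits the empty string.
    return bool(re.match(SECRET_ID_PATTERN, secret_name))

print(is_valid_secret_name("airflow-connections-my_db"))  # True
print(is_valid_secret_name("bad/name"))                    # False
print(is_valid_secret_name(""))                            # True (note the *)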
tangentlabs/wagtail
wagtail/wagtailimages/rich_text.py
22
1547
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format


class ImageEmbedHandler(object):
    """
    ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
    with an attribute of data-embedtype="image". The resulting element in the database
    representation will be:
    <embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
    """
    @staticmethod
    def get_db_attributes(tag):
        """
        Given a tag that we've identified as an image embed (because it has a
        data-embedtype="image" attribute), return a dict of the attributes we should
        have on the resulting <embed> element.
        """
        return {
            'id': tag['data-id'],
            'format': tag['data-format'],
            'alt': tag['data-alt'],
        }

    @staticmethod
    def expand_db_attributes(attrs, for_editor):
        """
        Given a dict of attributes from the <embed> tag, return the real HTML
        representation.
        """
        Image = get_image_model()
        try:
            image = Image.objects.get(id=attrs['id'])
            format = get_image_format(attrs['format'])
            if for_editor:
                try:
                    return format.image_to_editor_html(image, attrs['alt'])
                except:
                    return ''
            else:
                return format.image_to_html(image, attrs['alt'])
        except Image.DoesNotExist:
            return "<img>"
bsd-3-clause
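The ImageEmbedHandler docstring above describes the database representation as an <embed> element built from the dict that get_db_attributes returns. The toy builder below only illustrates that mapping; it is not Wagtail's actual conversion code, which lives elsewhere in the rich-text pipeline:

from xml.sax.saxutils import quoteattr

def embed_element(attrs):
    # Assemble the <embed> element described in the docstring from the
    # id / format / alt attributes.
    parts = ['embedtype="image"'] + [
        '%s=%s' % (name, quoteattr(str(attrs[name])))
        for name in ('id', 'format', 'alt')
    ]
    return '<embed %s>' % ' '.join(parts)

print(embed_element({'id': 42, 'format': 'thumb', 'alt': 'some custom alt text'}))
# <embed embedtype="image" id="42" format="thumb" alt="some custom alt text">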
sorgerlab/indra
indra/databases/lincs_client.py
5
6680
from __future__ import absolute_import, print_function, unicode_literals from builtins import dict, str __all__ = ['get_drug_target_data', 'LincsClient', 'load_lincs_csv'] import os import sys import json import logging import requests from io import StringIO, BytesIO from indra.util import read_unicode_csv_fileobj from indra.databases.identifiers import ensure_chembl_prefix logger = logging.getLogger(__name__) LINCS_URL = 'http://lincs.hms.harvard.edu/db' resources = os.path.join(os.path.abspath(os.path.dirname(__file__)), os.path.pardir, 'resources') lincs_sm = os.path.join(resources, 'lincs_small_molecules.json') lincs_prot = os.path.join(resources, 'lincs_proteins.json') class LincsClient(object): """Client for querying LINCS small molecules and proteins.""" def __init__(self): with open(lincs_sm, 'r') as fh: self._sm_data = json.load(fh) extra_sm_data = load_lincs_extras() self._sm_data.update(extra_sm_data) with open(lincs_prot, 'r') as fh: self._prot_data = json.load(fh) def get_small_molecule_name(self, hms_lincs_id): """Get the name of a small molecule from the LINCS sm metadata. Parameters ---------- hms_lincs_id : str The HMS LINCS ID of the small molecule. Returns ------- str The name of the small molecule. """ entry = self._get_entry_by_id(self._sm_data, hms_lincs_id) if not entry: return None name = entry['Name'] return name def get_small_molecule_refs(self, hms_lincs_id): """Get the id refs of a small molecule from the LINCS sm metadata. Parameters ---------- hms_lincs_id : str The HMS LINCS ID of the small molecule. Returns ------- dict A dictionary of references. """ refs = {'HMS-LINCS': hms_lincs_id} entry = self._get_entry_by_id(self._sm_data, hms_lincs_id) # If there is no entry for this ID if not entry: return refs # If there is an entry then fill up the refs with existing values mappings = dict(chembl='ChEMBL ID', chebi='ChEBI ID', pubchem='PubChem CID', lincs='LINCS ID') for k, v in mappings.items(): if entry.get(v): key = k.upper() value = entry[v] # Swap in primary PubChem IDs where there is an outdated one if key == 'PUBCHEM' and value in pc_to_primary_mappings: value = pc_to_primary_mappings[value] # Fix CHEMBL IDs if key == 'CHEMBL': value = ensure_chembl_prefix(value) refs[key] = value return refs def get_protein_refs(self, hms_lincs_id): """Get the refs for a protein from the LINCs protein metadata. Parameters ---------- hms_lincs_id : str The HMS LINCS ID for the protein Returns ------- dict A dictionary of protein references. """ # TODO: We could get phosphorylation states from the protein data. refs = {'HMS-LINCS': hms_lincs_id} entry = self._get_entry_by_id(self._prot_data, hms_lincs_id) # If there is no entry for this ID if not entry: return refs mappings = dict(egid='Gene ID', up='UniProt ID') for k, v in mappings.items(): if entry.get(v): refs[k.upper()] = entry.get(v) return refs def _get_entry_by_id(self, resource, hms_lincs_id): # This means it's a short ID if '-' not in hms_lincs_id: keys = [k for k in resource.keys() if k.startswith(hms_lincs_id)] if not keys: logger.debug('Couldn\'t find entry for %s' % hms_lincs_id) return None entry = resource[keys[0]] # This means it's a full ID else: entry = resource.get(hms_lincs_id) if not entry: logger.debug('Couldn\'t find entry for %s' % hms_lincs_id) return None return entry def get_drug_target_data(): """Load the csv into a list of dicts containing the LINCS drug target data. 
Returns ------- data : list[dict] A list of dicts, each keyed based on the header of the csv, with values as the corresponding column values. """ url = LINCS_URL + '/datasets/20000/results' return load_lincs_csv(url) def _build_db_refs(lincs_id, data, **mappings): db_refs = {'HMS-LINCS': lincs_id} for db_ref, key in mappings.items(): if data[key]: db_refs[db_ref.upper()] = data[key] return db_refs def load_lincs_csv(url): """Helper function to turn csv rows into dicts.""" resp = requests.get(url, params={'output_type': '.csv'}, timeout=120) resp.raise_for_status() if sys.version_info[0] < 3: csv_io = BytesIO(resp.content) else: csv_io = StringIO(resp.text) data_rows = list(read_unicode_csv_fileobj(csv_io, delimiter=',')) headers = data_rows[0] return [{header: val for header, val in zip(headers, line_elements)} for line_elements in data_rows[1:]] def load_lincs_extras(): fname = os.path.join(resources, 'hms_lincs_extra.tsv') with open(fname, 'r') as fh: rows = [line.strip('\n').split('\t') for line in fh.readlines()] return {r[0]: {'HMS LINCS ID': r[0], 'Name': r[1], 'ChEMBL ID': r[2] if r[2] else ''} for r in rows[1:]} # This is a set of mappings specific to HMS-LINCS that map outdated compound # IDs appearing in HMS-LINCS to preferred compound IDs. This can be obtained # more generally via indra.databases.pubchem_client, but this is a pre-compiled # version here for fast lookups in this client. pc_to_primary_mappings = \ {'23624255': '135564985', '10451420': '135465539', '10196499': '135398501', '57899889': '135564632', '53239990': '135564599', '71433937': '136240579', '53401173': '135539077', '71543332': '135398499', '5353940': '5169', '49830557': '135398510', '11258443': '135451019', '68925359': '135440466', '16750408': '135565545', '57347681': '135565635', '5357795': '92577', '56965966': '135398516', '24906282': '448949', '66524294': '135398492', '11696609': '135398495', '9549301': '135473382', '56965894': '135423438', }
bsd-2-clause
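_get_entry_by_id in the record above accepts either a full HMS LINCS ID or a short ID without a dash, which it resolves by prefix match against the resource keys. A sketch of that lookup with an invented two-entry resource dict:

# Toy resource; real entries come from the bundled LINCS JSON files.
resource = {
    "10001-101": {"Name": "Compound A"},
    "10002-101": {"Name": "Compound B"},
}

def get_entry(resource, hms_lincs_id):
    if '-' not in hms_lincs_id:          # short ID: match by key prefix
        keys = [k for k in resource if k.startswith(hms_lincs_id)]
        return resource[keys[0]] if keys else None
    return resource.get(hms_lincs_id)    # full ID: exact lookup

print(get_entry(resource, "10001"))       # {'Name': 'Compound A'}
print(get_entry(resource, "10002-101"))   # {'Name': 'Compound B'}
print(get_entry(resource, "99999"))       # None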
tangentlabs/wagtail
wagtail/contrib/wagtailapi/serializers.py
12
8154
from __future__ import absolute_import from collections import OrderedDict from modelcluster.models import get_all_child_relations from taggit.managers import _TaggableManager from rest_framework import serializers from rest_framework.fields import Field from rest_framework import relations from wagtail.utils.compat import get_related_model from wagtail.wagtailcore import fields as wagtailcore_fields from .utils import ObjectDetailURL, URLPath, pages_for_site class MetaField(Field): """ Serializes the "meta" section of each object. This section is used for storing non-field data such as model name, urls, etc. Example: "meta": { "type": "wagtailimages.Image", "detail_url": "http://api.example.com/v1/images/1/" } """ def get_attribute(self, instance): return instance def to_representation(self, obj): return OrderedDict([ ('type', type(obj)._meta.app_label + '.' + type(obj).__name__), ('detail_url', ObjectDetailURL(type(obj), obj.pk)), ]) class PageMetaField(MetaField): """ A subclass of MetaField for Page objects. Changes the "type" field to use the name of the specific model of the page. Example: "meta": { "type": "blog.BlogPage", "detail_url": "http://api.example.com/v1/pages/1/" } """ def to_representation(self, page): return OrderedDict([ ('type', page.specific_class._meta.app_label + '.' + page.specific_class.__name__), ('detail_url', ObjectDetailURL(type(page), page.pk)), ]) class DocumentMetaField(MetaField): """ A subclass of MetaField for Document objects. Adds a "download_url" field. "meta": { "type": "wagtaildocs.Document", "detail_url": "http://api.example.com/v1/documents/1/", "download_url": "http://api.example.com/documents/1/my_document.pdf" } """ def to_representation(self, document): data = OrderedDict([ ('type', "wagtaildocs.Document"), ('detail_url', ObjectDetailURL(type(document), document.pk)), ]) # Add download url if self.context.get('show_details', False): data['download_url'] = URLPath(document.url) return data class RelatedField(relations.RelatedField): """ Serializes related objects (eg, foreign keys). Example: "feed_image": { "id": 1, "meta": { "type": "wagtailimages.Image", "detail_url": "http://api.example.com/v1/images/1/" } } """ meta_field_serializer_class = MetaField def to_representation(self, value): return OrderedDict([ ('id', value.pk), ('meta', self.meta_field_serializer_class().to_representation(value)), ]) class PageParentField(RelatedField): """ Serializes the "parent" field on Page objects. Pages don't have a "parent" field so some extra logic is needed to find the parent page. That logic is implemented in this class. The representation is the same as the RelatedField class. """ meta_field_serializer_class = PageMetaField def get_attribute(self, instance): parent = instance.get_parent() site_pages = pages_for_site(self.context['request'].site) if site_pages.filter(id=parent.id).exists(): return parent class ChildRelationField(Field): """ Serializes child relations. Child relations are any model that is related to a Page using a ParentalKey. They are used for repeated fields on a page such as carousel items or related links. Child objects are part of the pages content so we nest them. The relation is represented as a list of objects. 
Example: "carousel_items": [ { "title": "First carousel item", "image": { "id": 1, "meta": { "type": "wagtailimages.Image", "detail_url": "http://api.example.com/v1/images/1/" } } }, "carousel_items": [ { "title": "Second carousel item (no image)", "image": null } ] """ def __init__(self, *args, **kwargs): self.child_fields = kwargs.pop('child_fields') super(ChildRelationField, self).__init__(*args, **kwargs) def to_representation(self, value): serializer_class = get_serializer_class(value.model, self.child_fields) serializer = serializer_class() return [ serializer.to_representation(child_object) for child_object in value.all() ] class StreamField(Field): """ Serializes StreamField values. Stream fields are stored in JSON format in the database. We reuse that in the API. Example: "body": [ { "type": "heading", "value": { "text": "Hello world!", "size": "h1" } }, { "type": "paragraph", "value": "Some content" } { "type": "image", "value": 1 } ] Where "heading" is a struct block containing "text" and "size" fields, and "paragraph" is a simple text block. Note that foreign keys are represented slightly differently in stream fields to other parts of the API. In stream fields, a foreign key is represented by an integer (the ID of the related object) but elsewhere in the API, foreign objects are nested objects with id and meta as attributes. """ def to_representation(self, value): return value.stream_block.get_prep_value(value) class TagsField(Field): """ Serializes django-taggit TaggableManager fields. These fields are a common way to link tags to objects in Wagtail. The API serializes these as a list of strings taken from the name attribute of each tag. Example: "tags": ["bird", "wagtail"] """ def to_representation(self, value): return list(value.all().order_by('name').values_list('name', flat=True)) class BaseSerializer(serializers.ModelSerializer): # Add StreamField to serializer_field_mapping serializer_field_mapping = serializers.ModelSerializer.serializer_field_mapping.copy() serializer_field_mapping.update({ wagtailcore_fields.StreamField: StreamField, }) serializer_related_field = RelatedField meta = MetaField() def build_property_field(self, field_name, model_class): # TaggableManager is not a Django field so it gets treated as a property field = getattr(model_class, field_name) if isinstance(field, _TaggableManager): return TagsField, {} return super(BaseSerializer, self).build_property_field(field_name, model_class) class PageSerializer(BaseSerializer): meta = PageMetaField() parent = PageParentField(read_only=True) def build_relational_field(self, field_name, relation_info): # Find all relation fields that point to child class and make them use # the ChildRelationField class. if relation_info.to_many: model = getattr(self.Meta, 'model') child_relations = { child_relation.field.rel.related_name: get_related_model(child_relation) for child_relation in get_all_child_relations(model) } if field_name in child_relations and hasattr(child_relations[field_name], 'api_fields'): return ChildRelationField, {'child_fields': child_relations[field_name].api_fields} return super(BaseSerializer, self).build_relational_field(field_name, relation_info) class ImageSerializer(BaseSerializer): pass class DocumentSerializer(BaseSerializer): meta = DocumentMetaField() def get_serializer_class(model_, fields_, base=BaseSerializer): class Meta: model = model_ fields = fields_ return type(model_.__name__ + 'Serializer', (base, ), { 'Meta': Meta })
bsd-3-clause
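get_serializer_class at the end of the record above builds serializer classes dynamically with type(). The same pattern in a dependency-free sketch (a plain object base instead of a DRF serializer; model and field names invented):

def get_class(model_name, fields_):
    # Inner Meta class picks up fields_ from the enclosing call, exactly as
    # in get_serializer_class above.
    class Meta:
        fields = fields_

    return type(model_name + 'Serializer', (object,), {'Meta': Meta})

BlogPageSerializer = get_class('BlogPage', ('title', 'body'))
print(BlogPageSerializer.__name__)      # BlogPageSerializer
print(BlogPageSerializer.Meta.fields)   # ('title', 'body')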
mboeru/maraschino
lib/werkzeug/contrib/jsrouting.py
92
8408
# -*- coding: utf-8 -*- """ werkzeug.contrib.jsrouting ~~~~~~~~~~~~~~~~~~~~~~~~~~ Addon module that allows to create a JavaScript function from a map that generates rules. :copyright: (c) 2011 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ try: from simplejson import dumps except ImportError: try: from json import dumps except ImportError: def dumps(*args): raise RuntimeError('simplejson required for jsrouting') from inspect import getmro from werkzeug.routing import NumberConverter def render_template(name_parts, rules, converters): result = u'' if name_parts: for idx in xrange(0, len(name_parts) - 1): name = u'.'.join(name_parts[:idx + 1]) result += u"if (typeof %s === 'undefined') %s = {}\n" % (name, name) result += '%s = ' % '.'.join(name_parts) result += """(function (server_name, script_name, subdomain, url_scheme) { var converters = %(converters)s; var rules = $rules; function in_array(array, value) { if (array.indexOf != undefined) { return array.indexOf(value) != -1; } for (var i = 0; i < array.length; i++) { if (array[i] == value) { return true; } } return false; } function array_diff(array1, array2) { array1 = array1.slice(); for (var i = array1.length-1; i >= 0; i--) { if (in_array(array2, array1[i])) { array1.splice(i, 1); } } return array1; } function split_obj(obj) { var names = []; var values = []; for (var name in obj) { if (typeof(obj[name]) != 'function') { names.push(name); values.push(obj[name]); } } return {names: names, values: values, original: obj}; } function suitable(rule, args) { var default_args = split_obj(rule.defaults || {}); var diff_arg_names = array_diff(rule.arguments, default_args.names); for (var i = 0; i < diff_arg_names.length; i++) { if (!in_array(args.names, diff_arg_names[i])) { return false; } } if (array_diff(rule.arguments, args.names).length == 0) { if (rule.defaults == null) { return true; } for (var i = 0; i < default_args.names.length; i++) { var key = default_args.names[i]; var value = default_args.values[i]; if (value != args.original[key]) { return false; } } } return true; } function build(rule, args) { var tmp = []; var processed = rule.arguments.slice(); for (var i = 0; i < rule.trace.length; i++) { var part = rule.trace[i]; if (part.is_dynamic) { var converter = converters[rule.converters[part.data]]; var data = converter(args.original[part.data]); if (data == null) { return null; } tmp.push(data); processed.push(part.name); } else { tmp.push(part.data); } } tmp = tmp.join(''); var pipe = tmp.indexOf('|'); var subdomain = tmp.substring(0, pipe); var url = tmp.substring(pipe+1); var unprocessed = array_diff(args.names, processed); var first_query_var = true; for (var i = 0; i < unprocessed.length; i++) { if (first_query_var) { url += '?'; } else { url += '&'; } first_query_var = false; url += encodeURIComponent(unprocessed[i]); url += '='; url += encodeURIComponent(args.original[unprocessed[i]]); } return {subdomain: subdomain, path: url}; } function lstrip(s, c) { while (s && s.substring(0, 1) == c) { s = s.substring(1); } return s; } function rstrip(s, c) { while (s && s.substring(s.length-1, s.length) == c) { s = s.substring(0, s.length-1); } return s; } return function(endpoint, args, force_external) { args = split_obj(args); var rv = null; for (var i = 0; i < rules.length; i++) { var rule = rules[i]; if (rule.endpoint != endpoint) continue; if (suitable(rule, args)) { rv = build(rule, args); if (rv != null) { break; } } } if (rv == null) { return null; } if (!force_external && 
rv.subdomain == subdomain) { return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/'); } else { return url_scheme + '://' + (rv.subdomain ? rv.subdomain + '.' : '') + server_name + rstrip(script_name, '/') + '/' + lstrip(rv.path, '/'); } }; })""" % {'converters': u', '.join(converters)} return result def generate_map(map, name='url_map'): """ Generates a JavaScript function containing the rules defined in this map, to be used with a MapAdapter's generate_javascript method. If you don't pass a name the returned JavaScript code is an expression that returns a function. Otherwise it's a standalone script that assigns the function with that name. Dotted names are resolved (so you an use a name like 'obj.url_for') In order to use JavaScript generation, simplejson must be installed. Note that using this feature will expose the rules defined in your map to users. If your rules contain sensitive information, don't use JavaScript generation! """ map.update() rules = [] converters = [] for rule in map.iter_rules(): trace = [{ 'is_dynamic': is_dynamic, 'data': data } for is_dynamic, data in rule._trace] rule_converters = {} for key, converter in rule._converters.iteritems(): js_func = js_to_url_function(converter) try: index = converters.index(js_func) except ValueError: converters.append(js_func) index = len(converters) - 1 rule_converters[key] = index rules.append({ u'endpoint': rule.endpoint, u'arguments': list(rule.arguments), u'converters': rule_converters, u'trace': trace, u'defaults': rule.defaults }) return render_template(name_parts=name and name.split('.') or [], rules=dumps(rules), converters=converters) def generate_adapter(adapter, name='url_for', map_name='url_map'): """Generates the url building function for a map.""" values = { u'server_name': dumps(adapter.server_name), u'script_name': dumps(adapter.script_name), u'subdomain': dumps(adapter.subdomain), u'url_scheme': dumps(adapter.url_scheme), u'name': name, u'map_name': map_name } return u'''\ var %(name)s = %(map_name)s( %(server_name)s, %(script_name)s, %(subdomain)s, %(url_scheme)s );''' % values def js_to_url_function(converter): """Get the JavaScript converter function from a rule.""" if hasattr(converter, 'js_to_url_function'): data = converter.js_to_url_function() else: for cls in getmro(type(converter)): if cls in js_to_url_functions: data = js_to_url_functions[cls](converter) break else: return 'encodeURIComponent' return '(function(value) { %s })' % data def NumberConverter_js_to_url(conv): if conv.fixed_digits: return u'''\ var result = value.toString(); while (result.length < %s) result = '0' + result; return result;''' % conv.fixed_digits return u'return value.toString();' js_to_url_functions = { NumberConverter: NumberConverter_js_to_url }
mit
airbnb/airflow
tests/providers/amazon/aws/hooks/conftest.py
6
1123
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=redefined-outer-name

import boto3
import pytest
from moto import mock_s3


@pytest.fixture
def mocked_s3_res():
    with mock_s3():
        yield boto3.resource("s3")


@pytest.fixture
def s3_bucket(mocked_s3_res):
    bucket = 'airflow-test-s3-bucket'
    mocked_s3_res.create_bucket(Bucket=bucket)
    return bucket
apache-2.0
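A hypothetical test module sitting next to the conftest above could compose the two fixtures like this; moto intercepts the S3 calls, so nothing touches a real AWS account. The object key and payload are made up, and pytest, boto3 and moto are assumed to be installed:

def test_put_and_get_object(mocked_s3_res, s3_bucket):
    # Write an object into the mocked bucket created by the s3_bucket fixture.
    mocked_s3_res.Object(s3_bucket, "data/hello.txt").put(Body=b"hello")
    # Read it back through the same mocked resource.
    body = mocked_s3_res.Object(s3_bucket, "data/hello.txt").get()["Body"].read()
    assert body == b"hello"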
Titulacion-Sistemas/PythonTitulacion-EV
Lib/site-packages/pywin32-219-py2.7-win32.egg/win32com/demos/ietoolbar.py
18
10753
# -*- coding: latin-1 -*- # PyWin32 Internet Explorer Toolbar # # written by Leonard Ritter (paniq@gmx.net) # and Robert Förtsch (info@robert-foertsch.com) """ This sample implements a simple IE Toolbar COM server supporting Windows XP styles and access to the IWebBrowser2 interface. It also demonstrates how to hijack the parent window to catch WM_COMMAND messages. """ # imports section import sys, os from win32com import universal from win32com.client import gencache, DispatchWithEvents, Dispatch from win32com.client import constants, getevents import win32com import pythoncom import _winreg from win32com.shell import shell from win32com.shell.shellcon import * from win32com.axcontrol import axcontrol try: # try to get styles (winxp) import winxpgui as win32gui except: # import default module (win2k and lower) import win32gui import win32ui import win32con import commctrl import array, struct # ensure we know the ms internet controls typelib so we have access to IWebBrowser2 later on win32com.client.gencache.EnsureModule('{EAB22AC0-30C1-11CF-A7EB-0000C05BAE0B}',0,1,1) # IDeskBand_methods = ['GetBandInfo'] IDockingWindow_methods = ['ShowDW','CloseDW','ResizeBorderDW'] IOleWindow_methods = ['GetWindow','ContextSensitiveHelp'] IInputObject_methods = ['UIActivateIO','HasFocusIO','TranslateAcceleratorIO'] IObjectWithSite_methods = ['SetSite','GetSite'] IPersistStream_methods = ['GetClassID','IsDirty','Load','Save','GetSizeMax'] _ietoolbar_methods_ = IDeskBand_methods + IDockingWindow_methods + \ IOleWindow_methods + IInputObject_methods + \ IObjectWithSite_methods + IPersistStream_methods _ietoolbar_com_interfaces_ = [ shell.IID_IDeskBand, # IDeskBand axcontrol.IID_IObjectWithSite, # IObjectWithSite pythoncom.IID_IPersistStream, axcontrol.IID_IOleCommandTarget, ] class WIN32STRUCT: def __init__(self, **kw): full_fmt = "" for name, fmt, default in self._struct_items_: self.__dict__[name] = None if fmt == "z": full_fmt += "pi" else: full_fmt += fmt for name, val in kw.iteritems(): self.__dict__[name] = val def __setattr__(self, attr, val): if not attr.startswith("_") and attr not in self.__dict__: raise AttributeError(attr) self.__dict__[attr] = val def toparam(self): self._buffs = [] full_fmt = "" vals = [] for name, fmt, default in self._struct_items_: val = self.__dict__[name] if fmt == "z": fmt = "Pi" if val is None: vals.append(0) vals.append(0) else: str_buf = array.array("c", val+'\0') vals.append(str_buf.buffer_info()[0]) vals.append(len(val)) self._buffs.append(str_buf) # keep alive during the call. else: if val is None: val = default vals.append(val) full_fmt += fmt return struct.pack(*(full_fmt,) + tuple(vals)) class TBBUTTON(WIN32STRUCT): _struct_items_ = [ ("iBitmap", "i", 0), ("idCommand", "i", 0), ("fsState", "B", 0), ("fsStyle", "B", 0), ("bReserved", "H", 0), ("dwData", "I", 0), ("iString", "z", None), ] class Stub: """ this class serves as a method stub, outputting debug info whenever the object is being called. """ def __init__(self,name): self.name = name def __call__(self,*args): print 'STUB: ',self.name,args class IEToolbarCtrl: """ a tiny wrapper for our winapi-based toolbar control implementation. 
""" def __init__(self,hwndparent): styles = win32con.WS_CHILD \ | win32con.WS_VISIBLE \ | win32con.WS_CLIPSIBLINGS \ | win32con.WS_CLIPCHILDREN \ | commctrl.TBSTYLE_LIST \ | commctrl.TBSTYLE_FLAT \ | commctrl.TBSTYLE_TRANSPARENT \ | commctrl.CCS_TOP \ | commctrl.CCS_NODIVIDER \ | commctrl.CCS_NORESIZE \ | commctrl.CCS_NOPARENTALIGN self.hwnd = win32gui.CreateWindow('ToolbarWindow32', None, styles, 0, 0, 100, 100, hwndparent, 0, win32gui.dllhandle, None) win32gui.SendMessage(self.hwnd, commctrl.TB_BUTTONSTRUCTSIZE, 20, 0) def ShowWindow(self,mode): win32gui.ShowWindow(self.hwnd,mode) def AddButtons(self,*buttons): tbbuttons = '' for button in buttons: tbbuttons += button.toparam() return win32gui.SendMessage(self.hwnd, commctrl.TB_ADDBUTTONS, len(buttons), tbbuttons) def GetSafeHwnd(self): return self.hwnd class IEToolbar: """ The actual COM server class """ _com_interfaces_ = _ietoolbar_com_interfaces_ _public_methods_ = _ietoolbar_methods_ _reg_clsctx_ = pythoncom.CLSCTX_INPROC_SERVER # if you copy and modify this example, be sure to change the clsid below _reg_clsid_ = "{F21202A2-959A-4149-B1C3-68B9013F3335}" _reg_progid_ = "PyWin32.IEToolbar" _reg_desc_ = 'PyWin32 IE Toolbar' def __init__( self ): # put stubs for non-implemented methods for method in self._public_methods_: if not hasattr(self,method): print 'providing default stub for %s' % method setattr(self,method,Stub(method)) def GetWindow(self): return self.toolbar.GetSafeHwnd() def Load(self, stream): # called when the toolbar is loaded pass def Save(self, pStream, fClearDirty): # called when the toolbar shall save its information pass def CloseDW(self, dwReserved): del self.toolbar def ShowDW(self, bShow): if bShow: self.toolbar.ShowWindow(win32con.SW_SHOW) else: self.toolbar.ShowWindow(win32con.SW_HIDE) def on_first_button(self): print "first!" self.webbrowser.Navigate2('http://starship.python.net/crew/mhammond/') def on_second_button(self): print "second!" def on_third_button(self): print "third!" 
def toolbar_command_handler(self,args): hwnd,message,wparam,lparam,time,point = args if lparam == self.toolbar.GetSafeHwnd(): self._command_map[wparam]() def SetSite(self,unknown): if unknown: # retrieve the parent window interface for this site olewindow = unknown.QueryInterface(pythoncom.IID_IOleWindow) # ask the window for its handle hwndparent = olewindow.GetWindow() # first get a command target cmdtarget = unknown.QueryInterface(axcontrol.IID_IOleCommandTarget) # then travel over to a service provider serviceprovider = cmdtarget.QueryInterface(pythoncom.IID_IServiceProvider) # finally ask for the internet explorer application, returned as a dispatch object self.webbrowser = win32com.client.Dispatch(serviceprovider.QueryService('{0002DF05-0000-0000-C000-000000000046}',pythoncom.IID_IDispatch)) # now create and set up the toolbar self.toolbar = IEToolbarCtrl(hwndparent) buttons = [ ('Visit PyWin32 Homepage',self.on_first_button), ('Another Button', self.on_second_button), ('Yet Another Button', self.on_third_button), ] self._command_map = {} # wrap our parent window so we can hook message handlers window = win32ui.CreateWindowFromHandle(hwndparent) # add the buttons for i in range(len(buttons)): button = TBBUTTON() name,func = buttons[i] id = 0x4444+i button.iBitmap = -2 button.idCommand = id button.fsState = commctrl.TBSTATE_ENABLED button.fsStyle = commctrl.TBSTYLE_BUTTON button.iString = name self._command_map[0x4444+i] = func self.toolbar.AddButtons(button) window.HookMessage(self.toolbar_command_handler,win32con.WM_COMMAND) else: # lose all references self.webbrowser = None def GetClassID(self): return self._reg_clsid_ def GetBandInfo(self, dwBandId, dwViewMode, dwMask): ptMinSize = (0,24) ptMaxSize = (2000,24) ptIntegral = (0,0) ptActual = (2000,24) wszTitle = 'PyWin32 IE Toolbar' dwModeFlags = DBIMF_VARIABLEHEIGHT crBkgnd = 0 return (ptMinSize,ptMaxSize,ptIntegral,ptActual,wszTitle,dwModeFlags,crBkgnd) # used for HKLM install def DllInstall( bInstall, cmdLine ): comclass = IEToolbar # register plugin def DllRegisterServer(): comclass = IEToolbar # register toolbar with IE try: print "Trying to register Toolbar.\n" hkey = _winreg.CreateKey( _winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Internet Explorer\\Toolbar" ) subKey = _winreg.SetValueEx( hkey, comclass._reg_clsid_, 0, _winreg.REG_BINARY, "\0" ) except WindowsError: print "Couldn't set registry value.\nhkey: %d\tCLSID: %s\n" % ( hkey, comclass._reg_clsid_ ) else: print "Set registry value.\nhkey: %d\tCLSID: %s\n" % ( hkey, comclass._reg_clsid_ ) # TODO: implement reg settings for standard toolbar button # unregister plugin def DllUnregisterServer(): comclass = IEToolbar # unregister toolbar from internet explorer try: print "Trying to unregister Toolbar.\n" hkey = _winreg.CreateKey( _winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Microsoft\\Internet Explorer\\Toolbar" ) _winreg.DeleteValue( hkey, comclass._reg_clsid_ ) except WindowsError: print "Couldn't delete registry value.\nhkey: %d\tCLSID: %s\n" % ( hkey, comclass._reg_clsid_ ) else: print "Deleting reg key succeeded.\n" # entry point if __name__ == '__main__': import win32com.server.register win32com.server.register.UseCommandLine( IEToolbar ) # parse actual command line option if "--unregister" in sys.argv: DllUnregisterServer() else: DllRegisterServer() else: # import trace utility for remote debugging import win32traceutil
mit
kseistrup/qtile
libqtile/drawer.py
6
13929
# Copyright (c) 2010 Aldo Cortesi # Copyright (c) 2011 Florian Mounier # Copyright (c) 2011 oitel # Copyright (c) 2011 Kenji_Takahashi # Copyright (c) 2011 Paul Colomiets # Copyright (c) 2012, 2014 roger # Copyright (c) 2012 nullzion # Copyright (c) 2013 Tao Sauvage # Copyright (c) 2014-2015 Sean Vig # Copyright (c) 2014 Nathan Hoad # Copyright (c) 2014 dequis # Copyright (c) 2014 Tycho Andersen # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import division import collections import math import cairocffi import xcffib.xproto from . import pangocffi from . import utils class TextLayout(object): def __init__(self, drawer, text, colour, font_family, font_size, font_shadow, wrap=True, markup=False): self.drawer, self.colour = drawer, colour layout = drawer.ctx.create_layout() layout.set_alignment(pangocffi.ALIGN_CENTER) if not wrap: # pango wraps by default layout.set_ellipsize(pangocffi.ELLIPSIZE_END) desc = pangocffi.FontDescription.from_string(font_family) desc.set_absolute_size(pangocffi.units_from_double(font_size)) layout.set_font_description(desc) self.font_shadow = font_shadow self.layout = layout self.markup = markup self.text = text self._width = None def finalize(self): self.layout.finalize() @property def text(self): return self.layout.get_text() @text.setter def text(self, value): if self.markup: # pangocffi doesn't like None here, so we use "". 
if value is None: value = '' attrlist, value, accel_char = pangocffi.parse_markup(value) self.layout.set_attributes(attrlist) return self.layout.set_text(utils.scrub_to_utf8(value)) @property def width(self): if self._width is not None: return self._width else: return self.layout.get_pixel_size()[0] @width.setter def width(self, value): self._width = value self.layout.set_width(pangocffi.units_from_double(value)) @width.deleter def width(self): self._width = None self.layout.set_width(-1) @property def height(self): return self.layout.get_pixel_size()[1] def fontdescription(self): return self.layout.get_font_description() @property def font_family(self): d = self.fontdescription() return d.get_family() @font_family.setter def font_family(self, font): d = self.fontdescription() d.set_family(font) self.layout.set_font_description(d) @property def font_size(self): d = self.fontdescription() return d.get_size() @font_size.setter def font_size(self, size): d = self.fontdescription() d.set_size(size) d.set_absolute_size(pangocffi.units_from_double(size)) self.layout.set_font_description(d) def draw(self, x, y): if self.font_shadow is not None: self.drawer.set_source_rgb(self.font_shadow) self.drawer.ctx.move_to(x + 1, y + 1) self.drawer.ctx.show_layout(self.layout) self.drawer.set_source_rgb(self.colour) self.drawer.ctx.move_to(x, y) self.drawer.ctx.show_layout(self.layout) def framed(self, border_width, border_color, pad_x, pad_y): return TextFrame(self, border_width, border_color, pad_x, pad_y) class TextFrame(object): def __init__(self, layout, border_width, border_color, pad_x, pad_y): self.layout = layout self.border_width = border_width self.border_color = border_color self.drawer = self.layout.drawer if isinstance(pad_x, collections.Iterable): self.pad_left = pad_x[0] self.pad_right = pad_x[1] else: self.pad_left = self.pad_right = pad_x if isinstance(pad_y, collections.Iterable): self.pad_top = pad_y[0] self.pad_bottom = pad_y[1] else: self.pad_top = self.pad_bottom = pad_y def draw(self, x, y, rounded=True, fill=False): self.drawer.set_source_rgb(self.border_color) opts = [ x, y, self.layout.width + self.pad_left + self.pad_right, self.layout.height + self.pad_top + self.pad_bottom, self.border_width ] if fill: if rounded: self.drawer.rounded_fillrect(*opts) else: self.drawer.fillrect(*opts) else: if rounded: self.drawer.rounded_rectangle(*opts) else: self.drawer.rectangle(*opts) self.drawer.ctx.stroke() self.layout.draw( x + self.pad_left, y + self.pad_top ) def draw_fill(self, x, y, rounded=True): self.draw(x, y, rounded, fill=True) @property def height(self): return self.layout.height + self.pad_top + self.pad_bottom @property def width(self): return self.layout.width + self.pad_left + self.pad_right class Drawer(object): """ A helper class for drawing and text layout. We have a drawer object for each widget in the bar. The underlying surface is a pixmap with the same size as the bar itself. We draw to the pixmap starting at offset 0, 0, and when the time comes to display to the window, we copy the appropriate portion of the pixmap onto the window. 
""" def __init__(self, qtile, wid, width, height): self.qtile = qtile self.wid, self.width, self.height = wid, width, height self.pixmap = self.qtile.conn.conn.generate_id() self.gc = self.qtile.conn.conn.generate_id() self.qtile.conn.conn.core.CreatePixmap( self.qtile.conn.default_screen.root_depth, self.pixmap, self.wid, self.width, self.height ) self.qtile.conn.conn.core.CreateGC( self.gc, self.wid, xcffib.xproto.GC.Foreground | xcffib.xproto.GC.Background, [ self.qtile.conn.default_screen.black_pixel, self.qtile.conn.default_screen.white_pixel ] ) self.surface = cairocffi.XCBSurface( qtile.conn.conn, self.pixmap, self.find_root_visual(), self.width, self.height, ) self.ctx = self.new_ctx() self.clear((0, 0, 1)) def finalize(self): self.qtile.conn.conn.core.FreeGC(self.gc) self.qtile.conn.conn.core.FreePixmap(self.pixmap) self.ctx = None self.surface = None def _rounded_rect(self, x, y, width, height, linewidth): aspect = 1.0 corner_radius = height / 10.0 radius = corner_radius / aspect degrees = math.pi / 180.0 self.ctx.new_sub_path() delta = radius + linewidth / 2 self.ctx.arc(x + width - delta, y + delta, radius, -90 * degrees, 0 * degrees) self.ctx.arc(x + width - delta, y + height - delta, radius, 0 * degrees, 90 * degrees) self.ctx.arc(x + delta, y + height - delta, radius, 90 * degrees, 180 * degrees) self.ctx.arc(x + delta, y + delta, radius, 180 * degrees, 270 * degrees) self.ctx.close_path() def rounded_rectangle(self, x, y, width, height, linewidth): self._rounded_rect(x, y, width, height, linewidth) self.ctx.set_line_width(linewidth) self.ctx.stroke() def rounded_fillrect(self, x, y, width, height, linewidth): self._rounded_rect(x, y, width, height, linewidth) self.ctx.fill() def rectangle(self, x, y, width, height, linewidth=2): self.ctx.set_line_width(linewidth) self.ctx.rectangle(x, y, width, height) self.ctx.stroke() def fillrect(self, x, y, width, height, linewidth=2): self.ctx.set_line_width(linewidth) self.ctx.rectangle(x, y, width, height) self.ctx.fill() self.ctx.stroke() def draw(self, offsetx=0, offsety=0, width=None, height=None): """ offsetx: the X offset to start drawing at. offsety: the Y offset to start drawing at. width: the X portion of the canvas to draw at the starting point. height: the Y portion of the canvas to draw at the starting point. 
""" self.qtile.conn.conn.core.CopyArea( self.pixmap, self.wid, self.gc, 0, 0, # srcx, srcy offsetx, offsety, # dstx, dsty self.width if width is None else width, self.height if height is None else height ) def find_root_visual(self): for i in self.qtile.conn.default_screen.allowed_depths: for v in i.visuals: if v.visual_id == self.qtile.conn.default_screen.root_visual: return v def new_ctx(self): return pangocffi.CairoContext(cairocffi.Context(self.surface)) def set_source_rgb(self, colour): if type(colour) == list: if len(colour) == 0: # defaults to black self.ctx.set_source_rgba(*utils.rgb("#000000")) elif len(colour) == 1: self.ctx.set_source_rgba(*utils.rgb(colour[0])) else: linear = cairocffi.LinearGradient(0.0, 0.0, 0.0, self.height) step_size = 1.0 / (len(colour) - 1) step = 0.0 for c in colour: rgb_col = utils.rgb(c) if len(rgb_col) < 4: rgb_col[3] = 1 linear.add_color_stop_rgba(step, *rgb_col) step += step_size self.ctx.set_source(linear) else: self.ctx.set_source_rgba(*utils.rgb(colour)) def clear(self, colour): self.set_source_rgb(colour) self.ctx.rectangle(0, 0, self.width, self.height) self.ctx.fill() self.ctx.stroke() def textlayout(self, text, colour, font_family, font_size, font_shadow, markup=False, **kw): """ Get a text layout. """ return TextLayout(self, text, colour, font_family, font_size, font_shadow, markup=markup, **kw) def max_layout_size(self, texts, font_family, font_size): sizelayout = self.textlayout( "", "ffffff", font_family, font_size, None) widths, heights = [], [] for i in texts: sizelayout.text = i widths.append(sizelayout.width) heights.append(sizelayout.height) return max(widths), max(heights) # Old text layout functions, to be deprectated. def set_font(self, fontface, size, antialias=True): self.ctx.select_font_face(fontface) self.ctx.set_font_size(size) fo = self.ctx.get_font_options() fo.set_antialias(cairocffi.ANTIALIAS_SUBPIXEL) def text_extents(self, text): return self.ctx.text_extents(utils.scrub_to_utf8(text)) def font_extents(self): return self.ctx.font_extents() def fit_fontsize(self, heightlimit): """ Try to find a maximum font size that fits any strings within the height. """ self.ctx.set_font_size(heightlimit) asc, desc, height, _, _ = self.font_extents() self.ctx.set_font_size( int(heightlimit * heightlimit / height)) return self.font_extents() def fit_text(self, strings, heightlimit): """ Try to find a maximum font size that fits all strings within the height. """ self.ctx.set_font_size(heightlimit) _, _, _, maxheight, _, _ = self.ctx.text_extents("".join(strings)) if not maxheight: return 0, 0 self.ctx.set_font_size( int(heightlimit * heightlimit / maxheight)) maxwidth, maxheight = 0, 0 for i in strings: _, _, x, y, _, _ = self.ctx.text_extents(i) maxwidth = max(maxwidth, x) maxheight = max(maxheight, y) return maxwidth, maxheight def draw_vbar(self, color, x, y1, y2, linewidth=1): self.set_source_rgb(color) self.ctx.move_to(x, y1) self.ctx.line_to(x, y2) self.ctx.set_line_width(linewidth) self.ctx.stroke() def draw_hbar(self, color, x1, x2, y, linewidth=1): self.set_source_rgb(color) self.ctx.move_to(x1, y) self.ctx.line_to(x2, y) self.ctx.set_line_width(linewidth) self.ctx.stroke()
mit
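Drawer._rounded_rect in the record above traces a rounded rectangle out of four arcs. The same path logic can be exercised on an in-memory cairocffi ImageSurface instead of qtile's XCB pixmap, which makes it easy to inspect in isolation; this is a standalone sketch (output filename arbitrary), not how qtile itself renders:

import math

import cairocffi

surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 120, 40)
ctx = cairocffi.Context(surface)


def rounded_rect(ctx, x, y, width, height, linewidth):
    # Same construction as Drawer._rounded_rect: four quarter-circle arcs
    # joined into a closed sub-path.
    radius = height / 10.0
    degrees = math.pi / 180.0
    delta = radius + linewidth / 2
    ctx.new_sub_path()
    ctx.arc(x + width - delta, y + delta, radius, -90 * degrees, 0)
    ctx.arc(x + width - delta, y + height - delta, radius, 0, 90 * degrees)
    ctx.arc(x + delta, y + height - delta, radius, 90 * degrees, 180 * degrees)
    ctx.arc(x + delta, y + delta, radius, 180 * degrees, 270 * degrees)
    ctx.close_path()


ctx.set_source_rgba(1, 1, 1, 1)          # opaque white outline
rounded_rect(ctx, 2, 2, 116, 36, 2)
ctx.set_line_width(2)
ctx.stroke()
surface.write_to_png("rounded_rect.png")  # arbitrary output path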
ssorj/qpid-proton
python/examples/test_examples.py
5
7448
# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # import re import subprocess import time import unittest class Popen(subprocess.Popen): # We always use these options def __init__(self, args, **kwargs): super(Popen, self).\ __init__(args, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, universal_newlines=True, **kwargs) # For Python 2 compatibility add context manager support to Popen if it's not there if not hasattr(subprocess.Popen, '__enter__'): def __enter__(self): return self # For Python 2 compatibility add context manager support to Popen if it's not there if not hasattr(subprocess.Popen, '__exit__'): def __exit__(self, exc_type, exc_val, exc_tb): try: if self.stdin: self.stdin.close() if self.stdout: self.stdout.close() if self.stderr: self.stderr.close() finally: self.wait() def remove_unicode_prefix(line): return re.sub(r"u(['\"])", r"\1", line) class ExamplesTest(unittest.TestCase): def test_helloworld(self, example="helloworld.py"): with Popen([example]) as p: p.wait() output = [l.strip() for l in p.stdout] self.assertEqual(output, ['Hello World!']) def test_helloworld_direct(self): self.test_helloworld('helloworld_direct.py') def test_helloworld_blocking(self): self.test_helloworld('helloworld_blocking.py') def test_helloworld_tornado(self): self.test_helloworld('helloworld_tornado.py') def test_helloworld_direct_tornado(self): self.test_helloworld('helloworld_direct_tornado.py') def test_simple_send_recv(self, recv='simple_recv.py', send='simple_send.py'): with Popen([recv]) as r: with Popen([send]): pass actual = [remove_unicode_prefix(l.strip()) for l in r.stdout] expected_py2 = ["{'sequence': int32(%i)}" % (i + 1,) for i in range(100)] expected_py3 = ["{'sequence': %i}" % (i + 1,) for i in range(100)] self.assertIn(actual, [expected_py2, expected_py3]) def test_client_server(self, client=['client.py'], server=['server.py'], sleep=0): with Popen(server) as s: if sleep: time.sleep(sleep) with Popen(client) as c: c.wait() actual = [l.strip() for l in c.stdout] inputs = ["Twas brillig, and the slithy toves", "Did gire and gymble in the wabe.", "All mimsy were the borogroves,", "And the mome raths outgrabe."] expected = ["%s => %s" % (l, l.upper()) for l in inputs] self.assertEqual(actual, expected) s.terminate() def test_sync_client_server(self): self.test_client_server(client=['sync_client.py']) def test_client_server_tx(self): self.test_client_server(server=['server_tx.py']) def test_sync_client_server_tx(self): self.test_client_server(client=['sync_client.py'], server=['server_tx.py']) def test_client_server_direct(self): self.test_client_server(client=['client.py', '-a', 'localhost:8888/examples'], server=['server_direct.py'], sleep=0.5) def test_sync_client_server_direct(self): self.test_client_server(client=['sync_client.py', '-a', 
'localhost:8888/examples'], server=['server_direct.py'], sleep=0.5) def test_db_send_recv(self): self.maxDiff = None # setup databases subprocess.check_call(['db_ctrl.py', 'init', './src_db']) subprocess.check_call(['db_ctrl.py', 'init', './dst_db']) with Popen(['db_ctrl.py', 'insert', './src_db'], stdin=subprocess.PIPE) as fill: for i in range(100): fill.stdin.write("Message-%i\n" % (i + 1)) fill.stdin.close() # run send and recv with Popen(['db_recv.py', '-m', '100']) as r: with Popen(['db_send.py', '-m', '100']): pass r.wait() # verify output of receive actual = [l.strip() for l in r.stdout] expected = ["inserted message %i" % (i + 1) for i in range(100)] self.assertEqual(actual, expected) # verify state of databases with Popen(['db_ctrl.py', 'list', './dst_db']) as v: v.wait() expected = ["(%i, 'Message-%i')" % (i + 1, i + 1) for i in range(100)] actual = [remove_unicode_prefix(l.strip()) for l in v.stdout] self.assertEqual(actual, expected) def test_tx_send_tx_recv(self): self.test_simple_send_recv(recv='tx_recv.py', send='tx_send.py') def test_simple_send_direct_recv(self): self.maxDiff = None with Popen(['direct_recv.py', '-a', 'localhost:8888']) as r: time.sleep(0.5) with Popen(['simple_send.py', '-a', 'localhost:8888']): pass r.wait() actual = [remove_unicode_prefix(l.strip()) for l in r.stdout] expected_py2 = ["{'sequence': int32(%i)}" % (i + 1,) for i in range(100)] expected_py3 = ["{'sequence': %i}" % (i + 1,) for i in range(100)] self.assertIn(actual, [expected_py2, expected_py3]) def test_direct_send_simple_recv(self): with Popen(['direct_send.py', '-a', 'localhost:8888']): time.sleep(0.5) with Popen(['simple_recv.py', '-a', 'localhost:8888']) as r: r.wait() actual = [remove_unicode_prefix(l.strip()) for l in r.stdout] expected_py2 = ["{'sequence': int32(%i)}" % (i + 1,) for i in range(100)] expected_py3 = ["{'sequence': %i}" % (i + 1,) for i in range(100)] self.assertIn(actual, [expected_py2, expected_py3]) def test_selected_recv(self): with Popen(['colour_send.py']): pass with Popen(['selected_recv.py', '-m', '50']) as r: r.wait() actual = [l.strip() for l in r.stdout] expected = ["green %i" % (i + 1) for i in range(100) if i % 2 == 0] self.assertEqual(actual, expected) with Popen(['simple_recv.py', '-m', '50']) as r: r.wait() actual = [l.strip() for l in r.stdout] expected = ["red %i" % (i + 1) for i in range(100) if i % 2 == 1] self.assertEqual(actual, expected)
apache-2.0
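The Popen subclass at the top of the record above pins stdout/stderr handling so each test can read an example's output line by line. The same pattern in miniature, run against a throwaway child process rather than one of the proton examples:

import subprocess
import sys

# Capture stderr on stdout and decode to text, as the test helper does.
with subprocess.Popen(
        [sys.executable, "-c", "print('Hello World!')"],
        stderr=subprocess.STDOUT,
        stdout=subprocess.PIPE,
        universal_newlines=True) as p:
    p.wait()
    output = [l.strip() for l in p.stdout]

assert output == ['Hello World!']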
BryceLohr/authentic
authentic2/idp/signals.py
2
2126
from django.dispatch import Signal

from authentic2.idp.attributes import provide_attributes_at_sso

'''authorize_decision

Expects a dictionary as return value with:
    - the authorization decision, e.g. dic['authz'] = True or False
    - optionally a message, e.g. dic['message'] = message
'''
authorize_service = Signal(providing_args=["request", "user", "audience"])

'''add_attributes_to_response

This signal is used by asynchronous bindings that do not receive an attribute
list in the request, which means a predefined list is used. An asynchronous
binding means the user is "on" the IdP when the request is brought, so
attributes can be taken from the Django session. It is mainly usable for SSO
request treatment.

The signal is sent with the parameters:
    - request: the request that triggered the need for attributes
    - user: instance of the User Django model indicating the subject of the
      attributes. May be different from request.user, if any. We should only
      use a username here in case we want to provide attributes for entities
      having no corresponding User instance.
    - audience: identifier of the destination of the attributes (e.g. the
      providerID for SAML2).

The expected return value is a dictionary such as:
    - dic = {}
    - attributes = {}
    - attributes[name] = (value1, value2, )
    - attributes[(name, format)] = (value1, value2, )
    - attributes[(name, format, nickname)] = (value1, value2, )
    - dic['attributes'] = attributes
    - return dic
'''
add_attributes_to_response = \
    Signal(providing_args=["request", "user", "audience"])
add_attributes_to_response.connect(provide_attributes_at_sso)

'''add_attributes_listed_to_response

Same as add_attributes_to_response except that the signal sender gives a list
of attribute identifiers. The attribute namespace is obtained from the
provider to which a namespace has been declared.
'''
add_attributes_listed_to_response = \
    Signal(providing_args=["request", "user", "audience", "attributes"])

'''avoid_consent

Expects a boolean, e.g. dic['avoid_consent'] = True or False
'''
avoid_consent = Signal(providing_args=["request", "user", "audience"])
agpl-3.0
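The docstrings above spell out the dictionary shape a receiver of add_attributes_to_response is expected to return. A minimal receiver sketch with invented attribute names and values, shown standalone rather than connected to the signal:

def provide_example_attributes(request=None, user=None, audience=None, **kwargs):
    # Keys may be a plain name, a (name, format) pair or a
    # (name, format, nickname) triple; values are tuples of attribute values.
    attributes = {}
    attributes['email'] = ('user@example.net',)
    attributes[('role', 'urn:oasis:names:tc:SAML:2.0:attrname-format:basic')] = (
        'staff', 'admin',
    )
    return {'attributes': attributes}

print(provide_example_attributes(audience='https://sp.example.net/metadata'))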
proffalken/cobbler
apitests/distro/new_distro_test.py
1
2132
""" new_distro.py defines a set of methods designed for testing Cobbler's distros. Copyright 2009, Red Hat, Inc Steve Salevan <ssalevan@redhat.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA """ from base import * class DistroTests(CobblerTest): def test_new_working_distro_basic(self): """ Attempts to create a barebones Cobbler distro using information contained within config file """ self.create_distro() distro = self.api.find_distro({'name': cfg["distro_name"]}) self.assertTrue(distro != None) def test_new_working_distro_detailed(self): """ Attempts to create a Cobbler distro with a bevy of options, using information contained within config file """ did = self.create_distro_detailed() self.assertTrue(self.api.find_distro({'name': cfg["distro_name"]}) != None) def test_new_nonworking_distro(self): """ Attempts to create a distro lacking required information, passes if xmlrpclib returns Fault """ did = self.api.new_distro(self.token) self.api.modify_distro(did, "name", cfg["distro_name"], self.token) self.assertRaises(xmlrpclib.Fault, self.api.save_distro, did, self.token) def test_new_distro_without_token(self): """ Attempts to run new_distro method without supplying authenticated token """ self.assertRaises(xmlrpclib.Fault, self.api.new_distro)
gpl-2.0
lokirius/python-for-android
python3-alpha/python3-src/Tools/scripts/eptags.py
88
1485
#! /usr/bin/env python3

"""Create a TAGS file for Python programs, usable with GNU Emacs.

usage: eptags pyfiles...

The output TAGS file is usable with Emacs version 18, 19, 20.
Tagged are:
 - functions (even inside other defs or classes)
 - classes

eptags warns about files it cannot open.
eptags will not give warnings about duplicate tags.

BUGS:
   Because of tag duplication (methods with the same name in different
   classes), TAGS files are not very useful for most object-oriented
   python projects.
"""
import sys,re

expr = r'^[ \t]*(def|class)[ \t]+([a-zA-Z_][a-zA-Z0-9_]*)[ \t]*[:\(]'
matcher = re.compile(expr)

def treat_file(filename, outfp):
    """Append tags found in file named 'filename' to the open file 'outfp'"""
    try:
        fp = open(filename, 'r')
    except:
        sys.stderr.write('Cannot open %s\n' % filename)
        return
    charno = 0
    lineno = 0
    tags = []
    size = 0
    while 1:
        line = fp.readline()
        if not line:
            break
        lineno = lineno + 1
        m = matcher.search(line)
        if m:
            tag = m.group(0) + '\177%d,%d\n' % (lineno, charno)
            tags.append(tag)
            size = size + len(tag)
        charno = charno + len(line)
    outfp.write('\f\n%s,%d\n' % (filename, size))
    for tag in tags:
        outfp.write(tag)

def main():
    outfp = open('TAGS', 'w')
    for filename in sys.argv[1:]:
        treat_file(filename, outfp)

if __name__ == "__main__":
    main()
apache-2.0
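As a rough illustration of the Emacs TAGS entry format that treat_file() emits above, the snippet below runs the same regular expression on one sample line; the line and character offsets (23, 512) are made-up values.

import re

matcher = re.compile(r'^[ \t]*(def|class)[ \t]+([a-zA-Z_][a-zA-Z0-9_]*)[ \t]*[:\(]')

sample = "def treat_file(filename, outfp):"
m = matcher.search(sample)
if m:
    # Each tag is the matched text, a DEL (0x7f) byte, then "lineno,charno".
    tag = m.group(0) + '\177%d,%d\n' % (23, 512)
    print(repr(tag))  # -> 'def treat_file(\x7f23,512\n'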
simplyguru-dot/ansible-modules-extras
notification/jabber.py
60
4555
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>

DOCUMENTATION = '''
---
version_added: "1.2"
module: jabber
short_description: Send a message to jabber user or chat room
description:
   - Send a message to jabber
options:
  user:
    description:
      - User as which to connect
    required: true
  password:
    description:
      - password for user to connect
    required: true
  to:
    description:
      - user ID or name of the room, when using room use a slash to indicate your nick.
    required: true
  msg:
    description:
      - The message body.
    required: true
    default: null
  host:
    description:
      - host to connect, overrides user info
    required: false
  port:
    description:
      - port to connect to, overrides default
    required: false
    default: 5222
  encoding:
    description:
      - message encoding
    required: false

# informational: requirements for nodes
requirements:
    - python xmpp (xmpppy)
author: "Brian Coca (@bcoca)"
'''

EXAMPLES = '''
# send a message to a user
- jabber: user=mybot@example.net password=secret to=friend@example.net msg="Ansible task finished"

# send a message to a room
- jabber: user=mybot@example.net password=secret to=mychaps@conference.example.net/ansiblebot msg="Ansible task finished"

# send a message, specifying the host and port
- jabber: user=mybot@example.net host=talk.example.net port=5223 password=secret to=mychaps@example.net msg="Ansible task finished"
'''

import os
import re
import time

HAS_XMPP = True
try:
    import xmpp
except ImportError:
    HAS_XMPP = False


def main():

    module = AnsibleModule(
        argument_spec=dict(
            user=dict(required=True),
            password=dict(required=True),
            to=dict(required=True),
            msg=dict(required=True),
            host=dict(required=False),
            port=dict(required=False, default=5222),
            encoding=dict(required=False),
        ),
        supports_check_mode=True
    )

    if not HAS_XMPP:
        module.fail_json(msg="The required python xmpp library (xmpppy) is not installed")

    jid = xmpp.JID(module.params['user'])
    user = jid.getNode()
    server = jid.getDomain()
    port = module.params['port']
    password = module.params['password']
    try:
        to, nick = module.params['to'].split('/', 1)
    except ValueError:
        to, nick = module.params['to'], None

    if module.params['host']:
        host = module.params['host']
    else:
        host = server
    if module.params['encoding']:
        xmpp.simplexml.ENCODING = module.params['encoding']

    msg = xmpp.protocol.Message(body=module.params['msg'])

    try:
        conn = xmpp.Client(server)
        if not conn.connect(server=(host, port)):
            module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server))
        if not conn.auth(user, password, 'Ansible'):
            module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user, server))
        # some old servers require this, also the sleep following send
        conn.sendInitPresence(requestRoster=0)

        if nick:  # sending to room instead of user, need to join
            msg.setType('groupchat')
            msg.setTag('x', namespace='http://jabber.org/protocol/muc#user')
            conn.send(xmpp.Presence(to=module.params['to']))
            time.sleep(1)
        else:
            msg.setType('chat')

        msg.setTo(to)
        if not module.check_mode:
            conn.send(msg)
            time.sleep(1)
        conn.disconnect()
    except Exception, e:
        module.fail_json(msg="unable to send msg: %s" % e)

    module.exit_json(changed=False, to=to, user=user, msg=msg.getBody())

# import module snippets
from ansible.module_utils.basic import *
main()
gpl-3.0
twbarber/pygooglevoice
googlevoice/util.py
40
8838
import re from sys import stdout from xml.parsers.expat import ParserCreate from time import gmtime from datetime import datetime from pprint import pprint try: from urllib2 import build_opener,install_opener, \ HTTPCookieProcessor,Request,urlopen from urllib import urlencode,quote except ImportError: from urllib.request import build_opener,install_opener, \ HTTPCookieProcessor,Request,urlopen from urllib.parse import urlencode,quote try: from http.cookiejar import LWPCookieJar as CookieJar except ImportError: from cookielib import LWPCookieJar as CookieJar try: from json import loads except ImportError: from simplejson import loads try: input = raw_input except NameError: input = input sha1_re = re.compile(r'^[a-fA-F0-9]{40}$') def print_(*values, **kwargs): """ Implementation of Python3's print function Prints the values to a stream, or to sys.stdout by default. Optional keyword arguments: file: a file-like object (stream); defaults to the current sys.stdout. sep: string inserted between values, default a space. end: string appended after the last value, default a newline. """ fo = kwargs.pop('file', stdout) fo.write(kwargs.pop('sep', ' ').join(map(str, values))) fo.write(kwargs.pop('end', '\n')) fo.flush() def is_sha1(s): """ Returns ``True`` if the string is a SHA1 hash """ return bool(sha1_re.match(s)) def validate_response(response): """ Validates that the JSON response is A-OK """ try: assert 'ok' in response and response['ok'] except AssertionError: raise ValidationError('There was a problem with GV: %s' % response) def load_and_validate(response): """ Loads JSON data from http response then validates """ validate_response(loads(response.read())) class ValidationError(Exception): """ Bombs when response code back from Voice 500s """ class LoginError(Exception): """ Occurs when login credentials are incorrect """ class ParsingError(Exception): """ Happens when XML feed parsing fails """ class JSONError(Exception): """ Failed JSON deserialization """ class DownloadError(Exception): """ Cannot download message, probably not in voicemail/recorded """ class ForwardingError(Exception): """ Forwarding number given was incorrect """ class AttrDict(dict): def __getattr__(self, attr): if attr in self: return self[attr] class Phone(AttrDict): """ Wrapper for phone objects used for phone specific methods Attributes are: * id: int * phoneNumber: i18n phone number * formattedNumber: humanized phone number string * we: data dict * wd: data dict * verified: bool * name: strign label * smsEnabled: bool * scheduleSet: bool * policyBitmask: int * weekdayTimes: list * dEPRECATEDDisabled: bool * weekdayAllDay: bool * telephonyVerified * weekendTimes: list * active: bool * weekendAllDay: bool * enabledForOthers: bool * type: int (1 - Home, 2 - Mobile, 3 - Work, 4 - Gizmo) """ def __init__(self, voice, data): self.voice = voice super(Phone, self).__init__(data) def enable(self,): """ Enables this phone for usage """ return self.__call_forwarding() def disable(self): """ Disables this phone """ return self.__call_forwarding('0') def __call_forwarding(self, enabled='1'): """ Enables or disables this phone """ self.voice.__validate_special_page('default_forward', {'enabled':enabled, 'phoneId': self.id}) def __str__(self): return self.phoneNumber def __repr__(self): return '<Phone %s>' % self.phoneNumber class Message(AttrDict): """ Wrapper for all call/sms message instances stored in Google Voice Attributes are: * id: SHA1 identifier * isTrash: bool * displayStartDateTime: datetime * star: bool * isSpam: 
bool * startTime: gmtime * labels: list * displayStartTime: time * children: str * note: str * isRead: bool * displayNumber: str * relativeStartTime: str * phoneNumber: str * type: int """ def __init__(self, folder, id, data): assert is_sha1(id), 'Message id not a SHA1 hash' self.folder = folder self.id = id super(AttrDict, self).__init__(data) self['startTime'] = gmtime(int(self['startTime'])/1000) self['displayStartDateTime'] = datetime.strptime( self['displayStartDateTime'], '%m/%d/%y %I:%M %p') self['displayStartTime'] = self['displayStartDateTime'].time() def delete(self, trash=1): """ Moves this message to the Trash. Use ``message.delete(0)`` to move it out of the Trash. """ self.folder.voice.__messages_post('delete', self.id, trash=trash) def star(self, star=1): """ Star this message. Use ``message.star(0)`` to unstar it. """ self.folder.voice.__messages_post('star', self.id, star=star) def mark(self, read=1): """ Mark this message as read. Use ``message.mark(0)`` to mark it as unread. """ self.folder.voice.__messages_post('mark', self.id, read=read) def download(self, adir=None): """ Download the message MP3 (if any). Saves files to ``adir`` (defaults to current directory). Message hashes can be found in ``self.voicemail().messages`` for example. Returns location of saved file. """ return self.folder.voice.download(self, adir) def __str__(self): return self.id def __repr__(self): return '<Message #%s (%s)>' % (self.id, self.phoneNumber) class Folder(AttrDict): """ Folder wrapper for feeds from Google Voice Attributes are: * totalSize: int (aka ``__len__``) * unreadCounts: dict * resultsPerPage: int * messages: list of Message instances """ def __init__(self, voice, name, data): self.voice = voice self.name = name super(AttrDict, self).__init__(data) def messages(self): """ Returns a list of all messages in this folder """ return [Message(self, *i) for i in self['messages'].items()] messages = property(messages) def __len__(self): return self['totalSize'] def __repr__(self): return '<Folder %s (%s)>' % (self.name, len(self)) class XMLParser(object): """ XML Parser helper that can dig json and html out of the feeds. The parser takes a ``Voice`` instance, page name, and function to grab data from. Calling the parser calls the data function once, sets up the ``json`` and ``html`` attributes and returns a ``Folder`` instance for the given page:: >>> o = XMLParser(voice, 'voicemail', lambda: 'some xml payload') >>> o() ... <Folder ...> >>> o.json ... 'some json payload' >>> o.data ... 'loaded json payload' >>> o.html ... 
'some html payload' """ attr = None def start_element(self, name, attrs): if name in ('json','html'): self.attr = name def end_element(self, name): self.attr = None def char_data(self, data): if self.attr and data: setattr(self, self.attr, getattr(self, self.attr) + data) def __init__(self, voice, name, datafunc): self.json, self.html = '','' self.datafunc = datafunc self.voice = voice self.name = name def __call__(self): self.json, self.html = '','' parser = ParserCreate() parser.StartElementHandler = self.start_element parser.EndElementHandler = self.end_element parser.CharacterDataHandler = self.char_data try: data = self.datafunc() parser.Parse(data, 1) except: raise ParsingError return self.folder def folder(self): """ Returns associated ``Folder`` instance for given page (``self.name``) """ return Folder(self.voice, self.name, self.data) folder = property(folder) def data(self): """ Returns the parsed json information after calling the XMLParser """ try: return loads(self.json) except: raise JSONError data = property(data)
bsd-3-clause
girving/tensorflow
tensorflow/contrib/cmake/python_sanity_test.py
44
3692
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Complain about invalid or missing entries in python_*.txt files. Problematic entries can be commented for temporary whitelisting. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import unittest def abs_path(path): root = os.path.dirname(__file__) for _ in range(3): root = os.path.join(root, os.pardir) path = os.path.join(root, path) path = os.path.abspath(path) return path def read_entries(test): with open(abs_path(test.entries_file), "r") as f: lines = f.readlines() lines = [line.strip() for line in lines] lines = [line for line in lines if line] test.entries = [] test.whitelist = [] for line in lines: # line is comment if line.startswith("#"): line = line[1:].strip() # whitelist entry if line.startswith("tensorflow/"): test.whitelist.append(line) # line has comment -> strip comment elif line.find("#") != -1: line = line[:line.find("#")].strip() test.entries.append(line) else: test.entries.append(line) def test_invalid_directories(test): for entry in test.entries: if not os.path.isdir(abs_path(entry)): problem = "'" + test.entries_file + "' contains invalid '" + entry + "'" solution = ("Please remove the invalid entry (or add the missing " "directory).") raise AssertionError(problem + "\n" + solution) def test_missing_directory(test, path): if path in test.whitelist: return dir_exists = os.path.isdir(abs_path(path)) entry_exists = path in test.entries if dir_exists and not entry_exists: problem = "'" + test.entries_file + "' is missing '" + path + "'" solution = "Please add the missing entry (comment to whitelist if needed)." raise AssertionError(problem + "\n" + solution) class PythonModuleTest(unittest.TestCase): def setUp(self): self.entries_file = "tensorflow/contrib/cmake/python_modules.txt" read_entries(self) def testInvalidEntries(self): test_invalid_directories(self) def testMissingModules(self): module_names = next(os.walk(abs_path("tensorflow/contrib")))[1] for module_name in module_names: path = "tensorflow/contrib/" + module_name test_missing_directory(self, path + "/python") test_missing_directory(self, path + "/python/ops") test_missing_directory(self, path + "/python/kernels") test_missing_directory(self, path + "/python/layers") class PythonProtoTest(unittest.TestCase): def setUp(self): self.entries_file = "tensorflow/contrib/cmake/python_protos.txt" read_entries(self) def testInvalidEntries(self): test_invalid_directories(self) class PythonProtoCCTest(unittest.TestCase): def setUp(self): self.entries_file = "tensorflow/contrib/cmake/python_protos_cc.txt" read_entries(self) def testInvalidEntries(self): test_invalid_directories(self) if __name__ == "__main__": unittest.main()
apache-2.0
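For reference, a hedged sketch of how an entries file is interpreted by the read_entries() helper above; the module paths are invented examples, not real directories.

sample = """
tensorflow/contrib/foo/python
tensorflow/contrib/bar/python  # trailing comments are stripped
# tensorflow/contrib/baz/python
"""

entries, whitelist = [], []
for line in (l.strip() for l in sample.splitlines() if l.strip()):
    if line.startswith("#"):
        line = line[1:].strip()
        if line.startswith("tensorflow/"):
            whitelist.append(line)  # commented-out path acts as a temporary whitelist entry
    elif "#" in line:
        entries.append(line[:line.find("#")].strip())
    else:
        entries.append(line)

# entries   -> ['tensorflow/contrib/foo/python', 'tensorflow/contrib/bar/python']
# whitelist -> ['tensorflow/contrib/baz/python']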
girving/tensorflow
tensorflow/contrib/distributions/python/ops/bijectors/kumaraswamy.py
33
5245
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Kumaraswamy bijector.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops.distributions import bijector from tensorflow.python.util import deprecation __all__ = [ "Kumaraswamy", ] class Kumaraswamy(bijector.Bijector): """Compute `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a), X in [0, 1]`. This bijector maps inputs from `[0, 1]` to [0, 1]`. The inverse of the bijector applied to a uniform random variable `X ~ U(0, 1) gives back a random variable with the [Kumaraswamy distribution]( https://en.wikipedia.org/wiki/Kumaraswamy_distribution): ```none Y ~ Kumaraswamy(a, b) pdf(y; a, b, 0 <= y <= 1) = a * b * y ** (a - 1) * (1 - y**a) ** (b - 1) ``` """ @deprecation.deprecated( "2018-10-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.contrib.distributions`.", warn_once=True) def __init__(self, concentration1=None, concentration0=None, validate_args=False, name="kumaraswamy"): """Instantiates the `Kumaraswamy` bijector. Args: concentration1: Python `float` scalar indicating the transform power, i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `a` is `concentration1`. concentration0: Python `float` scalar indicating the transform power, i.e., `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)` where `b` is `concentration0`. validate_args: Python `bool` indicating whether arguments should be checked for correctness. name: Python `str` name given to ops managed by this object. 
""" self._graph_parents = [] self._name = name self._validate_args = validate_args with self._name_scope("init", values=[concentration1, concentration0]): concentration1 = self._maybe_assert_valid_concentration( ops.convert_to_tensor(concentration1, name="concentration1"), validate_args=validate_args) concentration0 = self._maybe_assert_valid_concentration( ops.convert_to_tensor(concentration0, name="concentration0"), validate_args=validate_args) self._concentration1 = concentration1 self._concentration0 = concentration0 super(Kumaraswamy, self).__init__( forward_min_event_ndims=0, validate_args=validate_args, name=name) @property def concentration1(self): """The `a` in: `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)`.""" return self._concentration1 @property def concentration0(self): """The `b` in: `Y = g(X) = (1 - (1 - X)**(1 / b))**(1 / a)`.""" return self._concentration0 def _forward(self, x): x = self._maybe_assert_valid(x) return math_ops.exp( math_ops.log1p(-math_ops.exp(math_ops.log1p(-x) / self.concentration0)) / self.concentration1) def _inverse(self, y): y = self._maybe_assert_valid(y) return math_ops.exp(math_ops.log1p( -(1 - y**self.concentration1)**self.concentration0)) def _inverse_log_det_jacobian(self, y): y = self._maybe_assert_valid(y) return ( math_ops.log(self.concentration1) + math_ops.log(self.concentration0) + (self.concentration1 - 1) * math_ops.log(y) + (self.concentration0 - 1) * math_ops.log1p(-y**self.concentration1)) def _maybe_assert_valid_concentration(self, concentration, validate_args): """Checks the validity of a concentration parameter.""" if not validate_args: return concentration return control_flow_ops.with_dependencies([ check_ops.assert_positive( concentration, message="Concentration parameter must be positive."), ], concentration) def _maybe_assert_valid(self, x): if not self.validate_args: return x return control_flow_ops.with_dependencies([ check_ops.assert_non_negative( x, message="sample must be non-negative"), check_ops.assert_less_equal( x, array_ops.ones([], self.concentration0.dtype), message="sample must be no larger than `1`."), ], x)
apache-2.0
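A small NumPy sketch (not TensorFlow code) that checks the forward/inverse pair implemented by this bijector; a and b are arbitrary example concentrations.

import numpy as np

a, b = 2.0, 3.0                  # concentration1, concentration0
x = np.array([0.1, 0.5, 0.9])

y = (1.0 - (1.0 - x) ** (1.0 / b)) ** (1.0 / a)   # forward: Y = (1 - (1 - X)**(1/b))**(1/a)
x_back = 1.0 - (1.0 - y ** a) ** b                # inverse: X = 1 - (1 - Y**a)**b

assert np.allclose(x, x_back)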
girving/tensorflow
tensorflow/compiler/tests/momentum_test.py
9
8601
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for Momentum.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.compiler.tests import xla_test from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.ops import array_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import variables from tensorflow.python.platform import test from tensorflow.python.training import momentum as momentum_lib class MomentumOptimizerTest(xla_test.XLATestCase): def _update_nesterov_momentum_numpy(self, var, accum, g, lr, momentum): var += accum * lr * momentum accum = accum * momentum + g var -= lr * accum var -= accum * lr * momentum return var, accum def testBasic(self): for dtype in self.float_types: with self.cached_session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) mom_opt = momentum_lib.MomentumOptimizer( learning_rate=2.0, momentum=0.9) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) self.assertFalse(slot0 in variables.trainable_variables()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) self.assertFalse(slot1 in variables.trainable_variables()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval()) self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType( np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllCloseAccordingToType( np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. 
self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), var0.eval()) self.assertAllCloseAccordingToType( np.array([ 2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ( (0.9 * 0.01 + 0.01) * 2.0) ]), var1.eval()) def testNesterovMomentum(self): for dtype in self.float_types: with self.cached_session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([0.1, 0.2], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([0.3, 0.4], dtype=dtype) var0_np = np.array([0.1, 0.2], dtype=dtype) var1_np = np.array([0.3, 0.4], dtype=dtype) accum0_np = np.array([0.0, 0.0], dtype=dtype) accum1_np = np.array([0.0, 0.0], dtype=dtype) cost = 0.4 * var0 * var0 + 0.9 * var1 global_step = resource_variable_ops.ResourceVariable( array_ops.zeros([], dtypes.int32), name="global_step") mom_op = momentum_lib.MomentumOptimizer( learning_rate=0.1, momentum=0.9, use_nesterov=True) opt_op = mom_op.minimize(cost, global_step, [var0, var1]) variables.global_variables_initializer().run() for _ in range(1, 5): opt_op.run() var0_np, accum0_np = self._update_nesterov_momentum_numpy( var0_np, accum0_np, var0_np * 0.8, 0.1, 0.9) var1_np, accum1_np = self._update_nesterov_momentum_numpy( var1_np, accum1_np, 0.9, 0.1, 0.9) self.assertAllCloseAccordingToType(var0_np, var0.eval()) self.assertAllCloseAccordingToType(var1_np, var1.eval()) def testTensorLearningRateAndMomentum(self): for dtype in self.float_types: with self.cached_session(), self.test_scope(): var0 = resource_variable_ops.ResourceVariable([1.0, 2.0], dtype=dtype) var1 = resource_variable_ops.ResourceVariable([3.0, 4.0], dtype=dtype) grads0 = constant_op.constant([0.1, 0.1], dtype=dtype) grads1 = constant_op.constant([0.01, 0.01], dtype=dtype) mom_opt = momentum_lib.MomentumOptimizer( learning_rate=constant_op.constant(2.0), momentum=constant_op.constant(0.9)) mom_update = mom_opt.apply_gradients( zip([grads0, grads1], [var0, var1])) variables.global_variables_initializer().run() # Check we have slots self.assertEqual(["momentum"], mom_opt.get_slot_names()) slot0 = mom_opt.get_slot(var0, "momentum") self.assertEquals(slot0.get_shape(), var0.get_shape()) self.assertFalse(slot0 in variables.trainable_variables()) slot1 = mom_opt.get_slot(var1, "momentum") self.assertEquals(slot1.get_shape(), var1.get_shape()) self.assertFalse(slot1 in variables.trainable_variables()) # Fetch params to validate initial values self.assertAllClose([1.0, 2.0], var0.eval()) self.assertAllClose([3.0, 4.0], var1.eval()) # Step 1: the momentum accumulators where 0. So we should see a normal # update: v -= grad * learning_rate mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType(np.array([0.1, 0.1]), slot0.eval()) self.assertAllCloseAccordingToType(np.array([0.01, 0.01]), slot1.eval()) # Check that the parameters have been updated. self.assertAllCloseAccordingToType( np.array([1.0 - (0.1 * 2.0), 2.0 - (0.1 * 2.0)]), var0.eval()) self.assertAllCloseAccordingToType( np.array([3.0 - (0.01 * 2.0), 4.0 - (0.01 * 2.0)]), var1.eval()) # Step 2: the momentum accumulators contain the previous update. mom_update.run() # Check that the momentum accumulators have been updated. self.assertAllCloseAccordingToType( np.array([(0.9 * 0.1 + 0.1), (0.9 * 0.1 + 0.1)]), slot0.eval()) self.assertAllCloseAccordingToType( np.array([(0.9 * 0.01 + 0.01), (0.9 * 0.01 + 0.01)]), slot1.eval()) # Check that the parameters have been updated. 
self.assertAllCloseAccordingToType( np.array([ 1.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0), 2.0 - (0.1 * 2.0) - ((0.9 * 0.1 + 0.1) * 2.0) ]), var0.eval()) self.assertAllCloseAccordingToType( np.array([ 2.98 - ((0.9 * 0.01 + 0.01) * 2.0), 3.98 - ( (0.9 * 0.01 + 0.01) * 2.0) ]), var1.eval()) if __name__ == "__main__": test.main()
apache-2.0
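The hand-computed expectations in testBasic follow the plain (non-Nesterov) momentum update; a short NumPy sketch of those two steps for var0, using the same learning rate, momentum and gradients as the test.

import numpy as np

lr, momentum = 2.0, 0.9
var = np.array([1.0, 2.0])
accum = np.zeros_like(var)
grad = np.array([0.1, 0.1])

for _ in range(2):
    accum = momentum * accum + grad   # slot ("momentum") accumulator
    var = var - lr * accum            # parameter update

# After step 2:
# var == [1.0 - 0.1*2.0 - (0.9*0.1 + 0.1)*2.0,
#         2.0 - 0.1*2.0 - (0.9*0.1 + 0.1)*2.0]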
FireWRT/OpenWrt-Firefly-Libraries
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_sys_settrace.py
19
25252
# Testing the line trace facility. from test import support import unittest import sys import difflib import gc # A very basic example. If this fails, we're in deep trouble. def basic(): return 1 basic.events = [(0, 'call'), (1, 'line'), (1, 'return')] # Many of the tests below are tricky because they involve pass statements. # If there is implicit control flow around a pass statement (in an except # clause or else caluse) under what conditions do you set a line number # following that clause? # The entire "while 0:" statement is optimized away. No code # exists for it, so the line numbers skip directly from "del x" # to "x = 1". def arigo_example(): x = 1 del x while 0: pass x = 1 arigo_example.events = [(0, 'call'), (1, 'line'), (2, 'line'), (5, 'line'), (5, 'return')] # check that lines consisting of just one instruction get traced: def one_instr_line(): x = 1 del x x = 1 one_instr_line.events = [(0, 'call'), (1, 'line'), (2, 'line'), (3, 'line'), (3, 'return')] def no_pop_tops(): # 0 x = 1 # 1 for a in range(2): # 2 if a: # 3 x = 1 # 4 else: # 5 x = 1 # 6 no_pop_tops.events = [(0, 'call'), (1, 'line'), (2, 'line'), (3, 'line'), (6, 'line'), (2, 'line'), (3, 'line'), (4, 'line'), (2, 'line'), (2, 'return')] def no_pop_blocks(): y = 1 while not y: bla x = 1 no_pop_blocks.events = [(0, 'call'), (1, 'line'), (2, 'line'), (4, 'line'), (4, 'return')] def called(): # line -3 x = 1 def call(): # line 0 called() call.events = [(0, 'call'), (1, 'line'), (-3, 'call'), (-2, 'line'), (-2, 'return'), (1, 'return')] def raises(): raise Exception def test_raise(): try: raises() except Exception as exc: x = 1 test_raise.events = [(0, 'call'), (1, 'line'), (2, 'line'), (-3, 'call'), (-2, 'line'), (-2, 'exception'), (-2, 'return'), (2, 'exception'), (3, 'line'), (4, 'line'), (4, 'return')] def _settrace_and_return(tracefunc): sys.settrace(tracefunc) sys._getframe().f_back.f_trace = tracefunc def settrace_and_return(tracefunc): _settrace_and_return(tracefunc) settrace_and_return.events = [(1, 'return')] def _settrace_and_raise(tracefunc): sys.settrace(tracefunc) sys._getframe().f_back.f_trace = tracefunc raise RuntimeError def settrace_and_raise(tracefunc): try: _settrace_and_raise(tracefunc) except RuntimeError as exc: pass settrace_and_raise.events = [(2, 'exception'), (3, 'line'), (4, 'line'), (4, 'return')] # implicit return example # This test is interesting because of the else: pass # part of the code. The code generate for the true # part of the if contains a jump past the else branch. # The compiler then generates an implicit "return None" # Internally, the compiler visits the pass statement # and stores its line number for use on the next instruction. # The next instruction is the implicit return None. 
def ireturn_example(): a = 5 b = 5 if a == b: b = a+1 else: pass ireturn_example.events = [(0, 'call'), (1, 'line'), (2, 'line'), (3, 'line'), (4, 'line'), (6, 'line'), (6, 'return')] # Tight loop with while(1) example (SF #765624) def tightloop_example(): items = range(0, 3) try: i = 0 while 1: b = items[i]; i+=1 except IndexError: pass tightloop_example.events = [(0, 'call'), (1, 'line'), (2, 'line'), (3, 'line'), (4, 'line'), (5, 'line'), (5, 'line'), (5, 'line'), (5, 'line'), (5, 'exception'), (6, 'line'), (7, 'line'), (7, 'return')] def tighterloop_example(): items = range(1, 4) try: i = 0 while 1: i = items[i] except IndexError: pass tighterloop_example.events = [(0, 'call'), (1, 'line'), (2, 'line'), (3, 'line'), (4, 'line'), (4, 'line'), (4, 'line'), (4, 'line'), (4, 'exception'), (5, 'line'), (6, 'line'), (6, 'return')] def generator_function(): try: yield True "continued" finally: "finally" def generator_example(): # any() will leave the generator before its end x = any(generator_function()) # the following lines were not traced for x in range(10): y = x generator_example.events = ([(0, 'call'), (2, 'line'), (-6, 'call'), (-5, 'line'), (-4, 'line'), (-4, 'return'), (-4, 'call'), (-4, 'exception'), (-1, 'line'), (-1, 'return')] + [(5, 'line'), (6, 'line')] * 10 + [(5, 'line'), (5, 'return')]) class Tracer: def __init__(self): self.events = [] def trace(self, frame, event, arg): self.events.append((frame.f_lineno, event)) return self.trace def traceWithGenexp(self, frame, event, arg): (o for o in [1]) self.events.append((frame.f_lineno, event)) return self.trace class TraceTestCase(unittest.TestCase): # Disable gc collection when tracing, otherwise the # deallocators may be traced as well. def setUp(self): self.using_gc = gc.isenabled() gc.disable() self.addCleanup(sys.settrace, sys.gettrace()) def tearDown(self): if self.using_gc: gc.enable() def compare_events(self, line_offset, events, expected_events): events = [(l - line_offset, e) for (l, e) in events] if events != expected_events: self.fail( "events did not match expectation:\n" + "\n".join(difflib.ndiff([str(x) for x in expected_events], [str(x) for x in events]))) def run_and_compare(self, func, events): tracer = Tracer() sys.settrace(tracer.trace) func() sys.settrace(None) self.compare_events(func.__code__.co_firstlineno, tracer.events, events) def run_test(self, func): self.run_and_compare(func, func.events) def run_test2(self, func): tracer = Tracer() func(tracer.trace) sys.settrace(None) self.compare_events(func.__code__.co_firstlineno, tracer.events, func.events) def test_set_and_retrieve_none(self): sys.settrace(None) assert sys.gettrace() is None def test_set_and_retrieve_func(self): def fn(*args): pass sys.settrace(fn) try: assert sys.gettrace() is fn finally: sys.settrace(None) def test_01_basic(self): self.run_test(basic) def test_02_arigo(self): self.run_test(arigo_example) def test_03_one_instr(self): self.run_test(one_instr_line) def test_04_no_pop_blocks(self): self.run_test(no_pop_blocks) def test_05_no_pop_tops(self): self.run_test(no_pop_tops) def test_06_call(self): self.run_test(call) def test_07_raise(self): self.run_test(test_raise) def test_08_settrace_and_return(self): self.run_test2(settrace_and_return) def test_09_settrace_and_raise(self): self.run_test2(settrace_and_raise) def test_10_ireturn(self): self.run_test(ireturn_example) def test_11_tightloop(self): self.run_test(tightloop_example) def test_12_tighterloop(self): self.run_test(tighterloop_example) def test_13_genexp(self): 
self.run_test(generator_example) # issue1265: if the trace function contains a generator, # and if the traced function contains another generator # that is not completely exhausted, the trace stopped. # Worse: the 'finally' clause was not invoked. tracer = Tracer() sys.settrace(tracer.traceWithGenexp) generator_example() sys.settrace(None) self.compare_events(generator_example.__code__.co_firstlineno, tracer.events, generator_example.events) def test_14_onliner_if(self): def onliners(): if True: False else: True return 0 self.run_and_compare( onliners, [(0, 'call'), (1, 'line'), (3, 'line'), (3, 'return')]) def test_15_loops(self): # issue1750076: "while" expression is skipped by debugger def for_example(): for x in range(2): pass self.run_and_compare( for_example, [(0, 'call'), (1, 'line'), (2, 'line'), (1, 'line'), (2, 'line'), (1, 'line'), (1, 'return')]) def while_example(): # While expression should be traced on every loop x = 2 while x > 0: x -= 1 self.run_and_compare( while_example, [(0, 'call'), (2, 'line'), (3, 'line'), (4, 'line'), (3, 'line'), (4, 'line'), (3, 'line'), (3, 'return')]) def test_16_blank_lines(self): namespace = {} exec("def f():\n" + "\n" * 256 + " pass", namespace) self.run_and_compare( namespace["f"], [(0, 'call'), (257, 'line'), (257, 'return')]) class RaisingTraceFuncTestCase(unittest.TestCase): def setUp(self): self.addCleanup(sys.settrace, sys.gettrace()) def trace(self, frame, event, arg): """A trace function that raises an exception in response to a specific trace event.""" if event == self.raiseOnEvent: raise ValueError # just something that isn't RuntimeError else: return self.trace def f(self): """The function to trace; raises an exception if that's the case we're testing, so that the 'exception' trace event fires.""" if self.raiseOnEvent == 'exception': x = 0 y = 1/x else: return 1 def run_test_for_event(self, event): """Tests that an exception raised in response to the given event is handled OK.""" self.raiseOnEvent = event try: for i in range(sys.getrecursionlimit() + 1): sys.settrace(self.trace) try: self.f() except ValueError: pass else: self.fail("exception not raised!") except RuntimeError: self.fail("recursion counter not reset") # Test the handling of exceptions raised by each kind of trace event. def test_call(self): self.run_test_for_event('call') def test_line(self): self.run_test_for_event('line') def test_return(self): self.run_test_for_event('return') def test_exception(self): self.run_test_for_event('exception') def test_trash_stack(self): def f(): for i in range(5): print(i) # line tracing will raise an exception at this line def g(frame, why, extra): if (why == 'line' and frame.f_lineno == f.__code__.co_firstlineno + 2): raise RuntimeError("i am crashing") return g sys.settrace(g) try: f() except RuntimeError: # the test is really that this doesn't segfault: import gc gc.collect() else: self.fail("exception not propagated") def test_exception_arguments(self): def f(): x = 0 # this should raise an error x.no_such_attr def g(frame, event, arg): if (event == 'exception'): type, exception, trace = arg self.assertIsInstance(exception, Exception) return g existing = sys.gettrace() try: sys.settrace(g) try: f() except AttributeError: # this is expected pass finally: sys.settrace(existing) # 'Jump' tests: assigning to frame.f_lineno within a trace function # moves the execution position - it's how debuggers implement a Jump # command (aka. "Set next statement"). 
class JumpTracer: """Defines a trace function that jumps from one place to another, with the source and destination lines of the jump being defined by the 'jump' property of the function under test.""" def __init__(self, function): self.function = function self.jumpFrom = function.jump[0] self.jumpTo = function.jump[1] self.done = False def trace(self, frame, event, arg): if not self.done and frame.f_code == self.function.__code__: firstLine = frame.f_code.co_firstlineno if event == 'line' and frame.f_lineno == firstLine + self.jumpFrom: # Cope with non-integer self.jumpTo (because of # no_jump_to_non_integers below). try: frame.f_lineno = firstLine + self.jumpTo except TypeError: frame.f_lineno = self.jumpTo self.done = True return self.trace # The first set of 'jump' tests are for things that are allowed: def jump_simple_forwards(output): output.append(1) output.append(2) output.append(3) jump_simple_forwards.jump = (1, 3) jump_simple_forwards.output = [3] def jump_simple_backwards(output): output.append(1) output.append(2) jump_simple_backwards.jump = (2, 1) jump_simple_backwards.output = [1, 1, 2] def jump_out_of_block_forwards(output): for i in 1, 2: output.append(2) for j in [3]: # Also tests jumping over a block output.append(4) output.append(5) jump_out_of_block_forwards.jump = (3, 5) jump_out_of_block_forwards.output = [2, 5] def jump_out_of_block_backwards(output): output.append(1) for i in [1]: output.append(3) for j in [2]: # Also tests jumping over a block output.append(5) output.append(6) output.append(7) jump_out_of_block_backwards.jump = (6, 1) jump_out_of_block_backwards.output = [1, 3, 5, 1, 3, 5, 6, 7] def jump_to_codeless_line(output): output.append(1) # Jumping to this line should skip to the next one. output.append(3) jump_to_codeless_line.jump = (1, 2) jump_to_codeless_line.output = [3] def jump_to_same_line(output): output.append(1) output.append(2) output.append(3) jump_to_same_line.jump = (2, 2) jump_to_same_line.output = [1, 2, 3] # Tests jumping within a finally block, and over one. def jump_in_nested_finally(output): try: output.append(2) finally: output.append(4) try: output.append(6) finally: output.append(8) output.append(9) jump_in_nested_finally.jump = (4, 9) jump_in_nested_finally.output = [2, 9] def jump_infinite_while_loop(output): output.append(1) while 1: output.append(2) output.append(3) jump_infinite_while_loop.jump = (3, 4) jump_infinite_while_loop.output = [1, 3] # The second set of 'jump' tests are for things that are not allowed: def no_jump_too_far_forwards(output): try: output.append(2) output.append(3) except ValueError as e: output.append('after' in str(e)) no_jump_too_far_forwards.jump = (3, 6) no_jump_too_far_forwards.output = [2, True] def no_jump_too_far_backwards(output): try: output.append(2) output.append(3) except ValueError as e: output.append('before' in str(e)) no_jump_too_far_backwards.jump = (3, -1) no_jump_too_far_backwards.output = [2, True] # Test each kind of 'except' line. 
def no_jump_to_except_1(output): try: output.append(2) except: e = sys.exc_info()[1] output.append('except' in str(e)) no_jump_to_except_1.jump = (2, 3) no_jump_to_except_1.output = [True] def no_jump_to_except_2(output): try: output.append(2) except ValueError: e = sys.exc_info()[1] output.append('except' in str(e)) no_jump_to_except_2.jump = (2, 3) no_jump_to_except_2.output = [True] def no_jump_to_except_3(output): try: output.append(2) except ValueError as e: output.append('except' in str(e)) no_jump_to_except_3.jump = (2, 3) no_jump_to_except_3.output = [True] def no_jump_to_except_4(output): try: output.append(2) except (ValueError, RuntimeError) as e: output.append('except' in str(e)) no_jump_to_except_4.jump = (2, 3) no_jump_to_except_4.output = [True] def no_jump_forwards_into_block(output): try: output.append(2) for i in 1, 2: output.append(4) except ValueError as e: output.append('into' in str(e)) no_jump_forwards_into_block.jump = (2, 4) no_jump_forwards_into_block.output = [True] def no_jump_backwards_into_block(output): try: for i in 1, 2: output.append(3) output.append(4) except ValueError as e: output.append('into' in str(e)) no_jump_backwards_into_block.jump = (4, 3) no_jump_backwards_into_block.output = [3, 3, True] def no_jump_into_finally_block(output): try: try: output.append(3) x = 1 finally: output.append(6) except ValueError as e: output.append('finally' in str(e)) no_jump_into_finally_block.jump = (4, 6) no_jump_into_finally_block.output = [3, 6, True] # The 'finally' still runs def no_jump_out_of_finally_block(output): try: try: output.append(3) finally: output.append(5) output.append(6) except ValueError as e: output.append('finally' in str(e)) no_jump_out_of_finally_block.jump = (5, 1) no_jump_out_of_finally_block.output = [3, True] # This verifies the line-numbers-must-be-integers rule. def no_jump_to_non_integers(output): try: output.append(2) except ValueError as e: output.append('integer' in str(e)) no_jump_to_non_integers.jump = (2, "Spam") no_jump_to_non_integers.output = [True] def jump_across_with(output): with open(support.TESTFN, "wb") as fp: pass with open(support.TESTFN, "wb") as fp: pass jump_across_with.jump = (1, 3) jump_across_with.output = [] # This verifies that you can't set f_lineno via _getframe or similar # trickery. def no_jump_without_trace_function(): try: previous_frame = sys._getframe().f_back previous_frame.f_lineno = previous_frame.f_lineno except ValueError as e: # This is the exception we wanted; make sure the error message # talks about trace functions. if 'trace' not in str(e): raise else: # Something's wrong - the expected exception wasn't raised. 
raise RuntimeError("Trace-function-less jump failed to fail") class JumpTestCase(unittest.TestCase): def setUp(self): self.addCleanup(sys.settrace, sys.gettrace()) sys.settrace(None) def compare_jump_output(self, expected, received): if received != expected: self.fail( "Outputs don't match:\n" + "Expected: " + repr(expected) + "\n" + "Received: " + repr(received)) def run_test(self, func): tracer = JumpTracer(func) sys.settrace(tracer.trace) output = [] func(output) sys.settrace(None) self.compare_jump_output(func.output, output) def test_01_jump_simple_forwards(self): self.run_test(jump_simple_forwards) def test_02_jump_simple_backwards(self): self.run_test(jump_simple_backwards) def test_03_jump_out_of_block_forwards(self): self.run_test(jump_out_of_block_forwards) def test_04_jump_out_of_block_backwards(self): self.run_test(jump_out_of_block_backwards) def test_05_jump_to_codeless_line(self): self.run_test(jump_to_codeless_line) def test_06_jump_to_same_line(self): self.run_test(jump_to_same_line) def test_07_jump_in_nested_finally(self): self.run_test(jump_in_nested_finally) def test_jump_infinite_while_loop(self): self.run_test(jump_infinite_while_loop) def test_08_no_jump_too_far_forwards(self): self.run_test(no_jump_too_far_forwards) def test_09_no_jump_too_far_backwards(self): self.run_test(no_jump_too_far_backwards) def test_10_no_jump_to_except_1(self): self.run_test(no_jump_to_except_1) def test_11_no_jump_to_except_2(self): self.run_test(no_jump_to_except_2) def test_12_no_jump_to_except_3(self): self.run_test(no_jump_to_except_3) def test_13_no_jump_to_except_4(self): self.run_test(no_jump_to_except_4) def test_14_no_jump_forwards_into_block(self): self.run_test(no_jump_forwards_into_block) def test_15_no_jump_backwards_into_block(self): self.run_test(no_jump_backwards_into_block) def test_16_no_jump_into_finally_block(self): self.run_test(no_jump_into_finally_block) def test_17_no_jump_out_of_finally_block(self): self.run_test(no_jump_out_of_finally_block) def test_18_no_jump_to_non_integers(self): self.run_test(no_jump_to_non_integers) def test_19_no_jump_without_trace_function(self): # Must set sys.settrace(None) in setUp(), else condition is not # triggered. no_jump_without_trace_function() def test_jump_across_with(self): self.addCleanup(support.unlink, support.TESTFN) self.run_test(jump_across_with) def test_20_large_function(self): d = {} exec("""def f(output): # line 0 x = 0 # line 1 y = 1 # line 2 ''' # line 3 %s # lines 4-1004 ''' # line 1005 x += 1 # line 1006 output.append(x) # line 1007 return""" % ('\n' * 1000,), d) f = d['f'] f.jump = (2, 1007) f.output = [0] self.run_test(f) def test_jump_to_firstlineno(self): # This tests that PDB can jump back to the first line in a # file. See issue #1689458. It can only be triggered in a # function call if the function is defined on a single line. code = compile(""" # Comments don't count. output.append(2) # firstlineno is here. output.append(3) output.append(4) """, "<fake module>", "exec") class fake_function: __code__ = code jump = (2, 0) tracer = JumpTracer(fake_function) sys.settrace(tracer.trace) namespace = {"output": []} exec(code, namespace) sys.settrace(None) self.compare_jump_output([2, 3, 2, 3, 4], namespace["output"]) def test_main(): support.run_unittest( TraceTestCase, RaisingTraceFuncTestCase, JumpTestCase ) if __name__ == "__main__": test_main()
gpl-2.0
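A minimal, self-contained illustration of the trace-function protocol these tests exercise, independent of the test harness; line numbers are reported relative to the traced function, as in the expected-event lists above.

import sys

events = []

def tracer(frame, event, arg):
    events.append((frame.f_lineno - tracee.__code__.co_firstlineno, event))
    return tracer  # returning the tracer enables per-line tracing for this frame

def tracee():
    x = 1
    return x

sys.settrace(tracer)
tracee()
sys.settrace(None)
# events == [(0, 'call'), (1, 'line'), (2, 'line'), (2, 'return')]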
FireWRT/OpenWrt-Firefly-Libraries
staging_dir/host/lib/python3.4/distutils/tests/test_cygwinccompiler.py
145
5671
"""Tests for distutils.cygwinccompiler.""" import unittest import sys import os from io import BytesIO import subprocess from test.support import run_unittest from distutils import cygwinccompiler from distutils.cygwinccompiler import (CygwinCCompiler, check_config_h, CONFIG_H_OK, CONFIG_H_NOTOK, CONFIG_H_UNCERTAIN, get_versions, get_msvcr) from distutils.tests import support class FakePopen(object): test_class = None def __init__(self, cmd, shell, stdout): self.cmd = cmd.split()[0] exes = self.test_class._exes if self.cmd in exes: # issue #6438 in Python 3.x, Popen returns bytes self.stdout = BytesIO(exes[self.cmd]) else: self.stdout = os.popen(cmd, 'r') class CygwinCCompilerTestCase(support.TempdirManager, unittest.TestCase): def setUp(self): super(CygwinCCompilerTestCase, self).setUp() self.version = sys.version self.python_h = os.path.join(self.mkdtemp(), 'python.h') from distutils import sysconfig self.old_get_config_h_filename = sysconfig.get_config_h_filename sysconfig.get_config_h_filename = self._get_config_h_filename self.old_find_executable = cygwinccompiler.find_executable cygwinccompiler.find_executable = self._find_executable self._exes = {} self.old_popen = cygwinccompiler.Popen FakePopen.test_class = self cygwinccompiler.Popen = FakePopen def tearDown(self): sys.version = self.version from distutils import sysconfig sysconfig.get_config_h_filename = self.old_get_config_h_filename cygwinccompiler.find_executable = self.old_find_executable cygwinccompiler.Popen = self.old_popen super(CygwinCCompilerTestCase, self).tearDown() def _get_config_h_filename(self): return self.python_h def _find_executable(self, name): if name in self._exes: return name return None def test_check_config_h(self): # check_config_h looks for "GCC" in sys.version first # returns CONFIG_H_OK if found sys.version = ('2.6.1 (r261:67515, Dec 6 2008, 16:42:21) \n[GCC ' '4.0.1 (Apple Computer, Inc. 
build 5370)]') self.assertEqual(check_config_h()[0], CONFIG_H_OK) # then it tries to see if it can find "__GNUC__" in pyconfig.h sys.version = 'something without the *CC word' # if the file doesn't exist it returns CONFIG_H_UNCERTAIN self.assertEqual(check_config_h()[0], CONFIG_H_UNCERTAIN) # if it exists but does not contain __GNUC__, it returns CONFIG_H_NOTOK self.write_file(self.python_h, 'xxx') self.assertEqual(check_config_h()[0], CONFIG_H_NOTOK) # and CONFIG_H_OK if __GNUC__ is found self.write_file(self.python_h, 'xxx __GNUC__ xxx') self.assertEqual(check_config_h()[0], CONFIG_H_OK) def test_get_versions(self): # get_versions calls distutils.spawn.find_executable on # 'gcc', 'ld' and 'dllwrap' self.assertEqual(get_versions(), (None, None, None)) # Let's fake we have 'gcc' and it returns '3.4.5' self._exes['gcc'] = b'gcc (GCC) 3.4.5 (mingw special)\nFSF' res = get_versions() self.assertEqual(str(res[0]), '3.4.5') # and let's see what happens when the version # doesn't match the regular expression # (\d+\.\d+(\.\d+)*) self._exes['gcc'] = b'very strange output' res = get_versions() self.assertEqual(res[0], None) # same thing for ld self._exes['ld'] = b'GNU ld version 2.17.50 20060824' res = get_versions() self.assertEqual(str(res[1]), '2.17.50') self._exes['ld'] = b'@(#)PROGRAM:ld PROJECT:ld64-77' res = get_versions() self.assertEqual(res[1], None) # and dllwrap self._exes['dllwrap'] = b'GNU dllwrap 2.17.50 20060824\nFSF' res = get_versions() self.assertEqual(str(res[2]), '2.17.50') self._exes['dllwrap'] = b'Cheese Wrap' res = get_versions() self.assertEqual(res[2], None) def test_get_msvcr(self): # none sys.version = ('2.6.1 (r261:67515, Dec 6 2008, 16:42:21) ' '\n[GCC 4.0.1 (Apple Computer, Inc. build 5370)]') self.assertEqual(get_msvcr(), None) # MSVC 7.0 sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' '[MSC v.1300 32 bits (Intel)]') self.assertEqual(get_msvcr(), ['msvcr70']) # MSVC 7.1 sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' '[MSC v.1310 32 bits (Intel)]') self.assertEqual(get_msvcr(), ['msvcr71']) # VS2005 / MSVC 8.0 sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' '[MSC v.1400 32 bits (Intel)]') self.assertEqual(get_msvcr(), ['msvcr80']) # VS2008 / MSVC 9.0 sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' '[MSC v.1500 32 bits (Intel)]') self.assertEqual(get_msvcr(), ['msvcr90']) # unknown sys.version = ('2.5.1 (r251:54863, Apr 18 2007, 08:51:08) ' '[MSC v.1999 32 bits (Intel)]') self.assertRaises(ValueError, get_msvcr) def test_suite(): return unittest.makeSuite(CygwinCCompilerTestCase) if __name__ == '__main__': run_unittest(test_suite())
gpl-2.0
aronbierbaum/txsuds
suds/sax/element.py
175
36480
# This program is free software; you can redistribute it and/or modify # it under the terms of the (LGPL) GNU Lesser General Public License as # published by the Free Software Foundation; either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library Lesser General Public License for more details at # ( http://www.gnu.org/licenses/lgpl.html ). # # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # written by: Jeff Ortel ( jortel@redhat.com ) """ Provides XML I{element} classes. """ from logging import getLogger from suds import * from suds.sax import * from suds.sax.text import Text from suds.sax.attribute import Attribute import sys if sys.version_info < (2, 4, 0): from sets import Set as set del sys log = getLogger(__name__) class Element: """ An XML element object. @ivar parent: The node containing this attribute @type parent: L{Element} @ivar prefix: The I{optional} namespace prefix. @type prefix: basestring @ivar name: The I{unqualified} name of the attribute @type name: basestring @ivar expns: An explicit namespace (xmlns="..."). @type expns: (I{prefix}, I{name}) @ivar nsprefixes: A mapping of prefixes to namespaces. @type nsprefixes: dict @ivar attributes: A list of XML attributes. @type attributes: [I{Attribute},] @ivar text: The element's I{text} content. @type text: basestring @ivar children: A list of child elements. @type children: [I{Element},] @cvar matcher: A collection of I{lambda} for string matching. @cvar specialprefixes: A dictionary of builtin-special prefixes. """ matcher = \ { 'eq': lambda a,b: a == b, 'startswith' : lambda a,b: a.startswith(b), 'endswith' : lambda a,b: a.endswith(b), 'contains' : lambda a,b: b in a } specialprefixes = { Namespace.xmlns[0] : Namespace.xmlns[1] } @classmethod def buildPath(self, parent, path): """ Build the specifed pat as a/b/c where missing intermediate nodes are built automatically. @param parent: A parent element on which the path is built. @type parent: I{Element} @param path: A simple path separated by (/). @type path: basestring @return: The leaf node of I{path}. @rtype: L{Element} """ for tag in path.split('/'): child = parent.getChild(tag) if child is None: child = Element(tag, parent) parent = child return child def __init__(self, name, parent=None, ns=None): """ @param name: The element's (tag) name. May cotain a prefix. @type name: basestring @param parent: An optional parent element. @type parent: I{Element} @param ns: An optional namespace @type ns: (I{prefix}, I{name}) """ self.rename(name) self.expns = None self.nsprefixes = {} self.attributes = [] self.text = None if parent is not None: if isinstance(parent, Element): self.parent = parent else: raise Exception('parent (%s) not-valid', parent.__class__.__name__) else: self.parent = None self.children = [] self.applyns(ns) def rename(self, name): """ Rename the element. @param name: A new name for the element. @type name: basestring """ if name is None: raise Exception('name (%s) not-valid' % name) else: self.prefix, self.name = splitPrefix(name) def setPrefix(self, p, u=None): """ Set the element namespace prefix. @param p: A new prefix for the element. 
@type p: basestring @param u: A namespace URI to be mapped to the prefix. @type u: basestring @return: self @rtype: L{Element} """ self.prefix = p if p is not None and u is not None: self.addPrefix(p, u) return self def qname(self): """ Get the B{fully} qualified name of this element @return: The fully qualified name. @rtype: basestring """ if self.prefix is None: return self.name else: return '%s:%s' % (self.prefix, self.name) def getRoot(self): """ Get the root (top) node of the tree. @return: The I{top} node of this tree. @rtype: I{Element} """ if self.parent is None: return self else: return self.parent.getRoot() def clone(self, parent=None): """ Deep clone of this element and children. @param parent: An optional parent for the copied fragment. @type parent: I{Element} @return: A deep copy parented by I{parent} @rtype: I{Element} """ root = Element(self.qname(), parent, self.namespace()) for a in self.attributes: root.append(a.clone(self)) for c in self.children: root.append(c.clone(self)) for item in self.nsprefixes.items(): root.addPrefix(item[0], item[1]) return root def detach(self): """ Detach from parent. @return: This element removed from its parent's child list and I{parent}=I{None} @rtype: L{Element} """ if self.parent is not None: if self in self.parent.children: self.parent.children.remove(self) self.parent = None return self def set(self, name, value): """ Set an attribute's value. @param name: The name of the attribute. @type name: basestring @param value: The attribute value. @type value: basestring @see: __setitem__() """ attr = self.getAttribute(name) if attr is None: attr = Attribute(name, value) self.append(attr) else: attr.setValue(value) def unset(self, name): """ Unset (remove) an attribute. @param name: The attribute name. @type name: str @return: self @rtype: L{Element} """ try: attr = self.getAttribute(name) self.attributes.remove(attr) except: pass return self def get(self, name, ns=None, default=None): """ Get the value of an attribute by name. @param name: The name of the attribute. @type name: basestring @param ns: The optional attribute's namespace. @type ns: (I{prefix}, I{name}) @param default: An optional value to be returned when either the attribute does not exist of has not value. @type default: basestring @return: The attribute's value or I{default} @rtype: basestring @see: __getitem__() """ attr = self.getAttribute(name, ns) if attr is None or attr.value is None: return default else: return attr.getValue() def setText(self, value): """ Set the element's L{Text} content. @param value: The element's text value. @type value: basestring @return: self @rtype: I{Element} """ if isinstance(value, Text): self.text = value else: self.text = Text(value) return self def getText(self, default=None): """ Get the element's L{Text} content with optional default @param default: A value to be returned when no text content exists. @type default: basestring @return: The text content, or I{default} @rtype: L{Text} """ if self.hasText(): return self.text else: return default def trim(self): """ Trim leading and trailing whitespace. @return: self @rtype: L{Element} """ if self.hasText(): self.text = self.text.trim() return self def hasText(self): """ Get whether the element has I{text} and that it is not an empty (zero length) string. @return: True when has I{text}. @rtype: boolean """ return ( self.text is not None and len(self.text) ) def namespace(self): """ Get the element's namespace. 
@return: The element's namespace by resolving the prefix, the explicit namespace or the inherited namespace. @rtype: (I{prefix}, I{name}) """ if self.prefix is None: return self.defaultNamespace() else: return self.resolvePrefix(self.prefix) def defaultNamespace(self): """ Get the default (unqualified namespace). This is the expns of the first node (looking up the tree) that has it set. @return: The namespace of a node when not qualified. @rtype: (I{prefix}, I{name}) """ p = self while p is not None: if p.expns is not None: return (None, p.expns) else: p = p.parent return Namespace.default def append(self, objects): """ Append the specified child based on whether it is an element or an attrbuite. @param objects: A (single|collection) of attribute(s) or element(s) to be added as children. @type objects: (L{Element}|L{Attribute}) @return: self @rtype: L{Element} """ if not isinstance(objects, (list, tuple)): objects = (objects,) for child in objects: if isinstance(child, Element): self.children.append(child) child.parent = self continue if isinstance(child, Attribute): self.attributes.append(child) child.parent = self continue raise Exception('append %s not-valid' % child.__class__.__name__) return self def insert(self, objects, index=0): """ Insert an L{Element} content at the specified index. @param objects: A (single|collection) of attribute(s) or element(s) to be added as children. @type objects: (L{Element}|L{Attribute}) @param index: The position in the list of children to insert. @type index: int @return: self @rtype: L{Element} """ objects = (objects,) for child in objects: if isinstance(child, Element): self.children.insert(index, child) child.parent = self else: raise Exception('append %s not-valid' % child.__class__.__name__) return self def remove(self, child): """ Remove the specified child element or attribute. @param child: A child to remove. @type child: L{Element}|L{Attribute} @return: The detached I{child} when I{child} is an element, else None. @rtype: L{Element}|None """ if isinstance(child, Element): return child.detach() if isinstance(child, Attribute): self.attributes.remove(child) return None def replaceChild(self, child, content): """ Replace I{child} with the specified I{content}. @param child: A child element. @type child: L{Element} @param content: An element or collection of elements. @type content: L{Element} or [L{Element},] """ if child not in self.children: raise Exception('child not-found') index = self.children.index(child) self.remove(child) if not isinstance(content, (list, tuple)): content = (content,) for node in content: self.children.insert(index, node.detach()) node.parent = self index += 1 def getAttribute(self, name, ns=None, default=None): """ Get an attribute by name and (optional) namespace @param name: The name of a contained attribute (may contain prefix). @type name: basestring @param ns: An optional namespace @type ns: (I{prefix}, I{name}) @param default: Returned when attribute not-found. @type default: L{Attribute} @return: The requested attribute object. @rtype: L{Attribute} """ if ns is None: prefix, name = splitPrefix(name) if prefix is None: ns = None else: ns = self.resolvePrefix(prefix) for a in self.attributes: if a.match(name, ns): return a return default def getChild(self, name, ns=None, default=None): """ Get a child by (optional) name and/or (optional) namespace. @param name: The name of a child element (may contain prefix). @type name: basestring @param ns: An optional namespace used to match the child. 
@type ns: (I{prefix}, I{name}) @param default: Returned when child not-found. @type default: L{Element} @return: The requested child, or I{default} when not-found. @rtype: L{Element} """ if ns is None: prefix, name = splitPrefix(name) if prefix is None: ns = None else: ns = self.resolvePrefix(prefix) for c in self.children: if c.match(name, ns): return c return default def childAtPath(self, path): """ Get a child at I{path} where I{path} is a (/) separated list of element names that are expected to be children. @param path: A (/) separated list of element names. @type path: basestring @return: The leaf node at the end of I{path} @rtype: L{Element} """ result = None node = self for name in [p for p in path.split('/') if len(p) > 0]: ns = None prefix, name = splitPrefix(name) if prefix is not None: ns = node.resolvePrefix(prefix) result = node.getChild(name, ns) if result is None: break; else: node = result return result def childrenAtPath(self, path): """ Get a list of children at I{path} where I{path} is a (/) separated list of element names that are expected to be children. @param path: A (/) separated list of element names. @type path: basestring @return: The collection leaf nodes at the end of I{path} @rtype: [L{Element},...] """ parts = [p for p in path.split('/') if len(p) > 0] if len(parts) == 1: result = self.getChildren(path) else: result = self.__childrenAtPath(parts) return result def getChildren(self, name=None, ns=None): """ Get a list of children by (optional) name and/or (optional) namespace. @param name: The name of a child element (may contain prefix). @type name: basestring @param ns: An optional namespace used to match the child. @type ns: (I{prefix}, I{name}) @return: The list of matching children. @rtype: [L{Element},...] """ if ns is None: if name is None: return self.children prefix, name = splitPrefix(name) if prefix is None: ns = None else: ns = self.resolvePrefix(prefix) return [c for c in self.children if c.match(name, ns)] def detachChildren(self): """ Detach and return this element's children. @return: The element's children (detached). @rtype: [L{Element},...] """ detached = self.children self.children = [] for child in detached: child.parent = None return detached def resolvePrefix(self, prefix, default=Namespace.default): """ Resolve the specified prefix to a namespace. The I{nsprefixes} is searched. If not found, it walks up the tree until either resolved or the top of the tree is reached. Searching up the tree provides for inherited mappings. @param prefix: A namespace prefix to resolve. @type prefix: basestring @param default: An optional value to be returned when the prefix cannot be resolved. @type default: (I{prefix},I{URI}) @return: The namespace that is mapped to I{prefix} in this context. @rtype: (I{prefix},I{URI}) """ n = self while n is not None: if prefix in n.nsprefixes: return (prefix, n.nsprefixes[prefix]) if prefix in self.specialprefixes: return (prefix, self.specialprefixes[prefix]) n = n.parent return default def addPrefix(self, p, u): """ Add or update a prefix mapping. @param p: A prefix. @type p: basestring @param u: A namespace URI. @type u: basestring @return: self @rtype: L{Element} """ self.nsprefixes[p] = u return self def updatePrefix(self, p, u): """ Update (redefine) a prefix mapping for the branch. @param p: A prefix. @type p: basestring @param u: A namespace URI. @type u: basestring @return: self @rtype: L{Element} @note: This method traverses down the entire branch! 
""" if p in self.nsprefixes: self.nsprefixes[p] = u for c in self.children: c.updatePrefix(p, u) return self def clearPrefix(self, prefix): """ Clear the specified prefix from the prefix mappings. @param prefix: A prefix to clear. @type prefix: basestring @return: self @rtype: L{Element} """ if prefix in self.nsprefixes: del self.nsprefixes[prefix] return self def findPrefix(self, uri, default=None): """ Find the first prefix that has been mapped to a namespace URI. The local mapping is searched, then it walks up the tree until it reaches the top or finds a match. @param uri: A namespace URI. @type uri: basestring @param default: A default prefix when not found. @type default: basestring @return: A mapped prefix. @rtype: basestring """ for item in self.nsprefixes.items(): if item[1] == uri: prefix = item[0] return prefix for item in self.specialprefixes.items(): if item[1] == uri: prefix = item[0] return prefix if self.parent is not None: return self.parent.findPrefix(uri, default) else: return default def findPrefixes(self, uri, match='eq'): """ Find all prefixes that has been mapped to a namespace URI. The local mapping is searched, then it walks up the tree until it reaches the top collecting all matches. @param uri: A namespace URI. @type uri: basestring @param match: A matching function L{Element.matcher}. @type match: basestring @return: A list of mapped prefixes. @rtype: [basestring,...] """ result = [] for item in self.nsprefixes.items(): if self.matcher[match](item[1], uri): prefix = item[0] result.append(prefix) for item in self.specialprefixes.items(): if self.matcher[match](item[1], uri): prefix = item[0] result.append(prefix) if self.parent is not None: result += self.parent.findPrefixes(uri, match) return result def promotePrefixes(self): """ Push prefix declarations up the tree as far as possible. Prefix mapping are pushed to its parent unless the parent has the prefix mapped to another URI or the parent has the prefix. This is propagated up the tree until the top is reached. @return: self @rtype: L{Element} """ for c in self.children: c.promotePrefixes() if self.parent is None: return for p,u in self.nsprefixes.items(): if p in self.parent.nsprefixes: pu = self.parent.nsprefixes[p] if pu == u: del self.nsprefixes[p] continue if p != self.parent.prefix: self.parent.nsprefixes[p] = u del self.nsprefixes[p] return self def refitPrefixes(self): """ Refit namespace qualification by replacing prefixes with explicit namespaces. Also purges prefix mapping table. @return: self @rtype: L{Element} """ for c in self.children: c.refitPrefixes() if self.prefix is not None: ns = self.resolvePrefix(self.prefix) if ns[1] is not None: self.expns = ns[1] self.prefix = None self.nsprefixes = {} return self def normalizePrefixes(self): """ Normalize the namespace prefixes. This generates unique prefixes for all namespaces. Then retrofits all prefixes and prefix mappings. Further, it will retrofix attribute values that have values containing (:). @return: self @rtype: L{Element} """ PrefixNormalizer.apply(self) return self def isempty(self, content=True): """ Get whether the element has no children. @param content: Test content (children & text) only. @type content: boolean @return: True when element has not children. 
@rtype: boolean """ noattrs = not len(self.attributes) nochildren = not len(self.children) notext = ( self.text is None ) nocontent = ( nochildren and notext ) if content: return nocontent else: return ( nocontent and noattrs ) def isnil(self): """ Get whether the element is I{nil} as defined by having an attribute in the I{xsi:nil="true"} @return: True if I{nil}, else False @rtype: boolean """ nilattr = self.getAttribute('nil', ns=Namespace.xsins) if nilattr is None: return False else: return ( nilattr.getValue().lower() == 'true' ) def setnil(self, flag=True): """ Set this node to I{nil} as defined by having an attribute I{xsi:nil}=I{flag}. @param flag: A flag inidcating how I{xsi:nil} will be set. @type flag: boolean @return: self @rtype: L{Element} """ p, u = Namespace.xsins name = ':'.join((p, 'nil')) self.set(name, str(flag).lower()) self.addPrefix(p, u) if flag: self.text = None return self def applyns(self, ns): """ Apply the namespace to this node. If the prefix is I{None} then this element's explicit namespace I{expns} is set to the URI defined by I{ns}. Otherwise, the I{ns} is simply mapped. @param ns: A namespace. @type ns: (I{prefix},I{URI}) """ if ns is None: return if not isinstance(ns, (tuple,list)): raise Exception('namespace must be tuple') if ns[0] is None: self.expns = ns[1] else: self.prefix = ns[0] self.nsprefixes[ns[0]] = ns[1] def str(self, indent=0): """ Get a string representation of this XML fragment. @param indent: The indent to be used in formatting the output. @type indent: int @return: A I{pretty} string. @rtype: basestring """ tab = '%*s'%(indent*3,'') result = [] result.append('%s<%s' % (tab, self.qname())) result.append(self.nsdeclarations()) for a in [unicode(a) for a in self.attributes]: result.append(' %s' % a) if self.isempty(): result.append('/>') return ''.join(result) result.append('>') if self.hasText(): result.append(self.text.escape()) for c in self.children: result.append('\n') result.append(c.str(indent+1)) if len(self.children): result.append('\n%s' % tab) result.append('</%s>' % self.qname()) result = ''.join(result) return result def plain(self): """ Get a string representation of this XML fragment. @return: A I{plain} string. @rtype: basestring """ result = [] result.append('<%s' % self.qname()) result.append(self.nsdeclarations()) for a in [unicode(a) for a in self.attributes]: result.append(' %s' % a) if self.isempty(): result.append('/>') return ''.join(result) result.append('>') if self.hasText(): result.append(self.text.escape()) for c in self.children: result.append(c.plain()) result.append('</%s>' % self.qname()) result = ''.join(result) return result def nsdeclarations(self): """ Get a string representation for all namespace declarations as xmlns="" and xmlns:p="". @return: A separated list of declarations. @rtype: basestring """ s = [] myns = (None, self.expns) if self.parent is None: pns = Namespace.default else: pns = (None, self.parent.expns) if myns[1] != pns[1]: if self.expns is not None: d = ' xmlns="%s"' % self.expns s.append(d) for item in self.nsprefixes.items(): (p,u) = item if self.parent is not None: ns = self.parent.resolvePrefix(p) if ns[1] == u: continue d = ' xmlns:%s="%s"' % (p, u) s.append(d) return ''.join(s) def match(self, name=None, ns=None): """ Match by (optional) name and/or (optional) namespace. @param name: The optional element tag name. @type name: str @param ns: An optional namespace. @type ns: (I{prefix}, I{name}) @return: True if matched. 
@rtype: boolean """ if name is None: byname = True else: byname = ( self.name == name ) if ns is None: byns = True else: byns = ( self.namespace()[1] == ns[1] ) return ( byname and byns ) def branch(self): """ Get a flattened representation of the branch. @return: A flat list of nodes. @rtype: [L{Element},..] """ branch = [self] for c in self.children: branch += c.branch() return branch def ancestors(self): """ Get a list of ancestors. @return: A list of ancestors. @rtype: [L{Element},..] """ ancestors = [] p = self.parent while p is not None: ancestors.append(p) p = p.parent return ancestors def walk(self, visitor): """ Walk the branch and call the visitor function on each node. @param visitor: A function. @return: self @rtype: L{Element} """ visitor(self) for c in self.children: c.walk(visitor) return self def prune(self): """ Prune the branch of empty nodes. """ pruned = [] for c in self.children: c.prune() if c.isempty(False): pruned.append(c) for p in pruned: self.children.remove(p) def __childrenAtPath(self, parts): result = [] node = self last = len(parts)-1 ancestors = parts[:last] leaf = parts[last] for name in ancestors: ns = None prefix, name = splitPrefix(name) if prefix is not None: ns = node.resolvePrefix(prefix) child = node.getChild(name, ns) if child is None: break else: node = child if child is not None: ns = None prefix, leaf = splitPrefix(leaf) if prefix is not None: ns = node.resolvePrefix(prefix) result = child.getChildren(leaf) return result def __len__(self): return len(self.children) def __getitem__(self, index): if isinstance(index, basestring): return self.get(index) else: if index < len(self.children): return self.children[index] else: return None def __setitem__(self, index, value): if isinstance(index, basestring): self.set(index, value) else: if index < len(self.children) and \ isinstance(value, Element): self.children.insert(index, value) def __eq__(self, rhs): return rhs is not None and \ isinstance(rhs, Element) and \ self.name == rhs.name and \ self.namespace()[1] == rhs.namespace()[1] def __repr__(self): return \ 'Element (prefix=%s, name=%s)' % (self.prefix, self.name) def __str__(self): return unicode(self).encode('utf-8') def __unicode__(self): return self.str() def __iter__(self): return NodeIterator(self) class NodeIterator: """ The L{Element} child node iterator. @ivar pos: The current position @type pos: int @ivar children: A list of a child nodes. @type children: [L{Element},..] """ def __init__(self, parent): """ @param parent: An element to iterate. @type parent: L{Element} """ self.pos = 0 self.children = parent.children def next(self): """ Get the next child. @return: The next child. @rtype: L{Element} @raise StopIterator: At the end. """ try: child = self.children[self.pos] self.pos += 1 return child except: raise StopIteration() class PrefixNormalizer: """ The prefix normalizer provides namespace prefix normalization. @ivar node: A node to normalize. @type node: L{Element} @ivar branch: The nodes flattened branch. @type branch: [L{Element},..] @ivar namespaces: A unique list of namespaces (URI). @type namespaces: [str,] @ivar prefixes: A reverse dict of prefixes. @type prefixes: {u, p} """ @classmethod def apply(cls, node): """ Normalize the specified node. @param node: A node to normalize. @type node: L{Element} @return: The normalized node. @rtype: L{Element} """ pn = PrefixNormalizer(node) return pn.refit() def __init__(self, node): """ @param node: A node to normalize. 
@type node: L{Element} """ self.node = node self.branch = node.branch() self.namespaces = self.getNamespaces() self.prefixes = self.genPrefixes() def getNamespaces(self): """ Get the I{unique} set of namespaces referenced in the branch. @return: A set of namespaces. @rtype: set """ s = set() for n in self.branch + self.node.ancestors(): if self.permit(n.expns): s.add(n.expns) s = s.union(self.pset(n)) return s def pset(self, n): """ Convert the nodes nsprefixes into a set. @param n: A node. @type n: L{Element} @return: A set of namespaces. @rtype: set """ s = set() for ns in n.nsprefixes.items(): if self.permit(ns): s.add(ns[1]) return s def genPrefixes(self): """ Generate a I{reverse} mapping of unique prefixes for all namespaces. @return: A referse dict of prefixes. @rtype: {u, p} """ prefixes = {} n = 0 for u in self.namespaces: p = 'ns%d' % n prefixes[u] = p n += 1 return prefixes def refit(self): """ Refit (normalize) the prefixes in the node. """ self.refitNodes() self.refitMappings() def refitNodes(self): """ Refit (normalize) all of the nodes in the branch. """ for n in self.branch: if n.prefix is not None: ns = n.namespace() if self.permit(ns): n.prefix = self.prefixes[ns[1]] self.refitAttrs(n) def refitAttrs(self, n): """ Refit (normalize) all of the attributes in the node. @param n: A node. @type n: L{Element} """ for a in n.attributes: self.refitAddr(a) def refitAddr(self, a): """ Refit (normalize) the attribute. @param a: An attribute. @type a: L{Attribute} """ if a.prefix is not None: ns = a.namespace() if self.permit(ns): a.prefix = self.prefixes[ns[1]] self.refitValue(a) def refitValue(self, a): """ Refit (normalize) the attribute's value. @param a: An attribute. @type a: L{Attribute} """ p,name = splitPrefix(a.getValue()) if p is None: return ns = a.resolvePrefix(p) if self.permit(ns): u = ns[1] p = self.prefixes[u] a.setValue(':'.join((p, name))) def refitMappings(self): """ Refit (normalize) all of the nsprefix mappings. """ for n in self.branch: n.nsprefixes = {} n = self.node for u, p in self.prefixes.items(): n.addPrefix(p, u) def permit(self, ns): """ Get whether the I{ns} is to be normalized. @param ns: A namespace. @type ns: (p,u) @return: True if to be included. @rtype: boolean """ return not self.skip(ns) def skip(self, ns): """ Get whether the I{ns} is to B{not} be normalized. @param ns: A namespace. @type ns: (p,u) @return: True if to be skipped. @rtype: boolean """ return ns is None or \ ( ns == Namespace.default ) or \ ( ns == Namespace.xsdns ) or \ ( ns == Namespace.xsins) or \ ( ns == Namespace.xmlns )
lgpl-3.0
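An illustrative usage sketch for the Element/Attribute API in the record above (not part of the dataset record itself). The import path assumes the file above is suds/sax/element.py; the tag names and namespace URI are made-up examples.

from suds.sax.element import Element

root = Element('order', ns=(None, 'http://example.com/orders'))  # made-up namespace URI
item = Element('item')
item.set('sku', 'A-100')        # add/update an attribute
item.setText('Widget')          # set the element's text content
root.append(item)               # attach as a child element

print(root.str())                          # indented XML fragment with xmlns declaration
print(root.getChild('item').get('sku'))    # -> 'A-100'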
FireWRT/OpenWrt-Firefly-Libraries
staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/selectors.py
9
16735
"""Selectors module. This module allows high-level and efficient I/O multiplexing, built upon the `select` module primitives. """ from abc import ABCMeta, abstractmethod from collections import namedtuple, Mapping import math import select import sys # generic events, that must be mapped to implementation-specific ones EVENT_READ = (1 << 0) EVENT_WRITE = (1 << 1) def _fileobj_to_fd(fileobj): """Return a file descriptor from a file object. Parameters: fileobj -- file object or file descriptor Returns: corresponding file descriptor Raises: ValueError if the object is invalid """ if isinstance(fileobj, int): fd = fileobj else: try: fd = int(fileobj.fileno()) except (AttributeError, TypeError, ValueError): raise ValueError("Invalid file object: " "{!r}".format(fileobj)) from None if fd < 0: raise ValueError("Invalid file descriptor: {}".format(fd)) return fd SelectorKey = namedtuple('SelectorKey', ['fileobj', 'fd', 'events', 'data']) """Object used to associate a file object to its backing file descriptor, selected event mask and attached data.""" class _SelectorMapping(Mapping): """Mapping of file objects to selector keys.""" def __init__(self, selector): self._selector = selector def __len__(self): return len(self._selector._fd_to_key) def __getitem__(self, fileobj): try: fd = self._selector._fileobj_lookup(fileobj) return self._selector._fd_to_key[fd] except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None def __iter__(self): return iter(self._selector._fd_to_key) class BaseSelector(metaclass=ABCMeta): """Selector abstract base class. A selector supports registering file objects to be monitored for specific I/O events. A file object is a file descriptor or any object with a `fileno()` method. An arbitrary object can be attached to the file object, which can be used for example to store context information, a callback, etc. A selector can use various implementations (select(), poll(), epoll()...) depending on the platform. The default `Selector` class uses the most efficient implementation on the current platform. """ @abstractmethod def register(self, fileobj, events, data=None): """Register a file object. Parameters: fileobj -- file object or file descriptor events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) data -- attached data Returns: SelectorKey instance Raises: ValueError if events is invalid KeyError if fileobj is already registered OSError if fileobj is closed or otherwise is unacceptable to the underlying system call (if a system call is made) Note: OSError may or may not be raised """ raise NotImplementedError @abstractmethod def unregister(self, fileobj): """Unregister a file object. Parameters: fileobj -- file object or file descriptor Returns: SelectorKey instance Raises: KeyError if fileobj is not registered Note: If fileobj is registered but has since been closed this does *not* raise OSError (even if the wrapped syscall does) """ raise NotImplementedError def modify(self, fileobj, events, data=None): """Change a registered file object monitored events or attached data. Parameters: fileobj -- file object or file descriptor events -- events to monitor (bitwise mask of EVENT_READ|EVENT_WRITE) data -- attached data Returns: SelectorKey instance Raises: Anything that unregister() or register() raises """ self.unregister(fileobj) return self.register(fileobj, events, data) @abstractmethod def select(self, timeout=None): """Perform the actual selection, until some monitored file objects are ready or a timeout expires. 
Parameters: timeout -- if timeout > 0, this specifies the maximum wait time, in seconds if timeout <= 0, the select() call won't block, and will report the currently ready file objects if timeout is None, select() will block until a monitored file object becomes ready Returns: list of (key, events) for ready file objects `events` is a bitwise mask of EVENT_READ|EVENT_WRITE """ raise NotImplementedError def close(self): """Close the selector. This must be called to make sure that any underlying resource is freed. """ pass def get_key(self, fileobj): """Return the key associated to a registered file object. Returns: SelectorKey for this file object """ mapping = self.get_map() try: if mapping is None: raise KeyError return mapping[fileobj] except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None @abstractmethod def get_map(self): """Return a mapping of file objects to selector keys.""" raise NotImplementedError def __enter__(self): return self def __exit__(self, *args): self.close() class _BaseSelectorImpl(BaseSelector): """Base selector implementation.""" def __init__(self): # this maps file descriptors to keys self._fd_to_key = {} # read-only mapping returned by get_map() self._map = _SelectorMapping(self) def _fileobj_lookup(self, fileobj): """Return a file descriptor from a file object. This wraps _fileobj_to_fd() to do an exhaustive search in case the object is invalid but we still have it in our map. This is used by unregister() so we can unregister an object that was previously registered even if it is closed. It is also used by _SelectorMapping. """ try: return _fileobj_to_fd(fileobj) except ValueError: # Do an exhaustive search. for key in self._fd_to_key.values(): if key.fileobj is fileobj: return key.fd # Raise ValueError after all. raise def register(self, fileobj, events, data=None): if (not events) or (events & ~(EVENT_READ | EVENT_WRITE)): raise ValueError("Invalid events: {!r}".format(events)) key = SelectorKey(fileobj, self._fileobj_lookup(fileobj), events, data) if key.fd in self._fd_to_key: raise KeyError("{!r} (FD {}) is already registered" .format(fileobj, key.fd)) self._fd_to_key[key.fd] = key return key def unregister(self, fileobj): try: key = self._fd_to_key.pop(self._fileobj_lookup(fileobj)) except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None return key def modify(self, fileobj, events, data=None): # TODO: Subclasses can probably optimize this even further. try: key = self._fd_to_key[self._fileobj_lookup(fileobj)] except KeyError: raise KeyError("{!r} is not registered".format(fileobj)) from None if events != key.events: self.unregister(fileobj) key = self.register(fileobj, events, data) elif data != key.data: # Use a shortcut to update the data. key = key._replace(data=data) self._fd_to_key[key.fd] = key return key def close(self): self._fd_to_key.clear() self._map = None def get_map(self): return self._map def _key_from_fd(self, fd): """Return the key associated to a given file descriptor. 
Parameters: fd -- file descriptor Returns: corresponding key, or None if not found """ try: return self._fd_to_key[fd] except KeyError: return None class SelectSelector(_BaseSelectorImpl): """Select-based selector.""" def __init__(self): super().__init__() self._readers = set() self._writers = set() def register(self, fileobj, events, data=None): key = super().register(fileobj, events, data) if events & EVENT_READ: self._readers.add(key.fd) if events & EVENT_WRITE: self._writers.add(key.fd) return key def unregister(self, fileobj): key = super().unregister(fileobj) self._readers.discard(key.fd) self._writers.discard(key.fd) return key if sys.platform == 'win32': def _select(self, r, w, _, timeout=None): r, w, x = select.select(r, w, w, timeout) return r, w + x, [] else: _select = select.select def select(self, timeout=None): timeout = None if timeout is None else max(timeout, 0) ready = [] try: r, w, _ = self._select(self._readers, self._writers, [], timeout) except InterruptedError: return ready r = set(r) w = set(w) for fd in r | w: events = 0 if fd in r: events |= EVENT_READ if fd in w: events |= EVENT_WRITE key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready if hasattr(select, 'poll'): class PollSelector(_BaseSelectorImpl): """Poll-based selector.""" def __init__(self): super().__init__() self._poll = select.poll() def register(self, fileobj, events, data=None): key = super().register(fileobj, events, data) poll_events = 0 if events & EVENT_READ: poll_events |= select.POLLIN if events & EVENT_WRITE: poll_events |= select.POLLOUT self._poll.register(key.fd, poll_events) return key def unregister(self, fileobj): key = super().unregister(fileobj) self._poll.unregister(key.fd) return key def select(self, timeout=None): if timeout is None: timeout = None elif timeout <= 0: timeout = 0 else: # poll() has a resolution of 1 millisecond, round away from # zero to wait *at least* timeout seconds. timeout = math.ceil(timeout * 1e3) ready = [] try: fd_event_list = self._poll.poll(timeout) except InterruptedError: return ready for fd, event in fd_event_list: events = 0 if event & ~select.POLLIN: events |= EVENT_WRITE if event & ~select.POLLOUT: events |= EVENT_READ key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready if hasattr(select, 'epoll'): class EpollSelector(_BaseSelectorImpl): """Epoll-based selector.""" def __init__(self): super().__init__() self._epoll = select.epoll() def fileno(self): return self._epoll.fileno() def register(self, fileobj, events, data=None): key = super().register(fileobj, events, data) epoll_events = 0 if events & EVENT_READ: epoll_events |= select.EPOLLIN if events & EVENT_WRITE: epoll_events |= select.EPOLLOUT self._epoll.register(key.fd, epoll_events) return key def unregister(self, fileobj): key = super().unregister(fileobj) try: self._epoll.unregister(key.fd) except OSError: # This can happen if the FD was closed since it # was registered. pass return key def select(self, timeout=None): if timeout is None: timeout = -1 elif timeout <= 0: timeout = 0 else: # epoll_wait() has a resolution of 1 millisecond, round away # from zero to wait *at least* timeout seconds. timeout = math.ceil(timeout * 1e3) * 1e-3 # epoll_wait() expects `maxevents` to be greater than zero; # we want to make sure that `select()` can be called when no # FD is registered. 
max_ev = max(len(self._fd_to_key), 1) ready = [] try: fd_event_list = self._epoll.poll(timeout, max_ev) except InterruptedError: return ready for fd, event in fd_event_list: events = 0 if event & ~select.EPOLLIN: events |= EVENT_WRITE if event & ~select.EPOLLOUT: events |= EVENT_READ key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready def close(self): self._epoll.close() super().close() if hasattr(select, 'kqueue'): class KqueueSelector(_BaseSelectorImpl): """Kqueue-based selector.""" def __init__(self): super().__init__() self._kqueue = select.kqueue() def fileno(self): return self._kqueue.fileno() def register(self, fileobj, events, data=None): key = super().register(fileobj, events, data) if events & EVENT_READ: kev = select.kevent(key.fd, select.KQ_FILTER_READ, select.KQ_EV_ADD) self._kqueue.control([kev], 0, 0) if events & EVENT_WRITE: kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD) self._kqueue.control([kev], 0, 0) return key def unregister(self, fileobj): key = super().unregister(fileobj) if key.events & EVENT_READ: kev = select.kevent(key.fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE) try: self._kqueue.control([kev], 0, 0) except OSError: # This can happen if the FD was closed since it # was registered. pass if key.events & EVENT_WRITE: kev = select.kevent(key.fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE) try: self._kqueue.control([kev], 0, 0) except OSError: # See comment above. pass return key def select(self, timeout=None): timeout = None if timeout is None else max(timeout, 0) max_ev = len(self._fd_to_key) ready = [] try: kev_list = self._kqueue.control(None, max_ev, timeout) except InterruptedError: return ready for kev in kev_list: fd = kev.ident flag = kev.filter events = 0 if flag == select.KQ_FILTER_READ: events |= EVENT_READ if flag == select.KQ_FILTER_WRITE: events |= EVENT_WRITE key = self._key_from_fd(fd) if key: ready.append((key, events & key.events)) return ready def close(self): self._kqueue.close() super().close() # Choose the best implementation: roughly, epoll|kqueue > poll > select. # select() also can't accept a FD > FD_SETSIZE (usually around 1024) if 'KqueueSelector' in globals(): DefaultSelector = KqueueSelector elif 'EpollSelector' in globals(): DefaultSelector = EpollSelector elif 'PollSelector' in globals(): DefaultSelector = PollSelector else: DefaultSelector = SelectSelector
gpl-2.0
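A minimal usage sketch for the selectors module in the record above (not part of the dataset record itself): register a non-blocking listening socket and wait for readability. The address and timeout are arbitrary example values.

import selectors
import socket

sel = selectors.DefaultSelector()          # picks kqueue/epoll/poll/select as available
srv = socket.socket()
srv.bind(('localhost', 0))                 # any free port; address is just an example
srv.listen()
srv.setblocking(False)
sel.register(srv, selectors.EVENT_READ, data='accept')

# Wait up to one second for incoming connections and accept any that are ready.
for key, events in sel.select(timeout=1.0):
    if key.data == 'accept' and events & selectors.EVENT_READ:
        conn, _ = key.fileobj.accept()
        conn.close()

sel.close()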
wangyum/spark
examples/src/main/python/ml/sql_transformer.py
34
1343
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # $example on$ from pyspark.ml.feature import SQLTransformer # $example off$ from pyspark.sql import SparkSession if __name__ == "__main__": spark = SparkSession\ .builder\ .appName("SQLTransformerExample")\ .getOrCreate() # $example on$ df = spark.createDataFrame([ (0, 1.0, 3.0), (2, 2.0, 5.0) ], ["id", "v1", "v2"]) sqlTrans = SQLTransformer( statement="SELECT *, (v1 + v2) AS v3, (v1 * v2) AS v4 FROM __THIS__") sqlTrans.transform(df).show() # $example off$ spark.stop()
apache-2.0
sebmarchand/syzygy
third_party/numpy/files/numpy/f2py/tests/test_callback.py
22
1740
from numpy.testing import * from numpy import array import math import util class TestF77Callback(util.F2PyTest): code = """ subroutine t(fun,a) integer a cf2py intent(out) a external fun call fun(a) end subroutine func(a) cf2py intent(in,out) a integer a a = a + 11 end subroutine func0(a) cf2py intent(out) a integer a a = 11 end subroutine t2(a) cf2py intent(callback) fun integer a cf2py intent(out) a external fun call fun(a) end """ @dec.slow def test_all(self): for name in "t,t2".split(","): self.check_function(name) def check_function(self, name): t = getattr(self.module, name) r = t(lambda : 4) assert_( r==4,`r`) r = t(lambda a:5,fun_extra_args=(6,)) assert_( r==5,`r`) r = t(lambda a:a,fun_extra_args=(6,)) assert_( r==6,`r`) r = t(lambda a:5+a,fun_extra_args=(7,)) assert_( r==12,`r`) r = t(lambda a:math.degrees(a),fun_extra_args=(math.pi,)) assert_( r==180,`r`) r = t(math.degrees,fun_extra_args=(math.pi,)) assert_( r==180,`r`) r = t(self.module.func, fun_extra_args=(6,)) assert_( r==17,`r`) r = t(self.module.func0) assert_( r==11,`r`) r = t(self.module.func0._cpointer) assert_( r==11,`r`) class A: def __call__(self): return 7 def mth(self): return 9 a = A() r = t(a) assert_( r==7,`r`) r = t(a.mth) assert_( r==9,`r`) if __name__ == "__main__": import nose nose.runmodule()
apache-2.0
CZCV/s-dilation-caffe
python/caffe/detector.py
20
8541
#!/usr/bin/env python """ Do windowed detection by classifying a number of images/crops at once, optionally using the selective search window proposal method. This implementation follows ideas in Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik. Rich feature hierarchies for accurate object detection and semantic segmentation. http://arxiv.org/abs/1311.2524 The selective_search_ijcv_with_python code required for the selective search proposal mode is available at https://github.com/sergeyk/selective_search_ijcv_with_python """ import numpy as np import os import caffe class Detector(caffe.Net): """ Detector extends Net for windowed detection by a list of crops or selective search proposals. Parameters ---------- mean, input_scale, raw_scale, channel_swap : params for preprocessing options. context_pad : amount of surrounding context to take s.t. a `context_pad` sized border of pixels in the network input image is context, as in R-CNN feature extraction. """ def __init__(self, model_file, pretrained_file, mean=None, input_scale=None, raw_scale=None, channel_swap=None, context_pad=None): caffe.Net.__init__(self, model_file, pretrained_file, caffe.TEST) # configure pre-processing in_ = self.inputs[0] self.transformer = caffe.io.Transformer( {in_: self.blobs[in_].data.shape}) self.transformer.set_transpose(in_, (2, 0, 1)) if mean is not None: self.transformer.set_mean(in_, mean) if input_scale is not None: self.transformer.set_input_scale(in_, input_scale) if raw_scale is not None: self.transformer.set_raw_scale(in_, raw_scale) if channel_swap is not None: self.transformer.set_channel_swap(in_, channel_swap) self.configure_crop(context_pad) def detect_windows(self, images_windows): """ Do windowed detection over given images and windows. Windows are extracted then warped to the input dimensions of the net. Parameters ---------- images_windows: (image filename, window list) iterable. context_crop: size of context border to crop in pixels. Returns ------- detections: list of {filename: image filename, window: crop coordinates, predictions: prediction vector} dicts. """ # Extract windows. window_inputs = [] for image_fname, windows in images_windows: image = caffe.io.load_image(image_fname).astype(np.float32) for window in windows: window_inputs.append(self.crop(image, window)) # Run through the net (warping windows to input dimensions). in_ = self.inputs[0] caffe_in = np.zeros((len(window_inputs), window_inputs[0].shape[2]) + self.blobs[in_].data.shape[2:], dtype=np.float32) for ix, window_in in enumerate(window_inputs): caffe_in[ix] = self.transformer.preprocess(in_, window_in) out = self.forward_all(**{in_: caffe_in}) predictions = out[self.outputs[0]] # Package predictions with images and windows. detections = [] ix = 0 for image_fname, windows in images_windows: for window in windows: detections.append({ 'window': window, 'prediction': predictions[ix], 'filename': image_fname }) ix += 1 return detections def detect_selective_search(self, image_fnames): """ Do windowed detection over Selective Search proposals by extracting the crop and warping to the input dimensions of the net. Parameters ---------- image_fnames: list Returns ------- detections: list of {filename: image filename, window: crop coordinates, predictions: prediction vector} dicts. """ import selective_search_ijcv_with_python as selective_search # Make absolute paths so MATLAB can find the files. 
image_fnames = [os.path.abspath(f) for f in image_fnames] windows_list = selective_search.get_windows( image_fnames, cmd='selective_search_rcnn' ) # Run windowed detection on the selective search list. return self.detect_windows(zip(image_fnames, windows_list)) def crop(self, im, window): """ Crop a window from the image for detection. Include surrounding context according to the `context_pad` configuration. Parameters ---------- im: H x W x K image ndarray to crop. window: bounding box coordinates as ymin, xmin, ymax, xmax. Returns ------- crop: cropped window. """ # Crop window from the image. crop = im[window[0]:window[2], window[1]:window[3]] if self.context_pad: box = window.copy() crop_size = self.blobs[self.inputs[0]].width # assumes square scale = crop_size / (1. * crop_size - self.context_pad * 2) # Crop a box + surrounding context. half_h = (box[2] - box[0] + 1) / 2. half_w = (box[3] - box[1] + 1) / 2. center = (box[0] + half_h, box[1] + half_w) scaled_dims = scale * np.array((-half_h, -half_w, half_h, half_w)) box = np.round(np.tile(center, 2) + scaled_dims) full_h = box[2] - box[0] + 1 full_w = box[3] - box[1] + 1 scale_h = crop_size / full_h scale_w = crop_size / full_w pad_y = round(max(0, -box[0]) * scale_h) # amount out-of-bounds pad_x = round(max(0, -box[1]) * scale_w) # Clip box to image dimensions. im_h, im_w = im.shape[:2] box = np.clip(box, 0., [im_h, im_w, im_h, im_w]) clip_h = box[2] - box[0] + 1 clip_w = box[3] - box[1] + 1 assert(clip_h > 0 and clip_w > 0) crop_h = round(clip_h * scale_h) crop_w = round(clip_w * scale_w) if pad_y + crop_h > crop_size: crop_h = crop_size - pad_y if pad_x + crop_w > crop_size: crop_w = crop_size - pad_x # collect with context padding and place in input # with mean padding context_crop = im[box[0]:box[2], box[1]:box[3]] context_crop = caffe.io.resize_image(context_crop, (crop_h, crop_w)) crop = np.ones(self.crop_dims, dtype=np.float32) * self.crop_mean crop[pad_y:(pad_y + crop_h), pad_x:(pad_x + crop_w)] = context_crop return crop def configure_crop(self, context_pad): """ Configure crop dimensions and amount of context for cropping. If context is included, make the special input mean for context padding. Parameters ---------- context_pad : amount of context for cropping. """ # crop dimensions in_ = self.inputs[0] tpose = self.transformer.transpose[in_] inv_tpose = [tpose[t] for t in tpose] self.crop_dims = np.array(self.blobs[in_].data.shape[1:])[inv_tpose] #.transpose(inv_tpose) # context padding self.context_pad = context_pad if self.context_pad: in_ = self.inputs[0] transpose = self.transformer.transpose.get(in_) channel_order = self.transformer.channel_swap.get(in_) raw_scale = self.transformer.raw_scale.get(in_) # Padding context crops needs the mean in unprocessed input space. mean = self.transformer.mean.get(in_) if mean is not None: inv_transpose = [transpose[t] for t in transpose] crop_mean = mean.copy().transpose(inv_transpose) if channel_order is not None: channel_order_inverse = [channel_order.index(i) for i in range(crop_mean.shape[2])] crop_mean = crop_mean[:, :, channel_order_inverse] if raw_scale is not None: crop_mean /= raw_scale self.crop_mean = crop_mean else: self.crop_mean = np.zeros(self.crop_dims, dtype=np.float32)
agpl-3.0
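A hedged usage sketch for the Detector class in the record above (not part of the dataset record itself). The model/weights/image paths, mean values, and window coordinates below are placeholders, not values from the dataset.

import numpy as np
import caffe
from caffe.detector import Detector

detector = Detector('deploy.prototxt', 'weights.caffemodel',      # placeholder model files
                    mean=np.float32([104.0, 117.0, 123.0]),       # example BGR mean
                    raw_scale=255,
                    channel_swap=(2, 1, 0),
                    context_pad=16)

# Each entry pairs an image filename with a list of (ymin, xmin, ymax, xmax) windows.
images_windows = [('image.jpg', [np.array([0, 0, 227, 227])])]
detections = detector.detect_windows(images_windows)
for d in detections:
    print('%s %s -> class %d' % (d['filename'], d['window'], d['prediction'].argmax()))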
oceanobservatories/mi-instrument
mi/idk/scripts/cat_data_log.py
11
2395
""" @file mi/idk/script/watch_data_log.py @author Bill French @brief Watch the port agent log for the current IDK driver """ __author__ = 'Bill French' import time import sys import binascii import fileinput from mi.idk.comm_config import CommConfig from mi.idk.metadata import Metadata from mi.core.instrument.port_agent_client import PortAgentPacket, HEADER_SIZE DATADIR="/tmp" SLEEP=1.0 SENTINLE=binascii.unhexlify('A39D7A') def run(): buffer = None for line in fileinput.input(): if(buffer == None): buffer = "" buffer = buffer + line (record, buffer) = _get_record(buffer) if(record): _write_packet(record) def _write_packet(record): if(record.get_header_type() == PortAgentPacket.DATA_FROM_INSTRUMENT): sys.stdout.write(record.get_data()) elif(record.get_header_type() == PortAgentPacket.DATA_FROM_DRIVER): #sys.stdout.write(">>> %s" % record.get_data()) pass def _get_record(buffer): """ Work to read a XML record. If we can't parse then just return nothing @return: if an XML port agent record is found, return it's value. """ remaining = None data_start = 0 data_end = 0 index = buffer.find(SENTINLE) if(index < 0): return (None, buffer) packet = _get_header(buffer[index:]) if packet: remaining = _get_remaining(buffer[index:], packet) if(_read_data(buffer[index:], packet)): return (packet, remaining) else: if(remaining): return (None, buffer[index+1:]) else: return (None, buffer) else: return (None, buffer) def _get_header(buffer): packet = PortAgentPacket() if(len(buffer) < HEADER_SIZE): return None header = buffer[0:HEADER_SIZE] packet.unpack_header(header) if(packet.get_data_length() < 0): return None print "time: %f" % packet.get_timestamp() return packet def _read_data(buffer, packet): if(len(buffer) < HEADER_SIZE + packet.get_data_length()): return False data = buffer[HEADER_SIZE:HEADER_SIZE+packet.get_data_length()] packet.attach_data(data) return True def _get_remaining(buffer, packet): if(len(buffer) == HEADER_SIZE + packet.get_data_length()): return None return buffer[HEADER_SIZE+packet.get_data_length():] if __name__ == '__main__': run()
bsd-2-clause
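A brief usage note for the script in the record above (the log file name below is a placeholder, not from the dataset): the script reads port agent log files named on the command line, or standard input via fileinput, and writes only the DATA_FROM_INSTRUMENT payloads to stdout, so a typical invocation is

    python cat_data_log.py /tmp/port_agent.datalog > instrument_bytes.out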
zero-rp/miniblink49
third_party/skia/PRESUBMIT.py
7
19450
# Copyright (c) 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Top-level presubmit script for Skia. See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts for more details about the presubmit API built into gcl. """ import csv import fnmatch import os import re import subprocess import sys import traceback REVERT_CL_SUBJECT_PREFIX = 'Revert ' SKIA_TREE_STATUS_URL = 'http://skia-tree-status.appspot.com' CQ_KEYWORDS_THAT_NEED_APPENDING = ('CQ_INCLUDE_TRYBOTS', 'CQ_EXTRA_TRYBOTS', 'CQ_EXCLUDE_TRYBOTS', 'CQ_TRYBOTS') # Please add the complete email address here (and not just 'xyz@' or 'xyz'). PUBLIC_API_OWNERS = ( 'reed@chromium.org', 'reed@google.com', 'bsalomon@chromium.org', 'bsalomon@google.com', 'djsollen@chromium.org', 'djsollen@google.com', ) AUTHORS_FILE_NAME = 'AUTHORS' DOCS_PREVIEW_URL = 'https://skia.org/?cl=' def _CheckChangeHasEol(input_api, output_api, source_file_filter=None): """Checks that files end with atleast one \n (LF).""" eof_files = [] for f in input_api.AffectedSourceFiles(source_file_filter): contents = input_api.ReadFile(f, 'rb') # Check that the file ends in atleast one newline character. if len(contents) > 1 and contents[-1:] != '\n': eof_files.append(f.LocalPath()) if eof_files: return [output_api.PresubmitPromptWarning( 'These files should end in a newline character:', items=eof_files)] return [] def _PythonChecks(input_api, output_api): """Run checks on any modified Python files.""" pylint_disabled_warnings = ( 'F0401', # Unable to import. 'E0611', # No name in module. 'W0232', # Class has no __init__ method. 'E1002', # Use of super on an old style class. 'W0403', # Relative import used. 'R0201', # Method could be a function. 'E1003', # Using class name in super. 'W0613', # Unused argument. ) # Run Pylint on only the modified python files. Unfortunately it still runs # Pylint on the whole file instead of just the modified lines. affected_python_files = [] for affected_file in input_api.AffectedSourceFiles(None): affected_file_path = affected_file.LocalPath() if affected_file_path.endswith('.py'): affected_python_files.append(affected_file_path) return input_api.canned_checks.RunPylint( input_api, output_api, disabled_warnings=pylint_disabled_warnings, white_list=affected_python_files) def _IfDefChecks(input_api, output_api): """Ensures if/ifdef are not before includes. See skbug/3362 for details.""" comment_block_start_pattern = re.compile('^\s*\/\*.*$') comment_block_middle_pattern = re.compile('^\s+\*.*') comment_block_end_pattern = re.compile('^\s+\*\/.*$') single_line_comment_pattern = re.compile('^\s*//.*$') def is_comment(line): return (comment_block_start_pattern.match(line) or comment_block_middle_pattern.match(line) or comment_block_end_pattern.match(line) or single_line_comment_pattern.match(line)) empty_line_pattern = re.compile('^\s*$') def is_empty_line(line): return empty_line_pattern.match(line) failing_files = [] for affected_file in input_api.AffectedSourceFiles(None): affected_file_path = affected_file.LocalPath() if affected_file_path.endswith('.cpp') or affected_file_path.endswith('.h'): f = open(affected_file_path) for line in f.xreadlines(): if is_comment(line) or is_empty_line(line): continue # The below will be the first real line after comments and newlines. 
if line.startswith('#if 0 '): pass elif line.startswith('#if ') or line.startswith('#ifdef '): failing_files.append(affected_file_path) break results = [] if failing_files: results.append( output_api.PresubmitError( 'The following files have #if or #ifdef before includes:\n%s\n\n' 'See skbug.com/3362 for why this should be fixed.' % '\n'.join(failing_files))) return results def _CopyrightChecks(input_api, output_api, source_file_filter=None): results = [] year_pattern = r'\d{4}' year_range_pattern = r'%s(-%s)?' % (year_pattern, year_pattern) years_pattern = r'%s(,%s)*,?' % (year_range_pattern, year_range_pattern) copyright_pattern = ( r'Copyright (\([cC]\) )?%s \w+' % years_pattern) for affected_file in input_api.AffectedSourceFiles(source_file_filter): if 'third_party' in affected_file.LocalPath(): continue contents = input_api.ReadFile(affected_file, 'rb') if not re.search(copyright_pattern, contents): results.append(output_api.PresubmitError( '%s is missing a correct copyright header.' % affected_file)) return results def _ToolFlags(input_api, output_api): """Make sure `{dm,nanobench}_flags.py test` passes if modified.""" results = [] sources = lambda x: ('dm_flags.py' in x.LocalPath() or 'nanobench_flags.py' in x.LocalPath()) for f in input_api.AffectedSourceFiles(sources): if 0 != subprocess.call(['python', f.LocalPath(), 'test']): results.append(output_api.PresubmitError('`python %s test` failed' % f)) return results def _CommonChecks(input_api, output_api): """Presubmit checks common to upload and commit.""" results = [] sources = lambda x: (x.LocalPath().endswith('.h') or x.LocalPath().endswith('.gypi') or x.LocalPath().endswith('.gyp') or x.LocalPath().endswith('.py') or x.LocalPath().endswith('.sh') or x.LocalPath().endswith('.m') or x.LocalPath().endswith('.mm') or x.LocalPath().endswith('.go') or x.LocalPath().endswith('.c') or x.LocalPath().endswith('.cc') or x.LocalPath().endswith('.cpp')) results.extend( _CheckChangeHasEol( input_api, output_api, source_file_filter=sources)) results.extend(_PythonChecks(input_api, output_api)) results.extend(_IfDefChecks(input_api, output_api)) results.extend(_CopyrightChecks(input_api, output_api, source_file_filter=sources)) results.extend(_ToolFlags(input_api, output_api)) return results def CheckChangeOnUpload(input_api, output_api): """Presubmit checks for the change on upload. The following are the presubmit checks: * Check change has one and only one EOL. """ results = [] results.extend(_CommonChecks(input_api, output_api)) return results def _CheckTreeStatus(input_api, output_api, json_url): """Check whether to allow commit. Args: input_api: input related apis. output_api: output related apis. json_url: url to download json style status. """ tree_status_results = input_api.canned_checks.CheckTreeIsOpen( input_api, output_api, json_url=json_url) if not tree_status_results: # Check for caution state only if tree is not closed. connection = input_api.urllib2.urlopen(json_url) status = input_api.json.loads(connection.read()) connection.close() if ('caution' in status['message'].lower() and os.isatty(sys.stdout.fileno())): # Display a prompt only if we are in an interactive shell. Without this # check the commit queue behaves incorrectly because it considers # prompts to be failures. short_text = 'Tree state is: ' + status['general_state'] long_text = status['message'] + '\n' + json_url tree_status_results.append( output_api.PresubmitPromptWarning( message=short_text, long_text=long_text)) else: # Tree status is closed. 
Put in message about contacting sheriff. connection = input_api.urllib2.urlopen( SKIA_TREE_STATUS_URL + '/current-sheriff') sheriff_details = input_api.json.loads(connection.read()) if sheriff_details: tree_status_results[0]._message += ( '\n\nPlease contact the current Skia sheriff (%s) if you are trying ' 'to submit a build fix\nand do not know how to submit because the ' 'tree is closed') % sheriff_details['username'] return tree_status_results def _CheckOwnerIsInAuthorsFile(input_api, output_api): results = [] issue = input_api.change.issue if issue and input_api.rietveld: issue_properties = input_api.rietveld.get_issue_properties( issue=int(issue), messages=False) owner_email = issue_properties['owner_email'] try: authors_content = '' for line in open(AUTHORS_FILE_NAME): if not line.startswith('#'): authors_content += line email_fnmatches = re.findall('<(.*)>', authors_content) for email_fnmatch in email_fnmatches: if fnmatch.fnmatch(owner_email, email_fnmatch): # Found a match, the user is in the AUTHORS file break out of the loop break else: # TODO(rmistry): Remove the below CLA messaging once a CLA checker has # been added to the CQ. results.append( output_api.PresubmitError( 'The email %s is not in Skia\'s AUTHORS file.\n' 'Issue owner, this CL must include an addition to the Skia AUTHORS ' 'file.\n' 'Googler reviewers, please check that the AUTHORS entry ' 'corresponds to an email address in http://goto/cla-signers. If it ' 'does not then ask the issue owner to sign the CLA at ' 'https://developers.google.com/open-source/cla/individual ' '(individual) or ' 'https://developers.google.com/open-source/cla/corporate ' '(corporate).' % owner_email)) except IOError: # Do not fail if authors file cannot be found. traceback.print_exc() input_api.logging.error('AUTHORS file not found!') return results def _CheckLGTMsForPublicAPI(input_api, output_api): """Check LGTMs for public API changes. For public API files make sure there is an LGTM from the list of owners in PUBLIC_API_OWNERS. """ results = [] requires_owner_check = False for affected_file in input_api.AffectedFiles(): affected_file_path = affected_file.LocalPath() file_path, file_ext = os.path.splitext(affected_file_path) # We only care about files that end in .h and are under the top-level # include dir. if file_ext == '.h' and 'include' == file_path.split(os.path.sep)[0]: requires_owner_check = True if not requires_owner_check: return results lgtm_from_owner = False issue = input_api.change.issue if issue and input_api.rietveld: issue_properties = input_api.rietveld.get_issue_properties( issue=int(issue), messages=True) if re.match(REVERT_CL_SUBJECT_PREFIX, issue_properties['subject'], re.I): # It is a revert CL, ignore the public api owners check. return results # TODO(rmistry): Stop checking for COMMIT=false once crbug/470609 is # resolved. if issue_properties['cq_dry_run'] or re.search( r'^COMMIT=false$', issue_properties['description'], re.M): # Ignore public api owners check for dry run CLs since they are not # going to be committed. return results match = re.search(r'^TBR=(.*)$', issue_properties['description'], re.M) if match: tbr_entries = match.group(1).strip().split(',') for owner in PUBLIC_API_OWNERS: if owner in tbr_entries or owner.split('@')[0] in tbr_entries: # If an owner is specified in the TBR= line then ignore the public # api owners check. return results if issue_properties['owner_email'] in PUBLIC_API_OWNERS: # An owner created the CL that is an automatic LGTM. 
lgtm_from_owner = True messages = issue_properties.get('messages') if messages: for message in messages: if (message['sender'] in PUBLIC_API_OWNERS and 'lgtm' in message['text'].lower()): # Found an lgtm in a message from an owner. lgtm_from_owner = True break if not lgtm_from_owner: results.append( output_api.PresubmitError( 'Since the CL is editing public API, you must have an LGTM from ' 'one of: %s' % str(PUBLIC_API_OWNERS))) return results def PostUploadHook(cl, change, output_api): """git cl upload will call this hook after the issue is created/modified. This hook does the following: * Adds a link to preview docs changes if there are any docs changes in the CL. * Adds 'NOTRY=true' if the CL contains only docs changes. * Adds 'NOTREECHECKS=true' for non master branch changes since they do not need to be gated on the master branch's tree. * Adds 'NOTRY=true' for non master branch changes since trybots do not yet work on them. * Adds 'NOPRESUBMIT=true' for non master branch changes since those don't run the presubmit checks. """ results = [] atleast_one_docs_change = False all_docs_changes = True for affected_file in change.AffectedFiles(): affected_file_path = affected_file.LocalPath() file_path, _ = os.path.splitext(affected_file_path) if 'site' == file_path.split(os.path.sep)[0]: atleast_one_docs_change = True else: all_docs_changes = False if atleast_one_docs_change and not all_docs_changes: break issue = cl.issue rietveld_obj = cl.RpcServer() if issue and rietveld_obj: original_description = rietveld_obj.get_description(issue) new_description = original_description # If the change includes only doc changes then add NOTRY=true in the # CL's description if it does not exist yet. if all_docs_changes and not re.search( r'^NOTRY=true$', new_description, re.M | re.I): new_description += '\nNOTRY=true' results.append( output_api.PresubmitNotifyResult( 'This change has only doc changes. Automatically added ' '\'NOTRY=true\' to the CL\'s description')) # If there is atleast one docs change then add preview link in the CL's # description if it does not already exist there. if atleast_one_docs_change and not re.search( r'^DOCS_PREVIEW=.*', new_description, re.M | re.I): # Automatically add a link to where the docs can be previewed. new_description += '\nDOCS_PREVIEW= %s%s' % (DOCS_PREVIEW_URL, issue) results.append( output_api.PresubmitNotifyResult( 'Automatically added a link to preview the docs changes to the ' 'CL\'s description')) # If the target ref is not master then add NOTREECHECKS=true and NOTRY=true # to the CL's description if it does not already exist there. target_ref = rietveld_obj.get_issue_properties(issue, False).get( 'target_ref', '') if target_ref != 'refs/heads/master': if not re.search( r'^NOTREECHECKS=true$', new_description, re.M | re.I): new_description += "\nNOTREECHECKS=true" results.append( output_api.PresubmitNotifyResult( 'Branch changes do not need to rely on the master branch\'s ' 'tree status. Automatically added \'NOTREECHECKS=true\' to the ' 'CL\'s description')) if not re.search( r'^NOTRY=true$', new_description, re.M | re.I): new_description += "\nNOTRY=true" results.append( output_api.PresubmitNotifyResult( 'Trybots do not yet work for non-master branches. 
' 'Automatically added \'NOTRY=true\' to the CL\'s description')) if not re.search( r'^NOPRESUBMIT=true$', new_description, re.M | re.I): new_description += "\nNOPRESUBMIT=true" results.append( output_api.PresubmitNotifyResult( 'Branch changes do not run the presubmit checks.')) # Read and process the HASHTAGS file. hashtags_fullpath = os.path.join(change._local_root, 'HASHTAGS') with open(hashtags_fullpath, 'rb') as hashtags_csv: hashtags_reader = csv.reader(hashtags_csv, delimiter=',') for row in hashtags_reader: if not row or row[0].startswith('#'): # Ignore empty lines and comments continue hashtag = row[0] # Search for the hashtag in the description. if re.search('#%s' % hashtag, new_description, re.M | re.I): for mapped_text in row[1:]: # Special case handling for CQ_KEYWORDS_THAT_NEED_APPENDING. appended_description = _HandleAppendingCQKeywords( hashtag, mapped_text, new_description, results, output_api) if appended_description: new_description = appended_description continue # Add the mapped text if it does not already exist in the # CL's description. if not re.search( r'^%s$' % mapped_text, new_description, re.M | re.I): new_description += '\n%s' % mapped_text results.append( output_api.PresubmitNotifyResult( 'Found \'#%s\', automatically added \'%s\' to the CL\'s ' 'description' % (hashtag, mapped_text))) # If the description has changed update it. if new_description != original_description: rietveld_obj.update_description(issue, new_description) return results def _HandleAppendingCQKeywords(hashtag, keyword_and_value, description, results, output_api): """Handles the CQ keywords that need appending if specified in hashtags.""" keyword = keyword_and_value.split('=')[0] if keyword in CQ_KEYWORDS_THAT_NEED_APPENDING: # If the keyword is already in the description then append to it. match = re.search( r'^%s=(.*)$' % keyword, description, re.M | re.I) if match: old_values = match.group(1).split(';') new_value = keyword_and_value.split('=')[1] if new_value in old_values: # Do not need to do anything here. return description # Update the description with the new values. new_description = description.replace( match.group(0), "%s;%s" % (match.group(0), new_value)) results.append( output_api.PresubmitNotifyResult( 'Found \'#%s\', automatically appended \'%s\' to %s in ' 'the CL\'s description' % (hashtag, new_value, keyword))) return new_description return None def CheckChangeOnCommit(input_api, output_api): """Presubmit checks for the change on commit. The following are the presubmit checks: * Check change has one and only one EOL. * Ensures that the Skia tree is open in http://skia-tree-status.appspot.com/. Shows a warning if it is in 'Caution' state and an error if it is in 'Closed' state. """ results = [] results.extend(_CommonChecks(input_api, output_api)) results.extend( _CheckTreeStatus(input_api, output_api, json_url=( SKIA_TREE_STATUS_URL + '/banner-status?format=json'))) results.extend(_CheckLGTMsForPublicAPI(input_api, output_api)) results.extend(_CheckOwnerIsInAuthorsFile(input_api, output_api)) return results
apache-2.0
oarb-projects/deep_learning_projects
dlnd_tv_script_generation-master/dlnd_tv_script_generation-master/problem_unittests.py
26
12046
import numpy as np import tensorflow as tf from tensorflow.contrib import rnn def _print_success_message(): print('Tests Passed') def test_create_lookup_tables(create_lookup_tables): with tf.Graph().as_default(): test_text = ''' Moe_Szyslak Moe's Tavern Where the elite meet to drink Bart_Simpson Eh yeah hello is Mike there Last name Rotch Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick Moe_Szyslak Whats the matter Homer You're not your normal effervescent self Homer_Simpson I got my problems Moe Give me another one Moe_Szyslak Homer hey you should not drink to forget your problems Barney_Gumble Yeah you should only drink to enhance your social skills''' test_text = test_text.lower() test_text = test_text.split() vocab_to_int, int_to_vocab = create_lookup_tables(test_text) # Check types assert isinstance(vocab_to_int, dict),\ 'vocab_to_int is not a dictionary.' assert isinstance(int_to_vocab, dict),\ 'int_to_vocab is not a dictionary.' # Compare lengths of dicts assert len(vocab_to_int) == len(int_to_vocab),\ 'Length of vocab_to_int and int_to_vocab don\'t match. ' \ 'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab)) # Make sure the dicts have the same words vocab_to_int_word_set = set(vocab_to_int.keys()) int_to_vocab_word_set = set(int_to_vocab.values()) assert not (vocab_to_int_word_set - int_to_vocab_word_set),\ 'vocab_to_int and int_to_vocab don\'t have the same words.' \ '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set) assert not (int_to_vocab_word_set - vocab_to_int_word_set),\ 'vocab_to_int and int_to_vocab don\'t have the same words.' \ '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set) # Make sure the dicts have the same word ids vocab_to_int_word_id_set = set(vocab_to_int.values()) int_to_vocab_word_id_set = set(int_to_vocab.keys()) assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\ 'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \ '{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set) assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\ 'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \ '{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set) # Make sure the dicts make the same lookup missmatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word] assert not missmatches,\ 'Found {} missmatche(s). First missmatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format( len(missmatches), *missmatches[0]) assert len(vocab_to_int) > len(set(test_text))/2,\ 'The length of vocab seems too small. Found a length of {}'.format(len(vocab_to_int)) _print_success_message() def test_get_batches(get_batches): with tf.Graph().as_default(): test_batch_size = 128 test_seq_length = 5 test_int_text = list(range(1000*test_seq_length)) batches = get_batches(test_int_text, test_batch_size, test_seq_length) # Check type assert isinstance(batches, np.ndarray),\ 'Batches is not a Numpy array' # Check shape assert batches.shape == (7, 2, 128, 5),\ 'Batches returned wrong shape. 
Found {}'.format(batches.shape) _print_success_message() def test_tokenize(token_lookup): with tf.Graph().as_default(): symbols = set(['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n']) token_dict = token_lookup() # Check type assert isinstance(token_dict, dict), \ 'Returned type is {}.'.format(type(token_dict)) # Check symbols missing_symbols = symbols - set(token_dict.keys()) unknown_symbols = set(token_dict.keys()) - symbols assert not missing_symbols, \ 'Missing symbols: {}'.format(missing_symbols) assert not unknown_symbols, \ 'Unknown symbols: {}'.format(unknown_symbols) # Check values type bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)] assert not bad_value_type,\ 'Found token as {} type.'.format(bad_value_type[0]) # Check for spaces key_has_spaces = [k for k in token_dict.keys() if ' ' in k] val_has_spaces = [val for val in token_dict.values() if ' ' in val] assert not key_has_spaces,\ 'The key "{}" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0]) assert not val_has_spaces,\ 'The value "{}" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0]) # Check for symbols in values symbol_val = () for symbol in symbols: for val in token_dict.values(): if symbol in val: symbol_val = (symbol, val) assert not symbol_val,\ 'Don\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val) _print_success_message() def test_get_inputs(get_inputs): with tf.Graph().as_default(): input_data, targets, lr = get_inputs() # Check type assert input_data.op.type == 'Placeholder',\ 'Input not a Placeholder.' assert targets.op.type == 'Placeholder',\ 'Targets not a Placeholder.' assert lr.op.type == 'Placeholder',\ 'Learning Rate not a Placeholder.' # Check name assert input_data.name == 'input:0',\ 'Input has bad name. Found name {}'.format(input_data.name) # Check rank input_rank = 0 if input_data.get_shape() == None else len(input_data.get_shape()) targets_rank = 0 if targets.get_shape() == None else len(targets.get_shape()) lr_rank = 0 if lr.get_shape() == None else len(lr.get_shape()) assert input_rank == 2,\ 'Input has wrong rank. Rank {} found.'.format(input_rank) assert targets_rank == 2,\ 'Targets has wrong rank. Rank {} found.'.format(targets_rank) assert lr_rank == 0,\ 'Learning Rate has wrong rank. Rank {} found'.format(lr_rank) _print_success_message() def test_get_init_cell(get_init_cell): with tf.Graph().as_default(): test_batch_size_ph = tf.placeholder(tf.int32) test_rnn_size = 256 cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size) # Check type assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\ 'Cell is wrong type. Found {} type'.format(type(cell)) # Check for name attribute assert hasattr(init_state, 'name'),\ 'Initial state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.' # Check name assert init_state.name == 'initial_state:0',\ 'Initial state doesn\'t have the correct name. Found the name {}'.format(init_state.name) _print_success_message() def test_get_embed(get_embed): with tf.Graph().as_default(): embed_shape = [50, 5, 256] test_input_data = tf.placeholder(tf.int32, embed_shape[:2]) test_vocab_size = 27 test_embed_dim = embed_shape[2] embed = get_embed(test_input_data, test_vocab_size, test_embed_dim) # Check shape assert embed.shape == embed_shape,\ 'Wrong shape. 
Found shape {}'.format(embed.shape) _print_success_message() def test_build_rnn(build_rnn): with tf.Graph().as_default(): test_rnn_size = 256 test_rnn_layer_size = 2 test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size)] * test_rnn_layer_size) test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size]) outputs, final_state = build_rnn(test_cell, test_inputs) # Check name assert hasattr(final_state, 'name'),\ 'Final state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.' assert final_state.name == 'final_state:0',\ 'Final state doesn\'t have the correct name. Found the name {}'.format(final_state.name) # Check shape assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\ 'Outputs has wrong shape. Found shape {}'.format(outputs.get_shape()) assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\ 'Final state wrong shape. Found shape {}'.format(final_state.get_shape()) _print_success_message() def test_build_nn(build_nn): with tf.Graph().as_default(): test_input_data_shape = [128, 5] test_input_data = tf.placeholder(tf.int32, test_input_data_shape) test_rnn_size = 256 test_rnn_layer_size = 2 test_vocab_size = 27 test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size)] * test_rnn_layer_size) logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size) # Check name assert hasattr(final_state, 'name'), \ 'Final state doesn\'t have the "name" attribute. Are you using build_rnn?' assert final_state.name == 'final_state:0', \ 'Final state doesn\'t have the correct name. Found the name {}. Are you using build_rnn?'.format(final_state.name) # Check Shape assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \ 'Outputs has wrong shape. Found shape {}'.format(logits.get_shape()) assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \ 'Final state wrong shape. Found shape {}'.format(final_state.get_shape()) _print_success_message() def test_get_tensors(get_tensors): test_graph = tf.Graph() with test_graph.as_default(): test_input = tf.placeholder(tf.int32, name='input') test_initial_state = tf.placeholder(tf.int32, name='initial_state') test_final_state = tf.placeholder(tf.int32, name='final_state') test_probs = tf.placeholder(tf.float32, name='probs') input_text, initial_state, final_state, probs = get_tensors(test_graph) # Check correct tensor assert input_text == test_input,\ 'Test input is wrong tensor' assert initial_state == test_initial_state, \ 'Initial state is wrong tensor' assert final_state == test_final_state, \ 'Final state is wrong tensor' assert probs == test_probs, \ 'Probabilities is wrong tensor' _print_success_message() def test_pick_word(pick_word): with tf.Graph().as_default(): test_probabilities = np.array([0.1, 0.8, 0.05, 0.05]) test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])} pred_word = pick_word(test_probabilities, test_int_to_vocab) # Check type assert isinstance(pred_word, str),\ 'Predicted word is wrong type. Found {} type.'.format(type(pred_word)) # Check word is from vocab assert pred_word in test_int_to_vocab.values(),\ 'Predicted word not found in int_to_vocab.' _print_success_message()
mit
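An illustrative aside on the preceding record: a minimal sketch of a create_lookup_tables implementation that would satisfy test_create_lookup_tables above. The function is not part of that file and is inferred solely from the test's assertions.

def create_lookup_tables(text):
    """Build word<->id lookup dicts from a list of words (sketch only,
    inferred from the assertions in test_create_lookup_tables)."""
    vocab = sorted(set(text))
    vocab_to_int = {word: idx for idx, word in enumerate(vocab)}
    int_to_vocab = {idx: word for word, idx in vocab_to_int.items()}
    return vocab_to_int, int_to_vocab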
prasanna08/oppia
scripts/linters/codeowner_linter.py
1
14292
# coding: utf-8 # # Copyright 2020 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Lint checks for codeowner file.""" from __future__ import absolute_import # pylint: disable=import-only-modules from __future__ import unicode_literals # pylint: disable=import-only-modules import glob import os import subprocess import python_utils from .. import concurrent_task_utils CODEOWNER_FILEPATH = '.github/CODEOWNERS' # This list needs to be in sync with the important patterns in the CODEOWNERS # file. CODEOWNER_IMPORTANT_PATHS = [ '/scripts/linters/warranted_angular_security_bypasses.py', '/core/controllers/acl_decorators*.py', '/core/controllers/base*.py', '/core/domain/html*.py', '/core/domain/rights_manager*.py', '/core/domain/role_services*.py', '/core/domain/user*.py', '/core/storage/', '/export/', '/manifest.json', '/package.json', '/requirements.txt', '/requirements.in', '/yarn.lock', '/scripts/install_third_party_libs.py', '/.github/', '/.github/CODEOWNERS', '/.github/stale.yml', '/.github/workflows/'] class CodeownerLintChecksManager(python_utils.OBJECT): """Manages codeowner checks.""" def __init__(self, file_cache): """Constructs a CodeownerLintChecksManager object. Args: file_cache: object(FileCache). Provides thread-safe access to cached file content. """ self.file_cache = file_cache self.error_messages = [] self.failed = False def _walk_with_gitignore(self, root, exclude_dirs): """A walk function similar to os.walk but this would ignore the files and directories which is not tracked by git. Also, this will ignore the directories mentioned in exclude_dirs. Args: root: str. The path from where the function should start walking. exclude_dirs: list(str). A list of dir path which should be ignored. Yields: list(str). A list of unignored files. """ dirs, file_paths = [], [] for name in os.listdir(root): if os.path.isdir(os.path.join(root, name)): dirs.append(os.path.join(root, name)) else: file_paths.append(os.path.join(root, name)) yield [ file_path for file_path in file_paths if not self._is_path_ignored( file_path)] for dir_path in dirs: # Adding "/" in the end of the dir path according to the git dir # path structure. if (not self._is_path_ignored(dir_path + '/')) and ( dir_path not in exclude_dirs): for x in self._walk_with_gitignore(dir_path, exclude_dirs): yield x def _is_path_ignored(self, path_to_check): """Checks whether the given path is ignored by git. Args: path_to_check: str. A path to a file or a dir. Returns: bool. Whether the given path is ignored by git. """ command = ['git', 'check-ignore', '-q', path_to_check] # The "git check-ignore <path>" command returns 0 when the path is # ignored otherwise it returns 1. subprocess.call then returns this # returncode. return subprocess.call(command) == 0 def _is_path_contains_frontend_specs(self, path_to_check): """Checks whether if a path contains all spec files. Args: path_to_check: str. A path to a file or a dir. Returns: bool. Whether the given path contains all spec files. 
""" return '*.spec.ts' in path_to_check or '*Spec.ts' in path_to_check def _check_for_important_patterns_at_bottom_of_codeowners( self, important_patterns): """Checks that the most important patterns are at the bottom of the CODEOWNERS file. Arguments: important_patterns: list(str). List of the important patterns for CODEOWNERS file. Returns: tuple(bool, str). A 2-tuple of whether the CODEOWNERS "important pattern" check fails and failed messages list. """ # Check that there are no duplicate elements in the lists. important_patterns_set = set(important_patterns) codeowner_important_paths_set = set(CODEOWNER_IMPORTANT_PATHS) if len(important_patterns_set) != len(important_patterns): error_message = ( '%s --> Duplicate pattern(s) found in critical rules' ' section.' % CODEOWNER_FILEPATH) self.error_messages.append(error_message) self.failed = True if len(codeowner_important_paths_set) != len(CODEOWNER_IMPORTANT_PATHS): error_message = ( 'scripts/linters/pre_commit_linter.py --> Duplicate pattern(s) ' 'found in CODEOWNER_IMPORTANT_PATHS list.') self.error_messages.append(error_message) self.failed = True # Check missing rules by set difference operation. critical_rule_section_minus_list_set = ( important_patterns_set.difference(codeowner_important_paths_set)) list_minus_critical_rule_section_set = ( codeowner_important_paths_set.difference(important_patterns_set)) for rule in critical_rule_section_minus_list_set: error_message = ( '%s --> Rule %s is not present in the ' 'CODEOWNER_IMPORTANT_PATHS list in ' 'scripts/linters/pre_commit_linter.py. Please add this rule in ' 'the mentioned list or remove this rule from the \'Critical ' 'files\' section.' % (CODEOWNER_FILEPATH, rule)) self.error_messages.append(error_message) self.failed = True for rule in list_minus_critical_rule_section_set: error_message = ( '%s --> Rule \'%s\' is not present in the \'Critical files\' ' 'section. Please place it under the \'Critical files\' ' 'section since it is an important rule. Alternatively please ' 'remove it from the \'CODEOWNER_IMPORTANT_PATHS\' list in ' 'scripts/linters/pre_commit_linter.py if it is no longer an ' 'important rule.' % (CODEOWNER_FILEPATH, rule)) self.error_messages.append(error_message) self.failed = True def check_codeowner_file(self): """Checks the CODEOWNERS file for any uncovered dirs/files and also checks that every pattern in the CODEOWNERS file matches at least one file/dir. Note that this checks the CODEOWNERS file according to the glob patterns supported by Python2.7 environment. For more information please refer https://docs.python.org/2/library/glob.html. This function also ensures that the most important rules are at the bottom of the CODEOWNERS file. Returns: TaskResult. A TaskResult object representing the result of the lint check. """ name = 'CODEOWNERS' # Checks whether every pattern in the CODEOWNERS file matches at # least one dir/file. critical_file_section_found = False important_rules_in_critical_section = [] file_patterns = [] dir_patterns = [] for line_num, line in enumerate(self.file_cache.readlines( CODEOWNER_FILEPATH)): stripped_line = line.strip() if '# Critical files' in line: critical_file_section_found = True if stripped_line and stripped_line[0] != '#': if '@' not in line: error_message = ( '%s --> Pattern on line %s doesn\'t have ' 'codeowner' % (CODEOWNER_FILEPATH, line_num + 1)) self.error_messages.append(error_message) self.failed = True else: # Extract the file pattern from the line. 
line_in_concern = line.split('@')[0].strip() # This is being populated for the important rules # check. if critical_file_section_found: important_rules_in_critical_section.append( line_in_concern) # Checks if the path is the full path relative to the # root oppia directory. if not line_in_concern.startswith('/'): error_message = ( '%s --> Pattern on line %s is invalid. Use ' 'full path relative to the root directory' % (CODEOWNER_FILEPATH, line_num + 1)) self.error_messages.append(error_message) self.failed = True # The double asterisks should be allowed only when path # includes all the frontend spec files. if not self._is_path_contains_frontend_specs( line_in_concern): # The double asterisks pattern is supported by the # CODEOWNERS syntax but not the glob in Python 2. # The following condition checks this. if '**' in line_in_concern: error_message = ( '%s --> Pattern on line %s is invalid. ' '\'**\' wildcard not allowed' % ( CODEOWNER_FILEPATH, line_num + 1)) self.error_messages.append(error_message) self.failed = True # Adjustments to the dir paths in CODEOWNERS syntax # for glob-style patterns to match correctly. if line_in_concern.endswith('/'): line_in_concern = line_in_concern[:-1] # The following condition checks whether the specified # path exists in the codebase or not. The CODEOWNERS # syntax has paths starting with '/' which refers to # full path relative to root, but python glob module # does not conform to this logic and literally matches # the '/' character. Therefore the leading '/' has to # be changed to './' for glob patterns to match # correctly. line_in_concern = line_in_concern.replace('/', './', 1) # The checking for path existence won't happen if the path # is getting all the frontend spec files. if not self._is_path_contains_frontend_specs( line_in_concern): if not glob.glob(line_in_concern): error_message = ( '%s --> Pattern on line %s doesn\'t match ' 'any file or directory' % ( CODEOWNER_FILEPATH, line_num + 1)) self.error_messages.append(error_message) self.failed = True # The following list is being populated with the # paths in the CODEOWNERS file with the removal of the # leading '/' to aid in the glob pattern matching in # the next part of the check wherein the valid patterns # are used to check if they cover the entire codebase. if os.path.isdir(line_in_concern): dir_patterns.append(line_in_concern) else: file_patterns.append(line_in_concern) # Checks that every file (except those under the dir represented by # the dir_patterns) is covered under CODEOWNERS. for file_paths in self._walk_with_gitignore('.', dir_patterns): for file_path in file_paths: match = False for file_pattern in file_patterns: if file_path in glob.glob(file_pattern): match = True break if not match: error_message = ( '%s is not listed in the .github/CODEOWNERS file.' % ( file_path)) self.error_messages.append(error_message) self.failed = True self._check_for_important_patterns_at_bottom_of_codeowners( important_rules_in_critical_section) return concurrent_task_utils.TaskResult( name, self.failed, self.error_messages, self.error_messages) def perform_all_lint_checks(self): """Perform all the lint checks and returns the messages returned by all the checks. Returns: list(TaskResult). A list of TaskResult objects representing the results of the lint checks. """ return [self.check_codeowner_file()] def get_linters(file_cache): """Creates CodeownerLintChecksManager object and returns it. Args: file_cache: object(FileCache). Provides thread-safe access to cached file content. 
Returns: tuple(CodeownerLintChecksManager, None). A 2-tuple of custom and third_party linter objects. """ custom_linter = CodeownerLintChecksManager(file_cache) return custom_linter, None
apache-2.0
leedm777/ansible-modules-core
cloud/openstack/_quantum_subnet.py
129
10130
#!/usr/bin/python #coding: utf-8 -*- # (c) 2013, Benno Joy <benno@ansible.com> # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. try: try: from neutronclient.neutron import client except ImportError: from quantumclient.quantum import client from keystoneclient.v2_0 import client as ksclient HAVE_DEPS = True except ImportError: HAVE_DEPS = False DOCUMENTATION = ''' --- module: quantum_subnet deprecated: Deprecated in 2.0. Use os_subnet instead version_added: "1.2" short_description: Add/remove subnet from a network description: - Add/remove subnet from a network options: login_username: description: - login username to authenticate to keystone required: true default: admin login_password: description: - Password of login user required: true default: True login_tenant_name: description: - The tenant name of the login user required: true default: True auth_url: description: - The keystone URL for authentication required: false default: 'http://127.0.0.1:35357/v2.0/' region_name: description: - Name of the region required: false default: None state: description: - Indicate desired state of the resource choices: ['present', 'absent'] default: present network_name: description: - Name of the network to which the subnet should be attached required: true default: None name: description: - The name of the subnet that should be created required: true default: None cidr: description: - The CIDR representation of the subnet that should be assigned to the subnet required: true default: None tenant_name: description: - The name of the tenant for whom the subnet should be created required: false default: None ip_version: description: - The IP version of the subnet 4 or 6 required: false default: 4 enable_dhcp: description: - Whether DHCP should be enabled for this subnet. 
required: false default: true gateway_ip: description: - The ip that would be assigned to the gateway for this subnet required: false default: None dns_nameservers: description: - DNS nameservers for this subnet, comma-separated required: false default: None version_added: "1.4" allocation_pool_start: description: - From the subnet pool the starting address from which the IP should be allocated required: false default: None allocation_pool_end: description: - From the subnet pool the last IP that should be assigned to the virtual machines required: false default: None requirements: - "python >= 2.6" - "python-neutronclient or python-quantumclient" - "python-keystoneclient" ''' EXAMPLES = ''' # Create a subnet for a tenant with the specified subnet - quantum_subnet: state=present login_username=admin login_password=admin login_tenant_name=admin tenant_name=tenant1 network_name=network1 name=net1subnet cidr=192.168.0.0/24" ''' _os_keystone = None _os_tenant_id = None _os_network_id = None def _get_ksclient(module, kwargs): try: kclient = ksclient.Client(username=kwargs.get('login_username'), password=kwargs.get('login_password'), tenant_name=kwargs.get('login_tenant_name'), auth_url=kwargs.get('auth_url')) except Exception, e: module.fail_json(msg = "Error authenticating to the keystone: %s" %e.message) global _os_keystone _os_keystone = kclient return kclient def _get_endpoint(module, ksclient): try: endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL') except Exception, e: module.fail_json(msg = "Error getting network endpoint: %s" % e.message) return endpoint def _get_neutron_client(module, kwargs): _ksclient = _get_ksclient(module, kwargs) token = _ksclient.auth_token endpoint = _get_endpoint(module, _ksclient) kwargs = { 'token': token, 'endpoint_url': endpoint } try: neutron = client.Client('2.0', **kwargs) except Exception, e: module.fail_json(msg = " Error in connecting to neutron: %s" % e.message) return neutron def _set_tenant_id(module): global _os_tenant_id if not module.params['tenant_name']: tenant_name = module.params['login_tenant_name'] else: tenant_name = module.params['tenant_name'] for tenant in _os_keystone.tenants.list(): if tenant.name == tenant_name: _os_tenant_id = tenant.id break if not _os_tenant_id: module.fail_json(msg = "The tenant id cannot be found, please check the parameters") def _get_net_id(neutron, module): kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['network_name'], } try: networks = neutron.list_networks(**kwargs) except Exception, e: module.fail_json("Error in listing neutron networks: %s" % e.message) if not networks['networks']: return None return networks['networks'][0]['id'] def _get_subnet_id(module, neutron): global _os_network_id subnet_id = None _os_network_id = _get_net_id(neutron, module) if not _os_network_id: module.fail_json(msg = "network id of network not found.") else: kwargs = { 'tenant_id': _os_tenant_id, 'name': module.params['name'], } try: subnets = neutron.list_subnets(**kwargs) except Exception, e: module.fail_json( msg = " Error in getting the subnet list:%s " % e.message) if not subnets['subnets']: return None return subnets['subnets'][0]['id'] def _create_subnet(module, neutron): neutron.format = 'json' subnet = { 'name': module.params['name'], 'ip_version': module.params['ip_version'], 'enable_dhcp': module.params['enable_dhcp'], 'tenant_id': _os_tenant_id, 'gateway_ip': module.params['gateway_ip'], 'dns_nameservers': module.params['dns_nameservers'], 'network_id': 
_os_network_id, 'cidr': module.params['cidr'], } if module.params['allocation_pool_start'] and module.params['allocation_pool_end']: allocation_pools = [ { 'start' : module.params['allocation_pool_start'], 'end' : module.params['allocation_pool_end'] } ] subnet.update({'allocation_pools': allocation_pools}) if not module.params['gateway_ip']: subnet.pop('gateway_ip') if module.params['dns_nameservers']: subnet['dns_nameservers'] = module.params['dns_nameservers'].split(',') else: subnet.pop('dns_nameservers') try: new_subnet = neutron.create_subnet(dict(subnet=subnet)) except Exception, e: module.fail_json(msg = "Failure in creating subnet: %s" % e.message) return new_subnet['subnet']['id'] def _delete_subnet(module, neutron, subnet_id): try: neutron.delete_subnet(subnet_id) except Exception, e: module.fail_json( msg = "Error in deleting subnet: %s" % e.message) return True def main(): argument_spec = openstack_argument_spec() argument_spec.update(dict( name = dict(required=True), network_name = dict(required=True), cidr = dict(required=True), tenant_name = dict(default=None), state = dict(default='present', choices=['absent', 'present']), ip_version = dict(default='4', choices=['4', '6']), enable_dhcp = dict(default='true', type='bool'), gateway_ip = dict(default=None), dns_nameservers = dict(default=None), allocation_pool_start = dict(default=None), allocation_pool_end = dict(default=None), )) module = AnsibleModule(argument_spec=argument_spec) if not HAVE_DEPS: module.fail_json(msg='python-keystoneclient and either python-neutronclient or python-quantumclient are required') neutron = _get_neutron_client(module, module.params) _set_tenant_id(module) if module.params['state'] == 'present': subnet_id = _get_subnet_id(module, neutron) if not subnet_id: subnet_id = _create_subnet(module, neutron) module.exit_json(changed = True, result = "Created" , id = subnet_id) else: module.exit_json(changed = False, result = "success" , id = subnet_id) else: subnet_id = _get_subnet_id(module, neutron) if not subnet_id: module.exit_json(changed = False, result = "success") else: _delete_subnet(module, neutron, subnet_id) module.exit_json(changed = True, result = "deleted") # this is magic, see lib/ansible/module.params['common.py from ansible.module_utils.basic import * from ansible.module_utils.openstack import * if __name__ == '__main__': main()
gpl-3.0
vbelakov/h2o
scripts/genSpeeDRFPythonParams.py
11
1027
from pprint import pprint

params = {}

def parseValue(v):
    if v == 'true':
        return 1
    if v == 'false':
        return 0
    try:
        float(v)
        return float(v)
    except ValueError:
        if '.' in v:
            return v.split('.')[-1]
        return v

def process(line):
    global params
    if line.strip()[0] == '_':
        return
    line = line.split('=')
    if len(line) == 1:  # no default value supplied!
        value = None
        name = line[0].split()[-1].strip().strip(';')
    else:
        value = parseValue(line[-1].strip().strip(';'))
        name = line[0].split()[-1].strip()
    if name[0] == '_':
        return
    params[name] = value

def main():
    global params
    with open("../src/main/java/hex/singlenoderf/SpeeDRF.java", 'r') as f:
        readnext = False
        for line in f:
            if readnext:
                process(line)
                readnext = False
                continue
            if "@API" in line:
                readnext = True
    pprint(params)

if __name__ == "__main__":
    main()
apache-2.0
ryfeus/lambda-packs
Selenium_PhantomJS/source/pip/_vendor/requests/packages/chardet/euckrprober.py
2920
1675
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import EUCKRSMModel


class EUCKRProber(MultiByteCharSetProber):
    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCKRSMModel)
        self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
        self.reset()

    def get_charset_name(self):
        return "EUC-KR"
mit
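An illustrative aside on the preceding record: probing a byte string with EUCKRProber. This sketch assumes a standalone chardet 2.x package where the module is importable as chardet.euckrprober; feed() and get_confidence() are inherited from MultiByteCharSetProber, which the file imports, and the sample bytes are only an assumed EUC-KR encoded example.

from chardet.euckrprober import EUCKRProber

prober = EUCKRProber()
prober.feed(b'\xbe\xc8\xb3\xe7')   # assumed EUC-KR sample bytes to examine
print(prober.get_charset_name())   # always "EUC-KR" for this prober
print(prober.get_confidence())     # estimated probability that the input is EUC-KR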
ryfeus/lambda-packs
pytorch/source/setuptools/glibc.py
79
3146
# This file originally from pip: # https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/utils/glibc.py from __future__ import absolute_import import ctypes import re import warnings def glibc_version_string(): "Returns glibc version string, or None if not using glibc." # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen # manpage says, "If filename is NULL, then the returned handle is for the # main program". This way we can let the linker do the work to figure out # which libc our process is actually using. process_namespace = ctypes.CDLL(None) try: gnu_get_libc_version = process_namespace.gnu_get_libc_version except AttributeError: # Symbol doesn't exist -> therefore, we are not linked to # glibc. return None # Call gnu_get_libc_version, which returns a string like "2.5" gnu_get_libc_version.restype = ctypes.c_char_p version_str = gnu_get_libc_version() # py2 / py3 compatibility: if not isinstance(version_str, str): version_str = version_str.decode("ascii") return version_str # Separated out from have_compatible_glibc for easier unit testing def check_glibc_version(version_str, required_major, minimum_minor): # Parse string and check against requested version. # # We use a regexp instead of str.split because we want to discard any # random junk that might come after the minor version -- this might happen # in patched/forked versions of glibc (e.g. Linaro's version of glibc # uses version strings like "2.20-2014.11"). See gh-3588. m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) if not m: warnings.warn("Expected glibc version with 2 components major.minor," " got: %s" % version_str, RuntimeWarning) return False return (int(m.group("major")) == required_major and int(m.group("minor")) >= minimum_minor) def have_compatible_glibc(required_major, minimum_minor): version_str = glibc_version_string() if version_str is None: return False return check_glibc_version(version_str, required_major, minimum_minor) # platform.libc_ver regularly returns completely nonsensical glibc # versions. E.g. on my computer, platform says: # # ~$ python2.7 -c 'import platform; print(platform.libc_ver())' # ('glibc', '2.7') # ~$ python3.5 -c 'import platform; print(platform.libc_ver())' # ('glibc', '2.9') # # But the truth is: # # ~$ ldd --version # ldd (Debian GLIBC 2.22-11) 2.22 # # This is unfortunate, because it means that the linehaul data on libc # versions that was generated by pip 8.1.2 and earlier is useless and # misleading. Solution: instead of using platform, use our code that actually # works. def libc_ver(): """Try to determine the glibc version Returns a tuple of strings (lib, version) which default to empty strings in case the lookup fails. """ glibc_version = glibc_version_string() if glibc_version is None: return ("", "") else: return ("glibc", glibc_version)
mit
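An illustrative aside on the preceding record: using the glibc helpers defined there. The import path setuptools.glibc follows the record's file path and is an assumption; the values shown are examples for a Linux host running glibc 2.22.

from setuptools.glibc import glibc_version_string, have_compatible_glibc, libc_ver

print(glibc_version_string())       # '2.22' on glibc systems, None otherwise
print(have_compatible_glibc(2, 5))  # True when the runtime glibc is 2.x with x >= 5
print(libc_ver())                   # ('glibc', '2.22'), or ('', '') when undetectable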
NalinG/coala
tests/bears/BearTest.py
1
6175
import multiprocessing import unittest from os.path import abspath from coalib.bears.Bear import Bear from coalib.output.printers.LOG_LEVEL import LOG_LEVEL from coalib.processes.communication.LogMessage import LogMessage from coalib.settings.Section import Section from coalib.settings.Setting import Setting class TestBear(Bear): def __init__(self, section, queue): Bear.__init__(self, section, queue) def run(self): self.print("set", "up", delimiter="=") self.err("teardown") self.err() @staticmethod def get_dependencies(): return [BadTestBear] class BadTestBear(Bear): def __init__(self, section, queue): Bear.__init__(self, section, queue) def run(self): raise NotImplementedError class TypedTestBear(Bear): def __init__(self, section, queue): Bear.__init__(self, section, queue) self.was_executed = False def run(self, something: int): self.was_executed = True return [] class BearWithPrerequisites(Bear): prerequisites_fulfilled = True def __init__(self, section, queue, prerequisites_fulfilled): BearWithPrerequisites.prerequisites_fulfilled = prerequisites_fulfilled Bear.__init__(self, section, queue) self.was_executed = False def run(self): self.was_executed = True return [] @classmethod def check_prerequisites(cls): return cls.prerequisites_fulfilled class BearTest(unittest.TestCase): def setUp(self): self.queue = multiprocessing.Queue() self.settings = Section("test_settings") self.uut = TestBear(self.settings, self.queue) def test_simple_api(self): self.assertRaises(TypeError, TestBear, self.settings, 2) self.assertRaises(TypeError, TestBear, None, self.queue) self.assertRaises(NotImplementedError, self.uut.kind) base = Bear(self.settings, None) self.assertRaises(NotImplementedError, base.run) self.assertEqual(base.get_non_optional_settings(), {}) def test_message_queue(self): self.uut.execute() self.check_message(LOG_LEVEL.DEBUG, "Running bear {}...".format("TestBear")) self.check_message(LOG_LEVEL.DEBUG, "set=up") self.check_message(LOG_LEVEL.ERROR, "teardown") def test_bad_bear(self): self.uut = BadTestBear(self.settings, self.queue) self.uut.execute() self.check_message(LOG_LEVEL.DEBUG) self.check_message(LOG_LEVEL.WARNING, "Bear BadTestBear failed to run. 
Take a look at " "debug messages for further information.") # debug message contains custom content, dont test this here self.queue.get() def test_inconvertible(self): self.uut = TypedTestBear(self.settings, self.queue) self.settings.append(Setting("something", "5")) self.uut.execute() self.check_message(LOG_LEVEL.DEBUG) self.assertTrue(self.uut.was_executed) self.settings.append(Setting("something", "nonsense")) self.uut.was_executed = False self.uut.execute() self.check_message(LOG_LEVEL.DEBUG) self.check_message(LOG_LEVEL.WARNING) self.assertTrue(self.queue.empty()) self.assertFalse(self.uut.was_executed) def check_message(self, log_level, message=None): msg = self.queue.get() self.assertIsInstance(msg, LogMessage) if message: self.assertEqual(msg.message, message) self.assertEqual(msg.log_level, log_level, msg) def test_no_queue(self): uut = TestBear(self.settings, None) uut.execute() # No exceptions def test_dependencies(self): self.assertEqual(Bear.get_dependencies(), []) self.assertEqual(Bear.missing_dependencies([]), []) self.assertEqual(Bear.missing_dependencies([BadTestBear]), []) self.assertEqual(TestBear.missing_dependencies([]), [BadTestBear]) self.assertEqual(TestBear.missing_dependencies([BadTestBear]), []) self.assertEqual(TestBear.missing_dependencies([TestBear]), [BadTestBear]) self.assertEqual(TestBear.missing_dependencies([TestBear, BadTestBear]), []) def test_check_prerequisites(self): uut = BearWithPrerequisites(self.settings, self.queue, True) uut.execute() self.check_message(LOG_LEVEL.DEBUG) self.assertTrue(self.queue.empty()) self.assertTrue(uut.was_executed) self.assertRaisesRegex(RuntimeError, "The bear BearWithPrerequisites does not " "fulfill all requirements\\.", BearWithPrerequisites, self.settings, self.queue, False) self.check_message(LOG_LEVEL.WARNING, "The bear BearWithPrerequisites does not fulfill " "all requirements.") self.assertTrue(self.queue.empty()) self.assertRaisesRegex(RuntimeError, "The bear BearWithPrerequisites does not " "fulfill all requirements\\. Just because " "I want to\\.", BearWithPrerequisites, self.settings, self.queue, "Just because I want to.") self.check_message(LOG_LEVEL.WARNING, "The bear BearWithPrerequisites does not fulfill " "all requirements. Just because I want to.") self.assertTrue(self.queue.empty()) def test_get_config_dir(self): section = Section("default") section.append(Setting("files", "**", "/path/to/dir/config")) uut = TestBear(section, None) self.assertEqual(uut.get_config_dir(), abspath("/path/to/dir"))
agpl-3.0
zeaphoo/cocopot
scripts/bench.py
2
9763
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2014 by Rackspace Hosting, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import argparse from collections import defaultdict from decimal import Decimal import gc import random import sys import timeit try: import cProfile except ImportError: import profile as cProfile try: import guppy except ImportError: heapy = None else: heapy = guppy.hpy() try: import pprofile except ImportError: pprofile = None import falcon.testing as helpers import os import sys import random def falcon(body, headers): import falcon path = '/hello/{account_id}/test' falcon_app = falcon.API('text/plain') # def ask(req, resp, params): # params['answer'] = 42 # @falcon.before(ask) class HelloResource: def on_get(self, req, resp, account_id): user_agent = req.user_agent # NOQA limit = req.get_param('limit') or '10' # NOQA resp.data = body resp.set_headers(headers) falcon_app.add_route(path, HelloResource()) return falcon_app def flask(body, headers): import flask path = '/hello/<account_id>/test' flask_app = flask.Flask('hello') @flask_app.route(path) def hello(account_id): request = flask.request user_agent = request.headers['User-Agent'] # NOQA limit = request.args.get('limit', '10') # NOQA return flask.Response(body, headers=headers, mimetype='text/plain') return flask_app def bottle(body, headers): import bottle path = '/hello/<account_id>/test' @bottle.route(path) def hello(account_id): user_agent = bottle.request.headers['User-Agent'] # NOQA limit = bottle.request.query.limit or '10' # NOQA return bottle.Response(body, headers=headers) return bottle.default_app() def werkzeug(body, headers): import werkzeug.wrappers as werkzeug from werkzeug.routing import Map, Rule path = '/hello/<account_id>/test' url_map = Map([Rule(path, endpoint='hello')]) @werkzeug.Request.application def hello(request): user_agent = request.headers['User-Agent'] # NOQA limit = request.args.get('limit', '10') # NOQA adapter = url_map.bind_to_environ(request.environ) # NOQA endpoint, values = adapter.match() # NOQA aid = values['account_id'] # NOQA return werkzeug.Response(body, headers=headers, mimetype='text/plain') return hello def cocopot(body, headers): import cocopot path = '/hello/<account_id>/test' cocopot_app = cocopot.Cocopot('hello') @cocopot_app.route(path) def hello(account_id): request = cocopot.request user_agent = request.headers['User-Agent'] # NOQA limit = request.args.get('limit', '10') # NOQA return cocopot.Response(body, headers=headers, mimetype='text/plain') return cocopot_app def bench(name, iterations, env, stat_memory): func = create_bench(name, env) gc.collect() heap_diff = None if heapy and stat_memory: heap_before = heapy.heap() total_sec = timeit.timeit(func, setup=gc.enable, number=iterations) if heapy and stat_memory: heap_diff = heapy.heap() - heap_before sec_per_req = Decimal(str(total_sec)) / Decimal(str(iterations)) sys.stdout.write('.') sys.stdout.flush() return (name, sec_per_req, heap_diff) def profile(name, env, 
filename=None, verbose=False): if filename: filename = name + '-' + filename print('Profiling %s ==> %s' % (name, filename)) else: filename = None title = name + ' profile' print() print('=' * len(title)) print(title) print('=' * len(title)) func = create_bench(name, env) gc.collect() code = 'for x in range(10000): func()' if verbose: if pprofile is None: print('pprofile not found. Please install pprofile and try again.') return pprofile.runctx(code, locals(), globals(), filename=filename) else: cProfile.runctx(code, locals(), globals(), sort='tottime', filename=filename) BODY = helpers.rand_string(10240, 10240) # NOQA HEADERS = {'X-Test': 'Funky Chicken'} # NOQA def create_bench(name, env): srmock = helpers.StartResponseMock() function = name.lower().replace('-', '_') app = eval('{0}(BODY, HEADERS)'.format(function)) def bench(): app(env, srmock) if srmock.status != '200 OK': raise AssertionError(srmock.status + ' != 200 OK') return bench def consolidate_datasets(datasets): results = defaultdict(list) for dataset in datasets: for name, sec_per_req, _ in dataset: results[name].append(sec_per_req) return [(name, min(vector)) for name, vector in results.items()] def round_to_int(dec): return int(dec.to_integral_value()) def avg(array): return sum(array) / len(array) def hello_env(): request_headers = {'Content-Type': 'application/json'} return helpers.create_environ('/hello/584/test', query_string='limit=10&thing=ab', headers=request_headers) def queues_env(): request_headers = {'Content-Type': 'application/json'} path = ('/v1/852809/queues/0fd4c8c6-bd72-11e2-8e47-db5ebd4c8125' '/claims/db5ebd4c8125') qs = 'limit=10&thing=a%20b&x=%23%24' return helpers.create_environ(path, query_string=qs, headers=request_headers) def get_env(framework): return queues_env() if framework == 'falcon-ext' else hello_env() def run(frameworks, trials, iterations, stat_memory): # Skip any frameworks that are not installed for name in frameworks: try: create_bench(name, hello_env()) except ImportError as ex: print(ex) print('Skipping missing library: ' + name) del frameworks[frameworks.index(name)] print() if not frameworks: print('Nothing to do.\n') return datasets = [] for r in range(trials): random.shuffle(frameworks) sys.stdout.write('Benchmarking, Trial %d of %d' % (r + 1, trials)) sys.stdout.flush() dataset = [bench(framework, iterations, get_env(framework), stat_memory) for framework in frameworks] datasets.append(dataset) print('done.') return datasets def main(): frameworks = [ 'bottle', 'falcon', 'flask', 'werkzeug', 'cocopot' ] parser = argparse.ArgumentParser(description="Falcon benchmark runner") parser.add_argument('-b', '--benchmark', type=str, action='append', choices=frameworks, dest='frameworks', nargs='+') parser.add_argument('-i', '--iterations', type=int, default=50000) parser.add_argument('-t', '--trials', type=int, default=3) parser.add_argument('-p', '--profile', type=str, choices=['standard', 'verbose']) parser.add_argument('-o', '--profile-output', type=str, default=None) parser.add_argument('-m', '--stat-memory', action='store_true') args = parser.parse_args() if args.stat_memory and heapy is None: print('WARNING: Guppy not installed; memory stats are unavailable.\n') if args.frameworks: frameworks = args.frameworks # Normalize frameworks type normalized_frameworks = [] for one_or_many in frameworks: if isinstance(one_or_many, list): normalized_frameworks.extend(one_or_many) else: normalized_frameworks.append(one_or_many) frameworks = normalized_frameworks # Profile? 
if args.profile: for name in frameworks: profile(name, get_env(name), filename=args.profile_output, verbose=(args.profile == 'verbose')) print() return # Otherwise, benchmark datasets = run(frameworks, args.trials, args.iterations, args.stat_memory) dataset = consolidate_datasets(datasets) dataset = sorted(dataset, key=lambda r: r[1]) baseline = dataset[-1][1] print('\nResults:\n') for i, (name, sec_per_req) in enumerate(dataset): req_per_sec = round_to_int(Decimal(1) / sec_per_req) us_per_req = (sec_per_req * Decimal(10 ** 6)) factor = round_to_int(baseline / sec_per_req) print('{3}. {0:.<15s}{1:.>06d} req/sec or {2: >3.2f} μs/req ({4}x)'. format(name, req_per_sec, us_per_req, i + 1, factor)) if heapy and args.stat_memory: print() for name, _, heap_diff in datasets[0]: title = 'Memory change induced by ' + name print() print('=' * len(title)) print(title) print('=' * len(title)) print(heap_diff) print() if __name__ == '__main__': main()
mit
findayu/picasso-graphic
tools/gyp/pylib/gyp/MSVSProject.py
2720
6387
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio project reader/writer.""" import gyp.common import gyp.easy_xml as easy_xml #------------------------------------------------------------------------------ class Tool(object): """Visual Studio tool.""" def __init__(self, name, attrs=None): """Initializes the tool. Args: name: Tool name. attrs: Dict of tool attributes; may be None. """ self._attrs = attrs or {} self._attrs['Name'] = name def _GetSpecification(self): """Creates an element for the tool. Returns: A new xml.dom.Element for the tool. """ return ['Tool', self._attrs] class Filter(object): """Visual Studio filter - that is, a virtual folder.""" def __init__(self, name, contents=None): """Initializes the folder. Args: name: Filter (folder) name. contents: List of filenames and/or Filter objects contained. """ self.name = name self.contents = list(contents or []) #------------------------------------------------------------------------------ class Writer(object): """Visual Studio XML project writer.""" def __init__(self, project_path, version, name, guid=None, platforms=None): """Initializes the project. Args: project_path: Path to the project file. version: Format version to emit. name: Name of the project. guid: GUID to use for project, if not None. platforms: Array of string, the supported platforms. If null, ['Win32'] """ self.project_path = project_path self.version = version self.name = name self.guid = guid # Default to Win32 for platforms. if not platforms: platforms = ['Win32'] # Initialize the specifications of the various sections. self.platform_section = ['Platforms'] for platform in platforms: self.platform_section.append(['Platform', {'Name': platform}]) self.tool_files_section = ['ToolFiles'] self.configurations_section = ['Configurations'] self.files_section = ['Files'] # Keep a dict keyed on filename to speed up access. self.files_dict = dict() def AddToolFile(self, path): """Adds a tool file to the project. Args: path: Relative path from project to tool file. """ self.tool_files_section.append(['ToolFile', {'RelativePath': path}]) def _GetSpecForConfiguration(self, config_type, config_name, attrs, tools): """Returns the specification for a configuration. Args: config_type: Type of configuration node. config_name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Returns: """ # Handle defaults if not attrs: attrs = {} if not tools: tools = [] # Add configuration node and its attributes node_attrs = attrs.copy() node_attrs['Name'] = config_name specification = [config_type, node_attrs] # Add tool nodes and their attributes if tools: for t in tools: if isinstance(t, Tool): specification.append(t._GetSpecification()) else: specification.append(Tool(t)._GetSpecification()) return specification def AddConfig(self, name, attrs=None, tools=None): """Adds a configuration to the project. Args: name: Configuration name. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. """ spec = self._GetSpecForConfiguration('Configuration', name, attrs, tools) self.configurations_section.append(spec) def _AddFilesToNode(self, parent, files): """Adds files and/or filters to the parent node. Args: parent: Destination node files: A list of Filter objects and/or relative paths to files. 
Will call itself recursively, if the files list contains Filter objects. """ for f in files: if isinstance(f, Filter): node = ['Filter', {'Name': f.name}] self._AddFilesToNode(node, f.contents) else: node = ['File', {'RelativePath': f}] self.files_dict[f] = node parent.append(node) def AddFiles(self, files): """Adds files to the project. Args: files: A list of Filter objects and/or relative paths to files. This makes a copy of the file/filter tree at the time of this call. If you later add files to a Filter object which was passed into a previous call to AddFiles(), it will not be reflected in this project. """ self._AddFilesToNode(self.files_section, files) # TODO(rspangler) This also doesn't handle adding files to an existing # filter. That is, it doesn't merge the trees. def AddFileConfig(self, path, config, attrs=None, tools=None): """Adds a configuration to a file. Args: path: Relative path to the file. config: Name of configuration to add. attrs: Dict of configuration attributes; may be None. tools: List of tools (strings or Tool objects); may be None. Raises: ValueError: Relative path does not match any file added via AddFiles(). """ # Find the file node with the right relative path parent = self.files_dict.get(path) if not parent: raise ValueError('AddFileConfig: file "%s" not in project.' % path) # Add the config to the file node spec = self._GetSpecForConfiguration('FileConfiguration', config, attrs, tools) parent.append(spec) def WriteIfChanged(self): """Writes the project file.""" # First create XML content definition content = [ 'VisualStudioProject', {'ProjectType': 'Visual C++', 'Version': self.version.ProjectVersion(), 'Name': self.name, 'ProjectGUID': self.guid, 'RootNamespace': self.name, 'Keyword': 'Win32Proj' }, self.platform_section, self.tool_files_section, self.configurations_section, ['References'], # empty section self.files_section, ['Globals'] # empty section ] easy_xml.WriteXmlIfChanged(content, self.project_path, encoding="Windows-1252")
bsd-3-clause
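An illustrative aside on the preceding record: a sketch of driving the Writer class to emit a .vcproj file. The _StubVersion class stands in for gyp's MSVSVersion object (only its ProjectVersion() method is used by WriteIfChanged) and is an assumption rather than part of gyp's API.

from gyp.MSVSProject import Writer, Tool, Filter

class _StubVersion(object):
    def ProjectVersion(self):
        return '9.00'

project = Writer('hello.vcproj', _StubVersion(), 'hello',
                 guid='{01234567-89AB-CDEF-0123-456789ABCDEF}')
project.AddConfig('Debug|Win32',
                  attrs={'ConfigurationType': '1'},
                  tools=[Tool('VCCLCompilerTool', {'Optimization': '0'})])
project.AddFiles([Filter('src', contents=['src\\main.cc'])])
project.WriteIfChanged()  # writes hello.vcproj only when the XML content changed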
leighpauls/k2cro4
tools/bisect-builds.py
2
25793
#!/usr/bin/env python # Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Snapshot Build Bisect Tool This script bisects a snapshot archive using binary search. It starts at a bad revision (it will try to guess HEAD) and asks for a last known-good revision. It will then binary search across this revision range by downloading, unzipping, and opening Chromium for you. After testing the specific revision, it will ask you whether it is good or bad before continuing the search. """ # The root URL for storage. BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots' # The root URL for official builds. OFFICIAL_BASE_URL = 'http://master.chrome.corp.google.com/official_builds' # Changelogs URL. CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \ 'perf/dashboard/ui/changelog.html?url=/trunk/src&range=%d%%3A%d' # Official Changelogs URL. OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/'\ 'changelog?old_version=%s&new_version=%s' # DEPS file URL. DEPS_FILE= 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d' # WebKit Changelogs URL. WEBKIT_CHANGELOG_URL = 'http://trac.webkit.org/log/' \ 'trunk/?rev=%d&stop_rev=%d&verbose=on&limit=10000' DONE_MESSAGE_GOOD_MIN = 'You are probably looking for a change made after %s ' \ '(known good), but no later than %s (first known bad).' DONE_MESSAGE_GOOD_MAX = 'You are probably looking for a change made after %s ' \ '(known bad), but no later than %s (first known good).' ############################################################################### import math import optparse import os import pipes import re import shutil import subprocess import sys import tempfile import threading import urllib from distutils.version import LooseVersion from xml.etree import ElementTree import zipfile class PathContext(object): """A PathContext is used to carry the information used to construct URLs and paths when dealing with the storage server and archives.""" def __init__(self, platform, good_revision, bad_revision, is_official): super(PathContext, self).__init__() # Store off the input parameters. self.platform = platform # What's passed in to the '-a/--archive' option. self.good_revision = good_revision self.bad_revision = bad_revision self.is_official = is_official # The name of the ZIP file in a revision directory on the server. self.archive_name = None # Set some internal members: # _listing_platform_dir = Directory that holds revisions. Ends with a '/'. # _archive_extract_dir = Uncompressed directory in the archive_name file. # _binary_name = The name of the executable to run. 
if self.platform == 'linux' or self.platform == 'linux64': self._binary_name = 'chrome' elif self.platform == 'mac': self.archive_name = 'chrome-mac.zip' self._archive_extract_dir = 'chrome-mac' elif self.platform == 'win': self.archive_name = 'chrome-win32.zip' self._archive_extract_dir = 'chrome-win32' self._binary_name = 'chrome.exe' else: raise Exception('Invalid platform: %s' % self.platform) if is_official: if self.platform == 'linux': self._listing_platform_dir = 'lucid32bit/' self.archive_name = 'chrome-lucid32bit.zip' self._archive_extract_dir = 'chrome-lucid32bit' elif self.platform == 'linux64': self._listing_platform_dir = 'lucid64bit/' self.archive_name = 'chrome-lucid64bit.zip' self._archive_extract_dir = 'chrome-lucid64bit' elif self.platform == 'mac': self._listing_platform_dir = 'mac/' self._binary_name = 'Google Chrome.app/Contents/MacOS/Google Chrome' elif self.platform == 'win': self._listing_platform_dir = 'win/' else: if self.platform == 'linux' or self.platform == 'linux64': self.archive_name = 'chrome-linux.zip' self._archive_extract_dir = 'chrome-linux' if self.platform == 'linux': self._listing_platform_dir = 'Linux/' elif self.platform == 'linux64': self._listing_platform_dir = 'Linux_x64/' elif self.platform == 'mac': self._listing_platform_dir = 'Mac/' self._binary_name = 'Chromium.app/Contents/MacOS/Chromium' elif self.platform == 'win': self._listing_platform_dir = 'Win/' def GetListingURL(self, marker=None): """Returns the URL for a directory listing, with an optional marker.""" marker_param = '' if marker: marker_param = '&marker=' + str(marker) return BASE_URL + '/?delimiter=/&prefix=' + self._listing_platform_dir + \ marker_param def GetDownloadURL(self, revision): """Gets the download URL for a build archive of a specific revision.""" if self.is_official: return "%s/%s/%s%s" % ( OFFICIAL_BASE_URL, revision, self._listing_platform_dir, self.archive_name) else: return "%s/%s%s/%s" % ( BASE_URL, self._listing_platform_dir, revision, self.archive_name) def GetLastChangeURL(self): """Returns a URL to the LAST_CHANGE file.""" return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE' def GetLaunchPath(self): """Returns a relative path (presumably from the archive extraction location) that is used to run the executable.""" return os.path.join(self._archive_extract_dir, self._binary_name) def ParseDirectoryIndex(self): """Parses the Google Storage directory listing into a list of revision numbers.""" def _FetchAndParse(url): """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If next-marker is not None, then the listing is a partial listing and another fetch should be performed with next-marker being the marker= GET parameter.""" handle = urllib.urlopen(url) document = ElementTree.parse(handle) # All nodes in the tree are namespaced. Get the root's tag name to extract # the namespace. Etree does namespaces as |{namespace}tag|. root_tag = document.getroot().tag end_ns_pos = root_tag.find('}') if end_ns_pos == -1: raise Exception("Could not locate end namespace for directory index") namespace = root_tag[:end_ns_pos + 1] # Find the prefix (_listing_platform_dir) and whether or not the list is # truncated. prefix_len = len(document.find(namespace + 'Prefix').text) next_marker = None is_truncated = document.find(namespace + 'IsTruncated') if is_truncated is not None and is_truncated.text.lower() == 'true': next_marker = document.find(namespace + 'NextMarker').text # Get a list of all the revisions. 
all_prefixes = document.findall(namespace + 'CommonPrefixes/' + namespace + 'Prefix') # The <Prefix> nodes have content of the form of # |_listing_platform_dir/revision/|. Strip off the platform dir and the # trailing slash to just have a number. revisions = [] for prefix in all_prefixes: revnum = prefix.text[prefix_len:-1] try: revnum = int(revnum) revisions.append(revnum) except ValueError: pass return (revisions, next_marker) # Fetch the first list of revisions. (revisions, next_marker) = _FetchAndParse(self.GetListingURL()) # If the result list was truncated, refetch with the next marker. Do this # until an entire directory listing is done. while next_marker: next_url = self.GetListingURL(next_marker) (new_revisions, next_marker) = _FetchAndParse(next_url) revisions.extend(new_revisions) return revisions def GetRevList(self): """Gets the list of revision numbers between self.good_revision and self.bad_revision.""" # Download the revlist and filter for just the range between good and bad. minrev = min(self.good_revision, self.bad_revision) maxrev = max(self.good_revision, self.bad_revision) revlist = map(int, self.ParseDirectoryIndex()) revlist = [x for x in revlist if x >= int(minrev) and x <= int(maxrev)] revlist.sort() return revlist def GetOfficialBuildsList(self): """Gets the list of official build numbers between self.good_revision and self.bad_revision.""" # Download the revlist and filter for just the range between good and bad. minrev = min(self.good_revision, self.bad_revision) maxrev = max(self.good_revision, self.bad_revision) handle = urllib.urlopen(OFFICIAL_BASE_URL) dirindex = handle.read() handle.close() build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex) final_list = [] i = 0 parsed_build_numbers = [LooseVersion(x) for x in build_numbers] for build_number in sorted(parsed_build_numbers): path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \ self._listing_platform_dir + self.archive_name i = i + 1 try: connection = urllib.urlopen(path) connection.close() if build_number > maxrev: break if build_number >= minrev: final_list.append(str(build_number)) except urllib.HTTPError, e: pass return final_list def UnzipFilenameToDir(filename, dir): """Unzip |filename| to directory |dir|.""" cwd = os.getcwd() if not os.path.isabs(filename): filename = os.path.join(cwd, filename) zf = zipfile.ZipFile(filename) # Make base. if not os.path.isdir(dir): os.mkdir(dir) os.chdir(dir) # Extract files. for info in zf.infolist(): name = info.filename if name.endswith('/'): # dir if not os.path.isdir(name): os.makedirs(name) else: # file dir = os.path.dirname(name) if not os.path.isdir(dir): os.makedirs(dir) out = open(name, 'wb') out.write(zf.read(name)) out.close() # Set permissions. Permission info in external_attr is shifted 16 bits. os.chmod(name, info.external_attr >> 16L) os.chdir(cwd) def FetchRevision(context, rev, filename, quit_event=None, progress_event=None): """Downloads and unzips revision |rev|. @param context A PathContext instance. @param rev The Chromium revision number/tag to download. @param filename The destination for the downloaded file. @param quit_event A threading.Event which will be set by the master thread to indicate that the download should be aborted. @param progress_event A threading.Event which will be set by the master thread to indicate that the progress of the download should be displayed. 
""" def ReportHook(blocknum, blocksize, totalsize): if quit_event and quit_event.isSet(): raise RuntimeError("Aborting download of revision %s" % str(rev)) if progress_event and progress_event.isSet(): size = blocknum * blocksize if totalsize == -1: # Total size not known. progress = "Received %d bytes" % size else: size = min(totalsize, size) progress = "Received %d of %d bytes, %.2f%%" % ( size, totalsize, 100.0 * size / totalsize) # Send a \r to let all progress messages use just one line of output. sys.stdout.write("\r" + progress) sys.stdout.flush() download_url = context.GetDownloadURL(rev) try: urllib.urlretrieve(download_url, filename, ReportHook) if progress_event and progress_event.isSet(): print except RuntimeError, e: pass def RunRevision(context, revision, zipfile, profile, num_runs, args): """Given a zipped revision, unzip it and run the test.""" print "Trying revision %s..." % str(revision) # Create a temp directory and unzip the revision into it. cwd = os.getcwd() tempdir = tempfile.mkdtemp(prefix='bisect_tmp') UnzipFilenameToDir(zipfile, tempdir) os.chdir(tempdir) # Run the build as many times as specified. testargs = [context.GetLaunchPath(), '--user-data-dir=%s' % profile] + args # The sandbox must be run as root on Official Chrome, so bypass it. if context.is_official and (context.platform == 'linux' or context.platform == 'linux64'): testargs.append('--no-sandbox') for i in range(0, num_runs): subproc = subprocess.Popen(testargs, bufsize=-1, stdout=subprocess.PIPE, stderr=subprocess.PIPE) (stdout, stderr) = subproc.communicate() os.chdir(cwd) try: shutil.rmtree(tempdir, True) except Exception, e: pass return (subproc.returncode, stdout, stderr) def AskIsGoodBuild(rev, official_builds, status, stdout, stderr): """Ask the user whether build |rev| is good or bad.""" # Loop until we get a response that we can parse. while True: response = raw_input('Revision %s is [(g)ood/(b)ad/(u)nknown/(q)uit]: ' % str(rev)) if response and response in ('g', 'b', 'u'): return response if response and response == 'q': raise SystemExit() class DownloadJob(object): """DownloadJob represents a task to download a given Chromium revision.""" def __init__(self, context, name, rev, zipfile): super(DownloadJob, self).__init__() # Store off the input parameters. self.context = context self.name = name self.rev = rev self.zipfile = zipfile self.quit_event = threading.Event() self.progress_event = threading.Event() def Start(self): """Starts the download.""" fetchargs = (self.context, self.rev, self.zipfile, self.quit_event, self.progress_event) self.thread = threading.Thread(target=FetchRevision, name=self.name, args=fetchargs) self.thread.start() def Stop(self): """Stops the download which must have been started previously.""" self.quit_event.set() self.thread.join() os.unlink(self.zipfile) def WaitFor(self): """Prints a message and waits for the download to complete. The download must have been started previously.""" print "Downloading revision %s..." % str(self.rev) self.progress_event.set() # Display progress of download. self.thread.join() def Bisect(platform, official_builds, good_rev=0, bad_rev=0, num_runs=1, try_args=(), profile=None, evaluate=AskIsGoodBuild): """Given known good and known bad revisions, run a binary search on all archived revisions to determine the last known good revision. @param platform Which build to download/run ('mac', 'win', 'linux64', etc.). @param official_builds Specify build type (Chromium or Official build). 
@param good_rev Number/tag of the known good revision. @param bad_rev Number/tag of the known bad revision. @param num_runs Number of times to run each build for asking good/bad. @param try_args A tuple of arguments to pass to the test application. @param profile The name of the user profile to run with. @param evaluate A function which returns 'g' if the argument build is good, 'b' if it's bad or 'u' if unknown. Threading is used to fetch Chromium revisions in the background, speeding up the user's experience. For example, suppose the bounds of the search are good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on whether revision 50 is good or bad, the next revision to check will be either 25 or 75. So, while revision 50 is being checked, the script will download revisions 25 and 75 in the background. Once the good/bad verdict on rev 50 is known: - If rev 50 is good, the download of rev 25 is cancelled, and the next test is run on rev 75. - If rev 50 is bad, the download of rev 75 is cancelled, and the next test is run on rev 25. """ if not profile: profile = 'profile' context = PathContext(platform, good_rev, bad_rev, official_builds) cwd = os.getcwd() print "Downloading list of known revisions..." _GetDownloadPath = lambda rev: os.path.join(cwd, '%s-%s' % (str(rev), context.archive_name)) if official_builds: revlist = context.GetOfficialBuildsList() else: revlist = context.GetRevList() # Get a list of revisions to bisect across. if len(revlist) < 2: # Don't have enough builds to bisect. msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist raise RuntimeError(msg) # Figure out our bookends and first pivot point; fetch the pivot revision. minrev = 0 maxrev = len(revlist) - 1 pivot = maxrev / 2 rev = revlist[pivot] zipfile = _GetDownloadPath(rev) fetch = DownloadJob(context, 'initial_fetch', rev, zipfile) fetch.Start() fetch.WaitFor() # Binary search time! while fetch and fetch.zipfile and maxrev - minrev > 1: if bad_rev < good_rev: min_str, max_str = "bad", "good" else: min_str, max_str = "good", "bad" print 'Bisecting range [%s (%s), %s (%s)].' % (revlist[minrev], min_str, \ revlist[maxrev], max_str) # Pre-fetch next two possible pivots # - down_pivot is the next revision to check if the current revision turns # out to be bad. # - up_pivot is the next revision to check if the current revision turns # out to be good. down_pivot = int((pivot - minrev) / 2) + minrev down_fetch = None if down_pivot != pivot and down_pivot != minrev: down_rev = revlist[down_pivot] down_fetch = DownloadJob(context, 'down_fetch', down_rev, _GetDownloadPath(down_rev)) down_fetch.Start() up_pivot = int((maxrev - pivot) / 2) + pivot up_fetch = None if up_pivot != pivot and up_pivot != maxrev: up_rev = revlist[up_pivot] up_fetch = DownloadJob(context, 'up_fetch', up_rev, _GetDownloadPath(up_rev)) up_fetch.Start() # Run test on the pivot revision. status = None stdout = None stderr = None try: (status, stdout, stderr) = RunRevision(context, rev, fetch.zipfile, profile, num_runs, try_args) except Exception, e: print >>sys.stderr, e fetch.Stop() fetch = None # Call the evaluate function to see if the current revision is good or bad. # On that basis, kill one of the background downloads and complete the # other, as described in the comments above. 
try: answer = evaluate(rev, official_builds, status, stdout, stderr) if answer == 'g' and good_rev < bad_rev or \ answer == 'b' and bad_rev < good_rev: minrev = pivot if down_fetch: down_fetch.Stop() # Kill the download of the older revision. if up_fetch: up_fetch.WaitFor() pivot = up_pivot fetch = up_fetch elif answer == 'b' and good_rev < bad_rev or \ answer == 'g' and bad_rev < good_rev: maxrev = pivot if up_fetch: up_fetch.Stop() # Kill the download of the newer revision. if down_fetch: down_fetch.WaitFor() pivot = down_pivot fetch = down_fetch elif answer == 'u': # Nuke the revision from the revlist and choose a new pivot. revlist.pop(pivot) maxrev -= 1 # Assumes maxrev >= pivot. if maxrev - minrev > 1: # Alternate between using down_pivot or up_pivot for the new pivot # point, without affecting the range. Do this instead of setting the # pivot to the midpoint of the new range because adjacent revisions # are likely affected by the same issue that caused the (u)nknown # response. if up_fetch and down_fetch: fetch = [up_fetch, down_fetch][len(revlist) % 2] elif up_fetch: fetch = up_fetch else: fetch = down_fetch fetch.WaitFor() if fetch == up_fetch: pivot = up_pivot - 1 # Subtracts 1 because revlist was resized. else: pivot = down_pivot zipfile = fetch.zipfile if down_fetch and fetch != down_fetch: down_fetch.Stop() if up_fetch and fetch != up_fetch: up_fetch.Stop() else: assert False, "Unexpected return value from evaluate(): " + answer except SystemExit: print "Cleaning up..." for f in [_GetDownloadPath(revlist[down_pivot]), _GetDownloadPath(revlist[up_pivot])]: try: os.unlink(f) except OSError: pass sys.exit(0) rev = revlist[pivot] return (revlist[minrev], revlist[maxrev]) def GetWebKitRevisionForChromiumRevision(rev): """Returns the webkit revision that was in chromium's DEPS file at chromium revision |rev|.""" # . doesn't match newlines without re.DOTALL, so this is safe. webkit_re = re.compile(r'webkit_revision.:\D*(\d+)') url = urllib.urlopen(DEPS_FILE % rev) m = webkit_re.search(url.read()) url.close() if m: return int(m.group(1)) else: raise Exception('Could not get webkit revision for cr rev %d' % rev) def GetChromiumRevision(url): """Returns the chromium revision read from given URL.""" try: # Location of the latest build revision number return int(urllib.urlopen(url).read()) except Exception, e: print('Could not determine latest revision. This could be bad...') return 999999999 def main(): usage = ('%prog [options] [-- chromium-options]\n' 'Perform binary search on the snapshot builds.\n' '\n' 'Tip: add "-- --no-first-run" to bypass the first run prompts.') parser = optparse.OptionParser(usage=usage) # Strangely, the default help output doesn't include the choice list. choices = ['mac', 'win', 'linux', 'linux64'] # linux-chromiumos lacks a continuous archive http://crbug.com/78158 parser.add_option('-a', '--archive', choices = choices, help = 'The buildbot archive to bisect [%s].' % '|'.join(choices)) parser.add_option('-o', action="store_true", dest='official_builds', help = 'Bisect across official ' + 'Chrome builds (internal only) instead of ' + 'Chromium archives.') parser.add_option('-b', '--bad', type = 'str', help = 'The bad revision to bisect to. Default is HEAD.') parser.add_option('-g', '--good', type = 'str', help = 'The last known good revision to bisect from. ' + 'Default is 0.') parser.add_option('-p', '--profile', '--user-data-dir', type = 'str', help = 'Profile to use; this will not reset every run. 
' + 'Defaults to a clean profile.', default = 'profile') parser.add_option('-t', '--times', type = 'int', help = 'Number of times to run each build before asking ' + 'if it\'s good or bad. Temporary profiles are reused.', default = 1) (opts, args) = parser.parse_args() if opts.archive is None: print 'Error: missing required parameter: --archive' print parser.print_help() return 1 # Create the context. Initialize 0 for the revisions as they are set below. context = PathContext(opts.archive, 0, 0, opts.official_builds) # Pick a starting point, try to get HEAD for this. if opts.bad: bad_rev = opts.bad else: bad_rev = '999.0.0.0' if not opts.official_builds: bad_rev = GetChromiumRevision(context.GetLastChangeURL()) # Find out when we were good. if opts.good: good_rev = opts.good else: good_rev = '0.0.0.0' if opts.official_builds else 0 if opts.official_builds: good_rev = LooseVersion(good_rev) bad_rev = LooseVersion(bad_rev) else: good_rev = int(good_rev) bad_rev = int(bad_rev) if opts.times < 1: print('Number of times to run (%d) must be greater than or equal to 1.' % opts.times) parser.print_help() return 1 (min_chromium_rev, max_chromium_rev) = Bisect( opts.archive, opts.official_builds, good_rev, bad_rev, opts.times, args, opts.profile) # Get corresponding webkit revisions. try: min_webkit_rev = GetWebKitRevisionForChromiumRevision(min_chromium_rev) max_webkit_rev = GetWebKitRevisionForChromiumRevision(max_chromium_rev) except Exception, e: # Silently ignore the failure. min_webkit_rev, max_webkit_rev = 0, 0 # We're done. Let the user know the results in an official manner. if good_rev > bad_rev: print DONE_MESSAGE_GOOD_MAX % (str(min_chromium_rev), str(max_chromium_rev)) else: print DONE_MESSAGE_GOOD_MIN % (str(min_chromium_rev), str(max_chromium_rev)) if min_webkit_rev != max_webkit_rev: print 'WEBKIT CHANGELOG URL:' print ' ' + WEBKIT_CHANGELOG_URL % (max_webkit_rev, min_webkit_rev) print 'CHANGELOG URL:' if opts.official_builds: print OFFICIAL_CHANGELOG_URL % (min_chromium_rev, max_chromium_rev) else: print ' ' + CHANGELOG_URL % (min_chromium_rev, max_chromium_rev) if __name__ == '__main__': sys.exit(main())
bsd-3-clause
leth/nose2
nose2/tests/unit/test_attrib_plugin.py
17
2494
import unittest

from nose2.plugins import attrib
from nose2 import events, session
from nose2.tests._common import TestCase


class TestAttribPlugin(TestCase):
    tags = ['unit']

    def setUp(self):
        class TC_1(TestCase):
            tags = ['a', 'b']

            def test_a(self):
                pass
            test_a.a = 1
            test_a.c = 0

            def test_b(self):
                pass
            test_b.b = 1

        self.TC_1 = TC_1
        self.session = session.Session()
        self.plugin = attrib.AttributeSelector(session=self.session)
        self.plugin.register()

    def test_validate_attribs_with_simple_values(self):
        assert self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('a', '1')]])
        assert self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('a', True)]])
        assert self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('c', False)]])
        assert self.plugin.validateAttrib(
            self.TC_1('test_b'), [[('b', '1')]])
        assert not self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('a', False)]])
        assert not self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('c', True)]])
        assert not self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('a', '2')]])
        assert not self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('b', '1')]])

    def test_validate_attribs_with_callable(self):
        assert self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('a', lambda key, test: True)]])
        assert not self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('a', lambda key, test: False)]])

    def test_validate_attribs_against_list(self):
        assert self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('tags', 'a')]])
        assert self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('tags', 'b')]])
        assert not self.plugin.validateAttrib(
            self.TC_1('test_a'), [[('tags', 'c')]])

    def test_module_loaded_suite_filters_suite(self):
        self.plugin.attribs = ['a']
        suite = unittest.TestSuite()
        suite.addTest(self.TC_1('test_a'))
        suite.addTest(self.TC_1('test_b'))
        event = events.ModuleSuiteEvent(None, None, suite)
        self.session.hooks.moduleLoadedSuite(event)
        self.assertEqual(len(event.suite._tests), 1)
        self.assertEqual(event.suite._tests[0]._testMethodName, 'test_a')
bsd-2-clause
leighpauls/k2cro4
third_party/python_26/Lib/site-packages/win32/lib/pywintypes.py
20
5418
# Magic utility that "redirects" to pywintypesxx.dll def __import_pywin32_system_module__(modname, globs): # This has been through a number of iterations. The problem: how to # locate pywintypesXX.dll when it may be in a number of places, and how # to avoid ever loading it twice. This problem is compounded by the # fact that the "right" way to do this requires win32api, but this # itself requires pywintypesXX. # And the killer problem is that someone may have done 'import win32api' # before this code is called. In that case Windows will have already # loaded pywintypesXX as part of loading win32api - but by the time # we get here, we may locate a different one. This appears to work, but # then starts raising bizarre TypeErrors complaining that something # is not a pywintypes type when it clearly is! # So in what we hope is the last major iteration of this, we now # rely on a _win32sysloader module, implemented in C but not relying # on pywintypesXX.dll. It then can check if the DLL we are looking for # lib is already loaded. import imp, sys, os if not sys.platform.startswith("win32"): # These extensions can be built on Linux via the 'mainwin' toolkit. # Look for a native 'lib{modname}.so' # NOTE: The _win32sysloader module will probably build in this # environment, so it may be better to use that here too. for ext, mode, ext_type in imp.get_suffixes(): if ext_type==imp.C_EXTENSION: for path in sys.path: look = os.path.join(path, "lib" + modname + ext) if os.path.isfile(look): mod = imp.load_module(modname, None, look, (ext, mode, ext_type)) # and fill our namespace with it. globs.update(mod.__dict__) return raise ImportError, "No dynamic module " + modname # See if this is a debug build. for suffix_item in imp.get_suffixes(): if suffix_item[0]=='_d.pyd': suffix = '_d' break else: suffix = "" filename = "%s%d%d%s.dll" % \ (modname, sys.version_info[0], sys.version_info[1], suffix) if hasattr(sys, "frozen"): # If we are running from a frozen program (py2exe, McMillan, freeze) # then we try and load the DLL from our sys.path # XXX - This path may also benefit from _win32sysloader? However, # MarkH has never seen the DLL load problem with py2exe programs... for look in sys.path: # If the sys.path entry is a (presumably) .zip file, use the # directory if os.path.isfile(look): look = os.path.dirname(look) found = os.path.join(look, filename) if os.path.isfile(found): break else: raise ImportError, \ "Module '%s' isn't in frozen sys.path %s" % (modname, sys.path) else: # First see if it already in our process - if so, we must use that. import _win32sysloader found = _win32sysloader.GetModuleFilename(filename) if found is None: # We ask Windows to load it next. This is in an attempt to # get the exact same module loaded should pywintypes be imported # first (which is how we are here) or if, eg, win32api was imported # first thereby implicitly loading the DLL. # Sadly though, it doesn't quite work - if pywintypesxx.dll # is in system32 *and* the executable's directory, on XP SP2, an # import of win32api will cause Windows to load pywintypes # from system32, where LoadLibrary for that name will # load the one in the exe's dir. # That shouldn't really matter though, so long as we only ever # get one loaded. found = _win32sysloader.LoadModule(filename) if found is None: # Windows can't find it - which although isn't relevent here, # means that we *must* be the first win32 import, as an attempt # to import win32api etc would fail when Windows attempts to # locate the DLL. 
# This is most likely to happen for "non-admin" installs, where # we can't put the files anywhere else on the global path. # If there is a version in our Python directory, use that if os.path.isfile(os.path.join(sys.prefix, filename)): found = os.path.join(sys.prefix, filename) if found is None: # Not in the Python directory? Maybe we were installed via # easy_install... if os.path.isfile(os.path.join(os.path.dirname(__file__), filename)): found = os.path.join(os.path.dirname(__file__), filename) if found is None: # give up in disgust. raise ImportError, \ "No system module '%s' (%s)" % (modname, filename) # Python can load the module mod = imp.load_module(modname, None, found, ('.dll', 'rb', imp.C_EXTENSION)) # and fill our namespace with it. globs.update(mod.__dict__) __import_pywin32_system_module__("pywintypes", globals())
bsd-3-clause
txm/potato
django/core/cache/backends/base.py
232
7960
"Base Cache class." import warnings from django.conf import settings from django.core.exceptions import ImproperlyConfigured, DjangoRuntimeWarning from django.utils.encoding import smart_str from django.utils.importlib import import_module class InvalidCacheBackendError(ImproperlyConfigured): pass class CacheKeyWarning(DjangoRuntimeWarning): pass # Memcached does not accept keys longer than this. MEMCACHE_MAX_KEY_LENGTH = 250 def default_key_func(key, key_prefix, version): """ Default function to generate keys. Constructs the key used by all other methods. By default it prepends the `key_prefix'. KEY_FUNCTION can be used to specify an alternate function with custom key making behavior. """ return ':'.join([key_prefix, str(version), smart_str(key)]) def get_key_func(key_func): """ Function to decide which key function to use. Defaults to ``default_key_func``. """ if key_func is not None: if callable(key_func): return key_func else: key_func_module_path, key_func_name = key_func.rsplit('.', 1) key_func_module = import_module(key_func_module_path) return getattr(key_func_module, key_func_name) return default_key_func class BaseCache(object): def __init__(self, params): timeout = params.get('timeout', params.get('TIMEOUT', 300)) try: timeout = int(timeout) except (ValueError, TypeError): timeout = 300 self.default_timeout = timeout options = params.get('OPTIONS', {}) max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300)) try: self._max_entries = int(max_entries) except (ValueError, TypeError): self._max_entries = 300 cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3)) try: self._cull_frequency = int(cull_frequency) except (ValueError, TypeError): self._cull_frequency = 3 self.key_prefix = smart_str(params.get('KEY_PREFIX', '')) self.version = params.get('VERSION', 1) self.key_func = get_key_func(params.get('KEY_FUNCTION', None)) def make_key(self, key, version=None): """Constructs the key used by all other methods. By default it uses the key_func to generate a key (which, by default, prepends the `key_prefix' and 'version'). An different key function can be provided at the time of cache construction; alternatively, you can subclass the cache backend to provide custom key making behavior. """ if version is None: version = self.version new_key = self.key_func(key, self.key_prefix, version) return new_key def add(self, key, value, timeout=None, version=None): """ Set a value in the cache if the key does not already exist. If timeout is given, that timeout will be used for the key; otherwise the default cache timeout will be used. Returns True if the value was stored, False otherwise. """ raise NotImplementedError def get(self, key, default=None, version=None): """ Fetch a given key from the cache. If the key does not exist, return default, which itself defaults to None. """ raise NotImplementedError def set(self, key, value, timeout=None, version=None): """ Set a value in the cache. If timeout is given, that timeout will be used for the key; otherwise the default cache timeout will be used. """ raise NotImplementedError def delete(self, key, version=None): """ Delete a key from the cache, failing silently. """ raise NotImplementedError def get_many(self, keys, version=None): """ Fetch a bunch of keys from the cache. For certain backends (memcached, pgsql) this can be *much* faster when fetching multiple values. Returns a dict mapping each key in keys to its value. If the given key is missing, it will be missing from the response dict. 
""" d = {} for k in keys: val = self.get(k, version=version) if val is not None: d[k] = val return d def has_key(self, key, version=None): """ Returns True if the key is in the cache and has not expired. """ return self.get(key, version=version) is not None def incr(self, key, delta=1, version=None): """ Add delta to value in the cache. If the key does not exist, raise a ValueError exception. """ value = self.get(key, version=version) if value is None: raise ValueError("Key '%s' not found" % key) new_value = value + delta self.set(key, new_value, version=version) return new_value def decr(self, key, delta=1, version=None): """ Subtract delta from value in the cache. If the key does not exist, raise a ValueError exception. """ return self.incr(key, -delta, version=version) def __contains__(self, key): """ Returns True if the key is in the cache and has not expired. """ # This is a separate method, rather than just a copy of has_key(), # so that it always has the same functionality as has_key(), even # if a subclass overrides it. return self.has_key(key) def set_many(self, data, timeout=None, version=None): """ Set a bunch of values in the cache at once from a dict of key/value pairs. For certain backends (memcached), this is much more efficient than calling set() multiple times. If timeout is given, that timeout will be used for the key; otherwise the default cache timeout will be used. """ for key, value in data.items(): self.set(key, value, timeout=timeout, version=version) def delete_many(self, keys, version=None): """ Set a bunch of values in the cache at once. For certain backends (memcached), this is much more efficient than calling delete() multiple times. """ for key in keys: self.delete(key, version=version) def clear(self): """Remove *all* values from the cache at once.""" raise NotImplementedError def validate_key(self, key): """ Warn about keys that would not be portable to the memcached backend. This encourages (but does not force) writing backend-portable cache code. """ if len(key) > MEMCACHE_MAX_KEY_LENGTH: warnings.warn('Cache key will cause errors if used with memcached: ' '%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH), CacheKeyWarning) for char in key: if ord(char) < 33 or ord(char) == 127: warnings.warn('Cache key contains characters that will cause ' 'errors if used with memcached: %r' % key, CacheKeyWarning) def incr_version(self, key, delta=1, version=None): """Adds delta to the cache version for the supplied key. Returns the new version. """ if version is None: version = self.version value = self.get(key, version=version) if value is None: raise ValueError("Key '%s' not found" % key) self.set(key, value, version=version+delta) self.delete(key, version=version) return version+delta def decr_version(self, key, delta=1, version=None): """Substracts delta from the cache version for the supplied key. Returns the new version. """ return self.incr_version(key, -delta, version)
bsd-3-clause
BeiLuoShiMen/nupic
nupic/regions/ImageSensorFilters/AddNoise.py
17
5596
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import numpy from PIL import Image from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter class AddNoise(BaseFilter): """ Add noise to the image. """ def __init__(self, noiseLevel=0.0, doForeground=True, doBackground=False, dynamic=True, noiseThickness=1): """ noiseLevel -- Amount of noise to add, from 0 to 1.0. For black and white images, this means the values of noiseLevel fraction of the pixels will be flipped (e.g. noiseLevel of 0.2 flips 20 percent of the pixels). For grayscale images, each pixel will be modified by up to 255 * noiseLevel (either upwards or downwards). doForeground -- Whether to add noise to the foreground. For black and white images, black pixels are foreground and white pixels are background. For grayscale images, any pixel which does not equal the background color (the ImageSensor 'background' parameter) is foreground, and the rest is background. doBackground -- Whether to add noise to the background (see above). """ BaseFilter.__init__(self) self.noiseLevel = noiseLevel self.doForeground = doForeground self.doBackground = doBackground self.dynamic = dynamic self.noiseThickness = noiseThickness # Generate and save our random state saveState = numpy.random.get_state() numpy.random.seed(0) self._randomState = numpy.random.get_state() numpy.random.set_state(saveState) def process(self, image): """ @param image -- The image to process. Returns a single image, or a list containing one or more images. """ # Get our random state back saveState = numpy.random.get_state() numpy.random.set_state(self._randomState) # Send through parent class first BaseFilter.process(self, image) alpha = image.split()[1] # ----------------------------------------------------------------------- # black and white if self.mode == 'bw': # For black and white images, our doBackground pixels are 255 and our figure pixels # are 0. 
assert self.noiseThickness != 0, "ImageSensor parameter noiseThickness cannot be 0" pixels = numpy.array(image.split()[0].getdata(), dtype=int) (imgWidth,imgHeight) = image.size pixels2d = (numpy.array(pixels)).reshape(imgHeight, imgWidth) noiseArrayW = numpy.floor(imgWidth/float(self.noiseThickness)) noiseArrayH = numpy.floor(imgHeight/float(self.noiseThickness)) thickNoise = numpy.random.random((noiseArrayH, noiseArrayW)) thickNoise = 255*(thickNoise < self.noiseLevel) idxW = numpy.array([int(self.noiseThickness*i) for i in xrange(noiseArrayW)]) idxH = numpy.array([int(self.noiseThickness*i) for i in xrange(noiseArrayH)]) for nt1 in xrange(self.noiseThickness): for nt2 in xrange(self.noiseThickness): submatIdx = numpy.ix_(idxH + nt1, idxW + nt2) if self.doForeground and self.doBackground: pixels2d[submatIdx] ^= thickNoise elif self.doForeground: pixels2d[submatIdx] = (pixels2d[submatIdx]^thickNoise) | pixels2d[submatIdx] elif self.doBackground: pixels2d[submatIdx] = (pixels2d[submatIdx]^thickNoise) & pixels2d[submatIdx] pixels2d = numpy.abs(pixels2d) pixels = pixels2d.reshape(1,imgWidth*imgHeight)[0] # ----------------------------------------------------------------------- # gray-scale elif self.mode == 'gray': pixels = numpy.array(image.split()[0].getdata(), dtype=int) noise = numpy.random.random(len(pixels)) # get array of floats from 0 to 1 # Add +/- self.noiseLevel to each pixel noise = (noise - 0.5) * 2 * self.noiseLevel * 255 mask = numpy.array(alpha.getdata(), dtype=int) != self.background if self.doForeground and self.doBackground: pixels += noise elif self.doForeground: pixels[mask!=0] += noise[mask!=0] elif self.doBackground: pixels[mask==0] += noise[mask==0] pixels = pixels.clip(min=0, max=255) else: raise ValueError("This image mode not supported") # write out the new pixels #from dbgp.client import brk; brk(port=9049) newimage = Image.new(image.mode, image.size) #newimage.putdata([uint(p) for p in pixels]) newimage.putdata(pixels.tolist()) newimage.putalpha(alpha) # If generating dynamic noise, change our random state each time. if self.dynamic: self._randomState = numpy.random.get_state() # Restore random state numpy.random.set_state(saveState) return newimage
agpl-3.0
navrasio/mxnet
example/gluon/word_language_model/model.py
12
2871
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.

import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn, rnn


class RNNModel(gluon.Block):
    """A model with an encoder, recurrent layer, and a decoder."""

    def __init__(self, mode, vocab_size, num_embed, num_hidden,
                 num_layers, dropout=0.5, tie_weights=False, **kwargs):
        super(RNNModel, self).__init__(**kwargs)
        with self.name_scope():
            self.drop = nn.Dropout(dropout)
            self.encoder = nn.Embedding(vocab_size, num_embed,
                                        weight_initializer=mx.init.Uniform(0.1))
            if mode == 'rnn_relu':
                self.rnn = rnn.RNN(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'rnn_tanh':
                self.rnn = rnn.RNN(num_hidden, num_layers, 'tanh', dropout=dropout,
                                   input_size=num_embed)
            elif mode == 'lstm':
                self.rnn = rnn.LSTM(num_hidden, num_layers, dropout=dropout,
                                    input_size=num_embed)
            elif mode == 'gru':
                self.rnn = rnn.GRU(num_hidden, num_layers, dropout=dropout,
                                   input_size=num_embed)
            else:
                raise ValueError("Invalid mode %s. Options are rnn_relu, "
                                 "rnn_tanh, lstm, and gru" % mode)

            if tie_weights:
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden,
                                        params=self.encoder.params)
            else:
                self.decoder = nn.Dense(vocab_size, in_units=num_hidden)

            self.num_hidden = num_hidden

    def forward(self, inputs, hidden):
        emb = self.drop(self.encoder(inputs))
        output, hidden = self.rnn(emb, hidden)
        output = self.drop(output)
        decoded = self.decoder(output.reshape((-1, self.num_hidden)))
        return decoded, hidden

    def begin_state(self, *args, **kwargs):
        return self.rnn.begin_state(*args, **kwargs)
apache-2.0
Kingclove/project4-info3180
server/lib/werkzeug/posixemulation.py
319
3543
# -*- coding: utf-8 -*-
r"""
    werkzeug.posixemulation
    ~~~~~~~~~~~~~~~~~~~~~~~

    Provides a POSIX emulation for some features that are relevant to
    web applications.  The main purpose is to simplify support for
    systems such as Windows NT that are not 100% POSIX compatible.

    Currently this only implements a :func:`rename` function that
    follows POSIX semantics.  Eg: if the target file already exists it
    will be replaced without asking.

    This module was introduced in 0.6.1 and is not a public interface.
    It might become one in later versions of Werkzeug.

    :copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
    :license: BSD, see LICENSE for more details.
"""
import sys
import os
import errno
import time
import random


can_rename_open_file = False
if os.name == 'nt':  # pragma: no cover
    _rename = lambda src, dst: False
    _rename_atomic = lambda src, dst: False

    try:
        import ctypes

        _MOVEFILE_REPLACE_EXISTING = 0x1
        _MOVEFILE_WRITE_THROUGH = 0x8
        _MoveFileEx = ctypes.windll.kernel32.MoveFileExW

        def _rename(src, dst):
            if not isinstance(src, unicode):
                src = unicode(src, sys.getfilesystemencoding())
            if not isinstance(dst, unicode):
                dst = unicode(dst, sys.getfilesystemencoding())
            if _rename_atomic(src, dst):
                return True
            retry = 0
            rv = False
            while not rv and retry < 100:
                rv = _MoveFileEx(src, dst, _MOVEFILE_REPLACE_EXISTING |
                                           _MOVEFILE_WRITE_THROUGH)
                if not rv:
                    time.sleep(0.001)
                    retry += 1
            return rv

        # new in Vista and Windows Server 2008
        _CreateTransaction = ctypes.windll.ktmw32.CreateTransaction
        _CommitTransaction = ctypes.windll.ktmw32.CommitTransaction
        _MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW
        _CloseHandle = ctypes.windll.kernel32.CloseHandle
        can_rename_open_file = True

        def _rename_atomic(src, dst):
            ta = _CreateTransaction(None, 0, 0, 0, 0, 1000, 'Werkzeug rename')
            if ta == -1:
                return False
            try:
                retry = 0
                rv = False
                while not rv and retry < 100:
                    rv = _MoveFileTransacted(src, dst, None, None,
                                             _MOVEFILE_REPLACE_EXISTING |
                                             _MOVEFILE_WRITE_THROUGH, ta)
                    if rv:
                        rv = _CommitTransaction(ta)
                        break
                    else:
                        time.sleep(0.001)
                        retry += 1
                return rv
            finally:
                _CloseHandle(ta)
    except Exception:
        pass

    def rename(src, dst):
        # Try atomic or pseudo-atomic rename
        if _rename(src, dst):
            return
        # Fall back to "move away and replace"
        try:
            os.rename(src, dst)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            old = "%s-%08x" % (dst, random.randint(0, sys.maxint))
            os.rename(dst, old)
            os.rename(src, dst)
            try:
                os.unlink(old)
            except Exception:
                pass
else:
    rename = os.rename
    can_rename_open_file = True
apache-2.0
alexmerser/overholt
overholt/api/stores.py
9
2963
# -*- coding: utf-8 -*-
"""
    overholt.api.stores
    ~~~~~~~~~~~~~~~~~~~

    Store endpoints
"""

from flask import Blueprint, request

from ..forms import NewStoreForm, UpdateStoreForm
from ..services import stores as _stores, products as _products, users as _users
from ..tasks import send_manager_added_email, send_manager_removed_email
from . import OverholtFormError, route

bp = Blueprint('stores', __name__, url_prefix='/stores')


@route(bp, '/')
def list():
    """Returns a list of all store instances."""
    return _stores.all()


@route(bp, '/', methods=['POST'])
def new():
    """Creates a new store. Returns the new store instance."""
    form = NewStoreForm()
    if form.validate_on_submit():
        return _stores.create(**request.json)
    raise OverholtFormError(form.errors)


@route(bp, '/<store_id>')
def show(store_id):
    """Returns a store instance."""
    return _stores.get_or_404(store_id)


@route(bp, '/<store_id>', methods=['PUT'])
def update(store_id):
    """Updates a store. Returns the updated store instance."""
    form = UpdateStoreForm()
    if form.validate_on_submit():
        return _stores.update(_stores.get_or_404(store_id), **request.json)
    raise OverholtFormError(form.errors)


@route(bp, '/<store_id>', methods=['DELETE'])
def delete(store_id):
    """Deletes a store. Returns a 204 response."""
    _stores.delete(_stores.get_or_404(store_id))
    return None, 204


@route(bp, '/<store_id>/products')
def products(store_id):
    """Returns a list of product instances belonging to a store."""
    return _stores.get_or_404(store_id).products


@route(bp, '/<store_id>/products/<product_id>', methods=['PUT'])
def add_product(store_id, product_id):
    """Adds a product to a store. Returns the product instance."""
    return _stores.add_product(_stores.get_or_404(store_id),
                               _products.get_or_404(product_id))


@route(bp, '/<store_id>/products/<product_id>', methods=['DELETE'])
def remove_product(store_id, product_id):
    """Removes a product from a store. Returns a 204 response."""
    _stores.remove_product(_stores.get_or_404(store_id),
                           _products.get_or_404(product_id))
    return None, 204


@route(bp, '/<store_id>/managers')
def managers(store_id):
    return _stores.get_or_404(store_id).managers


@route(bp, '/<store_id>/managers/<user_id>', methods=['PUT'])
def add_manager(store_id, user_id):
    store, manager = _stores.add_manager(_stores.get_or_404(store_id),
                                         _users.get_or_404(user_id))
    send_manager_added_email.delay(manager.email)
    return store


@route(bp, '/<store_id>/managers/<user_id>', methods=['DELETE'])
def remove_manager(store_id, user_id):
    store, manager = _stores.remove_manager(_stores.get_or_404(store_id),
                                            _users.get_or_404(user_id))
    send_manager_removed_email.delay(manager.email)
    return None, 204
mit
hj91/jaikuengine
common/im.py
33
11250
# Copyright 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import re from django.conf import settings from common import api from common import clean from common import exception from common import patterns from common import user from common import util from common.protocol import base from common.protocol import xmpp HELP_HUH = "Sorry, did not understand \"%s\". Send HELP for commands" HELP_WELCOME = "Welcome to %s IM!\n" % (settings.SITE_NAME) HELP_WELCOME_NICK = "Welcome to %s IM, %s!\n" % (settings.SITE_NAME, '%s') HELP_NOT_SIGNED_IN = "You are currently signed out\n" HELP_SIGNED_IN_AS = "You are signed in as '%s'\n" HELP_FOLLOW_ONLY = "You are signed in as a follow-only user\n" HELP_PASSWORD = "Your password is: %s\n" \ "Use it to sign in on the web at http://%s/\n" % ('%s', settings.DOMAIN) HELP_POST = "To post to your stream, just send a message" HELP_CHANNEL_POST = "To post to a channel, start your message with " \ "#channel" HELP_COMMENT = "To comment the latest update from someone, start " \ "with @user" HELP_FOLLOW = "To follow a user or channel, send FOLLOW <user/#channel>" HELP_FOLLOW_NEW = "Send FOLLOW <user/#channel> to just follow a user or " \ "channel without signing up" HELP_LEAVE = "To stop following a user or channel, send LEAVE <user/#channel>" HELP_STOP = "To stop all alerts, send STOP" HELP_START = "To resume alerts, send START" HELP_SIGN_OUT = "To sign out from %s IM, send SIGN OUT" % (settings.SITE_NAME) HELP_DELETE_ME = "To remove your %s account, send DELETE ME" % (settings.SITE_NAME) HELP_SIGN_IN = "Send SIGN IN <screen name> <password> if you already have a " \ "%s account" % (settings.SITE_NAME) HELP_SIGN_UP = "Send SIGN UP <desired screen name> to create a new account" HELP_MORE = "For more commands, type HELP" HELP_FOOTER = "\n" \ "Questions? Visit http://%s/help/im\n" \ "Contact us at support@%s" % (settings.DOMAIN, settings.NS_DOMAIN) HELP_FOOTER_INFORMAL = "\n" \ "How it all works: http://%s/help/im" % (settings.DOMAIN) HELP_OTR = "Your IM client has tried to initiate an OTR (off-the-record) session. However, this bot does not support OTR." HELP_START_NOTIFICATIONS = "IM notifications have been enabled. Send STOP to disable notifications, HELP for commands." HELP_STOP_NOTIFICATIONS = "IM notifications have been disabled. Send START to enable notifications, HELP for commands." 
# TODO(tyler): Merge with validate/clean/nick/whatever NICK_RE = re.compile(r"""^[a-zA-Z][a-zA-Z0-9]{2,15}$""") class ImService(base.Service): handlers = [patterns.SignInHandler, patterns.SignOutHandler, patterns.PromotionHandler, patterns.HelpHandler, patterns.CommentHandler, patterns.OnHandler, patterns.OffHandler, patterns.ChannelPostHandler, patterns.FollowHandler, patterns.LeaveHandler, patterns.PostHandler, ] # TODO(termie): the following should probably be part of some sort of # service interface def response_ok(self, rv=None): return "" def response_error(self, exc): return str(exc) def channel_join(self, from_jid, nick): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError( "You must be signed in to join a channel, please SIGN IN") channel = clean.channel(nick) try: api.channel_join(jid_ref, jid_ref.nick, channel) self.send_message((from_jid,), "%s joined %s" % (jid_ref.nick, channel)) except: self.send_message((from_jid,), "Join FAILED: %s" % channel) def channel_part(self, from_jid, nick): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError( "You must be signed in to leave a channel, please SIGN IN") channel = clean.channel(nick) try: api.channel_part(jid_ref, jid_ref.nick, channel) self.send_message((from_jid,), "%s parted %s" % (jid_ref.nick, channel)) except: self.send_message((from_jid,), "Leave FAILED: %s" % channel) def actor_add_contact(self, from_jid, nick): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError( "You must be signed in to post, please SIGN IN") nick = clean.nick(nick) try: api.actor_add_contact(jid_ref, jid_ref.nick, nick) self.send_message((from_jid,), "%s followed %s" % (jid_ref.nick, nick)) except: self.send_message((from_jid,), "Follow FAILED: %s" % nick) def actor_remove_contact(self, from_jid, nick): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError( "You must be signed in to post, please SIGN IN") nick = clean.nick(nick) try: api.actor_remove_contact(jid_ref, jid_ref.nick, nick) self.send_message((from_jid,), "%s stopped following %s" % (jid_ref.nick, nick)) except: self.send_message((from_jid,), "Leave FAILED: %s" % nick) def send_message(self, to_jid_list, message): self.connection.send_message(to_jid_list, message) def unknown(self, from_jid, message): self.send_message([from_jid], HELP_HUH % message) def sign_in(self, from_jid, nick, password): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if jid_ref: raise exception.ValidationError( "You are already signed in, please SIGN OUT first") user_ref = user.authenticate_user_login(nick, password) if not user_ref: raise exception.ValidationError("Username or password is incorrect") im_ref = api.im_associate(api.ROOT, user_ref.nick, from_jid.base()) welcome = '\n'.join([HELP_WELCOME_NICK % user_ref.display_nick(), HELP_POST, HELP_CHANNEL_POST, HELP_COMMENT, HELP_FOLLOW, HELP_STOP, HELP_MORE, HELP_FOOTER]) self.send_message([from_jid], welcome) def sign_out(self, from_jid): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError("You are not signed in.") im_ref = api.im_disassociate(api.ROOT, jid_ref.nick, from_jid.base()) self.send_message([from_jid], "signed out") def help(self, from_jid): welcome = '\n'.join([HELP_WELCOME, HELP_POST, HELP_CHANNEL_POST, HELP_COMMENT, HELP_FOLLOW, HELP_STOP, HELP_MORE, HELP_FOOTER]) 
self.send_message([from_jid], welcome) def start_notifications(self, from_jid): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError("You are not signed in.") actor_ref = api.settings_change_notify(api.ROOT, jid_ref.nick, im=True) self.send_message([from_jid], HELP_START_NOTIFICATIONS) def stop_notifications(self, from_jid): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError("You are not signed in.") actor_ref = api.settings_change_notify(api.ROOT, jid_ref.nick, im=False) self.send_message([from_jid], HELP_STOP_NOTIFICATIONS) def post(self, from_jid, message): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError( "You must be signed in to post, please SIGN IN") entry_ref = api.post(jid_ref, nick=jid_ref.nick, message=message) def channel_post(self, from_jid, channel_nick, message): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError( "You must be signed in to post, please SIGN IN") comment_ref = api.channel_post( jid_ref, message=message, nick=jid_ref.nick, channel=channel_nick ) def add_comment(self, from_jid, nick, message): jid_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if not jid_ref: raise exception.ValidationError( "You must be signed in to post, please SIGN IN") logging.debug("comment: %s %s %s", nick, jid_ref.nick, message) nick = clean.nick(nick) stream_entry = api.reply_get_cache(sender=nick, target=jid_ref.nick, service='im') if not stream_entry: # Well, or memcache timed it out... Or we crashed... Or... Or... raise exception.ValidationError( 'The message to which you tried to respond doesn\'t exist') api.entry_add_comment(jid_ref, entry=stream_entry.keyname(), content=message, nick=jid_ref.nick, stream=stream_entry.stream) def promote_user(self, from_jid, nick): ji_ref = api.actor_lookup_im(api.ROOT, from_jid.base()) if jid_ref: # TODO(tyler): Should we tell the user who they are? raise exception.ValidationError( "You already have an account and are signed in.") if not NICK_RE.match(nick): raise exception.ValidationError( "Invalid screen name, can only use letters or numbers, 3 to 16 " "characters") # Create the user. (user_create will check to see if the account has # already been created.) password = util.generate_uuid()[:8] # TODO(termie): Must have a first/last name. :( actor = api.user_create(api.ROOT, nick=nick, password=password, given_name=nick, family_name=nick) # link this im account to the user's account (equivalent of SIGN IN) self.sign_in(from_jid, nick, password) # Inform the user of their new password welcome = '\n'.join([HELP_WELCOME_NICK % nick, HELP_PASSWORD % password, HELP_POST, HELP_CHANNEL_POST, HELP_COMMENT, HELP_FOLLOW, HELP_STOP, HELP_MORE, HELP_FOOTER]) self.send_message([from_jid], welcome)
apache-2.0
gylian/sickrage
sickbeard/webserve.py
1
221274
# Author: Nic Wolfe <nic@wolfeden.ca> # URL: http://code.google.com/p/sickbeard/ # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import traceback import os import time import urllib import re import datetime import codecs import sickbeard from sickbeard import config, sab from sickbeard import clients from sickbeard import history, notifiers, processTV from sickbeard import ui from sickbeard import logger, helpers, exceptions, classes, db from sickbeard import encodingKludge as ek from sickbeard import search_queue from sickbeard import image_cache from sickbeard import naming from sickbeard import scene_exceptions from sickbeard import subtitles from sickbeard import network_timezones from sickbeard import sbdatetime from sickbeard.providers import newznab, rsstorrent from sickbeard.common import Quality, Overview, statusStrings, qualityPresetStrings, cpu_presets from sickbeard.common import SNATCHED, UNAIRED, IGNORED, ARCHIVED, WANTED, FAILED, SKIPPED from sickbeard.common import SD, HD720p, HD1080p from sickbeard.exceptions import ex from sickbeard.blackandwhitelist import BlackAndWhiteList from sickbeard.scene_exceptions import get_scene_exceptions from sickbeard.browser import foldersAtPath from sickbeard.scene_numbering import get_scene_numbering, set_scene_numbering, get_scene_numbering_for_show, \ get_xem_numbering_for_show, get_scene_absolute_numbering_for_show, get_xem_absolute_numbering_for_show, \ get_scene_absolute_numbering from lib.dateutil import tz, parser as dateutil_parser from lib.unrar2 import RarFile from lib import adba, subliminal from lib.trakt import TraktAPI from lib.trakt.exceptions import traktException from versionChecker import CheckVersion try: import json except ImportError: from lib import simplejson as json try: import xml.etree.cElementTree as etree except ImportError: import xml.etree.ElementTree as etree from Cheetah.Template import Template as CheetahTemplate from Cheetah.Filters import Filter as CheetahFilter from tornado.routes import route from tornado.web import RequestHandler, HTTPError, authenticated, asynchronous from tornado.gen import coroutine from tornado.ioloop import IOLoop from tornado.concurrent import run_on_executor from concurrent.futures import ThreadPoolExecutor route_locks = {} class html_entities(CheetahFilter): def filter(self, val, **dummy_kw): if isinstance(val, unicode): filtered = val.encode('ascii', 'xmlcharrefreplace') elif val is None: filtered = '' elif isinstance(val, str): try: filtered = val.decode(sickbeard.SYS_ENCODING).encode('ascii', 'xmlcharrefreplace') except UnicodeDecodeError as e: logger.log(u'Unable to decode using {0}, trying utf-8. 
Error is: {1}'.format(sickbeard.SYS_ENCODING, ex(e)),logger.DEBUG) try: filtered = val.decode('utf-8').encode('ascii', 'xmlcharrefreplace') except UnicodeDecodeError as e: logger.log(u'Unable to decode using utf-8, Error is {0}.'.format(ex(e)),logger.ERROR) else: filtered = self.filter(str(val)) return filtered class PageTemplate(CheetahTemplate): def __init__(self, rh, *args, **kwargs): kwargs['file'] = os.path.join(sickbeard.PROG_DIR, "gui/" + sickbeard.GUI_NAME + "/interfaces/default/", kwargs['file']) kwargs['filter'] = html_entities super(PageTemplate, self).__init__(*args, **kwargs) self.sbRoot = sickbeard.WEB_ROOT self.sbHttpPort = sickbeard.WEB_PORT self.sbHttpsPort = sickbeard.WEB_PORT self.sbHttpsEnabled = sickbeard.ENABLE_HTTPS self.sbHandleReverseProxy = sickbeard.HANDLE_REVERSE_PROXY self.sbThemeName = sickbeard.THEME_NAME self.sbLogin = rh.get_current_user() if rh.request.headers['Host'][0] == '[': self.sbHost = re.match("^\[.*\]", rh.request.headers['Host'], re.X | re.M | re.S).group(0) else: self.sbHost = re.match("^[^:]+", rh.request.headers['Host'], re.X | re.M | re.S).group(0) if "X-Forwarded-Host" in rh.request.headers: self.sbHost = rh.request.headers['X-Forwarded-Host'] if "X-Forwarded-Port" in rh.request.headers: sbHttpPort = rh.request.headers['X-Forwarded-Port'] self.sbHttpsPort = sbHttpPort if "X-Forwarded-Proto" in rh.request.headers: self.sbHttpsEnabled = True if rh.request.headers['X-Forwarded-Proto'] == 'https' else False logPageTitle = 'Logs &amp; Errors' if len(classes.ErrorViewer.errors): logPageTitle += ' (' + str(len(classes.ErrorViewer.errors)) + ')' self.logPageTitle = logPageTitle self.sbPID = str(sickbeard.PID) self.menu = [ {'title': 'Home', 'key': 'home'}, {'title': 'Coming Episodes', 'key': 'comingEpisodes'}, {'title': 'History', 'key': 'history'}, {'title': 'Manage', 'key': 'manage'}, {'title': 'Config', 'key': 'config'}, {'title': logPageTitle, 'key': 'errorlogs'}, ] def compile(self, *args, **kwargs): if not os.path.exists(os.path.join(sickbeard.CACHE_DIR, 'cheetah')): os.mkdir(os.path.join(sickbeard.CACHE_DIR, 'cheetah')) kwargs['cacheModuleFilesForTracebacks'] = True kwargs['cacheDirForModuleFiles'] = os.path.join(sickbeard.CACHE_DIR, 'cheetah') return super(PageTemplate, self).compile(*args, **kwargs) class BaseHandler(RequestHandler): def __init__(self, *args, **kwargs): super(BaseHandler, self).__init__(*args, **kwargs) def set_default_headers(self): self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0') def write_error(self, status_code, **kwargs): # handle 404 http errors if status_code == 404: url = self.request.uri if sickbeard.WEB_ROOT and self.request.uri.startswith(sickbeard.WEB_ROOT): url = url[len(sickbeard.WEB_ROOT) + 1:] if url[:3] != 'api': return self.redirect('/') else: self.finish('Wrong API key used') elif self.settings.get("debug") and "exc_info" in kwargs: exc_info = kwargs["exc_info"] trace_info = ''.join(["%s<br/>" % line for line in traceback.format_exception(*exc_info)]) request_info = ''.join(["<strong>%s</strong>: %s<br/>" % (k, self.request.__dict__[k] ) for k in self.request.__dict__.keys()]) error = exc_info[1] self.set_header('Content-Type', 'text/html') self.finish("""<html> <title>%s</title> <body> <h2>Error</h2> <p>%s</p> <h2>Traceback</h2> <p>%s</p> <h2>Request Info</h2> <p>%s</p> <button onclick="window.location='%s/errorlogs/';">View Log(Errors)</button> </body> </html>""" % (error, error, trace_info, request_info, sickbeard.WEB_ROOT)) def redirect(self, url, permanent=False, 
status=None): """Sends a redirect to the given (optionally relative) URL. ----->>>>> NOTE: Removed self.finish <<<<<----- If the ``status`` argument is specified, that value is used as the HTTP status code; otherwise either 301 (permanent) or 302 (temporary) is chosen based on the ``permanent`` argument. The default is 302 (temporary). """ import urlparse from tornado.escape import utf8 if not url.startswith(sickbeard.WEB_ROOT): url = sickbeard.WEB_ROOT + url if self._headers_written: raise Exception("Cannot redirect after headers have been written") if status is None: status = 301 if permanent else 302 else: assert isinstance(status, int) and 300 <= status <= 399 self.set_status(status) self.set_header("Location", urlparse.urljoin(utf8(self.request.uri), utf8(url))) def get_current_user(self, *args, **kwargs): if not isinstance(self, UI) and sickbeard.WEB_USERNAME and sickbeard.WEB_PASSWORD: return self.get_secure_cookie('sickrage_user') else: return True class WebHandler(BaseHandler): def __init__(self, *args, **kwargs): super(WebHandler, self).__init__(*args, **kwargs) self.io_loop = IOLoop.current() executor = ThreadPoolExecutor(50) @authenticated @coroutine def get(self, route, *args, **kwargs): try: # route -> method obj route = route.strip('/').replace('.', '_') or 'index' method = getattr(self, route) results = yield self.async_call(method) self.finish(results) except: logger.log('Failed doing webui request "%s": %s' % (route, traceback.format_exc()), logger.DEBUG) raise HTTPError(404) @run_on_executor def async_call(self, function): try: kwargs = self.request.arguments for arg, value in kwargs.items(): if len(value) == 1: kwargs[arg] = value[0] result = function(**kwargs) return result except: logger.log('Failed doing webui callback: %s' % (traceback.format_exc()), logger.ERROR) raise # post uses get method post = get class LoginHandler(BaseHandler): def get(self, *args, **kwargs): if self.get_current_user(): self.redirect('/home/') else: t = PageTemplate(rh=self, file="login.tmpl") self.finish(t.respond()) def post(self, *args, **kwargs): api_key = None username = sickbeard.WEB_USERNAME password = sickbeard.WEB_PASSWORD if (self.get_argument('username') == username or not username) \ and (self.get_argument('password') == password or not password): api_key = sickbeard.API_KEY if api_key: remember_me = int(self.get_argument('remember_me', default=0) or 0) self.set_secure_cookie('sickrage_user', api_key, expires_days=30 if remember_me > 0 else None) self.redirect('/home/') class LogoutHandler(BaseHandler): def get(self, *args, **kwargs): self.clear_cookie("sickrage_user") self.redirect('/login/') class KeyHandler(RequestHandler): def __init__(self, *args, **kwargs): super(KeyHandler, self).__init__(*args, **kwargs) def get(self, *args, **kwargs): api_key = None try: username = sickbeard.WEB_USERNAME password = sickbeard.WEB_PASSWORD if (self.get_argument('u', None) == username or not username) and \ (self.get_argument('p', None) == password or not password): api_key = sickbeard.API_KEY self.finish({'success': api_key is not None, 'api_key': api_key}) except: logger.log('Failed doing key request: %s' % (traceback.format_exc()), logger.ERROR) self.finish({'success': False, 'error': 'Failed returning results'}) @route('(.*)(/?)') class WebRoot(WebHandler): def __init__(self, *args, **kwargs): super(WebRoot, self).__init__(*args, **kwargs) def index(self): return self.redirect('/home/') def robots_txt(self): """ Keep web crawlers out """ self.set_header('Content-Type', 'text/plain') 
return "User-agent: *\nDisallow: /" def apibuilder(self): t = PageTemplate(rh=self, file="apiBuilder.tmpl") def titler(x): return (helpers.remove_article(x), x)[not x or sickbeard.SORT_ARTICLE] t.sortedShowList = sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name))) myDB = db.DBConnection(row_type="dict") seasonSQLResults = {} episodeSQLResults = {} for curShow in t.sortedShowList: seasonSQLResults[curShow.indexerid] = myDB.select( "SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season DESC", [curShow.indexerid]) for curShow in t.sortedShowList: episodeSQLResults[curShow.indexerid] = myDB.select( "SELECT DISTINCT season,episode FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [curShow.indexerid]) t.seasonSQLResults = seasonSQLResults t.episodeSQLResults = episodeSQLResults if len(sickbeard.API_KEY) == 32: t.apikey = sickbeard.API_KEY else: t.apikey = "api key not generated" return t.respond() def showPoster(self, show=None, which=None): # Redirect initial poster/banner thumb to default images if which[0:6] == 'poster': default_image_name = 'poster.png' else: default_image_name = 'banner.png' # image_path = ek.ek(os.path.join, sickbeard.PROG_DIR, 'gui', 'slick', 'images', default_image_name) static_image_path = os.path.join('/images', default_image_name) if show and sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)): cache_obj = image_cache.ImageCache() image_file_name = None if which == 'poster': image_file_name = cache_obj.poster_path(show) if which == 'poster_thumb' or which == 'small': image_file_name = cache_obj.poster_thumb_path(show) if which == 'banner': image_file_name = cache_obj.banner_path(show) if which == 'banner_thumb': image_file_name = cache_obj.banner_thumb_path(show) if ek.ek(os.path.isfile, image_file_name): static_image_path = os.path.normpath(image_file_name.replace(sickbeard.CACHE_DIR, '/cache')) static_image_path = static_image_path.replace('\\', '/') return self.redirect(static_image_path) def setHomeLayout(self, layout): if layout not in ('poster', 'small', 'banner', 'simple'): layout = 'poster' sickbeard.HOME_LAYOUT = layout return self.redirect("/home/") def setPosterSortBy(self, sort): if sort not in ('name', 'date', 'network', 'progress'): sort = 'name' sickbeard.POSTER_SORTBY = sort sickbeard.save_config() def setPosterSortDir(self, direction): sickbeard.POSTER_SORTDIR = int(direction) sickbeard.save_config() def setHistoryLayout(self, layout): if layout not in ('compact', 'detailed'): layout = 'detailed' sickbeard.HISTORY_LAYOUT = layout return self.redirect("/history/") def toggleDisplayShowSpecials(self, show): sickbeard.DISPLAY_SHOW_SPECIALS = not sickbeard.DISPLAY_SHOW_SPECIALS return self.redirect("/home/displayShow?show=" + show) def setComingEpsLayout(self, layout): if layout not in ('poster', 'banner', 'list', 'calendar'): layout = 'banner' if layout == 'calendar': sickbeard.COMING_EPS_SORT = 'date' sickbeard.COMING_EPS_LAYOUT = layout return self.redirect("/comingEpisodes/") def toggleComingEpsDisplayPaused(self): sickbeard.COMING_EPS_DISPLAY_PAUSED = not sickbeard.COMING_EPS_DISPLAY_PAUSED return self.redirect("/comingEpisodes/") def setComingEpsSort(self, sort): if sort not in ('date', 'network', 'show'): sort = 'date' if sickbeard.COMING_EPS_LAYOUT == 'calendar': sort \ = 'date' sickbeard.COMING_EPS_SORT = sort return self.redirect("/comingEpisodes/") def comingEpisodes(self, layout="None"): today1 = datetime.date.today() today = today1.toordinal() next_week1 = 
(datetime.date.today() + datetime.timedelta(days=7)) next_week = next_week1.toordinal() recently = (datetime.date.today() - datetime.timedelta(days=sickbeard.COMING_EPS_MISSED_RANGE)).toordinal() done_show_list = [] qualList = Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED, IGNORED] myDB = db.DBConnection() sql_results = myDB.select( "SELECT *, tv_shows.status AS show_status FROM tv_episodes, tv_shows WHERE season != 0 AND airdate >= ? AND airdate < ? AND tv_shows.indexer_id = tv_episodes.showid AND tv_episodes.status NOT IN (" + ','.join( ['?'] * len(qualList)) + ")", [today, next_week] + qualList) for cur_result in sql_results: done_show_list.append(int(cur_result["showid"])) more_sql_results = myDB.select( "SELECT *, tv_shows.status AS show_status FROM tv_episodes outer_eps, tv_shows WHERE season != 0 AND showid NOT IN (" + ','.join( ['?'] * len( done_show_list)) + ") AND tv_shows.indexer_id = outer_eps.showid AND airdate = (SELECT airdate FROM tv_episodes inner_eps WHERE inner_eps.season != 0 AND inner_eps.showid = outer_eps.showid AND inner_eps.airdate >= ? ORDER BY inner_eps.airdate ASC LIMIT 1) AND outer_eps.status NOT IN (" + ','.join( ['?'] * len(Quality.DOWNLOADED + Quality.SNATCHED)) + ")", done_show_list + [next_week] + Quality.DOWNLOADED + Quality.SNATCHED) sql_results += more_sql_results more_sql_results = myDB.select( "SELECT *, tv_shows.status AS show_status FROM tv_episodes, tv_shows WHERE season != 0 AND tv_shows.indexer_id = tv_episodes.showid AND airdate < ? AND airdate >= ? AND tv_episodes.status = ? AND tv_episodes.status NOT IN (" + ','.join( ['?'] * len(qualList)) + ")", [today, recently, WANTED] + qualList) sql_results += more_sql_results # sort by localtime sorts = { 'date': (lambda x, y: cmp(x["localtime"], y["localtime"])), 'show': (lambda a, b: cmp((a["show_name"], a["localtime"]), (b["show_name"], b["localtime"]))), 'network': (lambda a, b: cmp((a["network"], a["localtime"]), (b["network"], b["localtime"]))), } # make a dict out of the sql results sql_results = [dict(row) for row in sql_results] # add localtime to the dict for index, item in enumerate(sql_results): sql_results[index]['localtime'] = sbdatetime.sbdatetime.convert_to_setting( network_timezones.parse_date_time(item['airdate'], item['airs'], item['network'])) sql_results.sort(sorts[sickbeard.COMING_EPS_SORT]) t = PageTemplate(rh=self, file="comingEpisodes.tmpl") # paused_item = { 'title': '', 'path': 'toggleComingEpsDisplayPaused' } # paused_item['title'] = 'Hide Paused' if sickbeard.COMING_EPS_DISPLAY_PAUSED else 'Show Paused' paused_item = {'title': 'View Paused:', 'path': {'': ''}} paused_item['path'] = {'Hide': 'toggleComingEpsDisplayPaused'} if sickbeard.COMING_EPS_DISPLAY_PAUSED else { 'Show': 'toggleComingEpsDisplayPaused'} t.submenu = [ {'title': 'Sort by:', 'path': {'Date': 'setComingEpsSort/?sort=date', 'Show': 'setComingEpsSort/?sort=show', 'Network': 'setComingEpsSort/?sort=network', }}, {'title': 'Layout:', 'path': {'Banner': 'setComingEpsLayout/?layout=banner', 'Poster': 'setComingEpsLayout/?layout=poster', 'List': 'setComingEpsLayout/?layout=list', 'Calendar': 'setComingEpsLayout/?layout=calendar', }}, paused_item, ] t.next_week = datetime.datetime.combine(next_week1, datetime.time(tzinfo=network_timezones.sb_timezone)) t.today = datetime.datetime.now().replace(tzinfo=network_timezones.sb_timezone) t.sql_results = sql_results # Allow local overriding of layout parameter if layout and layout in ('poster', 'banner', 'list', 'calendar'): t.layout = layout else: t.layout = 
sickbeard.COMING_EPS_LAYOUT return t.respond() class CalendarHandler(BaseHandler): def get(self, *args, **kwargs): if sickbeard.CALENDAR_UNPROTECTED: self.write(self.calendar()) else: self.calendar_auth() @authenticated def calendar_auth(self): self.write(self.calendar()) # Raw iCalendar implementation by Pedro Jose Pereira Vieito (@pvieito). # # iCalendar (iCal) - Standard RFC 5545 <http://tools.ietf.org/html/rfc5546> # Works with iCloud, Google Calendar and Outlook. def calendar(self): """ Provides a subscribeable URL for iCal subscriptions """ logger.log(u"Receiving iCal request from %s" % self.request.remote_ip) # Create a iCal string ical = 'BEGIN:VCALENDAR\r\n' ical += 'VERSION:2.0\r\n' ical += 'X-WR-CALNAME:SickRage\r\n' ical += 'X-WR-CALDESC:SickRage\r\n' ical += 'PRODID://Sick-Beard Upcoming Episodes//\r\n' # Limit dates past_date = (datetime.date.today() + datetime.timedelta(weeks=-52)).toordinal() future_date = (datetime.date.today() + datetime.timedelta(weeks=52)).toordinal() # Get all the shows that are not paused and are currently on air (from kjoconnor Fork) myDB = db.DBConnection() calendar_shows = myDB.select( "SELECT show_name, indexer_id, network, airs, runtime FROM tv_shows WHERE ( status = 'Continuing' OR status = 'Returning Series' ) AND paused != '1'") for show in calendar_shows: # Get all episodes of this show airing between today and next month episode_list = myDB.select( "SELECT indexerid, name, season, episode, description, airdate FROM tv_episodes WHERE airdate >= ? AND airdate < ? AND showid = ?", (past_date, future_date, int(show["indexer_id"]))) utc = tz.gettz('GMT') for episode in episode_list: air_date_time = network_timezones.parse_date_time(episode['airdate'], show["airs"], show['network']).astimezone(utc) air_date_time_end = air_date_time + datetime.timedelta( minutes=helpers.tryInt(show["runtime"], 60)) # Create event for episode ical = ical + 'BEGIN:VEVENT\r\n' ical = ical + 'DTSTART:' + air_date_time.strftime("%Y%m%d") + 'T' + air_date_time.strftime( "%H%M%S") + 'Z\r\n' ical = ical + 'DTEND:' + air_date_time_end.strftime( "%Y%m%d") + 'T' + air_date_time_end.strftime( "%H%M%S") + 'Z\r\n' ical = ical + 'SUMMARY:' + show['show_name'] + ' - ' + str( episode['season']) + "x" + str(episode['episode']) + " - " + episode['name'] + '\r\n' ical = ical + 'UID:Sick-Beard-' + str(datetime.date.today().isoformat()) + '-' + show[ 'show_name'].replace(" ", "-") + '-E' + str(episode['episode']) + 'S' + str( episode['season']) + '\r\n' if episode['description']: ical = ical + 'DESCRIPTION: {0} on {1} \\n\\n {2}\r\n'.format( (show['airs'] or '(Unknown airs)'), (show['network'] or 'Unknown network'), episode['description'].splitlines()[0]) else: ical = ical + 'DESCRIPTION:' + (show['airs'] or '(Unknown airs)') + ' on ' + ( show['network'] or 'Unknown network') + '\r\n' ical = ical + 'END:VEVENT\r\n' # Ending the iCal ical += 'END:VCALENDAR' return ical @route('/ui(/?.*)') class UI(WebRoot): def __init__(self, *args, **kwargs): super(UI, self).__init__(*args, **kwargs) def add_message(self): ui.notifications.message('Test 1', 'This is test number 1') ui.notifications.error('Test 2', 'This is test number 2') return "ok" def get_messages(self): messages = {} cur_notification_num = 1 for cur_notification in ui.notifications.get_notifications(self.request.remote_ip): messages['notification-' + str(cur_notification_num)] = {'title': cur_notification.title, 'message': cur_notification.message, 'type': cur_notification.type} cur_notification_num += 1 return json.dumps(messages) 
@route('/browser(/?.*)') class WebFileBrowser(WebRoot): def __init__(self, *args, **kwargs): super(WebFileBrowser, self).__init__(*args, **kwargs) def index(self, path='', includeFiles=False, *args, **kwargs): self.set_header("Content-Type", "application/json") return json.dumps(foldersAtPath(path, True, bool(int(includeFiles)))) def complete(self, term, includeFiles=0, *args, **kwargs): self.set_header("Content-Type", "application/json") paths = [entry['path'] for entry in foldersAtPath(os.path.dirname(term), includeFiles=bool(int(includeFiles))) if 'path' in entry] return json.dumps(paths) @route('/home(/?.*)') class Home(WebRoot): def __init__(self, *args, **kwargs): super(Home, self).__init__(*args, **kwargs) def HomeMenu(self): menu = [ {'title': 'Add Shows', 'path': 'home/addShows/', }, {'title': 'Manual Post-Processing', 'path': 'home/postprocess/'}, {'title': 'Update KODI', 'path': 'home/updateKODI/', 'requires': self.haveKODI}, {'title': 'Update Plex', 'path': 'home/updatePLEX/', 'requires': self.havePLEX}, {'title': 'Manage Downloads', 'path': 'manage/manageTorrents/', 'requires': self.haveDOWN}, ] return menu def _genericMessage(self, subject, message): t = PageTemplate(rh=self, file="genericMessage.tmpl") t.submenu = self.HomeMenu() t.subject = subject t.message = message return t.respond() def _getEpisode(self, show, season=None, episode=None, absolute=None): if show is None: return "Invalid show parameters" showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return "Invalid show paramaters" if absolute: epObj = showObj.getEpisode(absolute_number=int(absolute)) elif season and episode: epObj = showObj.getEpisode(int(season), int(episode)) else: return "Invalid paramaters" if epObj is None: return "Episode couldn't be retrieved" return epObj def index(self): t = PageTemplate(rh=self, file="home.tmpl") if sickbeard.ANIME_SPLIT_HOME: shows = [] anime = [] for show in sickbeard.showList: if show.is_anime: anime.append(show) else: shows.append(show) t.showlists = [["Shows", shows], ["Anime", anime]] else: t.showlists = [["Shows", sickbeard.showList]] t.submenu = self.HomeMenu() return t.respond() def is_alive(self, *args, **kwargs): if 'callback' in kwargs and '_' in kwargs: callback, _ = kwargs['callback'], kwargs['_'] else: return "Error: Unsupported Request. Send jsonp request with 'callback' variable in the query string." 
# self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') self.set_header('Content-Type', 'text/javascript') self.set_header('Access-Control-Allow-Origin', '*') self.set_header('Access-Control-Allow-Headers', 'x-requested-with') if sickbeard.started: return callback + '(' + json.dumps( {"msg": str(sickbeard.PID)}) + ');' else: return callback + '(' + json.dumps({"msg": "nope"}) + ');' def haveKODI(self): return sickbeard.USE_KODI and sickbeard.KODI_UPDATE_LIBRARY def havePLEX(self): return sickbeard.USE_PLEX and sickbeard.PLEX_UPDATE_LIBRARY def haveDOWN(self): if sickbeard.USE_TORRENTS and sickbeard.TORRENT_METHOD != 'blackhole' \ and (sickbeard.ENABLE_HTTPS and sickbeard.TORRENT_HOST[:5] == 'https' or not sickbeard.ENABLE_HTTPS and sickbeard.TORRENT_HOST[:5] == 'http:')\ or (sickbeard.USE_NZBS and sickbeard.NZB_METHOD != 'blackhole'): return True else: return False def testSABnzbd(self, host=None, username=None, password=None, apikey=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') host = config.clean_url(host) connection, accesMsg = sab.getSabAccesMethod(host, username, password, apikey) if connection: authed, authMsg = sab.testAuthentication(host, username, password, apikey) # @UnusedVariable if authed: return "Success. Connected and authenticated" else: return "Authentication failed. SABnzbd expects '" + accesMsg + "' as authentication method" else: return "Unable to connect to host" def testTorrent(self, torrent_method=None, host=None, username=None, password=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') host = config.clean_url(host) client = clients.getClientIstance(torrent_method) connection, accesMsg = client(host, username, password).testAuthentication() return accesMsg def testFreeMobile(self, freemobile_id=None, freemobile_apikey=None): result, message = notifiers.freemobile_notifier.test_notify(freemobile_id, freemobile_apikey) if result: return "SMS sent successfully" else: return "Problem sending SMS: " + message def testGrowl(self, host=None, password=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') host = config.clean_host(host, default_port=23053) result = notifiers.growl_notifier.test_notify(host, password) if password is None or password == '': pw_append = '' else: pw_append = " with password: " + password if result: return "Registered and Tested growl successfully " + urllib.unquote_plus(host) + pw_append else: return "Registration and Testing of growl failed " + urllib.unquote_plus(host) + pw_append def testProwl(self, prowl_api=None, prowl_priority=0): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.prowl_notifier.test_notify(prowl_api, prowl_priority) if result: return "Test prowl notice sent successfully" else: return "Test prowl notice failed" def testBoxcar(self, username=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.boxcar_notifier.test_notify(username) if result: return "Boxcar notification succeeded. Check your Boxcar clients to make sure it worked" else: return "Error sending Boxcar notification" def testBoxcar2(self, accesstoken=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.boxcar2_notifier.test_notify(accesstoken) if result: return "Boxcar2 notification succeeded. 
Check your Boxcar2 clients to make sure it worked" else: return "Error sending Boxcar2 notification" def testPushover(self, userKey=None, apiKey=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.pushover_notifier.test_notify(userKey, apiKey) if result: return "Pushover notification succeeded. Check your Pushover clients to make sure it worked" else: return "Error sending Pushover notification" def twitterStep1(self): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') return notifiers.twitter_notifier._get_authorization() def twitterStep2(self, key): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.twitter_notifier._get_credentials(key) logger.log(u"result: " + str(result)) if result: return "Key verification successful" else: return "Unable to verify key" def testTwitter(self): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.twitter_notifier.test_notify() if result: return "Tweet successful, check your twitter to make sure it worked" else: return "Error sending tweet" def testKODI(self, host=None, username=None, password=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') host = config.clean_hosts(host) finalResult = '' for curHost in [x.strip() for x in host.split(",")]: curResult = notifiers.kodi_notifier.test_notify(urllib.unquote_plus(curHost), username, password) if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]: finalResult += "Test KODI notice sent successfully to " + urllib.unquote_plus(curHost) else: finalResult += "Test KODI notice failed to " + urllib.unquote_plus(curHost) finalResult += "<br />\n" return finalResult def testPLEX(self, host=None, username=None, password=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') finalResult = '' for curHost in [x.strip() for x in host.split(",")]: curResult = notifiers.plex_notifier.test_notify(urllib.unquote_plus(curHost), username, password) if len(curResult.split(":")) > 2 and 'OK' in curResult.split(":")[2]: finalResult += "Test Plex notice sent successfully to " + urllib.unquote_plus(curHost) else: finalResult += "Test Plex notice failed to " + urllib.unquote_plus(curHost) finalResult += "<br />\n" return finalResult def testLibnotify(self): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') if notifiers.libnotify_notifier.test_notify(): return "Tried sending desktop notification via libnotify" else: return notifiers.libnotify.diagnose() def testNMJ(self, host=None, database=None, mount=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') host = config.clean_host(host) result = notifiers.nmj_notifier.test_notify(urllib.unquote_plus(host), database, mount) if result: return "Successfully started the scan update" else: return "Test failed to start the scan update" def settingsNMJ(self, host=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') host = config.clean_host(host) result = notifiers.nmj_notifier.notify_settings(urllib.unquote_plus(host)) if result: return '{"message": "Got settings from %(host)s", "database": "%(database)s", "mount": "%(mount)s"}' % { "host": host, "database": sickbeard.NMJ_DATABASE, "mount": sickbeard.NMJ_MOUNT} else: return '{"message": "Failed! Make sure your Popcorn is on and NMJ is running. 
(see Log & Errors -> Debug for detailed info)", "database": "", "mount": ""}' def testNMJv2(self, host=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') host = config.clean_host(host) result = notifiers.nmjv2_notifier.test_notify(urllib.unquote_plus(host)) if result: return "Test notice sent successfully to " + urllib.unquote_plus(host) else: return "Test notice failed to " + urllib.unquote_plus(host) def settingsNMJv2(self, host=None, dbloc=None, instance=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') host = config.clean_host(host) result = notifiers.nmjv2_notifier.notify_settings(urllib.unquote_plus(host), dbloc, instance) if result: return '{"message": "NMJ Database found at: %(host)s", "database": "%(database)s"}' % {"host": host, "database": sickbeard.NMJv2_DATABASE} else: return '{"message": "Unable to find NMJ Database at location: %(dbloc)s. Is the right location selected and PCH running?", "database": ""}' % { "dbloc": dbloc} def testTrakt(self, username=None, password=None, disable_ssl=None, blacklist_name=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') if disable_ssl == 'true': disable_ssl = True else: disable_ssl = False return notifiers.trakt_notifier.test_notify(username, password, disable_ssl, blacklist_name) def loadShowNotifyLists(self): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') myDB = db.DBConnection() rows = myDB.select("SELECT show_id, show_name, notify_list FROM tv_shows ORDER BY show_name ASC") data = {} size = 0 for r in rows: data[r['show_id']] = {'id': r['show_id'], 'name': r['show_name'], 'list': r['notify_list']} size += 1 data['_size'] = size return json.dumps(data) def saveShowNotifyList(self, show=None, emails=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') myDB = db.DBConnection() if myDB.action("UPDATE tv_shows SET notify_list = ? WHERE show_id = ?", [emails, show]): return 'OK' else: return 'ERROR: %s' % myDB.last_err def testEmail(self, host=None, port=None, smtp_from=None, use_tls=None, user=None, pwd=None, to=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') host = config.clean_host(host) if notifiers.email_notifier.test_notify(host, port, smtp_from, use_tls, user, pwd, to): return 'Test email sent successfully! Check inbox.' else: return 'ERROR: %s' % notifiers.email_notifier.last_err def testNMA(self, nma_api=None, nma_priority=0): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.nma_notifier.test_notify(nma_api, nma_priority) if result: return "Test NMA notice sent successfully" else: return "Test NMA notice failed" def testPushalot(self, authorizationToken=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.pushalot_notifier.test_notify(authorizationToken) if result: return "Pushalot notification succeeded. Check your Pushalot clients to make sure it worked" else: return "Error sending Pushalot notification" def testPushbullet(self, api=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.pushbullet_notifier.test_notify(api) if result: return "Pushbullet notification succeeded. 
Check your device to make sure it worked" else: return "Error sending Pushbullet notification" def getPushbulletDevices(self, api=None): # self.set_header('Cache-Control', 'max-age=0,no-cache,no-store') result = notifiers.pushbullet_notifier.get_devices(api) if result: return result else: return "Error sending Pushbullet notification" def shutdown(self, pid=None): if str(pid) != str(sickbeard.PID): return self.redirect("/home/") sickbeard.events.put(sickbeard.events.SystemEvent.SHUTDOWN) title = "Shutting down" message = "SickRage is shutting down..." return self._genericMessage(title, message) def restart(self, pid=None): if str(pid) != str(sickbeard.PID): return self.redirect("/home/") t = PageTemplate(rh=self, file="restart.tmpl") t.submenu = self.HomeMenu() # restart sickbeard.events.put(sickbeard.events.SystemEvent.RESTART) return t.respond() def updateCheck(self, pid=None): if str(pid) != str(sickbeard.PID): return self.redirect('/home/') sickbeard.versionCheckScheduler.action.check_for_new_version(force=True) return self.redirect('/home/') def update(self, pid=None): if str(pid) != str(sickbeard.PID): return self.redirect('/home/') checkversion = CheckVersion() backup = checkversion._runbackup() if backup == True: if sickbeard.versionCheckScheduler.action.update(): # do a hard restart sickbeard.events.put(sickbeard.events.SystemEvent.RESTART) t = PageTemplate(rh=self, file="restart.tmpl") return t.respond() else: return self._genericMessage("Update Failed", "Update wasn't successful, not restarting. Check your log for more information.") else: return self.redirect('/home/') def branchCheckout(self, branch): if sickbeard.BRANCH != branch: sickbeard.BRANCH = branch ui.notifications.message('Checking out branch: ', branch) return self.update(sickbeard.PID) else: ui.notifications.message('Already on branch: ', branch) return self.redirect('/home') def getDBcompare(self, branchDest=None): checkversion = CheckVersion() db_status = checkversion.getDBcompare(branchDest) if db_status == 'upgrade': logger.log(u"Checkout branch has a new DB version - Upgrade", logger.DEBUG) return json.dumps({ "status": "success", 'message': 'upgrade' }) elif db_status == 'equal': logger.log(u"Checkout branch has the same DB version - Equal", logger.DEBUG) return json.dumps({ "status": "success", 'message': 'equal' }) elif db_status == 'downgrade': logger.log(u"Checkout branch has an old DB version - Downgrade", logger.DEBUG) return json.dumps({ "status": "success", 'message': 'downgrade' }) else: logger.log(u"Checkout branch couldn't compare DB version.", logger.ERROR) return json.dumps({ "status": "error", 'message': 'General exception' }) def displayShow(self, show=None): if show is None: return self._genericMessage("Error", "Invalid show ID") else: showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return self._genericMessage("Error", "Show not in show list") myDB = db.DBConnection() seasonResults = myDB.select( "SELECT DISTINCT season FROM tv_episodes WHERE showid = ? ORDER BY season DESC", [showObj.indexerid] ) sqlResults = myDB.select( "SELECT * FROM tv_episodes WHERE showid = ? 
ORDER BY season DESC, episode DESC", [showObj.indexerid] ) t = PageTemplate(rh=self, file="displayShow.tmpl") t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.indexerid}] try: t.showLoc = (showObj.location, True) except sickbeard.exceptions.ShowDirNotFoundException: t.showLoc = (showObj._location, False) show_message = '' if sickbeard.showQueueScheduler.action.isBeingAdded(showObj): show_message = 'This show is in the process of being downloaded - the info below is incomplete.' elif sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): show_message = 'The information on this page is in the process of being updated.' elif sickbeard.showQueueScheduler.action.isBeingRefreshed(showObj): show_message = 'The episodes below are currently being refreshed from disk' elif sickbeard.showQueueScheduler.action.isBeingSubtitled(showObj): show_message = 'Currently downloading subtitles for this show' elif sickbeard.showQueueScheduler.action.isInRefreshQueue(showObj): show_message = 'This show is queued to be refreshed.' elif sickbeard.showQueueScheduler.action.isInUpdateQueue(showObj): show_message = 'This show is queued and awaiting an update.' elif sickbeard.showQueueScheduler.action.isInSubtitleQueue(showObj): show_message = 'This show is queued and awaiting subtitles download.' if not sickbeard.showQueueScheduler.action.isBeingAdded(showObj): if not sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): t.submenu.append( {'title': 'Remove', 'path': 'home/deleteShow?show=%d' % showObj.indexerid, 'confirm': True}) t.submenu.append({'title': 'Re-scan files', 'path': 'home/refreshShow?show=%d' % showObj.indexerid}) t.submenu.append( {'title': 'Force Full Update', 'path': 'home/updateShow?show=%d&amp;force=1' % showObj.indexerid}) t.submenu.append({'title': 'Update show in KODI', 'path': 'home/updateKODI?showName=%s' % urllib.quote_plus( showObj.name.encode('utf-8')), 'requires': self.haveKODI}) t.submenu.append({'title': 'Preview Rename', 'path': 'home/testRename?show=%d' % showObj.indexerid}) if sickbeard.USE_SUBTITLES and not sickbeard.showQueueScheduler.action.isBeingSubtitled( showObj) and showObj.subtitles: t.submenu.append( {'title': 'Download Subtitles', 'path': 'home/subtitleShow?show=%d' % showObj.indexerid}) t.show = showObj t.sqlResults = sqlResults t.seasonResults = seasonResults t.show_message = show_message epCounts = {} epCats = {} epCounts[Overview.SKIPPED] = 0 epCounts[Overview.WANTED] = 0 epCounts[Overview.QUAL] = 0 epCounts[Overview.GOOD] = 0 epCounts[Overview.UNAIRED] = 0 epCounts[Overview.SNATCHED] = 0 for curResult in sqlResults: curEpCat = showObj.getOverview(int(curResult["status"] or -1)) if curEpCat: epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat epCounts[curEpCat] += 1 def titler(x): return (helpers.remove_article(x), x)[not x or sickbeard.SORT_ARTICLE] if sickbeard.ANIME_SPLIT_HOME: shows = [] anime = [] for show in sickbeard.showList: if show.is_anime: anime.append(show) else: shows.append(show) t.sortedShowLists = [["Shows", sorted(shows, lambda x, y: cmp(titler(x.name), titler(y.name)))], ["Anime", sorted(anime, lambda x, y: cmp(titler(x.name), titler(y.name)))]] else: t.sortedShowLists = [ ["Shows", sorted(sickbeard.showList, lambda x, y: cmp(titler(x.name), titler(y.name)))]] t.bwl = None if showObj.is_anime: t.bwl = BlackAndWhiteList(showObj.indexerid) t.epCounts = epCounts t.epCats = epCats showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.indexerid) indexerid = int(showObj.indexerid) 
indexer = int(showObj.indexer) t.all_scene_exceptions = showObj.exceptions t.scene_numbering = get_scene_numbering_for_show(indexerid, indexer) t.xem_numbering = get_xem_numbering_for_show(indexerid, indexer) t.scene_absolute_numbering = get_scene_absolute_numbering_for_show(indexerid, indexer) t.xem_absolute_numbering = get_xem_absolute_numbering_for_show(indexerid, indexer) return t.respond() def plotDetails(self, show, season, episode): myDB = db.DBConnection() result = myDB.selectOne( "SELECT description FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?", (int(show), int(season), int(episode))) return result['description'] if result else 'Episode not found.' def sceneExceptions(self, show): exceptionsList = sickbeard.scene_exceptions.get_all_scene_exceptions(show) if not exceptionsList: return "No scene exceptions" out = [] for season, names in iter(sorted(exceptionsList.iteritems())): if season == -1: season = "*" out.append("S" + str(season) + ": " + ", ".join(names)) return "<br/>".join(out) def editShow(self, show=None, location=None, anyQualities=[], bestQualities=[], exceptions_list=[], flatten_folders=None, paused=None, directCall=False, air_by_date=None, sports=None, dvdorder=None, indexerLang=None, subtitles=None, archive_firstmatch=None, rls_ignore_words=None, rls_require_words=None, anime=None, blackWords=None, whiteWords=None, blacklist=None, whitelist=None, scene=None, defaultEpStatus=None,audio_lang=None): anidb_failed = False if show is None: errString = "Invalid show ID: " + str(show) if directCall: return [errString] else: return self._genericMessage("Error", errString) showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if not showObj: errString = "Unable to find the specified show: " + str(show) if directCall: return [errString] else: return self._genericMessage("Error", errString) showObj.exceptions = scene_exceptions.get_scene_exceptions(showObj.indexerid) if not location and not anyQualities and not bestQualities and not flatten_folders: t = PageTemplate(rh=self, file="editShow.tmpl") t.submenu = self.HomeMenu() if showObj.is_anime: bwl = BlackAndWhiteList(showObj.indexerid) t.whiteWords = "" if "global" in bwl.whiteDict: t.whiteWords = ", ".join(bwl.whiteDict["global"]) t.blackWords = "" if "global" in bwl.blackDict: t.blackWords = ", ".join(bwl.blackDict["global"]) t.whitelist = [] if bwl.whiteDict.has_key("release_group"): t.whitelist = bwl.whiteDict["release_group"] t.blacklist = [] if bwl.blackDict.has_key("release_group"): t.blacklist = bwl.blackDict["release_group"] t.groups = [] if helpers.set_up_anidb_connection() and not anidb_failed: try: anime = adba.Anime(sickbeard.ADBA_CONNECTION, name=showObj.name) t.groups = anime.get_groups() except Exception as e: anidb_failed = True ui.notifications.error('Unable to retreive Fansub Groups from AniDB.') with showObj.lock: t.show = showObj t.scene_exceptions = get_scene_exceptions(showObj.indexerid) return t.respond() flatten_folders = config.checkbox_to_value(flatten_folders) dvdorder = config.checkbox_to_value(dvdorder) archive_firstmatch = config.checkbox_to_value(archive_firstmatch) paused = config.checkbox_to_value(paused) air_by_date = config.checkbox_to_value(air_by_date) scene = config.checkbox_to_value(scene) sports = config.checkbox_to_value(sports) anime = config.checkbox_to_value(anime) subtitles = config.checkbox_to_value(subtitles) if indexerLang and indexerLang in sickbeard.indexerApi(showObj.indexer).indexer().config['valid_languages']: indexer_lang = 
indexerLang else: indexer_lang = showObj.lang # if we changed the language then kick off an update if indexer_lang == showObj.lang: do_update = False else: do_update = True if scene == showObj.scene and anime == showObj.anime: do_update_scene_numbering = False else: do_update_scene_numbering = True if type(anyQualities) != list: anyQualities = [anyQualities] if type(bestQualities) != list: bestQualities = [bestQualities] if type(exceptions_list) != list: exceptions_list = [exceptions_list] # If directCall from mass_edit_update no scene exceptions handling or blackandwhite list handling if directCall: do_update_exceptions = False else: if set(exceptions_list) == set(showObj.exceptions): do_update_exceptions = False else: do_update_exceptions = True if showObj.is_anime: bwl = BlackAndWhiteList(showObj.indexerid) if whitelist: whitelist = whitelist.split(",") shortWhiteList = [] if helpers.set_up_anidb_connection() and not anidb_failed: try: for groupName in whitelist: group = sickbeard.ADBA_CONNECTION.group(gname=groupName) for line in group.datalines: if line["shortname"]: shortWhiteList.append(line["shortname"]) else: if not groupName in shortWhiteList: shortWhiteList.append(groupName) except Exception as e: anidb_failed = True ui.notifications.error('Unable to retreive data from AniDB.') shortWhiteList = whitelist else: shortWhiteList = whitelist bwl.set_white_keywords_for("release_group", shortWhiteList) else: bwl.set_white_keywords_for("release_group", []) if blacklist: blacklist = blacklist.split(",") shortBlacklist = [] if helpers.set_up_anidb_connection() and not anidb_failed: try: for groupName in blacklist: group = sickbeard.ADBA_CONNECTION.group(gname=groupName) for line in group.datalines: if line["shortname"]: shortBlacklist.append(line["shortname"]) else: if not groupName in shortBlacklist: shortBlacklist.append(groupName) except Exception as e: anidb_failed = True ui.notifications.error('Unable to retreive data from AniDB.') shortBlacklist = blacklist else: shortBlacklist = blacklist bwl.set_black_keywords_for("release_group", shortBlacklist) else: bwl.set_black_keywords_for("release_group", []) if whiteWords: whiteWords = [x.strip() for x in whiteWords.split(",")] bwl.set_white_keywords_for("global", whiteWords) else: bwl.set_white_keywords_for("global", []) if blackWords: blackWords = [x.strip() for x in blackWords.split(",")] bwl.set_black_keywords_for("global", blackWords) else: bwl.set_black_keywords_for("global", []) errors = [] with showObj.lock: newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities)) showObj.quality = newQuality showObj.archive_firstmatch = archive_firstmatch # reversed for now if bool(showObj.flatten_folders) != bool(flatten_folders): showObj.flatten_folders = flatten_folders try: sickbeard.showQueueScheduler.action.refreshShow(showObj) except exceptions.CantRefreshException, e: errors.append("Unable to refresh this show: " + ex(e)) showObj.paused = paused showObj.scene = scene showObj.anime = anime showObj.sports = sports showObj.subtitles = subtitles showObj.audio_lang = audio_lang showObj.air_by_date = air_by_date showObj.default_ep_status = int(defaultEpStatus) if not directCall: showObj.lang = indexer_lang showObj.dvdorder = dvdorder showObj.rls_ignore_words = rls_ignore_words.strip() showObj.rls_require_words = rls_require_words.strip() # if we change location clear the db of episodes, change it, write to db, and rescan if os.path.normpath(showObj._location) != os.path.normpath(location): 
logger.log(os.path.normpath(showObj._location) + " != " + os.path.normpath(location), logger.DEBUG) if not ek.ek(os.path.isdir, location) and not sickbeard.CREATE_MISSING_SHOW_DIRS: errors.append("New location <tt>%s</tt> does not exist" % location) # don't bother if we're going to update anyway elif not do_update: # change it try: showObj.location = location try: sickbeard.showQueueScheduler.action.refreshShow(showObj) except exceptions.CantRefreshException, e: errors.append("Unable to refresh this show:" + ex(e)) # grab updated info from TVDB # showObj.loadEpisodesFromIndexer() # rescan the episodes in the new folder except exceptions.NoNFOException: errors.append( "The folder at <tt>%s</tt> doesn't contain a tvshow.nfo - copy your files to that folder before you change the directory in SickRage." % location) # save it to the DB showObj.saveToDB() # force the update if do_update: try: sickbeard.showQueueScheduler.action.updateShow(showObj, True) time.sleep(cpu_presets[sickbeard.CPU_PRESET]) except exceptions.CantUpdateException as e: errors.append("Unable to update show: {0}".format(str(e))) if do_update_exceptions: try: scene_exceptions.update_scene_exceptions(showObj.indexerid, exceptions_list) # @UndefinedVdexerid) time.sleep(cpu_presets[sickbeard.CPU_PRESET]) except exceptions.CantUpdateException, e: errors.append("Unable to force an update on scene exceptions of the show.") if not paused and (sickbeard.TRAKT_USE_ROLLING_DOWNLOAD and sickbeard.USE_TRAKT): # Checking if trakt and rolling_download are enable because updateWantedList() # doesn't do the distinction between a failuire and being not activated(Return false) if not sickbeard.traktRollingScheduler.action.updateWantedList(): errors.append("Unable to force an update on wanted episode") if do_update_scene_numbering: try: sickbeard.scene_numbering.xem_refresh(showObj.indexerid, showObj.indexer) time.sleep(cpu_presets[sickbeard.CPU_PRESET]) except exceptions.CantUpdateException, e: errors.append("Unable to force an update on scene numbering of the show.") if directCall: return errors if len(errors) > 0: ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"), '<ul>' + '\n'.join(['<li>%s</li>' % error for error in errors]) + "</ul>") return self.redirect("/home/displayShow?show=" + show) def deleteShow(self, show=None, full=0): if show is None: return self._genericMessage("Error", "Invalid show ID") showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return self._genericMessage("Error", "Unable to find the specified show") if sickbeard.showQueueScheduler.action.isBeingAdded( showObj) or sickbeard.showQueueScheduler.action.isBeingUpdated(showObj): return self._genericMessage("Error", "Shows can't be deleted while they're being added or updated.") if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC: # remove show from trakt.tv library try: sickbeard.traktCheckerScheduler.action.removeShowFromTraktLibrary(showObj) except traktException as e: logger.log("Trakt: Unable to delete show: {0}. 
Error: {1}".format(showObj.name, ex(e)),logger.ERROR) return self._genericMessage("Error", "Unable to delete show: {0}".format(showObj.name)) showObj.deleteShow(bool(full)) ui.notifications.message('<b>%s</b> has been %s %s' % (showObj.name, ('deleted', 'trashed')[sickbeard.TRASH_REMOVE_SHOW], ('(media untouched)', '(with all related media)')[bool(full)])) return self.redirect("/home/") def refreshShow(self, show=None): if show is None: return self._genericMessage("Error", "Invalid show ID") showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return self._genericMessage("Error", "Unable to find the specified show") # force the update from the DB try: sickbeard.showQueueScheduler.action.refreshShow(showObj) except exceptions.CantRefreshException, e: ui.notifications.error("Unable to refresh this show.", ex(e)) time.sleep(cpu_presets[sickbeard.CPU_PRESET]) return self.redirect("/home/displayShow?show=" + str(showObj.indexerid)) def updateShow(self, show=None, force=0): if show is None: return self._genericMessage("Error", "Invalid show ID") showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return self._genericMessage("Error", "Unable to find the specified show") # force the update try: sickbeard.showQueueScheduler.action.updateShow(showObj, bool(force)) except exceptions.CantUpdateException, e: ui.notifications.error("Unable to update this show.", ex(e)) # just give it some time time.sleep(cpu_presets[sickbeard.CPU_PRESET]) return self.redirect("/home/displayShow?show=" + str(showObj.indexerid)) def subtitleShow(self, show=None, force=0): if show is None: return self._genericMessage("Error", "Invalid show ID") showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return self._genericMessage("Error", "Unable to find the specified show") # search and download subtitles sickbeard.showQueueScheduler.action.downloadSubtitles(showObj, bool(force)) time.sleep(cpu_presets[sickbeard.CPU_PRESET]) return self.redirect("/home/displayShow?show=" + str(showObj.indexerid)) def updateKODI(self, showName=None): # only send update to first host in the list -- workaround for kodi sql backend users if sickbeard.KODI_UPDATE_ONLYFIRST: # only send update to first host in the list -- workaround for kodi sql backend users host = sickbeard.KODI_HOST.split(",")[0].strip() else: host = sickbeard.KODI_HOST if notifiers.kodi_notifier.update_library(showName=showName): ui.notifications.message("Library update command sent to KODI host(s): " + host) else: ui.notifications.error("Unable to contact one or more KODI host(s): " + host) return self.redirect('/home/') def updatePLEX(self): if notifiers.plex_notifier.update_library(): ui.notifications.message( "Library update command sent to Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST) else: ui.notifications.error("Unable to contact Plex Media Server host: " + sickbeard.PLEX_SERVER_HOST) return self.redirect('/home/') def setStatus(self, show=None, eps=None, status=None, direct=False): if show is None or eps is None or status is None: errMsg = "You must specify a show and at least one episode" if direct: ui.notifications.error('Error', errMsg) return json.dumps({'result': 'error'}) else: return self._genericMessage("Error", errMsg) if not statusStrings.has_key(int(status)): errMsg = "Invalid status" if direct: ui.notifications.error('Error', errMsg) return json.dumps({'result': 'error'}) else: return self._genericMessage("Error", errMsg) 
showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: errMsg = "Error", "Show not in show list" if direct: ui.notifications.error('Error', errMsg) return json.dumps({'result': 'error'}) else: return self._genericMessage("Error", errMsg) segments = {} trakt_data = [] if eps is not None: sql_l = [] for curEp in eps.split('|'): logger.log(u"Attempting to set status on episode " + curEp + " to " + status, logger.DEBUG) epInfo = curEp.split('x') epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1])) if epObj is None: return self._genericMessage("Error", "Episode couldn't be retrieved") if int(status) in [WANTED, FAILED]: # figure out what episodes are wanted so we can backlog them if epObj.season in segments: segments[epObj.season].append(epObj) else: segments[epObj.season] = [epObj] with epObj.lock: # don't let them mess up UNAIRED episodes if epObj.status == UNAIRED: logger.log(u"Refusing to change status of " + curEp + " because it is UNAIRED", logger.ERROR) continue if int( status) in Quality.DOWNLOADED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.DOWNLOADED + [ IGNORED] and not ek.ek(os.path.isfile, epObj.location): logger.log( u"Refusing to change status of " + curEp + " to DOWNLOADED because it's not SNATCHED/DOWNLOADED", logger.ERROR) continue if int( status) == FAILED and epObj.status not in Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.DOWNLOADED: logger.log( u"Refusing to change status of " + curEp + " to FAILED because it's not SNATCHED/DOWNLOADED", logger.ERROR) continue if epObj.status in Quality.DOWNLOADED and int(status) == WANTED: logger.log(u"Removing release_name for episode as you want to set a downloaded episode back to wanted, so obviously you want it replaced") epObj.release_name = "" epObj.status = int(status) # mass add to database sql_l.append(epObj.get_sql()) trakt_data.append((epObj.season, epObj.episode)) data = notifiers.trakt_notifier.trakt_episode_data_generate(trakt_data) if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST: if int(status) in [WANTED, FAILED]: logger.log(u"Add episodes, showid: indexerid " + str(showObj.indexerid) + ", Title " + str(showObj.name) + " to Watchlist", logger.DEBUG) upd = "add" elif int(status) in [ARCHIVED, IGNORED, SKIPPED ] + Quality.DOWNLOADED: logger.log(u"Remove episodes, showid: indexerid " + str(showObj.indexerid) + ", Title " + str(showObj.name) + " from Watchlist", logger.DEBUG) upd = "remove" if data: notifiers.trakt_notifier.update_watchlist(showObj, data_episode=data, update=upd) if len(sql_l) > 0: myDB = db.DBConnection() myDB.mass_action(sql_l) if int(status) == WANTED and not showObj.paused: msg = "Backlog was automatically started for the following seasons of <b>" + showObj.name + "</b>:<br />" msg += '<ul>' for season, segment in segments.items(): cur_backlog_queue_item = search_queue.BacklogQueueItem(showObj, segment) sickbeard.searchQueueScheduler.action.add_item(cur_backlog_queue_item) msg += "<li>Season " + str(season) + "</li>" logger.log(u"Sending backlog for " + showObj.name + " season " + str( season) + " because some eps were set to wanted") msg += "</ul>" if segments: ui.notifications.message("Backlog started", msg) elif int(status) == WANTED and showObj.paused: logger.log(u"Some episodes were set to wanted, but " + showObj.name + " is paused. 
Not adding to Backlog until show is unpaused") if int(status) == FAILED: msg = "Retrying Search was automatically started for the following season of <b>" + showObj.name + "</b>:<br />" msg += '<ul>' for season, segment in segments.items(): cur_failed_queue_item = search_queue.FailedQueueItem(showObj, segment) sickbeard.searchQueueScheduler.action.add_item(cur_failed_queue_item) msg += "<li>Season " + str(season) + "</li>" logger.log(u"Retrying Search for " + showObj.name + " season " + str( season) + " because some eps were set to failed") msg += "</ul>" if segments: ui.notifications.message("Retry Search started", msg) if direct: return json.dumps({'result': 'success'}) else: return self.redirect("/home/displayShow?show=" + show) def setAudio(self, show=None, eps=None, audio_langs=None, direct=False): if show == None or eps == None or audio_langs == None: errMsg = "You must specify a show and at least one episode" if direct: ui.notifications.error('Error', errMsg) return json.dumps({'result': 'error'}) else: return _genericMessage("Error", errMsg) showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj == None: return _genericMessage("Error", "Show not in show list") try: show_loc = showObj.location #@UnusedVariable except exceptions.ShowDirNotFoundException: return _genericMessage("Error", "Can't rename episodes when the show dir is missing.") ep_obj_rename_list = [] for curEp in eps.split('|'): logger.log(u"Attempting to set audio on episode "+curEp+" to "+audio_langs, logger.DEBUG) epInfo = curEp.split('x') epObj = showObj.getEpisode(int(epInfo[0]), int(epInfo[1])) epObj.audio_langs = str(audio_langs) epObj.saveToDB() if direct: return json.dumps({'result': 'success'}) else: return self.redirect("/home/displayShow?show=" + show) def testRename(self, show=None): if show is None: return self._genericMessage("Error", "You must specify a show") showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj is None: return self._genericMessage("Error", "Show not in show list") try: show_loc = showObj.location # @UnusedVariable except exceptions.ShowDirNotFoundException: return self._genericMessage("Error", "Can't rename episodes when the show dir is missing.") ep_obj_rename_list = [] ep_obj_list = showObj.getAllEpisodes(has_location=True) for cur_ep_obj in ep_obj_list: # Only want to rename if we have a location if cur_ep_obj.location: if cur_ep_obj.relatedEps: # do we have one of multi-episodes in the rename list already have_already = False for cur_related_ep in cur_ep_obj.relatedEps + [cur_ep_obj]: if cur_related_ep in ep_obj_rename_list: have_already = True break if not have_already: ep_obj_rename_list.append(cur_ep_obj) else: ep_obj_rename_list.append(cur_ep_obj) if ep_obj_rename_list: # present season DESC episode DESC on screen ep_obj_rename_list.reverse() t = PageTemplate(rh=self, file="testRename.tmpl") t.submenu = [{'title': 'Edit', 'path': 'home/editShow?show=%d' % showObj.indexerid}] t.ep_obj_list = ep_obj_rename_list t.show = showObj return t.respond() def doRename(self, show=None, eps=None): if show is None or eps is None: errMsg = "You must specify a show and at least one episode" return self._genericMessage("Error", errMsg) show_obj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if show_obj is None: errMsg = "Error", "Show not in show list" return self._genericMessage("Error", errMsg) try: show_loc = show_obj.location # @UnusedVariable except exceptions.ShowDirNotFoundException: return 
self._genericMessage("Error", "Can't rename episodes when the show dir is missing.") if eps is None: return self.redirect("/home/displayShow?show=" + show) myDB = db.DBConnection() for curEp in eps.split('|'): epInfo = curEp.split('x') # this is probably the worst possible way to deal with double eps but I've kinda painted myself into a corner here with this stupid database ep_result = myDB.select( "SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ? AND 5=5", [show, epInfo[0], epInfo[1]]) if not ep_result: logger.log(u"Unable to find an episode for " + curEp + ", skipping", logger.WARNING) continue related_eps_result = myDB.select("SELECT * FROM tv_episodes WHERE location = ? AND episode != ?", [ep_result[0]["location"], epInfo[1]]) root_ep_obj = show_obj.getEpisode(int(epInfo[0]), int(epInfo[1])) root_ep_obj.relatedEps = [] for cur_related_ep in related_eps_result: related_ep_obj = show_obj.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"])) if related_ep_obj not in root_ep_obj.relatedEps: root_ep_obj.relatedEps.append(related_ep_obj) root_ep_obj.rename() return self.redirect("/home/displayShow?show=" + show) def searchEpisode(self, show=None, season=None, episode=None, downCurQuality=0): # retrieve the episode object and fail if we can't get one ep_obj = self._getEpisode(show, season, episode) if isinstance(ep_obj, str): return json.dumps({'result': 'failure'}) # make a queue item for it and put it on the queue ep_queue_item = search_queue.ManualSearchQueueItem(ep_obj.show, ep_obj, bool(int(downCurQuality))) sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) if not ep_queue_item.started and ep_queue_item.success is None: return json.dumps( {'result': 'success'}) # I Actually want to call it queued, because the search hasnt been started yet! if ep_queue_item.started and ep_queue_item.success is None: return json.dumps({'result': 'success'}) else: return json.dumps({'result': 'failure'}) ### Returns the current ep_queue_item status for the current viewed show. # Possible status: Downloaded, Snatched, etc... 
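    # Illustrative payload shape for the manual-search status endpoint defined below
    # (values are hypothetical, shown only to document the wrapper the UI polls for):
    #   {"episodes": [{"show": 279530, "season": 1, "episode": 6, "episodeindexid": 4242424,
    #                  "searchstatus": "queued", "status": "Wanted", "quality": "SD",
    #                  "overview": "wanted"}]}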
    # Returns {'show': 279530, 'episodes' : ['episode' : 6, 'season' : 1, 'searchstatus' : 'queued', 'status' : 'running', 'quality': '4013']
    def getManualSearchStatus(self, show=None):

        def getEpisodes(searchThread, searchstatus):
            results = []
            showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(searchThread.show.indexerid))

            if isinstance(searchThread, sickbeard.search_queue.ManualSearchQueueItem):
                results.append({'show': searchThread.show.indexerid,
                                'episode': searchThread.segment.episode,
                                'episodeindexid': searchThread.segment.indexerid,
                                'season': searchThread.segment.season,
                                'searchstatus': searchstatus,
                                'status': statusStrings[searchThread.segment.status],
                                'quality': self.getQualityClass(searchThread.segment),
                                'overview': Overview.overviewStrings[showObj.getOverview(int(searchThread.segment.status or -1))]})
            else:
                for epObj in searchThread.segment:
                    results.append({'show': epObj.show.indexerid,
                                    'episode': epObj.episode,
                                    'episodeindexid': epObj.indexerid,
                                    'season': epObj.season,
                                    'searchstatus': searchstatus,
                                    'status': statusStrings[epObj.status],
                                    'quality': self.getQualityClass(epObj),
                                    'overview': Overview.overviewStrings[showObj.getOverview(int(epObj.status or -1))]})

            return results

        episodes = []

        # Queued Searches
        searchstatus = 'queued'
        for searchThread in sickbeard.searchQueueScheduler.action.get_all_ep_from_queue(show):
            episodes += getEpisodes(searchThread, searchstatus)

        # Running Searches
        searchstatus = 'searching'
        if (sickbeard.searchQueueScheduler.action.is_manualsearch_in_progress()):
            searchThread = sickbeard.searchQueueScheduler.action.currentItem
            if searchThread.success:
                searchstatus = 'finished'
            episodes += getEpisodes(searchThread, searchstatus)

        # Finished Searches
        searchstatus = 'finished'
        for searchThread in sickbeard.search_queue.MANUAL_SEARCH_HISTORY:
            if show is not None:
                if not str(searchThread.show.indexerid) == show:
                    continue

            if isinstance(searchThread, sickbeard.search_queue.ManualSearchQueueItem):
                if not [x for x in episodes if x['episodeindexid'] == searchThread.segment.indexerid]:
                    episodes += getEpisodes(searchThread, searchstatus)
            else:
                ### These are only Failed Downloads/Retry SearchThreadItems..
lets loop through the segement/episodes if not [i for i, j in zip(searchThread.segment, episodes) if i.indexerid == j['episodeindexid']]: episodes += getEpisodes(searchThread, searchstatus) return json.dumps({'episodes': episodes}) def getQualityClass(self, ep_obj): # return the correct json value # Find the quality class for the episode quality_class = Quality.qualityStrings[Quality.UNKNOWN] ep_status, ep_quality = Quality.splitCompositeStatus(ep_obj.status) for x in (SD, HD720p, HD1080p): if ep_quality in Quality.splitQuality(x)[0]: quality_class = qualityPresetStrings[x] break return quality_class def searchEpisodeSubtitles(self, show=None, season=None, episode=None): # retrieve the episode object and fail if we can't get one ep_obj = self._getEpisode(show, season, episode) if isinstance(ep_obj, str): return json.dumps({'result': 'failure'}) # try do download subtitles for that episode previous_subtitles = set(subliminal.language.Language(x) for x in ep_obj.subtitles) try: ep_obj.subtitles = set(x.language for x in ep_obj.downloadSubtitles().values()[0]) except: return json.dumps({'result': 'failure'}) # return the correct json value if previous_subtitles != ep_obj.subtitles: status = 'New subtitles downloaded: %s' % ' '.join([ "<img src='" + sickbeard.WEB_ROOT + "/images/flags/" + x.alpha2 + ".png' alt='" + x.name + "'/>" for x in sorted(list(ep_obj.subtitles.difference(previous_subtitles)))]) else: status = 'No subtitles downloaded' ui.notifications.message('Subtitles Search', status) return json.dumps({'result': status, 'subtitles': ','.join(sorted([x.alpha2 for x in ep_obj.subtitles.union( previous_subtitles)]))}) def setSceneNumbering(self, show, indexer, forSeason=None, forEpisode=None, forAbsolute=None, sceneSeason=None, sceneEpisode=None, sceneAbsolute=None): # sanitize: if forSeason in ['null', '']: forSeason = None if forEpisode in ['null', '']: forEpisode = None if forAbsolute in ['null', '']: forAbsolute = None if sceneSeason in ['null', '']: sceneSeason = None if sceneEpisode in ['null', '']: sceneEpisode = None if sceneAbsolute in ['null', '']: sceneAbsolute = None showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(show)) if showObj.is_anime: result = { 'success': True, 'forAbsolute': forAbsolute, } else: result = { 'success': True, 'forSeason': forSeason, 'forEpisode': forEpisode, } # retrieve the episode object and fail if we can't get one if showObj.is_anime: ep_obj = self._getEpisode(show, absolute=forAbsolute) else: ep_obj = self._getEpisode(show, forSeason, forEpisode) if isinstance(ep_obj, str): result['success'] = False result['errorMessage'] = ep_obj elif showObj.is_anime: logger.log(u"setAbsoluteSceneNumbering for %s from %s to %s" % (show, forAbsolute, sceneAbsolute), logger.DEBUG) show = int(show) indexer = int(indexer) forAbsolute = int(forAbsolute) if sceneAbsolute is not None: sceneAbsolute = int(sceneAbsolute) set_scene_numbering(show, indexer, absolute_number=forAbsolute, sceneAbsolute=sceneAbsolute) else: logger.log(u"setEpisodeSceneNumbering for %s from %sx%s to %sx%s" % (show, forSeason, forEpisode, sceneSeason, sceneEpisode), logger.DEBUG) show = int(show) indexer = int(indexer) forSeason = int(forSeason) forEpisode = int(forEpisode) if sceneSeason is not None: sceneSeason = int(sceneSeason) if sceneEpisode is not None: sceneEpisode = int(sceneEpisode) set_scene_numbering(show, indexer, season=forSeason, episode=forEpisode, sceneSeason=sceneSeason, sceneEpisode=sceneEpisode) if showObj.is_anime: sn = get_scene_absolute_numbering(show, 
indexer, forAbsolute) if sn: result['sceneAbsolute'] = sn else: result['sceneAbsolute'] = None else: sn = get_scene_numbering(show, indexer, forSeason, forEpisode) if sn: (result['sceneSeason'], result['sceneEpisode']) = sn else: (result['sceneSeason'], result['sceneEpisode']) = (None, None) return json.dumps(result) def retryEpisode(self, show, season, episode, downCurQuality): # retrieve the episode object and fail if we can't get one ep_obj = self._getEpisode(show, season, episode) if isinstance(ep_obj, str): return json.dumps({'result': 'failure'}) # make a queue item for it and put it on the queue ep_queue_item = search_queue.FailedQueueItem(ep_obj.show, [ep_obj], bool(int(downCurQuality))) sickbeard.searchQueueScheduler.action.add_item(ep_queue_item) if not ep_queue_item.started and ep_queue_item.success is None: return json.dumps( {'result': 'success'}) # I Actually want to call it queued, because the search hasnt been started yet! if ep_queue_item.started and ep_queue_item.success is None: return json.dumps({'result': 'success'}) else: return json.dumps({'result': 'failure'}) @route('/home/postprocess(/?.*)') class HomePostProcess(Home): def __init__(self, *args, **kwargs): super(HomePostProcess, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="home_postprocess.tmpl") t.submenu = self.HomeMenu() return t.respond() def processEpisode(self, dir=None, nzbName=None, jobName=None, quiet=None, process_method=None, force=None, is_priority=None, delete_on="0", failed="0", type="auto", *args, **kwargs): if failed == "0": failed = False else: failed = True if force in ["on", "1"]: force = True else: force = False if is_priority in ["on", "1"]: is_priority = True else: is_priority = False if delete_on in ["on", "1"]: delete_on = True else: delete_on = False if not dir: return self.redirect("/home/postprocess/") else: result = processTV.processDir(dir, nzbName, process_method=process_method, force=force, is_priority=is_priority, delete_on=delete_on, failed=failed, type=type) if quiet is not None and int(quiet) == 1: return result result = result.replace("\n", "<br />\n") return self._genericMessage("Postprocessing results", result) @route('/home/addShows(/?.*)') class HomeAddShows(Home): def __init__(self, *args, **kwargs): super(HomeAddShows, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="home_addShows.tmpl") t.submenu = self.HomeMenu() return t.respond() def getIndexerLanguages(self): result = sickbeard.indexerApi().config['valid_languages'] return json.dumps({'results': result}) def sanitizeFileName(self, name): return helpers.sanitizeFileName(name) def searchIndexersForShowName(self, search_term, lang=None, indexer=None): if not lang or lang == 'null': lang = sickbeard.INDEXER_DEFAULT_LANGUAGE search_term = search_term.encode('utf-8') results = {} final_results = [] # Query Indexers for each search term and build the list of results for indexer in sickbeard.indexerApi().indexers if not int(indexer) else [int(indexer)]: lINDEXER_API_PARMS = sickbeard.indexerApi(indexer).api_params.copy() lINDEXER_API_PARMS['language'] = lang lINDEXER_API_PARMS['custom_ui'] = classes.AllShowsListUI t = sickbeard.indexerApi(indexer).indexer(**lINDEXER_API_PARMS) logger.log("Searching for Show with searchterm: %s on Indexer: %s" % ( search_term, sickbeard.indexerApi(indexer).name), logger.DEBUG) try: # add search results results.setdefault(indexer, []).extend(t[search_term]) except Exception, e: continue map(final_results.extend, 
([[sickbeard.indexerApi(id).name, id, sickbeard.indexerApi(id).config["show_url"], int(show['id']), show['seriesname'], show['firstaired']] for show in shows] for id, shows in results.items())) lang_id = sickbeard.indexerApi().config['langabbv_to_id'][lang] return json.dumps({'results': final_results, 'langid': lang_id}) def massAddTable(self, rootDir=None): t = PageTemplate(rh=self, file="home_massAddTable.tmpl") t.submenu = self.HomeMenu() if not rootDir: return "No folders selected." elif type(rootDir) != list: root_dirs = [rootDir] else: root_dirs = rootDir root_dirs = [urllib.unquote_plus(x) for x in root_dirs] if sickbeard.ROOT_DIRS: default_index = int(sickbeard.ROOT_DIRS.split('|')[0]) else: default_index = 0 if len(root_dirs) > default_index: tmp = root_dirs[default_index] if tmp in root_dirs: root_dirs.remove(tmp) root_dirs = [tmp] + root_dirs dir_list = [] myDB = db.DBConnection() for root_dir in root_dirs: try: file_list = ek.ek(os.listdir, root_dir) except: continue for cur_file in file_list: try: cur_path = ek.ek(os.path.normpath, ek.ek(os.path.join, root_dir, cur_file)) if not ek.ek(os.path.isdir, cur_path): continue except: continue cur_dir = { 'dir': cur_path, 'display_dir': '<b>' + ek.ek(os.path.dirname, cur_path) + os.sep + '</b>' + ek.ek( os.path.basename, cur_path), } # see if the folder is in KODI already dirResults = myDB.select("SELECT * FROM tv_shows WHERE location = ?", [cur_path]) if dirResults: cur_dir['added_already'] = True else: cur_dir['added_already'] = False dir_list.append(cur_dir) indexer_id = show_name = indexer = None for cur_provider in sickbeard.metadata_provider_dict.values(): if not (indexer_id and show_name): (indexer_id, show_name, indexer) = cur_provider.retrieveShowMetadata(cur_path) # default to TVDB if indexer was not detected if show_name and not (indexer or indexer_id): (sn, idx, id) = helpers.searchIndexerForShowID(show_name, indexer, indexer_id) # set indexer and indexer_id from found info if not indexer and idx: indexer = idx if not indexer_id and id: indexer_id = id cur_dir['existing_info'] = (indexer_id, show_name, indexer) if indexer_id and helpers.findCertainShow(sickbeard.showList, indexer_id): cur_dir['added_already'] = True t.dirList = dir_list return t.respond() def newShow(self, show_to_add=None, other_shows=None): """ Display the new show page which collects a tvdb id, folder, and extra options and posts them to addNewShow """ t = PageTemplate(rh=self, file="home_newShow.tmpl") t.submenu = self.HomeMenu() indexer, show_dir, indexer_id, show_name = self.split_extra_show(show_to_add) if indexer_id and indexer and show_name: use_provided_info = True else: use_provided_info = False # tell the template whether we're giving it show name & Indexer ID t.use_provided_info = use_provided_info # use the given show_dir for the indexer search if available if not show_dir: t.default_show_name = '' elif not show_name: t.default_show_name = re.sub(' \(\d{4}\)', '', ek.ek(os.path.basename, ek.ek(os.path.normpath, show_dir)).replace('.', ' ')) else: t.default_show_name = show_name # carry a list of other dirs if given if not other_shows: other_shows = [] elif type(other_shows) != list: other_shows = [other_shows] if use_provided_info: t.provided_indexer_id = int(indexer_id or 0) t.provided_indexer_name = show_name t.provided_show_dir = show_dir t.other_shows = other_shows t.provided_indexer = int(indexer or sickbeard.INDEXER_DEFAULT) t.indexers = sickbeard.indexerApi().indexers return t.respond() def recommendedShows(self): """ Display the new 
show page which collects a tvdb id, folder, and extra options and posts them to addNewShow """ t = PageTemplate(rh=self, file="home_recommendedShows.tmpl") t.submenu = self.HomeMenu() return t.respond() def getRecommendedShows(self): final_results = [] logger.log(u"Getting recommended shows from Trakt.tv", logger.DEBUG) trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT) try: recommendedlist = trakt_api.traktRequest("recommendations/shows?extended=full,images") if recommendedlist: indexers = ['tvdb', 'tvrage'] map(final_results.append, ( [int(show['ids'][indexers[sickbeard.TRAKT_DEFAULT_INDEXER - 1]]), 'http://www.trakt.tv/shows/%s' % show['ids']['slug'], show['title'], show['overview'], None if show['first_aired'] is None else dateutil_parser.parse(show['first_aired']).strftime(sickbeard.DATE_PRESET)] for show in recommendedlist if not helpers.findCertainShow(sickbeard.showList, [ int(show['ids'][indexers[sickbeard.TRAKT_DEFAULT_INDEXER - 1]])]))) except traktException as e: logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING) return json.dumps({'results': final_results}) def addRecommendedShow(self, whichSeries=None, indexerLang=None, rootDir=None, defaultStatus=None, anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None, fullShowPath=None, other_shows=None, skipShow=None, providedIndexer=None, anime=None, scene=None): indexer = 1 indexer_name = sickbeard.indexerApi(int(indexer)).name show_url = whichSeries.split('|')[1] indexer_id = whichSeries.split('|')[0] show_name = whichSeries.split('|')[2] if indexerLang is None: indexerLang = sickbeard.INDEXER_DEFAULT_LANGUAGE return self.addNewShow('|'.join([indexer_name, str(indexer), show_url, indexer_id, show_name, ""]), indexerLang, rootDir, defaultStatus, anyQualities, bestQualities, flatten_folders, subtitles, fullShowPath, other_shows, skipShow, providedIndexer, anime, scene) def trendingShows(self): """ Display the new show page which collects a tvdb id, folder, and extra options and posts them to addNewShow """ t = PageTemplate(rh=self, file="home_trendingShows.tmpl") t.submenu = self.HomeMenu() return t.respond() def getTrendingShows(self): """ Display the new show page which collects a tvdb id, folder, and extra options and posts them to addNewShow """ t = PageTemplate(rh=self, file="trendingShows.tmpl") t.submenu = self.HomeMenu() t.trending_shows = [] trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD, sickbeard.TRAKT_DISABLE_SSL_VERIFY, sickbeard.TRAKT_TIMEOUT) try: not_liked_show = "" if sickbeard.TRAKT_BLACKLIST_NAME is not None and sickbeard.TRAKT_BLACKLIST_NAME: not_liked_show = trakt_api.traktRequest("users/" + sickbeard.TRAKT_USERNAME + "/lists/" + sickbeard.TRAKT_BLACKLIST_NAME + "/items") or [] else: logger.log(u"trending blacklist name is empty", logger.DEBUG) limit_show = 50 + len(not_liked_show) shows = trakt_api.traktRequest("shows/trending?limit=" + str(limit_show) + "&extended=full,images") or [] library_shows = trakt_api.traktRequest("sync/collection/shows?extended=full") or [] for show in shows: try: tvdb_id = int(show['show']['ids']['tvdb']) tvrage_id = int(show['show']['ids']['tvrage'] or 0) if not helpers.findCertainShow(sickbeard.showList, [tvdb_id, tvrage_id]): if show['show']['ids']['tvdb'] not in (lshow['show']['ids']['tvdb'] for lshow in library_shows): if not_liked_show: if show['show']['ids']['tvdb'] not in 
(show['show']['ids']['tvdb'] for show in not_liked_show if show['type'] == 'show'): t.trending_shows += [show] else: t.trending_shows += [show] except exceptions.MultipleShowObjectsException: continue if sickbeard.TRAKT_BLACKLIST_NAME != '': t.blacklist = True else: t.blacklist = False except traktException as e: logger.log(u"Could not connect to Trakt service: %s" % ex(e), logger.WARNING) return t.respond() def addShowToBlacklist(self, indexer_id): # URL parameters data = { 'shows': [ { 'ids': { 'tvdb': indexer_id } } ] } trakt_api = TraktAPI(sickbeard.TRAKT_API_KEY, sickbeard.TRAKT_USERNAME, sickbeard.TRAKT_PASSWORD) result=trakt_api.traktRequest("users/" + sickbeard.TRAKT_USERNAME + "/lists/" + sickbeard.TRAKT_BLACKLIST_NAME + "/items", data, method='POST') return self.redirect('/home/addShows/trendingShows/') def existingShows(self): """ Prints out the page to add existing shows from a root dir """ t = PageTemplate(rh=self, file="home_addExistingShow.tmpl") t.submenu = self.HomeMenu() return t.respond() def addTraktShow(self, indexer_id, showName): if helpers.findCertainShow(sickbeard.showList, int(indexer_id)): return if sickbeard.ROOT_DIRS: root_dirs = sickbeard.ROOT_DIRS.split('|') location = root_dirs[int(root_dirs[0]) + 1] else: location = None if location: show_dir = ek.ek(os.path.join, location, helpers.sanitizeFileName(showName)) dir_exists = helpers.makeDir(show_dir) if not dir_exists: logger.log(u"Unable to create the folder " + show_dir + ", can't add the show", logger.ERROR) return else: helpers.chmodAsParent(show_dir) sickbeard.showQueueScheduler.action.addShow(1, int(indexer_id), show_dir, default_status=sickbeard.STATUS_DEFAULT, quality=sickbeard.QUALITY_DEFAULT, flatten_folders=sickbeard.FLATTEN_FOLDERS_DEFAULT, subtitles=sickbeard.SUBTITLES_DEFAULT, anime=sickbeard.ANIME_DEFAULT, scene=sickbeard.SCENE_DEFAULT) ui.notifications.message('Show added', 'Adding the specified show into ' + show_dir) else: logger.log(u"There was an error creating the show, no root directory setting found", logger.ERROR) return "No root directories setup, please go back and add one." # done adding show return self.redirect('/home/') def addNewShow(self, whichSeries=None, indexerLang=None, rootDir=None, defaultStatus=None, anyQualities=None, bestQualities=None, flatten_folders=None, subtitles=None, fullShowPath=None, other_shows=None, skipShow=None, providedIndexer=None, anime=None, scene=None, audio_lang=None): """ Receive tvdb id, dir, and other options and create a show from them. If extra show dirs are provided then it forwards back to newShow, if not it goes to /home. 
""" if indexerLang is None: indexerLang = sickbeard.INDEXER_DEFAULT_LANGUAGE # grab our list of other dirs if given if not other_shows: other_shows = [] elif type(other_shows) != list: other_shows = [other_shows] def finishAddShow(): # if there are no extra shows then go home if not other_shows: return self.redirect('/home/') # peel off the next one next_show_dir = other_shows[0] rest_of_show_dirs = other_shows[1:] # go to add the next show return self.newShow(next_show_dir, rest_of_show_dirs) # if we're skipping then behave accordingly if skipShow: return finishAddShow() # sanity check on our inputs if (not rootDir and not fullShowPath) or not whichSeries: return "Missing params, no Indexer ID or folder:" + repr(whichSeries) + " and " + repr( rootDir) + "/" + repr(fullShowPath) # figure out what show we're adding and where series_pieces = whichSeries.split('|') if (whichSeries and rootDir) or (whichSeries and fullShowPath and len(series_pieces) > 1): if len(series_pieces) < 6: logger.log("Unable to add show due to show selection. Not anough arguments: %s" % (repr(series_pieces)), logger.ERROR) ui.notifications.error("Unknown error. Unable to add show due to problem with show selection.") return self.redirect('/home/addShows/existingShows/') indexer = int(series_pieces[1]) indexer_id = int(series_pieces[3]) # Show name was sent in UTF-8 in the form show_name = series_pieces[4].decode('utf-8') else: # if no indexer was provided use the default indexer set in General settings if not providedIndexer: providedIndexer = sickbeard.INDEXER_DEFAULT indexer = int(providedIndexer) indexer_id = int(whichSeries) show_name = os.path.basename(os.path.normpath(fullShowPath)) # use the whole path if it's given, or else append the show name to the root dir to get the full show path if fullShowPath: show_dir = ek.ek(os.path.normpath, fullShowPath) else: show_dir = ek.ek(os.path.join, rootDir, helpers.sanitizeFileName(show_name)) # blanket policy - if the dir exists you should have used "add existing show" numbnuts if ek.ek(os.path.isdir, show_dir) and not fullShowPath: ui.notifications.error("Unable to add show", "Folder " + show_dir + " exists already") return self.redirect('/home/addShows/existingShows/') # don't create show dir if config says not to if sickbeard.ADD_SHOWS_WO_DIR: logger.log(u"Skipping initial creation of " + show_dir + " due to config.ini setting") else: dir_exists = helpers.makeDir(show_dir) if not dir_exists: logger.log(u"Unable to create the folder " + show_dir + ", can't add the show", logger.ERROR) ui.notifications.error("Unable to add show", "Unable to create the folder " + show_dir + ", can't add the show") return self.redirect("/home/") else: helpers.chmodAsParent(show_dir) # prepare the inputs for passing along scene = config.checkbox_to_value(scene) anime = config.checkbox_to_value(anime) flatten_folders = config.checkbox_to_value(flatten_folders) subtitles = config.checkbox_to_value(subtitles) if not anyQualities: anyQualities = [] if not bestQualities: bestQualities = [] if type(anyQualities) != list: anyQualities = [anyQualities] if type(bestQualities) != list: bestQualities = [bestQualities] newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities)) # add the show sickbeard.showQueueScheduler.action.addShow(indexer, indexer_id, show_dir, int(defaultStatus), newQuality, flatten_folders, indexerLang, subtitles, anime, scene, audio_lang=audio_lang) ui.notifications.message('Show added', 'Adding the specified show into ' + show_dir) return 
finishAddShow() def split_extra_show(self, extra_show): if not extra_show: return (None, None, None, None) split_vals = extra_show.split('|') if len(split_vals) < 4: indexer = split_vals[0] show_dir = split_vals[1] return (indexer, show_dir, None, None) indexer = split_vals[0] show_dir = split_vals[1] indexer_id = split_vals[2] show_name = '|'.join(split_vals[3:]) return (indexer, show_dir, indexer_id, show_name) def addExistingShows(self, shows_to_add=None, promptForSettings=None): """ Receives a dir list and add them. Adds the ones with given TVDB IDs first, then forwards along to the newShow page. """ # grab a list of other shows to add, if provided if not shows_to_add: shows_to_add = [] elif type(shows_to_add) != list: shows_to_add = [shows_to_add] shows_to_add = [urllib.unquote_plus(x) for x in shows_to_add] promptForSettings = config.checkbox_to_value(promptForSettings) indexer_id_given = [] dirs_only = [] # separate all the ones with Indexer IDs for cur_dir in shows_to_add: if '|' in cur_dir: split_vals = cur_dir.split('|') if len(split_vals) < 3: dirs_only.append(cur_dir) if not '|' in cur_dir: dirs_only.append(cur_dir) else: indexer, show_dir, indexer_id, show_name = self.split_extra_show(cur_dir) if not show_dir or not indexer_id or not show_name: continue indexer_id_given.append((int(indexer), show_dir, int(indexer_id), show_name)) # if they want me to prompt for settings then I will just carry on to the newShow page if promptForSettings and shows_to_add: return self.newShow(shows_to_add[0], shows_to_add[1:]) # if they don't want me to prompt for settings then I can just add all the nfo shows now num_added = 0 for cur_show in indexer_id_given: indexer, show_dir, indexer_id, show_name = cur_show if indexer is not None and indexer_id is not None: # add the show sickbeard.showQueueScheduler.action.addShow(indexer, indexer_id, show_dir, default_status=sickbeard.STATUS_DEFAULT, quality=sickbeard.QUALITY_DEFAULT, flatten_folders=sickbeard.FLATTEN_FOLDERS_DEFAULT, subtitles=sickbeard.SUBTITLES_DEFAULT, anime=sickbeard.ANIME_DEFAULT, scene=sickbeard.SCENE_DEFAULT,audio_lang=sickbeard.AUDIO_SHOW_DEFAULT) num_added += 1 if num_added: ui.notifications.message("Shows Added", "Automatically added " + str(num_added) + " from their existing metadata files") # if we're done then go home if not dirs_only: return self.redirect('/home/') # for the remaining shows we need to prompt for each one, so forward this on to the newShow page return self.newShow(dirs_only[0], dirs_only[1:]) @route('/manage(/?.*)') class Manage(Home, WebRoot): def __init__(self, *args, **kwargs): super(Manage, self).__init__(*args, **kwargs) def ManageMenu(self): menu = [ {'title': 'Backlog Overview', 'path': 'manage/backlogOverview/'}, {'title': 'Manage Searches', 'path': 'manage/manageSearches/'}, {'title': 'Episode Status Management', 'path': 'manage/episodeStatuses/'}, ] if (sickbeard.USE_TORRENTS and sickbeard.TORRENT_METHOD != 'blackhole' \ and (sickbeard.ENABLE_HTTPS and sickbeard.TORRENT_HOST[:5] == 'https' or not sickbeard.ENABLE_HTTPS and sickbeard.TORRENT_HOST[:5] == 'http:')) \ or (sickbeard.USE_NZBS and sickbeard.NZB_METHOD != 'blackhole'): menu.append({'title': 'Manage Downloads', 'path': 'manage/manageTorrents/'}) if sickbeard.USE_SUBTITLES: menu.append({'title': 'Missed Subtitle Management', 'path': 'manage/subtitleMissed/'}) if sickbeard.USE_FAILED_DOWNLOADS: menu.append({'title': 'Failed Downloads', 'path': 'manage/failedDownloads/'}) return menu def index(self): t = PageTemplate(rh=self, 
file="manage.tmpl") t.submenu = self.ManageMenu() return t.respond() def showEpisodeStatuses(self, indexer_id, whichStatus): status_list = [int(whichStatus)] if status_list[0] == SNATCHED: status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER myDB = db.DBConnection() cur_show_results = myDB.select( "SELECT season, episode, name FROM tv_episodes WHERE showid = ? AND season != 0 AND status IN (" + ','.join( ['?'] * len(status_list)) + ")", [int(indexer_id)] + status_list) result = {} for cur_result in cur_show_results: cur_season = int(cur_result["season"]) cur_episode = int(cur_result["episode"]) if cur_season not in result: result[cur_season] = {} result[cur_season][cur_episode] = cur_result["name"] return json.dumps(result) def episodeStatuses(self, whichStatus=None): if whichStatus: whichStatus = int(whichStatus) status_list = [whichStatus] if status_list[0] == SNATCHED: status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER else: status_list = [] t = PageTemplate(rh=self, file="manage_episodeStatuses.tmpl") t.submenu = self.ManageMenu() t.whichStatus = whichStatus # if we have no status then this is as far as we need to go if not status_list: return t.respond() myDB = db.DBConnection() status_results = myDB.select( "SELECT show_name, tv_shows.indexer_id AS indexer_id FROM tv_episodes, tv_shows WHERE tv_episodes.status IN (" + ','.join( ['?'] * len( status_list)) + ") AND season != 0 AND tv_episodes.showid = tv_shows.indexer_id ORDER BY show_name", status_list) ep_counts = {} show_names = {} sorted_show_ids = [] for cur_status_result in status_results: cur_indexer_id = int(cur_status_result["indexer_id"]) if cur_indexer_id not in ep_counts: ep_counts[cur_indexer_id] = 1 else: ep_counts[cur_indexer_id] += 1 show_names[cur_indexer_id] = cur_status_result["show_name"] if cur_indexer_id not in sorted_show_ids: sorted_show_ids.append(cur_indexer_id) t.show_names = show_names t.ep_counts = ep_counts t.sorted_show_ids = sorted_show_ids return t.respond() def changeEpisodeStatuses(self, oldStatus, newStatus, *args, **kwargs): status_list = [int(oldStatus)] if status_list[0] == SNATCHED: status_list = Quality.SNATCHED + Quality.SNATCHED_PROPER to_change = {} # make a list of all shows and their associated args for arg in kwargs: indexer_id, what = arg.split('-') # we don't care about unchecked checkboxes if kwargs[arg] != 'on': continue if indexer_id not in to_change: to_change[indexer_id] = [] to_change[indexer_id].append(what) myDB = db.DBConnection() for cur_indexer_id in to_change: # get a list of all the eps we want to change if they just said "all" if 'all' in to_change[cur_indexer_id]: all_eps_results = myDB.select( "SELECT season, episode FROM tv_episodes WHERE status IN (" + ','.join( ['?'] * len(status_list)) + ") AND season != 0 AND showid = ?", status_list + [cur_indexer_id]) all_eps = [str(x["season"]) + 'x' + str(x["episode"]) for x in all_eps_results] to_change[cur_indexer_id] = all_eps self.setStatus(cur_indexer_id, '|'.join(to_change[cur_indexer_id]), newStatus, direct=True) return self.redirect('/manage/episodeStatuses/') def showSubtitleMissed(self, indexer_id, whichSubs): myDB = db.DBConnection() cur_show_results = myDB.select( "SELECT season, episode, name, subtitles FROM tv_episodes WHERE showid = ? 
AND season != 0 AND status LIKE '%4'", [int(indexer_id)]) result = {} for cur_result in cur_show_results: if whichSubs == 'all': if len(set(cur_result["subtitles"].split(',')).intersection(set(subtitles.wantedLanguages()))) >= len( subtitles.wantedLanguages()): continue elif whichSubs in cur_result["subtitles"].split(','): continue cur_season = int(cur_result["season"]) cur_episode = int(cur_result["episode"]) if cur_season not in result: result[cur_season] = {} if cur_episode not in result[cur_season]: result[cur_season][cur_episode] = {} result[cur_season][cur_episode]["name"] = cur_result["name"] result[cur_season][cur_episode]["subtitles"] = ",".join( subliminal.language.Language(subtitle).alpha2 for subtitle in cur_result["subtitles"].split(',')) if not \ cur_result["subtitles"] == '' else '' return json.dumps(result) def subtitleMissed(self, whichSubs=None): t = PageTemplate(rh=self, file="manage_subtitleMissed.tmpl") t.submenu = self.ManageMenu() t.whichSubs = whichSubs if not whichSubs: return t.respond() myDB = db.DBConnection() status_results = myDB.select( "SELECT show_name, tv_shows.indexer_id as indexer_id, tv_episodes.subtitles subtitles FROM tv_episodes, tv_shows WHERE tv_shows.subtitles = 1 AND tv_episodes.status LIKE '%4' AND tv_episodes.season != 0 AND tv_episodes.showid = tv_shows.indexer_id ORDER BY show_name") ep_counts = {} show_names = {} sorted_show_ids = [] for cur_status_result in status_results: if whichSubs == 'all': if len(set(cur_status_result["subtitles"].split(',')).intersection( set(subtitles.wantedLanguages()))) >= len(subtitles.wantedLanguages()): continue elif whichSubs in cur_status_result["subtitles"].split(','): continue cur_indexer_id = int(cur_status_result["indexer_id"]) if cur_indexer_id not in ep_counts: ep_counts[cur_indexer_id] = 1 else: ep_counts[cur_indexer_id] += 1 show_names[cur_indexer_id] = cur_status_result["show_name"] if cur_indexer_id not in sorted_show_ids: sorted_show_ids.append(cur_indexer_id) t.show_names = show_names t.ep_counts = ep_counts t.sorted_show_ids = sorted_show_ids return t.respond() def downloadSubtitleMissed(self, *args, **kwargs): to_download = {} # make a list of all shows and their associated args for arg in kwargs: indexer_id, what = arg.split('-') # we don't care about unchecked checkboxes if kwargs[arg] != 'on': continue if indexer_id not in to_download: to_download[indexer_id] = [] to_download[indexer_id].append(what) for cur_indexer_id in to_download: # get a list of all the eps we want to download subtitles if they just said "all" if 'all' in to_download[cur_indexer_id]: myDB = db.DBConnection() all_eps_results = myDB.select( "SELECT season, episode FROM tv_episodes WHERE status LIKE '%4' AND season != 0 AND showid = ?", [cur_indexer_id]) to_download[cur_indexer_id] = [str(x["season"]) + 'x' + str(x["episode"]) for x in all_eps_results] for epResult in to_download[cur_indexer_id]: season, episode = epResult.split('x') show = sickbeard.helpers.findCertainShow(sickbeard.showList, int(cur_indexer_id)) subtitles = show.getEpisode(int(season), int(episode)).downloadSubtitles() return self.redirect('/manage/subtitleMissed/') def backlogShow(self, indexer_id): show_obj = helpers.findCertainShow(sickbeard.showList, int(indexer_id)) if show_obj: sickbeard.backlogSearchScheduler.action.searchBacklog([show_obj]) return self.redirect("/manage/backlogOverview/") def backlogOverview(self): t = PageTemplate(rh=self, file="manage_backlogOverview.tmpl") t.submenu = self.ManageMenu() showCounts = {} showCats = {} 
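        # Bookkeeping for the backlog overview template, keyed by each show's indexerid:
        # epCounts tallies episodes per Overview category (WANTED, QUAL, GOOD, ...) and
        # epCats maps "SxE" strings (e.g. "1x6") to their category, presumably so the
        # template can annotate individual episode rows.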
showSQLResults = {} myDB = db.DBConnection() for curShow in sickbeard.showList: epCounts = {} epCats = {} epCounts[Overview.SKIPPED] = 0 epCounts[Overview.WANTED] = 0 epCounts[Overview.QUAL] = 0 epCounts[Overview.GOOD] = 0 epCounts[Overview.UNAIRED] = 0 epCounts[Overview.SNATCHED] = 0 sqlResults = myDB.select( "SELECT * FROM tv_episodes WHERE showid = ? ORDER BY season DESC, episode DESC", [curShow.indexerid]) for curResult in sqlResults: curEpCat = curShow.getOverview(int(curResult["status"] or -1)) if curEpCat: epCats[str(curResult["season"]) + "x" + str(curResult["episode"])] = curEpCat epCounts[curEpCat] += 1 showCounts[curShow.indexerid] = epCounts showCats[curShow.indexerid] = epCats showSQLResults[curShow.indexerid] = sqlResults t.showCounts = showCounts t.showCats = showCats t.showSQLResults = showSQLResults return t.respond() def massEdit(self, toEdit=None): t = PageTemplate(rh=self, file="manage_massEdit.tmpl") t.submenu = self.ManageMenu() if not toEdit: return self.redirect("/manage/") showIDs = toEdit.split("|") showList = [] for curID in showIDs: curID = int(curID) showObj = helpers.findCertainShow(sickbeard.showList, curID) if showObj: showList.append(showObj) archive_firstmatch_all_same = True last_archive_firstmatch = None flatten_folders_all_same = True last_flatten_folders = None paused_all_same = True last_paused = None default_ep_status_all_same = True last_default_ep_status = None anime_all_same = True last_anime = None sports_all_same = True last_sports = None quality_all_same = True last_quality = None subtitles_all_same = True last_subtitles = None scene_all_same = True last_scene = None air_by_date_all_same = True last_air_by_date = None lang_audio_all_same = True last_lang_audio = None root_dir_list = [] for curShow in showList: cur_root_dir = ek.ek(os.path.dirname, curShow._location) if cur_root_dir not in root_dir_list: root_dir_list.append(cur_root_dir) if archive_firstmatch_all_same: # if we had a value already and this value is different then they're not all the same if last_archive_firstmatch not in (None, curShow.archive_firstmatch): archive_firstmatch_all_same = False else: last_archive_firstmatch = curShow.archive_firstmatch # if we know they're not all the same then no point even bothering if paused_all_same: # if we had a value already and this value is different then they're not all the same if last_paused not in (None, curShow.paused): paused_all_same = False else: last_paused = curShow.paused if default_ep_status_all_same: if last_default_ep_status not in (None, curShow.default_ep_status): default_ep_status_all_same = False else: last_default_ep_status = curShow.default_ep_status if anime_all_same: # if we had a value already and this value is different then they're not all the same if last_anime not in (None, curShow.is_anime): anime_all_same = False else: last_anime = curShow.anime if flatten_folders_all_same: if last_flatten_folders not in (None, curShow.flatten_folders): flatten_folders_all_same = False else: last_flatten_folders = curShow.flatten_folders if quality_all_same: if last_quality not in (None, curShow.quality): quality_all_same = False else: last_quality = curShow.quality if subtitles_all_same: if last_subtitles not in (None, curShow.subtitles): subtitles_all_same = False else: last_subtitles = curShow.subtitles if scene_all_same: if last_scene not in (None, curShow.scene): scene_all_same = False else: last_scene = curShow.scene if sports_all_same: if last_sports not in (None, curShow.sports): sports_all_same = False else: 
last_sports = curShow.sports if air_by_date_all_same: if last_air_by_date not in (None, curShow.air_by_date): air_by_date_all_same = False else: last_air_by_date = curShow.air_by_date if lang_audio_all_same: if last_lang_audio not in (None, curShow.audio_lang): lang_audio_all_same = False else: last_lang_audio = curShow.audio_lang t.showList = toEdit t.archive_firstmatch_value = last_archive_firstmatch if archive_firstmatch_all_same else None t.default_ep_status_value = last_default_ep_status if default_ep_status_all_same else None t.paused_value = last_paused if paused_all_same else None t.anime_value = last_anime if anime_all_same else None t.flatten_folders_value = last_flatten_folders if flatten_folders_all_same else None t.quality_value = last_quality if quality_all_same else None t.subtitles_value = last_subtitles if subtitles_all_same else None t.scene_value = last_scene if scene_all_same else None t.sports_value = last_sports if sports_all_same else None t.air_by_date_value = last_air_by_date if air_by_date_all_same else None t.audio_value = last_lang_audio if lang_audio_all_same else None t.root_dir_list = root_dir_list return t.respond() def massEditSubmit(self, archive_firstmatch=None, paused=None, default_ep_status=None, anime=None, sports=None, scene=None, flatten_folders=None, quality_preset=False, subtitles=None, air_by_date=None, anyQualities=[], bestQualities=[], audioLang = None, toEdit=None, *args, **kwargs): dir_map = {} for cur_arg in kwargs: if not cur_arg.startswith('orig_root_dir_'): continue which_index = cur_arg.replace('orig_root_dir_', '') end_dir = kwargs['new_root_dir_' + which_index] dir_map[kwargs[cur_arg]] = end_dir showIDs = toEdit.split("|") errors = [] for curShow in showIDs: curErrors = [] showObj = helpers.findCertainShow(sickbeard.showList, int(curShow)) if not showObj: continue cur_root_dir = ek.ek(os.path.dirname, showObj._location) cur_show_dir = ek.ek(os.path.basename, showObj._location) if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]: new_show_dir = ek.ek(os.path.join, dir_map[cur_root_dir], cur_show_dir) logger.log( u"For show " + showObj.name + " changing dir from " + showObj._location + " to " + new_show_dir) else: new_show_dir = showObj._location if archive_firstmatch == 'keep': new_archive_firstmatch = showObj.archive_firstmatch else: new_archive_firstmatch = True if archive_firstmatch == 'enable' else False new_archive_firstmatch = 'on' if new_archive_firstmatch else 'off' if paused == 'keep': new_paused = showObj.paused else: new_paused = True if paused == 'enable' else False new_paused = 'on' if new_paused else 'off' if default_ep_status == 'keep': new_default_ep_status = showObj.default_ep_status else: new_default_ep_status = default_ep_status if anime == 'keep': new_anime = showObj.anime else: new_anime = True if anime == 'enable' else False new_anime = 'on' if new_anime else 'off' if sports == 'keep': new_sports = showObj.sports else: new_sports = True if sports == 'enable' else False new_sports = 'on' if new_sports else 'off' if scene == 'keep': new_scene = showObj.is_scene else: new_scene = True if scene == 'enable' else False new_scene = 'on' if new_scene else 'off' if air_by_date == 'keep': new_air_by_date = showObj.air_by_date else: new_air_by_date = True if air_by_date == 'enable' else False new_air_by_date = 'on' if new_air_by_date else 'off' if flatten_folders == 'keep': new_flatten_folders = showObj.flatten_folders else: new_flatten_folders = True if flatten_folders == 'enable' else False 
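            # Every mass-edit field follows the same pattern: 'keep' preserves the show's
            # current value, 'enable'/'disable' becomes a bool, and the bool is then
            # re-encoded as 'on'/'off' -- presumably because editShow() reuses the
            # checkbox parsing it applies to the single-show edit form.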
new_flatten_folders = 'on' if new_flatten_folders else 'off' if subtitles == 'keep': new_subtitles = showObj.subtitles else: new_subtitles = True if subtitles == 'enable' else False new_subtitles = 'on' if new_subtitles else 'off' if quality_preset == 'keep': anyQualities, bestQualities = Quality.splitQuality(showObj.quality) if audioLang == 'keep': new_audio_lang = showObj.audio_lang; else: new_audio_lang = audioLang exceptions_list = [] curErrors += self.editShow(curShow, new_show_dir, anyQualities, bestQualities, exceptions_list, defaultEpStatus=new_default_ep_status, archive_firstmatch=new_archive_firstmatch, flatten_folders=new_flatten_folders, paused=new_paused, sports=new_sports, subtitles=new_subtitles, anime=new_anime, scene=new_scene, air_by_date=new_air_by_date, audio_lang=new_audio_lang, directCall=True) if curErrors: logger.log(u"Errors: " + str(curErrors), logger.ERROR) errors.append('<b>%s:</b>\n<ul>' % showObj.name + ' '.join( ['<li>%s</li>' % error for error in curErrors]) + "</ul>") if len(errors) > 0: ui.notifications.error('%d error%s while saving changes:' % (len(errors), "" if len(errors) == 1 else "s"), " ".join(errors)) return self.redirect("/manage/") def massUpdate(self, toUpdate=None, toRefresh=None, toRename=None, toDelete=None, toRemove=None, toMetadata=None, toSubtitle=None): if toUpdate is not None: toUpdate = toUpdate.split('|') else: toUpdate = [] if toRefresh is not None: toRefresh = toRefresh.split('|') else: toRefresh = [] if toRename is not None: toRename = toRename.split('|') else: toRename = [] if toSubtitle is not None: toSubtitle = toSubtitle.split('|') else: toSubtitle = [] if toDelete is not None: toDelete = toDelete.split('|') else: toDelete = [] if toRemove is not None: toRemove = toRemove.split('|') else: toRemove = [] if toMetadata is not None: toMetadata = toMetadata.split('|') else: toMetadata = [] errors = [] refreshes = [] updates = [] renames = [] subtitles = [] for curShowID in set(toUpdate + toRefresh + toRename + toSubtitle + toDelete + toRemove + toMetadata): if curShowID == '': continue showObj = sickbeard.helpers.findCertainShow(sickbeard.showList, int(curShowID)) if showObj is None: continue if curShowID in toDelete: showObj.deleteShow(True) # don't do anything else if it's being deleted continue if curShowID in toRemove: showObj.deleteShow() # don't do anything else if it's being remove continue if curShowID in toUpdate: try: sickbeard.showQueueScheduler.action.updateShow(showObj, True) updates.append(showObj.name) except exceptions.CantUpdateException, e: errors.append("Unable to update show: {0}".format(str(e))) # don't bother refreshing shows that were updated anyway if curShowID in toRefresh and curShowID not in toUpdate: try: sickbeard.showQueueScheduler.action.refreshShow(showObj) refreshes.append(showObj.name) except exceptions.CantRefreshException, e: errors.append("Unable to refresh show " + showObj.name + ": " + ex(e)) if curShowID in toRename: sickbeard.showQueueScheduler.action.renameShowEpisodes(showObj) renames.append(showObj.name) if curShowID in toSubtitle: sickbeard.showQueueScheduler.action.downloadSubtitles(showObj) subtitles.append(showObj.name) if len(errors) > 0: ui.notifications.error("Errors encountered", '<br >\n'.join(errors)) messageDetail = "" if len(updates) > 0: messageDetail += "<br /><b>Updates</b><br /><ul><li>" messageDetail += "</li><li>".join(updates) messageDetail += "</li></ul>" if len(refreshes) > 0: messageDetail += "<br /><b>Refreshes</b><br /><ul><li>" messageDetail += 
"</li><li>".join(refreshes) messageDetail += "</li></ul>" if len(renames) > 0: messageDetail += "<br /><b>Renames</b><br /><ul><li>" messageDetail += "</li><li>".join(renames) messageDetail += "</li></ul>" if len(subtitles) > 0: messageDetail += "<br /><b>Subtitles</b><br /><ul><li>" messageDetail += "</li><li>".join(subtitles) messageDetail += "</li></ul>" if len(updates + refreshes + renames + subtitles) > 0: ui.notifications.message("The following actions were queued:", messageDetail) return self.redirect("/manage/") def manageTorrents(self): t = PageTemplate(rh=self, file="manage_torrents.tmpl") t.info_download_station = '' t.submenu = self.ManageMenu() if re.search('localhost', sickbeard.TORRENT_HOST): if sickbeard.LOCALHOST_IP == '': t.webui_url = re.sub('localhost', helpers.get_lan_ip(), sickbeard.TORRENT_HOST) else: t.webui_url = re.sub('localhost', sickbeard.LOCALHOST_IP, sickbeard.TORRENT_HOST) else: t.webui_url = sickbeard.TORRENT_HOST if sickbeard.TORRENT_METHOD == 'utorrent': t.webui_url = '/'.join(s.strip('/') for s in (t.webui_url, 'gui/')) if sickbeard.TORRENT_METHOD == 'download_station': if helpers.check_url(t.webui_url + 'download/'): t.webui_url = t.webui_url + 'download/' else: t.info_download_station = '<p>To have a better experience please set the Download Station alias as <code>download</code>, you can check this setting in the Synology DSM <b>Control Panel</b> > <b>Application Portal</b>. Make sure you allow DSM to be embedded with iFrames too in <b>Control Panel</b> > <b>DSM Settings</b> > <b>Security</b>.</p><br/><p>There is more information about this available <a href="https://github.com/midgetspy/Sick-Beard/pull/338">here</a>.</p><br/>' if not sickbeard.TORRENT_PASSWORD == "" and not sickbeard.TORRENT_USERNAME == "": t.webui_url = re.sub('://', '://' + str(sickbeard.TORRENT_USERNAME) + ':' + str(sickbeard.TORRENT_PASSWORD) + '@' ,t.webui_url) if sickbeard.NZB_METHOD == "sabnzbd": if re.search('localhost', sickbeard.SAB_HOST): if sickbeard.LOCALHOST_IP == '': t.webui_url_nzb = re.sub('localhost', helpers.get_lan_ip(), sickbeard.SAB_HOST) else: t.webui_url_nzb = re.sub('localhost', sickbeard.LOCALHOST_IP, sickbeard.SAB_HOST) else: t.webui_url_nzb = sickbeard.SAB_HOST if not sickbeard.SAB_PASSWORD == "" and not sickbeard.SAB_USERNAME == "": t.webui_url_nzb = re.sub('://', '://' + str(sickbeard.SAB_USERNAME) + ':' + str(sickbeard.SAB_PASSWORD) + '@' ,t.webui_url_nzb) if sickbeard.NZB_METHOD == "nzbget": if re.search('localhost', sickbeard.NZBGET_HOST): if sickbeard.LOCALHOST_IP == '': t.webui_url_nzb = re.sub('localhost', helpers.get_lan_ip(), sickbeard.NZBGET_HOST) else: t.webui_url_nzb = re.sub('localhost', sickbeard.LOCALHOST_IP, sickbeard.NZBGET_HOST) else: t.webui_url_nzb = sickbeard.NZBGET_HOST if not sickbeard.NZBGET_PASSWORD == "" and not sickbeard.NZBGET_USERNAME == "": t.webui_url_nzb = re.sub('://', '://' + str(sickbeard.NZBGET_USERNAME) + ':' + str(sickbeard.NZBGET_PASSWORD) + '@' ,t.webui_url_nzb) return t.respond() def failedDownloads(self, limit=100, toRemove=None): myDB = db.DBConnection('failed.db') if limit == "0": sqlResults = myDB.select("SELECT * FROM failed") else: sqlResults = myDB.select("SELECT * FROM failed LIMIT ?", [limit]) toRemove = toRemove.split("|") if toRemove is not None else [] for release in toRemove: myDB.action("DELETE FROM failed WHERE failed.release = ?", [release]) if toRemove: return self.redirect('/manage/failedDownloads/') t = PageTemplate(rh=self, file="manage_failedDownloads.tmpl") t.failedResults = sqlResults t.limit 
= limit t.submenu = self.ManageMenu() return t.respond() @route('/manage/manageSearches(/?.*)') class ManageSearches(Manage): def __init__(self, *args, **kwargs): super(ManageSearches, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="manage_manageSearches.tmpl") # t.backlogPI = sickbeard.backlogSearchScheduler.action.getProgressIndicator() t.backlogPaused = sickbeard.searchQueueScheduler.action.is_backlog_paused() t.backlogRunning = sickbeard.searchQueueScheduler.action.is_backlog_in_progress() t.dailySearchStatus = sickbeard.dailySearchScheduler.action.amActive t.findPropersStatus = sickbeard.properFinderScheduler.action.amActive t.queueLength = sickbeard.searchQueueScheduler.action.queue_length() t.submenu = self.ManageMenu() return t.respond() def forceBacklog(self): # force it to run the next time it looks result = sickbeard.backlogSearchScheduler.forceRun() if result: logger.log(u"Backlog search forced") ui.notifications.message('Backlog search started') return self.redirect("/manage/manageSearches/") def forceSearch(self): # force it to run the next time it looks result = sickbeard.dailySearchScheduler.forceRun() if result: logger.log(u"Daily search forced") ui.notifications.message('Daily search started') return self.redirect("/manage/manageSearches/") def forceFindPropers(self): # force it to run the next time it looks result = sickbeard.properFinderScheduler.forceRun() if result: logger.log(u"Find propers search forced") ui.notifications.message('Find propers search started') return self.redirect("/manage/manageSearches/") def pauseBacklog(self, paused=None): if paused == "1": sickbeard.searchQueueScheduler.action.pause_backlog() else: sickbeard.searchQueueScheduler.action.unpause_backlog() return self.redirect("/manage/manageSearches/") @route('/history(/?.*)') class History(WebRoot): def __init__(self, *args, **kwargs): super(History, self).__init__(*args, **kwargs) def index(self, limit=100): # sqlResults = myDB.select("SELECT h.*, show_name, name FROM history h, tv_shows s, tv_episodes e WHERE h.showid=s.indexer_id AND h.showid=e.showid AND h.season=e.season AND h.episode=e.episode ORDER BY date DESC LIMIT "+str(numPerPage*(p-1))+", "+str(numPerPage)) myDB = db.DBConnection() if limit == "0": sqlResults = myDB.select( "SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.indexer_id ORDER BY date DESC") else: sqlResults = myDB.select( "SELECT h.*, show_name FROM history h, tv_shows s WHERE h.showid=s.indexer_id ORDER BY date DESC LIMIT ?", [limit]) history = {'show_id': 0, 'season': 0, 'episode': 0, 'quality': 0, 'actions': [{'time': '', 'action': '', 'provider': ''}]} compact = [] for sql_result in sqlResults: if not any((history['show_id'] == sql_result['showid'] and history['season'] == sql_result['season'] and history['episode'] == sql_result['episode'] and history['quality'] == sql_result['quality']) for history in compact): history = {} history['show_id'] = sql_result['showid'] history['season'] = sql_result['season'] history['episode'] = sql_result['episode'] history['quality'] = sql_result['quality'] history['show_name'] = sql_result['show_name'] history['resource'] = sql_result['resource'] action = {} history['actions'] = [] action['time'] = sql_result['date'] action['action'] = sql_result['action'] action['provider'] = sql_result['provider'] action['resource'] = sql_result['resource'] history['actions'].append(action) history['actions'].sort(key=lambda x: x['time']) compact.append(history) else: index = [i for i, dict in 
enumerate(compact) \ if dict['show_id'] == sql_result['showid'] \ and dict['season'] == sql_result['season'] \ and dict['episode'] == sql_result['episode'] and dict['quality'] == sql_result['quality']][0] action = {} history = compact[index] action['time'] = sql_result['date'] action['action'] = sql_result['action'] action['provider'] = sql_result['provider'] action['resource'] = sql_result['resource'] history['actions'].append(action) history['actions'].sort(key=lambda x: x['time'], reverse=True) t = PageTemplate(rh=self, file="history.tmpl") t.historyResults = sqlResults t.compactResults = compact t.limit = limit t.submenu = [ {'title': 'Clear History', 'path': 'history/clearHistory'}, {'title': 'Trim History', 'path': 'history/trimHistory'}, ] return t.respond() def clearHistory(self): myDB = db.DBConnection() myDB.action("DELETE FROM history WHERE 1=1") ui.notifications.message('History cleared') return self.redirect("/history/") def trimHistory(self): myDB = db.DBConnection() myDB.action("DELETE FROM history WHERE date < " + str( (datetime.datetime.today() - datetime.timedelta(days=30)).strftime(history.dateFormat))) ui.notifications.message('Removed history entries greater than 30 days old') return self.redirect("/history/") @route('/config(/?.*)') class Config(WebRoot): def __init__(self, *args, **kwargs): super(Config, self).__init__(*args, **kwargs) def ConfigMenu(self): menu = [ {'title': 'General', 'path': 'config/general/'}, {'title': 'Backup/Restore', 'path': 'config/backuprestore/'}, {'title': 'Search Settings', 'path': 'config/search/'}, {'title': 'Search Providers', 'path': 'config/providers/'}, {'title': 'Subtitles Settings', 'path': 'config/subtitles/'}, {'title': 'Post Processing', 'path': 'config/postProcessing/'}, {'title': 'Notifications', 'path': 'config/notifications/'}, {'title': 'Anime', 'path': 'config/anime/'}, ] return menu def index(self): t = PageTemplate(rh=self, file="config.tmpl") t.submenu = self.ConfigMenu() return t.respond() @route('/config/general(/?.*)') class ConfigGeneral(Config): def __init__(self, *args, **kwargs): super(ConfigGeneral, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="config_general.tmpl") t.submenu = self.ConfigMenu() return t.respond() def generateApiKey(self): return helpers.generateApiKey() def saveRootDirs(self, rootDirString=None): sickbeard.ROOT_DIRS = rootDirString def saveAddShowDefaults(self, defaultStatus, anyQualities, bestQualities, defaultFlattenFolders, subtitles=False, anime=False, scene=False, audio_lang=None): if anyQualities: anyQualities = anyQualities.split(',') else: anyQualities = [] if bestQualities: bestQualities = bestQualities.split(',') else: bestQualities = [] newQuality = Quality.combineQualities(map(int, anyQualities), map(int, bestQualities)) sickbeard.STATUS_DEFAULT = int(defaultStatus) sickbeard.QUALITY_DEFAULT = int(newQuality) sickbeard.AUDIO_SHOW_DEFAULT = str(audio_lang) sickbeard.FLATTEN_FOLDERS_DEFAULT = config.checkbox_to_value(defaultFlattenFolders) sickbeard.SUBTITLES_DEFAULT = config.checkbox_to_value(subtitles) sickbeard.ANIME_DEFAULT = config.checkbox_to_value(anime) sickbeard.SCENE_DEFAULT = config.checkbox_to_value(scene) sickbeard.save_config() def saveGeneral(self, log_dir=None, log_nr = 5, log_size = 1048576, web_port=None, web_log=None, encryption_version=None, web_ipv6=None, update_shows_on_start=None, update_shows_on_snatch=None, trash_remove_show=None, trash_rotate_logs=None, update_frequency=None, indexerDefaultLang='en', launch_browser=None, 
showupdate_hour=3, web_username=None, api_key=None, indexer_default=None, timezone_display=None, cpu_preset=None, web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None, handle_reverse_proxy=None, sort_article=None, auto_update=None, notify_on_update=None, proxy_setting=None, proxy_indexers=None, anon_redirect=None, git_path=None, git_remote=None, calendar_unprotected=None, debug=None, no_restart=None, display_filesize=None, fuzzy_dating=None, trim_zero=None, date_preset=None, date_preset_na=None, time_preset=None, indexer_timeout=None, download_url=None, rootDir=None, theme_name=None, git_reset=None, git_username=None, git_password=None, git_autoissues=None): results = [] # Misc sickbeard.DOWNLOAD_URL = download_url sickbeard.INDEXER_DEFAULT_LANGUAGE = indexerDefaultLang sickbeard.LAUNCH_BROWSER = config.checkbox_to_value(launch_browser) if sickbeard.SHOWUPDATE_HOUR != config.to_int(showupdate_hour): sickbeard.showUpdateScheduler.stop.set() logger.log(u"Waiting for the SHOWUPDATER thread to exit so we can set new start hour") try: sickbeard.showUpdateScheduler.join(10) # Wait 10 sec for the thread to exit except: pass if sickbeard.showUpdateScheduler.isAlive(): logger.log(u"Unable to stop SHOWUPDATER thread, the new configuration will be applied after a restart", logger.WARNING) else: logger.log(u"Starting SHOWUPDATER thread with the new start hour: " + str(config.to_int(showupdate_hour))) sickbeard.showUpdateScheduler = scheduler.Scheduler(showUpdater.ShowUpdater(), cycleTime=datetime.timedelta(hours=1), threadName="SHOWUPDATER", start_time=datetime.time(hour=config.to_int(showupdate_hour))) sickbeard.SHOWUPDATE_HOUR = config.to_int(showupdate_hour) config.change_VERSION_NOTIFY(config.checkbox_to_value(version_notify)) sickbeard.AUTO_UPDATE = config.checkbox_to_value(auto_update) sickbeard.NOTIFY_ON_UPDATE = config.checkbox_to_value(notify_on_update) # sickbeard.LOG_DIR is set in config.change_LOG_DIR() sickbeard.LOG_NR = log_nr sickbeard.LOG_SIZE = log_size sickbeard.UPDATE_SHOWS_ON_START = config.checkbox_to_value(update_shows_on_start) sickbeard.UPDATE_SHOWS_ON_SNATCH = config.checkbox_to_value(update_shows_on_snatch) sickbeard.TRASH_REMOVE_SHOW = config.checkbox_to_value(trash_remove_show) sickbeard.TRASH_ROTATE_LOGS = config.checkbox_to_value(trash_rotate_logs) config.change_UPDATE_FREQUENCY(update_frequency) sickbeard.LAUNCH_BROWSER = config.checkbox_to_value(launch_browser) sickbeard.SORT_ARTICLE = config.checkbox_to_value(sort_article) sickbeard.CPU_PRESET = cpu_preset sickbeard.ANON_REDIRECT = anon_redirect sickbeard.PROXY_SETTING = proxy_setting sickbeard.PROXY_INDEXERS = config.checkbox_to_value(proxy_indexers) sickbeard.GIT_USERNAME = git_username sickbeard.GIT_PASSWORD = git_password sickbeard.GIT_RESET = config.checkbox_to_value(git_reset) sickbeard.GIT_AUTOISSUES = config.checkbox_to_value(git_autoissues) sickbeard.GIT_PATH = git_path sickbeard.GIT_REMOTE = git_remote sickbeard.CALENDAR_UNPROTECTED = config.checkbox_to_value(calendar_unprotected) sickbeard.NO_RESTART = config.checkbox_to_value(no_restart) sickbeard.DEBUG = config.checkbox_to_value(debug) # sickbeard.LOG_DIR is set in config.change_LOG_DIR() sickbeard.WEB_PORT = config.to_int(web_port) sickbeard.WEB_IPV6 = config.checkbox_to_value(web_ipv6) # sickbeard.WEB_LOG is set in config.change_LOG_DIR() if config.checkbox_to_value(encryption_version) == 1: sickbeard.ENCRYPTION_VERSION = 2 else: sickbeard.ENCRYPTION_VERSION = 0 sickbeard.WEB_USERNAME = web_username 
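        # The web interface credentials are stored from the form as-is; whether they are
        # obfuscated in config.ini depends on the ENCRYPTION_VERSION chosen just above
        # (checkbox ticked maps to version 2; otherwise 0, which presumably leaves them in plain text).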
sickbeard.WEB_PASSWORD = web_password sickbeard.DISPLAY_FILESIZE = config.checkbox_to_value(display_filesize) sickbeard.FUZZY_DATING = config.checkbox_to_value(fuzzy_dating) sickbeard.TRIM_ZERO = config.checkbox_to_value(trim_zero) if date_preset: sickbeard.DATE_PRESET = date_preset discarded_na_data = date_preset_na if indexer_default: sickbeard.INDEXER_DEFAULT = config.to_int(indexer_default) if indexer_timeout: sickbeard.INDEXER_TIMEOUT = config.to_int(indexer_timeout) if time_preset: sickbeard.TIME_PRESET_W_SECONDS = time_preset sickbeard.TIME_PRESET = sickbeard.TIME_PRESET_W_SECONDS.replace(u":%S", u"") sickbeard.TIMEZONE_DISPLAY = timezone_display if not config.change_LOG_DIR(log_dir, web_log): results += ["Unable to create directory " + os.path.normpath(log_dir) + ", log directory not changed."] sickbeard.API_KEY = api_key sickbeard.ENABLE_HTTPS = config.checkbox_to_value(enable_https) if not config.change_HTTPS_CERT(https_cert): results += [ "Unable to create directory " + os.path.normpath(https_cert) + ", https cert directory not changed."] if not config.change_HTTPS_KEY(https_key): results += [ "Unable to create directory " + os.path.normpath(https_key) + ", https key directory not changed."] sickbeard.HANDLE_REVERSE_PROXY = config.checkbox_to_value(handle_reverse_proxy) sickbeard.THEME_NAME = theme_name sickbeard.save_config() if len(results) > 0: for x in results: logger.log(x, logger.ERROR) ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results)) else: ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE)) return self.redirect("/config/general/") @route('/config/backuprestore(/?.*)') class ConfigBackupRestore(Config): def __init__(self, *args, **kwargs): super(ConfigBackupRestore, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="config_backuprestore.tmpl") t.submenu = self.ConfigMenu() return t.respond() def backup(self, backupDir=None): finalResult = '' if backupDir: source = [os.path.join(sickbeard.DATA_DIR, 'sickbeard.db'), sickbeard.CONFIG_FILE] source.append(os.path.join(sickbeard.DATA_DIR, 'failed.db')) source.append(os.path.join(sickbeard.DATA_DIR, 'cache.db')) target = os.path.join(backupDir, 'sickrage-' + time.strftime('%Y%m%d%H%M%S') + '.zip') for (path, dirs, files) in os.walk(sickbeard.CACHE_DIR, topdown=True): for dirname in dirs: if path == sickbeard.CACHE_DIR and dirname not in ['images']: dirs.remove(dirname) for filename in files: source.append(os.path.join(path, filename)) if helpers.backupConfigZip(source, target, sickbeard.DATA_DIR): finalResult += "Successful backup to " + target else: finalResult += "Backup FAILED" else: finalResult += "You need to choose a folder to save your backup to!" finalResult += "<br />\n" return finalResult def restore(self, backupFile=None): finalResult = '' if backupFile: source = backupFile target_dir = os.path.join(sickbeard.DATA_DIR, 'restore') if helpers.restoreConfigZip(source, target_dir): finalResult += "Successfully extracted restore files to " + target_dir finalResult += "<br>Restart sickrage to complete the restore." else: finalResult += "Restore FAILED" else: finalResult += "You need to select a backup file to restore!" 
finalResult += "<br />\n" return finalResult @route('/config/search(/?.*)') class ConfigSearch(Config): def __init__(self, *args, **kwargs): super(ConfigSearch, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="config_search.tmpl") t.submenu = self.ConfigMenu() return t.respond() def saveSearch(self, use_nzbs=None, use_torrents=None, nzb_dir=None, sab_username=None, sab_password=None, sab_apikey=None, sab_category=None, sab_category_anime=None, sab_host=None, nzbget_username=None, nzbget_password=None, nzbget_category=None, nzbget_category_anime=None, nzbget_priority=None, nzbget_host=None, nzbget_use_https=None, backlog_days=None, backlog_frequency=None, dailysearch_frequency=None, nzb_method=None, torrent_method=None, usenet_retention=None, download_propers=None, check_propers_interval=None, allow_high_priority=None, sab_forced=None, randomize_providers=None, backlog_startup=None, use_failed_downloads=None, delete_failed=None, dailysearch_startup=None, torrent_dir=None, torrent_username=None, torrent_password=None, torrent_host=None, torrent_label=None, torrent_label_anime=None, torrent_path=None, torrent_verify_cert=None, torrent_seed_time=None, torrent_paused=None, torrent_high_bandwidth=None, coming_eps_missed_range=None, torrent_rpcurl=None, torrent_auth_type = None, ignore_words=None, require_words=None): results = [] if not config.change_NZB_DIR(nzb_dir): results += ["Unable to create directory " + os.path.normpath(nzb_dir) + ", dir not changed."] if not config.change_TORRENT_DIR(torrent_dir): results += ["Unable to create directory " + os.path.normpath(torrent_dir) + ", dir not changed."] config.change_DAILYSEARCH_FREQUENCY(dailysearch_frequency) config.change_BACKLOG_FREQUENCY(backlog_frequency) sickbeard.BACKLOG_DAYS = config.to_int(backlog_days, default=7) sickbeard.COMING_EPS_MISSED_RANGE = config.to_int(coming_eps_missed_range,default=7) sickbeard.USE_NZBS = config.checkbox_to_value(use_nzbs) sickbeard.USE_TORRENTS = config.checkbox_to_value(use_torrents) sickbeard.NZB_METHOD = nzb_method sickbeard.TORRENT_METHOD = torrent_method sickbeard.USENET_RETENTION = config.to_int(usenet_retention, default=500) sickbeard.IGNORE_WORDS = ignore_words if ignore_words else "" sickbeard.REQUIRE_WORDS = require_words if require_words else "" sickbeard.RANDOMIZE_PROVIDERS = config.checkbox_to_value(randomize_providers) sickbeard.DOWNLOAD_PROPERS = config.checkbox_to_value(download_propers) config.change_DOWNLOAD_PROPERS(sickbeard.DOWNLOAD_PROPERS) if sickbeard.DOWNLOAD_PROPERS and not sickbeard.properFinderScheduler.isAlive(): sickbeard.properFinderScheduler.silent = False try: sickbeard.properFinderScheduler.start() except: pass elif not sickbeard.DOWNLOAD_PROPERS: sickbeard.properFinderScheduler.stop.set() sickbeard.properFinderScheduler.silent = True try: sickbeard.properFinderScheduler.join(5) except: pass sickbeard.CHECK_PROPERS_INTERVAL = check_propers_interval sickbeard.ALLOW_HIGH_PRIORITY = config.checkbox_to_value(allow_high_priority) sickbeard.DAILYSEARCH_STARTUP = config.checkbox_to_value(dailysearch_startup) sickbeard.BACKLOG_STARTUP = config.checkbox_to_value(backlog_startup) sickbeard.USE_FAILED_DOWNLOADS = config.checkbox_to_value(use_failed_downloads) sickbeard.DELETE_FAILED = config.checkbox_to_value(delete_failed) sickbeard.SAB_USERNAME = sab_username sickbeard.SAB_PASSWORD = sab_password sickbeard.SAB_APIKEY = sab_apikey.strip() sickbeard.SAB_CATEGORY = sab_category sickbeard.SAB_CATEGORY_ANIME = sab_category_anime sickbeard.SAB_HOST = 
config.clean_url(sab_host) sickbeard.SAB_FORCED = config.checkbox_to_value(sab_forced) sickbeard.NZBGET_USERNAME = nzbget_username sickbeard.NZBGET_PASSWORD = nzbget_password sickbeard.NZBGET_CATEGORY = nzbget_category sickbeard.NZBGET_CATEGORY_ANIME = nzbget_category_anime sickbeard.NZBGET_HOST = config.clean_host(nzbget_host) sickbeard.NZBGET_USE_HTTPS = config.checkbox_to_value(nzbget_use_https) sickbeard.NZBGET_PRIORITY = config.to_int(nzbget_priority, default=100) sickbeard.TORRENT_USERNAME = torrent_username sickbeard.TORRENT_PASSWORD = torrent_password sickbeard.TORRENT_LABEL = torrent_label sickbeard.TORRENT_LABEL_ANIME = torrent_label_anime sickbeard.TORRENT_VERIFY_CERT = config.checkbox_to_value(torrent_verify_cert) sickbeard.TORRENT_PATH = torrent_path sickbeard.TORRENT_SEED_TIME = torrent_seed_time sickbeard.TORRENT_PAUSED = config.checkbox_to_value(torrent_paused) sickbeard.TORRENT_HIGH_BANDWIDTH = config.checkbox_to_value(torrent_high_bandwidth) sickbeard.TORRENT_HOST = config.clean_url(torrent_host) sickbeard.TORRENT_RPCURL = torrent_rpcurl sickbeard.TORRENT_AUTH_TYPE = torrent_auth_type sickbeard.save_config() if len(results) > 0: for x in results: logger.log(x, logger.ERROR) ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results)) else: ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE)) return self.redirect("/config/search/") @route('/config/postProcessing(/?.*)') class ConfigPostProcessing(Config): def __init__(self, *args, **kwargs): super(ConfigPostProcessing, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="config_postProcessing.tmpl") t.submenu = self.ConfigMenu() return t.respond() def savePostProcessing(self, naming_pattern=None, naming_multi_ep=None, kodi_data=None, kodi_12plus_data=None, mediabrowser_data=None, sony_ps3_data=None, wdtv_data=None, tivo_data=None, mede8er_data=None, keep_processed_dir=None, process_method=None, del_rar_contents=None, process_automatically=None, no_delete=None, rename_episodes=None, airdate_episodes=None, unpack=None, move_associated_files=None, sync_files=None, postpone_if_sync_files=None, nfo_rename=None, tv_download_dir=None, naming_custom_abd=None, naming_anime=None, naming_abd_pattern=None, naming_strip_year=None, use_failed_downloads=None, delete_failed=None, extra_scripts=None, skip_removed_files=None, naming_custom_sports=None, naming_sports_pattern=None, naming_custom_anime=None, naming_anime_pattern=None, naming_anime_multi_ep=None, autopostprocesser_frequency=None): results = [] if not config.change_TV_DOWNLOAD_DIR(tv_download_dir): results += ["Unable to create directory " + os.path.normpath(tv_download_dir) + ", dir not changed."] sickbeard.PROCESS_AUTOMATICALLY = config.checkbox_to_value(process_automatically) config.change_AUTOPOSTPROCESSER_FREQUENCY(autopostprocesser_frequency) if sickbeard.PROCESS_AUTOMATICALLY and not sickbeard.autoPostProcesserScheduler.isAlive(): sickbeard.autoPostProcesserScheduler.silent = False try: sickbeard.autoPostProcesserScheduler.start() except: pass elif not sickbeard.PROCESS_AUTOMATICALLY: sickbeard.autoPostProcesserScheduler.stop.set() sickbeard.autoPostProcesserScheduler.silent = True try: sickbeard.autoPostProcesserScheduler.join(5) except: pass if unpack: if self.isRarSupported() != 'not supported': sickbeard.UNPACK = config.checkbox_to_value(unpack) else: sickbeard.UNPACK = 0 results.append("Unpacking Not Supported, disabling unpack setting") else: sickbeard.UNPACK = 
config.checkbox_to_value(unpack) sickbeard.NO_DELETE = config.checkbox_to_value(no_delete) sickbeard.KEEP_PROCESSED_DIR = config.checkbox_to_value(keep_processed_dir) sickbeard.PROCESS_METHOD = process_method sickbeard.DELRARCONTENTS = config.checkbox_to_value(del_rar_contents) sickbeard.EXTRA_SCRIPTS = [x.strip() for x in extra_scripts.split('|') if x.strip()] sickbeard.RENAME_EPISODES = config.checkbox_to_value(rename_episodes) sickbeard.AIRDATE_EPISODES = config.checkbox_to_value(airdate_episodes) sickbeard.MOVE_ASSOCIATED_FILES = config.checkbox_to_value(move_associated_files) sickbeard.SYNC_FILES = sync_files sickbeard.POSTPONE_IF_SYNC_FILES = config.checkbox_to_value(postpone_if_sync_files) sickbeard.NAMING_CUSTOM_ABD = config.checkbox_to_value(naming_custom_abd) sickbeard.NAMING_CUSTOM_SPORTS = config.checkbox_to_value(naming_custom_sports) sickbeard.NAMING_CUSTOM_ANIME = config.checkbox_to_value(naming_custom_anime) sickbeard.NAMING_STRIP_YEAR = config.checkbox_to_value(naming_strip_year) sickbeard.USE_FAILED_DOWNLOADS = config.checkbox_to_value(use_failed_downloads) sickbeard.DELETE_FAILED = config.checkbox_to_value(delete_failed) sickbeard.SKIP_REMOVED_FILES = config.checkbox_to_value(skip_removed_files) sickbeard.NFO_RENAME = config.checkbox_to_value(nfo_rename) sickbeard.METADATA_KODI = kodi_data sickbeard.METADATA_KODI_12PLUS = kodi_12plus_data sickbeard.METADATA_MEDIABROWSER = mediabrowser_data sickbeard.METADATA_PS3 = sony_ps3_data sickbeard.METADATA_WDTV = wdtv_data sickbeard.METADATA_TIVO = tivo_data sickbeard.METADATA_MEDE8ER = mede8er_data sickbeard.metadata_provider_dict['KODI'].set_config(sickbeard.METADATA_KODI) sickbeard.metadata_provider_dict['KODI 12+'].set_config(sickbeard.METADATA_KODI_12PLUS) sickbeard.metadata_provider_dict['MediaBrowser'].set_config(sickbeard.METADATA_MEDIABROWSER) sickbeard.metadata_provider_dict['Sony PS3'].set_config(sickbeard.METADATA_PS3) sickbeard.metadata_provider_dict['WDTV'].set_config(sickbeard.METADATA_WDTV) sickbeard.metadata_provider_dict['TIVO'].set_config(sickbeard.METADATA_TIVO) sickbeard.metadata_provider_dict['Mede8er'].set_config(sickbeard.METADATA_MEDE8ER) if self.isNamingValid(naming_pattern, naming_multi_ep, anime_type=naming_anime) != "invalid": sickbeard.NAMING_PATTERN = naming_pattern sickbeard.NAMING_MULTI_EP = int(naming_multi_ep) sickbeard.NAMING_ANIME = int(naming_anime) sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders() else: if int(naming_anime) in [1, 2]: results.append("You tried saving an invalid anime naming config, not saving your naming settings") else: results.append("You tried saving an invalid naming config, not saving your naming settings") if self.isNamingValid(naming_anime_pattern, naming_anime_multi_ep, anime_type=naming_anime) != "invalid": sickbeard.NAMING_ANIME_PATTERN = naming_anime_pattern sickbeard.NAMING_ANIME_MULTI_EP = int(naming_anime_multi_ep) sickbeard.NAMING_ANIME = int(naming_anime) sickbeard.NAMING_FORCE_FOLDERS = naming.check_force_season_folders() else: if int(naming_anime) in [1, 2]: results.append("You tried saving an invalid anime naming config, not saving your naming settings") else: results.append("You tried saving an invalid naming config, not saving your naming settings") if self.isNamingValid(naming_abd_pattern, None, abd=True) != "invalid": sickbeard.NAMING_ABD_PATTERN = naming_abd_pattern else: results.append( "You tried saving an invalid air-by-date naming config, not saving your air-by-date settings") if self.isNamingValid(naming_sports_pattern, None, 
sports=True) != "invalid": sickbeard.NAMING_SPORTS_PATTERN = naming_sports_pattern else: results.append( "You tried saving an invalid sports naming config, not saving your sports settings") sickbeard.save_config() if len(results) > 0: for x in results: logger.log(x, logger.ERROR) ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results)) else: ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE)) return self.redirect("/config/postProcessing/") def testNaming(self, pattern=None, multi=None, abd=False, sports=False, anime_type=None): if multi is not None: multi = int(multi) if anime_type is not None: anime_type = int(anime_type) result = naming.test_name(pattern, multi, abd, sports, anime_type) result = ek.ek(os.path.join, result['dir'], result['name']) return result def isNamingValid(self, pattern=None, multi=None, abd=False, sports=False, anime_type=None): if pattern is None: return "invalid" if multi is not None: multi = int(multi) if anime_type is not None: anime_type = int(anime_type) # air by date shows just need one check, we don't need to worry about season folders if abd: is_valid = naming.check_valid_abd_naming(pattern) require_season_folders = False # sport shows just need one check, we don't need to worry about season folders elif sports: is_valid = naming.check_valid_sports_naming(pattern) require_season_folders = False else: # check validity of single and multi ep cases for the whole path is_valid = naming.check_valid_naming(pattern, multi, anime_type) # check validity of single and multi ep cases for only the file name require_season_folders = naming.check_force_season_folders(pattern, multi, anime_type) if is_valid and not require_season_folders: return "valid" elif is_valid and require_season_folders: return "seasonfolders" else: return "invalid" def isRarSupported(self): """ Test Packing Support: - Simulating in memory rar extraction on test.rar file """ try: rar_path = os.path.join(sickbeard.PROG_DIR, 'lib', 'unrar2', 'test.rar') testing = RarFile(rar_path).read_files('*test.txt') if testing[0][1] == 'This is only a test.': return 'supported' logger.log(u'Rar Not Supported: Can not read the content of test file', logger.ERROR) return 'not supported' except Exception, e: logger.log(u'Rar Not Supported: ' + ex(e), logger.ERROR) return 'not supported' @route('/config/providers(/?.*)') class ConfigProviders(Config): def __init__(self, *args, **kwargs): super(ConfigProviders, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="config_providers.tmpl") t.submenu = self.ConfigMenu() return t.respond() def canAddNewznabProvider(self, name): if not name: return json.dumps({'error': 'No Provider Name specified'}) providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList)) tempProvider = newznab.NewznabProvider(name, '') if tempProvider.getID() in providerDict: return json.dumps({'error': 'Provider Name already exists as ' + providerDict[tempProvider.getID()].name}) else: return json.dumps({'success': tempProvider.getID()}) def saveNewznabProvider(self, name, url, key=''): if not name or not url: return '0' providerDict = dict(zip([x.name for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList)) if name in providerDict: if not providerDict[name].default: providerDict[name].name = name providerDict[name].url = config.clean_url(url) providerDict[name].key = key # a 0 in the key spot indicates that no key is needed if key == '0': 
providerDict[name].needs_auth = False else: providerDict[name].needs_auth = True return providerDict[name].getID() + '|' + providerDict[name].configStr() else: newProvider = newznab.NewznabProvider(name, url, key=key) sickbeard.newznabProviderList.append(newProvider) return newProvider.getID() + '|' + newProvider.configStr() def getNewznabCategories(self, name, url, key): ''' Retrieves a list of possible categories with category id's Using the default url/api?cat http://yournewznaburl.com/api?t=caps&apikey=yourapikey ''' error = "" success = False if not name: error += "\nNo Provider Name specified" if not url: error += "\nNo Provider Url specified" if not key: error += "\nNo Provider Api key specified" if error <> "": return json.dumps({'success': False, 'error': error}) # Get list with Newznabproviders # providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList)) # Get newznabprovider obj with provided name tempProvider = newznab.NewznabProvider(name, url, key) success, tv_categories, error = tempProvider.get_newznab_categories() return json.dumps({'success': success, 'tv_categories': tv_categories, 'error': error}) def deleteNewznabProvider(self, nnid): providerDict = dict(zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList)) if nnid not in providerDict or providerDict[nnid].default: return '0' # delete it from the list sickbeard.newznabProviderList.remove(providerDict[nnid]) if nnid in sickbeard.PROVIDER_ORDER: sickbeard.PROVIDER_ORDER.remove(nnid) return '1' def canAddTorrentRssProvider(self, name, url, cookies, titleTAG): if not name: return json.dumps({'error': 'Invalid name specified'}) providerDict = dict( zip([x.getID() for x in sickbeard.torrentRssProviderList], sickbeard.torrentRssProviderList)) tempProvider = rsstorrent.TorrentRssProvider(name, url, cookies, titleTAG) if tempProvider.getID() in providerDict: return json.dumps({'error': 'Exists as ' + providerDict[tempProvider.getID()].name}) else: (succ, errMsg) = tempProvider.validateRSS() if succ: return json.dumps({'success': tempProvider.getID()}) else: return json.dumps({'error': errMsg}) def saveTorrentRssProvider(self, name, url, cookies, titleTAG): if not name or not url: return '0' providerDict = dict(zip([x.name for x in sickbeard.torrentRssProviderList], sickbeard.torrentRssProviderList)) if name in providerDict: providerDict[name].name = name providerDict[name].url = config.clean_url(url) providerDict[name].cookies = cookies providerDict[name].titleTAG = titleTAG return providerDict[name].getID() + '|' + providerDict[name].configStr() else: newProvider = rsstorrent.TorrentRssProvider(name, url, cookies, titleTAG) sickbeard.torrentRssProviderList.append(newProvider) return newProvider.getID() + '|' + newProvider.configStr() def deleteTorrentRssProvider(self, id): providerDict = dict( zip([x.getID() for x in sickbeard.torrentRssProviderList], sickbeard.torrentRssProviderList)) if id not in providerDict: return '0' # delete it from the list sickbeard.torrentRssProviderList.remove(providerDict[id]) if id in sickbeard.PROVIDER_ORDER: sickbeard.PROVIDER_ORDER.remove(id) return '1' def saveProviders(self, newznab_string='', torrentrss_string='', provider_order=None, **kwargs): results = [] provider_str_list = provider_order.split() provider_list = [] newznabProviderDict = dict( zip([x.getID() for x in sickbeard.newznabProviderList], sickbeard.newznabProviderList)) finishedNames = [] # add all the newznab info we got into our list if 
newznab_string: for curNewznabProviderStr in newznab_string.split('!!!'): if not curNewznabProviderStr: continue cur_name, cur_url, cur_key, cur_cat = curNewznabProviderStr.split('|') cur_url = config.clean_url(cur_url) newProvider = newznab.NewznabProvider(cur_name, cur_url, key=cur_key) cur_id = newProvider.getID() # if it already exists then update it if cur_id in newznabProviderDict: newznabProviderDict[cur_id].name = cur_name newznabProviderDict[cur_id].url = cur_url newznabProviderDict[cur_id].key = cur_key newznabProviderDict[cur_id].catIDs = cur_cat # a 0 in the key spot indicates that no key is needed if cur_key == '0': newznabProviderDict[cur_id].needs_auth = False else: newznabProviderDict[cur_id].needs_auth = True try: newznabProviderDict[cur_id].search_mode = str(kwargs[cur_id + '_search_mode']).strip() except: pass try: newznabProviderDict[cur_id].search_fallback = config.checkbox_to_value( kwargs[cur_id + '_search_fallback']) except: newznabProviderDict[cur_id].search_fallback = 0 try: newznabProviderDict[cur_id].enable_daily = config.checkbox_to_value( kwargs[cur_id + '_enable_daily']) except: newznabProviderDict[cur_id].enable_daily = 0 try: newznabProviderDict[cur_id].enable_backlog = config.checkbox_to_value( kwargs[cur_id + '_enable_backlog']) except: newznabProviderDict[cur_id].enable_backlog = 0 else: sickbeard.newznabProviderList.append(newProvider) finishedNames.append(cur_id) # delete anything that is missing for curProvider in sickbeard.newznabProviderList: if curProvider.getID() not in finishedNames: sickbeard.newznabProviderList.remove(curProvider) torrentRssProviderDict = dict( zip([x.getID() for x in sickbeard.torrentRssProviderList], sickbeard.torrentRssProviderList)) finishedNames = [] if torrentrss_string: for curTorrentRssProviderStr in torrentrss_string.split('!!!'): if not curTorrentRssProviderStr: continue curName, curURL, curCookies, curTitleTAG = curTorrentRssProviderStr.split('|') curURL = config.clean_url(curURL) newProvider = rsstorrent.TorrentRssProvider(curName, curURL, curCookies, curTitleTAG) curID = newProvider.getID() # if it already exists then update it if curID in torrentRssProviderDict: torrentRssProviderDict[curID].name = curName torrentRssProviderDict[curID].url = curURL torrentRssProviderDict[curID].cookies = curCookies torrentRssProviderDict[curID].curTitleTAG = curTitleTAG else: sickbeard.torrentRssProviderList.append(newProvider) finishedNames.append(curID) # delete anything that is missing for curProvider in sickbeard.torrentRssProviderList: if curProvider.getID() not in finishedNames: sickbeard.torrentRssProviderList.remove(curProvider) # do the enable/disable for curProviderStr in provider_str_list: curProvider, curEnabled = curProviderStr.split(':') curEnabled = config.to_int(curEnabled) curProvObj = [x for x in sickbeard.providers.sortedProviderList() if x.getID() == curProvider and hasattr(x, 'enabled')] if curProvObj: curProvObj[0].enabled = bool(curEnabled) provider_list.append(curProvider) if curProvider in newznabProviderDict: newznabProviderDict[curProvider].enabled = bool(curEnabled) elif curProvider in torrentRssProviderDict: torrentRssProviderDict[curProvider].enabled = bool(curEnabled) # dynamically load provider settings for curTorrentProvider in [curProvider for curProvider in sickbeard.providers.sortedProviderList() if curProvider.providerType == sickbeard.GenericProvider.TORRENT]: if hasattr(curTorrentProvider, 'minseed'): try: curTorrentProvider.minseed = int(str(kwargs[curTorrentProvider.getID() + 
'_minseed']).strip()) except: curTorrentProvider.minseed = 0 if hasattr(curTorrentProvider, 'minleech'): try: curTorrentProvider.minleech = int(str(kwargs[curTorrentProvider.getID() + '_minleech']).strip()) except: curTorrentProvider.minleech = 0 if hasattr(curTorrentProvider, 'ratio'): try: curTorrentProvider.ratio = str(kwargs[curTorrentProvider.getID() + '_ratio']).strip() except: curTorrentProvider.ratio = None if hasattr(curTorrentProvider, 'digest'): try: curTorrentProvider.digest = str(kwargs[curTorrentProvider.getID() + '_digest']).strip() except: curTorrentProvider.digest = None if hasattr(curTorrentProvider, 'hash'): try: curTorrentProvider.hash = str(kwargs[curTorrentProvider.getID() + '_hash']).strip() except: curTorrentProvider.hash = None if hasattr(curTorrentProvider, 'api_key'): try: curTorrentProvider.api_key = str(kwargs[curTorrentProvider.getID() + '_api_key']).strip() except: curTorrentProvider.api_key = None if hasattr(curTorrentProvider, 'username'): try: curTorrentProvider.username = str(kwargs[curTorrentProvider.getID() + '_username']).strip() except: curTorrentProvider.username = None if hasattr(curTorrentProvider, 'password'): try: curTorrentProvider.password = str(kwargs[curTorrentProvider.getID() + '_password']).strip() except: curTorrentProvider.password = None if hasattr(curTorrentProvider, 'passkey'): try: curTorrentProvider.passkey = str(kwargs[curTorrentProvider.getID() + '_passkey']).strip() except: curTorrentProvider.passkey = None if hasattr(curTorrentProvider, 'confirmed'): try: curTorrentProvider.confirmed = config.checkbox_to_value( kwargs[curTorrentProvider.getID() + '_confirmed']) except: curTorrentProvider.confirmed = 0 if hasattr(curTorrentProvider, 'proxy'): try: curTorrentProvider.proxy.enabled = config.checkbox_to_value( kwargs[curTorrentProvider.getID() + '_proxy']) except: curTorrentProvider.proxy.enabled = 0 if hasattr(curTorrentProvider.proxy, 'url'): try: curTorrentProvider.proxy.url = str(kwargs[curTorrentProvider.getID() + '_proxy_url']).strip() except: curTorrentProvider.proxy.url = None if hasattr(curTorrentProvider, 'freeleech'): try: curTorrentProvider.freeleech = config.checkbox_to_value( kwargs[curTorrentProvider.getID() + '_freeleech']) except: curTorrentProvider.freeleech = 0 if hasattr(curTorrentProvider, 'search_mode'): try: curTorrentProvider.search_mode = str(kwargs[curTorrentProvider.getID() + '_search_mode']).strip() except: curTorrentProvider.search_mode = 'eponly' if hasattr(curTorrentProvider, 'search_fallback'): try: curTorrentProvider.search_fallback = config.checkbox_to_value( kwargs[curTorrentProvider.getID() + '_search_fallback']) except: curTorrentProvider.search_fallback = 0 # these exceptions are catching unselected checkboxes if hasattr(curTorrentProvider, 'enable_daily'): try: curTorrentProvider.enable_daily = config.checkbox_to_value( kwargs[curTorrentProvider.getID() + '_enable_daily']) except: curTorrentProvider.enable_daily = 0 # these exceptions are actually catching unselected checkboxes if hasattr(curTorrentProvider, 'enable_backlog'): try: curTorrentProvider.enable_backlog = config.checkbox_to_value( kwargs[curTorrentProvider.getID() + '_enable_backlog']) except: curTorrentProvider.enable_backlog = 0 # these exceptions are actually catching unselected checkboxes if hasattr(curTorrentProvider, 'cat'): try: curTorrentProvider.cat = int(str(kwargs[curTorrentProvider.getID() + '_cat']).strip()) except: curTorrentProvider.cat = 0 if hasattr(curTorrentProvider, 'subtitle'): try: curTorrentProvider.subtitle = 
config.checkbox_to_value( kwargs[curTorrentProvider.getID() + '_subtitle']) except: curTorrentProvider.subtitle = 0 for curNzbProvider in [curProvider for curProvider in sickbeard.providers.sortedProviderList() if curProvider.providerType == sickbeard.GenericProvider.NZB]: if hasattr(curNzbProvider, 'api_key'): try: curNzbProvider.api_key = str(kwargs[curNzbProvider.getID() + '_api_key']).strip() except: curNzbProvider.api_key = None if hasattr(curNzbProvider, 'username'): try: curNzbProvider.username = str(kwargs[curNzbProvider.getID() + '_username']).strip() except: curNzbProvider.username = None if hasattr(curNzbProvider, 'search_mode'): try: curNzbProvider.search_mode = str(kwargs[curNzbProvider.getID() + '_search_mode']).strip() except: curNzbProvider.search_mode = 'eponly' if hasattr(curNzbProvider, 'search_fallback'): try: curNzbProvider.search_fallback = config.checkbox_to_value( kwargs[curNzbProvider.getID() + '_search_fallback']) except: curNzbProvider.search_fallback = 0 # these exceptions are actually catching unselected checkboxes if hasattr(curNzbProvider, 'enable_daily'): try: curNzbProvider.enable_daily = config.checkbox_to_value( kwargs[curNzbProvider.getID() + '_enable_daily']) except: curNzbProvider.enable_daily = 0 # these exceptions are actually catching unselected checkboxes if hasattr(curNzbProvider, 'enable_backlog'): try: curNzbProvider.enable_backlog = config.checkbox_to_value( kwargs[curNzbProvider.getID() + '_enable_backlog']) except: curNzbProvider.enable_backlog = 0 # these exceptions are actually catching unselected checkboxes sickbeard.NEWZNAB_DATA = '!!!'.join([x.configStr() for x in sickbeard.newznabProviderList]) sickbeard.PROVIDER_ORDER = provider_list sickbeard.save_config() if len(results) > 0: for x in results: logger.log(x, logger.ERROR) ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results)) else: ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE)) return self.redirect("/config/providers/") @route('/config/notifications(/?.*)') class ConfigNotifications(Config): def __init__(self, *args, **kwargs): super(ConfigNotifications, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="config_notifications.tmpl") t.submenu = self.ConfigMenu() return t.respond() def saveNotifications(self, use_kodi=None, kodi_always_on=None, kodi_notify_onsnatch=None, kodi_notify_ondownload=None, kodi_notify_onsubtitledownload=None, kodi_update_onlyfirst=None, kodi_update_library=None, kodi_update_full=None, kodi_host=None, kodi_username=None, kodi_password=None, use_plex=None, plex_notify_onsnatch=None, plex_notify_ondownload=None, plex_notify_onsubtitledownload=None, plex_update_library=None, plex_server_host=None, plex_server_token=None, plex_host=None, plex_username=None, plex_password=None, use_growl=None, growl_notify_onsnatch=None, growl_notify_ondownload=None, growl_notify_onsubtitledownload=None, growl_host=None, growl_password=None, use_freemobile=None, freemobile_notify_onsnatch=None, freemobile_notify_ondownload=None, freemobile_notify_onsubtitledownload=None, freemobile_id=None, freemobile_apikey=None, use_prowl=None, prowl_notify_onsnatch=None, prowl_notify_ondownload=None, prowl_notify_onsubtitledownload=None, prowl_api=None, prowl_priority=0, use_twitter=None, twitter_notify_onsnatch=None, twitter_notify_ondownload=None, twitter_notify_onsubtitledownload=None, use_boxcar=None, boxcar_notify_onsnatch=None, boxcar_notify_ondownload=None, 
boxcar_notify_onsubtitledownload=None, boxcar_username=None, use_boxcar2=None, boxcar2_notify_onsnatch=None, boxcar2_notify_ondownload=None, boxcar2_notify_onsubtitledownload=None, boxcar2_accesstoken=None, use_pushover=None, pushover_notify_onsnatch=None, pushover_notify_ondownload=None, pushover_notify_onsubtitledownload=None, pushover_userkey=None, pushover_apikey=None, pushover_prio=None, use_libnotify=None, libnotify_notify_onsnatch=None, libnotify_notify_ondownload=None, libnotify_notify_onsubtitledownload=None, use_nmj=None, nmj_host=None, nmj_database=None, nmj_mount=None, use_synoindex=None, use_nmjv2=None, nmjv2_host=None, nmjv2_dbloc=None, nmjv2_database=None, use_trakt=None, trakt_username=None, trakt_password=None, trakt_remove_watchlist=None, trakt_sync_watchlist=None, trakt_method_add=None, trakt_start_paused=None, trakt_use_recommended=None, trakt_sync=None, trakt_default_indexer=None, trakt_remove_serieslist=None, trakt_disable_ssl_verify=None, trakt_timeout=None, trakt_blacklist_name=None, trakt_use_rolling_download=None, trakt_rolling_num_ep=None, trakt_rolling_add_paused=None, trakt_rolling_frequency=None, trakt_rolling_default_watched_status=None, use_synologynotifier=None, synologynotifier_notify_onsnatch=None, synologynotifier_notify_ondownload=None, synologynotifier_notify_onsubtitledownload=None, use_pytivo=None, pytivo_notify_onsnatch=None, pytivo_notify_ondownload=None, pytivo_notify_onsubtitledownload=None, pytivo_update_library=None, pytivo_host=None, pytivo_share_name=None, pytivo_tivo_name=None, use_nma=None, nma_notify_onsnatch=None, nma_notify_ondownload=None, nma_notify_onsubtitledownload=None, nma_api=None, nma_priority=0, use_pushalot=None, pushalot_notify_onsnatch=None, pushalot_notify_ondownload=None, pushalot_notify_onsubtitledownload=None, pushalot_authorizationtoken=None, use_pushbullet=None, pushbullet_notify_onsnatch=None, pushbullet_notify_ondownload=None, pushbullet_notify_onsubtitledownload=None, pushbullet_api=None, pushbullet_device=None, pushbullet_device_list=None, use_email=None, email_notify_onsnatch=None, email_notify_ondownload=None, email_notify_onsubtitledownload=None, email_host=None, email_port=25, email_from=None, email_tls=None, email_user=None, email_password=None, email_list=None, email_show_list=None, email_show=None): results = [] sickbeard.USE_KODI = config.checkbox_to_value(use_kodi) sickbeard.KODI_ALWAYS_ON = config.checkbox_to_value(kodi_always_on) sickbeard.KODI_NOTIFY_ONSNATCH = config.checkbox_to_value(kodi_notify_onsnatch) sickbeard.KODI_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(kodi_notify_ondownload) sickbeard.KODI_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(kodi_notify_onsubtitledownload) sickbeard.KODI_UPDATE_LIBRARY = config.checkbox_to_value(kodi_update_library) sickbeard.KODI_UPDATE_FULL = config.checkbox_to_value(kodi_update_full) sickbeard.KODI_UPDATE_ONLYFIRST = config.checkbox_to_value(kodi_update_onlyfirst) sickbeard.KODI_HOST = config.clean_hosts(kodi_host) sickbeard.KODI_USERNAME = kodi_username sickbeard.KODI_PASSWORD = kodi_password sickbeard.USE_PLEX = config.checkbox_to_value(use_plex) sickbeard.PLEX_NOTIFY_ONSNATCH = config.checkbox_to_value(plex_notify_onsnatch) sickbeard.PLEX_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(plex_notify_ondownload) sickbeard.PLEX_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(plex_notify_onsubtitledownload) sickbeard.PLEX_UPDATE_LIBRARY = config.checkbox_to_value(plex_update_library) sickbeard.PLEX_HOST = config.clean_hosts(plex_host) 
sickbeard.PLEX_SERVER_HOST = config.clean_host(plex_server_host) sickbeard.PLEX_SERVER_TOKEN = config.clean_host(plex_server_token) sickbeard.PLEX_USERNAME = plex_username sickbeard.PLEX_PASSWORD = plex_password sickbeard.USE_GROWL = config.checkbox_to_value(use_growl) sickbeard.GROWL_NOTIFY_ONSNATCH = config.checkbox_to_value(growl_notify_onsnatch) sickbeard.GROWL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(growl_notify_ondownload) sickbeard.GROWL_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(growl_notify_onsubtitledownload) sickbeard.GROWL_HOST = config.clean_host(growl_host, default_port=23053) sickbeard.GROWL_PASSWORD = growl_password sickbeard.USE_FREEMOBILE = config.checkbox_to_value(use_freemobile) sickbeard.FREEMOBILE_NOTIFY_ONSNATCH = config.checkbox_to_value(freemobile_notify_onsnatch) sickbeard.FREEMOBILE_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(freemobile_notify_ondownload) sickbeard.FREEMOBILE_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(freemobile_notify_onsubtitledownload) sickbeard.FREEMOBILE_ID = freemobile_id sickbeard.FREEMOBILE_APIKEY = freemobile_apikey sickbeard.USE_PROWL = config.checkbox_to_value(use_prowl) sickbeard.PROWL_NOTIFY_ONSNATCH = config.checkbox_to_value(prowl_notify_onsnatch) sickbeard.PROWL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(prowl_notify_ondownload) sickbeard.PROWL_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(prowl_notify_onsubtitledownload) sickbeard.PROWL_API = prowl_api sickbeard.PROWL_PRIORITY = prowl_priority sickbeard.USE_TWITTER = config.checkbox_to_value(use_twitter) sickbeard.TWITTER_NOTIFY_ONSNATCH = config.checkbox_to_value(twitter_notify_onsnatch) sickbeard.TWITTER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(twitter_notify_ondownload) sickbeard.TWITTER_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(twitter_notify_onsubtitledownload) sickbeard.USE_BOXCAR = config.checkbox_to_value(use_boxcar) sickbeard.BOXCAR_NOTIFY_ONSNATCH = config.checkbox_to_value(boxcar_notify_onsnatch) sickbeard.BOXCAR_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(boxcar_notify_ondownload) sickbeard.BOXCAR_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(boxcar_notify_onsubtitledownload) sickbeard.BOXCAR_USERNAME = boxcar_username sickbeard.USE_BOXCAR2 = config.checkbox_to_value(use_boxcar2) sickbeard.BOXCAR2_NOTIFY_ONSNATCH = config.checkbox_to_value(boxcar2_notify_onsnatch) sickbeard.BOXCAR2_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(boxcar2_notify_ondownload) sickbeard.BOXCAR2_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(boxcar2_notify_onsubtitledownload) sickbeard.BOXCAR2_ACCESSTOKEN = boxcar2_accesstoken sickbeard.USE_PUSHOVER = config.checkbox_to_value(use_pushover) sickbeard.PUSHOVER_NOTIFY_ONSNATCH = config.checkbox_to_value(pushover_notify_onsnatch) sickbeard.PUSHOVER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushover_notify_ondownload) sickbeard.PUSHOVER_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pushover_notify_onsubtitledownload) sickbeard.PUSHOVER_USERKEY = pushover_userkey sickbeard.PUSHOVER_APIKEY = pushover_apikey sickbeard.PUSHOVER_PRIO = pushover_prio sickbeard.USE_LIBNOTIFY = config.checkbox_to_value(use_libnotify) sickbeard.LIBNOTIFY_NOTIFY_ONSNATCH = config.checkbox_to_value(libnotify_notify_onsnatch) sickbeard.LIBNOTIFY_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(libnotify_notify_ondownload) sickbeard.LIBNOTIFY_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(libnotify_notify_onsubtitledownload) sickbeard.USE_NMJ = config.checkbox_to_value(use_nmj) sickbeard.NMJ_HOST = 
config.clean_host(nmj_host) sickbeard.NMJ_DATABASE = nmj_database sickbeard.NMJ_MOUNT = nmj_mount sickbeard.USE_NMJv2 = config.checkbox_to_value(use_nmjv2) sickbeard.NMJv2_HOST = config.clean_host(nmjv2_host) sickbeard.NMJv2_DATABASE = nmjv2_database sickbeard.NMJv2_DBLOC = nmjv2_dbloc sickbeard.USE_SYNOINDEX = config.checkbox_to_value(use_synoindex) sickbeard.USE_SYNOLOGYNOTIFIER = config.checkbox_to_value(use_synologynotifier) sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSNATCH = config.checkbox_to_value(synologynotifier_notify_onsnatch) sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(synologynotifier_notify_ondownload) sickbeard.SYNOLOGYNOTIFIER_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value( synologynotifier_notify_onsubtitledownload) sickbeard.USE_TRAKT = config.checkbox_to_value(use_trakt) sickbeard.TRAKT_USERNAME = trakt_username sickbeard.TRAKT_PASSWORD = trakt_password sickbeard.TRAKT_REMOVE_WATCHLIST = config.checkbox_to_value(trakt_remove_watchlist) sickbeard.TRAKT_REMOVE_SERIESLIST = config.checkbox_to_value(trakt_remove_serieslist) sickbeard.TRAKT_SYNC_WATCHLIST = config.checkbox_to_value(trakt_sync_watchlist) sickbeard.TRAKT_METHOD_ADD = int(trakt_method_add) sickbeard.TRAKT_START_PAUSED = config.checkbox_to_value(trakt_start_paused) sickbeard.TRAKT_USE_RECOMMENDED = config.checkbox_to_value(trakt_use_recommended) sickbeard.TRAKT_SYNC = config.checkbox_to_value(trakt_sync) sickbeard.TRAKT_DEFAULT_INDEXER = int(trakt_default_indexer) sickbeard.TRAKT_DISABLE_SSL_VERIFY = config.checkbox_to_value(trakt_disable_ssl_verify) sickbeard.TRAKT_TIMEOUT = int(trakt_timeout) sickbeard.TRAKT_BLACKLIST_NAME = trakt_blacklist_name sickbeard.TRAKT_USE_ROLLING_DOWNLOAD = config.checkbox_to_value(trakt_use_rolling_download) sickbeard.TRAKT_ROLLING_NUM_EP = int(trakt_rolling_num_ep) sickbeard.TRAKT_ROLLING_ADD_PAUSED = config.checkbox_to_value(trakt_rolling_add_paused) sickbeard.TRAKT_ROLLING_FREQUENCY = int(trakt_rolling_frequency) sickbeard.TRAKT_ROLLING_DEFAULT_WATCHED_STATUS = int(trakt_rolling_default_watched_status) if sickbeard.USE_TRAKT: sickbeard.traktCheckerScheduler.silent = False else: sickbeard.traktCheckerScheduler.silent = True sickbeard.USE_EMAIL = config.checkbox_to_value(use_email) sickbeard.EMAIL_NOTIFY_ONSNATCH = config.checkbox_to_value(email_notify_onsnatch) sickbeard.EMAIL_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(email_notify_ondownload) sickbeard.EMAIL_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(email_notify_onsubtitledownload) sickbeard.EMAIL_HOST = config.clean_host(email_host) sickbeard.EMAIL_PORT = config.to_int(email_port, default=25) sickbeard.EMAIL_FROM = email_from sickbeard.EMAIL_TLS = config.checkbox_to_value(email_tls) sickbeard.EMAIL_USER = email_user sickbeard.EMAIL_PASSWORD = email_password sickbeard.EMAIL_LIST = email_list sickbeard.USE_PYTIVO = config.checkbox_to_value(use_pytivo) sickbeard.PYTIVO_NOTIFY_ONSNATCH = config.checkbox_to_value(pytivo_notify_onsnatch) sickbeard.PYTIVO_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pytivo_notify_ondownload) sickbeard.PYTIVO_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pytivo_notify_onsubtitledownload) sickbeard.PYTIVO_UPDATE_LIBRARY = config.checkbox_to_value(pytivo_update_library) sickbeard.PYTIVO_HOST = config.clean_host(pytivo_host) sickbeard.PYTIVO_SHARE_NAME = pytivo_share_name sickbeard.PYTIVO_TIVO_NAME = pytivo_tivo_name sickbeard.USE_NMA = config.checkbox_to_value(use_nma) sickbeard.NMA_NOTIFY_ONSNATCH = config.checkbox_to_value(nma_notify_onsnatch) 
sickbeard.NMA_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(nma_notify_ondownload) sickbeard.NMA_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(nma_notify_onsubtitledownload) sickbeard.NMA_API = nma_api sickbeard.NMA_PRIORITY = nma_priority sickbeard.USE_PUSHALOT = config.checkbox_to_value(use_pushalot) sickbeard.PUSHALOT_NOTIFY_ONSNATCH = config.checkbox_to_value(pushalot_notify_onsnatch) sickbeard.PUSHALOT_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushalot_notify_ondownload) sickbeard.PUSHALOT_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pushalot_notify_onsubtitledownload) sickbeard.PUSHALOT_AUTHORIZATIONTOKEN = pushalot_authorizationtoken sickbeard.USE_PUSHBULLET = config.checkbox_to_value(use_pushbullet) sickbeard.PUSHBULLET_NOTIFY_ONSNATCH = config.checkbox_to_value(pushbullet_notify_onsnatch) sickbeard.PUSHBULLET_NOTIFY_ONDOWNLOAD = config.checkbox_to_value(pushbullet_notify_ondownload) sickbeard.PUSHBULLET_NOTIFY_ONSUBTITLEDOWNLOAD = config.checkbox_to_value(pushbullet_notify_onsubtitledownload) sickbeard.PUSHBULLET_API = pushbullet_api sickbeard.PUSHBULLET_DEVICE = pushbullet_device_list sickbeard.save_config() if len(results) > 0: for x in results: logger.log(x, logger.ERROR) ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results)) else: ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE)) return self.redirect("/config/notifications/") @route('/config/subtitles(/?.*)') class ConfigSubtitles(Config): def __init__(self, *args, **kwargs): super(ConfigSubtitles, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="config_subtitles.tmpl") t.submenu = self.ConfigMenu() return t.respond() def saveSubtitles(self, use_subtitles=None, subtitles_plugins=None, subtitles_languages=None, subtitles_dir=None, service_order=None, subtitles_history=None, subtitles_finder_frequency=None, subtitles_multi=None, embedded_subtitles_all=None): results = [] if subtitles_finder_frequency == '' or subtitles_finder_frequency is None: subtitles_finder_frequency = 1 if use_subtitles == "on" and not sickbeard.subtitlesFinderScheduler.isAlive(): sickbeard.subtitlesFinderScheduler.silent = False try: sickbeard.subtitlesFinderScheduler.start() except: pass elif not use_subtitles == "on": sickbeard.subtitlesFinderScheduler.stop.set() sickbeard.subtitlesFinderScheduler.silent = True try: sickbeard.subtitlesFinderScheduler.join(5) except: pass sickbeard.USE_SUBTITLES = config.checkbox_to_value(use_subtitles) sickbeard.SUBTITLES_LANGUAGES = [lang.alpha2 for lang in subtitles.isValidLanguage( subtitles_languages.replace(' ', '').split(','))] if subtitles_languages != '' else '' sickbeard.SUBTITLES_DIR = subtitles_dir sickbeard.SUBTITLES_HISTORY = config.checkbox_to_value(subtitles_history) sickbeard.EMBEDDED_SUBTITLES_ALL = config.checkbox_to_value(embedded_subtitles_all) sickbeard.SUBTITLES_FINDER_FREQUENCY = config.to_int(subtitles_finder_frequency, default=1) sickbeard.SUBTITLES_MULTI = config.checkbox_to_value(subtitles_multi) # Subtitles services services_str_list = service_order.split() subtitles_services_list = [] subtitles_services_enabled = [] for curServiceStr in services_str_list: curService, curEnabled = curServiceStr.split(':') subtitles_services_list.append(curService) subtitles_services_enabled.append(int(curEnabled)) sickbeard.SUBTITLES_SERVICES_LIST = subtitles_services_list sickbeard.SUBTITLES_SERVICES_ENABLED = subtitles_services_enabled sickbeard.save_config() if len(results) > 0: for x in 
results: logger.log(x, logger.ERROR) ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results)) else: ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE)) return self.redirect("/config/subtitles/") @route('/config/anime(/?.*)') class ConfigAnime(Config): def __init__(self, *args, **kwargs): super(ConfigAnime, self).__init__(*args, **kwargs) def index(self): t = PageTemplate(rh=self, file="config_anime.tmpl") t.submenu = self.ConfigMenu() return t.respond() def saveAnime(self, use_anidb=None, anidb_username=None, anidb_password=None, anidb_use_mylist=None, split_home=None): results = [] sickbeard.USE_ANIDB = config.checkbox_to_value(use_anidb) sickbeard.ANIDB_USERNAME = anidb_username sickbeard.ANIDB_PASSWORD = anidb_password sickbeard.ANIDB_USE_MYLIST = config.checkbox_to_value(anidb_use_mylist) sickbeard.ANIME_SPLIT_HOME = config.checkbox_to_value(split_home) sickbeard.save_config() if len(results) > 0: for x in results: logger.log(x, logger.ERROR) ui.notifications.error('Error(s) Saving Configuration', '<br />\n'.join(results)) else: ui.notifications.message('Configuration Saved', ek.ek(os.path.join, sickbeard.CONFIG_FILE)) return self.redirect("/config/anime/") @route('/errorlogs(/?.*)') class ErrorLogs(WebRoot): def __init__(self, *args, **kwargs): super(ErrorLogs, self).__init__(*args, **kwargs) def ErrorLogsMenu(self): menu = [ {'title': 'Clear Errors', 'path': 'errorlogs/clearerrors/'}, ] return menu def index(self): t = PageTemplate(rh=self, file="errorlogs.tmpl") t.submenu = self.ErrorLogsMenu() return t.respond() def haveErrors(self): if len(classes.ErrorViewer.errors) > 0: return True def clearerrors(self): classes.ErrorViewer.clear() return self.redirect("/errorlogs/") def viewlog(self, minLevel=logger.INFO, logFilter="<NONE>",logSearch=None, maxLines=500): def Get_Data(Levelmin, data_in, lines_in, regex, Filter, Search, mlines): lastLine = False numLines = lines_in numToShow = min(maxLines, numLines + len(data_in)) finalData = [] for x in reversed(data_in): x = ek.ss(x) match = re.match(regex, x) if match: level = match.group(7) logName = match.group(8) if not sickbeard.DEBUG and (level == 'DEBUG' or level == 'DB'): continue if level not in logger.reverseNames: lastLine = False continue if logSearch and logSearch.lower() in x.lower(): lastLine = True finalData.append(x) numLines += 1 elif not logSearch and logger.reverseNames[level] >= minLevel and (logFilter == '<NONE>' or logName.startswith(logFilter)): lastLine = True finalData.append(x) numLines += 1 else: lastLine = False continue elif lastLine: finalData.append("AA" + x) numLines += 1 if numLines >= numToShow: return finalData return finalData t = PageTemplate(rh=self, file="viewlogs.tmpl") t.submenu = self.ErrorLogsMenu() minLevel = int(minLevel) logNameFilters = {'<NONE>': u'&lt;No Filter&gt;', 'DAILYSEARCHER': u'Daily Searcher', 'BACKLOG': u'Backlog', 'SHOWUPDATER': u'Show Updater', 'CHECKVERSION': u'Check Version', 'SHOWQUEUE': u'Show Queue', 'SEARCHQUEUE': u'Search Queue', 'FINDPROPERS': u'Find Propers', 'POSTPROCESSER': u'Postprocesser', 'FINDSUBTITLES': u'Find Subtitles', 'TRAKTCHECKER': u'Trakt Checker', 'EVENT': u'Event', 'ERROR': u'Error', 'TORNADO': u'Tornado', 'Thread': u'Thread', 'MAIN': u'Main' } if logFilter not in logNameFilters: logFilter = '<NONE>' regex = "^(\d\d\d\d)\-(\d\d)\-(\d\d)\s*(\d\d)\:(\d\d):(\d\d)\s*([A-Z]+)\s*(.+?)\s*\:\:\s*(.*)$" data = [] if os.path.isfile(logger.logFile): with ek.ek(codecs.open, *[logger.logFile, 'r', 'utf-8']) 
as f: data = Get_Data(minLevel, f.readlines(), 0, regex, logFilter, logSearch, maxLines) for i in range (1 , int(sickbeard.LOG_NR)): if os.path.isfile(logger.logFile + "." + str(i)) and (len(data) <= maxLines): with ek.ek(codecs.open, *[logger.logFile + "." + str(i), 'r', 'utf-8']) as f: data += Get_Data(minLevel, f.readlines(), len(data), regex, logFilter, logSearch, maxLines) result = "".join(data) t.logLines = result t.minLevel = minLevel t.logNameFilters = logNameFilters t.logFilter = logFilter t.logSearch = logSearch return t.respond() def submit_errors(self): if not (sickbeard.GIT_USERNAME and sickbeard.GIT_PASSWORD): ui.notifications.error("Missing information", "Please set your GitHub username and password in the config.") logger.log(u'Please set your GitHub username and password in the config, unable to submit issue ticket to GitHub!') else: issue = logger.submit_errors() if issue: ui.notifications.message('Your issue ticket #%s was submitted successfully!' % issue.number) return self.redirect("/errorlogs/")
gpl-3.0
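Editor's note on the SickRage/Sick Beard web handler code above: the `viewlog` handler filters each rotated log file line by line with a single regular expression and keeps a line only when its level and thread name pass the current filter (groups 7 and 8 of the match). The standalone sketch below exercises that same pattern on a made-up log line; the sample text is an assumption inferred from the regex itself, not copied from a real log.

# Hedged sketch: the log-line regex from ErrorLogs.viewlog() applied to an
# invented sample line. Group numbers follow the handler above:
# group(7) = level, group(8) = thread/log name, group(9) = message.
import re

LOG_LINE_RE = r"^(\d\d\d\d)\-(\d\d)\-(\d\d)\s*(\d\d)\:(\d\d):(\d\d)\s*([A-Z]+)\s*(.+?)\s*\:\:\s*(.*)$"

sample = "2015-03-01 12:34:56 INFO SEARCHQUEUE-BACKLOG :: Beginning backlog search"

match = re.match(LOG_LINE_RE, sample)
if match:
    level = match.group(7)     # "INFO" -> looked up in logger.reverseNames
    log_name = match.group(8)  # "SEARCHQUEUE-BACKLOG" -> compared against logFilter
    message = match.group(9)   # "Beginning backlog search"
    print("%s | %s | %s" % (level, log_name, message))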
spmaniato/LTLMoP
src/lib/createJTLVinput.py
7
7188
""" =============================================== createJTLVinput.py - LTL Pre-Processor Routines =============================================== Module that creates the input files for the JTLV based synthesis algorithm. Its functions create the skeleton .smv file and the .ltl file which includes the topological relations and the given spec. """ import math import parseEnglishToLTL import textwrap from LTLParser.LTLFormula import LTLFormula, LTLFormulaType, treeToString def createSMVfile(fileName, sensorList, robotPropList): ''' This function writes the skeleton SMV file. It takes as input a filename, the number of regions, the list of the sensor propositions and the list of robot propositions (without the regions). ''' fileName = fileName + '.smv' smvFile = open(fileName, 'w') # Write the header smvFile.write(textwrap.dedent(""" -- Skeleton SMV file -- (Generated by the LTLMoP toolkit) MODULE main VAR e : env(); s : sys(); """)); # Define sensor propositions smvFile.write(textwrap.dedent(""" MODULE env -- inputs VAR """)); for sensor in sensorList: smvFile.write('\t\t') smvFile.write(sensor) smvFile.write(' : boolean;\n') smvFile.write(textwrap.dedent(""" MODULE sys -- outputs VAR """)); # Define robot propositions for robotProp in robotPropList: smvFile.write('\t\t') smvFile.write(robotProp) smvFile.write(' : boolean;\n') # close the file smvFile.close() def createTopologyFragment(adjData, regions, use_bits=True): if use_bits: numBits = int(math.ceil(math.log(len(adjData),2))) # TODO: only calc bitencoding once bitEncode = parseEnglishToLTL.bitEncoding(len(adjData), numBits) currBitEnc = bitEncode['current'] nextBitEnc = bitEncode['next'] # The topological relation (adjacency) adjFormulas = [] for Origin in range(len(adjData)): # from region i we can stay in region i adjFormula = '\t\t\t []( (' adjFormula = adjFormula + (currBitEnc[Origin] if use_bits else "s."+regions[Origin].name) adjFormula = adjFormula + ') -> ( (' adjFormula = adjFormula + (nextBitEnc[Origin] if use_bits else "next(s."+regions[Origin].name+")") adjFormula = adjFormula + ')' for dest in range(len(adjData)): if adjData[Origin][dest]: # not empty, hence there is a transition adjFormula = adjFormula + '\n\t\t\t\t\t\t\t\t\t| (' adjFormula = adjFormula + (nextBitEnc[dest] if use_bits else "next(s."+regions[dest].name+")") adjFormula = adjFormula + ') ' # closing this region adjFormula = adjFormula + ' ) ) ' adjFormulas.append(adjFormula) # In a BDD strategy, it's best to explicitly exclude these adjFormulas.append("[]"+createInitialRegionFragment(regions, use_bits)) return " & \n".join(adjFormulas) def createInitialRegionFragment(regions, use_bits=True): # Setting the system initial formula to allow only valid # region (encoding). This may be redundant if an initial region is # specified, but it is here to ensure the system cannot start from # an invalid, or empty region (encoding). 
if use_bits: numBits = int(math.ceil(math.log(len(regions),2))) # TODO: only calc bitencoding once bitEncode = parseEnglishToLTL.bitEncoding(len(regions), numBits) currBitEnc = bitEncode['current'] nextBitEnc = bitEncode['next'] initreg_formula = '\t\t\t( ' + currBitEnc[0] + ' \n' for regionInd in range(1,len(currBitEnc)): initreg_formula = initreg_formula + '\t\t\t\t | ' + currBitEnc[regionInd] + '\n' initreg_formula = initreg_formula + '\t\t\t) \n' else: initreg_formula = "\n\t({})".format(" | ".join(["({})".format(" & ".join(["s."+r2.name if r is r2 else "!s."+r2.name for r2 in regions])) for r in regions])) return initreg_formula def createNecessaryFillerSpec(spec_part): """ Both assumptions guarantees need to have at least one each of initial, safety, and liveness. If any are not present, create trivial TRUE ones. """ if spec_part.strip() == "": filler_spec = ["TRUE", "[](TRUE)", "[]<>(TRUE)"] else: formula = LTLFormula.fromString(spec_part) filler_spec = [] if not formula.getConjunctsByType(LTLFormulaType.INITIAL): filler_spec.append("TRUE") if not formula.getConjunctsByType(LTLFormulaType.SAFETY): filler_spec.append("[](TRUE)") if not formula.getConjunctsByType(LTLFormulaType.LIVENESS): filler_spec.append("[]<>(TRUE)") return " & ".join(filler_spec) def flattenLTLFormulas(f): if isinstance(f, LTLFormula): return str(f) # If we've received a list of LTLFormula, assume that they should be conjoined if isinstance(f, list) and all((isinstance(sf, LTLFormula) for sf in f)): return " & \n".join([treeToString(sf.tree, top_level=False) for sf in f]) if isinstance(f, basestring): return f raise ValueError("Invalid formula type: must be either string, LTLFormula, or LTLFormula list") def createLTLfile(fileName, spec_env, spec_sys): ''' This function writes the LTL file. It encodes the specification and topological relation. It takes as input a filename, the list of the sensor propositions, the list of robot propositions (without the regions), the adjacency data (transition data structure) and a specification ''' spec_env = flattenLTLFormulas(spec_env) spec_sys = flattenLTLFormulas(spec_sys) # Force .ltl suffix if not fileName.endswith('.ltl'): fileName = fileName + '.ltl' ltlFile = open(fileName, 'w') # Write the header and begining of the formula ltlFile.write(textwrap.dedent(""" -- LTL specification file -- (Generated by the LTLMoP toolkit) """)) ltlFile.write('LTLSPEC -- Assumptions\n') ltlFile.write('\t(\n') filler = createNecessaryFillerSpec(spec_env) if filler: ltlFile.write('\t' + filler) # Write the environment assumptions # from the 'spec' input if spec_env.strip() != "": if filler: ltlFile.write('& \n') ltlFile.write(spec_env) ltlFile.write('\n\t);\n\n') ltlFile.write('LTLSPEC -- Guarantees\n') ltlFile.write('\t(\n') filler = createNecessaryFillerSpec(spec_sys) if filler: ltlFile.write('\t' + filler) # Write the desired robot behavior if spec_sys.strip() != "": if filler: ltlFile.write('& \n') ltlFile.write(spec_sys) # Close the LTL formula ltlFile.write('\n\t);\n') # close the file ltlFile.close()
gpl-3.0
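Editor's note on the LTLMoP module above: `createSMVfile` and `createTopologyFragment` have fully visible signatures, so a minimal usage sketch is possible. Everything below is illustrative: the region and proposition names are invented, the import assumes LTLMoP's `src/lib` directory is on `sys.path` (so `parseEnglishToLTL` and `LTLParser` resolve), and `use_bits=False` is chosen so the sketch does not depend on the bit-encoding helper.

# Hypothetical usage of the createJTLVinput helpers shown above.
# Region names, propositions and the adjacency matrix are made up.
from collections import namedtuple
from createJTLVinput import createSMVfile, createTopologyFragment

Region = namedtuple("Region", ["name"])
regions = [Region("kitchen"), Region("hall"), Region("office")]

# Adjacency matrix: kitchen<->hall and hall<->office are connected.
adjData = [[1, 1, 0],
           [1, 1, 1],
           [0, 1, 1]]

sensors = ["person"]                                      # environment inputs
robot_props = ["camera_on"] + [r.name for r in regions]   # system outputs

createSMVfile("myspec", sensors, robot_props)             # writes myspec.smv

# With use_bits=False every region is its own boolean, so the fragment
# only needs the .name attribute of each region object.
print(createTopologyFragment(adjData, regions, use_bits=False))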
pongem/python-bot-project
appengine/standard/botapp/env/lib/python2.7/site-packages/django/db/migrations/autodetector.py
41
56981
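Editor's note on the vendored Django file that follows (the migrations autodetector): its `deep_deconstruct` helper recursively applies `Field.deconstruct()`, the documented Django API that reduces a field to an importable path plus the arguments needed to recreate it. The sketch below only illustrates that underlying API; the commented values are what current Django releases return and may vary slightly between versions.

# Illustration of the Field.deconstruct() 4-tuple that deep_deconstruct()
# and only_relation_agnostic_fields() build on (standard Django API).
from django.db import models

field = models.CharField(max_length=100, null=True)
name, path, args, kwargs = field.deconstruct()

# name   -> None (the field is not attached to a model yet)
# path   -> 'django.db.models.CharField'
# args   -> []
# kwargs -> {'max_length': 100, 'null': True}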
from __future__ import unicode_literals import functools import re from itertools import chain from django.conf import settings from django.db import models from django.db.migrations import operations from django.db.migrations.migration import Migration from django.db.migrations.operations.models import AlterModelOptions from django.db.migrations.optimizer import MigrationOptimizer from django.db.migrations.questioner import MigrationQuestioner from django.db.migrations.utils import ( COMPILED_REGEX_TYPE, RegexObject, get_migration_name_timestamp, ) from django.utils import six from .topological_sort import stable_topological_sort class MigrationAutodetector(object): """ Takes a pair of ProjectStates, and compares them to see what the first would need doing to make it match the second (the second usually being the project's current state). Note that this naturally operates on entire projects at a time, as it's likely that changes interact (for example, you can't add a ForeignKey without having a migration to add the table it depends on first). A user interface may offer single-app usage if it wishes, with the caveat that it may not always be possible. """ def __init__(self, from_state, to_state, questioner=None): self.from_state = from_state self.to_state = to_state self.questioner = questioner or MigrationQuestioner() self.existing_apps = {app for app, model in from_state.models} def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None): """ Main entry point to produce a list of applicable changes. Takes a graph to base names on and an optional set of apps to try and restrict to (restriction is not guaranteed) """ changes = self._detect_changes(convert_apps, graph) changes = self.arrange_for_graph(changes, graph, migration_name) if trim_to_apps: changes = self._trim_to_apps(changes, trim_to_apps) return changes def deep_deconstruct(self, obj): """ Recursive deconstruction for a field and its arguments. Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly. """ if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple(self.deep_deconstruct(value) for value in obj) elif isinstance(obj, dict): return { key: self.deep_deconstruct(value) for key, value in obj.items() } elif isinstance(obj, functools.partial): return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords)) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): # If this is a type that implements 'deconstruct' as an instance method, # avoid treating this as being deconstructible itself - see #22951 return obj elif hasattr(obj, 'deconstruct'): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): # we have a field which also returns a name deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return ( path, [self.deep_deconstruct(value) for value in args], { key: self.deep_deconstruct(value) for key, value in kwargs.items() }, ) else: return obj def only_relation_agnostic_fields(self, fields): """ Return a definition of the fields that ignores field names and what related fields actually relate to. 
Used for detecting renames (as, of course, the related fields change during renames) """ fields_def = [] for name, field in sorted(fields): deconstruction = self.deep_deconstruct(field) if field.remote_field and field.remote_field.model: del deconstruction[2]['to'] fields_def.append(deconstruction) return fields_def def _detect_changes(self, convert_apps=None, graph=None): """ Returns a dict of migration plans which will achieve the change from from_state to to_state. The dict has app labels as keys and a list of migrations as values. The resulting migrations aren't specially named, but the names do matter for dependencies inside the set. convert_apps is the list of apps to convert to use migrations (i.e. to make initial migrations for, in the usual case) graph is an optional argument that, if provided, can help improve dependency generation and avoid potential circular dependencies. """ # The first phase is generating all the operations for each app # and gathering them into a big per-app list. # We'll then go through that list later and order it and split # into migrations to resolve dependencies caused by M2Ms and FKs. self.generated_operations = {} # Prepare some old/new state and model lists, separating # proxy models and ignoring unmigrated apps. self.old_apps = self.from_state.concrete_apps self.new_apps = self.to_state.apps self.old_model_keys = [] self.old_proxy_keys = [] self.old_unmanaged_keys = [] self.new_model_keys = [] self.new_proxy_keys = [] self.new_unmanaged_keys = [] for al, mn in sorted(self.from_state.models.keys()): model = self.old_apps.get_model(al, mn) if not model._meta.managed: self.old_unmanaged_keys.append((al, mn)) elif al not in self.from_state.real_apps: if model._meta.proxy: self.old_proxy_keys.append((al, mn)) else: self.old_model_keys.append((al, mn)) for al, mn in sorted(self.to_state.models.keys()): model = self.new_apps.get_model(al, mn) if not model._meta.managed: self.new_unmanaged_keys.append((al, mn)) elif ( al not in self.from_state.real_apps or (convert_apps and al in convert_apps) ): if model._meta.proxy: self.new_proxy_keys.append((al, mn)) else: self.new_model_keys.append((al, mn)) # Renames have to come first self.generate_renamed_models() # Prepare lists of fields and generate through model map self._prepare_field_lists() self._generate_through_model_map() # Generate non-rename model operations self.generate_deleted_models() self.generate_created_models() self.generate_deleted_proxies() self.generate_created_proxies() self.generate_altered_options() self.generate_altered_managers() # Generate field operations self.generate_renamed_fields() self.generate_removed_fields() self.generate_added_fields() self.generate_altered_fields() self.generate_altered_unique_together() self.generate_altered_index_together() self.generate_altered_db_table() self.generate_altered_order_with_respect_to() self._sort_migrations() self._build_migration_list(graph) self._optimize_migrations() return self.migrations def _prepare_field_lists(self): """ Prepare field lists, and prepare a list of the fields that used through models in the old state so we can make dependencies from the through model deletion to the field that uses it. 
""" self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys) self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys) self.kept_unmanaged_keys = set(self.old_unmanaged_keys).intersection(self.new_unmanaged_keys) self.through_users = {} self.old_field_keys = set() self.new_field_keys = set() for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] self.old_field_keys.update((app_label, model_name, x) for x, y in old_model_state.fields) self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields) def _generate_through_model_map(self): """ Through model map generation """ for app_label, model_name in sorted(self.old_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] for field_name, field in old_model_state.fields: old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(field_name) if (hasattr(old_field, "remote_field") and getattr(old_field.remote_field, "through", None) and not old_field.remote_field.through._meta.auto_created): through_key = ( old_field.remote_field.through._meta.app_label, old_field.remote_field.through._meta.model_name, ) self.through_users[through_key] = (app_label, old_model_name, field_name) def _build_migration_list(self, graph=None): """ We need to chop the lists of operations up into migrations with dependencies on each other. We do this by stepping up an app's list of operations until we find one that has an outgoing dependency that isn't in another app's migration yet (hasn't been chopped off its list). We then chop off the operations before it into a migration and move onto the next app. If we loop back around without doing anything, there's a circular dependency (which _should_ be impossible as the operations are all split at this point so they can't depend and be depended on). """ self.migrations = {} num_ops = sum(len(x) for x in self.generated_operations.values()) chop_mode = False while num_ops: # On every iteration, we step through all the apps and see if there # is a completed set of operations. # If we find that a subset of the operations are complete we can # try to chop it off from the rest and continue, but we only # do this if we've already been through the list once before # without any chopping and nothing has changed. for app_label in sorted(self.generated_operations.keys()): chopped = [] dependencies = set() for operation in list(self.generated_operations[app_label]): deps_satisfied = True operation_dependencies = set() for dep in operation._auto_deps: is_swappable_dep = False if dep[0] == "__setting__": # We need to temporarily resolve the swappable dependency to prevent # circular references. While keeping the dependency checks on the # resolved model we still add the swappable dependencies. # See #23322 resolved_app_label, resolved_object_name = getattr(settings, dep[1]).split('.') original_dep = dep dep = (resolved_app_label, resolved_object_name.lower(), dep[2], dep[3]) is_swappable_dep = True if dep[0] != app_label and dep[0] != "__setting__": # External app dependency. See if it's not yet # satisfied. 
for other_operation in self.generated_operations.get(dep[0], []): if self.check_dependency(other_operation, dep): deps_satisfied = False break if not deps_satisfied: break else: if is_swappable_dep: operation_dependencies.add((original_dep[0], original_dep[1])) elif dep[0] in self.migrations: operation_dependencies.add((dep[0], self.migrations[dep[0]][-1].name)) else: # If we can't find the other app, we add a first/last dependency, # but only if we've already been through once and checked everything if chop_mode: # If the app already exists, we add a dependency on the last migration, # as we don't know which migration contains the target field. # If it's not yet migrated or has no migrations, we use __first__ if graph and graph.leaf_nodes(dep[0]): operation_dependencies.add(graph.leaf_nodes(dep[0])[0]) else: operation_dependencies.add((dep[0], "__first__")) else: deps_satisfied = False if deps_satisfied: chopped.append(operation) dependencies.update(operation_dependencies) self.generated_operations[app_label] = self.generated_operations[app_label][1:] else: break # Make a migration! Well, only if there's stuff to put in it if dependencies or chopped: if not self.generated_operations[app_label] or chop_mode: subclass = type(str("Migration"), (Migration,), {"operations": [], "dependencies": []}) instance = subclass("auto_%i" % (len(self.migrations.get(app_label, [])) + 1), app_label) instance.dependencies = list(dependencies) instance.operations = chopped instance.initial = app_label not in self.existing_apps self.migrations.setdefault(app_label, []).append(instance) chop_mode = False else: self.generated_operations[app_label] = chopped + self.generated_operations[app_label] new_num_ops = sum(len(x) for x in self.generated_operations.values()) if new_num_ops == num_ops: if not chop_mode: chop_mode = True else: raise ValueError("Cannot resolve operation dependencies: %r" % self.generated_operations) num_ops = new_num_ops def _sort_migrations(self): """ Reorder to make things possible. The order we have already isn't bad, but we need to pull a few things around so FKs work nicely inside the same app """ for app_label, ops in sorted(self.generated_operations.items()): # construct a dependency graph for intra-app dependencies dependency_graph = {op: set() for op in ops} for op in ops: for dep in op._auto_deps: if dep[0] == app_label: for op2 in ops: if self.check_dependency(op2, dep): dependency_graph[op].add(op2) # we use a stable sort for deterministic tests & general behavior self.generated_operations[app_label] = stable_topological_sort(ops, dependency_graph) def _optimize_migrations(self): # Add in internal dependencies among the migrations for app_label, migrations in self.migrations.items(): for m1, m2 in zip(migrations, migrations[1:]): m2.dependencies.append((app_label, m1.name)) # De-dupe dependencies for app_label, migrations in self.migrations.items(): for migration in migrations: migration.dependencies = list(set(migration.dependencies)) # Optimize migrations for app_label, migrations in self.migrations.items(): for migration in migrations: migration.operations = MigrationOptimizer().optimize(migration.operations, app_label=app_label) def check_dependency(self, operation, dependency): """ Returns ``True`` if the given operation depends on the given dependency, ``False`` otherwise. 
""" # Created model if dependency[2] is None and dependency[3] is True: return ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() ) # Created field elif dependency[2] is not None and dependency[3] is True: return ( ( isinstance(operation, operations.CreateModel) and operation.name_lower == dependency[1].lower() and any(dependency[2] == x for x, y in operation.fields) ) or ( isinstance(operation, operations.AddField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) ) # Removed field elif dependency[2] is not None and dependency[3] is False: return ( isinstance(operation, operations.RemoveField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # Removed model elif dependency[2] is None and dependency[3] is False: return ( isinstance(operation, operations.DeleteModel) and operation.name_lower == dependency[1].lower() ) # Field being altered elif dependency[2] is not None and dependency[3] == "alter": return ( isinstance(operation, operations.AlterField) and operation.model_name_lower == dependency[1].lower() and operation.name_lower == dependency[2].lower() ) # order_with_respect_to being unset for a field elif dependency[2] is not None and dependency[3] == "order_wrt_unset": return ( isinstance(operation, operations.AlterOrderWithRespectTo) and operation.name_lower == dependency[1].lower() and (operation.order_with_respect_to or "").lower() != dependency[2].lower() ) # Field is removed and part of an index/unique_together elif dependency[2] is not None and dependency[3] == "foo_together_change": return ( isinstance(operation, (operations.AlterUniqueTogether, operations.AlterIndexTogether)) and operation.name_lower == dependency[1].lower() ) # Unknown dependency. Raise an error. else: raise ValueError("Can't handle dependency %r" % (dependency, )) def add_operation(self, app_label, operation, dependencies=None, beginning=False): # Dependencies are (app_label, model_name, field_name, create/delete as True/False) operation._auto_deps = dependencies or [] if beginning: self.generated_operations.setdefault(app_label, []).insert(0, operation) else: self.generated_operations.setdefault(app_label, []).append(operation) def swappable_first_key(self, item): """ Sorting key function that places potential swappable models first in lists of created models (only real way to solve #22783) """ try: model = self.new_apps.get_model(item[0], item[1]) base_names = [base.__name__ for base in model.__bases__] string_version = "%s.%s" % (item[0], item[1]) if ( model._meta.swappable or "AbstractUser" in base_names or "AbstractBaseUser" in base_names or settings.AUTH_USER_MODEL.lower() == string_version.lower() ): return ("___" + item[0], "___" + item[1]) except LookupError: pass return item def generate_renamed_models(self): """ Finds any renamed models, and generates the operations for them, and removes the old entry from the model lists. Must be run before other model-level generation. 
""" self.renamed_models = {} self.renamed_models_rel = {} added_models = set(self.new_model_keys) - set(self.old_model_keys) for app_label, model_name in sorted(added_models): model_state = self.to_state.models[app_label, model_name] model_fields_def = self.only_relation_agnostic_fields(model_state.fields) removed_models = set(self.old_model_keys) - set(self.new_model_keys) for rem_app_label, rem_model_name in removed_models: if rem_app_label == app_label: rem_model_state = self.from_state.models[rem_app_label, rem_model_name] rem_model_fields_def = self.only_relation_agnostic_fields(rem_model_state.fields) if model_fields_def == rem_model_fields_def: if self.questioner.ask_rename_model(rem_model_state, model_state): self.add_operation( app_label, operations.RenameModel( old_name=rem_model_state.name, new_name=model_state.name, ) ) self.renamed_models[app_label, model_name] = rem_model_name renamed_models_rel_key = '%s.%s' % (rem_model_state.app_label, rem_model_state.name) self.renamed_models_rel[renamed_models_rel_key] = '%s.%s' % ( model_state.app_label, model_state.name, ) self.old_model_keys.remove((rem_app_label, rem_model_name)) self.old_model_keys.append((app_label, model_name)) break def generate_created_models(self): """ Find all new models (both managed and unmanaged) and make create operations for them as well as separate operations to create any foreign key or M2M relationships (we'll optimize these back in later if we can). We also defer any model options that refer to collections of fields that might be deferred (e.g. unique_together, index_together). """ old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys) added_models = set(self.new_model_keys) - old_keys added_unmanaged_models = set(self.new_unmanaged_keys) - old_keys all_added_models = chain( sorted(added_models, key=self.swappable_first_key, reverse=True), sorted(added_unmanaged_models, key=self.swappable_first_key, reverse=True) ) for app_label, model_name in all_added_models: model_state = self.to_state.models[app_label, model_name] model_opts = self.new_apps.get_model(app_label, model_name)._meta # Gather related fields related_fields = {} primary_key_rel = None for field in model_opts.local_fields: if field.remote_field: if field.remote_field.model: if field.primary_key: primary_key_rel = field.remote_field.model elif not field.remote_field.parent_link: related_fields[field.name] = field # through will be none on M2Ms on swapped-out models; # we can treat lack of through as auto_created=True, though. if (getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created): related_fields[field.name] = field for field in model_opts.local_many_to_many: if field.remote_field.model: related_fields[field.name] = field if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: related_fields[field.name] = field # Are there unique/index_together to defer? unique_together = model_state.options.pop('unique_together', None) index_together = model_state.options.pop('index_together', None) order_with_respect_to = model_state.options.pop('order_with_respect_to', None) # Depend on the deletion of any possible proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, six.string_types) and "." 
in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Depend on the other end of the primary key if it's a relation if primary_key_rel: dependencies.append(( primary_key_rel._meta.app_label, primary_key_rel._meta.object_name, None, True )) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[d for d in model_state.fields if d[0] not in related_fields], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), dependencies=dependencies, beginning=True, ) # Don't add operations which modify the database for unmanaged models if not model_opts.managed: continue # Generate operations for each related field for name, field in sorted(related_fields.items()): dependencies = self._get_dependecies_for_foreign_key(field) # Depend on our own model being created dependencies.append((app_label, model_name, None, True)) # Make operation self.add_operation( app_label, operations.AddField( model_name=model_name, name=name, field=field, ), dependencies=list(set(dependencies)), ) # Generate other opns related_dependencies = [ (app_label, model_name, name, True) for name, field in sorted(related_fields.items()) ] related_dependencies.append((app_label, model_name, None, True)) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=unique_together, ), dependencies=related_dependencies ) if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=index_together, ), dependencies=related_dependencies ) if order_with_respect_to: self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=order_with_respect_to, ), dependencies=[ (app_label, model_name, order_with_respect_to, True), (app_label, model_name, None, True), ] ) def generate_created_proxies(self): """ Makes CreateModel statements for proxy models. We use the same statements as that way there's less code duplication, but of course for proxy models we can skip all that pointless field stuff and just chuck out an operation. """ added = set(self.new_proxy_keys) - set(self.old_proxy_keys) for app_label, model_name in sorted(added): model_state = self.to_state.models[app_label, model_name] assert model_state.options.get("proxy") # Depend on the deletion of any possible non-proxy version of us dependencies = [ (app_label, model_name, None, False), ] # Depend on all bases for base in model_state.bases: if isinstance(base, six.string_types) and "." in base: base_app_label, base_name = base.split(".", 1) dependencies.append((base_app_label, base_name, None, True)) # Generate creation operation self.add_operation( app_label, operations.CreateModel( name=model_state.name, fields=[], options=model_state.options, bases=model_state.bases, managers=model_state.managers, ), # Depend on the deletion of any possible non-proxy version of us dependencies=dependencies, ) def generate_deleted_models(self): """ Find all deleted models (managed and unmanaged) and make delete operations for them as well as separate operations to delete any foreign key or M2M relationships (we'll optimize these back in later if we can). We also bring forward removal of any model options that refer to collections of fields - the inverse of generate_created_models(). 
""" new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys) deleted_models = set(self.old_model_keys) - new_keys deleted_unmanaged_models = set(self.old_unmanaged_keys) - new_keys all_deleted_models = chain(sorted(deleted_models), sorted(deleted_unmanaged_models)) for app_label, model_name in all_deleted_models: model_state = self.from_state.models[app_label, model_name] model = self.old_apps.get_model(app_label, model_name) if not model._meta.managed: # Skip here, no need to handle fields for unmanaged models continue # Gather related fields related_fields = {} for field in model._meta.local_fields: if field.remote_field: if field.remote_field.model: related_fields[field.name] = field # through will be none on M2Ms on swapped-out models; # we can treat lack of through as auto_created=True, though. if (getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created): related_fields[field.name] = field for field in model._meta.local_many_to_many: if field.remote_field.model: related_fields[field.name] = field if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: related_fields[field.name] = field # Generate option removal first unique_together = model_state.options.pop('unique_together', None) index_together = model_state.options.pop('index_together', None) if unique_together: self.add_operation( app_label, operations.AlterUniqueTogether( name=model_name, unique_together=None, ) ) if index_together: self.add_operation( app_label, operations.AlterIndexTogether( name=model_name, index_together=None, ) ) # Then remove each related field for name, field in sorted(related_fields.items()): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=name, ) ) # Finally, remove the model. # This depends on both the removal/alteration of all incoming fields # and the removal of all its own related fields, and if it's # a through model the field that references it. dependencies = [] for related_object in model._meta.related_objects: related_object_app_label = related_object.related_model._meta.app_label object_name = related_object.related_model._meta.object_name field_name = related_object.field.name dependencies.append((related_object_app_label, object_name, field_name, False)) if not related_object.many_to_many: dependencies.append((related_object_app_label, object_name, field_name, "alter")) for name, field in sorted(related_fields.items()): dependencies.append((app_label, model_name, name, False)) # We're referenced in another field's through= through_user = self.through_users.get((app_label, model_state.name_lower)) if through_user: dependencies.append((through_user[0], through_user[1], through_user[2], False)) # Finally, make the operation, deduping any dependencies self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), dependencies=list(set(dependencies)), ) def generate_deleted_proxies(self): """ Makes DeleteModel statements for proxy models. 
""" deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys) for app_label, model_name in sorted(deleted): model_state = self.from_state.models[app_label, model_name] assert model_state.options.get("proxy") self.add_operation( app_label, operations.DeleteModel( name=model_state.name, ), ) def generate_renamed_fields(self): """ Works out renamed fields """ self.renamed_fields = {} for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) # Scan to see if this is actually a rename! field_dec = self.deep_deconstruct(field) for rem_app_label, rem_model_name, rem_field_name in sorted(self.old_field_keys - self.new_field_keys): if rem_app_label == app_label and rem_model_name == model_name: old_field_dec = self.deep_deconstruct(old_model_state.get_field_by_name(rem_field_name)) if field.remote_field and field.remote_field.model and 'to' in old_field_dec[2]: old_rel_to = old_field_dec[2]['to'] if old_rel_to in self.renamed_models_rel: old_field_dec[2]['to'] = self.renamed_models_rel[old_rel_to] if old_field_dec == field_dec: if self.questioner.ask_rename(model_name, rem_field_name, field_name, field): self.add_operation( app_label, operations.RenameField( model_name=model_name, old_name=rem_field_name, new_name=field_name, ) ) self.old_field_keys.remove((rem_app_label, rem_model_name, rem_field_name)) self.old_field_keys.add((app_label, model_name, field_name)) self.renamed_fields[app_label, model_name, field_name] = rem_field_name break def generate_added_fields(self): """ Fields that have been added """ for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys): self._generate_added_field(app_label, model_name, field_name) def _generate_added_field(self, app_label, model_name, field_name): field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) # Fields that are foreignkeys/m2ms depend on stuff dependencies = [] if field.remote_field and field.remote_field.model: dependencies.extend(self._get_dependecies_for_foreign_key(field)) # You can't just add NOT NULL fields with no default or fields # which don't allow empty strings as default. preserve_default = True time_fields = (models.DateField, models.DateTimeField, models.TimeField) if (not field.null and not field.has_default() and not field.many_to_many and not (field.blank and field.empty_strings_allowed) and not (isinstance(field, time_fields) and field.auto_now)): field = field.clone() if isinstance(field, time_fields) and field.auto_now_add: field.default = self.questioner.ask_auto_now_add_addition(field_name, model_name) else: field.default = self.questioner.ask_not_null_addition(field_name, model_name) preserve_default = False self.add_operation( app_label, operations.AddField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ), dependencies=dependencies, ) def generate_removed_fields(self): """ Fields that have been removed. 
""" for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys): self._generate_removed_field(app_label, model_name, field_name) def _generate_removed_field(self, app_label, model_name, field_name): self.add_operation( app_label, operations.RemoveField( model_name=model_name, name=field_name, ), # We might need to depend on the removal of an # order_with_respect_to or index/unique_together operation; # this is safely ignored if there isn't one dependencies=[ (app_label, model_name, field_name, "order_wrt_unset"), (app_label, model_name, field_name, "foo_together_change"), ], ) def generate_altered_fields(self): """ Fields that have been altered. """ for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)): # Did the field change? old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_field_name = self.renamed_fields.get((app_label, model_name, field_name), field_name) old_field = self.old_apps.get_model(app_label, old_model_name)._meta.get_field(old_field_name) new_field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) # Implement any model renames on relations; these are handled by RenameModel # so we need to exclude them from the comparison if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "model", None): rename_key = ( new_field.remote_field.model._meta.app_label, new_field.remote_field.model._meta.model_name, ) if rename_key in self.renamed_models: new_field.remote_field.model = old_field.remote_field.model if hasattr(new_field, "remote_field") and getattr(new_field.remote_field, "through", None): rename_key = ( new_field.remote_field.through._meta.app_label, new_field.remote_field.through._meta.model_name, ) if rename_key in self.renamed_models: new_field.remote_field.through = old_field.remote_field.through old_field_dec = self.deep_deconstruct(old_field) new_field_dec = self.deep_deconstruct(new_field) if old_field_dec != new_field_dec: both_m2m = old_field.many_to_many and new_field.many_to_many neither_m2m = not old_field.many_to_many and not new_field.many_to_many if both_m2m or neither_m2m: # Either both fields are m2m or neither is preserve_default = True if (old_field.null and not new_field.null and not new_field.has_default() and not new_field.many_to_many): field = new_field.clone() new_default = self.questioner.ask_not_null_alteration(field_name, model_name) if new_default is not models.NOT_PROVIDED: field.default = new_default preserve_default = False else: field = new_field self.add_operation( app_label, operations.AlterField( model_name=model_name, name=field_name, field=field, preserve_default=preserve_default, ) ) else: # We cannot alter between m2m and concrete fields self._generate_removed_field(app_label, model_name, field_name) self._generate_added_field(app_label, model_name, field_name) def _get_dependecies_for_foreign_key(self, field): # Account for FKs to swappable models swappable_setting = getattr(field, 'swappable_setting', None) if swappable_setting is not None: dep_app_label = "__setting__" dep_object_name = swappable_setting else: dep_app_label = field.remote_field.model._meta.app_label dep_object_name = field.remote_field.model._meta.object_name dependencies = [(dep_app_label, dep_object_name, None, True)] if getattr(field.remote_field, "through", None) and not field.remote_field.through._meta.auto_created: dependencies.append(( field.remote_field.through._meta.app_label, 
field.remote_field.through._meta.object_name, None, True, )) return dependencies def _generate_altered_foo_together(self, operation): option_name = operation.option_name for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] # We run the old version through the field renames to account for those old_value = old_model_state.options.get(option_name) or set() if old_value: old_value = { tuple( self.renamed_fields.get((app_label, model_name, n), n) for n in unique ) for unique in old_value } new_value = new_model_state.options.get(option_name) or set() if new_value: new_value = set(new_value) if old_value != new_value: dependencies = [] for foo_togethers in new_value: for field_name in foo_togethers: field = self.new_apps.get_model(app_label, model_name)._meta.get_field(field_name) if field.remote_field and field.remote_field.model: dependencies.extend(self._get_dependecies_for_foreign_key(field)) self.add_operation( app_label, operation( name=model_name, **{option_name: new_value} ), dependencies=dependencies, ) def generate_altered_unique_together(self): self._generate_altered_foo_together(operations.AlterUniqueTogether) def generate_altered_index_together(self): self._generate_altered_foo_together(operations.AlterIndexTogether) def generate_altered_db_table(self): models_to_check = self.kept_model_keys.union(self.kept_proxy_keys).union(self.kept_unmanaged_keys) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_db_table_name = old_model_state.options.get('db_table') new_db_table_name = new_model_state.options.get('db_table') if old_db_table_name != new_db_table_name: self.add_operation( app_label, operations.AlterModelTable( name=model_name, table=new_db_table_name, ) ) def generate_altered_options(self): """ Works out if any non-schema-affecting options have changed and makes an operation to represent them in state changes (in case Python code in migrations needs them) """ models_to_check = self.kept_model_keys.union( self.kept_proxy_keys ).union( self.kept_unmanaged_keys ).union( # unmanaged converted to managed set(self.old_unmanaged_keys).intersection(self.new_model_keys) ).union( # managed converted to unmanaged set(self.old_model_keys).intersection(self.new_unmanaged_keys) ) for app_label, model_name in sorted(models_to_check): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] old_options = dict( option for option in old_model_state.options.items() if option[0] in AlterModelOptions.ALTER_OPTION_KEYS ) new_options = dict( option for option in new_model_state.options.items() if option[0] in AlterModelOptions.ALTER_OPTION_KEYS ) if old_options != new_options: self.add_operation( app_label, operations.AlterModelOptions( name=model_name, options=new_options, ) ) def generate_altered_order_with_respect_to(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] 
new_model_state = self.to_state.models[app_label, model_name] if (old_model_state.options.get("order_with_respect_to") != new_model_state.options.get("order_with_respect_to")): # Make sure it comes second if we're adding # (removal dependency is part of RemoveField) dependencies = [] if new_model_state.options.get("order_with_respect_to"): dependencies.append(( app_label, model_name, new_model_state.options["order_with_respect_to"], True, )) # Actually generate the operation self.add_operation( app_label, operations.AlterOrderWithRespectTo( name=model_name, order_with_respect_to=new_model_state.options.get('order_with_respect_to'), ), dependencies=dependencies, ) def generate_altered_managers(self): for app_label, model_name in sorted(self.kept_model_keys): old_model_name = self.renamed_models.get((app_label, model_name), model_name) old_model_state = self.from_state.models[app_label, old_model_name] new_model_state = self.to_state.models[app_label, model_name] if old_model_state.managers != new_model_state.managers: self.add_operation( app_label, operations.AlterModelManagers( name=model_name, managers=new_model_state.managers, ) ) def arrange_for_graph(self, changes, graph, migration_name=None): """ Takes in a result from changes() and a MigrationGraph, and fixes the names and dependencies of the changes so they extend the graph from the leaf nodes for each app. """ leaves = graph.leaf_nodes() name_map = {} for app_label, migrations in list(changes.items()): if not migrations: continue # Find the app label's current leaf node app_leaf = None for leaf in leaves: if leaf[0] == app_label: app_leaf = leaf break # Do they want an initial migration for this app? if app_leaf is None and not self.questioner.ask_initial(app_label): # They don't. for migration in migrations: name_map[(app_label, migration.name)] = (app_label, "__first__") del changes[app_label] continue # Work out the next number in the sequence if app_leaf is None: next_number = 1 else: next_number = (self.parse_number(app_leaf[1]) or 0) + 1 # Name each migration for i, migration in enumerate(migrations): if i == 0 and app_leaf: migration.dependencies.append(app_leaf) if i == 0 and not app_leaf: new_name = "0001_%s" % migration_name if migration_name else "0001_initial" else: new_name = "%04i_%s" % ( next_number, migration_name or self.suggest_name(migration.operations)[:100], ) name_map[(app_label, migration.name)] = (app_label, new_name) next_number += 1 migration.name = new_name # Now fix dependencies for app_label, migrations in changes.items(): for migration in migrations: migration.dependencies = [name_map.get(d, d) for d in migration.dependencies] return changes def _trim_to_apps(self, changes, app_labels): """ Takes changes from arrange_for_graph and set of app labels and returns a modified set of changes which trims out as many migrations that are not in app_labels as possible. Note that some other migrations may still be present, as they may be required dependencies. 
""" # Gather other app dependencies in a first pass app_dependencies = {} for app_label, migrations in changes.items(): for migration in migrations: for dep_app_label, name in migration.dependencies: app_dependencies.setdefault(app_label, set()).add(dep_app_label) required_apps = set(app_labels) # Keep resolving till there's no change old_required_apps = None while old_required_apps != required_apps: old_required_apps = set(required_apps) for app_label in list(required_apps): required_apps.update(app_dependencies.get(app_label, set())) # Remove all migrations that aren't needed for app_label in list(changes.keys()): if app_label not in required_apps: del changes[app_label] return changes @classmethod def suggest_name(cls, ops): """ Given a set of operations, suggests a name for the migration they might represent. Names are not guaranteed to be unique, but we put some effort in to the fallback name to avoid VCS conflicts if we can. """ if len(ops) == 1: if isinstance(ops[0], operations.CreateModel): return ops[0].name_lower elif isinstance(ops[0], operations.DeleteModel): return "delete_%s" % ops[0].name_lower elif isinstance(ops[0], operations.AddField): return "%s_%s" % (ops[0].model_name_lower, ops[0].name_lower) elif isinstance(ops[0], operations.RemoveField): return "remove_%s_%s" % (ops[0].model_name_lower, ops[0].name_lower) elif len(ops) > 1: if all(isinstance(o, operations.CreateModel) for o in ops): return "_".join(sorted(o.name_lower for o in ops)) return "auto_%s" % get_migration_name_timestamp() @classmethod def parse_number(cls, name): """ Given a migration name, tries to extract a number from the beginning of it. If no number found, returns None. """ match = re.match(r'^\d+', name) if match: return int(match.group()) return None
apache-2.0
jmiserez/pox
pox/web/webmessenger.py
2
8857
# Copyright 2011 James McCauley # # This file is part of POX. # # POX is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # POX is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with POX. If not, see <http://www.gnu.org/licenses/>. """ Connects the POX messenger bus to HTTP. Requires the "webserver" component. """ from SocketServer import ThreadingMixIn from BaseHTTPServer import * import time import select import random import hashlib import base64 import json from pox.lib.recoco import Timer from pox.messenger.messenger import MessengerConnection from pox.core import core from webcore import * log = core.getLogger() class HTTPMessengerConnection (MessengerConnection): def __init__ (self, source, session_key): MessengerConnection.__init__(self, source, ID=str(id(self))) #TODO: better ID self.session_key = session_key self._messages = [] self._cond = threading.Condition() self._quitting = False # We're really protected from attack by the session key, we hope self._tx_seq = -1 #random.randint(0, 1 << 32) self._rx_seq = None #self._t = Timer(10, lambda : self.send({'hi':'again'}), recurring=True) self._touched = time.time() def _check_timeout (self): if (time.time() - self._touched) > 120: log.info("Session %s timed out", self.session_key) self._close() def _new_tx_seq (self): self._tx_seq = (self._tx_seq + 1) & 0x7fFFffFF return self._tx_seq def _check_rx_seq (self, seq): seq = int(seq) if self._rx_seq is None: self._rx_seq = seq if seq != self._rx_seq: return False self._rx_seq = (self._rx_seq + 1) & 0x7fFFffFF return True def _close (self): super(HTTPMessengerConnection, self)._close() #TODO: track request sockets and cancel them? 
self._quitting = True def sendRaw (self, data): self._cond.acquire() self._messages.append(data) self._cond.notify() self._cond.release() def _do_recv_msg (self, items): #print ">>",items for item in items: self._recv_msg(item) class HTTPMessengerSource (object): def __init__ (self): self._session_key_salt = str(time.time()) + "POX" self._connections = {} #self._t = Timer(5, self._check_timeouts, recurring=True) self._t = Timer(60*2, self._check_timeouts, recurring=True) def _check_timeouts (self): for c in self._connections.values(): c._check_timeout() def _forget (self, connection): if connection.session_key in self._connections: del self._connections[connection.session_key] else: #print "Failed to forget", connection pass def create_session (self): key = None while True: key = str(random.random()) + self._session_key_salt key += str(id(key)) key = base64.encodestring(hashlib.md5(key).digest()).replace('=','').replace('+','').replace('/','').strip() if key not in self._connections: break ses = HTTPMessengerConnection(self, key) self._connections[key] = ses return ses def get_session (self, key): return self._connections.get(key, None) class CometRequestHandler (SplitRequestHandler): protocol_version = 'HTTP/1.1' # def __init__ (self, *args, **kw): # super(CometRequestHandler, self).__init__(*args, **kw) def _init (self): self.source = self.args['source'] self.auth_function = self.args.get('auth', None) def _doAuth (self): if self.auth_function: auth = self.headers.get("Authorization", "").strip().lower() success = False if auth.startswith("basic "): try: auth = base64.decodestring(auth[6:].strip()).split(':', 1) success = self.auth_function(auth[0], auth[1]) except: pass if success is not True: self.send_response(401, "Authorization Required") self.send_header("WWW-Authenticate", 'Basic realm="POX"') self.end_headers() return def _getSession (self): session_key = self.headers.get("X-POX-Messenger-Session-Key") if session_key is None: session_key = self.path.split('/')[-1] session_key = session_key.strip() if len(session_key) == 0: #TODO: return some bad response and log return None if session_key == "new": hmh = self.source.create_session() else: hmh = self.source.get_session(session_key) #print session_key, hmh.session_key return hmh def _enter (self): self._doAuth() hmh = self._getSession() if hmh is None: #TODO: return some bad response and log pass else: hmh._touched = time.time() return hmh def do_POST (self): hmh = self._enter() if hmh is None: return None l = self.headers.get("Content-Length", "") if l == "": data = json.loads(self.rfile.read()) else: data = json.loads(self.rfile.read(int(l))) payload = data['data'] # We send null payload for timeout poking and initial setup if payload is not None: if not hmh._check_rx_seq(data['seq']): # Bad seq! 
data = '{"seq":-1,"ses":"%s"}' % (hmh.session_key,) self.send_response(400, "Bad sequence number") self.send_header("Content-Type", "application/json") self.send_header("Content-Length", len(data)) self.send_header("X-POX-Messenger-Sequence-Number", "-1") if self.auth_function: self.send_header("WWW-Authenticate", 'Basic realm="POX"') self.end_headers() self.wfile.write(data) hmh._close() return core.callLater(hmh._do_recv_msg, payload) else: #print "KeepAlive", hmh pass try: data = '{"seq":-1,"ses":"%s"}' % (hmh.session_key,) self.send_response(200, "OK") self.send_header("Content-Type", "application/json") self.send_header("Content-Length", len(data)) self.send_header("X-POX-Messenger-Sequence-Number", "-1") if self.auth_function: self.send_header("WWW-Authenticate", 'Basic realm="POX"') self.end_headers() self.wfile.write(data) except: import traceback traceback.print_exc() pass return def do_GET (self): hmh = self._enter() if hmh is None: return None hmh._cond.acquire() if len(hmh._messages) == 0: # Wait for messages while True: # Every couple seconds check if the socket is dead hmh._cond.wait(2) if len(hmh._messages): break if hmh._quitting: break r,w,x = select.select([self.wfile],[],[self.wfile], 0) if len(r) or len(x): # Other side disconnected? hmh._cond.release() return # Okay... if hmh._quitting: #NOTE: we don't drain the messages first, but maybe we should? try: data = '{"seq":-1,"ses":"%s"}' % (hmh.session_key,) self.send_response(200, "OK") self.send_header("Content-Type", "application/json") self.send_header("Content-Length", len(data)) self.send_header("X-POX-Messenger-Sequence-Number", "-1") if self.auth_function: self.send_header("WWW-Authenticate", 'Basic realm="POX"') self.end_headers() self.wfile.write(data) except: pass hmh._cond.release() return num_messages = min(20, len(hmh._messages)) data = hmh._messages[:num_messages] seq = hmh._new_tx_seq() data = '{"seq":%i,"ses":"%s","data":[%s]}' % (seq, hmh.session_key, ','.join(data)) try: self.send_response(200, "OK") self.send_header("Content-Type", "application/json") self.send_header("Content-Length", len(data)) self.send_header("X-POX-Messenger-Sequence-Number", str(seq)) if self.auth_function: self.send_header("WWW-Authenticate", 'Basic realm="POX"') self.end_headers() self.wfile.write(data) del hmh._messages[:num_messages] hmh._first_seq += num_messages hmh._message_count = 0 except: pass hmh._cond.release() def launch (username='', password=''): if not core.hasComponent("WebServer"): log.error("WebServer is required but unavailable") return source = core.registerNew(HTTPMessengerSource) # Set up config info config = {"source":source} if len(username) and len(password): config['auth'] = lambda u, p: (u == username) and (p == password) core.WebServer.set_handler("/_webmsg/", CometRequestHandler, config, True)
gpl-3.0
wcpr740/wcpr.org
flask_site/helpers/assets.py
1
2233
import os

from webassets import filter
from flask_assets import Environment, Bundle

from flask_site.libraries.csscompressor_webassets import CSSCompressor


def register_filters():
    # This compressor is better than the one built into webassets
    filter.register_filter(CSSCompressor)


def compile_assets(app, bundle_config):
    """
    Compile Bundles from a config dictionary, loaded from a file.

    :param flask.Flask app: the flask application after it has been initialized
    :param dict bundle_config: configuration, see example config.
    :return:
    """
    if not bundle_config:
        raise IOError('Bundles config is empty')
    assets = Environment(app)
    for name, settings in bundle_config.iteritems():
        bundle = check_and_compile_bundle(name, settings)
        assets.register(name, bundle)


def check_and_compile_bundle(name, settings):
    if len(name) == 0:
        raise ValueError('The bundle name must have a length of more than 0')
    if not isinstance(settings['type'], str):
        raise ValueError('The "%s" bundle must have a string type associated with it' % name)
    if len(settings['type']) == 0:
        raise ValueError('The "%s" bundle type must have a type length of more than 0' % name)
    if len(settings['files']) == 0:
        raise ValueError('The "%s" bundle must have files associated with it' % name)

    # Check each file in bundle to make sure it exists.
    static_abs_path = os.path.abspath('static')
    for filename in settings['files']:
        if not os.path.isfile(os.path.join(static_abs_path, filename)):
            raise IOError('File "%s" in bundle "%s" does not exist.' % (filename, name))

    if settings.get('filters', None) is None:
        filters = None
    else:
        filters = ','.join(settings['filters'])

    if settings.get('output', None) is None:
        output = 'out/' + name + '.%(version)s' + '.' + settings['type']
    else:
        output = settings['output']

    if settings.get('depends', None) is None:
        depends = None
    else:
        depends = ','.join(settings['depends'])

    return Bundle(*settings['files'], filters=filters, output=output, depends=depends)


__all__ = ['compile_assets', 'register_filters']
apache-2.0
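(Editorial aside, not part of the record above: check_and_compile_bundle documents no sample input, so the dict below is only a sketch of the structure it appears to expect; the bundle name, file paths, and filter name are all invented for illustration.)

# Hypothetical bundle configuration for compile_assets(); every name and path
# here is invented, and it assumes register_filters() has been called so the
# CSS compressor filter is registered with webassets under the assumed name.
example_bundles = {
    'site_css': {
        'type': 'css',                   # used for the default output filename extension
        'files': ['css/base.css',        # must exist under the app's static/ directory
                  'css/layout.css'],
        'filters': ['csscompressor'],    # optional; joined with ',' before building the Bundle
        'output': 'out/site.%(version)s.css',  # optional; default is 'out/<name>.<version>.<type>'
        # 'depends' is also optional and defaults to None
    },
}

# compile_assets(app, example_bundles) would then register one Bundle named 'site_css'.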
RPGOne/scikit-learn
sklearn/covariance/tests/test_robust_covariance.py
75
3825
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr> # Gael Varoquaux <gael.varoquaux@normalesup.org> # Virgile Fritsch <virgile.fritsch@inria.fr> # # License: BSD 3 clause import numpy as np from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_raise_message from sklearn.exceptions import NotFittedError from sklearn import datasets from sklearn.covariance import empirical_covariance, MinCovDet, \ EllipticEnvelope from sklearn.covariance import fast_mcd X = datasets.load_iris().data X_1d = X[:, 0] n_samples, n_features = X.shape def test_mcd(): # Tests the FastMCD algorithm implementation # Small data set # test without outliers (random independent normal data) launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80) # test with a contaminated data set (medium contamination) launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70) # test with a contaminated data set (strong contamination) launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50) # Medium data set launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540) # Large data set launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870) # 1D data set launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350) def test_fast_mcd_on_invalid_input(): X = np.arange(100) assert_raise_message(ValueError, 'fast_mcd expects at least 2 samples', fast_mcd, X) def test_mcd_class_on_invalid_input(): X = np.arange(100) mcd = MinCovDet() assert_raise_message(ValueError, 'MinCovDet expects at least 2 samples', mcd.fit, X) def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov, tol_support): rand_gen = np.random.RandomState(0) data = rand_gen.randn(n_samples, n_features) # add some outliers outliers_index = rand_gen.permutation(n_samples)[:n_outliers] outliers_offset = 10. * \ (rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5) data[outliers_index] += outliers_offset inliers_mask = np.ones(n_samples).astype(bool) inliers_mask[outliers_index] = False pure_data = data[inliers_mask] # compute MCD by fitting an object mcd_fit = MinCovDet(random_state=rand_gen).fit(data) T = mcd_fit.location_ S = mcd_fit.covariance_ H = mcd_fit.support_ # compare with the estimates learnt from the inliers error_location = np.mean((pure_data.mean(0) - T) ** 2) assert(error_location < tol_loc) error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2) assert(error_cov < tol_cov) assert(np.sum(H) >= tol_support) assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_) def test_mcd_issue1127(): # Check that the code does not break with X.shape = (3, 1) # (i.e. n_support = n_samples) rnd = np.random.RandomState(0) X = rnd.normal(size=(3, 1)) mcd = MinCovDet() mcd.fit(X) def test_outlier_detection(): rnd = np.random.RandomState(0) X = rnd.randn(100, 10) clf = EllipticEnvelope(contamination=0.1) assert_raises(NotFittedError, clf.predict, X) assert_raises(NotFittedError, clf.decision_function, X) clf.fit(X) y_pred = clf.predict(X) decision = clf.decision_function(X, raw_values=True) decision_transformed = clf.decision_function(X, raw_values=False) assert_array_almost_equal( decision, clf.mahalanobis(X)) assert_array_almost_equal(clf.mahalanobis(X), clf.dist_) assert_almost_equal(clf.score(X, np.ones(100)), (100 - y_pred[y_pred == -1].size) / 100.) assert(sum(y_pred == -1) == sum(decision_transformed < 0))
bsd-3-clause
JakeBrand/CMPUT410-E6
v1/lib/python2.7/site-packages/django/contrib/comments/signals.py
312
1065
""" Signals relating to comments. """ from django.dispatch import Signal # Sent just before a comment will be posted (after it's been approved and # moderated; this can be used to modify the comment (in place) with posting # details or other such actions. If any receiver returns False the comment will be # discarded and a 400 response. This signal is sent at more or less # the same time (just before, actually) as the Comment object's pre-save signal, # except that the HTTP request is sent along with this signal. comment_will_be_posted = Signal(providing_args=["comment", "request"]) # Sent just after a comment was posted. See above for how this differs # from the Comment object's post-save signal. comment_was_posted = Signal(providing_args=["comment", "request"]) # Sent after a comment was "flagged" in some way. Check the flag to see if this # was a user requesting removal of a comment, a moderator approving/removing a # comment, or some other custom user flag. comment_was_flagged = Signal(providing_args=["comment", "flag", "created", "request"])
apache-2.0
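(Editorial aside, not part of the record above: a minimal receiver sketch showing how the "return False leads to a 400 response" behaviour described in the module's comments is typically used; the spam heuristic is invented purely for illustration.)

# Illustrative receiver only; not part of the signals module above.
from django.contrib.comments.signals import comment_will_be_posted

def reject_spammy_comments(sender, comment, request, **kwargs):
    # Returning False tells the comments app to discard the comment
    # and respond with a 400, per the module comments above.
    if 'buy now' in comment.comment.lower():
        return False

comment_will_be_posted.connect(reject_spammy_comments)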
midma101/AndIWasJustGoingToBed
.venv/lib/python2.7/site-packages/Crypto/SelfTest/Hash/test_SHA384.py
118
2762
# -*- coding: utf-8 -*- # # SelfTest/Hash/test_SHA.py: Self-test for the SHA-384 hash function # # Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net> # # =================================================================== # The contents of this file are dedicated to the public domain. To # the extent that dedication to the public domain is not available, # everyone is granted a worldwide, perpetual, royalty-free, # non-exclusive license to exercise all rights associated with the # contents of this file for any purpose whatsoever. # No rights are reserved. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # =================================================================== """Self-test suite for Crypto.Hash.SHA384""" __revision__ = "$Id$" # Test vectors from various sources # This is a list of (expected_result, input[, description]) tuples. test_data = [ # RFC 4634: Section Page 8.4, "Test 1" ('cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7', 'abc'), # RFC 4634: Section Page 8.4, "Test 2.2" ('09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712fcc7c71a557e2db966c3e9fa91746039', 'abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu'), # RFC 4634: Section Page 8.4, "Test 3" ('9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b07b8b3dc38ecc4ebae97ddd87f3d8985', 'a' * 10**6, "'a' * 10**6"), # Taken from http://de.wikipedia.org/wiki/Secure_Hash_Algorithm ('38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b', ''), # Example from http://de.wikipedia.org/wiki/Secure_Hash_Algorithm ('71e8383a4cea32d6fd6877495db2ee353542f46fa44bc23100bca48f3366b84e809f0708e81041f427c6d5219a286677', 'Franz jagt im komplett verwahrlosten Taxi quer durch Bayern'), ] def get_tests(config={}): from Crypto.Hash import SHA384 from common import make_hash_tests return make_hash_tests(SHA384, "SHA384", test_data, digest_size=48, oid='\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x02') if __name__ == '__main__': import unittest suite = lambda: unittest.TestSuite(get_tests()) unittest.main(defaultTest='suite') # vim:set ts=4 sw=4 sts=4 expandtab:
mit
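(Editorial aside, not part of the record above: the first RFC 4634 vector in the test data can be reproduced directly with the hash module under test; this snippet is an added illustration, not part of the original file.)

# Illustrative check of the first test vector above (not part of the test suite).
from Crypto.Hash import SHA384

h = SHA384.new()
h.update(b'abc')
assert h.hexdigest() == ('cb00753f45a35e8bb5a03d699ac65007272c32ab0eded163'
                         '1a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7')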
midma101/AndIWasJustGoingToBed
.venv/lib/python2.7/site-packages/Crypto/Util/number.py
128
95488
#
#   number.py : Number-theoretic functions
#
#  Part of the Python Cryptography Toolkit
#
#  Written by Andrew M. Kuchling, Barry A. Warsaw, and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain.  To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
#

__revision__ = "$Id$"

from Crypto.pct_warnings import GetRandomNumber_DeprecationWarning, PowmInsecureWarning
from warnings import warn as _warn
import math
import sys

from Crypto.Util.py3compat import *

bignum = long
try:
    from Crypto.PublicKey import _fastmath
except ImportError:
    # For production, we are going to let import issues due to gmp/mpir shared
    # libraries not loading slide silently and use slowmath. If you'd rather
    # see an exception raised if _fastmath exists but cannot be imported,
    # uncomment the below
    #
    # from distutils.sysconfig import get_config_var
    # import inspect, os
    # _fm_path = os.path.normpath(os.path.dirname(os.path.abspath(
    #     inspect.getfile(inspect.currentframe())))
    #     + "/../../PublicKey/_fastmath" + get_config_var("SO"))
    # if os.path.exists(_fm_path):
    #     raise ImportError("While the _fastmath module exists, importing "+
    #         "it failed. This may point to the gmp or mpir shared library "+
    #         "not being in the path. _fastmath was found at "+_fm_path)
    _fastmath = None

# You need libgmp v5 or later to get mpz_powm_sec.  Warn if it's not available.
if _fastmath is not None and not _fastmath.HAVE_DECL_MPZ_POWM_SEC:
    _warn("Not using mpz_powm_sec.  You should rebuild using libgmp >= 5 to avoid timing attack vulnerability.", PowmInsecureWarning)

# New functions
from _number_new import *

# Commented out and replaced with faster versions below
## def long2str(n):
##     s=''
##     while n>0:
##         s=chr(n & 255)+s
##         n=n>>8
##     return s

## import types
## def str2long(s):
##     if type(s)!=types.StringType: return s   # Integers will be left alone
##     return reduce(lambda x,y : x*256+ord(y), s, 0L)

def size (N):
    """size(N:long) : int
    Returns the size of the number N in bits.
    """
    bits = 0
    while N >> bits:
        bits += 1
    return bits

def getRandomNumber(N, randfunc=None):
    """Deprecated.  Use getRandomInteger or getRandomNBitInteger instead."""
    warnings.warn("Crypto.Util.number.getRandomNumber has confusing semantics "+
        "and has been deprecated. Use getRandomInteger or getRandomNBitInteger instead.",
        GetRandomNumber_DeprecationWarning)
    return getRandomNBitInteger(N, randfunc)

def getRandomInteger(N, randfunc=None):
    """getRandomInteger(N:int, randfunc:callable):long
    Return a random number with at most N bits.

    If randfunc is omitted, then Random.new().read is used.

    This function is for internal use only and may be renamed or removed in
    the future.
    """
    if randfunc is None:
        _import_Random()
        randfunc = Random.new().read

    S = randfunc(N>>3)
    odd_bits = N % 8
    if odd_bits != 0:
        char = ord(randfunc(1)) >> (8-odd_bits)
        S = bchr(char) + S
    value = bytes_to_long(S)
    return value

def getRandomRange(a, b, randfunc=None):
    """getRandomRange(a:int, b:int, randfunc:callable):long
    Return a random number n so that a <= n < b.

    If randfunc is omitted, then Random.new().read is used.

    This function is for internal use only and may be renamed or removed in
    the future.
    """
    range_ = b - a - 1
    bits = size(range_)
    value = getRandomInteger(bits, randfunc)
    while value > range_:
        value = getRandomInteger(bits, randfunc)
    return a + value

def getRandomNBitInteger(N, randfunc=None):
    """getRandomNBitInteger(N:int, randfunc:callable):long
    Return a random number with exactly N bits, i.e. a random number
    between 2**(N-1) and (2**N)-1.

    If randfunc is omitted, then Random.new().read is used.

    This function is for internal use only and may be renamed or removed in
    the future.
    """
    value = getRandomInteger (N-1, randfunc)
    value |= 2L ** (N-1)                # Ensure high bit is set
    assert size(value) >= N
    return value

def GCD(x,y):
    """GCD(x:long, y:long): long
    Return the GCD of x and y.
    """
    x = abs(x) ; y = abs(y)
    while x > 0:
        x, y = y % x, x
    return y

def inverse(u, v):
    """inverse(u:long, v:long):long
    Return the inverse of u mod v.
    """
    u3, v3 = long(u), long(v)
    u1, v1 = 1L, 0L
    while v3 > 0:
        q = divmod(u3, v3)[0]
        u1, v1 = v1, u1 - v1*q
        u3, v3 = v3, u3 - v3*q
    while u1 < 0:
        u1 = u1 + v
    return u1

# Given a number of bits to generate and a random generation function,
# find a prime number of the appropriate size.

def getPrime(N, randfunc=None):
    """getPrime(N:int, randfunc:callable):long
    Return a random N-bit prime number.

    If randfunc is omitted, then Random.new().read is used.
    """
    if randfunc is None:
        _import_Random()
        randfunc = Random.new().read

    number = getRandomNBitInteger(N, randfunc) | 1
    while (not isPrime(number, randfunc=randfunc)):
        number = number + 2
    return number


def _rabinMillerTest(n, rounds, randfunc=None):
    """_rabinMillerTest(n:long, rounds:int, randfunc:callable):int
    Tests if n is prime.
    Returns 0 when n is definitely composite.
    Returns 1 when n is probably prime.
    Returns 2 when n is definitely prime.

    If randfunc is omitted, then Random.new().read is used.

    This function is for internal use only and may be renamed or removed in
    the future.
    """
    # check special cases (n==2, n even, n < 2)
    if n < 3 or (n & 1) == 0:
        return n == 2
    # n might be very large so it might be beneficial to precalculate n-1
    n_1 = n - 1
    # determine m and b so that 2**b * m = n - 1 and b maximal
    b = 0
    m = n_1
    while (m & 1) == 0:
        b += 1
        m >>= 1

    tested = []
    # we need to do at most n-2 rounds.
    for i in xrange (min (rounds, n-2)):
        # randomly choose a < n and make sure it hasn't been tested yet
        a = getRandomRange (2, n, randfunc)
        while a in tested:
            a = getRandomRange (2, n, randfunc)
        tested.append (a)
        # do the rabin-miller test
        z = pow (a, m, n) # (a**m) % n
        if z == 1 or z == n_1:
            continue
        composite = 1
        for r in xrange (b):
            z = (z * z) % n
            if z == 1:
                return 0
            elif z == n_1:
                composite = 0
                break
        if composite:
            return 0
    return 1

def getStrongPrime(N, e=0, false_positive_prob=1e-6, randfunc=None):
    """getStrongPrime(N:int, e:int, false_positive_prob:float, randfunc:callable):long
    Return a random strong N-bit prime number.
    In this context, p is a strong prime if p-1 and p+1 have at
    least one large prime factor.
    N should be a multiple of 128 and > 512.

    If e is provided the returned prime (p-1) will be coprime to e
    and thus suitable for RSA where e is the public exponent.

    The optional false_positive_prob is the statistical probability
    that true is returned even though it is not (pseudo-prime).
    It defaults to 1e-6 (less than 1:1000000).
    Note that the real probability of a false-positive is far less.  This is
    just the mathematically provable limit.

    randfunc should take a single int parameter and return that
    many random bytes as a string.
    If randfunc is omitted, then Random.new().read is used.
    """
    # This function was implemented following the
    # instructions found in the paper:
    #   "FAST GENERATION OF RANDOM, STRONG RSA PRIMES"
    #   by Robert D. Silverman
    #   RSA Laboratories
    #   May 17, 1997
    # which by the time of writing could be freely downloaded here:
    # http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.17.2713&rep=rep1&type=pdf

    # Use the accelerator if available
    if _fastmath is not None:
        return _fastmath.getStrongPrime(long(N), long(e), false_positive_prob,
            randfunc)

    if (N < 512) or ((N % 128) != 0):
        raise ValueError ("bits must be multiple of 128 and > 512")

    rabin_miller_rounds = int(math.ceil(-math.log(false_positive_prob)/math.log(4)))

    # calculate range for X
    #   lower_bound = sqrt(2) * 2^{511 + 128*x}
    #   upper_bound = 2^{512 + 128*x} - 1
    x = (N - 512) >> 7
    # We need to approximate the sqrt(2) in the lower_bound by an integer
    # expression because floating point math overflows with these numbers
    lower_bound = divmod(14142135623730950489L * (2L ** (511 + 128*x)),
                         10000000000000000000L)[0]
    upper_bound = (1L << (512 + 128*x)) - 1
    # Randomly choose X in calculated range
    X = getRandomRange (lower_bound, upper_bound, randfunc)

    # generate p1 and p2
    p = [0, 0]
    for i in (0, 1):
        # randomly choose 101-bit y
        y = getRandomNBitInteger (101, randfunc)
        # initialize the field for sieving
        field = [0] * 5 * len (sieve_base)
        # sieve the field
        for prime in sieve_base:
            offset = y % prime
            for j in xrange ((prime - offset) % prime, len (field), prime):
                field[j] = 1

        # look for suitable p[i] starting at y
        result = 0
        for j in range(len(field)):
            composite = field[j]
            # look for next candidate
            if composite:
                continue
            tmp = y + j
            result = _rabinMillerTest (tmp, rabin_miller_rounds)
            if result > 0:
                p[i] = tmp
                break
        if result == 0:
            raise RuntimeError ("Couldn't find prime in field. "
                                "Developer: Increase field_size")

    # Calculate R
    #     R = (p2^{-1} mod p1) * p2 - (p1^{-1} mod p2) * p1
    tmp1 = inverse (p[1], p[0]) * p[1]  # (p2^-1 mod p1)*p2
    tmp2 = inverse (p[0], p[1]) * p[0]  # (p1^-1 mod p2)*p1
    R = tmp1 - tmp2  # (p2^-1 mod p1)*p2 - (p1^-1 mod p2)*p1

    # search for final prime number starting by Y0
    #    Y0 = X + (R - X mod p1p2)
    increment = p[0] * p[1]
    X = X + (R - (X % increment))
    while 1:
        is_possible_prime = 1
        # first check candidate against sieve_base
        for prime in sieve_base:
            if (X % prime) == 0:
                is_possible_prime = 0
                break
        # if e is given make sure that e and X-1 are coprime
        # this is not necessarily a strong prime criterion but useful when
        # creating them for RSA where the p-1 and q-1 should be coprime to
        # the public exponent e
        if e and is_possible_prime:
            if e & 1:
                if GCD (e, X-1) != 1:
                    is_possible_prime = 0
            else:
                if GCD (e, divmod((X-1),2)[0]) != 1:
                    is_possible_prime = 0

        # do some Rabin-Miller-Tests
        if is_possible_prime:
            result = _rabinMillerTest (X, rabin_miller_rounds)
            if result > 0:
                break
        X += increment
        # abort when X has more bits than requested
        # TODO: maybe we shouldn't abort but rather start over.
        if X >= 1L << N:
            raise RuntimeError ("Couldn't find prime in field. "
                                "Developer: Increase field_size")
    return X

def isPrime(N, false_positive_prob=1e-6, randfunc=None):
    """isPrime(N:long, false_positive_prob:float, randfunc:callable):bool
    Return true if N is prime.

    The optional false_positive_prob is the statistical probability
    that true is returned even though it is not (pseudo-prime).
    It defaults to 1e-6 (less than 1:1000000).
    Note that the real probability of a false-positive is far less.  This is
    just the mathematically provable limit.

    If randfunc is omitted, then Random.new().read is used.
    """
    if _fastmath is not None:
        return _fastmath.isPrime(long(N), false_positive_prob, randfunc)

    if N < 3 or N & 1 == 0:
        return N == 2
    for p in sieve_base:
        if N == p:
            return 1
        if N % p == 0:
            return 0

    rounds = int(math.ceil(-math.log(false_positive_prob)/math.log(4)))
    return _rabinMillerTest(N, rounds, randfunc)


# Improved conversion functions contributed by Barry Warsaw, after
# careful benchmarking

import struct

def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert a long integer to a byte string.

    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    # after much testing, this algorithm was deemed to be the fastest
    s = b('')
    n = long(n)
    pack = struct.pack
    while n > 0:
        s = pack('>I', n & 0xffffffffL) + s
        n = n >> 32
    # strip off leading zeros
    for i in range(len(s)):
        if s[i] != b('\000')[0]:
            break
    else:
        # only happens when n == 0
        s = b('\000')
        i = 0
    s = s[i:]
    # add back some pad bytes.  this could be done more efficiently w.r.t. the
    # de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b('\000') + s
    return s

def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a byte string to a long integer.

    This is (essentially) the inverse of long_to_bytes().
    """
    acc = 0L
    unpack = struct.unpack
    length = len(s)
    if length % 4:
        extra = (4 - length % 4)
        s = b('\000') * extra + s
        length = length + extra
    for i in range(0, length, 4):
        acc = (acc << 32) + unpack('>I', s[i:i+4])[0]
    return acc

# For backwards compatibility...
import warnings

def long2str(n, blocksize=0):
    warnings.warn("long2str() has been replaced by long_to_bytes()")
    return long_to_bytes(n, blocksize)

def str2long(s):
    warnings.warn("str2long() has been replaced by bytes_to_long()")
    return bytes_to_long(s)

def _import_Random():
    # This is called in a function instead of at the module level in order to
    # avoid problems with recursive imports
    global Random, StrongRandom
    from Crypto import Random
    from Crypto.Random.random import StrongRandom


# The first 10000 primes used for checking primality.
# This should be enough to eliminate most of the odd
# numbers before needing to do a Rabin-Miller test at all.
sieve_base = ( 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093, 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223, 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327, 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481, 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597, 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721, 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867, 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997, 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113, 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267, 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381, 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531, 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671, 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777, 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909, 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061, 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217, 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347, 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499, 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617, 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761, 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907, 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003, 4007, 4013, 4019, 4021, 4027, 4049, 4051, 4057, 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, 4153, 4157, 4159, 4177, 4201, 4211, 4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, 4327, 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481, 4483, 4493, 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591, 4597, 
4603, 4621, 4637, 4639, 4643, 4649, 4651, 4657, 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, 4759, 4783, 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933, 4937, 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011, 5021, 5023, 5039, 5051, 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209, 5227, 5231, 5233, 5237, 5261, 5273, 5279, 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387, 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, 5449, 5471, 5477, 5479, 5483, 5501, 5503, 5507, 5519, 5521, 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653, 5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, 5801, 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923, 5927, 5939, 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073, 6079, 6089, 6091, 6101, 6113, 6121, 6131, 6133, 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, 6229, 6247, 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361, 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, 6481, 6491, 6521, 6529, 6547, 6551, 6553, 6563, 6569, 6571, 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691, 6701, 6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833, 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971, 6977, 6983, 6991, 6997, 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121, 7127, 7129, 7151, 7159, 7177, 7187, 7193, 7207, 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, 7307, 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487, 7489, 7499, 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, 7573, 7577, 7583, 7589, 7591, 7603, 7607, 7621, 7639, 7643, 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727, 7741, 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907, 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087, 8089, 8093, 8101, 8111, 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, 8221, 8231, 8233, 8237, 8243, 8263, 8269, 8273, 8287, 8291, 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387, 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, 8513, 8521, 8527, 8537, 8539, 8543, 8563, 8573, 8581, 8597, 8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, 8681, 8689, 8693, 8699, 8707, 8713, 8719, 8731, 8737, 8741, 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, 8837, 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001, 9007, 9011, 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, 9127, 9133, 9137, 9151, 9157, 9161, 9173, 9181, 9187, 9199, 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311, 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437, 9439, 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, 9539, 9547, 9551, 9587, 9601, 9613, 9619, 9623, 9629, 9631, 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, 9739, 9743, 9749, 9767, 9769, 9781, 9787, 9791, 9803, 9811, 9817, 9829, 9833, 9839, 
9851, 9857, 9859, 9871, 9883, 9887, 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973, 10007, 10009, 10037, 10039, 10061, 10067, 10069, 10079, 10091, 10093, 10099, 10103, 10111, 10133, 10139, 10141, 10151, 10159, 10163, 10169, 10177, 10181, 10193, 10211, 10223, 10243, 10247, 10253, 10259, 10267, 10271, 10273, 10289, 10301, 10303, 10313, 10321, 10331, 10333, 10337, 10343, 10357, 10369, 10391, 10399, 10427, 10429, 10433, 10453, 10457, 10459, 10463, 10477, 10487, 10499, 10501, 10513, 10529, 10531, 10559, 10567, 10589, 10597, 10601, 10607, 10613, 10627, 10631, 10639, 10651, 10657, 10663, 10667, 10687, 10691, 10709, 10711, 10723, 10729, 10733, 10739, 10753, 10771, 10781, 10789, 10799, 10831, 10837, 10847, 10853, 10859, 10861, 10867, 10883, 10889, 10891, 10903, 10909, 10937, 10939, 10949, 10957, 10973, 10979, 10987, 10993, 11003, 11027, 11047, 11057, 11059, 11069, 11071, 11083, 11087, 11093, 11113, 11117, 11119, 11131, 11149, 11159, 11161, 11171, 11173, 11177, 11197, 11213, 11239, 11243, 11251, 11257, 11261, 11273, 11279, 11287, 11299, 11311, 11317, 11321, 11329, 11351, 11353, 11369, 11383, 11393, 11399, 11411, 11423, 11437, 11443, 11447, 11467, 11471, 11483, 11489, 11491, 11497, 11503, 11519, 11527, 11549, 11551, 11579, 11587, 11593, 11597, 11617, 11621, 11633, 11657, 11677, 11681, 11689, 11699, 11701, 11717, 11719, 11731, 11743, 11777, 11779, 11783, 11789, 11801, 11807, 11813, 11821, 11827, 11831, 11833, 11839, 11863, 11867, 11887, 11897, 11903, 11909, 11923, 11927, 11933, 11939, 11941, 11953, 11959, 11969, 11971, 11981, 11987, 12007, 12011, 12037, 12041, 12043, 12049, 12071, 12073, 12097, 12101, 12107, 12109, 12113, 12119, 12143, 12149, 12157, 12161, 12163, 12197, 12203, 12211, 12227, 12239, 12241, 12251, 12253, 12263, 12269, 12277, 12281, 12289, 12301, 12323, 12329, 12343, 12347, 12373, 12377, 12379, 12391, 12401, 12409, 12413, 12421, 12433, 12437, 12451, 12457, 12473, 12479, 12487, 12491, 12497, 12503, 12511, 12517, 12527, 12539, 12541, 12547, 12553, 12569, 12577, 12583, 12589, 12601, 12611, 12613, 12619, 12637, 12641, 12647, 12653, 12659, 12671, 12689, 12697, 12703, 12713, 12721, 12739, 12743, 12757, 12763, 12781, 12791, 12799, 12809, 12821, 12823, 12829, 12841, 12853, 12889, 12893, 12899, 12907, 12911, 12917, 12919, 12923, 12941, 12953, 12959, 12967, 12973, 12979, 12983, 13001, 13003, 13007, 13009, 13033, 13037, 13043, 13049, 13063, 13093, 13099, 13103, 13109, 13121, 13127, 13147, 13151, 13159, 13163, 13171, 13177, 13183, 13187, 13217, 13219, 13229, 13241, 13249, 13259, 13267, 13291, 13297, 13309, 13313, 13327, 13331, 13337, 13339, 13367, 13381, 13397, 13399, 13411, 13417, 13421, 13441, 13451, 13457, 13463, 13469, 13477, 13487, 13499, 13513, 13523, 13537, 13553, 13567, 13577, 13591, 13597, 13613, 13619, 13627, 13633, 13649, 13669, 13679, 13681, 13687, 13691, 13693, 13697, 13709, 13711, 13721, 13723, 13729, 13751, 13757, 13759, 13763, 13781, 13789, 13799, 13807, 13829, 13831, 13841, 13859, 13873, 13877, 13879, 13883, 13901, 13903, 13907, 13913, 13921, 13931, 13933, 13963, 13967, 13997, 13999, 14009, 14011, 14029, 14033, 14051, 14057, 14071, 14081, 14083, 14087, 14107, 14143, 14149, 14153, 14159, 14173, 14177, 14197, 14207, 14221, 14243, 14249, 14251, 14281, 14293, 14303, 14321, 14323, 14327, 14341, 14347, 14369, 14387, 14389, 14401, 14407, 14411, 14419, 14423, 14431, 14437, 14447, 14449, 14461, 14479, 14489, 14503, 14519, 14533, 14537, 14543, 14549, 14551, 14557, 14561, 14563, 14591, 14593, 14621, 14627, 14629, 14633, 14639, 14653, 14657, 14669, 14683, 14699, 14713, 14717, 14723, 14731, 
14737, 14741, 14747, 14753, 14759, 14767, 14771, 14779, 14783, 14797, 14813, 14821, 14827, 14831, 14843, 14851, 14867, 14869, 14879, 14887, 14891, 14897, 14923, 14929, 14939, 14947, 14951, 14957, 14969, 14983, 15013, 15017, 15031, 15053, 15061, 15073, 15077, 15083, 15091, 15101, 15107, 15121, 15131, 15137, 15139, 15149, 15161, 15173, 15187, 15193, 15199, 15217, 15227, 15233, 15241, 15259, 15263, 15269, 15271, 15277, 15287, 15289, 15299, 15307, 15313, 15319, 15329, 15331, 15349, 15359, 15361, 15373, 15377, 15383, 15391, 15401, 15413, 15427, 15439, 15443, 15451, 15461, 15467, 15473, 15493, 15497, 15511, 15527, 15541, 15551, 15559, 15569, 15581, 15583, 15601, 15607, 15619, 15629, 15641, 15643, 15647, 15649, 15661, 15667, 15671, 15679, 15683, 15727, 15731, 15733, 15737, 15739, 15749, 15761, 15767, 15773, 15787, 15791, 15797, 15803, 15809, 15817, 15823, 15859, 15877, 15881, 15887, 15889, 15901, 15907, 15913, 15919, 15923, 15937, 15959, 15971, 15973, 15991, 16001, 16007, 16033, 16057, 16061, 16063, 16067, 16069, 16073, 16087, 16091, 16097, 16103, 16111, 16127, 16139, 16141, 16183, 16187, 16189, 16193, 16217, 16223, 16229, 16231, 16249, 16253, 16267, 16273, 16301, 16319, 16333, 16339, 16349, 16361, 16363, 16369, 16381, 16411, 16417, 16421, 16427, 16433, 16447, 16451, 16453, 16477, 16481, 16487, 16493, 16519, 16529, 16547, 16553, 16561, 16567, 16573, 16603, 16607, 16619, 16631, 16633, 16649, 16651, 16657, 16661, 16673, 16691, 16693, 16699, 16703, 16729, 16741, 16747, 16759, 16763, 16787, 16811, 16823, 16829, 16831, 16843, 16871, 16879, 16883, 16889, 16901, 16903, 16921, 16927, 16931, 16937, 16943, 16963, 16979, 16981, 16987, 16993, 17011, 17021, 17027, 17029, 17033, 17041, 17047, 17053, 17077, 17093, 17099, 17107, 17117, 17123, 17137, 17159, 17167, 17183, 17189, 17191, 17203, 17207, 17209, 17231, 17239, 17257, 17291, 17293, 17299, 17317, 17321, 17327, 17333, 17341, 17351, 17359, 17377, 17383, 17387, 17389, 17393, 17401, 17417, 17419, 17431, 17443, 17449, 17467, 17471, 17477, 17483, 17489, 17491, 17497, 17509, 17519, 17539, 17551, 17569, 17573, 17579, 17581, 17597, 17599, 17609, 17623, 17627, 17657, 17659, 17669, 17681, 17683, 17707, 17713, 17729, 17737, 17747, 17749, 17761, 17783, 17789, 17791, 17807, 17827, 17837, 17839, 17851, 17863, 17881, 17891, 17903, 17909, 17911, 17921, 17923, 17929, 17939, 17957, 17959, 17971, 17977, 17981, 17987, 17989, 18013, 18041, 18043, 18047, 18049, 18059, 18061, 18077, 18089, 18097, 18119, 18121, 18127, 18131, 18133, 18143, 18149, 18169, 18181, 18191, 18199, 18211, 18217, 18223, 18229, 18233, 18251, 18253, 18257, 18269, 18287, 18289, 18301, 18307, 18311, 18313, 18329, 18341, 18353, 18367, 18371, 18379, 18397, 18401, 18413, 18427, 18433, 18439, 18443, 18451, 18457, 18461, 18481, 18493, 18503, 18517, 18521, 18523, 18539, 18541, 18553, 18583, 18587, 18593, 18617, 18637, 18661, 18671, 18679, 18691, 18701, 18713, 18719, 18731, 18743, 18749, 18757, 18773, 18787, 18793, 18797, 18803, 18839, 18859, 18869, 18899, 18911, 18913, 18917, 18919, 18947, 18959, 18973, 18979, 19001, 19009, 19013, 19031, 19037, 19051, 19069, 19073, 19079, 19081, 19087, 19121, 19139, 19141, 19157, 19163, 19181, 19183, 19207, 19211, 19213, 19219, 19231, 19237, 19249, 19259, 19267, 19273, 19289, 19301, 19309, 19319, 19333, 19373, 19379, 19381, 19387, 19391, 19403, 19417, 19421, 19423, 19427, 19429, 19433, 19441, 19447, 19457, 19463, 19469, 19471, 19477, 19483, 19489, 19501, 19507, 19531, 19541, 19543, 19553, 19559, 19571, 19577, 19583, 19597, 19603, 19609, 19661, 19681, 19687, 19697, 19699, 19709, 
19717, 19727, 19739, 19751, 19753, 19759, 19763, 19777, 19793, 19801, 19813, 19819, 19841, 19843, 19853, 19861, 19867, 19889, 19891, 19913, 19919, 19927, 19937, 19949, 19961, 19963, 19973, 19979, 19991, 19993, 19997, 20011, 20021, 20023, 20029, 20047, 20051, 20063, 20071, 20089, 20101, 20107, 20113, 20117, 20123, 20129, 20143, 20147, 20149, 20161, 20173, 20177, 20183, 20201, 20219, 20231, 20233, 20249, 20261, 20269, 20287, 20297, 20323, 20327, 20333, 20341, 20347, 20353, 20357, 20359, 20369, 20389, 20393, 20399, 20407, 20411, 20431, 20441, 20443, 20477, 20479, 20483, 20507, 20509, 20521, 20533, 20543, 20549, 20551, 20563, 20593, 20599, 20611, 20627, 20639, 20641, 20663, 20681, 20693, 20707, 20717, 20719, 20731, 20743, 20747, 20749, 20753, 20759, 20771, 20773, 20789, 20807, 20809, 20849, 20857, 20873, 20879, 20887, 20897, 20899, 20903, 20921, 20929, 20939, 20947, 20959, 20963, 20981, 20983, 21001, 21011, 21013, 21017, 21019, 21023, 21031, 21059, 21061, 21067, 21089, 21101, 21107, 21121, 21139, 21143, 21149, 21157, 21163, 21169, 21179, 21187, 21191, 21193, 21211, 21221, 21227, 21247, 21269, 21277, 21283, 21313, 21317, 21319, 21323, 21341, 21347, 21377, 21379, 21383, 21391, 21397, 21401, 21407, 21419, 21433, 21467, 21481, 21487, 21491, 21493, 21499, 21503, 21517, 21521, 21523, 21529, 21557, 21559, 21563, 21569, 21577, 21587, 21589, 21599, 21601, 21611, 21613, 21617, 21647, 21649, 21661, 21673, 21683, 21701, 21713, 21727, 21737, 21739, 21751, 21757, 21767, 21773, 21787, 21799, 21803, 21817, 21821, 21839, 21841, 21851, 21859, 21863, 21871, 21881, 21893, 21911, 21929, 21937, 21943, 21961, 21977, 21991, 21997, 22003, 22013, 22027, 22031, 22037, 22039, 22051, 22063, 22067, 22073, 22079, 22091, 22093, 22109, 22111, 22123, 22129, 22133, 22147, 22153, 22157, 22159, 22171, 22189, 22193, 22229, 22247, 22259, 22271, 22273, 22277, 22279, 22283, 22291, 22303, 22307, 22343, 22349, 22367, 22369, 22381, 22391, 22397, 22409, 22433, 22441, 22447, 22453, 22469, 22481, 22483, 22501, 22511, 22531, 22541, 22543, 22549, 22567, 22571, 22573, 22613, 22619, 22621, 22637, 22639, 22643, 22651, 22669, 22679, 22691, 22697, 22699, 22709, 22717, 22721, 22727, 22739, 22741, 22751, 22769, 22777, 22783, 22787, 22807, 22811, 22817, 22853, 22859, 22861, 22871, 22877, 22901, 22907, 22921, 22937, 22943, 22961, 22963, 22973, 22993, 23003, 23011, 23017, 23021, 23027, 23029, 23039, 23041, 23053, 23057, 23059, 23063, 23071, 23081, 23087, 23099, 23117, 23131, 23143, 23159, 23167, 23173, 23189, 23197, 23201, 23203, 23209, 23227, 23251, 23269, 23279, 23291, 23293, 23297, 23311, 23321, 23327, 23333, 23339, 23357, 23369, 23371, 23399, 23417, 23431, 23447, 23459, 23473, 23497, 23509, 23531, 23537, 23539, 23549, 23557, 23561, 23563, 23567, 23581, 23593, 23599, 23603, 23609, 23623, 23627, 23629, 23633, 23663, 23669, 23671, 23677, 23687, 23689, 23719, 23741, 23743, 23747, 23753, 23761, 23767, 23773, 23789, 23801, 23813, 23819, 23827, 23831, 23833, 23857, 23869, 23873, 23879, 23887, 23893, 23899, 23909, 23911, 23917, 23929, 23957, 23971, 23977, 23981, 23993, 24001, 24007, 24019, 24023, 24029, 24043, 24049, 24061, 24071, 24077, 24083, 24091, 24097, 24103, 24107, 24109, 24113, 24121, 24133, 24137, 24151, 24169, 24179, 24181, 24197, 24203, 24223, 24229, 24239, 24247, 24251, 24281, 24317, 24329, 24337, 24359, 24371, 24373, 24379, 24391, 24407, 24413, 24419, 24421, 24439, 24443, 24469, 24473, 24481, 24499, 24509, 24517, 24527, 24533, 24547, 24551, 24571, 24593, 24611, 24623, 24631, 24659, 24671, 24677, 24683, 24691, 24697, 24709, 24733, 24749, 
24763, 24767, 24781, 24793, 24799, 24809, 24821, 24841, 24847, 24851, 24859, 24877, 24889, 24907, 24917, 24919, 24923, 24943, 24953, 24967, 24971, 24977, 24979, 24989, 25013, 25031, 25033, 25037, 25057, 25073, 25087, 25097, 25111, 25117, 25121, 25127, 25147, 25153, 25163, 25169, 25171, 25183, 25189, 25219, 25229, 25237, 25243, 25247, 25253, 25261, 25301, 25303, 25307, 25309, 25321, 25339, 25343, 25349, 25357, 25367, 25373, 25391, 25409, 25411, 25423, 25439, 25447, 25453, 25457, 25463, 25469, 25471, 25523, 25537, 25541, 25561, 25577, 25579, 25583, 25589, 25601, 25603, 25609, 25621, 25633, 25639, 25643, 25657, 25667, 25673, 25679, 25693, 25703, 25717, 25733, 25741, 25747, 25759, 25763, 25771, 25793, 25799, 25801, 25819, 25841, 25847, 25849, 25867, 25873, 25889, 25903, 25913, 25919, 25931, 25933, 25939, 25943, 25951, 25969, 25981, 25997, 25999, 26003, 26017, 26021, 26029, 26041, 26053, 26083, 26099, 26107, 26111, 26113, 26119, 26141, 26153, 26161, 26171, 26177, 26183, 26189, 26203, 26209, 26227, 26237, 26249, 26251, 26261, 26263, 26267, 26293, 26297, 26309, 26317, 26321, 26339, 26347, 26357, 26371, 26387, 26393, 26399, 26407, 26417, 26423, 26431, 26437, 26449, 26459, 26479, 26489, 26497, 26501, 26513, 26539, 26557, 26561, 26573, 26591, 26597, 26627, 26633, 26641, 26647, 26669, 26681, 26683, 26687, 26693, 26699, 26701, 26711, 26713, 26717, 26723, 26729, 26731, 26737, 26759, 26777, 26783, 26801, 26813, 26821, 26833, 26839, 26849, 26861, 26863, 26879, 26881, 26891, 26893, 26903, 26921, 26927, 26947, 26951, 26953, 26959, 26981, 26987, 26993, 27011, 27017, 27031, 27043, 27059, 27061, 27067, 27073, 27077, 27091, 27103, 27107, 27109, 27127, 27143, 27179, 27191, 27197, 27211, 27239, 27241, 27253, 27259, 27271, 27277, 27281, 27283, 27299, 27329, 27337, 27361, 27367, 27397, 27407, 27409, 27427, 27431, 27437, 27449, 27457, 27479, 27481, 27487, 27509, 27527, 27529, 27539, 27541, 27551, 27581, 27583, 27611, 27617, 27631, 27647, 27653, 27673, 27689, 27691, 27697, 27701, 27733, 27737, 27739, 27743, 27749, 27751, 27763, 27767, 27773, 27779, 27791, 27793, 27799, 27803, 27809, 27817, 27823, 27827, 27847, 27851, 27883, 27893, 27901, 27917, 27919, 27941, 27943, 27947, 27953, 27961, 27967, 27983, 27997, 28001, 28019, 28027, 28031, 28051, 28057, 28069, 28081, 28087, 28097, 28099, 28109, 28111, 28123, 28151, 28163, 28181, 28183, 28201, 28211, 28219, 28229, 28277, 28279, 28283, 28289, 28297, 28307, 28309, 28319, 28349, 28351, 28387, 28393, 28403, 28409, 28411, 28429, 28433, 28439, 28447, 28463, 28477, 28493, 28499, 28513, 28517, 28537, 28541, 28547, 28549, 28559, 28571, 28573, 28579, 28591, 28597, 28603, 28607, 28619, 28621, 28627, 28631, 28643, 28649, 28657, 28661, 28663, 28669, 28687, 28697, 28703, 28711, 28723, 28729, 28751, 28753, 28759, 28771, 28789, 28793, 28807, 28813, 28817, 28837, 28843, 28859, 28867, 28871, 28879, 28901, 28909, 28921, 28927, 28933, 28949, 28961, 28979, 29009, 29017, 29021, 29023, 29027, 29033, 29059, 29063, 29077, 29101, 29123, 29129, 29131, 29137, 29147, 29153, 29167, 29173, 29179, 29191, 29201, 29207, 29209, 29221, 29231, 29243, 29251, 29269, 29287, 29297, 29303, 29311, 29327, 29333, 29339, 29347, 29363, 29383, 29387, 29389, 29399, 29401, 29411, 29423, 29429, 29437, 29443, 29453, 29473, 29483, 29501, 29527, 29531, 29537, 29567, 29569, 29573, 29581, 29587, 29599, 29611, 29629, 29633, 29641, 29663, 29669, 29671, 29683, 29717, 29723, 29741, 29753, 29759, 29761, 29789, 29803, 29819, 29833, 29837, 29851, 29863, 29867, 29873, 29879, 29881, 29917, 29921, 29927, 29947, 29959, 29983, 29989, 
30011, 30013, 30029, 30047, 30059, 30071, 30089, 30091, 30097, 30103, 30109, 30113, 30119, 30133, 30137, 30139, 30161, 30169, 30181, 30187, 30197, 30203, 30211, 30223, 30241, 30253, 30259, 30269, 30271, 30293, 30307, 30313, 30319, 30323, 30341, 30347, 30367, 30389, 30391, 30403, 30427, 30431, 30449, 30467, 30469, 30491, 30493, 30497, 30509, 30517, 30529, 30539, 30553, 30557, 30559, 30577, 30593, 30631, 30637, 30643, 30649, 30661, 30671, 30677, 30689, 30697, 30703, 30707, 30713, 30727, 30757, 30763, 30773, 30781, 30803, 30809, 30817, 30829, 30839, 30841, 30851, 30853, 30859, 30869, 30871, 30881, 30893, 30911, 30931, 30937, 30941, 30949, 30971, 30977, 30983, 31013, 31019, 31033, 31039, 31051, 31063, 31069, 31079, 31081, 31091, 31121, 31123, 31139, 31147, 31151, 31153, 31159, 31177, 31181, 31183, 31189, 31193, 31219, 31223, 31231, 31237, 31247, 31249, 31253, 31259, 31267, 31271, 31277, 31307, 31319, 31321, 31327, 31333, 31337, 31357, 31379, 31387, 31391, 31393, 31397, 31469, 31477, 31481, 31489, 31511, 31513, 31517, 31531, 31541, 31543, 31547, 31567, 31573, 31583, 31601, 31607, 31627, 31643, 31649, 31657, 31663, 31667, 31687, 31699, 31721, 31723, 31727, 31729, 31741, 31751, 31769, 31771, 31793, 31799, 31817, 31847, 31849, 31859, 31873, 31883, 31891, 31907, 31957, 31963, 31973, 31981, 31991, 32003, 32009, 32027, 32029, 32051, 32057, 32059, 32063, 32069, 32077, 32083, 32089, 32099, 32117, 32119, 32141, 32143, 32159, 32173, 32183, 32189, 32191, 32203, 32213, 32233, 32237, 32251, 32257, 32261, 32297, 32299, 32303, 32309, 32321, 32323, 32327, 32341, 32353, 32359, 32363, 32369, 32371, 32377, 32381, 32401, 32411, 32413, 32423, 32429, 32441, 32443, 32467, 32479, 32491, 32497, 32503, 32507, 32531, 32533, 32537, 32561, 32563, 32569, 32573, 32579, 32587, 32603, 32609, 32611, 32621, 32633, 32647, 32653, 32687, 32693, 32707, 32713, 32717, 32719, 32749, 32771, 32779, 32783, 32789, 32797, 32801, 32803, 32831, 32833, 32839, 32843, 32869, 32887, 32909, 32911, 32917, 32933, 32939, 32941, 32957, 32969, 32971, 32983, 32987, 32993, 32999, 33013, 33023, 33029, 33037, 33049, 33053, 33071, 33073, 33083, 33091, 33107, 33113, 33119, 33149, 33151, 33161, 33179, 33181, 33191, 33199, 33203, 33211, 33223, 33247, 33287, 33289, 33301, 33311, 33317, 33329, 33331, 33343, 33347, 33349, 33353, 33359, 33377, 33391, 33403, 33409, 33413, 33427, 33457, 33461, 33469, 33479, 33487, 33493, 33503, 33521, 33529, 33533, 33547, 33563, 33569, 33577, 33581, 33587, 33589, 33599, 33601, 33613, 33617, 33619, 33623, 33629, 33637, 33641, 33647, 33679, 33703, 33713, 33721, 33739, 33749, 33751, 33757, 33767, 33769, 33773, 33791, 33797, 33809, 33811, 33827, 33829, 33851, 33857, 33863, 33871, 33889, 33893, 33911, 33923, 33931, 33937, 33941, 33961, 33967, 33997, 34019, 34031, 34033, 34039, 34057, 34061, 34123, 34127, 34129, 34141, 34147, 34157, 34159, 34171, 34183, 34211, 34213, 34217, 34231, 34253, 34259, 34261, 34267, 34273, 34283, 34297, 34301, 34303, 34313, 34319, 34327, 34337, 34351, 34361, 34367, 34369, 34381, 34403, 34421, 34429, 34439, 34457, 34469, 34471, 34483, 34487, 34499, 34501, 34511, 34513, 34519, 34537, 34543, 34549, 34583, 34589, 34591, 34603, 34607, 34613, 34631, 34649, 34651, 34667, 34673, 34679, 34687, 34693, 34703, 34721, 34729, 34739, 34747, 34757, 34759, 34763, 34781, 34807, 34819, 34841, 34843, 34847, 34849, 34871, 34877, 34883, 34897, 34913, 34919, 34939, 34949, 34961, 34963, 34981, 35023, 35027, 35051, 35053, 35059, 35069, 35081, 35083, 35089, 35099, 35107, 35111, 35117, 35129, 35141, 35149, 35153, 35159, 35171, 35201, 
35221, 35227, 35251, 35257, 35267, 35279, 35281, 35291, 35311, 35317, 35323, 35327, 35339, 35353, 35363, 35381, 35393, 35401, 35407, 35419, 35423, 35437, 35447, 35449, 35461, 35491, 35507, 35509, 35521, 35527, 35531, 35533, 35537, 35543, 35569, 35573, 35591, 35593, 35597, 35603, 35617, 35671, 35677, 35729, 35731, 35747, 35753, 35759, 35771, 35797, 35801, 35803, 35809, 35831, 35837, 35839, 35851, 35863, 35869, 35879, 35897, 35899, 35911, 35923, 35933, 35951, 35963, 35969, 35977, 35983, 35993, 35999, 36007, 36011, 36013, 36017, 36037, 36061, 36067, 36073, 36083, 36097, 36107, 36109, 36131, 36137, 36151, 36161, 36187, 36191, 36209, 36217, 36229, 36241, 36251, 36263, 36269, 36277, 36293, 36299, 36307, 36313, 36319, 36341, 36343, 36353, 36373, 36383, 36389, 36433, 36451, 36457, 36467, 36469, 36473, 36479, 36493, 36497, 36523, 36527, 36529, 36541, 36551, 36559, 36563, 36571, 36583, 36587, 36599, 36607, 36629, 36637, 36643, 36653, 36671, 36677, 36683, 36691, 36697, 36709, 36713, 36721, 36739, 36749, 36761, 36767, 36779, 36781, 36787, 36791, 36793, 36809, 36821, 36833, 36847, 36857, 36871, 36877, 36887, 36899, 36901, 36913, 36919, 36923, 36929, 36931, 36943, 36947, 36973, 36979, 36997, 37003, 37013, 37019, 37021, 37039, 37049, 37057, 37061, 37087, 37097, 37117, 37123, 37139, 37159, 37171, 37181, 37189, 37199, 37201, 37217, 37223, 37243, 37253, 37273, 37277, 37307, 37309, 37313, 37321, 37337, 37339, 37357, 37361, 37363, 37369, 37379, 37397, 37409, 37423, 37441, 37447, 37463, 37483, 37489, 37493, 37501, 37507, 37511, 37517, 37529, 37537, 37547, 37549, 37561, 37567, 37571, 37573, 37579, 37589, 37591, 37607, 37619, 37633, 37643, 37649, 37657, 37663, 37691, 37693, 37699, 37717, 37747, 37781, 37783, 37799, 37811, 37813, 37831, 37847, 37853, 37861, 37871, 37879, 37889, 37897, 37907, 37951, 37957, 37963, 37967, 37987, 37991, 37993, 37997, 38011, 38039, 38047, 38053, 38069, 38083, 38113, 38119, 38149, 38153, 38167, 38177, 38183, 38189, 38197, 38201, 38219, 38231, 38237, 38239, 38261, 38273, 38281, 38287, 38299, 38303, 38317, 38321, 38327, 38329, 38333, 38351, 38371, 38377, 38393, 38431, 38447, 38449, 38453, 38459, 38461, 38501, 38543, 38557, 38561, 38567, 38569, 38593, 38603, 38609, 38611, 38629, 38639, 38651, 38653, 38669, 38671, 38677, 38693, 38699, 38707, 38711, 38713, 38723, 38729, 38737, 38747, 38749, 38767, 38783, 38791, 38803, 38821, 38833, 38839, 38851, 38861, 38867, 38873, 38891, 38903, 38917, 38921, 38923, 38933, 38953, 38959, 38971, 38977, 38993, 39019, 39023, 39041, 39043, 39047, 39079, 39089, 39097, 39103, 39107, 39113, 39119, 39133, 39139, 39157, 39161, 39163, 39181, 39191, 39199, 39209, 39217, 39227, 39229, 39233, 39239, 39241, 39251, 39293, 39301, 39313, 39317, 39323, 39341, 39343, 39359, 39367, 39371, 39373, 39383, 39397, 39409, 39419, 39439, 39443, 39451, 39461, 39499, 39503, 39509, 39511, 39521, 39541, 39551, 39563, 39569, 39581, 39607, 39619, 39623, 39631, 39659, 39667, 39671, 39679, 39703, 39709, 39719, 39727, 39733, 39749, 39761, 39769, 39779, 39791, 39799, 39821, 39827, 39829, 39839, 39841, 39847, 39857, 39863, 39869, 39877, 39883, 39887, 39901, 39929, 39937, 39953, 39971, 39979, 39983, 39989, 40009, 40013, 40031, 40037, 40039, 40063, 40087, 40093, 40099, 40111, 40123, 40127, 40129, 40151, 40153, 40163, 40169, 40177, 40189, 40193, 40213, 40231, 40237, 40241, 40253, 40277, 40283, 40289, 40343, 40351, 40357, 40361, 40387, 40423, 40427, 40429, 40433, 40459, 40471, 40483, 40487, 40493, 40499, 40507, 40519, 40529, 40531, 40543, 40559, 40577, 40583, 40591, 40597, 40609, 40627, 40637, 
40639, 40693, 40697, 40699, 40709, 40739, 40751, 40759, 40763, 40771, 40787, 40801, 40813, 40819, 40823, 40829, 40841, 40847, 40849, 40853, 40867, 40879, 40883, 40897, 40903, 40927, 40933, 40939, 40949, 40961, 40973, 40993, 41011, 41017, 41023, 41039, 41047, 41051, 41057, 41077, 41081, 41113, 41117, 41131, 41141, 41143, 41149, 41161, 41177, 41179, 41183, 41189, 41201, 41203, 41213, 41221, 41227, 41231, 41233, 41243, 41257, 41263, 41269, 41281, 41299, 41333, 41341, 41351, 41357, 41381, 41387, 41389, 41399, 41411, 41413, 41443, 41453, 41467, 41479, 41491, 41507, 41513, 41519, 41521, 41539, 41543, 41549, 41579, 41593, 41597, 41603, 41609, 41611, 41617, 41621, 41627, 41641, 41647, 41651, 41659, 41669, 41681, 41687, 41719, 41729, 41737, 41759, 41761, 41771, 41777, 41801, 41809, 41813, 41843, 41849, 41851, 41863, 41879, 41887, 41893, 41897, 41903, 41911, 41927, 41941, 41947, 41953, 41957, 41959, 41969, 41981, 41983, 41999, 42013, 42017, 42019, 42023, 42043, 42061, 42071, 42073, 42083, 42089, 42101, 42131, 42139, 42157, 42169, 42179, 42181, 42187, 42193, 42197, 42209, 42221, 42223, 42227, 42239, 42257, 42281, 42283, 42293, 42299, 42307, 42323, 42331, 42337, 42349, 42359, 42373, 42379, 42391, 42397, 42403, 42407, 42409, 42433, 42437, 42443, 42451, 42457, 42461, 42463, 42467, 42473, 42487, 42491, 42499, 42509, 42533, 42557, 42569, 42571, 42577, 42589, 42611, 42641, 42643, 42649, 42667, 42677, 42683, 42689, 42697, 42701, 42703, 42709, 42719, 42727, 42737, 42743, 42751, 42767, 42773, 42787, 42793, 42797, 42821, 42829, 42839, 42841, 42853, 42859, 42863, 42899, 42901, 42923, 42929, 42937, 42943, 42953, 42961, 42967, 42979, 42989, 43003, 43013, 43019, 43037, 43049, 43051, 43063, 43067, 43093, 43103, 43117, 43133, 43151, 43159, 43177, 43189, 43201, 43207, 43223, 43237, 43261, 43271, 43283, 43291, 43313, 43319, 43321, 43331, 43391, 43397, 43399, 43403, 43411, 43427, 43441, 43451, 43457, 43481, 43487, 43499, 43517, 43541, 43543, 43573, 43577, 43579, 43591, 43597, 43607, 43609, 43613, 43627, 43633, 43649, 43651, 43661, 43669, 43691, 43711, 43717, 43721, 43753, 43759, 43777, 43781, 43783, 43787, 43789, 43793, 43801, 43853, 43867, 43889, 43891, 43913, 43933, 43943, 43951, 43961, 43963, 43969, 43973, 43987, 43991, 43997, 44017, 44021, 44027, 44029, 44041, 44053, 44059, 44071, 44087, 44089, 44101, 44111, 44119, 44123, 44129, 44131, 44159, 44171, 44179, 44189, 44201, 44203, 44207, 44221, 44249, 44257, 44263, 44267, 44269, 44273, 44279, 44281, 44293, 44351, 44357, 44371, 44381, 44383, 44389, 44417, 44449, 44453, 44483, 44491, 44497, 44501, 44507, 44519, 44531, 44533, 44537, 44543, 44549, 44563, 44579, 44587, 44617, 44621, 44623, 44633, 44641, 44647, 44651, 44657, 44683, 44687, 44699, 44701, 44711, 44729, 44741, 44753, 44771, 44773, 44777, 44789, 44797, 44809, 44819, 44839, 44843, 44851, 44867, 44879, 44887, 44893, 44909, 44917, 44927, 44939, 44953, 44959, 44963, 44971, 44983, 44987, 45007, 45013, 45053, 45061, 45077, 45083, 45119, 45121, 45127, 45131, 45137, 45139, 45161, 45179, 45181, 45191, 45197, 45233, 45247, 45259, 45263, 45281, 45289, 45293, 45307, 45317, 45319, 45329, 45337, 45341, 45343, 45361, 45377, 45389, 45403, 45413, 45427, 45433, 45439, 45481, 45491, 45497, 45503, 45523, 45533, 45541, 45553, 45557, 45569, 45587, 45589, 45599, 45613, 45631, 45641, 45659, 45667, 45673, 45677, 45691, 45697, 45707, 45737, 45751, 45757, 45763, 45767, 45779, 45817, 45821, 45823, 45827, 45833, 45841, 45853, 45863, 45869, 45887, 45893, 45943, 45949, 45953, 45959, 45971, 45979, 45989, 46021, 46027, 46049, 46051, 46061, 
46073, 46091, 46093, 46099, 46103, 46133, 46141, 46147, 46153, 46171, 46181, 46183, 46187, 46199, 46219, 46229, 46237, 46261, 46271, 46273, 46279, 46301, 46307, 46309, 46327, 46337, 46349, 46351, 46381, 46399, 46411, 46439, 46441, 46447, 46451, 46457, 46471, 46477, 46489, 46499, 46507, 46511, 46523, 46549, 46559, 46567, 46573, 46589, 46591, 46601, 46619, 46633, 46639, 46643, 46649, 46663, 46679, 46681, 46687, 46691, 46703, 46723, 46727, 46747, 46751, 46757, 46769, 46771, 46807, 46811, 46817, 46819, 46829, 46831, 46853, 46861, 46867, 46877, 46889, 46901, 46919, 46933, 46957, 46993, 46997, 47017, 47041, 47051, 47057, 47059, 47087, 47093, 47111, 47119, 47123, 47129, 47137, 47143, 47147, 47149, 47161, 47189, 47207, 47221, 47237, 47251, 47269, 47279, 47287, 47293, 47297, 47303, 47309, 47317, 47339, 47351, 47353, 47363, 47381, 47387, 47389, 47407, 47417, 47419, 47431, 47441, 47459, 47491, 47497, 47501, 47507, 47513, 47521, 47527, 47533, 47543, 47563, 47569, 47581, 47591, 47599, 47609, 47623, 47629, 47639, 47653, 47657, 47659, 47681, 47699, 47701, 47711, 47713, 47717, 47737, 47741, 47743, 47777, 47779, 47791, 47797, 47807, 47809, 47819, 47837, 47843, 47857, 47869, 47881, 47903, 47911, 47917, 47933, 47939, 47947, 47951, 47963, 47969, 47977, 47981, 48017, 48023, 48029, 48049, 48073, 48079, 48091, 48109, 48119, 48121, 48131, 48157, 48163, 48179, 48187, 48193, 48197, 48221, 48239, 48247, 48259, 48271, 48281, 48299, 48311, 48313, 48337, 48341, 48353, 48371, 48383, 48397, 48407, 48409, 48413, 48437, 48449, 48463, 48473, 48479, 48481, 48487, 48491, 48497, 48523, 48527, 48533, 48539, 48541, 48563, 48571, 48589, 48593, 48611, 48619, 48623, 48647, 48649, 48661, 48673, 48677, 48679, 48731, 48733, 48751, 48757, 48761, 48767, 48779, 48781, 48787, 48799, 48809, 48817, 48821, 48823, 48847, 48857, 48859, 48869, 48871, 48883, 48889, 48907, 48947, 48953, 48973, 48989, 48991, 49003, 49009, 49019, 49031, 49033, 49037, 49043, 49057, 49069, 49081, 49103, 49109, 49117, 49121, 49123, 49139, 49157, 49169, 49171, 49177, 49193, 49199, 49201, 49207, 49211, 49223, 49253, 49261, 49277, 49279, 49297, 49307, 49331, 49333, 49339, 49363, 49367, 49369, 49391, 49393, 49409, 49411, 49417, 49429, 49433, 49451, 49459, 49463, 49477, 49481, 49499, 49523, 49529, 49531, 49537, 49547, 49549, 49559, 49597, 49603, 49613, 49627, 49633, 49639, 49663, 49667, 49669, 49681, 49697, 49711, 49727, 49739, 49741, 49747, 49757, 49783, 49787, 49789, 49801, 49807, 49811, 49823, 49831, 49843, 49853, 49871, 49877, 49891, 49919, 49921, 49927, 49937, 49939, 49943, 49957, 49991, 49993, 49999, 50021, 50023, 50033, 50047, 50051, 50053, 50069, 50077, 50087, 50093, 50101, 50111, 50119, 50123, 50129, 50131, 50147, 50153, 50159, 50177, 50207, 50221, 50227, 50231, 50261, 50263, 50273, 50287, 50291, 50311, 50321, 50329, 50333, 50341, 50359, 50363, 50377, 50383, 50387, 50411, 50417, 50423, 50441, 50459, 50461, 50497, 50503, 50513, 50527, 50539, 50543, 50549, 50551, 50581, 50587, 50591, 50593, 50599, 50627, 50647, 50651, 50671, 50683, 50707, 50723, 50741, 50753, 50767, 50773, 50777, 50789, 50821, 50833, 50839, 50849, 50857, 50867, 50873, 50891, 50893, 50909, 50923, 50929, 50951, 50957, 50969, 50971, 50989, 50993, 51001, 51031, 51043, 51047, 51059, 51061, 51071, 51109, 51131, 51133, 51137, 51151, 51157, 51169, 51193, 51197, 51199, 51203, 51217, 51229, 51239, 51241, 51257, 51263, 51283, 51287, 51307, 51329, 51341, 51343, 51347, 51349, 51361, 51383, 51407, 51413, 51419, 51421, 51427, 51431, 51437, 51439, 51449, 51461, 51473, 51479, 51481, 51487, 51503, 51511, 51517, 
51521, 51539, 51551, 51563, 51577, 51581, 51593, 51599, 51607, 51613, 51631, 51637, 51647, 51659, 51673, 51679, 51683, 51691, 51713, 51719, 51721, 51749, 51767, 51769, 51787, 51797, 51803, 51817, 51827, 51829, 51839, 51853, 51859, 51869, 51871, 51893, 51899, 51907, 51913, 51929, 51941, 51949, 51971, 51973, 51977, 51991, 52009, 52021, 52027, 52051, 52057, 52067, 52069, 52081, 52103, 52121, 52127, 52147, 52153, 52163, 52177, 52181, 52183, 52189, 52201, 52223, 52237, 52249, 52253, 52259, 52267, 52289, 52291, 52301, 52313, 52321, 52361, 52363, 52369, 52379, 52387, 52391, 52433, 52453, 52457, 52489, 52501, 52511, 52517, 52529, 52541, 52543, 52553, 52561, 52567, 52571, 52579, 52583, 52609, 52627, 52631, 52639, 52667, 52673, 52691, 52697, 52709, 52711, 52721, 52727, 52733, 52747, 52757, 52769, 52783, 52807, 52813, 52817, 52837, 52859, 52861, 52879, 52883, 52889, 52901, 52903, 52919, 52937, 52951, 52957, 52963, 52967, 52973, 52981, 52999, 53003, 53017, 53047, 53051, 53069, 53077, 53087, 53089, 53093, 53101, 53113, 53117, 53129, 53147, 53149, 53161, 53171, 53173, 53189, 53197, 53201, 53231, 53233, 53239, 53267, 53269, 53279, 53281, 53299, 53309, 53323, 53327, 53353, 53359, 53377, 53381, 53401, 53407, 53411, 53419, 53437, 53441, 53453, 53479, 53503, 53507, 53527, 53549, 53551, 53569, 53591, 53593, 53597, 53609, 53611, 53617, 53623, 53629, 53633, 53639, 53653, 53657, 53681, 53693, 53699, 53717, 53719, 53731, 53759, 53773, 53777, 53783, 53791, 53813, 53819, 53831, 53849, 53857, 53861, 53881, 53887, 53891, 53897, 53899, 53917, 53923, 53927, 53939, 53951, 53959, 53987, 53993, 54001, 54011, 54013, 54037, 54049, 54059, 54083, 54091, 54101, 54121, 54133, 54139, 54151, 54163, 54167, 54181, 54193, 54217, 54251, 54269, 54277, 54287, 54293, 54311, 54319, 54323, 54331, 54347, 54361, 54367, 54371, 54377, 54401, 54403, 54409, 54413, 54419, 54421, 54437, 54443, 54449, 54469, 54493, 54497, 54499, 54503, 54517, 54521, 54539, 54541, 54547, 54559, 54563, 54577, 54581, 54583, 54601, 54617, 54623, 54629, 54631, 54647, 54667, 54673, 54679, 54709, 54713, 54721, 54727, 54751, 54767, 54773, 54779, 54787, 54799, 54829, 54833, 54851, 54869, 54877, 54881, 54907, 54917, 54919, 54941, 54949, 54959, 54973, 54979, 54983, 55001, 55009, 55021, 55049, 55051, 55057, 55061, 55073, 55079, 55103, 55109, 55117, 55127, 55147, 55163, 55171, 55201, 55207, 55213, 55217, 55219, 55229, 55243, 55249, 55259, 55291, 55313, 55331, 55333, 55337, 55339, 55343, 55351, 55373, 55381, 55399, 55411, 55439, 55441, 55457, 55469, 55487, 55501, 55511, 55529, 55541, 55547, 55579, 55589, 55603, 55609, 55619, 55621, 55631, 55633, 55639, 55661, 55663, 55667, 55673, 55681, 55691, 55697, 55711, 55717, 55721, 55733, 55763, 55787, 55793, 55799, 55807, 55813, 55817, 55819, 55823, 55829, 55837, 55843, 55849, 55871, 55889, 55897, 55901, 55903, 55921, 55927, 55931, 55933, 55949, 55967, 55987, 55997, 56003, 56009, 56039, 56041, 56053, 56081, 56087, 56093, 56099, 56101, 56113, 56123, 56131, 56149, 56167, 56171, 56179, 56197, 56207, 56209, 56237, 56239, 56249, 56263, 56267, 56269, 56299, 56311, 56333, 56359, 56369, 56377, 56383, 56393, 56401, 56417, 56431, 56437, 56443, 56453, 56467, 56473, 56477, 56479, 56489, 56501, 56503, 56509, 56519, 56527, 56531, 56533, 56543, 56569, 56591, 56597, 56599, 56611, 56629, 56633, 56659, 56663, 56671, 56681, 56687, 56701, 56711, 56713, 56731, 56737, 56747, 56767, 56773, 56779, 56783, 56807, 56809, 56813, 56821, 56827, 56843, 56857, 56873, 56891, 56893, 56897, 56909, 56911, 56921, 56923, 56929, 56941, 56951, 56957, 56963, 56983, 56989, 
56993, 56999, 57037, 57041, 57047, 57059, 57073, 57077, 57089, 57097, 57107, 57119, 57131, 57139, 57143, 57149, 57163, 57173, 57179, 57191, 57193, 57203, 57221, 57223, 57241, 57251, 57259, 57269, 57271, 57283, 57287, 57301, 57329, 57331, 57347, 57349, 57367, 57373, 57383, 57389, 57397, 57413, 57427, 57457, 57467, 57487, 57493, 57503, 57527, 57529, 57557, 57559, 57571, 57587, 57593, 57601, 57637, 57641, 57649, 57653, 57667, 57679, 57689, 57697, 57709, 57713, 57719, 57727, 57731, 57737, 57751, 57773, 57781, 57787, 57791, 57793, 57803, 57809, 57829, 57839, 57847, 57853, 57859, 57881, 57899, 57901, 57917, 57923, 57943, 57947, 57973, 57977, 57991, 58013, 58027, 58031, 58043, 58049, 58057, 58061, 58067, 58073, 58099, 58109, 58111, 58129, 58147, 58151, 58153, 58169, 58171, 58189, 58193, 58199, 58207, 58211, 58217, 58229, 58231, 58237, 58243, 58271, 58309, 58313, 58321, 58337, 58363, 58367, 58369, 58379, 58391, 58393, 58403, 58411, 58417, 58427, 58439, 58441, 58451, 58453, 58477, 58481, 58511, 58537, 58543, 58549, 58567, 58573, 58579, 58601, 58603, 58613, 58631, 58657, 58661, 58679, 58687, 58693, 58699, 58711, 58727, 58733, 58741, 58757, 58763, 58771, 58787, 58789, 58831, 58889, 58897, 58901, 58907, 58909, 58913, 58921, 58937, 58943, 58963, 58967, 58979, 58991, 58997, 59009, 59011, 59021, 59023, 59029, 59051, 59053, 59063, 59069, 59077, 59083, 59093, 59107, 59113, 59119, 59123, 59141, 59149, 59159, 59167, 59183, 59197, 59207, 59209, 59219, 59221, 59233, 59239, 59243, 59263, 59273, 59281, 59333, 59341, 59351, 59357, 59359, 59369, 59377, 59387, 59393, 59399, 59407, 59417, 59419, 59441, 59443, 59447, 59453, 59467, 59471, 59473, 59497, 59509, 59513, 59539, 59557, 59561, 59567, 59581, 59611, 59617, 59621, 59627, 59629, 59651, 59659, 59663, 59669, 59671, 59693, 59699, 59707, 59723, 59729, 59743, 59747, 59753, 59771, 59779, 59791, 59797, 59809, 59833, 59863, 59879, 59887, 59921, 59929, 59951, 59957, 59971, 59981, 59999, 60013, 60017, 60029, 60037, 60041, 60077, 60083, 60089, 60091, 60101, 60103, 60107, 60127, 60133, 60139, 60149, 60161, 60167, 60169, 60209, 60217, 60223, 60251, 60257, 60259, 60271, 60289, 60293, 60317, 60331, 60337, 60343, 60353, 60373, 60383, 60397, 60413, 60427, 60443, 60449, 60457, 60493, 60497, 60509, 60521, 60527, 60539, 60589, 60601, 60607, 60611, 60617, 60623, 60631, 60637, 60647, 60649, 60659, 60661, 60679, 60689, 60703, 60719, 60727, 60733, 60737, 60757, 60761, 60763, 60773, 60779, 60793, 60811, 60821, 60859, 60869, 60887, 60889, 60899, 60901, 60913, 60917, 60919, 60923, 60937, 60943, 60953, 60961, 61001, 61007, 61027, 61031, 61043, 61051, 61057, 61091, 61099, 61121, 61129, 61141, 61151, 61153, 61169, 61211, 61223, 61231, 61253, 61261, 61283, 61291, 61297, 61331, 61333, 61339, 61343, 61357, 61363, 61379, 61381, 61403, 61409, 61417, 61441, 61463, 61469, 61471, 61483, 61487, 61493, 61507, 61511, 61519, 61543, 61547, 61553, 61559, 61561, 61583, 61603, 61609, 61613, 61627, 61631, 61637, 61643, 61651, 61657, 61667, 61673, 61681, 61687, 61703, 61717, 61723, 61729, 61751, 61757, 61781, 61813, 61819, 61837, 61843, 61861, 61871, 61879, 61909, 61927, 61933, 61949, 61961, 61967, 61979, 61981, 61987, 61991, 62003, 62011, 62017, 62039, 62047, 62053, 62057, 62071, 62081, 62099, 62119, 62129, 62131, 62137, 62141, 62143, 62171, 62189, 62191, 62201, 62207, 62213, 62219, 62233, 62273, 62297, 62299, 62303, 62311, 62323, 62327, 62347, 62351, 62383, 62401, 62417, 62423, 62459, 62467, 62473, 62477, 62483, 62497, 62501, 62507, 62533, 62539, 62549, 62563, 62581, 62591, 62597, 62603, 62617, 62627, 
62633, 62639, 62653, 62659, 62683, 62687, 62701, 62723, 62731, 62743, 62753, 62761, 62773, 62791, 62801, 62819, 62827, 62851, 62861, 62869, 62873, 62897, 62903, 62921, 62927, 62929, 62939, 62969, 62971, 62981, 62983, 62987, 62989, 63029, 63031, 63059, 63067, 63073, 63079, 63097, 63103, 63113, 63127, 63131, 63149, 63179, 63197, 63199, 63211, 63241, 63247, 63277, 63281, 63299, 63311, 63313, 63317, 63331, 63337, 63347, 63353, 63361, 63367, 63377, 63389, 63391, 63397, 63409, 63419, 63421, 63439, 63443, 63463, 63467, 63473, 63487, 63493, 63499, 63521, 63527, 63533, 63541, 63559, 63577, 63587, 63589, 63599, 63601, 63607, 63611, 63617, 63629, 63647, 63649, 63659, 63667, 63671, 63689, 63691, 63697, 63703, 63709, 63719, 63727, 63737, 63743, 63761, 63773, 63781, 63793, 63799, 63803, 63809, 63823, 63839, 63841, 63853, 63857, 63863, 63901, 63907, 63913, 63929, 63949, 63977, 63997, 64007, 64013, 64019, 64033, 64037, 64063, 64067, 64081, 64091, 64109, 64123, 64151, 64153, 64157, 64171, 64187, 64189, 64217, 64223, 64231, 64237, 64271, 64279, 64283, 64301, 64303, 64319, 64327, 64333, 64373, 64381, 64399, 64403, 64433, 64439, 64451, 64453, 64483, 64489, 64499, 64513, 64553, 64567, 64577, 64579, 64591, 64601, 64609, 64613, 64621, 64627, 64633, 64661, 64663, 64667, 64679, 64693, 64709, 64717, 64747, 64763, 64781, 64783, 64793, 64811, 64817, 64849, 64853, 64871, 64877, 64879, 64891, 64901, 64919, 64921, 64927, 64937, 64951, 64969, 64997, 65003, 65011, 65027, 65029, 65033, 65053, 65063, 65071, 65089, 65099, 65101, 65111, 65119, 65123, 65129, 65141, 65147, 65167, 65171, 65173, 65179, 65183, 65203, 65213, 65239, 65257, 65267, 65269, 65287, 65293, 65309, 65323, 65327, 65353, 65357, 65371, 65381, 65393, 65407, 65413, 65419, 65423, 65437, 65447, 65449, 65479, 65497, 65519, 65521, 65537, 65539, 65543, 65551, 65557, 65563, 65579, 65581, 65587, 65599, 65609, 65617, 65629, 65633, 65647, 65651, 65657, 65677, 65687, 65699, 65701, 65707, 65713, 65717, 65719, 65729, 65731, 65761, 65777, 65789, 65809, 65827, 65831, 65837, 65839, 65843, 65851, 65867, 65881, 65899, 65921, 65927, 65929, 65951, 65957, 65963, 65981, 65983, 65993, 66029, 66037, 66041, 66047, 66067, 66071, 66083, 66089, 66103, 66107, 66109, 66137, 66161, 66169, 66173, 66179, 66191, 66221, 66239, 66271, 66293, 66301, 66337, 66343, 66347, 66359, 66361, 66373, 66377, 66383, 66403, 66413, 66431, 66449, 66457, 66463, 66467, 66491, 66499, 66509, 66523, 66529, 66533, 66541, 66553, 66569, 66571, 66587, 66593, 66601, 66617, 66629, 66643, 66653, 66683, 66697, 66701, 66713, 66721, 66733, 66739, 66749, 66751, 66763, 66791, 66797, 66809, 66821, 66841, 66851, 66853, 66863, 66877, 66883, 66889, 66919, 66923, 66931, 66943, 66947, 66949, 66959, 66973, 66977, 67003, 67021, 67033, 67043, 67049, 67057, 67061, 67073, 67079, 67103, 67121, 67129, 67139, 67141, 67153, 67157, 67169, 67181, 67187, 67189, 67211, 67213, 67217, 67219, 67231, 67247, 67261, 67271, 67273, 67289, 67307, 67339, 67343, 67349, 67369, 67391, 67399, 67409, 67411, 67421, 67427, 67429, 67433, 67447, 67453, 67477, 67481, 67489, 67493, 67499, 67511, 67523, 67531, 67537, 67547, 67559, 67567, 67577, 67579, 67589, 67601, 67607, 67619, 67631, 67651, 67679, 67699, 67709, 67723, 67733, 67741, 67751, 67757, 67759, 67763, 67777, 67783, 67789, 67801, 67807, 67819, 67829, 67843, 67853, 67867, 67883, 67891, 67901, 67927, 67931, 67933, 67939, 67943, 67957, 67961, 67967, 67979, 67987, 67993, 68023, 68041, 68053, 68059, 68071, 68087, 68099, 68111, 68113, 68141, 68147, 68161, 68171, 68207, 68209, 68213, 68219, 68227, 68239, 68261, 
68279, 68281, 68311, 68329, 68351, 68371, 68389, 68399, 68437, 68443, 68447, 68449, 68473, 68477, 68483, 68489, 68491, 68501, 68507, 68521, 68531, 68539, 68543, 68567, 68581, 68597, 68611, 68633, 68639, 68659, 68669, 68683, 68687, 68699, 68711, 68713, 68729, 68737, 68743, 68749, 68767, 68771, 68777, 68791, 68813, 68819, 68821, 68863, 68879, 68881, 68891, 68897, 68899, 68903, 68909, 68917, 68927, 68947, 68963, 68993, 69001, 69011, 69019, 69029, 69031, 69061, 69067, 69073, 69109, 69119, 69127, 69143, 69149, 69151, 69163, 69191, 69193, 69197, 69203, 69221, 69233, 69239, 69247, 69257, 69259, 69263, 69313, 69317, 69337, 69341, 69371, 69379, 69383, 69389, 69401, 69403, 69427, 69431, 69439, 69457, 69463, 69467, 69473, 69481, 69491, 69493, 69497, 69499, 69539, 69557, 69593, 69623, 69653, 69661, 69677, 69691, 69697, 69709, 69737, 69739, 69761, 69763, 69767, 69779, 69809, 69821, 69827, 69829, 69833, 69847, 69857, 69859, 69877, 69899, 69911, 69929, 69931, 69941, 69959, 69991, 69997, 70001, 70003, 70009, 70019, 70039, 70051, 70061, 70067, 70079, 70099, 70111, 70117, 70121, 70123, 70139, 70141, 70157, 70163, 70177, 70181, 70183, 70199, 70201, 70207, 70223, 70229, 70237, 70241, 70249, 70271, 70289, 70297, 70309, 70313, 70321, 70327, 70351, 70373, 70379, 70381, 70393, 70423, 70429, 70439, 70451, 70457, 70459, 70481, 70487, 70489, 70501, 70507, 70529, 70537, 70549, 70571, 70573, 70583, 70589, 70607, 70619, 70621, 70627, 70639, 70657, 70663, 70667, 70687, 70709, 70717, 70729, 70753, 70769, 70783, 70793, 70823, 70841, 70843, 70849, 70853, 70867, 70877, 70879, 70891, 70901, 70913, 70919, 70921, 70937, 70949, 70951, 70957, 70969, 70979, 70981, 70991, 70997, 70999, 71011, 71023, 71039, 71059, 71069, 71081, 71089, 71119, 71129, 71143, 71147, 71153, 71161, 71167, 71171, 71191, 71209, 71233, 71237, 71249, 71257, 71261, 71263, 71287, 71293, 71317, 71327, 71329, 71333, 71339, 71341, 71347, 71353, 71359, 71363, 71387, 71389, 71399, 71411, 71413, 71419, 71429, 71437, 71443, 71453, 71471, 71473, 71479, 71483, 71503, 71527, 71537, 71549, 71551, 71563, 71569, 71593, 71597, 71633, 71647, 71663, 71671, 71693, 71699, 71707, 71711, 71713, 71719, 71741, 71761, 71777, 71789, 71807, 71809, 71821, 71837, 71843, 71849, 71861, 71867, 71879, 71881, 71887, 71899, 71909, 71917, 71933, 71941, 71947, 71963, 71971, 71983, 71987, 71993, 71999, 72019, 72031, 72043, 72047, 72053, 72073, 72077, 72089, 72091, 72101, 72103, 72109, 72139, 72161, 72167, 72169, 72173, 72211, 72221, 72223, 72227, 72229, 72251, 72253, 72269, 72271, 72277, 72287, 72307, 72313, 72337, 72341, 72353, 72367, 72379, 72383, 72421, 72431, 72461, 72467, 72469, 72481, 72493, 72497, 72503, 72533, 72547, 72551, 72559, 72577, 72613, 72617, 72623, 72643, 72647, 72649, 72661, 72671, 72673, 72679, 72689, 72701, 72707, 72719, 72727, 72733, 72739, 72763, 72767, 72797, 72817, 72823, 72859, 72869, 72871, 72883, 72889, 72893, 72901, 72907, 72911, 72923, 72931, 72937, 72949, 72953, 72959, 72973, 72977, 72997, 73009, 73013, 73019, 73037, 73039, 73043, 73061, 73063, 73079, 73091, 73121, 73127, 73133, 73141, 73181, 73189, 73237, 73243, 73259, 73277, 73291, 73303, 73309, 73327, 73331, 73351, 73361, 73363, 73369, 73379, 73387, 73417, 73421, 73433, 73453, 73459, 73471, 73477, 73483, 73517, 73523, 73529, 73547, 73553, 73561, 73571, 73583, 73589, 73597, 73607, 73609, 73613, 73637, 73643, 73651, 73673, 73679, 73681, 73693, 73699, 73709, 73721, 73727, 73751, 73757, 73771, 73783, 73819, 73823, 73847, 73849, 73859, 73867, 73877, 73883, 73897, 73907, 73939, 73943, 73951, 73961, 73973, 73999, 
74017, 74021, 74027, 74047, 74051, 74071, 74077, 74093, 74099, 74101, 74131, 74143, 74149, 74159, 74161, 74167, 74177, 74189, 74197, 74201, 74203, 74209, 74219, 74231, 74257, 74279, 74287, 74293, 74297, 74311, 74317, 74323, 74353, 74357, 74363, 74377, 74381, 74383, 74411, 74413, 74419, 74441, 74449, 74453, 74471, 74489, 74507, 74509, 74521, 74527, 74531, 74551, 74561, 74567, 74573, 74587, 74597, 74609, 74611, 74623, 74653, 74687, 74699, 74707, 74713, 74717, 74719, 74729, 74731, 74747, 74759, 74761, 74771, 74779, 74797, 74821, 74827, 74831, 74843, 74857, 74861, 74869, 74873, 74887, 74891, 74897, 74903, 74923, 74929, 74933, 74941, 74959, 75011, 75013, 75017, 75029, 75037, 75041, 75079, 75083, 75109, 75133, 75149, 75161, 75167, 75169, 75181, 75193, 75209, 75211, 75217, 75223, 75227, 75239, 75253, 75269, 75277, 75289, 75307, 75323, 75329, 75337, 75347, 75353, 75367, 75377, 75389, 75391, 75401, 75403, 75407, 75431, 75437, 75479, 75503, 75511, 75521, 75527, 75533, 75539, 75541, 75553, 75557, 75571, 75577, 75583, 75611, 75617, 75619, 75629, 75641, 75653, 75659, 75679, 75683, 75689, 75703, 75707, 75709, 75721, 75731, 75743, 75767, 75773, 75781, 75787, 75793, 75797, 75821, 75833, 75853, 75869, 75883, 75913, 75931, 75937, 75941, 75967, 75979, 75983, 75989, 75991, 75997, 76001, 76003, 76031, 76039, 76079, 76081, 76091, 76099, 76103, 76123, 76129, 76147, 76157, 76159, 76163, 76207, 76213, 76231, 76243, 76249, 76253, 76259, 76261, 76283, 76289, 76303, 76333, 76343, 76367, 76369, 76379, 76387, 76403, 76421, 76423, 76441, 76463, 76471, 76481, 76487, 76493, 76507, 76511, 76519, 76537, 76541, 76543, 76561, 76579, 76597, 76603, 76607, 76631, 76649, 76651, 76667, 76673, 76679, 76697, 76717, 76733, 76753, 76757, 76771, 76777, 76781, 76801, 76819, 76829, 76831, 76837, 76847, 76871, 76873, 76883, 76907, 76913, 76919, 76943, 76949, 76961, 76963, 76991, 77003, 77017, 77023, 77029, 77041, 77047, 77069, 77081, 77093, 77101, 77137, 77141, 77153, 77167, 77171, 77191, 77201, 77213, 77237, 77239, 77243, 77249, 77261, 77263, 77267, 77269, 77279, 77291, 77317, 77323, 77339, 77347, 77351, 77359, 77369, 77377, 77383, 77417, 77419, 77431, 77447, 77471, 77477, 77479, 77489, 77491, 77509, 77513, 77521, 77527, 77543, 77549, 77551, 77557, 77563, 77569, 77573, 77587, 77591, 77611, 77617, 77621, 77641, 77647, 77659, 77681, 77687, 77689, 77699, 77711, 77713, 77719, 77723, 77731, 77743, 77747, 77761, 77773, 77783, 77797, 77801, 77813, 77839, 77849, 77863, 77867, 77893, 77899, 77929, 77933, 77951, 77969, 77977, 77983, 77999, 78007, 78017, 78031, 78041, 78049, 78059, 78079, 78101, 78121, 78137, 78139, 78157, 78163, 78167, 78173, 78179, 78191, 78193, 78203, 78229, 78233, 78241, 78259, 78277, 78283, 78301, 78307, 78311, 78317, 78341, 78347, 78367, 78401, 78427, 78437, 78439, 78467, 78479, 78487, 78497, 78509, 78511, 78517, 78539, 78541, 78553, 78569, 78571, 78577, 78583, 78593, 78607, 78623, 78643, 78649, 78653, 78691, 78697, 78707, 78713, 78721, 78737, 78779, 78781, 78787, 78791, 78797, 78803, 78809, 78823, 78839, 78853, 78857, 78877, 78887, 78889, 78893, 78901, 78919, 78929, 78941, 78977, 78979, 78989, 79031, 79039, 79043, 79063, 79087, 79103, 79111, 79133, 79139, 79147, 79151, 79153, 79159, 79181, 79187, 79193, 79201, 79229, 79231, 79241, 79259, 79273, 79279, 79283, 79301, 79309, 79319, 79333, 79337, 79349, 79357, 79367, 79379, 79393, 79397, 79399, 79411, 79423, 79427, 79433, 79451, 79481, 79493, 79531, 79537, 79549, 79559, 79561, 79579, 79589, 79601, 79609, 79613, 79621, 79627, 79631, 79633, 79657, 79669, 79687, 79691, 79693, 
79697, 79699, 79757, 79769, 79777, 79801, 79811, 79813, 79817, 79823, 79829, 79841, 79843, 79847, 79861, 79867, 79873, 79889, 79901, 79903, 79907, 79939, 79943, 79967, 79973, 79979, 79987, 79997, 79999, 80021, 80039, 80051, 80071, 80077, 80107, 80111, 80141, 80147, 80149, 80153, 80167, 80173, 80177, 80191, 80207, 80209, 80221, 80231, 80233, 80239, 80251, 80263, 80273, 80279, 80287, 80309, 80317, 80329, 80341, 80347, 80363, 80369, 80387, 80407, 80429, 80447, 80449, 80471, 80473, 80489, 80491, 80513, 80527, 80537, 80557, 80567, 80599, 80603, 80611, 80621, 80627, 80629, 80651, 80657, 80669, 80671, 80677, 80681, 80683, 80687, 80701, 80713, 80737, 80747, 80749, 80761, 80777, 80779, 80783, 80789, 80803, 80809, 80819, 80831, 80833, 80849, 80863, 80897, 80909, 80911, 80917, 80923, 80929, 80933, 80953, 80963, 80989, 81001, 81013, 81017, 81019, 81023, 81031, 81041, 81043, 81047, 81049, 81071, 81077, 81083, 81097, 81101, 81119, 81131, 81157, 81163, 81173, 81181, 81197, 81199, 81203, 81223, 81233, 81239, 81281, 81283, 81293, 81299, 81307, 81331, 81343, 81349, 81353, 81359, 81371, 81373, 81401, 81409, 81421, 81439, 81457, 81463, 81509, 81517, 81527, 81533, 81547, 81551, 81553, 81559, 81563, 81569, 81611, 81619, 81629, 81637, 81647, 81649, 81667, 81671, 81677, 81689, 81701, 81703, 81707, 81727, 81737, 81749, 81761, 81769, 81773, 81799, 81817, 81839, 81847, 81853, 81869, 81883, 81899, 81901, 81919, 81929, 81931, 81937, 81943, 81953, 81967, 81971, 81973, 82003, 82007, 82009, 82013, 82021, 82031, 82037, 82039, 82051, 82067, 82073, 82129, 82139, 82141, 82153, 82163, 82171, 82183, 82189, 82193, 82207, 82217, 82219, 82223, 82231, 82237, 82241, 82261, 82267, 82279, 82301, 82307, 82339, 82349, 82351, 82361, 82373, 82387, 82393, 82421, 82457, 82463, 82469, 82471, 82483, 82487, 82493, 82499, 82507, 82529, 82531, 82549, 82559, 82561, 82567, 82571, 82591, 82601, 82609, 82613, 82619, 82633, 82651, 82657, 82699, 82721, 82723, 82727, 82729, 82757, 82759, 82763, 82781, 82787, 82793, 82799, 82811, 82813, 82837, 82847, 82883, 82889, 82891, 82903, 82913, 82939, 82963, 82981, 82997, 83003, 83009, 83023, 83047, 83059, 83063, 83071, 83077, 83089, 83093, 83101, 83117, 83137, 83177, 83203, 83207, 83219, 83221, 83227, 83231, 83233, 83243, 83257, 83267, 83269, 83273, 83299, 83311, 83339, 83341, 83357, 83383, 83389, 83399, 83401, 83407, 83417, 83423, 83431, 83437, 83443, 83449, 83459, 83471, 83477, 83497, 83537, 83557, 83561, 83563, 83579, 83591, 83597, 83609, 83617, 83621, 83639, 83641, 83653, 83663, 83689, 83701, 83717, 83719, 83737, 83761, 83773, 83777, 83791, 83813, 83833, 83843, 83857, 83869, 83873, 83891, 83903, 83911, 83921, 83933, 83939, 83969, 83983, 83987, 84011, 84017, 84047, 84053, 84059, 84061, 84067, 84089, 84121, 84127, 84131, 84137, 84143, 84163, 84179, 84181, 84191, 84199, 84211, 84221, 84223, 84229, 84239, 84247, 84263, 84299, 84307, 84313, 84317, 84319, 84347, 84349, 84377, 84389, 84391, 84401, 84407, 84421, 84431, 84437, 84443, 84449, 84457, 84463, 84467, 84481, 84499, 84503, 84509, 84521, 84523, 84533, 84551, 84559, 84589, 84629, 84631, 84649, 84653, 84659, 84673, 84691, 84697, 84701, 84713, 84719, 84731, 84737, 84751, 84761, 84787, 84793, 84809, 84811, 84827, 84857, 84859, 84869, 84871, 84913, 84919, 84947, 84961, 84967, 84977, 84979, 84991, 85009, 85021, 85027, 85037, 85049, 85061, 85081, 85087, 85091, 85093, 85103, 85109, 85121, 85133, 85147, 85159, 85193, 85199, 85201, 85213, 85223, 85229, 85237, 85243, 85247, 85259, 85297, 85303, 85313, 85331, 85333, 85361, 85363, 85369, 85381, 85411, 85427, 85429, 
85439, 85447, 85451, 85453, 85469, 85487, 85513, 85517, 85523, 85531, 85549, 85571, 85577, 85597, 85601, 85607, 85619, 85621, 85627, 85639, 85643, 85661, 85667, 85669, 85691, 85703, 85711, 85717, 85733, 85751, 85781, 85793, 85817, 85819, 85829, 85831, 85837, 85843, 85847, 85853, 85889, 85903, 85909, 85931, 85933, 85991, 85999, 86011, 86017, 86027, 86029, 86069, 86077, 86083, 86111, 86113, 86117, 86131, 86137, 86143, 86161, 86171, 86179, 86183, 86197, 86201, 86209, 86239, 86243, 86249, 86257, 86263, 86269, 86287, 86291, 86293, 86297, 86311, 86323, 86341, 86351, 86353, 86357, 86369, 86371, 86381, 86389, 86399, 86413, 86423, 86441, 86453, 86461, 86467, 86477, 86491, 86501, 86509, 86531, 86533, 86539, 86561, 86573, 86579, 86587, 86599, 86627, 86629, 86677, 86689, 86693, 86711, 86719, 86729, 86743, 86753, 86767, 86771, 86783, 86813, 86837, 86843, 86851, 86857, 86861, 86869, 86923, 86927, 86929, 86939, 86951, 86959, 86969, 86981, 86993, 87011, 87013, 87037, 87041, 87049, 87071, 87083, 87103, 87107, 87119, 87121, 87133, 87149, 87151, 87179, 87181, 87187, 87211, 87221, 87223, 87251, 87253, 87257, 87277, 87281, 87293, 87299, 87313, 87317, 87323, 87337, 87359, 87383, 87403, 87407, 87421, 87427, 87433, 87443, 87473, 87481, 87491, 87509, 87511, 87517, 87523, 87539, 87541, 87547, 87553, 87557, 87559, 87583, 87587, 87589, 87613, 87623, 87629, 87631, 87641, 87643, 87649, 87671, 87679, 87683, 87691, 87697, 87701, 87719, 87721, 87739, 87743, 87751, 87767, 87793, 87797, 87803, 87811, 87833, 87853, 87869, 87877, 87881, 87887, 87911, 87917, 87931, 87943, 87959, 87961, 87973, 87977, 87991, 88001, 88003, 88007, 88019, 88037, 88069, 88079, 88093, 88117, 88129, 88169, 88177, 88211, 88223, 88237, 88241, 88259, 88261, 88289, 88301, 88321, 88327, 88337, 88339, 88379, 88397, 88411, 88423, 88427, 88463, 88469, 88471, 88493, 88499, 88513, 88523, 88547, 88589, 88591, 88607, 88609, 88643, 88651, 88657, 88661, 88663, 88667, 88681, 88721, 88729, 88741, 88747, 88771, 88789, 88793, 88799, 88801, 88807, 88811, 88813, 88817, 88819, 88843, 88853, 88861, 88867, 88873, 88883, 88897, 88903, 88919, 88937, 88951, 88969, 88993, 88997, 89003, 89009, 89017, 89021, 89041, 89051, 89057, 89069, 89071, 89083, 89087, 89101, 89107, 89113, 89119, 89123, 89137, 89153, 89189, 89203, 89209, 89213, 89227, 89231, 89237, 89261, 89269, 89273, 89293, 89303, 89317, 89329, 89363, 89371, 89381, 89387, 89393, 89399, 89413, 89417, 89431, 89443, 89449, 89459, 89477, 89491, 89501, 89513, 89519, 89521, 89527, 89533, 89561, 89563, 89567, 89591, 89597, 89599, 89603, 89611, 89627, 89633, 89653, 89657, 89659, 89669, 89671, 89681, 89689, 89753, 89759, 89767, 89779, 89783, 89797, 89809, 89819, 89821, 89833, 89839, 89849, 89867, 89891, 89897, 89899, 89909, 89917, 89923, 89939, 89959, 89963, 89977, 89983, 89989, 90001, 90007, 90011, 90017, 90019, 90023, 90031, 90053, 90059, 90067, 90071, 90073, 90089, 90107, 90121, 90127, 90149, 90163, 90173, 90187, 90191, 90197, 90199, 90203, 90217, 90227, 90239, 90247, 90263, 90271, 90281, 90289, 90313, 90353, 90359, 90371, 90373, 90379, 90397, 90401, 90403, 90407, 90437, 90439, 90469, 90473, 90481, 90499, 90511, 90523, 90527, 90529, 90533, 90547, 90583, 90599, 90617, 90619, 90631, 90641, 90647, 90659, 90677, 90679, 90697, 90703, 90709, 90731, 90749, 90787, 90793, 90803, 90821, 90823, 90833, 90841, 90847, 90863, 90887, 90901, 90907, 90911, 90917, 90931, 90947, 90971, 90977, 90989, 90997, 91009, 91019, 91033, 91079, 91081, 91097, 91099, 91121, 91127, 91129, 91139, 91141, 91151, 91153, 91159, 91163, 91183, 91193, 91199, 91229, 
91237, 91243, 91249, 91253, 91283, 91291, 91297, 91303, 91309, 91331, 91367, 91369, 91373, 91381, 91387, 91393, 91397, 91411, 91423, 91433, 91453, 91457, 91459, 91463, 91493, 91499, 91513, 91529, 91541, 91571, 91573, 91577, 91583, 91591, 91621, 91631, 91639, 91673, 91691, 91703, 91711, 91733, 91753, 91757, 91771, 91781, 91801, 91807, 91811, 91813, 91823, 91837, 91841, 91867, 91873, 91909, 91921, 91939, 91943, 91951, 91957, 91961, 91967, 91969, 91997, 92003, 92009, 92033, 92041, 92051, 92077, 92083, 92107, 92111, 92119, 92143, 92153, 92173, 92177, 92179, 92189, 92203, 92219, 92221, 92227, 92233, 92237, 92243, 92251, 92269, 92297, 92311, 92317, 92333, 92347, 92353, 92357, 92363, 92369, 92377, 92381, 92383, 92387, 92399, 92401, 92413, 92419, 92431, 92459, 92461, 92467, 92479, 92489, 92503, 92507, 92551, 92557, 92567, 92569, 92581, 92593, 92623, 92627, 92639, 92641, 92647, 92657, 92669, 92671, 92681, 92683, 92693, 92699, 92707, 92717, 92723, 92737, 92753, 92761, 92767, 92779, 92789, 92791, 92801, 92809, 92821, 92831, 92849, 92857, 92861, 92863, 92867, 92893, 92899, 92921, 92927, 92941, 92951, 92957, 92959, 92987, 92993, 93001, 93047, 93053, 93059, 93077, 93083, 93089, 93097, 93103, 93113, 93131, 93133, 93139, 93151, 93169, 93179, 93187, 93199, 93229, 93239, 93241, 93251, 93253, 93257, 93263, 93281, 93283, 93287, 93307, 93319, 93323, 93329, 93337, 93371, 93377, 93383, 93407, 93419, 93427, 93463, 93479, 93481, 93487, 93491, 93493, 93497, 93503, 93523, 93529, 93553, 93557, 93559, 93563, 93581, 93601, 93607, 93629, 93637, 93683, 93701, 93703, 93719, 93739, 93761, 93763, 93787, 93809, 93811, 93827, 93851, 93871, 93887, 93889, 93893, 93901, 93911, 93913, 93923, 93937, 93941, 93949, 93967, 93971, 93979, 93983, 93997, 94007, 94009, 94033, 94049, 94057, 94063, 94079, 94099, 94109, 94111, 94117, 94121, 94151, 94153, 94169, 94201, 94207, 94219, 94229, 94253, 94261, 94273, 94291, 94307, 94309, 94321, 94327, 94331, 94343, 94349, 94351, 94379, 94397, 94399, 94421, 94427, 94433, 94439, 94441, 94447, 94463, 94477, 94483, 94513, 94529, 94531, 94541, 94543, 94547, 94559, 94561, 94573, 94583, 94597, 94603, 94613, 94621, 94649, 94651, 94687, 94693, 94709, 94723, 94727, 94747, 94771, 94777, 94781, 94789, 94793, 94811, 94819, 94823, 94837, 94841, 94847, 94849, 94873, 94889, 94903, 94907, 94933, 94949, 94951, 94961, 94993, 94999, 95003, 95009, 95021, 95027, 95063, 95071, 95083, 95087, 95089, 95093, 95101, 95107, 95111, 95131, 95143, 95153, 95177, 95189, 95191, 95203, 95213, 95219, 95231, 95233, 95239, 95257, 95261, 95267, 95273, 95279, 95287, 95311, 95317, 95327, 95339, 95369, 95383, 95393, 95401, 95413, 95419, 95429, 95441, 95443, 95461, 95467, 95471, 95479, 95483, 95507, 95527, 95531, 95539, 95549, 95561, 95569, 95581, 95597, 95603, 95617, 95621, 95629, 95633, 95651, 95701, 95707, 95713, 95717, 95723, 95731, 95737, 95747, 95773, 95783, 95789, 95791, 95801, 95803, 95813, 95819, 95857, 95869, 95873, 95881, 95891, 95911, 95917, 95923, 95929, 95947, 95957, 95959, 95971, 95987, 95989, 96001, 96013, 96017, 96043, 96053, 96059, 96079, 96097, 96137, 96149, 96157, 96167, 96179, 96181, 96199, 96211, 96221, 96223, 96233, 96259, 96263, 96269, 96281, 96289, 96293, 96323, 96329, 96331, 96337, 96353, 96377, 96401, 96419, 96431, 96443, 96451, 96457, 96461, 96469, 96479, 96487, 96493, 96497, 96517, 96527, 96553, 96557, 96581, 96587, 96589, 96601, 96643, 96661, 96667, 96671, 96697, 96703, 96731, 96737, 96739, 96749, 96757, 96763, 96769, 96779, 96787, 96797, 96799, 96821, 96823, 96827, 96847, 96851, 96857, 96893, 96907, 96911, 
96931, 96953, 96959, 96973, 96979, 96989, 96997, 97001, 97003, 97007, 97021, 97039, 97073, 97081, 97103, 97117, 97127, 97151, 97157, 97159, 97169, 97171, 97177, 97187, 97213, 97231, 97241, 97259, 97283, 97301, 97303, 97327, 97367, 97369, 97373, 97379, 97381, 97387, 97397, 97423, 97429, 97441, 97453, 97459, 97463, 97499, 97501, 97511, 97523, 97547, 97549, 97553, 97561, 97571, 97577, 97579, 97583, 97607, 97609, 97613, 97649, 97651, 97673, 97687, 97711, 97729, 97771, 97777, 97787, 97789, 97813, 97829, 97841, 97843, 97847, 97849, 97859, 97861, 97871, 97879, 97883, 97919, 97927, 97931, 97943, 97961, 97967, 97973, 97987, 98009, 98011, 98017, 98041, 98047, 98057, 98081, 98101, 98123, 98129, 98143, 98179, 98207, 98213, 98221, 98227, 98251, 98257, 98269, 98297, 98299, 98317, 98321, 98323, 98327, 98347, 98369, 98377, 98387, 98389, 98407, 98411, 98419, 98429, 98443, 98453, 98459, 98467, 98473, 98479, 98491, 98507, 98519, 98533, 98543, 98561, 98563, 98573, 98597, 98621, 98627, 98639, 98641, 98663, 98669, 98689, 98711, 98713, 98717, 98729, 98731, 98737, 98773, 98779, 98801, 98807, 98809, 98837, 98849, 98867, 98869, 98873, 98887, 98893, 98897, 98899, 98909, 98911, 98927, 98929, 98939, 98947, 98953, 98963, 98981, 98993, 98999, 99013, 99017, 99023, 99041, 99053, 99079, 99083, 99089, 99103, 99109, 99119, 99131, 99133, 99137, 99139, 99149, 99173, 99181, 99191, 99223, 99233, 99241, 99251, 99257, 99259, 99277, 99289, 99317, 99347, 99349, 99367, 99371, 99377, 99391, 99397, 99401, 99409, 99431, 99439, 99469, 99487, 99497, 99523, 99527, 99529, 99551, 99559, 99563, 99571, 99577, 99581, 99607, 99611, 99623, 99643, 99661, 99667, 99679, 99689, 99707, 99709, 99713, 99719, 99721, 99733, 99761, 99767, 99787, 99793, 99809, 99817, 99823, 99829, 99833, 99839, 99859, 99871, 99877, 99881, 99901, 99907, 99923, 99929, 99961, 99971, 99989, 99991, 100003, 100019, 100043, 100049, 100057, 100069, 100103, 100109, 100129, 100151, 100153, 100169, 100183, 100189, 100193, 100207, 100213, 100237, 100267, 100271, 100279, 100291, 100297, 100313, 100333, 100343, 100357, 100361, 100363, 100379, 100391, 100393, 100403, 100411, 100417, 100447, 100459, 100469, 100483, 100493, 100501, 100511, 100517, 100519, 100523, 100537, 100547, 100549, 100559, 100591, 100609, 100613, 100621, 100649, 100669, 100673, 100693, 100699, 100703, 100733, 100741, 100747, 100769, 100787, 100799, 100801, 100811, 100823, 100829, 100847, 100853, 100907, 100913, 100927, 100931, 100937, 100943, 100957, 100981, 100987, 100999, 101009, 101021, 101027, 101051, 101063, 101081, 101089, 101107, 101111, 101113, 101117, 101119, 101141, 101149, 101159, 101161, 101173, 101183, 101197, 101203, 101207, 101209, 101221, 101267, 101273, 101279, 101281, 101287, 101293, 101323, 101333, 101341, 101347, 101359, 101363, 101377, 101383, 101399, 101411, 101419, 101429, 101449, 101467, 101477, 101483, 101489, 101501, 101503, 101513, 101527, 101531, 101533, 101537, 101561, 101573, 101581, 101599, 101603, 101611, 101627, 101641, 101653, 101663, 101681, 101693, 101701, 101719, 101723, 101737, 101741, 101747, 101749, 101771, 101789, 101797, 101807, 101833, 101837, 101839, 101863, 101869, 101873, 101879, 101891, 101917, 101921, 101929, 101939, 101957, 101963, 101977, 101987, 101999, 102001, 102013, 102019, 102023, 102031, 102043, 102059, 102061, 102071, 102077, 102079, 102101, 102103, 102107, 102121, 102139, 102149, 102161, 102181, 102191, 102197, 102199, 102203, 102217, 102229, 102233, 102241, 102251, 102253, 102259, 102293, 102299, 102301, 102317, 102329, 102337, 102359, 102367, 102397, 102407, 
102409, 102433, 102437, 102451, 102461, 102481, 102497, 102499, 102503, 102523, 102533, 102539, 102547, 102551, 102559, 102563, 102587, 102593, 102607, 102611, 102643, 102647, 102653, 102667, 102673, 102677, 102679, 102701, 102761, 102763, 102769, 102793, 102797, 102811, 102829, 102841, 102859, 102871, 102877, 102881, 102911, 102913, 102929, 102931, 102953, 102967, 102983, 103001, 103007, 103043, 103049, 103067, 103069, 103079, 103087, 103091, 103093, 103099, 103123, 103141, 103171, 103177, 103183, 103217, 103231, 103237, 103289, 103291, 103307, 103319, 103333, 103349, 103357, 103387, 103391, 103393, 103399, 103409, 103421, 103423, 103451, 103457, 103471, 103483, 103511, 103529, 103549, 103553, 103561, 103567, 103573, 103577, 103583, 103591, 103613, 103619, 103643, 103651, 103657, 103669, 103681, 103687, 103699, 103703, 103723, 103769, 103787, 103801, 103811, 103813, 103837, 103841, 103843, 103867, 103889, 103903, 103913, 103919, 103951, 103963, 103967, 103969, 103979, 103981, 103991, 103993, 103997, 104003, 104009, 104021, 104033, 104047, 104053, 104059, 104087, 104089, 104107, 104113, 104119, 104123, 104147, 104149, 104161, 104173, 104179, 104183, 104207, 104231, 104233, 104239, 104243, 104281, 104287, 104297, 104309, 104311, 104323, 104327, 104347, 104369, 104381, 104383, 104393, 104399, 104417, 104459, 104471, 104473, 104479, 104491, 104513, 104527, 104537, 104543, 104549, 104551, 104561, 104579, 104593, 104597, 104623, 104639, 104651, 104659, 104677, 104681, 104683, 104693, 104701, 104707, 104711, 104717, 104723, 104729, )
mit
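The record above ends with a large Python tuple of precomputed primes (its visible tail runs up to 104729, the 10,000th prime). As a minimal sketch of how such a table is typically consumed, assuming only that the full tuple is bound to a name, here called PRIMES purely for illustration, membership tests and prime counting can be done with bisect:

import bisect

# PRIMES is a stand-in for the record's full tuple; only a tiny prefix is shown here.
PRIMES = (2, 3, 5, 7, 11, 13)

def is_listed_prime(n):
    """True if n appears in the precomputed table (valid only up to its last entry)."""
    i = bisect.bisect_left(PRIMES, n)
    return i < len(PRIMES) and PRIMES[i] == n

def count_primes_up_to(n):
    """pi(n) restricted to the table's range."""
    return bisect.bisect_right(PRIMES, n)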
waseem18/oh-mainline
vendor/packages/twisted/twisted/mail/test/test_options.py
17
7839
# Copyright (c) Twisted Matrix Laboratories. # See LICENSE for details. """ Tests for L{twisted.mail.tap}. """ from twisted.trial.unittest import TestCase from twisted.python.usage import UsageError from twisted.mail import protocols from twisted.mail.tap import Options, makeService from twisted.python import deprecate, versions from twisted.python.filepath import FilePath from twisted.internet import endpoints, defer class OptionsTestCase(TestCase): """ Tests for the command line option parser used for I{twistd mail}. """ def setUp(self): self.aliasFilename = self.mktemp() aliasFile = file(self.aliasFilename, 'w') aliasFile.write('someuser:\tdifferentuser\n') aliasFile.close() def testAliasesWithoutDomain(self): """ Test that adding an aliases(5) file before adding a domain raises a UsageError. """ self.assertRaises( UsageError, Options().parseOptions, ['--aliases', self.aliasFilename]) def testAliases(self): """ Test that adding an aliases(5) file to an IAliasableDomain at least doesn't raise an unhandled exception. """ Options().parseOptions([ '--maildirdbmdomain', 'example.com=example.com', '--aliases', self.aliasFilename]) def testPasswordfileDeprecation(self): """ Test that the --passwordfile option will emit a correct warning. """ passwd = FilePath(self.mktemp()) passwd.setContent("") options = Options() options.opt_passwordfile(passwd.path) warnings = self.flushWarnings([self.testPasswordfileDeprecation]) self.assertEquals(warnings[0]['category'], DeprecationWarning) self.assertEquals(len(warnings), 1) msg = deprecate.getDeprecationWarningString(options.opt_passwordfile, versions.Version('twisted.mail', 11, 0, 0)) self.assertEquals(warnings[0]['message'], msg) def test_barePort(self): """ A bare port passed to I{--pop3} results in deprecation warning in addition to a TCP4ServerEndpoint. """ options = Options() options.parseOptions(['--pop3', '8110']) self.assertEquals(len(options['pop3']), 1) self.assertIsInstance( options['pop3'][0], endpoints.TCP4ServerEndpoint) warnings = self.flushWarnings([options.opt_pop3]) self.assertEquals(len(warnings), 1) self.assertEquals(warnings[0]['category'], DeprecationWarning) self.assertEquals( warnings[0]['message'], "Specifying plain ports and/or a certificate is deprecated since " "Twisted 11.0; use endpoint descriptions instead.") def _endpointTest(self, service): """ Use L{Options} to parse a single service configuration parameter and verify that an endpoint of the correct type is added to the list for that service. """ options = Options() options.parseOptions(['--' + service, 'tcp:1234']) self.assertEquals(len(options[service]), 1) self.assertIsInstance( options[service][0], endpoints.TCP4ServerEndpoint) def test_endpointSMTP(self): """ When I{--smtp} is given a TCP endpoint description as an argument, a TCPServerEndpoint is added to the list of SMTP endpoints. """ self._endpointTest('smtp') def test_endpointPOP3(self): """ When I{--pop3} is given a TCP endpoint description as an argument, a TCPServerEndpoint is added to the list of POP3 endpoints. """ self._endpointTest('pop3') def test_protoDefaults(self): """ POP3 and SMTP each listen on a TCP4ServerEndpoint by default. 
""" options = Options() options.parseOptions([]) self.assertEquals(len(options['pop3']), 1) self.assertIsInstance( options['pop3'][0], endpoints.TCP4ServerEndpoint) self.assertEquals(len(options['smtp']), 1) self.assertIsInstance( options['smtp'][0], endpoints.TCP4ServerEndpoint) def test_protoDisable(self): """ The I{--no-pop3} and I{--no-smtp} options disable POP3 and SMTP respectively. """ options = Options() options.parseOptions(['--no-pop3']) self.assertEquals(options._getEndpoints(None, 'pop3'), []) self.assertNotEquals(options._getEndpoints(None, 'smtp'), []) options = Options() options.parseOptions(['--no-smtp']) self.assertNotEquals(options._getEndpoints(None, 'pop3'), []) self.assertEquals(options._getEndpoints(None, 'smtp'), []) def test_allProtosDisabledError(self): """ If all protocols are disabled, L{UsageError} is raised. """ options = Options() self.assertRaises( UsageError, options.parseOptions, (['--no-pop3', '--no-smtp'])) def test_pop3sBackwardCompatibility(self): """ The deprecated I{--pop3s} and I{--certificate} options set up a POP3 SSL server. """ cert = FilePath(self.mktemp()) cert.setContent("") options = Options() options.parseOptions(['--pop3s', '8995', '--certificate', cert.path]) self.assertEquals(len(options['pop3']), 2) self.assertIsInstance( options['pop3'][0], endpoints.SSL4ServerEndpoint) self.assertIsInstance( options['pop3'][1], endpoints.TCP4ServerEndpoint) warnings = self.flushWarnings([options.postOptions]) self.assertEquals(len(warnings), 1) self.assertEquals(warnings[0]['category'], DeprecationWarning) self.assertEquals( warnings[0]['message'], "Specifying plain ports and/or a certificate is deprecated since " "Twisted 11.0; use endpoint descriptions instead.") class SpyEndpoint(object): """ SpyEndpoint remembers what factory it is told to listen with. """ listeningWith = None def listen(self, factory): self.listeningWith = factory return defer.succeed(None) class MakeServiceTests(TestCase): """ Tests for L{twisted.mail.tap.makeService} """ def _endpointServerTest(self, key, factoryClass): """ Configure a service with two endpoints for the protocol associated with C{key} and verify that when the service is started a factory of type C{factoryClass} is used to listen on each of them. """ cleartext = SpyEndpoint() secure = SpyEndpoint() config = Options() config[key] = [cleartext, secure] service = makeService(config) service.privilegedStartService() service.startService() self.addCleanup(service.stopService) self.assertIsInstance(cleartext.listeningWith, factoryClass) self.assertIsInstance(secure.listeningWith, factoryClass) def test_pop3(self): """ If one or more endpoints is included in the configuration passed to L{makeService} for the C{"pop3"} key, a service for starting a POP3 server is constructed for each of them and attached to the returned service. """ self._endpointServerTest("pop3", protocols.POP3Factory) def test_smtp(self): """ If one or more endpoints is included in the configuration passed to L{makeService} for the C{"smtp"} key, a service for starting an SMTP server is constructed for each of them and attached to the returned service. """ self._endpointServerTest("smtp", protocols.SMTPFactory)
agpl-3.0
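The record above is Twisted's test module for the twistd mail command-line parser. A minimal usage sketch, assuming a Twisted 11.x-era installation like the vendored copy in this record, of the endpoint-description style those tests favour over bare port numbers:

from twisted.mail.tap import Options, makeService

config = Options()
# 'tcp:8110' is an endpoint description; a bare '8110' would instead trigger the
# DeprecationWarning that test_barePort checks for.
config.parseOptions(['--pop3', 'tcp:8110', '--no-smtp'])
service = makeService(config)  # the same call MakeServiceTests exercises; ready to be started by twistd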
yeraydiazdiaz/nonrel-blog
django/contrib/gis/db/backends/spatialite/operations.py
78
14881
import re from decimal import Decimal from django.contrib.gis.db.backends.base import BaseSpatialOperations from django.contrib.gis.db.backends.util import SpatialOperation, SpatialFunction from django.contrib.gis.db.backends.spatialite.adapter import SpatiaLiteAdapter from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.measure import Distance from django.core.exceptions import ImproperlyConfigured from django.db.backends.sqlite3.base import DatabaseOperations from django.db.utils import DatabaseError class SpatiaLiteOperator(SpatialOperation): "For SpatiaLite operators (e.g. `&&`, `~`)." def __init__(self, operator): super(SpatiaLiteOperator, self).__init__(operator=operator) class SpatiaLiteFunction(SpatialFunction): "For SpatiaLite function calls." def __init__(self, function, **kwargs): super(SpatiaLiteFunction, self).__init__(function, **kwargs) class SpatiaLiteFunctionParam(SpatiaLiteFunction): "For SpatiaLite functions that take another parameter." sql_template = '%(function)s(%(geo_col)s, %(geometry)s, %%s)' class SpatiaLiteDistance(SpatiaLiteFunction): "For SpatiaLite distance operations." dist_func = 'Distance' sql_template = '%(function)s(%(geo_col)s, %(geometry)s) %(operator)s %%s' def __init__(self, operator): super(SpatiaLiteDistance, self).__init__(self.dist_func, operator=operator) class SpatiaLiteRelate(SpatiaLiteFunctionParam): "For SpatiaLite Relate(<geom>, <pattern>) calls." pattern_regex = re.compile(r'^[012TF\*]{9}$') def __init__(self, pattern): if not self.pattern_regex.match(pattern): raise ValueError('Invalid intersection matrix pattern "%s".' % pattern) super(SpatiaLiteRelate, self).__init__('Relate') # Valid distance types and substitutions dtypes = (Decimal, Distance, float, int, long) def get_dist_ops(operator): "Returns operations for regular distances; spherical distances are not currently supported." return (SpatiaLiteDistance(operator),) class SpatiaLiteOperations(DatabaseOperations, BaseSpatialOperations): compiler_module = 'django.contrib.gis.db.models.sql.compiler' name = 'spatialite' spatialite = True version_regex = re.compile(r'^(?P<major>\d)\.(?P<minor1>\d)\.(?P<minor2>\d+)') valid_aggregates = dict([(k, None) for k in ('Extent', 'Union')]) Adapter = SpatiaLiteAdapter Adaptor = Adapter # Backwards-compatibility alias. area = 'Area' centroid = 'Centroid' contained = 'MbrWithin' difference = 'Difference' distance = 'Distance' envelope = 'Envelope' intersection = 'Intersection' length = 'GLength' # OpenGis defines Length, but this conflicts with an SQLite reserved keyword num_geom = 'NumGeometries' num_points = 'NumPoints' point_on_surface = 'PointOnSurface' scale = 'ScaleCoords' svg = 'AsSVG' sym_difference = 'SymDifference' transform = 'Transform' translate = 'ShiftCoords' union = 'GUnion' # OpenGis defines Union, but this conflicts with an SQLite reserved keyword unionagg = 'GUnion' from_text = 'GeomFromText' from_wkb = 'GeomFromWKB' select = 'AsText(%s)' geometry_functions = { 'equals' : SpatiaLiteFunction('Equals'), 'disjoint' : SpatiaLiteFunction('Disjoint'), 'touches' : SpatiaLiteFunction('Touches'), 'crosses' : SpatiaLiteFunction('Crosses'), 'within' : SpatiaLiteFunction('Within'), 'overlaps' : SpatiaLiteFunction('Overlaps'), 'contains' : SpatiaLiteFunction('Contains'), 'intersects' : SpatiaLiteFunction('Intersects'), 'relate' : (SpatiaLiteRelate, basestring), # Returns true if B's bounding box completely contains A's bounding box. 
'contained' : SpatiaLiteFunction('MbrWithin'), # Returns true if A's bounding box completely contains B's bounding box. 'bbcontains' : SpatiaLiteFunction('MbrContains'), # Returns true if A's bounding box overlaps B's bounding box. 'bboverlaps' : SpatiaLiteFunction('MbrOverlaps'), # These are implemented here as synonyms for Equals 'same_as' : SpatiaLiteFunction('Equals'), 'exact' : SpatiaLiteFunction('Equals'), } distance_functions = { 'distance_gt' : (get_dist_ops('>'), dtypes), 'distance_gte' : (get_dist_ops('>='), dtypes), 'distance_lt' : (get_dist_ops('<'), dtypes), 'distance_lte' : (get_dist_ops('<='), dtypes), } geometry_functions.update(distance_functions) def __init__(self, connection): super(DatabaseOperations, self).__init__(connection) # Determine the version of the SpatiaLite library. try: vtup = self.spatialite_version_tuple() version = vtup[1:] if version < (2, 3, 0): raise ImproperlyConfigured('GeoDjango only supports SpatiaLite versions ' '2.3.0 and above') self.spatial_version = version except ImproperlyConfigured: raise except Exception, msg: raise ImproperlyConfigured('Cannot determine the SpatiaLite version for the "%s" ' 'database (error was "%s"). Was the SpatiaLite initialization ' 'SQL loaded on this database?' % (self.connection.settings_dict['NAME'], msg)) # Creating the GIS terms dictionary. gis_terms = ['isnull'] gis_terms += self.geometry_functions.keys() self.gis_terms = dict([(term, None) for term in gis_terms]) if version >= (2, 4, 0): # Spatialite 2.4.0-RC4 added AsGML and AsKML, however both # RC2 (shipped in popular Debian/Ubuntu packages) and RC4 # report version as '2.4.0', so we fall back to feature detection try: self._get_spatialite_func("AsGML(GeomFromText('POINT(1 1)'))") self.gml = 'AsGML' self.kml = 'AsKML' except DatabaseError: # we are using < 2.4.0-RC4 pass def check_aggregate_support(self, aggregate): """ Checks if the given aggregate name is supported (that is, if it's in `self.valid_aggregates`). """ agg_name = aggregate.__class__.__name__ return agg_name in self.valid_aggregates def convert_geom(self, wkt, geo_field): """ Converts geometry WKT returned from a SpatiaLite aggregate. """ if wkt: return Geometry(wkt, geo_field.srid) else: return None def geo_db_type(self, f): """ Returns None because geometry columnas are added via the `AddGeometryColumn` stored procedure on SpatiaLite. """ return None def get_distance(self, f, value, lookup_type): """ Returns the distance parameters for the given geometry field, lookup value, and lookup type. SpatiaLite only supports regular cartesian-based queries (no spheroid/sphere calculations for point geometries like PostGIS). """ if not value: return [] value = value[0] if isinstance(value, Distance): if f.geodetic(self.connection): raise ValueError('SpatiaLite does not support distance queries on ' 'geometry fields with a geodetic coordinate system. ' 'Distance objects; use a numeric value of your ' 'distance in degrees instead.') else: dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection))) else: dist_param = value return [dist_param] def get_geom_placeholder(self, f, value): """ Provides a proper substitution value for Geometries that are not in the SRID of the field. Specifically, this routine will substitute in the Transform() and GeomFromText() function call(s). 
""" def transform_value(value, srid): return not (value is None or value.srid == srid) if hasattr(value, 'expression'): if transform_value(value, f.srid): placeholder = '%s(%%s, %s)' % (self.transform, f.srid) else: placeholder = '%s' # No geometry value used for F expression, substitue in # the column name instead. return placeholder % '%s.%s' % tuple(map(self.quote_name, value.cols[value.expression])) else: if transform_value(value, f.srid): # Adding Transform() to the SQL placeholder. return '%s(%s(%%s,%s), %s)' % (self.transform, self.from_text, value.srid, f.srid) else: return '%s(%%s,%s)' % (self.from_text, f.srid) def _get_spatialite_func(self, func): """ Helper routine for calling SpatiaLite functions and returning their result. """ cursor = self.connection._cursor() try: try: cursor.execute('SELECT %s' % func) row = cursor.fetchone() except: # Responsibility of caller to perform error handling. raise finally: cursor.close() return row[0] def geos_version(self): "Returns the version of GEOS used by SpatiaLite as a string." return self._get_spatialite_func('geos_version()') def proj4_version(self): "Returns the version of the PROJ.4 library used by SpatiaLite." return self._get_spatialite_func('proj4_version()') def spatialite_version(self): "Returns the SpatiaLite library version as a string." return self._get_spatialite_func('spatialite_version()') def spatialite_version_tuple(self): """ Returns the SpatiaLite version as a tuple (version string, major, minor, subminor). """ # Getting the SpatiaLite version. try: version = self.spatialite_version() except DatabaseError: # The `spatialite_version` function first appeared in version 2.3.1 # of SpatiaLite, so doing a fallback test for 2.3.0 (which is # used by popular Debian/Ubuntu packages). version = None try: tmp = self._get_spatialite_func("X(GeomFromText('POINT(1 1)'))") if tmp == 1.0: version = '2.3.0' except DatabaseError: pass # If no version string defined, then just re-raise the original # exception. if version is None: raise m = self.version_regex.match(version) if m: major = int(m.group('major')) minor1 = int(m.group('minor1')) minor2 = int(m.group('minor2')) else: raise Exception('Could not parse SpatiaLite version string: %s' % version) return (version, major, minor1, minor2) def spatial_aggregate_sql(self, agg): """ Returns the spatial aggregate SQL template and function for the given Aggregate instance. """ agg_name = agg.__class__.__name__ if not self.check_aggregate_support(agg): raise NotImplementedError('%s spatial aggregate is not implmented for this backend.' % agg_name) agg_name = agg_name.lower() if agg_name == 'union': agg_name += 'agg' sql_template = self.select % '%(function)s(%(field)s)' sql_function = getattr(self, agg_name) return sql_template, sql_function def spatial_lookup_sql(self, lvalue, lookup_type, value, field, qn): """ Returns the SpatiaLite-specific SQL for the given lookup value [a tuple of (alias, column, db_type)], lookup type, lookup value, the model field, and the quoting function. """ alias, col, db_type = lvalue # Getting the quoted field as `geo_col`. geo_col = '%s.%s' % (qn(alias), qn(col)) if lookup_type in self.geometry_functions: # See if a SpatiaLite geometry function matches the lookup type. tmp = self.geometry_functions[lookup_type] # Lookup types that are tuples take tuple arguments, e.g., 'relate' and # distance lookups. 
if isinstance(tmp, tuple): # First element of tuple is the SpatiaLiteOperation instance, and the # second element is either the type or a tuple of acceptable types # that may passed in as further parameters for the lookup type. op, arg_type = tmp # Ensuring that a tuple _value_ was passed in from the user if not isinstance(value, (tuple, list)): raise ValueError('Tuple required for `%s` lookup type.' % lookup_type) # Geometry is first element of lookup tuple. geom = value[0] # Number of valid tuple parameters depends on the lookup type. if len(value) != 2: raise ValueError('Incorrect number of parameters given for `%s` lookup type.' % lookup_type) # Ensuring the argument type matches what we expect. if not isinstance(value[1], arg_type): raise ValueError('Argument type should be %s, got %s instead.' % (arg_type, type(value[1]))) # For lookup type `relate`, the op instance is not yet created (has # to be instantiated here to check the pattern parameter). if lookup_type == 'relate': op = op(value[1]) elif lookup_type in self.distance_functions: op = op[0] else: op = tmp geom = value # Calling the `as_sql` function on the operation instance. return op.as_sql(geo_col, self.get_geom_placeholder(field, geom)) elif lookup_type == 'isnull': # Handling 'isnull' lookup type return "%s IS %sNULL" % (geo_col, (not value and 'NOT ' or '')) raise TypeError("Got invalid lookup_type: %s" % repr(lookup_type)) # Routines for getting the OGC-compliant models. def geometry_columns(self): from django.contrib.gis.db.backends.spatialite.models import GeometryColumns return GeometryColumns def spatial_ref_sys(self): from django.contrib.gis.db.backends.spatialite.models import SpatialRefSys return SpatialRefSys
bsd-3-clause
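The record above is the SpatiaLite backend that translates GeoDjango lookups such as distance_lte into SQL. A hedged sketch of how that machinery is reached from the ORM follows; Shop is a hypothetical model invented for illustration, and it deliberately uses a projected SRID because, as get_distance() above enforces, SpatiaLite rejects Distance objects on geodetic (for example SRID 4326) fields:

from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.contrib.gis.measure import D  # shorthand for Distance

class Shop(models.Model):  # hypothetical example model, not part of the record
    name = models.CharField(max_length=50)
    location = models.PointField(srid=3857)  # projected SRID, units are metres
    objects = models.GeoManager()  # needed for spatial lookups in this Django era

here = Point(0, 0, srid=3857)
# Compiles to a Distance(location, GeomFromText(...)) <= 5000.0 comparison via the
# distance_lte entry in distance_functions above.
nearby = Shop.objects.filter(location__distance_lte=(here, D(m=5000)))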
tntnatbry/tensorflow
tensorflow/python/framework/graph_util_test.py
25
13057
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for tensorflow.python.client.graph_util.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.python.client import session from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import graph_util from tensorflow.python.framework import importer from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_util from tensorflow.python.ops import gen_state_ops from tensorflow.python.ops import math_ops # pylint: disable=unused-import from tensorflow.python.ops import math_ops as math_ops_lib from tensorflow.python.ops import variables from tensorflow.python.platform import test # Utility device function to use for testing def test_device_func_pin_variable_to_cpu(op): if op.device: return op.device return "/cpu:0" if op.node_def.op in ["Variable", "VariableV2"] else op.device class DeviceFunctionsTest(test.TestCase): def testTwoDeviceFunctions(self): with ops.Graph().as_default() as g: var_0 = gen_state_ops._variable( shape=[1], dtype=dtypes.float32, name="var_0", container="", shared_name="") with g.device(test_device_func_pin_variable_to_cpu): var_1 = gen_state_ops._variable( shape=[1], dtype=dtypes.float32, name="var_1", container="", shared_name="") var_2 = gen_state_ops._variable( shape=[1], dtype=dtypes.float32, name="var_2", container="", shared_name="") var_3 = gen_state_ops._variable( shape=[1], dtype=dtypes.float32, name="var_3", container="", shared_name="") with g.device(test_device_func_pin_variable_to_cpu): var_4 = gen_state_ops._variable( shape=[1], dtype=dtypes.float32, name="var_4", container="", shared_name="") with g.device("/device:GPU:0"): var_5 = gen_state_ops._variable( shape=[1], dtype=dtypes.float32, name="var_5", container="", shared_name="") var_6 = gen_state_ops._variable( shape=[1], dtype=dtypes.float32, name="var_6", container="", shared_name="") self.assertDeviceEqual(var_0.device, None) self.assertDeviceEqual(var_1.device, "/device:CPU:0") self.assertDeviceEqual(var_2.device, None) self.assertDeviceEqual(var_3.device, None) self.assertDeviceEqual(var_4.device, "/device:CPU:0") self.assertDeviceEqual(var_5.device, "/device:GPU:0") self.assertDeviceEqual(var_6.device, "/device:CPU:0") def testNestedDeviceFunctions(self): with ops.Graph().as_default(): var_0 = variables.Variable(0) with ops.device(test_device_func_pin_variable_to_cpu): var_1 = variables.Variable(1) with ops.device(lambda op: "/gpu:0"): var_2 = variables.Variable(2) with ops.device("/gpu:0"): # Implicit merging device function. 
var_3 = variables.Variable(3) self.assertDeviceEqual(var_0.device, None) self.assertDeviceEqual(var_1.device, "/device:CPU:0") self.assertDeviceEqual(var_2.device, "/device:GPU:0") self.assertDeviceEqual(var_3.device, "/device:GPU:0") def testExplicitDevice(self): with ops.Graph().as_default() as g: const_0 = constant_op.constant(5.0) with g.device("/device:GPU:0"): const_1 = constant_op.constant(5.0) with g.device("/device:GPU:1"): const_2 = constant_op.constant(5.0) with g.device("/device:CPU:0"): const_3 = constant_op.constant(5.0) with g.device("/device:CPU:1"): const_4 = constant_op.constant(5.0) with g.device("/job:ps"): const_5 = constant_op.constant(5.0) self.assertDeviceEqual(const_0.device, None) self.assertDeviceEqual(const_1.device, "/device:GPU:0") self.assertDeviceEqual(const_2.device, "/device:GPU:1") self.assertDeviceEqual(const_3.device, "/device:CPU:0") self.assertDeviceEqual(const_4.device, "/device:CPU:1") self.assertDeviceEqual(const_5.device, "/job:ps") def testDefaultDevice(self): with ops.Graph().as_default() as g, g.device( test_device_func_pin_variable_to_cpu): with g.device("/job:ps"): const_0 = constant_op.constant(5.0) with g.device("/device:GPU:0"): const_1 = constant_op.constant(5.0) with g.device("/device:GPU:1"): const_2 = constant_op.constant(5.0) with g.device("/device:CPU:0"): const_3 = constant_op.constant(5.0) with g.device("/device:CPU:1"): const_4 = constant_op.constant(5.0) with g.device("/replica:0"): const_5 = constant_op.constant(5.0) self.assertDeviceEqual(const_0.device, "/job:ps") self.assertDeviceEqual(const_1.device, "/device:GPU:0") self.assertDeviceEqual(const_2.device, "/device:GPU:1") self.assertDeviceEqual(const_3.device, "/device:CPU:0") self.assertDeviceEqual(const_4.device, "/device:CPU:1") self.assertDeviceEqual(const_5.device, "/replica:0") def testExtractSubGraph(self): graph_def = graph_pb2.GraphDef() n1 = graph_def.node.add() n1.name = "n1" n1.input.extend(["n5"]) n2 = graph_def.node.add() n2.name = "n2" # Take the first output of the n1 node as the input. n2.input.extend(["n1:0"]) n3 = graph_def.node.add() n3.name = "n3" # Add a control input (which isn't really needed by the kernel, but # rather to enforce execution order between nodes). n3.input.extend(["^n2"]) n4 = graph_def.node.add() n4.name = "n4" # It is fine to have a loops in the graph as well. n5 = graph_def.node.add() n5.name = "n5" n5.input.extend(["n1"]) sub_graph = graph_util.extract_sub_graph(graph_def, ["n3"]) self.assertEqual("n1", sub_graph.node[0].name) self.assertEqual("n2", sub_graph.node[1].name) self.assertEqual("n3", sub_graph.node[2].name) self.assertEqual("n5", sub_graph.node[3].name) def testConvertVariablesToConsts(self): with ops.Graph().as_default(): variable_node = variables.Variable(1.0, name="variable_node") _ = variables.Variable(1.0, name="unused_variable_node") output_node = math_ops_lib.multiply( variable_node, 2.0, name="output_node") with session.Session() as sess: init = variables.initialize_variables([variable_node]) sess.run(init) output = sess.run(output_node) self.assertNear(2.0, output, 0.00001) variable_graph_def = sess.graph.as_graph_def() # First get the constant_graph_def when variable_names_whitelist is set, # note that if variable_names_whitelist is not set an error will be # thrown because unused_variable_node is not initialized. 
constant_graph_def = graph_util.convert_variables_to_constants( sess, variable_graph_def, ["output_node"], variable_names_whitelist=set(["variable_node"])) # Then initialize the unused variable, and get another # constant_graph_def when variable_names_whitelist is not set. sess.run(variables.global_variables_initializer()) constant_graph_def_without_variable_whitelist = ( graph_util.convert_variables_to_constants(sess, variable_graph_def, ["output_node"])) # The unused variable should be cleared so the two graphs should be # equivalent. self.assertEqual( str(constant_graph_def), str(constant_graph_def_without_variable_whitelist)) # Test variable name black list. This should result in the variable not # being a const. sess.run(variables.global_variables_initializer()) constant_graph_def_with_blacklist = ( graph_util.convert_variables_to_constants( sess, variable_graph_def, ["output_node"], variable_names_blacklist=set(["variable_node"]))) variable_node = None for node in constant_graph_def_with_blacklist.node: if node.name == "variable_node": variable_node = node self.assertIsNotNone(variable_node) self.assertEqual(variable_node.op, "VariableV2") # Now we make sure the variable is now a constant, and that the graph still # produces the expected result. with ops.Graph().as_default(): _ = importer.import_graph_def(constant_graph_def, name="") self.assertEqual(4, len(constant_graph_def.node)) for node in constant_graph_def.node: self.assertNotEqual("Variable", node.op) self.assertNotEqual("VariableV2", node.op) with session.Session() as sess: output_node = sess.graph.get_tensor_by_name("output_node:0") output = sess.run(output_node) self.assertNear(2.0, output, 0.00001) def create_node_def(self, op, name, inputs): new_node = node_def_pb2.NodeDef() new_node.op = op new_node.name = name for input_name in inputs: new_node.input.extend([input_name]) return new_node def create_constant_node_def(self, name, value, dtype, shape=None): node = self.create_node_def("Const", name, []) self.set_attr_dtype(node, "dtype", dtype) self.set_attr_tensor(node, "value", value, dtype, shape) return node def set_attr_dtype(self, node, key, value): node.attr[key].CopyFrom( attr_value_pb2.AttrValue(type=value.as_datatype_enum)) def set_attr_tensor(self, node, key, value, dtype, shape=None): node.attr[key].CopyFrom( attr_value_pb2.AttrValue(tensor=tensor_util.make_tensor_proto( value, dtype=dtype, shape=shape))) def testRemoveTrainingNodes(self): a_constant_name = "a_constant" b_constant_name = "b_constant" a_check_name = "a_check" b_check_name = "b_check" a_identity_name = "a_identity" b_identity_name = "b_identity" add_name = "add" graph_def = graph_pb2.GraphDef() a_constant = self.create_constant_node_def( a_constant_name, value=1, dtype=dtypes.float32, shape=[]) graph_def.node.extend([a_constant]) a_check_node = self.create_node_def("CheckNumerics", a_check_name, [a_constant_name]) graph_def.node.extend([a_check_node]) a_identity_node = self.create_node_def( "Identity", a_identity_name, [a_constant_name, "^" + a_check_name]) graph_def.node.extend([a_identity_node]) b_constant = self.create_constant_node_def( b_constant_name, value=1, dtype=dtypes.float32, shape=[]) graph_def.node.extend([b_constant]) b_check_node = self.create_node_def("CheckNumerics", b_check_name, [b_constant_name]) graph_def.node.extend([b_check_node]) b_identity_node = self.create_node_def( "Identity", b_identity_name, [b_constant_name, "^" + b_check_name]) graph_def.node.extend([b_identity_node]) add_node = self.create_node_def("Add", 
add_name, [a_identity_name, b_identity_name]) self.set_attr_dtype(add_node, "T", dtypes.float32) graph_def.node.extend([add_node]) expected_output = graph_pb2.GraphDef() a_constant = self.create_constant_node_def( a_constant_name, value=1, dtype=dtypes.float32, shape=[]) expected_output.node.extend([a_constant]) b_constant = self.create_constant_node_def( b_constant_name, value=1, dtype=dtypes.float32, shape=[]) expected_output.node.extend([b_constant]) add_node = self.create_node_def("Add", add_name, [a_constant_name, b_constant_name]) self.set_attr_dtype(add_node, "T", dtypes.float32) expected_output.node.extend([add_node]) output = graph_util.remove_training_nodes(graph_def) self.assertProtoEquals(expected_output, output) if __name__ == "__main__": test.main()
apache-2.0
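The record above tests tensorflow.python.framework.graph_util, including the graph-freezing helper convert_variables_to_constants. A minimal sketch of that workflow, assuming a TensorFlow 1.x installation matching this source tree, mirroring testConvertVariablesToConsts:

import tensorflow as tf
from tensorflow.python.framework import graph_util

with tf.Graph().as_default():
    v = tf.Variable(1.0, name="variable_node")
    out = tf.multiply(v, 2.0, name="output_node")
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Bake the variable's current value into a Const node in the GraphDef.
        frozen = graph_util.convert_variables_to_constants(
            sess, sess.graph.as_graph_def(), ["output_node"])

# frozen now contains no Variable or VariableV2 ops, which is exactly what the
# test asserts after re-importing the frozen GraphDef.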
ArnossArnossi/django
django/core/mail/message.py
307
17138
from __future__ import unicode_literals import mimetypes import os import random import time from email import ( charset as Charset, encoders as Encoders, generator, message_from_string, ) from email.header import Header from email.message import Message from email.mime.base import MIMEBase from email.mime.message import MIMEMessage from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.utils import formataddr, formatdate, getaddresses, parseaddr from io import BytesIO from django.conf import settings from django.core.mail.utils import DNS_NAME from django.utils import six from django.utils.encoding import force_text # Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from # some spam filters. utf8_charset = Charset.Charset('utf-8') utf8_charset.body_encoding = None # Python defaults to BASE64 # Default MIME type to use on attachments (if it is not explicitly given # and cannot be guessed). DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream' class BadHeaderError(ValueError): pass # Copied from Python 3.2+ standard library, with the following modifications: # * Used cached hostname for performance. # TODO: replace with email.utils.make_msgid(.., domain=DNS_NAME) when dropping # Python 2 (Python 2's version doesn't have domain parameter) (#23905). def make_msgid(idstring=None, domain=None): """Returns a string suitable for RFC 2822 compliant Message-ID, e.g: <20020201195627.33539.96671@nightshade.la.mastaler.com> Optional idstring if given is a string used to strengthen the uniqueness of the message id. Optional domain if given provides the portion of the message id after the '@'. It defaults to the locally defined hostname. """ timeval = time.time() utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval)) pid = os.getpid() randint = random.randrange(100000) if idstring is None: idstring = '' else: idstring = '.' + idstring if domain is None: # stdlib uses socket.getfqdn() here instead domain = DNS_NAME msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, domain) return msgid # Header names that contain structured address data (RFC #5322) ADDRESS_HEADERS = { 'from', 'sender', 'reply-to', 'to', 'cc', 'bcc', 'resent-from', 'resent-sender', 'resent-to', 'resent-cc', 'resent-bcc', } def forbid_multi_line_headers(name, val, encoding): """Forbids multi-line headers, to prevent header injection.""" encoding = encoding or settings.DEFAULT_CHARSET val = force_text(val) if '\n' in val or '\r' in val: raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name)) try: val.encode('ascii') except UnicodeEncodeError: if name.lower() in ADDRESS_HEADERS: val = ', '.join(sanitize_address(addr, encoding) for addr in getaddresses((val,))) else: val = Header(val, encoding).encode() else: if name.lower() == 'subject': val = Header(val).encode() return str(name), val def sanitize_address(addr, encoding): if not isinstance(addr, tuple): addr = parseaddr(force_text(addr)) nm, addr = addr nm = Header(nm, encoding).encode() try: addr.encode('ascii') except UnicodeEncodeError: # IDN if '@' in addr: localpart, domain = addr.split('@', 1) localpart = str(Header(localpart, encoding)) domain = domain.encode('idna').decode('ascii') addr = '@'.join([localpart, domain]) else: addr = Header(addr, encoding).encode() return formataddr((nm, addr)) class MIMEMixin(): def as_string(self, unixfrom=False, linesep='\n'): """Return the entire formatted message as a string. 
Optional `unixfrom' when True, means include the Unix From_ envelope header. This overrides the default as_string() implementation to not mangle lines that begin with 'From '. See bug #13433 for details. """ fp = six.StringIO() g = generator.Generator(fp, mangle_from_=False) if six.PY2: g.flatten(self, unixfrom=unixfrom) else: g.flatten(self, unixfrom=unixfrom, linesep=linesep) return fp.getvalue() if six.PY2: as_bytes = as_string else: def as_bytes(self, unixfrom=False, linesep='\n'): """Return the entire formatted message as bytes. Optional `unixfrom' when True, means include the Unix From_ envelope header. This overrides the default as_bytes() implementation to not mangle lines that begin with 'From '. See bug #13433 for details. """ fp = BytesIO() g = generator.BytesGenerator(fp, mangle_from_=False) g.flatten(self, unixfrom=unixfrom, linesep=linesep) return fp.getvalue() class SafeMIMEMessage(MIMEMixin, MIMEMessage): def __setitem__(self, name, val): # message/rfc822 attachments must be ASCII name, val = forbid_multi_line_headers(name, val, 'ascii') MIMEMessage.__setitem__(self, name, val) class SafeMIMEText(MIMEMixin, MIMEText): def __init__(self, _text, _subtype='plain', _charset=None): self.encoding = _charset if _charset == 'utf-8': # Unfortunately, Python < 3.5 doesn't support setting a Charset instance # as MIMEText init parameter (http://bugs.python.org/issue16324). # We do it manually and trigger re-encoding of the payload. MIMEText.__init__(self, _text, _subtype, None) del self['Content-Transfer-Encoding'] self.set_payload(_text, utf8_charset) self.replace_header('Content-Type', 'text/%s; charset="%s"' % (_subtype, _charset)) elif _charset is None: # the default value of '_charset' is 'us-ascii' on Python 2 MIMEText.__init__(self, _text, _subtype) else: MIMEText.__init__(self, _text, _subtype, _charset) def __setitem__(self, name, val): name, val = forbid_multi_line_headers(name, val, self.encoding) MIMEText.__setitem__(self, name, val) class SafeMIMEMultipart(MIMEMixin, MIMEMultipart): def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params): self.encoding = encoding MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params) def __setitem__(self, name, val): name, val = forbid_multi_line_headers(name, val, self.encoding) MIMEMultipart.__setitem__(self, name, val) class EmailMessage(object): """ A container for email information. """ content_subtype = 'plain' mixed_subtype = 'mixed' encoding = None # None => use settings default def __init__(self, subject='', body='', from_email=None, to=None, bcc=None, connection=None, attachments=None, headers=None, cc=None, reply_to=None): """ Initialize a single email message (which can be sent to multiple recipients). All strings used to create the message can be unicode strings (or UTF-8 bytestrings). The SafeMIMEText class will handle any necessary encoding conversions. 
""" if to: if isinstance(to, six.string_types): raise TypeError('"to" argument must be a list or tuple') self.to = list(to) else: self.to = [] if cc: if isinstance(cc, six.string_types): raise TypeError('"cc" argument must be a list or tuple') self.cc = list(cc) else: self.cc = [] if bcc: if isinstance(bcc, six.string_types): raise TypeError('"bcc" argument must be a list or tuple') self.bcc = list(bcc) else: self.bcc = [] if reply_to: if isinstance(reply_to, six.string_types): raise TypeError('"reply_to" argument must be a list or tuple') self.reply_to = list(reply_to) else: self.reply_to = [] self.from_email = from_email or settings.DEFAULT_FROM_EMAIL self.subject = subject self.body = body self.attachments = attachments or [] self.extra_headers = headers or {} self.connection = connection def get_connection(self, fail_silently=False): from django.core.mail import get_connection if not self.connection: self.connection = get_connection(fail_silently=fail_silently) return self.connection def message(self): encoding = self.encoding or settings.DEFAULT_CHARSET msg = SafeMIMEText(self.body, self.content_subtype, encoding) msg = self._create_message(msg) msg['Subject'] = self.subject msg['From'] = self.extra_headers.get('From', self.from_email) msg['To'] = self.extra_headers.get('To', ', '.join(map(force_text, self.to))) if self.cc: msg['Cc'] = ', '.join(map(force_text, self.cc)) if self.reply_to: msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(map(force_text, self.reply_to))) # Email header names are case-insensitive (RFC 2045), so we have to # accommodate that when doing comparisons. header_names = [key.lower() for key in self.extra_headers] if 'date' not in header_names: msg['Date'] = formatdate() if 'message-id' not in header_names: # Use cached DNS_NAME for performance msg['Message-ID'] = make_msgid(domain=DNS_NAME) for name, value in self.extra_headers.items(): if name.lower() in ('from', 'to'): # From and To are already handled continue msg[name] = value return msg def recipients(self): """ Returns a list of all recipients of the email (includes direct addressees as well as Cc and Bcc entries). """ return self.to + self.cc + self.bcc def send(self, fail_silently=False): """Sends the email message.""" if not self.recipients(): # Don't bother creating the network connection if there's nobody to # send to. return 0 return self.get_connection(fail_silently).send_messages([self]) def attach(self, filename=None, content=None, mimetype=None): """ Attaches a file with the given filename and content. The filename can be omitted and the mimetype is guessed, if not provided. If the first parameter is a MIMEBase subclass it is inserted directly into the resulting message attachments. """ if isinstance(filename, MIMEBase): assert content is None assert mimetype is None self.attachments.append(filename) else: assert content is not None self.attachments.append((filename, content, mimetype)) def attach_file(self, path, mimetype=None): """ Attaches a file from the filesystem. The mimetype will be set to the DEFAULT_ATTACHMENT_MIME_TYPE if it is not specified and cannot be guessed or (PY3 only) if it suggests text/* for a binary file. 
""" filename = os.path.basename(path) if not mimetype: mimetype, _ = mimetypes.guess_type(filename) if not mimetype: mimetype = DEFAULT_ATTACHMENT_MIME_TYPE basetype, subtype = mimetype.split('/', 1) read_mode = 'r' if basetype == 'text' else 'rb' content = None with open(path, read_mode) as f: try: content = f.read() except UnicodeDecodeError: # If mimetype suggests the file is text but it's actually # binary, read() will raise a UnicodeDecodeError on Python 3. pass # If the previous read in text mode failed, try binary mode. if content is None: with open(path, 'rb') as f: content = f.read() mimetype = DEFAULT_ATTACHMENT_MIME_TYPE self.attach(filename, content, mimetype) def _create_message(self, msg): return self._create_attachments(msg) def _create_attachments(self, msg): if self.attachments: encoding = self.encoding or settings.DEFAULT_CHARSET body_msg = msg msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding) if self.body: msg.attach(body_msg) for attachment in self.attachments: if isinstance(attachment, MIMEBase): msg.attach(attachment) else: msg.attach(self._create_attachment(*attachment)) return msg def _create_mime_attachment(self, content, mimetype): """ Converts the content, mimetype pair into a MIME attachment object. If the mimetype is message/rfc822, content may be an email.Message or EmailMessage object, as well as a str. """ basetype, subtype = mimetype.split('/', 1) if basetype == 'text': encoding = self.encoding or settings.DEFAULT_CHARSET attachment = SafeMIMEText(content, subtype, encoding) elif basetype == 'message' and subtype == 'rfc822': # Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments # must not be base64 encoded. if isinstance(content, EmailMessage): # convert content into an email.Message first content = content.message() elif not isinstance(content, Message): # For compatibility with existing code, parse the message # into an email.Message object if it is not one already. content = message_from_string(content) attachment = SafeMIMEMessage(content, subtype) else: # Encode non-text attachments with base64. attachment = MIMEBase(basetype, subtype) attachment.set_payload(content) Encoders.encode_base64(attachment) return attachment def _create_attachment(self, filename, content, mimetype=None): """ Converts the filename, content, mimetype triple into a MIME attachment object. """ if mimetype is None: mimetype, _ = mimetypes.guess_type(filename) if mimetype is None: mimetype = DEFAULT_ATTACHMENT_MIME_TYPE attachment = self._create_mime_attachment(content, mimetype) if filename: try: filename.encode('ascii') except UnicodeEncodeError: if six.PY2: filename = filename.encode('utf-8') filename = ('utf-8', '', filename) attachment.add_header('Content-Disposition', 'attachment', filename=filename) return attachment class EmailMultiAlternatives(EmailMessage): """ A version of EmailMessage that makes it easy to send multipart/alternative messages. For example, including text and HTML versions of the text is made easier. """ alternative_subtype = 'alternative' def __init__(self, subject='', body='', from_email=None, to=None, bcc=None, connection=None, attachments=None, headers=None, alternatives=None, cc=None, reply_to=None): """ Initialize a single email message (which can be sent to multiple recipients). All strings used to create the message can be unicode strings (or UTF-8 bytestrings). The SafeMIMEText class will handle any necessary encoding conversions. 
""" super(EmailMultiAlternatives, self).__init__( subject, body, from_email, to, bcc, connection, attachments, headers, cc, reply_to, ) self.alternatives = alternatives or [] def attach_alternative(self, content, mimetype): """Attach an alternative content representation.""" assert content is not None assert mimetype is not None self.alternatives.append((content, mimetype)) def _create_message(self, msg): return self._create_attachments(self._create_alternatives(msg)) def _create_alternatives(self, msg): encoding = self.encoding or settings.DEFAULT_CHARSET if self.alternatives: body_msg = msg msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding) if self.body: msg.attach(body_msg) for alternative in self.alternatives: msg.attach(self._create_mime_attachment(*alternative)) return msg
bsd-3-clause
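A short usage sketch of the mail classes defined above (a hedged illustration, not part of the module): it assumes Django settings are configured with an email backend and DEFAULT_FROM_EMAIL, and every address and file content below is a placeholder.

msg = EmailMultiAlternatives(
    subject="Weekly report",
    body="Plain-text fallback body.",
    from_email="reports@example.com",
    to=["alice@example.com"],
    reply_to=["noreply@example.com"],
)
msg.attach_alternative("<h1>Weekly report</h1>", "text/html")  # HTML alternative part
msg.attach("report.csv", "id,total\n1,42\n", "text/csv")       # ordinary attachment
msg.send(fail_silently=False)  # returns the number of messages sent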
waseem18/oh-mainline
vendor/packages/distribute/setuptools/command/bdist_wininst.py
136
1548
from distutils.command.bdist_wininst import bdist_wininst as _bdist_wininst import os, sys class bdist_wininst(_bdist_wininst): def create_exe(self, arcname, fullname, bitmap=None): _bdist_wininst.create_exe(self, arcname, fullname, bitmap) dist_files = getattr(self.distribution, 'dist_files', []) if self.target_version: installer_name = os.path.join(self.dist_dir, "%s.win32-py%s.exe" % (fullname, self.target_version)) pyversion = self.target_version # fix 2.5 bdist_wininst ignoring --target-version spec bad = ('bdist_wininst','any',installer_name) if bad in dist_files: dist_files.remove(bad) else: installer_name = os.path.join(self.dist_dir, "%s.win32.exe" % fullname) pyversion = 'any' good = ('bdist_wininst', pyversion, installer_name) if good not in dist_files: dist_files.append(good) def reinitialize_command (self, command, reinit_subcommands=0): cmd = self.distribution.reinitialize_command( command, reinit_subcommands) if command in ('install', 'install_lib'): cmd.install_lib = None # work around distutils bug return cmd def run(self): self._is_running = True try: _bdist_wininst.run(self) finally: self._is_running = False
agpl-3.0
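A hypothetical sketch of how the bdist_wininst override above gets exercised from a setup.py (package name and version are placeholders; the --target-version option is the one the override compensates for):

# setup.py
from setuptools import setup

setup(name="example", version="0.1", py_modules=["example"])

# Building a version-specific Windows installer:
#
#   python setup.py bdist_wininst --target-version=2.6
#
# create_exe() above then replaces the bogus ('bdist_wininst', 'any', ...)
# entry in dist_files with ('bdist_wininst', '2.6', <installer path>).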
yeraydiazdiaz/nonrel-blog
django/utils/importlib.py
445
1229
# Taken from Python 2.7 with permission from/by the original author. import sys def _resolve_name(name, package, level): """Return the absolute name of the module to be imported.""" if not hasattr(package, 'rindex'): raise ValueError("'package' not set to a string") dot = len(package) for x in xrange(level, 1, -1): try: dot = package.rindex('.', 0, dot) except ValueError: raise ValueError("attempted relative import beyond top-level " "package") return "%s.%s" % (package[:dot], name) def import_module(name, package=None): """Import a module. The 'package' argument is required when performing a relative import. It specifies the package to use as the anchor point from which to resolve the relative import to an absolute import. """ if name.startswith('.'): if not package: raise TypeError("relative imports require the 'package' argument") level = 0 for character in name: if character != '.': break level += 1 name = _resolve_name(name[level:], package, level) __import__(name) return sys.modules[name]
bsd-3-clause
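A brief usage sketch of import_module above: the absolute form needs no anchor, while the relative form requires the 'package' argument. The second call uses the Django locale module only as an example and assumes Django is importable.

os_path = import_module("os.path")  # absolute import
en_gb = import_module(".en_GB.formats", package="django.conf.locale")  # relative import
print(os_path.basename("/tmp/report.csv"), en_gb.DATE_FORMAT)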
tswast/google-cloud-python
bigquery_datatransfer/samples/create_scheduled_query.py
2
3661
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# To install the latest published package dependency, execute the following:
#   pip install google-cloud-bigquery-datatransfer


def sample_create_transfer_config(project_id, dataset_id, authorization_code=""):
    # [START bigquerydatatransfer_create_scheduled_query]
    from google.cloud import bigquery_datatransfer_v1
    import google.protobuf.json_format

    client = bigquery_datatransfer_v1.DataTransferServiceClient()

    # TODO(developer): Set the project_id to the project that contains the
    #     destination dataset.
    # project_id = "your-project-id"

    # TODO(developer): Set the destination dataset. The authorized user must
    #     have owner permissions on the dataset.
    # dataset_id = "your_dataset_id"

    # TODO(developer): The first time you run this sample, set the
    # authorization code to a value from the URL:
    # https://www.gstatic.com/bigquerydatatransfer/oauthz/auth?client_id=433065040935-hav5fqnc9p9cht3rqneus9115ias2kn1.apps.googleusercontent.com&scope=https://www.googleapis.com/auth/bigquery%20https://www.googleapis.com/auth/drive&redirect_uri=urn:ietf:wg:oauth:2.0:oob
    #
    # authorization_code = "_4/ABCD-EFGHIJKLMNOP-QRSTUVWXYZ"
    #
    # You can use an empty string for authorization_code in subsequent runs of
    # this code sample with the same credentials.
    #
    # authorization_code = ""

    # Use standard SQL syntax for the query.
    query_string = """
    SELECT
      CURRENT_TIMESTAMP() as current_time,
      @run_time as intended_run_time,
      @run_date as intended_run_date,
      17 as some_integer
    """

    parent = client.project_path(project_id)

    transfer_config = google.protobuf.json_format.ParseDict(
        {
            "destination_dataset_id": dataset_id,
            "display_name": "Your Scheduled Query Name",
            "data_source_id": "scheduled_query",
            "params": {
                "query": query_string,
                "destination_table_name_template": "your_table_{run_date}",
                "write_disposition": "WRITE_TRUNCATE",
                "partitioning_field": "",
            },
            "schedule": "every 24 hours",
        },
        bigquery_datatransfer_v1.types.TransferConfig(),
    )

    response = client.create_transfer_config(
        parent, transfer_config, authorization_code=authorization_code
    )

    print("Created scheduled query '{}'".format(response.name))
    # [END bigquerydatatransfer_create_scheduled_query]
    # Return the config name for testing purposes, so that it can be deleted.
    return response.name


def main():
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--project_id", type=str, default="your-project-id")
    parser.add_argument("--dataset_id", type=str, default="your_dataset_id")
    parser.add_argument("--authorization_code", type=str, default="")
    args = parser.parse_args()

    sample_create_transfer_config(args.project_id, args.dataset_id, args.authorization_code)


if __name__ == "__main__":
    main()
apache-2.0
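A minimal calling sketch for the sample above (assumptions: the file is saved as create_scheduled_query.py, google-cloud-bigquery-datatransfer is installed, and credentials with BigQuery Data Transfer permissions are configured; project and dataset names are placeholders):

from create_scheduled_query import sample_create_transfer_config

# First run: supply the OAuth authorization code obtained from the URL in the
# sample's comments; later runs with the same credentials can pass "".
config_name = sample_create_transfer_config(
    "my-gcp-project", "my_dataset", authorization_code=""
)
print(config_name)  # full transfer config resource name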
flying-sheep/aiohttp
tests/autobahn/client.py
21
1349
#!/usr/bin/env python3 import asyncio import aiohttp def client(loop, url, name): ws = yield from aiohttp.ws_connect(url + '/getCaseCount') num_tests = int((yield from ws.receive()).data) print('running %d cases' % num_tests) yield from ws.close() for i in range(1, num_tests + 1): print('running test case:', i) text_url = url + '/runCase?case=%d&agent=%s' % (i, name) ws = yield from aiohttp.ws_connect(text_url) while True: msg = yield from ws.receive() if msg.tp == aiohttp.MsgType.text: ws.send_str(msg.data) elif msg.tp == aiohttp.MsgType.binary: ws.send_bytes(msg.data) elif msg.tp == aiohttp.MsgType.close: yield from ws.close() break else: break url = url + '/updateReports?agent=%s' % name ws = yield from aiohttp.ws_connect(url) yield from ws.close() def run(loop, url, name): try: yield from client(loop, url, name) except: import traceback traceback.print_exc() if __name__ == '__main__': loop = asyncio.get_event_loop() try: loop.run_until_complete(run(loop, 'http://localhost:9001', 'aiohttp')) except KeyboardInterrupt: pass finally: loop.close()
apache-2.0
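A stripped-down sketch of the same pre-async/await aiohttp websocket calls used above (assumptions: an echo server is listening at ws://localhost:9001 and the installed aiohttp version still exposes aiohttp.ws_connect):

import asyncio
import aiohttp


def echo_once(url):
    ws = yield from aiohttp.ws_connect(url)
    ws.send_str('hello')            # send one text frame
    msg = yield from ws.receive()   # wait for the echoed frame
    print(msg.data)
    yield from ws.close()


loop = asyncio.get_event_loop()
loop.run_until_complete(echo_once('ws://localhost:9001'))
loop.close()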
boto/botoflow
botoflow/decider/activity_future.py
2
4126
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://aws.amazon.com/apache2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. from ..core import BaseFuture, AnyFuture, AllFuture, CancelledError from ..core.async_task import AsyncTask class ActivityFuture(BaseFuture): def __init__(self, future, activity_task_handler, activity_id): """ :param future: :type future: Future :param activity_task_handler: :type activity_task_handler: awsflow.decider.activity_task_handler.ActivityTaskHandler :param activity_id: :type activity_id: str :return: """ super(ActivityFuture, self).__init__() self._future = future self._activity_task_handler = activity_task_handler self._activity_id = activity_id self._cancellation_future = None # may be set to a Future if cancel was requested task = AsyncTask(self._future_callback, (future,), name=self._future_callback.__name__) task.cancellable = False future.add_task(task) def _future_callback(self, future): if self.done(): return if future.exception() is not None: self.set_exception(future.exception(), future.traceback()) else: self.set_result(future.result()) if self._cancellation_future is not None: self._cancellation_future.set_result(None) def cancel(self): """Requests cancellation of activity. :return: Cancellation future :rtype: awsflow.Future """ if not super(ActivityFuture, self).cancelled(): self._cancellation_future = self._activity_task_handler.request_cancel_activity_task( self, self._activity_id) return self._cancellation_future def cancelled(self): """ Returns True if activity was cancelled :return: """ if isinstance(self._exception, CancelledError): return True return False def exception(self): """ Returns the exception if available, or a ValueError if a result is not yet set """ if self.done(): return self._exception else: raise ValueError("Exception was not yet set") def traceback(self): if self.done(): return self._traceback else: raise ValueError("Exception is not yet set") def result(self): """ Return the result :raises Exception: Any exception raised from the call will be raised. :raises ValueError: if a result was not yet set """ if self.done(): return self._get_result() else: raise ValueError("Result is not yet set") def __or__(self, other): if isinstance(other, BaseFuture): return AnyFuture(self, other) elif isinstance(other, AnyFuture): other.add_future(self) return other raise TypeError("unsupported operand type(s) for " "|: '%s' and '%s'" % (self.__class__.__name__, other.__class__.__name__)) def __and__(self, other): if isinstance(other, BaseFuture): return AllFuture(self, other) elif isinstance(other, AllFuture): other.add_future(self) return other raise TypeError("unsupported operand type(s) for " "&: '%s' and '%s'" % (self.__class__.__name__, other.__class__.__name__))
apache-2.0
john-38787364/antifier
tacx_trainer_debug.py
1
2170
import usb.core, time, binascii, sys from datetime import datetime import os import trainer dev_trainer = trainer.get_trainer() if not dev_trainer: print "Could not find trainer" sys.exit() trainer.initialise_trainer(dev_trainer) eventcounter=1 reslist=[-3251, -1625, 0, 1625, 3251, 4876, 6502, 8127, 9752, 11378, 13003]#1625/1626 increments log_file=open('tacx_trainer_debug.log','w') log_file.write(hex(trainer.trainer_type)+"\n") resistance= -3278 print "KEEP CYCLING AT A MODERATE PACE UNTIL BLACK WINDOW DISAPPEARS" try: while True: last_measured_time = time.time() * 1000 data = dev_trainer.read(0x82,64) #get data from device log_file.write(datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]+" TRAINER RX DATA "+binascii.hexlify(data)+"\n") #increment resistance resistance += 5 #add 5 on each time nearest_validated_resistance = min(reslist, key=lambda x:abs(x-resistance)) if nearest_validated_resistance - resistance < 5 and nearest_validated_resistance - resistance > 0: resistance = nearest_validated_resistance if resistance >= 13004: break if resistance < 0: r = (256*256) + resistance else: r= resistance r6=int(r)>>8 & 0xff #byte6 r5=int(r) & 0xff #byte 5 #echo pedal cadence back to trainer if len(data) > 40: pedecho = data[42] else: pedecho = 0 byte_ints = [0x01, 0x08, 0x01, 0x00, r5, r6, pedecho, 0x00 ,0x02, 0x52, 0x10, 0x04] m=datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]+" TRAINER TX DATA "+str(resistance)+' {}'.format(' '.join(hex(x)[2:].zfill(2) for x in byte_ints)) print m log_file.write(m+"\n") byte_str = "".join(chr(n) for n in byte_ints) dev_trainer.write(0x02,byte_str)#send data to device #add wait so we only send every 250ms time_to_process_loop = time.time() * 1000 - last_measured_time sleep_time = 0.25 - (time_to_process_loop)/1000 if sleep_time < 0: sleep_time = 0 time.sleep(sleep_time) eventcounter += 1 except KeyboardInterrupt: # interrupt power data sending with ctrl c, make sure script continues to reset device pass log_file.close() print "COMPLETE"
mit
aniketpuranik/pynet_test
DJANGOX/djproject/djproject/settings.py
1
2450
""" Django settings for djproject project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'j(el9)5h37x7wy(4!2)at^=z9nalr=j-r!lcu*-@^er_g&&t$e' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True TEMPLATE_DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'net_system', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'djproject.urls' WSGI_APPLICATION = 'djproject.wsgi.application' # Django admin requires the app_directories.Loader if DEBUG == True: TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ) else: TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', ) TEMPLATE_DIRS = ( "/home/apuranik/DJANGOX/templates", ) # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.net_system'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'America/Los_Angeles' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/'
apache-2.0
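A small sketch of loading this settings module from a standalone script (assumptions: the djproject package is on the Python path and Django >= 1.7, which this settings file targets):

import os
import django

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djproject.settings")
django.setup()  # loads INSTALLED_APPS, including the net_system app

from django.conf import settings
print(settings.TIME_ZONE)  # -> "America/Los_Angeles"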
ormnv/os_final_project
django/conf/locale/en_GB/formats.py
108
1861
# -*- encoding: utf-8 -*- # This file is distributed under the same license as the Django package. # # The *_FORMAT strings use the Django date format syntax, # see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date DATE_FORMAT = 'j M Y' # '25 Oct 2006' TIME_FORMAT = 'P' # '2:30 pm' DATETIME_FORMAT = 'j M Y, P' # '25 Oct 2006, 2:30 pm' YEAR_MONTH_FORMAT = 'F Y' # 'October 2006' MONTH_DAY_FORMAT = 'j F' # '25 October' SHORT_DATE_FORMAT = 'd/m/Y' # '25/10/2006' SHORT_DATETIME_FORMAT = 'd/m/Y P' # '25/10/2006 2:30 pm' FIRST_DAY_OF_WEEK = 0 # Sunday # The *_INPUT_FORMATS strings use the Python strftime format syntax, # see http://docs.python.org/library/datetime.html#strftime-strptime-behavior DATE_INPUT_FORMATS = ( '%d/%m/%Y', '%d/%m/%y', # '25/10/2006', '25/10/06' # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006' # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006' # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006' # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006' ) DATETIME_INPUT_FORMATS = ( '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59' '%Y-%m-%d %H:%M', # '2006-10-25 14:30' '%Y-%m-%d', # '2006-10-25' '%d/%m/%Y %H:%M:%S', # '25/10/2006 14:30:59' '%d/%m/%Y %H:%M', # '25/10/2006 14:30' '%d/%m/%Y', # '25/10/2006' '%d/%m/%y %H:%M:%S', # '25/10/06 14:30:59' '%d/%m/%y %H:%M', # '25/10/06 14:30' '%d/%m/%y', # '25/10/06' ) DECIMAL_SEPARATOR = '.' THOUSAND_SEPARATOR = ',' NUMBER_GROUPING = 3
bsd-3-clause
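A standalone illustration of the first pattern in DATE_INPUT_FORMATS above, using only the standard library (the sample date string comes from the module's own comments):

from datetime import datetime

value = datetime.strptime("25/10/2006", "%d/%m/%Y")
print(value.date())  # -> 2006-10-25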
sshnaidm/ru
script.video.F4mProxy/lib/flvlib/tags.py
99
11473
import os import struct import logging from primitives import * from constants import * from astypes import MalformedFLV from astypes import get_script_data_variable, make_script_data_variable log = logging.getLogger('flvlib.tags') STRICT_PARSING = False def strict_parser(): return globals()['STRICT_PARSING'] class EndOfTags(Exception): pass def ensure(value, expected, error_msg): if value == expected: return if strict_parser(): raise MalformedFLV(error_msg) else: log.warning('Skipping non-conformant value in FLV file') class Tag(object): def __init__(self, parent_flv, f): self.f = f self.parent_flv = parent_flv self.offset = None self.size = None self.timestamp = None def parse(self): f = self.f self.offset = f.tell() - 1 # DataSize self.size = get_ui24(f) # Timestamp + TimestampExtended self.timestamp = get_si32_extended(f) if self.timestamp < 0: log.warning("The tag at offset 0x%08X has negative timestamp: %d", self.offset, self.timestamp) # StreamID stream_id = get_ui24(f) ensure(stream_id, 0, "StreamID non zero: 0x%06X" % stream_id) # The rest gets parsed in the subclass, it should move f to the # correct position to read PreviousTagSize self.parse_tag_content() previous_tag_size = get_ui32(f) ensure(previous_tag_size, self.size + 11, "PreviousTagSize of %d (0x%08X) " "not equal to actual tag size of %d (0x%08X)" % (previous_tag_size, previous_tag_size, self.size + 11, self.size + 11)) def parse_tag_content(self): # By default just seek past the tag content self.f.seek(self.size, os.SEEK_CUR) class AudioTag(Tag): def __init__(self, parent_flv, f): Tag.__init__(self, parent_flv, f) self.sound_format = None self.sound_rate = None self.sound_size = None self.sound_type = None self.aac_packet_type = None # always None for non-AAC tags def parse_tag_content(self): f = self.f sound_flags = get_ui8(f) read_bytes = 1 self.sound_format = (sound_flags & 0xF0) >> 4 self.sound_rate = (sound_flags & 0xC) >> 2 self.sound_size = (sound_flags & 0x2) >> 1 self.sound_type = sound_flags & 0x1 if self.sound_format == SOUND_FORMAT_AAC: # AAC packets can be sequence headers or raw data. # The former contain codec information needed by the decoder to be # able to interpret the rest of the data. 
self.aac_packet_type = get_ui8(f) read_bytes += 1 # AAC always has sampling rate of 44 kHz ensure(self.sound_rate, SOUND_RATE_44_KHZ, "AAC sound format with incorrect sound rate: %d" % self.sound_rate) # AAC is always stereo ensure(self.sound_type, SOUND_TYPE_STEREO, "AAC sound format with incorrect sound type: %d" % self.sound_type) if strict_parser(): try: sound_format_to_string[self.sound_format] except KeyError: raise MalformedFLV("Invalid sound format: %d", self.sound_format) try: (self.aac_packet_type and aac_packet_type_to_string[self.aac_packet_type]) except KeyError: raise MalformedFLV("Invalid AAC packet type: %d", self.aac_packet_type) f.seek(self.size - read_bytes, os.SEEK_CUR) def __repr__(self): if self.offset is None: return "<AudioTag unparsed>" elif self.aac_packet_type is None: return ("<AudioTag at offset 0x%08X, time %d, size %d, %s>" % (self.offset, self.timestamp, self.size, sound_format_to_string.get(self.sound_format, '?'))) else: return ("<AudioTag at offset 0x%08X, time %d, size %d, %s, %s>" % (self.offset, self.timestamp, self.size, sound_format_to_string.get(self.sound_format, '?'), aac_packet_type_to_string.get(self.aac_packet_type, '?'))) class VideoTag(Tag): def __init__(self, parent_flv, f): Tag.__init__(self, parent_flv, f) self.frame_type = None self.codec_id = None self.h264_packet_type = None # Always None for non-H.264 tags def parse_tag_content(self): f = self.f video_flags = get_ui8(f) read_bytes = 1 self.frame_type = (video_flags & 0xF0) >> 4 self.codec_id = video_flags & 0xF if self.codec_id == CODEC_ID_H264: # H.264 packets can be sequence headers, NAL units or sequence # ends. self.h264_packet_type = get_ui8(f) read_bytes += 1 if strict_parser(): try: frame_type_to_string[self.frame_type] except KeyError: raise MalformedFLV("Invalid frame type: %d", self.frame_type) try: codec_id_to_string[self.codec_id] except KeyError: raise MalformedFLV("Invalid codec ID: %d", self.codec_id) try: (self.h264_packet_type and h264_packet_type_to_string[self.h264_packet_type]) except KeyError: raise MalformedFLV("Invalid H.264 packet type: %d", self.h264_packet_type) f.seek(self.size - read_bytes, os.SEEK_CUR) def __repr__(self): if self.offset is None: return "<VideoTag unparsed>" elif self.h264_packet_type is None: return ("<VideoTag at offset 0x%08X, time %d, size %d, %s (%s)>" % (self.offset, self.timestamp, self.size, codec_id_to_string.get(self.codec_id, '?'), frame_type_to_string.get(self.frame_type, '?'))) else: return ("<VideoTag at offset 0x%08X, " "time %d, size %d, %s (%s), %s>" % (self.offset, self.timestamp, self.size, codec_id_to_string.get(self.codec_id, '?'), frame_type_to_string.get(self.frame_type, '?'), h264_packet_type_to_string.get( self.h264_packet_type, '?'))) class ScriptTag(Tag): def __init__(self, parent_flv, f): Tag.__init__(self, parent_flv, f) self.name = None self.variable = None def parse_tag_content(self): f = self.f # Here there's always a byte with the value of 0x02, # which means "string", although the spec says NOTHING # about it.. value_type = get_ui8(f) ensure(value_type, 2, "The name of a script tag is not a string") # Need to pass the tag end offset, because apparently YouTube # doesn't give a *shit* about the FLV spec and just happily # ends the onMetaData tag after self.size bytes, instead of # ending it with the *required* 0x09 marker. Bastards! 
if strict_parser(): # If we're strict, just don't pass this info tag_end = None else: # 11 = tag type (1) + data size (3) + timestamp (4) + stream id (3) tag_end = self.offset + 11 + self.size log.debug("max offset is 0x%08X", tag_end) self.name, self.variable = \ get_script_data_variable(f, max_offset=tag_end) log.debug("A script tag with a name of %s and value of %r", self.name, self.variable) def __repr__(self): if self.offset is None: return "<ScriptTag unparsed>" else: return ("<ScriptTag %s at offset 0x%08X, time %d, size %d>" % (self.name, self.offset, self.timestamp, self.size)) tag_to_class = { TAG_TYPE_AUDIO: AudioTag, TAG_TYPE_VIDEO: VideoTag, TAG_TYPE_SCRIPT: ScriptTag } class FLV(object): def __init__(self, f): self.f = f self.version = None self.has_audio = None self.has_video = None self.tags = [] def parse_header(self): f = self.f f.seek(0) # FLV header header = f.read(3) if len(header) < 3: raise MalformedFLV("The file is shorter than 3 bytes") # Do this irrelevant of STRICT_PARSING, to catch bogus files if header != "FLV": raise MalformedFLV("File signature is incorrect: 0x%X 0x%X 0x%X" % struct.unpack("3B", header)) # File version self.version = get_ui8(f) log.debug("File version is %d", self.version) # TypeFlags flags = get_ui8(f) ensure(flags & 0xF8, 0, "First TypeFlagsReserved field non zero: 0x%X" % (flags & 0xF8)) ensure(flags & 0x2, 0, "Second TypeFlagsReserved field non zero: 0x%X" % (flags & 0x2)) self.has_audio = False self.has_video = False if flags & 0x4: self.has_audio = True if flags & 0x1: self.has_video = True log.debug("File %s audio", (self.has_audio and "has") or "does not have") log.debug("File %s video", (self.has_video and "has") or "does not have") header_size = get_ui32(f) log.debug("Header size is %d bytes", header_size) f.seek(header_size) tag_0_size = get_ui32(f) ensure(tag_0_size, 0, "PreviousTagSize0 non zero: 0x%08X" % tag_0_size) def iter_tags(self): self.parse_header() try: while True: tag = self.get_next_tag() yield tag except EndOfTags: pass def read_tags(self): self.tags = list(self.iter_tags()) def get_next_tag(self): f = self.f try: tag_type = get_ui8(f) except EndOfFile: raise EndOfTags tag_klass = self.tag_type_to_class(tag_type) tag = tag_klass(self, f) tag.parse() return tag def tag_type_to_class(self, tag_type): try: return tag_to_class[tag_type] except KeyError: raise MalformedFLV("Invalid tag type: %d", tag_type) def create_flv_tag(type, data, timestamp=0): tag_type = struct.pack("B", type) timestamp = make_si32_extended(timestamp) stream_id = make_ui24(0) data_size = len(data) tag_size = data_size + 11 return ''.join([tag_type, make_ui24(data_size), timestamp, stream_id, data, make_ui32(tag_size)]) def create_script_tag(name, data, timestamp=0): payload = make_ui8(2) + make_script_data_variable(name, data) return create_flv_tag(TAG_TYPE_SCRIPT, payload, timestamp) def create_flv_header(has_audio=True, has_video=True): type_flags = 0 if has_video: type_flags = type_flags | 0x1 if has_audio: type_flags = type_flags | 0x4 return ''.join(['FLV', make_ui8(1), make_ui8(type_flags), make_ui32(9), make_ui32(0)])
gpl-2.0
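A minimal usage sketch of the parser above (assumptions: the flvlib package is installed so the import resolves, and a local file named movie.flv exists):

import logging

from flvlib.tags import FLV

logging.basicConfig(level=logging.WARNING)

with open("movie.flv", "rb") as f:
    flv = FLV(f)
    for tag in flv.iter_tags():  # parses the header, then yields each tag
        print(tag)               # AudioTag / VideoTag / ScriptTag reprs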
jreback/pandas
pandas/core/resample.py
1
64488
import copy from datetime import timedelta from textwrap import dedent from typing import Dict, Optional, Union, no_type_check import numpy as np from pandas._libs import lib from pandas._libs.tslibs import ( IncompatibleFrequency, NaT, Period, Timedelta, Timestamp, to_offset, ) from pandas._typing import TimedeltaConvertibleTypes, TimestampConvertibleTypes from pandas.compat.numpy import function as nv from pandas.errors import AbstractMethodError from pandas.util._decorators import Appender, Substitution, doc from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries from pandas.core.aggregation import aggregate import pandas.core.algorithms as algos from pandas.core.base import DataError from pandas.core.generic import NDFrame, _shared_docs from pandas.core.groupby.base import GotItemMixin, ShallowMixin from pandas.core.groupby.generic import SeriesGroupBy from pandas.core.groupby.groupby import ( BaseGroupBy, GroupBy, _pipe_template, get_groupby, ) from pandas.core.groupby.grouper import Grouper from pandas.core.groupby.ops import BinGrouper from pandas.core.indexes.api import Index from pandas.core.indexes.datetimes import DatetimeIndex, date_range from pandas.core.indexes.period import PeriodIndex, period_range from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range from pandas.tseries.frequencies import is_subperiod, is_superperiod from pandas.tseries.offsets import DateOffset, Day, Nano, Tick _shared_docs_kwargs: Dict[str, str] = {} class Resampler(BaseGroupBy, ShallowMixin): """ Class for resampling datetimelike data, a groupby-like operation. See aggregate, transform, and apply functions on this object. It's easiest to use obj.resample(...) to use Resampler. Parameters ---------- obj : pandas object groupby : a TimeGrouper object axis : int, default 0 kind : str or None 'period', 'timestamp' to override default index treatment Returns ------- a Resampler of the appropriate type Notes ----- After resampling, see aggregate, apply, and transform functions. """ # to the groupby descriptor _attributes = [ "freq", "axis", "closed", "label", "convention", "loffset", "kind", "origin", "offset", ] def __init__(self, obj, groupby=None, axis=0, kind=None, **kwargs): self.groupby = groupby self.keys = None self.sort = True self.axis = axis self.kind = kind self.squeeze = False self.group_keys = True self.as_index = True self.exclusions = set() self.binner = None # pandas\core\resample.py:96: error: Incompatible types in assignment # (expression has type "None", variable has type "BaseGrouper") # [assignment] self.grouper = None # type: ignore[assignment] if self.groupby is not None: self.groupby._set_grouper(self._convert_obj(obj), sort=True) def __str__(self) -> str: """ Provide a nice str repr of our rolling object. """ attrs = ( f"{k}={getattr(self.groupby, k)}" for k in self._attributes if getattr(self.groupby, k, None) is not None ) return f"{type(self).__name__} [{', '.join(attrs)}]" def __getattr__(self, attr: str): if attr in self._internal_names_set: return object.__getattribute__(self, attr) if attr in self._attributes: return getattr(self.groupby, attr) if attr in self.obj: return self[attr] return object.__getattribute__(self, attr) def __iter__(self): """ Resampler iterator. Returns ------- Generator yielding sequence of (name, subsetted object) for each group. See Also -------- GroupBy.__iter__ : Generator yielding sequence for each group. 
""" self._set_binner() return super().__iter__() @property def obj(self): return self.groupby.obj @property def ax(self): return self.groupby.ax @property def _typ(self) -> str: """ Masquerade for compat as a Series or a DataFrame. """ if isinstance(self._selected_obj, ABCSeries): return "series" return "dataframe" @property def _from_selection(self) -> bool: """ Is the resampling from a DataFrame column or MultiIndex level. """ # upsampling and PeriodIndex resampling do not work # with selection, this state used to catch and raise an error return self.groupby is not None and ( self.groupby.key is not None or self.groupby.level is not None ) def _convert_obj(self, obj): """ Provide any conversions for the object in order to correctly handle. Parameters ---------- obj : the object to be resampled Returns ------- obj : converted object """ obj = obj._consolidate() return obj def _get_binner_for_time(self): raise AbstractMethodError(self) def _set_binner(self): """ Setup our binners. Cache these as we are an immutable object """ if self.binner is None: self.binner, self.grouper = self._get_binner() def _get_binner(self): """ Create the BinGrouper, assume that self.set_grouper(obj) has already been called. """ binner, bins, binlabels = self._get_binner_for_time() assert len(bins) == len(binlabels) bin_grouper = BinGrouper(bins, binlabels, indexer=self.groupby.indexer) return binner, bin_grouper def _assure_grouper(self): """ Make sure that we are creating our binner & grouper. """ self._set_binner() @Substitution( klass="Resampler", examples=""" >>> df = pd.DataFrame({'A': [1, 2, 3, 4]}, ... index=pd.date_range('2012-08-02', periods=4)) >>> df A 2012-08-02 1 2012-08-03 2 2012-08-04 3 2012-08-05 4 To get the difference between each 2-day period's maximum and minimum value in one pass, you can do >>> df.resample('2D').pipe(lambda x: x.max() - x.min()) A 2012-08-02 1 2012-08-04 1""", ) @Appender(_pipe_template) def pipe(self, func, *args, **kwargs): return super().pipe(func, *args, **kwargs) _agg_see_also_doc = dedent( """ See Also -------- DataFrame.groupby.aggregate : Aggregate using callable, string, dict, or list of string/callables. DataFrame.resample.transform : Transforms the Series on each group based on the given function. DataFrame.aggregate: Aggregate using one or more operations over the specified axis. 
""" ) _agg_examples_doc = dedent( """ Examples -------- >>> s = pd.Series([1,2,3,4,5], index=pd.date_range('20130101', periods=5,freq='s')) 2013-01-01 00:00:00 1 2013-01-01 00:00:01 2 2013-01-01 00:00:02 3 2013-01-01 00:00:03 4 2013-01-01 00:00:04 5 Freq: S, dtype: int64 >>> r = s.resample('2s') DatetimeIndexResampler [freq=<2 * Seconds>, axis=0, closed=left, label=left, convention=start] >>> r.agg(np.sum) 2013-01-01 00:00:00 3 2013-01-01 00:00:02 7 2013-01-01 00:00:04 5 Freq: 2S, dtype: int64 >>> r.agg(['sum','mean','max']) sum mean max 2013-01-01 00:00:00 3 1.5 2 2013-01-01 00:00:02 7 3.5 4 2013-01-01 00:00:04 5 5.0 5 >>> r.agg({'result' : lambda x: x.mean() / x.std(), 'total' : np.sum}) total result 2013-01-01 00:00:00 3 2.121320 2013-01-01 00:00:02 7 4.949747 2013-01-01 00:00:04 5 NaN """ ) @doc( _shared_docs["aggregate"], see_also=_agg_see_also_doc, examples=_agg_examples_doc, klass="DataFrame", axis="", ) def aggregate(self, func, *args, **kwargs): self._set_binner() result, how = aggregate(self, func, *args, **kwargs) if result is None: how = func grouper = None result = self._groupby_and_aggregate(how, grouper, *args, **kwargs) result = self._apply_loffset(result) return result agg = aggregate apply = aggregate def transform(self, arg, *args, **kwargs): """ Call function producing a like-indexed Series on each group and return a Series with the transformed values. Parameters ---------- arg : function To apply to each group. Should return a Series with the same index. Returns ------- transformed : Series Examples -------- >>> resampled.transform(lambda x: (x - x.mean()) / x.std()) """ return self._selected_obj.groupby(self.groupby).transform(arg, *args, **kwargs) def _downsample(self, f): raise AbstractMethodError(self) def _upsample(self, f, limit=None, fill_value=None): raise AbstractMethodError(self) def _gotitem(self, key, ndim: int, subset=None): """ Sub-classes to define. Return a sliced object. Parameters ---------- key : string / list of selections ndim : {1, 2} requested ndim of result subset : object, default None subset to act on """ self._set_binner() grouper = self.grouper if subset is None: subset = self.obj grouped = get_groupby(subset, by=None, grouper=grouper, axis=self.axis) # try the key selection try: return grouped[key] except KeyError: return grouped def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs): """ Re-evaluate the obj with a groupby aggregation. """ if grouper is None: self._set_binner() grouper = self.grouper obj = self._selected_obj grouped = get_groupby(obj, by=None, grouper=grouper, axis=self.axis) try: if isinstance(obj, ABCDataFrame) and callable(how): # Check if the function is reducing or not. 
result = grouped._aggregate_item_by_item(how, *args, **kwargs) else: result = grouped.aggregate(how, *args, **kwargs) except (DataError, AttributeError, KeyError): # we have a non-reducing function; try to evaluate # alternatively we want to evaluate only a column of the input result = grouped.apply(how, *args, **kwargs) except ValueError as err: if "Must produce aggregated value" in str(err): # raised in _aggregate_named pass elif "len(index) != len(labels)" in str(err): # raised in libgroupby validation pass elif "No objects to concatenate" in str(err): # raised in concat call # In tests this is reached via either # _apply_to_column_groupbys (ohlc) or DataFrameGroupBy.nunique pass else: raise # we have a non-reducing function # try to evaluate result = grouped.apply(how, *args, **kwargs) result = self._apply_loffset(result) return self._wrap_result(result) def _apply_loffset(self, result): """ If loffset is set, offset the result index. This is NOT an idempotent routine, it will be applied exactly once to the result. Parameters ---------- result : Series or DataFrame the result of resample """ # pandas\core\resample.py:409: error: Cannot determine type of # 'loffset' [has-type] needs_offset = ( isinstance( self.loffset, # type: ignore[has-type] (DateOffset, timedelta, np.timedelta64), ) and isinstance(result.index, DatetimeIndex) and len(result.index) > 0 ) if needs_offset: # pandas\core\resample.py:415: error: Cannot determine type of # 'loffset' [has-type] result.index = result.index + self.loffset # type: ignore[has-type] self.loffset = None return result def _get_resampler_for_grouping(self, groupby, **kwargs): """ Return the correct class for resampling with groupby. """ return self._resampler_for_grouping(self, groupby=groupby, **kwargs) def _wrap_result(self, result): """ Potentially wrap any results. """ if isinstance(result, ABCSeries) and self._selection is not None: result.name = self._selection if isinstance(result, ABCSeries) and result.empty: obj = self.obj result.index = _asfreq_compat(obj.index, freq=self.freq) result.name = getattr(obj, "name", None) return result def pad(self, limit=None): """ Forward fill the values. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- An upsampled Series. See Also -------- Series.fillna: Fill NA/NaN values using the specified method. DataFrame.fillna: Fill NA/NaN values using the specified method. """ return self._upsample("pad", limit=limit) ffill = pad def nearest(self, limit=None): """ Resample by using the nearest value. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). The `nearest` method will replace ``NaN`` values that appeared in the resampled data with the value from the nearest member of the sequence, based on the index value. Missing values that existed in the original data will not be modified. If `limit` is given, fill only this many values in each direction for each of the original values. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series or DataFrame An upsampled Series or DataFrame with ``NaN`` values filled with their nearest value. See Also -------- backfill : Backward fill the new missing values in the resampled data. pad : Forward fill ``NaN`` values. Examples -------- >>> s = pd.Series([1, 2], ... index=pd.date_range('20180101', ... periods=2, ... 
freq='1h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 Freq: H, dtype: int64 >>> s.resample('15min').nearest() 2018-01-01 00:00:00 1 2018-01-01 00:15:00 1 2018-01-01 00:30:00 2 2018-01-01 00:45:00 2 2018-01-01 01:00:00 2 Freq: 15T, dtype: int64 Limit the number of upsampled values imputed by the nearest: >>> s.resample('15min').nearest(limit=1) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 Freq: 15T, dtype: float64 """ return self._upsample("nearest", limit=limit) def backfill(self, limit=None): """ Backward fill the new missing values in the resampled data. In statistics, imputation is the process of replacing missing data with substituted values [1]_. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). The backward fill will replace NaN values that appeared in the resampled data with the next value in the original sequence. Missing values that existed in the original data will not be modified. Parameters ---------- limit : int, optional Limit of how many values to fill. Returns ------- Series, DataFrame An upsampled Series or DataFrame with backward filled NaN values. See Also -------- bfill : Alias of backfill. fillna : Fill NaN values using the specified method, which can be 'backfill'. nearest : Fill NaN values with nearest neighbor starting from center. pad : Forward fill NaN values. Series.fillna : Fill NaN values in the Series using the specified method, which can be 'backfill'. DataFrame.fillna : Fill NaN values in the DataFrame using the specified method, which can be 'backfill'. References ---------- .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics) Examples -------- Resampling a Series: >>> s = pd.Series([1, 2, 3], ... index=pd.date_range('20180101', periods=3, freq='h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 2018-01-01 02:00:00 3 Freq: H, dtype: int64 >>> s.resample('30min').backfill() 2018-01-01 00:00:00 1 2018-01-01 00:30:00 2 2018-01-01 01:00:00 2 2018-01-01 01:30:00 3 2018-01-01 02:00:00 3 Freq: 30T, dtype: int64 >>> s.resample('15min').backfill(limit=2) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 NaN 2018-01-01 00:30:00 2.0 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 2018-01-01 01:15:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 01:45:00 3.0 2018-01-01 02:00:00 3.0 Freq: 15T, dtype: float64 Resampling a DataFrame that has missing values: >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]}, ... index=pd.date_range('20180101', periods=3, ... freq='h')) >>> df a b 2018-01-01 00:00:00 2.0 1 2018-01-01 01:00:00 NaN 3 2018-01-01 02:00:00 6.0 5 >>> df.resample('30min').backfill() a b 2018-01-01 00:00:00 2.0 1 2018-01-01 00:30:00 NaN 3 2018-01-01 01:00:00 NaN 3 2018-01-01 01:30:00 6.0 5 2018-01-01 02:00:00 6.0 5 >>> df.resample('15min').backfill(limit=2) a b 2018-01-01 00:00:00 2.0 1.0 2018-01-01 00:15:00 NaN NaN 2018-01-01 00:30:00 NaN 3.0 2018-01-01 00:45:00 NaN 3.0 2018-01-01 01:00:00 NaN 3.0 2018-01-01 01:15:00 NaN NaN 2018-01-01 01:30:00 6.0 5.0 2018-01-01 01:45:00 6.0 5.0 2018-01-01 02:00:00 6.0 5.0 """ return self._upsample("backfill", limit=limit) bfill = backfill def fillna(self, method, limit=None): """ Fill missing values introduced by upsampling. In statistics, imputation is the process of replacing missing data with substituted values [1]_. When resampling data, missing values may appear (e.g., when the resampling frequency is higher than the original frequency). 
Missing values that existed in the original data will not be modified. Parameters ---------- method : {'pad', 'backfill', 'ffill', 'bfill', 'nearest'} Method to use for filling holes in resampled data * 'pad' or 'ffill': use previous valid observation to fill gap (forward fill). * 'backfill' or 'bfill': use next valid observation to fill gap. * 'nearest': use nearest valid observation to fill gap. limit : int, optional Limit of how many consecutive missing values to fill. Returns ------- Series or DataFrame An upsampled Series or DataFrame with missing values filled. See Also -------- backfill : Backward fill NaN values in the resampled data. pad : Forward fill NaN values in the resampled data. nearest : Fill NaN values in the resampled data with nearest neighbor starting from center. interpolate : Fill NaN values using interpolation. Series.fillna : Fill NaN values in the Series using the specified method, which can be 'bfill' and 'ffill'. DataFrame.fillna : Fill NaN values in the DataFrame using the specified method, which can be 'bfill' and 'ffill'. References ---------- .. [1] https://en.wikipedia.org/wiki/Imputation_(statistics) Examples -------- Resampling a Series: >>> s = pd.Series([1, 2, 3], ... index=pd.date_range('20180101', periods=3, freq='h')) >>> s 2018-01-01 00:00:00 1 2018-01-01 01:00:00 2 2018-01-01 02:00:00 3 Freq: H, dtype: int64 Without filling the missing values you get: >>> s.resample("30min").asfreq() 2018-01-01 00:00:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 01:00:00 2.0 2018-01-01 01:30:00 NaN 2018-01-01 02:00:00 3.0 Freq: 30T, dtype: float64 >>> s.resample('30min').fillna("backfill") 2018-01-01 00:00:00 1 2018-01-01 00:30:00 2 2018-01-01 01:00:00 2 2018-01-01 01:30:00 3 2018-01-01 02:00:00 3 Freq: 30T, dtype: int64 >>> s.resample('15min').fillna("backfill", limit=2) 2018-01-01 00:00:00 1.0 2018-01-01 00:15:00 NaN 2018-01-01 00:30:00 2.0 2018-01-01 00:45:00 2.0 2018-01-01 01:00:00 2.0 2018-01-01 01:15:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 01:45:00 3.0 2018-01-01 02:00:00 3.0 Freq: 15T, dtype: float64 >>> s.resample('30min').fillna("pad") 2018-01-01 00:00:00 1 2018-01-01 00:30:00 1 2018-01-01 01:00:00 2 2018-01-01 01:30:00 2 2018-01-01 02:00:00 3 Freq: 30T, dtype: int64 >>> s.resample('30min').fillna("nearest") 2018-01-01 00:00:00 1 2018-01-01 00:30:00 2 2018-01-01 01:00:00 2 2018-01-01 01:30:00 3 2018-01-01 02:00:00 3 Freq: 30T, dtype: int64 Missing values present before the upsampling are not affected. >>> sm = pd.Series([1, None, 3], ... index=pd.date_range('20180101', periods=3, freq='h')) >>> sm 2018-01-01 00:00:00 1.0 2018-01-01 01:00:00 NaN 2018-01-01 02:00:00 3.0 Freq: H, dtype: float64 >>> sm.resample('30min').fillna('backfill') 2018-01-01 00:00:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 01:00:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 02:00:00 3.0 Freq: 30T, dtype: float64 >>> sm.resample('30min').fillna('pad') 2018-01-01 00:00:00 1.0 2018-01-01 00:30:00 1.0 2018-01-01 01:00:00 NaN 2018-01-01 01:30:00 NaN 2018-01-01 02:00:00 3.0 Freq: 30T, dtype: float64 >>> sm.resample('30min').fillna('nearest') 2018-01-01 00:00:00 1.0 2018-01-01 00:30:00 NaN 2018-01-01 01:00:00 NaN 2018-01-01 01:30:00 3.0 2018-01-01 02:00:00 3.0 Freq: 30T, dtype: float64 DataFrame resampling is done column-wise. All the same options are available. >>> df = pd.DataFrame({'a': [2, np.nan, 6], 'b': [1, 3, 5]}, ... index=pd.date_range('20180101', periods=3, ... 
freq='h')) >>> df a b 2018-01-01 00:00:00 2.0 1 2018-01-01 01:00:00 NaN 3 2018-01-01 02:00:00 6.0 5 >>> df.resample('30min').fillna("bfill") a b 2018-01-01 00:00:00 2.0 1 2018-01-01 00:30:00 NaN 3 2018-01-01 01:00:00 NaN 3 2018-01-01 01:30:00 6.0 5 2018-01-01 02:00:00 6.0 5 """ return self._upsample(method, limit=limit) @doc(NDFrame.interpolate, **_shared_docs_kwargs) def interpolate( self, method="linear", axis=0, limit=None, inplace=False, limit_direction="forward", limit_area=None, downcast=None, **kwargs, ): """ Interpolate values according to different methods. """ result = self._upsample("asfreq") return result.interpolate( method=method, axis=axis, limit=limit, inplace=inplace, limit_direction=limit_direction, limit_area=limit_area, downcast=downcast, **kwargs, ) def asfreq(self, fill_value=None): """ Return the values at the new freq, essentially a reindex. Parameters ---------- fill_value : scalar, optional Value to use for missing values, applied during upsampling (note this does not fill NaNs that already were present). Returns ------- DataFrame or Series Values at the specified freq. See Also -------- Series.asfreq: Convert TimeSeries to specified frequency. DataFrame.asfreq: Convert TimeSeries to specified frequency. """ return self._upsample("asfreq", fill_value=fill_value) def std(self, ddof=1, *args, **kwargs): """ Compute standard deviation of groups, excluding missing values. Parameters ---------- ddof : int, default 1 Degrees of freedom. Returns ------- DataFrame or Series Standard deviation of values within each group. """ nv.validate_resampler_func("std", args, kwargs) # pandas\core\resample.py:850: error: Unexpected keyword argument # "ddof" for "_downsample" [call-arg] return self._downsample("std", ddof=ddof) # type: ignore[call-arg] def var(self, ddof=1, *args, **kwargs): """ Compute variance of groups, excluding missing values. Parameters ---------- ddof : int, default 1 Degrees of freedom. Returns ------- DataFrame or Series Variance of values within each group. """ nv.validate_resampler_func("var", args, kwargs) # pandas\core\resample.py:867: error: Unexpected keyword argument # "ddof" for "_downsample" [call-arg] return self._downsample("var", ddof=ddof) # type: ignore[call-arg] @doc(GroupBy.size) def size(self): result = self._downsample("size") if not len(self.ax): from pandas import Series if self._selected_obj.ndim == 1: name = self._selected_obj.name else: name = None result = Series([], index=result.index, dtype="int64", name=name) return result @doc(GroupBy.count) def count(self): result = self._downsample("count") if not len(self.ax): if self._selected_obj.ndim == 1: result = type(self._selected_obj)( [], index=result.index, dtype="int64", name=self._selected_obj.name ) else: from pandas import DataFrame result = DataFrame( [], index=result.index, columns=result.columns, dtype="int64" ) return result def quantile(self, q=0.5, **kwargs): """ Return value at the given quantile. .. versionadded:: 0.24.0 Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Returns ------- DataFrame or Series Quantile of values within each group. See Also -------- Series.quantile Return a series, where the index is q and the values are the quantiles. DataFrame.quantile Return a DataFrame, where the columns are the columns of self, and the values are the quantiles. DataFrameGroupBy.quantile Return a DataFrame, where the coulmns are groupby columns, and the values are its quantiles. 
""" # pandas\core\resample.py:920: error: Unexpected keyword argument "q" # for "_downsample" [call-arg] # pandas\core\resample.py:920: error: Too many arguments for # "_downsample" [call-arg] return self._downsample("quantile", q=q, **kwargs) # type: ignore[call-arg] # downsample methods for method in ["sum", "prod", "min", "max", "first", "last"]: def f(self, _method=method, min_count=0, *args, **kwargs): nv.validate_resampler_func(_method, args, kwargs) return self._downsample(_method, min_count=min_count) f.__doc__ = getattr(GroupBy, method).__doc__ setattr(Resampler, method, f) # downsample methods for method in ["mean", "sem", "median", "ohlc"]: def g(self, _method=method, *args, **kwargs): nv.validate_resampler_func(_method, args, kwargs) return self._downsample(_method) g.__doc__ = getattr(GroupBy, method).__doc__ setattr(Resampler, method, g) # series only methods for method in ["nunique"]: def h(self, _method=method): return self._downsample(_method) h.__doc__ = getattr(SeriesGroupBy, method).__doc__ setattr(Resampler, method, h) class _GroupByMixin(GotItemMixin): """ Provide the groupby facilities. """ def __init__(self, obj, *args, **kwargs): parent = kwargs.pop("parent", None) groupby = kwargs.pop("groupby", None) if parent is None: parent = obj # initialize our GroupByMixin object with # the resampler attributes for attr in self._attributes: setattr(self, attr, kwargs.get(attr, getattr(parent, attr))) # pandas\core\resample.py:972: error: Too many arguments for "__init__" # of "object" [call-arg] super().__init__(None) # type: ignore[call-arg] self._groupby = groupby self._groupby.mutated = True self._groupby.grouper.mutated = True self.groupby = copy.copy(parent.groupby) @no_type_check def _apply(self, f, grouper=None, *args, **kwargs): """ Dispatch to _upsample; we are stripping all of the _upsample kwargs and performing the original function call on the grouped object. """ def func(x): x = self._shallow_copy(x, groupby=self.groupby) if isinstance(f, str): return getattr(x, f)(**kwargs) return x.apply(f, *args, **kwargs) result = self._groupby.apply(func) return self._wrap_result(result) _upsample = _apply _downsample = _apply _groupby_and_aggregate = _apply class DatetimeIndexResampler(Resampler): @property def _resampler_for_grouping(self): return DatetimeIndexResamplerGroupby def _get_binner_for_time(self): # this is how we are actually creating the bins if self.kind == "period": return self.groupby._get_time_period_bins(self.ax) return self.groupby._get_time_bins(self.ax) def _downsample(self, how, **kwargs): """ Downsample the cython defined function. 
Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function """ self._set_binner() how = self._get_cython_func(how) or how ax = self.ax obj = self._selected_obj if not len(ax): # reset to the new freq obj = obj.copy() obj.index = obj.index._with_freq(self.freq) assert obj.index.freq == self.freq, (obj.index.freq, self.freq) return obj # do we have a regular frequency if ax.freq is not None or ax.inferred_freq is not None: # pandas\core\resample.py:1037: error: "BaseGrouper" has no # attribute "binlabels" [attr-defined] if ( len(self.grouper.binlabels) > len(ax) # type: ignore[attr-defined] and how is None ): # let's do an asfreq return self.asfreq() # we are downsampling # we want to call the actual grouper method here result = obj.groupby(self.grouper, axis=self.axis).aggregate(how, **kwargs) result = self._apply_loffset(result) return self._wrap_result(result) def _adjust_binner_for_upsample(self, binner): """ Adjust our binner when upsampling. The range of a new index should not be outside specified range """ if self.closed == "right": binner = binner[1:] else: binner = binner[:-1] return binner def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill', 'asfreq'} method for upsampling limit : int, default None Maximum size gap to fill when reindexing fill_value : scalar, default None Value to use for missing values See Also -------- .fillna: Fill NA/NaN values using the specified method. """ self._set_binner() if self.axis: raise AssertionError("axis must be 0") if self._from_selection: raise ValueError( "Upsampling from level= or on= selection " "is not supported, use .set_index(...) " "to explicitly set index to datetime-like" ) ax = self.ax obj = self._selected_obj binner = self.binner res_index = self._adjust_binner_for_upsample(binner) # if we have the same frequency as our axis, then we are equal sampling if ( limit is None and to_offset(ax.inferred_freq) == self.freq and len(obj) == len(res_index) ): result = obj.copy() result.index = res_index else: result = obj.reindex( res_index, method=method, limit=limit, fill_value=fill_value ) result = self._apply_loffset(result) return self._wrap_result(result) def _wrap_result(self, result): result = super()._wrap_result(result) # we may have a different kind that we were asked originally # convert if needed if self.kind == "period" and not isinstance(result.index, PeriodIndex): result.index = result.index.to_period(self.freq) return result class DatetimeIndexResamplerGroupby(_GroupByMixin, DatetimeIndexResampler): """ Provides a resample of a groupby implementation """ @property def _constructor(self): return DatetimeIndexResampler class PeriodIndexResampler(DatetimeIndexResampler): @property def _resampler_for_grouping(self): return PeriodIndexResamplerGroupby def _get_binner_for_time(self): if self.kind == "timestamp": return super()._get_binner_for_time() return self.groupby._get_period_bins(self.ax) def _convert_obj(self, obj): obj = super()._convert_obj(obj) if self._from_selection: # see GH 14008, GH 12871 msg = ( "Resampling from level= or on= selection " "with a PeriodIndex is not currently supported, " "use .set_index(...) 
to explicitly set index" ) raise NotImplementedError(msg) if self.loffset is not None: # Cannot apply loffset/timedelta to PeriodIndex -> convert to # timestamps self.kind = "timestamp" # convert to timestamp if self.kind == "timestamp": obj = obj.to_timestamp(how=self.convention) return obj def _downsample(self, how, **kwargs): """ Downsample the cython defined function. Parameters ---------- how : string / cython mapped function **kwargs : kw args passed to how function """ # we may need to actually resample as if we are timestamps if self.kind == "timestamp": return super()._downsample(how, **kwargs) how = self._get_cython_func(how) or how ax = self.ax if is_subperiod(ax.freq, self.freq): # Downsampling return self._groupby_and_aggregate(how, grouper=self.grouper, **kwargs) elif is_superperiod(ax.freq, self.freq): if how == "ohlc": # GH #13083 # upsampling to subperiods is handled as an asfreq, which works # for pure aggregating/reducing methods # OHLC reduces along the time dimension, but creates multiple # values for each period -> handle by _groupby_and_aggregate() return self._groupby_and_aggregate(how, grouper=self.grouper) return self.asfreq() elif ax.freq == self.freq: return self.asfreq() raise IncompatibleFrequency( f"Frequency {ax.freq} cannot be resampled to {self.freq}, " "as they are not sub or super periods" ) def _upsample(self, method, limit=None, fill_value=None): """ Parameters ---------- method : string {'backfill', 'bfill', 'pad', 'ffill'} Method for upsampling. limit : int, default None Maximum size gap to fill when reindexing. fill_value : scalar, default None Value to use for missing values. See Also -------- .fillna: Fill NA/NaN values using the specified method. """ # we may need to actually resample as if we are timestamps if self.kind == "timestamp": return super()._upsample(method, limit=limit, fill_value=fill_value) self._set_binner() ax = self.ax obj = self.obj new_index = self.binner # Start vs. end of period memb = ax.asfreq(self.freq, how=self.convention) # Get the fill indexer indexer = memb.get_indexer(new_index, method=method, limit=limit) return self._wrap_result( _take_new_index(obj, indexer, new_index, axis=self.axis) ) class PeriodIndexResamplerGroupby(_GroupByMixin, PeriodIndexResampler): """ Provides a resample of a groupby implementation. """ @property def _constructor(self): return PeriodIndexResampler class TimedeltaIndexResampler(DatetimeIndexResampler): @property def _resampler_for_grouping(self): return TimedeltaIndexResamplerGroupby def _get_binner_for_time(self): return self.groupby._get_time_delta_bins(self.ax) def _adjust_binner_for_upsample(self, binner): """ Adjust our binner when upsampling. The range of a new index is allowed to be greater than original range so we don't need to change the length of a binner, GH 13022 """ return binner class TimedeltaIndexResamplerGroupby(_GroupByMixin, TimedeltaIndexResampler): """ Provides a resample of a groupby implementation. """ @property def _constructor(self): return TimedeltaIndexResampler def get_resampler(obj, kind=None, **kwds): """ Create a TimeGrouper and return our resampler. """ tg = TimeGrouper(**kwds) return tg._get_resampler(obj, kind=kind) get_resampler.__doc__ = Resampler.__doc__ def get_resampler_for_grouping( groupby, rule, how=None, fill_method=None, limit=None, kind=None, **kwargs ): """ Return our appropriate resampler when grouping as well. 
""" # .resample uses 'on' similar to how .groupby uses 'key' kwargs["key"] = kwargs.pop("on", None) tg = TimeGrouper(freq=rule, **kwargs) resampler = tg._get_resampler(groupby.obj, kind=kind) return resampler._get_resampler_for_grouping(groupby=groupby) class TimeGrouper(Grouper): """ Custom groupby class for time-interval grouping. Parameters ---------- freq : pandas date offset or offset alias for identifying bin edges closed : closed end of interval; 'left' or 'right' label : interval boundary to use for labeling; 'left' or 'right' convention : {'start', 'end', 'e', 's'} If axis is PeriodIndex """ _attributes = Grouper._attributes + ( "closed", "label", "how", "loffset", "kind", "convention", "origin", "offset", ) def __init__( self, freq="Min", closed: Optional[str] = None, label: Optional[str] = None, how="mean", axis=0, fill_method=None, limit=None, loffset=None, kind: Optional[str] = None, convention: Optional[str] = None, base: Optional[int] = None, origin: Union[str, TimestampConvertibleTypes] = "start_day", offset: Optional[TimedeltaConvertibleTypes] = None, **kwargs, ): # Check for correctness of the keyword arguments which would # otherwise silently use the default if misspelled if label not in {None, "left", "right"}: raise ValueError(f"Unsupported value {label} for `label`") if closed not in {None, "left", "right"}: raise ValueError(f"Unsupported value {closed} for `closed`") if convention not in {None, "start", "end", "e", "s"}: raise ValueError(f"Unsupported value {convention} for `convention`") freq = to_offset(freq) end_types = {"M", "A", "Q", "BM", "BA", "BQ", "W"} rule = freq.rule_code if rule in end_types or ("-" in rule and rule[: rule.find("-")] in end_types): if closed is None: closed = "right" if label is None: label = "right" else: # The backward resample sets ``closed`` to ``'right'`` by default # since the last value should be considered as the edge point for # the last bin. When origin in "end" or "end_day", the value for a # specific ``Timestamp`` index stands for the resample result from # the current ``Timestamp`` minus ``freq`` to the current # ``Timestamp`` with a right close. if origin in ["end", "end_day"]: if closed is None: closed = "right" if label is None: label = "right" else: if closed is None: closed = "left" if label is None: label = "left" self.closed = closed self.label = label self.kind = kind self.convention = convention or "E" self.convention = self.convention.lower() self.how = how self.fill_method = fill_method self.limit = limit if origin in ("epoch", "start", "start_day", "end", "end_day"): self.origin = origin else: try: self.origin = Timestamp(origin) except Exception as e: raise ValueError( "'origin' should be equal to 'epoch', 'start', 'start_day', " "'end', 'end_day' or " f"should be a Timestamp convertible type. Got '{origin}' instead." ) from e try: self.offset = Timedelta(offset) if offset is not None else None except Exception as e: raise ValueError( "'offset' should be a Timedelta convertible type. " f"Got '{offset}' instead." ) from e # always sort time groupers kwargs["sort"] = True # Handle deprecated arguments since v1.1.0 of `base` and `loffset` (GH #31809) if base is not None and offset is not None: raise ValueError("'offset' and 'base' cannot be present at the same time") if base and isinstance(freq, Tick): # this conversion handle the default behavior of base and the # special case of GH #10530. 
Indeed in case when dealing with # a TimedeltaIndex base was treated as a 'pure' offset even though # the default behavior of base was equivalent of a modulo on # freq_nanos. self.offset = Timedelta(base * freq.nanos // freq.n) if isinstance(loffset, str): loffset = to_offset(loffset) self.loffset = loffset super().__init__(freq=freq, axis=axis, **kwargs) def _get_resampler(self, obj, kind=None): """ Return my resampler or raise if we have an invalid axis. Parameters ---------- obj : input object kind : string, optional 'period','timestamp','timedelta' are valid Returns ------- a Resampler Raises ------ TypeError if incompatible axis """ self._set_grouper(obj) ax = self.ax if isinstance(ax, DatetimeIndex): return DatetimeIndexResampler(obj, groupby=self, kind=kind, axis=self.axis) elif isinstance(ax, PeriodIndex) or kind == "period": return PeriodIndexResampler(obj, groupby=self, kind=kind, axis=self.axis) elif isinstance(ax, TimedeltaIndex): return TimedeltaIndexResampler(obj, groupby=self, axis=self.axis) raise TypeError( "Only valid with DatetimeIndex, " "TimedeltaIndex or PeriodIndex, " f"but got an instance of '{type(ax).__name__}'" ) def _get_grouper(self, obj, validate: bool = True): # create the resampler and return our binner r = self._get_resampler(obj) r._set_binner() return r.binner, r.grouper, r.obj def _get_time_bins(self, ax): if not isinstance(ax, DatetimeIndex): raise TypeError( "axis must be a DatetimeIndex, but got " f"an instance of {type(ax).__name__}" ) if len(ax) == 0: binner = labels = DatetimeIndex(data=[], freq=self.freq, name=ax.name) return binner, [], labels first, last = _get_timestamp_range_edges( ax.min(), ax.max(), self.freq, closed=self.closed, origin=self.origin, offset=self.offset, ) # GH #12037 # use first/last directly instead of call replace() on them # because replace() will swallow the nanosecond part # thus last bin maybe slightly before the end if the end contains # nanosecond part and lead to `Values falls after last bin` error # GH 25758: If DST lands at midnight (e.g. 
'America/Havana'), user feedback # has noted that ambiguous=True provides the most sensible result binner = labels = date_range( freq=self.freq, start=first, end=last, tz=ax.tz, name=ax.name, ambiguous=True, nonexistent="shift_forward", ) ax_values = ax.asi8 binner, bin_edges = self._adjust_bin_edges(binner, ax_values) # general version, knowing nothing about relative frequencies bins = lib.generate_bins_dt64( ax_values, bin_edges, self.closed, hasnans=ax.hasnans ) if self.closed == "right": labels = binner if self.label == "right": labels = labels[1:] elif self.label == "right": labels = labels[1:] if ax.hasnans: binner = binner.insert(0, NaT) labels = labels.insert(0, NaT) # if we end up with more labels than bins # adjust the labels # GH4076 if len(bins) < len(labels): labels = labels[: len(bins)] return binner, bins, labels def _adjust_bin_edges(self, binner, ax_values): # Some hacks for > daily data, see #1471, #1458, #1483 if self.freq != "D" and is_superperiod(self.freq, "D"): if self.closed == "right": # GH 21459, GH 9119: Adjust the bins relative to the wall time bin_edges = binner.tz_localize(None) bin_edges = bin_edges + timedelta(1) - Nano(1) bin_edges = bin_edges.tz_localize(binner.tz).asi8 else: bin_edges = binner.asi8 # intraday values on last day if bin_edges[-2] > ax_values.max(): bin_edges = bin_edges[:-1] binner = binner[:-1] else: bin_edges = binner.asi8 return binner, bin_edges def _get_time_delta_bins(self, ax): if not isinstance(ax, TimedeltaIndex): raise TypeError( "axis must be a TimedeltaIndex, but got " f"an instance of {type(ax).__name__}" ) if not len(ax): binner = labels = TimedeltaIndex(data=[], freq=self.freq, name=ax.name) return binner, [], labels start, end = ax.min(), ax.max() labels = binner = timedelta_range( start=start, end=end, freq=self.freq, name=ax.name ) end_stamps = labels + self.freq bins = ax.searchsorted(end_stamps, side="left") if self.offset: # GH 10530 & 31809 labels += self.offset if self.loffset: # GH 33498 labels += self.loffset return binner, bins, labels def _get_time_period_bins(self, ax: DatetimeIndex): if not isinstance(ax, DatetimeIndex): raise TypeError( "axis must be a DatetimeIndex, but got " f"an instance of {type(ax).__name__}" ) freq = self.freq if not len(ax): binner = labels = PeriodIndex(data=[], freq=freq, name=ax.name) return binner, [], labels labels = binner = period_range(start=ax[0], end=ax[-1], freq=freq, name=ax.name) end_stamps = (labels + freq).asfreq(freq, "s").to_timestamp() if ax.tz: end_stamps = end_stamps.tz_localize(ax.tz) bins = ax.searchsorted(end_stamps, side="left") return binner, bins, labels def _get_period_bins(self, ax: PeriodIndex): if not isinstance(ax, PeriodIndex): raise TypeError( "axis must be a PeriodIndex, but got " f"an instance of {type(ax).__name__}" ) memb = ax.asfreq(self.freq, how=self.convention) # NaT handling as in pandas._lib.lib.generate_bins_dt64() nat_count = 0 if memb.hasnans: nat_count = np.sum(memb._isnan) memb = memb[~memb._isnan] # if index contains no valid (non-NaT) values, return empty index if not len(memb): binner = labels = PeriodIndex(data=[], freq=self.freq, name=ax.name) return binner, [], labels freq_mult = self.freq.n start = ax.min().asfreq(self.freq, how=self.convention) end = ax.max().asfreq(self.freq, how="end") bin_shift = 0 if isinstance(self.freq, Tick): # GH 23882 & 31809: get adjusted bin edge labels with 'origin' # and 'origin' support. This call only makes sense if the freq is a # Tick since offset and origin are only used in those cases. 
# Not doing this check could create an extra empty bin. p_start, end = _get_period_range_edges( start, end, self.freq, closed=self.closed, origin=self.origin, offset=self.offset, ) # Get offset for bin edge (not label edge) adjustment start_offset = Period(start, self.freq) - Period(p_start, self.freq) bin_shift = start_offset.n % freq_mult start = p_start labels = binner = period_range( start=start, end=end, freq=self.freq, name=ax.name ) i8 = memb.asi8 # when upsampling to subperiods, we need to generate enough bins expected_bins_count = len(binner) * freq_mult i8_extend = expected_bins_count - (i8[-1] - i8[0]) rng = np.arange(i8[0], i8[-1] + i8_extend, freq_mult) rng += freq_mult # adjust bin edge indexes to account for base rng -= bin_shift # Wrap in PeriodArray for PeriodArray.searchsorted prng = type(memb._data)(rng, dtype=memb.dtype) bins = memb.searchsorted(prng, side="left") if nat_count > 0: # NaT handling as in pandas._lib.lib.generate_bins_dt64() # shift bins by the number of NaT bins += nat_count bins = np.insert(bins, 0, nat_count) binner = binner.insert(0, NaT) labels = labels.insert(0, NaT) return binner, bins, labels def _take_new_index(obj, indexer, new_index, axis=0): if isinstance(obj, ABCSeries): new_values = algos.take_1d(obj._values, indexer) return obj._constructor(new_values, index=new_index, name=obj.name) elif isinstance(obj, ABCDataFrame): if axis == 1: raise NotImplementedError("axis 1 is not supported") return obj._constructor( obj._mgr.reindex_indexer(new_axis=new_index, indexer=indexer, axis=1) ) else: raise ValueError("'obj' should be either a Series or a DataFrame") def _get_timestamp_range_edges( first, last, freq, closed="left", origin="start_day", offset=None ): """ Adjust the `first` Timestamp to the preceding Timestamp that resides on the provided offset. Adjust the `last` Timestamp to the following Timestamp that resides on the provided offset. Input Timestamps that already reside on the offset will be adjusted depending on the type of offset and the `closed` parameter. Parameters ---------- first : pd.Timestamp The beginning Timestamp of the range to be adjusted. last : pd.Timestamp The ending Timestamp of the range to be adjusted. freq : pd.DateOffset The dateoffset to which the Timestamps will be adjusted. closed : {'right', 'left'}, default None Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'} or Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. Returns ------- A tuple of length 2, containing the adjusted pd.Timestamp objects. """ if isinstance(freq, Tick): index_tz = first.tz if isinstance(origin, Timestamp) and (origin.tz is None) != (index_tz is None): raise ValueError("The origin must have the same timezone as the index.") elif origin == "epoch": # set the epoch based on the timezone to have similar bins results when # resampling on the same kind of indexes on different timezones origin = Timestamp("1970-01-01", tz=index_tz) if isinstance(freq, Day): # _adjust_dates_anchored assumes 'D' means 24H, but first/last # might contain a DST transition (23H, 24H, or 25H). 
# So "pretend" the dates are naive when adjusting the endpoints first = first.tz_localize(None) last = last.tz_localize(None) if isinstance(origin, Timestamp): origin = origin.tz_localize(None) first, last = _adjust_dates_anchored( first, last, freq, closed=closed, origin=origin, offset=offset ) if isinstance(freq, Day): first = first.tz_localize(index_tz) last = last.tz_localize(index_tz) else: first = first.normalize() last = last.normalize() if closed == "left": first = Timestamp(freq.rollback(first)) else: first = Timestamp(first - freq) last = Timestamp(last + freq) return first, last def _get_period_range_edges( first, last, freq, closed="left", origin="start_day", offset=None ): """ Adjust the provided `first` and `last` Periods to the respective Period of the given offset that encompasses them. Parameters ---------- first : pd.Period The beginning Period of the range to be adjusted. last : pd.Period The ending Period of the range to be adjusted. freq : pd.DateOffset The freq to which the Periods will be adjusted. closed : {'right', 'left'}, default None Which side of bin interval is closed. origin : {'epoch', 'start', 'start_day'}, Timestamp, default 'start_day' The timestamp on which to adjust the grouping. The timezone of origin must match the timezone of the index. If a timestamp is not used, these values are also supported: - 'epoch': `origin` is 1970-01-01 - 'start': `origin` is the first value of the timeseries - 'start_day': `origin` is the first day at midnight of the timeseries offset : pd.Timedelta, default is None An offset timedelta added to the origin. Returns ------- A tuple of length 2, containing the adjusted pd.Period objects. """ if not all(isinstance(obj, Period) for obj in [first, last]): raise TypeError("'first' and 'last' must be instances of type Period") # GH 23882 first = first.to_timestamp() last = last.to_timestamp() adjust_first = not freq.is_on_offset(first) adjust_last = freq.is_on_offset(last) first, last = _get_timestamp_range_edges( first, last, freq, closed=closed, origin=origin, offset=offset ) first = (first + int(adjust_first) * freq).to_period(freq) last = (last - int(adjust_last) * freq).to_period(freq) return first, last def _adjust_dates_anchored( first, last, freq, closed="right", origin="start_day", offset=None ): # First and last offsets should be calculated from the start day to fix an # error cause by resampling across multiple days when a one day period is # not a multiple of the frequency. See GH 8683 # To handle frequencies that are not multiple or divisible by a day we let # the possibility to define a fixed origin timestamp. See GH 31809 origin_nanos = 0 # origin == "epoch" if origin == "start_day": origin_nanos = first.normalize().value elif origin == "start": origin_nanos = first.value elif isinstance(origin, Timestamp): origin_nanos = origin.value elif origin in ["end", "end_day"]: origin = last if origin == "end" else last.ceil("D") sub_freq_times = (origin.value - first.value) // freq.nanos if closed == "left": sub_freq_times += 1 first = origin - sub_freq_times * freq origin_nanos = first.value origin_nanos += offset.value if offset else 0 # GH 10117 & GH 19375. If first and last contain timezone information, # Perform the calculation in UTC in order to avoid localizing on an # Ambiguous or Nonexistent time. 
first_tzinfo = first.tzinfo last_tzinfo = last.tzinfo if first_tzinfo is not None: first = first.tz_convert("UTC") if last_tzinfo is not None: last = last.tz_convert("UTC") foffset = (first.value - origin_nanos) % freq.nanos loffset = (last.value - origin_nanos) % freq.nanos if closed == "right": if foffset > 0: # roll back fresult = first.value - foffset else: fresult = first.value - freq.nanos if loffset > 0: # roll forward lresult = last.value + (freq.nanos - loffset) else: # already the end of the road lresult = last.value else: # closed == 'left' if foffset > 0: fresult = first.value - foffset else: # start of the road fresult = first.value if loffset > 0: # roll forward lresult = last.value + (freq.nanos - loffset) else: lresult = last.value + freq.nanos fresult = Timestamp(fresult) lresult = Timestamp(lresult) if first_tzinfo is not None: fresult = fresult.tz_localize("UTC").tz_convert(first_tzinfo) if last_tzinfo is not None: lresult = lresult.tz_localize("UTC").tz_convert(last_tzinfo) return fresult, lresult def asfreq(obj, freq, method=None, how=None, normalize=False, fill_value=None): """ Utility frequency conversion method for Series/DataFrame. See :meth:`pandas.NDFrame.asfreq` for full documentation. """ if isinstance(obj.index, PeriodIndex): if method is not None: raise NotImplementedError("'method' argument is not supported") if how is None: how = "E" new_obj = obj.copy() new_obj.index = obj.index.asfreq(freq, how=how) elif len(obj.index) == 0: new_obj = obj.copy() new_obj.index = _asfreq_compat(obj.index, freq) else: dti = date_range(obj.index[0], obj.index[-1], freq=freq) dti.name = obj.index.name new_obj = obj.reindex(dti, method=method, fill_value=fill_value) if normalize: new_obj.index = new_obj.index.normalize() return new_obj def _asfreq_compat(index, freq): """ Helper to mimic asfreq on (empty) DatetimeIndex and TimedeltaIndex. Parameters ---------- index : PeriodIndex, DatetimeIndex, or TimedeltaIndex freq : DateOffset Returns ------- same type as index """ if len(index) != 0: # This should never be reached, always checked by the caller raise ValueError( "Can only set arbitrary freq for empty DatetimeIndex or TimedeltaIndex" ) new_index: Index if isinstance(index, PeriodIndex): new_index = index.asfreq(freq=freq) elif isinstance(index, DatetimeIndex): new_index = DatetimeIndex([], dtype=index.dtype, freq=freq, name=index.name) elif isinstance(index, TimedeltaIndex): new_index = TimedeltaIndex([], dtype=index.dtype, freq=freq, name=index.name) else: # pragma: no cover raise TypeError(type(index)) return new_index
bsd-3-clause
southampton/cortex
corpus/rubrik.py
1
5894
import traceback from urllib.parse import quote, urljoin import requests class RubrikVMNotFound(Exception): pass class RubrikVCenterNotFound(Exception): def __init__(self, vcenter): super().__init__("vCenter Not Found Exception") self.vcenter = vcenter class Rubrik: """A RESTful client for Rubrik""" def __init__(self, helper): """Initialise the client and create a bearer token""" self._helper = helper self._api_url_base = helper.config["RUBRIK_API_URL_BASE"] self._api_default_version = helper.config["RUBRIK_API_DEFAULT_VERSION"] self._verify = helper.config["RUBRIK_API_VERIFY_SERVER"] self._headers = {"Accept": "application/json"} self._auth = (helper.config["RUBRIK_API_USER"], helper.config["RUBRIK_API_PASS"]) self._get_api_token() def _url(self, endpoint, version=None): """Construct the URL for a given endpoint""" # If the version was not specified use the default if not version: version = self._api_default_version # Version must have a trailing slash if not version.endswith("/"): version = version + "/" # Endpoint shouldn"t have a leading slash if endpoint.startswith("/"): endpoint = endpoint[1:] return urljoin(urljoin(self._api_url_base, version), endpoint) def _get_api_token(self): """Obtain a bearer token and insert it into the client"s headers""" r = requests.post( self._url("session"), headers=self._headers, auth=self._auth, verify=self._verify, ) r.raise_for_status() self._headers["Authorization"] = "Bearer " + r.json().get("token") def _request(self, method, endpoint, version, **kwargs): """Make an API request to an endpoint""" # Remove the keys we will override for k in ("headers", "verify"): kwargs.pop(k, None) # Attempt to make a request r = requests.request( method, self._url(endpoint, version=version), headers=self._headers, verify=self._verify, **kwargs ) r.raise_for_status() if r.text: return r.json() return {} def get_request(self, endpoint, version=None, payload=None): """Make a GET request to an API endpoint""" return self._request("GET", endpoint, version, params=payload) def patch_request(self, endpoint, version=None, payload=None): """Make a PATCH request to an API endpoint""" payload = {} if payload is None else payload return self._request("PATCH", endpoint, version, json=payload) def post_request(self, endpoint, version=None, payload=None): """Make a POST request to an API endpoint""" return self._request("POST", endpoint, version, json=payload) def get_sla_domains(self): """Gets the backup SLA categories from Rubrik""" return self.get_request("sla_domain") def assign_sla_domain(self, sla_domain_id, managed_id): """Use the internal API to assign an SLA domain""" return self.post_request( "sla_domain/{sla_domain_id}/assign".format(sla_domain_id=quote(sla_domain_id)), version="internal", payload={ "managedIds": [managed_id,], "existingSnapshotRetention": "RetainSnapshots" } ) def get_vcenter_managed_id(self, vcenter_hostname): """Gets the Managed ID of the vCenter object in Rubrik for a given vCenter hostname.""" # pylint: disable=invalid-name # Get all the vCenter information from Rubrik vcenters = self.get_request("vmware/vcenter") # For case insensitive comparison vcenter_hostname = vcenter_hostname.lower() # Iterate over the vCenters vcManagedId = None for vc in vcenters["data"]: # If this is the right vCenter if vc["hostname"].lower() == vcenter_hostname: vcManagedId = vc["id"] break if vcManagedId is None: raise RubrikVCenterNotFound(vcenter_hostname) return vcManagedId def get_vm_managed_id(self, system): """Works out the Rubrik Managed ID of a VM""" # 
pylint: disable=invalid-name # Format of Rubrik Managed ID is: # VirtualMachine:::<vcenter-rubrik-managed-id>-<vm-moId> if "vmware_vcenter" not in system or "vmware_moid" not in system: raise KeyError("Missing vCenter or moId information from system") if system["vmware_vcenter"] is None or system["vmware_moid"] is None: raise RuntimeError("No vCenter or moId information available") # Get the vCenter managed ID vcManagedId = self.get_vcenter_managed_id(system["vmware_vcenter"]) # Remove the leading "vCenter:::" text vcManagedId = vcManagedId[10:] return "VirtualMachine:::" + vcManagedId + "-" + system["vmware_moid"] def get_vm(self, system): """Detailed view of a VM param system The details of the system as a dict-like object (i.e. row from systems_info_view) """ try: vm_id = self.get_vm_managed_id(system) # bubble up the RubrikVMNotFound and RubrikVCenterNotFound exceptions except (RubrikVMNotFound, RubrikVCenterNotFound) as ex: raise ex except Exception as ex: self._helper.logger.error("Error getting Rubrik VM ID:\n" + traceback.format_exc()) raise Exception("Error getting Rubrik VM ID: " + str(ex)) try: return self.get_request("vmware/vm/{id}".format(id=quote(vm_id))) except requests.exceptions.HTTPError as ex: if ex.response is not None and ex.response.status_code == 404: raise RubrikVMNotFound() self._helper.logger.error("Error getting Rubrik VM ID:\n" + traceback.format_exc()) raise Exception("Error getting VM from Rubrik: " + str(ex)) except Exception as ex: self._helper.logger.error("Error getting Rubrik VM ID:\n" + traceback.format_exc()) raise Exception("Error getting VM from Rubrik: " + str(ex)) def get_vm_snapshots(self, managed_id, **_kwargs): """Get a list of snapshots for the vm""" return self.get_request("vmware/vm/{managed_id}/snapshot".format(managed_id=quote(managed_id))) def update_vm(self, managed_id, data): """update a vm with a new set of properties""" return self.patch_request("vmware/vm/{managed_id}".format(managed_id=quote(managed_id)), payload=data)
gpl-3.0
jqk6/node-gyp
gyp/pylib/gyp/MSVSUserFile.py
2696
5094
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Visual Studio user preferences file writer.""" import os import re import socket # for gethostname import gyp.common import gyp.easy_xml as easy_xml #------------------------------------------------------------------------------ def _FindCommandInPath(command): """If there are no slashes in the command given, this function searches the PATH env to find the given command, and converts it to an absolute path. We have to do this because MSVS is looking for an actual file to launch a debugger on, not just a command line. Note that this happens at GYP time, so anything needing to be built needs to have a full path.""" if '/' in command or '\\' in command: # If the command already has path elements (either relative or # absolute), then assume it is constructed properly. return command else: # Search through the path list and find an existing file that # we can access. paths = os.environ.get('PATH','').split(os.pathsep) for path in paths: item = os.path.join(path, command) if os.path.isfile(item) and os.access(item, os.X_OK): return item return command def _QuoteWin32CommandLineArgs(args): new_args = [] for arg in args: # Replace all double-quotes with double-double-quotes to escape # them for cmd shell, and then quote the whole thing if there # are any. if arg.find('"') != -1: arg = '""'.join(arg.split('"')) arg = '"%s"' % arg # Otherwise, if there are any spaces, quote the whole arg. elif re.search(r'[ \t\n]', arg): arg = '"%s"' % arg new_args.append(arg) return new_args class Writer(object): """Visual Studio XML user user file writer.""" def __init__(self, user_file_path, version, name): """Initializes the user file. Args: user_file_path: Path to the user file. version: Version info. name: Name of the user file. """ self.user_file_path = user_file_path self.version = version self.name = name self.configurations = {} def AddConfig(self, name): """Adds a configuration to the project. Args: name: Configuration name. """ self.configurations[name] = ['Configuration', {'Name': name}] def AddDebugSettings(self, config_name, command, environment = {}, working_directory=""): """Adds a DebugSettings node to the user file for a particular config. Args: command: command line to run. First element in the list is the executable. All elements of the command will be quoted if necessary. working_directory: other files which may trigger the rule. (optional) """ command = _QuoteWin32CommandLineArgs(command) abs_command = _FindCommandInPath(command[0]) if environment and isinstance(environment, dict): env_list = ['%s="%s"' % (key, val) for (key,val) in environment.iteritems()] environment = ' '.join(env_list) else: environment = '' n_cmd = ['DebugSettings', {'Command': abs_command, 'WorkingDirectory': working_directory, 'CommandArguments': " ".join(command[1:]), 'RemoteMachine': socket.gethostname(), 'Environment': environment, 'EnvironmentMerge': 'true', # Currently these are all "dummy" values that we're just setting # in the default manner that MSVS does it. We could use some of # these to add additional capabilities, I suppose, but they might # not have parity with other platforms then. 
'Attach': 'false', 'DebuggerType': '3', # 'auto' debugger 'Remote': '1', 'RemoteCommand': '', 'HttpUrl': '', 'PDBPath': '', 'SQLDebugging': '', 'DebuggerFlavor': '0', 'MPIRunCommand': '', 'MPIRunArguments': '', 'MPIRunWorkingDirectory': '', 'ApplicationCommand': '', 'ApplicationArguments': '', 'ShimCommand': '', 'MPIAcceptMode': '', 'MPIAcceptFilter': '' }] # Find the config, and add it if it doesn't exist. if config_name not in self.configurations: self.AddConfig(config_name) # Add the DebugSettings onto the appropriate config. self.configurations[config_name].append(n_cmd) def WriteIfChanged(self): """Writes the user file.""" configs = ['Configurations'] for config, spec in sorted(self.configurations.iteritems()): configs.append(spec) content = ['VisualStudioUserFile', {'Version': self.version.ProjectVersion(), 'Name': self.name }, configs] easy_xml.WriteXmlIfChanged(content, self.user_file_path, encoding="Windows-1252")
mit
jgu1/myKafka
system_test/utils/testcase_env.py
71
7198
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. #!/usr/bin/env python # =================================== # testcase_env.py # =================================== import json import os import sys import thread import system_test_utils class TestcaseEnv(): def __init__(self, systemTestEnv, classInstance): self.systemTestEnv = systemTestEnv # ================================ # Generic testcase environment # ================================ # dictionary of entity_id to ppid for Zookeeper entities # key: entity_id # val: ppid of Zookeeper associated to that entity_id # { 0: 12345, 1: 12389, ... } self.entityZkParentPidDict = {} # dictionary of entity_id to ppid for broker entities # key: entity_id # val: ppid of broker associated to that entity_id # { 0: 12345, 1: 12389, ... } self.entityBrokerParentPidDict = {} # dictionary of entity_id to ppid for mirror-maker entities # key: entity_id # val: ppid of broker associated to that entity_id # { 0: 12345, 1: 12389, ... } self.entityMirrorMakerParentPidDict = {} # dictionary of entity_id to ppid for console-consumer entities # key: entity_id # val: ppid of console consumer associated to that entity_id # { 0: 12345, 1: 12389, ... } self.entityConsoleConsumerParentPidDict = {} # dictionary of entity_id to ppid for migration tool entities # key: entity_id # val: ppid of broker associated to that entity_id # { 0: 12345, 1: 12389, ... } self.entityMigrationToolParentPidDict = {} # dictionary of entity_id to list of JMX ppid # key: entity_id # val: list of JMX ppid associated to that entity_id # { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... } self.entityJmxParentPidDict = {} # dictionary of hostname-topic-ppid for consumer # key: hostname # val: dict of topic-ppid # { host1: { test1 : 12345 }, host1: { test2 : 12389 }, ... } self.consumerHostParentPidDict = {} # dictionary of hostname-topic-ppid for producer # key: hostname # val: dict of topic-ppid # { host1: { test1 : 12345 }, host1: { test2 : 12389 }, ... 
} self.producerHostParentPidDict = {} # list of testcase configs self.testcaseConfigsList = [] # dictionary to keep track of testcase arguments such as replica_factor, num_partition self.testcaseArgumentsDict = {} # gather the test case related info and add to an SystemTestEnv object self.testcaseResultsDict = {} self.testcaseResultsDict["_test_class_name"] = classInstance.__class__.__name__ self.testcaseResultsDict["_test_case_name"] = "" self.validationStatusDict = {} self.testcaseResultsDict["validation_status"] = self.validationStatusDict self.systemTestEnv.systemTestResultsList.append(self.testcaseResultsDict) # FIXME: in a distributed environement, kafkaBaseDir could be different in individual host # => TBD self.kafkaBaseDir = "" self.systemTestBaseDir = systemTestEnv.SYSTEM_TEST_BASE_DIR # to be initialized in the Test Module self.testSuiteBaseDir = "" self.testCaseBaseDir = "" self.testCaseLogsDir = "" self.testCaseDashboardsDir = "" self.testcasePropJsonPathName = "" self.testcaseNonEntityDataDict = {} # ================================ # dictionary to keep track of # user-defined environment variables # ================================ # LEADER_ELECTION_COMPLETED_MSG = "completed the leader state transition" # REGX_LEADER_ELECTION_PATTERN = "\[(.*?)\] .* Broker (.*?) " + \ # LEADER_ELECTION_COMPLETED_MSG + \ # " for topic (.*?) partition (.*?) \(.*" # zkConnectStr = "" # consumerLogPathName = "" # consumerConfigPathName = "" # producerLogPathName = "" # producerConfigPathName = "" self.userDefinedEnvVarDict = {} # Lock object for producer threads synchronization self.lock = thread.allocate_lock() self.numProducerThreadsRunning = 0 # to be used when validating data match - these variables will be # updated by kafka_system_test_utils.start_producer_in_thread self.producerTopicsString = "" self.consumerTopicsString = "" def initWithKnownTestCasePathName(self, testCasePathName): testcaseDirName = os.path.basename(testCasePathName) self.testcaseResultsDict["_test_case_name"] = testcaseDirName self.testCaseBaseDir = testCasePathName self.testCaseLogsDir = self.testCaseBaseDir + "/logs" self.testCaseDashboardsDir = self.testCaseBaseDir + "/dashboards" # find testcase properties json file self.testcasePropJsonPathName = system_test_utils.get_testcase_prop_json_pathname(testCasePathName) # get the dictionary that contains the testcase arguments and description self.testcaseNonEntityDataDict = system_test_utils.get_json_dict_data(self.testcasePropJsonPathName) def printTestCaseDescription(self, testcaseDirName): testcaseDescription = "" for k,v in self.testcaseNonEntityDataDict.items(): if ( k == "description" ): testcaseDescription = v print "\n" print "=======================================================================================" print "Test Case Name :", testcaseDirName print "=======================================================================================" print "Description :" for step in sorted(testcaseDescription.iterkeys()): print " ", step, ":", testcaseDescription[step] print "=======================================================================================" print "Test Case Args :" for k,v in self.testcaseArgumentsDict.items(): print " ", k, " : ", v self.testcaseResultsDict["arg : " + k] = v print "======================================================================================="
apache-2.0
Alidron/demo-nao
alidron-env/lib/python2.7/site-packages/pip/commands/hash.py
510
1597
from __future__ import absolute_import import hashlib import logging import sys from pip.basecommand import Command from pip.status_codes import ERROR from pip.utils import read_chunks from pip.utils.hashes import FAVORITE_HASH, STRONG_HASHES logger = logging.getLogger(__name__) class HashCommand(Command): """ Compute a hash of a local package archive. These can be used with --hash in a requirements file to do repeatable installs. """ name = 'hash' usage = '%prog [options] <file> ...' summary = 'Compute hashes of package archives.' def __init__(self, *args, **kw): super(HashCommand, self).__init__(*args, **kw) self.cmd_opts.add_option( '-a', '--algorithm', dest='algorithm', choices=STRONG_HASHES, action='store', default=FAVORITE_HASH, help='The hash algorithm to use: one of %s' % ', '.join(STRONG_HASHES)) self.parser.insert_option_group(0, self.cmd_opts) def run(self, options, args): if not args: self.parser.print_usage(sys.stderr) return ERROR algorithm = options.algorithm for path in args: logger.info('%s:\n--hash=%s:%s', path, algorithm, _hash_of_file(path, algorithm)) def _hash_of_file(path, algorithm): """Return the hash digest of a file.""" with open(path, 'rb') as archive: hash = hashlib.new(algorithm) for chunk in read_chunks(archive): hash.update(chunk) return hash.hexdigest()
mpl-2.0
dknights/football
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py
1523
3426
# Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import collections import os import gyp import gyp.common import gyp.msvs_emulation import json import sys generator_supports_multiple_toolsets = True generator_wants_static_library_dependencies_adjusted = False generator_filelist_paths = { } generator_default_variables = { } for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR', 'LIB_DIR', 'SHARED_LIB_DIR']: # Some gyp steps fail if these are empty(!). generator_default_variables[dirname] = 'dir' for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME', 'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT', 'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX', 'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX', 'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX', 'CONFIGURATION_NAME']: generator_default_variables[unused] = '' def CalculateVariables(default_variables, params): generator_flags = params.get('generator_flags', {}) for key, val in generator_flags.items(): default_variables.setdefault(key, val) default_variables.setdefault('OS', gyp.common.GetFlavor(params)) flavor = gyp.common.GetFlavor(params) if flavor =='win': # Copy additional generator configuration data from VS, which is shared # by the Windows Ninja generator. import gyp.generator.msvs as msvs_generator generator_additional_non_configuration_keys = getattr(msvs_generator, 'generator_additional_non_configuration_keys', []) generator_additional_path_sections = getattr(msvs_generator, 'generator_additional_path_sections', []) gyp.msvs_emulation.CalculateCommonVariables(default_variables, params) def CalculateGeneratorInputInfo(params): """Calculate the generator specific info that gets fed to input (called by gyp).""" generator_flags = params.get('generator_flags', {}) if generator_flags.get('adjust_static_libraries', False): global generator_wants_static_library_dependencies_adjusted generator_wants_static_library_dependencies_adjusted = True toplevel = params['options'].toplevel_dir generator_dir = os.path.relpath(params['options'].generator_output or '.') # output_dir: relative path from generator_dir to the build directory. output_dir = generator_flags.get('output_dir', 'out') qualified_out_dir = os.path.normpath(os.path.join( toplevel, generator_dir, output_dir, 'gypfiles')) global generator_filelist_paths generator_filelist_paths = { 'toplevel': toplevel, 'qualified_out_dir': qualified_out_dir, } def GenerateOutput(target_list, target_dicts, data, params): # Map of target -> list of targets it depends on. edges = {} # Queue of targets to visit. targets_to_visit = target_list[:] while len(targets_to_visit) > 0: target = targets_to_visit.pop() if target in edges: continue edges[target] = [] for dep in target_dicts[target].get('dependencies', []): edges[target].append(dep) targets_to_visit.append(dep) try: filepath = params['generator_flags']['output_dir'] except KeyError: filepath = '.' filename = os.path.join(filepath, 'dump.json') f = open(filename, 'w') json.dump(edges, f) f.close() print 'Wrote json to %s.' % filename
mit
ArcherSys/ArcherSys
Lib/test/test_json/test_pass3.py
1
1772
from test.test_json import PyTest, CTest


# from http://json.org/JSON_checker/test/pass3.json
JSON = r'''
{
    "JSON Test Pattern pass3": {
        "The outermost value": "must be an object or array.",
        "In this test": "It is an object."
    }
}
'''


class TestPass3:
    def test_parse(self):
        # test in/out equivalence and parsing
        res = self.loads(JSON)
        out = self.dumps(res)
        self.assertEqual(res, self.loads(out))


class TestPyPass3(TestPass3, PyTest): pass
class TestCPass3(TestPass3, CTest): pass
mit
trentm/node-gyp
gyp/test/defines-escaping/gyptest-defines-escaping.py
343
4737
#!/usr/bin/env python # Copyright (c) 2010 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies build of an executable with C++ define specified by a gyp define using various special characters such as quotes, commas, etc. """ import os import TestGyp test = TestGyp.TestGyp() # Tests string literals, percents, and backslash escapes. try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s\n' """ r"""test_args='"Simple test of %s with a literal"'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.build('defines-escaping.gyp') expect = """ Simple test of %s with a literal """ test.run_built_executable('defines_escaping', stdout=expect) # Test multiple comma-and-space-separated string literals. try: os.environ['GYP_DEFINES'] = \ r"""test_format='\n%s and %s\n' test_args='"foo", "bar"'""" test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = """ foo and bar """ test.run_built_executable('defines_escaping', stdout=expect) # Test string literals containing quotes. try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s %s %s %s %s\n' """ r"""test_args='"\"These,\"",""" r""" "\"words,\"",""" r""" "\"are,\"",""" r""" "\"in,\"",""" r""" "\"quotes.\""'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = """ "These," "words," "are," "in," "quotes." """ test.run_built_executable('defines_escaping', stdout=expect) # Test string literals containing single quotes. try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s %s %s %s %s\n' """ r"""test_args="\"'These,'\",""" r""" \"'words,'\",""" r""" \"'are,'\",""" r""" \"'in,'\",""" r""" \"'quotes.'\"" """) test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = """ 'These,' 'words,' 'are,' 'in,' 'quotes.' """ test.run_built_executable('defines_escaping', stdout=expect) # Test string literals containing different numbers of backslashes before quotes # (to exercise Windows' quoting behaviour). try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s\n%s\n%s\n' """ r"""test_args='"\\\"1 visible slash\\\"",""" r""" "\\\\\"2 visible slashes\\\\\"",""" r""" "\\\\\\\"3 visible slashes\\\\\\\""'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = r""" \"1 visible slash\" \\"2 visible slashes\\" \\\"3 visible slashes\\\" """ test.run_built_executable('defines_escaping', stdout=expect) # Test that various scary sequences are passed unfettered. 
try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s\n' """ r"""test_args='"$foo, &quot; `foo`;"'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = """ $foo, &quot; `foo`; """ test.run_built_executable('defines_escaping', stdout=expect) # VisualStudio 2010 can't handle passing %PATH% if not (test.format == 'msvs' and test.uses_msbuild): try: os.environ['GYP_DEFINES'] = ( """test_format='%s' """ """test_args='"%PATH%"'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = "%PATH%" test.run_built_executable('defines_escaping', stdout=expect) # Test commas and semi-colons preceded by backslashes (to exercise Windows' # quoting behaviour). try: os.environ['GYP_DEFINES'] = ( r"""test_format='\n%s\n%s\n' """ r"""test_args='"\\, \\\\;",""" # Same thing again, but enclosed in visible quotes. r""" "\"\\, \\\\;\""'""") test.run_gyp('defines-escaping.gyp') finally: del os.environ['GYP_DEFINES'] test.sleep() test.touch('defines-escaping.c') test.build('defines-escaping.gyp') expect = r""" \, \\; "\, \\;" """ test.run_built_executable('defines_escaping', stdout=expect) # We deliberately do not test having an odd number of quotes in a string # literal because that isn't feasible in MSVS. test.pass_test()
mit
rschnapka/odoo
addons/hr_holidays/wizard/hr_holidays_summary_department.py
51
2366
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # $Id: account.py 1005 2005-07-25 08:41:42Z nicoe $ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv from openerp.tools.translate import _ class hr_holidays_summary_dept(osv.osv_memory): _name = 'hr.holidays.summary.dept' _description = 'HR Leaves Summary Report By Department' _columns = { 'date_from': fields.date('From', required=True), 'depts': fields.many2many('hr.department', 'summary_dept_rel', 'sum_id', 'dept_id', 'Department(s)'), 'holiday_type': fields.selection([('Approved','Approved'),('Confirmed','Confirmed'),('both','Both Approved and Confirmed')], 'Leave Type', required=True) } _defaults = { 'date_from': lambda *a: time.strftime('%Y-%m-01'), 'holiday_type': 'Approved' } def print_report(self, cr, uid, ids, context=None): data = self.read(cr, uid, ids, [], context=context)[0] if not data['depts']: raise osv.except_osv(_('Error!'), _('You have to select at least one Department. And try again.')) datas = { 'ids': [], 'model': 'ir.ui.menu', 'form': data } return { 'type': 'ir.actions.report.xml', 'report_name': 'holidays.summary', 'datas': datas, } hr_holidays_summary_dept() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
chheplo/jaikuengine
common/management/commands/test.py
31
2817
# Copyright 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from django.core.management.base import BaseCommand from optparse import make_option import sys class Command(BaseCommand): """ Copied from the default django test command, extended to include coverage """ option_list = BaseCommand.option_list + ( make_option( '--noinput', action='store_false', dest='interactive', default=True, help='Tells Django to NOT prompt the user for input of any kind.' ), make_option( '--coverage', action='store_true', dest='coverage', default=False, help='Includes coverage reporting for the tests' ), make_option( '--profile_all', action='store_true', dest='profile_all', default=False, help='Includes profile reporting for all tests' ), make_option( '--include_profile', action='store_true', dest='include_profile', default=False, help='Includes profile reporting for profiled tests' ), ) help = 'Runs the test suite for the specified applications, or the entire site if no apps are specified.' args = '[appname ...]' requires_model_validation = False def handle(self, *test_labels, **options): from django.conf import settings verbosity = int(options.get('verbosity', 1)) interactive = options.get('interactive', True) include_coverage = options.get('coverage', False) profile_all = options.get('profile_all', False) include_profile = options.get('include_profile', False) test_path = settings.TEST_RUNNER.split('.') # Allow for Python 2.5 relative paths if len(test_path) > 1: test_module_name = '.'.join(test_path[:-1]) else: test_module_name = '.' test_module = __import__(test_module_name, {}, {}, test_path[-1]) test_runner = getattr(test_module, test_path[-1]) failures = test_runner(test_labels, verbosity=verbosity, interactive=interactive, include_coverage=include_coverage, include_profile=include_profile, profile_all=profile_all) if failures: sys.exit(failures)
apache-2.0
victorlapin/kernel_lge_bullhead-NG
tools/perf/scripts/python/syscall-counts.py
11176
1522
# system call counts # (c) 2010, Tom Zanussi <tzanussi@gmail.com> # Licensed under the terms of the GNU GPL License version 2 # # Displays system-wide system call totals, broken down by syscall. # If a [comm] arg is specified, only syscalls called by [comm] are displayed. import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import syscall_name usage = "perf script -s syscall-counts.py [comm]\n"; for_comm = None if len(sys.argv) > 2: sys.exit(usage) if len(sys.argv) > 1: for_comm = sys.argv[1] syscalls = autodict() def trace_begin(): print "Press control+C to stop and show the summary" def trace_end(): print_syscall_totals() def raw_syscalls__sys_enter(event_name, context, common_cpu, common_secs, common_nsecs, common_pid, common_comm, id, args): if for_comm is not None: if common_comm != for_comm: return try: syscalls[id] += 1 except TypeError: syscalls[id] = 1 def print_syscall_totals(): if for_comm is not None: print "\nsyscall events for %s:\n\n" % (for_comm), else: print "\nsyscall events:\n\n", print "%-40s %10s\n" % ("event", "count"), print "%-40s %10s\n" % ("----------------------------------------", \ "-----------"), for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \ reverse = True): print "%-40s %10d\n" % (syscall_name(id), val),
gpl-2.0
abhishekgahlot/youtube-dl
youtube_dl/postprocessor/xattrpp.py
115
7179
from __future__ import unicode_literals

import os
import subprocess
import sys
import errno

from .common import PostProcessor
from ..utils import (
    check_executable,
    hyphenate_date,
    version_tuple,
    PostProcessingError,
    encodeArgument,
    encodeFilename,
)


class XAttrMetadataError(PostProcessingError):
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT) or
                'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'


class XAttrMetadataPP(PostProcessor):

    #
    # More info about extended attributes for media:
    #   http://freedesktop.org/wiki/CommonExtendedAttributes/
    #   http://www.freedesktop.org/wiki/PhreedomDraft/
    #   http://dublincore.org/documents/usageguide/elements.shtml
    #
    # TODO:
    #  * capture youtube keywords and put them in 'user.dublincore.subject' (comma-separated)
    #  * figure out which xattrs can be used for 'duration', 'thumbnail', 'resolution'
    #

    def run(self, info):
        """ Set extended attributes on downloaded file (if xattr support is found). """

        # This mess below finds the best xattr tool for the job and creates a
        # "write_xattr" function.
        try:
            # try the pyxattr module...
            import xattr

            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/rg3/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                self._downloader.report_warning(
                    'python-pyxattr is detected but is too old. '
                    'youtube-dl requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

                raise ImportError

            def write_xattr(path, key, value):
                try:
                    xattr.set(path, key, value)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)

        except ImportError:
            if os.name == 'nt':
                # Write xattrs to NTFS Alternate Data Streams:
                # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
                def write_xattr(path, key, value):
                    assert ':' not in key
                    assert os.path.exists(path)

                    ads_fn = path + ":" + key
                    try:
                        with open(ads_fn, "wb") as f:
                            f.write(value)
                    except EnvironmentError as e:
                        raise XAttrMetadataError(e.errno, e.strerror)
            else:
                user_has_setfattr = check_executable("setfattr", ['--version'])
                user_has_xattr = check_executable("xattr", ['-h'])

                if user_has_setfattr or user_has_xattr:

                    def write_xattr(path, key, value):
                        value = value.decode('utf-8')
                        if user_has_setfattr:
                            executable = 'setfattr'
                            opts = ['-n', key, '-v', value]
                        elif user_has_xattr:
                            executable = 'xattr'
                            opts = ['-w', key, value]

                        cmd = ([encodeFilename(executable, True)] +
                               [encodeArgument(o) for o in opts] +
                               [encodeFilename(path, True)])

                        try:
                            p = subprocess.Popen(
                                cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                        except EnvironmentError as e:
                            raise XAttrMetadataError(e.errno, e.strerror)
                        stdout, stderr = p.communicate()
                        stderr = stderr.decode('utf-8', 'replace')
                        if p.returncode != 0:
                            raise XAttrMetadataError(p.returncode, stderr)

                else:
                    # On Unix, and can't find pyxattr, setfattr, or xattr.
                    if sys.platform.startswith('linux'):
                        self._downloader.report_error(
                            "Couldn't find a tool to set the xattrs. "
                            "Install either the python 'pyxattr' or 'xattr' "
                            "modules, or the GNU 'attr' package "
                            "(which contains the 'setfattr' tool).")
                    else:
                        self._downloader.report_error(
                            "Couldn't find a tool to set the xattrs. "
                            "Install either the python 'xattr' module, "
                            "or the 'xattr' binary.")

        # Write the metadata to the file's xattrs
        self._downloader.to_screen('[metadata] Writing metadata to file\'s xattrs')

        filename = info['filepath']

        try:
            xattr_mapping = {
                'user.xdg.referrer.url': 'webpage_url',
                # 'user.xdg.comment': 'description',
                'user.dublincore.title': 'title',
                'user.dublincore.date': 'upload_date',
                'user.dublincore.description': 'description',
                'user.dublincore.contributor': 'uploader',
                'user.dublincore.format': 'format',
            }

            for xattrname, infoname in xattr_mapping.items():

                value = info.get(infoname)

                if value:
                    if infoname == "upload_date":
                        value = hyphenate_date(value)

                    byte_value = value.encode('utf-8')
                    write_xattr(filename, xattrname, byte_value)

            return [], info

        except XAttrMetadataError as e:
            if e.reason == 'NO_SPACE':
                self._downloader.report_warning(
                    'There\'s no disk space left or disk quota exceeded. ' +
                    'Extended attributes are not written.')
            elif e.reason == 'VALUE_TOO_LONG':
                self._downloader.report_warning(
                    'Unable to write extended attributes due to too long values.')
            else:
                msg = 'This filesystem doesn\'t support extended attributes. '
                if os.name == 'nt':
                    msg += 'You need to use NTFS.'
                else:
                    msg += '(You may have to enable them in your /etc/fstab)'
                self._downloader.report_error(msg)

            return [], info
unlicense
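XAttrMetadataPP picks its xattr backend at run time: the Python xattr module first, NTFS alternate data streams on Windows, then the setfattr or xattr command-line tools. A rough stand-alone sketch of that probing order using only the standard library (this is not youtube-dl's actual helper; check_executable is approximated here with shutil.which):

# Sketch only: report which of the backends tried above exists on this machine.
import os
import shutil

def available_xattr_backend():
    try:
        import xattr  # pyxattr or the xattr module
        return 'python xattr module'
    except ImportError:
        pass
    if os.name == 'nt':
        return 'NTFS alternate data streams'
    if shutil.which('setfattr'):
        return 'setfattr (GNU attr package)'
    if shutil.which('xattr'):
        return 'xattr command-line tool'
    return None

print(available_xattr_backend())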
woozzu/pylearn2
pylearn2/packaged_dependencies/theano_linear/unshared_conv/test_gpu_unshared_conv.py
37
7950
from __future__ import print_function

import unittest
from nose.plugins.skip import SkipTest

import numpy

import theano

# Skip test if cuda_ndarray is not available.
from nose.plugins.skip import SkipTest
import theano.sandbox.cuda as cuda_ndarray
if cuda_ndarray.cuda_available == False:
    raise SkipTest('Optional package cuda disabled')

from theano.sandbox.cuda.var import float32_shared_constructor

from .unshared_conv import FilterActs
from .unshared_conv import WeightActs
from .unshared_conv import ImgActs
from .gpu_unshared_conv import (
    GpuFilterActs,
    GpuWeightActs,
    GpuImgActs,
)

import test_unshared_conv

if theano.config.mode == 'FAST_COMPILE':
    mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
    mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')


class TestGpuFilterActs(test_unshared_conv.TestFilterActs):
    """
    This class tests GpuWeightActs via the gradient of GpuFilterActs.

    The correctness of GpuFilterActs is tested in TestMatchFilterActs.
    """
    ishape = (1, 1, 4, 4, 2)  # 2 4x4 greyscale images
    fshape = (2, 2, 1, 3, 3, 1, 16)  # 16 3x3 filters at each location in a 2x2 grid
    module_stride = 1
    dtype = 'float32'
    mode = mode_with_gpu

    def setUp(self):
        test_unshared_conv.TestFilterActs.setUp(self)
        self.gpu_op = GpuFilterActs(
            module_stride=self.module_stride,
            partial_sum=1)
        self.s_images = float32_shared_constructor(
            self.s_images.get_value())
        self.s_filters = float32_shared_constructor(
            self.s_filters.get_value())

    def test_gpu_shape(self):
        import theano.sandbox.cuda as cuda_ndarray
        if cuda_ndarray.cuda_available == False:
            raise SkipTest('Optional package cuda disabled')
        gpuout = self.gpu_op(self.s_images, self.s_filters)
        assert 'Cuda' in str(self.s_filters.type)
        f = theano.function([], gpuout, mode=mode_with_gpu)
        outval = f()
        assert outval.shape == (
            self.fshape[-2], self.fshape[-1],
            self.fshape[0], self.fshape[1],
            self.ishape[-1])

    def test_insert_gpu_filter_acts(self):
        out = self.op(self.s_images, self.s_filters)
        f = self.function([], out)
        try:
            fgraph = f.maker.fgraph
        except:
            # this needs to work for older versions of theano too
            fgraph = f.maker.env
        assert isinstance(
            fgraph.toposort()[0].op,
            GpuFilterActs)

    def test_gpu_op_eq(self):
        assert GpuFilterActs(1, 1) == GpuFilterActs(1, 1)
        assert not (GpuFilterActs(1, 1) != GpuFilterActs(1, 1))
        assert (GpuFilterActs(1, 2) != GpuFilterActs(1, 1))
        assert (GpuFilterActs(2, 1) != GpuFilterActs(1, 1))
        assert GpuFilterActs(2, 1) != None


class TestGpuWeightActs(unittest.TestCase):
    """
    """
    ishape = (1, 1, 4, 4, 2)  # 2 4x4 greyscale images
    hshape = (1, 16, 2, 2, 2)
    fshape = (2, 2, 1, 3, 3, 1, 16)  # 16 3x3 filters at each location in a 2x2 grid
    frows = 3
    fcols = 3
    module_stride = 1
    partial_sum = 1
    dtype = 'float32'

    def setUp(self):
        self.gwa = GpuWeightActs(
            module_stride=self.module_stride,
            partial_sum=self.partial_sum)
        self.gpu_images = float32_shared_constructor(
            numpy.random.rand(*self.ishape).astype(self.dtype))
        self.gpu_hidact = float32_shared_constructor(
            numpy.random.rand(*self.hshape).astype(self.dtype))

    def test_shape(self):
        dfilters = self.gwa(self.gpu_images, self.gpu_hidact,
                            self.frows, self.fcols)
        f = theano.function([], dfilters)
        outval = f()
        assert outval.shape == self.fshape


class TestGpuImgActs(unittest.TestCase):
    """
    """
    ishape = (1, 1, 4, 4, 2)  # 2 4x4 greyscale images
    hshape = (1, 16, 2, 2, 2)
    fshape = (2, 2, 1, 3, 3, 1, 16)  # 16 3x3 filters at each location in a 2x2 grid
    irows = 4
    icols = 4
    module_stride = 1
    partial_sum = 1
    dtype = 'float32'

    def setUp(self):
        self.gia = GpuImgActs(
            module_stride=self.module_stride,
            partial_sum=self.partial_sum)
        self.gpu_images = float32_shared_constructor(
            numpy.random.rand(*self.ishape).astype(self.dtype))
        self.gpu_hidact = float32_shared_constructor(
            numpy.random.rand(*self.hshape).astype(self.dtype))
        self.gpu_filters = float32_shared_constructor(
            numpy.random.rand(*self.fshape).astype(self.dtype))

    def test_shape(self):
        dimages = self.gia(self.gpu_filters, self.gpu_hidact,
                           self.irows, self.icols)
        f = theano.function([], dimages)
        outval = f()
        assert outval.shape == self.ishape


if 1:
    class TestMatchFilterActs(unittest.TestCase):
        def setUp(self):
            numpy.random.seed(77)

        def run_match(self, images, filters, module_stride, retvals=False,
                      partial_sum=1):

            gfa = GpuFilterActs(module_stride, partial_sum)
            fa = FilterActs(module_stride)

            gpu_images = float32_shared_constructor(images)
            gpu_filters = float32_shared_constructor(filters)
            cpu_images = theano.shared(images)
            cpu_filters = theano.shared(filters)

            gpu_out = gfa(gpu_images, gpu_filters)
            cpu_out = fa(cpu_images, cpu_filters)

            f = theano.function([], [cpu_out, gpu_out])
            cpuval, gpuval = f()
            gpuval = numpy.asarray(gpuval)

            if retvals:
                return cpuval, gpuval
            else:
                #print 'run_match: cpu shape', cpuval.shape
                #print 'run_match: gpu shape', gpuval.shape
                assert cpuval.shape == gpuval.shape
                assert numpy.allclose(cpuval, gpuval)

        def run_match_shape(self, ishape, fshape, module_stride, dtype='float32'):
            return self.run_match(
                images=numpy.random.rand(*ishape).astype(dtype),
                filters=numpy.random.rand(*fshape).astype(dtype),
                module_stride=module_stride)

        def test_small_random(self):
            self.run_match_shape(
                ishape = (1, 1, 4, 4, 2),
                fshape = (2, 2, 1, 3, 3, 1, 16),
                module_stride = 1)

        def test_small_random_colors(self):
            self.run_match_shape(
                ishape = (1, 6, 4, 4, 2),
                fshape = (2, 2, 6, 3, 3, 1, 16),
                module_stride = 1)

        def test_small_random_groups(self):
            self.run_match_shape(
                ishape = (5, 6, 4, 4, 2),
                fshape = (2, 2, 6, 3, 3, 5, 16),
                module_stride = 1)

        def test_small_random_module_stride(self):
            self.run_match_shape(
                ishape = (4, 6, 5, 5, 1),
                fshape = (2, 2, 6, 3, 3, 4, 16),
                module_stride = 2)

        def test_med_random_module_stride(self):
            self.run_match_shape(
                ishape = (4, 6, 32, 32, 1),
                fshape = (12, 12, 6, 3, 3, 4, 16),
                module_stride = 2)

        def _blah_topcorner_filter1(self):
            ishape = (1, 1, 4, 4, 2)
            fshape = (2, 2, 1, 3, 3, 1, 16)
            images = numpy.random.rand(*ishape).astype('float32')
            filters = numpy.random.rand(*fshape).astype('float32')
            filters *= 0
            filters[0,0,0,0,0,0,0] = 1
            self.run_match(images, filters, 1)

        def _blah_botcorner_filter1(self):
            ishape = (1, 1, 4, 4, 2)
            fshape = (2, 2, 1, 3, 3, 1, 16)
            images = numpy.random.rand(*ishape).astype('float32')
            filters = numpy.random.rand(*fshape).astype('float32')
            filters *= 0
            filters[1,1,0,0,0,0,0] = 1
            cpuval, gpuval = self.run_match(images, filters, 1, retvals=True)
            print(images)
            print(cpuval[:, :, 1, 1, :])
            print(gpuval[:, :, 1, 1, :])
bsd-3-clause
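test_gpu_shape above checks that the FilterActs output shape is rebuilt from pieces of fshape and ishape. A small sketch of that bookkeeping as a plain function, assuming the same tuple layout the tests use:

# Sketch only: the shape convention asserted by TestGpuFilterActs.test_gpu_shape.
def filter_acts_output_shape(ishape, fshape):
    # likely (groups, filters_per_module, module_rows, module_cols, n_images)
    return (fshape[-2], fshape[-1], fshape[0], fshape[1], ishape[-1])

print(filter_acts_output_shape((1, 1, 4, 4, 2), (2, 2, 1, 3, 3, 1, 16)))
# (1, 16, 2, 2, 2) - the same layout as the hshape used by TestGpuWeightActs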
kaaveland/trackit
trackit/data.py
1
10217
# coding=utf-8
# Copyright (c) 2013 Robin Kåveland Hansen
#
# This file is a part of trackit. It is distributed under the terms
# of the modified BSD license. The full license is available in
# LICENSE, distributed as part of this software.
"""
Interface to the data models used in trackit.
"""

import sqlite3
import time

from contextlib import closing

from trackit.util import dumb_constructor, DefaultRepr
from trackit.exceptions import TrackitException


class TooManyTasksInProgress(TrackitException):
    """Data integrity problem - only one task should be in progress at any
    time."""
    pass


class NoTaskInProgress(TrackitException):
    """Logical problem - should not stop when no task is in progress."""
    pass


class InconsistentTaskIntervals(TrackitException):
    """Data integrity problem - overlapping task intervals, it should only
    be possible to track one at a time."""
    pass


class ClosesCursor(object):
    """Inherit to get context managed cursor, enabling the following idiom:

    >>> with self.cursor() as cursor:
    ...     cursor.execute(sql)
    ...
    >>> # cursor was closed automatically.

    For this to work, the instance needs to have a conn attribute or property.
    """
    def cursor(self):
        """Context-managed cursor from self.conn."""
        return closing(self.conn.cursor())


def _execute_with_except(cursor, sql):
    """Ignore sqlite3 exceptions when executing this sql."""
    try:
        cursor.execute(sql)
    except sqlite3.OperationalError:
        pass


class Task(DefaultRepr):
    """Model for a Task."""

    @dumb_constructor
    def __init__(self, _task_id, name, description):
        pass

    @property
    def task_id(self):
        """Readonly - the row id of the task."""
        return self._task_id

    @classmethod
    def map_row(cls, row):
        return cls(*row)


class Tasks(ClosesCursor):
    """Repository to use for accessing, creating and updating Task."""

    SCHEMA = """
    CREATE TABLE TASK(
      TASK INTEGER,
      NAME TEXT NOT NULL,
      DESCRIPTION TEXT,
      PRIMARY KEY(TASK)
    );
    """

    @dumb_constructor
    def __init__(self, conn):
        """Create a Tasks repository. This will attempt to register the schema.

        Arguments:
        - `conn`: sqlite3 database connection.
        """
        with self.cursor() as cursor:
            _execute_with_except(cursor, Tasks.SCHEMA)

    def create(self, name, description=None):
        """Create a Task and return a valid instance stored in the db.

        Arguments:
        - `name`: The name of the task.
        - `description`: An optional description of the task.
        """
        with self.cursor() as cursor:
            cursor.execute("INSERT INTO TASK(NAME, DESCRIPTION)"
                           "VALUES(?, ?)", (name, description))
            return Task(cursor.lastrowid, name, description)

    def update(self, task):
        """Update a task in the database.

        Arguments:
        - `task`: The task to update.
        """
        with self.cursor() as cursor:
            cursor.execute("UPDATE TASK SET NAME = ?, DESCRIPTION = ?"
                           "WHERE TASK = ?",
                           (task.name, task.description, task.task_id))

    def by_name(self, name):
        """Attempt to find tasks by their name.

        Arguments:
        - `name`: the name to search for.
        """
        name_like = "%{}%".format(name)
        with self.cursor() as cursor:
            cursor.execute("SELECT TASK, NAME, DESCRIPTION FROM"
                           " TASK WHERE NAME LIKE ?", (name_like,))
            return [Task.map_row(row) for row in cursor.fetchall()]

    def all(self):
        """Retrieve all tasks in the database."""
        with self.cursor() as cursor:
            cursor.execute("SELECT TASK, NAME, DESCRIPTION FROM TASK")
            return [Task.map_row(row) for row in cursor.fetchall()]

    def by_id(self, id_):
        """Find a task with a given id. Will raise KeyError if there is no such id.

        Arguments:
        - `id_`: The id of the task to retrieve.
        """
        with self.cursor() as cursor:
            cursor.execute("SELECT TASK, NAME, DESCRIPTION FROM TASK WHERE"
                           " TASK = ?", (id_,))
            row = cursor.fetchone()
            if not row:
                raise KeyError("No Task with id: {}".format(id_))
            return Task.map_row(row)


class TaskInterval(DefaultRepr):
    """Model for a time spent working on some task."""

    @dumb_constructor
    def __init__(self, _task, _task_interval, start_time, stop_time=None):
        pass

    @property
    def task(self):
        """Readonly - the task this interval was spent working with."""
        return self._task

    @property
    def task_interval(self):
        """Readonly - the id of this taskinterval."""
        return self._task_interval

    @property
    def in_progress(self):
        """True when the time interval has not been stopped."""
        return self.stop_time is None

    @property
    def duration(self):
        """Duration in seconds this task has been ongoing."""
        stop = self.stop_time if self.stop_time is not None else time.time()
        return stop - self.start_time

    @classmethod
    def map_row(cls, task, row):
        return cls(task, *row)


class TaskIntervals(ClosesCursor):
    """Repository to use for accessing, creating and updating TaskIntervals.
    """

    SCHEMA = """
    CREATE TABLE TASKINTERVAL(
      TASKINTERVAL INTEGER,
      TASK INTEGER NOT NULL,
      START_TIME INTEGER NOT NULL,
      STOP_TIME INTEGER,
      PRIMARY KEY(TASKINTERVAL),
      FOREIGN KEY(TASK) REFERENCES TASK(TASK)
    );
    """

    @dumb_constructor
    def __init__(self, conn, tasks=None):
        """Create a TaskIntervals repository. This will attempt to register the schema.

        Arguments:
        - `conn`: sqlite3 database connection.
        - `tasks`: Tasks repository - if None, one will be created.
        """
        if tasks is None:
            self.tasks = Tasks(conn)
        with self.cursor() as cursor:
            _execute_with_except(cursor, TaskIntervals.SCHEMA)

    def start(self, task, when=None):
        """Start working on a task.

        Arguments:
        - `task`: the task to start working on.
        - `when`: unix-time for when task was started."""
        assert task is not None, "may not start task None"
        in_progress = self.in_progress()
        if in_progress is not None:
            raise TooManyTasksInProgress("Must stop working on {} before"
                                         " starting on new task."
                                         .format(in_progress))
        when = time.time() if when is None else when
        with self.cursor() as cursor:
            cursor.execute("SELECT TASK, START_TIME, STOP_TIME FROM "
                           "TASKINTERVAL WHERE START_TIME < ? AND "
                           "STOP_TIME > ?", (when, when))
            rows = cursor.fetchall()
            if rows:
                task, start, stop = rows[0]
                message = ("Already an interval from {} to {} working on task {}"
                           .format(start, stop, task))
                raise InconsistentTaskIntervals(message)
            sql = "INSERT INTO TASKINTERVAL(TASK, START_TIME) VALUES(?, ?)"
            cursor.execute(sql, (task.task_id, when))
            return TaskInterval(task, cursor.lastrowid, when)

    def stop(self, task, when=None):
        """Stop working on a task.

        Arguments:
        - `task`: the task to stop working on.
        - `when`: unix time for when task was stopped."""
        latest = ("SELECT TASKINTERVAL, START_TIME FROM TASKINTERVAL "
                  "WHERE TASK = ? AND START_TIME = "
                  "(SELECT MAX(START_TIME) FROM TASKINTERVAL WHERE TASK = ?)")
        when = time.time() if when is None else when
        stop = "UPDATE TASKINTERVAL SET STOP_TIME = ? WHERE TASKINTERVAL = ?"
        with self.cursor() as cursor:
            cursor.execute(latest, (task.task_id, task.task_id))
            row = cursor.fetchone()
            if not row:
                raise NoTaskInProgress("No work in progress on task: {}"
                                       .format(task))
            interval_id, start_time = row
            if start_time >= when:
                message = ("Start time is {} which is *after* stop time: {}"
                           .format(start_time, when))
                raise InconsistentTaskIntervals(message)
            cursor.execute(stop, (when, interval_id))
            return TaskInterval(task, interval_id, start_time, when)

    def for_task(self, task):
        """Extract all task intervals spent working on some task.

        Arguments:
        - `task`: the task to extract intervals for.
        """
        sql = ("SELECT TASKINTERVAL, START_TIME, STOP_TIME "
               "FROM TASKINTERVAL WHERE TASK = ? ORDER BY TASKINTERVAL")
        with self.cursor() as cursor:
            cursor.execute(sql, (task.task_id,))
            return [TaskInterval.map_row(task, row)
                    for row in cursor.fetchall()]

    def in_progress(self):
        """Extract the task interval currently in progress."""
        interval_sql = ("SELECT TASK, TASKINTERVAL, START_TIME, STOP_TIME "
                        "FROM TASKINTERVAL "
                        "WHERE STOP_TIME IS NULL")
        with self.cursor() as cursor:
            cursor.execute(interval_sql)
            rows = cursor.fetchall()
            if len(rows) > 1:
                message = ("Should only have one task in progress but found: {}"
                           .format(rows))
                raise TooManyTasksInProgress(message)
            if not rows:
                return None
            task_id, interval = rows[0][0], rows[0][1:]
            task = self.tasks.by_id(task_id)
            return TaskInterval.map_row(task, interval)


class Data(object):
    def __init__(self, conn):
        self.tasks = Tasks(conn)
        self.intervals = TaskIntervals(conn)
        self.conn = conn
bsd-2-clause
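A rough usage sketch of the repositories defined in trackit/data.py against an in-memory sqlite database; it assumes the trackit package (trackit.util, trackit.exceptions) is importable, and the task name and unix timestamps are invented for illustration:

import sqlite3
from trackit.data import Data

conn = sqlite3.connect(':memory:')
data = Data(conn)  # registers the TASK and TASKINTERVAL schemas

task = data.tasks.create('write report', 'quarterly status report')
data.intervals.start(task, when=1000)
finished = data.intervals.stop(task, when=1600)

print(finished.duration)              # 600 seconds spent on the task
print(data.intervals.in_progress())   # None - nothing is being tracked now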