# Code-dataset dump: each entry below is one source file from the dataset.
# The original viewer header listed the columns (repo_name, path, copies,
# size, content, license, hash, line_mean, line_max, alpha_frac,
# autogenerated); only each file's content plus its repo, path, and license
# are kept here, as "====" separator comments.

# ==== Jan-zou/LeetCode :: python/HashTable/290_word_pattern.py (license: MIT) ====
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Description:
Given a pattern and a string str, find if str follows the same pattern.
Here follow means a full match, such that there is a bijection between a letter in pattern and a non-empty word in str.
Examples:
pattern = "abba", str = "dog cat cat dog" should return true.
pattern = "abba", str = "dog cat cat fish" should return false.
pattern = "aaaa", str = "dog cat cat dog" should return false.
pattern = "abba", str = "dog dog dog dog" should return false.
Notes:
You may assume pattern contains only lowercase letters, and str contains lowercase letters separated by a single space.
Tags: Hash Table
Time: O(n); Space: O(n)
'''
class Solution(object):
def wordPattern(self, pattern, str):
"""
:type pattern: str
:type str: str
:rtype: bool
"""
words = str.split() # Space: O(n)
if len(pattern) != len(words):
return False
w2p, p2w = {}, {}
for p, w in zip(pattern, words):
if w not in w2p and p not in p2w:
# Build mapping. Space: O(c)
w2p[w] = p
p2w[p] = w
elif w not in w2p or w2p[w] != p:
# elif p not in p2w or p2w[p]!=w:
# Contradict mapping.
return False
return True
if __name__ == '__main__':
    print(Solution().wordPattern('abba', 'dog cat cat dog'))
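    # The remaining documented examples, for completeness:
    print(Solution().wordPattern('abba', 'dog cat cat fish'))  # expect False
    print(Solution().wordPattern('aaaa', 'dog cat cat dog'))   # expect False
    print(Solution().wordPattern('abba', 'dog dog dog dog'))   # expect False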

# ==== shmilee/gdpy3 :: src/processors/processor.py (license: MIT) ====
# -*- coding: utf-8 -*-
# Copyright (c) 2020 shmilee
'''
Contains processor base class.
'''
import os
import re
import time
import pickle
import hashlib
from .. import __gversion__
from ..glogger import getGLogger
from ..loaders import is_rawloader, get_rawloader, is_pckloader, get_pckloader
from ..savers import is_pcksaver, get_pcksaver
from ..cores.exporter import Exporter
from ..visplters import get_visplter, is_visplter
__all__ = ['Processor']
plog = getGLogger('P')
class Processor(object):
'''
Serial Processor class.
Attributes
----------
name: class name of this processor
rawloader: rawloader object to get raw data
converters: converter cores to convert raw data to pickled data
pcksaver: pcksaver object to save pickled data
pckloader: pckloader object to get pickled data
ressaver: cachepcksaver object to save dig results
resfilesaver: pcksaver object to save long time dig results
diggers: digger cores to calculate pickled data to results
availablelabels: list
figure labels in this processor, like 'group/fignum'
resloader: cachepckloader object to get dig results
resfileloader: pckloader object to get long time dig results
diggedlabels: set
figlabels/kwargstr digged in ressaver or resfilesaver
like 'group/fignum/a=1,b=2'
exporters: dict
exporters generated
exporttemplates: list
templates of generated exporters
Notes
-----
1. :attr:`saltname` means base name of salt file for `saver.path`.
The :attr:`rawloader` must have at least one salt file.
If more than one salt files, the one with min path depth will be used.
:attr:`saltstr` is the salt string generated from salt file.
2. :attr:`dig_acceptable_time` means if :meth:`dig` spends more
time than this, the results will be saved in :attr:`resfilesaver`.
'''
@property
def name(self):
return type(self).__name__
parallel = 'off'
# # Start Convert Part
__slots__ = ['_rawloader', '_pcksaver', '_converters', '_saltstr']
ConverterCores = []
saltname = ''
def __check_rawloader(self, rawloader):
if rawloader is None:
return False
if not is_rawloader(rawloader):
plog.error("%s: Not a rawloader object!" % self.name)
return False
saltfiles = rawloader.refind('^(?:|.*/)%s$' % self.saltname)
if len(saltfiles) == 0:
plog.error("%s: Can't find '%s' in '%s'!"
% (self.name, self.saltname, rawloader.path))
return False
elif len(saltfiles) > 1:
plog.warning(
"%s: More than one '%s' %s found in '%s'!"
% (self.name, self.saltname, saltfiles, rawloader.path))
lth = [len(f.split('/')) for f in saltfiles]
idx = lth.index(min(lth)) # min path depth
ignore_dirs = [os.path.dirname(saltfiles[i])
for i in range(len(saltfiles)) if i != idx]
plog.warning(
"%s: Use '%s' as salt file, ignore other files in %s!"
% (self.name, saltfiles[idx], ignore_dirs))
_old_fe = rawloader.filenames_exclude
_new_fe = [r'^%s.*$' % d for d in ignore_dirs]
rawloader = get_rawloader(
rawloader.path, filenames_exclude=_old_fe + _new_fe)
return rawloader
def _get_rawloader(self):
return self._rawloader
def _set_rawloader(self, rawloader):
self._converters = []
rawloader = self.__check_rawloader(rawloader)
if rawloader:
self._rawloader = rawloader
for Cc in self.ConverterCores:
self._converters.extend(Cc.generate_cores(rawloader))
else:
self._rawloader = None
rawloader = property(_get_rawloader, _set_rawloader)
@property
def converters(self):
return self._converters
def _get_pcksaver(self):
return self._pcksaver
def _set_pcksaver(self, pcksaver):
if is_pcksaver(pcksaver):
self._pcksaver = pcksaver
else:
self._pcksaver = None
pcksaver = property(_get_pcksaver, _set_pcksaver)
def set_prefer_pcksaver(self, savetype, ext2='converted'):
'''
Set preferable pcksaver path beside raw data.
Parameters
----------
savetype: str
extension of pcksaver.path, like (.npz)
ext2: str
second extension, like name.(converted).npz
'''
if not self.rawloader:
raise IOError("%s: Need a rawloader object!" % self.name)
# salt
saltfile = self.rawloader.refind('^(?:|.*/)%s$' % self.saltname)[0]
if self.rawloader.loader_type in ['sftp.directory']:
salt = hashlib.sha1(saltfile.encode('utf-8')).hexdigest()
else:
try:
with self.rawloader.get(saltfile) as f:
salt = hashlib.sha1(f.read().encode('utf-8')).hexdigest()
except Exception:
plog.warning("Failed to read salt file '%s'!" % saltfile)
salt = hashlib.sha1(saltfile.encode('utf-8')).hexdigest()
plog.debug("Get salt string: '%s'." % salt)
# prefix
prefix = self.rawloader.beside_path(self.name.lower())
# savetype
if os.access(os.path.dirname(prefix), os.W_OK):
if savetype not in ['.npz', '.hdf5']:
plog.warning("Use default savetype '.npz'.")
savetype = '.npz'
else:
plog.debug("Use savetype '.cache' because %s isn't writable!"
% os.path.dirname(prefix))
savetype = '.cache'
# assemble
savepath = '%s-%s.%s%s' % (prefix, salt[:6], ext2, savetype)
self._saltstr = salt
self.pcksaver = get_pcksaver(savepath)
@property
def saltstr(self):
if getattr(self, '_saltstr', None):
# has rawloader, pcksaver
return self._saltstr
elif getattr(self, 'pckloader', None):
# no rawloader, pcksaver
if 'saltstr' in self.pckloader:
return self.pckloader.get('saltstr')
else:
                m = re.match(r'.*-(.{6})\.converted\..*', self.pckloader.path)
if m:
return m.groups()[0]
# fallback
return '123456'
@property
def _rawsummary(self):
return "Raw data files in %s '%s'" % (
self.rawloader.loader_type, self.rawloader.path)
def _pre_convert(self, add_desc=None):
if not self.rawloader:
plog.error("%s: Need a rawloader object!" % self.name)
return
if not self.pcksaver:
plog.error("%s: Need a pcksaver object!" % self.name)
return
summary = "Pck data converted from %s." % self._rawsummary
description = ("%s\nCreated by gdpy3 v%s.\nCreated on %s."
% (summary, __gversion__, time.asctime()))
if add_desc:
description += '\n' + str(add_desc)
with self.pcksaver:
self.pcksaver.write('/', {'description': description,
'saltstr': self.saltstr,
'processor': self.name})
def _post_convert(self):
plog.info("%s are converted to %s!"
% (self._rawsummary, self.pcksaver.path))
def convert(self, add_desc=None):
'''
Convert raw data in rawloader.path, and save them in pcksaver.
'''
self._pre_convert(add_desc=add_desc)
with self.pcksaver:
for core in self.converters:
self.pcksaver.write(core.group, core.convert())
self._post_convert()
# # End Convert Part
# # Start Dig Part
__slots__.extend(['_pckloader', '_ressaver', '_resfilesaver',
'_diggers', '_availablelabels_lib', '_availablelabels',
'_resloader', '_resfileloader', '_diggedlabels'])
DiggerCores = []
dig_acceptable_time = 30
def _check_pckloader_backward_version(self, pckloader):
return False
def _check_pckloader_forward_version(self, pckloader):
return False
def __check_pckloader(self, pckloader):
if not is_pckloader(pckloader):
plog.error("%s: Not a pckloader object!" % self.name)
return False
if (self._check_pckloader_backward_version(pckloader)
or self._check_pckloader_forward_version(pckloader)):
return True
if 'processor' not in pckloader:
plog.error("%s: Can't find 'processor' in '%s'!"
% (self.name, pckloader.path))
return False
pname = pckloader.get('processor')
if pname != self.name:
plog.error("%s: Invalid 'processor' '%s'! Did you mean '%s'?"
% (self.name, pname, self.name))
return False
return True
def _get_pckloader(self):
return self._pckloader
def _set_pckloader(self, pckloader):
self._diggers = []
if pckloader and self.__check_pckloader(pckloader):
self._pckloader = pckloader
for Dc in self.DiggerCores:
self._diggers.extend(Dc.generate_cores(pckloader))
else:
self._pckloader = None
self._availablelabels_lib = {dc.figlabel: dc for dc in self._diggers}
self._availablelabels = sorted(self._availablelabels_lib.keys())
pckloader = property(_get_pckloader, _set_pckloader)
@property
def diggers(self):
return self._diggers
@property
def availablelabels(self):
return self._availablelabels
# save results
def _get_ressaver(self):
return self._ressaver
def _set_ressaver(self, ressaver):
if is_pcksaver(ressaver):
self._ressaver = ressaver
else:
self._ressaver = None
ressaver = property(_get_ressaver, _set_ressaver)
def _get_resfilesaver(self):
return self._resfilesaver
def _set_resfilesaver(self, resfilesaver):
if is_pcksaver(resfilesaver):
self._resfilesaver = resfilesaver
else:
self._resfilesaver = None
resfilesaver = property(_get_resfilesaver, _set_resfilesaver)
# reload results
def _get_resloader(self):
return self._resloader
def _set_resloader(self, resloader):
if not getattr(self, '_diggedlabels', None):
self._diggedlabels = set()
if resloader and self.__check_pckloader(resloader):
self._resloader = resloader
self._diggedlabels.update(resloader.datagroups)
else:
self._resloader = None
resloader = property(_get_resloader, _set_resloader)
def _get_resfileloader(self):
return self._resfileloader
def _set_resfileloader(self, resfileloader):
if not getattr(self, '_diggedlabels', None):
self._diggedlabels = set()
if resfileloader and self.__check_pckloader(resfileloader):
self._resfileloader = resfileloader
self._diggedlabels.update(resfileloader.datagroups)
else:
self._resfileloader = None
resfileloader = property(_get_resfileloader, _set_resfileloader)
@property
def diggedlabels(self):
return self._diggedlabels
def set_prefer_ressaver(self, ext2='digged', oldext2='converted',
overwrite=False):
'''
Set preferable ressaver resfilesaver beside converted data.
Parameters
----------
ext2: str
second extension, like name.(digged).npz
oldext2: str
second extension of converted file, like name.(converted).npz
overwrite: bool
overwrite existing resfilesaver.path file or not, default False
'''
if not self.pckloader:
raise IOError("%s: Need a pckloader object!" % self.name)
saverstr, ext = os.path.splitext(self.pckloader.path)
saverstr = saverstr.replace(oldext2, ext2)
respath = '%s.cache' % saverstr
ressaver = get_pcksaver(respath)
with ressaver:
ressaver.write('/', {'processor': self.name})
self.ressaver = ressaver
self.resloader = get_pckloader(ressaver.get_store())
plog.debug("Default %s data cache is %s." % (ext2, respath))
if ext != '.cache':
try:
respath = '%s%s' % (saverstr, ext)
resfilesaver = get_pcksaver(respath)
if overwrite and os.path.isfile(respath):
plog.warning("Remove old %s data file: %s!"
% (ext2, respath))
os.remove(respath)
if not os.path.isfile(respath):
# new file
with resfilesaver:
resfilesaver.write('/', {'processor': self.name})
self.resfilesaver = resfilesaver
self.resfileloader = get_pckloader(resfilesaver.get_store())
plog.info("Default %s data path is %s." % (ext2, respath))
except Exception:
plog.error("%s: Failed to set results file pcksaver, '%s'!"
% (self.name, respath), exc_info=1)
self.resfilesaver = None
def _before_new_dig(self, figlabel, redig, kwargs):
'''Get digcore, try old dig results'''
if not self.pckloader:
plog.error("%s: Need a pckloader object!" % self.name)
return None, 'No pckloader', None
if not self.ressaver:
plog.error("%s: Need a results pcksaver object!" % self.name)
return None, 'No pcksaver', None
if figlabel not in self.availablelabels:
plog.error("%s: Figure %s not found!" % (self.name, figlabel))
return None, 'Invalid figlabel', None
digcore = self._availablelabels_lib[figlabel]
gotkwargstr = digcore.str_dig_kwargs(kwargs) or 'DEFAULT'
gotfiglabel = '%s/%s' % (figlabel, gotkwargstr)
if not redig and gotfiglabel in self.diggedlabels:
if gotfiglabel in self.resloader.datagroups:
# use resloader first
gotresloader, fileloader = self.resloader, False
elif (self.resfileloader and
gotfiglabel in self.resfileloader.datagroups):
gotresloader, fileloader = self.resfileloader, True
else:
plog.error('%s: Not found %s in diggedlabels!'
% (self.name, gotfiglabel))
return digcore, gotfiglabel, None
plog.info('Find %s digged results in %s.' % (
gotfiglabel, os.path.basename(gotresloader.path)))
if gotfiglabel.endswith('/DEFAULT'):
try_link_key = '%s/_LINK' % gotfiglabel
if try_link_key in gotresloader:
linkgotfiglabel = gotresloader.get(try_link_key)
plog.debug('Find %s digged results link to %s.' % (
gotfiglabel, linkgotfiglabel))
gotfiglabel = linkgotfiglabel
results = gotresloader.get_by_group(gotfiglabel)
if fileloader:
# reload kwoptions
digcore.kwoptions = pickle.loads(
results.pop('kwoptions', None))
return digcore, gotfiglabel, results
else:
return digcore, gotfiglabel, None
def _do_new_dig(self, digcore, kwargs):
'''Dig new results.'''
results, acckwargstr, digtime = digcore.dig(**kwargs)
if not acckwargstr:
acckwargstr = 'DEFAULT'
accfiglabel = '%s/%s' % (digcore.figlabel, acckwargstr)
return accfiglabel, results, digtime
def _cachesave_new_dig(self, accfiglabel, gotfiglabel, results):
'''Cache dig results, link DEFAULT to accfiglabel.'''
with self.ressaver:
self.ressaver.write(accfiglabel, results)
if (gotfiglabel.endswith('/DEFAULT')
and not accfiglabel.endswith('/DEFAULT')):
# link double cache
self.ressaver.write(gotfiglabel, dict(_LINK=accfiglabel))
def _filesave_new_dig(self, accfiglabel, gotfiglabel, results, digcore):
'''Save dig results in file, link DEFAULT to accfiglabel.'''
# also save kwoptions
kwopts = dict(kwoptions=pickle.dumps(digcore.kwoptions))
with self.resfilesaver:
shortpath = os.path.basename(self.resfilesaver.path)
plog.info('Save %s digged results in %s.' % (
accfiglabel, shortpath))
self.resfilesaver.write(accfiglabel, results)
self.resfilesaver.write(accfiglabel, kwopts)
if (gotfiglabel.endswith('/DEFAULT')
and not accfiglabel.endswith('/DEFAULT')):
# link double cache
plog.info('Save %s digged results in %s.' % (
gotfiglabel, shortpath))
self.resfilesaver.write(
gotfiglabel, dict(_LINK=accfiglabel))
def dig(self, figlabel, redig=False, callback=None, post=True, **kwargs):
'''
Get digged results of *figlabel*.
Use :meth:`dig_doc` to see *kwargs* for *figlabel*.
Return accfiglabel, results and template name,
and accfiglabel is 'figlabel/digkwargstr'.
Parameters
----------
redig: bool
If :attr:`resfilesaver` type is '.npz', *redig* may cause warning:
"zipfile.py: UserWarning: Duplicate name ..."
Recommend using '.hdf5' when *redig* is True or
setting :attr:`resfilesaver.duplicate_name`=False to rebuild
a new zip archive when we get duplicate names.
callback: a callable
It accepts two arguments, accfiglabel and dig results before
post_dig. This can be used to get some numbers from results.
post: bool
call post_dig
'''
data = self._before_new_dig(figlabel, redig, kwargs)
if data[0] is None:
return data
digcore, gotfiglabel, results = data
if results is None:
accfiglabel, results, digtime = self._do_new_dig(digcore, kwargs)
self._cachesave_new_dig(accfiglabel, gotfiglabel, results)
self.resloader = get_pckloader(self.ressaver.get_store())
if self.resfilesaver and digtime > self.dig_acceptable_time:
# long execution time
self._filesave_new_dig(
accfiglabel, gotfiglabel, results, digcore)
self.resfileloader = get_pckloader(
self.resfilesaver.get_store())
else:
accfiglabel = gotfiglabel
if callable(callback):
callback(accfiglabel, results)
if post:
results = digcore.post_dig(results)
return accfiglabel, results, digcore.post_template
def dig_doc(self, figlabel, see='help'):
'''
help(digcore.dig) or digcore.dig.__doc__
Parameters
----------
see: str
'help', 'print' or 'return'
'''
if figlabel not in self.availablelabels:
plog.error("%s: Figure %s not found!" % (self.name, figlabel))
return
digcore = self._availablelabels_lib[figlabel]
if see == 'help':
help(digcore.dig)
elif see == 'print':
print(digcore.dig.__doc__)
elif see == 'return':
return digcore.dig.__doc__
else:
pass
def refind(self, pattern):
'''Find the figlabels which match the regular expression *pattern*.'''
pat = re.compile(pattern)
return tuple(filter(
lambda k: True if re.match(pat, k) else False, self.availablelabels))
# # End Dig Part
# # Start Export Part
ExporterCores = [Exporter]
exporters = {pt: Exporter(pt) for pt in Exporter.template_available}
_exporttemplates = list(Exporter.template_available)
@property
def exporttemplates(self):
return self._exporttemplates
def _get_exporter(self, post_tmpl):
if post_tmpl in self._exporttemplates:
return self.exporters[post_tmpl]
else:
try:
ecore = Exporter(post_tmpl)
except Exception:
pass
else:
self.exporters[post_tmpl] = ecore
self._exporttemplates.append(post_tmpl)
return ecore
return None
def export(self, figlabel, what='axes', fmt='dict',
callback=None, **kwargs):
'''
Get and assemble digged results, template of *figlabel*.
Use :meth:`dig_doc` to see *kwargs* for *figlabel*.
Use :meth:`export_doc` to see *kwargs* for :meth:`exportcore.export`.
Returns
-------
assembled results in format *fmt*
If *what* is 'axes', results['accfiglabel'] will be updated
from 'figlabel/digkwargstr' to 'figlabel/digkwargstr,viskwargstr',
where 'viskwargstr' is :meth:`exportcore.export` *kwargs* to str.
Parameters
----------
what: str
'axes'(default), results for visplter
'options', options for GUI widgets
fmt: str
export format, 'dict'(default), 'pickle' or 'json'
callback: see :meth:`dig`, only for what='axes'
'''
if what not in ('axes', 'options'):
            what = 'axes'
if fmt not in ('dict', 'pickle', 'json'):
fmt = 'dict'
if figlabel in self.availablelabels:
if what == 'axes':
label_kw, res, tmpl = self.dig(
figlabel, callback=callback, post=True, **kwargs)
exportcore = self._get_exporter(tmpl)
if exportcore:
return exportcore.export(
res, otherinfo=dict(status=200,
figlabel=figlabel,
accfiglabel=label_kw,
), fmt=fmt, **kwargs)
else:
status, reason = 500, 'invalid template'
elif what == 'options':
digcore = self._availablelabels_lib[figlabel]
exportcore = self._get_exporter(digcore.post_template)
if exportcore:
if digcore.kwoptions is None:
a, b, c = self.dig(figlabel, post=False, **kwargs)
return exportcore.export_options(
digcore.kwoptions, otherinfo=dict(status=200,
figlabel=figlabel,
), fmt=fmt)
else:
status, reason = 500, 'invalid template'
else:
plog.error("%s: Figure %s not found!" % (self.name, figlabel))
status, reason = 404, 'figlabel not found'
exportcore = self._get_exporter('tmpl_line')
return exportcore.fmt_export(
dict(status=status, reason=reason, figlabel=figlabel), fmt=fmt)
def export_doc(self, template, see='help'):
'''
see docstring of :meth:`exportercore._export_*template*`
Parameters
----------
see: str
'help', 'print' or 'return'
'''
if template not in Exporter.template_available:
plog.error("%s: Template %s not found!" % (self.name, template))
return
exportcore = self._get_exporter(template)
meth = getattr(exportcore, '_export_%s' % template)
if see == 'help':
help(meth)
elif see == 'print':
print(meth.__doc__)
elif see == 'return':
return meth.__doc__
else:
pass
# # End Export Part
# # Start Visplt Part
__slots__.extend(['_visplter'])
def _get_visplter(self):
return self._visplter
def _set_visplter(self, visplter):
if is_visplter(visplter):
self._visplter = visplter
else:
self._visplter = visplter
visplter = property(_get_visplter, _set_visplter)
def visplt(self, figlabel, revis=False, show=True,
callback=None, **kwargs):
'''
Get results of *figlabel* and visualize(plot).
Use :meth:`dig_doc` :meth:`export_doc` to see *kwargs* for *figlabel*.
Return accfiglabel or None.
Parameters
----------
revis: bool
replot *figlabel* if it was already ploted
show: bool
display *figlabel* after it ploted
_show_kwargs: parameters pick from *kwargs*
They startswith('_show_') for :attr:`visplter`.show_figure,
like '_show_width', '_show_mod' etc.
callback: see :meth:`dig`
'''
if not self.visplter:
plog.error("%s: Need a visplter object!" % self.name)
return
# pop show kwargs
shkws = {k[6:]: kwargs.pop(k)
for k in tuple(kwargs.keys()) if k.startswith('_show_')}
results = self.export(
figlabel, what='axes', fmt='dict', callback=callback, **kwargs)
if results['status'] == 200:
try:
figure = self.visplter.create_template_figure(
results, replace=revis)
except Exception:
plog.error("%s: Failed to create figure %s!" % (
self.name, results['accfiglabel']), exc_info=1)
else:
if show:
self.visplter.show_figure(results['accfiglabel'], **shkws)
return results['accfiglabel']
else:
plog.error("%s: Failed to create figure %s: %s" % (
self.name, figlabel, results['status']), exc_info=1)
# # End Visplt Part
def __repr__(self):
# i = (' rawloader: %r\n pcksaver: %r\n'
# ' pckloader: %r\n ressaver: %r\n resfilesaver: %r\n'
# ' resloader: %r\n resfileloader: %r\n'
# ' visplter: %r'
# % (self.rawloader, self.pcksaver,
# self.pckloader, self.ressaver, self.resfilesaver,
# self.resloader, self.resfileloader,
# self.visplter))
i = (' rawloader: %r\n pckloader: %r\n'
' resloader: %r\n resfileloader: %r\n'
' visplter: %r'
% (self.rawloader, self.pckloader,
self.resloader, self.resfileloader,
self.visplter))
return '<\n {0}.{1} object at {2},\n{3}\n>'.format(
self.__module__, type(self).__name__, hex(id(self)), i)
def __init__(self, path, add_desc=None, filenames_exclude=None,
savetype='.npz', overwrite=False, Sid=False,
datagroups_exclude=None, add_visplter='mpl::'):
'''
Pick up raw data or converted data in *path*,
set processor's rawloader, pcksaver and pckloader, etc.
Parameters
----------
path: str
path of raw data or converted data to open
add_desc: str
additional description of raw data
filenames_exclude: list
regular expressions to exclude filenames in rawloader
savetype: '.npz' or '.hdf5'
extension of pcksaver.path, default '.npz'
when pcksaver.path isn't writable, use '.cache'
overwrite: bool
overwrite existing pcksaver.path file or not, default False
Sid: bool
If Sid is True(here), only rawloader and pcksaver will be set
and converted to a .npz or .hdf5 file if needed. And any other
codes(like Buzz Lightyear) will be omitted(destroyed).
Default False.
datagroups_exclude: list
regular expressions to exclude datagroups in pckloader
add_visplter: str
add visplter by type *add_visplter*, default 'mpl::'
'''
root, ext1 = os.path.splitext(path)
root, ext2 = os.path.splitext(root)
if ((ext2, ext1) in [('', '.npz'), ('', '.hdf5')]
and os.path.basename(root).startswith('gdpy3-pickled-data-')):
# pckloader.path backward compatibility
plog.warning("This is an old converted data path %s!" % path)
ext2 = '.converted'
old_pickled_path = True
else:
old_pickled_path = False
if (ext2, ext1) in [('.digged', '.npz'), ('.digged', '.hdf5')]:
# resfileloader.path
plog.warning("This is a digged data path %s!" % path)
path = '%s%s%s' % (root, '.converted', ext1)
plog.warning("Try converted data path %s beside it!" % path)
if os.path.isfile(path):
root, ext1 = os.path.splitext(path)
root, ext2 = os.path.splitext(root)
else:
plog.error("%s: Can't find path %s!" % (self.name, path))
return
if (ext2, ext1) in [('.converted', '.npz'), ('.converted', '.hdf5')]:
# pckloader.path
self.rawloader, self.pcksaver = None, None
if Sid:
return
try:
self.pckloader = get_pckloader(
path, datagroups_exclude=datagroups_exclude)
except Exception:
plog.error("%s: Invalid pckloader path '%s'!"
% (self.name, path), exc_info=1)
return
try:
if old_pickled_path:
self.set_prefer_ressaver(
ext2='digged', oldext2='pickled', overwrite=overwrite)
else:
self.set_prefer_ressaver(
ext2='digged', overwrite=overwrite)
except Exception:
plog.error("%s: Failed to set ressaver object!"
% self.name, exc_info=1)
else:
# rawloader.path
try:
self.rawloader = get_rawloader(
path, filenames_exclude=filenames_exclude)
except Exception:
plog.error("%s: Invalid rawloader path '%s'!"
% (self.name, path), exc_info=1)
return
try:
self.set_prefer_pcksaver(savetype, ext2='converted')
except Exception:
plog.error("%s: Failed to set pcksaver object!"
% self.name, exc_info=1)
return
plog.info("Default %s data path is %s." %
('converted', self.pcksaver.path))
if Sid and self.pcksaver._extension not in ['.npz', '.hdf5']:
return
if os.path.isfile(self.pcksaver.path):
if overwrite:
plog.warning("Remove old %s data file: %s!"
% ('converted', self.pcksaver.path))
os.remove(self.pcksaver.path)
self.convert(add_desc=add_desc)
else:
self.convert(add_desc=add_desc)
if Sid and self.pcksaver._extension in ['.npz', '.hdf5']:
return
try:
self.pckloader = get_pckloader(
self.pcksaver.get_store(), datagroups_exclude=datagroups_exclude)
except Exception:
plog.error("%s: Invalid pckloader path '%s'!"
% (self.name, path), exc_info=1)
return
try:
self.set_prefer_ressaver(ext2='digged', overwrite=overwrite)
except Exception:
plog.error("%s: Failed to set ressaver object!"
% self.name, exc_info=1)
# set visplter
if add_visplter:
self.visplter = get_visplter(str(add_visplter) + path)
else:
self.visplter = None
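
# Minimal usage sketch (hypothetical path and figlabel; assumes a concrete
# Processor subclass, since Processor itself defines no ConverterCores or
# DiggerCores):
#
#     gdp = SomeProcessorSubclass('/path/to/raw-case-dir')
#     print(gdp.availablelabels[:5])
#     accfiglabel, results, template = gdp.dig('some_group/some_fig')
#     gdp.visplt('some_group/some_fig', show=True)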

# ==== reincubate/eventbrite2ical :: eventbrite2ical/eventbrite2ical.py (license: GPL-2.0) ====
from datetime import datetime
from icalendar import Calendar, Event, vCalAddress
from eventbrite import EventbriteClient
from pytz import timezone
def fetch_eb_organizer_feed( credentials, event_args ):
''' Pulls down a feed of events for an EventBrite organizer. '''
eb_client = EventbriteClient( credentials )
return eb_client.organizer_list_events( event_args )
def ical_from_eb_feed( eb_feed, ignore_draft=True ):
''' Converts an EventBrite feed into iCal format. '''
cal = Calendar()
cal.add('prodid', '-//eventbrite2ical//reincubate//')
cal.add('version', '2.0')
for event in eb_feed['events']:
if ignore_draft and event['event']['status'] == 'Draft':
continue
tzinfo = timezone( event['event']['timezone'] )
title = event['event']['title']
description = event['event']['title']
url = event['event']['url']
organiser = event['event']['organizer']['name']
if not 'venue' in event['event']:
venue = None
else:
venue = event['event']['venue']
addresses = [ venue['name'], venue['address'], venue['address_2'], venue['city'], venue['region'], venue['postal_code'], venue['country'], ]
filled_addresses = []
for a in addresses:
if a: filled_addresses.append( a )
venue_address = ', '.join( filled_addresses )
latitude = venue['latitude']
longitude = venue['longitude']
start_date = datetime.strptime( event['event']['start_date'], '%Y-%m-%d %H:%M:%S' ).replace(tzinfo=tzinfo)
end_date = datetime.strptime( event['event']['end_date'], '%Y-%m-%d %H:%M:%S' ).replace(tzinfo=tzinfo)
created = datetime.strptime( event['event']['created'], '%Y-%m-%d %H:%M:%S' ).replace(tzinfo=tzinfo)
entry = Event()
entry.add( 'summary', title )
if url:
description = '%s\n\n%s' % ( description, url )
entry.add( 'description', description )
entry.add( 'dtstart', start_date )
entry.add( 'dtend', end_date )
entry.add( 'dtstamp', created )
if venue:
entry.add( 'location', venue_address )
entry.add( 'geoLat', latitude )
entry.add( 'geoLong', longitude )
eorganiser = vCalAddress( url )
eorganiser.params['cn'] = organiser
entry['organizer'] = eorganiser
cal.add_component( entry )
return cal.to_ical()
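
# Minimal usage sketch (hypothetical credentials and organizer id; the exact
# credential/argument format depends on the EventbriteClient version in use):
#
#     credentials = {'app_key': 'MY_APP_KEY', 'user_key': 'MY_USER_KEY'}
#     feed = fetch_eb_organizer_feed(credentials, {'id': 1234567890})
#     with open('events.ics', 'wb') as f:
#         f.write(ical_from_eb_feed(feed))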

# ==== michaelroland/wdnas-dl2100-hwtools :: threadedsockets/packetclient.py (license: GPL-3.0) ====
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Packet-based Client-side Socket Interface.
Copyright (c) 2017 Michael Roland <mi.roland@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import socket
import threadedsockets.packets as packets
import threadedsockets.socketclient as socketclient
class BasicPacketClient(socketclient.BasicSocketClient):
"""A basic client socket wrapper to send and receive packet-structured data using a ``socket.SocketType``.
"""
MAX_RECEIVE_BUFFER_SIZE = 0x40000
def __init__(self, client_socket, packet_class=packets.BasicPacket):
"""Initializes a new client socket wrapper.
Args:
client_socket (socket.SocketType): A connected client socket.
packet_class (type(packets.BasicPacket)): A packet parser implementation.
"""
self.__read_buffer = bytearray()
self.__packet_class = packet_class
super().__init__(client_socket)
def receivePacket(self):
"""Receive a single protocol packet.
Returns:
packets.BasicPacket: The received packet.
"""
while True:
data = self.receiveData()
self.__read_buffer.extend(data)
buffer_length = len(self.__read_buffer)
if buffer_length > self.MAX_RECEIVE_BUFFER_SIZE:
raise ValueError("Received data exceeds the maximum supported receive buffer size.")
offset = 0
try:
if offset < buffer_length:
(packet, next_offset) = self.__packet_class.parse(self.__read_buffer, offset)
offset = next_offset
return packet
except packets.IncompletePacketError:
pass
except packets.InvalidPacketError:
offset += 1
if offset < buffer_length:
offset = buffer_length
finally:
if offset > 0:
self.__read_buffer[0:offset] = []
def sendPacket(self, packet):
"""Send a single protocol packet.
Args:
packet (packets.BasicPacket): The packet to send.
"""
self.sendData(packet.serialize())
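
# Example round trip (sketch; assumes `sock` is a connected socket.SocketType
# and `request` is an instance of the configured packet class):
#
#     client = BasicPacketClient(sock)
#     client.sendPacket(request)
#     response = client.receivePacket()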
class ThreadedPacketClient(socketclient.ThreadedSocketClient):
"""A client socket wrapper to send and receive packet-structured data using a ``socket.SocketType``.
Data is continously received using a receiver thread.
Attributes:
is_running: Is the socket connection handler thread in running state?
"""
MAX_RECEIVE_BUFFER_SIZE = 0x40000
def __init__(self, client_socket, packet_class=packets.BasicPacket):
"""Initializes a new client socket wrapper.
Args:
client_socket (socket.SocketType): A connected client socket.
packet_class (type(packets.BasicPacket)): A packet parser implementation.
"""
self.__read_buffer = bytearray()
self.__packet_class = packet_class
super().__init__(client_socket)
def dataReceived(self, data):
self.__read_buffer.extend(data)
buffer_length = len(self.__read_buffer)
if buffer_length > self.MAX_RECEIVE_BUFFER_SIZE:
raise ValueError("Received data exceeds the maximum supported receive buffer size.")
offset = 0
try:
while offset < buffer_length:
try:
(packet, next_offset) = self.__packet_class.parse(self.__read_buffer, offset)
except packets.InvalidPacketError:
offset += 1
else:
offset = next_offset
self.packetReceived(packet)
except packets.IncompletePacketError:
pass
finally:
if offset > 0:
self.__read_buffer[0:offset] = []
def packetReceived(self, packet):
"""Callback for receiving a single protocol packet.
This callback is invoked on the receiver thread and blocking may result
in loss of incoming data in full-duplex communication.
Args:
packet (packets.BasicPacket): The received packet.
"""
pass
def sendPacket(self, packet):
"""Send a single protocol packet.
Args:
packet (packets.BasicPacket): The packet to send.
"""
self.sendData(packet.serialize())
if __name__ == "__main__":
import sys
sys.exit("This library is not intended to be run directly. Unit tests are not implemented.")
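
# Sketch of a receiver subclass (hypothetical handler; the receiver thread
# parses incoming bytes and hands each complete packet to packetReceived):
#
#     class PrintingClient(ThreadedPacketClient):
#         def packetReceived(self, packet):
#             print("received:", packet)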

# ==== mutalyzer/ansible-role-mutalyzer :: templates/settings.py (license: MIT) ====
"""
Mutalyzer config file
Specify the location of this file in the `MUTALYZER_SETTINGS` environment
variable.
"""
from __future__ import unicode_literals
EMAIL = 'mutalyzer@humgen.nl'
BATCH_NOTIFICATION_EMAIL = '{{ mutalyzer_batch_notification_email }}'
CACHE_DIR = '/opt/mutalyzer/cache'
LOG_FILE = '/opt/mutalyzer/log/mutalyzer.log'
EXTRACTOR_MAX_INPUT_LENGTH = {{ mutalyzer_extractor_max_input_length }}
REVERSE_PROXIED = True
{% if mutalyzer_database_url %}
DATABASE_URI = '{{ mutalyzer_database_url }}'
{% else %}
DATABASE_URI = 'postgresql://mutalyzer:{{ mutalyzer_database_password }}@localhost/mutalyzer'
{% endif %}
{% if mutalyzer_redis_url %}
REDIS_URI = '{{ mutalyzer_redis_url }}'
{% else %}
REDIS_URI = 'redis://localhost'
{% endif %}
WEBSITE_ROOT_URL = 'https://{{ mutalyzer_server_name }}'
SOAP_WSDL_URL = 'https://{{ mutalyzer_server_name }}/services/?wsdl'
JSON_ROOT_URL = 'https://{{ mutalyzer_server_name }}/json'
{% if mutalyzer_piwik %}
PIWIK = True
PIWIK_BASE_URL = '{{ mutalyzer_piwik.base_url }}'
PIWIK_SITE_ID = {{ mutalyzer_piwik.site_id }}
{% endif %}
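
# As noted in the module docstring, Mutalyzer locates this file through the
# MUTALYZER_SETTINGS environment variable, e.g. (hypothetical deploy path):
#     export MUTALYZER_SETTINGS=/opt/mutalyzer/conf/settings.py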

# ==== qoollo/streamcentre-storage :: storageapp/clip_manager.py (license: GPL-2.0) ====
# -*- coding: utf-8 -*-
from flask import send_file, jsonify, Response
import os.path
import sys
class ClipManager():
DIR_CAPACITY = 10000
def __init__(self, config):
self.clips_path = config['CLIPS_PATH']
if not os.path.exists(self.clips_path):
os.makedirs(self.clips_path)
self.eblob_instance = None
if (config['EBLOB_STORAGE']):
sys.path.append(config['LIBEBLOB_PATH'])
from libeblob_python import eblob, eblob_config
eb_config = eblob_config()
eb_config.file = os.path.join(self.clips_path, 'data')
eb_config.blob_size = config['EBLOB_BLOB_SIZE']
self.eblob_instance = eblob(os.path.join(self.clips_path, 'log_file.txt'), 0, eb_config)
def get_clip(self, clip_meta):
if self.eblob_instance:
return Response(self._eblob_reader(clip_meta), mimetype='video/' + clip_meta.container_format)
mypaths = self._path_maker(clip_meta)
if not os.path.isfile(mypaths['full_name']):
return jsonify(error_message='no clip with id {0}'.format(clip_meta.id)), 404
return send_file(mypaths['full_name'], mimetype='video/' + clip_meta.container_format)
def write_clip(self, clip_meta, data):
if self.eblob_instance:
self._eblob_writer(clip_meta, data)
return True
mypaths = self._path_maker(clip_meta)
if not os.path.exists(mypaths['directory']):
os.makedirs(mypaths['directory'])
try:
data.save(mypaths['full_name'])
return True
except:
return False
def _path_maker(self, clip_meta):
directory = os.path.join(self.clips_path,
str(clip_meta.id // self.DIR_CAPACITY))
file_name = str(clip_meta.id) + '.' + clip_meta.container_format
return {
'file_name': file_name,
'directory': directory,
'full_name': os.path.join(directory, file_name)
}
def _eblob_reader(self, clip_meta):
return self.eblob_instance.read_hashed(str(clip_meta.id), 0, 0)
def _eblob_writer(self, clip_meta, data):
raw_data = data.read()
self.eblob_instance.write_hashed(str(clip_meta.id), raw_data, 0, 0)
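
# Minimal usage sketch (hypothetical Flask config; `clip_meta` is expected to
# expose `id` and `container_format` attributes, as used above):
#
#     manager = ClipManager({'CLIPS_PATH': '/var/clips', 'EBLOB_STORAGE': False})
#     response = manager.get_clip(clip_meta)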

# ==== petervaro/pycasso :: python/api_ideas.py (license: GPL-3.0) ====
# Hierarchy of Pycasso's basic units:
#
#
# { Window }
#
# ^
# |
#
# { Region }
#
# ^
# |
#
# { Geometry + Constraint + Geometry
# Geometry + Constraint + Layout
# Layout + Constraint + Layout }
#
# ^
# |
#
# { Geometry }
#
#
# Window: set of regions
# Region: single layout
# Layout: visually unrepresentable object
# Geometry: visually represantable object
# Constraint: relationship defined between layouts and geometries
class Window:
@property
def title(self):
return self._title
@title.setter
def title(self, value):
self._title = value
@property
def x(self):
return self._x
@x.setter
def x(self, value):
self._x = value
@property
def y(self):
return self._y
@y.setter
def y(self, value):
self._y = value
@property
def position(self):
return self.x, self.y
@position.setter
def position(self, values):
self.x, self.y = value
@property
def width(self):
return self._width
@width.setter
def width(self, value):
self._width = value
@property
def height(self):
return self._height
@height.setter
def height(self, value):
self._height = value
@property
def dimension(self):
return self.width, self.height
@dimension.setter
def dimension(self, values):
self.width, self.height = values
@property
def basecolor(self):
return self._basecolor
@basecolor.setter
def basecolor(self, value):
self._basecolor = value
def __init__(self,
title = 'Pycasso <version>',
x = pycasso.constants.CENTER,
y = pycasso.constants.CENTER,
width = 512,
height = 512,
basecolor = pycasso.color.rgb(0, 0, 0)):
# Set values
self.title = title
self.position = x, y
self.dimension = width, height
self.basecolor = basecolor
def add_region(self, region):
pass
def run(self):
while True:
for region in self.region:
if region.changed:
region.draw()
self.wait_events()
class Region:
def close(self):
self.parent.remove_region(self)
def draw(self):
pass
#------------------------------------------------------------------------------#
# EXAMPLE
# Import modules
import pycasso
from pycasso.constants import *
# Setup window
window = pycasso.Window()
window.title = 'My first Pycasso App'
window.position = CENTER, CENTER
window.dimension = 1024, 768
# Create main region
region = pycasso.Region(x=0, y=0, width=FILL, height=FILL)
window.add_region(region)
# Create region layout
region.layout = pycasso.Layout()
# Create geometry
rectangle = pycasso.geometry.Rectangle()
circle = pycasso.geometry.Circle()
region.layout.add_object(rectangle)
region.layout.add_object(circle)
# Create constraints
rectangle.x == circle.x #<-- anonym consts
rectangle.y == circle.y
layout.const.dim1 = rectangle.width == circle.width * 2 #<-- named consts
layout.const.dim2 = rectangle.height == circle.height * 1.5
# Initial values
rectangle.position = CENTER, CENTER
circle.bring_top()
# Add event
@window.on_mouse_move
def on_mouse_move(x, y, dx, dy):
rectangle.position = x, y
@window.on_mouse_left_drag
def on_mouse_left_drag(x, y, dx, dy):
rectangle.width += dx
rectangle.height += dy
space_key_flag = True
@window.on_space_pressed
def on_space_pressed():
    # Change constraints (space_key_flag is toggled below, so it must be
    # declared global to rebind the module-level name)
    global space_key_flag
    if space_key_flag:
layout.const.dim1 = circle.width == rectangle.width * 2
layout.const.dim2 = circle.height == rectangle.height * 1.5
else:
layout.const.dim1 = rectangle.width == circle.width * 2
layout.const.dim2 = rectangle.height == circle.height * 1.5
# Rebuild
region.layout.build()
# Toogle
space_key_flag = not space_key_flag
# Build constraints
region.layout.build()
# Enter event loop
window.run()

# ==== dantiston/pydelphin :: delphin/mrs/query.py (license: MIT) ====
from itertools import product
# query methods
def select_nodeids(xmrs, iv=None, label=None, pred=None):
"""
Return the list of all nodeids whose respective |EP| has the
matching *iv* (intrinsic variable), *label*, or *pred* values. If
none match, return an empty list.
"""
g = xmrs._graph
nids = []
datamatch = lambda d: ((iv is None or d['iv'] == iv) and
(pred is None or d['pred'] == pred) and
(label is None or d['label'] == label))
for nid in g.nodeids:
data = g.node[nid]
if datamatch(data):
nids.append(nid)
return nids
def select_nodes(xmrs, nodeid=None, pred=None):
"""
Return the list of all |Nodes| that have the matching *nodeid*
and/or *pred* values. If none match, return an empty list.
"""
nodematch = lambda n: ((nodeid is None or n.nodeid == nodeid) and
(pred is None or n.pred == pred))
return list(filter(nodematch, xmrs.nodes))
def select_eps(xmrs, anchor=None, iv=None, label=None, pred=None):
"""
Return the list of all |EPs| that have the matching *anchor*,
*iv*, *label*, and or *pred* values. If none match, return an
empty list.
"""
epmatch = lambda n: ((anchor is None or n.anchor == anchor) and
(iv is None or n.iv == iv) and
(label is None or n.label == label) and
(pred is None or n.pred == pred))
return list(filter(epmatch, xmrs.eps))
def select_args(xmrs, anchor=None, rargname=None, value=None):
"""
Return the list of all |Arguments| that have the matching
*anchor*, *rargname*, and/or *value* values. If none match,
return an empty list.
"""
argmatch = lambda a: ((anchor is None or a.anchor == anchor) and
(rargname is None or
a.argname.upper() == rargname.upper()) and
(value is None or a.value == value))
return list(filter(argmatch, xmrs.args))
def select_links(xmrs, source=None, target=None, rargname=None, post=None):
"""
Return the list of all |Links| that have the matching *source*,
*target*, *rargname*, and/or *post* values. If none match, return
an empty list.
"""
linkmatch = lambda l: (
(source is None or l.source == source) and
(target is None or l.target == target) and
(rargname is None or l.argname == rargname) and
(post is None or l.post == post))
return list(filter(linkmatch, xmrs.links))
def select_hcons(xmrs, hi=None, relation=None, lo=None):
"""
Return the list of all |HandleConstraints| that have the matching
*hi*, *relation*, and/or *lo* values. If none match, return an
empty list.
"""
hcmatch = lambda hc: (
(hi is None or hc.hi == hi) and
(relation is None or hc.relation == relation) and
(lo is None or hc.lo == lo))
return list(filter(hcmatch, xmrs.hcons))
def select_icons(xmrs, target=None, relation=None, clause=None):
"""
Return the list of all |IndividualConstraints| that have the
matching *target*, *relation*, and/or *clause* values. If none
match, return an empty list.
"""
icmatch = lambda ic: (
(target is None or ic.target == target) and
(relation is None or ic.relation == relation) and
(clause is None or ic.clause == clause))
return list(filter(icmatch, xmrs.icons))
def find_argument_target(xmrs, nodeid, rargname):
"""
Return the target of an argument (rather than just the variable).
Args:
xmrs: The |Xmrs| object to use.
nodeid: The nodeid (or anchor) of the argument.
rargname: The role-argument name of the argument.
Returns:
The object that is the target of the argument. Possible values
include:
================== ===== =================================
Arg value e.g. Target
================== ===== =================================
intrinsic variable x4 nodeid; of the EP with the IV
hole variable h0 nodeid; the HCONS's labelset head
label h1 nodeid; the label's labelset head
unbound variable i3 the variable itself
constant "IBM" the constant itself
================== ===== =================================
Note:
If the argument value is an intrinsic variable whose target is
an EP that has a quantifier, the non-quantifier EP's nodeid
will be returned. With this nodeid, one can then use
:py:meth:`find_quantifier` to get its quantifier's nodeid.
"""
g = xmrs._graph
tgt = None
try:
tgt_val = xmrs.get_arg(nodeid, rargname).value
tgt_attr = g.node[tgt_val]
# intrinsic variable
if 'iv' in tgt_attr:
tgt = tgt_attr['iv']
# hcons; tgt_val is a hole
if 'hcons' in tgt_attr:
tgt_val = tgt_attr['hcons'].lo
# label or hcons lo variable (see previous if block)
if tgt_val in g.labels:
tgt = xmrs.labelset_head(tgt_val)
# otherwise likely a constant or unbound variable
tgt = tgt_val
# nodeid or rargname were missing, or tgt_val wasn't a node
except (AttributeError, KeyError):
pass
#logging.warning('Cannot find argument target; argument is '
# 'invalid: {}:{}'.format(nodeid, rargname))
return tgt
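
# Example (hypothetical Xmrs instance `m` in which nodeid 10 has an ARG1
# argument whose value is another EP's intrinsic variable):
#
#     find_argument_target(m, 10, 'ARG1')  # -> nodeid of the EP owning the IV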
def find_quantifier(xmrs, nodeid):
"""
Return the nodeid of the quantifier of the EP given by `nodeid`.
Args:
xmrs: The |Xmrs| object to use.
nodeid: The nodeid of the quantified EP/node.
Returns:
The nodeid of the quantifier for `nodeid`. If `nodeid` is not
in the Xmrs, it itself is a quantifier, or if it does not have
a quantifier, None is returned.
"""
ep = xmrs.get_ep(nodeid)
if (not ep or
ep.is_quantifier() or
ep.iv not in xmrs._graph.node or
'bv' not in xmrs._graph.node[ep.iv]):
# in some subgraphs, an IV might not exist even when specified
return None
return xmrs._graph.node[ep.iv]['bv']
def get_outbound_args(xmrs, nodeid, allow_unbound=True):
"""
Yield the |Arguments| of `nodeid` that point to other EPs/nodes.
Args:
xmrs: The |Xmrs| object to use.
nodeid: The nodeid of the EP/node whose arguments to yield.
allow_unbound: If True, also yield arguments that point to
unbound (e.g. dropped) EPs/nodes or constants.
Yields:
|Arguments| whose targets are not the given `nodeid`.
"""
g = xmrs._graph
ep = xmrs.get_ep(nodeid)
for arg in ep.args:
nid = arg.nodeid
tgt = arg.value
data = g.node.get(tgt, {})
# ignore intrinsic arguments
if data.get('iv') == nid or data.get('bv') == nid:
continue
is_outbound = 'iv' in data or 'hcons' in data or tgt in g.labels
if (allow_unbound or is_outbound):
yield arg
def find_subgraphs_by_preds(xmrs, preds, connected=None):
"""
Yield subgraphs matching a list of preds. Because preds may match
multiple EPs/nodes in the Xmrs, more than one subgraph is
possible.
Args:
xmrs: The |Xmrs| object to use.
preds: An iterable of |Preds| to include in subgraphs.
connected: If True, all yielded subgraphs must be connected,
as determined by :py:meth:`Xmrs.is_connected`.
Yields:
|Xmrs| objects for the found subgraphs.
"""
preds = list(preds)
# find all lists of nodeids such that the lists have no repeated nids;
# keep them as a list (fixme: why not just get sets?)
nidsets = list(
filter(lambda ps: len(set(ps)) == len(ps),
map(lambda p: select_nodeids(xmrs, pred=p), preds))
)
for sg in map(xmrs.subgraph, product(*nidsets)):
if connected is not None and sg.is_connected() != connected:
continue
yield sg
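
# Usage sketch (hypothetical Xmrs instance `m` and Pred objects p1, p2):
#
#     for subgraph in find_subgraphs_by_preds(m, [p1, p2], connected=True):
#         print(subgraph)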

# ==== freevo/freevo1 :: src/tv/plugins/view_line_in.py (license: GPL-2.0) ====
# -*- coding: iso-8859-1 -*-
# -----------------------------------------------------------------------
# view_line_in.py - view the line-in input in VCR mode
# -----------------------------------------------------------------------
# $Id$
#
# Notes:
# Todo:
#
# -----------------------------------------------------------------------
# Freevo - A Home Theater PC framework
# Copyright (C) 2003 Krister Lagerstrom, et al.
# Please see the file freevo/Docs/CREDITS for a complete list of authors.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MER-
# CHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# -----------------------------------------------------------------------
import config
import plugin
import menu
class PluginInterface(plugin.Plugin):
"""
    View the line-in input in VCR mode
"""
def __init__(self):
"""
normal plugin init, but sets _type to 'mainmenu_tv'
"""
plugin.Plugin.__init__(self)
self._type = 'mainmenu_tv'
self.parent = None
def items(self, parent):
self.parent = parent
return [menu.MenuItem(_('View VCR Input'), action=self.start_vcr)]
def start_vcr(self, menuw=None, arg=None):
plugin.getbyname(plugin.TV).Play('vcr', None)

# ==== remilap/Raspberry_dev :: Robot/lib_motors_base.py (license: GPL-3.0) ====
#!/usr/bin/python
# Library: Basic Driving and Turning
import RPi.GPIO as GPIO # Import the GPIO Library
import time
import lib_util as util
# Set variables for the GPIO motor pins
pinMotorRightForwards = 10
pinMotorRightBackwards = 9
pinMotorLeftForwards = 8
pinMotorLeftBackwards = 7
# Init this library
def init():
util.trace("lib_motors_base.init")
# Set the GPIO modes
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Set the GPIO Pin mode
for x in range(pinMotorLeftBackwards, pinMotorRightForwards+1):
util.trace("set pin " + str(x))
GPIO.setup(x, GPIO.OUT)
# Turn Right motor off
def stopRightMotor():
util.trace("lib_motors_base.stopRightMotor")
GPIO.output(pinMotorRightForwards, 0)
GPIO.output(pinMotorRightBackwards, 0)
# Turn Left motor off
def stopLeftMotor():
util.trace("lib_motors_base.stopLeftMotor")
GPIO.output(pinMotorLeftForwards, 0)
GPIO.output(pinMotorLeftBackwards, 0)
# Turn all motors off
def stopMotors():
util.trace("lib_motors_base.stopMotors")
stopRightMotor()
stopLeftMotor()
# Turn the right motor forwards
def forwardsRightMotor():
util.trace("lib_motors_base.forwardsRightMotor")
    if util.getDebug() == 0:  # getDebug is assumed to be a util function, like util.trace
GPIO.output(pinMotorRightForwards, 1)
GPIO.output(pinMotorRightBackwards, 0)
# Turn the right motor backwards
def backwardsRightMotor():
util.trace("lib_motors_base.backwardsRightMotor")
GPIO.output(pinMotorRightForwards, 0)
    if util.getDebug() == 0:
GPIO.output(pinMotorRightBackwards, 1)
# Turn the left motor forwards
def forwardsLeftMotor():
util.trace("lib_motors_base.forwardsLeftMotor")
    if util.getDebug() == 0:
GPIO.output(pinMotorLeftForwards, 1)
GPIO.output(pinMotorLeftBackwards, 0)
# Turn the left motor backwards
def backwardsLeftMotor():
util.trace("lib_motors_base.backwardsLeftMotor")
GPIO.output(pinMotorLeftForwards, 0)
    if util.getDebug() == 0:
GPIO.output(pinMotorLeftBackwards, 1)
# Turn both motors forwards
def forwards():
util.trace("lib_motors_base.forwards")
forwardsRightMotor()
forwardsLeftMotor()
# Turn both motors backwards
def backwards():
util.trace("lib_motors_base.backwards")
backwardsRightMotor()
backwardsLeftMotor()
# Turn left
def left():
util.trace("lib_motors_base.left")
stopLeftMotor()
forwardsRightMotor()
# Turn left without moving forward
def leftStay():
util.trace("lib_motors_base.leftStay")
backwardsLeftMotor()
forwardsRightMotor()
# Turn right
def right():
util.trace("lib_motors_base.right")
forwardsLeftMotor()
stopRightMotor()
# Turn right without moving forward
def rightStay():
util.trace("lib_motors_base.rightStay")
forwardsLeftMotor()
backwardsRightMotor()
# Ending the use of the library
def end():
util.trace("lib_motors_base.end")
stopMotors()
# Reset the GPIO pins (turn off motors too)
GPIO.cleanup()
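
# Minimal driving sketch (hypothetical timings):
#
#     init()
#     forwards()
#     time.sleep(1.0)
#     rightStay()
#     time.sleep(0.5)
#     end()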

# ==== gford1000/awssl :: awssl/task_state.py ====
from .state_retry_catch import StateRetryCatch
class Task(StateRetryCatch):
"""
Models the ``Task`` state.
The Task can be used to invoke either an AWS Lambda function or an ``Activity``, which provides a general mechanism for all
types of processing. The Task supports retries and catching of specified errors to provide structured error handling, as well
as supporting Timeout for processing as one of those error types.
Either:
* ``EndState`` is ``True`` and ``NextState`` must be ``None``
* ``EndState`` is ``False`` and ``NextState`` must be a valid instance of a class derived from ``StateBase``.
:param Name: [Required] The name of the state within the branch of the state machine
:type Name: str
:param Comment: [Optional] A comment describing the intent of this pass state
:type Comment: str
:param InputPath: [Optional] Filter on the Input information to be passed to the Pass state. Default is "$", signifying that all the Input information will be provided
:type InputPath: str
:param OutputPath: [Optional] Filter on the Output information to be returned from the Pass state. Default is "$", signifying that all the result information will be provided
:type OutputPath: str
:param EndState: [Optional] Flag indicating if this state terminates a branch of the state machine. Defaults to ``False``
:type EndState: bool
:param NextState: [Optional] Next state to be invoked within this branch. Must not be ``None`` unless ``EndState`` is ``True``
:type NextState: instance of class derived from ``StateBase``
:param ResultPath: [Optional] JSONPath indicating where results should be added to the Input. Defaults to "$", indicating results replace the Input entirely.
:type ResultPath: str
:param RetryList: [Optional] ``list`` of ``Retrier`` instances corresponding to error states that cause the entire set of branches to be retried
:type: RetryList: list of ``Retrier``
:param CatcherList: [Optional] ``list`` of ``Catcher`` instances corresponding to error states that can be caught and handled by further states being executed in the ``StateMachine``.
:type: CatcherList: list of ``Catcher``
:param ResourceArn: [Required] The Arn for the ``Lambda`` function or ``Activity`` that the ``Task`` should invoke
:type: ResourceArn: str
:param: TimeoutSeconds: [Optional] The number of seconds in which the ``Task`` should complete
:type: TimeoutSeconds: int
:param: HeartbeatSeconds: [Optional] The number of seconds between heartbeats from an ``Activity``, to indicate it is still running
:type: HeartbeatSeconds: int
"""
def __init__(self, Name=None, Comment="", InputPath="$", OutputPath="$", NextState=None, EndState=None,
ResultPath="$", RetryList=None, CatcherList=None,
ResourceArn=None, TimeoutSeconds=99999999, HeartbeatSeconds=99999999):
"""
Initializer for the Task state.
Either:
* ``EndState`` is ``True`` and ``NextState`` must be ``None``
* ``EndState`` is ``False`` and ``NextState`` must be a valid instance of a class derived from ``StateBase``.
:param Name: [Required] The name of the state within the branch of the state machine
:type Name: str
:param Comment: [Optional] A comment describing the intent of this pass state
:type Comment: str
:param InputPath: [Optional] Filter on the Input information to be passed to the Pass state. Default is "$", signifying that all the Input information will be provided
:type InputPath: str
:param OutputPath: [Optional] Filter on the Output information to be returned from the Pass state. Default is "$", signifying that all the result information will be provided
:type OutputPath: str
:param EndState: [Optional] Flag indicating if this state terminates a branch of the state machine. Defaults to ``False``
:type EndState: bool
:param NextState: [Optional] Next state to be invoked within this branch. Must not be ``None`` unless ``EndState`` is ``True``
:type NextState: instance of class derived from ``StateBase``
:param ResultPath: [Optional] JSONPath indicating where results should be added to the Input. Defaults to "$", indicating results replace the Input entirely.
:type ResultPath: str
:param RetryList: [Optional] ``list`` of ``Retrier`` instances corresponding to error states that cause the entire set of branches to be retried
:type: RetryList: list of ``Retrier``
:param CatcherList: [Optional] ``list`` of ``Catcher`` instances corresponding to error states that can be caught and handled by further states being executed in the ``StateMachine``.
:type: CatcherList: list of ``Catcher``
:param ResourceArn: [Required] The Arn for the ``Lambda`` function or ``Activity`` that the ``Task`` should invoke
:type: ResourceArn: str
:param: TimeoutSeconds: [Optional] The number of seconds in which the ``Task`` should complete
:type: TimeoutSeconds: int
:param: HeartbeatSeconds: [Optional] The number of seconds between heartbeats from an ``Activity``, to indicate it is still running
:type: HeartbeatSeconds: int
"""
super(Task, self).__init__(Name=Name, Type="Task", Comment=Comment,
InputPath=InputPath, OutputPath=OutputPath, NextState=NextState, EndState=EndState,
ResultPath=ResultPath, RetryList=RetryList, CatcherList=CatcherList)
self._resource_arn = None
self._timeout_seconds = None
self._heartbeat_seconds = None
self.set_resource_arn(ResourceArn)
self.set_timeout_seconds(TimeoutSeconds)
self.set_heartbeat_seconds(HeartbeatSeconds)
def validate(self):
"""
Validates this instance is correctly specified.
Raises ``Exception`` with details of the error, if the state is incorrectly defined.
"""
super(Task, self).validate()
def to_json(self):
"""
Returns the JSON representation of this instance.
:returns: dict -- The JSON representation
"""
j = super(Task, self).to_json()
j["Resource"] = self.get_resource_arn()
if self.get_timeout_seconds():
j["TimeoutSeconds"] = self.get_timeout_seconds()
if self.get_heartbeat_seconds():
j["HeartbeatSeconds"] = self.get_heartbeat_seconds()
return j
def get_resource_arn(self):
"""
Returns the Arn of the Lambda or ``Activity`` that will be invoked by this ``Task``.
:returns: str -- The Arn of the resource to be invoked.
"""
return self._resource_arn
def set_resource_arn(self, ResourceArn=None):
"""
        Sets the Arn of the Lambda or ``Activity`` to be invoked by this ``Task``. Cannot be ``None`` and must be a valid Arn formatted string.
:param ResourceArn: [Required] The Arn for the ``Lambda`` function or ``Activity`` that the ``Task`` should invoke
:type: ResourceArn: str
"""
if not ResourceArn:
raise Exception("ResourceArn must be specified for Task state (step '{}')".format(self.get_name()))
if not isinstance(ResourceArn, str):
raise Exception("ResourceArn must be a string for Task state (step '{}')".format(self.get_name()))
self._resource_arn = ResourceArn
def get_timeout_seconds(self):
"""
        Returns the timeout seconds for the ``Task``, after which a ``States.Timeout`` error is raised.
:returns: int -- The timeout seconds for the state.
"""
return self._timeout_seconds
def set_timeout_seconds(self, TimeoutSeconds=99999999):
"""
        Sets the timeout seconds for the ``Task``, after which a ``States.Timeout`` error is raised.
        If specified, must be greater than zero seconds. Default value is ``99999999``.
:param: TimeoutSeconds: [Optional] The number of seconds in which the ``Task`` should complete
:type: TimeoutSeconds: int
"""
if TimeoutSeconds:
if not isinstance(TimeoutSeconds, int):
raise Exception("TimeoutSeconds must be an integer if specified for Task (step '{}')".format(self.get_name()))
if TimeoutSeconds < 1:
raise Exception("TimeoutSeconds must be greater than zero if specified for Task (step '{}')".format(self.get_name()))
self._timeout_seconds = TimeoutSeconds
def get_heartbeat_seconds(self):
"""
Returns the heartbeat interval for the ``Task``. If more than two heartbeats are missed then the state will
fail with a ``States.Timeout`` error.
:returns: int -- The heartbeat seconds for the state.
"""
return self._heartbeat_seconds
def set_heartbeat_seconds(self, HeartbeatSeconds=99999999):
"""
        Sets the heartbeat seconds for the ``Task``. If more than two heartbeats are missed then the state will
        fail with a ``States.Timeout`` error.
        If specified, must be greater than zero seconds. Default value is ``99999999``.
:param: HeartbeatSeconds: [Optional] The number of seconds between heartbeats from an ``Activity``, to indicate it is still running
:type: HeartbeatSeconds: int
"""
if HeartbeatSeconds:
if not isinstance(HeartbeatSeconds, int):
raise Exception("HeartbeatSeconds must be an integer if specified for Task (step '{}')".format(self.get_name()))
if HeartbeatSeconds < 1:
raise Exception("HeartbeatSeconds must be greater than zero if specified for Task (step '{}')".format(self.get_name()))
self._heartbeat_seconds = HeartbeatSeconds
def clone(self, NameFormatString="{}"):
"""
Returns a clone of this instance, with the clone named per the NameFormatString, to avoid state name clashes.
If this instance is not an end state, then the next state will also be cloned, to establish a complete clone
        of the branch from this instance onwards.
:param NameFormatString: [Required] The naming template to be applied to generate the name of the new instance.
:type NameFormatString: str
:returns: ``Task`` -- A new instance of this instance and any other instances in its branch.
"""
if not NameFormatString:
raise Exception("NameFormatString must not be None (step '{}')".format(self.get_name()))
if not isinstance(NameFormatString, str):
raise Exception("NameFormatString must be a str (step '{}')".format(self.get_name()))
c = Task(
Name=NameFormatString.format(self.get_name()),
Comment=self.get_comment(),
InputPath=self.get_input_path(),
OutputPath=self.get_output_path(),
EndState=self.get_end_state(),
ResultPath=self.get_result_path(),
ResourceArn=self.get_resource_arn(),
TimeoutSeconds=self.get_timeout_seconds(),
HeartbeatSeconds=self.get_heartbeat_seconds())
if self.get_retry_list():
c.set_retry_list(RetryList=[ r.clone() for r in self.get_retry_list() ])
if self.get_catcher_list():
            c.set_catcher_list(CatcherList=[ catcher.clone(NameFormatString) for catcher in self.get_catcher_list() ])
if self.get_next_state():
            c.set_next_state(NextState=self.get_next_state().clone(NameFormatString))
return c
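# A minimal usage sketch (illustrative only; the Arn below is a placeholder and
# nothing in this comment block is part of the original module):
#
#   resize = Task(Name="Resize",
#                 ResourceArn="arn:aws:lambda:us-east-1:123456789012:function:resize",
#                 TimeoutSeconds=300, EndState=True)
#   resize.validate()
#   print(resize.to_json())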
| mit | -7,387,184,903,166,974,000 | 46.761261 | 185 | 0.727058 | false |
fabiolapozyk/IncrementalFCM | Python/FCM/TestIncrementalFCM.py | 1 | 9582 | from FCM.Class_File_Manager import FileManager
from FCM.Class_SemiSupervisedFCM import SemiSupervisedFCM
import numpy
from FCM.Class_FCM import FuzzyCMeans
from FCM.Class_ValidityMeasures import ValidityMeasures
from numpy import zeros
from FCM.Class_DataManager import DataManager
from shutil import copyfile
import random
def mergeDict(dict1,dict2):
    """Merge dict2 into dict1: append every element of each dict2 value list
    to the list stored under the same key in dict1 (creating it if missing)."""
    for k in dict2:
        for el in dict2.get(k):
            dict1.setdefault(k,[]).append(el)
    return dict1
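# e.g. mergeDict({1: ['a']}, {1: ['b'], 2: ['c']}) -> {1: ['a', 'b'], 2: ['c']}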
if __name__ == "__main__":
c = 70
nchunk = 5
column_label = 33
column_index = 0
n_execution = 5
percentToLabel = 0.25
useMedoids = True
outputFileName = "output"
FileManager.createFileTXT(outputFileName)
dm = DataManager()
    # uncomment the two lines below to create new chunks
dm.loadDataset('Completedataset', column_label, column_index)
dm.createChunks(c, nchunk)
trainingSet = ['chunk0','chunk1','chunk2','chunk3',]
testSet = 'chunk4'
avgPurity = zeros(nchunk - 1)
avgPrecision = zeros(c)
avgRecall = zeros(c)
avgAcc = 0.
accuratezze = []
for j in range(n_execution):
        FileManager.writeTxt(outputFileName, "Execution " + str(j + 1) + "\n", True)
history = []
dm.loadDataset(trainingSet[0], column_label, column_index)
classes=dm.classes
chunk = dm.getDataset()
true_label = dm.getTrueLabels()
        # uncomment to create new labels for the first chunk
(b, F) = dm.labeldataset(percentToLabel, 'b0', 'F0')
# dm.loadSemiSupervisedInfo('b0', 'F0')
for i in range(nchunk - 1):
print("chunk " + str(i))
alfa = c*len(b) / float(numpy.count_nonzero(b))
# avgCentr = zeros((c,32))
# for z in range(10):
p1 = SemiSupervisedFCM(chunk, c, b, F, alfa, FuzzyCMeans.MAHALONOBIS_NORM, FuzzyCMeans.INIT_MU_RANDOM)
# p1 = FuzzyCMeans(chunk, c, 2,FuzzyCMeans.MAHALONOBIS_NORM,FuzzyCMeans.INIT_MU_RANDOM)
prototypes = p1()
# avgCentr += prototypes
# avgCentr = avgCentr / 10
# p1.setc(avgCentr)
FileManager.writeMatrix('prototypeChunk'+str(i)+'esecuzione'+str(j), prototypes, False)
clusters = p1.getClusters()
#confusionMatrix = ValidityMeasures.getConfusionMatrix(true_label, clusters, c)
#clustersLabel = ValidityMeasures.getClusterLabels(confusionMatrix)
clustersLabel=p1.getClusterLabel(true_label)
if(useMedoids):
prototypes = p1.getMedoids(true_label)
historyIndex = {}
historyTrueLabel = []
historyCluster = []
if(len(history) > 0):
for dmchunk in history:
z = 0
for el in dmchunk.getDataset():
prototypePoint = numpy.argmin(p1.computeDistance(el, prototypes))
historyIndex.setdefault(prototypePoint, []).append((dmchunk.getIndexPoint()[z],classes[dmchunk.getTrueLabels()[z]],'H'))
historyCluster.append(prototypePoint)
z = z + 1
historyTrueLabel.append(dmchunk.getTrueLabels())
                FileManager.writeTxt('HistoryIndexChunk'+str(i)+' Esecuzione'+str(j),str(historyIndex)+'\n Prototypes to which the images belong \n'+str(historyCluster),True)
confusionMatrix = ValidityMeasures.getConfusionMatrix(numpy.append(true_label,historyTrueLabel), numpy.append(clusters,historyCluster), c)
FileManager.writeMatrix('ConfusionMatrixChunk'+str(i)+' Esecuzione'+str(j),confusionMatrix)
#clustersLabel = ValidityMeasures.getClusterLabels(confusionMatrix)
                FileManager.writeTxt('HistoryIndexChunk'+str(i)+' Esecuzione'+str(j),'Prototype labels:\n'+str(range(c))+'\n'+ str(classes[clustersLabel]),True)
print('clustersLabel' + str(clustersLabel))
pur = ValidityMeasures.getAveragePurityError(confusionMatrix)
avgPurity[i] += pur
print("pur: " + str(pur))
if(i > 0):
clusters = clusters[c:]
idClusters = dm.clusterIndices(clusters)
idClusters = mergeDict(idClusters, historyIndex)
'''write result in file'''
out = "\n" + dm.fileName + " len=" + str(len(chunk)) + "\n"
out += "average purity error: " + str(pur) + "\n"
out += "Clusters label: " + str(classes[clustersLabel]) + "\n"
for cl in range(c):
out += "Cluster " + str(cl) + "\n"
out += "\t" + str(idClusters.get(cl)) + "\n"
FileManager.writeTxt(outputFileName, out, True)
history.append(dm)
if(i < nchunk - 2):
dm = DataManager()
dm.loadDataset(trainingSet[i + 1], column_label, column_index)
                # comment out the next line if labels should not be used for the other chunks
(b, F) = dm.labeldataset(percentToLabel, 'b' + str(i + 1) , 'F' + str(i + 1))
true_label = numpy.append(clustersLabel, dm.getTrueLabels())
                # toggle the comments on the two lines below if labels should not be used for the other chunks
# b = numpy.append(numpy.ones(c), numpy.zeros(len(dm.getTrueLabels())))
b = numpy.append(numpy.ones(c), b)
Fcentr = zeros((c, c))
Fcentr[range(c), clustersLabel] = 1
F = numpy.concatenate((Fcentr, F), axis=0)
chunk = numpy.concatenate((prototypes, dm.getDataset()), axis=0)
dm.loadDataset(testSet, column_label, column_index)
label_test = numpy.zeros(len(dm.dataset))
true_label_test=numpy.zeros(len(dm.dataset))
i = 0
out='--------------------------------------------------------------Test--------------------------------------------------------------------------------------\n'
for el in dm.getDataset():
label_test[i] = clustersLabel[numpy.argmin(p1.computeDistance(el, prototypes))]
true_label_test=dm.getTrueLabels()[i]
indexPoint=dm.getIndexPoint()[i]
out+='Index Point: '+ str(indexPoint)+' True Label: '+classes[true_label_test] +' Predicted Label: '+ classes[label_test[i]]+'\n'
i = i + 1
confMatrix = ValidityMeasures.getConfusionMatrix(dm.getTrueLabels(), label_test, c)
acc = ValidityMeasures.getClusteringAccuracy(confMatrix)
prec = ValidityMeasures.getPrecisions(confMatrix)
rec = ValidityMeasures.getRecalls(confMatrix)
avgAcc += acc
accuratezze.append(acc)
avgPrecision += prec
avgRecall += rec
print("acc: " + str(acc))
out += "\naccuracy: " + str(acc) + "\n"
out += "\n\n#######################################################################################################################################################\n"
FileManager.writeTxt(outputFileName, out, True)
avgPurity = avgPurity / n_execution
avgAcc = avgAcc / n_execution
avgPrecision = avgPrecision / n_execution
avgRecall = avgRecall / n_execution
'''
if(avgAcc > maxAcc):
maxAcc = avgAcc
path = "C:/Users/Fabio/git/Tesi_Fabio_Sonya_Python/Python"
for f in range(nchunk):
copyfile(path + "/FCM/chunk" + str(f) + ".csv" , path + "/chunk buoni 280x5/chunk" + str(f) + ".csv")
FileManager.writeTxt(path+"/chunk buoni 280x5/accuratezza_ottenuta", "Accuratezza: "+str(maxAcc)+"\nPurity: "+str(avgPurity))
'''
    print("average accuracy obtained over " + str(n_execution) + " executions: " + str(avgAcc))
    print("average purity errors obtained: " + str(avgPurity))
    print("average precision obtained over " + str(n_execution) + " executions: " + str(avgPrecision))
    print("average recall obtained over " + str(n_execution) + " executions: " + str(avgRecall))
    out = "average accuracy obtained over " + str(n_execution) + " executions: " + str(avgAcc) + "\n"
    out += "accuracies obtained: " + str(accuratezze) + "\n"
    out += "average purity errors obtained: " + str(avgPurity) + "\n"
    out += "Precision and Recall:\n\n\tClass | Precision | Recall\n"
for cl in range(c):
out += "\t" + str(cl + 1) + " | " + str(avgPrecision[cl]) + " | " + str(avgRecall[cl]) + "\n"
FileManager.writeTxt(outputFileName, out, True)
| cc0-1.0 | -6,513,427,054,993,748,000 | 49.516129 | 181 | 0.50908 | false |
ande0581/pynet | class3/class3_ex2.py | 1 | 3306 | # SnmpWalk.exe -r:10.40.0.1 -t:10 -c:"private" -os:1.3.6.1.2.1.2.2.1 -op:1.3.6.1.2.1.2.2.1.20
import time
import snmp_helper
import pygal
# Print debug output
DEBUG = False
# Polling Interval in Seconds
INTERVAL_TIME = 60
# Number of polling intervals
INTERVAL_COUNT = 5
def main():
ip = '10.40.0.1'
a_user = 'mysnmpuser'
auth_key = 'myauthkey'
encrypt_key = 'myencryptkey'
snmp_user = (a_user, auth_key, encrypt_key)
my_router = (ip, 161)
systemName = '1.3.6.1.2.1.1.5.0'
fa8description = '1.3.6.1.2.1.2.2.1.2.10'
fa8InOctets = '1.3.6.1.2.1.2.2.1.10.10'
fa8InUcastPkts = '1.3.6.1.2.1.2.2.1.11.10'
fa8OutOctets = '1.3.6.1.2.1.2.2.1.16.10'
fa8OutUcastPkts = '1.3.6.1.2.1.2.2.1.17.10'
fa8_in_octets = []
fa8_in_packets = []
fa8_out_octets = []
fa8_out_packets = []
fa8_in_octets_last = 0
fa8_in_packets_last = 0
fa8_out_octets_last = 0
fa8_out_packets_last = 0
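    # Poll INTERVAL_COUNT + 1 times: the first sample only seeds the *_last
    # counters, which is why the plotted slices below skip element 0.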
for i in range(INTERVAL_COUNT + 1):
snmp_data = snmp_helper.snmp_get_oid_v3(my_router, snmp_user, oid=fa8InOctets)
snmp_data = int(snmp_helper.snmp_extract(snmp_data))
if fa8_in_octets:
fa8_in_octets.append(snmp_data - fa8_in_octets_last)
else:
fa8_in_octets.append(snmp_data)
fa8_in_octets_last = snmp_data
snmp_data = snmp_helper.snmp_get_oid_v3(my_router, snmp_user, oid=fa8InUcastPkts)
snmp_data = int(snmp_helper.snmp_extract(snmp_data))
if fa8_in_packets:
fa8_in_packets.append(snmp_data - fa8_in_packets_last)
else:
fa8_in_packets.append(snmp_data)
fa8_in_packets_last = snmp_data
snmp_data = snmp_helper.snmp_get_oid_v3(my_router, snmp_user, oid=fa8OutOctets)
snmp_data = int(snmp_helper.snmp_extract(snmp_data))
if fa8_out_octets:
fa8_out_octets.append(snmp_data - fa8_out_octets_last)
else:
fa8_out_octets.append(snmp_data)
fa8_out_octets_last = snmp_data
snmp_data = snmp_helper.snmp_get_oid_v3(my_router, snmp_user, oid=fa8OutUcastPkts)
snmp_data = int(snmp_helper.snmp_extract(snmp_data))
if fa8_out_packets:
fa8_out_packets.append(snmp_data - fa8_out_packets_last)
else:
fa8_out_packets.append(snmp_data)
fa8_out_packets_last = snmp_data
print "{}% Complete".format((float(i) / INTERVAL_COUNT) * 100)
time.sleep(INTERVAL_TIME)
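    # The four polling blocks above are near-identical; a possible refactor
    # (a sketch, not part of the original script):
    #   def poll_delta(oid, series, last):
    #       value = int(snmp_helper.snmp_extract(
    #           snmp_helper.snmp_get_oid_v3(my_router, snmp_user, oid=oid)))
    #       series.append(value - last if series else value)
    #       return value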
if DEBUG:
print fa8_in_octets[1:]
print fa8_in_packets[1:]
print fa8_out_octets[1:]
print fa8_out_packets[1:]
# Create a Chart of type Line
line_chart = pygal.Line()
# Title
line_chart.title = 'Input/Output Packets and Bytes - Interface Fa8'
# X-axis labels
label = []
for i in range(INTERVAL_COUNT):
label.append(str(i + 1))
line_chart.x_labels = label
# Add each one of the above lists to the graph as a line with corresponding label
line_chart.add('InPackets', fa8_in_packets[1:])
line_chart.add('OutPackets', fa8_out_packets[1:])
line_chart.add('InBytes', fa8_in_octets[1:])
line_chart.add('OutBytes', fa8_out_octets[1:])
# Create an output image file
line_chart.render_to_file('fa8.svg')
if __name__ == '__main__':
main() | apache-2.0 | 5,520,988,679,979,173,000 | 29.62037 | 93 | 0.60859 | false |
harshnag/fantasyfooty | grids/migrations/0004_auto_20161103_1609.py | 1 | 1310 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-03 23:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('grids', '0003_auto_20161031_1720'),
]
operations = [
migrations.CreateModel(
name='Position',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('row', models.IntegerField(default=0)),
('col', models.IntegerField(default=0)),
('board', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grids.GameBoard')),
('player', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='grids.Rating')),
],
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
],
),
migrations.RenameField(
model_name='player',
old_name='player_name',
new_name='name',
),
]
| agpl-3.0 | 4,283,665,319,876,594,700 | 33.473684 | 114 | 0.562595 | false |
Answeror/aip | aip/__init__.py | 1 | 2231 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from flask import Flask
from .log import RedisPub
def init_conf(app, config):
# basic config
from . import config as base
app.config.from_object(base)
if config is None:
config = 'application.cfg'
if type(config) is str:
# config from file
app.config.from_pyfile(config, silent=True)
elif type(config) is dict:
app.config.update(**config)
else:
# config from params
app.config.from_object(config)
if 'AIP_TEMP_PATH' not in app.config:
import tempfile
app.config['AIP_TEMP_PATH'] = tempfile.mkdtemp()
def init_store(app):
from . import store
store = store.make(app=app)
app.store = store
class App(Flask):
def __init__(self, *args, **kargs):
super(App, self).__init__(*args, **kargs)
self.redispub = RedisPub()
def __call__(self, *args, **kargs):
with self.redispub.threadbound():
return super(App, self).__call__(*args, **kargs)
def init_core(app):
from .core import Core
app.core = app._core = Core(
db=app.store,
baidupan_cookie=app.config['AIP_BAIDUPAN_COOKIE'],
baidupan_timeout=app.config['AIP_BAIDUPAN_TIMEOUT'],
)
def make(config=None, dbmode=False, **kargs):
    try:
        # optional dependency: import kept for its side effects (ignored if unavailable)
        from .rq import q
    except Exception:
        pass
if 'instance_path' in kargs:
kargs['instance_path'] = os.path.abspath(kargs['instance_path'])
app = App(
__name__,
template_folder='templates',
static_folder='static',
**kargs
)
app.kargs = kargs
init_conf(app, config)
init_store(app)
init_core(app)
if not dbmode:
#init_slaves(app)
from flask.ext.openid import OpenID
oid = OpenID(app, 'temp/openid')
from . import cache
cached = cache.make(app)
from . import views
views.make(app=app, oid=oid, cached=cached, store=app.store)
from . import api
api.make(app=app, cached=cached, store=app.store)
from . import admin
admin.make(app=app)
return app
def make_slave_app(options):
return make(dbmode=True, **options)
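# Usage sketch (assumes an application.cfg supplying the AIP_* settings
# referenced above, e.g. AIP_BAIDUPAN_COOKIE):
#   app = make('application.cfg')
#   app.run()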
| mit | 7,287,214,491,313,543,000 | 21.089109 | 72 | 0.591215 | false |
montefra/pytest-qt | tests/test_wait_signal.py | 1 | 24915 | import functools
import fnmatch
import pytest
from pytestqt.qt_compat import qt_api
from pytestqt.wait_signal import SignalEmittedError
def test_signal_blocker_exception(qtbot):
"""
Make sure waitSignal without signals and timeout doesn't hang, but raises
ValueError instead.
"""
with pytest.raises(ValueError):
qtbot.waitSignal(None, None).wait()
with pytest.raises(ValueError):
qtbot.waitSignals([], None).wait()
def explicit_wait(qtbot, signal, timeout, multiple, raising, should_raise):
"""
Explicit wait for the signal using blocker API.
"""
func = qtbot.waitSignals if multiple else qtbot.waitSignal
blocker = func(signal, timeout, raising=raising)
assert not blocker.signal_triggered
if should_raise:
with pytest.raises(qtbot.SignalTimeoutError):
blocker.wait()
else:
blocker.wait()
return blocker
def context_manager_wait(qtbot, signal, timeout, multiple, raising,
should_raise):
"""
Waiting for signal using context manager API.
"""
func = qtbot.waitSignals if multiple else qtbot.waitSignal
if should_raise:
with pytest.raises(qtbot.SignalTimeoutError):
with func(signal, timeout, raising=raising) as blocker:
pass
else:
with func(signal, timeout, raising=raising) as blocker:
pass
return blocker
def build_signal_tests_variants(params):
"""
Helper function to use with pytest's parametrize, to generate additional
combinations of parameters in a parametrize call:
- explicit wait and context-manager wait
- raising True and False (since we check for the correct behavior inside
each test).
"""
result = []
for param in params:
for wait_function in (explicit_wait, context_manager_wait):
for raising in (True, False):
result.append(param + (wait_function, raising))
return result
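# e.g. one (delay, timeout, expected) tuple expands to four variants:
#   (..., explicit_wait, True), (..., explicit_wait, False),
#   (..., context_manager_wait, True), (..., context_manager_wait, False)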
@pytest.mark.parametrize(
('delay', 'timeout', 'expected_signal_triggered',
'wait_function', 'raising'),
build_signal_tests_variants([
# delay, timeout, expected_signal_triggered
(200, None, True),
(200, 400, True),
(400, 200, False),
])
)
def test_signal_triggered(qtbot, timer, stop_watch, wait_function, delay,
timeout, expected_signal_triggered, raising,
signaller):
"""
Testing for a signal in different conditions, ensuring we are obtaining
the expected results.
"""
timer.single_shot(signaller.signal, delay)
should_raise = raising and not expected_signal_triggered
stop_watch.start()
blocker = wait_function(qtbot, signaller.signal, timeout, raising=raising,
should_raise=should_raise, multiple=False)
# ensure that either signal was triggered or timeout occurred
assert blocker.signal_triggered == expected_signal_triggered
stop_watch.check(timeout, delay)
@pytest.mark.parametrize('configval, raises', [
('false', False),
('true', True),
(None, True),
])
def test_raising(qtbot, testdir, configval, raises):
if configval is not None:
testdir.makeini("""
[pytest]
qt_wait_signal_raising = {}
""".format(configval))
testdir.makepyfile("""
import pytest
from pytestqt.qt_compat import qt_api
class Signaller(qt_api.QtCore.QObject):
signal = qt_api.Signal()
def test_foo(qtbot):
signaller = Signaller()
with qtbot.waitSignal(signaller.signal, timeout=10):
pass
""")
res = testdir.runpytest()
if raises:
res.stdout.fnmatch_lines(['*1 failed*'])
else:
res.stdout.fnmatch_lines(['*1 passed*'])
def test_raising_by_default_overridden(qtbot, testdir):
testdir.makeini("""
[pytest]
qt_wait_signal_raising = false
""")
testdir.makepyfile("""
import pytest
from pytestqt.qt_compat import qt_api
class Signaller(qt_api.QtCore.QObject):
signal = qt_api.Signal()
def test_foo(qtbot):
signaller = Signaller()
signal = signaller.signal
with qtbot.waitSignal(signal, raising=True, timeout=10) as blocker:
pass
""")
res = testdir.runpytest()
res.stdout.fnmatch_lines(['*1 failed*'])
@pytest.mark.parametrize(
('delay_1', 'delay_2', 'timeout', 'expected_signal_triggered',
'wait_function', 'raising'),
build_signal_tests_variants([
# delay1, delay2, timeout, expected_signal_triggered
(200, 300, 400, True),
(300, 200, 400, True),
(200, 300, None, True),
(400, 400, 200, False),
(200, 400, 300, False),
(400, 200, 200, False),
(200, 1000, 400, False),
])
)
def test_signal_triggered_multiple(qtbot, timer, stop_watch, wait_function,
delay_1, delay_2, timeout, signaller,
expected_signal_triggered, raising):
"""
Testing for a signal in different conditions, ensuring we are obtaining
the expected results.
"""
timer.single_shot(signaller.signal, delay_1)
timer.single_shot(signaller.signal_2, delay_2)
should_raise = raising and not expected_signal_triggered
stop_watch.start()
blocker = wait_function(qtbot, [signaller.signal, signaller.signal_2],
timeout, multiple=True, raising=raising,
should_raise=should_raise)
# ensure that either signal was triggered or timeout occurred
assert blocker.signal_triggered == expected_signal_triggered
stop_watch.check(timeout, delay_1, delay_2)
def test_explicit_emit(qtbot, signaller):
"""
Make sure an explicit emit() inside a waitSignal block works.
"""
with qtbot.waitSignal(signaller.signal, timeout=5000) as waiting:
signaller.signal.emit()
assert waiting.signal_triggered
def test_explicit_emit_multiple(qtbot, signaller):
"""
Make sure an explicit emit() inside a waitSignal block works.
"""
with qtbot.waitSignals([signaller.signal, signaller.signal_2],
timeout=5000) as waiting:
signaller.signal.emit()
signaller.signal_2.emit()
assert waiting.signal_triggered
@pytest.fixture
def signaller(timer):
"""
Fixture that provides an object with to signals that can be emitted by
tests.
.. note:: we depend on "timer" fixture to ensure that signals emitted
with "timer" are disconnected before the Signaller() object is destroyed.
This was the reason for some random crashes experienced on Windows (#80).
"""
class Signaller(qt_api.QtCore.QObject):
signal = qt_api.Signal()
signal_2 = qt_api.Signal()
signal_args = qt_api.Signal(str, int)
signal_args_2 = qt_api.Signal(str, int)
assert timer
return Signaller()
@pytest.yield_fixture
def timer():
"""
Fixture that provides a callback with signature: (signal, delay) that
triggers that signal once after the given delay in ms.
The fixture is responsible for cleaning up after the timers.
"""
class Timer(qt_api.QtCore.QObject):
def __init__(self):
qt_api.QtCore.QObject.__init__(self)
self.timers_and_slots = []
def shutdown(self):
for t, slot in self.timers_and_slots:
t.stop()
t.timeout.disconnect(slot)
self.timers_and_slots[:] = []
def single_shot(self, signal, delay):
t = qt_api.QtCore.QTimer(self)
t.setSingleShot(True)
slot = functools.partial(self._emit, signal)
t.timeout.connect(slot)
t.start(delay)
self.timers_and_slots.append((t, slot))
def _emit(self, signal):
signal.emit()
timer = Timer()
yield timer
timer.shutdown()
@pytest.mark.parametrize('multiple', [True, False])
@pytest.mark.parametrize('raising', [True, False])
def test_wait_signals_handles_exceptions(qtbot, multiple, raising, signaller):
"""
Make sure waitSignal handles exceptions correctly.
"""
class TestException(Exception):
pass
if multiple:
func = qtbot.waitSignals
arg = [signaller.signal, signaller.signal_2]
else:
func = qtbot.waitSignal
arg = signaller.signal
with pytest.raises(TestException):
with func(arg, timeout=10, raising=raising):
raise TestException
@pytest.mark.parametrize('multiple', [True, False])
@pytest.mark.parametrize('do_timeout', [True, False])
def test_wait_twice(qtbot, timer, multiple, do_timeout, signaller):
"""
https://github.com/pytest-dev/pytest-qt/issues/69
"""
if multiple:
func = qtbot.waitSignals
arg = [signaller.signal]
else:
func = qtbot.waitSignal
arg = signaller.signal
if do_timeout:
with func(arg, timeout=100, raising=False):
timer.single_shot(signaller.signal, 200)
with func(arg, timeout=100, raising=False):
timer.single_shot(signaller.signal, 200)
else:
with func(arg):
signaller.signal.emit()
with func(arg):
signaller.signal.emit()
def test_wait_signals_invalid_strict_parameter(qtbot, signaller):
with pytest.raises(ValueError):
qtbot.waitSignals([signaller.signal], order='invalid')
def test_destroyed(qtbot):
"""Test that waitSignal works with the destroyed signal (#82).
For some reason, this crashes PySide although it seems perfectly fine code.
"""
if qt_api.pytest_qt_api == 'pyside':
pytest.skip('test crashes PySide')
import sip
class Obj(qt_api.QtCore.QObject):
pass
obj = Obj()
with qtbot.waitSignal(obj.destroyed):
obj.deleteLater()
assert sip.isdeleted(obj)
class TestArgs:
"""Try to get the signal arguments from the signal blocker."""
def test_simple(self, qtbot, signaller):
"""The blocker should store the signal args in an 'args' attribute."""
with qtbot.waitSignal(signaller.signal_args) as blocker:
signaller.signal_args.emit('test', 123)
assert blocker.args == ['test', 123]
def test_timeout(self, qtbot):
"""If there's a timeout, the args attribute is None."""
with qtbot.waitSignal(timeout=100, raising=False) as blocker:
pass
assert blocker.args is None
def test_without_args(self, qtbot, signaller):
"""If a signal has no args, the args attribute is an empty list."""
with qtbot.waitSignal(signaller.signal) as blocker:
signaller.signal.emit()
assert blocker.args == []
def test_multi(self, qtbot, signaller):
"""A MultiSignalBlocker doesn't have an args attribute."""
with qtbot.waitSignals([signaller.signal]) as blocker:
signaller.signal.emit()
with pytest.raises(AttributeError):
blocker.args
def test_connected_signal(self, qtbot, signaller):
"""A second signal connected via .connect also works."""
with qtbot.waitSignal(signaller.signal_args) as blocker:
blocker.connect(signaller.signal_args_2)
signaller.signal_args_2.emit('foo', 2342)
assert blocker.args == ['foo', 2342]
def test_signal_identity(signaller):
"""
Tests that the identity of signals can be determined correctly, using str(signal).
Some Qt frameworks, such as PyQt4 or PyQt5, have the following issue:
x = signaller.signal
y = signaller.signal
x == y # is False
    id(signaller.signal) == id(signaller.signal)  # only True due to garbage collection between the two id() calls
id(x) == id(y) # is False
str(x) == str(y) # is True (for all Qt frameworks)
"""
assert str(signaller.signal) == str(signaller.signal)
x = signaller.signal
y = signaller.signal
assert str(x) == str(y)
# different signals should also be recognized as different ones
z = signaller.signal_2
assert str(x) != str(z)
def get_waitsignals_cases_all(order):
"""
Returns the list of tuples (emitted-signal-list, expected-signal-list, expect_signal_triggered) for the
given order parameter of waitSignals().
"""
cases = get_waitsignals_cases(order, working=True)
cases.extend(get_waitsignals_cases(order, working=False))
return cases
def get_waitsignals_cases(order, working):
"""
Builds combinations for signals to be emitted and expected for working cases (i.e. blocker.signal_triggered == True)
and non-working cases, depending on the order.
Note:
The order ("none", "simple", "strict") becomes stricter from left to right.
    Working cases of stricter orders also work under less strict orders.
    Non-working cases under less strict orders are also non-working under stricter orders.
"""
if order == "none":
if working:
cases = get_waitsignals_cases(order="simple", working=True)
cases.extend([
# allow even out-of-order signals
(('A1', 'A2'), ('A2', 'A1'), True),
(('A1', 'A2'), ('A2', 'Ax'), True),
(('A1', 'B1'), ('B1', 'A1'), True),
(('A1', 'B1'), ('B1', 'Ax'), True),
(('A1', 'B1', 'B1'), ('B1', 'A1', 'B1'), True),
])
return cases
else:
return [
(('A2',), ('A1',), False),
(('A1',), ('B1',), False),
(('A1',), ('Bx',), False),
(('A1', 'A1'), ('A1', 'B1'), False),
(('A1', 'A1'), ('A1', 'Bx'), False),
(('A1', 'A1'), ('B1', 'A1'), False),
(('A1', 'B1'), ('A1', 'A1'), False),
(('A1', 'B1'), ('B1', 'B1'), False),
(('A1', 'B1', 'B1'), ('A1', 'A1', 'B1'), False),
]
elif order == "simple":
if working:
cases = get_waitsignals_cases(order="strict", working=True)
cases.extend([
# allow signals that occur in-between, before or after the expected signals
(('B1', 'A1', 'A1', 'B1', 'A1'), ('A1', 'B1'), True),
(('A1', 'A1', 'A1'), ('A1', 'A1'), True),
(('A1', 'A1', 'A1'), ('A1', 'Ax'), True),
(('A1', 'A2', 'A1'), ('A1', 'A1'), True),
])
return cases
else:
cases = get_waitsignals_cases(order="none", working=False)
cases.extend([
# don't allow out-of-order signals
(('A1', 'B1'), ('B1', 'A1'), False),
(('A1', 'B1'), ('B1', 'Ax'), False),
(('A1', 'B1', 'B1'), ('B1', 'A1', 'B1'), False),
(('A1', 'B1', 'B1'), ('B1', 'B1', 'A1'), False),
])
return cases
elif order == "strict":
if working:
return [
# only allow exactly the same signals to be emitted that were also expected
(('A1',), ('A1',), True),
(('A1',), ('Ax',), True),
(('A1', 'A1'), ('A1', 'A1'), True),
(('A1', 'A1'), ('A1', 'Ax'), True),
(('A1', 'A1'), ('Ax', 'Ax'), True),
(('A1', 'A2'), ('A1', 'A2'), True),
(('A2', 'A1'), ('A2', 'A1'), True),
(('A1', 'B1'), ('A1', 'B1'), True),
(('A1', 'A1', 'B1'), ('A1', 'A1', 'B1'), True),
(('A1', 'A2', 'B1'), ('A1', 'A2', 'B1'), True),
(('A1', 'B1', 'A1'), ('A1', 'A1'), True), # blocker doesn't know about signal B1 -> test passes
(('A1', 'B1', 'A1'), ('Ax', 'A1'), True),
]
else:
cases = get_waitsignals_cases(order="simple", working=False)
cases.extend([
# don't allow in-between signals
(('A1', 'A1', 'A2', 'B1'), ('A1', 'A2', 'B1'), False),
])
return cases
class TestCallback:
"""
Tests the callback parameter for waitSignal (callbacks in case of waitSignals).
Uses so-called "signal codes" such as "A1", "B1" or "Ax" which are converted to signals and callback functions.
The first letter ("A" or "B" is allowed) specifies the signal (signaller.signal_args or signaller.signal_args_2
respectively), the second letter specifies the parameter to expect or emit ('x' stands for "don't care", i.e. allow
any value - only applicable for expected signals (not for emitted signals)).
"""
@staticmethod
def get_signal_from_code(signaller, code):
"""Converts a code such as 'A1' to a signal (signaller.signal_args for example)."""
assert type(code) == str and len(code) == 2
signal = signaller.signal_args if code[0] == 'A' else signaller.signal_args_2
return signal
@staticmethod
def emit_parametrized_signals(signaller, emitted_signal_codes):
"""Emits the signals as specified in the list of emitted_signal_codes using the signaller."""
for code in emitted_signal_codes:
signal = TestCallback.get_signal_from_code(signaller, code)
param_str = code[1]
assert param_str != "x", "x is not allowed in emitted_signal_codes, only in expected_signal_codes"
param_int = int(param_str)
signal.emit(param_str, param_int)
@staticmethod
def parameter_evaluation_callback(param_str, param_int, expected_param_str, expected_param_int):
"""
This generic callback method evaluates that the two provided parameters match the expected ones (which are bound
using functools.partial).
"""
return param_str == expected_param_str and param_int == expected_param_int
@staticmethod
def parameter_evaluation_callback_accept_any(param_str, param_int):
return True
@staticmethod
def get_signals_and_callbacks(signaller, expected_signal_codes):
"""
Converts an iterable of strings, such as ('A1', 'A2') to a tuple of the form
(list of Qt signals, matching parameter-evaluation callbacks)
Example: ('A1', 'A2') is converted to
([signaller.signal_args, signaller.signal_args], [callback(str,int), callback(str,int)]) where the
first callback expects the values to be '1' and 1, and the second one '2' and 2 respectively.
I.e. the first character of each signal-code determines the Qt signal, the second one the parameter values.
"""
signals_to_expect = []
callbacks = []
for code in expected_signal_codes:
# e.g. "A2" means to use signaller.signal_args with parameters "2", 2
signal = TestCallback.get_signal_from_code(signaller, code)
signals_to_expect.append(signal)
param_value_as_string = code[1]
if param_value_as_string == "x":
callback = TestCallback.parameter_evaluation_callback_accept_any
else:
param_value_as_int = int(param_value_as_string)
callback = functools.partial(TestCallback.parameter_evaluation_callback,
expected_param_str=param_value_as_string,
expected_param_int=param_value_as_int)
callbacks.append(callback)
return signals_to_expect, callbacks
@pytest.mark.parametrize(
('emitted_signal_codes', 'expected_signal_codes', 'expected_signal_triggered'),
[
# working cases
(('A1',), ('A1',), True),
(('A1',), ('Ax',), True),
(('A1', 'A1'), ('A1',), True),
(('A1', 'A2'), ('A1',), True),
(('A2', 'A1'), ('A1',), True),
# non working cases
(('A2',), ('A1',), False),
(('B1',), ('A1',), False),
(('A1',), ('Bx',), False),
]
)
def test_wait_signal(self, qtbot, signaller, emitted_signal_codes, expected_signal_codes,
expected_signal_triggered):
"""Tests that waitSignal() correctly checks the signal parameters using the provided callback"""
signals_to_expect, callbacks = TestCallback.get_signals_and_callbacks(signaller, expected_signal_codes)
with qtbot.waitSignal(signal=signals_to_expect[0], check_params_cb=callbacks[0], timeout=200,
raising=False) as blocker:
TestCallback.emit_parametrized_signals(signaller, emitted_signal_codes)
assert blocker.signal_triggered == expected_signal_triggered
@pytest.mark.parametrize(
('emitted_signal_codes', 'expected_signal_codes', 'expected_signal_triggered'),
get_waitsignals_cases_all(order="none")
)
def test_wait_signals_none_order(self, qtbot, signaller, emitted_signal_codes, expected_signal_codes,
expected_signal_triggered):
"""Tests waitSignals() with order="none"."""
self._test_wait_signals(qtbot, signaller, emitted_signal_codes, expected_signal_codes,
expected_signal_triggered, order="none")
@pytest.mark.parametrize(
('emitted_signal_codes', 'expected_signal_codes', 'expected_signal_triggered'),
get_waitsignals_cases_all(order="simple")
)
def test_wait_signals_simple_order(self, qtbot, signaller, emitted_signal_codes, expected_signal_codes,
expected_signal_triggered):
"""Tests waitSignals() with order="simple"."""
self._test_wait_signals(qtbot, signaller, emitted_signal_codes, expected_signal_codes,
expected_signal_triggered, order="simple")
@pytest.mark.parametrize(
('emitted_signal_codes', 'expected_signal_codes', 'expected_signal_triggered'),
get_waitsignals_cases_all(order="strict")
)
def test_wait_signals_strict_order(self, qtbot, signaller, emitted_signal_codes, expected_signal_codes,
expected_signal_triggered):
"""Tests waitSignals() with order="strict"."""
self._test_wait_signals(qtbot, signaller, emitted_signal_codes, expected_signal_codes,
expected_signal_triggered, order="strict")
@staticmethod
def _test_wait_signals(qtbot, signaller, emitted_signal_codes, expected_signal_codes,
expected_signal_triggered, order):
signals_to_expect, callbacks = TestCallback.get_signals_and_callbacks(signaller, expected_signal_codes)
with qtbot.waitSignals(signals=signals_to_expect, order=order, check_params_cbs=callbacks,
timeout=200, raising=False) as blocker:
TestCallback.emit_parametrized_signals(signaller, emitted_signal_codes)
assert blocker.signal_triggered == expected_signal_triggered
def test_signals_and_callbacks_length_mismatch(self, qtbot, signaller):
"""
Tests that a ValueError is raised if the number of expected signals doesn't match the number of provided
callbacks.
"""
expected_signal_codes = ('A1', 'A2')
signals_to_expect, callbacks = TestCallback.get_signals_and_callbacks(signaller, expected_signal_codes)
callbacks.append(None)
with pytest.raises(ValueError):
with qtbot.waitSignals(signals=signals_to_expect, order="none", check_params_cbs=callbacks,
raising=False):
pass
class TestAssertNotEmitted:
"""Tests for qtbot.assertNotEmitted."""
def test_not_emitted(self, qtbot, signaller):
with qtbot.assertNotEmitted(signaller.signal):
pass
def test_emitted(self, qtbot, signaller):
with pytest.raises(SignalEmittedError) as excinfo:
with qtbot.assertNotEmitted(signaller.signal):
signaller.signal.emit()
        assert fnmatch.fnmatchcase(str(excinfo.value),
                                   "Signal * unexpectedly emitted.")
def test_emitted_args(self, qtbot, signaller):
with pytest.raises(SignalEmittedError) as excinfo:
with qtbot.assertNotEmitted(signaller.signal_args):
signaller.signal_args.emit('foo', 123)
        assert fnmatch.fnmatchcase(str(excinfo.value),
                                   "Signal * unexpectedly emitted with arguments "
                                   "['foo', 123]")
def test_disconnected(self, qtbot, signaller):
with qtbot.assertNotEmitted(signaller.signal):
pass
signaller.signal.emit()
| lgpl-3.0 | 5,334,715,047,530,798,000 | 36.186567 | 120 | 0.592494 | false |
redelinux-ime-usp/supermegazord | scripts/account/deactivate.py | 1 | 1140 | # -*- coding: utf-8 -*-
def main(self):
    if self.group.name == "exaluno": return "Account already inactive."
    from supermegazord.lib.tools import ErrorString
    import supermegazord.db.path as path
    try:
        with open(path.MEGAZORD_DB + "/emails/account.deactivate") as f:
            self.mail("Account Deactivated", f.read().format(**self.__dict__))
    except Exception: pass
    import supermegazord.lib.remote as remote
    command = "sudo /megazord/scripts/desativa_conta " + self.login + " " + self.group.name
    results = remote.run_remote_batch(['mail', 'printer', 'nfs'], command, "megazord")
    results['ldap'] = self.group.add_member(self) and self.change_group('exaluno') and (
        self.change_home("/home/exaluno/" + self.login) and self.change_shell("/bin/false"))
    self.log("Account '{0}' deactivated. Status: {1}".format(self.login, str(results)))
    if not reduce(lambda a, b: a and b, results.values()):
        return ErrorString("Error deactivating account. Check 'DB/usuarios/historicos/{0}' for details.".format(self.nid))
    else:
        return "Account '{0}' successfully deactivated.".format(self.login)
def description():
    return "Deactivates an account, keeping only the email."
| gpl-2.0 | -5,936,291,138,590,686,000 | 48.521739 | 119 | 0.705882 | false |
w495/python-video-shot-detector | shot_detector/charts/event/threshold/adaptive/z_test_event_chart.py | 1 | 3747 | # -*- coding: utf8 -*-
"""
This is part of shot detector.
Produced by w495 at 2017.05.04 04:18:27
"""
from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
import logging
import numpy as numeric
from shot_detector.charts.event.base import (
BaseEventChart,
FilterDescription,
PlotOptions
)
from shot_detector.filters import (
BaseSWFilter,
ShiftSWFilter,
DelayFilter,
NormFilter
)
from shot_detector.utils.tex_template import tex_template
class ZTestEventChart(BaseEventChart):
"""
    Event chart built from a sliding-window z-test of the inter-frame
    difference signal.
"""
__logger = logging.getLogger(__name__)
def seq_filters(self):
"""
        :return: a list of FilterDescription entries describing what to plot.
"""
# Linear delay filter. Builtin filter.
delay = DelayFilter()
# The incoming signal is unchanged.
original = delay(0)
# Shift signal to one frame. Builtin filter.
shift = ShiftSWFilter()
# The difference between neighboring frames.
diff = original - shift
# The norm of the signal. Builtin filter.
norm = NormFilter()
# Abstract sliding window. Builtin filter.
sw = BaseSWFilter(min_size=2)
# Sum of absolute difference filter.
sad_filter = original | diff | abs | norm(l=1)
sw_mean = sw | numeric.mean
# or sw_mean = MeanSWFilter()
sw_std = sw | numeric.std
# or sw_std = StdSWFilter()
def z_score(size=1):
"""
:param int size:
:return:
"""
return (
(
(
original - sw_mean(s=size)
)
/ sw_std(s=size)
)
/ numeric.sqrt(size)
| abs
)
def z_test(size=1):
"""
:param size:
:return:
"""
estimation = z_score(size)
return estimation
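        # z_score(w) computes |x_t - mean_w(x)| / (std_w(x) * sqrt(w)) over a
        # sliding window of size w; z_test currently forwards it unchanged.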
return [
FilterDescription(
name='$F_{L_1} = ||F_{t}||_{L_1}$',
plot_options=PlotOptions(
style='-',
color='gray',
width=3.0,
),
formula=norm(l=1),
),
FilterDescription(
# Sum of absolute difference filter.
name='$D_{t} = ||F_{t} - F_{t-1}||_{L_1}$',
plot_options=PlotOptions(
style='-',
color='blue',
width=2.0,
),
formula=sad_filter
),
FilterDescription(
name=(
'$D_{{t}} > E_{{ {size} }}\ (D_{{t}})$'.format(
size=100
)
),
plot_options=PlotOptions(
style='-',
color='green',
width=1.0,
),
formula=(
diff | norm(l=1)
| z_test(size=50)
)
),
FilterDescription(
name=(
tex_template(
'$D_{t} > E_{ ${size} }\ (D_{t})$',
size=200
)
),
plot_options=PlotOptions(
style='-',
color='red',
width=1.0,
),
formula=(
diff | norm(l=1)
| z_test(size=200)
)
),
]
| bsd-3-clause | 4,005,643,600,252,667,400 | 23.490196 | 67 | 0.39178 | false |
faroos3/OpSysProject1 | srt.py | 1 | 9812 | from fcfs import *
import heapq
# Format the queue for output
def format_queue(queue):
queue.sort(key=lambda x: x[1].get_process_id())
output = "[Q"
if len(queue) == 0:
output += " <empty>]"
else:
for i in range(len(queue)):
if i == (len(queue) - 1):
output += " " + str(queue[i][1]) + "]"
else:
output += " " + str(queue[i][1])
return output
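# e.g. a ready queue holding processes B and A renders as "[Q A B]" (sorted by id)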
def increase_wait(queue, time):
for process in queue:
process[1].increase_wait_t(time)
def srt(processes_list):
processes_list.sort(key=lambda x: x.get_process_id())
t = 0 # time in ms
t_cs = 8 # time to perform context switch
    IO_list = {} # { io_finish_time : process }
ready = True # CPU state
ready_queue = [] # heapq, [ [process cpu burst time, process] ]
burst_end_time = 0 # time the current process will finish it's burst
current_process = None
completed_processes = []
context_switch = finished = replace = False # check for context switch and completion
context = avg_wait = avg_turn = avg_burst = total_bursts = preemption = 0
# Set up some stats
for process in processes_list:
total_bursts+=process.get_num_bursts()
avg_burst+=process.get_cpu_t()*process.get_num_bursts()
print("time 0ms: Simulator started for SRT [Q <empty>]")
while(finished != True):
'''
        If the current process has finished its burst, check whether it has
        remaining bursts; if so, send it to the IO list and free the CPU for a
        new process. If not, mark the process as finished.
'''
if t != 0 and t == burst_end_time:
current_process.burst_complete() # Decrement the number of bursts
return_time = t+current_process.get_io_t()+4 # IO end time with account for context switch
# Send process to complete IO
if current_process.get_num_bursts() > 0:
replace = True
IO_list[return_time] = current_process
if current_process.get_num_bursts() == 1:
print("time {}ms: Process {} completed a CPU burst; {} burst to go {}".format(t,current_process,current_process.get_num_bursts(),format_queue(ready_queue)))
else:
print("time {}ms: Process {} completed a CPU burst; {} bursts to go {}".format(t,current_process,current_process.get_num_bursts(),format_queue(ready_queue)))
print("time {}ms: Process {} switching out of CPU; will block on I/O until time {}ms {}".format(t,current_process,return_time,
format_queue(ready_queue)))
else:
# Mark the process as completed
current_process.set_end_t(t)
completed_processes.append(current_process)
if len(ready_queue) == 2:
                    increase_wait(ready_queue,4)
print("time {}ms: Process {} terminated {}".format(t,current_process,format_queue(ready_queue)))
# If the queue is empty we won't need to account for the 2nf half of the context switch
if len(ready_queue) != 0:
replace = True
ready = True
burst_end_time = 0
current_process = None
'''
        Check if any process finished its IO; if it did, check whether
        any bursts are left. If bursts are left, check for preemption
        or add it to the queue; if not, mark it as finished.
'''
for key in IO_list:
if key == t:
process = IO_list[key]
# Check if process terminated
if process.get_num_bursts() == 0:
context_switch = True
print("time {}ms: Process {} terminated {}".format(t,process,format_queue(ready_queue)))
process.set_end_t(t)
if len(ready_queue) == 1:
increase_wait(ready_queue,4)
completed_processes.append(process)
else:
# Check for preemption
                    context_switch = True
if process.get_cpu_t() < (burst_end_time - t):
replace = True
print("time {}ms: Process {} completed I/O and will preempt {} {}".format(t,process,current_process,format_queue(ready_queue)))
process.increase_wait_t(4)
if (len(ready_queue) == 0): # If the queue is empty, the current process won't we taken off after we add it back
current_process.increase_wait_t(4)
heapq.heappush(ready_queue,[(burst_end_time-t,str(current_process)), current_process])
heapq.heappush(ready_queue,[(process.get_cpu_t(),str(process)), process])
preemption+=1
# Mark the CPU as open
ready = True
burst_end_time = 0
current_process = None
else:
context_switch = True
# if (len(ready_queue) == 0):
# process.increase_wait_t(4)
heapq.heappush(ready_queue,[(process.get_cpu_t(),str(process)), process])
print("time {}ms: Process {} completed I/O; added to ready queue {}".format(t,process,format_queue(ready_queue)))
'''
        Check if a process arrived, then check for preemption
or add it to the queue
'''
for process in processes_list:
if(t == process.get_arrival_t()):
'''
Check for preemption by comparing burst time of the process and
the remaining time of the current process
'''
if (process.get_cpu_t() < (burst_end_time - t)) or (process.get_cpu_t() == (burst_end_time - t) and (str(process) < str(current_process))):
print("time {}ms: Process {} arrived and will preempt {} {}".format(t,process,current_process,format_queue(ready_queue)))
replace = True
# Add the current process back to the queue with it's remaining time
current_process.increase_wait_t(8)
heapq.heappush(ready_queue,[(burst_end_time - t,str(current_process)), current_process])
                # Queue the arriving process; the freed CPU will pick it up next
                process.increase_wait_t(4)
                heapq.heappush(ready_queue,[(process.get_cpu_t(),str(process)),process])
                # Record the preemption
                preemption+=1
                context_switch = True
# Mark the CPU as open
ready = True
burst_end_time = 0
current_process = None
else:
# Add the process to the queue if there is no preemption
# if (len(ready_queue) == 0): # improves case 6
# process.increase_wait_t(4)
heapq.heappush(ready_queue,[(process.get_cpu_t(),str(process)),process])
context_switch = True
print("time {}ms: Process {} arrived and added to ready queue {}".format(t,process,format_queue(ready_queue)))
# Start a process if the CPU is open
if ready == True and len(ready_queue) > 0:
ready = False
context_switch = True
queued_process = heapq.heappop(ready_queue)
new_time = t
if replace == True:
burst_end_time = queued_process[0][0]+t+t_cs # context switch for taking off queue
new_time +=t_cs
increase_wait(ready_queue,8)
elif context_switch == True:
burst_end_time = queued_process[0][0]+t+4 # context switch for taking off queue
new_time+= 4
increase_wait(ready_queue,4)
current_process = queued_process[1]
if queued_process[0][0] < queued_process[1].get_cpu_t():
print("time {}ms: Process {} started using the CPU with {}ms remaining {}".format(new_time,current_process,queued_process[0][0],format_queue(ready_queue)))
else:
print("time {}ms: Process {} started using the CPU {}".format(new_time,current_process,format_queue(ready_queue)))
# Exit when all processes are complete (No mory CPU Bursts or IO Operations)
if len(processes_list) == len(completed_processes):
context_switch = True # account for final exit from CPU
finished = True
# Increment time normally if a context switch didn't occur
if replace == True:
t+=t_cs
context+=0.5
replace = False
elif context_switch == True:
t+=4 # about to switch to new process
context+=0.5
context_switch = False
else:
t+=1
increase_wait(ready_queue,1)
print("time {}ms: Simulator ended for SRT".format(t))
    # Calculate stats
# Average turnaround time = total turnaround time for all processes / total number of CPU bursts (for ALL processes)
# Turnaround time for a single process = finished time - arrival time - (iotime * (number of bursts -1))
# Theoretically, the below should work as well:
# Turnaround time for a single process = total context switch time + (number of bursts) * cpu burst time + wait time
# Add the turnaround times for each process and you get the total turnaround time for all processes.
# Average wait time = total wait time / total number of CPU bursts (for ALL processes)
# Wait time for a single process = total time process spends in the ready queue
for process in sorted(completed_processes):
avg_wait+=process.get_wait_t()
# avg_turn+= process.get_end_t() - process.get_arrival_t() - (process.get_io_t() * ())
return [float(avg_burst)/total_bursts,float(avg_wait)/total_bursts,float(avg_turn)/total_bursts,int(context),preemption]
# if __name__ == '__main__':
# Input 1 WORKING
# process_list = list([
# Process('A',0,168,5,287),
# Process('B',0,385,1,0),
# Process('C',190,97,5,2499),
# Process('D',250,1770,2,822)
# ])
# Input 2 WORKING
# process_list = list([
# Process('X',0,80,5,500)
# ])
# Input 3 WORKING, Wait time off BY 4
# process_list = list([
# Process('X',0,560,5,20),
# Process('Y',0,840,5,20),
# Process('Z',0,924,5,20)
# ])
# Input 4 WORKING
# process_list = list({
# Process('A',0,100,4,200),
# Process('B',0,101,4,200),
# Process('C',0,102,4,200),
# Process('X',0,103,4,200),
# Process('Y',0,104,4,200),
# Process('Z',0,105,4,200)
# })
# Input 5 OFF BY 4
# process_list = list([
# Process('T',0,700,5,20),
# Process('U',20,340,6,40),
# Process('V',190,940,3,200)
# ])
# Input 6 WORKING
# process_list = list([
# Process("A",0,20,5,40),
# Process("B",20,36,2,100),
# Process("C",68,30,1,0)
# ])
# print(srt(process_list)) | mit | -2,682,632,519,010,425,000 | 34.756554 | 162 | 0.636669 | false |
Splawik/pytigon | pytigon/appdata/plugins/standard/html_print/printframework.py | 1 | 4874 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#Pytigon - wxpython and django application framework
#author: "Slawomir Cholaj (slawomir.cholaj@gmail.com)"
#copyright: "Copyright (C) ????/2012 Slawomir Cholaj"
#license: "LGPL 3.0"
#version: "0.1a"
import os
import wx
from pytigon_lib.schhtml.wxdc import DcDc
from pytigon_lib.schhtml.cairodc import CairoDc
from pytigon_lib.schhtml.htmlviewer import HtmlViewerParser
class HtmlCanvas(object):
def __init__(self, zip_name):
self.zip_name = zip_name
self.scale = 1.0
self.dc_zip = DcDc(calc_only=False, scale=self.scale)
self.dc_zip.load(self.zip_name)
self.page_no = 1
self.width = self.dc_zip.width * self.scale
self.height = self.dc_zip.height * self.scale
self.page_count = self.dc_zip.get_page_count()
self.state = self.dc_zip.state()
def set_page(self, page_no):
self.page_no = page_no
def getWidth(self):
return self.width
def getHeight(self):
return self.height
def DoDrawing(self, dc, option, scale):
self.scale = scale
self.dc_zip.restore_state(self.state)
self.dc_zip.set_scale(scale)
dc_buf = self.dc_zip.dc
self.dc_zip.dc = dc
self.dc_zip.play(self.page_no - 1)
self.dc_zip.dc = dc_buf
def save(self, file_name):
dc = CairoDc(calc_only=False, width=self.width/self.scale, height=self.height/self.scale, output_name=file_name)
dc.load(self.zip_name)
count = dc.get_page_count()
for i in range(0, count):
if i > 0:
dc.start_page()
dc.play(i)
dc.close()
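# Usage sketch (assumes the zip was produced by pytigon's HTML renderer):
#   canvas = HtmlCanvas('report.zip')
#   canvas.save('report.pdf')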
class MyPrintout(wx.Printout):
def __init__(self, canvas):
wx.Printout.__init__(self)
self.canvas = canvas
def HasPage(self, page):
if page <= self.canvas.page_count:
return True
else:
return False
def GetPageInfo(self):
return (1, self.canvas.page_count, 1, self.canvas.page_count)
def OnPrintPage(self, page):
dc = self.GetDC()
max_x = self.canvas.getWidth()
max_y = self.canvas.getHeight()
margin_x = 0
margin_y = 0
max_x = max_x + 2 * margin_x
max_y = max_y + 2 * margin_y
(w, h) = dc.GetSize()
scale_x = 1.0 * w / max_x
scale_y = 1.0 * h / max_y
actual_scale = min(scale_x, scale_y)
pos_x = (w - self.canvas.getWidth() * actual_scale) / 2.0
pos_y = (h - self.canvas.getHeight() * actual_scale) / 2.0
dc.SetDeviceOrigin(int(pos_x), int(pos_y))
dc.Clear()
self.canvas.set_page(page)
self.canvas.DoDrawing(dc, True, actual_scale)
return True
class HtmlPreviewCanvas(wx.PreviewCanvas):
def __init__(self, parent, **argv):
self.canvas = HtmlCanvas(parent.Parametry)
self.printData = wx.PrintData()
self.printData.SetPaperId(wx.PAPER_A4)
self.printData.SetPrintMode(wx.PRINT_MODE_PRINTER)
self.printData.SetOrientation(wx.PORTRAIT)
data = wx.PrintDialogData(self.printData)
printout = MyPrintout(self.canvas)
printout2 = MyPrintout(self.canvas)
self.preview = wx.PrintPreview(printout, printout2, data)
if 'name' in argv:
name = argv['name']
else:
name = 'htmlpreview'
wx.PreviewCanvas.__init__(self, self.preview, parent, name=name)
self.preview.SetCanvas(self)
self.preview.SetZoom(100)
def save(self):
dlg = wx.FileDialog(self, message="Save file as ...", defaultDir=os.getcwd(),
defaultFile="", wildcard="Pdf document (*.pdf)|*.pdf", style=wx.SAVE)
if dlg.ShowModal() == wx.ID_OK:
path = dlg.GetPath()
self.canvas.save(path)
dlg.Destroy()
def Print(self):
pdd = wx.PrintDialogData(self.printData)
printer = wx.Printer(pdd)
printout = MyPrintout(self.canvas)
if not printer.Print(self, printout, True):
wx.MessageBox("There was a problem printing.\nPerhaps your current printer is not set correctly?", "Printing", wx.OK)
else:
self.printData = wx.PrintData( printer.GetPrintDialogData().GetPrintData() )
printout.Destroy()
| lgpl-3.0 | 1,808,380,376,999,584,000 | 31.932432 | 129 | 0.614485 | false |
alfredodeza/pacha | pacha/tests/test_daemon.py | 1 | 6888 | import getpass
import os
import unittest
import shutil
import pacha
from guachi import ConfigMapper
from pacha import daemon, host
from pacha.database import Worker
from mock import MockSys
DICT_CONF = dict(
frequency = 60,
master = 'False',
host = '%s' % host.hostname(),
ssh_user = '%s' % getpass.getuser(),
ssh_port = 22,
hosts_path = '/tmp/remote_pacha/hosts',
hg_autocorrect = 'True',
log_enable = 'False',
log_path = 'False',
log_level = 'DEBUG',
log_format = '%(asctime)s %(levelname)s %(name)s %(message)s',
log_datefmt = '%H=%M=%S'
)
class SingleRepository(unittest.TestCase):
username = getpass.getuser()
dict_conf = dict(
ssh_user = username,
host = host.hostname(),
hosts_path = '/tmp/remote_pacha/hosts'
)
def setUp(self):
test_dir = '/tmp/pacha_test'
remote_dir = '/tmp/remote_pacha'
pacha_host = '/tmp/pacha_test_host'
if os.path.isdir(test_dir):
shutil.rmtree(test_dir)
if os.path.isdir(remote_dir):
shutil.rmtree(remote_dir)
if os.path.isdir(pacha_host):
shutil.rmtree(pacha_host)
pacha.DB_DIR = '/tmp/pacha_test'
pacha.DB_FILE ='/tmp/pacha_test/pacha_test.db'
pacha.permissions.DB_FILE ='/tmp/pacha_test/pacha_test.db'
pacha.sync.DB_FILE ='/tmp/pacha_test/pacha_test.db'
pacha.hg.DB_FILE ='/tmp/pacha_test/pacha_test.db'
pacha.database.DB_FILE = '/tmp/pacha_test/pacha_test.db'
pacha.database.DB_DIR = '/tmp/pacha_test'
pacha.daemon.PID_DIR = '/tmp/pacha_test'
pacha.daemon.DB_FILE = '/tmp/pacha_test/pacha_test.db'
os.makedirs('/tmp/remote_pacha/hosts/%s' % host.hostname())
os.mkdir(test_dir)
conf = open('/tmp/pacha_test/pacha.conf', 'w')
conf.write('[DEFAULT]\n')
conf.write('pacha.ssh.user = %s\n' % self.username)
conf.write('pacha.host = %s\n' % host.hostname())
conf.write('pacha.hosts.path = /tmp/remote_pacha/hosts\n')
conf.close()
conf = ConfigMapper('/tmp/pacha_test/pacha_test.db').stored_config()
for k, v in DICT_CONF.items():
conf[k] = v
def tearDown(self):
# make sure we do not have db file
test_dir = '/tmp/pacha_test'
remote_dir = '/tmp/remote_pacha'
pacha_host = '/tmp/pacha_test_host'
if os.path.isdir(test_dir):
shutil.rmtree(test_dir)
if os.path.isdir(remote_dir):
shutil.rmtree(remote_dir)
if os.path.isdir(pacha_host):
shutil.rmtree(pacha_host)
def test_init(self):
"""Should get a normpath from a path"""
path = "/tmp/"
watch = daemon.SingleRepository(path)
actual = watch.path
expected = "/tmp"
self.assertEqual(actual, expected)
self.assertEqual(watch.dir_path, expected)
def test_init_dir(self):
"""convert a path ending in file to a path ending in dir"""
path = "/tmp/file.txt"
watch = daemon.SingleRepository(path)
actual = watch.dir_path
expected = "/tmp"
self.assertEqual(actual, expected)
self.assertEqual(watch.path, '/tmp/file.txt')
def test_is_modified_true(self):
"""Return true when a directory timestamp is newer than the tracked
one"""
db = Worker('/tmp/pacha_test/pacha_test.db')
        db.insert('/tmp', None, None, timestamp=1)
watch = daemon.SingleRepository('/tmp')
self.assertTrue(watch.is_modified())
def test_is_modified_false(self):
"""Return false when the directory timestamp is older than the tracked
one"""
db = Worker('/tmp/pacha_test/pacha_test.db')
        db.insert('/tmp', None, None, timestamp=9997446874)
watch = daemon.SingleRepository('/tmp')
self.assertFalse(watch.is_modified())
def test_store_timestamp(self):
"""Make sure we are storing (updating) the timestamp"""
db = Worker('/tmp/pacha_test/pacha_test.db')
        db.insert('/tmp', None, None, timestamp=9997446874)
db.closedb()
watch = daemon.SingleRepository('/tmp')
watch.store_timestamp(111)
dbase = Worker('/tmp/pacha_test/pacha_test.db')
repo = [i for i in dbase.get_repo('/tmp')]
actual = repo[0][4]
expected = u'111'
self.assertEqual(actual, expected)
def test_synchronize_true(self):
"""When we find a modified file we need to synchronize"""
db = Worker('/tmp/pacha_test/pacha_test.db')
        db.insert('/tmp/pacha_test', None, None, timestamp=1)
watch = daemon.SingleRepository('/tmp/pacha_test')
watch.synchronize()
repo = [i for i in db.get_repo('/tmp/pacha_test')]
self.assertTrue(os.path.isdir('/tmp/remote_pacha/hosts/%s/pacha_test' %
host.hostname()))
self.assertNotEqual(repo[0][4], 1)
def test_synchronize_false(self):
"""Do not synchronize anything if the timestamp is older"""
db = Worker('/tmp/pacha_test/pacha_test.db')
        db.insert('/tmp/pacha_test', None, None, timestamp=9999999999)
watch = daemon.SingleRepository('/tmp/pacha_test')
watch.synchronize()
repo = [i for i in db.get_repo('/tmp/pacha_test')]
self.assertFalse(os.path.isdir('/tmp/remote_pacha/hosts/%s/pacha_test' %
host.hostname()))
self.assertEqual(repo[0][4], u'9999999999')
def test_daemon_do_nothing(self):
"""Since the repos are empty just run once and exit cleanly """
self.assertRaises(SystemExit, daemon.start, foreground=True,
run_once=True)
self.assertEqual(len(os.listdir('/tmp/remote_pacha/hosts/%s' %
host.hostname())), 0)
class TestFrecuency(unittest.TestCase):
def test_freq_string(self):
"Return an integer if we send a string"
actual = daemon.frecuency("10")
expected = 10
self.assertEqual(actual, expected)
def test_freq_valueerror(self):
"Get 60 secs back if we have something other than an int"
actual = daemon.frecuency("")
expected = 60
self.assertEqual(actual, expected)
def test_freq_under_ten(self):
"If we have less than 10 secs return 60 secs"
actual = daemon.frecuency("4")
expected = 60
self.assertEqual(actual, expected)
def test_freq_exception(self):
"No matter what we send we get 60 secs back"
actual = daemon.frecuency({})
expected = 60
self.assertEqual(actual, expected)
if __name__ == '__main__':
unittest.main()
| mit | -4,560,421,373,802,544,000 | 35.638298 | 80 | 0.585075 | false |
Nikola-K/RESTool | RESTool/restoolgui.py | 1 | 31410 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'RESTool/ui/design_v2.ui'
#
# Created: Fri May 29 22:07:35 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(556, 543)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setSpacing(9)
self.horizontalLayout.setMargin(3)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.MainTabWidget = QtGui.QTabWidget(self.centralwidget)
self.MainTabWidget.setObjectName(_fromUtf8("MainTabWidget"))
self.BackupRestoreMigrateTab = QtGui.QWidget()
self.BackupRestoreMigrateTab.setObjectName(_fromUtf8("BackupRestoreMigrateTab"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.BackupRestoreMigrateTab)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.MainContentLayout = QtGui.QVBoxLayout()
self.MainContentLayout.setObjectName(_fromUtf8("MainContentLayout"))
self.BrowseerPickerLayout = QtGui.QHBoxLayout()
self.BrowseerPickerLayout.setSpacing(5)
self.BrowseerPickerLayout.setObjectName(_fromUtf8("BrowseerPickerLayout"))
self.FirstBrowserLayout = QtGui.QHBoxLayout()
self.FirstBrowserLayout.setSpacing(0)
self.FirstBrowserLayout.setObjectName(_fromUtf8("FirstBrowserLayout"))
self.FirstBrowserVerticalLayout = QtGui.QVBoxLayout()
self.FirstBrowserVerticalLayout.setSpacing(0)
self.FirstBrowserVerticalLayout.setObjectName(_fromUtf8("FirstBrowserVerticalLayout"))
self.StaticFirstBrowserLabel = QtGui.QLabel(self.BackupRestoreMigrateTab)
font = QtGui.QFont()
font.setItalic(True)
self.StaticFirstBrowserLabel.setFont(font)
self.StaticFirstBrowserLabel.setAlignment(QtCore.Qt.AlignCenter)
self.StaticFirstBrowserLabel.setObjectName(_fromUtf8("StaticFirstBrowserLabel"))
self.FirstBrowserVerticalLayout.addWidget(self.StaticFirstBrowserLabel)
self.FirstBrowserNameLayout = QtGui.QHBoxLayout()
self.FirstBrowserNameLayout.setObjectName(_fromUtf8("FirstBrowserNameLayout"))
self.StaticBrowserName = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticBrowserName.setObjectName(_fromUtf8("StaticBrowserName"))
self.FirstBrowserNameLayout.addWidget(self.StaticBrowserName)
self.cboFirstBrowser = QtGui.QComboBox(self.BackupRestoreMigrateTab)
self.cboFirstBrowser.setObjectName(_fromUtf8("cboFirstBrowser"))
self.FirstBrowserNameLayout.addWidget(self.cboFirstBrowser)
self.FirstBrowserNameLayout.setStretch(0, 40)
self.FirstBrowserNameLayout.setStretch(1, 60)
self.FirstBrowserVerticalLayout.addLayout(self.FirstBrowserNameLayout)
self.FirstBrowserProfileLayout = QtGui.QHBoxLayout()
self.FirstBrowserProfileLayout.setObjectName(_fromUtf8("FirstBrowserProfileLayout"))
self.StaticProfileName = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticProfileName.setObjectName(_fromUtf8("StaticProfileName"))
self.FirstBrowserProfileLayout.addWidget(self.StaticProfileName)
self.cboFirstBrowserProfile = QtGui.QComboBox(self.BackupRestoreMigrateTab)
self.cboFirstBrowserProfile.setObjectName(_fromUtf8("cboFirstBrowserProfile"))
self.FirstBrowserProfileLayout.addWidget(self.cboFirstBrowserProfile)
self.FirstBrowserProfileLayout.setStretch(0, 40)
self.FirstBrowserProfileLayout.setStretch(1, 60)
self.FirstBrowserVerticalLayout.addLayout(self.FirstBrowserProfileLayout)
self.FirstBrowserDBFoundLayout = QtGui.QHBoxLayout()
self.FirstBrowserDBFoundLayout.setObjectName(_fromUtf8("FirstBrowserDBFoundLayout"))
self.StaticRESInfo = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticRESInfo.setObjectName(_fromUtf8("StaticRESInfo"))
self.FirstBrowserDBFoundLayout.addWidget(self.StaticRESInfo)
self.FirstBrowserRESLabel = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.FirstBrowserRESLabel.setObjectName(_fromUtf8("FirstBrowserRESLabel"))
self.FirstBrowserDBFoundLayout.addWidget(self.FirstBrowserRESLabel)
self.FirstBrowserDBFoundLayout.setStretch(0, 40)
self.FirstBrowserDBFoundLayout.setStretch(1, 60)
self.FirstBrowserVerticalLayout.addLayout(self.FirstBrowserDBFoundLayout)
self.FirstBrowserVerticalLayout.setStretch(0, 10)
self.FirstBrowserVerticalLayout.setStretch(1, 20)
self.FirstBrowserVerticalLayout.setStretch(2, 20)
self.FirstBrowserVerticalLayout.setStretch(3, 20)
self.FirstBrowserLayout.addLayout(self.FirstBrowserVerticalLayout)
self.BrowseerPickerLayout.addLayout(self.FirstBrowserLayout)
self.StaticBrowserDividerLine = QtGui.QFrame(self.BackupRestoreMigrateTab)
self.StaticBrowserDividerLine.setFrameShape(QtGui.QFrame.VLine)
self.StaticBrowserDividerLine.setFrameShadow(QtGui.QFrame.Sunken)
self.StaticBrowserDividerLine.setObjectName(_fromUtf8("StaticBrowserDividerLine"))
self.BrowseerPickerLayout.addWidget(self.StaticBrowserDividerLine)
self.SecondBrowserLayout = QtGui.QHBoxLayout()
self.SecondBrowserLayout.setSpacing(0)
self.SecondBrowserLayout.setObjectName(_fromUtf8("SecondBrowserLayout"))
self.verticalLayout = QtGui.QVBoxLayout()
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.StaticSecondBrowserLabel = QtGui.QLabel(self.BackupRestoreMigrateTab)
font = QtGui.QFont()
font.setItalic(True)
self.StaticSecondBrowserLabel.setFont(font)
self.StaticSecondBrowserLabel.setAlignment(QtCore.Qt.AlignCenter)
self.StaticSecondBrowserLabel.setObjectName(_fromUtf8("StaticSecondBrowserLabel"))
self.verticalLayout.addWidget(self.StaticSecondBrowserLabel)
self.SecondBrowserNameLayout = QtGui.QHBoxLayout()
self.SecondBrowserNameLayout.setObjectName(_fromUtf8("SecondBrowserNameLayout"))
self.StaticBrowserName_2 = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticBrowserName_2.setObjectName(_fromUtf8("StaticBrowserName_2"))
self.SecondBrowserNameLayout.addWidget(self.StaticBrowserName_2)
self.cboSecondBrowser = QtGui.QComboBox(self.BackupRestoreMigrateTab)
self.cboSecondBrowser.setObjectName(_fromUtf8("cboSecondBrowser"))
self.SecondBrowserNameLayout.addWidget(self.cboSecondBrowser)
self.SecondBrowserNameLayout.setStretch(0, 40)
self.SecondBrowserNameLayout.setStretch(1, 60)
self.verticalLayout.addLayout(self.SecondBrowserNameLayout)
self.SecondBrowserProfileLayout = QtGui.QHBoxLayout()
self.SecondBrowserProfileLayout.setObjectName(_fromUtf8("SecondBrowserProfileLayout"))
self.StaticProfileName_2 = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticProfileName_2.setObjectName(_fromUtf8("StaticProfileName_2"))
self.SecondBrowserProfileLayout.addWidget(self.StaticProfileName_2)
self.cboSecondBrowserProfile = QtGui.QComboBox(self.BackupRestoreMigrateTab)
self.cboSecondBrowserProfile.setObjectName(_fromUtf8("cboSecondBrowserProfile"))
self.SecondBrowserProfileLayout.addWidget(self.cboSecondBrowserProfile)
self.SecondBrowserProfileLayout.setStretch(0, 40)
self.SecondBrowserProfileLayout.setStretch(1, 60)
self.verticalLayout.addLayout(self.SecondBrowserProfileLayout)
self.SecondBrowserDBFoundLayout = QtGui.QHBoxLayout()
self.SecondBrowserDBFoundLayout.setSpacing(6)
self.SecondBrowserDBFoundLayout.setObjectName(_fromUtf8("SecondBrowserDBFoundLayout"))
self.StaticRESInfo_2 = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticRESInfo_2.setObjectName(_fromUtf8("StaticRESInfo_2"))
self.SecondBrowserDBFoundLayout.addWidget(self.StaticRESInfo_2)
self.SecondBrowserRESLabel = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.SecondBrowserRESLabel.setObjectName(_fromUtf8("SecondBrowserRESLabel"))
self.SecondBrowserDBFoundLayout.addWidget(self.SecondBrowserRESLabel)
self.SecondBrowserDBFoundLayout.setStretch(0, 40)
self.SecondBrowserDBFoundLayout.setStretch(1, 60)
self.verticalLayout.addLayout(self.SecondBrowserDBFoundLayout)
self.verticalLayout.setStretch(0, 10)
self.verticalLayout.setStretch(1, 20)
self.verticalLayout.setStretch(2, 20)
self.verticalLayout.setStretch(3, 20)
self.SecondBrowserLayout.addLayout(self.verticalLayout)
self.BrowseerPickerLayout.addLayout(self.SecondBrowserLayout)
self.MainContentLayout.addLayout(self.BrowseerPickerLayout)
self.StaticBrowserFeaturesLine = QtGui.QFrame(self.BackupRestoreMigrateTab)
self.StaticBrowserFeaturesLine.setFrameShape(QtGui.QFrame.HLine)
self.StaticBrowserFeaturesLine.setFrameShadow(QtGui.QFrame.Sunken)
self.StaticBrowserFeaturesLine.setObjectName(_fromUtf8("StaticBrowserFeaturesLine"))
self.MainContentLayout.addWidget(self.StaticBrowserFeaturesLine)
self.labelMessage = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.labelMessage.setEnabled(True)
self.labelMessage.setStyleSheet(_fromUtf8("color: rgb(255, 0, 0);"))
self.labelMessage.setFrameShape(QtGui.QFrame.NoFrame)
self.labelMessage.setAlignment(QtCore.Qt.AlignCenter)
self.labelMessage.setObjectName(_fromUtf8("labelMessage"))
self.MainContentLayout.addWidget(self.labelMessage)
self.ApplicationActionsLayout = QtGui.QHBoxLayout()
self.ApplicationActionsLayout.setObjectName(_fromUtf8("ApplicationActionsLayout"))
self.BackupsLayout = QtGui.QVBoxLayout()
self.BackupsLayout.setSpacing(6)
self.BackupsLayout.setObjectName(_fromUtf8("BackupsLayout"))
self.StaticLabelMigrating = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticLabelMigrating.setObjectName(_fromUtf8("StaticLabelMigrating"))
self.BackupsLayout.addWidget(self.StaticLabelMigrating)
self.btnFirstToSecond = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnFirstToSecond.setEnabled(False)
self.btnFirstToSecond.setObjectName(_fromUtf8("btnFirstToSecond"))
self.BackupsLayout.addWidget(self.btnFirstToSecond)
self.btnSecondToFirst = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnSecondToFirst.setEnabled(False)
self.btnSecondToFirst.setObjectName(_fromUtf8("btnSecondToFirst"))
self.BackupsLayout.addWidget(self.btnSecondToFirst)
self.StaticDivider = QtGui.QFrame(self.BackupRestoreMigrateTab)
self.StaticDivider.setFrameShape(QtGui.QFrame.HLine)
self.StaticDivider.setFrameShadow(QtGui.QFrame.Sunken)
self.StaticDivider.setObjectName(_fromUtf8("StaticDivider"))
self.BackupsLayout.addWidget(self.StaticDivider)
self.StaticLabelBackups_2 = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticLabelBackups_2.setObjectName(_fromUtf8("StaticLabelBackups_2"))
self.BackupsLayout.addWidget(self.StaticLabelBackups_2)
self.btnBackupFirst = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnBackupFirst.setEnabled(False)
self.btnBackupFirst.setObjectName(_fromUtf8("btnBackupFirst"))
self.BackupsLayout.addWidget(self.btnBackupFirst)
self.btnBackupSecond = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnBackupSecond.setEnabled(False)
self.btnBackupSecond.setObjectName(_fromUtf8("btnBackupSecond"))
self.BackupsLayout.addWidget(self.btnBackupSecond)
self.btnRestoreToFirst = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnRestoreToFirst.setEnabled(False)
self.btnRestoreToFirst.setObjectName(_fromUtf8("btnRestoreToFirst"))
self.BackupsLayout.addWidget(self.btnRestoreToFirst)
self.btnRestoreToSecond = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnRestoreToSecond.setEnabled(False)
self.btnRestoreToSecond.setObjectName(_fromUtf8("btnRestoreToSecond"))
self.BackupsLayout.addWidget(self.btnRestoreToSecond)
self.VersionLayout = QtGui.QHBoxLayout()
self.VersionLayout.setSpacing(0)
self.VersionLayout.setContentsMargins(-1, -1, -1, 0)
self.VersionLayout.setObjectName(_fromUtf8("VersionLayout"))
self.lblVersion = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.lblVersion.setWordWrap(True)
self.lblVersion.setObjectName(_fromUtf8("lblVersion"))
self.VersionLayout.addWidget(self.lblVersion)
self.StaticLabelVersion = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticLabelVersion.setObjectName(_fromUtf8("StaticLabelVersion"))
self.VersionLayout.addWidget(self.StaticLabelVersion)
self.BackupsLayout.addLayout(self.VersionLayout)
self.lblUpdateAvailable = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.lblUpdateAvailable.setEnabled(True)
self.lblUpdateAvailable.setAlignment(QtCore.Qt.AlignCenter)
self.lblUpdateAvailable.setObjectName(_fromUtf8("lblUpdateAvailable"))
self.BackupsLayout.addWidget(self.lblUpdateAvailable)
self.ApplicationActionsLayout.addLayout(self.BackupsLayout)
self.MainFeaturesLayout = QtGui.QVBoxLayout()
self.MainFeaturesLayout.setObjectName(_fromUtf8("MainFeaturesLayout"))
self.StaticLabelBackups = QtGui.QLabel(self.BackupRestoreMigrateTab)
self.StaticLabelBackups.setObjectName(_fromUtf8("StaticLabelBackups"))
self.MainFeaturesLayout.addWidget(self.StaticLabelBackups)
self.listBackups = QtGui.QListWidget(self.BackupRestoreMigrateTab)
self.listBackups.setObjectName(_fromUtf8("listBackups"))
self.MainFeaturesLayout.addWidget(self.listBackups)
self.btnDeleteBackup = QtGui.QPushButton(self.BackupRestoreMigrateTab)
self.btnDeleteBackup.setObjectName(_fromUtf8("btnDeleteBackup"))
self.MainFeaturesLayout.addWidget(self.btnDeleteBackup)
self.ApplicationActionsLayout.addLayout(self.MainFeaturesLayout)
self.ApplicationActionsLayout.setStretch(0, 60)
self.ApplicationActionsLayout.setStretch(1, 40)
self.MainContentLayout.addLayout(self.ApplicationActionsLayout)
self.MainContentLayout.setStretch(0, 30)
self.MainContentLayout.setStretch(3, 70)
self.verticalLayout_2.addLayout(self.MainContentLayout)
self.MainTabWidget.addTab(self.BackupRestoreMigrateTab, _fromUtf8(""))
self.SettingsTab = QtGui.QWidget()
self.SettingsTab.setObjectName(_fromUtf8("SettingsTab"))
self.verticalLayout_4 = QtGui.QVBoxLayout(self.SettingsTab)
self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
self.BackupFolderLayout = QtGui.QVBoxLayout()
self.BackupFolderLayout.setObjectName(_fromUtf8("BackupFolderLayout"))
self.BackupFolderLayout_2 = QtGui.QVBoxLayout()
self.BackupFolderLayout_2.setObjectName(_fromUtf8("BackupFolderLayout_2"))
self.StaticBackupFolderDescLabel = QtGui.QLabel(self.SettingsTab)
self.StaticBackupFolderDescLabel.setObjectName(_fromUtf8("StaticBackupFolderDescLabel"))
self.BackupFolderLayout_2.addWidget(self.StaticBackupFolderDescLabel)
self.BackupFolderLayout_3 = QtGui.QHBoxLayout()
self.BackupFolderLayout_3.setObjectName(_fromUtf8("BackupFolderLayout_3"))
self.StaticBackupFolderLabel = QtGui.QLabel(self.SettingsTab)
self.StaticBackupFolderLabel.setObjectName(_fromUtf8("StaticBackupFolderLabel"))
self.BackupFolderLayout_3.addWidget(self.StaticBackupFolderLabel)
self.lneBackupFolder = QtGui.QLineEdit(self.SettingsTab)
self.lneBackupFolder.setObjectName(_fromUtf8("lneBackupFolder"))
self.BackupFolderLayout_3.addWidget(self.lneBackupFolder)
self.btnBrowseBackupsFolder = QtGui.QPushButton(self.SettingsTab)
self.btnBrowseBackupsFolder.setObjectName(_fromUtf8("btnBrowseBackupsFolder"))
self.BackupFolderLayout_3.addWidget(self.btnBrowseBackupsFolder)
self.BackupFolderLayout_2.addLayout(self.BackupFolderLayout_3)
self.chkAutomaticBakFolder = QtGui.QCheckBox(self.SettingsTab)
self.chkAutomaticBakFolder.setObjectName(_fromUtf8("chkAutomaticBakFolder"))
self.BackupFolderLayout_2.addWidget(self.chkAutomaticBakFolder)
self.BackupFolderLayout.addLayout(self.BackupFolderLayout_2)
self.verticalLayout_4.addLayout(self.BackupFolderLayout)
self.SettingsDividerLine = QtGui.QFrame(self.SettingsTab)
self.SettingsDividerLine.setFrameShape(QtGui.QFrame.HLine)
self.SettingsDividerLine.setFrameShadow(QtGui.QFrame.Sunken)
self.SettingsDividerLine.setObjectName(_fromUtf8("SettingsDividerLine"))
self.verticalLayout_4.addWidget(self.SettingsDividerLine)
self.BackupDateFormatLayout = QtGui.QVBoxLayout()
self.BackupDateFormatLayout.setObjectName(_fromUtf8("BackupDateFormatLayout"))
self.StaticBackupDateFormatDescLabel = QtGui.QLabel(self.SettingsTab)
self.StaticBackupDateFormatDescLabel.setObjectName(_fromUtf8("StaticBackupDateFormatDescLabel"))
self.BackupDateFormatLayout.addWidget(self.StaticBackupDateFormatDescLabel)
self.BackupDateFormatLayout_2 = QtGui.QHBoxLayout()
self.BackupDateFormatLayout_2.setObjectName(_fromUtf8("BackupDateFormatLayout_2"))
self.StaticBackupDateFormatLabel = QtGui.QLabel(self.SettingsTab)
self.StaticBackupDateFormatLabel.setObjectName(_fromUtf8("StaticBackupDateFormatLabel"))
self.BackupDateFormatLayout_2.addWidget(self.StaticBackupDateFormatLabel)
self.lneBackupTimeFormat = QtGui.QLineEdit(self.SettingsTab)
self.lneBackupTimeFormat.setObjectName(_fromUtf8("lneBackupTimeFormat"))
self.BackupDateFormatLayout_2.addWidget(self.lneBackupTimeFormat)
self.BackupDateFormatLayout.addLayout(self.BackupDateFormatLayout_2)
self.verticalLayout_4.addLayout(self.BackupDateFormatLayout)
self.SettingsDividerLine_2 = QtGui.QFrame(self.SettingsTab)
self.SettingsDividerLine_2.setFrameShape(QtGui.QFrame.HLine)
self.SettingsDividerLine_2.setFrameShadow(QtGui.QFrame.Sunken)
self.SettingsDividerLine_2.setObjectName(_fromUtf8("SettingsDividerLine_2"))
self.verticalLayout_4.addWidget(self.SettingsDividerLine_2)
self.PortableSettingsFormatLayout = QtGui.QVBoxLayout()
self.PortableSettingsFormatLayout.setObjectName(_fromUtf8("PortableSettingsFormatLayout"))
self.StaticPortableSettingsDescLabel = QtGui.QLabel(self.SettingsTab)
self.StaticPortableSettingsDescLabel.setWordWrap(True)
self.StaticPortableSettingsDescLabel.setObjectName(_fromUtf8("StaticPortableSettingsDescLabel"))
self.PortableSettingsFormatLayout.addWidget(self.StaticPortableSettingsDescLabel)
self.horizontalLayout_2 = QtGui.QHBoxLayout()
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.chkPortableSettings = QtGui.QCheckBox(self.SettingsTab)
self.chkPortableSettings.setChecked(True)
self.chkPortableSettings.setObjectName(_fromUtf8("chkPortableSettings"))
self.horizontalLayout_2.addWidget(self.chkPortableSettings)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_2.addItem(spacerItem)
self.btnRemoveSystemConfig = QtGui.QPushButton(self.SettingsTab)
self.btnRemoveSystemConfig.setObjectName(_fromUtf8("btnRemoveSystemConfig"))
self.horizontalLayout_2.addWidget(self.btnRemoveSystemConfig)
self.btnRemoveLocalConfig = QtGui.QPushButton(self.SettingsTab)
self.btnRemoveLocalConfig.setObjectName(_fromUtf8("btnRemoveLocalConfig"))
self.horizontalLayout_2.addWidget(self.btnRemoveLocalConfig)
self.PortableSettingsFormatLayout.addLayout(self.horizontalLayout_2)
self.verticalLayout_4.addLayout(self.PortableSettingsFormatLayout)
self.SettingsDividerLine_3 = QtGui.QFrame(self.SettingsTab)
self.SettingsDividerLine_3.setFrameShape(QtGui.QFrame.HLine)
self.SettingsDividerLine_3.setFrameShadow(QtGui.QFrame.Sunken)
self.SettingsDividerLine_3.setObjectName(_fromUtf8("SettingsDividerLine_3"))
self.verticalLayout_4.addWidget(self.SettingsDividerLine_3)
self.DebuggingSettingsLayout = QtGui.QVBoxLayout()
self.DebuggingSettingsLayout.setObjectName(_fromUtf8("DebuggingSettingsLayout"))
self.StaticDebuggingSettingsLabel = QtGui.QLabel(self.SettingsTab)
self.StaticDebuggingSettingsLabel.setObjectName(_fromUtf8("StaticDebuggingSettingsLabel"))
self.DebuggingSettingsLayout.addWidget(self.StaticDebuggingSettingsLabel)
self.DebuggingStatusLabel = QtGui.QLabel(self.SettingsTab)
self.DebuggingStatusLabel.setObjectName(_fromUtf8("DebuggingStatusLabel"))
self.DebuggingSettingsLayout.addWidget(self.DebuggingStatusLabel)
self.DebuggingSettingsLayout_2 = QtGui.QHBoxLayout()
self.DebuggingSettingsLayout_2.setObjectName(_fromUtf8("DebuggingSettingsLayout_2"))
self.btnEnableLogging = QtGui.QPushButton(self.SettingsTab)
self.btnEnableLogging.setObjectName(_fromUtf8("btnEnableLogging"))
self.DebuggingSettingsLayout_2.addWidget(self.btnEnableLogging)
self.btnDisableLogging = QtGui.QPushButton(self.SettingsTab)
self.btnDisableLogging.setObjectName(_fromUtf8("btnDisableLogging"))
self.DebuggingSettingsLayout_2.addWidget(self.btnDisableLogging)
self.btnSubmitBug = QtGui.QPushButton(self.SettingsTab)
self.btnSubmitBug.setObjectName(_fromUtf8("btnSubmitBug"))
self.DebuggingSettingsLayout_2.addWidget(self.btnSubmitBug)
self.DebuggingSettingsLayout.addLayout(self.DebuggingSettingsLayout_2)
self.verticalLayout_4.addLayout(self.DebuggingSettingsLayout)
self.SettingsDividerLine_4 = QtGui.QFrame(self.SettingsTab)
self.SettingsDividerLine_4.setFrameShape(QtGui.QFrame.HLine)
self.SettingsDividerLine_4.setFrameShadow(QtGui.QFrame.Sunken)
self.SettingsDividerLine_4.setObjectName(_fromUtf8("SettingsDividerLine_4"))
self.verticalLayout_4.addWidget(self.SettingsDividerLine_4)
self.AutomaticUpdateLayer = QtGui.QVBoxLayout()
self.AutomaticUpdateLayer.setObjectName(_fromUtf8("AutomaticUpdateLayer"))
self.StaticAutomaticUpdatesDescLabel = QtGui.QLabel(self.SettingsTab)
self.StaticAutomaticUpdatesDescLabel.setWordWrap(True)
self.StaticAutomaticUpdatesDescLabel.setObjectName(_fromUtf8("StaticAutomaticUpdatesDescLabel"))
self.AutomaticUpdateLayer.addWidget(self.StaticAutomaticUpdatesDescLabel)
self.chkAutomaticUpdates = QtGui.QCheckBox(self.SettingsTab)
self.chkAutomaticUpdates.setChecked(False)
self.chkAutomaticUpdates.setObjectName(_fromUtf8("chkAutomaticUpdates"))
self.AutomaticUpdateLayer.addWidget(self.chkAutomaticUpdates)
self.verticalLayout_4.addLayout(self.AutomaticUpdateLayer)
spacerItem1 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.verticalLayout_4.addItem(spacerItem1)
self.SettingsButtonsLayout = QtGui.QHBoxLayout()
self.SettingsButtonsLayout.setObjectName(_fromUtf8("SettingsButtonsLayout"))
self.btnRestoreSettings = QtGui.QPushButton(self.SettingsTab)
self.btnRestoreSettings.setObjectName(_fromUtf8("btnRestoreSettings"))
self.SettingsButtonsLayout.addWidget(self.btnRestoreSettings)
self.btnSaveSettings = QtGui.QPushButton(self.SettingsTab)
self.btnSaveSettings.setObjectName(_fromUtf8("btnSaveSettings"))
self.SettingsButtonsLayout.addWidget(self.btnSaveSettings)
self.verticalLayout_4.addLayout(self.SettingsButtonsLayout)
self.MainTabWidget.addTab(self.SettingsTab, _fromUtf8(""))
self.horizontalLayout.addWidget(self.MainTabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
self.MainTabWidget.setCurrentIndex(0)
QtCore.QObject.connect(self.chkAutomaticBakFolder, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.lneBackupFolder.setDisabled)
QtCore.QObject.connect(self.chkAutomaticBakFolder, QtCore.SIGNAL(_fromUtf8("toggled(bool)")), self.btnBrowseBackupsFolder.setDisabled)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "RESTool", None))
self.StaticFirstBrowserLabel.setText(_translate("MainWindow", "First Browser", None))
self.StaticBrowserName.setText(_translate("MainWindow", "Browser Name:", None))
self.StaticProfileName.setText(_translate("MainWindow", "Profile Name:", None))
self.StaticRESInfo.setText(_translate("MainWindow", "RES DB found:", None))
self.FirstBrowserRESLabel.setText(_translate("MainWindow", "N/A", None))
self.StaticSecondBrowserLabel.setText(_translate("MainWindow", "Second Browser", None))
self.StaticBrowserName_2.setText(_translate("MainWindow", "Browser Name:", None))
self.StaticProfileName_2.setText(_translate("MainWindow", "Profile Name:", None))
self.StaticRESInfo_2.setText(_translate("MainWindow", "RES DB found:", None))
self.SecondBrowserRESLabel.setText(_translate("MainWindow", "N/A", None))
self.labelMessage.setText(_translate("MainWindow", "<html><head/><body><p>Warning message</p></body></html>", None))
self.StaticLabelMigrating.setText(_translate("MainWindow", "Migrating existing data", None))
self.btnFirstToSecond.setText(_translate("MainWindow", "First browser to the second browser", None))
self.btnSecondToFirst.setText(_translate("MainWindow", "Second browser to the first browser", None))
self.StaticLabelBackups_2.setText(_translate("MainWindow", "RES database backups", None))
self.btnBackupFirst.setText(_translate("MainWindow", "Backup first browser", None))
self.btnBackupSecond.setText(_translate("MainWindow", "Backup second browser", None))
self.btnRestoreToFirst.setText(_translate("MainWindow", "Restore selected backup to the first browser", None))
self.btnRestoreToSecond.setText(_translate("MainWindow", "Restore selected backup to the second browser", None))
self.lblVersion.setText(_translate("MainWindow", "<html><head/><body><p>Version 0.2.1</p></body></html>", None))
self.StaticLabelVersion.setText(_translate("MainWindow", "<html><head/><body><p>Website: <a href=\"http://nikola-k.github.io/RESTool/\"><span style=\" text-decoration: underline; color:#0000ff;\">nikola-k.github.io/RESTool</span></a></p></body></html>", None))
self.lblUpdateAvailable.setText(_translate("MainWindow", "<html><head/><body><p>Update available. Visit website for more info.</p></body></html>", None))
self.StaticLabelBackups.setText(_translate("MainWindow", "Available Backups", None))
self.btnDeleteBackup.setText(_translate("MainWindow", "Delete Selected Backup", None))
self.MainTabWidget.setTabText(self.MainTabWidget.indexOf(self.BackupRestoreMigrateTab), _translate("MainWindow", "RESTool", None))
self.StaticBackupFolderDescLabel.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-style:italic; color:#585858;\">Folder where the backups will be saved, default: res_backups</span></p></body></html>", None))
self.StaticBackupFolderLabel.setText(_translate("MainWindow", "Backup Folder", None))
self.lneBackupFolder.setText(_translate("MainWindow", "res_backups", None))
self.btnBrowseBackupsFolder.setText(_translate("MainWindow", "Browse", None))
self.chkAutomaticBakFolder.setText(_translate("MainWindow", "Use automatic system specific app directory", None))
self.StaticBackupDateFormatDescLabel.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-style:italic; color:#535353;\">Custom backup date/time format, default: %Y-%m-%d - </span><a href=\"https://docs.python.org/2/library/datetime.html#strftime-and-strptime-behavior\"><span style=\" font-style:italic; text-decoration: underline; color:#0055ff;\">available variables</span></a></p></body></html>", None))
self.StaticBackupDateFormatLabel.setText(_translate("MainWindow", "Backup time format:", None))
self.lneBackupTimeFormat.setText(_translate("MainWindow", "%Y-%m-%d", None))
self.StaticPortableSettingsDescLabel.setText(_translate("MainWindow", "<html><head/><body><p>Portable RES settings storage (settings.json with your custom config will be stored next to app if checked, otherwise it will be placed in auto-detected system specific configuration directory)</p></body></html>", None))
self.chkPortableSettings.setText(_translate("MainWindow", "Portable settings", None))
self.btnRemoveSystemConfig.setText(_translate("MainWindow", "Remove System Settings File", None))
self.btnRemoveLocalConfig.setText(_translate("MainWindow", "Remove Local Settings File", None))
self.StaticDebuggingSettingsLabel.setText(_translate("MainWindow", "Debugging/Log reporting functions:", None))
self.DebuggingStatusLabel.setText(_translate("MainWindow", "Current logging status: Disabled", None))
self.btnEnableLogging.setText(_translate("MainWindow", "Enable Logging", None))
self.btnDisableLogging.setText(_translate("MainWindow", "Disable Logging", None))
self.btnSubmitBug.setText(_translate("MainWindow", "Submit a bug report", None))
self.StaticAutomaticUpdatesDescLabel.setText(_translate("MainWindow", "<html><head/><body><p>Automatic checking for new version. No personal information is sent to the server.</p></body></html>", None))
self.chkAutomaticUpdates.setText(_translate("MainWindow", "Check for updates on startup.", None))
self.btnRestoreSettings.setText(_translate("MainWindow", "Restore settings to default", None))
self.btnSaveSettings.setText(_translate("MainWindow", "Save current settings", None))
self.MainTabWidget.setTabText(self.MainTabWidget.indexOf(self.SettingsTab), _translate("MainWindow", "Settings", None))
| apache-2.0 | 6,926,639,291,354,577,000 | 71.373272 | 438 | 0.757179 | false |
dani-i/bachelor-project | utils/new_file_details.py | 1 | 2259 | import file_experts.file_expert as fe
class NewFileDetails:
def __init__(self):
self._directory_path = ''
self._file_name = ''
self._description = ''
def __str__(self):
rez = '\n---- New file details ----\n'
rez += '\nDirectory path : ' + self.directory_path
rez += '\nName : ' + self.file_name
rez += '\nDescription : ' + self.description
rez += '\n-- Is valid : ' + str(self.is_valid()) + ' --'
return rez
##########################################################################
# directory_path
@property
def directory_path(self):
return self._directory_path
@directory_path.setter
def directory_path(
self,
value: str):
self._directory_path = value
##########################################################################
# file_name
@property
def file_name(self):
return self._file_name
@file_name.setter
def file_name(self,
value: str):
self._file_name = value
##########################################################################
# description
@property
def description(self):
return self._description
@description.setter
def description(self,
value: str):
self._description = value
##########################################################################
def is_valid(self):
"""
- Checks if valid
:return: - True if valid
- False otherwise
"""
if not isinstance(self.directory_path, str) \
or not isinstance(self.description, str) \
or not isinstance(self.file_name, str):
return False
if self.directory_path == '' \
or self.description == '' \
or self.file_name == '':
return False
file_path = self.directory_path + '/' + self.file_name
if not fe.is_directory(self.directory_path) \
or not fe.is_file(file_path):
return False
return True
##########################################################################
| apache-2.0 | -3,288,485,386,326,377,500 | 24.965517 | 78 | 0.420983 | false |
kubapok/tank-game | Target.py | 1 | 2292 | #!/usr/bin/python3
import pygame
import os
import random
class Target():
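    # Base class for anything a shell can destroy. Keeps a class-level registry
    # (Target.targets) and spawns a Destroyed effect when a target is removed.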
targets = []
def __init__(self,destroyable,name):
self.targetName = name
self.destroyable = destroyable
Target.targets.append(self)
def delete(target):
if target.targetName == 'train':
target.kill()
flash = Destroyed(target.rect.x + target.image.get_rect().centerx,target.rect.y + target.image.get_rect().centery)
elif target.targetName == 'sheep':
blood = Destroyed(target.rect.x + target.image.get_rect().centerx,target.rect.y + target.image.get_rect().centery, blood = True)
Target.targets.remove(target)
else:
flash = Destroyed(target.rect.x + target.image.get_rect().centerx,target.rect.y + target.image.get_rect().centery)
Target.targets.remove(target)
def detectCollison(target, object):
if (target.destroyable == True) and pygame.sprite.collide_mask(target, object):
return True
class Destroyed(pygame.sprite.Sprite, Target):
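    # Short-lived flash/blood sprite drawn with positional jitter for a few
    # frames before removing itself from Destroyed.destroyed.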
destroyed = []
displayFlashTime = 15
displayFlashTimeShift = 15
displayFlashPosShift = 20
def __init__(self,x,y, blood = False):
pygame.sprite.Sprite.__init__(self)
        self.displayFlashTime = Destroyed.displayFlashTime + int(random.random() * 2 * Destroyed.displayFlashTimeShift)
if blood == False:
self.image = pygame.image.load(os.path.join('Images','flash.png')).convert_alpha()
else:
self.image = pygame.image.load(os.path.join('Images','blood.png')).convert_alpha()
self.rect = self.image.get_rect()
self.rect.x = x - self.image.get_rect().centerx
self.rect.y = y - self.image.get_rect().centery
Destroyed.destroyed.append(self)
def delete(destroyed):
Destroyed.destroyed.remove(destroyed)
def display(self, display):
if self.displayFlashTime:
display.blit(self.image, (self.rect.x + int(random.random()*2*Destroyed.displayFlashPosShift) - Destroyed.displayFlashPosShift,
self.rect.y + int(random.random()*2*Destroyed.displayFlashPosShift) - Destroyed.displayFlashPosShift))
self.displayFlashTime -= 1
else:
Destroyed.delete(self)
| mit | 770,327,994,473,418,600 | 37.2 | 140 | 0.643106 | false |
hmgoalie35/makefile-generator | makefile_generator.py | 1 | 6903 | import os
import argparse
import platform
SUPPORTED_LANGUAGES = ['c++', 'c']
class MakefileGenerator(object):
def __init__(self):
self.__directory = ""
self.__compiler = "g++"
self.__flags = "-g -Wall -std=c++11"
self.__executable = ""
self.__args = ""
self.__lib = ""
self.__lang = ""
self.parse_command_line_input()
def create_parser(self):
parser = argparse.ArgumentParser(description="Generate makefile for files in the specified directory")
parser.add_argument('dir', help="Directory with the file(s)")
parser.add_argument('-flags', required=False, help="Flag(s) to use when compiling, enclosed in \"\" (Default: %s)" % self.__flags)
parser.add_argument('-cc', required=False, help="Compiler (Default: %s)" % self.__compiler)
parser.add_argument('-exec', required=False, help="Executable name")
parser.add_argument('-lang', required=False, choices=SUPPORTED_LANGUAGES, help="Use the default configs for the selected language")
parser.add_argument('-lib', required=False, help="Libraries (if there are multiple, must be separated by a space)")
parser.add_argument('-mode', required=False, help="If specified, user will enter in data via command line prompts", default=False, action='store_true')
return parser
def parse_command_line_input(self):
parser = self.create_parser()
self.__args = vars(parser.parse_args())
        if not os.path.isdir(self.__args["dir"]):
print "Invalid directory %s, exiting..." % self.__args["dir"]
exit(1)
self.__directory = self.__args['dir']
if self.__args['mode']:
self.prompt_user_for_input()
return 0
if self.__args["cc"]:
self.__compiler = self.__args["cc"]
if self.__args["flags"]:
self.__flags = self.__args["flags"]
if self.__args["exec"]:
self.__executable = self.__args["exec"]
if self.__args['lang']:
self.__lang = self.__args['lang']
if self.__args['lang'].lower() == "c++":
self.__compiler = 'g++'
self.__flags = '-g -Wall -std=c++11'
else:
# mode is c
self.__compiler = 'gcc'
self.__flags = '-g -Wall -std=c11'
if self.__args['lib']:
self.__lib = self.__args['lib']
self.makefile_exists()
def write_to_file(self, file_name):
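        # Collect every .c/.cpp source in the target directory; each file gets
        # its own object-file rule in the generated makefile.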
VALID_EXTENSIONS = [".cpp", ".c"]
file_list = list(filter(lambda x: os.path.splitext(x)[1] in VALID_EXTENSIONS, os.listdir(os.path.dirname(file_name))))
if len(file_list) == 0:
print "No valid files were found in %s, exiting..." % os.path.dirname(file_name)
exit(1)
the_file = open(file_name, 'w')
file_name_list = []
write_str = ""
for a_file in file_list:
print "Processing File: %s" % a_file
name = os.path.splitext(a_file)[0]
file_name_list.append(name)
write_str += "%s.o:\t%s" % (name, a_file)
write_str += "\n\t%s $(%s) -c %s -o %s\n\n" % (self.__compiler, "FLAGS", a_file, name + ".o")
the_file.write("FLAGS = %s\n\n" % self.__flags)
the_file.write("all:\tMain\n\n")
file_names = ""
i = 0
for name in file_name_list:
if i+1 < len(file_name_list):
file_names += name + ".o "
else:
file_names += name + ".o"
i+=1
the_file.write("Main:\tclean %s\n" % file_names)
if self.__executable:
if self.__lib:
the_file.write("\t%s %s -o %s -l %s\n\n" % (self.__compiler, file_names, self.__executable, self.__lib))
else:
the_file.write("\t%s %s -o %s\n\n" % (self.__compiler, file_names, self.__executable))
else:
if self.__lib:
the_file.write("\t%s %s -l %s\n\n" % (self.__compiler, file_names, self.__lib))
else:
the_file.write("\t%s %s\n\n" % (self.__compiler, file_names))
the_file.write(write_str)
if self.__executable:
the_file.write("clean:\n\trm -f *.o %s\n" % self.__executable)
else:
the_file.write("clean:\n\trm -f *.o %s\n" % ("a.exe" if platform.system().lower() == "windows" else "a.out"))
the_file.close()
print "%s successfully saved." % file_name
def prompt_user_for_input(self):
print "Please fill out the following, or press <return> to ignore and use the default."
lang = raw_input("Language: if specified, defaults for the selected language will be used and you will not be able to further customize anything. Select from %s: " % self.list_to_string(SUPPORTED_LANGUAGES))
if lang:
self.__lang = lang.strip().lower()
if self.__lang == 'c++':
self.__compiler = 'g++'
self.__flags = '-g -Wall -std=c++11'
elif self.__lang == 'c':
# mode is c
self.__compiler = 'gcc'
self.__flags = '-g -Wall -std=c11'
else:
print "%s is not a valid selection, select from %s" % (self.__lang, self.list_to_string(SUPPORTED_LANGUAGES))
exit(1)
else:
compiler = raw_input("Compiler: ").strip()
if compiler:
self.__compiler = compiler
flags = raw_input("Flags: ").strip()
if flags:
self.__flags = flags
lib = raw_input("Extra libraries, separated by a space: ").strip()
if lib:
self.__lib = lib
executable = raw_input("Executable Name: ").strip()
if executable:
self.__executable = executable
self.makefile_exists()
def makefile_exists(self):
makefile_name = "makefile"
the_file = os.path.join(self.__directory, makefile_name)
# note the exists funciton is case insensitive
if os.path.exists(the_file):
if os.path.isfile(the_file):
answer = raw_input("%s already exists, overwrite? (y/n): " % os.path.basename(the_file))
if answer.strip().lower() == 'y':
self.write_to_file(the_file)
else:
print "File not overwritten, exiting..."
exit(0)
else:
print "%s is not a valid file and/or path to a file. Is %s a directory?" % (the_file, the_file)
else:
self.write_to_file(the_file)
def list_to_string(self, the_list):
return '[' + ', '.join(the_list) + ']'
if __name__ == '__main__':
MakefileGenerator()
| mit | -844,886,670,939,234,000 | 40.590361 | 215 | 0.524265 | false |
Kromey/piroute | iptables/forms.py | 1 | 1254 | from django import forms
from helpers.forms import PirouteForm
from . import formsets, services
class InterfacesForm(PirouteForm):
internal_nic = forms.CharField()
external_nic = forms.CharField()
class RuleForm(PirouteForm):
action = forms.ChoiceField(choices=(('accept','Allow'),('drop','Ignore'),('reject','Block'),('forward','Redirect...')))
enabled = forms.BooleanField(initial=True, required=False)
nic = forms.ChoiceField(choices=(('int','Internal'),('ext','External')))
service = forms.ChoiceField(choices=services.get_service_choices())
proto = forms.ChoiceField(choices=services.PROTOCOL_LIST, required=False)
port = forms.IntegerField(min_value=1, max_value=65535, required=False)
comment = forms.CharField(required=False)
def clean(self):
cleaned_data = super().clean()
port = cleaned_data.get('port')
service = cleaned_data.get('service')
if not (service or port):
# No port/service provided, mark for deletion
cleaned_data[forms.formsets.DELETION_FIELD_NAME] = True
return cleaned_data
RuleFormset = formsets.selfcleanformset_factory(RuleForm, min_num=1, extra=0, can_order=True)
| mit | 4,692,705,303,859,145,000 | 32 | 123 | 0.685805 | false |
bretttegart/treadmill | lib/python/treadmill/discovery.py | 1 | 4324 | """List Treadmill endpoints matching a given pattern.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import fnmatch
import logging
import kazoo.exceptions
from six.moves import queue
from treadmill import zknamespace as z
_LOGGER = logging.getLogger(__name__)
class Discovery(object):
"""Treadmill endpoint discovery."""
def __init__(self, zkclient, pattern, endpoint):
_LOGGER.debug('Treadmill discovery: %s:%s', pattern, endpoint)
self.queue = queue.Queue()
# Pattern is assumed to be in the form of <proid>.<pattern>
self.prefix, self.pattern = pattern.split('.', 1)
if '#' not in self.pattern:
self.pattern = self.pattern + '#*'
self.endpoint = endpoint
self.state = set()
self.zkclient = zkclient
def iteritems(self, block=True, timeout=None):
"""List matching endpoints. """
while True:
try:
endpoint, hostport = self.queue.get(block, timeout)
if (endpoint, hostport) == (None, None):
break
yield (endpoint, hostport)
except queue.Empty:
break
def apps_watcher(self, event):
"""Watch for created/deleted apps that match monitored pattern."""
_LOGGER.debug('apps_watcher: %s', event)
self.sync()
def sync(self, watch=True):
"""Find matching endpoints and put them on the queue for processing.
If watch is True, establish a watch on /apps for new changes,
otherwise put termination signal into the queue.
"""
watch_cb = None
if watch:
watch_cb = self.apps_watcher
match = self.get_endpoints_zk(watch_cb=watch_cb)
created = match - set(self.state)
deleted = set(self.state) - match
for endpoint in created:
_LOGGER.debug('added endpoint: %s', endpoint)
hostport = self.resolve_endpoint(endpoint)
self.queue.put(('.'.join([self.prefix, endpoint]), hostport))
for endpoint in deleted:
_LOGGER.debug('deleted endpoint: %s', endpoint)
self.queue.put(('.'.join([self.prefix, endpoint]), None))
self.state = match
def snapshot(self):
"""Returns the current state of the matching endpoints."""
return ['.'.join([self.prefix, endpoint]) for endpoint in self.state]
def exit_loop(self):
"""Put termination event on the queue."""
self.queue.put((None, None))
def get_endpoints(self):
"""Returns the current list of endpoints in host:port format"""
endpoints = self.get_endpoints_zk()
hostports = [self.resolve_endpoint(endpoint)
for endpoint in endpoints]
return hostports
def get_endpoints_zk(self, watch_cb=None):
"""Returns the current list of endpoints."""
endpoints_path = z.join_zookeeper_path(z.ENDPOINTS, self.prefix)
full_pattern = ':'.join([self.pattern, '*', self.endpoint])
try:
endpoints = self.zkclient.get_children(
endpoints_path, watch=watch_cb
)
match = set([endpoint for endpoint in endpoints
if fnmatch.fnmatch(endpoint, full_pattern)])
except kazoo.exceptions.NoNodeError:
if watch_cb:
self.zkclient.exists(endpoints_path, watch=watch_cb)
match = set()
return match
def resolve_endpoint(self, endpoint):
"""Resolves a endpoint to a hostport"""
fullpath = z.join_zookeeper_path(z.ENDPOINTS, self.prefix, endpoint)
try:
hostport, _metadata = self.zkclient.get(fullpath)
hostport = hostport.decode()
except kazoo.exceptions.NoNodeError:
hostport = None
return hostport
def iterator(zkclient, pattern, endpoint, watch):
"""Returns app discovery iterator based on native zk discovery.
"""
app_discovery = Discovery(zkclient, pattern, endpoint)
app_discovery.sync(watch)
if not watch:
app_discovery.exit_loop()
for (app, hostport) in app_discovery.iteritems():
yield app, hostport
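# Usage sketch (assumes an already-connected zkclient; names are illustrative):
#     for app, hostport in iterator(zkclient, 'proid.myapp', 'http', watch=False):
#         print(app, hostport)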
| apache-2.0 | -4,177,558,828,921,700,000 | 31.511278 | 77 | 0.605227 | false |
luca-heltai/ePICURE | utilities/arc_lib.py | 1 | 1106 | # This library provides the functions and objects needed to reparametrize a curve using the arc-length parametrization.
import numpy as np
import math
import sys
#sys.path.append('../interfaces')
from interfaces.vector_space import *
from interfaces.lagrange_vector_space import *
from utilities.matrices import *
class Curve(object):
"""A python utilities that handle a generic curve. As inputs it
requires a vector space and control points.
"""
def __init__(self, vector_space, control_points):
"""We need to check that the input data are consistent"""
assert(len(control_points) == vector_space.n_dofs)
self.vector_space = AffineVectorSpace(vector_space, 0, 1)
#self.vector_space = vector_space
self.control_points = control_points
self.n_dofs = vector_space.n_dofs
def __call__(self):
return self.vector_space.element(self.control_points)
def curve_derivative(self):
return self.vector_space.element_der(self.control_points)
def get_vector_space(self):
return self.vector_space
| gpl-2.0 | 9,099,227,665,084,174,000 | 31.529412 | 134 | 0.700723 | false |
jiayisuse/cs73 | wp-admin/bigrams_perplexity.py | 1 | 2550 | #!/usr/bin/env python
import sys
import re
import math
import os
import glob
import nltk
import json
import include
categories_info = []
lambdaa = 0.2
html = sys.argv[1].lower()
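# Classify the HTML document passed on the command line: score its token stream
# against each category's interpolated bigram model and keep the categories with
# the lowest perplexity.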
def do_read_train(uni_dict, bi_dict, train_file):
file = open(train_file, "r")
lines = file.readlines()
file.close()
for line in lines:
words = line.split()
'''
if words[4] == "3":
break
'''
bi_dict[words[0]] = [words[1], words[2]]
uni_dict[words[0].split("|")[1]] = [words[3], words[4]]
num_tokens = line.split()[-1]
uni_dict["UNK"] = [min(include.min_UNK, 1 / (float(num_tokens))), "1"]
def read_train_info(categories_info, dataset_dir):
def read_train(categories_info, dirpath, namelist):
pattern = os.path.join(dirpath, "*" + include.bi_train_suffix)
for bi_train_file in glob.glob(pattern):
categories = bi_train_file[len(dataset_dir):]
categories = os.path.split(categories)[0]
category_names = categories.split("/")
#print category_names
uni_train_dict = {}
bi_train_dict = {}
do_read_train(uni_train_dict, bi_train_dict, bi_train_file)
category_info = []
category_info.append(category_names[1])
category_info.append(uni_train_dict)
category_info.append(bi_train_dict)
categories_info.append(category_info)
os.path.walk(dataset_dir, read_train, categories_info)
def suprisal(first, second, uni_dict, bi_dict):
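	# Interpolated surprisal: p = (1 - lambdaa) * P(first|second) + lambdaa * P(first),
	# falling back to the UNK unigram estimate for unseen words; returns -log2(p).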
if uni_dict.has_key(first):
uni_p = float(uni_dict[first][0])
else:
uni_p = float(uni_dict["UNK"][0])
key = first + "|" + second
if bi_dict.has_key(key):
bi_p = float(bi_dict[key][0])
else:
bi_p = 0
p = (1 - lambdaa) * bi_p + lambdaa * uni_p
return -math.log(p, 2)
read_train_info(categories_info, include.dataset_dir)
text = nltk.clean_html(html)
tokens = include.my_tokenizer(text)
num_tokens = len(tokens)
for cate_info in categories_info:
uni_train_dict = cate_info[1]
bi_train_dict = cate_info[2]
entropy = 0.0
for i in range(1, num_tokens):
first = tokens[i]
second = tokens[i - 1]
entropy += suprisal(first, second, uni_train_dict, bi_train_dict)
entropy /= num_tokens
ppl = 2 ** entropy
cate_info.append(ppl)
sorted_cate = sorted(categories_info, key = lambda x: x[3])
min_category = sorted_cate[0]
cate_names = []
for i in range(min(3, len(sorted_cate))):
if min_category[3] <= include.ppl_distinct and sorted_cate[i][3] > include.ppl_max:
continue
cate_names.append(sorted_cate[i][0])
#print sorted_cate[i][0], " ", sorted_cate[i][3]
if min_category[3] > include.ppl_distinct:
cate_names.insert(0, "0")
print json.dumps(cate_names)
| gpl-2.0 | 395,110,760,087,540,100 | 25.5625 | 84 | 0.669412 | false |
thinkAmi/9784798123028_GAE | chap5/payment_handlers.py | 1 | 11752 | # -*- coding: utf-8 -*-
import webapp2
import os
import urlparse  # Python 2.7, so use urlparse instead of the cgi module
import logging
from google.appengine.ext.webapp import template
from webapp2_extras import sessions
from webapp2_extras import sessions_memcache
from webapp2_extras import sessions_ndb
from paypal.helper import ExpressCheckout as EC
from paypal.souvenir import Souvenir
from paypal.error_operation import ErrorOperation
URL_START = '/paypal/'
URL_PAYMENT = '/paypal/payment'
URL_CANCEL_REDIRECT = '/paypal/redirect/cancel'
URL_CANCEL = '/paypal/cancel/'
SESSION_NAME = 'my-session-name'
class BaseSessionHandler(webapp2.RequestHandler):
def dispatch(self):
self.session_store = sessions.get_store(request=self.request)
try:
webapp2.RequestHandler.dispatch(self)
finally:
self.session_store.save_sessions(self.response)
@webapp2.cached_property
def session(self):
        # memcache version:
        #return self.session_store.get_session(backend='memcache')
        # secure-cookie version:
        #return self.session_store.get_session(backend='securecookie')
        # Datastore version:
return self.session_store.get_session(backend='datastore')
def has_session_error(self):
        # If the session variable cannot be retrieved, render the error page
if self.session.get(SESSION_NAME) is None:
self.response.out.write(template.render('html/payment_error.html',{}))
return True
return False
class CartPageHandler(BaseSessionHandler):
#def __init__(self, request, response):
# super(BaseSessionHandler, self).__init__(request, response)
def get(self):
souvenirInfo = Souvenir.get_souvenir()
        # Set a session variable in the cookie backend
self.session[SESSION_NAME] = 'message'
self.response.out.write(template.render('html/cart.html',
{'name': souvenirInfo['name'],
'unitPrice': souvenirInfo['unitPrice'],
}))
def post(self):
if self.has_session_error():
return
quantity = int(self.request.get('quantity'))
souvenirInfo = Souvenir.get_souvenir()
amount = (souvenirInfo['unitPrice'] + souvenirInfo['tax']) * quantity + souvenirInfo['carriage']
        nvpParams = {# API settings
'RETURNURL': self.request.host_url + URL_PAYMENT,
'CANCELURL': self.request.host_url + URL_CANCEL_REDIRECT,
'LANDINGPAGE': 'Billing',
'SOLUTIONTYPE': 'Sole',
'GIFTMESSAGEENABLE': 0,
'GIFTRECEIPTENABLE': 0,
'GIFTWRAPENABLE': 0,
'LOCALECODE': 'jp_JP',
'LANDINGPAGE': 'Billing',
'ALLOWNOTE': 0,
                     # Order-total settings
                     'PAYMENTREQUEST_0_AMT': amount,
'PAYMENTREQUEST_0_CURRENCYCODE': souvenirInfo['currency'],
'PAYMENTREQUEST_0_PAYMENTACTION': 'Sale',
'PAYMENTREQUEST_0_ITEMAMT': souvenirInfo['unitPrice'] * quantity,
'PAYMENTREQUEST_0_SHIPPINGAMT': souvenirInfo['carriage'],
'PAYMENTREQUEST_0_TAXAMT': souvenirInfo['tax'] * quantity,
                     # Line-item settings
'L_PAYMENTREQUEST_0_ITEMCATEGORY0': 'Physical',
'L_PAYMENTREQUEST_0_NAME0': souvenirInfo['name'],
'L_PAYMENTREQUEST_0_QTY0': quantity,
'L_PAYMENTREQUEST_0_TAXAMT0': souvenirInfo['tax'],
'L_PAYMENTREQUEST_0_AMT0': souvenirInfo['unitPrice'],
}
paypalResponse = EC.set_express_checkout(nvpParams)
hasError = ErrorOperation.has_set_error(self.response, paypalResponse, 'SetExpressCheckout')
if hasError:
return
contents = urlparse.parse_qs(paypalResponse.content)
        # Attach the token and redirect to the PayPal page
redirect_url = EC.generate_express_checkout_redirect_url(contents['TOKEN'][0])
return self.redirect(redirect_url)
class PaymentPageHandler(BaseSessionHandler):
def get(self):
if self.has_session_error():
return
paypalResponse = EC.get_express_checkout_details(self.request.get('token'))
hasError = ErrorOperation.has_get_error(self.response, paypalResponse, 'GetExpressCheckoutDetails')
if hasError:
return
contents = urlparse.parse_qs(paypalResponse.content)
        params = { # System-related
'postUrl': URL_PAYMENT + '?' + self.request.query_string,
                   # Customer information
'email': contents['EMAIL'][0],
'firstname': contents['FIRSTNAME'][0],
'lastname': contents['LASTNAME'][0],
                   'shipToName': contents['PAYMENTREQUEST_0_SHIPTONAME'][0], # holds the full (family + given) name
'shipToStreet': contents['PAYMENTREQUEST_0_SHIPTOSTREET'][0],
'shipToStreet2': contents['PAYMENTREQUEST_0_SHIPTOSTREET2'][0] if 'PAYMENTREQUEST_0_SHIPTOSTREET2' in contents else '',
'shipToCity': contents['PAYMENTREQUEST_0_SHIPTOCITY'][0],
'shipToState': contents['PAYMENTREQUEST_0_SHIPTOSTATE'][0],
'shipToZip': contents['PAYMENTREQUEST_0_SHIPTOZIP'][0],
                   # Possibly not retrievable in Japan?
'shipToPhoneNo': contents['PAYMENTREQUEST_0_SHIPTOPHONENUM'][0] if 'PAYMENTREQUEST_0_SHIPTOPHONENUM' in contents else '',
                   # Item information
'amount': contents['PAYMENTREQUEST_0_AMT'][0],
'itemAmount': contents['PAYMENTREQUEST_0_ITEMAMT'][0],
'shippingAmount': contents['PAYMENTREQUEST_0_SHIPPINGAMT'][0],
'taxAmount': contents['PAYMENTREQUEST_0_TAXAMT'][0],
'itemName': contents['L_PAYMENTREQUEST_0_NAME0'][0],
'itemUnitPrice': contents['L_PAYMENTREQUEST_0_AMT0'][0],
'quantity': contents['L_PAYMENTREQUEST_0_QTY0'][0],
'tax': contents['L_PAYMENTREQUEST_0_TAXAMT0'][0],
                   # Transaction info: not retrievable at this point
#'transactionId': contents['PAYMENTREQUEST_0_TRANSACTIONID'][0],
#'requestId': contents['PAYMENTREQUEST_0_PAYMENTREQUESTID'][0],
}
self.response.out.write(template.render('html/confirm.html',{'params': params,}))
    # The book used GET here, but sensitive data is involved, so this was changed to POST on the same class.
    # It is unclear why the book used GET (perhaps because it navigated with an href, so POST could not be used?).
def post(self):
if self.has_session_error():
return
payerId = self.request.get('PayerID')
souvenirInfo = Souvenir.get_souvenir()
        # Fetch the payment total again via GetExpressCheckout
paypalResponse = EC.get_express_checkout_details(self.request.get('token'))
hasGetError = ErrorOperation.has_get_error(self.response, paypalResponse, 'GetExpressCheckoutDetails')
if hasGetError:
return
contents = urlparse.parse_qs(paypalResponse.content)
        # Scary: the payment still completes without any error even if the amount differs from the GET step...
nvpParams = { 'PAYERID': payerId,
'PAYMENTREQUEST_0_PAYMENTACTION': 'Sale',
'PAYMENTREQUEST_0_AMT': contents['PAYMENTREQUEST_0_AMT'][0],
'PAYMENTREQUEST_0_CURRENCYCODE': souvenirInfo['currency'],
}
paypalResponse = EC.do_express_checkout_payment(self.request.get('token'),
nvpParams
)
hasDoError = ErrorOperation.has_do_error(self.response, paypalResponse, 'DoExpressCheckoutPayment')
if hasDoError:
return
self.response.out.write(template.render('html/success.html',{}))
class CancelHandler(webapp2.RequestHandler):
def get(self):
        # The token arrives appended to the URL, so redirect to the cancel page
return self.redirect(self.request.host_url + URL_CANCEL)
class CancelPageHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write(template.render('html/cancel.html',{}))
debug = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
config = {}
config['webapp2_extras.sessions'] = {
'secret_key': 'my-secret-key',
'cookie_name' : 'my-session-name',
'cookie_args' : {
        'max_age' : None, # if None, the cookie is deleted when the client closes
        'domain' : None, # per the Tokumaru web-security book (p. 218)
'path' : '/',
        'secure' : True, # Secure attribute: True since the connection is HTTPS -> note localhost cannot use HTTPS
        'httponly': True # keep this ON, per the Tokumaru book (p. 219)
},
'backends': {'datastore': 'webapp2_extras.appengine.sessions_ndb.DatastoreSessionFactory',
'memcache': 'webapp2_extras.appengine.sessions_memcache.MemcacheSessionFactory',
'securecookie': 'webapp2_extras.sessions.SecureCookieSessionFactory',
}
}
app = webapp2.WSGIApplication([(URL_START, CartPageHandler),
(URL_PAYMENT + '*', PaymentPageHandler),
(URL_CANCEL_REDIRECT + '.*', CancelHandler),
(URL_CANCEL, CancelPageHandler),
],
config=config,
debug=debug)
| apache-2.0 | -8,702,523,491,979,784,000 | 41.058366 | 148 | 0.498283 | false |
jlcmoore/vuExposed | src/test_alexa.py | 1 | 1263 | #!/usr/bin/env python
import random
import socket
import ssl
import sys
import time
import threading
import urllib2
from user_agents import common_user_agents
from no_https_hosts import top_1000_no_https_hosts
DELAY = 15
ROUNDS = 1
SOCKET_TIMEOUT = 1
URLS_PER_ROUND = 50
def main(rounds=ROUNDS, urls=URLS_PER_ROUND, delay=DELAY):
    # Apply the module-level socket timeout so that the socket.timeout
    # handler in open_page can actually fire.
    socket.setdefaulttimeout(SOCKET_TIMEOUT)
for i in range(rounds):
print "round number %d" % i
for j in range(urls):
user_agent = common_user_agents[random.randint(0, len(common_user_agents) - 1)]
url = top_1000_no_https_hosts[random.randint(0, len(top_1000_no_https_hosts) - 1)]
num = "%d %d" % (i, j)
thread = threading.Thread(target=open_page, args=(num, user_agent, url))
thread.daemon = True
thread.start()
        time.sleep(delay)
def open_page(num, user_agent, url):
try:
opener = urllib2.build_opener()
opener.addheaders = [('User-Agent', user_agent)]
opener.open(url)
print '%s opened %s' % (num, url)
except (urllib2.HTTPError, urllib2.URLError, ssl.SSLError, socket.error, socket.timeout) as error:
print '%s url %s open error: %s' % (num, url, error)
if __name__ == "__main__":
if len(sys.argv) > 1:
main(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
else:
main()
| mit | -3,673,820,579,408,161,300 | 28.372093 | 99 | 0.653998 | false |
abhaystoic/barati | barati/vendors/urls.py | 1 | 1793 | from django.contrib.auth.decorators import login_required
from django.conf.urls import patterns, include, url, handler404
from vendors import views
import os
# Dynamically import every clustered view: for each .py module in
# vendors/views_cluster, import the class whose name is the Title-cased
# module file name (e.g. dashboard.py -> Dashboard).
dir_name = 'vendors.views_cluster.'
file_list = os.listdir(os.path.dirname(__file__) + '/views_cluster')
for files in file_list:
    mod_name, file_ext = os.path.splitext(os.path.split(files)[-1])
    if file_ext.lower() == '.py':
        if mod_name != '__init__':
            exec "from {0} import {1}".format(dir_name + files.split(".")[0], files.split(".")[0].title())
urlpatterns = patterns('',
url(r'^$', login_required(Dashboard.as_view()), name = 'dashboard'),
url(r'^queued_orders/$', login_required(Queued_Orders.as_view()), name = 'queued_orders'),
url(r'^active_orders/$', login_required(Active_Orders.as_view()), name = 'active_orders'),
url(r'^completed_orders/$', login_required(Completed_Orders.as_view()), name = 'completed_orders'),
url(r'^inprogress_orders/$', login_required(Inprogress_Orders.as_view()), name = 'inprogress_orders'),
url(r'^confirm_order/(?P<order_id>[-\w]+)$', login_required(Confirm_Order.as_view()), name = 'confirm_order'),
url(r'^cancel_order/(?P<order_id>[-\w]+)$', login_required(Cancel_Order.as_view()), name = 'cancel_order'),
url(r'^update_order_status/(?P<order_id>[-\w]+)$', login_required(Update_Order_Status.as_view()), name = 'update_order_status'),
url(r'^list_product/$', login_required(List_Product.as_view()), name = 'list_product'),
url(r'^submit_product/$', login_required(Submit_Product.as_view()), name = 'submit_product'),
url(r'^block_product/$', login_required(Block_Product.as_view()), name = 'block_product'),
)
| apache-2.0 | -2,615,640,745,527,957,500 | 65.407407 | 131 | 0.668154 | false |
VinnieJohns/barancev_python_training | fixture/session.py | 1 | 1451 | __author__ = 'VinnieJohns'
class SessionHelper:
def __init__(self, app):
self.app = app
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def login(self, username, password):
wd = self.app.wd
self.app.open_home_page()
self.change_field_value("user", username)
self.change_field_value("pass", password)
wd.find_element_by_css_selector('input[type="submit"]').click()
def logout(self):
wd = self.app.wd
wd.find_element_by_link_text("Logout").click()
def is_logged_in(self):
wd = self.app.wd
return len(wd.find_elements_by_link_text("Logout")) > 0
def get_logged_user(self):
wd = self.app.wd
return wd.find_element_by_css_selector('div[id="top"] form[name="logout"] b').text[1:-1]
def is_logged_in_as(self, username):
return self.get_logged_user() == username
def ensure_login(self, username, password):
if self.is_logged_in():
if self.is_logged_in_as(username):
return
else:
self.logout()
self.login(username, password)
def ensure_logout(self):
if self.is_logged_in():
self.logout() | apache-2.0 | -821,486,765,525,905,800 | 29.25 | 96 | 0.576844 | false |
chanchett/ds3_python_sdk | ds3/ds3.py | 1 | 37137 | # Copyright 2014-2015 Spectra Logic Corporation. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from ctypes import *
import libds3
def checkExistence(obj, wrapper = lambda ds3Str: ds3Str.contents.value, defaultReturnValue = None):
if obj:
return wrapper(obj)
else:
return defaultReturnValue
def arrayToList(array, length, wrapper = lambda ds3Str: ds3Str.contents.value):
result = []
for i in xrange(0, length):
result.append(wrapper(array[i]))
return result
class Ds3Error(Exception):
"""Returns an exception to the client. Attributes:
reason (string): error contents message
response (string): error contents error
if(has_response == true)
statusCode (int): http return value
statusMessage (string): http return status_message
message (string): error body
Cast to string for description.
"""
def __init__(self, libds3Error):
self.reason = libds3Error.contents.message.contents.value
response = libds3Error.contents.error
self._hasResponse = False
self.statusCode = None
self.statusMessage = None
self.message = None
if response:
self._hasResponse = True
self.statusCode = response.contents.status_code
self.statusMessage = response.contents.status_message.contents.value
self.message = checkExistence(response.contents.error_body)
libds3.lib.ds3_free_error(libds3Error)
def __str__(self):
errorMessage = "Reason: " + self.reason
if self._hasResponse:
errorMessage += " | StatusCode: " + str(self.statusCode)
errorMessage += " | StatusMessage: " + self.statusMessage
if self.message:
errorMessage += " | Message: " + self.message
return errorMessage
def __repr__(self):
return self.__str__()
class Credentials(object):
"""Credential object to build client
Construct with (accessKey, secretKey) for target device or build client from environment variables
"""
def __init__(self, accessKey, secretKey):
self.accessKey = accessKey
self.secretKey = secretKey
class Ds3Bucket(object):
"""Descibes a Bucket (name and creationDate)
Members:
name (string) : The name of the bucket.
creationDate (date) : The date and time the bucket was created in the format YYYY-MM-DDThh:mm:ss.xxxZ.
Cast to string for description.
"""
def __init__(self, ds3Bucket):
self.name = ds3Bucket.name.contents.value
self.creationDate = ds3Bucket.creation_date.contents.value
def __str__(self):
return "Name: " + self.name + " | Creation Date: " + self.creationDate
def __repr__(self):
return self.__str__()
class Ds3Owner(object):
"""Describes an object owner (name and id)
Members:
name (string) : The name of the owner.
id (string) : The UUID of the object owner.
Cast to string for description.
"""
def __init__(self, ds3Owner):
ownerContents = ds3Owner.contents
self.name = ownerContents.name.contents.value
self.id = ownerContents.id.contents.value
def __str__(self):
return "Name: " + self.name + " | ID: " + self.id
def __repr__(self):
return self.__str__()
class Ds3Object(object):
"""Describes a Object
Members:
name (string) : The object name
etag (string) : The http entity tag.
size (long) : The size of the object in bytes.
owner (Ds3Owner) : The owner of the object.
Cast to string for description.
"""
def __init__(self, ds3Object):
self.name = ds3Object.name.contents.value
self.etag = checkExistence(ds3Object.etag)
self.size = ds3Object.size
self.owner = Ds3Owner(ds3Object.owner)
def __str__(self):
return "Name: " + self.name + " | Size: " + str(self.size) + " | Etag: " + str(self.etag) + " | Owner: " + str(self.owner)
def __repr__(self):
return self.__str__()
class Ds3BucketDetails(object):
"""Response for ListObjects (Get Bucket)
Members:
commonPrefixes (List<string>) : If a delimiter is specified, contains the portion of an object's name between the prefix and the next occurrence of the delimiter.
creationDate (string) : The date and time the bucket was created in the format YYYY-MM-DDThh:mm:ss.xxxZ.
delimiter (string) : The character used to group object names.
isTruncated (bool) : Specifies whether the results were truncated (true) or not (false) due to the number of results exceeding MaxKeys.
marker (string) : The object name where the bucket listing begins. Included in the response if it was specified in the request.
maxKeys (int) : The maximum number of keys (object names) returned in the response.
name (string) : The name of the bucket.
nextMarker (string) : If the delimiter parameter was specified, and isTruncated is true, indicates the object name to use in the marker field in the next request to get the next set of objects.
        objects (List<Ds3Object>) : Bucket contents matching search criteria
        prefix (string) : The string used to limit the response keys. Only object names that begin with the specified prefix are listed.
    Cast to string for description.
"""
def __init__(self, ds3Bucket):
bucketContents = ds3Bucket.contents
self.name = bucketContents.name.contents.value
self.creationDate = checkExistence(bucketContents.creation_date)
self.isTruncated = bool(bucketContents.is_truncated)
self.marker = checkExistence(bucketContents.marker)
self.delimiter = checkExistence(bucketContents.delimiter)
self.maxKeys = bucketContents.max_keys
self.nextMarker = checkExistence(bucketContents.next_marker)
self.prefix = checkExistence(bucketContents.prefix)
self.commonPrefixes = arrayToList(bucketContents.common_prefixes, bucketContents.num_common_prefixes)
self.objects = arrayToList(bucketContents.objects, bucketContents.num_objects, wrapper = Ds3Object)
class Ds3BulkObject(object):
"""Object description from bulk response
Members:
name (string) : The object name
length (long) : The length in bytes of the object or part of the object.
offset (long) : The offset in bytes from the start of the object.
inCache (bool) : Indicates if the object is currently in cache on the BlackPearl Deep Storage Gateway.
Cast to string for description.
"""
def __init__(self, bulkObject):
self.name = bulkObject.name.contents.value
self.length = bulkObject.length
self.offset = bulkObject.offset
self.inCache = bool(bulkObject.in_cache)
def __str__(self):
return "Name:" + self.name + " | Length: " + str(self.length) + " | Offset: " + str(self.offset) + " | InCache: " + str(self.inCache)
def __repr__(self):
return self.__str__()
class Ds3CacheList(object):
"""Collection of chunks to process.
Members:
chunkId (string) : The UUID for the job chunk.
chunkNumber (string) : The position of the chunk within the job.
nodeId (string) : The UUID for the BlackPearl node.
objects (List<Ds3BulkObject>) : Container for information about the objects.
"""
def __init__(self, bulkObjectList):
contents = bulkObjectList.contents
self.chunkNumber = contents.chunk_number
self.nodeId = checkExistence(contents.node_id)
self.serverId = checkExistence(contents.server_id)
self.chunkId = contents.chunk_id.contents.value
self.objects = arrayToList(contents.list, contents.size, wrapper = Ds3BulkObject)
class Ds3BulkPlan(object):
"""Collection of job chunks and associated parameters.
Members:
startDate (string) : The date and time the job was started in the format YYYY-MM-DDThh:mm:ss.xxxZ.
userId (string) : The UUID for the user who initiated the job.
userName (string) : The username of the user who initiated the job.
requestType (string) : Specifies whether job chunks are written as quickly as possible (PERFORMANCE) or across as few tapes as possible (CAPACITY). Values: CAPACITY, PERFORMANCE,
status (string) : Values COMPLETED, CANCELLED, IN_PROGRESS
cachedSize (long) : The amount of data successfully transferred to the BlackPearl Deep Storage Gateway from the client.
completedSize (long) : The amount of data written to tape media.
originalSize (long) : The amount of data for the job to transfer.
jobId (string) : The UUID for the job.
chunks (Ds3CacheList) : list of chunks to process.
Cast to string for description.
"""
def __init__(self, ds3BulkResponse):
contents = ds3BulkResponse.contents
self.bucketName = checkExistence(contents.bucket_name)
self.cachedSize = checkExistence(contents.cached_size_in_bytes, 0)
self.completedSize = checkExistence(contents.completed_size_in_bytes, 0)
self.jobId = checkExistence(contents.job_id)
self.originalSize = checkExistence(contents.original_size_in_bytes, 0)
self.startDate = checkExistence(contents.start_date)
self.userId = checkExistence(contents.user_id)
self.userName = checkExistence(contents.user_name)
self.requestType = contents.request_type
self.status = contents.status
self.chunks = arrayToList(contents.list, contents.list_size, wrapper = Ds3CacheList)
def __str__(self):
response = "JobId: " + self.jobId
response += " | Status: " + str(self.status)
response += " | Request Type: " + str(self.requestType)
response += " | BucketName: " + self.bucketName
response += " | UserName: " + self.userName
response += " | Chunks: " + str(self.chunks)
return response
def __repr__(self):
return self.__str__()
class Ds3AllocateChunkResponse(object):
"""This class has been deprecated
"""
def __init__(self, ds3AllocateChunkResponse):
contents = ds3AllocateChunkResponse.contents
self.retryAfter = contents.retry_after
self.chunk = Ds3CacheList(contents.objects)
class Ds3AvailableChunksResponse(object):
"""A list of all chunks in a job that can currently be processed.
Members:
retryAfter (int) : retry interval in seconds.
bulkPlan(Ds3BulkPlan) : collection of chunks.
"""
def __init__(self, ds3AvailableChunksResponse):
contents = ds3AvailableChunksResponse.contents
self.retryAfter = contents.retry_after
self.bulkPlan = Ds3BulkPlan(contents.object_list)
class Ds3SearchObject(object):
"""Search parameters for Get Objects
Members:
bucketId (string) : The UUID or name for a bucket.
id (string) : The UUID for an object.
lastModified (string) : The date the object was created in the format YYYY-MM-DD hh:mm:ss.xxx.
name (string) : The name of an object.
owner (Ds3Owner) : The owner of an object.
size (long) : The size of the object in bytes.
storageClass (string) : unused.
type (string) : The type of object. Values: DATA, FOLDER
version (string) : The version of an object.
Cast to string for description.
"""
def __init__(self, ds3SearchObject):
contents = ds3SearchObject.contents
self.bucketId = checkExistence(contents.bucket_id)
self.id = checkExistence(contents.id)
self.name = checkExistence(contents.name)
self.size = contents.size
self.owner = checkExistence(contents.owner, wrapper = Ds3Owner)
self.lastModified = checkExistence(contents.last_modified)
self.storageClass = checkExistence(contents.storage_class)
self.type = checkExistence(contents.type)
self.version = checkExistence(contents.version)
def __str__(self):
response = "BucketId: " + str(self.bucketId)
response += " | Id: " + str(self.id)
response += " | Name: " + str(self.name)
response += " | Size: " + str(self.size)
response += " | Owner: (" + str(self.id) + ")"
response += " | LastModified: " + str(self.lastModified)
response += " | StorageClass: " + str(self.storageClass)
response += " | Type: " + str(self.type)
response += " | Version: " + str(self.version)
return response
class Ds3BuildInformation(object):
"""Describes the Black Pearl Software
Members:
branch (string) : The branch used to build the API.
revision (string) : The revision of the software build.
version (string) : The version of the software build.
Cast to string for description.
"""
def __init__(self, ds3BuildInfo):
contents = ds3BuildInfo.contents
self.branch = checkExistence(contents.branch)
self.revision = checkExistence(contents.revision)
self.version = checkExistence(contents.version)
def __str__(self):
response = "Branch: " + str(self.branch)
response += " | Revision: " + str(self.revision)
response += " | Version: " + str(self.version)
return response
class Ds3SystemInformation(object):
"""Describes the Black Pearl Hardware and Software
Members:
apiVersion (string) : The version of the DS3 API. The version is in the form X.Y, where X is the MD5 checksum across all request handler major revisions and Y is the MD5 checksum across all request handler full versions (including the minor revision).
buildInformation (ds3BuildInfo) : A container for the information about the build.
serialNumber (string) : The serial number of the BlackPearl Deep Storage Gateway
Cast to string for description.
"""
def __init__(self, ds3SystemInfo):
contents = ds3SystemInfo.contents
self.apiVersion = checkExistence(contents.api_version)
self.serialNumber = checkExistence(contents.serial_number)
self.buildInformation = checkExistence(contents.build_information, wrapper = Ds3BuildInformation)
def __str__(self):
response = "API Version: " + str(self.apiVersion)
response += " | Serial Number: " + str(self.serialNumber)
response += " | Build Information: " + str(self.buildInformation)
return response
class Ds3SystemHealthInformation(object):
"""Verifies that the system appears to be online and functioning normally. If critical components in the data path between the client and the BlackPearl gateway are unresponsive, an error is generated.
Members:
msRequiredToVerifyDataPlannerHealth (long) : The amount of time, in milliseconds, that it took the gateway to respond.
"""
def __init__(self, ds3HealthInfo):
contents = ds3HealthInfo.contents
self.msRequiredToVerifyDataPlannerHealth = contents.ms_required_to_verify_data_planner_health
def typeCheck(input_arg, type_to_check):
if isinstance(input_arg, type_to_check):
return input_arg
else:
raise TypeError("expected instance of type " + type_to_check.__name__ + ", got instance of type " + type(input_arg).__name__)
def ensureUTF8(input_arg):
if isinstance(input_arg, unicode):
return input_arg.encode('utf-8')
return input_arg
def typeCheckString(input_arg):
return ensureUTF8(typeCheck(input_arg, basestring))
def typeCheckObjectList(fileList):
result = []
for item in fileList:
if isinstance(item, tuple):
result.append((typeCheckString(item[0]), item[1]))
else:
result.append(typeCheckString(item))
return result
def enumCheck(input_arg, enum_dict):
if input_arg in enum_dict.keys():
return enum_dict[input_arg]
else:
raise TypeError("expected value to be one of " + str(enum_dict.keys()) + ", got " + str(input_arg))
def enumCheckDs3ObjectType(input_arg):
return enumCheck(input_arg, {"DATA":0, "FOLDER":1})
def addMetadataToRequest(request, metadata):
    if metadata:
        for key in metadata:
            if type(metadata[key]) is list or type(metadata[key]) is tuple:
                for value in metadata[key]:
                    libds3.lib.ds3_request_set_metadata(request, typeCheckString(key), typeCheckString(value))
            else:
                libds3.lib.ds3_request_set_metadata(request, typeCheckString(key), typeCheckString(metadata[key]))
def extractMetadataFromResponse(metaData):
result = {}
keys = libds3.lib.ds3_metadata_keys(metaData)
if keys:
for key_index in xrange(0, keys.contents.num_keys):
key = keys.contents.keys[key_index].contents.value
metadataEntry = libds3.lib.ds3_metadata_get_entry(metaData, key)
result[key] = arrayToList(metadataEntry.contents.values, metadataEntry.contents.num_values)
libds3.lib.ds3_free_metadata_entry(metadataEntry)
libds3.lib.ds3_free_metadata_keys(keys)
return result
def createClientFromEnv():
"""Build a Ds3Client from environment varialbles.
Required: DS3_ACCESS_KEY, DS3_SECRET_KEY, DS3_ENDPOINT
Optional: http_proxy
"""
libDs3Client = POINTER(libds3.LibDs3Client)()
error = libds3.lib.ds3_create_client_from_env(byref(libDs3Client))
if error:
raise Ds3Error(error)
clientContents = libDs3Client.contents
clientCreds = clientContents.creds.contents
creds = Credentials(clientCreds.access_id.contents.value, clientCreds.secret_key.contents.value)
proxyValue = checkExistence(clientContents.proxy)
client = Ds3Client(clientContents.endpoint.contents.value, creds, proxyValue)
libds3.lib.ds3_free_creds(clientContents.creds)
libds3.lib.ds3_free_client(libDs3Client)
return client
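# Example usage (a sketch, assuming DS3_ACCESS_KEY, DS3_SECRET_KEY and
# DS3_ENDPOINT are exported in the environment):
#
#   client = createClientFromEnv()
#   for bucket in client.getService():
#       print(bucket.name)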
class Ds3Client(object):
"""This object is used to communicate with a remote DS3/Spectra S3 endpoint. All communication with the Spectra S3 API is done with this class.
"""
def __init__(self, endpoint, credentials, proxy = None):
self._ds3Creds = libds3.lib.ds3_create_creds(c_char_p(credentials.accessKey), c_char_p(credentials.secretKey))
self._client = libds3.lib.ds3_create_client(c_char_p(endpoint), self._ds3Creds)
self.credentials = credentials
self.endpoint = endpoint
self.proxy = proxy
if proxy:
libds3.lib.ds3_client_proxy(self._client, proxy)
def verifySystemHealth(self):
"""Returns how long it took to verify the health of the system.
In the event that the system is in a bad state, an error will be thrown.
"""
response = POINTER(libds3.LibDs3VerifySystemHealthResponse)()
request = libds3.lib.ds3_init_verify_system_health()
error = libds3.lib.ds3_verify_system_health(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3SystemHealthInformation(response)
libds3.lib.ds3_free_verify_system_health(response)
return result
def getService(self):
"""Returns a list of all the buckets the current access id has access to.
"""
response = POINTER(libds3.LibDs3GetServiceResponse)()
request = libds3.lib.ds3_init_get_service()
error = libds3.lib.ds3_get_service(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
contents = response.contents
for i in xrange(0, contents.num_buckets):
yield Ds3Bucket(contents.buckets[i])
libds3.lib.ds3_free_service_response(response)
def getBucket(self, bucketName, prefix = None, nextMarker = None, delimiter = None, maxKeys = None):
"""Returns a list of all the objects in a specific bucket as specified by `bucketName`. This will return at most 1000 objects.
In order to retrieve more, pagination must be used. The `nextMarker` is used to specify where the next 1000 objects will
start listing from.
`delimiter` can be used to list objects like directories. So for example, if delimiter is set to '/' then it will return
        a list of 'directories' in the common prefixes field in the response. In order to list all the files in that directory use the prefix parameter.
For example:
client.getBucket("my_bucket", prefix = 'dir', delimiter = '/')
The above will list any files and directories that are in the 'dir' directory.
"""
response = POINTER(libds3.LibDs3GetBucketResponse)()
request = libds3.lib.ds3_init_get_bucket(typeCheckString(bucketName))
if prefix:
libds3.lib.ds3_request_set_prefix(request, typeCheckString(prefix))
if nextMarker:
libds3.lib.ds3_request_set_marker(request, nextMarker)
if delimiter:
libds3.lib.ds3_request_set_delimiter(request, typeCheckString(delimiter))
if maxKeys:
libds3.lib.ds3_request_set_max_keys(request, maxKeys)
error = libds3.lib.ds3_get_bucket(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bucket = Ds3BucketDetails(response)
libds3.lib.ds3_free_bucket_response(response)
return bucket
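    # Paging sketch (not from the original source): getBucket returns at most
    # maxKeys objects per call, so feed nextMarker back in until isTruncated
    # goes False.
    #
    #   marker = None
    #   while True:
    #       details = client.getBucket("my_bucket", nextMarker=marker)
    #       for obj in details.objects:
    #           handle(obj)  # 'handle' is a placeholder
    #       if not details.isTruncated:
    #           break
    #       marker = details.nextMarker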
def headObject(self, bucketName, objectName):
"""Returns the metadata for the retrieved object as a dictionary of lists. If the object does not exist
an error is thrown with a status code of 404.
"""
response = POINTER(libds3.LibDs3Metadata)()
request = libds3.lib.ds3_init_head_object(typeCheckString(bucketName), typeCheckString(objectName))
error = libds3.lib.ds3_head_object(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
metadata = extractMetadataFromResponse(response)
libds3.lib.ds3_free_metadata(response)
return metadata
def headBucket(self, bucketName):
"""Checks whether a bucket exists.
"""
request = libds3.lib.ds3_init_head_bucket(typeCheckString(bucketName))
error = libds3.lib.ds3_head_bucket(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteFolder(self, bucketName, folderName):
"""Deletes a folder and all the objects contained within it.
"""
request = libds3.lib.ds3_init_delete_folder(typeCheckString(bucketName), typeCheckString(folderName))
error = libds3.lib.ds3_delete_folder(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def getSystemInformation(self):
"""Returns the version and other information about the Spectra S3 endpoint.
"""
response = POINTER(libds3.LibDs3GetSystemInformationResponse)()
request = libds3.lib.ds3_init_get_system_information()
error = libds3.lib.ds3_get_system_information(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3SystemInformation(response)
libds3.lib.ds3_free_get_system_information(response)
return result
def getObject(self, bucketName, objectName, offset, jobId, realFileName = None):
"""Gets an object from the Spectra S3 endpoint. Use `realFileName` when the `objectName`
that you are getting from Spectra S3 does not match what will be on the local filesystem.
Returns the metadata for the retrieved object as a dictionary, where keys are
associated with a list of the values for that key.
This can only be used within the context of a Bulk Get Job.
"""
objectName = typeCheckString(objectName)
effectiveFileName = objectName
if realFileName:
effectiveFileName = typeCheckString(realFileName)
response = POINTER(libds3.LibDs3Metadata)()
request = libds3.lib.ds3_init_get_object_for_job(typeCheckString(bucketName), objectName, offset, jobId)
localFile = open(effectiveFileName, "wb")
localFile.seek(offset, 0)
error = libds3.lib.ds3_get_object_with_metadata(self._client, request, byref(c_int(localFile.fileno())), libds3.lib.ds3_write_to_fd, byref(response))
localFile.close()
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
metadata = extractMetadataFromResponse(response)
libds3.lib.ds3_free_metadata(response)
return metadata
def putBucket(self, bucketName):
"""Creates a new bucket where objects can be stored.
"""
bucketName = typeCheckString(bucketName)
request = libds3.lib.ds3_init_put_bucket(bucketName)
error = libds3.lib.ds3_put_bucket(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def putObject(self, bucketName, objectName, offset, size, jobId, realFileName = None, metadata = None):
"""Puts an object to the Spectra S3 endpoint. Use `realFileName` when the `objectName`
that you are putting to Spectra S3 does not match what is on the local filesystem.
Use metadata to set the metadata for the object. metadata's value should be
a dictionary, where keys are associated with either a value or a list of the
values for that key.
This can only be used within the context of a Spectra S3 Bulk Put job.
"""
objectName = typeCheckString(objectName)
effectiveFileName = objectName
if realFileName:
effectiveFileName = typeCheckString(realFileName)
request = libds3.lib.ds3_init_put_object_for_job(typeCheckString(bucketName), objectName, c_ulonglong(offset), c_ulonglong(size), jobId)
addMetadataToRequest(request, metadata)
localFile = open(effectiveFileName, "rb")
localFile.seek(offset, 0)
error = libds3.lib.ds3_put_object(self._client, request, byref(c_int(localFile.fileno())), libds3.lib.ds3_read_from_fd)
localFile.close()
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteObject(self, bucketName, objName):
"""Deletes an object from the specified bucket. If deleting several files at once, use `deleteObjects` instead.
"""
request = libds3.lib.ds3_init_delete_object(typeCheckString(bucketName), typeCheckString(objName))
error = libds3.lib.ds3_delete_object(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteObjects(self, bucketName, fileNameList):
"""Deletes multiple objects from the bucket using a single API call.
"""
bulkObjs = libds3.toDs3BulkObjectList(typeCheckObjectList(fileNameList))
request = libds3.lib.ds3_init_delete_objects(typeCheckString(bucketName))
error = libds3.lib.ds3_delete_objects(self._client, request, bulkObjs)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def deleteBucket(self, bucketName):
"""Deletes a bucket. If the bucket is not empty, then this request will fail. All objects must be deleted first
before the bucket can be deleted.
"""
request = libds3.lib.ds3_init_delete_bucket(typeCheckString(bucketName))
error = libds3.lib.ds3_delete_bucket(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def putBulk(self, bucketName, fileInfoList):
"""Initiates a start bulk put with the remote Spectra S3 endpoint. The `fileInfoList` is a list of (objectName, size) tuples.
`objectName` does not have to be the actual name on the local file system, but it will be the name that you must
initiate a single object put to later. `size` must reflect the actual size of the file that is being put.
"""
bulkObjs = libds3.toDs3BulkObjectList(typeCheckObjectList(fileInfoList))
response = POINTER(libds3.LibDs3BulkResponse)()
request = libds3.lib.ds3_init_put_bulk(typeCheckString(bucketName), bulkObjs)
error = libds3.lib.ds3_bulk(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bulkResponse = Ds3BulkPlan(response)
libds3.lib.ds3_free_bulk_response(response)
return bulkResponse
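    # Typical bulk put flow (a sketch; 'client', 'bucket' and 'names' are
    # placeholders). putBulk only creates the job plan -- the bytes are moved
    # with putObject once chunks become available, and a complete client would
    # keep polling getAvailableChunks until every chunk is done:
    #
    #   plan = client.putBulk(bucket, [(n, os.path.getsize(n)) for n in names])
    #   chunks = client.getAvailableChunks(plan.jobId)
    #   for chunk in chunks.bulkPlan.chunks:
    #       for obj in chunk.objects:
    #           client.putObject(bucket, obj.name, obj.offset, obj.length, plan.jobId)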
def getBulk(self, bucketName, fileNameList, chunkOrdering = True):
"""Initiates a start bulk get with the remote Spectra S3 endpoint. All the files that will be retrieved must be specified in
`fileNameList`.
"""
bulkObjs = libds3.toDs3BulkObjectList(typeCheckObjectList(fileNameList))
response = POINTER(libds3.LibDs3BulkResponse)()
chunkOrderingValue = libds3.LibDs3ChunkOrdering.IN_ORDER
if not chunkOrdering:
chunkOrderingValue = libds3.LibDs3ChunkOrdering.NONE
request = libds3.lib.ds3_init_get_bulk(typeCheckString(bucketName), bulkObjs, chunkOrderingValue)
error = libds3.lib.ds3_bulk(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bulkResponse = Ds3BulkPlan(response)
libds3.lib.ds3_free_bulk_response(response)
return bulkResponse
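    # The matching bulk get flow (a sketch; placeholders as in the putBulk
    # example above):
    #
    #   plan = client.getBulk(bucket, names)
    #   chunks = client.getAvailableChunks(plan.jobId)
    #   for chunk in chunks.bulkPlan.chunks:
    #       for obj in chunk.objects:
    #           client.getObject(bucket, obj.name, obj.offset, plan.jobId)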
def getObjects(self, bucketName = None, creationDate = None, objId = None, name = None, pageLength = None, pageOffset = None, objType = None, version = None):
"""Returns a list of objects.
Optional Search parameters:
bucketName (string) : The UUID or name for a bucket.
creationDate (string) : The date the object was created in the format YYYY-MM-DD hh:mm:ss.xxx.
name (string) : The name of an object.
objId (string) : The UUID for an object.
objType (string) : The type of object. Values: DATA, FOLDER
pageLength (int) : The maximum number of objects to list. The default is all items after pageOffset.
pageOffset (int) : The starting point for the first object to list. The default is 0.
version (string) : The version of an object.
"""
request = libds3.lib.ds3_init_get_objects()
response = POINTER(libds3.LibDs3GetObjectsResponse)()
if bucketName:
libds3.lib.ds3_request_set_bucket_name(request, typeCheckString(bucketName))
if creationDate:
libds3.lib.ds3_request_set_creation_date(request, typeCheckString(creationDate))
if objId:
libds3.lib.ds3_request_set_id(request, typeCheckString(objId))
if name:
libds3.lib.ds3_request_set_name(request, typeCheckString(name))
if pageLength:
libds3.lib.ds3_request_set_page_length(request, typeCheckString(str(pageLength)))
if pageOffset:
libds3.lib.ds3_request_set_page_offset(request, typeCheckString(str(pageOffset)))
if objType:
libds3.lib.ds3_request_set_type(request, enumCheckDs3ObjectType(objType))
if version:
libds3.lib.ds3_request_set_version(request, typeCheckString(str(version)))
error = libds3.lib.ds3_get_objects(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = arrayToList(response.contents.objects, response.contents.num_objects, wrapper = Ds3SearchObject)
libds3.lib.ds3_free_objects_response(response)
return result
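    # Search sketch: list only DATA objects in one bucket ('my_bucket' is a
    # placeholder).
    #
    #   for found in client.getObjects(bucketName="my_bucket", objType="DATA"):
    #       print(found.name, found.size)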
def allocateChunk(self, chunkId):
"""*Deprecated* - Allocates a specific chunk to be allocated in cache so that the objects in that chunk can safely be put without a need
to handle 307 redirects.
"""
request = libds3.lib.ds3_init_allocate_chunk(chunkId)
response = POINTER(libds3.LibDs3AllocateChunkResponse)()
error = libds3.lib.ds3_allocate_chunk(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3AllocateChunkResponse(response)
libds3.lib.ds3_free_allocate_chunk_response(response)
return result
def getAvailableChunks(self, jobId):
"""Returns a list of all chunks in a job that can currently be processed. It will return a subset of all chunks, and it
will return that same set of chunks until all the data in one of the chunks returned has been either completely gotten,
or been completely put.
"""
request = libds3.lib.ds3_init_get_available_chunks(jobId)
response = POINTER(libds3.LibDs3GetAvailableChunksResponse)()
error = libds3.lib.ds3_get_available_chunks(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = Ds3AvailableChunksResponse(response)
libds3.lib.ds3_free_available_chunks_response(response)
return result
def _sendJobRequest(self, func, request):
response = POINTER(libds3.LibDs3BulkResponse)()
error = func(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
bulkResponse = Ds3BulkPlan(response)
libds3.lib.ds3_free_bulk_response(response)
return bulkResponse
def getJob(self, jobId):
"""Returns information about a job, including all the chunks in the job, as well as the status of the job.
"""
request = libds3.lib.ds3_init_get_job(jobId)
return self._sendJobRequest(libds3.lib.ds3_get_job, request)
def getJobs(self):
"""Returns a list of all jobs.
"""
request = libds3.lib.ds3_init_get_jobs()
response = POINTER(libds3.LibDs3GetJobsResponse)()
error = libds3.lib.ds3_get_jobs(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
result = []
for index in xrange(0, response.contents.jobs_size):
result.append(Ds3BulkPlan(response.contents.jobs[index]))
libds3.lib.ds3_free_get_jobs_response(response)
return result
def putJob(self, jobId):
"""Modifies a job to reset the timeout timer for the job.
"""
request = libds3.lib.ds3_init_put_job(jobId)
return self._sendJobRequest(libds3.lib.ds3_put_job, request)
def deleteJob(self, jobId):
"""Cancels a currently in progress job.
"""
request = libds3.lib.ds3_init_delete_job(jobId)
error = libds3.lib.ds3_delete_job(self._client, request)
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
def getPhysicalPlacement(self, bucketName, fileNameList, fullDetails = False):
"""Returns where in the Spectra S3 system each file in `fileNameList` is located.
"""
response = POINTER(libds3.LibDs3GetPhysicalPlacementResponse)()
bulkObjs = libds3.toDs3BulkObjectList(typeCheckObjectList(fileNameList))
        bucketName = typeCheckString(bucketName)
        if fullDetails:
            request = libds3.lib.ds3_init_get_physical_placement_full_details(bucketName, bulkObjs)
        else:
            request = libds3.lib.ds3_init_get_physical_placement(bucketName, bulkObjs)
error = libds3.lib.ds3_get_physical_placement(self._client, request, byref(response))
libds3.lib.ds3_free_request(request)
if error:
raise Ds3Error(error)
placements = []
if response:
placements = arrayToList(response.contents.tapes, response.contents.num_tapes, lambda obj: obj.barcode.contents.value)
libds3.lib.ds3_free_get_physical_placement_response(response)
return placements
| apache-2.0 | 7,559,333,647,387,191,000 | 40.401338 | 259 | 0.665886 | false |
edx/ecommerce | ecommerce/enterprise/management/commands/update_effective_contract_discount.py | 1 | 3927 | """
Update the effective contract discount percentage and discounted price for order lines
created by Manual Order Offers (via the Enrollment API) for a given Enterprise Customer UUID.
"""
import datetime
import logging
from decimal import Decimal
from django.core.management import BaseCommand
from oscar.core.loading import get_model
from ecommerce.enterprise.mixins import EnterpriseDiscountMixin
from ecommerce.extensions.order.conditions import ManualEnrollmentOrderDiscountCondition
from ecommerce.programs.custom import class_path
Condition = get_model('offer', 'Condition')
ConditionalOffer = get_model('offer', 'ConditionalOffer')
OrderDiscount = get_model('order', 'OrderDiscount')
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Command(BaseCommand, EnterpriseDiscountMixin):
"""
Management command to update the effective_contract_discount_percentage and
effective_contract_discounted_price price for order lines created by
Manual Order Offers for a given Enterprise Customer UUID
"""
def add_arguments(self, parser):
""" Adds argument(s) to the the command """
parser.add_argument(
'--enterprise-customer',
action='store',
dest='enterprise_customer',
default=None,
required=True,
help='UUID of an existing enterprise customer.',
type=str,
)
parser.add_argument(
'--discount-percentage',
action='store',
dest='discount_percentage',
default=None,
required=True,
help='The discount to apply to orders as a percentage (0-100).',
type=float,
)
parser.add_argument(
'--start-date',
action='store',
dest='start_date',
default=None,
help='The starting date to change all orders forward from this point.',
type=datetime.datetime.fromisoformat,
)
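    # Example invocation (a sketch; the UUID, percentage and date are
    # placeholder values):
    #
    #   python manage.py update_effective_contract_discount \
    #       --enterprise-customer <enterprise-uuid> \
    #       --discount-percentage 15.0 \
    #       --start-date 2020-01-01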
def handle(self, *args, **options):
enterprise_customer = options['enterprise_customer']
discount_percentage = options['discount_percentage']
start_date = options['start_date']
logger.info(
'Updating all Manual Orders for Enterprise [%s] to have a discount of [%f].',
enterprise_customer,
discount_percentage
)
# An enterprise should only have a single ManualEnrollmentOrderDiscountCondition used for
# API enrollment orders
try:
condition = Condition.objects.get(
proxy_class=class_path(ManualEnrollmentOrderDiscountCondition),
enterprise_customer_uuid=enterprise_customer
)
except Condition.DoesNotExist:
logger.exception(
'Unable to find ManualEnrollmentOrderDiscountCondition for enterprise [%s]',
enterprise_customer
)
return
# Using the ConditionalOffer we can then get back to a list of OrderDiscounts and Orders
try:
offer = ConditionalOffer.objects.get(condition=condition)
except ConditionalOffer.DoesNotExist:
logger.exception('Unable to find ConditionalOffer for [%s]', condition)
return
discounts = OrderDiscount.objects.filter(offer_id=offer.id).select_related('order')
if start_date:
discounts = discounts.filter(order__date_placed__gte=start_date)
for discount in discounts:
order = discount.order
# ManualEnrollment orders only have one order_line per order, so no need to loop over lines here
self.update_orderline_with_enterprise_discount_metadata(
order=order,
line=order.lines.first(),
discount_percentage=Decimal(discount_percentage),
is_manual_order=True
)
| agpl-3.0 | 6,452,113,077,746,869,000 | 35.700935 | 108 | 0.641966 | false |
wmontgomery4/cells | src/cell.py | 1 | 5100 | """
Class for cell type.
"""
import pymunk as pm
import copy
import math
import time
import random
import collision
# Physics constants.
DENSITY = 1e-4
FRICTION = 0.3
ENERGY_FORCE_RATIO = 1e-4
# Gene definitions and ranges.
GENES = ['radius', 'color']
RADIUS_MIN = 5.
RADIUS_MAX = 30.
COLOR_ALPHA = 0.8
MUTATION_RATE = 0.1
LOG_GAIN_MIN = -5.
LOG_GAIN_MAX = -3.
class Genome():
""" Container class for cell genomes. """
def __init__(self):
# Uniform for radius.
self.radius = random.uniform(RADIUS_MIN, RADIUS_MAX)
# Dirichlet distribution for color.
self.r = random.gammavariate(COLOR_ALPHA, 1)
self.g = random.gammavariate(COLOR_ALPHA, 1)
self.b = random.gammavariate(COLOR_ALPHA, 1)
N = self.r + self.g + self.b
self.rgb = (self.r/N, self.g/N, self.b/N)
# Log-Uniform for gain.
self.log_gain = random.uniform(LOG_GAIN_MIN, LOG_GAIN_MAX)
self.gain = math.exp(self.log_gain)
def mutate(self, rate=MUTATION_RATE):
""" Randomize each gene with probability 'rate'. """
# Add gaussian noise to radius.
self.radius += random.gauss(0,self.radius*rate)
self.radius = min(RADIUS_MAX, max(RADIUS_MIN, self.radius))
# Potentially draw new gammavariates.
if random.random() < rate:
self.r = random.gammavariate(COLOR_ALPHA, 1)
if random.random() < rate:
self.g = random.gammavariate(COLOR_ALPHA, 1)
if random.random() < rate:
self.b = random.gammavariate(COLOR_ALPHA, 1)
N = self.r + self.g + self.b
self.rgb = (self.r/N, self.g/N, self.b/N)
        # Add gaussian noise to gain (in log space), clamped to the valid range.
        self.log_gain += random.gauss(0, rate)
        self.log_gain = min(LOG_GAIN_MAX, max(LOG_GAIN_MIN, self.log_gain))
        self.gain = math.exp(self.log_gain)
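# Quick sanity sketch (not part of the original source): draw a genome and
# mutate it repeatedly; radius must stay clamped and rgb must stay normalized.
#
#   g = Genome()
#   for _ in range(100):
#       g.mutate()
#   assert RADIUS_MIN <= g.radius <= RADIUS_MAX
#   assert abs(sum(g.rgb) - 1.0) < 1e-9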
class Cell():
""" Container class for cell automatons. """
def __init__(self, world, genes=None):
""" Initialize a Cell with 'genes', random if None given. """
self.world = world
if genes is None:
genes = Genome()
self.genes = genes
# Initialize body.
r = self.genes.radius
mass = DENSITY * r**2
moment = pm.moment_for_circle(mass, 0, r, (0,0))
self.body = pm.Body(mass, moment)
# Initialize shape.
self.shape = pm.Circle(self.body, r, (0,0))
self.shape.friction = FRICTION
self.shape.collision_type = collision.CELL
self.shape.filter = pm.ShapeFilter(categories=collision.CELL)
# Store reference to cell in shape for collisons.
# TODO: This feels hacky, sign of bad design?
self.shape.cell = self
# Initialize life.
self.time = time.time()
self.alive = True
self.force = (0, 0)
self.energy = 0
self.max_energy = 2*r**2
self.update_energy(r**2)
def update_energy(self, delta):
""" Add or consume energy. """
self.energy += delta
if self.energy <= 0:
self.die()
return
elif self.energy > self.max_energy:
self.energy = self.max_energy
# Set base color proportional to energy and genes.
base = 0.5*self.max_energy
mult = 255 * self.energy / base
color = [mult*c for c in self.genes.rgb]
# Add equally to RGB past the base energy.
if self.energy > base:
diff = self.energy - base
add = 255 * diff / base
color = [min(255, c + add) for c in color]
self.shape.color = color
def die(self):
""" Remove self from space. """
self.body.space.remove(self.body, self.shape)
self.alive = False
def think(self):
""" Choose a new action. """
# Query for closest food.
# TODO: Add vision for nearest cells too.
r = self.genes.radius
pos = self.body.position
mask = pm.ShapeFilter.ALL_MASKS ^ (collision.CELL | collision.WALL)
info = self.world.space.point_query_nearest(pos, 12*r,
pm.ShapeFilter(mask=mask))
# Initialize force.
ux = 0
uy = 0
self.force = ux, uy
# No thinking without information (yet?)
if info is None:
return
        # Apply gains: a simple proportional controller pulling the cell
        # toward the nearest food point with gain K.
        K = self.genes.gain
        delta = pos - info.point
        self.force -= K*delta
def split(self):
""" Split into two cells. """
# Create mutated copy of self.
new_genes = copy.deepcopy(self.genes)
new_genes.mutate()
new_cell = self.world.add_cell(self.body.position, new_genes)
# Pay penalty.
self.update_energy(-new_cell.energy)
return new_cell
def loop(self):
""" Main loop for cells. """
# Choose new action.
self.think()
# Apply force.
x, y = self.force
self.body.apply_force_at_local_point((x,y), point=(0,0))
# Pay penalty.
cost = -ENERGY_FORCE_RATIO * (x**2 + y**2)
self.update_energy(cost)
| mit | -2,686,967,191,176,848,000 | 29.722892 | 75 | 0.569608 | false |
AnsgarKlein/DPLL-SAT-Solver | tests/helper/evaluation.py | 1 | 1108 | #!/usr/bin/env python3
import sys
UNSAT = 0
SAT = 1
UNKNOWN = 2
def evaluate_clause(clause, model):
    # At least one literal has to be contained in the model for the clause to be SAT
for literal in clause:
if literal in model:
return SAT
    # If at least one literal is not yet assigned, the clause can still be satisfied
inverted_model = [lit * -1 for lit in model]
for literal in clause:
if literal not in inverted_model:
return UNKNOWN
# If every literal is already assigned (and not true) clause is UNSAT
return UNSAT
def correct_model(clauses, model):
# Model is correct if all clauses are true
for clause in clauses:
if evaluate_clause(clause, model) != SAT:
return False
return True
def incorrect_model(clauses, model):
# Model is incorrect if one clause is false
for clause in clauses:
if evaluate_clause(clause, model) == UNSAT:
return True
return False
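# Example (a sketch): clauses and models use DIMACS-style signed ints, e.g.
# (x1 OR NOT x2) AND (x2) with the assignment x1=True, x2=True:
#
#   clauses = [[1, -2], [2]]
#   model = [1, 2]
#   assert correct_model(clauses, model)
#   assert not incorrect_model(clauses, model)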
if __name__ == '__main__':
print('Error: Given module cannot be run directly', file = sys.stderr)
sys.exit(1)
| gpl-3.0 | -1,669,664,102,323,930,000 | 26.7 | 79 | 0.646209 | false |
kotfu/chogm | chogm.py | 1 | 12489 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007-2017, Jared Crapo
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
"""Change the owner, group, and mode of some files with a single command
chogm [OPTIONS] files_spec directories_spec file [file file ...]
-R, --recursive recurse through the directory tree of each file
-v, --verbose show progress
-h, --help display this usage message
file_spec owner:group:perms to set on files
directory_spec owner:group:perms to set on directories
file one or more files to operate on. Use '-' to
process stdin as a list of files.
file_spec tells what owner, group, and permissions should be given to any
files. Each of the three elements are separated by a ':'. If a value is
not given for a particular element, that that element is not changed on
the encountered files.
directory_spec works just like files_spec, but it is applied to
directories. If any element of directory_spec is a comma, the value of that
element will be used from file_spec
EXAMPLES
chogm www-data:www-data:644 ,:,:755 /pub/www/*
Change all files in /pub/www to have an owner and group of www-data,
and permissions of -rw-r--r--. Also change all directories in
/pub/www/ to have an owner and group of www-data, but permissions of
-rwxr-xr-x. This is equivilent to the following shell commands:
        $ chown www-data:www-data /pub/www/*
$ find /pub/www -maxdepth 1 -type f | xargs chmod 644
$ find /pub/www -maxdepth 1 -type d | tail -n +2 | xargs chmod 755
chogm -R :accounting:g+rw,o= :,:g=rwx,o= /mnt/acct
Change the group of all files in /mnt/acct to be accounting, and
make sure people in that group can read, write, and create files
anywhere in that directory tree. Also make sure that the hoi palloi
can't peek at accounting's files. This is the same as doing:
$ chgrp -R accounting /mnt/acct
$ find /mnt/acct -type f -print | xargs chmod g+rw,o=
$ find /mnt/acct -type d -print | xargs chmod g=rwx,o=
    find ~/src -depth 2 -type d -print | grep -v '/.git$' | chogm -R :staff:660 :,:770 -
Assuming your ~/src directory contains a bunch of directories, each
with their own git project, change all those files to have a group
of staff and permissions of -rw-rw---- and all the directories to
also have a group of staff but permissions of -rwxrwx---. While
doing all of that, don't change the permissions of any of the files
inside of .git directories.
REQUIREMENTS
    This script uses the operating system commands xargs, chown, chgrp, and
    chmod to do its work. It also uses the python multiprocessing module from
    the standard library, which was added in python 2.6, so it won't work with
    python versions earlier than that. It works in python 2.7 and 3+.
EXIT STATUS
0 everything OK
1 some operations not successful (ie permission denied on a directory)
2 incorrect usage
"""
from __future__ import print_function
import sys
import os
import argparse
import stat
import multiprocessing as mp
import subprocess
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
class Ogm:
"""store an owner, group, and mode"""
def __init__(self):
self.owner = None
self.group = None
self.mode = None
class Worker:
"""Launch an operating system process and feed it data
a worker class that uses python multiprocessing module clone itself, launch an OS
processes, and then catch new work from a multiprocessing.Pipe and send it to the
OS process to get done.
The OS process is xargs, so that we don't have to execute a new OS process for
every file we want to modify. We just send it to standard in, and let xargs take
care of how often it actually need to execute the chmod, chgrp or chmod
"""
def __init__(self, cmd, arg):
self.cmd = cmd
self.arg = arg
# set up a pipe so we can communicate with our multiprocessing.Process.
# From the parent process, we write filenames into the child pipe and read error
# messages from it. From the child process, we read filenames from the parent pipe
# and write error messages into it.
self.pipe_parent, self.pipe_child = mp.Pipe(duplex = True)
self.p = mp.Process(target=self.runner, args=(cmd,arg,))
self.p.start()
###self.pipe_parent.close() # this is the parent so we close the reading end of the pipe
def name(self):
"""return the name of this worker
the command it runs and the first argument for that command, ie 'chown www-data'
"""
return "%s %s" % (self.cmd, self.arg)
def add(self, file):
"""send a filename to the child process via a pipe"""
# this is called by the parent, and writes a filename to the child pipe
self.pipe_child.send(file)
def runner(self, cmd, arg):
"""Start a subprocess and feed it data from a pipe
This function is run in a child process. So we read from the parent
pipe to get work to do, and write to the parent pipe to send error messages
We also fire up an xargs subprocess to actually do the work, and feed stuff
from our parent pipe to stdin of the subprocess.
"""
xargs = subprocess.Popen(["xargs", cmd, arg], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
if debug:
print("--worker '%s' started xargs subprocess pid=%i" % (self.name(), xargs.pid), file=sys.stderr)
while True:
try:
# receive work from our parent pipe
filename = self.pipe_parent.recv()
# if we get message that there is None work, then we are done
if filename == None:
if debug:
print("--worker '%s' has no more work to do" % self.name(), file=sys.stderr)
break
# send the file to the stdin of the xargs process
print(filename, file=xargs.stdin)
if debug:
print("--worker '%s' received %s" % (self.name(), filename), file=sys.stderr)
except EOFError:
break
# we have broken out of the loop, so that means we have no more work to do
# gracefully close down the xargs process, save the contents of stderr, and
# write the exit code and the errors into the pipe to our parent
(stdoutdata,stderrdata) = xargs.communicate()
if debug:
print("--worker '%s' xargs pid=%i returncode=%i" % (self.name(), xargs.pid, xargs.returncode), file=sys.stderr)
print("--worker '%s' xargs stderr=%s" % (self.name(), stderrdata), file=sys.stderr)
self.pipe_parent.send( (xargs.returncode, stderrdata.rstrip('\r\n')) )
def gohome(self):
if debug:
print("--worker '%s' joining mp.Process" % self.name(), file=sys.stderr)
(rtncode,errmsgs) = self.pipe_child.recv()
self.p.join()
return (rtncode,errmsgs)
class Manager:
"""Start and manage all of the subprocesses"""
def __init__(self, fogm, dogm, verbose=False):
self.haveError = False
self.fogm = fogm
self.dogm = dogm
self.verbose = verbose
self.fchown = None
self.dchown = None
self.fchgrp = None
self.dchgrp = None
self.fchmod = None
self.dchmod = None
if fogm.owner:
self.fchown = Worker('chown', fogm.owner)
if dogm.owner:
self.dchown = Worker('chown', dogm.owner)
if fogm.group:
self.fchgrp = Worker('chgrp', fogm.group)
if dogm.group:
self.dchgrp = Worker('chgrp', dogm.group)
if fogm.mode:
self.fchmod = Worker('chmod', fogm.mode)
if dogm.mode:
self.dchmod = Worker('chmod', dogm.mode)
def do_file(self, file):
"""pass file to our subprocesses to change its owner, group and mode"""
if self.fchown:
self.fchown.add(file)
if self.fchgrp:
self.fchgrp.add(file)
if self.fchmod:
self.fchmod.add(file)
def do_dir(self, file):
"""pass a directory to our subprocesses to change its owner group and mode"""
if self.dchown:
self.dchown.add(file)
if self.dchgrp:
self.dchgrp.add(file)
if self.dchmod:
self.dchmod.add(file)
def report_information(self,message):
"""report information to stderr if verbose is set"""
if self.verbose:
print(message, file=sys.stderr)
def report_error(self, message):
"""report an error by printing it to stderr"""
self.haveError = True
print(message, file=sys.stderr)
def finish(self):
"""fire all of our workers and return a proper shell return code"""
self.fire(self.fchown)
self.fire(self.dchown)
self.fire(self.fchgrp)
self.fire(self.dchgrp)
self.fire(self.fchmod)
self.fire(self.dchmod)
if self.haveError:
return 1
else:
return 0
def fire(self, worker):
"""tell a worker there is no more work for them and send them home"""
if worker:
# put the "no more work" paper in the inbox
worker.add(None)
# and send the worker home
(rtncode,stderrdata) = worker.gohome()
if rtncode != 0:
self.report_error(stderrdata)
def main(argv=None):
parser = argparse.ArgumentParser(description='Change the owner, group, and mode of some files with a single command')
parser.add_argument('-R', '--recursive', action='store_true', help='recurse through the directory tree of each filespec')
parser.add_argument('-v', '--verbose', action='store_true', help='show progress')
parser.add_argument('file_spec', nargs=1, help='owner:group:perms to set on files')
parser.add_argument('directory_spec', nargs=1, help='owner:group:perms to set on directories')
parser.add_argument('file', nargs='+', help='one or more files to operate on. Use \'-\' to process stdin as a list of files')
args = parser.parse_args()
verbose = args.verbose
recursive = args.recursive
global debug
debug = False
spec = args.file_spec[0].split(':')
if len(spec) != 3:
parser.error('Invalid file_spec')
fileOgm = Ogm()
fileOgm.owner = spec[0]
fileOgm.group = spec[1]
fileOgm.mode = spec[2]
spec = args.directory_spec[0].split(':')
if len(spec) != 3:
parser.error('Invalid directory_spec')
dirOgm = Ogm()
dirOgm.owner = spec[0]
dirOgm.group = spec[1]
dirOgm.mode = spec[2]
# check for ',' which means to clone the argument from the file_spec
if dirOgm.owner == ',':
dirOgm.owner = fileOgm.owner
if dirOgm.group == ',':
dirOgm.group = fileOgm.group
if dirOgm.mode == ',':
dirOgm.mode = fileOgm.mode
# start up the child processes
m = Manager(fileOgm, dirOgm, verbose)
# examine each of the files
for filename in args.file:
if filename == '-':
while True:
onefile = sys.stdin.readline()
if onefile == '': break
examine(m, onefile.rstrip('\r\n'), parser, recursive)
else:
examine(m, filename, parser, recursive)
# and finish up
return m.finish()
def examine(m, thisfile, parser, recursive=False):
"""Recursively process a single file or directory"""
if debug:
print("--examining '%s'" % thisfile, file=sys.stderr)
try:
if os.path.isfile(thisfile):
m.do_file(thisfile)
elif os.path.isdir(thisfile):
m.do_dir(thisfile)
if recursive:
m.report_information("Processing directory %s...." % thisfile)
try:
for eachfile in os.listdir(thisfile):
examine(m, os.path.join(thisfile, eachfile), parser, recursive)
except OSError as e:
# do nicer formatting for common errors
if e.errno == 13:
m.report_error("%s: %s: Permission denied" % (parser.prog, e.filename))
else:
m.report_error("%s: %s" % (parser.prog, e))
else:
m.report_error("%s: cannot access '%s': No such file or directory" % (parser.prog, thisfile))
    except OSError as ose:
        m.report_error("%s: %s" % (parser.prog, ose))
if __name__ == "__main__":
sys.exit(main())
| mit | -2,110,178,060,360,650,500 | 33.983193 | 143 | 0.693971 | false |
uwosh/UWOshOIE | tests/testStateFacApprovedNeedsProgramManagerReview.py | 1 | 3979 | import os, sys
if __name__ == '__main__':
execfile(os.path.join(sys.path[0], 'framework.py'))
from Products.UWOshOIE.tests.uwoshoietestcase import UWOshOIETestCase
from Products.CMFCore.WorkflowCore import WorkflowException
class TestStateFacApprovedNeedsProgramManagerReview(UWOshOIETestCase):
"""Ensure product is properly installed"""
def afterSetUp(self):
self.acl_users = self.portal.acl_users
self.portal_workflow = self.portal.portal_workflow
self.portal_registration = self.portal.portal_registration
self.mockMailHost()
self.createUsers()
def createFacApprovedNeedsProgramManagerReviewApplication(self):
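        """Create an application and walk it through submission, director,
        program manager, and faculty review; faculty approval leaves it in
        the facApprovedNeedsProgramManagerReview state."""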
self.login(self._default_user)
self.portal.invokeFactory(type_name="OIEStudentApplication", id="testapplication")
app = self.portal['testapplication']
self.fill_out_application(app)
self.portal_workflow.doActionFor(app, 'submit')
self.logout()
self.login('front_line_advisor')
self.portal_workflow.doActionFor(app, 'waitForPrintedMaterials')
app.setWithdrawalRefund(True)
app.setApplicationFeeOK(True)
app.setUWSystemStatementOK(True)
app.setUWOshkoshStatementOK(True)
app.setTranscriptsOK(True)
self.portal_workflow.doActionFor(app, 'sendForDirectorReview')
self.logout()
self.login('director')
self.portal_workflow.doActionFor(app, 'sendForProgramManagerReview')
self.logout()
self.login('program_manager')
self.portal_workflow.doActionFor(app, 'sendForFacultyReview')
self.logout()
self.login('fac_review')
self.portal_workflow.doActionFor(app, 'facultyApproves')
self.logout()
return app
def test_should_be_in_facApprovedNeedsProgramManagerReview_state(self):
app = self.createFacApprovedNeedsProgramManagerReviewApplication()
self.assertEquals('facApprovedNeedsProgramManagerReview', self.getState(app))
def test_should_be_able_to_addComment(self):
app = self.createFacApprovedNeedsProgramManagerReviewApplication()
self.login(self._default_user)
self.portal_workflow.doActionFor(app, 'addComment')
self.assertEquals('facApprovedNeedsProgramManagerReview', self.getState(app))
self.logout()
def test_should_be_able_to_addToWaitlist(self):
app = self.createFacApprovedNeedsProgramManagerReviewApplication()
self.login('program_manager')
self.portal_workflow.doActionFor(app, 'addToWaitlist')
self.assertEquals('waitlist', self.getState(app))
self.logout()
def test_should_be_able_to_assertReadyForConditionalAdmit(self):
app = self.createFacApprovedNeedsProgramManagerReviewApplication()
self.login('program_manager')
self.portal_workflow.doActionFor(app, 'assertReadyForConditionalAdmit')
self.assertEquals('readyForConditionalAdmit', self.getState(app))
self.logout()
def test_should_be_able_to_decline(self):
app = self.createFacApprovedNeedsProgramManagerReviewApplication()
self.login('director')
self.portal_workflow.doActionFor(app, 'decline')
self.assertEquals('declined', self.getState(app))
self.logout()
def test_should_be_able_to_withdraw(self):
app = self.createFacApprovedNeedsProgramManagerReviewApplication()
self.login(self._default_user)
self.portal_workflow.doActionFor(app, 'withdraw')
self.assertEquals('withdrawn', self.getState(app))
self.logout()
def test_suite():
from unittest import TestSuite, makeSuite
suite = TestSuite()
suite.addTest(makeSuite(TestStateFacApprovedNeedsProgramManagerReview))
return suite
if __name__ == '__main__':
framework()
| gpl-2.0 | 3,223,219,520,424,574,000 | 35.504587 | 90 | 0.67806 | false |
cgvarela/Impala | tests/query_test/test_join_queries.py | 2 | 6020 | # Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Targeted tests for Impala joins
#
import logging
import os
import pytest
from copy import copy
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
from tests.common.skip import SkipIfS3, SkipIfIsilon
class TestJoinQueries(ImpalaTestSuite):
BATCH_SIZES = [0, 1]
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestJoinQueries, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(
TestDimension('batch_size', *TestJoinQueries.BATCH_SIZES))
    # TODO: Look into splitting up join tests to accommodate hbase.
# Joins with hbase tables produce drastically different results.
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format in ['parquet'])
if cls.exploration_strategy() != 'exhaustive':
# Cut down on execution time when not running in exhaustive mode.
cls.TestMatrix.add_constraint(lambda v: v.get_value('batch_size') != 1)
def test_joins(self, vector):
new_vector = copy(vector)
new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
self.run_test_case('QueryTest/joins', new_vector)
@SkipIfS3.hbase
@SkipIfIsilon.hbase
def test_joins_against_hbase(self, vector):
new_vector = copy(vector)
new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
self.run_test_case('QueryTest/joins-against-hbase', new_vector)
def test_outer_joins(self, vector):
new_vector = copy(vector)
new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
self.run_test_case('QueryTest/outer-joins', new_vector)
class TestTPCHJoinQueries(ImpalaTestSuite):
# Uses the tpch dataset in order to have larger joins. Needed for example to test
# the repartitioning codepaths.
BATCH_SIZES = [0, 1]
@classmethod
def get_workload(cls):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestTPCHJoinQueries, cls).add_test_dimensions()
    cls.TestMatrix.add_dimension(
        TestDimension('batch_size', *TestTPCHJoinQueries.BATCH_SIZES))
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format in ['parquet'])
if cls.exploration_strategy() != 'exhaustive':
# Cut down on execution time when not running in exhaustive mode.
cls.TestMatrix.add_constraint(lambda v: v.get_value('batch_size') != 1)
@classmethod
def teardown_class(cls):
    cls.client.execute('set mem_limit = 0')
super(TestTPCHJoinQueries, cls).teardown_class()
def test_outer_joins(self, vector):
new_vector = copy(vector)
new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
self.run_test_case('tpch-outer-joins', new_vector)
@SkipIfS3.insert
class TestSemiJoinQueries(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestSemiJoinQueries, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(
TestDimension('batch_size', *TestJoinQueries.BATCH_SIZES))
# Joins with hbase tables produce drastically different results.
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format in ['parquet'])
if cls.exploration_strategy() != 'exhaustive':
# Cut down on execution time when not running in exhaustive mode.
cls.TestMatrix.add_constraint(lambda v: v.get_value('batch_size') != 1)
@classmethod
def setup_class(cls):
super(TestSemiJoinQueries, cls).setup_class()
cls.__cleanup_semi_join_tables()
cls.__load_semi_join_tables()
@classmethod
def teardown_class(cls):
cls.__cleanup_semi_join_tables()
super(TestSemiJoinQueries, cls).teardown_class()
@classmethod
def __load_semi_join_tables(cls):
SEMIJOIN_TABLES = ['functional.SemiJoinTblA', 'functional.SemiJoinTblB']
# Cleanup, create and load fresh test tables for semi/anti-join tests
cls.client.execute('create table if not exists '\
'functional.SemiJoinTblA(a int, b int, c int)')
cls.client.execute('create table if not exists '\
'functional.SemiJoinTblB(a int, b int, c int)')
# loads some values with NULLs in the first table
    cls.client.execute('insert into %s values(1,1,1)' % SEMIJOIN_TABLES[0])
    cls.client.execute('insert into %s values(1,1,10)' % SEMIJOIN_TABLES[0])
    cls.client.execute('insert into %s values(1,2,10)' % SEMIJOIN_TABLES[0])
    cls.client.execute('insert into %s values(1,3,10)' % SEMIJOIN_TABLES[0])
    cls.client.execute('insert into %s values(NULL,NULL,30)' % SEMIJOIN_TABLES[0])
    cls.client.execute('insert into %s values(2,4,30)' % SEMIJOIN_TABLES[0])
    cls.client.execute('insert into %s values(2,NULL,20)' % SEMIJOIN_TABLES[0])
    # loads some values with NULLs in the second table
    cls.client.execute('insert into %s values(1,1,1)' % SEMIJOIN_TABLES[1])
    cls.client.execute('insert into %s values(1,1,10)' % SEMIJOIN_TABLES[1])
    cls.client.execute('insert into %s values(1,2,5)' % SEMIJOIN_TABLES[1])
    cls.client.execute('insert into %s values(1,NULL,10)' % SEMIJOIN_TABLES[1])
    cls.client.execute('insert into %s values(2,10,NULL)' % SEMIJOIN_TABLES[1])
    cls.client.execute('insert into %s values(3,NULL,NULL)' % SEMIJOIN_TABLES[1])
    cls.client.execute('insert into %s values(3,NULL,50)' % SEMIJOIN_TABLES[1])
@classmethod
def __cleanup_semi_join_tables(cls):
cls.client.execute('drop table if exists functional.SemiJoinTblA')
cls.client.execute('drop table if exists functional.SemiJoinTblB')
@pytest.mark.execute_serially
def test_semi_joins(self, vector):
new_vector = copy(vector)
new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
self.run_test_case('QueryTest/semi-joins', new_vector)
| apache-2.0 | -4,894,372,004,654,164,000 | 40.517241 | 86 | 0.698007 | false |
furas/exaile-plugins | plugins/testing/__init__.py | 1 | 13312 | #!/usr/bin/env python2
from xl import player, event, providers, settings #common,
from xlgui.widgets import menu, menuitems
import urllib, webbrowser
DATA = [
    # default values, used when nothing can be read from 'settings.ini' [plugin/testing/search]
['Google.com', 'https://www.google.com/search?q=%s'],
['YouTube.com', 'https://www.youtube.com/results?search_query=%s'],
['Vimeo.com', 'https://vimeo.com/search?q=%s'],
['Vimeo.com [category: music]', 'https://vimeo.com/search?category=music&q=%s'],
['SoundCloud.com', 'https://soundcloud.com/search?q=%s'],
]
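# Each entry is [menu label, search URL template]; the '%s' placeholder is
# filled with the URL-quoted track title in webbrowser_cb() below.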
class Testing(object):
def display(self, name, player, track=None):
print '---------------------------------'
print 'TESTING: display(name, player, track)'
print 'TESTING: ------------------------'
print 'TESTING: name:', name
print 'TESTING: player:', player
print 'TESTING: track:', track
print 'TESTING: ------------------------'
print 'TESTING: player.current:', player.current
if track:
print 'TESTING: track.list_tags():', track.list_tags()
print 'TESTING: track:', track
for tag in ('title', 'album', 'artist', 'tracknumber', '__loc', '__basename'):
data = track.get_tag_display(tag)
print 'TESTING: tag: %11s =' % tag, data
print '---------------------------------'
def other(self, name):
global DATA
print 'TESTING: other(name)'
print 'TESTING: ------------------------'
print 'TESTING: name:', name
print 'TESTING: ------------------------'
print "TESTING: settings.get_option('plugin/testing/search'):", settings.get_option('plugin/testing/search', DEFAULT_DATA)
DATA = settings.set_option('plugin/testing/search', DEFAULT_DATA)
print 'TESTING: ------------------------'
# -----------------------------------------------------------------
def enable(self, exaile):
self.player = player.PLAYER
self.display('enable', player.PLAYER)
self.other('enable')
event.add_callback(self.on_playback_start, 'playback_track_start')
def disable(self, exaile):
self.display('disable', player.PLAYER)
self.other('disable')
def teardown(self, exaile):
self.display('teardown', player.PLAYER)
self.other('teardown')
event.remove_callback(self.on_playback_start, 'playback_track_start')
def on_gui_loaded(self):
self.display('on_gui_loaded', player.PLAYER)
self.other('on_gui_loaded')
### main menu / tools ###
self.menu = menu.simple_menu_item('furas', '', 'Get Current Track',
callback=self.on_view_menu)
#providers.register('menubar-tools-menu', self.menu)
self.menu.register('menubar-tools-menu')
### menu
#self.create_test_menu()
self.create_menu()
#print 'TESTING: register menu'
def item_register(self, item, menu):
print '---------------------------------'
print 'TESTING: item.register(', menu, ')'
print '---------------------------------'
item.register(menu)
def provider_register(self, item, menu):
print '---------------------------------'
print 'TESTING: provider.register(', menu, ')'
print '---------------------------------'
providers.register(menu, item)
def test_callback(self, window, name, parent, context):
print '---------------------------------'
print 'TESTING: test_callback(window, name, parent, context)'
print 'TESTING: ------------------------'
print 'TESTING: window:', window
print 'TESTING: name:', name
print 'TESTING: parent:', parent
print 'TESTING: context:', context
print 'TESTING: ------------------------'
print 'TESTING: dir(parent):'
for x in dir(parent):
print 'TESTING: ->', x
print 'TESTING: ------------------------'
print 'TESTING: items:'
for item in parent.get_selected_items():
print 'TESTING: -> item:', item
print 'TESTING: -> type(item):', type(item)
print 'TESTING: -----'
print 'TESTING: ------------------------'
print 'TESTING: paths:'
for path in parent.get_selected_paths():
print 'TESTING: -> path:', path
print 'TESTING: -> type(path):', type(path)
print 'TESTING: -----'
print 'TESTING: ------------------------'
print 'tracks:', parent.get_selected_tracks()
for track in parent.get_selected_tracks():
print track
print type(track)
#print dir(track)
print '-------------------'
for tag in ('title', 'album', 'artist', 'tracknumber', '__loc', '__basename'):
data = track.get_tag_display(tag)
print ': %11s:' % tag, data
print '-------------------'
title = track.get_tag_display('title')
webbrowser.open('http://www.wrzuta.pl/szukaj/%s' % title.replace(' ', '+'))
webbrowser.open('https://www.google.pl/search?q=%s' % title.replace(' ', '+'))
self.display('test_callback', player.PLAYER)
with open('tracks.txt', 'a') as f:
for track in parent.get_selected_tracks():
artist = track.get_tag_display('artist')
title = track.get_tag_display('title')
f.write('%s,%s\n' % (artist, title))
print '---------------------------------'
### main menu / tools ###
def on_view_menu(self, widget, name, parent, context):
print '---------------------------------'
print 'TESTING: on_view_menu(widget, name, parent, context)'
print 'TESTING: ------------------------'
#~ if self.window:
#~ self.window.present()
#~ else:
#~ self.window = DeveloperWindow(self.exaile.gui.main.window, self)
#~ def _delete(w, e):
#~ self.window = None
#~ self.window.connect('delete-event', _delete)
#~ self.window.show_all()
print '---------------------------------'
def on_exaile_loaded(self):
self.display('on_exaile_loaded', player.PLAYER)
self.other('on_exaile_loaded')
def on_playback_start(self, type, player, track):
self.display('on_playback_start', player, track)
self.other('on_playback_start')
### menu ###
def create_test_menu(self):
self.item = menu.simple_menu_item('furas-item', # name
['properties'], # after ie. [] or ['properties']
'Testowy', # display text
'gtk-save', # icon name # ie. 'document-properties'
self.test_callback, # callback function
callback_args=[] # callback extra arguments
)
#self.item_register(self.item, 'track-panel-menu')
#self.item_register(self.item, 'playlist-panel-menu')
#self.item_register(self.item, 'playlist-panel-context-menu')
#self.item_register(self.item, 'collection-panel-context-menu')
#self.item_register(self.item, 'files-panel-context-menu')
#self.item_register(self.item, 'radio-panel-menu')
#providers.register('menubar-file-menu', item)
#providers.register('menubar-edit-menu', item)
#providers.register('menubar-playlist-menu', item)
#providers.register('menubar-playlist-menu', item)
#providers.register('menubar-tools-menu', item)
#providers.register('menubar-help-menu', item)
#self.item.register('main-panel')
self.provider_register(self.item, 'playlist-context-menu')
#self.item_register(self.item, 'playlist-context-menu')
#self.item_register(self.item, 'playlist-columns-menu')
def create_menu(self):
sep = menu.simple_separator('furas-item-sep', ['properties'])
#sep._pos = 'normal'
self.provider_register(sep, 'playlist-context-menu')
for n, (name, url) in enumerate(DATA):
item = menu.simple_menu_item(
'furas-item-%i' % n, # unique name
['furas-item-sep'], # after ie. [] or ['properties']
name, # displayed text
'gtk-save', # icon name # ie. 'document-properties'
self.webbrowser_cb, # callback function
callback_args=[url] # callback extra arguments
)
#print(dir(item))
self.provider_register(item, 'playlist-context-menu')
print '---------------------------------'
print 'TESTING: register menu #1'
### submenu ###
self.submenu = menu.Menu(self, inherit_context=True)
for n, (name, url) in enumerate(DATA):
self.submenu.add_item(menu.simple_menu_item(
'furas-item-sub-%i' % n, # unique name
[], # after ie. [] or ['properties']
name, # displayed text
'gtk-save', # icon name # ie. 'document-properties'
self.webbrowser_cb, # callback function
callback_args=[url] # callback extra arguments
))
item = menu.simple_menu_item(
'furas-item-sub', # unique name
['furas-item-sep'], # after ie. [] or ['properties']
'Szukaj',
submenu=self.submenu)
self.provider_register(item, 'playlist-context-menu')
print 'TESTING: self.submenu._items:'
print self.submenu._items
print '---------------------------------'
print 'TESTING: register menu #2'
for p in providers.get('playlist-context-menu'):
print 'TESTING: -> menu:', p.name, p._pos, p.after
print '---------------------------------'
def webbrowser_cb(self, window, name, parent, context, url):
print '---------------------------------'
print 'TESTING: webbrowser_cb(window, name, parent, context, url)'
print 'TESTING: ------------------------'
print 'TESTING: window:', window
print 'TESTING: name:', name
print 'TESTING: parent:', parent
print 'TESTING: context:', context
print 'TESTING: url:', url
print 'TESTING: ------------------------'
for track in parent.get_selected_tracks():
title = track.get_tag_display('title')
print 'TESTING: selected_track:', title
title = urllib.quote_plus(title)
webbrowser.open(url % title)
print '---------------------------------'
plugin_class = Testing
### events
#~ def enable(exaile):
#~ event.add_callback(on_stop_action, 'quit_application')
#~ event.add_callback(on_stop_action, 'playback_player_end', player.PLAYER)
#~ event.add_callback(on_begin_action, 'playback_track_start', player.PLAYER)
#~ event.add_callback(on_pause_action, 'playback_toggle_pause', player.PLAYER)
#~ def disable(exaile):
#~ event.remove_callback(on_stop_action, 'quit_application')
#~ event.remove_callback(on_stop_action, 'playback_player_end', player.PLAYER)
#~ event.remove_callback(on_begin_action, 'playback_track_start', player.PLAYER)
#~ event.remove_callback(on_pause_action, 'playback_toggle_pause', player.PLAYER)
### xlgui/panel/menus.py
#~ from xlgui.widgets import (
#~ menu,
#~ menuitems
#~ )
### Generic track selection menus
#~ def __create_track_panel_menus():
#~ items = []
#~ items.append(menuitems.EnqueueMenuItem('enqueue', after=['top-sep']))
#~ items.append(menuitems.AppendMenuItem('append', after=[items[-1].name]))
#~ items.append(menuitems.ReplaceCurrentMenuItem('replace', after=[items[-1].name]))
#~ items.append(menuitems.RatingMenuItem('rating', after=[items[-1].name]))
#~ items.append(menu.simple_separator('tp-sep', after=[items[-1].name]))
#~ items.append(menuitems.PropertiesMenuItem('properties', after=[items[-1].name]))
#~ for item in items:
#~ item.register('track-panel-menu')
#~ __create_track_panel_menus()
#~ class TrackPanelMenu(menu.ProviderMenu):
#~ '''
#~ Context menu when a track is clicked on a panel
#~ Provider key: track-panel-menu
### xlgui/widgets/menuitems.py
#~ def _properties_cb(widget, name, parent, context, get_tracks_func, dialog_parent):
#~ tracks = get_tracks_func(parent, context)
#~ if tracks:
#~ properties.TrackPropertiesDialog(dialog_parent, tracks)
#~ def PropertiesMenuItem(name, after, get_tracks_func=generic_get_tracks_func,
#~ dialog_parent=None):
#~ return menu.simple_menu_item(name, after, _("_Track Properties"),
#~ 'document-properties', _properties_cb,
#~ callback_args=[get_tracks_func, dialog_parent])
| mit | 639,402,065,340,357,400 | 37.363112 | 130 | 0.52479 | false |
googleapis/googleapis-gen | google/cloud/asset/v1p5beta1/asset-v1p5beta1-py/google/cloud/asset_v1p5beta1/__init__.py | 1 | 1079 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .services.asset_service import AssetServiceClient
from .services.asset_service import AssetServiceAsyncClient
from .types.asset_service import ListAssetsRequest
from .types.asset_service import ListAssetsResponse
from .types.asset_service import ContentType
from .types.assets import Asset
from .types.assets import Resource
__all__ = (
'AssetServiceAsyncClient',
'Asset',
'AssetServiceClient',
'ContentType',
'ListAssetsRequest',
'ListAssetsResponse',
'Resource',
)
| apache-2.0 | -6,398,374,797,990,011,000 | 30.735294 | 74 | 0.772938 | false |
funkybob/knights-templater | tests/test_blocks.py | 1 | 3108 |
import pathlib
from .utils import TemplateTestCase, Mock
class BlockTagTest(TemplateTestCase):
def test_block_parse(self):
self.assertRendered('{% block name %}%{% endblock %}', '%')
class ForTagTest(TemplateTestCase):
def test_simple_for(self):
self.assertRendered(
'{% for item in seq %}{{ item }} {% endfor %}',
'a b c d e ',
{'seq': 'abcde'},
)
def test_unpack_for(self):
self.assertRendered(
'{% for a, b in seq %}{{ a }} == {{ b }},{% endfor %}',
'a == 1,b == 2,',
{'seq': (('a', 1), ('b', 2))}
)
def test_for_empty_false(self):
self.assertRendered(
'{% for a, b in seq %}{{ a }} == {{ b }},{% empty %}empty{% endfor %}',
'a == 1,b == 2,',
{'seq': (('a', 1), ('b', 2))},
)
def test_for_empty_true(self):
self.assertRendered(
'{% for a, b in seq %}{{ a }} == {{ b }},{% empty %}empty{% endfor %}',
'empty',
{'seq': ()},
)
def test_scope(self):
self.assertRendered(
'{% for a in seq %}{{ a * b }} {% endfor %}',
'2 4 6 ',
{'seq': (1, 2, 3), 'b': 2},
)
def test_attr_source(self):
self.assertRendered(
'{% for a in obj.seq %}{{ a }}{% endfor %}',
'1234',
{'obj': Mock(seq=[1, 2, 3, 4])},
)
class IfTagTest(TemplateTestCase):
def test_simple_if(self):
self.assertRendered(
'{% if a == 1 %}Yes!{% endif %}',
'Yes!',
{'a': 1}
)
self.assertRendered(
'{% if a == 1 %}Yes!{% endif %}',
'',
{'a': 2}
)
def test_if_else(self):
tmpl = '{% if a == 1 %}Yes!{% else %}No!{% endif %}'
self.assertRendered(tmpl, 'Yes!', {'a': 1})
self.assertRendered(tmpl, 'No!', {'a': 2})
class WithTagTest(TemplateTestCase):
def test_simple_with(self):
self.assertRendered(
'''{% with a=1, b=c %}{{ a * b }}{% endwith %}''',
'''3''',
{'c': 3}
)
class IncludeTagTest(TemplateTestCase):
def test_include(self):
self.assertRendered(
'''{% include "include.html" %}''',
'''included\n''',
{}
)
def test_include_with(self):
self.assertRendered(
'''{% include "include_more.html", a=val, b=6 %}''',
'''product: 18\n''',
{'val': 3}
)
class MacroTagTest(TemplateTestCase):
def test_macro_does_not_render(self):
self.assertRendered(
'''{% macro foo %}Foo{% endmacro %}''',
'',
)
def test_macro_renders(self):
self.assertRendered(
'''{% macro foo %}Foo{% endmacro %}{% use "foo" %}''',
'Foo',
)
def test_macro_takes_args(self):
self.assertRendered(
'''{% macro foo %}{{ foo }}{% endmacro %}{% use "foo", foo="bar" %}''',
'''bar''',
)
| mit | -3,845,713,191,750,642,700 | 24.68595 | 83 | 0.427284 | false |
dseuss/mpnum | tests/mparray_test.py | 1 | 57892 | # encoding: utf-8
# FIXME Is there a better metric to compare two arrays/scalars than
# assert_(array)_almost_equal? Something that takes magnitude into
# account?
from __future__ import absolute_import, division, print_function
import functools as ft
import itertools as it
import h5py as h5
import numpy as np
import pytest as pt
from numpy.testing import (assert_almost_equal, assert_array_almost_equal,
assert_array_equal)
import mpnum.factory as factory
import mpnum.mparray as mp
from mpnum import utils
from mpnum._testing import (assert_correct_normalization,
assert_mpa_almost_equal, assert_mpa_identical,
compression_svd)
from six.moves import range, zip
def update_copy_of(target, newvals):
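    """Return a copy of `target` updated with `newvals` (dict.update mutates in place and returns None)."""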
new = target.copy()
new.update(newvals)
return new
###############################################################################
# Basic creation & operations #
###############################################################################
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, _', pt.MP_TEST_PARAMETERS)
def test_from_full(nr_sites, local_dim, _, rgen, dtype):
psi = factory._random_vec(nr_sites, local_dim, randstate=rgen, dtype=dtype)
mps = mp.MPArray.from_array(psi, 1)
assert_array_almost_equal(psi, mps.to_array())
assert mps.dtype == dtype
op = factory._random_op(nr_sites, local_dim, randstate=rgen, dtype=dtype)
mpo = mp.MPArray.from_array(op, 2)
assert_array_almost_equal(op, mpo.to_array())
assert mpo.dtype == dtype
def test_from_inhomogenous(rgen):
array = rgen.randn(4, 3, 3, 3)
mpa = mp.MPArray.from_array(array, ndims=(2, 1, 1))
assert_array_almost_equal(array, mpa.to_array())
assert mpa.ndims == (2, 1, 1)
assert mpa.shape == ((4, 3), (3,), (3,))
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_from_kron(nr_sites, local_dim, rank, dtype):
ndims = 2
randfun = factory._randfuncs[dtype]
factors = tuple(randfun([nr_sites] + ([local_dim] * ndims)))
op = utils.mkron(*factors)
op.shape = [local_dim] * (ndims * nr_sites)
mpo = mp.MPArray.from_kron(factors)
assert_array_almost_equal(op, mpo.to_array_global())
assert mpo.dtype == dtype
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, _', pt.MP_TEST_PARAMETERS)
def test_conjugations(nr_sites, local_dim, _, rgen, dtype):
op = factory._random_op(nr_sites, local_dim, randstate=rgen, dtype=dtype)
mpo = mp.MPArray.from_array(op, 2)
assert_array_almost_equal(np.conj(op), mpo.conj().to_array())
assert mpo.conj().dtype == dtype
mpo.canonicalize()
mpo_c = mpo.conj()
assert_correct_normalization(mpo_c)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, _', pt.MP_TEST_PARAMETERS)
def test_transpose(nr_sites, local_dim, _, rgen, dtype):
op = factory._random_op(nr_sites, local_dim, randstate=rgen, dtype=dtype)
mpo = mp.MPArray.from_array(utils.global_to_local(op, nr_sites), 2)
opT = op.reshape((local_dim**nr_sites,) * 2).T \
.reshape((local_dim,) * 2 * nr_sites)
assert_array_almost_equal(opT, (mpo.T).to_array_global())
assert mpo.T.dtype == dtype
mpo.canonicalize()
mpo_T = mpo.T
assert_correct_normalization(mpo_T)
def test_transpose_axes(rgen):
ldim = (2, 5, 3)
axes = (2, 0, 1)
new_ldim = tuple(ldim[ax] for ax in axes)
# Easy (to implement) test: One physical site only.
vec = factory._zrandn(ldim, rgen)
mps = mp.MPArray.from_array(vec, ndims=len(ldim))
assert len(mps) == 1
vec_t = vec.transpose(axes)
mps_t = mps.transpose(axes)
mps_t_to_vec = mps_t.to_array()
assert vec_t.shape == new_ldim
assert_array_equal(mps_t_to_vec, vec_t)
assert_correct_normalization(mps_t)
# Test with 3 sites
nr_sites = 3
tensor = factory._zrandn(ldim * nr_sites, rgen) # local form
mpa = mp.MPArray.from_array(tensor, ndims=len(ldim))
assert len(mpa) == nr_sites
assert mpa.shape == (ldim,) * nr_sites
# transpose axes in local form
tensor_axes = tuple(ax + site * len(ldim)
for site in range(nr_sites) for ax in axes)
tensor_t = tensor.transpose(tensor_axes)
mpa_t = mpa.transpose(axes)
mpa_t_to_tensor = mpa_t.to_array()
assert mpa_t.shape == (new_ldim,) * nr_sites
assert_array_almost_equal(mpa_t_to_tensor, tensor_t)
assert_correct_normalization(mpa_t)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
def test_dump_and_load(tmpdir, dtype):
mpa = factory.random_mpa(5, [(4,), (2, 3), (1,), (4,), (4, 3)],
(4, 7, 1, 3), dtype=dtype)
mpa.canonicalize(left=1, right=3)
with h5.File(str(tmpdir / 'dump_load_test.h5'), 'w') as buf:
newgroup = buf.create_group('mpa')
mpa.dump(newgroup)
with h5.File(str(tmpdir / 'dump_load_test.h5'), 'r') as buf:
mpa_loaded = mp.MPArray.load(buf['mpa'])
assert_mpa_identical(mpa, mpa_loaded)
mpa.dump(str(tmpdir / 'dump_load_test_str.h5'))
mpa_loaded = mp.MPArray.load(str(tmpdir / 'dump_load_test_str.h5'))
assert_mpa_identical(mpa, mpa_loaded)
###############################################################################
# Algebraic operations #
###############################################################################
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_sum(nr_sites, local_dim, rank, rgen, dtype):
"""Compare mpa.sum() with full array computation"""
mpa = factory.random_mpa(nr_sites, local_dim, rank, rgen, dtype)
array_sum = mpa.to_array().sum()
# Test summation over all indices and different argument values.
assert_almost_equal(mpa.sum(), array_sum)
assert_almost_equal(mpa.sum(0), array_sum)
assert_almost_equal(mpa.sum([0]), array_sum)
assert_almost_equal(mpa.sum([[0]] * nr_sites), array_sum)
# Test summation over site-dependent indices
n_plegs = 3 if nr_sites <= 4 and local_dim <= 2 else 2
mpa = factory.random_mpa(nr_sites, [local_dim] * n_plegs, rank, rgen, dtype)
# Pseudo-randomly choose how many physical legs to sum over at each site.
num_sum = ((rgen.choice(range(ndims + 1)), ndims) for ndims in mpa.ndims)
# Pseudo-randomly choose which physical legs to sum over.
axes = tuple(
rgen.choice(range(ndims), num, replace=False) for num, ndims in num_sum)
array_axes = tuple(n_plegs * pos + a
for pos, ax in enumerate(axes) for a in ax)
mpa_sum = mpa.sum(axes)
if hasattr(mpa_sum, 'to_array'): # possibly, no physical legs are left
mpa_sum = mpa_sum.to_array()
array_sum = mpa.to_array().sum(array_axes)
assert_array_almost_equal(mpa_sum, array_sum)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_dot(nr_sites, local_dim, rank, rgen, dtype):
mpo1 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op1 = mpo1.to_array_global()
mpo2 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op2 = mpo2.to_array_global()
# Dotproduct of all 1st physical with 0th physical legs = np.dot
dot_np = np.tensordot(op1.reshape((local_dim**nr_sites, ) * 2),
op2.reshape((local_dim**nr_sites, ) * 2),
axes=([1], [0]))
dot_np = dot_np.reshape(op1.shape)
dot_mp = mp.dot(mpo1, mpo2, axes=(1, 0)).to_array_global()
assert_array_almost_equal(dot_np, dot_mp)
assert dot_mp.dtype == dtype
# this should also be the default axes
dot_mp = mp.dot(mpo1, mpo2).to_array_global()
assert_array_almost_equal(dot_np, dot_mp)
# Dotproduct of all 0th physical with 1st physical legs = np.dot
dot_np = np.tensordot(op1.reshape((local_dim**nr_sites, ) * 2),
op2.reshape((local_dim**nr_sites, ) * 2),
axes=([0], [1]))
dot_np = dot_np.reshape(op1.shape)
dot_mp = mp.dot(mpo1, mpo2, axes=(0, 1)).to_array_global()
assert_array_almost_equal(dot_np, dot_mp)
assert dot_mp.dtype == dtype
# this should also be the default axes
dot_mp = mp.dot(mpo1, mpo2, axes=(-2, -1)).to_array_global()
assert_array_almost_equal(dot_np, dot_mp)
def test_dot_multiaxes(rgen):
ldim1 = (2, 2, 3, 2)
ldim2 = (3, 2, 4)
ax1 = (0, 2)
ax2 = (-2, 0)
assert len(ax1) == len(ax2)
# Easy (to implement) test: One physical site.
vec1 = factory._zrandn(ldim1, rgen)
vec2 = factory._zrandn(ldim2, rgen)
mpa1 = mp.MPArray.from_array(vec1, ndims=len(ldim1))
mpa2 = mp.MPArray.from_array(vec2, ndims=len(ldim2))
assert len(mpa1) == 1
assert len(mpa2) == 1
mpa_prod = mp.dot(mpa1, mpa2, axes=(ax1, ax2)).to_array()
vec_prod = np.tensordot(vec1, vec2, (ax1, ax2))
assert_array_almost_equal(mpa_prod, vec_prod)
# Test with 3 sites
nr_sites = 3
vec1 = factory._zrandn(ldim1 * nr_sites, rgen) # local form
vec2 = factory._zrandn(ldim2 * nr_sites, rgen) # local form
mpa1 = mp.MPArray.from_array(vec1, ndims=len(ldim1))
mpa2 = mp.MPArray.from_array(vec2, ndims=len(ldim2))
assert len(mpa1) == nr_sites
assert len(mpa2) == nr_sites
mpa_prod = mp.dot(mpa1, mpa2, axes=(ax1, ax2)).to_array()
vec_ax1, vec_ax2 = (
tuple(ax + site * nldim
if ax >= 0 else ax - (nr_sites - site - 1) * nldim
for site in range(nr_sites) for ax in ax_n)
for ax_n, nldim in ((ax1, len(ldim1)), (ax2, len(ldim2)))
)
vec_prod = np.tensordot(vec1, vec2, (vec_ax1, vec_ax2))
# The problem with vec_prod is: The order of the indices does not
# match the order of the indices in mpa_prod. We need to change
# that order:
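    # E.g. for two sites with one remaining leg each, the tensordot axis order
    # (a_s0, a_s1, b_s0, b_s1) is permuted to the site-local (a_s0, b_s0, a_s1, b_s1).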
nldim1, nldim2 = (len(ldim1) - len(ax1), len(ldim2) - len(ax2))
assert vec_prod.ndim == nr_sites * (nldim1 + nldim2)
perm = tuple(
offset + site * nldim + ax
for site in range(nr_sites)
for offset, nldim in ((0, nldim1), (nr_sites * nldim1, nldim2))
for ax in range(nldim)
)
vec_prod = vec_prod.transpose(perm)
assert_array_almost_equal(mpa_prod, vec_prod)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_partialdot(nr_sites, local_dim, rank, rgen, dtype):
# Only for at least two sites, we can apply an operator to a part
# of a chain.
if nr_sites < 2:
return
part_sites = nr_sites // 2
start_at = min(2, nr_sites // 2)
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op = mpo.to_array_global().reshape((local_dim**nr_sites,) * 2)
mpo_part = factory.random_mpa(part_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op_part = mpo_part.to_array_global().reshape((local_dim**part_sites,) * 2)
op_part_embedded = np.kron(
np.kron(np.eye(local_dim**start_at), op_part),
np.eye(local_dim**(nr_sites - part_sites - start_at)))
prod1 = np.dot(op, op_part_embedded)
prod2 = np.dot(op_part_embedded, op)
prod1_mpo = mp.partialdot(mpo, mpo_part, start_at=start_at)
prod2_mpo = mp.partialdot(mpo_part, mpo, start_at=start_at)
prod1_mpo = prod1_mpo.to_array_global().reshape((local_dim**nr_sites,) * 2)
prod2_mpo = prod2_mpo.to_array_global().reshape((local_dim**nr_sites,) * 2)
assert_array_almost_equal(prod1, prod1_mpo)
assert_array_almost_equal(prod2, prod2_mpo)
assert prod1_mpo.dtype == dtype
assert prod2_mpo.dtype == dtype
def test_partialdot_multiaxes(rgen):
ldim1 = (2, 2, 3, 2)
ldim2 = (3, 2, 4)
ax1 = (0, 2)
ax2 = (-2, 0)
assert len(ax1) == len(ax2)
# Easy (to implement) test: One physical site.
vec1 = factory._zrandn(ldim1, rgen)
vec2 = factory._zrandn(ldim2, rgen)
mpa1 = mp.MPArray.from_array(vec1, ndims=len(ldim1))
mpa2 = mp.MPArray.from_array(vec2, ndims=len(ldim2))
assert len(mpa1) == 1
assert len(mpa2) == 1
mpa_prod = mp.partialdot(mpa1, mpa2, start_at=0, axes=(ax1, ax2)).to_array()
vec_prod = np.tensordot(vec1, vec2, (ax1, ax2))
assert_array_almost_equal(mpa_prod, vec_prod)
# Test with 3 sites
nr_sites = 3
nr_sites_shorter = 2
start_at = 1
vec1 = factory._zrandn(ldim1 * nr_sites, rgen) # local form
vec2 = factory._zrandn(ldim2 * nr_sites_shorter, rgen) # local form
mpa1 = mp.MPArray.from_array(vec1, ndims=len(ldim1))
mpa2 = mp.MPArray.from_array(vec2, ndims=len(ldim2))
assert len(mpa1) == nr_sites
assert len(mpa2) == nr_sites_shorter
mpa_prod = mp.partialdot(mpa1, mpa2, start_at, axes=(ax1, ax2)).to_array()
vec_ax1, vec_ax2 = (
tuple(ax + (startsite + site) * nldim
if ax >= 0 else ax - (nr_sites_shorter - site - 1) * nldim
for site in range(nr_sites_shorter) for ax in ax_n)
for ax_n, nldim, startsite in
((ax1, len(ldim1), start_at), (ax2, len(ldim2), 0))
)
vec_prod = np.tensordot(vec1, vec2, (vec_ax1, vec_ax2))
# The problem with vec_prod is: The order of the indices does not
# match the order of the indices in mpa_prod. We need to change
# that order:
nldim1, nldim2 = (len(ldim1) - len(ax1), len(ldim2) - len(ax2))
assert vec_prod.ndim == (start_at * len(ldim1)
+ nr_sites_shorter * (nldim1 + nldim2))
# For sites before start_at, the axes of `vec1` remain unchanged.
perm = tuple(range(len(ldim1) * start_at))
# For site start_at and following sites, we need to fix the order
# of sites. We use the same scheme as `test_dot_multiaxes` above.
perm2 = tuple(
offset + site * nldim + ax
for site in range(nr_sites_shorter)
for offset, nldim in ((0, nldim1), (nr_sites_shorter * nldim1, nldim2))
for ax in range(nldim)
)
# Now we displace that permutation by the number of unchanged
# sites at the beginning:
perm += tuple(len(perm) + ax for ax in perm2)
vec_prod = vec_prod.transpose(perm)
assert_array_almost_equal(mpa_prod, vec_prod)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_inner_vec(nr_sites, local_dim, rank, rgen, dtype):
mp_psi1 = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen,
dtype=dtype)
psi1 = mp_psi1.to_array().ravel()
mp_psi2 = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen,
dtype=dtype)
psi2 = mp_psi2.to_array().ravel()
inner_np = np.vdot(psi1, psi2)
inner_mp = mp.inner(mp_psi1, mp_psi2)
assert_almost_equal(inner_mp, inner_np)
assert inner_mp.dtype == dtype
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_inner_mat(nr_sites, local_dim, rank, rgen, dtype):
mpo1 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op1 = mpo1.to_array_global().reshape((local_dim**nr_sites, ) * 2)
mpo2 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op2 = mpo2.to_array_global().reshape((local_dim**nr_sites, ) * 2)
inner_np = np.trace(np.dot(op1.conj().transpose(), op2))
inner_mp = mp.inner(mpo1, mpo2)
assert_almost_equal(inner_mp, inner_np)
assert inner_mp.dtype == dtype
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_sandwich(nr_sites, local_dim, rank, rgen, dtype):
mps = factory.random_mpa(nr_sites, local_dim, rank,
randstate=rgen, dtype=dtype, normalized=True)
mps2 = factory.random_mpa(nr_sites, local_dim, rank,
randstate=rgen, dtype=dtype, normalized=True)
mpo = factory.random_mpa(nr_sites, [local_dim] * 2, rank,
randstate=rgen, dtype=dtype)
mpo.canonicalize()
mpo /= mp.trace(mpo)
vec = mps.to_array().ravel()
op = mpo.to_array_global().reshape([local_dim**nr_sites] * 2)
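    # mp.sandwich(mpo, mps[, mps2]) evaluates <mps|mpo|mps> (or <mps2|mpo|mps>)
    # directly, without building the intermediate MPA from mp.dot(mpo, mps).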
res_arr = np.vdot(vec, np.dot(op, vec))
res_mpo = mp.inner(mps, mp.dot(mpo, mps))
res_sandwich = mp.sandwich(mpo, mps)
assert_almost_equal(res_mpo, res_arr)
assert_almost_equal(res_sandwich, res_arr)
vec2 = mps2.to_array().ravel()
res_arr = np.vdot(vec2, np.dot(op, vec))
res_mpo = mp.inner(mps2, mp.dot(mpo, mps))
res_sandwich = mp.sandwich(mpo, mps, mps2)
assert_almost_equal(res_mpo, res_arr)
assert_almost_equal(res_sandwich, res_arr)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_norm(nr_sites, local_dim, rank, dtype, rgen):
mp_psi = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen,
dtype=dtype)
psi = mp_psi.to_array()
assert_almost_equal(mp.inner(mp_psi, mp_psi), mp.norm(mp_psi)**2)
assert_almost_equal(np.sum(psi.conj() * psi), mp.norm(mp_psi)**2)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_normdist(nr_sites, local_dim, rank, dtype, rgen):
psi1 = factory.random_mpa(nr_sites, local_dim, rank, dtype=dtype,
randstate=rgen)
psi2 = factory.random_mpa(nr_sites, local_dim, rank, dtype=dtype,
randstate=rgen)
assert_almost_equal(mp.normdist(psi1, psi2), mp.norm(psi1 - psi2))
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank, keep_width',
[(6, 2, 4, 3), (4, 3, 5, 2)])
def test_partialtrace(nr_sites, local_dim, rank, keep_width, rgen, dtype):
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op = mpo.to_array_global()
for site in range(nr_sites - keep_width + 1):
traceout = tuple(range(site)) \
+ tuple(range(site + keep_width, nr_sites))
axes = [(0, 1) if site in traceout else None for site in range(nr_sites)]
red_mpo = mp.partialtrace(mpo, axes=axes)
red_from_op = utils.partial_trace(op, traceout)
assert_array_almost_equal(red_mpo.to_array_global(), red_from_op,
err_msg="not equal at site {}".format(site))
assert red_mpo.dtype == dtype
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim', [(4, 3)])
def test_partialtrace_axes(nr_sites, local_dim, rgen, dtype):
mpa = factory.random_mpa(nr_sites, (local_dim,) * 3, 1,
randstate=rgen, dtype=dtype)
# Verify that an exception is raised if `axes` does not refer to a
# physical leg.
valid = [(0, 2), (-3, -2)]
invalid = [(0, 3), (-4, 2), (-4, 3)]
for axes in valid:
mp.partialtrace(mpa, axes=axes)
for axes in invalid:
with pt.raises(AssertionError) as exc:
mp.partialtrace(mpa, axes=(0, 3))
assert exc.value.args == ('Too few legs',), "Wrong assertion"
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_trace(nr_sites, local_dim, rank, rgen, dtype):
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op = mpo.to_array_global().reshape((local_dim**nr_sites,) * 2)
mpo_trace = mp.trace(mpo)
assert_almost_equal(np.trace(op), mpo_trace)
assert np.array(mpo_trace).dtype == dtype
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_add_and_subtr(nr_sites, local_dim, rank, rgen, dtype):
mpo1 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op1 = mpo1.to_array_global()
mpo2 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op2 = mpo2.to_array_global()
assert_array_almost_equal(op1 + op2, (mpo1 + mpo2).to_array_global())
assert_array_almost_equal(op1 - op2, (mpo1 - mpo2).to_array_global())
assert (mpo1 + mpo2).dtype == dtype
assert (mpo1 + mpo2).dtype == dtype
mpo1 += mpo2
assert_array_almost_equal(op1 + op2, mpo1.to_array_global())
assert mpo1.dtype == dtype
@pt.mark.parametrize('nr_sites, local_dim, rank', [(3, 2, 2)])
def test_operations_typesafety(nr_sites, local_dim, rank, rgen):
# create a real MPA
mpo1 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=np.float_)
mpo2 = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=np.complex_)
assert mpo1.dtype == np.float_
assert mpo2.dtype == np.complex_
assert (mpo1 + mpo1).dtype == np.float_
assert (mpo1 + mpo2).dtype == np.complex_
assert (mpo2 + mpo1).dtype == np.complex_
assert mp.sumup((mpo1, mpo1)).dtype == np.float_
assert mp.sumup((mpo1, mpo2)).dtype == np.complex_
assert mp.sumup((mpo2, mpo1)).dtype == np.complex_
assert (mpo1 - mpo1).dtype == np.float_
assert (mpo1 - mpo2).dtype == np.complex_
assert (mpo2 - mpo1).dtype == np.complex_
mpo1 += mpo2
assert mpo1.dtype == np.complex_
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_sumup(nr_sites, local_dim, rank, rgen, dtype):
mpas = [factory.random_mpa(nr_sites, local_dim, 3, dtype=dtype, randstate=rgen)
for _ in range(rank if rank is not np.nan else 1)]
sum_naive = ft.reduce(mp.MPArray.__add__, mpas)
sum_mp = mp.sumup(mpas)
assert_array_almost_equal(sum_naive.to_array(), sum_mp.to_array())
assert all(r <= 3 * rank for r in sum_mp.ranks)
assert(sum_mp.dtype is dtype)
weights = rgen.randn(len(mpas))
summands = [w * mpa for w, mpa in zip(weights, mpas)]
sum_naive = ft.reduce(mp.MPArray.__add__, summands)
sum_mp = mp.sumup(mpas, weights=weights)
assert_array_almost_equal(sum_naive.to_array(), sum_mp.to_array())
assert all(r <= 3 * rank for r in sum_mp.ranks)
assert(sum_mp.dtype is dtype)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_mult_mpo_scalar(nr_sites, local_dim, rank, rgen, dtype):
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
# FIXME Change behavior of to_array
# For nr_sites == 1, changing `mpo` below will change `op` as
# well, unless we call .copy().
op = mpo.to_array_global().copy()
scalar = rgen.randn()
assert_array_almost_equal(scalar * op, (scalar * mpo).to_array_global())
mpo *= scalar
assert_array_almost_equal(scalar * op, mpo.to_array_global())
assert mpo.dtype == dtype
assert (1.j * mpo).dtype == np.complex_
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_div_mpo_scalar(nr_sites, local_dim, rank, rgen):
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
dtype=np.complex_, randstate=rgen)
# FIXME Change behavior of to_array
# For nr_sites == 1, changing `mpo` below will change `op` as
# well, unless we call .copy().
op = mpo.to_array_global().copy()
scalar = rgen.randn() + 1.j * rgen.randn()
assert_array_almost_equal(op / scalar, (mpo / scalar).to_array_global())
mpo /= scalar
assert_array_almost_equal(op / scalar, mpo.to_array_global())
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_chain(nr_sites, local_dim, rank, rgen, dtype):
# This test produces at most `nr_sites` by tensoring two
# MPOs. This doesn't work for :code:`nr_sites = 1`.
if nr_sites < 2:
return
# NOTE: Everything here is in local form!!!
mpo = factory.random_mpa(nr_sites // 2, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op = mpo.to_array()
# Test with 2-factors with full form
mpo_double = mp.chain((mpo, mpo))
op_double = np.tensordot(op, op, axes=(tuple(), ) * 2)
assert len(mpo_double) == 2 * len(mpo)
assert_array_almost_equal(op_double, mpo_double.to_array())
assert_array_equal(mpo_double.ranks, mpo.ranks + (1,) + mpo.ranks)
assert mpo.dtype == dtype
    # Test 3 factors iteratively (since the full form would be too large!)
diff = mp.chain((mpo, mpo, mpo)) - mp.chain((mpo, mp.chain((mpo, mpo))))
diff.canonicalize()
assert len(diff) == 3 * len(mpo)
assert mp.norm(diff) < 1e-6
# local_dim, rank
MP_TEST_PARAMETERS_INJECT = [(2, 4), (3, 3), (2, 5), (2, 1), (1, 2)]
@pt.mark.parametrize('local_dim, rank', MP_TEST_PARAMETERS_INJECT)
def test_inject(local_dim, rank):
"""mp.inject() vs. computation with full arrays"""
# rank is np.nan for nr_sites = 1 (first argument,
# ignored). We require a value for rank.
if np.isnan(rank):
return
# ndims = 3 is hardcoded below (argument to .transpose()).
# Uniform local dimension is also hardcoded below (arguments to
# .reshape()).
ndims = 3
local_dim = (local_dim,) * ndims
a, b, c = factory._zrandn((3, 2) + local_dim)
# We don't use b[1, :]
b = b[0, :]
# Here, only global order (as given by np.kron()).
abbc0 = utils.mkron(a[0, :], b, b, c[0, :])
abbc1 = utils.mkron(a[1, :], b, b, c[1, :])
abbc = (abbc0 + abbc1).reshape(4 * local_dim)
ac0 = np.kron(a[0, :], c[0, :])
ac1 = np.kron(a[1, :], c[1, :])
ac = (ac0 + ac1).reshape(2 * local_dim)
ac_mpo = mp.MPArray.from_array(utils.global_to_local(ac, sites=2), ndims)
abbc_mpo = mp.inject(ac_mpo, pos=1, num=2, inject_ten=b)
abbc_mpo2 = mp.inject(ac_mpo, pos=[1], num=[2], inject_ten=[b])
abbc_mpo3 = mp.inject(ac_mpo, pos=[1], num=None, inject_ten=[[b, b]])
assert_array_almost_equal(abbc, abbc_mpo.to_array_global())
assert_array_almost_equal(abbc, abbc_mpo2.to_array_global())
assert_array_almost_equal(abbc, abbc_mpo3.to_array_global())
# Here, only local order.
ac = factory._zrandn(local_dim * 2)
b = factory._zrandn(local_dim)
acb = np.tensordot(ac, b, axes=((), ()))
abc = acb.transpose((0, 1, 2, 6, 7, 8, 3, 4, 5))
ac_mpo = mp.MPArray.from_array(ac, ndims)
abc_mpo = mp.inject(ac_mpo, pos=1, num=1, inject_ten=b)
# Keep local order
abc_from_mpo = abc_mpo.to_array()
assert_array_almost_equal(abc, abc_from_mpo)
# ndims = 2 is hardcoded below (argument to .transpose()).
# Uniform local dimension is also hardcoded below (arguments to
# .reshape()).
ndims = 2
local_dim = (local_dim[0],) * ndims
a, c = factory._zrandn((2, 2) + local_dim)
b = np.eye(local_dim[0])
# Here, only global order (as given by np.kron()).
abbc0 = utils.mkron(a[0, :], b, b, c[0, :])
abbc1 = utils.mkron(a[1, :], b, b, c[1, :])
abbc = (abbc0 + abbc1).reshape(4 * local_dim)
ac0 = np.kron(a[0, :], c[0, :])
ac1 = np.kron(a[1, :], c[1, :])
ac = (ac0 + ac1).reshape(2 * local_dim)
ac_mpo = mp.MPArray.from_array(utils.global_to_local(ac, sites=2), ndims)
abbc_mpo = mp.inject(ac_mpo, pos=1, num=2, inject_ten=None)
abbc_mpo2 = mp.inject(ac_mpo, pos=[1], num=[2])
abbc_mpo3 = mp.inject(ac_mpo, pos=[1], inject_ten=[[None, None]])
assert_array_almost_equal(abbc, abbc_mpo.to_array_global())
assert_array_almost_equal(abbc, abbc_mpo2.to_array_global())
assert_array_almost_equal(abbc, abbc_mpo3.to_array_global())
# Here, only local order.
ac = factory._zrandn(local_dim * 2)
b = np.eye(local_dim[0])
acb = np.tensordot(ac, b, axes=((), ()))
abc = acb.transpose((0, 1, 4, 5, 2, 3))
ac_mpo = mp.MPArray.from_array(ac, ndims)
abc_mpo = mp.inject(ac_mpo, pos=1, num=1, inject_ten=None)
# Keep local order
abc_from_mpo = abc_mpo.to_array()
assert_array_almost_equal(abc, abc_from_mpo)
@pt.mark.parametrize('local_dim, rank', MP_TEST_PARAMETERS_INJECT)
def test_inject_many(local_dim, rank, rgen):
"""Calling mp.inject() repeatedly vs. calling it with sequence arguments"""
mpa = factory.random_mpa(3, local_dim, rank, rgen, normalized=True,
dtype=np.complex_)
inj_lt = [factory._zrandn(s, rgen) for s in [(2, 3), (1,), (2, 2), (3, 2)]]
mpa_inj1 = mp.inject(mpa, 1, None, [inj_lt[0]])
mpa_inj1 = mp.inject(mpa_inj1, 2, 1, inj_lt[0])
mpa_inj1 = mp.inject(mpa_inj1, 4, None, [inj_lt[2]])
mpa_inj2 = mp.inject(mpa, [1, 2], [2, None], [inj_lt[0], [inj_lt[2]]])
mpa_inj3 = mp.inject(mpa, [1, 2], [2, 1], [inj_lt[0], inj_lt[2]])
assert_mpa_almost_equal(mpa_inj1, mpa_inj2, True)
assert_mpa_almost_equal(mpa_inj1, mpa_inj3, True)
inj_lt = [inj_lt[:2], inj_lt[2:]]
mpa_inj1 = mp.inject(mpa, 1, None, inj_lt[0])
mpa_inj1 = mp.inject(mpa_inj1, 4, inject_ten=inj_lt[1])
mpa_inj2 = mp.inject(mpa, [1, 2], None, inj_lt)
assert_mpa_almost_equal(mpa_inj1, mpa_inj2, True)
def test_inject_shapes(rgen):
"""Check that mp.inject() picks up the correct shape"""
mpa = factory.random_mpa(3, ([1], [2], [3]), 3, rgen, normalized=True)
print(mpa.shape)
mpa_inj = mp.inject(mpa, [0, 2], [1, 1])
assert mpa_inj.shape == ((1, 1), (1,), (2,), (3, 3), (3,))
mpa_inj = mp.inject(mpa, [1, 3], [1, 1], None)
assert mpa_inj.shape == ((1,), (2, 2), (2,), (3,), (3, 3))
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_inject_vs_chain(nr_sites, local_dim, rank, rgen):
"""Compare mp.inject() with mp.chain()"""
if nr_sites == 1:
return
mpa = factory.random_mpa(nr_sites // 2, local_dim, rank, rgen,
dtype=np.complex_, normalized=True)
pten = [factory._zrandn((local_dim,) * 2) for _ in range(nr_sites // 2)]
pten_mpa = mp.MPArray.from_kron(pten)
outer1 = mp.chain((pten_mpa, mpa))
outer2 = mp.inject(mpa, 0, inject_ten=pten)
assert_mpa_almost_equal(outer1, outer2, True)
outer1 = mp.chain((mpa, pten_mpa))
outer2 = mp.inject(mpa, [len(mpa)], [None], inject_ten=[pten])
assert_mpa_almost_equal(outer1, outer2, True)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_localouter(nr_sites, local_dim, rank, rgen):
mpa1 = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen)
mpa2 = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen)
arr1 = mpa1.to_array()
arr1 = arr1.reshape(arr1.shape + (1, ) * nr_sites)
arr2 = mpa2.to_array()
arr2 = arr2.reshape((1, ) * nr_sites + arr2.shape)
tensor_mp = mp.localouter(mpa1, mpa2)
tensor_np = arr1 * arr2
assert tensor_mp.ndims == (2,) * nr_sites
assert tensor_np.shape == (local_dim,) * (2 * nr_sites)
assert_array_almost_equal(tensor_np, tensor_mp.to_array_global())
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank, local_width',
[(5, 2, 3, 1), (6, 2, 4, 3), (4, 3, 5, 2)])
def test_local_sum(nr_sites, local_dim, rank, local_width, dtype, rgen):
eye_mpa = factory.eye(1, local_dim)
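    # The helper below pads `mpa` with single-site identities so that it acts
    # on the full chain of `nr_sites` sites, starting at `startpos`.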
def embed_mpa(mpa, startpos):
mpas = [eye_mpa] * startpos + [mpa] + \
[eye_mpa] * (nr_sites - startpos - local_width)
res = mp.chain(mpas)
return res
nr_startpos = nr_sites - local_width + 1
mpas = [factory.random_mpa(local_width, (local_dim,) * 2, rank,
dtype=dtype, randstate=rgen)
for i in range(nr_startpos)]
# Embed with mp.chain() and calculate naive MPA sum:
mpas_embedded = [embed_mpa(mpa, i) for i, mpa in enumerate(mpas)]
mpa_sum = mpas_embedded[0]
for mpa in mpas_embedded[1:]:
mpa_sum += mpa
# Compare with local_sum: Same result, smaller rank
mpa_local_sum = mp.local_sum(mpas)
# Check that local_sum() is no worse than naive sum
assert all(d1 <= d2 for d1, d2 in zip(mpa_local_sum.ranks, mpa_sum.ranks))
# Check that local_sum() is actually better than naive sum because
# it calls local_sum_simple().
assert any(d1 < d2 for d1, d2 in zip(mpa_local_sum.ranks, mpa_sum.ranks))
assert_array_almost_equal(mpa_local_sum.to_array(), mpa_sum.to_array())
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_diag_1pleg(nr_sites, local_dim, rank, rgen):
mpa = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen)
mpa_np = mpa.to_array()
# this should be a single, 1D numpy array
diag_mp = mp.diag(mpa)
diag_np = np.array([mpa_np[(i,) * nr_sites] for i in range(local_dim)])
assert_array_almost_equal(diag_mp, diag_np)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_diag_2plegs(nr_sites, local_dim, rank, rgen):
mpa = factory.random_mpa(nr_sites, 2 * (local_dim,), rank, randstate=rgen)
mpa_np = mpa.to_array()
# this should be a single, 1D numpy array
diag_mp = mp.diag(mpa, axis=1)
diag_np = np.array([mpa_np[(slice(None), i) * nr_sites]
for i in range(local_dim)])
for a, b in zip(diag_mp, diag_np):
assert a.ndims[0] == 1
assert_array_almost_equal(a.to_array(), b)
###############################################################################
# Shape changes, conversions #
###############################################################################
# nr_sites, local_dim, rank, sites_per_group
MP_TEST_PARAMETERS_GROUPS = [(6, 2, 4, 3), (6, 2, 4, 2), (4, 3, 5, 2)]
@pt.mark.parametrize('nr_sites, local_dim, rank, sites_per_group',
MP_TEST_PARAMETERS_GROUPS)
def test_group_sites(nr_sites, local_dim, rank, sites_per_group, rgen):
assert (nr_sites % sites_per_group) == 0, \
'nr_sites not a multiple of sites_per_group'
mpa = factory.random_mpa(nr_sites, (local_dim,) * 2, rank, randstate=rgen)
grouped_mpa = mpa.group_sites(sites_per_group)
op = mpa.to_array()
grouped_op = grouped_mpa.to_array()
assert_array_almost_equal(op, grouped_op)
@pt.mark.parametrize('nr_sites, local_dim, rank, sites_per_group',
MP_TEST_PARAMETERS_GROUPS)
def test_split_sites(nr_sites, local_dim, rank, sites_per_group, rgen):
assert (nr_sites % sites_per_group) == 0, \
'nr_sites not a multiple of sites_per_group'
ndims = (local_dim,) * (2 * sites_per_group)
mpa = factory.random_mpa(nr_sites // sites_per_group, ndims, rank, randstate=rgen)
split_mpa = mpa.split_sites(sites_per_group)
op = mpa.to_array()
split_op = split_mpa.to_array()
assert_array_almost_equal(op, split_op)
@pt.mark.parametrize('ndims', [1, 2, 3])
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_reverse(nr_sites, local_dim, rank, ndims, rgen):
mpa = factory.random_mpa(nr_sites, (local_dim,) * ndims, rank, rgen,
normalized=True)
arr = mpa.to_array()
rev_arr = arr.transpose(np.arange(nr_sites * ndims)
.reshape((nr_sites, ndims))[::-1, :].ravel())
rev_mpa = mpa.reverse()
rev_arr2 = rev_mpa.to_array()
assert_almost_equal(rev_arr, rev_arr2)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_bleg2pleg_pleg2bleg(nr_sites, local_dim, rank, rgen):
mpa = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen)
# +2 so we cover all possibilities
mpa.canonicalize(left=nr_sites // 2, right=min(nr_sites // 2 + 2, nr_sites))
for pos in range(nr_sites - 1):
mpa_t = mpa.vleg2leg(pos)
true_rank = mpa.ranks[pos]
pshape = [(local_dim,)] * pos + [(local_dim, true_rank)] + \
[(true_rank, local_dim)] + [(local_dim,)] * (nr_sites - pos - 2)
ranks = list(mpa.ranks)
ranks[pos] = 1
assert_array_equal(mpa_t.shape, pshape)
assert_array_equal(mpa_t.ranks, ranks)
assert_correct_normalization(mpa_t)
mpa_t = mpa_t.leg2vleg(pos)
# This is an ugly hack, but necessary to use the assert_mpa_identical
# function. Normalization-awareness gets lost in the process!
mpa_t._lt._lcanonical, mpa_t._lt._rcanonical = mpa.canonical_form
assert_mpa_identical(mpa, mpa_t)
if nr_sites > 1:
mpa = factory.random_mpa(nr_sites, local_dim, 1, randstate=rgen)
mpa.canonicalize()
mpa_t = mpa.leg2vleg(nr_sites // 2 - 1)
assert_correct_normalization(mpa_t)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_split(nr_sites, local_dim, rank, rgen):
if nr_sites < 2:
return
mpa = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen)
for pos in range(nr_sites - 1):
mpa_l, mpa_r = mpa.split(pos)
assert len(mpa_l) == pos + 1
assert len(mpa_l) + len(mpa_r) == nr_sites
assert_correct_normalization(mpa_l)
assert_correct_normalization(mpa_r)
recons = np.tensordot(mpa_l.to_array(), mpa_r.to_array(), axes=(-1, 0))
assert_array_almost_equal(mpa.to_array(), recons)
for (lnorm, rnorm) in it.product(range(nr_sites - 1), range(1, nr_sites)):
mpa_l, mpa_r = mpa.split(nr_sites // 2 - 1)
assert_correct_normalization(mpa_l)
assert_correct_normalization(mpa_r)
def test_reshape(rgen):
mpa = factory.random_mpa(4, [(3, 2), (4,), (2, 5), (24,)], 4)
mpa.canonicalize()
mpa_r = mpa.reshape([(2, 3), (2, 2), (10,), (3, 2, 4)])
assert all(s1 == s2 for s1, s2 in
zip(mpa_r.shape, [(2, 3), (2, 2), (10,), (3, 2, 4)]))
assert_correct_normalization(mpa_r, *mpa.canonical_form)
###############################################################################
# Normalization & Compression #
###############################################################################
@pt.mark.parametrize('nr_sites, local_dim, _', pt.MP_TEST_PARAMETERS)
def test_canonicalization_from_full(nr_sites, local_dim, _, rgen):
op = factory._random_op(nr_sites, local_dim, randstate=rgen)
mpo = mp.MPArray.from_array(op, 2)
assert_correct_normalization(mpo, nr_sites - 1, nr_sites)
# FIXME Add counter to normalization functions
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_canonicalization_incremental(nr_sites, local_dim, rank, rgen, dtype):
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op = mpo.to_array_global()
assert_correct_normalization(mpo, 0, nr_sites)
assert_array_almost_equal(op, mpo.to_array_global())
for site in range(1, nr_sites):
mpo.canonicalize(left=site)
assert_correct_normalization(mpo, site, nr_sites)
assert_array_almost_equal(op, mpo.to_array_global())
assert mpo.dtype == dtype
for site in range(nr_sites - 1, 0, -1):
mpo.canonicalize(right=site)
assert_correct_normalization(mpo, site - 1, site)
assert_array_almost_equal(op, mpo.to_array_global())
assert mpo.dtype == dtype
# FIXME Add counter to normalization functions
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_canonicalization_jump(nr_sites, local_dim, rank, rgen, dtype):
# This test assumes at least two sites.
if nr_sites == 1:
return
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op = mpo.to_array_global()
assert_correct_normalization(mpo, 0, nr_sites)
assert_array_almost_equal(op, mpo.to_array_global())
center = nr_sites // 2
mpo.canonicalize(left=center - 1, right=center)
assert_correct_normalization(mpo, center - 1, center)
assert_array_almost_equal(op, mpo.to_array_global())
assert mpo.dtype == dtype
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_canonicalization_full(nr_sites, local_dim, rank, rgen, dtype):
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op = mpo.to_array_global()
assert_correct_normalization(mpo, 0, nr_sites)
assert_array_almost_equal(op, mpo.to_array_global())
mpo.canonicalize(right=1)
assert_correct_normalization(mpo, 0, 1)
assert_array_almost_equal(op, mpo.to_array_global())
assert mpo.dtype == dtype
###########################################################################
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
randstate=rgen, dtype=dtype)
op = mpo.to_array_global()
assert_correct_normalization(mpo, 0, nr_sites)
assert_array_almost_equal(op, mpo.to_array_global())
mpo.canonicalize(left=len(mpo) - 1)
assert_correct_normalization(mpo, len(mpo) - 1, len(mpo))
assert_array_almost_equal(op, mpo.to_array_global())
assert mpo.dtype == dtype
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_canonicalization_default_args(nr_sites, local_dim, rank, rgen):
# The following normalizations assume at least two sites.
if nr_sites == 1:
return
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank, randstate=rgen)
assert_correct_normalization(mpo, 0, nr_sites)
mpo.canonicalize(left=1)
mpo.canonicalize()
assert_correct_normalization(mpo, nr_sites - 1, nr_sites)
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank, randstate=rgen)
assert_correct_normalization(mpo, 0, nr_sites)
# The following normalization assumes at least three sites.
if nr_sites == 2:
return
mpo.canonicalize(left=1)
mpo.canonicalize(right=nr_sites - 2)
mpo.canonicalize()
assert_correct_normalization(mpo, 0, 1)
def test_canonicalization_compression(rgen):
"""If the rank is too large at the boundary, qr decompostion
in normalization may yield smaller rank"""
mpo = factory.random_mpa(sites=2, ldim=2, rank=20, randstate=rgen)
mpo.canonicalize(right=1)
assert_correct_normalization(mpo, 0, 1)
assert mpo.ranks[0] == 2
mpo = factory.random_mpa(sites=2, ldim=2, rank=20, randstate=rgen)
mpo.canonicalize(left=1)
assert_correct_normalization(mpo, 1, 2)
assert mpo.ranks[0] == 2
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_mult_mpo_scalar_normalization(nr_sites, local_dim, rank, rgen):
if nr_sites < 2:
# Re-normalization has no effect for nr_sites == 1. There is
# nothing more to test than :func:`test_mult_mpo_scalar`.
return
mpo = factory.random_mpa(nr_sites, (local_dim, local_dim), rank,
dtype=np.complex_, randstate=rgen)
op = mpo.to_array_global()
scalar = rgen.randn() + 1.j * rgen.randn()
center = nr_sites // 2
mpo.canonicalize(left=center - 1, right=center)
mpo_times_two = scalar * mpo
assert_array_almost_equal(scalar * op, mpo_times_two.to_array_global())
assert_correct_normalization(mpo_times_two, center - 1, center)
mpo *= scalar
assert_array_almost_equal(scalar * op, mpo.to_array_global())
assert_correct_normalization(mpo, center - 1, center)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_singularvals(nr_sites, local_dim, rank, dtype, rgen):
mps = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen,
dtype=dtype, normalized=True, force_rank=True)
psi = mps.to_array()
# Start from a non-normalized state
assert mps.canonical_form == (0, nr_sites)
svals = list(mps.singularvals())
if nr_sites == 1:
assert mps.canonical_form == (0, 1)
else:
# The last local tensor update from _compress_svd_r() is not
# carried out. This behaviour may change.
assert mps.canonical_form == (nr_sites - 2, nr_sites - 1)
assert len(svals) == nr_sites - 1
for n_left in range(1, nr_sites):
sv = svals[n_left - 1]
mat = psi.reshape((local_dim**n_left, -1))
sv2 = np.linalg.svd(mat, full_matrices=False, compute_uv=False)
n_sv = min(len(sv), len(sv2))
# Output from `svd()` is always in descending order
assert_almost_equal(sv[n_sv:], 0.0)
assert_almost_equal(sv2[n_sv:], 0.0)
assert_array_almost_equal(sv[:n_sv], sv2[:n_sv])
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_pad_ranks(nr_sites, local_dim, rank, rgen):
mps = factory.random_mpa(nr_sites, local_dim, rank, randstate=rgen,
normalized=True)
mps2 = mps.pad_ranks(2 * rank)
assert mps2.ranks == tuple(min(d, 2 * rank) for d in mp.full_rank(mps.shape))
assert_almost_equal(mp.normdist(mps, mps2), 0.0)
mps2 = mps.pad_ranks(2 * rank, force_rank=True)
assert mps2.ranks == (2 * rank,) * (nr_sites - 1)
assert_almost_equal(mp.normdist(mps, mps2), 0.0)
#####################################
# SVD and variational compression #
#####################################
# nr_sites, local_dims, rank
compr_sizes = pt.mark.parametrize(
# Start with `2*rank` and compress to `rank`.
'nr_sites, local_dims, rank',
(
(4, 2, 3),
pt.mark.long((2, (2, 3), 5)),
pt.mark.long((5, 3, 4)),
# TODO Create a separate marker for very long tests:
# (4, (2, 3), 5),
# (6, 2, 3),
# (5, (2, 2, 2), 20), # about 2 minutes (Core i5-3380M)
# (16, 2, 10), # about 2 minutes
# (16, 2, 30), # about 10 minutes
)
)
compr_settings = pt.mark.parametrize(
'comparg',
(
dict(method='svd', direction='left'),
dict(method='svd', direction='right'),
dict(method='svd', direction='left', relerr=1e-6),
dict(method='svd', direction='right', relerr=1e-6),
pt.mark.long(dict(method='var', num_sweeps=1, var_sites=1)),
dict(method='var', num_sweeps=2, var_sites=1),
pt.mark.long(dict(method='var', num_sweeps=3, var_sites=1)),
pt.mark.long(dict(method='var', num_sweeps=1, var_sites=2)),
dict(method='var', num_sweeps=2, var_sites=2),
pt.mark.long(dict(method='var', num_sweeps=3, var_sites=2)),
# See :func:`call_compression` below for the meaning of
# 'fillbelow'.
dict(method='var', num_sweeps=2, var_sites=1, startmpa='fillbelow'),
)
)
# Test compression works for different normalizations of the MPA
# before compression.
compr_normalization = pt.mark.parametrize(
'canonicalize',
(dict(left=1, right=-1), dict()) +
tuple(pt.mark.long(x) for x in (
None,
dict(left='afull'),
dict(right='afull'),
dict(left=1), dict(left=-1), dict(right=1), dict(right=-1),
dict(left=1, right=2), dict(left=-2, right=-1),
dict(left=1, right=-1),
))
)
def _chain_decorators(*args):
def chain_decorator(f):
for deco in reversed(args):
f = deco(f)
return f
return chain_decorator
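# Illustrative note (not part of the original test suite): _chain_decorators
# bundles several parametrize decorators into one, so that decorating a test
# with compr_test_params (defined below) behaves the same as stacking
# @compr_sizes, @compr_settings and @compr_normalization directly,
# applied outermost-first.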
compr_test_params = _chain_decorators(compr_sizes, compr_settings,
compr_normalization)
def normalize_if_applicable(mpa, nmz):
"""Check whether the given normalization can be applied.
:param mp.MPArray mpa: Will call `mpa.canonicalize()`
:param nmz: Keyword arguments for `mpa.canonicalize()` or `None`
:returns: True if the normalization has been applied.
`nmz=None` means not to call `mpa.canonicalize()` at all.
The test whether the normalization can be applied is not
comprehensive.
"""
# Make sure the input is non-normalized. Otherwise, the output can
# be more normalized than desired for the test.
assert mpa.canonical_form == (0, len(mpa)), "want non-normalized MPA for test"
if nmz is not None:
if nmz.get('left') == 1 and nmz.get('right') == -1 and len(mpa) == 2:
return False
mpa.canonicalize(**nmz)
return True
def call_compression(mpa, comparg, target_rank, rgen, call_compress=False):
"""Call `mpa.compress` or `mpa.compression` with suitable arguments.
Does not make a copy of `mpa` in any case.
:param target_rank: Compress to rank `target_rank`.
:param call_compress: If `True`, call `mpa.compress` instead of
`mpa.compression` (the default).
:param comparg: Settings dict for compression. If `relerr` is not
present, add `rank = target_rank`. If `startmpa` is equal to
`'fillbelow'`, insert a random MPA.
:returns: Compressed MPA.
"""
if not ('relerr' in comparg) and (comparg.get('startmpa') == 'fillbelow'):
startmpa = factory.random_mpa(len(mpa), mpa.shape[0], target_rank,
normalized=True, randstate=rgen,
dtype=mpa.dtype)
comparg = update_copy_of(comparg, {'startmpa': startmpa})
else:
comparg = update_copy_of(comparg, {'rank': target_rank})
if (comparg.get('method') == 'var') and not ('startmpa' in comparg):
comparg = update_copy_of(comparg, {'randstate': rgen})
if call_compress:
return mpa.compress(**comparg)
else:
return mpa.compression(**comparg)
# We want check compression for inputs with norm different from 1. In the next
# function and below, we do this with a normalized state multiplied with a
# constant with magnitude different from 1. This is to avoid errors like
# "123456789.1 and 123456789.2 are not equal to six decimals" and is related to
# the fixme at the module start.
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@compr_test_params
def test_compression_and_compress(nr_sites, local_dims, rank, canonicalize,
comparg, dtype, rgen):
"""Test that .compression() and .compress() produce identical results.
"""
# See comment above on "4.2 *"
mpa = 4.2 * factory.random_mpa(nr_sites, local_dims, rank * 2,
normalized=True, dtype=dtype, randstate=rgen)
if not normalize_if_applicable(mpa, canonicalize):
return
comparg = comparg.copy()
if comparg['method'] == 'var':
# Exact equality between `compr` and `compr2` below requires
# using the same start vector in both cases.
comparg['startmpa'] = factory.random_mpa(nr_sites, local_dims, rank,
dtype=dtype, randstate=rgen)
# The results from .compression() and .compress() must match
# exactly. No numerical difference is allowed.
compr2 = mpa.copy()
overlap2 = call_compression(compr2, comparg, rank, rgen, call_compress=True)
compr, overlap = call_compression(mpa, comparg, rank, rgen)
assert_almost_equal(overlap, overlap2)
# FIXME Why do they not agree completely? We are doing the same thing...
assert_mpa_identical(compr, compr2, decimal=12)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@compr_test_params
def test_compression_result_properties(nr_sites, local_dims, rank,
canonicalize, comparg, rgen, dtype):
"""Test general properties of the MPA coming from a compression.
* Compare SVD compression against simpler implementation
* Check that all implementations return the correct overlap
* Check that the rank has decreased and that it is as
prescribed
* Check that the normalization advertised in the result is correct
    * Check that compression doesn't change the dtype
TODO: The worst case for compression is that all singular values
have the same size. This gives a fidelity lower bound for the
compression result. Check that lower bound.
FIXME: Make this test a wrapper around MPArray.compression() to
reduce code duplication. This wrapper would replace
call_compression(). This would also apply more tests
.compress(). At the moment, we mostly test .compression().
"""
mpa = 4.2 * factory.random_mpa(nr_sites, local_dims, rank * 2,
normalized=True, randstate=rgen, dtype=dtype)
if not normalize_if_applicable(mpa, canonicalize):
return
compr, overlap = call_compression(mpa.copy(), comparg, rank, rgen)
# 'relerr' is currently 1e-6 and no rank is provided, so no
# compression will occur.
if 'relerr' not in comparg:
# Check that the rank has changed.
assert max(compr.ranks) < max(mpa.ranks)
# Check that the target rank is satisfied
assert max(compr.ranks) <= rank
# Check that the inner product is correct.
assert_almost_equal(overlap, mp.inner(mpa, compr))
# SVD: Check that .canonical_form is as expected.
if comparg['method'] == 'svd':
normtarget = {'left': (0, 1), 'right': (len(compr) - 1, len(compr))}
assert compr.canonical_form == normtarget[comparg['direction']]
# Check the content of .canonical_form is correct.
assert_correct_normalization(compr)
assert compr.dtype == dtype
# SVD: compare with alternative implementation
if comparg['method'] == 'svd' and 'relerr' not in comparg:
alt_compr = compression_svd(mpa.to_array(), rank, comparg['direction'])
compr = compr.to_array()
assert_array_almost_equal(alt_compr, compr)
@pt.mark.parametrize('dtype', pt.MP_TEST_DTYPES)
@pt.mark.parametrize('nr_sites, local_dim, rank', pt.MP_TEST_PARAMETERS)
def test_var_no_worse_than_svd(nr_sites, local_dim, rank, rgen, dtype):
"""Variational compresssion should always improve the overlap of the
compressed mpa with the original one -- we test this by running a single
variational compression sweep after an SVD compression and check that
the overlap did not become smaller"""
mpa = 4.2 * factory.random_mpa(nr_sites, local_dim, 5 * rank,
normalized=True, randstate=rgen, dtype=dtype)
mpa_svd, overlap_svd = mpa.compression(method='svd', rank=rank)
overlap_svd /= mp.norm(mpa.copy()) * mp.norm(mpa_svd)
mpa_var, overlap_var = mpa.compression(method='var', rank=rank,
startmpa=mpa_svd, num_sweeps=1)
overlap_var /= mp.norm(mpa) * mp.norm(mpa_var)
assert overlap_var > overlap_svd * (1 - 1e-14)
@compr_test_params
def test_compression_rank_noincrease(nr_sites, local_dims, rank,
canonicalize, comparg, rgen):
"""Check that rank does not increase if the target rank
is larger than the MPA rank
"""
if 'relerr' in comparg:
return # Test does not apply
mpa = 4.2 * factory.random_mpa(nr_sites, local_dims, rank, normalized=True,
randstate=rgen)
norm = mp.norm(mpa.copy())
if not normalize_if_applicable(mpa, canonicalize):
return
for factor in (1, 2):
compr, overlap = call_compression(mpa, comparg, rank * factor, rgen)
assert_almost_equal(overlap, norm**2)
assert_mpa_almost_equal(compr, mpa, full=True)
assert (np.array(compr.ranks) <= np.array(mpa.ranks)).all()
@pt.mark.parametrize('add', ('zero', 'self', 'self2'))
@compr_test_params
def test_compression_trivialsum(nr_sites, local_dims, rank, canonicalize,
comparg, add, rgen):
"""Check that `a + b` compresses exactly to a multiple of `a` if `b`
is equal to one of `0`, `a` or `-2*a`
"""
mpa = 4.2 * factory.random_mpa(nr_sites, local_dims, rank, normalized=True,
randstate=rgen)
norm = mp.norm(mpa.copy())
if not normalize_if_applicable(mpa, canonicalize):
return
zero = factory.zero(nr_sites, local_dims, rank)
choices = {'zero': (zero, 1), 'self': (mpa, 2), 'self2': (-2*mpa, -1)}
add, factor = choices[add]
msum = mpa + add
assert_mpa_almost_equal(msum, factor * mpa, full=True)
# Check that rank has increased (they exactly add)
for dim1, dim2, sum_dim in zip(mpa.ranks, add.ranks, msum.ranks):
assert dim1 + dim2 == sum_dim
compr, overlap = call_compression(msum, comparg, rank, rgen)
assert_almost_equal(overlap, (norm * factor)**2)
assert_mpa_almost_equal(compr, factor * mpa, full=True)
assert (np.array(compr.ranks) <= np.array(mpa.ranks)).all()
| bsd-3-clause | 8,778,198,888,598,796,000 | 40.029057 | 86 | 0.613487 | false |
dstl/ideaworks | backend/ideaworks/contentapp/api_functions.py | 1 | 10278 |
# (c) Crown Copyright 2014 Defence Science and Technology Laboratory UK
# Author: Rich Brantingham
import json
import datetime
from HTMLParser import HTMLParser #Used to create a non-marked up snippet
from django.contrib.auth.models import User
from ideaworks.generic_resources import BaseCorsResource
# Contentapp objects, authentication class and data output serializer
import contentapp.documents as documents
from contentapp.authentication import CustomApiKeyAuthentication
from contentapp.serializers import CustomSerializer
def count_builder(bundle, field, count_field):
""" Generates count values based on the length of another list/embedded field"""
# Patch in the count values
    if field in bundle.data:
bundle.data[count_field] = len(bundle.data[field])
else:
bundle.data[count_field] = 0
bundle.data[field] = []
return bundle
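# Illustrative sketch (not part of the original module): given a bundle whose
# data dict is {'comments': ['a', 'b']}, calling
# count_builder(bundle, 'comments', 'comment_count') leaves
# bundle.data == {'comments': ['a', 'b'], 'comment_count': 2}.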
#--------------------------------------------------------------------------------
def get_top_level_pm_elements(bundle):
""" Extracts the useful PM elements out to the top level """
# Ensure PM field exists
try:
pm = bundle.data['protective_marking']
except:
bundle.data['pretty_pm'] = 'NO PROTECTIVE MARKING FOUND'
bundle.data['classification_short'] = 'NO PROTECTIVE MARKING FOUND'
# Try to access the pretty pm
try: bundle.data['pretty_pm'] = pm.data['pretty_pm']
except: bundle.data['pretty_pm'] = 'NO PROTECTIVE MARKING FOUND'
# Try to access the short classification
try: bundle.data['classification_short'] = pm.data['classification_short']
except: bundle.data['classification_short'] = 'NO PROTECTIVE MARKING FOUND'
return bundle
#--------------------------------------------------------------------------------
def get_contributors_info(bundle, contributor=None):
""" Get the user info for a specific contributor"""
# Get the id of the user
if not contributor:
contributor = bundle.data['user']
# Get the user object
user_obj = None
try:
user_obj = User.objects.get(username=contributor)
except:
user_obj = User.objects.get(email=contributor)
if user_obj:
bundle.data['contributor_name'] = user_obj.first_name.title() + ' ' + user_obj.last_name.title()
return bundle
#--------------------------------------------------------------------------------
def get_all_pms(documents, subdocs_to_check=[], pm_name='protective_marking', keep_field=None):
""" Access all the PM elements from documents and lists of sub-documents
keep_field specifies the field to keep - removing all the rest from the PM response."""
pm_docs = []
# Loop the objects
for doc in documents:
# Loop the fields and check if any of them are lists
# If they are, then check for a PM subdocument to work on too
for fld in doc.data.keys():
if fld.lower() in subdocs_to_check:
for sub_object in doc.data[fld]:
# Grab the pm object from any subdocuments
try:
pm_docs.append(sub_object.data[pm_name])
except KeyError:
print 'failed to append pm of listed subdoc'
# Grab the protective marking object from the top level object
try:
pm_docs.append(doc.data[pm_name])
except AttributeError:
pm_docs.append(doc[pm_name])
except KeyError:
print 'Failed to get pm subdocument'
return pm_docs
#--------------------------------------------------------------------------------
def get_sub_field(object, field_name):
""" This is basically (realised after the fact) to catch the fact that
in some cases the object being passed in is a bundle and in others (namely my tests)
it is a list, which doesn't have the .data attribute """
if object == None:
content = None
else:
try:
content = object.data[field_name]
except AttributeError:
content = object[field_name]
return content
#--------------------------------------------------------------------------------
def get_max_pm(pm_docs):
""" Get the maximum protective marking elements """
max_class_rank = -1
max_class_full = 'PUBLIC'
max_class_short = 'PU'
max_nat_cavs_rank = 0
max_nat_cavs = ''
max_nat_cavs_members = []
codewords = []
codewords_short = []
descriptors = []
# Derive maximums (classifications and national caveats)
for doc in pm_docs:
        doc_class_rank = get_sub_field(doc, 'classification_rank')
        if doc_class_rank is None:
            continue
        doc_cavs_rank = get_sub_field(doc, 'national_caveats_rank')
        if doc_cavs_rank is None:
            continue
# CLASSIFICATION
# Is it higher than the current max rank value?
if not doc:
continue
if int(doc_class_rank) > max_class_rank:
max_class_full = get_sub_field(doc, 'classification')
max_class_short = get_sub_field(doc, 'classification_short')
max_class_rank = get_sub_field(doc, 'classification_rank')
if int(doc_cavs_rank) > max_nat_cavs_rank:
max_nat_cavs = get_sub_field(doc, 'national_caveats_primary_name')
max_nat_cavs_members = get_sub_field(doc, 'national_caveats_members')
max_nat_cavs_rank = get_sub_field(doc, 'national_caveats_rank')
# Concatenate the codewords - assumed not mutually exclusive
codewords += get_sub_field(doc, 'codewords')
codewords_short += get_sub_field(doc, 'codewords_short')
# Concatenate the descriptors - assumed not mutually exclusive
if get_sub_field(doc, 'descriptor') and get_sub_field(doc, 'descriptor').upper() not in descriptors:
descriptors.append(get_sub_field(doc, 'descriptor'))
#TODO: Just joining the descriptors together rather than handling them properly as a list
descriptors_out = ','.join(descriptors)
max_pm = documents.ProtectiveMarking(classification = max_class_full,
classification_short = max_class_short,
classification_rank = max_class_rank,
national_caveats_primary_name = max_nat_cavs,
national_caveats_members = max_nat_cavs_members,
national_caveats_rank = max_nat_cavs_rank,
codewords = list(set(codewords)), # Get a unique list
codewords_short = list(set(codewords_short)),
descriptor = descriptors_out)
return json.loads(max_pm.to_json())
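# Illustrative note (not part of the original module): each item passed to
# get_max_pm is expected to carry the fields read above, e.g.
#     {'classification': 'PUBLIC', 'classification_short': 'PU',
#      'classification_rank': 0, 'national_caveats_primary_name': '',
#      'national_caveats_members': [], 'national_caveats_rank': 0,
#      'codewords': [], 'codewords_short': [], 'descriptor': ''}
# and the function returns a single dict of the same shape holding the
# highest classification and caveats found, with codewords de-duplicated
# and descriptors joined into one comma-separated string.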
#--------------------------------------------------------------------------------
def calculate_informal_time(time_stamp):
""" Calculates an informal time and presents it as a string
Outside the classes as it may get used in several places."""
now = datetime.datetime.utcnow()
delta = now - time_stamp
if delta.days < 1 and delta.seconds <= 60:
informal_format = "just now"
elif delta.days < 1 and delta.seconds > 60 and delta.seconds <= 120:
informal_format = "1 minute ago"
elif delta.days < 1 and delta.seconds < 3600:
informal_format = "%s minutes ago" %(int(delta.seconds/60))
elif delta.days < 1 and delta.seconds/3600 <= 2:
informal_format = "%s hour ago" %(int(delta.seconds/3600))
elif delta.days < 1 and delta.seconds/3600 <= 24:
informal_format = "%s hours ago" %(int(delta.seconds/3600))
# If it was within the last week
elif delta.days <= 7:
informal_format = time_stamp.strftime("%A at %H:%M z")
# If it was last 6 months
elif delta.days <= 182:
informal_format = time_stamp.strftime("%a, %d %b at %H:%M z")
# If it was within the last year
elif delta.days <= 365:
informal_format = time_stamp.strftime("%d %B")
# If it was last year
else:
informal_format = time_stamp.strftime("%d %b '%y")
return informal_format
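# Illustrative sketch (not part of the original module):
#     >>> calculate_informal_time(datetime.datetime.utcnow()
#     ...                         - datetime.timedelta(seconds=90))
#     '1 minute ago'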
# ----------------------------------------------------------------------------------
class MLStripper(HTMLParser):
""" Used to strip out the tags from html content
Primarily for summary content text. """
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
# ----------------------------------------------------------------------------------
def strip_tags(html):
""" Remove the tags"""
s = MLStripper()
s.feed(html)
return s.get_data()
# ----------------------------------------------------------------------------------
def smart_truncate(content, length=100, suffix='...'):
""" Truncate a string based on words """
if len(content) <= length:
return content
else:
return ' '.join(content[:length+1].split(' ')[0:-1]) + suffix
# ----------------------------------------------------------------------------------
def derive_snippet(text_html, chrs=240):
""" Strips text of html tags and truncates on nearest full word """
if not text_html or text_html == '':
text = text_html
else:
stripped_text = strip_tags(text_html.replace('\n', ''))
text = smart_truncate(stripped_text, length=chrs, suffix='...')
return text
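# Illustrative sketch (not part of the original module):
#     >>> smart_truncate('the quick brown fox jumps', length=10)
#     'the quick...'
#     >>> derive_snippet('<p>the <b>quick</b> brown fox jumps</p>', chrs=10)
#     'the quick...'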
| agpl-3.0 | -840,781,207,488,749,700 | 35.510949 | 123 | 0.527729 | false |
pdamodaran/yellowbrick | tests/test_classifier/test_class_prediction_error.py | 1 | 4038 | # tests.test_classifier.test_class_prediction_error
# Testing for the ClassPredictionError visualizer
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Author: Rebecca Bilbro <rbilbro@districtdatalabs.com>
# Author: Larry Gray
# Created: Tue May 23 13:41:55 2017 -0700
#
# Copyright (C) 2017 District Data Labs
# For license information, see LICENSE.txt
#
# ID: test_rocauc.py [] benjamin@bengfort.com $
"""
Testing for the ClassPredictionError visualizer
"""
##########################################################################
## Imports
##########################################################################
import pytest
import matplotlib.pyplot as plt
from tests.dataset import DatasetMixin
from yellowbrick.classifier.class_prediction_error import *
from yellowbrick.exceptions import ModelError, YellowbrickValueError
from sklearn.svm import LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_multilabel_classification, make_classification
from tests.base import VisualTestCase
##########################################################################
## Data
##########################################################################
X, y = make_classification(
n_classes=4, n_informative=3, n_clusters_per_class=1, random_state=42
)
##########################################################################
## Tests
##########################################################################
class ClassPredictionErrorTests(VisualTestCase, DatasetMixin):
def test_integration_class_prediction_error(self):
"""
Assert no errors occur during class prediction error integration
"""
model = LinearSVC()
model.fit(X, y)
visualizer = ClassPredictionError(model, classes=["A", "B", "C", "D"])
visualizer.score(X, y)
visualizer.finalize()
# AppVeyor and Linux conda fail due to non-text-based differences
self.assert_images_similar(visualizer, tol=9.5)
def test_class_prediction_error_quickmethod(self):
"""
Test the ClassPreditionError quickmethod
"""
fig = plt.figure()
ax = fig.add_subplot()
clf = LinearSVC(random_state=42)
viz = class_prediction_error(clf, X, y, ax=ax, random_state=42)
self.assert_images_similar(viz)
def test_classes_greater_than_indices(self):
"""
Assert error when y and y_pred contain zero values for
        one of the specified classes
"""
model = LinearSVC()
model.fit(X, y)
with self.assertRaises(ModelError):
visualizer = ClassPredictionError(
model, classes=["A", "B", "C", "D", "E"]
)
visualizer.score(X, y)
def test_classes_less_than_indices(self):
"""
Assert error when there is an attempt to filter classes
"""
model = LinearSVC()
model.fit(X, y)
with self.assertRaises(NotImplementedError):
visualizer = ClassPredictionError(model, classes=["A"])
visualizer.score(X, y)
@pytest.mark.skip(reason="not implemented yet")
def test_no_classes_provided(self):
"""
Assert no errors when no classes are provided
"""
pass
def test_class_type(self):
"""
Test class must be either binary or multiclass type
"""
X, y = make_multilabel_classification()
model = RandomForestClassifier()
model.fit(X, y)
with self.assertRaises(YellowbrickValueError):
visualizer = ClassPredictionError(model)
visualizer.score(X, y)
def test_score_returns_score(self):
"""
Test that ClassPredictionError score() returns a score between 0 and 1
"""
# Create and fit the visualizer
visualizer = ClassPredictionError(LinearSVC())
visualizer.fit(X, y)
# Score the visualizer
s = visualizer.score(X, y)
assert 0 <= s <= 1
| apache-2.0 | -3,490,448,286,058,558,500 | 31.047619 | 80 | 0.573551 | false |
VirgilSecurity/virgil-sdk-python | virgil_sdk/tests/jwt/callback_jwt_provider_test.py | 1 | 3958 | # Copyright (C) 2016-2019 Virgil Security Inc.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
from time import sleep
from virgil_sdk.tests import BaseTest
from virgil_sdk.jwt import TokenContext, Jwt
from virgil_sdk.jwt.providers import CallbackJwtProvider, ConstAccessTokenProvider
from virgil_sdk.utils import Utils
class AccessTokenProviderTest(BaseTest):
def test_get_token_from_server(self):
# STC-24
call_back_provider = CallbackJwtProvider(self._get_token_from_server)
context = TokenContext("test_identity", "some_operation")
token1 = call_back_provider.get_token(context)
sleep(1)
token2 = call_back_provider.get_token(context)
self.assertNotEqual(token1.to_string(), token2.to_string())
self.assertNotEqual(token1, token2)
def test_get_invalid_token_from_server(self):
# STC-24
def failed_get_from_server(context):
return Utils.b64encode(os.urandom(30))
callback_provider = CallbackJwtProvider(failed_get_from_server)
context = TokenContext("test_identity", "some_operation")
self.assertRaises(ValueError, callback_provider.get_token, context)
def test_get_const_access_token(self):
# STC-37
token_from_server = self._get_token_from_server(
TokenContext(
Utils.b64encode(os.urandom(20)),
"some_operation"
)
)
jwt = Jwt.from_string(token_from_server)
const_token_provider = ConstAccessTokenProvider(jwt)
token1 = const_token_provider.get_token(
TokenContext(
Utils.b64encode(os.urandom(10)),
Utils.b64encode(os.urandom(10)),
True
)
)
token2 = const_token_provider.get_token(
TokenContext(
Utils.b64encode(os.urandom(10)),
Utils.b64encode(os.urandom(10)),
True
)
)
self.assertEqual(token1, token2)
def test_imported_token_compare_with_origin(self):
callback_provider = CallbackJwtProvider(self._get_token_from_server)
context = TokenContext(
Utils.b64encode(os.urandom(20)),
"some_operation"
)
token = callback_provider.get_token(context)
imported_token = Jwt.from_string(token.to_string())
self.assertTrue(token, imported_token)
| bsd-3-clause | -7,540,959,289,813,770,000 | 38.58 | 82 | 0.680647 | false |
emonty/oslo-packaging | pbr/tests/test_version.py | 1 | 2794 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Red Hat, Inc.
# Copyright 2012-2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import StringIO
import sys
from oslo.config import cfg
from pbr.tests import utils
from pbr import version
class DeferredVersionTestCase(utils.BaseTestCase):
def setUp(self):
super(DeferredVersionTestCase, self).setUp()
self.conf = cfg.ConfigOpts()
def test_cached_version(self):
class MyVersionInfo(version.VersionInfo):
def _get_version_from_pkg_resources(self):
return "5.5.5.5"
deferred_string = MyVersionInfo("openstack").\
cached_version_string()
self.conf([], project="project", prog="prog", version=deferred_string)
self.assertEquals("5.5.5.5", str(self.conf.version))
def test_print_cached_version(self):
class MyVersionInfo(version.VersionInfo):
def _get_version_from_pkg_resources(self):
return "5.5.5.5"
deferred_string = MyVersionInfo("openstack")\
.cached_version_string()
self.stubs.Set(sys, 'stderr', StringIO.StringIO())
self.assertRaises(SystemExit,
self.conf, ['--version'],
project="project",
prog="prog",
version=deferred_string)
self.assertEquals("5.5.5.5", sys.stderr.getvalue().strip())
def test_print_cached_version_with_long_string(self):
my_version = "11111222223333344444555556666677777888889999900000"
class MyVersionInfo(version.VersionInfo):
def _get_version_from_pkg_resources(self):
return my_version
deferred_string = MyVersionInfo("openstack")\
.cached_version_string()
for i in range(50):
self.stubs.Set(sys, 'stderr', StringIO.StringIO())
self.assertRaises(SystemExit,
self.conf, ['--version'],
project="project",
prog="prog",
version=deferred_string)
self.assertEquals(my_version, sys.stderr.getvalue().strip())
| apache-2.0 | -8,466,458,493,837,001,000 | 36.253333 | 78 | 0.616321 | false |
LinDA-tools/LindaWorkbench | linda/linda_app/multiple_choice_field.py | 1 | 3227 | from django.contrib.humanize.templatetags.humanize import apnumber
from django.core.exceptions import ValidationError
from django.template.defaultfilters import capfirst
__author__ = 'dimitris'
# See https://djangosnippets.org/snippets/1200/
from django.db import models
from django import forms
class MultiSelectFormField(forms.MultipleChoiceField):
widget = forms.SelectMultiple
def __init__(self, *args, **kwargs):
self.max_choices = kwargs.pop('max_choices', 0)
super(MultiSelectFormField, self).__init__(*args, **kwargs)
def clean(self, value):
if not value and self.required:
raise forms.ValidationError(self.error_messages['required'])
if value and self.max_choices and len(value) > self.max_choices:
raise forms.ValidationError('You must select a maximum of %s choices.'
% (apnumber(self.max_choices)))
return value
class MultiSelectField(models.Field):
__metaclass__ = models.SubfieldBase
def get_internal_type(self):
return "CharField"
def get_choices_default(self):
return self.get_choices(include_blank=False)
    def _get_FIELD_display(self, field):
        value = getattr(self, field.attname)
        choicedict = dict(field.choices)
        # join the display values for each selected choice
        return ",".join([choicedict.get(val, val) for val in value])
def formfield(self, **kwargs):
# don't call super, as that overrides default widget if it has choices
defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name),
'help_text': self.help_text, 'choices':self.choices}
try:
defaults['initial'] = self.initial
except:
pass
if self.has_default():
defaults['initial'] = self.get_default()
defaults.update(kwargs)
return MultiSelectFormField(**defaults)
def get_db_prep_value(self, value, connection, prepared=False):
if isinstance(value, str):
return value
elif isinstance(value, list):
return ",".join(value)
def to_python(self, value):
if isinstance(value, list):
return value
if not value:
return []
else:
return value.split(",")
def value_to_string(self, obj):
value = self._get_val_from_obj(obj)
return self.get_prep_value(value)
def contribute_to_class(self, cls, name):
super(MultiSelectField, self).contribute_to_class(cls, name)
if self.choices:
func = lambda self, fieldname = name, choicedict = dict(self.choices):",".join([choicedict.get(value,value) for value in getattr(self,fieldname)])
setattr(cls, 'get_%s_display' % self.name, func)
def validate(self, value, model_instance):
arr_choices = self.get_choices_selected(self.get_choices_default())
for opt_select in value:
if (opt_select not in arr_choices):
raise ValidationError(self.error_messages['invalid_choice'] % value)
return
def get_choices_selected(self, arr_choices=''):
if not arr_choices:
return False
        selected = []
        for choice_selected in arr_choices:
            selected.append(choice_selected[0])
        return selected
| mit | -5,814,798,102,426,203,000 | 33.709677 | 158 | 0.629687 | false |
NickleDave/hybrid-vocal-classifier | src/hvc/audiofileIO.py | 1 | 29192 | import warnings
import evfuncs
import numpy as np
import scipy.signal
from matplotlib.mlab import specgram
from .parse.ref_spect_params import refs_dict
class WindowError(Exception):
pass
class SegmentParametersMismatchError(Exception):
pass
def butter_bandpass(freq_cutoffs, samp_freq, order=8):
"""returns filter coefficients for Butterworth bandpass filter
Parameters
----------
freq_cutoffs: list
low and high frequencies of pass band, e.g. [500, 10000]
samp_freq: int
sampling frequency
order: int
of filter, default is 8
Returns
-------
b, a: ndarray, ndarray
adopted from the SciPy cookbook:
http://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
"""
nyquist = 0.5 * samp_freq
freq_cutoffs = np.asarray(freq_cutoffs) / nyquist
b, a = scipy.signal.butter(order, freq_cutoffs, btype="bandpass")
return b, a
def butter_bandpass_filter(data, samp_freq, freq_cutoffs, order=8):
"""applies Butterworth bandpass filter to data
Parameters
----------
data: ndarray
1-d array of raw audio data
samp_freq: int
sampling frequency
freq_cutoffs: list
low and high frequencies of pass band, e.g. [500, 10000]
order: int
of filter, default is 8
Returns
-------
data: ndarray
data after filtering
adopted from the SciPy cookbook:
http://scipy-cookbook.readthedocs.io/items/ButterworthBandpass.html
"""
b, a = butter_bandpass(freq_cutoffs, samp_freq, order=order)
return scipy.signal.lfilter(b, a, data)
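# A minimal usage sketch, not part of the original module; the 32 kHz
# sampling rate, tone frequencies, and pass band below are assumptions
# chosen only for illustration.
def _demo_butter_bandpass_filter():
    samp_freq = 32000
    t = np.arange(samp_freq) / samp_freq  # one second of audio
    # mix a 50 Hz hum with a 2 kHz tone; only the tone lies inside the band
    data = np.sin(2 * np.pi * 50 * t) + np.sin(2 * np.pi * 2000 * t)
    return butter_bandpass_filter(data, samp_freq, [500, 10000])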
class Spectrogram:
"""class for making spectrograms.
Abstracts out function calls so user just has to put spectrogram parameters
in YAML config file.
"""
def __init__(
self,
nperseg=None,
noverlap=None,
freq_cutoffs=(500, 10000),
window=None,
filter_func=None,
spect_func=None,
log_transform_spect=True,
thresh=-4.0,
remove_dc=True,
):
"""Spectrogram.__init__ function
Parameters
----------
nperseg : int
numper of samples per segment for FFT, e.g. 512
noverlap : int
number of overlapping samples in each segment
nperseg and noverlap are required for __init__
Other Parameters
----------------
freq_cutoffs : two-element list of integers
limits of frequency band to keep, e.g. [1000,8000]
Spectrogram.make keeps the band:
freq_cutoffs[0] >= spectrogram > freq_cutoffs[1]
Default is [500, 10000].
window : str
window to apply to segments
valid strings are 'Hann', 'dpss', None
            Hann -- Uses np.hanning with parameter M (window width) set to value of nperseg
            dpss -- Discrete prolate spheroidal sequence AKA Slepian.
                Uses scipy.signal.windows.dpss with M parameter equal to nperseg
                and NW parameter equal to 4/nperseg, as in [2]_.
Default is None.
filter_func : str
filter to apply to raw audio. valid strings are 'diff' or None
'diff' -- differential filter, literally np.diff applied to signal as in [1]_.
Default is None.
Note this is different from filters applied to isolate frequency band.
spect_func : str
which function to use for spectrogram.
valid strings are 'scipy' or 'mpl'.
'scipy' uses scipy.signal.spectrogram,
'mpl' uses matplotlib.matlab.specgram.
Default is 'scipy'.
log_transform_spect : bool
if True, applies np.log10 to spectrogram to increase range.
Default is True.
thresh : float
threshold for spectrogram.
All values below thresh are set to thresh;
increases contrast when visualizing spectrogram with a colormap.
Default is -4 (assumes log_transform_spect==True)
remove_dc : bool
if True, remove the zero-frequency component of the spectrogram,
i.e. the DC offset, which in a sound recording should be zero.
Default is True. Calculation of some features (e.g. cepstrum)
requires the DC component however.
References
----------
.. [1] Tachibana, Ryosuke O., Naoya Oosugi, and Kazuo Okanoya. "Semi-
automatic classification of birdsong elements using a linear support vector
machine." PloS one 9.3 (2014): e92584.
.. [2] Koumura, Takuya, and Kazuo Okanoya. "Automatic recognition of element
classes and boundaries in the birdsong with variable sequences."
PloS one 11.7 (2016): e0159188.
"""
if nperseg is None:
raise ValueError("nperseg requires a value for Spectrogram.__init__")
if noverlap is None:
raise ValueError("noverlap requires a value for Spectrogram.__init__")
if spect_func is None:
# switch to default
# can't have in args list because need to check above for
# conflict with default spectrogram functions for each ref
spect_func = "scipy"
if type(nperseg) != int:
raise TypeError(
"type of nperseg must be int, but is {}".format(type(nperseg))
)
else:
self.nperseg = nperseg
if type(noverlap) != int:
raise TypeError(
"type of noverlap must be int, but is {}".format(type(noverlap))
)
else:
self.noverlap = noverlap
if window is None:
self.window = None
else:
if type(window) != str:
raise TypeError(
"type of window must be str, but is {}".format(type(window))
)
else:
if window not in ["Hann", "dpss"]:
raise ValueError(
"{} is not a valid specification for window".format(window)
)
else:
if window == "Hann":
self.window = np.hanning(self.nperseg)
elif window == "dpss":
self.window = scipy.signal.windows.dpss(
self.nperseg, 4 / self.nperseg
)
if freq_cutoffs is None:
self.freqCutoffs = None
else:
if freq_cutoffs == (500, 10000):
# if default, convert to list
# don't want to have a mutable list as the default
# because mutable defaults can give rise to nasty bugs
freq_cutoffs = list(freq_cutoffs)
if type(freq_cutoffs) != list:
raise TypeError(
"type of freq_cutoffs must be list, but is {}".format(
type(freq_cutoffs)
)
)
elif len(freq_cutoffs) != 2:
raise ValueError(
"freq_cutoffs list should have length 2, but length is {}".format(
len(freq_cutoffs)
)
)
elif not all([type(val) == int for val in freq_cutoffs]):
raise ValueError("all values in freq_cutoffs list must be ints")
else:
self.freqCutoffs = freq_cutoffs
        if freq_cutoffs is not None and filter_func is None:
            # default to a Butterworth bandpass when a pass band is given;
            # set the local variable so the validation below stores it
            filter_func = "butter_bandpass"
        if filter_func is not None and type(filter_func) != str:
            raise TypeError(
                "type of filter_func must be str, but is {}".format(type(filter_func))
            )
        elif filter_func not in ["diff", "bandpass_filtfilt", "butter_bandpass", None]:
            raise ValueError(
                "string '{}' is not valid for filter_func. Valid values are: "
                "'diff', 'bandpass_filtfilt', 'butter_bandpass', or None.".format(
                    filter_func
                )
            )
else:
self.filterFunc = filter_func
if type(spect_func) != str:
raise TypeError(
"type of spect_func must be str, but is {}".format(type(spect_func))
)
elif spect_func not in ["scipy", "mpl"]:
raise ValueError(
"string '{}' is not valid for filter_func. "
"Valid values are: 'scipy' or 'mpl'.".format(spect_func)
)
else:
self.spectFunc = spect_func
if type(log_transform_spect) is not bool:
raise ValueError(
"Value for log_transform_spect is {}, but"
" it must be bool.".format(type(log_transform_spect))
)
else:
self.logTransformSpect = log_transform_spect
        if type(thresh) is not float and thresh is not None:
            try:
                thresh = float(thresh)
                self.thresh = thresh
            except (TypeError, ValueError):
                raise ValueError(
                    "Value for thresh is {}, but"
                    " it must be float.".format(type(thresh))
                )
        else:
            self.thresh = thresh
if type(remove_dc) is not bool:
raise TypeError(
"Value for remove_dc should be boolean, not {}".format(type(remove_dc))
)
else:
self.remove_dc = remove_dc
def make(self, raw_audio, samp_freq):
"""makes spectrogram using assigned properties
Parameters
----------
raw_audio : 1-d numpy array
raw audio waveform
samp_freq : integer scalar
sampling frequency in Hz
Returns
-------
spect : 2-d numpy array
freq_bins : 1-d numpy array
time_bins : 1-d numpy array
"""
if self.filterFunc == "diff":
raw_audio = np.diff(
raw_audio
) # differential filter_func, as applied in Tachibana Okanoya 2014
elif self.filterFunc == "bandpass_filtfilt":
raw_audio = evfuncs.bandpass_filtfilt(
raw_audio, samp_freq, self.freqCutoffs
)
elif self.filterFunc == "butter_bandpass":
raw_audio = butter_bandpass_filter(raw_audio, samp_freq, self.freqCutoffs)
try: # try to make spectrogram
if self.spectFunc == "scipy":
if self.window is not None:
freq_bins, time_bins, spect = scipy.signal.spectrogram(
raw_audio,
samp_freq,
window=self.window,
nperseg=self.nperseg,
noverlap=self.noverlap,
)
else:
freq_bins, time_bins, spect = scipy.signal.spectrogram(
raw_audio,
samp_freq,
nperseg=self.nperseg,
noverlap=self.noverlap,
)
elif self.spectFunc == "mpl":
# note that the matlab specgram function returns the STFT by default
# whereas the default for the matplotlib.mlab version of specgram
# returns the PSD. So to get the behavior of matplotlib.mlab.specgram
# to match, mode must be set to 'complex'
# I think I determined empirically at one point (by staring at single
# cases) that mlab.specgram gave me values that were closer to Matlab's
# specgram function than scipy.signal.spectrogram
# Matlab's specgram is what Tachibana used in his original feature
# extraction code. So I'm maintaining the option to use it here.
# 'mpl' is set to return complex frequency spectrum,
# not power spectral density,
# because some tachibana features (based on CUIDADO feature set)
# need to use the freq. spectrum before taking np.abs or np.log10
if self.window is not None:
spect, freq_bins, time_bins = specgram(
raw_audio,
NFFT=self.nperseg,
Fs=samp_freq,
window=self.window,
noverlap=self.noverlap,
mode="complex",
)
else:
spect, freq_bins, time_bins = specgram(
raw_audio,
NFFT=self.nperseg,
Fs=samp_freq,
noverlap=self.noverlap,
mode="complex",
)
except ValueError as err: # if `try` to make spectrogram raised error
if str(err) == "window is longer than input signal":
raise WindowError()
else: # unrecognized error
raise
if self.remove_dc:
# remove zero-frequency component
freq_bins = freq_bins[1:]
spect = spect[1:, :]
# we take the absolute magnitude
# because we almost always want just that for our purposes
spect = np.abs(spect)
if self.logTransformSpect:
spect = np.log10(spect) # log transform to increase range
if self.thresh is not None:
spect[spect < self.thresh] = self.thresh
# below, I set freq_bins to >= freq_cutoffs
# so that Koumura default of [1000,8000] returns 112 freq. bins
if self.freqCutoffs is not None:
f_inds = np.nonzero(
(freq_bins >= self.freqCutoffs[0]) & (freq_bins <= self.freqCutoffs[1])
)[
0
] # returns tuple
freq_bins = freq_bins[f_inds]
spect = spect[f_inds, :]
return spect, freq_bins, time_bins
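# A minimal usage sketch, not part of the original module: parameter values
# below (32 kHz white noise, nperseg=512, noverlap=256) are assumptions
# chosen only for illustration.
def _demo_spectrogram_make():
    rng = np.random.RandomState(0)
    raw_audio = rng.randn(32000)  # one second of white noise at 32 kHz
    spect_maker = Spectrogram(nperseg=512, noverlap=256)
    spect, freq_bins, time_bins = spect_maker.make(raw_audio, 32000)
    # freq_bins only spans the default 500-10000 Hz pass band
    return spect.shape, freq_bins[0], freq_bins[-1]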
class Segmenter:
def __init__(self, threshold=5000, min_syl_dur=0.02, min_silent_dur=0.002):
"""__init__ for Segmenter
Parameters
----------
        threshold : int
            value above which amplitude is considered part of a segment. default is 5000.
        min_syl_dur : float
            minimum duration of a segment. default is 0.02, i.e. 20 ms.
        min_silent_dur : float
            minimum duration of silent gap between segments. default is 0.002, i.e. 2 ms.
"""
self.threshold = threshold
self.min_syl_dur = min_syl_dur
self.min_silent_dur = min_silent_dur
    @staticmethod
    def compute_amp(spect):
        """
        compute amplitude of spectrogram
        Assumes the values for frequencies are power spectral density (PSD).
        Sums PSD for each time bin, i.e. in each column.
        Inputs:
            spect -- spectrogram, e.g. as returned by Spectrogram.make
        Returns:
            amp -- amplitude
        """
        return np.sum(spect, axis=0)
def segment(self, array_to_segment, method, time_bins=None, samp_freq=None):
"""Divides songs into segments based on threshold crossings of amplitude.
Returns onsets and offsets of segments, corresponding to syllables in a song.
Parameters
----------
array_to_segment : ndarray
Either amplitude of power spectral density, returned by compute_amp,
or smoothed amplitude of filtered audio, returned by evfuncs.smooth_data
time_bins : 1-d numpy array
time in s, must be same length as log amp. Returned by Spectrogram.make.
samp_freq : int
sampling frequency
method : str
{'evsonganaly','psd'}
Method to use.
evsonganaly -- gives same result as segmentation by evsonganaly.m
(a Matlab GUI for labeling song developed in the Brainard lab)
Uses smoothed filtered amplitude of audio, as returned by evfuncs.smooth_data
psd -- uses power spectral density of spectrogram, as returned by _compute_amp
Returns
-------
segment_dict : dict
with following key, value pairs
onsets_Hz : ndarray
onset times given in sample number (Hz)
offsets_Hz : ndarray
offset times given in sample number (Hz)
            onsets_s : ndarray
                onset times given in seconds
            offsets_s : ndarray
                offset times given in seconds
So for syllable 1 of a song, its onset is onsets[0] and its offset is offsets[0].
To get that segment of the spectrogram, you'd take spect[:,onsets[0]:offsets[0]]
"""
if time_bins is None and samp_freq is None:
raise ValueError(
"Values needed for either time_bins or samp_freq parameters "
"needed to segment song."
)
if time_bins is not None and samp_freq is not None:
raise ValueError(
"Can only use one of time_bins or samp_freq to segment song, "
"but values were passed for both parameters"
)
if method == "evsonganaly":
if time_bins is not None:
raise ValueError("cannot use time_bins with method 'evsonganaly'")
if samp_freq is None:
raise ValueError("must provide samp_freq with method 'evsonganaly'")
if array_to_segment.ndim != 1:
raise ValueError(
"If method is 'evsonganaly', then array_to_segment "
"must be one-dimensional (i.e., raw audio signal)"
)
if method == "psd":
if samp_freq is not None:
raise ValueError("cannot use samp_freq with method 'psd'")
if time_bins is None:
raise ValueError("must provide time_bins with method 'psd'")
if array_to_segment.ndim != 2:
raise ValueError(
"If method is 'psd', then array_to_segment "
"must be two-dimensional (i.e., a spectrogram)"
)
if array_to_segment.shape[-1] != time_bins.shape[-1]:
raise ValueError(
"if using time_bins, "
"array_to_segment and time_bins must have same length"
)
if method == "evsonganaly":
amp = evfuncs.smooth_data(
array_to_segment, samp_freq, refs_dict["evsonganaly"]["freq_cutoffs"]
)
elif method == "psd":
amp = self.compute_amp(array_to_segment)
above_th = amp > self.threshold
h = [1, -1]
# convolving with h causes:
# +1 whenever above_th changes from 0 to 1
# and -1 whenever above_th changes from 1 to 0
above_th_convoluted = np.convolve(h, above_th)
if time_bins is not None:
# if amp was taken from time_bins using compute_amp
# note that np.where calls np.nonzero which returns a tuple
# but numpy "knows" to use this tuple to index into time_bins
onsets_s = time_bins[np.where(above_th_convoluted > 0)]
offsets_s = time_bins[np.where(above_th_convoluted < 0)]
elif samp_freq is not None:
# if amp was taken from smoothed audio using smooth_data
# here, need to get the array out of the tuple returned by np.where
# **also note we avoid converting from samples to s
# until *after* we find segments**
onsets_Hz = np.where(above_th_convoluted > 0)[0]
offsets_Hz = np.where(above_th_convoluted < 0)[0]
onsets_s = onsets_Hz / samp_freq
offsets_s = offsets_Hz / samp_freq
if onsets_s.shape[0] < 1 or offsets_s.shape[0] < 1:
return None, None # because no onsets or offsets in this file
# get rid of silent intervals that are shorter than min_silent_dur
silent_gap_durs = onsets_s[1:] - offsets_s[:-1] # duration of silent gaps
keep_these = np.nonzero(silent_gap_durs > self.min_silent_dur)
onsets_s = np.concatenate((onsets_s[0, np.newaxis], onsets_s[1:][keep_these]))
offsets_s = np.concatenate(
(offsets_s[:-1][keep_these], offsets_s[-1, np.newaxis])
)
if "onsets_Hz" in locals():
onsets_Hz = np.concatenate(
(onsets_Hz[0, np.newaxis], onsets_Hz[1:][keep_these])
)
offsets_Hz = np.concatenate(
(offsets_Hz[:-1][keep_these], offsets_Hz[-1, np.newaxis])
)
# eliminate syllables with duration shorter than min_syl_dur
syl_durs = offsets_s - onsets_s
keep_these = np.nonzero(syl_durs > self.min_syl_dur)
onsets_s = onsets_s[keep_these]
offsets_s = offsets_s[keep_these]
if "onsets_Hz" in locals():
onsets_Hz = onsets_Hz[keep_these]
offsets_Hz = offsets_Hz[keep_these]
segment_dict = {"onsets_s": onsets_s, "offsets_s": offsets_s}
if "onsets_Hz" in locals():
segment_dict["onsets_Hz"] = onsets_Hz
segment_dict["offsets_Hz"] = offsets_Hz
return segment_dict
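# A minimal usage sketch, not part of the original class: the toy spectrogram
# below is an assumption built so that its summed power crosses the default
# threshold twice, yielding two "syllables".
def _demo_segmenter():
    seg = Segmenter()
    time_bins = np.linspace(0.0, 1.0, 1000)
    spect = np.zeros((10, 1000))
    spect[:, 100:200] = 1000.0  # summed power 10000 > default threshold 5000
    spect[:, 400:500] = 1000.0
    # returns a dict with 'onsets_s' near [0.1, 0.4] and 'offsets_s' near [0.2, 0.5]
    return seg.segment(spect, method="psd", time_bins=time_bins)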
class Syllable:
"""
syllable object, returned by make_syl_spect.
Properties
----------
syl_audio : 1-d numpy array
raw waveform from audio file
sampfreq : integer
sampling frequency in Hz as determined by scipy.io.wavfile function
spect : 2-d m by n numpy array
spectrogram as computed by Spectrogram.make(). Each of the m rows is a frequency bin,
and each of the n columns is a time bin. Value in each bin is power at that frequency and time.
nfft : integer
number of samples used for each FFT
overlap : integer
number of samples that each consecutive FFT window overlapped
time_bins : 1d vector
values are times represented by each bin in s
freq_bins : 1d vector
values are power spectral density in each frequency bin
index: int
index of this syllable in song.syls.labels
label: int
label of this syllable from song.syls.labels
"""
def __init__(
self,
syl_audio,
samp_freq,
spect,
nfft,
overlap,
freq_cutoffs,
freq_bins,
time_bins,
index,
label,
):
self.sylAudio = syl_audio
self.sampFreq = samp_freq
self.spect = spect
self.nfft = nfft
self.overlap = overlap
self.freqCutoffs = freq_cutoffs
self.freqBins = freq_bins
self.timeBins = time_bins
self.index = index
self.label = label
def make_syls(
raw_audio,
samp_freq,
spect_maker,
labels,
onsets_Hz,
offsets_Hz,
labels_to_use="all",
syl_spect_width=-1,
return_as_stack=False,
):
"""Make spectrograms from syllables.
This method isolates making spectrograms from selecting syllables
to use so that spectrograms can be loaded 'lazily', e.g., if only
duration features are being extracted that don't require spectrograms.
Parameters
----------
raw_audio : ndarray
samp_freq : int
labels : str, list, or ndarray
    onsets_Hz : ndarray
    offsets_Hz : ndarray
    labels_to_use : str or numpy ndarray
if ndarray, must be of type bool and same length as labels, and
will be used to index into labels
syl_spect_width : float
Optional parameter to set constant duration for each spectrogram of a
syllable, in seconds. E.g., 0.05 for an average 50 millisecond syllable.
Used for creating inputs to neural network where each input
must be of a fixed size.
Default value is -1; in this case, the width of the spectrogram will
be the duration of the syllable as determined by the segmentation
algorithm, i.e. the onset and offset that are stored in an annotation file.
If a different value is given, then the duration of each spectrogram
will be that value. Note that if any individual syllable has a duration
greater than syl_spect_duration, the function raises an error.
"""
if syl_spect_width > 0:
if syl_spect_width > 1:
warnings.warn(
"syl_spect_width set greater than 1; note that "
"this parameter is in units of seconds, so using "
"a value greater than one will make it hard to "
"center the syllable/segment of interest within"
"the spectrogram, and additionally consume a lot "
"of memory."
)
syl_spect_width_Hz = int(syl_spect_width * samp_freq)
if syl_spect_width_Hz > raw_audio.shape[-1]:
raise ValueError(
"syl_spect_width, converted to samples, " "is longer than song file."
)
if type(labels) not in [str, list, np.ndarray]:
raise TypeError(
"labels must be of type str, list, or numpy ndarray, " "not {}".type(labels)
)
if type(labels) is str:
labels = list(labels)
if type(labels) is list:
labels = np.asarray(labels)
if type(labels_to_use) is str:
if labels_to_use == "all":
use_these_labels_bool = np.ones((labels.shape)).astype(bool)
else:
use_these_labels_bool = np.asarray(
[label in labels_to_use for label in labels]
)
elif type(labels_to_use) is np.ndarray and labels_to_use.dtype == bool:
if labels_to_use.ndim > 2:
raise ValueError(
"if labels_to_use is array, should not have " "more than two dimensions"
)
else:
labels_to_use = np.squeeze(labels_to_use)
if labels_to_use.shape[-1] != len(labels):
raise ValueError(
"if labels_to_use is an array, must have " "same length as labels"
)
elif type(labels_to_use) is np.ndarray and labels_to_use.dtype != bool:
raise TypeError("if labels_to_use is an array, must be of type bool")
else:
raise TypeError(
"labels_to_use should be a string or a boolean numpy "
"array, not type {}".format(type(labels_to_use))
)
all_syls = []
for ind, (label, onset, offset) in enumerate(zip(labels, onsets_Hz, offsets_Hz)):
if "syl_spect_width_Hz" in locals():
syl_duration_in_samples = offset - onset
if syl_duration_in_samples > syl_spect_width_Hz:
raise ValueError(
"syllable duration of syllable {} with label {} "
"width specified for all syllable spectrograms.".format(ind, label)
)
if "syl_spect_width_Hz" in locals():
width_diff = syl_spect_width_Hz - syl_duration_in_samples
# take half of difference between syllable duration and spect width
# so one half of 'empty' area will be on one side of spect
# and the other half will be on other side
# i.e., center the spectrogram
left_width = int(round(width_diff / 2))
right_width = width_diff - left_width
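            # e.g. (hypothetical): width_diff = 10 -> left_width = 5, right_width = 5;
            # for odd differences the extra sample goes to whichever side rounding
            # dictates, but left_width + right_width always equals width_diff.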
if left_width > onset: # if duration before onset is less than left_width
# (could happen with first onset)
syl_audio = raw_audio[0:syl_spect_width_Hz]
elif offset + right_width > raw_audio.shape[-1]:
# if right width greater than length of file
syl_audio = raw_audio[-syl_spect_width_Hz:]
else:
syl_audio = raw_audio[onset - left_width : offset + right_width]
else:
syl_audio = raw_audio[onset:offset]
try:
spect, freq_bins, time_bins = spect_maker.make(syl_audio, samp_freq)
except WindowError as err:
warnings.warn(
"Segment {0} with label {1} "
"not long enough for window function"
" set with current spect_params.\n"
"spect will be set to nan.".format(ind, label)
)
spect, freq_bins, time_bins = (np.nan, np.nan, np.nan)
curr_syl = Syllable(
syl_audio=syl_audio,
samp_freq=samp_freq,
spect=spect,
nfft=spect_maker.nperseg,
overlap=spect_maker.noverlap,
freq_cutoffs=spect_maker.freqCutoffs,
freq_bins=freq_bins,
time_bins=time_bins,
index=ind,
label=label,
)
all_syls.append(curr_syl)
if return_as_stack:
# stack with dimensions (samples, height, width)
return np.stack([syl.spect for syl in all_syls], axis=0)
else:
return all_syls
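# Hypothetical usage sketch (not part of the original module) -- assumes a
# spect_maker object exposing make()/nperseg/noverlap/freqCutoffs as used above,
# plus onsets_Hz/offsets_Hz arrays from a prior segmentation step:
#
#   import scipy.io.wavfile as wavfile
#   samp_freq, raw_audio = wavfile.read("song.wav")
#   syls = make_syls(raw_audio, samp_freq, spect_maker, labels="abab",
#                    onsets_Hz=onsets_Hz, offsets_Hz=offsets_Hz,
#                    labels_to_use="a")
#   stack = make_syls(raw_audio, samp_freq, spect_maker, "abab",
#                     onsets_Hz, offsets_Hz, return_as_stack=True)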
| bsd-3-clause | 1,072,147,749,275,268,000 | 37.259502 | 103 | 0.555666 | false |
tatsuhirosatou/JMdictDB | web/cgi/jbedits.py | 1 | 2280 | #!/usr/bin/env python3
#######################################################################
# This file is part of JMdictDB.
# Copyright (c) 2010 Stuart McGraw
#
# JMdictDB is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation; either version 2 of the License,
# or (at your option) any later version.
#
# JMdictDB is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with JMdictDB; if not, write to the Free Software Foundation,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
#######################################################################
import sys, cgi, re, os
sys.path.extend (['../lib','../../python/lib','../python/lib'])
import logger; from logger import L; logger.enable()
import jdb, jmcgi
Enc = 'utf-8'
def main (args, opts):
jdb.reset_encoding (sys.stdout, 'utf-8')
errs = []
try: form, svc, dbg, cur, sid, sess, parms, cfg = jmcgi.parseform()
except Exception as e: jmcgi.err_page ([str(e)])
# The filesystem path of the directory containing editdata files.
filesdir = cfg['web']['EDITDATA_DIR']
# The URL for the directory containing editdata files.
httpdir = cfg['web']['EDITDATA_URL']
        fv = lambda x: (form.getfirst(x) or '')  # getfirst() returns str under Python 3; no decode needed
is_editor = jmcgi.is_editor (sess)
allfiles = sorted (os.listdir (filesdir))
editfiles = [x for x in allfiles if re.search (r'[0-9]{5}\.dat$', x) ]
logfiles = [x for x in allfiles if re.search (r'((ok)|(bad))\.log$', x) ]
jmcgi.jinja_page ('jbedits.jinja', parms=parms,
filesdir=filesdir, httpdir=httpdir,
editfiles=editfiles, logfiles=logfiles,
svc=svc, dbg=dbg, sid=sid, session=sess, cfg=cfg,
this_page='jbedits.py')
if __name__ == '__main__':
args, opts = jmcgi.args()
main (args, opts)
| gpl-2.0 | 4,588,567,772,028,226,600 | 41.222222 | 81 | 0.591228 | false |
ajfazan/tools | scripts/compute_band_correlation.py | 1 | 1934 | #!/usr/bin/env osgeo_python
from osgeo import gdal
import numpy as np
import math
import sys
import os
gdal.UseExceptions()
def Usage( code ):
print( "Usage:" )
print( "\t%s <IMG1> <IMG2>" % os.path.basename( sys.argv[0] ) )
sys.exit( code )
def OpenImage( filename ):
handle = gdal.Open( filename )
if handle is None:
print( "Unable to open image %s" % filename )
sys.exit( 1 )
return handle
def DisplayMetadata( himg ):
n = himg.RasterCount
print( "Band count: %s" % n )
for band in range( 1, n + 1 ):
channel = himg.GetRasterBand( band )
if channel is None:
continue
print( "NODATA VALUE = ", channel.GetNoDataValue() )
print( "MINIMUM = ", channel.GetMinimum() )
print( "MAXIMUM = ", channel.GetMaximum() )
print( "SCALE = ", channel.GetScale() )
print( "UNIT TYPE = ", channel.GetUnitType() )
def ComputeCorr( img1, img2 ):
n = img1.RasterCount
if n != img2.RasterCount:
print( "Band count mismatch" )
return None
for band in range( 1, n + 1 ):
x = np.array( img1.GetRasterBand( band ).ReadAsArray() )
y = np.array( img2.GetRasterBand( band ).ReadAsArray() )
k = x.size
if k == y.size:
x = ( x - x.mean() ) / k
y = ( y - y.mean() ) / k
s_xx = np.sum( x * x, dtype = np.float64 )
s_yy = np.sum( y * y, dtype = np.float64 )
s_xy = np.sum( x * y, dtype = np.float64 )
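      # Pearson correlation coefficient: the common 1/k factors cancel in
      # s_xy / (sqrt(s_xx) * sqrt(s_yy)), so the result is scale-invariant.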
try:
corr = s_xy / ( math.sqrt( s_xx ) * math.sqrt( s_yy ) )
print( "Correlation coefficient [%d|%d]: %f" % ( band, band, corr ) )
      except ( ValueError, ZeroDivisionError ) as e:
        print( "Unable to compute correlation: %s" % e )
def main( img1, img2 ):
h1 = OpenImage( img1 )
h2 = OpenImage( img2 )
# DisplayMetadata( h1 )
# DisplayMetadata( h2 )
ComputeCorr( h1, h2 )
if __name__ == "__main__":
if len( sys.argv ) != 3:
Usage( 1 )
main( sys.argv[1], sys.argv[2] )
| gpl-2.0 | -4,786,940,266,292,448,000 | 19.145833 | 77 | 0.571872 | false |
lcpt/xc | verif/tests/preprocessor/cad/test_sec_lineas_01.py | 1 | 2319 | # -*- coding: utf-8 -*-
from __future__ import division
import xc_base
import geom
import xc
import math
import os
from solution import predefined_solutions
from model import predefined_spaces
from materials import typical_materials
__author__= "Luis C. Pérez Tato (LCPT)"
__copyright__= "Copyright 2014, LCPT"
__license__= "GPL"
__version__= "3.0"
__email__= "l.pereztato@gmail.com"
NumDiv= 8
CooMax= 10
# Problem type
feProblem= xc.FEProblem()
preprocessor= feProblem.getPreprocessor
nodes= preprocessor.getNodeHandler
modelSpace= predefined_spaces.SolidMechanics3D(nodes)
# Materials definition
elast= typical_materials.defElasticMaterial(preprocessor, "elast",3000)
nodes.newSeedNode()
seedElemHandler= preprocessor.getElementHandler.seedElemHandler
seedElemHandler.dimElem= 3 #Bars defined in a three dimensional space.
seedElemHandler.defaultMaterial= "elast"
seedElemHandler.defaultTag= 1 #Number for the next element will be 1.
truss= seedElemHandler.newElement("Truss",xc.ID([0,0]))
truss.area= 10
points= preprocessor.getMultiBlockTopology.getPoints
pt= points.newPntIDPos3d(1,geom.Pos3d(0.0,0.0,0.0))
pt= points.newPntIDPos3d(2,geom.Pos3d(CooMax/2.0,CooMax/2.0,CooMax/2.0))
pt= points.newPntIDPos3d(3,geom.Pos3d(CooMax,CooMax,CooMax))
lines= preprocessor.getMultiBlockTopology.getLines
lines.defaultTag= 1
l1= lines.newLineSequence()
l1.addPoints(xc.ID([1,2,3]))
l1.nDiv= NumDiv
setTotal= preprocessor.getSets.getSet("total")
setTotal.genMesh(xc.meshDir.I)
nnodPline= l1.getNumNodes
nelemPline= l1.getNumElements
'''
print "number of nodes: ",nnod
nodes= preprocessor.getNodeHandler
for_each
print " node: ",tag," x= ",coord[0],", y= ",coord[1],", z= ",coord[2]
print "number of elements: ",nelem
'''
elements= setTotal.getElements
ratio1= 0.0
vteor2= (CooMax/NumDiv)**2
lteor= math.sqrt(3*vteor2)
for e in elements:
#print " elem: ",tag," nod. I: ",nod[0].tag," nod. J: ",nod[1].tag," L= ",e.getL()
ratio1+= (e.getL()-lteor)/lteor
ratio2= (nnodPline-(NumDiv+1))
ratio3= (nelemPline-NumDiv)
'''
print "ratio1: ", ratio1
print "ratio2= ", ratio2
print "ratio3= ", ratio3
'''
import os
from miscUtils import LogMessages as lmsg
fname= os.path.basename(__file__)
if (ratio1<1e-4) & (ratio2<=1e-15) & (ratio3<=1e-15):
print "test ",fname,": ok."
else:
lmsg.error(fname+' ERROR.')
| gpl-3.0 | -3,117,599,558,102,291,500 | 25.044944 | 85 | 0.735979 | false |
halbbob/dff | api/vfs/vfs.py | 1 | 4280 | # DFF -- An Open Source Digital Forensics Framework
# Copyright (C) 2009-2011 ArxSys
# This program is free software, distributed under the terms of
# the GNU General Public License Version 2. See the LICENSE file
# at the top of the source tree.
#
# See http://www.digital-forensic.org for more information about this
# project. Please do not directly contact any of the maintainers of
# DFF for assistance; the project provides a web site, mailing lists
# and IRC channels for your use.
#
# Author(s):
# Solal J. <sja@digital-forensic.org>
#
from libvfs import *
import types
class vfs():
def __init__(self):
self.libvfs = VFS.Get()
def walk(self, top, topdown=True, depth=-1):
if depth == 0:
return
if type(top) == types.StringType:
node = self.getnode(top.replace("//", "/"))
elif isinstance(top, Node):
node = top
else:
raise ValueError("top must be a string or a Node")
if node == None:
return
children = node.children()
dirs, files = [], []
for child in children:
if type(top) == types.StringType:
item = child.name()
elif isinstance(top, Node):
item = child
if child.hasChildren() or child.isDir():
if child.size():
files.append(item)
dirs.append(item)
else:
files.append(item)
#if child.size() > 0:
# files.append(item)
if topdown:
yield top, dirs, files
for name in dirs:
if type(top) == types.StringType:
newtop = str(top + "/" + name).replace("//", "/")
elif isinstance(top, Node):
newtop = name
for x in self.walk(newtop, topdown, depth-1):
yield x
if not topdown:
yield top, dirs, files
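    # Illustrative usage (hypothetical mount point), mirroring os.walk semantics:
    #   v = vfs()
    #   for top, dirs, files in v.walk("/evidence/disk1", depth=2):
    #       print top, dirs, files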
def getnode(self, path):
if not path:
return self.getcwd()
#if type(path) != type(""):
#return path
if path and path[0] != "/":
abspath = self.getcwd().absolute()
path = str(abspath + "/" + path).replace("//", "/")
# Avoid trailing '/'
while len(path) > 1 and path[-1:] == "/":
path = path[:-1]
if type(path) == unicode:
path = str(path)
node = self.libvfs.GetNode(path)
if node:
return node
return None
def open(self, path):
if type(path) == type(""):
node = self.getnode(path)
if node: #and node.is_file:
return node.open()
else:
return
def gettree(self):
return self.libvfs.GetTree()
def getcwd(self):
return self.libvfs.GetCWD()
def setcwd(self, path):
self.libvfs.cd(path)
def deletenode(self, node):
return self.libvfs.DeleteNode(node)
# return a Node's Dictionary with directory of nodeDir
def listingDirectories(self, nodeDir):
if nodeDir == False:
return False
listing = []
list = nodeDir.children()
for i in list:
if i.hasChildren():# or not i.is_file :
listing.append(i)
return listing
# return a Node's Dictionary with files and directory of nodeDir
def listingDirectoriesAndFiles(self, nodeDir):
if nodeDir == False:
return False
if not nodeDir.hasChildren(): #and nodeDir.is_file:
return False
listing = []
list = nodeDir.children()
for i in list:
listing.append(i)
return listing
def getInfoDirectory(self, nodeDir):
list = nodeDir.children()
info = {}
info['size'] = 0
info['item'] = 0
for i in list :
if i.hasChildren(): #or not i.is_file :
info_child = self.getInfoDirectory(i)
info['size'] = info['size'] + info_child['size']
info['item'] = info['item'] + info_child['item'] + 1
else :
info['item'] = info['item'] + 1
info['size'] = info['size'] + i.size()
return info
def link(self, node, dest):
pass
#Link(node, dest)
| gpl-2.0 | 2,682,501,138,731,059,700 | 28.93007 | 69 | 0.530374 | false |
pirate42/docc | tests/test_docc_api_image.py | 1 | 5003 | # coding=utf-8
import unittest
from mock import MagicMock
from docc.image import Image
from docc.credentials import Credentials
from docc.service import Service
class TestImage(unittest.TestCase):
def test___init__(self):
image = Image(3, "def", "abc")
self.assertEquals(3, image.id)
self.assertEquals("def", image.name)
self.assertEquals("abc", image.distribution)
def test___repr__(self):
        image = Image(3, "def", "abc")
        self.assertEqual("<3: def>", image.__repr__())
def test___str__(self):
        image = Image(3, "def", "abc")
        self.assertEqual("3: def, abc", image.__str__())
def test_get(self):
credentials = Credentials("abc", "def")
service = Service(credentials)
response = {
'status': 'OK',
'image': {'name': 'Name 1',
'id': 1,
'distribution': "Ubuntu 10.04"
},
}
service.get = MagicMock(return_value=response)
image = Image.get(service, 1)
self.assertEquals(image.id, 1)
self.assertEquals(image.name, 'Name 1')
def test_images(self):
credentials = Credentials("abc", "def")
service = Service(credentials)
response = {
'status': 'OK',
'images': [
{'name': 'Name 1',
'id': 1,
'distribution': "Ubuntu 10.04"
},
{'name': 'Name 2',
'id': 2,
'distribution': "Ubuntu 12.04"
},
]
}
mock = MagicMock(return_value=response)
service.get = mock
images = Image.images(service)
mock.assert_called_once_with('images')
self.assertEquals(len(images), 2)
def test_global_images(self):
credentials = Credentials("abc", "def")
service = Service(credentials)
response = {
'status': 'OK',
'images': [
{'name': 'Name 1',
'id': 1,
'distribution': "Ubuntu 10.04"
},
{'name': 'Name 2',
'id': 2,
'distribution': "Ubuntu 12.04"
},
]
}
mock = MagicMock(return_value=response)
service.get = mock
images = Image.global_images(service)
mock.assert_called_once_with('images', {'filter': 'global'})
self.assertEquals(len(images), 2)
def test_my_images(self):
credentials = Credentials("abc", "def")
service = Service(credentials)
response = {
'status': 'OK',
'images': [
{'name': 'Name 1',
'id': 1,
'distribution': "Ubuntu 10.04"
},
{'name': 'Name 2',
'id': 2,
'distribution': "Ubuntu 12.04"
},
]
}
mock = MagicMock(return_value=response)
service.get = mock
images = Image.my_images(service)
mock.assert_called_once_with('images', {'filter': 'my_images'})
self.assertEquals(len(images), 2)
def test___eq__(self):
image1 = Image(1, "Ubuntu 10.02", "A linux distribution")
image2 = Image(1, "Ubuntu 10.02", "A linux distribution")
image3 = Image(2, "Ubuntu 10.02", "A linux distribution")
image4 = Image(1, "Ubuntu 12.10", "A linux distribution")
image5 = Image(1, "Ubuntu 10.02", "A windows distribution")
self.assertTrue(image1.__eq__(image2))
self.assertTrue(image2.__eq__(image1))
self.assertFalse(image1.__eq__(image3))
self.assertFalse(image1.__eq__(image4))
self.assertFalse(image1.__eq__(image5))
def test___ne__(self):
image1 = Image(1, "Ubuntu 10.02", "A linux distribution")
image2 = Image(1, "Ubuntu 10.02", "A linux distribution")
image3 = Image(2, "Ubuntu 10.02", "A linux distribution")
image4 = Image(1, "Ubuntu 12.10", "A linux distribution")
image5 = Image(1, "Ubuntu 10.02", "A windows distribution")
self.assertFalse(image1.__ne__(image2))
self.assertFalse(image2.__ne__(image1))
self.assertTrue(image1.__ne__(image3))
self.assertTrue(image1.__ne__(image4))
self.assertTrue(image1.__ne__(image5))
def test_destroy(self):
image = Image(
21345,
"This is a test",
"This is a test"
)
credentials = Credentials("abc", "def")
service = Service(credentials)
response = {
"status": "OK",
"event_id": 1417387
}
mock = MagicMock(return_value=response)
service.get = mock
self.assertTrue(image.destroy(service))
mock.assert_called_once_with(
'images/21345/destroy'
)
if __name__ == '__main__':
unittest.main()
| mit | -6,956,756,327,629,574,000 | 30.664557 | 71 | 0.507895 | false |
maxfischer2781/chainlet | chainlet_unittests/test_chainlet/test_concurrency/test_base.py | 1 | 3196 | from __future__ import absolute_import, division
import unittest
from chainlet.concurrency import base
from . import testbase_primitives
def return_stored(payload):
return payload
def raise_stored(payload):
raise payload
class TestMultiIter(unittest.TestCase):
def test_builtin(self):
"""multi iter on list, tuple, ..."""
values = tuple(range(20))
for base_type in (list, tuple, set):
iterable = base_type(values)
self._test_multi_tee(iterable, values)
def test_generator(self):
"""multi iter on `(val for val in values)`"""
values = tuple(range(20))
iterable = (val for val in values)
self._test_multi_tee(iterable, values)
def test_future_chain(self):
"""multi iter on future chain results"""
values = tuple(range(20))
value_iter = iter(values)
iterable = base.FutureChainResults([base.StoredFuture(lambda itr: [next(itr)], value_iter) for _ in range(len(values))])
self._test_multi_tee(iterable, values)
def _test_multi_tee(self, iterable, values):
iters = list(base.multi_iter(iterable, count=4))
self.assertEqual(len(iters), 4)
a, b, c, d = iters
# test single iteration
self.assertEqual(set(a), set(values))
# test interleaved iteration
self.assertEqual((next(b), next(b)), (next(c), next(c)))
for _ in range(8):
self.assertEqual(next(b), next(c))
self.assertEqual(next(c), next(b))
self.assertEqual((next(b), next(b)), (next(c), next(c)))
with self.assertRaises(StopIteration):
next(b)
with self.assertRaises(StopIteration):
next(c)
# test final iteration
self.assertEqual(set(d), set(values))
class TestFutureChainResults(unittest.TestCase):
def test_exception(self):
for ex_type in (Exception, ArithmeticError, KeyError, IndexError, OSError, AssertionError, SystemExit):
with self.subTest(ex_type=ex_type):
raise_middle_iterable = base.FutureChainResults([
base.StoredFuture(return_stored, [1, 2]),
base.StoredFuture(raise_stored, ex_type),
base.StoredFuture(return_stored, [3])
])
a, b, c, d = (iter(raise_middle_iterable) for _ in range(4))
self.assertEqual((next(a), next(b)), (1, 1))
self.assertEqual((next(a), next(b), next(c)), (2, 2, 1))
self.assertEqual(next(c), 2)
with self.assertRaises(ex_type):
next(a)
with self.assertRaises(ex_type):
next(b)
with self.assertRaises(ex_type):
next(c)
with self.assertRaises(ex_type):
list(d)
# non-concurrent primitives
class NonConcurrentBundle(testbase_primitives.PrimitiveTestCases.ConcurrentBundle):
test_concurrent = None
# dummy-concurrent primitives
class LocalBundle(testbase_primitives.PrimitiveTestCases.ConcurrentBundle):
test_concurrent = None
bundle_type = base.ConcurrentBundle
| mit | 3,650,394,867,760,230,000 | 34.910112 | 128 | 0.597935 | false |
hhstore/iPyScript | python/src/exercise/py27/ex03_callable_usage.py | 2 | 1777 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
可调用对象:
- 魔法方法: __call__(self, [args...])
- 允许类的一个实例, 像函数那样被调用
- 允许你自己类的对象, 表现得像是函数,然后你就可以“调用”它们,把它们传递到使用函数做参数的函数中
- 本质上这代表了 x() 和 x.__call__() 是相同的
- 注意 __call__ 可以有多个参数,可以像定义其他任何函数一样,定义 __call__ ,喜欢用多少参数就用多少.
- __call__ 在某些需要经常改变状态的类的实例中显得特别有用.
- “调用”这个实例来改变它的状态,是一种更加符合直觉,也更加优雅的方法.
参考:
- http://pyzh.readthedocs.io/en/latest/python-magic-methods-guide.html#id20
"""
class A(object):
"""表示一个实体的类.
调用它的实例, 可以更新实体的位置
"""
def __init__(self, x, y):
self.x, self.y = x, y
print "<__init__() is called.> | x={}, y={}".format(self.x, self.y)
def __call__(self, x, y):
"""改变实体的位置
:param x:
:param y:
:return:
"""
self.x, self.y = x, y
print "<__call__() is called.> | x={}, y={}".format(self.x, self.y)
class B(object):
"""表示一个实体的类.
调用它的实例, 可以更新实体的位置
"""
def __init__(self, x, y):
self.x, self.y = x, y
print "<__init__() is called.> | x={}, y={}".format(self.x, self.y)
def __call__(self, *args, **kwargs):
self.x, self.y = self.y, self.x
print "<__call__() is called.> | x={}, y={}".format(self.x, self.y)
if __name__ == '__main__':
m = A(2, 3)
m(4, 8)
n = B(1, 9)
n()
| mit | -7,918,310,326,699,231,000 | 20.688525 | 79 | 0.497354 | false |
Tset-Noitamotua/_learnpython | playground/__mdreader.py | 1 | 2522 | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from io import BytesIO
from io import StringIO
from robot.utils import Utf8Reader
NBSP = u'\xA0'
class MarkDownReader(object):
def read(self, markdown_file, populator):
process = False
# makes an empty list where we are going to store robot code
robot_lines = []
        # opens our file under alias 'md_file' and applies all the following
        # statements to it
with open(markdown_file) as md_file:
# creates a boolean var
include_line = False
            # for each line of the file passed as an argument to this script,
            # do the steps below
for line in md_file:
if not include_line:
include_line = line.strip().lower() == "```robotframework"
elif line.strip() == "```":
include_line = False
else:
robot_lines.append(line)
robot_data = str(''.join(robot_lines))
print(robot_data)
# txtfile = BytesIO(robot_data.encode('UTF-8'))
a = StringIO(robot_data)
for row in a.readlines():
row = self._process_row(row)
cells = [self._process_cell(cell) for cell in self.split_row(row)]
if cells and cells[0].strip().startswith('*') and \
populator.start_table([c.replace('*', '') for c in cells]):
process = True
elif process:
populator.add(cells)
populator.eof()
def _process_row(self, row):
if NBSP in row:
row = row.replace(NBSP, ' ')
return row.rstrip()
@classmethod
def split_row(cls, row):
return row.split('\t')
def _process_cell(self, cell):
if len(cell) > 1 and cell[0] == cell[-1] == '"':
cell = cell[1:-1].replace('""', '"')
return cell
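# Minimal usage sketch (assumptions: `populator` implements start_table()/add()/
# eof() as Robot Framework table populators do, and suite.md contains a
# ```robotframework fenced block):
#   reader = MarkDownReader()
#   reader.read('suite.md', populator)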
| gpl-2.0 | 1,565,531,593,124,979,500 | 34.027778 | 82 | 0.589215 | false |
csaez/ibl_stuff | ibl_stuff/libs/ibl.py | 1 | 1908 | import os
import json
from ibl_stuff.libs.relpath import relpath
BASE = {"title": "",
"type": "",
"lighting": "",
"location": "",
"tags": list(),
"projects": list(),
"author": "",
"date": "",
"comments": "",
"pano": "/home/csaez/Works/dev/ibl_stuff/refs/pano.png",
"sample": "/home/csaez/Works/dev/ibl_stuff/refs/sample.png",
}
def normalize_path(fp, start="/"):
if "./" in fp[:3] or not fp.startswith("/"): # is relative
basedir = start if os.path.isdir(start) else os.path.dirname(start)
fp = os.path.join(basedir, fp)
return os.path.normpath(fp)
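# Example (hypothetical paths): normalize_path("./sample.png", "/data/ibl.json")
# joins against the json file's directory -> "/data/sample.png"; an absolute
# path comes back unchanged apart from normpath cleanup.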
class IBL(dict):
def __init__(self, *arg, **kwds):
self.update(BASE)
super(IBL, self).__init__(*arg, **kwds)
self.filepath = None
def export_data(self, filepath):
d = self.copy()
# save paths relatives to json filepath
for attr in ("pano", "sample"):
d[attr] = relpath(os.path.dirname(filepath), d.get(attr))
# export as json
with open(filepath, "w") as fp:
json.dump(d, fp, indent=4, separators=(",", ": "))
return True
def import_data(self, filepath):
# validate filepath
if not os.path.isfile(filepath):
return False
# import data
self.filepath = filepath
with open(filepath) as fp:
d = json.load(fp)
# normalize paths to absolute
for attr in ("pano", "sample"):
d[attr] = normalize_path(d[attr], filepath)
# update and success
self.update(d)
return True
def save(self):
if self.filepath:
self.export_data(self.filepath)
return True
return False
@classmethod
def from_data(cls, filepath):
o = cls()
o.import_data(filepath)
return o
| mit | 7,437,422,137,244,577,000 | 26.652174 | 75 | 0.535115 | false |
abramhindle/UnnaturalCodeFork | python/testdata/launchpad/lib/lp/translations/tests/test_translations_to_review.py | 1 | 7710 | # Copyright 2009-2011 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Test the choice of "translations to review" for a user."""
__metaclass__ = type
from datetime import (
datetime,
timedelta,
)
from pytz import UTC
import transaction
from zope.security.proxy import removeSecurityProxy
from lp.app.enums import ServiceUsage
from lp.services.worlddata.model.language import LanguageSet
from lp.testing import TestCaseWithFactory
from lp.testing.layers import DatabaseFunctionalLayer
from lp.translations.interfaces.translationsperson import ITranslationsPerson
from lp.translations.model.translator import TranslatorSet
class ReviewTestMixin:
"""Base for testing which translations a reviewer can review."""
def setUpMixin(self, for_product=True):
"""Set up test environment.
Sets up a person, as well as a translation that that person is a
reviewer for and has contributed to.
If for_product is true, the translation will be for a product.
Otherwise, it will be for a distribution.
"""
# Set up a person, and a translation that this person is a
# reviewer for and has contributed to.
self.base_time = datetime.now(UTC)
self.person = self.factory.makePerson()
self.translationgroup = self.factory.makeTranslationGroup(
owner=self.factory.makePerson())
self.dutch = LanguageSet().getLanguageByCode('nl')
TranslatorSet().new(
translationgroup=self.translationgroup, language=self.dutch,
translator=self.person)
if for_product:
self.distroseries = None
self.distribution = None
self.sourcepackagename = None
self.productseries = removeSecurityProxy(
self.factory.makeProductSeries())
self.product = self.productseries.product
self.supercontext = self.product
else:
self.productseries = None
self.product = None
self.distroseries = removeSecurityProxy(
self.factory.makeDistroSeries())
self.distribution = self.distroseries.distribution
self.distribution.translation_focus = self.distroseries
self.sourcepackagename = self.factory.makeSourcePackageName()
self.supercontext = self.distribution
transaction.commit()
self.supercontext.translationgroup = self.translationgroup
self.supercontext.translations_usage = ServiceUsage.LAUNCHPAD
self.potemplate = self.factory.makePOTemplate(
productseries=self.productseries, distroseries=self.distroseries,
sourcepackagename=self.sourcepackagename)
self.pofile = removeSecurityProxy(self.factory.makePOFile(
potemplate=self.potemplate, language_code='nl'))
self.potmsgset = self.factory.makePOTMsgSet(
potemplate=self.potemplate, singular='hi')
self.translation = self.factory.makeCurrentTranslationMessage(
potmsgset=self.potmsgset, pofile=self.pofile,
translator=self.person, translations=['bi'],
date_created=self.base_time, date_reviewed=self.base_time)
later_time = self.base_time + timedelta(0, 3600)
self.suggestion = removeSecurityProxy(
self.factory.makeSuggestion(
potmsgset=self.potmsgset, pofile=self.pofile,
translator=self.factory.makePerson(), translations=['wi'],
date_created=later_time))
self.pofile.updateStatistics()
self.assertEqual(self.pofile.unreviewed_count, 1)
def _getReviewables(self, *args, **kwargs):
"""Shorthand for `self.person.getReviewableTranslationFiles`."""
person = ITranslationsPerson(self.person)
return list(person.getReviewableTranslationFiles(
*args, **kwargs))
class ReviewableTranslationFilesTest:
"""Test getReviewableTranslationFiles for a given setup.
Can be applied to product or distribution setups.
"""
def test_OneFileToReview(self):
# In the base case, the method finds one POFile for self.person
# to review.
self.assertEqual(self._getReviewables(), [self.pofile])
def test_getReviewableTranslationFiles_no_older_than_pass(self):
# The no_older_than parameter keeps translations that the
# reviewer worked on at least that recently.
self.assertEqual(
self._getReviewables(no_older_than=self.base_time), [self.pofile])
def test_getReviewableTranslationFiles_no_older_than_filter(self):
# The no_older_than parameter filters translations that the
# reviewer has not worked on since the given time.
next_day = self.base_time + timedelta(1)
self.assertEqual(self._getReviewables(no_older_than=next_day), [])
def test_getReviewableTranslationFiles_not_translating_in_launchpad(self):
# We don't see products/distros that don't use Launchpad for
# translations.
self.supercontext.translations_usage = ServiceUsage.NOT_APPLICABLE
self.assertEqual(self._getReviewables(), [])
def test_getReviewableTranslationFiles_non_reviewer(self):
# The method does not show translations that the user is not a
# reviewer for.
self.supercontext.translationgroup = None
self.assertEqual(self._getReviewables(), [])
def test_getReviewableTranslationFiles_other_language(self):
# We only get translations in languages that the person is a
# reviewer for.
self.pofile.language = LanguageSet().getLanguageByCode('de')
self.assertEqual(self._getReviewables(), [])
def test_getReviewableTranslationFiles_no_new_suggestions(self):
# Translation files only show up if they have new suggestions.
self.suggestion.date_created -= timedelta(2)
self.pofile.updateStatistics()
self.assertEqual(self._getReviewables(), [])
def test_getReviewableTranslationFiles_ignores_english(self):
# POFiles that "translate to English" are ignored.
english = LanguageSet().getLanguageByCode('en')
TranslatorSet().new(
translationgroup=self.translationgroup, language=english,
translator=self.person)
self.pofile.language = english
self.assertEqual(self._getReviewables(), [])
class TestReviewableProductTranslationFiles(TestCaseWithFactory,
ReviewTestMixin,
ReviewableTranslationFilesTest):
"""Test `Person.getReviewableTranslationFiles` for products."""
layer = DatabaseFunctionalLayer
def setUp(self):
super(TestReviewableProductTranslationFiles, self).setUp()
ReviewTestMixin.setUpMixin(self, for_product=True)
def test_getReviewableTranslationFiles_project_deactivated(self):
        # Deactivated projects are excluded from the list.
from lp.testing import celebrity_logged_in
with celebrity_logged_in('admin'):
self.product.active = False
self.assertEqual([], self._getReviewables())
class TestReviewableDistroTranslationFiles(TestCaseWithFactory,
ReviewTestMixin,
ReviewableTranslationFilesTest):
"""Test `Person.getReviewableTranslationFiles` for distros."""
layer = DatabaseFunctionalLayer
def setUp(self):
super(TestReviewableDistroTranslationFiles, self).setUp()
ReviewTestMixin.setUpMixin(self, for_product=False)
| agpl-3.0 | 304,215,327,847,859,300 | 41.362637 | 78 | 0.678859 | false |
JzHuai0108/vio_common | python/check_bags.py | 1 | 1252 | import os
import subprocess
import sys
if __name__ == "__main__":
"""Examine status of bags listed in bagnames_file which are
stored under bags_dir without subfolders.""" # pylint: disable=pointless-string-statement
if len(sys.argv) < 3:
print('Usage: {} bagnames_file bags_dir'.format(sys.argv[0]))
sys.exit(1)
    script, bagname_file, bags_dir = sys.argv
bagname_list = []
with open(bagname_file, 'r') as stream:
for line in stream:
line = line.strip()
if line:
bagname = os.path.basename(line)
                bagname_list.append(os.path.join(bags_dir, bagname))
print('Found #bagnames: {}'.format(len(bagname_list)))
for bagname in bagname_list:
# Run command with arguments and return its output as a byte string.
try:
status = subprocess.check_output(['rosbag', 'info', bagname],
stdin=None,
stderr=None,
shell=False,
universal_newlines=False)
# print(status)
except Exception as inst:
print(inst.args)
| bsd-3-clause | -7,644,749,024,074,684,000 | 35.823529 | 94 | 0.525559 | false |
crawfordsm/pysalt | saltred/saltobslog.py | 1 | 10740 | ################################# LICENSE ##################################
# Copyright (c) 2009, South African Astronomical Observatory (SAAO) #
# All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# * Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer #
# in the documentation and/or other materials provided with the #
# distribution. #
# * Neither the name of the South African Astronomical Observatory #
# (SAAO) nor the names of its contributors may be used to endorse #
# or promote products derived from this software without specific #
# prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE SAAO ''AS IS'' AND ANY EXPRESS OR #
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #
# DISCLAIMED. IN NO EVENT SHALL THE SAAO BE LIABLE FOR ANY #
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS #
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN #
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
############################################################################
#!/usr/bin/env python
# Author Version Date
# -----------------------------------------------
# Martin Still (SAAO) 1.0 25 Jul 2007
# S M Crawford (SAA0) 1.1 16 Jul 2009
# S M Crawford (SAA0) 1.2 19 Apr 2011
# saltobslog reads the critical header keywords of SALT FITS data and
# collates them into a FITS table. This can be used for the basis
# of an observation log or as a meta-table for pipeline processing.
#
# Updates:
# 16 Jul 2009 Handled a bug on how if data is missing in the headers
# 5 Apr 2011 Updated to handle new header key words and changes in existing
# keywords. We did not make it backwards compatible
# Changes made:
# TELTEMP->TELTEM
# PAYLTEMP -> PAYLTEM
# Removed DETSIZE
# Added EPOCH
# Removed UTC-OBS--now using TIME-OBS
# FOCUS->CAMFOCUS
# INTFR->TELFOCUS
# 19 Apr 2011 Updated to handle the new error handling and so it returns the
# contents without updating everything in the process
# Converted it to using dictionary instead of a bunch of lists
from __future__ import with_statement
from pyraf import iraf
import os, glob, time
from astropy.io import fits
import saltsafekey as saltkey
import saltsafeio as saltio
from saltsafelog import logging
from salterror import SaltError, SaltIOError
# -----------------------------------------------------------
# core routine
headerList=['FILENAME', 'PROPID', 'PROPOSER', 'OBJECT', 'RA', 'DEC', 'OBJEPOCH', 'EPOCH', 'EQUINOX', 'DATE-OBS', 'UTC-OBS', 'TIME-OBS', 'EXPTIME', 'OBSMODE', 'DETMODE', 'CCDTYPE', 'DETSIZE', 'NCCDS', 'CCDSUM', 'GAINSET', 'ROSPEED', 'INSTRUME', 'FILTER', 'CAMFOCUS', 'TELHA', 'TELRA', 'TELDEC', 'TELPA', 'TELAZ', 'TELALT', 'TRKX', 'TRKY', 'TRKZ', 'TRKPHI', 'TRKTHETA', 'TRKRHO', 'TELFOCUS', 'COLPHI', 'COLTHETA', 'TELTEM', 'PAYLTEM', 'CCDTEM', 'DEWTEM', 'AMPTEM', 'CENTEM', 'DETSWV', 'BLOCKID', 'BVISITID']
formatList=['32A', '50A', '20A', '100A', '1A', '12A', 'E', 'E', 'E', '10A', '12A',
'12A', 'D', '20A', '20A', '8A', '23A', 'I', '5A', '6A', '4A', '8A', '8A',
'J', '11A', '11A', '12A', 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E',
'E', 'E', 'E', 'E', 'E', 'E', 'D', 'E', 'E', 'D', '16A', 'E', 'E' ]
rssheaderList=['DEWPRE', 'POSANG', 'LAMPID', 'CALFILT', 'CALND', 'TELRHO', 'PELLICLE', 'INSTPORT', 'CF-STATE', 'SM-STATE', 'SM-STA', 'SM-STEPS', 'SM-VOLTS', 'SM-STA-S', 'SM-STA-V', 'MASKID', 'MASKTYP', 'WP-STATE', 'HWP-CMD', 'HW-STEPS', 'HWP-STA', 'QWP-CMD', 'QW-STEPS', 'QWP-STA', 'QWP-ANG', 'HWP-ANG', 'SH-STATE', 'FO-STATE', 'FO-POS', 'FO-VOLTS', 'FO-POS-S', 'FO-POS-V', 'GR-STATE', 'GR-STA', 'GR-ANGLE', 'GM-STEPS', 'GM-VOLTS', 'GR-STA-S', 'GR-STA-V', 'GR-STEPS', 'GRATING', 'GRTILT', 'BS-STATE', 'FI-STATE', 'FI-STA', 'FM-STEPS', 'FM-VOLTS', 'FM-STA-S', 'FM-STA-V', 'AR-STATE', 'AR-STA', 'CAMANG', 'AR-STA-S', 'AR-ANGLE', 'COLTEMP', 'CAMTEMP', 'PROC', 'PCS-VER', 'WPPATERN']
rssformatList=['D', 'E', '8A', '8A', 'E', 'E', '8A', '8A', '20A', '20A', '8A', 'J', 'E', 'E', 'E', '16A', '16A', '20A', '16A', 'J', 'E', '16A', 'J', 'E', 'E', 'E', '20A', '20A', 'E', 'E', 'E', 'E', '20A', '10A', 'E', 'J', 'E', 'E', 'E', 'J', '8A', 'E', '24A', '20A', '7A', 'J', 'E', 'E', 'E', '24A', '16A', 'E', 'E', 'E', 'E', 'E', '20A', '4A', '20A']
scamheaderList=['FILPOS']
scamformatList=['I']
debug=True
def saltobslog(images,outfile,clobber=False,logfile='salt.log',verbose=True):
"""Create the observation log from the input files"""
#start the logging
with logging(logfile,debug) as log:
# Check the input images
infiles = saltio.argunpack ('Input',images)
#create the header dictionary
headerDict=obslog(infiles, log)
#clobber the output if it exists
if (clobber and os.path.isfile(outfile)):
saltio.delete(outfile)
#create the fits file
struct=createobslogfits(headerDict)
# close table file
saltio.writefits(struct, outfile)
#indicate the log was created
log.message('\nSALTLOG -- created observation log ' + outfile)
def createobslogfits(headerDict):
"""Create the fits table for the observation log"""
# define generic columns of output table
col=[]
for k, f in zip(headerList, formatList):
print k,f, headerDict[k]
col.append(fits.Column(name=k, format=f, array=headerDict[k]))
for k, f in zip(scamheaderList, scamformatList):
print k,f, headerDict[k]
col.append(fits.Column(name=k, format=f, array=headerDict[k]))
for k, f in zip(rssheaderList, rssformatList):
print k,f, headerDict[k]
col.append(fits.Column(name=k, format=f, array=headerDict[k]))
# construct FITS table from columns
table = fits.ColDefs(col)
# write FITS table to output file
struct = fits.BinTableHDU.from_columns(table)
# name the table extension
struct.header['EXTNAME'] = 'OBSLOG'
struct.header['SAL_TLM'] = time.asctime(time.localtime())
#saltkey.new('EXTNAME','OBSLOG','extension name', struct)
#saltkey.put('SAL-TLM',time.asctime(time.localtime()), struct)
# housekeeping keywords
return struct
# -----------------------------------------------------------
# read keyword and append to list
def obslog(infiles, log=None):
"""For a set of input files, create a dictionary contain all the header
information from the files. Will print things to a saltlog if log is
not None
returns Dictionary
"""
#create the header dictionary
headerDict={}
for k in headerList: headerDict[k]=[]
for k in scamheaderList: headerDict[k]=[]
for k in rssheaderList: headerDict[k]=[]
# interate over and open image files
infiles.sort()
for infile in infiles:
#open the file
struct = saltio.openfits(infile)
# instrument
scam = False
rss = False
instrume = saltkey.get('INSTRUME', struct[0])
if (instrume=='RSS'): rss = True
if (instrume=='SALTICAM'): scam=True
#add in the image name
headerDict['FILENAME'].append(os.path.basename(infile))
# ingest primary keywords from files in the image list
for k,f in zip(headerList[1:], formatList[1:]):
default=finddefault(f)
headerDict[k].append(getkey(struct[0], k, default=default, log=log, warn=True))
# ingest scam specific primary keywords from files in the image list
for k,f in zip(scamheaderList[1:], scamformatList[1:]):
default=finddefault(f)
headerDict[k].append(getkey(struct[0], k, default=default, log=log, warn=scam))
# ingest rss specific primary keywords from files in the image list
for k,f in zip(rssheaderList[1:], rssformatList[1:]):
default=finddefault(f)
headerDict[k].append(getkey(struct[0], k, default=default, log=log, warn=rss))
# close image files
saltio.closefits(struct)
if log: log.message('SALTOBSLOG -- read %s' % infile, with_header=False)
return headerDict
def finddefault(f):
"""return the default value given a format"""
if f.count('A'):
default="UNKNOWN"
else:
default=-999
return default
def getkey(struct,keyword,default,warn=True, log=None):
"""Return the keyword value. Throw a warning if it doesn't work """
try:
value = saltkey.get(keyword, struct)
if isinstance(default, str): value=value.strip()
except SaltIOError:
value = default
infile=struct._file.name
message = 'WARNING: cannot find keyword %s in %s' %(keyword, infile)
if warn and log: log.message(message, with_header=False)
if (str(value).strip() == ''): value = default
#if (type(value) != type(default)):
# infile=struct._file.name
# message='WARNING: Type mismatch for %s for %s in %s[0]' % (str(value), keyword, infile)
# if warn and log: log.message(message, with_header=False)
# value=default
return value
# -----------------------------------------------------------
# main code
if not iraf.deftask('saltobslog'):
parfile = iraf.osfn("saltred$saltobslog.par")
t = iraf.IrafTaskFactory(taskname="saltobslog",value=parfile,function=saltobslog, pkgname='saltred')
| bsd-3-clause | -3,349,455,368,698,446,300 | 44.316456 | 679 | 0.57784 | false |
vallard/KUBaM | stage2/network/UCSNet.py | 1 | 5493 | from ucsmsdk.ucsexception import UcsException
def listVLANs(handle):
# <fabricVlan childAction="deleteNonPresent" cloud="ethlan" compressionType="included" configIssues="" defaultNet="no" dn="fabric/lan/net-10" epDn="" fltAggr="0" global="0" id="10" ifRole="network" ifType="virtual" local="0" locale="external" mcastPolicyName="" name="10" operMcastPolicyName="org-root/mc-policy-default" operState="ok" peerDn="" policyOwner="local" pubNwDn="" pubNwId="0" pubNwName="" sharing="none" switchId="dual" transport="ether" type="lan"/>
# get only VLANs not appliance port vlans
filter_string = '(dn, "fabric/lan/net-[A-Za-z0-9]+", type="re")'
vlans = handle.query_classid("fabricVlan", filter_string)
#vlans = handle.query_classid("fabricVlan")
val = -1
    while val < 0 or val >= len(vlans):
for i, vlan in enumerate(vlans):
print "[%d] VLAN %s" % (i+1, vlan.name)
print "-" * 80
val = raw_input("Please Select a VLAN for the Kubernetes Server to use: ")
val = int(val)
val = val - 1
return vlans[val]
def createKubeMacs(handle):
print "Creating Kubernetes MAC Pools"
from ucsmsdk.mometa.macpool.MacpoolPool import MacpoolPool
from ucsmsdk.mometa.macpool.MacpoolBlock import MacpoolBlock
mo = MacpoolPool(parent_mo_or_dn="org-root", policy_owner="local", descr="Kubernetes MAC Pool A", assignment_order="default", name="kubeA")
mo_1 = MacpoolBlock(parent_mo_or_dn=mo, to="00:25:B5:88:8A:FF", r_from="00:25:B5:88:8A:00")
handle.add_mo(mo)
mo = MacpoolPool(parent_mo_or_dn="org-root", policy_owner="local", descr="Kubernetes MAC Pool B", assignment_order="default", name="kubeB")
mo_1 = MacpoolBlock(parent_mo_or_dn=mo, to="00:25:B5:88:8B:FF", r_from="00:25:B5:88:8B:00")
handle.add_mo(mo)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\tKubernetes MAC Pools already exist"
def deleteKubeMacs(handle):
print "Deleting Kubernetes MAC Pools"
moa = handle.query_dn("org-root/mac-pool-kubeA")
mob = handle.query_dn("org-root/mac-pool-kubeB")
try:
handle.remove_mo(moa)
handle.remove_mo(mob)
handle.commit()
except AttributeError:
print "\talready deleted"
#handle.commit()
def createVNICTemplates(handle, vlan):
print "Creating Kubernetes VNIC Templates"
from ucsmsdk.mometa.vnic.VnicLanConnTempl import VnicLanConnTempl
from ucsmsdk.mometa.vnic.VnicEtherIf import VnicEtherIf
mo = VnicLanConnTempl(parent_mo_or_dn="org-root", templ_type="updating-template", name="kubeA", descr="", stats_policy_name="default", switch_id="A", pin_to_group_name="", mtu="1500", policy_owner="local", qos_policy_name="", ident_pool_name="kubeA", nw_ctrl_policy_name="")
mo_1 = VnicEtherIf(parent_mo_or_dn=mo, default_net="yes", name=vlan)
handle.add_mo(mo)
mob = VnicLanConnTempl(parent_mo_or_dn="org-root", templ_type="updating-template", name="kubeB", descr="", stats_policy_name="default", switch_id="B", pin_to_group_name="", mtu="1500", policy_owner="local", qos_policy_name="", ident_pool_name="kubeB", nw_ctrl_policy_name="")
mo_2 = VnicEtherIf(parent_mo_or_dn=mob, default_net="yes", name=vlan)
handle.add_mo(mob)
try:
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\tVNIC Templates already exist"
def deleteVNICTemplates(handle):
print "Deleting VNIC Templates"
moa = handle.query_dn("org-root/lan-conn-templ-kubeA")
mob = handle.query_dn("org-root/lan-conn-templ-kubeB")
try:
handle.remove_mo(moa)
handle.remove_mo(mob)
handle.commit()
except AttributeError:
print "\talready deleted"
def createLanConnPolicy(handle):
print "Creating Kubernetes LAN connectivity policy"
from ucsmsdk.mometa.vnic.VnicLanConnPolicy import VnicLanConnPolicy
from ucsmsdk.mometa.vnic.VnicEther import VnicEther
mo = VnicLanConnPolicy(parent_mo_or_dn="org-root", policy_owner="local", name="kube", descr="Kubernetes LAN Connectivity Policy")
mo_1 = VnicEther(parent_mo_or_dn=mo, addr="derived", nw_ctrl_policy_name="", admin_vcon="any", stats_policy_name="default", switch_id="A", pin_to_group_name="", mtu="1500", qos_policy_name="", adaptor_profile_name="Linux", ident_pool_name="", order="1", nw_templ_name="kubeA", name="eth0")
mo_2 = VnicEther(parent_mo_or_dn=mo, addr="derived", nw_ctrl_policy_name="", admin_vcon="any", stats_policy_name="default", switch_id="A", pin_to_group_name="", mtu="1500", qos_policy_name="", adaptor_profile_name="Linux", ident_pool_name="", order="2", nw_templ_name="kubeB", name="eth1")
try:
handle.add_mo(mo)
handle.commit()
except UcsException as err:
if err.error_code == "103":
print "\tLAN connectivity policy 'kube' already exists"
def deleteLanConnPolicy(handle):
print "Deleting kube LAN Connectivity policy"
mo = handle.query_dn("org-root/lan-conn-pol-kube")
try:
handle.remove_mo(mo)
handle.commit()
except AttributeError:
print "\talready deleted"
def createKubeNetworking(handle, org):
vlan = listVLANs(handle)
createKubeMacs(handle)
createVNICTemplates(handle, vlan.name)
createLanConnPolicy(handle)
def deleteKubeNetworking(handle, org):
deleteLanConnPolicy(handle)
deleteVNICTemplates(handle)
deleteKubeMacs(handle)
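# Hypothetical driver sketch -- assumes reachable UCS Manager credentials;
# UcsHandle, login() and logout() are real ucsmsdk calls:
#   from ucsmsdk.ucshandle import UcsHandle
#   handle = UcsHandle("ucsm.example.com", "admin", "password")
#   handle.login()
#   createKubeNetworking(handle, "org-root")
#   handle.logout()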
| apache-2.0 | -4,097,232,162,858,375,700 | 48.486486 | 469 | 0.676861 | false |
streamlink/streamlink | src/streamlink/plugins/turkuvaz.py | 1 | 2024 | import logging
import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(r"""https?://(?:www\.)?
(?:
(?:
(atvavrupa)\.tv
|
(atv|a2tv|ahaber|aspor|minikago|minikacocuk|anews)\.com\.tr
)/webtv/(?:live-broadcast|canli-yayin)
|
(ahaber)\.com\.tr/video/canli-yayin
|
atv\.com\.tr/(a2tv)/canli-yayin
|
sabah\.com\.tr/(apara)/canli-yayin
)
""", re.VERBOSE))
class Turkuvaz(Plugin):
_hls_url = "https://trkvz-live.ercdn.net/{channel}/{channel}.m3u8"
_token_url = "https://securevideotoken.tmgrup.com.tr/webtv/secure"
_token_schema = validate.Schema(validate.all(
{
"Success": True,
"Url": validate.url(),
},
validate.get("Url"))
)
def _get_streams(self):
url_m = self.match
domain = url_m.group(1) or url_m.group(2) or url_m.group(3) or url_m.group(4) or url_m.group(5)
# remap the domain to channel
channel = {"atv": "atvhd",
"ahaber": "ahaberhd",
"apara": "aparahd",
"aspor": "asporhd",
"anews": "anewshd",
"minikacocuk": "minikagococuk"}.get(domain, domain)
hls_url = self._hls_url.format(channel=channel)
# get the secure HLS URL
res = self.session.http.get(self._token_url,
params="url={0}".format(hls_url),
headers={"Referer": self.url,
"User-Agent": useragents.CHROME})
secure_hls_url = self.session.http.json(res, schema=self._token_schema)
log.debug("Found HLS URL: {0}".format(secure_hls_url))
return HLSStream.parse_variant_playlist(self.session, secure_hls_url)
__plugin__ = Turkuvaz
| bsd-2-clause | 6,941,490,820,942,419,000 | 32.733333 | 103 | 0.544466 | false |
cmptrgeekken/evething | thing/models/poswatchposhistory.py | 1 | 2203 | # ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from django.db import models
from thing.models.corporation import Corporation
class PosWatchPosHistory(models.Model):
id = models.BigIntegerField(primary_key=True)
corp = models.ForeignKey(Corporation)
type_id = models.IntegerField()
pos_id = models.IntegerField()
state = models.IntegerField()
location_id = models.BigIntegerField()
date = models.DateField()
moon_id = models.IntegerField()
taxable = models.BooleanField(default=False)
state_timestamp = models.DateTimeField(default=None)
online_timestamp = models.DateTimeField(default=None)
class Meta:
app_label = 'thing'
db_table = 'thing_poswatch_poshistory'
| bsd-2-clause | -8,042,947,434,338,316,000 | 46.891304 | 88 | 0.702678 | false |
repgarent/searchCadastre | cadastre_menu.py | 1 | 17289 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Cadastre - QGIS plugin menu class
A QGIS plugin
This plugins helps users to import the french land registry ('cadastre')
into a database. It is meant to ease the use of the data in QGIs
by providing search tools and appropriate layer symbology.
-------------------
begin : 2013-06-11
copyright : (C) 2013 by 3liz
email : info@3liz.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from cadastre_identify_parcelle import IdentifyParcelle
from cadastre_dialogs import *
import ConfigParser
# ---------------------------------------------
class cadastre_menu:
def __init__(self, iface):
self.iface = iface
self.mapCanvas = iface.mapCanvas()
self.cadastre_menu = None
self.cadastre_search_dialog = None
self.simple_cadastre_search_dialog = None
self.qc = None
def cadastre_add_submenu(self, submenu):
if self.cadastre_menu != None:
self.cadastre_menu.addMenu(submenu)
else:
self.iface.addPluginToMenu("&cadastre", submenu.menuAction())
def initGui(self):
# Add Cadastre to QGIS menu
self.cadastre_menu = QMenu(QCoreApplication.translate("cadastre", "Cadastre"))
self.iface.mainWindow().menuBar().insertMenu(self.iface.firstRightStandardMenu().menuAction(), self.cadastre_menu)
# Import Submenu
icon = QIcon(os.path.dirname(__file__) + "/icons/database.png")
self.import_action = QAction(icon, u"Importer des données", self.iface.mainWindow())
QObject.connect(self.import_action, SIGNAL("triggered()"), self.open_import_dialog)
# Search Submenu
icon = QIcon(os.path.dirname(__file__) + "/icons/search.png")
self.search_action = QAction(icon, u"Outils de recherche", self.iface.mainWindow())
QObject.connect(self.search_action, SIGNAL("triggered()"), self.toggle_search_dialog)
if not self.cadastre_search_dialog:
dialog = cadastre_search_dialog(self.iface)
self.cadastre_search_dialog = dialog
self.qc = cadastre_common(dialog)
# simple Search Submenu
icon = QIcon(os.path.dirname(__file__) + "/icons/simpleSearch.png")
self.simple_search_action = QAction(icon, u"Outils de recherche", self.iface.mainWindow())
QObject.connect(self.simple_search_action, SIGNAL("triggered()"), self.toggle_search_dialog)
if not self.simple_cadastre_search_dialog:
dialog = simple_cadastre_search_dialog(self.iface)
self.simple_cadastre_search_dialog = dialog
self.qc = cadastre_common(dialog)
# Load Submenu
icon = QIcon(os.path.dirname(__file__) + "/icons/output.png")
self.load_action = QAction(icon, u"Charger des données", self.iface.mainWindow())
QObject.connect(self.load_action, SIGNAL("triggered()"), self.open_load_dialog)
# Options Submenu
icon = QIcon(os.path.dirname(__file__) + "/icons/config.png")
self.option_action = QAction(icon, u"Configurer le plugin", self.iface.mainWindow())
QObject.connect(self.option_action, SIGNAL("triggered()"), self.open_option_dialog)
# About Submenu
icon = QIcon(os.path.dirname(__file__) + "/icons/about.png")
self.about_action = QAction(icon, u"À propos", self.iface.mainWindow())
QObject.connect(self.about_action, SIGNAL("triggered()"), self.open_about_dialog)
# Help Submenu
icon = QIcon(os.path.dirname(__file__) + "/icons/about.png")
self.help_action = QAction(icon, u"Aide", self.iface.mainWindow())
QObject.connect(self.help_action, SIGNAL("triggered()"), self.open_help)
# version Submenu
icon = QIcon(os.path.dirname(__file__) + "/icons/about.png")
self.version_action = QAction(icon, u"Notes de version", self.iface.mainWindow())
QObject.connect(self.version_action, SIGNAL("triggered()"), self.open_message_dialog)
# Add actions to Cadastre menu
self.cadastre_menu.addAction(self.import_action)
self.cadastre_menu.addAction(self.load_action)
self.cadastre_menu.addAction(self.search_action)
self.cadastre_menu.addAction(self.simple_search_action)
self.cadastre_menu.addAction(self.option_action)
self.cadastre_menu.addAction(self.about_action)
self.cadastre_menu.addAction(self.help_action)
self.cadastre_menu.addAction(self.version_action)
# Add cadastre toolbar
self.toolbar = self.iface.addToolBar(u'Cadastre');
# open import dialog
self.openImportAction = QAction(
QIcon(os.path.dirname(__file__) +"/icons/database.png"),
u"Importer des données",
self.iface.mainWindow()
)
self.openImportAction.triggered.connect(self.open_import_dialog)
self.toolbar.addAction(self.openImportAction)
self.toolbar.setObjectName("cadastreToolbar");
# open load dialog
self.openLoadAction = QAction(
QIcon(os.path.dirname(__file__) +"/icons/output.png"),
u"Charger des données",
self.iface.mainWindow()
)
self.openLoadAction.triggered.connect(self.open_load_dialog)
self.toolbar.addAction(self.openLoadAction)
# open search dialog
self.openSearchAction = QAction(
QIcon(os.path.dirname(__file__) +"/icons/search.png"),
u"Outils de recherche",
self.iface.mainWindow()
)
self.openSearchAction.triggered.connect(self.toggle_search_dialog)
#~ self.openSearchAction.setCheckable(True)
self.toolbar.addAction(self.openSearchAction)
# open simple search dialog
self.opensimpleSearchAction = QAction(
            QIcon(os.path.dirname(__file__) + "/icons/simpleSearch.png"),
u"Outils de recherche",
self.iface.mainWindow()
)
self.opensimpleSearchAction.triggered.connect(self.simple_toggle_search_dialog)
#~ self.opensimpleSearchAction.setCheckable(True)
self.toolbar.addAction(self.opensimpleSearchAction)
# open Option dialog
self.openOptionAction = QAction(
            QIcon(os.path.dirname(__file__) + "/icons/config.png"),
u"Configurer le plugin",
self.iface.mainWindow()
)
self.openOptionAction.triggered.connect(self.open_option_dialog)
self.toolbar.addAction(self.openOptionAction)
# open About dialog
self.openAboutAction = QAction(
            QIcon(os.path.dirname(__file__) + "/icons/about.png"),
u"À propos",
self.iface.mainWindow()
)
self.openAboutAction.triggered.connect(self.open_about_dialog)
self.toolbar.addAction(self.openAboutAction)
# Create action for "Parcelle information"
self.identifyParcelleAction = QAction(
            QIcon(os.path.dirname(__file__) + "/icons/toolbar/get-parcelle-info.png"),
"Infos parcelle",
self.iface.mainWindow()
)
self.identifyParcelleAction.setCheckable(True)
self.initializeIdentifyParcelleTool()
# Display About window on first use
s = QSettings()
        firstUse = s.value("cadastre/isFirstUse", 1, type=int)
if firstUse == 1:
s.setValue("cadastre/isFirstUse", 0)
self.open_about_dialog()
# Display some messages depending on version number
mConfig = ConfigParser.ConfigParser()
metadataFile = os.path.dirname(__file__) + "/metadata.txt"
        mConfig.read(metadataFile)
        self.mConfig = mConfig
        myVersion = mConfig.get('general', 'version').replace('.', '_')
        myVersionMsg = s.value("cadastre/version_%s" % myVersion, 1, type=int)
        if myVersionMsg == 1:
            s.setValue("cadastre/version_%s" % myVersion, 0)
self.open_message_dialog()
# Project load or create : refresh search and identify tool
self.iface.projectRead.connect(self.onProjectRead)
self.iface.newProjectCreated.connect(self.onNewProjectCreated)
def open_import_dialog(self):
'''
Import dialog
'''
dialog = cadastre_import_dialog(self.iface)
dialog.exec_()
def open_load_dialog(self):
'''
Load dialog
'''
dialog = cadastre_load_dialog(
self.iface,
self.cadastre_search_dialog
)
# refresh identify tool when new data loaded
# data loaded with plugin tool
dialog.ql.cadastreLoadingFinished.connect(self.refreshIdentifyParcelleTool)
dialog.exec_()
def toggle_search_dialog(self):
'''
Search dock widget
'''
if self.cadastre_search_dialog.isVisible():
self.cadastre_search_dialog.hide()
else:
self.cadastre_search_dialog.show()
    # open the new search panel
def simple_toggle_search_dialog(self):
'''
Search dock widget
'''
if self.simple_cadastre_search_dialog.isVisible():
self.simple_cadastre_search_dialog.hide()
else:
self.simple_cadastre_search_dialog.show()
def open_option_dialog(self):
'''
Config dialog
'''
dialog = cadastre_option_dialog(self.iface)
dialog.exec_()
def open_about_dialog(self):
'''
About dialog
'''
dialog = cadastre_about_dialog(self.iface)
dialog.exec_()
def initializeIdentifyParcelleTool(self):
'''
Initialize the identify tool for parcelles
'''
self.identyParcelleTool = IdentifyParcelle(self.mapCanvas)
self.identyParcelleTool.geomIdentified.connect(self.getParcelleInfo)
self.identyParcelleTool.geomUnidentified.connect(self.setParcelleAsActiveLayer)
self.identyParcelleTool.setAction(self.identifyParcelleAction)
self.identifyParcelleAction.triggered.connect(self.setIndentifyParcelleTool)
self.toolbar.addAction(self.identifyParcelleAction)
def refreshIdentifyParcelleTool(self):
'''
Reinit identify parcelle tool
'''
self.toolbar.removeAction(self.identifyParcelleAction)
self.initializeIdentifyParcelleTool()
self.setIndentifyParcelleTool()
def setIndentifyParcelleTool(self):
'''
        Activate the identify tool
for the layer geo_parcelle
'''
# First set Parcelle as active layer
self.setParcelleAsActiveLayer()
        # Then activate the identify tool
self.mapCanvas.setMapTool(self.identyParcelleTool)
def setParcelleAsActiveLayer(self):
'''
Search among layers
and set Parcelles layer as
the current active layer
'''
# First set Parcelle as active layer
layer = self.qc.getLayerFromLegendByTableProps('geo_parcelle')
if not layer:
return
# Set active layer -> geo_parcelle
self.iface.setActiveLayer(layer)
def getParcelleInfo(self, layer, feature):
'''
Return information of the identified
parcelle
'''
# Find parcelle layer
parcelleLayer = self.qc.getLayerFromLegendByTableProps('geo_parcelle')
if not parcelleLayer:
return
# Check if current active layer is parcelle layer
if parcelleLayer.id() != layer.id():
setActiveQuestion = QMessageBox.question(
self.cadastre_search_dialog,
u"Cadastre",
u'"Parcelles" doit être la couche active dans QGIS pour utiliser cet outil. Activer la couche ?',
QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes
)
if setActiveQuestion == QMessageBox.Yes:
self.iface.setActiveLayer(parcelleLayer)
return
# show parcelle form
parcelleDialog = cadastre_parcelle_dialog(
self.iface,
layer,
feature,
self.cadastre_search_dialog
)
parcelleDialog.show()
def onProjectRead(self):
'''
Refresh search dialog when new data has been loaded
'''
if self.cadastre_search_dialog:
self.cadastre_search_dialog.checkMajicContent()
self.cadastre_search_dialog.clearComboboxes()
self.cadastre_search_dialog.setupSearchCombobox('commune', None, 'sql')
self.cadastre_search_dialog.setupSearchCombobox('section', None, 'sql')
self.refreshIdentifyParcelleTool()
def onNewProjectCreated(self):
'''
        Refresh search dialog when a new project is created
'''
if self.cadastre_search_dialog:
self.cadastre_search_dialog.checkMajicContent()
self.cadastre_search_dialog.clearComboboxes()
def open_help(self):
'''Opens the html help file content with default browser'''
#~ localHelpUrl = "https://github.com/3liz/QgisCadastrePlugin/blob/master/doc/index.rst"
localHelpUrl = os.path.dirname(__file__) + "/doc/index.html"
QDesktopServices.openUrl( QUrl(localHelpUrl) )
def open_message_dialog(self):
'''
Display a message to the user
'''
versionMessages = {
'1.1.0': [
[
u'Compatibilité avec QGIS 2.6',
u'La compatibilité n\'est pas assurée à 100 % avec la dernière version 2.6 de QGIS, notamment pour la création d\'une base Spatialite vide. Vous pouvez utiliser les outils de QGIS pour le faire.'
] ,
[
u'Lien entre les parcelles EDIGEO et MAJIC',
u'Pour cette nouvelle version du plugin, la structure de la base de données a été légèrement modifiée. Pour pouvoir utiliser les fonctions du plugin Cadastre, vous devez donc impérativement <b>réimporter les données dans une base vide</b>'
] ,
[
u'Validation des géométries',
u'Certaines données EDIGEO contiennent des géométries invalides (polygones croisés dit "papillons", polygones non fermés, etc.). Cette version utilise une fonction de PostGIS qui tente de corriger ces invalidités. Il faut impérativement <b>utiliser une version récente de PostGIS</b> : 2.0.4 minimum pour la version 2, ou les version ultérieures (2.1 par exemple)'
]
]
}
mConfig = self.mConfig
version = mConfig.get('general', 'version')
changelog = mConfig.get('general', 'changelog')
message = '<h2>Version %s - notes concernant cette version</h2>' % version
if version in versionMessages:
            message += '<ul>'
            for item in versionMessages[version]:
                message += '<li><b>%s</b> - %s</li>' % (item[0], item[1])
            message += '</ul>'
        message += '<h3>Changelog</h3>'
        message += '<p>'
        for i, item in enumerate(changelog.split('*')):
            if i == 0:
                message += '<b>%s</b><ul>' % item.decode('utf-8')
            else:
                message += '<li>%s</li>' % item.decode('utf-8')
        message += '</ul>'
        message += '</p>'
dialog = cadastre_message_dialog(self.iface, message)
dialog.exec_()
def unload(self):
        if self.cadastre_menu is not None:
            self.iface.mainWindow().menuBar().removeAction(self.cadastre_menu.menuAction())
            self.cadastre_menu.deleteLater()
            self.iface.mainWindow().removeToolBar(self.toolbar)
if self.cadastre_search_dialog:
self.iface.removeDockWidget(self.cadastre_search_dialog)
| gpl-3.0 | -8,185,853,041,021,640,000 | 40.183771 | 384 | 0.586926 | false |
aldariz/Sick-Beard | sickbeard/providers/__init__.py | 1 | 4214 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['ezrss',
'eztv',
'hdbits',
'tvtorrents',
'womble',
'btn',
'omgwtfnzbs',
'kickass',
'torrentz',
'thepiratebay',
'torrentleech',
'torrentday',
'sceneaccess',
'iptorrents',
'bithdtv',
'btdigg',
'torrentshack',
'speed',
'revolutiontt'
]
import sickbeard
from sickbeard import logger
import sys
def sortedProviderList():
initialList = sickbeard.providerList + sickbeard.newznabProviderList
providerDict = dict(zip([x.getID() for x in initialList], initialList))
newList = []
# add all modules in the priority list, in order
for curModule in sickbeard.PROVIDER_ORDER:
if curModule in providerDict:
newList.append(providerDict[curModule])
# add any modules that are missing from that list
for curModule in providerDict:
if providerDict[curModule] not in newList:
newList.append(providerDict[curModule])
return newList
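# Illustrative behaviour of sortedProviderList() (provider IDs hypothetical):
# with sickbeard.PROVIDER_ORDER == ['btn', 'womble'], providers whose getID()
# is 'btn' or 'womble' come first, in that order, followed by every remaining
# provider from sickbeard.providerList + sickbeard.newznabProviderList.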
def makeProviderList():
return [x.provider for x in [getProviderModule(y) for y in __all__] if x]
def getNewznabProviderList(data):
defaultList = [makeNewznabProvider(x) for x in getDefaultNewznabProviders().split('!!!')]
providerList = filter(lambda x: x, [makeNewznabProvider(x) for x in data.split('!!!')])
providerDict = dict(zip([x.name for x in providerList], providerList))
for curDefault in defaultList:
if not curDefault:
continue
if curDefault.name not in providerDict:
curDefault.default = True
providerList.append(curDefault)
else:
providerDict[curDefault.name].default = True
providerDict[curDefault.name].name = curDefault.name
providerDict[curDefault.name].url = curDefault.url
providerDict[curDefault.name].needs_auth = curDefault.needs_auth
return filter(lambda x: x, providerList)
def makeNewznabProvider(configString):
if not configString:
return None
try:
name, url, key, catIDs, enabled = configString.split('|')
except ValueError:
logger.log(u"Skipping Newznab provider string: '" + configString + "', incorrect format", logger.ERROR)
return None
newznab = sys.modules['sickbeard.providers.newznab']
newProvider = newznab.NewznabProvider(name, url, key=key, catIDs=catIDs)
newProvider.enabled = enabled == '1'
return newProvider
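# Expected configString layout for makeNewznabProvider(), matching the defaults
# below: 'name|url|key|catIDs|enabled',
# e.g. 'NZBs.org|http://nzbs.org/||5030,5040,5070,5090|0'.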
def getDefaultNewznabProviders():
return 'Sick Beard Index|http://lolo.sickbeard.com/|0|5030,5040|1!!!NZBs.org|http://nzbs.org/||5030,5040,5070,5090|0!!!Usenet-Crawler|https://www.usenet-crawler.com/||5030,5040|0'
def getProviderModule(name):
name = name.lower()
prefix = "sickbeard.providers."
if name in __all__ and prefix + name in sys.modules:
return sys.modules[prefix + name]
else:
raise Exception("Can't find " + prefix + name + " in " + repr(sys.modules))
def getProviderClass(providerID):
providerMatch = [x for x in sickbeard.providerList + sickbeard.newznabProviderList if x.getID() == providerID]
if len(providerMatch) != 1:
return None
else:
return providerMatch[0]
| gpl-3.0 | -4,247,578,226,064,979,500 | 29.684211 | 183 | 0.635026 | false |
cc-it/odoo_mod | odoo_web_login/__openerp__.py | 1 | 1721 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Samples module for Odoo Web Login Screen
# Copyright (C) 2016- XUBI.ME (http://www.xubi.me)
# @author binhnguyenxuan (https://www.linkedin.com/in/binh-nguyen-xuan-46556279)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Background Source: http://forum.xda-developers.com/showpost.php?p=37322378
#
##############################################################################
{
'name': 'Odoo Web Login Screen',
'summary': 'The new configurable Odoo Web Login Screen',
'version': '9.0.1.0.0',
'category': 'Website',
    'description': """
The new configurable Odoo Web Login Screen
""",
'author': "binhnguyenxuan (www.xubi.me)",
'website': 'http://www.xubi.me',
'license': 'AGPL-3',
'depends': [
],
'data': [
'data/ir_config_parameter.xml',
'templates/webclient_templates.xml',
'templates/website_templates.xml',
],
'qweb': [
],
'installable': True,
'application': True,
}
| mit | -6,586,122,549,708,059,000 | 36.413043 | 83 | 0.598489 | false |
sgongar/Herschel-PACS-Toolbox-Red-Leak | pipeline_scripts/L05_Frames.py | 1 | 10119 | #
# This file is part of Herschel Common Science System (HCSS).
# Copyright 2001-2012 Herschel Science Ground Segment Consortium
#
# HCSS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# HCSS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General
# Public License along with HCSS.
# If not, see <http://www.gnu.org/licenses/>.
#
"""
VERSION
$Id: L05_Frames.py,v 1.48 2015/10/15 18:39:45 jdejong Exp $ <do not touch. This field is changed by CVS>
PURPOSE
PACS Spectrometer Pipeline for AORs starting from spu mode of onboard fitted ramps
This is the first level of the pipeline scripts of the SPG - the Standard Product Generation,
a.k.a. the automatic pipeline that the HSC runs - and that which your HSA-gotten ObservationContext
were reduced with, if they were reduced in this same track.
The SPG scripts are similar to the interactive pipeline (ipipe) scripts that PACS provides in HIPE,
but there are extra tasks that the ipipe scripts include, and so users should consider re-reducing
their data with the ipipe scripts. However, for this script there are almost no differences between
the SPG and the interactive pipelines.
This SPG script is provided so you can see what your HSA-gotten Observation Context levels
were reduced with. We provide some comments explaining what the individual tasks do, but for detailed
comments see the ipipe scripts, and for a detailed explanation of the pipeline, see the PACS Data
Reduction Guide.
The ipipe scripts can be found under the HIPE menu Pipelines->Spectrometer->. From there you go
to the pipeline suited for your observation, such as Chopped line scan and short range scan->lineScan.
WARNING
Do not edit this file! This is the reference copy for your current
installation of HIPE. We recommend you first copy it to a different
location before editing.
AUTHOR
Juergen Schreiber <schreiber@mpia.de>
INPUTS
- obs : ObservationContext
- Products needed to run the pipeline are extracted from it
- Must already be loaded in HIPE, and should contain the previously-reduced Level 0.5
- camera : camera to reduce (only one done at a time)
- "red" or "blue"
- calTree : Calibration Tree from the observation, or generated within your HIPE session
HISTORY
2009-03-13 JS 1.0 initial version
2009-06-24 JS 1.1 updated
2013-04-04 KME improve comments
"""
from herschel.pacs.signal import SlicedFrames
from herschel.pacs.signal.context import *
from herschel.pacs.spg.common import *
from herschel.pacs.spg.spec import *
from herschel.pacs.cal import *
from herschel.ia.numeric import *
from herschel.ia.jconsole import *
from herschel.pacs.spg.pipeline import *
#*******************************************************************************
# Preparation
#*******************************************************************************
#
#--------------------------------------------------------------------------------
# SETUP 1:
# red or blue camera? This the user has to set before running the script, using the command e.g.
# > camera = "blue"
# the try/except here will set the camera to "blue" if it has not already been defined
try:
camera
except NameError:
camera = 'blue'
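# e.g. to reduce the other camera instead, execute this before running the script:
# camera = "red"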
# Checks for a particular type of anomaly (H_SC-70 DECMEC) and adds a quality flag if found.
# (This refers to a loss of data, and if this quality flag became set while the SPG was running
# on your data, then your data would have gone through an extra quality control.)
# If the anomaly is present, then a Meta Data entry "qflag_DMANOG4L_p" will be added to obs, and
# a flag is added to the "quality" of obs.
obs = checkForAnomaly70(obs)
# filter meta keywords and update descriptions
modifyMetaData(obs)
# add extra meta data
pacsEnhanceMetaData(obs)
#
# copy the metadata from the ObservationContext to the level0 product
pacsPropagateMetaKeywords(obs,'0', obs.level0)
#
# Extract the level0 from the ObservationContext
level0 = PacsContext(obs.level0)
level0 = level0.updateContextType()
obs.level0 = level0
#
# Extract the pointing product
pp = obs.auxiliary.pointing
#
# Extract the orbit ephemeris information
orbitEphem = obs.auxiliary.orbitEphemeris
#
# Extract Time Correlation which is used to convert in addUtc
timeCorr = obs.auxiliary.timeCorrelation
#-------------------------------------------------------------------------------------------
# SETUP 2:
# Set up the calibration tree.
# First check whether calTree already exists, since it could have been filled by the SPG pipeline
# If not, then take it from your current HIPE build, and then put it into the ObservationContext
# so that it is stored there for future reference
try:
calTree
except NameError:
calTree = getCalTree(obs=obs)
obs.calibration = calTree
#
# Extract the Horizons product
#
try:
    hp = obs.auxiliary.refs["HorizonsProduct"].product
except Exception:
    print "WARNING : No Horizons found !"
    hp = None
#
# For your camera, extract the Frames (scientific data), the rawramps (raw data
# for one pixel), and the DMC header (the mechanisms' status information,
# sampled at a high frequency)
slicedFrames = SlicedFrames(level0.fitted.getCamera(camera).product)
slicedRawRamp = level0.raw.getCamera(camera).product
slicedDmcHead = level0.dmc.getCamera(camera).product
#
# ***********************************************************************************
# Processing
# ***********************************************************************************
#
# Flag the saturated data in a mask "SATURATION" (and "RAWSATURATION": this exploits
# the raw ramps downlinked for a single pixel of the data array)
# used cal files: RampSatLimits and SignalSatLimits
# copy=1 makes slicedFrames a fully independent product
slicedFrames = specFlagSaturationFrames(slicedFrames, rawRamp = slicedRawRamp, calTree=calTree, copy=1)
#
# Convert digital units to Volts, used cal file: Readouts2Volts
slicedFrames = specConvDigit2VoltsPerSecFrames(slicedFrames, calTree=calTree)
#
# Identifies the calibration blocks and fills the CALSOURCE status entry
slicedFrames = detectCalibrationBlock(slicedFrames)
#
# This tasks adds the time information in UTC to the status
slicedFrames = addUtc(slicedFrames, timeCorr)
#
# Add the pointing information of the central spaxel to the Status
# Uses the pointing, horizons product (solar system object ephemeries),
# orbitEphemeris products, and the SIAM cal file.
slicedFrames = specAddInstantPointing(slicedFrames, pp, calTree = calTree, orbitEphem = orbitEphem, horizonsProduct = hp)
# copy the saa meta keyword to the ObservationContext meta HCSS-SCR 19230
obs.meta["solarAspectAngleMean"] = slicedFrames.meta["solarAspectAngleMean"].copy()
obs.meta["solarAspectAngleRms"] = slicedFrames.meta["solarAspectAngleRms"].copy()
#
# If SSO, move SSO target to a fixed position in sky. This is needed for mapping SSOs.
if isSolarSystemObject(obs):
    slicedFrames = correctRaDec4Sso(slicedFrames, orbitEphem=orbitEphem, horizonsProduct=hp, linear=0)
#
# This task extends the Status of Frames with the parameters GRATSCAN, CHOPPER, CHOPPOS
# used cal file: ChopperThrowDescription
slicedFrames = specExtendStatus(slicedFrames, calTree=calTree)
#
# This task converts the chopper readouts to an angle wrt. focal plane unit and the sky
# and adds this to the status, used cal file: ChopperAngle and ChopperSkyAngle
slicedFrames = convertChopper2Angle(slicedFrames, calTree=calTree)
#
# This task adds the positions for each pixel (Ra and Dec dataset)
# used cal files: ArrayInstrument and ModuleArray
slicedFrames = specAssignRaDec(slicedFrames, calTree=calTree)
#
# This task adds the wavelength for each pixel (Wave dataset), used cal file: WavePolynomes
slicedFrames = waveCalc(slicedFrames, calTree=calTree)
#
# This task corrects the wavelength for the s/c velocity, uses the pointing, orbitEphemeris and TimeCorrelation product
slicedFrames = specCorrectHerschelVelocity(slicedFrames, orbitEphem, pp, timeCorr, horizonsProduct = hp)
#
# Find the major blocks of this observation and organise it in the block table attached to the Frames
# used cal file: ObcpDescription
slicedFrames = findBlocks(slicedFrames, calTree = calTree)
#
# This task flags the known bad or noisy pixels in the mask "BADPIXELS" and "NOISYPIXELS"
# used cal files: BadPixelMask and NoisyPixelMask
slicedFrames = specFlagBadPixelsFrames(slicedFrames, calTree=calTree)
#
# Slice the data by Line/Range, Raster Point, nod position, nod cycle, on/off position and per band.
# The parameters removeUndefined and removeMasked are for cleaning purposes
slicedFrames, additionalOutContexts = pacsSliceContext(slicedFrames,[slicedDmcHead],removeUndefined=True, removeMasked=True, spgMode = True)
slicedDmcHead = additionalOutContexts[0]
# This task flags the data effected by the chopper movement in the mask "UNCLEANCHOP"
# it uses the high resolution Dec/Mec header and the cal files ChopperAngle and ChopperJitterThreshold
slicedFrames = flagChopMoveFrames(slicedFrames, dmcHead=slicedDmcHead, calTree=calTree)
#
# This task flags the data affected by the grating movement in the mask "GRATMOVE"
# it uses the high resolution Dec/Mec header and the cal file GratingJitterThreshold
slicedFrames = flagGratMoveFrames(slicedFrames, dmcHead=slicedDmcHead, calTree=calTree)
#
# Update of the observation context
obs = updatePacsObservation(obs, 0.5, [slicedFrames, slicedDmcHead])
# remove some variables (clean-up of memory)
del pp, orbitEphem, slicedDmcHead, slicedFrames, slicedRawRamp, timeCorr, hp, level0, additionalOutContexts
| lgpl-3.0 | -7,621,928,281,649,547,000 | 43.381579 | 140 | 0.732187 | false |
REGOVAR/Regovar | regovar/core/managers/sample_manager.py | 1 | 3698 | #!env/python3
# coding: utf-8
try:
import ipdb
except ImportError:
pass
import os
import json
import datetime
import uuid
import psycopg2
import hashlib
import asyncio
import ped_parser
from config import *
from core.framework.common import *
from core.framework.postgresql import execute
import core.model as Model
# =====================================================================================================================
# Samples MANAGER
# =====================================================================================================================
class SampleManager:
def __init__(self):
pass
def list(self, ref_id=0):
"""
List all samples by default, or samples for a provided reference_id
"""
sql_where = " WHERE reference_id={}".format(ref_id) if ref_id > 0 else ""
sql = "SELECT id, subject_id, name, comment, is_mosaic, file_id, loading_progress, reference_id, status FROM sample{} ORDER BY id".format(sql_where)
result = []
for res in execute(sql):
result.append({
"id": res.id,
"subject_id": res.subject_id,
"name": res.name,
"comment": res.comment,
"status": res.status,
"is_mosaic": res.is_mosaic,
"file_id": res.file_id,
"loading_progress": res.loading_progress,
"reference_id": res.reference_id
})
return result
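    # Each dict returned by list() mirrors the SELECT above; values here are
    # purely illustrative:
    #   {"id": 1, "subject_id": 4, "name": "S1", "comment": None,
    #    "status": "ready", "is_mosaic": False, "file_id": 12,
    #    "loading_progress": 1.0, "reference_id": 2}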
def get(self, fields=None, query:str=None, order:str=None, offset:int=None, limit:int=None, depth:int=0):
"""
Generic method to get sample data according to provided filtering options
"""
if not isinstance(fields, dict):
fields = None
if query is None:
query = {}
if order is None:
order = "name"
if offset is None:
offset = 0
if limit is None:
limit = RANGE_MAX
        session = Model.Session()
        samples = session.query(Model.Sample).filter_by(**query).order_by(order).limit(limit).offset(offset).all()
        for sample in samples:
            sample.init(depth)
        return samples
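    # Illustrative call of get() (keyword values hypothetical):
    #   samples = SampleManager().get(query={"reference_id": 2}, order="name",
    #                                 offset=0, limit=50, depth=1)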
async def import_from_file(self, file_id:int, reference_id:int, analysis_id:int=None):
from core.managers.imports.vcf_manager import VcfManager
# Check ref_id
if analysis_id:
analysis = Model.Analysis.from_id(analysis_id)
if analysis and not reference_id:
reference_id=analysis.reference_id
# create instance of importer
importer = VcfManager() # Only import from VCF is supported for samples
print ("Using import manager {}. {}".format(VcfManager.metadata["name"],VcfManager.metadata["description"]))
try:
result = await importer.import_data(file_id, reference_id=reference_id)
except Exception as ex:
msg = "Error occured when caling: core.samples.import_from_file > VcfManager.import_data(file_id={}, ref_id={}).".format(file_id, reference_id)
raise RegovarException(msg, exception=ex)
        # if analysis_id set, associate it to samples
        if result and result["success"]:
            samples = [result["samples"][s] for s in result["samples"].keys()]
            if analysis_id:
                for sample in samples:
                    Model.AnalysisSample.new(sample.id, analysis_id)
                    sample.init()
            return samples
        return False  # TODO raise error
| agpl-3.0 | 655,206,874,678,697,900 | 31.165217 | 156 | 0.538129 | false |
masamitsu-murase/pausable_unittest | test/testpauser.py | 1 | 2085 |
import os
import sys
import subprocess
import pausable_unittest
class Pauser(pausable_unittest.BasePauser):
def add_actions(self):
def shutdown(self, wake_after_sec=None):
self.pause(("shutdown", wake_after_sec))
self.add_action("shutdown", shutdown)
def reboot(self):
self.pause(("reboot",))
self.add_action("reboot", reboot)
def exec_for_reboot(self, command, expected_exitcode=0):
self.pause(("exec_for_reboot", command, expected_exitcode))
self.add_action("exec_for_reboot", exec_for_reboot)
def bat_path(self, base_dir):
return self.call_pauser_callback("bat_path", base_dir)
self.add_action("bat_path", bat_path)
def create_bat(self):
return self.call_pauser_callback("create_bat")
self.add_action("create_bat", create_bat)
def do_pause(self, info):
# for consistent output of travis ci.
sys.stdout.flush()
if info[0] == "shutdown":
            # info[1] (wake_after_sec) is intentionally ignored by this test pauser
print("Run again")
elif info[0] == "reboot":
print("Reboot!")
elif info[0] == "exec_for_reboot":
cmd = info[1]
expected_exitcode = info[2]
ret = os.system(cmd)
            if isinstance(expected_exitcode, (list, tuple)):
                if ret not in expected_exitcode:
                    raise subprocess.CalledProcessError(ret, str(cmd))
            else:
                if ret != expected_exitcode:
                    raise subprocess.CalledProcessError(ret, str(cmd))
def bat_path(self, base_dir):
return base_dir + "_sample"
def create_bat(self):
pass
def exec_callback(self, action, info):
if action == "bat_path":
return self.bat_path(info)
elif action == "create_bat":
return self.create_bat()
else:
super(Pauser, self).exec_callback(action, info)
| mit | 6,684,585,030,722,237,000 | 31.095238 | 83 | 0.547722 | false |
caseyching/incubator-airflow | tests/jobs.py | 1 | 27755 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import logging
import os
import time
import unittest
from airflow import AirflowException, settings
from airflow import models
from airflow.bin import cli
from airflow.executors import DEFAULT_EXECUTOR
from airflow.jobs import BackfillJob, SchedulerJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.dummy_operator import DummyOperator
from airflow.utils.db import provide_session
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.executor.test_executor import TestExecutor
from airflow import configuration
configuration.load_test_config()
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = datetime.datetime(2016, 1, 1)
class BackfillJobTest(unittest.TestCase):
def setUp(self):
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(include_examples=True)
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_trigger_controller_dag(self):
dag = self.dagbag.get_dag('example_trigger_controller_dag')
target_dag = self.dagbag.get_dag('example_trigger_target_dag')
dag.clear()
target_dag.clear()
scheduler = SchedulerJob()
queue = mock.Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertFalse(queue.append.called)
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True
)
job.run()
scheduler = SchedulerJob()
queue = mock.Mock()
scheduler._process_task_instances(target_dag, queue=queue)
self.assertTrue(queue.append.called)
target_dag.clear()
dag.clear()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_multi_dates(self):
dag = self.dagbag.get_dag('example_bash_operator')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE+datetime.timedelta(days=1),
ignore_first_depends_on_past=True
)
job.run()
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id=='example_bash_operator'
).order_by(DagRun.execution_date).all()
self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
self.assertTrue(drs[0].state == State.SUCCESS)
self.assertTrue(drs[1].execution_date == DEFAULT_DATE+datetime.timedelta(days=1))
self.assertTrue(drs[1].state == State.SUCCESS)
dag.clear()
session.close()
@unittest.skipIf('sqlite' in configuration.get('core', 'sql_alchemy_conn'),
"concurrent access not supported in sqlite")
def test_backfill_examples(self):
"""
Test backfilling example dags
"""
# some DAGs really are just examples... but try to make them work!
skip_dags = [
'example_http_operator',
'example_twitter_dag',
'example_trigger_target_dag',
'example_trigger_controller_dag', # tested above
'test_utils', # sleeps forever
]
logger = logging.getLogger('BackfillJobTest.test_backfill_examples')
dags = [
dag for dag in self.dagbag.dags.values()
if 'example_dags' in dag.full_filepath
and dag.dag_id not in skip_dags
]
for dag in dags:
dag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
ignore_first_depends_on_past=True)
job.run()
def test_backfill_pooled_tasks(self):
"""
Test that queued tasks are executed by BackfillJob
Test for https://github.com/airbnb/airflow/pull/1225
"""
session = settings.Session()
pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
session.add(pool)
session.commit()
dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
dag.clear()
job = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
# run with timeout because this creates an infinite loop if not
# caught
with timeout(seconds=30):
job.run()
ti = TI(
task=dag.get_task('test_backfill_pooled_task'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_cli_backfill_depends_on_past(self):
"""
Test that CLI respects -I argument
"""
dag_id = 'test_dagrun_states_deadlock'
run_date = DEFAULT_DATE + datetime.timedelta(days=1)
args = [
'backfill',
dag_id,
'-l',
'-s',
run_date.isoformat(),
]
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
cli.backfill,
self.parser.parse_args(args))
cli.backfill(self.parser.parse_args(args + ['-I']))
ti = TI(dag.get_task('test_depends_on_past'), run_date)
ti.refresh_from_db()
# task ran
self.assertEqual(ti.state, State.SUCCESS)
dag.clear()
class SchedulerJobTest(unittest.TestCase):
# These defaults make the test faster to run
default_scheduler_args = {"file_process_interval": 0,
"processor_poll_interval": 0.5}
def setUp(self):
self.dagbag = DagBag()
@provide_session
def evaluate_dagrun(
self,
dag_id,
expected_task_states, # dict of task_id: state
dagrun_state,
run_kwargs=None,
advance_execution_date=False,
session=None):
"""
Helper for testing DagRun states with simple two-task DAGS.
This is hackish: a dag run is created but its tasks are
run by a backfill.
"""
if run_kwargs is None:
run_kwargs = {}
scheduler = SchedulerJob(**self.default_scheduler_args)
dag = self.dagbag.get_dag(dag_id)
dag.clear()
dr = scheduler.create_dag_run(dag)
if advance_execution_date:
# run a second time to schedule a dagrun after the start_date
dr = scheduler.create_dag_run(dag)
ex_date = dr.execution_date
try:
dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
except AirflowException:
pass
# test tasks
for task_id, expected_state in expected_task_states.items():
task = dag.get_task(task_id)
ti = TI(task, ex_date)
ti.refresh_from_db()
self.assertEqual(ti.state, expected_state)
# load dagrun
dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
dr = dr[0]
dr.dag = dag
self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_scheduler_start_date(self):
"""
Test that the scheduler respects start_dates, even when DAGS have run
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
scheduler = SchedulerJob(dag_id,
num_runs=2,
**self.default_scheduler_args)
scheduler.run()
# zero tasks ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
# previously, running this backfill would kick off the Scheduler
# because it would take the most recent run and start from there
# That behavior still exists, but now it will only do so if after the
# start date
backfill = BackfillJob(
dag=dag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE)
backfill.run()
# one task ran
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
scheduler = SchedulerJob(dag_id,
num_runs=2,
**self.default_scheduler_args)
scheduler.run()
# still one task
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
file_process_interval=0,
processor_poll_interval=0.5,
num_runs=2)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=datetime.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
def test_scheduler_process_task_instances(self):
"""
Test if _process_task_instances puts the right task instances into the
queue.
"""
dag = DAG(
dag_id='test_scheduler_process_execute_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
queue.append.assert_called_with(
(dag.dag_id, dag_task1.task_id, DEFAULT_DATE)
)
def test_scheduler_do_not_schedule_removed_task(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dag = DAG(
dag_id='test_scheduler_do_not_schedule_removed_task',
start_date=DEFAULT_DATE)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
        queue.append.assert_not_called()
def test_scheduler_do_not_schedule_too_early(self):
dag = DAG(
dag_id='test_scheduler_do_not_schedule_too_early',
start_date=datetime.datetime(2200, 1, 1))
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
        queue.append.assert_not_called()
def test_scheduler_do_not_run_finished(self):
dag = DAG(
dag_id='test_scheduler_do_not_run_finished',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances(session=session)
for ti in tis:
ti.state = State.SUCCESS
session.commit()
session.close()
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
        queue.append.assert_not_called()
def test_scheduler_add_new_task(self):
"""
Test if a task instance will be added if the dag is updated
"""
dag = DAG(
dag_id='test_scheduler_add_new_task',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 1)
dag_task2 = DummyOperator(
task_id='dummy2',
dag=dag,
owner='airflow')
queue = mock.Mock()
scheduler._process_task_instances(dag, queue=queue)
tis = dr.get_task_instances()
self.assertEquals(len(tis), 2)
def test_scheduler_verify_max_active_runs(self):
"""
        Test that a dagrun will not be scheduled if max_active_runs has been reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob(dag.dag_id,
run_duration=1)
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr2 = scheduler.create_dag_run(dag)
self.assertIsNone(dr2)
dag.clear()
dag.max_active_runs = 0
scheduler.run()
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag.dag_id).all()), 0)
def test_scheduler_fail_dagrun_timeout(self):
"""
        Test that a dagrun will be set to failed if it exceeds dagrun_timeout
"""
dag = DAG(
dag_id='test_scheduler_fail_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
dr2 = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr2)
dr.refresh_from_db(session=session)
self.assertEquals(dr.state, State.FAILED)
def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
"""
        Test that a dagrun will not be scheduled if max_active_runs has been reached and dagrun_timeout is not reached
        Test that a dagrun will be scheduled if max_active_runs has been reached but dagrun_timeout is also reached
"""
dag = DAG(
dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
start_date=DEFAULT_DATE)
dag.max_active_runs = 1
dag.dagrun_timeout = datetime.timedelta(seconds=60)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
session.close()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
# Should not be scheduled as DagRun has not timedout and max_active_runs is reached
new_dr = scheduler.create_dag_run(dag)
self.assertIsNone(new_dr)
# Should be scheduled as dagrun_timeout has passed
dr.start_date = datetime.datetime.now() - datetime.timedelta(days=1)
session.merge(dr)
session.commit()
new_dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(new_dr)
def test_scheduler_auto_align(self):
"""
Test if the schedule_interval will be auto aligned with the start_date
such that if the start_date coincides with the schedule the first
execution_date will be start_date, otherwise it will be start_date +
interval.
"""
dag = DAG(
dag_id='test_scheduler_auto_align_1',
start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="4 5 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, datetime.datetime(2016, 1, 2, 5, 4))
dag = DAG(
dag_id='test_scheduler_auto_align_2',
start_date=datetime.datetime(2016, 1, 1, 10, 10, 0),
schedule_interval="10 10 * * *"
)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
session.merge(orm_dag)
session.commit()
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
self.assertEquals(dr.execution_date, datetime.datetime(2016, 1, 1, 10, 10))
def test_scheduler_reschedule(self):
"""
Checks if tasks that are not taken up by the executor
get rescheduled
"""
executor = TestExecutor()
dagbag = DagBag(executor=executor)
dagbag.dags.clear()
dagbag.executor = executor
dag = DAG(
dag_id='test_scheduler_reschedule',
start_date=DEFAULT_DATE)
dag_task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
dag.clear()
dag.is_subdag = False
session = settings.Session()
orm_dag = DagModel(dag_id=dag.dag_id)
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)
@mock.patch('airflow.models.DagBag', return_value=dagbag)
@mock.patch('airflow.models.DagBag.collect_dags')
def do_schedule(function, function2):
            # Use an empty file since the above mock will return the
# expected DAGs. Also specify only a single file so that it doesn't
# try to schedule the above DAG repeatedly.
scheduler = SchedulerJob(num_runs=1,
executor=executor,
subdir=os.path.join(models.DAGS_FOLDER,
"no_dags.py"))
scheduler.heartrate = 0
scheduler.run()
do_schedule()
self.assertEquals(1, len(executor.queued_tasks))
executor.queued_tasks.clear()
do_schedule()
self.assertEquals(2, len(executor.queued_tasks))
def test_scheduler_run_duration(self):
"""
Verifies that the scheduler run duration limit is followed.
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
expected_run_duration = 5
start_time = datetime.datetime.now()
scheduler = SchedulerJob(dag_id,
run_duration=expected_run_duration,
**self.default_scheduler_args)
scheduler.run()
end_time = datetime.datetime.now()
run_duration = (end_time - start_time).total_seconds()
logging.info("Test ran in %.2fs, expected %.2fs",
run_duration,
expected_run_duration)
        self.assertLess(run_duration - expected_run_duration, 5.0)
def test_dag_with_system_exit(self):
"""
Test to check that a DAG with a system.exit() doesn't break the scheduler.
"""
dag_id = 'exit_test_dag'
dag_ids = [dag_id]
dag_directory = os.path.join(models.DAGS_FOLDER,
"..",
"dags_with_system_exit")
dag_file = os.path.join(dag_directory,
'b_test_scheduler_dags.py')
dagbag = DagBag(dag_folder=dag_file)
for dag_id in dag_ids:
dag = dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
                                 subdir=dag_directory,
num_runs=1,
**self.default_scheduler_args)
scheduler.run()
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
| apache-2.0 | 7,649,650,385,541,425,000 | 31.161066 | 115 | 0.569555 | false |
denmojo/pygrow | grow/deployments/indexes.py | 1 | 10441 | from . import messages
from . import utils
from grow.common import utils as common_utils
from protorpc import protojson
if common_utils.is_appengine():
pool = None
else:
from multiprocessing import pool
import ConfigParser
import datetime
import hashlib
import logging
import progressbar
import texttable
class Error(Exception):
pass
class CorruptIndexError(Error):
pass
class Diff(object):
POOL_SIZE = 100 # Thread pool size for applying a diff.
@classmethod
def is_empty(cls, diff):
return not diff.adds and not diff.deletes and not diff.edits
@classmethod
def _format_author(cls, author, include_email=True):
if include_email:
return '{} <{}>'.format(author.name, author.email) if author else ''
return author.name if author else ''
@classmethod
def _make_diff_row(cls, color, label, message):
label = texttable.get_color_string(color, label)
path = texttable.get_color_string(texttable.bcolors.WHITE, message.path)
formatted_author = cls._format_author(message.deployed_by, True)
deployed = str(message.deployed).split('.')[0][:-3] if message.deployed else ''
return [label, path, deployed, formatted_author]
@classmethod
def pretty_print(cls, diff):
last_commit = diff.indexes[0].commit
new_commit = diff.indexes[1].commit
last_index = diff.indexes[0]
new_index = diff.indexes[1]
table = texttable.Texttable(max_width=0)
table.set_deco(texttable.Texttable.HEADER)
rows = []
rows.append(['Action', 'Path', 'Last deployed', 'By'])
file_rows = []
for add in diff.adds:
file_rows.append(cls._make_diff_row(texttable.bcolors.GREEN, 'add', add))
for edit in diff.edits:
file_rows.append(cls._make_diff_row(texttable.bcolors.PURPLE, 'edit', edit))
for delete in diff.deletes:
file_rows.append(cls._make_diff_row(texttable.bcolors.RED, 'delete', delete))
file_rows.sort(key=lambda row: row[1])
rows += file_rows
table.add_rows(rows)
logging.info('\n' + table.draw() + '\n')
if last_index.deployed and last_index.deployed_by:
logging.info('Last deployed: {} by {}'.format(
last_index.deployed, cls._format_author(last_index.deployed_by)))
last_commit_sha = last_commit.sha if last_commit else ''
new_commit_sha = new_commit.sha if new_commit else ''
if new_index.deployed_by:
between_commits = '{}..{}'.format(
last_commit_sha[:7],
new_commit_sha[:7])
if new_commit:
if new_commit.has_unstaged_changes:
between_commits += ' (with unstaged changes)'
else:
between_commits += ' (initial commit)'
logging.info('Diff: {} as {}'.format(
between_commits, new_index.deployed_by.email))
if diff.what_changed:
logging.info(diff.what_changed + '\n')
@classmethod
def create(cls, index, theirs, repo=None):
git = common_utils.get_git()
diff = messages.DiffMessage()
diff.indexes = []
diff.indexes.append(theirs or messages.IndexMessage())
diff.indexes.append(index or messages.IndexMessage())
index_paths_to_shas = {}
their_paths_to_shas = {}
for file_message in index.files:
index_paths_to_shas[file_message.path] = file_message.sha
for file_message in theirs.files:
their_paths_to_shas[file_message.path] = file_message.sha
for path, sha in index_paths_to_shas.iteritems():
if path in their_paths_to_shas:
if index_paths_to_shas[path] == their_paths_to_shas[path]:
file_message = messages.FileMessage()
file_message.path = path
file_message.deployed = theirs.deployed
file_message.deployed_by = theirs.deployed_by
diff.nochanges.append(file_message)
else:
file_message = messages.FileMessage()
file_message.path = path
file_message.deployed = theirs.deployed
file_message.deployed_by = theirs.deployed_by
diff.edits.append(file_message)
del their_paths_to_shas[path]
else:
file_message = messages.FileMessage()
file_message.path = path
diff.adds.append(file_message)
for path, sha in their_paths_to_shas.iteritems():
file_message = messages.FileMessage()
file_message.path = path
file_message.deployed = theirs.deployed
file_message.deployed_by = theirs.deployed_by
diff.deletes.append(file_message)
# What changed in the pod between deploy commits.
if (repo is not None
and index.commit and index.commit.sha
and theirs.commit and theirs.commit.sha):
try:
what_changed = repo.git.log(
'--date=short',
'--pretty=format:[%h] %ad <%ae> %s',
'{}..{}'.format(theirs.commit.sha, index.commit.sha))
if isinstance(what_changed, unicode):
what_changed = what_changed.encode('utf-8')
diff.what_changed = what_changed.decode('utf-8')
except git.exc.GitCommandError:
logging.info('Unable to determine changes between deploys.')
# If on the original deploy show commit log messages only.
elif (repo is not None
and index.commit and index.commit.sha):
what_changed = repo.git.log(
'--date=short',
'--pretty=format:[%h] %ad <%ae> %s')
if isinstance(what_changed, unicode):
what_changed = what_changed.encode('utf-8')
diff.what_changed = what_changed.decode('utf-8')
return diff
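    # The classification computed above is a plain SHA comparison between the
    # local index ("index") and the deployed one ("theirs"):
    #   path only in index          -> adds
    #   path in both, same SHA      -> nochanges
    #   path in both, different SHA -> edits
    #   path only in theirs         -> deletes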
@classmethod
def to_string(cls, message):
return protojson.encode_message(message)
@classmethod
def apply(cls, message, paths_to_content, write_func, delete_func,
threaded=True, batch_writes=False):
if pool is None:
text = 'Deployment is unavailable in this environment.'
raise common_utils.UnavailableError(text)
thread_pool = pool.ThreadPool(cls.POOL_SIZE)
diff = message
num_files = len(diff.adds) + len(diff.edits) + len(diff.deletes)
text = 'Deploying: %(value)d/{} (in %(elapsed)s)'
widgets = [progressbar.FormatLabel(text.format(num_files))]
bar = progressbar.ProgressBar(widgets=widgets, maxval=num_files)
def run_with_progress(func, *args):
func(*args)
bar.update(bar.currval + 1)
if batch_writes:
writes_paths_to_contents = {}
for file_message in diff.adds:
writes_paths_to_contents[file_message.path] = \
paths_to_content[file_message.path]
for file_message in diff.edits:
writes_paths_to_contents[file_message.path] = \
paths_to_content[file_message.path]
deletes_paths = [file_message.path for file_message in diff.deletes]
if writes_paths_to_contents:
write_func(writes_paths_to_contents)
if deletes_paths:
delete_func(deletes_paths)
else:
bar.start()
for file_message in diff.adds:
content = paths_to_content[file_message.path]
if threaded:
args = (write_func, file_message.path, content)
thread_pool.apply_async(run_with_progress, args=args)
else:
run_with_progress(write_func, file_message.path, content)
for file_message in diff.edits:
content = paths_to_content[file_message.path]
if threaded:
args = (write_func, file_message.path, content)
thread_pool.apply_async(run_with_progress, args=args)
else:
run_with_progress(write_func, file_message.path, content)
for file_message in diff.deletes:
if threaded:
args = (delete_func, file_message.path)
thread_pool.apply_async(run_with_progress, args=args)
else:
run_with_progress(delete_func, file_message.path)
if threaded:
thread_pool.close()
thread_pool.join()
if not batch_writes:
bar.finish()
class Index(object):
@classmethod
def create(cls, paths_to_contents=None):
message = messages.IndexMessage()
message.deployed = datetime.datetime.now()
message.files = []
if paths_to_contents is None:
return message
for pod_path, contents in paths_to_contents.iteritems():
cls.add_file(message, pod_path, contents)
return message
@classmethod
def add_file(cls, message, path, contents):
pod_path = '/' + path.lstrip('/')
m = hashlib.sha1()
if isinstance(contents, unicode):
contents = contents.encode('utf-8')
m.update(contents)
sha = m.hexdigest()
message.files.append(messages.FileMessage(path=pod_path, sha=sha))
return message
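    # Illustrative usage (paths and contents hypothetical):
    #   index = Index.create({'/index.html': '<html/>'})
    #   Index.add_file(index, '/about.html', u'about')
    # Each file is stored as (path, sha1-of-content); Diff.create later compares
    # these SHAs against the deployed index to derive adds/edits/deletes.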
@classmethod
def add_repo(cls, message, repo):
config = repo.config_reader()
try:
message.deployed_by = messages.AuthorMessage(
name=config.get('user', 'name'),
email=config.get('user', 'email'))
except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):
logging.warning("Couldn't find user info in repository config.")
try:
message.commit = utils.create_commit_message(repo)
except utils.NoGitHeadError as e:
logging.warning(e)
return message
@classmethod
def to_string(cls, message):
return protojson.encode_message(message)
@classmethod
def from_string(cls, content):
return protojson.decode_message(messages.IndexMessage, content)
| mit | 703,216,207,148,093,200 | 38.25188 | 89 | 0.579542 | false |
tjyang/vitess | py/vtdb/vtgate_utils.py | 1 | 6266 | """Simple utility values, methods, and classes."""
import logging
import re
import time
from vtdb import dbexceptions
from vtdb import vtdb_logger
from vtproto import vtrpc_pb2
INITIAL_DELAY_MS = 5
NUM_RETRIES = 3
MAX_DELAY_MS = 100
BACKOFF_MULTIPLIER = 2
def log_exception(exc, keyspace=None, tablet_type=None):
"""This method logs the exception.
Args:
exc: exception raised by calling code
keyspace: keyspace for the exception
tablet_type: tablet_type for the exception
"""
logger_object = vtdb_logger.get_logger()
shard_name = None
if isinstance(exc, dbexceptions.IntegrityError):
logger_object.integrity_error(exc)
else:
logger_object.vtclient_exception(keyspace, shard_name, tablet_type,
exc)
def exponential_backoff_retry(
retry_exceptions,
initial_delay_ms=INITIAL_DELAY_MS,
num_retries=NUM_RETRIES,
backoff_multiplier=BACKOFF_MULTIPLIER,
max_delay_ms=MAX_DELAY_MS):
"""Decorator for exponential backoff retry.
Log and raise exception if unsuccessful.
Do not retry while in a session.
Args:
retry_exceptions: tuple of exceptions to check.
initial_delay_ms: initial delay between retries in ms.
    num_retries: max number of retries.
backoff_multiplier: multiplier for each retry e.g. 2 will double the
retry delay.
max_delay_ms: upper bound on retry delay.
Returns:
A decorator method that returns wrapped method.
"""
def decorator(method):
"""Returns wrapper that calls method and retries on retry_exceptions."""
def wrapper(self, *args, **kwargs):
attempt = 0
delay = initial_delay_ms
while True:
try:
return method(self, *args, **kwargs)
except retry_exceptions as e:
attempt += 1
if attempt > num_retries or self.session:
# In this case it is hard to discern keyspace
# and tablet_type from exception.
log_exception(e)
raise e
logging.error(
'retryable error: %s, retrying in %d ms, attempt %d of %d', e,
delay, attempt, num_retries)
time.sleep(delay/1000.0)
delay *= backoff_multiplier
delay = min(max_delay_ms, delay)
return wrapper
return decorator
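# Illustrative usage sketch (not part of the original module; FakeConn and
# _do_rpc are hypothetical names):
#
#   class FakeConn(object):
#     session = None  # retries are skipped while a session is open
#
#     @exponential_backoff_retry(
#         retry_exceptions=(dbexceptions.TransientError,))
#     def execute(self, sql):
#       return _do_rpc(sql)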
class VitessError(Exception):
"""VitessError is raised by an RPC with a server-side application error.
VitessErrors have an error code and message.
"""
_errno_pattern = re.compile(r'\(errno (\d+)\)')
def __init__(self, method_name, error=None):
"""Initializes a VitessError with appropriate defaults from an error dict.
Args:
method_name: RPC method name, as a string, that was called.
error: error dict returned by an RPC call.
"""
if error is None or not isinstance(error, dict):
error = {}
self.method_name = method_name
self.code = error.get('Code', vtrpc_pb2.UNKNOWN_ERROR)
self.message = error.get('Message', 'Missing error message')
# Make self.args reflect the error components
super(VitessError, self).__init__(self.message, method_name, self.code)
def __str__(self):
"""Print the error nicely, converting the proto error enum to its name."""
return '%s returned %s with message: %s' % (
self.method_name, vtrpc_pb2.ErrorCode.Name(self.code), self.message)
def convert_to_dbexception(self, args):
"""Converts from a VitessError to the appropriate dbexceptions class.
Args:
args: argument tuple to use to create the new exception.
Returns:
An exception from dbexceptions.
"""
if self.code == vtrpc_pb2.TRANSIENT_ERROR:
return dbexceptions.TransientError(args)
if self.code == vtrpc_pb2.INTEGRITY_ERROR:
# Prune the error message to truncate after the mysql errno, since
# the error message may contain the query string with bind variables.
msg = self.message.lower()
      parts = self._errno_pattern.split(msg)
      if len(parts) > 2:
        pruned_msg = msg[:msg.find(parts[2])]
      else:
        # No "(errno N)" marker in the message; keep it unchanged.
        pruned_msg = msg
new_args = (pruned_msg,) + tuple(args[1:])
return dbexceptions.IntegrityError(new_args)
return dbexceptions.DatabaseError(args)
def extract_rpc_error(method_name, response):
"""Extracts any app error that's embedded in an RPC response.
Args:
method_name: RPC name, as a string.
response: response from an RPC.
Raises:
VitessError: If there is an app error embedded in the reply.
"""
reply = response.reply
if not reply or not isinstance(reply, dict):
return
# Handle the case of new client => old server
err = reply.get('Err', None)
if err:
raise VitessError(method_name, err)
def unique_join(str_list, delim='|'):
return delim.join(sorted(set(str(item) for item in str_list)))
def keyspace_id_prefix(packed_keyspace_id):
"""Return the first str byte of packed_keyspace_id if it exists."""
return '%02x' % ord(packed_keyspace_id[0])
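# Example (illustrative): keyspace_id_prefix('\x80\x00') returns '80'.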
def keyspace_id_prefixes(packed_keyspace_ids):
"""Return the first str byte of each packed_keyspace_id if it exists."""
return unique_join(keyspace_id_prefix(pkid) for pkid in packed_keyspace_ids)
def convert_exception_kwarg(key, value):
if value is None:
return key, value
if key in (
'entity_column_name',
'keyspace',
'num_queries',
'tablet_type'):
return key, value
elif key == 'entity_keyspace_id_map':
return 'entity_keyspace_ids', keyspace_id_prefixes(
value.values())
elif key in (
'keyspace_ids',
'merged_keyspace_ids'):
return key, keyspace_id_prefixes(value)
elif key in (
'keyranges',
'keyspaces',
'sqls'):
return key, unique_join(value)
else:
return key, 'unknown'
def convert_exception_kwargs(kwargs):
"""Convert kwargs into a readable str.
Args:
kwargs: A (str: value) dict.
Returns:
A comma-delimited string of converted, truncated key=value pairs.
All non-None kwargs are included in alphabetical order.
"""
new_kwargs = {}
for key, value in kwargs.iteritems():
new_key, new_value = convert_exception_kwarg(key, value)
new_kwargs[new_key] = new_value
return ', '.join(
('%s=%s' % (k, v))[:256]
for (k, v) in sorted(new_kwargs.iteritems())
if v is not None)
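# Example (illustrative, hypothetical values):
#   convert_exception_kwargs({'keyspace': 'ks', 'sqls': ['b', 'a', 'a']})
#   returns 'keyspace=ks, sqls=a|b'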
| bsd-3-clause | -1,216,271,494,707,351,300 | 28.980861 | 78 | 0.663102 | false |
Code4SA/mma-dexter | tests/fixtures/__init__.py | 1 | 2298 | import datetime
from fixture import DataSet, NamedDataStyle, SQLAlchemyFixture
from dexter.models import db, Person, Entity, Author, Document, User, Role
class PersonData(DataSet):
class joe_author:
name = 'Joe Author'
gender_id = 2
race_id = 1
class zuma:
name = 'Jacob Zuma'
gender_id = 2
race_id = 2
class sue_no_gender:
name = 'Sue'
class EntityData(DataSet):
class joe_author:
name = 'Joe Author'
group = 'person'
person = PersonData.joe_author
class zuma:
name = 'Jacob Zuma'
group = 'person'
person = PersonData.zuma
class sue_no_gender:
name = 'Sue'
group = 'person'
person = PersonData.sue_no_gender
class AuthorData(DataSet):
class joe_author:
name = 'Joe Author'
author_type_id = 1
person = PersonData.joe_author
class RoleData(DataSet):
class monitor:
name = 'monitor'
description = 'monitor'
class UserData(DataSet):
class user:
first_name = 'User'
last_name = 'Smith'
email = 'user@example.com'
country_id = 1
password = 'foo'
admin = 1
class user2:
first_name = 'Joe'
last_name = 'Bloggs'
email = 'joe@example.com'
country_id = 1
password = 'foo'
admin = 0
class DocumentData(DataSet):
class simple:
url = 'http://mg.co.za/articles/2012-01-01-foo'
title = 'Title'
summary = 'A document summary'
text = 'Today, we do fun things.'
published_at = datetime.datetime(2012, 1, 1)
medium_id = 1
document_type_id = 1
author = AuthorData.joe_author
created_by = UserData.user
country_id = 1
class simple2:
url = 'http://mg.co.za/articles/2012-03-03-bar'
title = 'Another title'
summary = 'Another document summary'
text = 'Today, we do fun things.'
published_at = datetime.datetime(2012, 3, 3)
medium_id = 1
document_type_id = 1
author = AuthorData.joe_author
created_by = UserData.user
country_id = 1
dbfixture = SQLAlchemyFixture(
env=globals(),
style=NamedDataStyle(),
engine=db.engine)
| apache-2.0 | -7,353,189,440,585,254,000 | 23.446809 | 74 | 0.570496 | false |
att-comdev/deckhand | deckhand/tests/unit/control/test_base_controller.py | 1 | 1477 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from deckhand.control import base as api_base
from deckhand.tests.unit.control import base as test_base
class TestBaseController(test_base.BaseControllerTest):
def setUp(self):
super(TestBaseController, self).setUp()
self.base_resource = api_base.BaseResource()
@mock.patch.object(api_base, 'dir') # noqa
def test_on_options(self, mock_dir):
expected_methods = ['on_get', 'on_post', 'on_put', 'on_delete',
'on_patch']
mock_dir.return_value = expected_methods
mock_resp = mock.Mock(headers={})
self.base_resource.on_options(None, mock_resp)
self.assertIn('Allow', mock_resp.headers)
self.assertEqual('GET,POST,PUT,DELETE,PATCH',
mock_resp.headers['Allow'])
self.assertEqual('200 OK', mock_resp.status)
| apache-2.0 | 4,100,489,499,763,393,000 | 37.868421 | 74 | 0.687204 | false |
emanuelschuetze/OpenSlides | openslides/motions/projector.py | 1 | 11044 | import re
from typing import Any, Dict
from ..users.projector import get_user_name
from ..utils.projector import (
AllData,
ProjectorElementException,
get_config,
register_projector_slide,
)
motion_placeholder_regex = re.compile(r"\[motion:(\d+)\]")
# Important: All functions have to be pure. This means that they can only
#            access the data that they get as argument and must not have any
#            side effects. They are called from an async context, so they
#            have to be fast!
def get_state(
all_data: AllData, motion: Dict[str, Any], state_id: int
) -> Dict[str, Any]:
"""
Returns a state element from one motion.
    Raises a ProjectorElementException if the state_id does not exist in the
    motion's workflow.
"""
states = all_data["motions/workflow"][motion["workflow_id"]]["states"]
for state in states:
if state["id"] == state_id:
return state
raise ProjectorElementException(
f"motion {motion['id']} can not be on the state with id {state_id}"
)
def get_amendment_merge_into_motion_diff(all_data, motion, amendment):
"""
HINT: This implementation should be consistent to showInDiffView() in ViewMotionAmendedParagraph.ts
"""
if amendment["state_id"] is None:
return 0
state = get_state(all_data, motion, amendment["state_id"])
if state["merge_amendment_into_final"] == -1:
return 0
if state["merge_amendment_into_final"] == 1:
return 1
if amendment["recommendation_id"] is None:
return 0
recommendation = get_state(all_data, motion, amendment["recommendation_id"])
if recommendation["merge_amendment_into_final"] == 1:
return 1
return 0
def get_amendment_merge_into_motion_final(all_data, motion, amendment):
"""
HINT: This implementation should be consistent to showInFinalView() in ViewMotionAmendedParagraph.ts
"""
if amendment["state_id"] is None:
return 0
state = get_state(all_data, motion, amendment["state_id"])
if state["merge_amendment_into_final"] == 1:
return 1
return 0
def get_amendments_for_motion(motion, all_data):
amendment_data = []
for amendment_id, amendment in all_data["motions/motion"].items():
if amendment["parent_id"] == motion["id"]:
merge_amendment_into_final = get_amendment_merge_into_motion_final(
all_data, motion, amendment
)
merge_amendment_into_diff = get_amendment_merge_into_motion_diff(
all_data, motion, amendment
)
amendment_data.append(
{
"id": amendment["id"],
"identifier": amendment["identifier"],
"title": amendment["title"],
"amendment_paragraphs": amendment["amendment_paragraphs"],
"merge_amendment_into_diff": merge_amendment_into_diff,
"merge_amendment_into_final": merge_amendment_into_final,
}
)
return amendment_data
def get_amendment_base_motion(amendment, all_data):
try:
motion = all_data["motions/motion"][amendment["parent_id"]]
except KeyError:
motion_id = amendment["parent_id"]
raise ProjectorElementException(f"motion with id {motion_id} does not exist")
return {
"identifier": motion["identifier"],
"title": motion["title"],
"text": motion["text"],
}
def get_amendment_base_statute(amendment, all_data):
try:
statute = all_data["motions/statute-paragraph"][
amendment["statute_paragraph_id"]
]
except KeyError:
statute_id = amendment["statute_paragraph_id"]
raise ProjectorElementException(f"statute with id {statute_id} does not exist")
return {"title": statute["title"], "text": statute["text"]}
def extend_reference_motion_dict(
all_data: AllData,
recommendation: str,
referenced_motions: Dict[int, Dict[str, str]],
) -> None:
"""
Extends a dict of motion ids mapped to their title information.
The client can replace the placeholders in the recommendation correctly.
"""
# Collect all meantioned motions via [motion:<id>]
referenced_ids = [
int(id) for id in motion_placeholder_regex.findall(recommendation)
]
for id in referenced_ids:
# Put every referenced motion into the referenced_motions dict
if id not in referenced_motions and id in all_data["motions/motion"]:
referenced_motions[id] = {
"title": all_data["motions/motion"][id]["title"],
"identifier": all_data["motions/motion"][id]["identifier"],
}
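# Illustrative example (hypothetical data): given
#   recommendation = "... see [motion:5] ..."
# and all_data["motions/motion"][5] titled "Budget" with identifier "A1",
# the call fills referenced_motions with
#   {5: {"title": "Budget", "identifier": "A1"}}.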
def motion_slide(
all_data: AllData, element: Dict[str, Any], projector_id: int
) -> Dict[str, Any]:
"""
Motion slide.
The returned dict can contain the following fields:
* identifier
* title
* text
* amendment_paragraphs
* is_child
* show_meta_box
* reason
* modified_final_version
* recommendation
* recommendation_extension
* recommender
* change_recommendations
* submitter
"""
mode = element.get("mode", get_config(all_data, "motions_recommendation_text_mode"))
motion_id = element.get("id")
if motion_id is None:
raise ProjectorElementException("id is required for motion slide")
try:
motion = all_data["motions/motion"][motion_id]
except KeyError:
raise ProjectorElementException(f"motion with id {motion_id} does not exist")
show_meta_box = not get_config(all_data, "motions_disable_sidebox_on_projector")
line_length = get_config(all_data, "motions_line_length")
line_numbering_mode = get_config(all_data, "motions_default_line_numbering")
motions_preamble = get_config(all_data, "motions_preamble")
if motion["statute_paragraph_id"]:
change_recommendations = [] # type: ignore
amendments = [] # type: ignore
base_motion = None
base_statute = get_amendment_base_statute(motion, all_data)
elif bool(motion["parent_id"]) and motion["amendment_paragraphs"]:
change_recommendations = []
amendments = []
base_motion = get_amendment_base_motion(motion, all_data)
base_statute = None
else:
change_recommendations = list(
filter(
lambda reco: reco["internal"] is False, motion["change_recommendations"]
)
)
amendments = get_amendments_for_motion(motion, all_data)
base_motion = None
base_statute = None
return_value = {
"identifier": motion["identifier"],
"title": motion["title"],
"preamble": motions_preamble,
"text": motion["text"],
"amendment_paragraphs": motion["amendment_paragraphs"],
"base_motion": base_motion,
"base_statute": base_statute,
"is_child": bool(motion["parent_id"]),
"show_meta_box": show_meta_box,
"change_recommendations": change_recommendations,
"amendments": amendments,
"line_length": line_length,
"line_numbering_mode": line_numbering_mode,
}
if not get_config(all_data, "motions_disable_reason_on_projector"):
return_value["reason"] = motion["reason"]
if mode == "final":
return_value["modified_final_version"] = motion["modified_final_version"]
if show_meta_box:
if (
not get_config(all_data, "motions_disable_recommendation_on_projector")
and motion["recommendation_id"]
):
recommendation_state = get_state(
all_data, motion, motion["recommendation_id"]
)
return_value["recommendation"] = recommendation_state[
"recommendation_label"
]
if recommendation_state["show_recommendation_extension_field"]:
recommendation_extension = motion["recommendation_extension"]
# All title information for referenced motions in the recommendation
referenced_motions: Dict[int, Dict[str, str]] = {}
extend_reference_motion_dict(
all_data, recommendation_extension, referenced_motions
)
return_value["recommendation_extension"] = recommendation_extension
return_value["referenced_motions"] = referenced_motions
return_value["recommender"] = get_config(
all_data, "motions_recommendations_by"
)
return_value["submitter"] = [
get_user_name(all_data, submitter["user_id"])
for submitter in sorted(
motion["submitters"], key=lambda submitter: submitter["weight"]
)
]
return return_value
def motion_block_slide(
all_data: AllData, element: Dict[str, Any], projector_id: int
) -> Dict[str, Any]:
"""
Motion block slide.
"""
motion_block_id = element.get("id")
if motion_block_id is None:
raise ProjectorElementException("id is required for motion block slide")
try:
motion_block = all_data["motions/motion-block"][motion_block_id]
except KeyError:
raise ProjectorElementException(
f"motion block with id {motion_block_id} does not exist"
)
# All motions in this motion block
motions = []
# All title information for referenced motions in the recommendation
referenced_motions: Dict[int, Dict[str, str]] = {}
# Search motions.
for motion in all_data["motions/motion"].values():
if motion["motion_block_id"] == motion_block_id:
motion_object = {
"title": motion["title"],
"identifier": motion["identifier"],
}
recommendation_id = motion["recommendation_id"]
if recommendation_id is not None:
recommendation = get_state(
all_data, motion, motion["recommendation_id"]
)
motion_object["recommendation"] = {
"name": recommendation["recommendation_label"],
"css_class": recommendation["css_class"],
}
if recommendation["show_recommendation_extension_field"]:
recommendation_extension = motion["recommendation_extension"]
extend_reference_motion_dict(
all_data, recommendation_extension, referenced_motions
)
motion_object["recommendation_extension"] = recommendation_extension
motions.append(motion_object)
return {
"title": motion_block["title"],
"motions": motions,
"referenced_motions": referenced_motions,
}
def register_projector_slides() -> None:
register_projector_slide("motions/motion", motion_slide)
register_projector_slide("motions/motion-block", motion_block_slide)
| mit | 799,354,398,066,915,300 | 33.72956 | 104 | 0.608747 | false |
ArthurChiao/code-snippets | python/python-scripts/25_ip2geolocation.py | 1 | 1822 | import csv
import requests
def get_addresses(filename):
"""
Given a CSV file, this function returns a list of lists
where each element (list) in the outer list contains the
row info from the csv file.
"""
all_addresses = []
with open(filename, 'rt') as f:
reader = csv.reader(f)
for row in reader:
all_addresses.append(row)
return all_addresses
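# Illustrative input layout (an assumption about 25_sample_csv.csv): a header
# row followed by one IP address in the first column of each row, e.g.
#   ip_address
#   174.36.207.186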
def get_geolocation(all_the_ip_address):
"""
Given a list of lists from `get_addresses()`, this function
returns an updated lists of lists containing the geolocation.
"""
print("Getting geo information...")
updated_addresses = []
counter = 1
# update header
header_row = all_the_ip_address.pop(0)
header_row.extend(['Country', 'City'])
# get geolocation
for line in all_the_ip_address:
print("Grabbing geo info for row # {0}".format(counter))
r = requests.get('https://freegeoip.net/json/{0}'.format(line[0]))
line.extend([str(r.json()['country_name']), str(r.json()['city'])])
updated_addresses.append(line)
counter += 1
updated_addresses.insert(0, header_row)
return updated_addresses
def create_csv(updated_address_list):
"""
Given the updated lists of lists from `get_geolocation()`, this function
creates a new CSV.
"""
import sys
if sys.version_info >= (3, 0, 0):
f = open('output.csv', 'w', newline='')
else:
f = open('output.csv', 'wb')
with f:
writer = csv.writer(f)
writer.writerows(updated_address_list)
print("All done!")
if __name__ == '__main__':
csv_file = '25_sample_csv.csv'
all_the_ip_address = get_addresses(csv_file)
updated_address_list = get_geolocation(all_the_ip_address)
create_csv(updated_address_list)
| mit | 7,261,807,812,556,115,000 | 28.868852 | 76 | 0.620746 | false |
luckyagarwal/author-clustering | Main.py | 1 | 9270 | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 10 12:30:18 2018
"""
#main
from __future__ import division, unicode_literals
from sklearn import metrics
from sklearn.metrics import r2_score
from textblob import TextBlob as tb
import numpy as np
import sys
import glob
import codecs
import json
import os
from nltk.tokenize import word_tokenize
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
import gensim
import sortedcollections
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from itertools import combinations
import itertools
import math
from gensim.models import LogEntropyModel
from gensim.test.utils import common_texts
from gensim.corpora import Dictionary
def tf(word, blob):
return blob.words.count(word) / len(blob.words)
def n_containing(word, bloblist):
return sum(1 for blob in bloblist if word in blob.words)
def idf(word, bloblist):
return math.log(len(bloblist) / (1 + n_containing(word, bloblist)))
def tfidf(word, blob, bloblist):
return tf(word, blob) * idf(word, bloblist)
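# Worked example (illustrative):
#   docs = [tb("the cat sat"), tb("the dog sat")]
#   tf("cat", docs[0])  == 1/3
#   idf("cat", docs)    == log(2 / (1 + 1)) == 0.0  # note the 1+df smoothing
#   tfidf("cat", docs[0], docs) == 0.0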
def info(eval_dir):
path=os.path.join(eval_dir,"info.json")
s=open(path,'r')
data = json.load(s)
dict_file = {}
for i in xrange(len(data)):
dict_file[data[i][str("folder")]] = data[i][str("language")]
return dict_file
def merge_prblm_docs(eval_dir,k):
prblm_path=os.path.join(eval_dir,k)
d=glob.glob(prblm_path + '\\*')
doc=[]
for i in d:
f=codecs.open(i, 'r', 'utf-8')
text=f.read()
doc.append(text)
f.close()
return doc
def pre_process_w2v(doc):
doc=[i.lower() for i in doc]
x=0
for i in doc:
temp=''
for j in i:
if j.isalpha() or j==' ' or j=="'":
temp+=j
doc[x]=temp
x=x+1
return doc
def stopwords_r(doc,lang):
words=[word_tokenize(i) for i in doc]
if lang == "en":
with open("stopwords/stopwords_en.txt") as f:
stoplist = f.readlines()
stoplist = [x.strip('\n') for x in stoplist]
elif lang == "nl":
with open("stopwords/stopwords_nl.txt") as f:
stoplist = f.readlines()
stoplist = [x.strip('\n') for x in stoplist]
elif lang == "gr":
with open("stopwords/stopwords_gr.txt") as f:
stoplist = f.readlines()
stoplist = [x.strip('\n') for x in stoplist]
x=0
for i in words:
temp=[]
for j in i:
if j not in stoplist:
temp.append(j)
words[x]=temp
x=x+1
return words
def vec_fec(doc_bag,model,review_list):
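    # Builds one 300-d vector per document: the tf-idf-weighted mean of the
    # word2vec embeddings of its tokens; words missing from the model are
    # skipped via the bare except.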
sen_vec=[]
review_list=[tb(sentence) for sentence in review_list]
for sen,blob in zip(doc_bag,review_list):
dummy_vec = np.zeros((300))
for word in sen:
try:
#new code added
t=tfidf(word,blob,review_list)
x=((model[word])*t)
#print x
dummy_vec +=x
except:
pass
sen_vec.append(dummy_vec/len(sen))
sen_vec=[i for i in sen_vec]
return sen_vec
def clustering_word2vec(data_features):
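    # Tries cluster counts from 6 up to len(data_features)-1, keeps the one
    # with the highest silhouette score, then re-fits agglomerative
    # clustering with that count.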
d={}
for i in range(6,len(data_features)):
#spectral = KMeans(n_clusters=i).fit((np.array(data_features)))
spectral = AgglomerativeClustering(n_clusters=i, linkage='ward').fit(np.array(data_features))
label = spectral.fit_predict((np.array(data_features)))
#score=metrics.calinski_harabaz_score((np.array(data_features)), label)
score= metrics.silhouette_score((np.array(data_features)), label,metric='euclidean')
d[i]=score
n_c=0
for key,val in d.iteritems():
if(val==max(d.values())):
n_c=key
break
print n_c
spectral = AgglomerativeClustering(n_clusters=n_c, linkage='ward').fit(np.array(data_features))
#spectral = KMeans(n_clusters=n_c).fit(data_features)
label = spectral.fit_predict((np.array(data_features)))
return label
def prod_output(eval_dir,out_dir,k,labels):
prblm_path=os.path.join(eval_dir,k)
doc_path=glob.glob(prblm_path + '\\*')
doc_list_name=[]
for i in doc_path:
m=i.split('\\')
m=m[-1]
doc_list_name.append(m)
dic={}
for i,j in zip(doc_list_name,labels):
dic[i]=j
list_all = []
list_val = []
for v in dic.values():
list_val.append(v)
set_val = set(list_val)
for val in set_val:
list_per_cluster = []
for key, value in dic.iteritems():
if val == value:
list_per_cluster.append(key)
list_all.append(list_per_cluster)
list_all_output = []
for i in xrange(len(list_all)):
list_cluster = []
for j in xrange(len(list_all[i])):
dict_per_doc = {}
dict_per_doc["document"] = list_all[i][j]
list_cluster.append(dict_per_doc)
list_all_output.append(list_cluster)
if(os.path.exists(out_dir+"\\"+k)==False):
os.mkdir(out_dir+"\\"+k)
out_folder=out_dir + '\\' +k
out_path = out_folder + "\\clustering.json"
out_file = open(out_path, "w")
json.dump(list_all_output, out_file, indent=4)
return list_all_output
def similarity_score(list_all, dict_features):
list_all_comb = []
for i in xrange(len(list_all)):
list_comb_percluster = []
if len(list_all[i]) > 1:
for j in xrange(len(list_all[i])):
list_comb_percluster.append(list_all[i][j]["document"])
list_all_comb.append(list_comb_percluster)
combs = []
for i in xrange(len(list_all_comb)):
comb = list(combinations(list_all_comb[i], 2))
combs.append(comb)
comb_list = list(itertools.chain(*combs))
all_sim = []
for i in xrange(len(comb_list)):
doc1 = comb_list[i][0].split(",")
doc2 = comb_list[i][1].split(",")
vec1 = dict_features[doc1[0]]
vec2 = dict_features[doc2[0]]
vec1=[vec1]
vec2=[vec2]
sim = cosine_similarity(vec1, vec2)
all_sim.append(sim)
return comb_list, all_sim
def write_ranking(comb_list, all_sim, out_dir,k):
list_all_output = []
for i in xrange(len(comb_list)):
dict_sim_perpair = {}
dict_sim_perpair["document1"] = comb_list[i][0]
dict_sim_perpair["document2"] = comb_list[i][1]
dict_sim_perpair["score"] = round(all_sim[i][0][0],6)
list_all_output.append(dict_sim_perpair)
out_folder=out_dir + '\\' +k
out_path = out_folder + "\\ranking.json"
out_file = open(out_path, "w")
json.dump(list_all_output, out_file, indent=4)
return list_all_output
# the Below code is to check if the model has been loaded or not
try:
word1 = model1['bag']
word2 = model2['slechte']
word3 = model3[u'φύλο']
print 'using loaded model....'
except:
    model1 = gensim.models.KeyedVectors.load_word2vec_format("GoogleNews-vectors-negative300.bin.gz", binary=True)
    model2 = gensim.models.KeyedVectors.load_word2vec_format("wiki.nl.bin", binary=True)
    model3 = gensim.models.KeyedVectors.load_word2vec_format("wiki.el.bin", binary=True)
eval_dir="pan17-author-clustering-test-dataset-2017-03-14"
out_dir="tfidf_weight_x_word2vec_output_pan17-author-clustering-test-dataset-2017-03-14"
dict_f=info(eval_dir)
dict_f=sortedcollections.OrderedDict(sorted(dict_f.items()))
#checkpoint 1
for k,v in dict_f.iteritems():
doc_list=merge_prblm_docs(eval_dir,k)
if v=="en":
review_en=pre_process_w2v(doc_list)
review_en_new=stopwords_r(review_en,v)
vectors=vec_fec(review_en_new,model1,review_en)
labels=clustering_word2vec(vectors)
elif v=="nl":
review_nl=pre_process_w2v(doc_list)
        review_nl_new=stopwords_r(review_nl,v)
vectors=vec_fec(review_nl_new,model2,review_nl)
labels=clustering_word2vec(vectors)
elif v=="gr":
        review_gr=pre_process_w2v(doc_list)
        review_gr_new=stopwords_r(review_gr,v)
        vectors=vec_fec(review_gr_new,model3,review_gr)
labels=clustering_word2vec(vectors)
list_all=prod_output(eval_dir,out_dir,k,labels)
dict_features={}
prblm_path=os.path.join(eval_dir,k)
doc_path=glob.glob(prblm_path + '\\*')
doc_list_name=[]
for i in doc_path:
m=i.split('\\')
m=m[-1]
doc_list_name.append(m)
i=0
for j in doc_list_name:
dict_features[j]=vectors[i]
i=i+1
# similarity between documents
list_comb, all_sim = similarity_score(list_all, dict_features)
list_sim = write_ranking(list_comb, all_sim, out_dir,k)
| mit | -1,108,620,832,915,330,700 | 30.742049 | 117 | 0.571012 | false |
openstack/neutron | neutron/tests/unit/plugins/ml2/drivers/ovn/mech_driver/ovsdb/extensions/test_qos.py | 2 | 24842 | # Copyright 2020 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import netaddr
from neutron_lib.api.definitions import portbindings as portbindings_api
from neutron_lib.api.definitions import qos as qos_api
from neutron_lib import constants
from neutron_lib import context
from neutron_lib.db import api as db_api
from neutron_lib.services.qos import constants as qos_constants
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.api import extensions
from neutron.common.ovn import constants as ovn_const
from neutron.core_extensions import qos as core_qos
from neutron.objects import network as network_obj
from neutron.objects import ports as port_obj
from neutron.objects.qos import policy as policy_obj
from neutron.objects.qos import rule as rule_obj
from neutron.objects import router as router_obj
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb.extensions \
import qos as qos_extension
from neutron.tests.unit.plugins.ml2 import test_plugin
QOS_RULE_BW_1 = {'max_kbps': 200, 'max_burst_kbps': 100}
QOS_RULE_BW_2 = {'max_kbps': 300}
QOS_RULE_DSCP_1 = {'dscp_mark': 16}
QOS_RULE_DSCP_2 = {'dscp_mark': 20}
QOS_RULE_MINBW_1 = {'min_kbps': 500}
class _Context(object):
def __enter__(self):
return self
def __exit__(self, *args):
return
class TestOVNClientQosExtension(test_plugin.Ml2PluginV2TestCase):
CORE_PLUGIN_CLASS = 'neutron.plugins.ml2.plugin.Ml2Plugin'
_extension_drivers = [qos_api.ALIAS]
l3_plugin = ('neutron.tests.unit.extensions.test_qos_fip.'
'TestFloatingIPQoSL3NatServicePlugin')
def setUp(self):
cfg.CONF.set_override('extension_drivers', self._extension_drivers,
group='ml2')
cfg.CONF.set_override('enable_distributed_floating_ip', 'False',
group='ovn')
extensions.register_custom_supported_check(qos_api.ALIAS, lambda: True,
plugin_agnostic=True)
super(TestOVNClientQosExtension, self).setUp()
self.setup_coreplugin(self.CORE_PLUGIN_CLASS, load_plugins=True)
self._mock_qos_loaded = mock.patch.object(
core_qos.QosCoreResourceExtension, 'plugin_loaded')
self.mock_qos_loaded = self._mock_qos_loaded.start()
self.txn = _Context()
mock_driver = mock.Mock()
mock_driver._nb_idl.transaction.return_value = self.txn
self.qos_driver = qos_extension.OVNClientQosExtension(mock_driver)
self._mock_rules = mock.patch.object(self.qos_driver,
'_update_port_qos_rules')
self.mock_rules = self._mock_rules.start()
self.addCleanup(self._mock_rules.stop)
self.ctx = context.get_admin_context()
self.project_id = uuidutils.generate_uuid()
self._initialize_objs()
def _get_random_db_fields(self, obj_cls=None):
obj_cls = obj_cls or self._test_class
return obj_cls.modify_fields_to_db(
self.get_random_object_fields(obj_cls))
def _create_one_port(self, mac_address_int, network_id):
mac_address = netaddr.EUI(mac_address_int)
port = port_obj.Port(
self.ctx, project_id=self.project_id,
network_id=network_id, device_owner='',
admin_state_up=True, status='DOWN', device_id='2',
mac_address=mac_address)
port.create()
return port
def _create_one_router(self):
self.router_gw_port = self._create_one_port(2000, self.fips_network.id)
self.router = router_obj.Router(self.ctx, id=uuidutils.generate_uuid(),
gw_port_id=self.router_gw_port.id)
self.router.create()
def _initialize_objs(self):
self.qos_policies = []
self.ports = []
self.networks = []
self.fips = []
self.fips_network = network_obj.Network(
self.ctx, id=uuidutils.generate_uuid(), project_id=self.project_id)
self.fips_network.create()
self._create_one_router()
self.fips_ports = []
fip_cidr = netaddr.IPNetwork('10.10.0.0/24')
for net_idx in range(2):
qos_policy = policy_obj.QosPolicy(
self.ctx, id=uuidutils.generate_uuid(),
project_id=self.project_id)
qos_policy.create()
self.qos_policies.append(qos_policy)
# Any QoS policy should have at least one rule, in order to have
# the port dictionary extended with the QoS policy information; see
# QoSPlugin._extend_port_resource_request
qos_rule = rule_obj.QosDscpMarkingRule(
self.ctx, dscp_mark=20, id=uuidutils.generate_uuid(),
qos_policy_id=qos_policy.id)
qos_rule.create()
self.fips_ports.append(self._create_one_port(1000 + net_idx,
self.fips_network.id))
fip_ip = str(netaddr.IPAddress(fip_cidr.ip + net_idx + 1))
fip = router_obj.FloatingIP(
self.ctx, id=uuidutils.generate_uuid(),
project_id=self.project_id, floating_ip_address=fip_ip,
floating_network_id=self.fips_network.id,
floating_port_id=self.fips_ports[-1].id)
fip.create()
self.fips.append(fip)
network = network_obj.Network(
self.ctx, id=uuidutils.generate_uuid(),
project_id=self.project_id)
network.create()
self.networks.append(network)
for port_idx in range(3):
self.ports.append(
self._create_one_port(net_idx * 16 + port_idx, network.id))
@mock.patch.object(qos_extension.LOG, 'warning')
@mock.patch.object(rule_obj, 'get_rules')
def test__qos_rules(self, mock_get_rules, mock_warning):
rules = [
rule_obj.QosBandwidthLimitRule(
direction=constants.EGRESS_DIRECTION, **QOS_RULE_BW_1),
rule_obj.QosBandwidthLimitRule(
direction=constants.INGRESS_DIRECTION, **QOS_RULE_BW_2),
rule_obj.QosDscpMarkingRule(**QOS_RULE_DSCP_1),
rule_obj.QosMinimumBandwidthRule(**QOS_RULE_MINBW_1)]
mock_get_rules.return_value = rules
expected = {
constants.EGRESS_DIRECTION: {
qos_constants.RULE_TYPE_BANDWIDTH_LIMIT: QOS_RULE_BW_1,
qos_constants.RULE_TYPE_DSCP_MARKING: QOS_RULE_DSCP_1},
constants.INGRESS_DIRECTION: {
qos_constants.RULE_TYPE_BANDWIDTH_LIMIT: QOS_RULE_BW_2}
}
self.assertEqual(expected, self.qos_driver._qos_rules(mock.ANY,
'policy_id1'))
msg = ('Rule type %(rule_type)s from QoS policy %(policy_id)s is not '
'supported in OVN')
mock_warning.assert_called_once_with(
msg, {'rule_type': qos_constants.RULE_TYPE_MINIMUM_BANDWIDTH,
'policy_id': 'policy_id1'})
@mock.patch.object(rule_obj, 'get_rules')
def test__qos_rules_no_rules(self, mock_get_rules):
mock_get_rules.return_value = []
expected = {constants.EGRESS_DIRECTION: {},
constants.INGRESS_DIRECTION: {}}
self.assertEqual(expected,
self.qos_driver._qos_rules(mock.ANY, mock.ANY))
def _test__ovn_qos_rule_ingress(self, fip_id=None, ip_address=None):
direction = constants.INGRESS_DIRECTION
rule = {qos_constants.RULE_TYPE_BANDWIDTH_LIMIT: QOS_RULE_BW_1}
match = self.qos_driver._ovn_qos_rule_match(
direction, 'port_id', ip_address, 'resident_port')
expected = {'burst': 100, 'rate': 200, 'direction': 'to-lport',
'match': match,
'priority': qos_extension.OVN_QOS_DEFAULT_RULE_PRIORITY,
'switch': 'neutron-network_id'}
if fip_id:
expected['external_ids'] = {ovn_const.OVN_FIP_EXT_ID_KEY: fip_id}
result = self.qos_driver._ovn_qos_rule(
direction, rule, 'port_id', 'network_id', fip_id=fip_id,
ip_address=ip_address, resident_port='resident_port')
self.assertEqual(expected, result)
def test__ovn_qos_rule_ingress(self):
self._test__ovn_qos_rule_ingress()
def test__ovn_qos_rule_ingress_fip(self):
self._test__ovn_qos_rule_ingress(fip_id='fipid', ip_address='1.2.3.4')
def _test__ovn_qos_rule_egress(self, fip_id=None, ip_address=None):
direction = constants.EGRESS_DIRECTION
rule = {qos_constants.RULE_TYPE_DSCP_MARKING: QOS_RULE_DSCP_1}
match = self.qos_driver._ovn_qos_rule_match(
direction, 'port_id', ip_address, 'resident_port')
expected = {'direction': 'from-lport', 'match': match,
'dscp': 16, 'switch': 'neutron-network_id',
'priority': qos_extension.OVN_QOS_DEFAULT_RULE_PRIORITY}
if fip_id:
expected['external_ids'] = {ovn_const.OVN_FIP_EXT_ID_KEY: fip_id}
result = self.qos_driver._ovn_qos_rule(
direction, rule, 'port_id', 'network_id', fip_id=fip_id,
ip_address=ip_address, resident_port='resident_port')
self.assertEqual(expected, result)
rule = {qos_constants.RULE_TYPE_BANDWIDTH_LIMIT: QOS_RULE_BW_2,
qos_constants.RULE_TYPE_DSCP_MARKING: QOS_RULE_DSCP_2}
expected = {'direction': 'from-lport', 'match': match,
'rate': 300, 'dscp': 20, 'switch': 'neutron-network_id',
'priority': qos_extension.OVN_QOS_DEFAULT_RULE_PRIORITY}
if fip_id:
expected['external_ids'] = {ovn_const.OVN_FIP_EXT_ID_KEY: fip_id}
result = self.qos_driver._ovn_qos_rule(
direction, rule, 'port_id', 'network_id', fip_id=fip_id,
ip_address=ip_address, resident_port='resident_port')
self.assertEqual(expected, result)
def test__ovn_qos_rule_egress(self):
self._test__ovn_qos_rule_egress()
def test__ovn_qos_rule_egress_fip(self):
self._test__ovn_qos_rule_egress(fip_id='fipid', ip_address='1.2.3.4')
def test__port_effective_qos_policy_id(self):
port = {'qos_policy_id': 'qos1'}
self.assertEqual(('qos1', 'port'),
self.qos_driver._port_effective_qos_policy_id(port))
port = {'qos_network_policy_id': 'qos1'}
self.assertEqual(('qos1', 'network'),
self.qos_driver._port_effective_qos_policy_id(port))
port = {'qos_policy_id': 'qos_port',
'qos_network_policy_id': 'qos_network'}
self.assertEqual(('qos_port', 'port'),
self.qos_driver._port_effective_qos_policy_id(port))
port = {}
self.assertEqual((None, None),
self.qos_driver._port_effective_qos_policy_id(port))
port = {'qos_policy_id': None, 'qos_network_policy_id': None}
self.assertEqual((None, None),
self.qos_driver._port_effective_qos_policy_id(port))
port = {'qos_policy_id': 'qos1', 'device_owner': 'neutron:port'}
self.assertEqual((None, None),
self.qos_driver._port_effective_qos_policy_id(port))
def test_update_port(self):
port = self.ports[0]
original_port = self.ports[1]
# Remove QoS policy
original_port.qos_policy_id = self.qos_policies[0].id
self.qos_driver.update_port(mock.ANY, port, original_port)
self.mock_rules.assert_called_once_with(
mock.ANY, port.id, port.network_id, None, None)
# Change from port policy (qos_policy0) to network policy (qos_policy1)
self.mock_rules.reset_mock()
port.qos_network_policy_id = self.qos_policies[1].id
self.qos_driver.update_port(mock.ANY, port, original_port)
self.mock_rules.assert_called_once_with(
mock.ANY, port.id, port.network_id, self.qos_policies[1].id, None)
# No change (qos_policy0)
self.mock_rules.reset_mock()
port.qos_policy_id = self.qos_policies[0].id
original_port.qos_policy_id = self.qos_policies[0].id
self.qos_driver.update_port(mock.ANY, port, original_port)
self.mock_rules.assert_not_called()
# No change (no policy)
self.mock_rules.reset_mock()
port.qos_policy_id = None
port.qos_network_policy_id = None
original_port.qos_policy_id = None
original_port.qos_network_policy_id = None
self.qos_driver.update_port(mock.ANY, port, original_port)
self.mock_rules.assert_not_called()
# Reset (no policy)
self.qos_driver.update_port(mock.ANY, port, original_port, reset=True)
self.mock_rules.assert_called_once_with(
mock.ANY, port.id, port.network_id, None, None)
# Reset (qos_policy0, regardless of being the same a in the previous
# state)
self.mock_rules.reset_mock()
port.qos_policy_id = self.qos_policies[0].id
original_port.qos_policy_id = self.qos_policies[1].id
self.qos_driver.update_port(mock.ANY, port, original_port, reset=True)
self.mock_rules.assert_called_once_with(
mock.ANY, port.id, port.network_id, self.qos_policies[0].id, None)
# External port, OVN QoS extension does not apply.
self.mock_rules.reset_mock()
port.qos_policy_id = self.qos_policies[0].id
port_obj.PortBinding(self.ctx, port_id=port.id, host='host',
profile={}, vif_type='',
vnic_type=portbindings_api.VNIC_DIRECT).create()
# NOTE(ralonsoh): this OVO retrieval must include, in the port object,
# the port binding register created.
port = port_obj.Port.get_object(self.ctx, id=port.id)
self.qos_driver.update_port(mock.ANY, port, original_port)
self.mock_rules.assert_not_called()
def test_delete_port(self):
self.mock_rules.reset_mock()
self.qos_driver.delete_port(mock.ANY, self.ports[1])
# Assert that rules are deleted
self.mock_rules.assert_called_once_with(
mock.ANY, self.ports[1].id, self.ports[1].network_id, None, None)
def test_update_network(self):
"""Test update network.
net1: [(1) from qos_policy0 to no QoS policy,
(2) from qos_policy0 to qos_policy1]
- port10: no QoS port policy
- port11: qos_policy0
- port12: qos_policy1
"""
policies_ports = [
(None, {self.ports[0].id}),
(self.qos_policies[1].id, {self.ports[0].id})]
self.ports[1].qos_policy_id = self.qos_policies[0].id
self.ports[1].update()
self.ports[2].qos_policy_id = self.qos_policies[1].id
self.ports[2].update()
for qos_policy_id, reference_ports in policies_ports:
self.networks[0].qos_policy_id = qos_policy_id
self.networks[0].update()
original_network = {'qos_policy_id': self.qos_policies[0]}
reviewed_port_ids = self.qos_driver.update_network(
mock.ANY, self.networks[0], original_network)
self.assertEqual(reference_ports, reviewed_port_ids)
calls = [mock.call(mock.ANY, self.ports[0].id,
self.ports[0].network_id, qos_policy_id,
None)]
self.mock_rules.assert_has_calls(calls)
self.mock_rules.reset_mock()
def test_update_network_no_policy_change(self):
"""Test update network if the QoS policy is the same.
net1: [(1) from qos_policy0 to qos_policy0,
(2) from no QoS policy to no QoS policy]
"""
for qos_policy_id in (self.qos_policies[0].id, None):
self.networks[0].qos_policy_id = qos_policy_id
self.networks[0].update()
original_network = {'qos_policy_id': qos_policy_id}
reviewed_port_ids = self.qos_driver.update_network(
mock.ANY, self.networks[0], original_network)
self.assertEqual(set([]), reviewed_port_ids)
self.mock_rules.assert_not_called()
def test_update_network_reset(self):
"""Test update network.
net1: [(1) from qos_policy1 to qos_policy1,
(2) from no QoS policy to no QoS policy]
- port10: no QoS port policy
- port11: qos_policy0
- port12: qos_policy1
"""
policies_ports = [
(self.qos_policies[1].id, {self.ports[0].id}),
(None, {self.ports[0].id})]
self.ports[1].qos_policy_id = self.qos_policies[0].id
self.ports[1].update()
self.ports[2].qos_policy_id = self.qos_policies[1].id
self.ports[2].update()
for qos_policy_id, reference_ports in policies_ports:
self.networks[0].qos_policy_id = qos_policy_id
self.networks[0].update()
original_network = {'qos_policy_id': self.qos_policies[0]}
reviewed_port_ids = self.qos_driver.update_network(
mock.ANY, self.networks[0], original_network, reset=True)
self.assertEqual(reference_ports, reviewed_port_ids)
calls = [mock.call(mock.ANY, self.ports[0].id,
self.ports[0].network_id, qos_policy_id, None)]
self.mock_rules.assert_has_calls(calls)
self.mock_rules.reset_mock()
def test_update_network_external_ports(self):
"""Test update network with external ports.
- port10: no QoS port policy
- port11: no QoS port policy but external
- port12: qos_policy0
"""
policies_ports = [(self.qos_policies[0].id, {self.ports[0].id})]
self.ports[2].qos_policy_id = self.qos_policies[0].id
self.ports[2].update()
port_obj.PortBinding(self.ctx, port_id=self.ports[1].id, host='host',
profile={}, vif_type='',
vnic_type=portbindings_api.VNIC_DIRECT).create()
with mock.patch.object(self.qos_driver._driver._nb_idl,
'get_lswitch_port') as mock_lsp:
mock_lsp.side_effect = [
mock.Mock(type=ovn_const.LSP_TYPE_LOCALNET),
mock.Mock(type=ovn_const.LSP_TYPE_EXTERNAL)]
for qos_policy_id, reference_ports in policies_ports:
self.networks[0].qos_policy_id = qos_policy_id
self.networks[0].update()
original_network = {'qos_policy_id': self.qos_policies[0]}
reviewed_port_ids = self.qos_driver.update_network(
mock.ANY, self.networks[0], original_network, reset=True)
self.assertEqual(reference_ports, reviewed_port_ids)
calls = [mock.call(
mock.ANY, self.ports[0].id, self.ports[0].network_id,
qos_policy_id, None)]
self.mock_rules.assert_has_calls(calls)
self.mock_rules.reset_mock()
def test_update_policy(self):
"""Test update QoS policy, networks and ports bound are updated.
QoS policy updated: qos_policy0
net1: no QoS policy
- port10: no port QoS policy
- port11: qos_policy0 --> handled during "update_port" and updated
- port12: qos_policy1
net2: qos_policy0
- port20: no port QoS policy --> handled during "update_network"
and updated
- port21: qos_policy0 --> handled during "update_network", not updated
handled during "update_port" and updated
- port22: qos_policy1 --> handled during "update_network", not updated
fip1: qos_policy0
fip2: qos_policy1
"""
self.ports[1].qos_policy_id = self.qos_policies[0].id
self.ports[1].update()
self.ports[2].qos_policy_id = self.qos_policies[1].id
self.ports[2].update()
self.ports[4].qos_policy_id = self.qos_policies[0].id
self.ports[4].update()
self.ports[5].qos_policy_id = self.qos_policies[1].id
self.ports[5].update()
self.networks[1].qos_policy_id = self.qos_policies[0].id
self.networks[1].update()
self.fips[0].qos_policy_id = self.qos_policies[0].id
self.fips[0].update()
self.fips[1].qos_policy_id = self.qos_policies[1].id
self.fips[1].update()
mock_qos_rules = mock.Mock()
with mock.patch.object(self.qos_driver, '_qos_rules',
return_value=mock_qos_rules), \
mock.patch.object(self.qos_driver, 'update_floatingip') as \
mock_update_fip:
self.qos_driver.update_policy(self.ctx, self.qos_policies[0])
updated_ports = [self.ports[1], self.ports[3], self.ports[4]]
calls = [mock.call(self.txn, port.id, port.network_id,
self.qos_policies[0].id, mock_qos_rules)
for port in updated_ports]
# We can't ensure the call order because we are not enforcing any order
# when retrieving the port and the network list.
self.mock_rules.assert_has_calls(calls, any_order=True)
with db_api.CONTEXT_READER.using(self.ctx):
fip = self.qos_driver._plugin_l3.get_floatingip(self.ctx,
self.fips[0].id)
mock_update_fip.assert_called_once_with(self.txn, fip)
def test_update_floatingip(self):
nb_idl = self.qos_driver._driver._nb_idl
fip = self.fips[0]
original_fip = self.fips[1]
txn = mock.Mock()
# Update FIP, no QoS policy nor port/router
self.qos_driver.update_floatingip(txn, fip)
nb_idl.qos_del_ext_ids.assert_called_once()
nb_idl.qos_add.assert_not_called()
nb_idl.reset_mock()
# Attach a port and a router, not QoS policy
fip.router_id = self.router.id
fip.fixed_port_id = self.fips_ports[0].id
fip.update()
self.qos_driver.update_floatingip(txn, fip)
nb_idl.qos_del_ext_ids.assert_called_once()
nb_idl.qos_add.assert_not_called()
nb_idl.reset_mock()
# Add a QoS policy
fip.qos_policy_id = self.qos_policies[0].id
fip.update()
self.qos_driver.update_floatingip(txn, fip)
nb_idl.qos_del_ext_ids.assert_called_once()
nb_idl.qos_add.assert_called_once()
nb_idl.reset_mock()
# Remove QoS
fip.qos_policy_id = None
fip.update()
original_fip.qos_policy_id = self.qos_policies[0].id
original_fip.update()
self.qos_driver.update_floatingip(txn, fip)
nb_idl.qos_del_ext_ids.assert_called_once()
nb_idl.qos_add.assert_not_called()
nb_idl.reset_mock()
# Add again another QoS policy
fip.qos_policy_id = self.qos_policies[1].id
fip.update()
original_fip.qos_policy_id = None
original_fip.update()
self.qos_driver.update_floatingip(txn, fip)
nb_idl.qos_del_ext_ids.assert_called_once()
nb_idl.qos_add.assert_called_once()
nb_idl.reset_mock()
# Detach the port and the router
fip.router_id = None
fip.fixed_port_id = None
fip.update()
original_fip.router_id = self.router.id
original_fip.fixed_port_id = self.fips_ports[0].id
original_fip.qos_policy_id = self.qos_policies[1].id
original_fip.update()
self.qos_driver.update_floatingip(txn, fip)
nb_idl.qos_del_ext_ids.assert_called_once()
nb_idl.qos_add.assert_not_called()
nb_idl.reset_mock()
# Force reset (delete any QoS)
fip_dict = {'floating_network_id': fip.floating_network_id,
'id': fip.id}
self.qos_driver.update_floatingip(txn, fip_dict)
nb_idl.qos_del_ext_ids.assert_called_once()
nb_idl.qos_add.assert_not_called()
| apache-2.0 | -6,424,755,259,399,994,000 | 43.440072 | 79 | 0.592344 | false |
CIRCL/AIL-framework | bin/Categ.py | 1 | 4121 | #!/usr/bin/env python3
# -*-coding:UTF-8 -*
"""
The ZMQ_PubSub_Categ Module
============================
This module consumes the Redis list created by the ZMQ_PubSub_Tokenize_Q
Module.
Each word file created under /files/ represents a category.
This module takes these files and compares them to
the stream of data given by the ZMQ_PubSub_Tokenize_Q Module.
When a word from a paste matches one or more of these word files, the filename
of the paste is published/forwarded to the next modules.
Each category (each file) represents a dynamic channel.
This means that if you create 1000 files under /files/ you'll have 1000
channels where, every time a word matches a category, the paste containing
this word will be pushed to that specific channel.
.. note:: The channel will have the name of the file created.
New modules can start here: create your own category file,
then create your own module to process the pastes matching this
category.
.. note:: Modules ZMQ_Something_Q and ZMQ_Something are closely bound; always
    put the same Subscriber name in both of them.
Requirements
------------
*Need running Redis instances. (Redis)
*Category files of words in /files/ need to be created
*Need the ZMQ_PubSub_Tokenize_Q Module running to be able to work properly.
"""
##################################
# Import External packages
##################################
import os
import argparse
import time
import re
##################################
# Import Project packages
##################################
from module.abstract_module import AbstractModule
from pubsublogger import publisher
from packages import Paste
from Helper import Process
class Categ(AbstractModule):
"""
Categ module for AIL framework
"""
def __init__(self):
"""
Init Categ
"""
super(Categ, self).__init__()
self.matchingThreshold = self.process.config.getint("Categ", "matchingThreshold")
# SCRIPT PARSER #
parser = argparse.ArgumentParser(description='Start Categ module on files.')
parser.add_argument(
'-d', type=str, default="../files/",
help='Path to the directory containing the category files.',
action='store')
args = parser.parse_args()
self.redis_logger.info("Script Categ started")
categories = ['CreditCards', 'Mail', 'Onion', 'Web', 'Credential', 'Cve', 'ApiKey']
tmp_dict = {}
for filename in categories:
bname = os.path.basename(filename)
tmp_dict[bname] = []
with open(os.path.join(args.d, filename), 'r') as f:
patterns = [r'%s' % ( re.escape(s.strip()) ) for s in f]
tmp_dict[bname] = re.compile('|'.join(patterns), re.IGNORECASE)
self.categ_items = tmp_dict.items()
prec_filename = None
def compute(self, message):
# Cast message as paste
paste = Paste.Paste(message)
# Get paste content
content = paste.get_p_content()
# init categories found
is_categ_found = False
# Search for pattern categories in paste content
for categ, pattern in self.categ_items:
found = set(re.findall(pattern, content))
lenfound = len(found)
if lenfound >= self.matchingThreshold:
is_categ_found = True
msg = '{} {}'.format(paste.p_rel_path, lenfound)
self.redis_logger.debug('%s;%s %s'%(self.module_name, msg, categ))
# Export message to categ queue
self.process.populate_set_out(msg, categ)
self.redis_logger.info(
'Categ;{};{};{};Detected {} as {};{}'.format(
paste.p_source, paste.p_date, paste.p_name,
lenfound, categ, paste.p_rel_path))
if not is_categ_found:
self.redis_logger.debug('No %s found in this paste: %s'%(self.module_name, paste.p_name))
if __name__ == '__main__':
module = Categ()
module.run()
| agpl-3.0 | 2,038,237,332,338,391,600 | 30.458015 | 101 | 0.606406 | false |
VasuAgrawal/tartanHacks2015 | site/cardsgame.py | 1 | 10069 | from pprint import pprint
from pysnap import Snapchat
from ripText import TextDetector
from imageProcessor import ImageProcessor
import random
import time
class RoundStage:
Entries = 0
Judging = 1
class GameInstance:
players = []
processedSnaps = []
gameRound = 0
roundStage = RoundStage.Entries
roundStart = 0
roundDuration = 60 * 5 # i.e. five minutes
numCycles = 1
gameFinished = False
def restart(self):
print "Resetting variables"
self.api = Snapchat()
self.api.login('snapsvshumanity', 'ilovetosnap69')
# self.imp = ImageProcessor()
# self.detector = TextDetector()
self.gameRound = 0
self.winner = ""
counter = 0
while True:
newJudge = random.randint(0, len(self.players) -1)
if self.players[newJudge] != self.judge or counter > 10:
break
counter += 1
for i, player in enumerate(self.players):
player['organizer'] = True if i == newJudge else False
player['confirmed'] = False
player['judged'] = False
GameInstance.processedSnaps = []
self.roundStage = RoundStage.Entries
self.roundStart = 0
self.roundDuration = 60 * 5
self.numCycles = 1
self.gameFinished = False
self.run()
# Constructor
def __init__(self, organizer, gamePlayers):
self.api = Snapchat()
self.api.login('snapsvshumanity', 'ilovetosnap69')
self.imp = ImageProcessor()
self.detector = TextDetector()
        self.gameRound = 0
self.players.append({
'username' : organizer,
'organizer': True,
'confirmed': False,
'judged' : False
})
for p in gamePlayers.split():
currentPlayer = {
'username' : p,
'organizer': False,
'confirmed': False,
'judged' : False
}
self.players.append(currentPlayer)
self.winner = ""
# Main logic loop
def run(self):
self.api.clear_feed()
self.friendPlayers()
while (not GameInstance.gameFinished):
print "In game loop, round:", self.gameRound
print "Stage: ", self.roundStage
snaps = self.pollAndFetchSnaps()
self.processSnaps(snaps)
if self.gameRound == 0:
self.checkForAccepts()
elif self.gameRound > self.numCycles * len(self.players):
self.sendWinnerAndClose()
elif self.roundStage == RoundStage.Entries:
print "Self Entries: ", self.entries
                # Close the round when it times out or when all non-judge
                # players have submitted; fire the transition only once.
                if self.entries and (
                        time.time() - self.roundStart > self.roundDuration or
                        len(self.entries) >= len(self.players) - 1):
                    self.roundStage = RoundStage.Judging
                    self.winner = str(self.entries[-1]['id'])
                    self.proceedToJudging()
elif self.roundStage == RoundStage.Judging:
time.sleep(15)
print "Judging!"
if snaps != None:
print "WE FOUND A WINNER!"
self.sendWinnerAndClose()
break
# else:
# print "This shouldn't happen"
time.sleep(30)
print "Game is over, starting again!"
time.sleep(10)
self.restart()
# For each snap in snaps, OCR/classify
def processSnaps(self, snaps):
print "Processing ..."
if snaps != None:
print len(snaps)
for snap in snaps:
print "Processing a snap ...",
text = self.detector.getText(snap['id'])[0]
print "Text: ", text
if text == "##CONFIRM":
for p in self.players:
if p['username'] == snap['sender']:
p['confirmed'] = True
break
elif "##" in text:
print "THE WINNER IS", text
if text.replace("##", "").isdigit(): print text
if text.replace("##", "").isdigit() and self.roundStage == RoundStage.Judging and not self.gameRound == 0:
if int(text) <= len(self.entries):
announceRoundWinner(self.entries[int(text)]['userid'])
if (self.gameRound == self.numCycles * len(self.players)):
self.sendWinnerAndClose()
self.startRound()
else:
print "errrrrorrrrrr"
elif self.roundStage == RoundStage.Entries and not self.gameRound == 0:
#if (snap['userid'] in [x['userid'] for x in self.entries]):
self.entries.append(snap)
# Sends a snap to everyone announcing the round winner
def announceRoundWinner(self, winnerid):
pass
# Checks to see who won by finding max score (from player object)
def sendWinnerAndClose(self):
names = [x['username'] for x in self.players]
self.sendSnap('snaps/' + self.winner + ".jpg", ','.join(names), 10)
# Send snapchats to users inviting them to play
def sendInvitationSnap(self, users):
# invitation snap = some stuff
print users
self.sendSnap('static/snap-confirm.jpg', users, 10)
# Creates prompt image for current round
def createPrompt(self):
return 'static/blackcard.jpg'
# Sends question prompts to all players as well as judge
def sendPromptMessages(self):
print "Sending prompty messages"
prompt = self.createPrompt()
judgenotify = 'static/snap-judge.jpg'
names = [x['username'] for x in self.players]
self.sendSnap(judgenotify, self.judge['username'], 10)
print "Sent to judge"
self.sendSnap(prompt, ','.join(names), 10)
print "Sent to users"
# Check to see if all unconfirmed players have accepted
# Starts game if true
def checkForAccepts(self):
print "Checking for accepts"
unconfirmedPlayers = [x for x in self.players
if x['confirmed'] == False]
print "Unconfirmed Players:", unconfirmedPlayers
if (len(unconfirmedPlayers) == 0):
self.gameRound = 1
for player in self.players:
if player['organizer']:
player['winner'] = None
player['judged'] = True
self.judge = player
self.startRound()
# Enters judging mode, sends all choices to judge
def proceedToJudging(self):
recipient = self.judge['username']
for i, entry in enumerate(self.entries):
# self.imp.addNumber(str(entry['id']), i + 1)
path = 'snaps/' + entry['id'] + '.jpg'
time = entry['time']
self.sendSnap(path, recipient, time)
# Initializes the round
def startRound(self):
print "Starting Round"
self.roundStage = RoundStage.Entries
self.entries = []
self.sendPromptMessages()
self.roundStart = time.time()
# gets all new snaps, and returns a list of them
def pollAndFetchSnaps(self):
if self.roundStage == RoundStage.Judging: pass
playernames = [x['username'] for x in self.players]
foundSnaps = None
while True:
try:
foundSnaps = self.api.get_snaps()
break
except:
self.api.login('snapsvshumanity', 'ilovetosnap69')
        snaps = [x for x in foundSnaps
                 if x['status'] == 1  # Unopened
                 and x['sender'] in playernames
                 and x['media_type'] == 0  # Is a photo, not a video
                 and x not in GameInstance.processedSnaps
                 ]
successfullyDownloaded = []
if snaps != None:
for snap in snaps:
if self.fetchPhotoSnap(snap['id']):
successfullyDownloaded.append(snap)
self.api.mark_viewed(snap['id'], 1)
return successfullyDownloaded
# Sends friend requests and invitations to all players
def friendPlayers(self):
friendslist = [x['name'] for x in self.api.get_friends()]
toadd = [x['username'] for x in self.players
if x['username'] not in friendslist]
# print "toAdd", toadd
for user in toadd:
self.api.add_friend(user)
        self.sendInvitationSnap(','.join([x['username'] for x in self.players]))
print "All players are friended!"
# Prints a list of current players
def printPlayers(self):
for p in self.players:
print p['username']
# Prints a list of all snaps that are available to download
def listSnaps(self):
        snaps = [x for x in self.api.get_snaps() if x['status'] == 1]
pprint(snaps)
# Downloads and saves the snap with id snapid
def fetchPhotoSnap(self, snapid):
name = "snaps/" + snapid + ".jpg"
if self.roundStage == RoundStage.Entries:
self.winner = snapid
f = open(name, 'wb')
blob = self.api.get_blob(snapid)
if blob == None:
f.close()
return False
else:
f.write(blob)
f.close()
return True
# Sends a snapchat stored at path to recipients
# recipients should be comma separated (no space!) list of usernames
def sendSnap(self, path, recipients, time=5):
mediaid = self.api.upload(path)
self.api.send(mediaid, recipients, time)
| mit | 3,276,799,947,125,313,500 | 33.248299 | 122 | 0.538187 | false |
molmod/hipart | hipart/gint/tests/utils.py | 1 | 1244 | # -*- coding: utf-8 -*-
# HiPart is a program to analyze the electronic structure of molecules with
# fuzzy-atom partitioning methods.
# Copyright (C) 2007 - 2012 Toon Verstraelen <Toon.Verstraelen@UGent.be>
#
# This file is part of HiPart.
#
# HiPart is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HiPart is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
import os, tempfile, shutil
__all__ = ["setup_gaussian"]
def setup_gaussian(fchk_name):
tmpdir = tempfile.mkdtemp("hipart")
if not os.path.isdir("input"):
raise IOError("Input directory with test files is not present")
fn_fchk = os.path.join(tmpdir, "gaussian.fchk")
shutil.copy("input/%s.fchk" % fchk_name, fn_fchk)
return tmpdir, fn_fchk
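# Illustrative use (the fchk base name "water" is hypothetical):
#   tmpdir, fn_fchk = setup_gaussian("water")  # copies input/water.fchk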
| gpl-3.0 | -2,752,648,033,246,534,000 | 32.621622 | 75 | 0.723473 | false |
samitnuk/online_shop | apps/shop/migrations/0001_initial.py | 1 | 4459 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-11 11:07
from __future__ import unicode_literals
import apps.shop.utils
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200)),
('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='name', slugify=apps.shop.utils.slugify_, unique=True)),
('image', models.ImageField(blank=True, upload_to=apps.shop.utils.category_img_path, verbose_name='Зображення')),
('parent_category', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='shop.Category')),
],
options={
'verbose_name': 'Категорія',
'ordering': ['name'],
'verbose_name_plural': 'Категорії',
},
),
migrations.CreateModel(
name='Manufacturer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200, verbose_name='Виробник')),
('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from='name', slugify=apps.shop.utils.slugify_, unique=True)),
('image', models.ImageField(blank=True, upload_to=apps.shop.utils.manufacturer_img_path, verbose_name='Зображення')),
],
options={
'verbose_name': 'Виробник',
'ordering': ['name'],
'verbose_name_plural': 'Виробники',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(db_index=True, max_length=200, verbose_name='Назва')),
('model_name', models.CharField(blank=True, max_length=200, verbose_name='Модель')),
('slug', autoslug.fields.AutoSlugField(always_update=True, editable=False, populate_from=apps.shop.utils.base_for_product_slug, slugify=apps.shop.utils.slugify_, unique=True)),
('main_image', models.ImageField(blank=True, upload_to=apps.shop.utils.product_main_img_path, verbose_name='Зображення')),
('description', models.TextField(blank=True, verbose_name='Опис')),
('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Ціна')),
('stock', models.PositiveIntegerField(verbose_name='На складі')),
('available', models.BooleanField(default=True, verbose_name='Доступний')),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='category_products', to='shop.Category', verbose_name='Категорія')),
('manufacturer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='manufacturer_products', to='shop.Manufacturer', verbose_name='Виробник')),
],
options={
'verbose_name': 'Товар',
'ordering': ['name'],
'verbose_name_plural': 'Товари',
},
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(blank=True, upload_to=apps.shop.utils.product_img_path, verbose_name='Зображення')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='shop.Product')),
],
),
]
| mit | 5,091,021,321,522,080,000 | 53.582278 | 192 | 0.596707 | false |
eljost/pysisyphus | pysisyphus/io/hdf5.py | 1 | 1630 | import h5py
def init_h5_group(f, group_name, data_model):
"""Create group with given name and data model."""
group = f.create_group(group_name)
# Create (resizable) datasets by using None in maxshape
for key, shape in data_model.items():
assert len(shape) <= 2, "3D not yet supported"
maxshape = (None, ) if (len(shape) == 1) else (None, shape[-1])
group.create_dataset(key, shape, maxshape=maxshape)
def get_h5_group(fn, group_name, data_model=None, reset=False):
"""Return (and create if neccesary) group with given name and
data model."""
f = h5py.File(fn, mode="a")
# Shortcut
if data_model is None:
return f[group_name]
if group_name not in f:
init_h5_group(f, group_name, data_model)
group = f[group_name]
# Check compatibility of data_model and group. If they aren't compatible
# recreate the group with the proper shapes.
try:
compatible = [group[key].shape == shape for key, shape in data_model.items()]
except KeyError:
compatible = (False, )
compatible = all(compatible)
if (not compatible) or reset:
del f[group_name]
init_h5_group(f, group_name, data_model)
group = f[group_name]
return group
def resize_h5_group(group, max_cycles):
"""Increase size of first dimension of datasets in the given group."""
for key, dataset in group.items():
# No need to resize scalar datasets
if dataset.shape == ():
continue
new_shape = list(dataset.shape).copy()
new_shape[0] = max_cycles
dataset.resize(new_shape)
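# Minimal usage sketch (illustrative only; the file name and data model below
# are assumptions, not part of this module):
if __name__ == "__main__":
    model = {"cart_coords": (10, 3), "energies": (10, )}
    group = get_h5_group("run.h5", "optimization", model)
    resize_h5_group(group, max_cycles=20)  # grow the first dim from 10 to 20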
| gpl-3.0 | 2,387,800,816,038,997,500 | 31.6 | 85 | 0.630675 | false |
tgecho/pipedream | pipedream/tests/resolver_test.py | 1 | 2980 | import pytest
from pipedream import Dispatcher, CircularDependency, UnresolvableDependency
from pipedream.dispatcher import OrderedDict
def add_many(dispatcher, funcs):
for name, reqs in funcs.items():
        func = lambda name=name, **k: name + str(k)  # bind name at definition time, not call time
func.__name__ = name
dispatcher.add(func, requires=reqs)
def test_resolving_simple(dispatcher):
funcs = {
'a': ['b'],
'b': []
}
add_many(dispatcher, funcs)
assert list(dispatcher.resolve_dependency_graph('a').keys()) == ['b', 'a']
assert list(dispatcher.resolve_dependency_graph('b').keys()) == ['b']
def test_resolving_multi(dispatcher):
funcs = {
'a': ['b', 'c'],
'b': ['c', 'd'],
'c': ['d'],
'd': []
}
add_many(dispatcher, funcs)
assert list(dispatcher.resolve_dependency_graph('a').keys()) == ['d', 'c', 'b', 'a']
assert list(dispatcher.resolve_dependency_graph('b').keys()) == ['d', 'c', 'b']
assert list(dispatcher.resolve_dependency_graph('c').keys()) == ['d', 'c']
assert list(dispatcher.resolve_dependency_graph('d').keys()) == ['d']
def test_resolving_preresolved(dispatcher):
funcs = {
'a': ['b', 'd'],
'b': ['c', 'e'],
'c': ['d', 'e'],
'd': [],
'e': []
}
add_many(dispatcher, funcs)
assert list(dispatcher.resolve_dependency_graph('a', resolved={'b': 'b'}).keys()) == ['b', 'd', 'a']
assert list(dispatcher.resolve_dependency_graph('a', resolved={'c': 'c'}).keys()) == ['c', 'e', 'b', 'd', 'a']
assert list(dispatcher.resolve_dependency_graph('a', resolved=OrderedDict((('b', 'b'), ('d', 'd')))).keys()) == ['b', 'd', 'a']
assert list(dispatcher.resolve_dependency_graph('b', resolved={'b': 'b'}).keys()) == ['b']
assert list(dispatcher.resolve_dependency_graph('b', resolved={'c': 'c'}).keys()) == ['c', 'e', 'b']
def test_resolving_circular(dispatcher):
funcs = {
'a': ['b'],
'b': ['a'],
}
add_many(dispatcher, funcs)
with pytest.raises(CircularDependency):
dispatcher.resolve_dependency_graph('a')
def test_resolving_unresolvable(dispatcher):
funcs = {
'a': ['b']
}
add_many(dispatcher, funcs)
with pytest.raises(UnresolvableDependency):
dispatcher.resolve_dependency_graph('a')
def test_deep_resolving():
one = Dispatcher()
two = Dispatcher()
three = Dispatcher()
one.add_sub_dispatcher(two)
two.add_sub_dispatcher(three)
@three.add
def a():
return 'a' # pragma: no cover
one.find_resource('a')
with pytest.raises(UnresolvableDependency) as ex:
one.find_resource('b')
assert 'a' in ex.value.available
def test_circular_dispatchers():
one = Dispatcher()
two = Dispatcher()
three = Dispatcher()
one.add_sub_dispatcher(two)
two.add_sub_dispatcher(three)
three.add_sub_dispatcher(one)
with pytest.raises(UnresolvableDependency):
one.find_resource('a')
| bsd-2-clause | 2,716,361,694,664,296,400 | 29.10101 | 131 | 0.583893 | false |
jptomo/rpython-lang-scheme | rpython/annotator/policy.py | 1 | 2761 | # base annotation policy for specialization
from rpython.annotator.specialize import default_specialize as default
from rpython.annotator.specialize import (
specialize_argvalue, specialize_argtype, specialize_arglistitemtype,
specialize_arg_or_var, memo, specialize_call_location)
class AnnotatorPolicy(object):
"""
Possibly subclass and pass an instance to the annotator to control
special-casing during annotation
"""
def event(pol, bookkeeper, what, *args):
pass
def get_specializer(pol, directive):
if directive is None:
return pol.default_specialize
# specialize[(args)]
directive_parts = directive.split('(', 1)
if len(directive_parts) == 1:
[name] = directive_parts
parms = ()
else:
name, parms = directive_parts
try:
parms = eval("(lambda *parms: parms)(%s" % parms)
except (KeyboardInterrupt, SystemExit):
raise
except:
raise Exception("broken specialize directive parms: %s" % directive)
name = name.replace(':', '__')
try:
specializer = getattr(pol, name)
except AttributeError:
raise AttributeError("%r specialize tag not defined in annotation"
"policy %s" % (name, pol))
else:
if not parms:
return specializer
else:
def specialize_with_parms(funcdesc, args_s):
return specializer(funcdesc, args_s, *parms)
return specialize_with_parms
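    # For example (illustrative): the directive "specialize:arg(0)" splits at
    # "(" into name "specialize:arg" and parms "0)"; the eval above closes the
    # parenthesis and yields (0,), the name becomes "specialize__arg", and the
    # returned wrapper calls specialize_argvalue(funcdesc, args_s, 0).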
# common specializations
default_specialize = staticmethod(default)
specialize__memo = staticmethod(memo)
specialize__arg = staticmethod(specialize_argvalue) # specialize:arg(N)
specialize__arg_or_var = staticmethod(specialize_arg_or_var)
specialize__argtype = staticmethod(specialize_argtype) # specialize:argtype(N)
specialize__arglistitemtype = staticmethod(specialize_arglistitemtype)
specialize__call_location = staticmethod(specialize_call_location)
def specialize__ll(pol, *args):
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
return LowLevelAnnotatorPolicy.default_specialize(*args)
def specialize__ll_and_arg(pol, *args):
from rpython.rtyper.annlowlevel import LowLevelAnnotatorPolicy
return LowLevelAnnotatorPolicy.specialize__ll_and_arg(*args)
def no_more_blocks_to_annotate(pol, annotator):
# hint to all pending specializers that we are done
for callback in annotator.bookkeeper.pending_specializations:
callback()
del annotator.bookkeeper.pending_specializations[:]
| mit | -4,241,144,023,246,808,600 | 38.442857 | 84 | 0.646505 | false |
goblint/analyzer | sv-comp/witness-isomorphism.py | 1 | 2820 | #!/usr/bin/python3
import networkx as nx
from pathlib import Path
import sys
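# Usage (illustrative): compare two witness files, or two directories of them:
#   ./witness-isomorphism.py expected.graphml actual.graphml
#   ./witness-isomorphism.py expected_dir actual_dir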
def categorical_node_warn(attrs, defaults):
    def warn(data1, data2):
        return [(attr, data1.get(attr, d), data2.get(attr, d))
                for attr, d in zip(attrs, defaults)
                if data1.get(attr, d) != data2.get(attr, d)]
    return warn
def are_isomorphic(path1, path2):
# def witness_node_match(n1, n2):
# return True
witness_node_match = nx.algorithms.isomorphism.categorical_node_match(
["entry", "sink", "violation", "invariant", "invariant.scope"],
[False, False, False, None, None]
)
witness_node_warn = categorical_node_warn([], [])
# witness_node_match = nx.algorithms.isomorphism.categorical_node_match(
# ["entry", "sink", "violation"],
# [False, False, False]
# )
# witness_node_warn = categorical_node_warn(
# ["invariant", "invariant.scope"],
# [None, None]
# )
# def witness_edge_match(e1, e2):
# return True
witness_edge_match = nx.algorithms.isomorphism.categorical_multiedge_match(
["assumption", "assumption.scope", "assumption.resultfunction", "control", "startline", "endline", "startoffset", "endoffset", "enterLoopHead", "enterFunction", "returnFromFunction", "threadId", "createThread"],
[None, None, None, None, None, None, None, None, False, None, None, None, None]
)
expected = nx.read_graphml(path1, force_multigraph=True)
actual = nx.read_graphml(path2, force_multigraph=True)
matcher = nx.algorithms.isomorphism.MultiDiGraphMatcher(expected, actual, node_match=witness_node_match, edge_match=witness_edge_match)
if matcher.is_isomorphic():
for node1, node2 in matcher.mapping.items():
warn = witness_node_warn(expected.nodes[node1], actual.nodes[node2])
for attr, expected_value, actual_value in warn:
print(f" {path1} vs {path2}: {attr}: {expected_value} vs {actual_value}")
return True
else:
return False
def check_file(path1, path2):
isomorphic = are_isomorphic(path1, path2)
if not isomorphic:
print(f"{path1} vs {path2}: {isomorphic}")
def check_directory(path1, path2):
items1 = {path.relative_to(path1) for path in path1.rglob("*") if path.is_file()}
items2 = {path.relative_to(path2) for path in path2.rglob("*") if path.is_file()}
items12 = items1 - items2
items21 = items2 - items1
if items12:
print(f"Only in 1: {items12}")
if items21:
print(f"Only in 2: {items21}")
items = items1 & items2
for item in items:
check_file(path1 / item, path2 / item)
path1 = Path(sys.argv[1])
path2 = Path(sys.argv[2])
if path1.is_file() and path2.is_file():
check_file(path1, path2)
else:
check_directory(path1, path2)
| mit | -3,314,044,316,633,470,500 | 35.153846 | 219 | 0.641844 | false |
piotroxp/scibibscan | scib/lib/python3.5/site-packages/astropy/units/utils.py | 1 | 6992 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Miscellaneous utilities for `astropy.units`.
None of the functions in the module are meant for use outside of the
package.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numbers
import io
import re
import warnings
import numpy as np
from numpy import finfo
from ..extern import six
from ..utils.compat.fractions import Fraction
from ..utils.exceptions import AstropyDeprecationWarning
_float_finfo = finfo(float)
# take float here to ensure comparison with another float is fast
# give a little margin since often multiple calculations happened
_JUST_BELOW_UNITY = float(1.-4.*_float_finfo.epsneg)
_JUST_ABOVE_UNITY = float(1.+4.*_float_finfo.eps)
def _get_first_sentence(s):
"""
Get the first sentence from a string and remove any carriage
returns.
"""
    x = re.match(r".*?\S\.\s", s)
if x is not None:
s = x.group(0)
return s.replace('\n', ' ')
def _iter_unit_summary(namespace):
"""
Generates the ``(unit, doc, represents, aliases, prefixes)``
tuple used to format the unit summary docs in `generate_unit_summary`.
"""
from . import core
# Get all of the units, and keep track of which ones have SI
# prefixes
units = []
has_prefixes = set()
for key, val in six.iteritems(namespace):
# Skip non-unit items
if not isinstance(val, core.UnitBase):
continue
# Skip aliases
if key != val.name:
continue
if isinstance(val, core.PrefixUnit):
# This will return the root unit that is scaled by the prefix
# attached to it
has_prefixes.add(val._represents.bases[0].name)
else:
units.append(val)
# Sort alphabetically, case insensitive
units.sort(key=lambda x: x.name.lower())
for unit in units:
doc = _get_first_sentence(unit.__doc__).strip()
represents = ''
if isinstance(unit, core.Unit):
represents = ":math:`{0}`".format(
unit._represents.to_string('latex')[1:-1])
aliases = ', '.join('``{0}``'.format(x) for x in unit.aliases)
yield (unit, doc, represents, aliases, unit.name in has_prefixes)
def generate_unit_summary(namespace):
"""
Generates a summary of units from a given namespace. This is used
to generate the docstring for the modules that define the actual
units.
Parameters
----------
namespace : dict
A namespace containing units.
Returns
-------
docstring : str
A docstring containing a summary table of the units.
"""
docstring = io.StringIO()
docstring.write("""
.. list-table:: Available Units
:header-rows: 1
:widths: 10 20 20 20 1
* - Unit
- Description
- Represents
- Aliases
- SI Prefixes
""")
for unit_summary in _iter_unit_summary(namespace):
docstring.write("""
* - ``{0}``
- {1}
- {2}
- {3}
- {4!s:.1}
""".format(*unit_summary))
return docstring.getvalue()
def is_effectively_unity(value):
# value is *almost* always real, except, e.g., for u.mag**0.5, when
# it will be complex. Use try/except to ensure normal case is fast
try:
return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
except TypeError: # value is complex
return (_JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY and
_JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY)
def sanitize_scale(scale):
if is_effectively_unity(scale):
return 1.0
if np.iscomplex(scale): # scale is complex
if scale == 0.0:
return 0.0
if abs(scale.real) > abs(scale.imag):
if is_effectively_unity(scale.imag/scale.real + 1):
scale = scale.real
else:
if is_effectively_unity(scale.real/scale.imag + 1):
scale = complex(0., scale.imag)
return scale
def validate_power(p, support_tuples=False):
"""
Handles the conversion of a power to a floating point or a
rational number.
Parameters
----------
support_tuples : bool, optional
If `True`, treat 2-tuples as `Fraction` objects. This
behavior is deprecated and will be removed in astropy 0.5.
"""
# For convenience, treat tuples as Fractions
if support_tuples and isinstance(p, tuple) and len(p) == 2:
# Deprecated in 0.3.1
warnings.warn(
"Using a tuple as a fractional power is deprecated and may be "
"removed in a future version. Use Fraction(n, d) instead.",
AstropyDeprecationWarning)
p = Fraction(p[0], p[1])
if isinstance(p, (numbers.Rational, Fraction)):
# If the fractional power can be represented *exactly* as a
# floating point number, we convert it to a float, to make the
# math much faster, otherwise, we retain it as a
# `fractions.Fraction` object to avoid losing precision.
denom = p.denominator
if denom == 1:
p = int(p.numerator)
# This is bit-twiddling hack to see if the integer is a
# power of two
elif (denom & (denom - 1)) == 0:
p = float(p)
else:
if not np.isscalar(p):
raise ValueError(
"Quantities and Units may only be raised to a scalar power")
p = float(p)
# If the value is indistinguishable from a rational number
# with a low-numbered denominator, convert to a Fraction
# object. We don't want to convert for denominators that are
# a power of 2, since those can be perfectly represented, and
# subsequent operations are much faster if they are retained
# as floats. Nor do we need to test values that are divisors
# of a higher number, such as 3, since it is already addressed
# by 6.
# First check for denominator of 1
if (p % 1.0) == 0.0:
p = int(p)
# Leave alone if the denominator is exactly 2, 4 or 8
elif (p * 8.0) % 1.0 == 0.0:
pass
else:
for i in [10, 9, 7, 6]:
scaled = p * float(i)
if((scaled + 4. * _float_finfo.eps) % 1.0 <
8. * _float_finfo.eps):
p = Fraction(int(round(scaled)), i)
break
return p
def resolve_fractions(a, b):
"""
If either input is a Fraction, convert the other to a Fraction.
This ensures that any operation involving a Fraction will use
rational arithmetic and preserve precision.
"""
a_is_fraction = isinstance(a, Fraction)
b_is_fraction = isinstance(b, Fraction)
if a_is_fraction and not b_is_fraction:
b = Fraction(b)
elif not a_is_fraction and b_is_fraction:
a = Fraction(a)
return a, b
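# Illustrative behaviour of validate_power (examples, not part of the module):
#   validate_power(0.5)        -> 0.5             (denominator 2: exact float)
#   validate_power(2.0)        -> 2               (integral values become int)
#   validate_power(1.0 / 3.0)  -> Fraction(1, 3)  (snapped to a low denominator)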
| mit | 3,504,517,540,989,733,000 | 28.880342 | 76 | 0.599113 | false |
rafaelmartins/foo-tools | setup.py | 1 | 1217 | #!/usr/bin/env python
from setuptools import setup
import foo
import glob
import os
current_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(current_dir, 'README.rst')) as fp:
long_description = fp.read()
install_requires = []
try:
import argparse
except ImportError:
install_requires.append('argparse')
setup(
name='foo-tools',
version=foo.__version__,
license=foo.__license__,
description=foo.__description__,
long_description=long_description,
author=foo.__author__,
author_email=foo.__email__,
url=foo.__url__,
py_modules=['foo'],
install_requires=install_requires,
tests_require=['mock'],
test_suite='test_foo',
data_files=[('libexec/foo-tools', glob.glob('modules/*'))],
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Unix Shell',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: System',
],
entry_points={'console_scripts': ['foo = foo:main']},
)
| bsd-3-clause | -1,710,944,276,475,376,400 | 26.044444 | 63 | 0.6212 | false |
xiexiangwei/xGame | logingate/redishelper.py | 1 | 3057 | #coding=utf-8
import logging
from twisted.internet import task
import config
from common import redispool
import time
class RedisHelper(object):
def __init__(self):
self.redis_linkcount = config.instance.redis_linkcount if config.instance.redis_linkcount else 2
self.__redispool = redispool.RedisConnectionPool(ip=config.instance.redis_ip,
port=config.instance.redis_port,
db=config.instance.redis_db,
password=config.instance.redis_pwd,
linkcount=self.redis_linkcount)
        # list of login servers
        self.__loginserverlist = []
def start(self):
l = task.LoopingCall(self.OnTimer)
l.start(1, False)
self.__redispool.start()
def OnTimer(self):
self.LoadLoginServerList()
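    # Map a key (callers pass the current timestamp) to one of the
    # redis_linkcount pooled connections, spreading commands across links.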
def HashIndex(self, v):
return int(v) % self.redis_linkcount
    # fetch the list of login-server info dicts
def LoadLoginServerList(self):
cmd = redispool.RedisCommand(index=self.HashIndex(time.time()),
func=self.LoadLoginServerListFunc,
params=(),
ctx=(),
finish=self.LoadLoginServerListFinish)
self.__redispool.putCmd(cmd)
def LoadLoginServerListFunc(self,redisclient):
res = []
loginserverid_list = redisclient.smembers(u"loginserver:loginserver_list")
for loginserverid in loginserverid_list:
key = u"loginserver:loginserver%s"%loginserverid
if redisclient.exists(key):
res.append(redisclient.hgetall(key))
else:
redisclient.srem(u"loginserver:loginserver_list",loginserverid)
return res
def LoadLoginServerListFinish(self,err,ctx,rows):
self.__loginserverlist = rows
    # update a login server's usage count
def UpdateLoginServerTimes(self,id,value):
cmd = redispool.RedisCommand(index=self.HashIndex(time.time()),
func=self.UpdateLoginServerTimesFunc,
params=(id,value),
ctx=(),
finish=self.UpdateLoginServerTimesFinish)
self.__redispool.putCmd(cmd)
def UpdateLoginServerTimesFunc(self,redisclient, id, value):
loginserver_key = u"loginserver:loginserver%d" % id
if redisclient.exists(loginserver_key):
curtimes = int(redisclient.hget(loginserver_key, u"times"))
redisclient.hset(loginserver_key, u"times", curtimes + value)
else:
redisclient.srem(u"loginserver:loginserver_list", id)
def UpdateLoginServerTimesFinish(self,err,ctx,rows):
pass
def GetLoginServerList(self):
return self.__loginserverlist
instance = RedisHelper()
| apache-2.0 | -157,922,002,595,841,340 | 36.024691 | 104 | 0.567856 | false |
hevi9/mknew | creat/action/copy.py | 1 | 1249 | """ Action to copy tree or file. """
import shlex
from pathlib import Path
from shutil import copytree
from typing import Any, Mapping
from creat.action.bases import Action
from creat.context import render
from creat.source import Source
class Copy(Action):
""" Action to copy tree or file. """
path_from: str
path_to: str
def __init__(self, source: Source, make_item: dict):
super().__init__(source, make_item)
params = make_item["copy"]
if isinstance(params, str):
self.path_from, self.path_to = shlex.split(params)
elif isinstance(params, dict):
self.path_from = params["from"]
self.path_to = params["to"]
else:
raise ValueError(f"Invalid type {type(params)} for field 'copy' in {source._location}")
def run(self, context: Mapping[str, Any]) -> None:
path_from = Path(render(self.path_from, context))
path_to = Path(render(self.path_to, context))
with self._run_context():
copytree(
path_from,
path_to,
symlinks=False,
ignore=None,
ignore_dangling_symlinks=False,
dirs_exist_ok=False,
)
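# Illustrative make-item shapes accepted by the constructor above (the paths
# are hypothetical, and the template syntax understood by render() is not
# shown here):
#   copy: "templates/app.py out/app.py"                      # shlex-split string
#   copy: {"from": "templates/app.py", "to": "out/app.py"}   # mapping form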
| lgpl-2.1 | 121,943,395,006,206,260 | 29.463415 | 99 | 0.581265 | false |
GoogleCloudPlatform/PerfKitBenchmarker | tests/linux_benchmarks/sysbench_benchmark_test.py | 1 | 2151 | # Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for sysbench."""
import logging
import os
import unittest
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.linux_benchmarks import sysbench_benchmark
class MySQLServiceBenchmarkTestCase(unittest.TestCase,
test_util.SamplesTestMixin):
def setUp(self):
path = os.path.join(os.path.dirname(__file__), '..', 'data',
'sysbench-output-sample.txt')
with open(path) as fp:
self.contents = fp.read()
def testParseSysbenchResult(self):
results = []
metadata = {}
sysbench_benchmark.AddMetricsForSysbenchOutput(
self.contents, results, metadata)
logging.info('results are, %s', results)
expected_results = [
sample.Sample('tps_array', -1, 'tps', {'tps': [
1012.86, 1006.64, 1022.3, 1016.16, 1009.03, 1016.99, 1010.0, 1018.0,
1002.01, 998.49, 959.52, 913.49, 936.98, 916.01, 957.96]}),
sample.Sample('latency_array', -1, 'ms', {'latency': [
28.67, 64.47, 38.94, 44.98, 89.16, 29.72, 106.75, 46.63, 116.8,
41.85, 27.17, 104.84, 58.92, 75.82, 73.13]}),
sample.Sample('qps_array', -1, 'qps', {'qps': [
20333.18, 20156.38, 20448.49, 20334.15, 20194.07, 20331.31,
20207.00, 20348.96, 20047.11, 19972.86, 19203.97, 18221.83,
18689.14, 18409.68, 19155.63]})]
self.assertSampleListsEqualUpToTimestamp(results, expected_results)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -5,931,858,623,994,031,000 | 38.833333 | 80 | 0.655044 | false |
privacyidea/privacyidea | tests/test_lib_tokens_sms.py | 1 | 20023 | """
This test file tests the lib.tokens.smstoken
"""
PWFILE = "tests/testdata/passwords"
from .base import MyTestCase, FakeFlaskG, FakeAudit
from privacyidea.lib.resolver import (save_resolver)
from privacyidea.lib.realm import (set_realm)
from privacyidea.lib.user import (User)
from privacyidea.lib.utils import is_true
from privacyidea.lib.tokenclass import DATE_FORMAT
from privacyidea.lib.tokens.smstoken import SmsTokenClass, SMSACTION
from privacyidea.models import (Token, Config, Challenge)
from privacyidea.lib.config import (set_privacyidea_config, set_prepend_pin)
from privacyidea.lib.policy import set_policy, SCOPE, PolicyClass
from privacyidea.lib import _
import datetime
import mock
import responses
class SMSTokenTestCase(MyTestCase):
"""
Test the token on the database level
"""
phone1 = "+49 123456789"
otppin = "topsecret"
resolvername1 = "resolver1"
resolvername2 = "Resolver2"
resolvername3 = "reso3"
realm1 = "realm1"
realm2 = "realm2"
serial1 = "SE123456"
serial2 = "SE222222"
otpkey = "3132333435363738393031323334353637383930"
SMSHttpUrl = "http://smsgateway.com/sms_send_api.cgi"
SMSProviderConfig = '''{"URL": "http://smsgateway.com/sms_send_api.cgi",
"PARAMETER": {"from": "0170111111",
"password": "yoursecret",
"sender": "name",
"account": "company_ltd"},
"SMS_TEXT_KEY": "text",
"SMS_PHONENUMBER_KEY": "destination",
"HTTP_Method": "POST",
"PROXY": "http://username:password@your-proxy:8080",
"RETURN_SUCCESS": "ID"
}'''
success_body = "ID 12345"
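    # The provider config above sets RETURN_SUCCESS to "ID", so any response
    # body containing "ID" (like success_body) counts as a successful send.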
# add_user, get_user, reset, set_user_identifiers
def test_00_create_user_realm(self):
rid = save_resolver({"resolver": self.resolvername1,
"type": "passwdresolver",
"fileName": PWFILE})
self.assertTrue(rid > 0, rid)
(added, failed) = set_realm(self.realm1,
[self.resolvername1])
self.assertTrue(len(failed) == 0)
self.assertTrue(len(added) == 1)
user = User(login="root",
realm=self.realm1,
resolver=self.resolvername1)
user_str = "{0!s}".format(user)
self.assertTrue(user_str == "<root.resolver1@realm1>", user_str)
self.assertFalse(user.is_empty())
self.assertTrue(User().is_empty())
user_repr = "{0!r}".format(user)
expected = "User(login='root', realm='realm1', resolver='resolver1')"
self.assertTrue(user_repr == expected, user_repr)
def test_01_create_token(self):
db_token = Token(self.serial1, tokentype="sms")
db_token.save()
token = SmsTokenClass(db_token)
token.update({"phone": self.phone1})
token.save()
self.assertTrue(token.token.serial == self.serial1, token)
self.assertTrue(token.token.tokentype == "sms", token.token)
self.assertTrue(token.type == "sms", token.type)
class_prefix = token.get_class_prefix()
self.assertTrue(class_prefix == "PISM", class_prefix)
self.assertTrue(token.get_class_type() == "sms", token)
db_token = Token(self.serial2, tokentype="sms")
db_token.save()
token = SmsTokenClass(db_token)
token.update({"dynamic_phone": True})
token.save()
self.assertTrue(token.token.serial == self.serial2, token)
self.assertTrue(token.token.tokentype == "sms", token.token)
self.assertTrue(is_true(token.get_tokeninfo("dynamic_phone")))
self.assertTrue(token.type == "sms", token.type)
class_prefix = token.get_class_prefix()
self.assertTrue(class_prefix == "PISM", class_prefix)
self.assertTrue(token.get_class_type() == "sms", token)
token.add_user(User(login="cornelius",
realm=self.realm1))
def test_02_set_user(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
self.assertTrue(token.token.tokentype == "sms",
token.token.tokentype)
self.assertTrue(token.type == "sms", token.type)
token.add_user(User(login="cornelius",
realm=self.realm1))
self.assertEqual(token.token.first_owner.resolver, self.resolvername1)
self.assertEqual(token.token.first_owner.user_id, "1000")
user_object = token.user
self.assertTrue(user_object.login == "cornelius",
user_object)
self.assertTrue(user_object.resolver == self.resolvername1,
user_object)
def test_03_reset_failcounter(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.token.failcount = 10
token.reset()
self.assertTrue(token.token.failcount == 0,
token.token.failcount)
def test_04_base_methods(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
self.assertTrue(token.check_otp("123456", 1, 10) == -1)
# get class info
cli = token.get_class_info()
self.assertTrue(cli.get("type") == "sms", cli.get("type"))
cli = token.get_class_info("type")
self.assertTrue(cli == "sms", cli)
# set the description
token.set_description("something new")
self.assertTrue(token.token.description == "something new",
token.token)
# set defaults
token.set_defaults()
self.assertTrue(token.token.otplen == 6)
self.assertTrue(token.token.sync_window == 1000)
token.resync("1234", "3456")
token.token.count_window = 17
self.assertTrue(token.get_otp_count_window() == 17)
token.token.count = 18
self.assertTrue(token.get_otp_count() == 18)
token.token.active = False
self.assertTrue(token.is_active() is False)
token.token.failcount = 7
self.assertTrue(token.get_failcount() == 7)
token.set_failcount(8)
self.assertTrue(token.token.failcount == 8)
token.token.maxfail = 12
self.assertTrue(token.get_max_failcount() == 12)
self.assertEqual(token.get_user_id(), token.token.first_owner.user_id)
self.assertTrue(token.get_serial() == "SE123456", token.token.serial)
self.assertTrue(token.get_tokentype() == "sms",
token.token.tokentype)
token.set_so_pin("sopin")
token.set_user_pin("userpin")
token.set_otpkey(self.otpkey)
token.set_otplen(8)
token.set_otp_count(1000)
self.assertTrue(len(token.token.so_pin) == 32,
token.token.so_pin)
self.assertTrue(len(token.token.user_pin) == 32,
token.token.user_pin)
self.assertTrue(len(token.token.key_enc) == 192,
token.token.key_enc)
self.assertTrue(token.get_otplen() == 8)
self.assertTrue(token.token.count == 1000,
token.token.count)
token.set_maxfail(1000)
self.assertTrue(token.token.maxfail == 1000)
token.set_count_window(52)
self.assertTrue(token.get_count_window() == 52)
token.set_sync_window(53)
self.assertTrue(token.get_sync_window() == 53)
def test_06_set_pin(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.set_pin("hallo")
(ph1, pseed) = token.get_pin_hash_seed()
# check the database
token.set_pin("blubber")
ph2 = token.token.pin_hash
self.assertTrue(ph1 != ph2)
token.set_pin_hash_seed(ph1, pseed)
def test_07_enable(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.enable(False)
self.assertTrue(token.token.active is False)
token.enable()
self.assertTrue(token.token.active)
def test_05_get_set_realms(self):
set_realm(self.realm2)
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
realms = token.get_realms()
self.assertTrue(len(realms) == 1, realms)
token.set_realms([self.realm1, self.realm2])
realms = token.get_realms()
self.assertTrue(len(realms) == 2, realms)
def test_99_delete_token(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.delete_token()
db_token = Token.query.filter_by(serial=self.serial1).first()
self.assertTrue(db_token is None, db_token)
def test_08_info(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.set_hashlib("sha1")
ti = token.get_tokeninfo()
self.assertTrue("hashlib" in ti, ti)
def test_09_failcount(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
start = token.token.failcount
end = token.inc_failcount()
self.assertTrue(end == start + 1, (end, start))
def test_10_get_hashlib(self):
# check if functions are returned
for hl in ["sha1", "md5", "sha256", "sha512",
"sha224", "sha384", "", None]:
self.assertTrue(hasattr(SmsTokenClass.get_hashlib(hl),
'__call__'),
SmsTokenClass.get_hashlib(hl))
def test_11_tokeninfo(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.add_tokeninfo("key1", "value2")
info1 = token.get_tokeninfo()
self.assertTrue("key1" in info1, info1)
token.add_tokeninfo("key2", "value3")
info2 = token.get_tokeninfo()
self.assertTrue("key2" in info2, info2)
token.set_tokeninfo(info1)
info2 = token.get_tokeninfo()
self.assertTrue("key2" not in info2, info2)
self.assertTrue(token.get_tokeninfo("key1") == "value2",
info2)
# auth counter
token.set_count_auth_success_max(200)
token.set_count_auth_max(1000)
token.set_count_auth_success(100)
token.inc_count_auth_success()
token.set_count_auth(200)
token.inc_count_auth()
self.assertTrue(token.get_count_auth_success_max() == 200)
self.assertTrue(token.get_count_auth_success() == 101)
self.assertTrue(token.get_count_auth_max() == 1000)
self.assertTrue(token.get_count_auth() == 201)
self.assertTrue(token.check_auth_counter())
token.set_count_auth_max(10)
self.assertFalse(token.check_auth_counter())
token.set_count_auth_max(1000)
token.set_count_auth_success_max(10)
self.assertFalse(token.check_auth_counter())
def test_12_inc_otp_counter(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.set_otp_count(10)
self.assertTrue(token.token.count == 10, token.token.count)
# increase counter by 1
token.inc_otp_counter()
self.assertTrue(token.token.count == 11, token.token.count)
# increase counter to 21
Config(Key="DefaultResetFailCount", Value=True).save()
token.inc_otp_counter(counter=20)
self.assertTrue(token.token.count == 21, token.token.count)
def test_13_check_otp(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.update({"otpkey": self.otpkey,
"pin": "test",
"otplen": 6,
"phone": self.phone1})
# OTP does not exist
self.assertTrue(token.check_otp_exist("222333") == -1)
# OTP does exist
res = token.check_otp_exist("969429")
self.assertTrue(res == 3, res)
def test_14_split_pin_pass(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.token.otplen = 6
# postpend pin
set_prepend_pin(False)
_res, pin, value = token.split_pin_pass("222333test")
self.assertTrue(pin == "test", pin)
self.assertTrue(value == "222333", value)
# prepend pin
set_prepend_pin(True)
_res, pin, value = token.split_pin_pass("test222333")
self.assertTrue(pin == "test", pin)
self.assertTrue(value == "222333", value)
def test_15_check_pin(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
# test the encrypted pin
token.set_pin("encrypted", encrypt=True)
self.assertTrue(token.check_pin("encrypted"))
self.assertFalse(token.check_pin("wrong pin"))
# test the hashed pin
token.set_pin("test")
self.assertTrue(token.check_pin("test"))
self.assertFalse(token.check_pin("wrong pin"))
def test_17_challenge_token(self):
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
token.set_pin(self.otppin)
r = token.is_challenge_request(self.otppin)
self.assertTrue(r)
@responses.activate
def test_18_challenge_request(self):
responses.add(responses.POST,
self.SMSHttpUrl,
body=self.success_body)
transactionid = "123456098712"
set_privacyidea_config("sms.providerConfig", self.SMSProviderConfig)
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
self.assertTrue(token.check_otp("123456", 1, 10) == -1)
c = token.create_challenge(transactionid)
self.assertTrue(c[0], c)
otp = c[1]
self.assertTrue(c[3].get("state"), transactionid)
# check for the challenges response
r = token.check_challenge_response(passw=otp,
options={"transaction_id":
transactionid})
self.assertTrue(r, r)
@responses.activate
def test_18a_challenge_request_dynamic(self):
# Send a challenge request for an SMS token with a dynamic phone number
responses.add(responses.POST,
self.SMSHttpUrl,
body=self.success_body)
transactionid = "123456098712"
set_privacyidea_config("sms.providerConfig", self.SMSProviderConfig)
db_token = Token.query.filter_by(serial=self.serial2).first()
token = SmsTokenClass(db_token)
self.assertTrue(token.check_otp("123456", 1, 10) == -1)
c = token.create_challenge(transactionid)
self.assertTrue(c[0], c)
otp = c[1]
self.assertTrue(c[3].get("state"), transactionid)
# check for the challenges response
r = token.check_challenge_response(passw=otp,
options={"transaction_id":
transactionid})
self.assertTrue(r, r)
@responses.activate
def test_18b_challenge_request_dynamic_multivalue(self):
responses.add(responses.POST,
self.SMSHttpUrl,
body=self.success_body)
transactionid = "123456098712"
set_privacyidea_config("sms.providerConfig", self.SMSProviderConfig)
db_token = Token.query.filter_by(serial=self.serial2).first()
token = SmsTokenClass(db_token)
# if the email is a multi-value attribute, the first address should be chosen
new_user_info = token.user.info.copy()
new_user_info['mobile'] = ['1234', '5678']
with mock.patch('privacyidea.lib.resolvers.PasswdIdResolver.IdResolver.getUserInfo') as mock_user_info:
mock_user_info.return_value = new_user_info
c = token.create_challenge(transactionid)
self.assertTrue(c[0], c)
self.assertIn('destination=1234', responses.calls[0].request.body)
self.assertNotIn('destination=5678', responses.calls[0].request.body)
@responses.activate
def test_19_smstext(self):
        # The single quotes in the smstext "'Your <otp>'" are legacy and result
        # in the string without the quotes, i.e. "Your <otp>".
smstext_tests = {"'Your <otp>'": r"Your [0-9]{6}",
"Your <otp>": r"Your [0-9]{6}",
"{user} has the OTP: {otp}": r"Cornelius has the OTP: [0-9]{6}"}
for pol_text, result_text in smstext_tests.items():
# create a SMSTEXT policy:
p = set_policy(name="smstext",
action="{0!s}={1!s}".format(SMSACTION.SMSTEXT, pol_text),
scope=SCOPE.AUTH)
self.assertTrue(p > 0)
g = FakeFlaskG()
P = PolicyClass()
g.audit_object = FakeAudit()
g.policy_object = P
options = {"g": g,
"user": User("cornelius", self.realm1)}
responses.add(responses.POST,
self.SMSHttpUrl,
body=self.success_body)
set_privacyidea_config("sms.providerConfig", self.SMSProviderConfig)
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
c = token.create_challenge(options=options)
self.assertTrue(c[0], c)
display_message = c[1]
self.assertEqual(display_message, _("Enter the OTP from the SMS:"))
self.assertEqual(c[3].get("state"), None)
smstext = token._get_sms_text(options)
self.assertEqual(pol_text.strip("'"), smstext)
r, message = token._send_sms(smstext, options)
self.assertRegexpMatches(message, result_text)
# Test AUTOSMS
p = set_policy(name="autosms",
action=SMSACTION.SMSAUTO,
scope=SCOPE.AUTH)
self.assertTrue(p > 0)
g = FakeFlaskG()
P = PolicyClass()
g.policy_object = P
g.audit_object = FakeAudit()
options = {"g": g}
r = token.check_otp(self.valid_otp_values[5 + len(smstext_tests)], options=options)
self.assertTrue(r > 0, r)
def test_21_failed_loading(self):
transactionid = "123456098712"
set_privacyidea_config("sms.providerConfig", "noJSON")
set_privacyidea_config("sms.provider",
"privacyidea.lib.smsprovider."
"HttpSMSProvider.HttpSMSProviderWRONG")
db_token = Token.query.filter_by(serial=self.serial1).first()
token = SmsTokenClass(db_token)
c = token.create_challenge(transactionid)
self.assertFalse(c[0], c)
self.assertTrue("The PIN was correct, but" in c[1], c[1])
set_privacyidea_config("sms.provider",
"privacyidea.lib.smsprovider."
"HttpSMSProvider.HttpSMSProvider")
c = token.create_challenge(transactionid)
self.assertFalse(c[0], c)
self.assertTrue("Failed to load sms.providerConfig" in c[1], c[1])
# test with the parameter exception=1
self.assertRaises(Exception, token.create_challenge, transactionid, {"exception": "1"})
| agpl-3.0 | -4,112,196,651,265,308,000 | 39.287726 | 111 | 0.588174 | false |
zh012/flask-dropin | setup.py | 1 | 1280 | #!/usr/bin/env python
from setuptools import setup
options = dict(
name='Flask-DropIn',
version='0.0.1',
    description='Flask-DropIn lets you easily organize large Flask projects.',
author='Jerry Zhang',
author_email='hui.calife@gmail.com',
url='https://github.com/zh012/flask-dropin.git',
packages=['flask_dropin'],
license='MIT',
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'click',
'six',
],
tests_require=[
'pytest>=2.7.1',
'pytest-cov>=2.2.0',
'tox',
],
entry_points={
'console_scripts': []
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
],
)
setup(**options)
| mit | 8,108,575,025,833,285,000 | 27.444444 | 76 | 0.567188 | false |
AlessandroZ/LaZagne | Windows/lazagne/softwares/sysadmin/wsl.py | 1 | 1527 | # -*- coding: utf-8 -*-
from lazagne.config.module_info import ModuleInfo
from lazagne.config.constant import constant
import os
class Wsl(ModuleInfo):
def __init__(self):
ModuleInfo.__init__(self, 'wsl', 'sysadmin')
def run(self):
pwd_found = []
shadow_files_list = []
# Old WSL PATH
old_path = os.path.join(constant.profile['LOCALAPPDATA'], u'lxss\\rootfs\\etc\\shadow')
if os.path.exists(old_path):
shadow_files_list.append(old_path)
# New WSL PATH need to look into Package folder
new_path = os.path.join(constant.profile['LOCALAPPDATA'], u'Packages\\')
if os.path.exists(new_path):
            for root, dirs, files in os.walk(new_path):
                for filename in files:
                    if filename == "shadow":
                        shadow_files_list.append(os.path.join(root, filename))
# Extract the hashes
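        # A shadow entry looks like "user:$6$salt$hash:18000:0:99999:7:::";
        # everything after the first colon becomes the reported "Hash" value.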
for shadow in shadow_files_list:
with open(shadow, 'r') as shadow_file:
for line in shadow_file.readlines():
user_hash = line.replace('\n', '')
line = user_hash.split(':')
# Check if a password is defined
if not line[1] in ['x', '*', '!']:
pwd_found.append({
'Hash': ':'.join(user_hash.split(':')[1:]),
'Login': user_hash.split(':')[0].replace('\n', '')
})
return pwd_found
| lgpl-3.0 | 8,340,774,244,110,136,000 | 33.704545 | 95 | 0.504257 | false |
thejdeep/CoAPthon | coapfifocache.py | 1 | 2883 | from cachetools import LRUCache
from coapthon.caching.coapcache import CoapCache
class fifocache(object):
    def __init__(self, max_dim=2048):
        self.queue = []
        self.cachevalues = {}
        self.maxsize = max_dim
    def update(self, elements):
        # elements holds a single (key, value) pair; evict the oldest key
        # first when inserting a new one into a full cache
        key, value = elements[0]
        if key not in self.cachevalues:
            if len(self.cachevalues) == self.maxsize:
                del self.cachevalues[self.queue[0]]
                print "DELETING IN FIFO: ", self.queue[0]
                del self.queue[0]
            self.queue.append(key)
        self.cachevalues[key] = value
def __getitem__(self, key):
value = self.cachevalues[key]
return value
# def __setitem__(self, key, value):
# self.cachevalues[key]=value
def currsize(self):
return len(self.cachevalues)
def items(self):
"D.items() -> list of D's (key, value) pairs, as 2-tuples"
return [(key, self.cachevalues[key]) for key in self.cachevalues.keys()]
def keys(self):
return [key for key in self.cachevalues.keys()]
class CoapFIFOCache(CoapCache):
def __init__(self, max_dim):
"""
:param max_dim:
"""
print "Using FIFO Cache with dimension : " + str(max_dim)
self.cache = fifocache(max_dim=max_dim)
def update(self, key, element):
"""
:param key:
:param element:
:return:
"""
print "updating cache"
print "key: ", key.hashkey
print "element: ", element
self.cache.update([(key.hashkey, element)])
def get(self, key):
"""
:param key:
:return: CacheElement
"""
try:
print "Getting cache response"
response = self.cache[key.hashkey]
except KeyError:
print "problem here"
response = None
return response
def is_full(self):
"""
:return:
"""
if self.cache.currsize() == self.cache.maxsize:
return True
return False
def is_empty(self):
"""
:return:
"""
if self.cache.currsize() == 0:
return True
return False
def debug_print(self):
"""
:return:
"""
print "size = ", self.cache.currsize()
        entries = self.cache.items()
        for key, element in entries:
print "element.max age ", element.max_age
print "element.uri", element.uri
print "element.freshness ", element.freshness
| mit | -6,695,820,534,116,193,000 | 24.449541 | 80 | 0.506417 | false |
goldsborough/lnk | tests/lnk/test_lnk_errors.py | 1 | 3365 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
import ecstasy
import pytest
import requests
import tests.paths
import lnk.errors
def test_verbosity_system_works_without_additional():
what = 'something happened'
error = lnk.errors.Error(what)
typ = ecstasy.beautify('<Type>: Error', ecstasy.Color.Red)
assert error.what == what
assert error.levels[2] == typ
def test_get_levels_works():
error = lnk.errors.Error('something happened')
assert len(error.levels) == 4
assert any(error.levels)
assert not error.levels[1]
assert not error.levels[3]
assert 'Error' in error.levels[0]
assert 'Type' in error.levels[2]
def test_verbosity_system_works_with_additional():
foo = lnk.errors.Message(what='foo?', level=1)
bar = lnk.errors.Message(what='bar!', level=3)
error = lnk.errors.Error('something happened', Foo=foo, Bar=bar)
assert len(error.levels) == 4
assert all(error.levels)
assert 'Foo' in error.levels[1]
assert 'foo?' in error.levels[1]
assert 'Bar' in error.levels[3]
assert 'bar!' in error.levels[3]
def test_catch_catches_lnk_error(capsys):
def throws():
raise lnk.errors.Error('oops')
lnk.errors.catch(throws)
captured = capsys.readouterr()
assert captured
assert 'Error' in captured[0]
assert 'oops' in captured[0]
def test_catch_shows_only_wanted_levels_for_verbosity_0(capsys):
catch = lnk.errors.Catch()
def throws():
raise lnk.errors.Error('oops')
catch.catch(throws)
captured = capsys.readouterr()
assert captured
levels = [i for i in captured[0].split('\n') if i]
assert len(levels) == 1
assert 'Error' in levels[0]
assert 'oops' in levels[0]
def test_catch_shows_all_levels_for_verbosity_4(capsys):
catch = lnk.errors.Catch(3)
def throws():
foo = lnk.errors.Message(what='foo?', level=1)
bar = lnk.errors.Message(what='bar!', level=3)
raise lnk.errors.InternalError('oops', Foo=foo, Bar=bar)
catch.catch(throws)
captured = capsys.readouterr()
assert captured
levels = [i for i in captured[0].split('\n') if i]
assert len(levels) == 4
assert 'Error' in levels[0]
assert 'oops' in levels[0]
assert 'Foo' in levels[1]
assert 'foo?' in levels[1]
assert 'Type' in levels[2]
assert 'InternalError' in levels[2]
assert 'Bar' in levels[3]
assert 'bar!' in levels[3]
def test_catch_catches_click_exception(capsys):
catch = lnk.errors.Catch(2)
def throws():
raise click.ClickException('')
catch.catch(throws)
captured = capsys.readouterr()
assert captured
levels = [i for i in captured[0].split('\n') if i]
assert 'Error' in levels[0]
assert 'Type' in levels[1]
assert 'UsageError' in levels[1]
def test_catch_catches_requests_exception(capsys):
catch = lnk.errors.Catch(2)
def throws():
raise requests.exceptions.ConnectionError
catch.catch(throws)
captured = capsys.readouterr()
assert captured
levels = [i for i in captured[0].split('\n') if i]
assert 'Error' in levels[0]
assert 'Type' in levels[2]
assert 'ConnectionError' in levels[2]
def test_catch_bubbles_up_other_exceptions():
catch = lnk.errors.Catch()
def throws():
raise RuntimeError
with pytest.raises(RuntimeError):
catch.catch(throws)
def test_warn_works(capsys):
lnk.errors.warn('Sauron is coming')
captured = capsys.readouterr()
assert captured
assert 'Warning' in captured[0]
assert '\a' in captured[0]
assert 'Sauron is coming' in captured[0]
| mit | -4,525,069,628,563,459,600 | 22.206897 | 65 | 0.709955 | false |
gengwg/leetcode | 422_valid_word_square.py | 1 | 1830 | # 422 Valid Word Square
# Given a sequence of words, check whether it forms a valid word square.
#
# A sequence of words forms a valid word square if the kth row and column read the exact same string, where 0 ≤ k < max(numRows, numColumns).
#
# Note:
#
# The number of words given is at least 1 and does not exceed 500.
# Word length will be at least 1 and does not exceed 500.
# Each word contains only lowercase English alphabet a-z.
#
# Example 1:
#
# Input:
# [
# "abcd",
# "bnrt",
# "crmy",
# "dtye"
# ]
#
# Output:
# true
#
# Explanation:
# The first row and first column both read "abcd".
# The second row and second column both read "bnrt".
# The third row and third column both read "crmy".
# The fourth row and fourth column both read "dtye".
#
# Therefore, it is a valid word square.
#
# Example 2:
#
# Input:
# [
# "abcd",
# "bnrt",
# "crm",
# "dt"
# ]
#
# Output:
# true
#
# Explanation:
# The first row and first column both read "abcd".
# The second row and second column both read "bnrt".
# The third row and third column both read "crm".
# The fourth row and fourth column both read "dt".
#
# Therefore, it is a valid word square.
#
# Example 3:
#
# Input:
# [
# "ball",
# "area",
# "read",
# "lady"
# ]
#
# Output:
# false
#
# Explanation:
# The third row reads "read" while the third column reads "lead".
#
# Therefore, it is NOT a valid word square.
#
class Solution:
def validWordSquare(self, words):
for i in range(len(words)):
for j in range(len(words[i])):
if j >= len(words) or i >= len(words[j]) or words[i][j] != words[j][i]:
return False
return True
sol = Solution()
print(sol.validWordSquare(["abcd", "bnrt", "crm", "dt"]))
print(sol.validWordSquare(["ball", "area", "read", "lady"]))
| apache-2.0 | -5,595,526,661,145,807,000 | 20.761905 | 141 | 0.620897 | false |
cpburnz/python-path-specification | pathspec/_meta.py | 1 | 1295 | # encoding: utf-8
"""
This module contains the project meta-data.
"""
__author__ = "Caleb P. Burns"
__copyright__ = "Copyright © 2013-2021 Caleb P. Burns"
__credits__ = [
"dahlia <https://github.com/dahlia>",
"highb <https://github.com/highb>",
"029xue <https://github.com/029xue>",
"mikexstudios <https://github.com/mikexstudios>",
"nhumrich <https://github.com/nhumrich>",
"davidfraser <https://github.com/davidfraser>",
"demurgos <https://github.com/demurgos>",
"ghickman <https://github.com/ghickman>",
"nvie <https://github.com/nvie>",
"adrienverge <https://github.com/adrienverge>",
"AndersBlomdell <https://github.com/AndersBlomdell>",
"highb <https://github.com/highb>",
"thmxv <https://github.com/thmxv>",
"wimglenn <https://github.com/wimglenn>",
"hugovk <https://github.com/hugovk>",
"dcecile <https://github.com/dcecile>",
"mroutis <https://github.com/mroutis>",
"jdufresne <https://github.com/jdufresne>",
"groodt <https://github.com/groodt>",
"ftrofin <https://github.com/ftrofin>",
"pykong <https://github.com/pykong>",
"nhhollander <https://github.com/nhhollander>",
"KOLANICH <https://github.com/KOLANICH>",
"JonjonHays <https://github.com/JonjonHays>",
"Isaac0616 <https://github.com/Isaac0616>",
]
__license__ = "MPL 2.0"
__version__ = "0.9.0.dev1"
| mpl-2.0 | -1,687,340,402,531,221,200 | 34.944444 | 54 | 0.679289 | false |