# vertexproject/synapse | synapse/tests/test_lib_rstorm.py
import os
import synapse.exc as s_exc
import synapse.common as s_common
import synapse.lib.rstorm as s_rstorm
import synapse.tests.utils as s_test
rst_in = '''
HI
##
.. storm-cortex:: default
.. storm-cortex:: default
.. storm-opts:: {"vars": {"foo": 10, "bar": "baz"}}
.. storm-pre:: [ inet:asn=$foo ]
.. storm:: $lib.print($bar) $lib.warn(omgomgomg)
.. storm-expect:: baz
.. storm-pre:: [ inet:ipv6=0 ]
.. storm-pkg:: synapse/tests/files/stormpkg/testpkg.yaml
.. storm:: --hide-props testpkgcmd foo
.. storm:: --hide-query $lib.print(secret)
.. storm:: --hide-query file:bytes
.. storm-svc:: synapse.tests.files.rstorm.testsvc.Testsvc test {"secret": "jupiter"}
.. storm:: testsvc.test
'''
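# A rough summary of the rstorm directives exercised by the fixture above, as
# inferred from the expected output (rst_out) below; not authoritative docs:
#   .. storm-cortex::    spin up a temporary test cortex
#   .. storm-opts::      set runtime vars (e.g. $foo, $bar) used by later queries
#   .. storm-pre::       run a storm query for setup only (output not captured)
#   .. storm::           run a storm query and capture its output in the document
#   .. storm-expect::    expect a string (e.g. "baz") in the rendered output
#   .. storm-pkg:: / .. storm-svc::   load a storm package / service first
#   .. storm-mock-http:: serve a canned JSON file to the next $lib.inet.http call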
rst_out = '''
HI
##
::
> $lib.print($bar) $lib.warn(omgomgomg)
baz
WARNING: omgomgomg
::
> testpkgcmd foo
inet:ipv6=::ffff:0
::
secret
::
::
> testsvc.test
jupiter
'''
rst_in_debug = '''
HI
##
.. storm-cortex:: default
.. storm:: --debug [ inet:ipv4=0 ]
'''
rst_in_props = '''
HI
##
.. storm-cortex:: default
.. storm:: [ inet:ipv4=0 ]
'''
rst_out_props = '''
HI
##
::
> [ inet:ipv4=0 ]
inet:ipv4=0.0.0.0
:type = private
'''
rst_in_http = '''
HI
##
.. storm-cortex:: default
.. storm:: $resp=$lib.inet.http.get("http://foo.com") $d=$resp.json() $lib.print($d)
.. storm-mock-http:: synapse/tests/files/rstorm/httpresp1.json
.. storm:: $resp=$lib.inet.http.get("http://foo.com") $d=$resp.json() [ inet:ipv4=$d.data ]
.. storm-mock-http:: synapse/tests/files/rstorm/httpresp2.json
.. storm:: $resp=$lib.inet.http.get("http://foo.com") $d=$resp.json() [ inet:ipv4=$d.data ]
.. storm-mock-http:: synapse/tests/files/rstorm/httpresp3.json
.. storm:: $resp=$lib.inet.http.get("http://foo.com") $d=$resp.body.decode() [ it:dev:str=$d ]
'''
boom1 = '''
.. storm:: $lib.print(newp)
'''
boom2 = '''
.. storm-pre:: $lib.print(newp)
'''
boom3 = '''
.. storm-cortex:: default
.. storm:: $x = (10 + "foo")
'''
boom4 = '''
.. storm-pkg:: synapse/tests/files/stormpkg/testpkg.yaml
'''
boom5 = '''
.. storm-svc:: synapse.tests.files.rstorm.testsvc.Testsvc test {"secret": "jupiter"}
'''
boom6 = '''
.. storm-cortex:: default
.. storm-svc:: synapse.tests.files.rstorm.testsvc.Testsvc test
'''
boom7 = '''
.. storm-cortex:: default
.. storm-pkg:: synapse/tests/files/stormpkg/newp.newp
'''
boom8 = '''
.. storm-cortex:: default
.. storm-mock-http:: synapse/tests/files/rstorm/newp.newp
'''
boom9 = '''
.. storm-newp:: newp
'''
async def get_rst_text(rstfile):
async with await s_rstorm.StormRst.anit(rstfile) as rstorm:
lines = await rstorm.run()
return ''.join(lines)
class RStormLibTest(s_test.SynTest):
async def test_lib_rstorm(self):
with self.getTestDir() as dirn:
path = s_common.genpath(dirn, 'test.rst')
with s_common.genfile(path) as fd:
fd.write(rst_in.encode())
text = await get_rst_text(path)
self.eq(text, rst_out)
# debug output
path = s_common.genpath(dirn, 'test2.rst')
with s_common.genfile(path) as fd:
fd.write(rst_in_debug.encode())
text = await get_rst_text(path)
self.isin('node:edits', text)
self.isin('inet:ipv4', text)
# props output
path = s_common.genpath(dirn, 'test3.rst')
with s_common.genfile(path) as fd:
fd.write(rst_in_props.encode())
text = await get_rst_text(path)
text_nocrt = '\n'.join(line for line in text.split('\n') if '.created =' not in line)
self.eq(text_nocrt, rst_out_props)
# http
path = s_common.genpath(dirn, 'http.rst')
with s_common.genfile(path) as fd:
fd.write(rst_in_http.encode())
text = await get_rst_text(path)
self.isin('{}', text) # no mock gives empty response
self.isin('inet:ipv4=1.2.3.4', text) # first mock
self.isin('inet:ipv4=5.6.7.8', text) # one mock at a time
self.isin('it:dev:str=notjson', text) # one mock at a time
# boom1 test
path = s_common.genpath(dirn, 'boom1.rst')
with s_common.genfile(path) as fd:
fd.write(boom1.encode())
with self.raises(s_exc.NoSuchVar):
await get_rst_text(path)
# boom2 test
path = s_common.genpath(dirn, 'boom2.rst')
with s_common.genfile(path) as fd:
fd.write(boom2.encode())
with self.raises(s_exc.NoSuchVar):
await get_rst_text(path)
# boom3 test
path_boom3 = s_common.genpath(dirn, 'boom3.rst')
with s_common.genfile(path_boom3) as fd:
fd.write(boom3.encode())
with self.raises(s_exc.StormRuntimeError):
await get_rst_text(path_boom3)
# boom4 test
path = s_common.genpath(dirn, 'boom4.rst')
with s_common.genfile(path) as fd:
fd.write(boom4.encode())
with self.raises(s_exc.NoSuchVar):
await get_rst_text(path)
# boom5 test
path = s_common.genpath(dirn, 'boom5.rst')
with s_common.genfile(path) as fd:
fd.write(boom5.encode())
with self.raises(s_exc.NoSuchVar):
await get_rst_text(path)
# boom6 test
path = s_common.genpath(dirn, 'boom6.rst')
with s_common.genfile(path) as fd:
fd.write(boom6.encode())
with self.raises(s_exc.NeedConfValu):
await get_rst_text(path)
# boom7 test
path = s_common.genpath(dirn, 'boom7.rst')
with s_common.genfile(path) as fd:
fd.write(boom7.encode())
with self.raises(s_exc.NoSuchFile):
await get_rst_text(path)
# boom8 test
path = s_common.genpath(dirn, 'boom8.rst')
with s_common.genfile(path) as fd:
fd.write(boom8.encode())
with self.raises(s_exc.NoSuchFile):
await get_rst_text(path)
# boom9 test
path = s_common.genpath(dirn, 'boom9.rst')
with s_common.genfile(path) as fd:
fd.write(boom9.encode())
with self.raises(s_exc.NoSuchName):
await get_rst_text(path)
# make sure things get cleaned up
async with await s_rstorm.StormRst.anit(path_boom3) as rstorm:
try:
await rstorm.run()
self.fail('This must raise')
except s_exc.StormRuntimeError:
pass
self.true(rstorm.core.isfini)
self.true(rstorm.isfini)
self.false(os.path.exists(rstorm.core.dirn))
# bad path
path = s_common.genpath(dirn, 'newp.newp')
with self.raises(s_exc.BadConfValu):
await get_rst_text(path)
# license: apache-2.0

#!/usr/bin/env python
# jmesteve/openerpseda | openerp/tools/which.py
""" Which - locate a command
* adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch
* see http://bugs.python.org/issue444582
* uses ``PATHEXT`` on Windows
* searches current directory before ``PATH`` on Windows,
but not before an explicitly passed path
* accepts both string or iterable for an explicitly passed path, or pathext
* accepts an explicitly passed empty path, or pathext (either '' or [])
* does not search ``PATH`` for files that have a path specified in their name already
* moved defpath and defpathext lists initialization to module level,
instead of initializing them on each function call
* changed interface: which_files() returns generator, which() returns first match,
or raises IOError(errno.ENOENT)
.. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return a generator which yields full paths in which the *file* name exists
in a directory that is part of the file name, or on *path*,
and has the given *mode*.
By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an
existing executable file.
The *path* is, by default, the ``PATH`` variable on the platform,
or the string/iterable passed in as *path*.
In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used.
On Windows, a current directory is searched before using the ``PATH`` variable,
but not before an explicitly passed *path*.
The *pathext* is only used on Windows to match files with given extensions appended as well.
It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*.
In the event that a ``PATHEXT`` variable is not found,
default value for Windows XP/Vista is used.
The command is always searched without extension first,
even when *pathext* is explicitly passed.
.. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]])
Return first match generated by which_files(file, mode, path, pathext),
or raise IOError(errno.ENOENT).
"""
__docformat__ = 'restructuredtext en'
__all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split()
import sys
from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK
from os.path import exists, dirname, split, join
windows = sys.platform.startswith('win')
defpath = environ.get('PATH', defpath).split(pathsep)
if windows:
defpath.insert(0, '.') # can insert without checking, when duplicates are removed
# given the quite usual mess in PATH on Windows, let's rather remove duplicates
seen = set()
defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())]
del seen
defpathext = [''] + environ.get('PATHEXT',
'.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep)
else:
defpathext = ['']
def which_files(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function yields full paths (not necessarily absolute paths),
in which the given file name matches an existing file in a directory on the path.
>>> def test_which(expected, *args, **argd):
... result = list(which_files(*args, **argd))
... assert result == expected, 'which_files: %s != %s' % (result, expected)
...
... try:
... result = [ which(*args, **argd) ]
... except IOError:
... result = []
... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1])
>>> if windows: cmd = environ['COMSPEC']
>>> if windows: test_which([cmd], 'cmd')
>>> if windows: test_which([cmd], 'cmd.exe')
>>> if windows: test_which([cmd], 'cmd', path=dirname(cmd))
>>> if windows: test_which([cmd], 'cmd', pathext='.exe')
>>> if windows: test_which([cmd], cmd)
>>> if windows: test_which([cmd], cmd, path='<nonexistent>')
>>> if windows: test_which([cmd], cmd, pathext='<nonexistent>')
>>> if windows: test_which([cmd], cmd[:-4])
>>> if windows: test_which([cmd], cmd[:-4], path='<nonexistent>')
>>> if windows: test_which([], 'cmd', path='<nonexistent>')
>>> if windows: test_which([], 'cmd', pathext='<nonexistent>')
>>> if windows: test_which([], '<nonexistent>/cmd')
>>> if windows: test_which([], cmd[:-4], pathext='<nonexistent>')
>>> if not windows: sh = '/bin/sh'
>>> if not windows: test_which([sh], 'sh')
>>> if not windows: test_which([sh], 'sh', path=dirname(sh))
>>> if not windows: test_which([sh], 'sh', pathext='<nonexistent>')
>>> if not windows: test_which([sh], sh)
>>> if not windows: test_which([sh], sh, path='<nonexistent>')
>>> if not windows: test_which([sh], sh, pathext='<nonexistent>')
>>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you?
>>> if not windows: test_which([], 'sh', path='<nonexistent>')
>>> if not windows: test_which([], '<nonexistent>/sh')
"""
filepath, file = split(file)
if filepath:
path = (filepath,)
elif path is None:
path = defpath
elif isinstance(path, str):
path = path.split(pathsep)
if pathext is None:
pathext = defpathext
elif isinstance(pathext, str):
pathext = pathext.split(pathsep)
if not '' in pathext:
pathext.insert(0, '') # always check command without extension, even for custom pathext
for dir in path:
basepath = join(dir, file)
for ext in pathext:
fullpath = basepath + ext
if exists(fullpath) and access(fullpath, mode):
yield fullpath
def which(file, mode=F_OK | X_OK, path=None, pathext=None):
""" Locate a file in a path supplied as a part of the file name,
or the user's path, or a supplied path.
The function returns full path (not necessarily absolute path),
in which the given file name matches an existing file in a directory on the path,
or raises IOError(errno.ENOENT).
>>> # for doctest see which_files()
"""
try:
return iter(which_files(file, mode, path, pathext)).next()
except StopIteration:
try:
from errno import ENOENT
except ImportError:
ENOENT = 2
raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file)
if __name__ == '__main__':
import doctest
doctest.testmod()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
# license: agpl-3.0

# Elandril/Sick-Beard | cherrypy/process/win32.py
"""Windows service. Requires pywin32."""
import os
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=40)
return
result = win32api.SetConsoleCtrlHandler(self.handle, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=40)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=40)
return
try:
result = win32api.SetConsoleCtrlHandler(self.handle, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=40)
self.is_set = False
def handle(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
"WSPBus %s Event (pid=%r)" %
(state.name, os.getpid()))
self.events[state] = event
return event
def _get_state(self):
return self._state
def _set_state(self, value):
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
state = property(_get_state, _set_state)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError("The given object could not be found: %r" % obj)
control_codes = _ControlCodes({'graceful': 138})
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
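# Usage sketch (hypothetical service name; the service must already be installed):
#   signal_child('MyPyWebService', 'stop')      # delegates to win32serviceutil.StopService
#   signal_child('MyPyWebService', 'graceful')  # sends user-defined control code 138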
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = "Python Web Service"
_svc_display_name_ = "Python Web Service"
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = "pywebsvc"
_exe_args_ = None # Default to no arguments
# Only exists on Windows 2000 or later, ignored on windows NT
_svc_description_ = "Python Web Service"
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
    def SvcOther(self, control):
        # local import, mirroring SvcDoRun/SvcStop above
        from cherrypy import process
        process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService)
# license: gpl-3.0

# joshgabriel/MPInterfaces | mpinterfaces/calibrate.py
# coding: utf-8
# Copyright (c) Henniggroup.
# Distributed under the terms of the MIT License.
from __future__ import division, print_function, unicode_literals, \
absolute_import
"""
Calibration module:
This module contains the classes for
1. Calibrate: Base class for specifying the parameters for
calibration and setting up the VASP jobs in directory
structure according to
2. CalibrateBulk: calibrating a periodic bulk structure,
3. CalibrateSlab: creates a slab of given crystallographic facet,
thickness and vacuum spacing,
4. CalibrateMolecule: creates a molecule in a box
5. CalibrateInterface: calibrates an interface composed of slab plus
molecule
The attribute turn_knobs controls the parameters to be calibrated
for a given structure
"""
from six.moves import range
import sys
import os
import re
import datetime
from itertools import product
from collections import OrderedDict
import numpy as np
from pymatgen import Lattice
from pymatgen.core.structure import Structure
from pymatgen.core.surface import SlabGenerator
from pymatgen.io.vasp.inputs import Incar, Poscar
from pymatgen.io.vasp.inputs import Potcar, Kpoints
from pymatgen.io.vasp.outputs import Outcar, Vasprun
from pymatgen.symmetry.bandstructure import HighSymmKpath
from custodian.custodian import Custodian
from monty.json import MSONable, MontyEncoder
from monty.serialization import dumpfn
from mpinterfaces.instrument import MPINTVaspInputSet, MPINTVaspJob
from mpinterfaces.interface import Interface, Ligand
from mpinterfaces.utils import get_ase_slab, get_magmom_string, get_magmom_afm, \
get_magmom_mae, get_magmom_init, print_exception,get_defo_structure
from mpinterfaces.mat2d.electronic_structure import get_2D_hse_kpoints,\
get_2D_incar_hse_prep, get_2D_incar_hse
from mpinterfaces.default_logger import get_default_logger
__author__ = "Kiran Mathew, Joshua J. Gabriel"
__copyright__ = "Copyright 2017, Henniggroup"
__maintainer__ = "Joshua J. Gabriel"
__email__ = "joshgabriel92@gmail.com"
__status__ = "Production"
__date__ = "March 3, 2017"
logger = get_default_logger(__name__)
class Calibrate(MSONable):
"""
The base class for creating vasp work flows for
calibrating the input parameters for different systems
A wrapper around Custodian
"""
LOG_FILE = "calibrate.json"
def __init__(self, incar, poscar, potcar, kpoints, system=None,
is_matrix=False, Grid_type='A',
parent_job_dir='.', job_dir='Job',
qadapter=None, job_cmd='qsub', wait=True,
mappings_override=None, pseudopotential="PBE",
database=None, magnetism=None, mag_init=None, reuse=None,
reuse_override=None, reuse_incar=None, solvation=None,
turn_knobs=OrderedDict([('ENCUT', []),
('KPOINTS', [])]),
checkpoint_file=None, finer_kpoint=None, cal_logger=None,
test=False,incar_remove=None):
"""
Calibrate constructor
Args:
incar (Incar object): input INCAR
poscar (Poscar object): input POSCAR
potcar (Potcar object): input POTCAR
kpoints (Kpoints object): input KPOINTS
system: system info as a dictionary,
slab or interface example:
system={'hkl':[1,1,1], 'ligand':None},
is_matrix (bool): whether the jobs are dependent on each
other
Grid_type (str): kpoints grid_type
parent_job_dir (str): the directory from which all the
jobs are launched
job_dir (str): job directory created for each job in the
parent_job_dir
qadapter (?): adapter for the batch system
job_cmd (str): command to be used for submitting the job. If
qadapter is specified then job_cmd is ignored
            wait (bool): whether to wait for the job to finish. If the job is
being submitted to the queue then there is no need for
waiting
            turn_knobs (dict): an ordered dictionary of parameters and the
corresponding values
mappings_override (dict): override symbol mapping in potcar
eg:- {'S':'S_sv'}
pseudopotential (str): pseudopotential set
database (str): A work in progress, will be a database_name.yaml
file for defaults specific to a database workflow
that will have defaults for
INCAR: cutoff, convergence for relaxation and
continuation jobs
KPOINTS: for relaxation, band structure jobs
POTCAR: database specific
For now defaults to None, if set to 'twod'
activates twod set of directives
reuse (list or bool): list of filenames for reuse
Eg: ['CHGCAR', 'WAVECAR']
'CONTCAR' is copied by default and if found empty
warning is issued. Use the following flag for override
only if you know what you are doing
'True' for just copying the CONTCAR file
reuse_override (bool): whether to override the missing CONTCAR for a
reuse calc
magnetism (str): specifies magnetism calculation to be used
implemented are 'AntiFerroMagnetism' and
                             'Magnetic Anisotropy Energy'
solvation (bool): whether to activate a solvation job, sets LSOL=True
for now
Calibrate jobs represent the engine configuration of mpinterfaces,
where the fuel (input file sources) and driving method (kind of calculation)
are decided . The Engine itself is instrument.py which creates the input set
configured in Calibrate.
Current fueling methods:
1. simplest test case involving a single job:
- specify the incar, kpoints, poscar, potcar (aka the VASP 4)
explicitly as pymatgen objects
- turn_knobs = {} , is_matrix = False
2. test case for calibration of parameters:
- specify an initial configuration for the VASP 4
- specify parameters to calibrate via turn_knobs,
set is_matrix = True only if number of parameters > 1
3. Database production case: (possibly most used)
- specify initial configuration for the VASP 4 based on
a database.yaml
- specify an input.yaml that details the workflow
Note: input structure if needed will be obtained from the
provided poscar object
"""
self.name = datetime.datetime.now().isoformat()
self.system = system
self.parent_job_dir = os.path.abspath(parent_job_dir)
self.job_dir = job_dir
self.incar = incar
self.poscar = poscar
self.potcar = potcar
self.test = test
if poscar:
            if mappings_override:
                maps = [mappings_override[s] for s in poscar.site_symbols]
else:
maps = poscar.site_symbols
self.potcar = Potcar(symbols=maps,
functional=pseudopotential)
self.kpoints = kpoints
if incar:
self.incar_orig = incar.as_dict()
if poscar:
self.poscar_orig = poscar.as_dict()
if self.potcar:
self.potcar_orig = self.potcar.as_dict()
if kpoints:
self.kpoints_orig = kpoints.as_dict()
self.qadapter = qadapter
self.job_dir_list = []
self.jobs = []
self.job_ids = []
self.handlers = []
self.job_cmd = job_cmd
self.n_atoms = 0
self.turn_knobs = turn_knobs
self.response_to_knobs = {}
self.sorted_response_to_knobs = {}
for k, v in turn_knobs.items():
self.response_to_knobs[k] = {}
self.sorted_response_to_knobs[k] = {}
self.is_matrix = is_matrix
self.Grid_type = Grid_type
self.wait = wait
self.cal_log = []
self.mappings_override = mappings_override
self.database = database
self.magnetism = magnetism
self.mag_init = mag_init
self.solvation = solvation
self.reuse = reuse
self.reuse_incar = reuse_incar
self.incar_remove = incar_remove
self.reuse_override = reuse_override
self.reuse_paths = None # list object communicated to instrument
self.finer_kpoint = finer_kpoint
self.pseudopotential = pseudopotential
self.checkpoint_file = checkpoint_file
if cal_logger:
self.logger = cal_logger
else:
self.logger = logger
def setup(self):
"""
set up the jobs for the given turn_knobs dict
is_matrix = True implies that the params in the dict are
interrelated. Otherwise calcs corresponding to each dict key
is independent
"""
if self.is_matrix:
self.setup_matrix_job()
else:
self._setup()
def _setup(self, turn_knobs=None):
"""
invoke the set up methods corresponding to the dict keys
any key other than KPOINTS, VOLUME and POTCAR are treated
as INCAR parameters
Args:
            turn_knobs: knobs aka parameters to be tuned
Note: poscar jobs setup through the VOLUME is only for
backward compatibility, use POSCAR key in the
turn_knobs to tune poscars
"""
if turn_knobs is None:
turn_knobs = self.turn_knobs
if any(turn_knobs.values()):
for k, v in turn_knobs.items():
if k == 'POSCAR' and v:
if 'VOLUME' in list(self.turn_knobs.keys()):
self.setup_poscar_jobs(scale_list=self.turn_knobs['VOLUME'],\
poscar_list=v)
#del self.turn_knobs['VOLUME']
elif 'STRAINS' in list(self.turn_knobs.keys()):
self.setup_poscar_jobs(scale_list=self.turn_knobs['STRAINS'],\
poscar_list=v)
del self.turn_knobs['STRAINS']
else:
self.setup_poscar_jobs(poscar_list=v)
elif k == 'KPOINTS' and v:
self.setup_kpoints_jobs(kpoints_list=v)
elif k == 'VOLUME' and v:
self.setup_poscar_jobs(scale_list=v)
elif k == 'STRAINS' and v:
self.setup_poscar_jobs(scale_list=v)
elif k == 'POTCAR' and v:
self.setup_potcar_jobs(mappings=v, functional_list=None)
elif k == 'POTCAR_pseudopotential' and v:
self.setup_potcar_jobs(mappings=None,functional_list=v)
elif k == 'FUNCTIONAL' and v:
self.setup_incar_jobs(k,v)
else:
self.setup_incar_jobs(k, v)
else:
self.logger.warn('knobs not set, running a single job')
self.add_job(name='single_job', job_dir=self.job_dir)
def setup_matrix_job(self):
"""
set up jobs where the dict keys are interrelated
mind: its an ordered dict, the order in which the keys
are specified determines the nested directory structure
"""
orig_job_dir = self.job_dir
job_dir = self.job_dir
n_items = len(list(self.turn_knobs.items()))
keys = list(self.turn_knobs.keys())
#print (keys)
if 'POSCAR' in keys and 'STRAINS' in keys and len(keys)==2:
self._setup(turn_knobs=dict([('POSCAR',
self.turn_knobs['POSCAR'])]))
else:
#print ('Else here', keys[0])
#self._setup(turn_knobs=dict([(keys[0],
# self.turn_knobs[keys[0]])]))
self.recursive_jobs(n_items, keys, 0)
# restore
self.job_dir = orig_job_dir
def recursive_jobs(self, n, keys, i):
"""
recursively setup the jobs: used by setup_matrix_job
Args:
n: total number of knobs aka parameters to be tuned
keys: list of knobs i.e parameter names
i: ith knob
"""
#### Testing ####
# Orig
# job_dir = self.job_dir + os.sep + self.key_to_name(keys[i])
#print (n)
#print (i)
#print (self.job_dir)
#print (keys[i])
try:
job_dir = '__'.join(
[self.job_dir.split('/')[-1], self.key_to_name(keys[i])])
except:
job_dir = '__'.join(
[self.job_dir.split('__')[-1], self.key_to_name(keys[i])])
#### Testing ####
if i == n - 1 and i != 0:
for val in self.turn_knobs[keys[i]]:
##
## self.job_dir = job_dir + os.sep + self.val_to_name(val)
self.job_dir = '__'.join([job_dir, self.val_to_name(val)])
self.logger.info(
'setting jobs in the directory: ' + self.job_dir)
self._setup(turn_knobs=dict([(keys[i], [val])]))
#print ('add job for ',self.job_dir, val, keys[i])
self.add_job(name=job_dir, job_dir=self.job_dir)
elif i==0 and len(list(self.turn_knobs.keys()))==1:
## should be case iff when POSCAR and poscar transform are called together
for val in self.turn_knobs[keys[i]]:
self.job_dir = '__'.join([job_dir, self.val_to_name(val)])
self.logger.info(
'setting jobs in the directory: ' + self.job_dir)
self._setup(turn_knobs=dict([(keys[i], [val])]))
#self.add_job(name=job_dir, job_dir=self.job_dir)
else:
for val in self.turn_knobs[keys[i]]:
##
## self.job_dir = job_dir + os.sep + self.val_to_name(val)
#print ('comes here with',self.job_dir)
self.job_dir = '__'.join([job_dir, self.val_to_name(val)])
self.logger.info(
'setting jobs in the directory: ' + self.job_dir)
self._setup(turn_knobs=dict([(keys[i], [val])]))
self.recursive_jobs(n, keys, i + 1)
def key_to_name(self, key):
"""
convenient string mapping for the keys in the turn_knobs dict
Args:
key: key to the knob dict
Returns:
an appropriate string representation of the key so that
the name doesnt clash with the filenames
"""
if key == 'KPOINTS':
return 'KPTS'
elif key == 'POTCAR_map' or key == 'POTCAR_pseudopotential':
return 'POT'
elif key == 'POSCAR':
return 'POS'
else:
return key
def val_to_name(self, val):
"""
convert a value to a string so that it can be used for naming
the job directory
the decimal points in floats are replaced with underscore
character
        if the value is of type list, kpoint_to_name method is used,
        since only kpoint values are expected to be of type list
if the values is of type dict then potcar_to_name method is
invoked
Args:
val: knob value to be converted into an appropriate string
representation
Returns:
a string filename for the value
"""
if isinstance(val, float):
return re.sub('\.', '_', str(val))
elif isinstance(val, list):
return self.kpoint_to_name(val, 'M')
elif isinstance(val, dict):
return self.potcar_to_name(mapping=val)
elif isinstance(val, Poscar):
name = ''.join((val.comment).split()) + '_'+ \
str(val.structure.composition.reduced_formula)
#+ '_' + str(int(val.structure.lattice.volume))
return name.replace('\\', '_').replace('(', '_').replace(')', '_')
else:
return str(val)
def kpoint_to_name(self, kpoint, grid_type):
"""
get a string representation for the given kpoint
Args:
kpoint: an iterable
grid_type: grid_type used for the KPOINTS
Returns:
string representation for kpoint eg: Monkhorst Pack
2 2 2 will be named 2x2x2
"""
if grid_type == 'M' or grid_type == 'G':
kpoint = [str(k).replace('.', '_') for k in kpoint]
return 'x'.join(kpoint)
else:
return str(kpoint)
def potcar_to_name(self, mapping=None, pseudopotential=None):
"""
convert a symbol mapping and pseudopotential to a name that
can be used for setting up the potcar jobs
Args:
mapping: example:- if mapping = {'Pt':'Pt_pv',
                'Si':'Si_GW'} then the name will be Pt_pv_Si_GW;
                if no mapping is given, the pseudopotential name is used
Returns:
string
"""
if mapping:
l = [v for k, v in mapping.items()]
return '_'.join(l)
        elif pseudopotential:
            return pseudopotential
        else:
            return self.pseudopotential
def set_incar(self, param, val):
"""
set the incar paramter, param = val
"""
print (param, val)
self.incar[param] = val
#print (self.incar)
def set_functional(self,val):
"""
"""
if val == 'PBE':
print ('PBE')
self.logger.info('setting PBE as functional')
elif val == 'PBEsol':
print ('PS')
self.logger.info('setting PBEsol as functional')
func_dict = {'GGA':'PS'}
self.incar.update(func_dict)
elif val == 'vdW-OPTB88':
print ('vdW')
self.logger.info('setting vdW-OPTB88 as functional')
func_dict = {\
'AGGAC': 0.0,
'GGA': 'BO',
'LUSE_VDW': True,
'PARAM1': 0.1833333333,
'PARAM2': 0.22}
self.incar.update(func_dict)
elif val == 'SCAN':
print ('SCAN')
            self.logger.info('setting SCAN as functional')
func_dict = {\
'METAGGA': 'SCAN'}
self.incar.update(func_dict)
print (self.incar)
def set_poscar(self, scale=None, poscar=None):
"""
perturbs given structure by volume scaling factor
or takes user defined variants of Poscar
Args:
scale : Volume Scaling parameter
poscar : Poscar object of user defined structure
set the poscar: volume scaled by the scale factor
"""
if scale is not None:
try:
structure = self.poscar.structure
except:
print (print_exception())
structure = Poscar.from_dict(self.poscar_orig).structure
# case 1 volume scaling of poscar
#try:
if type(scale)!=list:
volume = structure.volume
structure.scale_lattice(scale * volume)
self.poscar=Poscar(structure,comment='Volume_{}'.format(scale))
elif scale[0]=='N11' or scale[0]=='N22':
self.poscar=\
get_defo_structure(structure,strain=scale[1],strain_direction=scale[0])
#except:
# print (print_exception())
# sys.exit()
elif poscar is not None:
self.poscar = poscar
def set_potcar(self, mapping=None, pseudopotential='PBE'):
"""
set the potcar: symbol to potcar type mapping
"""
symbols = self.poscar.site_symbols
mapped_symbols = []
if mapping:
for sym in symbols:
mapped_symbols.append(mapping[sym])
elif self.mappings_override:
for sym in symbols:
if sym in list(self.mappings_override.keys()):
mapped_symbols.append(self.mappings_override[sym])
else:
mapped_symbols.append(sym)
else:
mapped_symbols = symbols
if pseudopotential in ['PBE','LDA']:
func = pseudopotential
else:
func = self.pseudopotential
print ('setting potcar from', mapped_symbols)
self.potcar = Potcar(symbols=mapped_symbols,
functional=func)
def set_kpoints(self, kpoint=None, poscar=None, ibzkpth=None):
"""
set the kpoint
"""
# useful to check if a poscar is supplied from setup_poscar_jobs (most often the case)
# or this is a single poscar use case
if not poscar:
poscar = self.poscar
# splitting into two if elif branches means fewer if statements to check on
# a run
# Most general method of setting the k-points for
# different grid types
# NOTE: requires that at least one k-points value be passed
# as a turn - knobs list value
# this is not true for values that may be caculated out of
# a database
# use this part only if this is a non-database run for example
# for k-points calibration
if not self.database:
if self.Grid_type == 'M':
self.kpoints = Kpoints.monkhorst_automatic(kpts=kpoint)
elif self.Grid_type == 'A':
self.kpoints = Kpoints.automatic(subdivisions=kpoint)
elif self.Grid_type == 'G':
self.kpoints = Kpoints.gamma_automatic(kpts=kpoint)
elif self.Grid_type == '3D_vol':
self.kpoints = Kpoints.automatic_density_by_vol(structure=poscar.structure,
kppvol=kpoint)
elif self.Grid_type == 'bulk_bands_pbe':
self.kpoints = Kpoints.automatic_linemode(divisions=kpoint,
ibz=HighSymmKpath(
poscar.structure))
elif self.Grid_type == 'D':
self.kpoints = Kpoints.automatic_density(structure=poscar.structure,kppa=kpoint)
elif self.Grid_type == 'Finer_G_Mesh':
# kpoint is the scaling factor and self.kpoints is the old kpoint mesh
self.logger.info('Setting Finer G Mesh for {0} by scale {1}'.
format(kpoint, self.finer_kpoint))
self.kpoints = Kpoints.gamma_automatic(kpts = \
[i * self.finer_kpoint for i in kpoint])
self.logger.info('Finished scaling operation of k-mesh')
elif self.Grid_type == 'TwoD':
self.kpoints = Kpoints.automatic_density(structure=poscar.structure,kppa=kpoint)
kpoint_dict = self.kpoints.as_dict()
kpoint_dict['kpoints'][0][2] = 1 # remove z kpoints
self.kpoints = Kpoints.from_dict(kpoint_dict)
elif self.Grid_type == 'DG':
self.kpoints = Kpoints.automatic_gamma_density(structure=poscar.structure,kppa=kpoint)
# applicable for database runs
# future constructs or settinsg can be activated via a yaml file
        # database yaml file or better still the input deck from its specification
# decides what combination of input calibrate constructor settings to use
# one of them being the grid_type tag
elif self.database == 'twod':
# set of kpoints settings according to the 2D database profile
# the actual settings of k-points density
# will in future come from any database input file set
if self.Grid_type == 'hse_bands_2D_prep':
kpoint_dict = Kpoints.automatic_gamma_density(poscar.structure,
200).as_dict()
kpoint_dict['kpoints'][0][2] = 1 # remove z kpoints
self.kpoints = Kpoints.from_dict(kpoint_dict)
elif self.Grid_type == 'hse_bands_2D':
# can at most return the path to the correct kpoints file
# needs kpoints to be written out in instrument in a different way
# not using the Kpoints object
self.kpoints = get_2D_hse_kpoints(poscar.structure, ibzkpth)
elif self.Grid_type == 'bands_2D':
kpoint_dict = Kpoints.automatic_linemode(divisions=20,
ibz=HighSymmKpath(poscar.structure)).as_dict()
self.kpoints = Kpoints.from_dict(kpoint_dict)
elif self.Grid_type == 'relax_2D':
# general relaxation settings for 2D
kpoint_dict = Kpoints.automatic_gamma_density(poscar.structure,
1000).as_dict()
kpoint_dict['kpoints'][0][2] = 1
self.kpoints = Kpoints.from_dict(kpoint_dict)
elif self.Grid_type == 'relax_3D':
# general relaxation settings for 3D
                kpoint_dict = Kpoints.automatic_gamma_density(
                    poscar.structure, 1000).as_dict()
                self.kpoints = Kpoints.from_dict(kpoint_dict)
def setup_incar_jobs(self, param, val_list):
"""
        set up incar jobs, calls set_incar to set the value to param
Args:
param: Name of INCAR parameter
val_list: List of values to vary for the param
"""
if val_list != ['2D_default'] and param!='FUNCTIONAL':
for val in val_list:
self.logger.info('setting INCAR parameter ' + param + ' = '
+ str(val))
self.set_incar(param, val)
if not self.is_matrix:
job_dir = self.job_dir + os.sep + \
param + os.sep + self.val_to_name(val)
self.add_job(name=job_dir, job_dir=job_dir)
# print ('add job called')
elif param == 'FUNCTIONAL':
for val in val_list:
self.set_functional(val)
else:
self.logger.warn('incar list empty')
def setup_kpoints_jobs(self, kpoints_list=None):
"""
setup the kpoint jobs
"""
if kpoints_list:
for kpoint in kpoints_list:
self.set_kpoints(kpoint)
if not self.is_matrix:
job_dir = self.job_dir + os.sep + self.key_to_name(
'KPOINTS') \
+ os.sep + self.kpoint_to_name(kpoint,
self.Grid_type)
self.add_job(name=job_dir, job_dir=job_dir)
#print ('add job called')
else:
self.logger.warn('kpoints_list empty')
def setup_poscar_jobs(self, scale_list=None, poscar_list=None):
"""
        for scaling the lattice vectors of the original structure,
scale_list is volume scaling factor list
"""
incar_init = self.incar
incar_remove = self.incar_remove
if scale_list and not poscar_list:
for scale in scale_list:
self.set_poscar(scale=scale)
self.set_potcar()
if not self.is_matrix:
job_dir = self.job_dir + os.sep + 'POS' + \
os.sep + 'VOLUME_' + str(scale)
self.add_job(name=job_dir, job_dir=job_dir)
#print ('add job called')
elif poscar_list:
if not scale_list:
scale_list = [[1.0]]
for pos in poscar_list:
for n,scale in enumerate(scale_list):
print ('setting volume scaling to cell as ', scale)
# if it is a twod_database run or any general standard database run,
# the incar, kpoints and potcar follow a standard input set
# which will be activated by the twod_database tag set to true
# NOTE: this implementation means that the first turn_knobs tag
# needs to be the poscar objects list
# the database tag will be set to the name of the yaml file with the
# standard input deck definition for that database
# this incar dict provided as the init can be general format
# based on the chosen functional, cutoff
# so self.incar is a vdW incar for re-relaxation in vdW, gga for every
# other calculation or LDA+U for LSDA+U calculations
incar_dict = incar_init
#print (incar_dict)
if self.reuse:
# if this is a true list minimally, ['CONTCAR']
# it is to be ensured that the poscar list is a
# list of paths as opposed to list of poscar objects by the turn knobs
# values
# Here pos is the path and r in each self.reuse is the name of the file(s)
# to be reused
# in a reuse calculation the following are possible:
# update incar (Solvation calculations) or reset incar (HSE calculations)
# reset kpoints file with IBZKPT
# copy a CHGCAR or WAVECAR or both perhaps
try:
# first setup of POSCAR initial, INCAR, KPOINTS
self.poscar = Poscar.from_file(pos + os.sep + 'CONTCAR')
self.set_poscar(scale=scale)
#print ('Transform',scale)
if scale_list[0] == 1.0 and len(scale_list)==1:
self.job_dir = pos.split('/')[-1]
else:
# think that transform is unnecessary
#self.job_dir = pos.split('/')[-1]+'_TRANSFORM_'+\
# str(scale).replace('.','_').replace("[",'').\
# replace("]",'')
self.job_dir = pos.split('/')[-1]+'_'+\
self.kpoint_to_name(scale,grid_type='G')
potcar = Potcar.from_file(pos + os.sep + 'POTCAR').as_dict()
poscar = self.poscar
#kpoints = Kpoints.from_file(pos+os.sep+'KPOINTS')
self.logger.info('Read previous relaxed CONTCAR file from {}'.
format(pos))
# check if it is KPOINTS altering job like HSE
if self.Grid_type == 'hse_bands_2D_prep':
# HSE prep calcualtions
# reset the INCAR file with a magmom only if exists
try:
incar_dict = {
'MAGMOM': get_magmom_string(poscar)}
except:
incar_dict = {}
incar_dict = get_2D_incar_hse_prep(incar_dict)
self.set_kpoints(poscar=poscar)
self.logger.info(
                                'updated input set for HSE 2D prep calculation')
elif self.Grid_type == 'hse_bands_2D':
# HSE calculation
# reset the incar and kpoints file builds
# on the preceding calculations (prep calculation)
# IBZKPT
try:
incar_dict = {
'MAGMOM': get_magmom_string(poscar)}
except:
incar_dict = {}
incar_dict = get_2D_incar_hse(incar_dict)
self.set_kpoints(poscar=poscar,
ibzkpth=pos + os.sep + 'IBZKPT')
                            self.logger.info('updated input set for HSE calculation\
                            using IBZKPT from {0}'.format(pos + os.sep + 'IBZKPT'))
elif self.Grid_type == 'hse_bands':
# general HSE bands
pass
elif self.Grid_type == 'Finer_G_Mesh':
self.logger.info('updating to Finer G Mesh')
kpoint = Kpoints.from_file(pos+os.sep+'KPOINTS')
self.set_kpoints(kpoint=kpoint.kpts[0])
else:
# use the same kpoints file and build from the old
# incar
self.kpoints = Kpoints.from_file(
pos + os.sep + 'KPOINTS')
# decide on how to use incar, use same one or
# update or afresh
if self.reuse_incar == 'old':
# reuse same incar with no updates done to it
incar_dict = Incar.from_file(
pos + os.sep + 'INCAR').as_dict()
elif self.reuse_incar == 'update':
# reuse same incar but with updates done to it
self.logger.info('updating incar at {}'.format(pos))
incar_dict_init = Incar.from_file(pos + os.sep + 'INCAR')
#print ('Reading INCAR from directory ', incar_dict_init)
#print ('What should be updating', incar_dict)
incar_dict_init.update(incar_dict)
incar_dict = incar_dict_init
#print ('Final update')
#print (incar_dict)
elif self.reuse_incar == 'update_remove':
self.logger.info('updating incar at {}'.format(pos))
incar_dict_init = Incar.from_file(pos + os.sep + 'INCAR')
print (incar_dict_init)
incar_dict_init.update(incar_dict)
for i in self.incar_remove:
print (i)
del incar_dict_init[i]
incar_dict = incar_dict_init
else:
# use a fresh incar as specified by the init
# way to go for example for LDAU or other
# major removals done to INCAR
# but always retain the MAGMOM if present
old_incar_dict = Incar.from_file(
pos + os.sep + 'INCAR').as_dict()
if 'MAGMOM' in old_incar_dict.keys():
incar_dict['MAGMOM'] = old_incar_dict[
'MAGMOM']
else:
incar_dict = incar_dict
if isinstance(self.reuse, list):
# for files to be reused: example CHGCAR, WAVECAR, etc.
reuse_paths = [
pos + os.sep + r for r in self.reuse]
self.reuse_paths = reuse_paths
# Magnetism use cases, updates to be made to the INCAR (MAE)
# and poscar (AFM)
# MAE and AFM
#print ('Here')
if self.magnetism == 'MAE':
# remove vdW tags for MAE calculations
vdW_tags = ('GGA', 'AGGAC', 'LUSE_VDW',
'PARAM1', 'PARAM2')
for key in vdW_tags:
if key in incar_dict:
del incar_dict[key]
self.logger.info(
'updating input set for MAE calculation')
self.mag_init = Outcar(
pos + os.sep + 'OUTCAR').total_mag
nbands = 2 * \
Vasprun(pos + os.sep +
'vasprun.xml').parameters['NBANDS']
# u_value = Vasprun(pos+os.sep+'vasprun.xml').incar['LDAUU']
# u_value = 4.0
self.logger.info(
"updating mag mom with value {0}".format(self.mag_init))
self.logger.info(
"updating NBANDS with {0}".format(nbands))
incar_dict.update({'NBANDS': nbands,
'LSORBIT': True,
'EDIFF': 1e-08,
'ICHARG': 11,
'LMAXMIX': 4,
'LCHARG': False,
'ISYM': 0,
'NSW': 0,
'ISPIN': 2,
'IBRION': -1,
'LORBIT': 11,
'MAGMOM': get_magmom_mae(poscar, self.mag_init)
})
# incar_dict.update({'LDAUU': u_value})
elif self.magnetism == 'AFM':
self.logger.info(
'updating INCAR and POSCAR for AFM calculation')
afm, poscar = get_magmom_afm(poscar, self.database)
self.set_poscar(poscar=poscar)
incar_dict.update({'MAGMOM': afm})
elif self.magnetism == 'Relaxed':
self.logger.info(
'updating INCAR with the total magnetization obtained in the relaxed state')
try:
out_mag = Outcar(
pos + os.sep + 'OUTCAR')
if out_mag.magnetization:
#print ('reading tot')
self.mag_init = [i['tot'] for i in out_mag.magnetization]
else:
#print ('reading total mag')
mag_tot = out_mag.total_mag
self.mag_init = mag_tot/len(poscar.structure.sites)
incar_dict.update({'MAGMOM':get_magmom_init(poscar, self.mag_init)})
except:
self.logger.info('no mag relaxed')
elif self.magnetism == 'Remove':
try:
del incar_dict['MAGMOM']
incar_dict.update({'ISPIN':1})
self.logger.info('Removed magnetism settings')
except:
self.logger.info('No previous magnetism settings')
except:
# check what to do if the previous calculation being reused is not
# actuall done .. system exit or adopt a user override
# with POSCAR
print (print_exception())
self.logger.warn(
'Empty relaxed CONTCAR file .. Probably job not done')
if not self.reuse_override:
self.logger.warn(
'You can set reuse_override to continue with POSCAR file, exiting now ..')
sys.exit(0)
else:
self.logger.info('Using old Poscar for rerun')
poscar = Poscar.from_file(pos + os.sep + 'POSCAR')
self.incar = Incar.from_dict(incar_dict)
# case for non - reuse
else:
poscar = pos
# temporary: magnetism only set if twod flag is activated
if self.database == 'twod':
incar_dict.update(
{'MAGMOM': get_magmom_string(poscar)})
self.set_kpoints(poscar=poscar)
#self.incar = Incar.from_dict(incar_dict)
# Long term solution for magmom initialization
if self.magnetism == 'Init_by_file':
self.logger.info('Updating magmom from input mag_inits.yaml')
if 'LSORBIT' in list(incar_dict.keys()):
magmom = get_magmom_init(poscar,is_spin_orbit=True)
else:
magmom = get_magmom_init(poscar)
incar_dict.update({'MAGMOM': magmom})
elif self.magnetism == 'General_Init':
self.logger.info('Updating magmom with transition metal as 6.0 \
everything else as 0.5')
incar_dict.update(\
{'MAGMOM': get_magmom_string(poscar.structure),\
'ISPIN': 2})
self.incar = Incar.from_dict(incar_dict)
self.poscar = poscar
#self.set_poscar(poscar=poscar)
if not self.reuse:
self.set_potcar()
else:
self.potcar = Potcar.from_dict(potcar)
if not self.is_matrix:
job_dir = self.job_dir + os.sep + 'POS' + \
os.sep + self.val_to_name(poscar)
self.add_job(name=job_dir, job_dir=job_dir)
#elif self.is_matrix and scale_list[0]!=[1.0]:
#print ('adding poscar and volume job')
# self.add_job(name=self.job_dir, job_dir=self.job_dir)
#print ('set job dir', self.job_dir)
def setup_potcar_jobs(self, mappings, functional_list):
"""
take a list of symbol mappings and setup the potcar jobs
"""
if functional_list:
for func in functional_list:
self.set_potcar(pseudopotential=func)
if not self.is_matrix:
job_dir = self.job_dir + os.sep \
+ self.key_to_name('POTCAR') \
                        + os.sep + self.potcar_to_name(pseudopotential=func)
self.add_job(name=job_dir, job_dir=job_dir)
elif mappings:
for mapping in mappings:
self.set_potcar(mapping)
if not self.is_matrix:
job_dir = self.job_dir + os.sep \
+ self.key_to_name('POTCAR') \
+ os.sep + self.potcar_to_name(mapping)
self.add_job(name=job_dir, job_dir=job_dir)
def add_job(self, name='noname', job_dir='.'):
"""
add a single job using the current incar, poscar, potcar and
kpoints
"""
#print ('call add job')
vis = MPINTVaspInputSet(name, self.incar, self.poscar,
self.kpoints, self.potcar,
self.qadapter, vis_logger=self.logger,
reuse_path=self.reuse_paths, test=self.test)
# the job command can be overrridden in the run method
job = MPINTVaspJob(self.job_cmd, name=name, final=True,
parent_job_dir=self.parent_job_dir,
job_dir=job_dir, vis=vis, wait=self.wait,
vjob_logger=self.logger)
self.job_dir_list.append(os.path.abspath(job_dir))
self.jobs.append(job)
def run(self, job_cmd=None):
"""
run the vasp jobs through custodian
if the job list is empty,
run a single job with the initial input set
"""
for j in self.jobs:
if job_cmd is not None:
j.job_cmd = job_cmd
else:
j.job_cmd = self.job_cmd
c_params = {'jobs': [j.as_dict() for j in self.jobs],
'handlers': [h.as_dict() for h in self.handlers],
'max_errors': 5}
c = Custodian(self.handlers, self.jobs, max_errors=5)
c.run()
for j in self.jobs:
self.cal_log.append({"job": j.as_dict(),
'job_id': j.job_id,
"corrections": [],
'final_energy': None})
self.job_ids.append(j.job_id)
if self.checkpoint_file:
dumpfn(self.cal_log, self.checkpoint_file,
cls=MontyEncoder, indent=4)
else:
dumpfn(self.cal_log, Calibrate.LOG_FILE, cls=MontyEncoder,
indent=4)
def as_dict(self):
qadapter = None
system = None
if self.qadapter:
qadapter = self.qadapter.to_dict()
if self.system is not None:
system = self.system
d = dict(incar=self.incar.as_dict(),
poscar=self.poscar.as_dict(),
potcar=self.potcar.as_dict(),
kpoints=self.kpoints.as_dict(),
system=system, is_matrix=self.is_matrix,
Grid_type=self.Grid_type,
parent_job_dir=self.parent_job_dir,
job_dir=self.job_dir,
qadapter=qadapter, job_cmd=self.job_cmd,
wait=self.wait,
turn_knobs=self.turn_knobs,
job_dir_list=self.job_dir_list,
job_ids=self.job_ids)
d["@module"] = self.__class__.__module__
d["@class"] = self.__class__.__name__
# d['calibrate'] = self.__class__.__name__
return d
@classmethod
def from_dict(cls, d):
incar = Incar.from_dict(d["incar"])
poscar = Poscar.from_dict(d["poscar"])
potcar = Potcar.from_dict(d["potcar"])
kpoints = Kpoints.from_dict(d["kpoints"])
cal = Calibrate(incar, poscar, potcar, kpoints,
system=d["system"], is_matrix=d["is_matrix"],
Grid_type=d["Grid_type"],
parent_job_dir=d["parent_job_dir"],
job_dir=d["job_dir"], qadapter=d.get("qadapter"),
job_cmd=d["job_cmd"], wait=d["wait"],
turn_knobs=d["turn_knobs"])
cal.job_dir_list = d["job_dir_list"]
cal.job_ids = d["job_ids"]
return cal
class CalibrateMolecule(Calibrate):
"""
    Calibrate parameters for Molecule calculations
"""
def __init__(self, incar, poscar, potcar, kpoints, system=None,
is_matrix=False, Grid_type='A',
parent_job_dir='.',
job_dir='./Molecule', qadapter=None,
job_cmd='qsub', wait=True,
mappings_override=None, pseudopotential="PBE",
turn_knobs={'ENCUT': [], 'KPOINTS': []},
checkpoint_file=None, cal_logger=None):
Calibrate.__init__(self, incar, poscar, potcar, kpoints,
system=system, is_matrix=is_matrix,
Grid_type=Grid_type,
parent_job_dir=parent_job_dir,
job_dir=job_dir, qadapter=qadapter,
job_cmd=job_cmd, wait=wait,
mappings_override=mappings_override,
pseudopotential=pseudopotential,
turn_knobs=turn_knobs,
checkpoint_file=checkpoint_file,
cal_logger=cal_logger)
def setup_kpoints_jobs(self, Grid_type='M',
kpoints_list=None, conv_step=1):
self.logger.warn("Its a molecule ! no need for kpoint convergence")
self.kpoints = Kpoints.monkhorst_automatic(kpts=[1, 1, 1])
return
class CalibrateBulk(Calibrate):
"""
Calibrate parameters for Bulk calculations
"""
def __init__(self, incar, poscar, potcar, kpoints, system=None,
is_matrix=False, Grid_type='A',
parent_job_dir='.',
job_dir='./Bulk', qadapter=None,
job_cmd='qsub', wait=True,
mappings_override=None, pseudopotential="PBE",
turn_knobs={'ENCUT': [], 'KPOINTS': []},
checkpoint_file=None, cal_logger=None, test=False):
Calibrate.__init__(self, incar, poscar, potcar, kpoints,
system=system, is_matrix=is_matrix,
Grid_type=Grid_type,
parent_job_dir=parent_job_dir,
job_dir=job_dir, qadapter=qadapter,
job_cmd=job_cmd, wait=wait,
mappings_override=mappings_override,
pseudopotential=pseudopotential,
turn_knobs=OrderedDict(turn_knobs),
checkpoint_file=checkpoint_file,
cal_logger=cal_logger, test=test)
class CalibrateSlab(Calibrate):
"""
    Calibrate parameters for Slab calculations
"""
def __init__(self, incar, poscar, potcar, kpoints, system=None,
is_matrix=False, Grid_type='A',
parent_job_dir='.', job_dir='./Slab',
qadapter=None, job_cmd='qsub', wait=True,
mappings_override=None, pseudopotential="PBE",
turn_knobs={'VACUUM': [], 'THICKNESS': []},
from_ase=False, checkpoint_file=None,
cal_logger=None, test=False):
self.from_ase = from_ase
self.is_matrix = is_matrix
self.system = system
self.input_structure = poscar.structure.copy()
self.slab_setup(turn_knobs=turn_knobs)
Calibrate.__init__(self, incar, poscar, potcar, kpoints,
system=system, is_matrix=is_matrix,
Grid_type=Grid_type,
parent_job_dir=parent_job_dir,
job_dir=job_dir, qadapter=qadapter,
job_cmd=job_cmd, wait=wait,
mappings_override=mappings_override,
pseudopotential=pseudopotential,
turn_knobs=turn_knobs,
checkpoint_file=checkpoint_file,
cal_logger=cal_logger, test=test)
def slab_setup(self, turn_knobs=None):
"""
invoke the set up methods corresponding to the dict keys:
VACUUM and THICKNESS
sets the POSCAR key in the turn_knobs
"""
if turn_knobs is None:
turn_knobs = self.turn_knobs
if any(turn_knobs.values()):
keys = ['VACUUM', 'THICKNESS']
poscar_list = []
if self.is_matrix:
prod_list = [turn_knobs[k] for k in keys]
for params in product(*tuple(prod_list)):
poscar_list.append(self.create_slab(*params))
else:
for k, v in turn_knobs.items():
if k == 'VACUUM' and v:
poscar_list += self.setup_vacuum_jobs(v)
elif k == 'THICKNESS' and v:
poscar_list += self.setup_thickness_jobs(v)
for k in keys:
if turn_knobs.get(k):
del turn_knobs[k]
turn_knobs['POSCAR'] = poscar_list
def setup_vacuum_jobs(self, vacuum_list):
"""
create slabs with the provided vacuum settings
returns list of poscars
"""
return [self.create_slab(vacuum=val) for val in vacuum_list]
def setup_thickness_jobs(self, thickness_list):
"""
create slabs with the provided thickness settings
returns list of poscars
"""
return [self.create_slab(thickness=val) for val in thickness_list]
def create_slab(self, vacuum=12, thickness=10):
"""
set the vacuum spacing, slab thickness and call sd_flags
for top 2 layers
returns the poscar corresponding to the modified structure
"""
strt_structure = self.input_structure.copy()
if self.from_ase:
slab_struct = get_ase_slab(strt_structure, hkl=self.system['hkl'],
min_thick=thickness, min_vac=vacuum)
else:
slab_struct = SlabGenerator(initial_structure=strt_structure,
miller_index=self.system['hkl'],
min_slab_size=thickness,
min_vacuum_size=vacuum,
lll_reduce=False, center_slab=True,
primitive=False).get_slab()
slab_struct.sort()
sd = self.set_sd_flags(slab_struct)
comment = 'VAC' + str(vacuum) + 'THICK' + str(thickness)
return Poscar(slab_struct, comment=comment,
selective_dynamics=sd)
@staticmethod
def set_sd_flags(interface=None, n_layers=2, top=True, bottom=True):
"""
set the relaxation flags for top and bottom layers of interface.
The upper and lower bounds of the z coordinate are determined
based on the slab. All layers above and below the bounds will
be relaxed. This means if there is a ligand on top of the slab,
all of its atoms will also be relaxed.
"""
sd_flags = np.zeros_like(interface.frac_coords)
if isinstance(interface, Interface):
slab = interface.slab
else:
slab = interface
z_coords = interface.frac_coords[:, 2]
z_coords_slab = slab.frac_coords[:, 2]
z_lower_bound = None
z_upper_bound = None
if bottom:
z_lower_bound = np.unique(z_coords_slab)[n_layers - 1]
sd_flags[np.where(z_coords <= z_lower_bound)] = np.ones((1, 3))
if top:
z_upper_bound = np.unique(z_coords_slab)[-n_layers]
sd_flags[np.where(z_coords >= z_upper_bound)] = np.ones((1, 3))
return sd_flags.tolist()
def set_reconstructed_surface(self, sites_to_add):
"""
Append sites as needed for reconstruction TODO
"""
pass
class CalibrateInterface(CalibrateSlab):
"""
    Calibrate parameters for interface calculations
"""
def __init__(self, incar, poscar, potcar, kpoints, system=None,
is_matrix=False, Grid_type='A',
parent_job_dir='.', job_dir='./Interface',
qadapter=None, job_cmd='qsub', wait=True,
mappings_override=None, pseudopotential="PBE",
turn_knobs={'VACUUM': [], 'THICKNESS': []},
from_ase=False, checkpoint_file=None,
cal_logger=None):
CalibrateSlab.__init__(self, incar, poscar, potcar, kpoints,
system=system, is_matrix=is_matrix,
Grid_type=Grid_type,
parent_job_dir=parent_job_dir,
job_dir=job_dir, qadapter=qadapter,
job_cmd=job_cmd, wait=wait,
mappings_override=mappings_override,
pseudopotential=pseudopotential,
turn_knobs=turn_knobs,
from_ase=from_ase,
checkpoint_file=checkpoint_file,
cal_logger=cal_logger)
self.interface_setup(turn_knobs=turn_knobs)
def interface_setup(self, turn_knobs=None):
if not self.system.get('ligand'):
return
else:
if turn_knobs is None:
turn_knobs = self.turn_knobs
if any(turn_knobs.values()):
poscar_list = []
poscar_list.append(self.create_interface())
turn_knobs['POSCAR'] = poscar_list
    def create_interface(self):
        """
        add params that you want to vary
        """
        structure = self.input_structure.copy()
        iface = Interface(structure,
                          hkl=self.system['hkl'],
                          ligand=Ligand.from_dict(
                              self.system['ligand']),
                          from_ase=self.from_ase)
        iface.sort()
        sd = self.set_sd_flags(iface, n_layers=2)
        # if there are other parameters that are being varied
        # change the comment accordingly
        comment = str(self.system['hkl']) + self.system['ligand']['name']
        return Poscar(iface, comment=comment,
                      selective_dynamics=sd)
if __name__ == '__main__':
# STRUCTURE
a0 = 3.965
lvec = [[0.5, 0.0, 0.5], [0.5, 0.5, 0.0], [0.0, 0.5, 0.5]]
lvec = np.array(lvec) * a0
lattice = Lattice(lvec)
structure = Structure(lattice, ['Pt'], [[0.0, 0.0, 0.0]],
coords_are_cartesian=False)
# INITIAL VASP INPUT SET
incarparams = {'System': 'test',
'ENCUT': 400,
'ISMEAR': 1,
'SIGMA': 0.1,
'EDIFF': 1E-6}
incar = Incar(params=incarparams)
poscar = Poscar(structure, comment='test')
potcar = Potcar(symbols=poscar.site_symbols, functional='PBE',
sym_potcar_map=None)
kpoints = Kpoints.monkhorst_automatic(kpts=(16, 16, 16),
shift=(0, 0, 0))
# CALIBRATION INPUT
system = {'hkl': [1, 1, 1], 'ligand': None}
turn_knobs = OrderedDict([
('SIGMA', [0.025, 0.50]),
('POTCAR', [{'Pt': 'Pt'}, {'Pt': 'Pt_pv'}, {'Pt': 'Pt_GW'}]),
('IBRION', [1, 2, 3]),
('KPOINTS', [k for k in range(20, 40, 10)]),
('ENCUT', list(range(400, 700, 100))),
('VACUUM', [10, 12, 15]),
('THICKNESS', [11])
])
is_matrix = True
job_dir = 'Slab'
job_cmd = ['ls', '-lt']
# SETUP AND RUN
cal = CalibrateSlab(incar, poscar, potcar, kpoints,
system=system,
is_matrix=is_matrix,
job_dir=job_dir,
turn_knobs=turn_knobs, test=True)
cal.setup()
cal.run(job_cmd)
| mit | -5,084,235,460,146,115,000 | 42.896877 | 118 | 0.49373 | false |
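A quick way to see what set_sd_flags produces is to run the same layer logic on a toy slab. The sketch below is not part of the original module; the helper name, the six fractional z coordinates and the layer count are all invented for illustration.

import numpy as np

def sd_flags_for_slab(frac_z, n_layers=2, top=True, bottom=True):
    # mirror of set_sd_flags above: free (1 1 1) only the outermost n_layers,
    # keep everything in between fixed (0 0 0)
    flags = np.zeros((len(frac_z), 3))
    layers = np.unique(frac_z)
    if bottom:
        flags[frac_z <= layers[n_layers - 1]] = 1
    if top:
        flags[frac_z >= layers[-n_layers]] = 1
    return flags.tolist()

# hypothetical 6-atom slab with four distinct layers
z = np.array([0.10, 0.10, 0.20, 0.30, 0.40, 0.40])
print(sd_flags_for_slab(z, n_layers=1))
# atoms at z=0.10 and z=0.40 relax ([1.0, 1.0, 1.0]); the middle layers stay [0.0, 0.0, 0.0]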
tedder/ansible | lib/ansible/modules/web_infrastructure/sophos_utm/utm_proxy_exception.py | 31 | 7499 | #!/usr/bin/python
# Copyright: (c) 2018, Sebastian Schenzel <sebastian.schenzel@mailbox.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: utm_proxy_exception
author:
- Sebastian Schenzel (@RickS-C137)
short_description: Create, update or destroy reverse_proxy exception entry in Sophos UTM
description:
- Create, update or destroy a reverse_proxy exception entry in SOPHOS UTM.
- This module needs to have the REST Ability of the UTM to be activated.
version_added: "2.8"
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: True
type: str
op:
description:
- The operand to be used with the entries of the path parameter
default: 'AND'
choices:
- 'AND'
- 'OR'
required: False
type: str
path:
description:
- The paths the exception in the reverse proxy is defined for
type: list
default: []
required: False
skip_custom_threats_filters:
description:
- A list of threats to be skipped
type: list
default: []
required: False
skip_threats_filter_categories:
description:
- Define which categories of threats are skipped
type: list
default: []
required: False
skipav:
description:
- Skip the Antivirus Scanning
default: False
type: bool
required: False
skipbadclients:
description:
- Block clients with bad reputation
default: False
type: bool
required: False
skipcookie:
description:
- Skip the Cookie Signing check
default: False
type: bool
required: False
skipform:
description:
- Enable form hardening
default: False
type: bool
required: False
skipform_missingtoken:
description:
- Enable form hardening with missing tokens
default: False
type: bool
required: False
skiphtmlrewrite:
description:
- Protection against SQL
default: False
type: bool
required: False
skiptft:
description:
- Enable true file type control
default: False
type: bool
required: False
skipurl:
description:
- Enable static URL hardening
default: False
type: bool
required: False
source:
description:
- Define which categories of threats are skipped
type: list
default: []
required: False
status:
description:
- Status of the exception rule set
default: True
type: bool
required: False
extends_documentation_fragment:
- utm
"""
EXAMPLES = """
- name: Create UTM proxy_exception
utm_proxy_exception:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestExceptionEntry
backend: REF_OBJECT_STRING
state: present
- name: Remove UTM proxy_exception
utm_proxy_exception:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestExceptionEntry
state: absent
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: string
_locked:
description: Whether or not the object is currently locked
type: boolean
_type:
description: The type of the object
type: string
name:
description: The name of the object
type: string
comment:
description: The optional comment string
op:
description: The operand to be used with the entries of the path parameter
type: string
path:
description: The paths the exception in the reverse proxy is defined for
type: array
skip_custom_threats_filters:
description: A list of threats to be skipped
type: array
skip_threats_filter_categories:
description: Define which categories of threats are skipped
type: array
skipav:
description: Skip the Antivirus Scanning
type: bool
skipbadclients:
description: Block clients with bad reputation
type: bool
skipcookie:
description: Skip the Cookie Signing check
type: bool
skipform:
description: Enable form hardening
type: bool
skipform_missingtoken:
description: Enable form hardening with missing tokens
type: bool
skiphtmlrewrite:
description: Protection against SQL
type: bool
skiptft:
description: Enable true file type control
type: bool
skipurl:
description: Enable static URL hardening
type: bool
source:
description: Define which categories of threats are skipped
type: array
"""
from ansible.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
endpoint = "reverse_proxy/exception"
key_to_check_for_changes = ["op", "path", "skip_custom_threats_filters", "skip_threats_filter_categories", "skipav",
"comment", "skipbadclients", "skipcookie", "skipform", "status", "skipform_missingtoken",
"skiphtmlrewrite", "skiptft", "skipurl", "source"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
op=dict(type='str', required=False, default='AND', choices=['AND', 'OR']),
path=dict(type='list', elements='string', required=False, default=[]),
skip_custom_threats_filters=dict(type='list', elements='string', required=False, default=[]),
skip_threats_filter_categories=dict(type='list', elements='string', required=False, default=[]),
skipav=dict(type='bool', required=False, default=False),
skipbadclients=dict(type='bool', required=False, default=False),
skipcookie=dict(type='bool', required=False, default=False),
skipform=dict(type='bool', required=False, default=False),
skipform_missingtoken=dict(type='bool', required=False, default=False),
skiphtmlrewrite=dict(type='bool', required=False, default=False),
skiptft=dict(type='bool', required=False, default=False),
skipurl=dict(type='bool', required=False, default=False),
source=dict(type='list', elements='string', required=False, default=[]),
status=dict(type='bool', required=False, default=True),
)
)
try:
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 | -5,666,724,200,757,903,000 | 29.987603 | 121 | 0.595546 | false |
Pexego/odoo | openerp/tools/amount_to_text_en.py | 441 | 5103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from translate import _
_logger = logging.getLogger(__name__)
#-------------------------------------------------------------
#ENGLISH
#-------------------------------------------------------------
to_19 = ( 'Zero', 'One', 'Two', 'Three', 'Four', 'Five', 'Six',
'Seven', 'Eight', 'Nine', 'Ten', 'Eleven', 'Twelve', 'Thirteen',
'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen' )
tens = ( 'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety')
denom = ( '',
'Thousand', 'Million', 'Billion', 'Trillion', 'Quadrillion',
'Quintillion', 'Sextillion', 'Septillion', 'Octillion', 'Nonillion',
'Decillion', 'Undecillion', 'Duodecillion', 'Tredecillion', 'Quattuordecillion',
'Sexdecillion', 'Septendecillion', 'Octodecillion', 'Novemdecillion', 'Vigintillion' )
def _convert_nn(val):
"""convert a value < 100 to English.
"""
if val < 20:
return to_19[val]
for (dcap, dval) in ((k, 20 + (10 * v)) for (v, k) in enumerate(tens)):
if dval + 10 > val:
if val % 10:
return dcap + '-' + to_19[val % 10]
return dcap
def _convert_nnn(val):
"""
convert a value < 1000 to english, special cased because it is the level that kicks
off the < 100 special case. The rest are more general. This also allows you to
get strings in the form of 'forty-five hundred' if called directly.
"""
word = ''
(mod, rem) = (val % 100, val // 100)
if rem > 0:
word = to_19[rem] + ' Hundred'
if mod > 0:
word += ' '
if mod > 0:
word += _convert_nn(mod)
return word
def english_number(val):
if val < 100:
return _convert_nn(val)
if val < 1000:
return _convert_nnn(val)
for (didx, dval) in ((v - 1, 1000 ** v) for v in range(len(denom))):
if dval > val:
mod = 1000 ** didx
l = val // mod
r = val - (l * mod)
ret = _convert_nnn(l) + ' ' + denom[didx]
if r > 0:
ret = ret + ', ' + english_number(r)
return ret
def amount_to_text(number, currency):
number = '%.2f' % number
units_name = currency
list = str(number).split('.')
start_word = english_number(int(list[0]))
end_word = english_number(int(list[1]))
cents_number = int(list[1])
cents_name = (cents_number > 1) and 'Cents' or 'Cent'
return ' '.join(filter(None, [start_word, units_name, (start_word or units_name) and (end_word or cents_name) and 'and', end_word, cents_name]))
#-------------------------------------------------------------
# Generic functions
#-------------------------------------------------------------
_translate_funcs = {'en' : amount_to_text}
#TODO: we should use the country AND language (ex: septante VS soixante dix)
#TODO: we should use en by default, but the translation func is yet to be implemented
def amount_to_text(nbr, lang='en', currency='euro'):
""" Converts an integer to its textual representation, using the language set in the context if any.
Example::
            1654: One Thousand, Six Hundred Fifty-Four.
"""
import openerp.loglevels as loglevels
# if nbr > 10000000:
# _logger.warning(_("Number too large '%d', can not translate it"))
# return str(nbr)
if not _translate_funcs.has_key(lang):
_logger.warning(_("no translation function found for lang: '%s'"), lang)
#TODO: (default should be en) same as above
lang = 'en'
return _translate_funcs[lang](abs(nbr), currency)
if __name__=='__main__':
from sys import argv
lang = 'nl'
if len(argv) < 2:
for i in range(1,200):
            print i, ">>", amount_to_text(i, lang)
        for i in range(200,999999,139):
            print i, ">>", amount_to_text(i, lang)
    else:
        print amount_to_text(int(argv[1]), lang)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 5,969,969,919,104,919,000 | 37.08209 | 148 | 0.538899 | false |
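The recursion in english_number boils down to splitting the number into three-digit groups and pairing each group with a scale word from denom. The following standalone sketch shows just that split; it is not a function from the module above and the sample number is arbitrary.

denom = ('', 'Thousand', 'Million', 'Billion')

def groups_of_three(n):
    digits, groups = str(n), []
    while digits:
        digits, tail = digits[:-3], digits[-3:]
        groups.append(int(tail))
    # pair each group with its scale word, most significant first
    return [(g, denom[i]) for i, g in enumerate(groups)][::-1]

print(groups_of_three(1234567))
# [(1, 'Million'), (234, 'Thousand'), (567, '')] -> "One Million, Two Hundred
# Thirty-Four Thousand, Five Hundred Sixty-Seven" once each group is spelled out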
cloudbase/cinder | cinder/tests/unit/volume/drivers/emc/scaleio/test_consistencygroups.py | 6 | 10098 | # Copyright (c) 2013 - 2016 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from cinder import context
from cinder.tests.unit.consistencygroup import fake_consistencygroup
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import fake_snapshot
from cinder.tests.unit import fake_volume
from cinder.tests.unit.volume.drivers.emc import scaleio
from cinder.tests.unit.volume.drivers.emc.scaleio import mocks
class TestConsistencyGroups(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver consistency groups support``"""
def setUp(self):
"""Setup a test case environment.
Creates a fake volume object and sets up the required API responses.
"""
super(TestConsistencyGroups, self).setUp()
self.ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.consistency_group = (
fake_consistencygroup.fake_consistencyobject_obj(
self.ctx, **{'id': fake.CONSISTENCY_GROUP_ID}))
fake_volume1 = fake_volume.fake_volume_obj(
self.ctx,
**{'id': fake.VOLUME_ID, 'provider_id': fake.PROVIDER_ID})
fake_volume2 = fake_volume.fake_volume_obj(
self.ctx,
**{'id': fake.VOLUME2_ID, 'provider_id': fake.PROVIDER2_ID})
fake_volume3 = fake_volume.fake_volume_obj(
self.ctx,
**{'id': fake.VOLUME3_ID, 'provider_id': fake.PROVIDER3_ID})
fake_volume4 = fake_volume.fake_volume_obj(
self.ctx,
**{'id': fake.VOLUME4_ID, 'provider_id': fake.PROVIDER4_ID})
self.volumes = [fake_volume1, fake_volume2]
self.volumes2 = [fake_volume3, fake_volume4]
fake_snapshot1 = fake_snapshot.fake_snapshot_obj(
self.ctx,
**{'id': fake.SNAPSHOT_ID, 'volume_id': fake.VOLUME_ID,
'volume': fake_volume1})
fake_snapshot2 = fake_snapshot.fake_snapshot_obj(
self.ctx,
**{'id': fake.SNAPSHOT2_ID, 'volume_id': fake.VOLUME2_ID, 'volume':
fake_volume2})
self.snapshots = [fake_snapshot1, fake_snapshot2]
self.snapshot_reply = json.dumps({
'volumeIdList': ['sid1', 'sid2'],
'snapshotGroupId': 'sgid1'})
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'instances/Volume::{}/action/removeVolume'.format(
fake_volume1['provider_id']
): fake_volume1['provider_id'],
'instances/Volume::{}/action/removeVolume'.format(
fake_volume2['provider_id']
): fake_volume2['provider_id'],
'instances/Volume::{}/action/removeMappedSdc'.format(
fake_volume1['provider_id']
): fake_volume1['provider_id'],
'instances/Volume::{}/action/removeMappedSdc'.format(
fake_volume2['provider_id']
): fake_volume2['provider_id'],
'instances/System/action/snapshotVolumes':
self.snapshot_reply,
},
self.RESPONSE_MODE.BadStatus: {
'instances/Volume::{}/action/removeVolume'.format(
fake_volume1['provider_id']
): mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
'instances/Volume::{}/action/removeVolume'.format(
fake_volume2['provider_id']
): mocks.MockHTTPSResponse(
{
'errorCode': 401,
'message': 'BadStatus Volume Test',
}, 401
),
'instances/System/action/snapshotVolumes':
self.BAD_STATUS_RESPONSE
},
}
def _fake_cgsnapshot(self):
cgsnap = {'id': 'cgsid', 'name': 'testsnap',
'consistencygroup_id': fake.CONSISTENCY_GROUP_ID,
'status': 'available'}
return cgsnap
def test_create_consistencygroup(self):
result = self.driver.create_consistencygroup(self.ctx,
self.consistency_group)
self.assertEqual('available', result['status'])
def test_delete_consistencygroup_valid(self):
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
self.driver.configuration.set_override(
'sio_unmap_volume_before_deletion',
override=True)
result_model_update, result_volumes_update = (
self.driver.delete_consistencygroup(self.ctx,
self.consistency_group,
self.volumes))
self.assertTrue(all(volume['status'] == 'deleted' for volume in
result_volumes_update))
self.assertEqual('deleted', result_model_update['status'])
def test_delete_consistency_group_fail(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
result_model_update, result_volumes_update = (
self.driver.delete_consistencygroup(self.ctx,
self.consistency_group,
self.volumes))
self.assertTrue(any(volume['status'] == 'error_deleting' for volume in
result_volumes_update))
self.assertIn(result_model_update['status'],
['error_deleting', 'error'])
def test_create_consistencygroup_from_cg(self):
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
result_model_update, result_volumes_model_update = (
self.driver.create_consistencygroup_from_src(
self.ctx, self.consistency_group, self.volumes2,
source_cg=self.consistency_group, source_vols=self.volumes))
self.assertEqual('available', result_model_update['status'])
get_pid = lambda snapshot: snapshot['provider_id']
volume_provider_list = list(map(get_pid, result_volumes_model_update))
self.assertListEqual(volume_provider_list, ['sid1', 'sid2'])
def test_create_consistencygroup_from_cgs(self):
self.snapshots[0]['provider_id'] = fake.PROVIDER_ID
self.snapshots[1]['provider_id'] = fake.PROVIDER2_ID
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
result_model_update, result_volumes_model_update = (
self.driver.create_consistencygroup_from_src(
self.ctx, self.consistency_group, self.volumes2,
cgsnapshot=self._fake_cgsnapshot(),
snapshots=self.snapshots))
self.assertEqual('available', result_model_update['status'])
get_pid = lambda snapshot: snapshot['provider_id']
volume_provider_list = list(map(get_pid, result_volumes_model_update))
self.assertListEqual(['sid1', 'sid2'], volume_provider_list)
@mock.patch('cinder.objects.snapshot')
@mock.patch('cinder.objects.snapshot')
def test_create_cgsnapshots(self, snapshot1, snapshot2):
type(snapshot1).volume = mock.PropertyMock(
return_value=self.volumes[0])
type(snapshot2).volume = mock.PropertyMock(
return_value=self.volumes[1])
snapshots = [snapshot1, snapshot2]
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
result_model_update, result_snapshot_model_update = (
self.driver.create_cgsnapshot(
self.ctx,
self._fake_cgsnapshot(),
snapshots
))
self.assertEqual('available', result_model_update['status'])
self.assertTrue(all(snapshot['status'] == 'available' for snapshot in
result_snapshot_model_update))
get_pid = lambda snapshot: snapshot['provider_id']
snapshot_provider_list = list(map(get_pid,
result_snapshot_model_update))
self.assertListEqual(['sid1', 'sid2'], snapshot_provider_list)
@mock.patch('cinder.objects.snapshot')
@mock.patch('cinder.objects.snapshot')
def test_delete_cgsnapshots(self, snapshot1, snapshot2):
type(snapshot1).volume = mock.PropertyMock(
return_value=self.volumes[0])
type(snapshot2).volume = mock.PropertyMock(
return_value=self.volumes[1])
type(snapshot1).provider_id = mock.PropertyMock(
return_value=fake.PROVIDER_ID)
type(snapshot2).provider_id = mock.PropertyMock(
return_value=fake.PROVIDER2_ID)
snapshots = [snapshot1, snapshot2]
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
result_model_update, result_snapshot_model_update = (
self.driver.delete_cgsnapshot(
self.ctx,
self._fake_cgsnapshot(),
snapshots
))
self.assertEqual('deleted', result_model_update['status'])
self.assertTrue(all(snapshot['status'] == 'deleted' for snapshot in
result_snapshot_model_update))
| apache-2.0 | 6,317,232,707,455,018,000 | 45.85782 | 79 | 0.575163 | false |
zsiki/ulyxes | pyapi/tcpiface.py | 1 | 4981 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. module:: tcpiface.py
:platform: Unix, Windows
:synopsis: Ulyxes - an open source project to drive total stations and
publish observation results. GPL v2.0 license Copyright (C)
2010- Zoltan Siki <siki.zoltan@epito.bme.hu>.
.. moduleauthor:: Bence Turak <bence.turak@gmail.com>
"""
import sys
import socket
import logging
import re
from iface import Iface
class TCPIface(Iface):
"""Interface to communicate on TCP/IP protocol. This class requires socket.
:param name: name of tcp interface (str)
:param address: address of server (tuple) (ip(str), port(int))
:param bufSize: size of buffer in case of file (int)
:param timeout: communication timeout seconds (int), default 15
"""
def __init__(self, name, address, bufSize = 1024, timeout=15):
""" Constructor for TCP socket interface
"""
super(TCPIface, self).__init__(name)
self.sock = None
self.bufSize = None
# open socket
self.Open(address, bufSize, timeout)
def __del__(self):
""" Destructor for TCP socket interface
"""
self.Close()
def Open(self, address, bufSize = 1024, timeout=15):
""" Open TCP socket
"""
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(address)
self.sock.settimeout(timeout)
self.bufSize = bufSize
self.opened = True
self.state = self.IF_OK
except Exception:
self.opened = False
self.state = self.IF_ERROR
logging.error(" cannot open TCP socket")
def Close(self):
""" Close TCP socket
"""
try:
self.sock.close()
self.opened = False
self.state = self.IF_OK
except Exception:
self.state = self.IF_ERROR
logging.error(" cannot close TCP socet")
def GetLine(self, fileSize = None):
""" read from TCP interface until end of line
:param fileSize: the size of the expected file (int)
:returns: line read from TCP (str) or empty string on timeout or error, state is set also
"""
if self.sock is None or not self.opened or self.state != self.IF_OK:
logging.error(" TCP socket not opened")
return None
# read answer till end of line
ans = b''
a = b''
try:
if fileSize != None:
a = self.sock.recv(self.bufSize)
ans += a
while sys.getsizeof(ans) < fileSize + 17:
l = sys.getsizeof(ans)
a = self.sock.recv(self.bufSize)
ans += a
else:
a = self.sock.recv(1)
ans += a
while a != b'\n':
a = self.sock.recv(1)
ans += a
except Exception as e:
#self.state = self.IF_READ
logging.error(" cannot read TCP socket")
if ans == b'':
# timeout exit loop
#self.state = self.IF_TIMEOUT
logging.error(" timeout on TCP socket")
# remove end of line
logging.debug(" message got: %s", ans)
ans = ans.strip(b'\n')
return ans
def PutLine(self, msg):
""" send message through the TCP socket
:param msg: message to send (str)
:returns: 0 - on OK, -1 on error or interface is in error state
"""
# do nothing if interface is in error state
if self.sock is None or not self.opened or self.state != self.IF_OK:
logging.error(" TCP socket not opened or in error state")
return -1
# add CR/LF to message end
if (msg[-1:] != '\n'):
msg += '\n'
# remove special characters
msg = msg.encode('ascii', 'ignore')
# send message to socket
logging.debug(" message sent: %s", msg)
try:
self.sock.send(msg)
except Exception:
self.state = self.IF_WRITE
logging.error(" cannot write tcp")
return -1
return 0
def Send(self, msg):
""" send message to TCP socket and read answer
:param msg: message to send, it can be multipart message separated by '|' (str)
:returns: answer from server (str)
"""
msglist = re.split("\|", msg)
res = b''
# sending
for m in msglist:
if self.PutLine(m) == 0:
res += self.GetLine() + b"|"
if res.endswith(b"|"):
res = res[:-1]
res = res.decode('ascii')
return res
if __name__ == "__main__":
a = TCPIface('test', ('127.0.0.1', 80), 1024, 15)
print (a.GetName())
print (a.GetState())
print (a.Send('GET /index.html HTTP/1.1'))
| gpl-2.0 | -1,690,776,806,671,815,200 | 30.929487 | 101 | 0.530616 | false |
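GetLine and PutLine assume a plain newline-terminated exchange. The self-contained sketch below reproduces that handshake with a throwaway echo server on the loopback interface; the port is chosen by the OS and the payload is made up, so it only illustrates the protocol, not a real instrument connection.

import socket
import threading

def echo_once(server):
    conn, _ = server.accept()
    data = b''
    while not data.endswith(b'\n'):     # read a full line, like GetLine
        data += conn.recv(1)
    conn.sendall(b'echo:' + data)       # answer is newline-terminated as well
    conn.close()

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind(('127.0.0.1', 0))
srv.listen(1)
threading.Thread(target=echo_once, args=(srv,)).start()

cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cli.connect(srv.getsockname())
cli.sendall(b'hello\n')                 # PutLine appends the '\n' the same way
reply = b''
while not reply.endswith(b'\n'):
    reply += cli.recv(1)
print(reply.strip(b'\n'))               # b'echo:hello'
cli.close()
srv.close()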
nave91/dbt | dbt/compilation.py | 1 | 7958 | import itertools
import os
import json
from collections import OrderedDict, defaultdict
import sqlparse
import dbt.project
import dbt.utils
import dbt.include
import dbt.tracking
from dbt.utils import get_materialization, NodeType, is_type
from dbt.linker import Linker
import dbt.compat
import dbt.context.runtime
import dbt.contracts.project
import dbt.exceptions
import dbt.flags
import dbt.loader
from dbt.contracts.graph.compiled import CompiledNode, CompiledGraph
from dbt.clients.system import write_json
from dbt.logger import GLOBAL_LOGGER as logger
graph_file_name = 'graph.gpickle'
manifest_file_name = 'manifest.json'
def print_compile_stats(stats):
names = {
NodeType.Model: 'models',
NodeType.Test: 'tests',
NodeType.Archive: 'archives',
NodeType.Analysis: 'analyses',
NodeType.Macro: 'macros',
NodeType.Operation: 'operations',
NodeType.Seed: 'seed files',
}
results = {k: 0 for k in names.keys()}
results.update(stats)
stat_line = ", ".join(
["{} {}".format(ct, names.get(t)) for t, ct in results.items()])
logger.info("Found {}".format(stat_line))
def _add_prepended_cte(prepended_ctes, new_cte):
for dct in prepended_ctes:
if dct['id'] == new_cte['id']:
dct['sql'] = new_cte['sql']
return
prepended_ctes.append(new_cte)
def _extend_prepended_ctes(prepended_ctes, new_prepended_ctes):
for new_cte in new_prepended_ctes:
_add_prepended_cte(prepended_ctes, new_cte)
def prepend_ctes(model, manifest):
model, _, manifest = recursively_prepend_ctes(model, manifest)
return (model, manifest)
def recursively_prepend_ctes(model, manifest):
if model.extra_ctes_injected:
return (model, model.extra_ctes, manifest)
if dbt.flags.STRICT_MODE:
# ensure that all the nodes in this manifest are compiled
CompiledGraph(**manifest.to_flat_graph())
prepended_ctes = []
for cte in model.extra_ctes:
cte_id = cte['id']
cte_to_add = manifest.nodes.get(cte_id)
cte_to_add, new_prepended_ctes, manifest = recursively_prepend_ctes(
cte_to_add, manifest)
_extend_prepended_ctes(prepended_ctes, new_prepended_ctes)
new_cte_name = '__dbt__CTE__{}'.format(cte_to_add.get('name'))
sql = ' {} as (\n{}\n)'.format(new_cte_name, cte_to_add.compiled_sql)
_add_prepended_cte(prepended_ctes, {'id': cte_id, 'sql': sql})
model.prepend_ctes(prepended_ctes)
manifest.nodes[model.unique_id] = model
return (model, prepended_ctes, manifest)
class Compiler(object):
def __init__(self, project):
self.project = project
def initialize(self):
dbt.clients.system.make_directory(self.project['target-path'])
dbt.clients.system.make_directory(self.project['modules-path'])
def compile_node(self, node, manifest):
logger.debug("Compiling {}".format(node.get('unique_id')))
data = node.to_dict()
data.update({
'compiled': False,
'compiled_sql': None,
'extra_ctes_injected': False,
'extra_ctes': [],
'injected_sql': None,
})
compiled_node = CompiledNode(**data)
context = dbt.context.runtime.generate(
compiled_node, self.project, manifest)
compiled_node.compiled_sql = dbt.clients.jinja.get_rendered(
node.get('raw_sql'),
context,
node)
compiled_node.compiled = True
injected_node, _ = prepend_ctes(compiled_node, manifest)
should_wrap = {NodeType.Test, NodeType.Analysis, NodeType.Operation}
if injected_node.resource_type in should_wrap:
# data tests get wrapped in count(*)
# TODO : move this somewhere more reasonable
if 'data' in injected_node.tags and \
is_type(injected_node, NodeType.Test):
injected_node.wrapped_sql = (
"select count(*) from (\n{test_sql}\n) sbq").format(
test_sql=injected_node.injected_sql)
else:
# don't wrap schema tests or analyses.
injected_node.wrapped_sql = injected_node.injected_sql
elif is_type(injected_node, NodeType.Archive):
# unfortunately we do everything automagically for
# archives. in the future it'd be nice to generate
# the SQL at the parser level.
pass
elif(is_type(injected_node, NodeType.Model) and
get_materialization(injected_node) == 'ephemeral'):
pass
else:
injected_node.wrapped_sql = None
return injected_node
def write_manifest_file(self, manifest):
"""Write the manifest file to disk.
manifest should be a Manifest.
"""
filename = manifest_file_name
manifest_path = os.path.join(self.project['target-path'], filename)
write_json(manifest_path, manifest.serialize())
def write_graph_file(self, linker):
filename = graph_file_name
graph_path = os.path.join(self.project['target-path'], filename)
linker.write_graph(graph_path)
def link_node(self, linker, node, manifest):
linker.add_node(node.unique_id)
linker.update_node_data(
node.unique_id,
node.to_dict())
for dependency in node.depends_on_nodes:
if manifest.nodes.get(dependency):
linker.dependency(
node.unique_id,
(manifest.nodes.get(dependency).unique_id))
else:
dbt.exceptions.dependency_not_found(node, dependency)
def link_graph(self, linker, manifest):
for node in manifest.nodes.values():
self.link_node(linker, node, manifest)
cycle = linker.find_cycles()
if cycle:
raise RuntimeError("Found a cycle: {}".format(cycle))
def get_all_projects(self):
root_project = self.project.cfg
all_projects = {root_project.get('name'): root_project}
dependency_projects = dbt.utils.dependency_projects(self.project)
for project in dependency_projects:
name = project.cfg.get('name', 'unknown')
all_projects[name] = project.cfg
if dbt.flags.STRICT_MODE:
dbt.contracts.project.ProjectList(**all_projects)
return all_projects
def _check_resource_uniqueness(cls, manifest):
names_resources = {}
alias_resources = {}
for resource, node in manifest.nodes.items():
if node.resource_type not in NodeType.refable():
continue
name = node.name
alias = "{}.{}".format(node.schema, node.alias)
existing_node = names_resources.get(name)
if existing_node is not None:
dbt.exceptions.raise_duplicate_resource_name(
existing_node, node)
existing_alias = alias_resources.get(alias)
if existing_alias is not None:
dbt.exceptions.raise_ambiguous_alias(
existing_alias, node)
names_resources[name] = node
alias_resources[alias] = node
def compile(self):
linker = Linker()
all_projects = self.get_all_projects()
manifest = dbt.loader.GraphLoader.load_all(self.project, all_projects)
self.write_manifest_file(manifest)
self._check_resource_uniqueness(manifest)
self.link_graph(linker, manifest)
stats = defaultdict(int)
for node_name, node in itertools.chain(
manifest.nodes.items(),
manifest.macros.items()):
stats[node.resource_type] += 1
self.write_graph_file(linker)
print_compile_stats(stats)
return manifest, linker
| apache-2.0 | 8,952,308,713,549,154,000 | 29.725869 | 78 | 0.608696 | false |
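The effect of prepend_ctes is easier to see outside the class machinery. This deliberately simplified sketch is not dbt's implementation (the real wrapping lives in CompiledNode.prepend_ctes, which is defined elsewhere), and the model SQL and CTE name are invented; it only shows how an ephemeral model's compiled SQL ends up as a __dbt__CTE__ block in front of the referencing model.

def inject_ctes(model_sql, ctes):
    # ctes maps ephemeral model name -> its compiled SQL
    if not ctes:
        return model_sql
    cte_sql = ',\n'.join(
        '__dbt__CTE__{} as (\n{}\n)'.format(name, sql)
        for name, sql in ctes.items())
    return 'with {}\n{}'.format(cte_sql, model_sql)

print(inject_ctes('select count(*) from __dbt__CTE__orders_base',
                  {'orders_base': 'select * from raw.orders'}))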
matehall/Python-koan | python 3/koans/about_dice_project.py | 14 | 1958 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
import random
class DiceSet:
def __init__(self):
self._values = None
@property
def values(self):
return self._values
def roll(self, n):
# Needs implementing!
# Tip: random.randint(min, max) can be used to generate random numbers
pass
class AboutDiceProject(Koan):
def test_can_create_a_dice_set(self):
dice = DiceSet()
self.assertTrue(dice)
def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
dice = DiceSet()
dice.roll(5)
self.assertTrue(isinstance(dice.values, list), "should be a list")
self.assertEqual(5, len(dice.values))
for value in dice.values:
self.assertTrue(value >= 1 and value <= 6, "value " + str(value) + " must be between 1 and 6")
def test_dice_values_do_not_change_unless_explicitly_rolled(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
second_time = dice.values
self.assertEqual(first_time, second_time)
def test_dice_values_should_change_between_rolls(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
dice.roll(5)
second_time = dice.values
self.assertNotEqual(first_time, second_time, \
"Two rolls should not be equal")
# THINK ABOUT IT:
#
# If the rolls are random, then it is possible (although not
# likely) that two consecutive rolls are equal. What would be a
# better way to test this?
def test_you_can_roll_different_numbers_of_dice(self):
dice = DiceSet()
dice.roll(3)
self.assertEqual(3, len(dice.values))
dice.roll(1)
self.assertEqual(1, len(dice.values))
| mit | -5,772,233,633,943,863,000 | 28.223881 | 106 | 0.569459 | false |
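One answer to the "THINK ABOUT IT" question above is to make the randomness reproducible instead of comparing two live rolls. The sketch below uses random.Random directly rather than DiceSet (which the koan leaves unimplemented on purpose), so the class name, seeds and roll counts are illustrative only.

import random
import unittest

class SeededDiceTest(unittest.TestCase):
    def test_rolls_are_reproducible_with_a_seed(self):
        rng = random.Random(1234)
        first = [rng.randint(1, 6) for _ in range(5)]
        rng = random.Random(1234)
        second = [rng.randint(1, 6) for _ in range(5)]
        self.assertEqual(first, second)

    def test_many_rolls_only_produce_valid_faces(self):
        rng = random.Random(42)
        values = [rng.randint(1, 6) for _ in range(1000)]
        self.assertEqual(set(range(1, 7)), set(values))

if __name__ == '__main__':
    unittest.main()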
rudhir-upretee/Sumo_With_Netsim | docs/tutorial/city_mobil/statistics.py | 6 | 4395 | #!/usr/bin/env python
"""
@file statistics.py
@author Michael Behrisch
@author Daniel Krajzewicz
@date 2008-10-17
@version $Id: statistics.py 12898 2012-10-26 08:58:14Z behrisch $
Collecting statistics for the CityMobil parking lot
SUMO, Simulation of Urban MObility; see http://sumo.sourceforge.net/
Copyright (C) 2008-2012 DLR (http://www.dlr.de/) and contributors
All rights reserved
"""
persons = {}
personsRunning = 0
class Person:
def __init__(self, id, source, target, step):
self.id = id
self.source = source
self.target = target
self.waitStart = step
self.depart = None
self.arrive = None
def personArrived(personID, edge, target, step):
global personsRunning
persons[personID] = Person(personID, edge, target, step)
personsRunning += 1
def personLoaded(personID, step):
persons[personID].depart = step
def personUnloaded(personID, step):
global personsRunning
persons[personID].arrive = step
personsRunning -= 1
def evaluate(forTest=False):
try:
import numpy, math
except ImportError:
print "No numpy available, skipping statistics"
return
waitTimes = []
routeTimes = {}
for person in persons.itervalues():
waitTimes.append(person.depart - person.waitStart)
route = (person.source, person.target)
if not route in routeTimes:
routeTimes[route] = []
routeTimes[route].append(person.arrive - person.depart)
waitArray = numpy.array(waitTimes)
if forTest:
print "waiting time (max, mean, dev):", waitArray.max() < 1000, waitArray.mean() < 1000, math.sqrt(waitArray.var()) < 100
else:
print "waiting time (max, mean, dev):", waitArray.max(), waitArray.mean(), math.sqrt(waitArray.var())
for route, times in sorted(routeTimes.iteritems()):
timeArray = numpy.array(times)
if forTest:
print route, timeArray.max() < 1000, timeArray.mean() < 1000, math.sqrt(timeArray.var()) < 100
else:
print route, timeArray.max(), timeArray.mean(), math.sqrt(timeArray.var())
co2 = 0.
for line in open("aggregated.xml"):
if "cyber" in line:
pos = line.find('CO2_abs="') + 9
if pos >= 9:
endpos = line.find('"', pos)
co2 += float(line[pos:endpos])
if forTest:
print "CO2:", co2 < 10000000
else:
print "CO2:", co2
if __name__ == "__main__":
from pylab import *
stats = open(sys.argv[1])
demand = []
simpleWaitMean = []
agentWaitMean = []
simpleWaitDev = []
agentWaitDev = []
simpleRouteMean = []
agentRouteMean = []
simpleRouteDev = []
agentRouteDev = []
for line in stats:
if "simple" in line:
mean = simpleWaitMean
dev = simpleWaitDev
rmean = simpleRouteMean
rdev = simpleRouteDev
demand.append(int(line.split()[-1]))
if "agent" in line:
mean = agentWaitMean
dev = agentWaitDev
rmean = agentRouteMean
rdev = agentRouteDev
if "waiting" in line:
mean.append(float(line.split()[-2]))
dev.append(float(line.split()[-1]))
if line.startswith("('footmain0to1'"):
rmean.append(float(line.split()[-2]))
rdev.append(float(line.split()[-1]))
stats.close()
figure()
errorbar(demand, simpleWaitMean, simpleWaitDev, lw=2, ms=10, fmt='o', label='standard bus scenario')
errorbar(demand, agentWaitMean, agentWaitDev, lw=2, ms=10, color="red", fmt='o', label='agent controlled cyber cars')
xlim(0, 50)
ylim(0, 3300)
xlabel('Repeater interval (s)')
ylabel('Waiting time (s)')
title('Mean and standard deviation of waiting time')
legend(numpoints=1)
savefig("waitingtime.png")
figure()
errorbar(demand, simpleRouteMean, simpleRouteDev, lw=2, ms=10, fmt='o', label='standard bus scenario')
errorbar(demand, agentRouteMean, agentRouteDev, lw=2, ms=10, color="red", fmt='o', label='agent controlled cyber cars')
xlim(0, 50)
ylim(0, 300)
xlabel('Repeater interval (s)')
ylabel('Travel time (s)')
title('Mean and standard deviation of travel time on the longest route')
legend(numpoints=1)
savefig("traveltime.png")
show()
| gpl-3.0 | -5,872,591,375,053,857,000 | 32.549618 | 129 | 0.612514 | false |
edx/lettuce | tests/integration/lib/Django-1.3/django/core/management/sql.py | 229 | 8259 | import os
import re
from django.conf import settings
from django.core.management.base import CommandError
from django.db import models
from django.db.models import get_models
def sql_create(app, style, connection):
"Returns a list of the CREATE TABLE SQL statements for the given app."
if connection.settings_dict['ENGINE'] == 'django.db.backends.dummy':
# This must be the "dummy" database backend, which means the user
        # hasn't set ENGINE for the database.
        raise CommandError("Django doesn't know which syntax to use for your SQL statements,\n" +
            "because you haven't specified the ENGINE setting for the database.\n" +
            "Edit your settings file and change DATABASES['default']['ENGINE'] to something like\n" +
"'django.db.backends.postgresql' or 'django.db.backends.mysql'.")
# Get installed models, so we generate REFERENCES right.
# We trim models from the current app so that the sqlreset command does not
# generate invalid SQL (leaving models out of known_models is harmless, so
# we can be conservative).
app_models = models.get_models(app, include_auto_created=True)
final_output = []
tables = connection.introspection.table_names()
known_models = set([model for model in connection.introspection.installed_models(tables) if model not in app_models])
pending_references = {}
for model in app_models:
output, references = connection.creation.sql_create_model(model, style, known_models)
final_output.extend(output)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in known_models:
final_output.extend(connection.creation.sql_for_pending_references(refto, style, pending_references))
final_output.extend(connection.creation.sql_for_pending_references(model, style, pending_references))
# Keep track of the fact that we've created the table for this model.
known_models.add(model)
# Handle references to tables that are from other apps
# but don't exist physically.
not_installed_models = set(pending_references.keys())
if not_installed_models:
alter_sql = []
for model in not_installed_models:
alter_sql.extend(['-- ' + sql for sql in
connection.creation.sql_for_pending_references(model, style, pending_references)])
if alter_sql:
final_output.append('-- The following references should be added but depend on non-existent tables:')
final_output.extend(alter_sql)
return final_output
def sql_delete(app, style, connection):
"Returns a list of the DROP TABLE SQL statements for the given app."
# This should work even if a connection isn't available
try:
cursor = connection.cursor()
except:
cursor = None
# Figure out which tables already exist
if cursor:
table_names = connection.introspection.get_table_list(cursor)
else:
table_names = []
output = []
# Output DROP TABLE statements for standard application tables.
to_delete = set()
references_to_delete = {}
app_models = models.get_models(app, include_auto_created=True)
for model in app_models:
if cursor and connection.introspection.table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append( (model, f) )
to_delete.add(model)
for model in app_models:
if connection.introspection.table_name_converter(model._meta.db_table) in table_names:
output.extend(connection.creation.sql_destroy_model(model, references_to_delete, style))
# Close database connection explicitly, in case this output is being piped
# directly into a database client, to avoid locking issues.
if cursor:
cursor.close()
connection.close()
return output[::-1] # Reverse it, to deal with table dependencies.
def sql_reset(app, style, connection):
"Returns a list of the DROP TABLE SQL, then the CREATE TABLE SQL, for the given module."
# This command breaks a lot and should be deprecated
import warnings
warnings.warn(
'This command has been deprecated. The command ``sqlflush`` can be used to delete everything. You can also use ALTER TABLE or DROP TABLE statements manually.',
PendingDeprecationWarning
)
return sql_delete(app, style, connection) + sql_all(app, style, connection)
def sql_flush(style, connection, only_django=False):
"""
Returns a list of the SQL statements used to flush the database.
If only_django is True, then only table names that have associated Django
models and are in INSTALLED_APPS will be included.
"""
if only_django:
tables = connection.introspection.django_table_names(only_existing=True)
else:
tables = connection.introspection.table_names()
statements = connection.ops.sql_flush(
style, tables, connection.introspection.sequence_list()
)
return statements
def sql_custom(app, style, connection):
"Returns a list of the custom table modifying SQL statements for the given app."
output = []
app_models = get_models(app)
app_dir = os.path.normpath(os.path.join(os.path.dirname(app.__file__), 'sql'))
for model in app_models:
output.extend(custom_sql_for_model(model, style, connection))
return output
def sql_indexes(app, style, connection):
"Returns a list of the CREATE INDEX SQL statements for all models in the given app."
output = []
for model in models.get_models(app):
output.extend(connection.creation.sql_indexes_for_model(model, style))
return output
def sql_all(app, style, connection):
"Returns a list of CREATE TABLE SQL, initial-data inserts, and CREATE INDEX SQL for the given module."
return sql_create(app, style, connection) + sql_custom(app, style, connection) + sql_indexes(app, style, connection)
def custom_sql_for_model(model, style, connection):
opts = model._meta
app_dir = os.path.normpath(os.path.join(os.path.dirname(models.get_app(model._meta.app_label).__file__), 'sql'))
output = []
# Post-creation SQL should come before any initial SQL data is loaded.
# However, this should not be done for models that are unmanaged or
# for fields that are part of a parent model (via model inheritance).
if opts.managed:
post_sql_fields = [f for f in opts.local_fields if hasattr(f, 'post_create_sql')]
for f in post_sql_fields:
output.extend(f.post_create_sql(style, model._meta.db_table))
# Some backends can't execute more than one SQL statement at a time,
# so split into separate statements.
statements = re.compile(r";[ \t]*$", re.M)
# Find custom SQL, if it's available.
backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
sql_files = [os.path.join(app_dir, "%s.%s.sql" % (opts.object_name.lower(), backend_name)),
os.path.join(app_dir, "%s.sql" % opts.object_name.lower())]
for sql_file in sql_files:
if os.path.exists(sql_file):
fp = open(sql_file, 'U')
for statement in statements.split(fp.read().decode(settings.FILE_CHARSET)):
# Remove any comments from the file
statement = re.sub(ur"--.*([\n\Z]|$)", "", statement)
if statement.strip():
output.append(statement + u";")
fp.close()
return output
def emit_post_sync_signal(created_models, verbosity, interactive, db):
# Emit the post_sync signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print "Running post-sync handlers for application", app_name
models.signals.post_syncdb.send(sender=app, app=app,
created_models=created_models, verbosity=verbosity,
interactive=interactive, db=db)
| gpl-3.0 | 4,728,522,245,343,453,000 | 42.468421 | 167 | 0.668604 | false |
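The statement splitting inside custom_sql_for_model is easiest to see on its own. The standalone sketch below uses the same regex; the table name and SQL text are invented.

import re

statements = re.compile(r";[ \t]*$", re.M)
custom_sql = """
INSERT INTO myapp_tag (name) VALUES ('django');
UPDATE myapp_tag SET name = 'Django' WHERE name = 'django';
"""
for statement in statements.split(custom_sql):
    if statement.strip():
        print(statement.strip() + ";")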
pgmillon/ansible | lib/ansible/plugins/terminal/enos.py | 101 | 2824 | # (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Contains terminal Plugin methods for ENOS Config Module
# Lenovo Networking
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_text, to_bytes
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
re.compile(br">[\r\n]?")
]
terminal_stderr_re = [
re.compile(br"% ?Error"),
re.compile(br"% ?Bad secret"),
re.compile(br"invalid input", re.I),
re.compile(br"(?:incomplete|ambiguous) command", re.I),
re.compile(br"connection timed out", re.I),
re.compile(br"[^\r\n]+ not found"),
re.compile(br"'[^']' +returned error code: ?\d+"),
]
def on_open_shell(self):
try:
for cmd in (b'\n', b'terminal-length 0\n'):
self._exec_cli_command(cmd)
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
def on_become(self, passwd=None):
if self._get_prompt().endswith(b'#'):
return
cmd = {u'command': u'enable'}
if passwd:
# Note: python-3.5 cannot combine u"" and r"" together. Thus make
# an r string and use to_text to ensure it's text
# on both py2 and py3.
cmd[u'prompt'] = to_text(r"[\r\n]?password: $",
errors='surrogate_or_strict')
cmd[u'answer'] = passwd
try:
self._exec_cli_command(to_bytes(json.dumps(cmd),
errors='surrogate_or_strict'))
except AnsibleConnectionFailure:
msg = 'unable to elevate privilege to enable mode'
raise AnsibleConnectionFailure(msg)
def on_unbecome(self):
prompt = self._get_prompt()
if prompt is None:
# if prompt is None most likely the terminal is hung up at a prompt
return
if b'(config' in prompt:
self._exec_cli_command(b'end')
self._exec_cli_command(b'disable')
elif prompt.endswith(b'#'):
self._exec_cli_command(b'disable')
| gpl-3.0 | -7,970,105,739,172,920,000 | 33.024096 | 79 | 0.582861 | false |
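The terminal_stdout_re patterns decide when the device is showing a prompt and output collection can stop. A small standalone check (the prompt strings are invented) shows which lines they accept.

import re

stdout_re = [
    re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
    re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$"),
    re.compile(br">[\r\n]?"),
]

for line in (b"switch#", b"switch(config)#", b"% Error: invalid input"):
    print(line, any(r.search(line) for r in stdout_re))
# switch# and switch(config)# are treated as prompts; the error line is not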
lyapun/django-lean-42cc | django_lean/lean_analytics/tests.py | 4 | 15078 | from __future__ import with_statement
from contextlib import contextmanager
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User
from django.http import HttpRequest
from django_lean.experiments.models import (AnonymousVisitor, Experiment,
GoalRecord, GoalType, Participant)
from django_lean.experiments.tests.utils import get_session, patch, TestCase
from django_lean.experiments.utils import StaticUser, WebUser
from django_lean.lean_analytics import (get_all_analytics,
get_all_analytics_names,
reset_caches,
IdentificationError)
from django_lean.lean_analytics.base import BaseAnalytics
import traceback
import mox
class TestAnalytics(TestCase):
def test_get_all_analytics_names(self):
with patch(settings, 'LEAN_ANALYTICS', NotImplemented):
reset_caches()
self.assertEqual(get_all_analytics_names(), ())
with patch(settings, 'LEAN_ANALYTICS', []):
reset_caches()
self.assertEqual(get_all_analytics_names(), [])
base_name = '%s.%s' % (BaseAnalytics.__module__, BaseAnalytics.__name__)
with patch(settings, 'LEAN_ANALYTICS', [base_name]):
reset_caches()
self.assertEqual(get_all_analytics_names(), [base_name])
def test_get_all_analytics(self):
with patch(settings, 'LEAN_ANALYTICS', NotImplemented):
reset_caches()
self.assertEqual(get_all_analytics(), [])
with patch(settings, 'LEAN_ANALYTICS', []):
reset_caches()
self.assertEqual(get_all_analytics(), [])
base_name = '%s.%s' % (BaseAnalytics.__module__, BaseAnalytics.__name__)
with patch(settings, 'LEAN_ANALYTICS', [base_name]):
reset_caches()
self.assertEqual([a.__class__.__name__ for a in get_all_analytics()],
[BaseAnalytics.__name__])
#############
# KISSMETRICS
#############
try:
import django_kissmetrics
except ImportError:
if 'django_lean.lean_analytics.kissmetrics.KissMetrics' in \
get_all_analytics_names():
traceback.print_exc()
else:
from django_lean.lean_analytics.kissmetrics import KissMetrics
class TestKissMetrics(TestCase):
def setUp(self):
self.mox = mox.Mox()
self.analytics = KissMetrics()
def test_id_from_user(self):
user = User.objects.create_user('user', 'user@example.com', 'user')
self.assertEqual(self.analytics._id_from_user(user),
'User %d' % user.pk)
self.assertRaises(IdentificationError,
self.analytics._id_from_user, None)
def test_id_from_session(self):
# With real session
with self.web_user(AnonymousUser()) as experiment_user:
self.mox.ReplayAll()
session = experiment_user.session
self.assertEqual(
self.analytics._id_from_session(experiment_user.session),
'Session %s' % session.session_key
)
self.mox.VerifyAll()
# With dict as session
experiment_user = StaticUser()
self.assertRaises(IdentificationError,
self.analytics._id_from_session,
experiment_user.session)
def test_compute_id(self):
# With anonymous WebUser
with self.web_user(AnonymousUser()) as experiment_user:
session = experiment_user.session
self.mox.ReplayAll()
self.assertEqual(self.analytics._compute_id(experiment_user),
'Session %s' % session.session_key)
self.mox.VerifyAll()
# With authenticated WebUser
user = User.objects.create_user('user', 'user@example.com', 'user')
with self.web_user(user) as experiment_user:
self.mox.ReplayAll()
self.assertEqual(self.analytics._compute_id(experiment_user),
'User %d' % user.id)
self.mox.VerifyAll()
# With StaticUser
experiment_user = StaticUser()
self.assertRaises(IdentificationError,
self.analytics._compute_id, experiment_user)
def test_identify(self):
# With anonymous WebUser
with self.web_user(AnonymousUser()) as experiment_user:
self.mox.ReplayAll()
self.assertTrue(self.analytics._identify(experiment_user))
self.mox.VerifyAll()
# With authenticated WebUser
user = User.objects.create_user('user', 'user@example.com', 'user')
with self.web_user(user) as experiment_user:
self.mox.ReplayAll()
self.assertTrue(self.analytics._identify(experiment_user))
self.mox.VerifyAll()
# With StaticUser
experiment_user = StaticUser()
self.assertFalse(self.analytics._identify(experiment_user))
def test_enroll(self):
experiment = Experiment.objects.create(name='Experiment')
user = User.objects.create_user('user', 'user@example.com', 'user')
KM = self.mox.CreateMockAnything()
analytics = KissMetrics(KM=KM)
with self.web_user(user) as experiment_user:
KM.identify(analytics._compute_id(experiment_user))
KM.record(action='Enrolled In Experiment',
props={'Experiment': experiment.name,
'Group': 'Test'})
self.mox.ReplayAll()
analytics.enroll(experiment=experiment,
experiment_user=experiment_user,
group_id=Participant.TEST_GROUP)
self.mox.VerifyAll()
def test_record(self):
KM = self.mox.CreateMockAnything()
analytics = KissMetrics(KM=KM)
with self.web_user(AnonymousUser()) as experiment_user:
KM.identify(analytics._id_from_session(experiment_user.session))
KM.record(action='Goal Recorded',
props={'Goal Type': 'Goal Type'})
self.mox.ReplayAll()
goal_type = GoalType.objects.create(name='Goal Type')
goal_record = GoalRecord.record(goal_name=goal_type.name,
experiment_user=experiment_user)
analytics.record(goal_record=goal_record,
experiment_user=experiment_user)
self.mox.VerifyAll()
def test_event(self):
KM = self.mox.CreateMockAnything()
analytics = KissMetrics(KM=KM)
with self.web_user(AnonymousUser()) as experiment_user:
KM.identify(analytics._id_from_session(experiment_user.session))
KM.record(action='Event', props={'Foo': 'Bar'})
self.mox.ReplayAll()
analytics.event(name='Event',
properties={'Foo': 'Bar'},
request=experiment_user.request)
self.mox.VerifyAll()
@contextmanager
def web_user(self, user):
session = get_session(None)
request = self.mox.CreateMock(HttpRequest)
request.user = user
request.session = session
experiment_user = WebUser(request)
experiment_user.get_or_create_anonymous_visitor()
yield experiment_user
##########
# MIXPANEL
##########
try:
import mixpanel
except ImportError:
if 'django_lean.lean_analytics.mixpanel.Mixpanel' in \
get_all_analytics_names():
traceback.print_exc()
else:
from django_lean.lean_analytics.mixpanel import Mixpanel
class TestMixpanel(TestCase):
def setUp(self):
self.mox = mox.Mox()
self.analytics = Mixpanel()
def tearDown(self):
self.mox.UnsetStubs()
def test_id_from_user(self):
user = User.objects.create_user('user', 'user@example.com', 'user')
self.assertEqual(self.analytics._id_from_user(user),
'User %d' % user.pk)
self.assertRaises(IdentificationError,
self.analytics._id_from_user, None)
def test_id_from_session(self):
# With real session
with self.web_user(AnonymousUser()) as experiment_user:
self.mox.ReplayAll()
session = experiment_user.session
self.assertEqual(
self.analytics._id_from_session(experiment_user.session),
'Session %s' % session.session_key
)
self.mox.VerifyAll()
# With dict as session
experiment_user = StaticUser()
self.assertRaises(IdentificationError,
self.analytics._id_from_session,
experiment_user.session)
def test_compute_id(self):
# With anonymous WebUser
with self.web_user(AnonymousUser()) as experiment_user:
session = experiment_user.session
self.mox.ReplayAll()
self.assertEqual(self.analytics._compute_id(experiment_user),
'Session %s' % session.session_key)
self.mox.VerifyAll()
# With authenticated WebUser
user = User.objects.create_user('user', 'user@example.com', 'user')
with self.web_user(user) as experiment_user:
self.mox.ReplayAll()
self.assertEqual(self.analytics._compute_id(experiment_user),
'User %d' % user.id)
self.mox.VerifyAll()
# With StaticUser
experiment_user = StaticUser()
self.assertRaises(IdentificationError,
self.analytics._compute_id, experiment_user)
def test_identify(self):
# With anonymous WebUser
with self.web_user(AnonymousUser()) as experiment_user:
self.mox.ReplayAll()
self.assertTrue(self.analytics._identify(experiment_user))
self.assertEqual(
self.analytics.identity,
'Session %s' % experiment_user.session.session_key
)
self.mox.VerifyAll()
# With authenticated WebUser
user = User.objects.create_user('user', 'user@example.com', 'user')
with self.web_user(user) as experiment_user:
self.mox.ReplayAll()
self.assertTrue(self.analytics._identify(experiment_user))
self.assertEqual(self.analytics.identity,
'User %s' % experiment_user.user.pk)
self.mox.VerifyAll()
# With StaticUser
experiment_user = StaticUser()
self.assertFalse(self.analytics._identify(experiment_user))
self.assertEqual(self.analytics.identity, None)
def test_enroll(self):
import time
experiment = Experiment.objects.create(name='Experiment')
user = User.objects.create_user('user', 'user@example.com', 'user')
tracker = self.mox.CreateMockAnything()
analytics = Mixpanel(tracker=tracker)
now = time.gmtime()
self.mox.StubOutWithMock(time, 'gmtime')
time.gmtime().AndReturn(now)
with self.web_user(user) as experiment_user:
properties = {'time': '%d' % time.mktime(now),
'distinct_id': 'User %d' % user.pk,
'Experiment': experiment.name,
'Group': 'Test'}
tracker.run(event_name='Enrolled In Experiment',
properties=properties)
self.mox.ReplayAll()
analytics.enroll(experiment=experiment,
experiment_user=experiment_user,
group_id=Participant.TEST_GROUP)
self.mox.VerifyAll()
def test_record(self):
import time
tracker = self.mox.CreateMockAnything()
analytics = Mixpanel(tracker=tracker)
now = time.gmtime()
self.mox.StubOutWithMock(time, 'gmtime')
time.gmtime().AndReturn(now)
with self.web_user(AnonymousUser()) as experiment_user:
properties = {
'time': '%d' % time.mktime(now),
'distinct_id': ('Session %s' %
experiment_user.session.session_key),
'Goal Type': 'Goal Type'
}
tracker.run(event_name='Goal Recorded',
properties=properties)
self.mox.ReplayAll()
goal_type = GoalType.objects.create(name='Goal Type')
goal_record = GoalRecord.record(goal_name=goal_type.name,
experiment_user=experiment_user)
analytics.record(goal_record=goal_record,
experiment_user=experiment_user)
self.mox.VerifyAll()
def test_event(self):
import time
tracker = self.mox.CreateMockAnything()
analytics = Mixpanel(tracker=tracker)
now = time.gmtime()
self.mox.StubOutWithMock(time, 'gmtime')
time.gmtime().AndReturn(now)
with self.web_user(AnonymousUser()) as experiment_user:
properties = {
'time': '%d' % time.mktime(now),
'distinct_id': ('Session %s' %
experiment_user.session.session_key),
'Foo': 'Bar'
}
tracker.run(event_name='Event',
properties=properties)
self.mox.ReplayAll()
analytics.event(name='Event',
properties={'Foo': 'Bar'},
request=experiment_user.request)
self.mox.VerifyAll()
@contextmanager
def web_user(self, user):
session = get_session(None)
request = self.mox.CreateMock(HttpRequest)
request.user = user
request.session = session
experiment_user = WebUser(request)
experiment_user.get_or_create_anonymous_visitor()
yield experiment_user
| bsd-3-clause | 8,983,311,479,879,011,000 | 41.59322 | 81 | 0.535814 | false |
frozstone/concept | utilities/HeurDep.py | 1 | 5042 | from os import listdir, path
from lxml import etree, objectify
from pickle import load
from sys import argv
from StringIO import StringIO
from collections import OrderedDict
import time
from utilities.norm_arxiv import norm_arxiv
from utilities.norm_attribute import norm_attribute
from utilities.norm_mrow import norm_mrow
from utilities.norm_outer_fence import norm_outer_fence
from utilities.norm_splitter import norm_splitter
from utilities.norm_tag import norm_tag
from utilities.utils import Link_Types, Matching_Methods, utils
from utilities.depgraph_heur import depgraph_heur
__dtd = '<!DOCTYPE math SYSTEM "resources/xhtml-math11-f.dtd">'
__xmlns = ' xmlns="http://www.w3.org/1998/Math/MathML"'
__relation_fl = 'resources/math_symbols_unicode.dump'
__xml_parser = etree.XMLParser(remove_blank_text = True, load_dtd = True, resolve_entities = True)
def __get_clean_mathml(mt_string):
mt_tree = etree.parse(StringIO(__dtd + mt_string), __xml_parser).getroot()
objectify.deannotate(mt_tree, cleanup_namespaces=True)
return mt_tree
def __extract_math_line_arxiv(line):
cells = line.strip().split('\t')
latexml_id = cells[0]
para_id = cells[1]
kmcs_id = cells[2]
gmid = '#'.join([para_id, kmcs_id, latexml_id])
mt_string = '\t'.join(cells[3:]).replace(__xmlns, "")
mt = __get_clean_mathml(mt_string)
return gmid, mt
def __extract_math_line_acl(line):
cells = line.strip().split('\t')
gmid = cells[0]
mt_string = '\t'.join(cells[1:]).replace(__xmlns, "")
mt = __get_clean_mathml(mt_string)
return gmid, mt
def __write_edges(edges, toflname):
lns = []
for gmid, nodes in edges.iteritems():
lns.append( '\t'.join([gmid, ' '.join([node[0] for node in nodes])]) + '\n')
f = open(toflname, 'w')
f.writelines(lns)
f.close()
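# Illustrative output record written by __write_edges (format derived from the
# code above; the identifiers are made up). Each line holds one source math id,
# a tab, then the space-separated ids of the nodes it links to:
#
#     p1#k3#m42<TAB>p1#k3#m17 p2#k1#m5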
def __get_dep_graph(math_dir, dep_dir, fl, matching_method):
'''
input: file from math_new
output:
1. edges: {gumid1:[(gumid2, linktype)]} --> component list
2. gumidmappings: {gmid:gumid}
'''
#useful utilities classes
n_arxiv = norm_arxiv()
n_attribute = norm_attribute()
n_mrow = norm_mrow(__dtd)
n_outer_fence = norm_outer_fence()
n_tag = norm_tag(__dtd)
n_splitter = norm_splitter(__dtd, __relation_fl)
u = utils()
depgraph = depgraph_heur(matching_method)
lns = open(path.join(math_dir, fl)).readlines()
#enumerate if there is no id in the <math> tag
mts = OrderedDict()
#for xhtml, enumerate mathtag; for xml, enumerate expressiontag; for math_new, enumerate the lines
for ln in lns:
if ln.strip() == '': continue
gmid, mt = __extract_math_line_arxiv(ln)
#replace <m:math> with <math>
mt_string_initial = n_arxiv.remove_math_prefix(etree.tostring(mt))
        #remove annotation, attributes, and finally get rid of the <math> tag
mt_string_formatted = n_arxiv.remove_annotation(etree.parse(StringIO(__dtd + mt_string_initial)).getroot())
mt_string_formatted = n_attribute.normalize(mt_string_formatted)
#normalize mrow
mt_string_formatted = n_mrow.normalize(mt_string_formatted)
#remove fences
mt_string_formatted = etree.tostring(n_outer_fence.remove_outer_fence(etree.parse(StringIO(__dtd + mt_string_formatted)).getroot()))[6:-7]
#expand maths (normalize tags and/or case)
expanded = n_tag.normalize_tags('<math>%s</math>' % mt_string_formatted)
if len(expanded) > 0:
expanded[-1] = n_mrow.normalize('<math>%s</math>' % expanded[-1])[6:-7]
expanded.extend([etree.tostring(n_outer_fence.remove_outer_fence(etree.parse(StringIO(__dtd + '<math>%s</math>' % exp)).getroot()))[6:-7] for exp in expanded])
else:
expanded = [mt_string_formatted]
mts[gmid] = expanded
#split around the equality and get the left side subexpressions
left_subexp = n_splitter.split('<math>%s</math>' % expanded[-1])
if left_subexp is None: continue
left_subexp = n_mrow.normalize(left_subexp)[6:-7]
if not u.is_empty_tag(left_subexp):
expanded_left = n_tag.normalize_tags(left_subexp)
expanded_left = [n_mrow.normalize('<math>%s</math>' % exp)[6:-7] for exp in expanded_left]
mts[gmid].append(left_subexp)
mts[gmid].extend(expanded_left)
mts[gmid] = list(set(mts[gmid]))
edges = depgraph.create_edges(mts)
__write_edges(edges, path.join(dep_dir, fl))
if __name__ == '__main__':
#Preparation
math_path = "../mathmlandextra/math_new/5/0704.0005.txt"#argv[1]
dep_dir = "./"#argv[2]
math_dir = path.dirname(math_path) #path to math_new directory
math_fl = path.basename(math_path) #./1/0704.0097.txt
# try:
__get_dep_graph(math_dir, dep_dir, math_fl, Matching_Methods.heur)
# except:
# print math_path
| mit | -6,480,478,689,920,378,000 | 36.348148 | 171 | 0.628322 | false |
vadimtk/chrome4sdp | tools/telemetry/third_party/gsutilz/third_party/boto/boto/support/exceptions.py | 151 | 1715 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
class CaseIdNotFound(JSONResponseError):
pass
class CaseCreationLimitExceeded(JSONResponseError):
pass
class InternalServerError(JSONResponseError):
pass
class AttachmentLimitExceeded(JSONResponseError):
pass
class DescribeAttachmentLimitExceeded(JSONResponseError):
pass
class AttachmentSetIdNotFound(JSONResponseError):
pass
class AttachmentSetExpired(JSONResponseError):
pass
class AttachmentIdNotFound(JSONResponseError):
pass
class AttachmentSetSizeLimitExceeded(JSONResponseError):
pass
| bsd-3-clause | -3,968,585,153,565,571,600 | 28.568966 | 77 | 0.783673 | false |
4022321818/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/xml/sax/saxutils.py | 730 | 11688 | """\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
from . import handler
from . import xmlreader
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
    return data.replace("&amp;", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t':'&#9;'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
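# Illustrative examples (not part of the original module) of the helpers above:
#
#     escape('a < b & c')           -> 'a &lt; b &amp; c'
#     unescape('a &lt; b &amp; c')  -> 'a < b & c'
#     quoteattr('1 < 2')            -> '"1 &lt; 2"'
#
# quoteattr picks single or double quotes depending on which quote characters
# the data already contains.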
def _gettextwriter(out, encoding):
if out is None:
import sys
return sys.stdout
if isinstance(out, io.TextIOBase):
# use a text writer as is
return out
# wrap a binary writer with TextIOWrapper
if isinstance(out, io.RawIOBase):
# Keep the original file open when the TextIOWrapper is
# destroyed
class _wrapper:
__class__ = out.__class__
def __getattr__(self, name):
return getattr(out, name)
buffer = _wrapper()
buffer.close = lambda: None
else:
# This is to handle passed objects that aren't in the
# IOBase hierarchy, but just have a write method
buffer = io.BufferedIOBase()
buffer.writable = lambda: True
buffer.write = out.write
try:
# TextIOWrapper uses this methods to determine
# if BOM (for UTF-16, etc) should be added
buffer.seekable = out.seekable
buffer.tell = out.tell
except AttributeError:
pass
return io.TextIOWrapper(buffer, encoding=encoding,
errors='xmlcharrefreplace',
newline='\n',
write_through=True)
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
handler.ContentHandler.__init__(self)
out = _gettextwriter(out, encoding)
self._write = out.write
self._flush = out.flush
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
self._short_empty_elements = short_empty_elements
self._pending_start_element = False
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
def _finish_pending_start_element(self,endElement=False):
if self._pending_start_element:
self._write('>')
self._pending_start_element = False
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def endDocument(self):
self._flush()
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._finish_pending_start_element()
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElement(self, name):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._finish_pending_start_element()
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
if self._short_empty_elements:
self._pending_start_element = True
else:
self._write(">")
def endElementNS(self, name, qname):
if self._pending_start_element:
self._write('/>')
self._pending_start_element = False
else:
self._write('</%s>' % self._qname(name))
def characters(self, content):
if content:
self._finish_pending_start_element()
self._write(escape(content))
def ignorableWhitespace(self, content):
if content:
self._finish_pending_start_element()
self._write(content)
def processingInstruction(self, target, data):
self._finish_pending_start_element()
self._write('<?%s %s?>' % (target, data))
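# Illustrative helper (not part of the original module): drives XMLGenerator
# directly as a SAX ContentHandler to serialise a tiny document. The element
# and attribute names are made up for the example.
def _example_xmlgenerator_usage():
    out = io.StringIO()
    gen = XMLGenerator(out, encoding='utf-8', short_empty_elements=True)
    gen.startDocument()
    gen.startElement('greeting', {'lang': 'en'})
    gen.characters('hello & goodbye')
    gen.endElement('greeting')
    gen.endDocument()
    # out.getvalue() now holds:
    #   <?xml version="1.0" encoding="utf-8"?>
    #   <greeting lang="en">hello &amp; goodbye</greeting>
    return out.getvalue()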
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if isinstance(source, str):
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urllib.parse.urljoin(base, sysid))
f = urllib.request.urlopen(source.getSystemId())
source.setByteStream(f)
return source
| agpl-3.0 | -3,012,627,070,021,310,000 | 31.831461 | 84 | 0.612252 | false |
vaquer/ckanext-dkan | ckanext/dkan/harvesters/dkanharvester.py | 1 | 31034 | import json
import urllib
import urllib2
import httplib
import datetime
import socket
import datetime
# from ckanext.harvest.harvesters.ckanharvester import CKANHarvester
from ckanext.harvest.harvesters.base import HarvesterBase
from ckanext.harvest.model import HarvestObject
from ckan.logic import ValidationError, NotFound, get_action
from ckan import model
import ckan.lib.munge as munge
from ckan.plugins import toolkit
log = __import__('logging').getLogger(__name__)
MIMETYPE_FORMATS = {
'text/html': 'HTML',
'text/csv': 'CSV',
'text/xml': 'XML',
'application/pdf': 'PDF',
'application/zip': 'ZIP',
'application/rdf+xml': 'RDF',
'application/json': 'JSON',
'application/vnd.ms-excel': 'XLS',
'application/vnd.google-earth.kml+xml': 'KML',
'application/msword': 'DOC',
}
class DKANHarvester(HarvesterBase):
ckan_revision_api_works = False
config = None
api_version = 2
action_api_version = 3
def info(self):
return {
'name': 'dkan',
'title': 'DKAN HARVESTER',
'description': 'Harvests remote DKAN instances',
'form_config_interface': 'Text'
}
def _get_action_api_offset(self):
return '/api/%d/action' % self.action_api_version
def _get_search_api_offset(self):
return '%s/current_package_list_with_resources' % self._get_action_api_offset()
def _get_content(self, url):
http_request = urllib2.Request(url=url)
api_key = self.config.get('api_key')
if api_key:
http_request.add_header('Authorization', api_key)
try:
http_response = urllib2.urlopen(http_request, timeout=90)
except urllib2.HTTPError, e:
if e.getcode() == 404:
raise ContentNotFoundError('HTTP error: %s' % e.code)
else:
raise ContentFetchError('HTTP error: %s' % e.code)
except urllib2.URLError, e:
raise ContentFetchError('URL error: %s' % e.reason)
except httplib.HTTPException, e:
raise ContentFetchError('HTTP Exception: %s' % e)
except socket.error, e:
raise ContentFetchError('HTTP socket error: %s' % e)
except Exception, e:
raise ContentFetchError('HTTP general exception: %s' % e)
return http_response.read()
def _get_group(self, base_url, group):
url = base_url + self._get_action_api_offset() + '/group_show?id=' + \
group['id']
try:
content = self._get_content(url)
data = json.loads(content)
if self.action_api_version == 3:
return data.pop('result')
return data
except (ContentFetchError, ValueError):
log.debug('Could not fetch/decode remote group')
raise RemoteResourceError('Could not fetch/decode remote group')
def _get_organization(self, base_url, org_name):
url = base_url + self._get_action_api_offset() + \
'/organization_show?id=' + org_name
try:
content = self._get_content(url)
content_dict = json.loads(content)
return content_dict['result']
except (ContentFetchError, ValueError, KeyError):
log.debug('Could not fetch/decode remote group')
raise RemoteResourceError(
'Could not fetch/decode remote organization')
def _set_config(self, config_str):
if config_str:
self.config = json.loads(config_str)
if 'api_version' in self.config:
self.api_version = int(self.config['api_version'])
log.debug('Using config: %r', self.config)
else:
self.config = {}
def validate_config(self, config):
if not config:
return config
try:
config_obj = json.loads(config)
if 'api_version' in config_obj:
try:
int(config_obj['api_version'])
except ValueError:
raise ValueError('api_version must be an integer')
if 'default_tags' in config_obj:
if not isinstance(config_obj['default_tags'], list):
raise ValueError('default_tags must be a list')
if config_obj['default_tags'] and \
not isinstance(config_obj['default_tags'][0], dict):
raise ValueError('default_tags must be a list of '
'dictionaries')
if 'default_groups' in config_obj:
if not isinstance(config_obj['default_groups'], list):
raise ValueError('default_groups must be a *list* of group'
' names/ids')
if config_obj['default_groups'] and \
not isinstance(config_obj['default_groups'][0],
basestring):
raise ValueError('default_groups must be a list of group '
'names/ids (i.e. strings)')
# Check if default groups exist
context = {'model': model, 'user': toolkit.c.user}
config_obj['default_group_dicts'] = []
for group_name_or_id in config_obj['default_groups']:
try:
group = get_action('group_show')(
context, {'id': group_name_or_id})
# save the dict to the config object, as we'll need it
# in the import_stage of every dataset
config_obj['default_group_dicts'].append(group)
except NotFound, e:
raise ValueError('Default group not found')
config = json.dumps(config_obj)
if 'default_extras' in config_obj:
if not isinstance(config_obj['default_extras'], dict):
raise ValueError('default_extras must be a dictionary')
if 'organizations_filter_include' in config_obj \
and 'organizations_filter_exclude' in config_obj:
raise ValueError('Harvest configuration cannot contain both '
'organizations_filter_include and organizations_filter_exclude')
if 'user' in config_obj:
# Check if user exists
context = {'model': model, 'user': toolkit.c.user}
try:
user = get_action('user_show')(
context, {'id': config_obj.get('user')})
except NotFound:
raise ValueError('User not found')
for key in ('read_only', 'force_all'):
if key in config_obj:
if not isinstance(config_obj[key], bool):
raise ValueError('%s must be boolean' % key)
except ValueError, e:
raise e
return config
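    # Illustrative harvest source configuration (not from the original code
    # base) that would pass validate_config(). All values are made up;
    # "default_groups" entries must name groups that already exist on the local
    # CKAN, and "api_version" must be an integer.
    #
    #     {
    #         "api_version": 3,
    #         "default_tags": [{"name": "dkan"}],
    #         "default_groups": ["remote-data"],
    #         "default_extras": {"harvested_from": "{harvest_source_url}"},
    #         "remote_orgs": "create",
    #         "read_only": true,
    #         "force_all": false
    #     }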
def _get_all_packages(self, base_url, harvest_job):
# Request all remote packages
url = base_url + '/api/3/action/package_list'
log.debug('Getting all DKAN packages: %s', url)
try:
content = self._get_content(url)
except Exception, e:
self._save_gather_error('Unable to get content for URL: %s - %s'
% (url, e), harvest_job)
return None
packages = json.loads(content)['result']
return packages
def _get_package(self, base_url, harvest_object):
url = base_url + '/api/3/action/package_show/' + harvest_object.guid
log.debug('Getting DKAN package: %s', url)
# Get contents
try:
content = self._get_content(url)
except Exception, e:
self._save_object_error(
'Unable to get content for package: %s - %r' % (url, e),
harvest_object)
return None, None
package = json.loads(content)['result'][0]
return url, json.dumps(package)
def fetch_stage(self, harvest_object):
# Nothing to do here - we got the package dict in the search in the
# gather stage
return True
def gather_stage(self, harvest_job):
log.debug('In DKANHarvester gather_stage (%s)',
harvest_job.source.url)
toolkit.requires_ckan_version(min_version='2.0')
get_all_packages = True
self._set_config(harvest_job.source.config)
# Get source URL
remote_ckan_base_url = harvest_job.source.url.rstrip('/')
# Filter in/out datasets from particular organizations
fq_terms = []
org_filter_include = self.config.get('organizations_filter_include', [])
org_filter_exclude = self.config.get('organizations_filter_exclude', [])
if org_filter_include:
fq_terms.append(' OR '.join(
'organization:%s' % org_name for org_name in org_filter_include))
elif org_filter_exclude:
fq_terms.extend(
'-organization:%s' % org_name for org_name in org_filter_exclude)
# Ideally we can request from the remote CKAN only those datasets
# modified since the last completely successful harvest.
last_error_free_job = self.last_error_free_job(harvest_job)
log.debug('Last error-free job: %r', last_error_free_job)
if (last_error_free_job and
not self.config.get('force_all', False)):
get_all_packages = False
# Request only the datasets modified since
last_time = last_error_free_job.gather_started
# Note: SOLR works in UTC, and gather_started is also UTC, so
# this should work as long as local and remote clocks are
# relatively accurate. Going back a little earlier, just in case.
get_changes_since = \
(last_time - datetime.timedelta(hours=1)).isoformat()
log.info('Searching for datasets modified since: %s UTC',
get_changes_since)
fq_since_last_time = 'metadata_modified:[{since}Z TO *]' \
.format(since=get_changes_since)
try:
pkg_dicts = self._search_for_datasets(
remote_ckan_base_url,
fq_terms + [fq_since_last_time])
except SearchError, e:
log.info('Searching for datasets changed since last time '
'gave an error: %s', e)
get_all_packages = True
if not get_all_packages and not pkg_dicts:
log.info('No datasets have been updated on the remote '
'DKAN instance since the last harvest job %s',
last_time)
return None
# Fall-back option - request all the datasets from the remote CKAN
if get_all_packages:
# Request all remote packages
try:
pkg_dicts = self._search_for_datasets(remote_ckan_base_url,
fq_terms)
except SearchError, e:
log.info('Searching for all datasets gave an error: %s', e)
self._save_gather_error(
'Unable to search remote DKAN for datasets:%s url:%s'
'terms:%s' % (e, remote_ckan_base_url, fq_terms),
harvest_job)
return None
if not pkg_dicts:
self._save_gather_error(
'No datasets found at DKAN: %s' % remote_ckan_base_url,
harvest_job)
return None
# Create harvest objects for each dataset
try:
package_ids = set()
object_ids = []
for pkg_dict in pkg_dicts:
if pkg_dict is None:
continue
if pkg_dict['id'] in package_ids:
log.info('Discarding duplicate dataset %s - probably due '
'to datasets being changed at the same time as '
'when the harvester was paging through',
pkg_dict['id'])
continue
package_ids.add(pkg_dict['id'])
log.debug('Package: %s', pkg_dict)
log.debug('Creating HarvestObject for %s %s',
pkg_dict['name'], pkg_dict['id'])
obj = HarvestObject(guid=pkg_dict['id'],
job=harvest_job,
content=json.dumps(pkg_dict))
obj.save()
object_ids.append(obj.id)
return object_ids
except Exception, e:
self._save_gather_error('%r' % e.message, harvest_job)
def _search_for_datasets(self, remote_ckan_base_url, fq_terms=None):
'''Does a dataset search on a remote CKAN and returns the results.
Deals with paging to return all the results, not just the first page.
'''
base_search_url = remote_ckan_base_url + self._get_search_api_offset()
params = {'limit': '100', 'offset': '0'}
# There is the worry that datasets will be changed whilst we are paging
# through them.
# * In SOLR 4.7 there is a cursor, but not using that yet
# because few CKANs are running that version yet.
# * However we sort, then new names added or removed before the current
# page would cause existing names on the next page to be missed or
# double counted.
# * Another approach might be to sort by metadata_modified and always
# ask for changes since (and including) the date of the last item of
# the day before. However if the entire page is of the exact same
# time, then you end up in an infinite loop asking for the same page.
# * We choose a balanced approach of sorting by ID, which means
# datasets are only missed if some are removed, which is far less
# likely than any being added. If some are missed then it is assumed
# they will harvested the next time anyway. When datasets are added,
# we are at risk of seeing datasets twice in the paging, so we detect
# and remove any duplicates.
pkg_dicts = []
pkg_ids = set()
previous_content = None
while True:
url = base_search_url + '?' + urllib.urlencode(params)
log.debug('Searching for DKAN datasets: %s', url)
try:
content = self._get_content(url)
except ContentFetchError, e:
raise SearchError(
'Error sending request to search remote '
'DKAN instance %s using URL %r. Error: %s' %
(remote_ckan_base_url, url, e))
if previous_content and content == previous_content:
raise SearchError('The paging doesn\'t seem to work. URL: %s' %
url)
try:
response_dict = json.loads(content)
except ValueError:
raise SearchError('Response from remote DKAN was not JSON: %r'
% content)
try:
pkg_dicts_page = response_dict.get('result', [])
except ValueError:
raise SearchError('Response JSON did not contain '
'result/results: %r' % response_dict)
if len(pkg_dicts_page) == 0:
break
# Weed out any datasets found on previous pages (should datasets be
# changing while we page)
if type(pkg_dicts_page[0]) == list:
pkg_dicts_page = pkg_dicts_page[0]
pkg_dicts_page = [self._convert_dkan_package_to_ckan(p) for p in pkg_dicts_page]
ids_in_page = set(p['id'] for p in pkg_dicts_page if p is not None)
duplicate_ids = ids_in_page & pkg_ids
if duplicate_ids:
pkg_dicts_page = [p for p in pkg_dicts_page if p['id'] not in duplicate_ids]
pkg_ids |= ids_in_page
pkg_dicts.extend(pkg_dicts_page)
params['offset'] = str(int(params['offset']) + int(params['limit']))
return pkg_dicts
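    # Illustrative first-page request URL produced by the paging loop above
    # (the host name is made up; parameter order may vary):
    #
    #     http://dkan.example.org/api/3/action/current_package_list_with_resources?limit=100&offset=0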
def import_stage(self, harvest_object):
log.debug('In DKANHarvester import_stage')
base_context = {'model': model, 'session': model.Session,
'user': self._get_user_name()}
if not harvest_object:
log.error('No harvest object received')
return False
if harvest_object.content is None:
self._save_object_error('Empty content for object %s' %
harvest_object.id,
harvest_object, 'Import')
return False
self._set_config(harvest_object.job.source.config)
try:
package_dict = json.loads(harvest_object.content)
if package_dict.get('type') == 'harvest':
log.warn('Remote dataset is a harvest source, ignoring...')
return True
# Set default tags if needed
default_tags = self.config.get('default_tags', [])
if default_tags:
if not 'tags' in package_dict:
package_dict['tags'] = []
package_dict['tags'].extend(
[t for t in default_tags if t not in package_dict['tags']])
remote_groups = self.config.get('remote_groups', None)
if not remote_groups in ('only_local', 'create'):
# Ignore remote groups
package_dict.pop('groups', None)
else:
if not 'groups' in package_dict:
package_dict['groups'] = []
# check if remote groups exist locally, otherwise remove
validated_groups = []
for group_ in package_dict['groups']:
try:
data_dict = {'id': group_['id']}
group = get_action('group_show')(base_context.copy(), data_dict)
validated_groups.append({'id': group['id'], 'name': group['name']})
except NotFound, e:
log.info('Group %s is not available', group_)
if remote_groups == 'create':
try:
group = self._get_group(harvest_object.source.url, group_)
except RemoteResourceError:
log.error('Could not get remote group %s', group_)
continue
for key in ['packages', 'created', 'users', 'groups', 'tags', 'extras', 'display_name']:
group.pop(key, None)
get_action('group_create')(base_context.copy(), group)
log.info('Group %s has been newly created', group_)
validated_groups.append({'id': group['id'], 'name': group['name']})
package_dict['groups'] = validated_groups
# Local harvest source organization
source_dataset = get_action('package_show')(base_context.copy(), {'id': harvest_object.source.id})
local_org = source_dataset.get('owner_org')
remote_orgs = self.config.get('remote_orgs', None)
if not remote_orgs in ('only_local', 'create'):
# Assign dataset to the source organization
package_dict['owner_org'] = local_org
else:
if not 'owner_org' in package_dict:
package_dict['owner_org'] = None
# check if remote org exist locally, otherwise remove
validated_org = None
remote_org = package_dict['owner_org']
if remote_org:
try:
data_dict = {'id': remote_org}
org = get_action('organization_show')(base_context.copy(), data_dict)
validated_org = org['id']
except NotFound, e:
log.info('Organization %s is not available', remote_org)
if remote_orgs == 'create':
try:
try:
org = self._get_organization(harvest_object.source.url, remote_org)
except RemoteResourceError:
# fallback if remote CKAN exposes organizations as groups
# this especially targets older versions of CKAN
org = self._get_group(harvest_object.source.url, remote_org)
for key in ['packages', 'created', 'users', 'groups', 'tags', 'extras', 'display_name', 'type']:
org.pop(key, None)
get_action('organization_create')(base_context.copy(), org)
log.info('Organization %s has been newly created', remote_org)
validated_org = org['id']
except (RemoteResourceError, ValidationError):
log.error('Could not get remote org %s', remote_org)
package_dict['owner_org'] = validated_org or local_org
# Set default groups if needed
default_groups = self.config.get('default_groups', [])
if default_groups:
if not 'groups' in package_dict:
package_dict['groups'] = []
existing_group_ids = [g['id'] for g in package_dict['groups']]
package_dict['groups'].extend(
[g for g in self.config['default_group_dicts']
if g['id'] not in existing_group_ids])
# Set default extras if needed
default_extras = self.config.get('default_extras', {})
def get_extra(key, package_dict):
for extra in package_dict.get('extras', []):
if extra['key'] == key:
return extra
if default_extras:
override_extras = self.config.get('override_extras', False)
if not 'extras' in package_dict:
package_dict['extras'] = []
for key, value in default_extras.iteritems():
existing_extra = get_extra(key, package_dict)
if existing_extra and not override_extras:
continue # no need for the default
if existing_extra:
package_dict['extras'].remove(existing_extra)
# Look for replacement strings
if isinstance(value, basestring):
value = value.format(
harvest_source_id=harvest_object.job.source.id,
harvest_source_url=
harvest_object.job.source.url.strip('/'),
harvest_source_title=
harvest_object.job.source.title,
harvest_job_id=harvest_object.job.id,
harvest_object_id=harvest_object.id,
dataset_id=package_dict['id'])
package_dict['extras'].append({'key': key, 'value': value})
for resource in package_dict.get('resources', []):
# Clear remote url_type for resources (eg datastore, upload) as
# we are only creating normal resources with links to the
# remote ones
resource.pop('url_type', None)
# Clear revision_id as the revision won't exist on this CKAN
# and saving it will cause an IntegrityError with the foreign
# key.
resource.pop('revision_id', None)
result = self._create_or_update_package(
package_dict, harvest_object, package_dict_form='package_show')
log.info(result)
return result
except ValidationError, e:
self._save_object_error('Invalid package with GUID %s: %r' %
(harvest_object.guid, e.error_dict),
harvest_object, 'Import')
except Exception, e:
self._save_object_error('%s' % e, harvest_object, 'Import')
def _convert_dkan_package_to_ckan(self, package):
"""
        Function: Convert a DKAN-style package dict to CKAN style
        Return: <dict>, or None if the package cannot be converted
"""
resources = []
try:
if 'extras' not in package:
package['extras'] = []
if 'title' not in package:
raise ValueError("Dataset has not title")
if 'name' not in package:
package['name'] = munge.munge_title_to_name(package['title'])
if 'description' in package:
package['notes'] = package['description']
for license in model.Package.get_license_register().values():
if license.title == package['license_title']:
package['license_id'] = license.id
break
if 'private' not in package:
package['private'] = False
else:
package['private'] = True if package['private'] != 'Publicado' else False
package['state'] = package['state'].lower()
package['type'] = package['type'].lower()
if 'metadata_created' in package:
package['metadata_created'] = self._convert_date_package_handling_error(package, 'metadata_created')
if 'metadata_modified' in package:
package['metadata_modified'] = self._convert_date_package_handling_error(package, 'metadata_modified')
if 'revision_timestamp' in package:
package['revision_timestamp'] = self._convert_date_package_handling_error(package, 'revision_timestamp')
if 'resources' not in package:
raise ValueError('Dataset has no resources')
package = self._fix_tags(package)
for resource in package['resources']:
resource['description'] = resource['name']
if not resource.get('url', ''):
                    continue
if 'size' in resource:
if type(resource['size']) == unicode or type(resource['size']) == str:
clean_size = resource['size'].replace('KB', '').replace('MB', '').strip()
try:
resource['size'] = int(float(clean_size))
except:
log.error(u'Incorrect size file format Package: {0}, Resource: {1}'.format(package['name'], resource['name']))
resource['size'] = 0
self._convert_date_resource_handling_error(resource, 'created', package['name'])
self._convert_date_resource_handling_error(resource, 'last_modified', package['name'], last_modified=True)
if 'revision_id' in resource:
del resource['revision_id']
if 'format' not in resource:
resource['format'] = MIMETYPE_FORMATS.get(resource.get('mimetype'), '')
resources.append(resource)
package['resources'] = resources
if 'private' in package:
# DKAN appears to have datasets with private=True which are
# still public: https://github.com/NuCivic/dkan/issues/950. If
# they were really private then we'd not get be able to access
# them, so assume they are not private.
package['private'] = False
return package
except Exception, e:
log.error('Unable to get convert DKAN to CKAN package: %s' % e)
return None
def _convert_date_package_handling_error(self, package, key, last_modified=False):
"""
        Function: Convert a package date field to the CKAN date format
        Return: <string>
"""
try:
return self._convert_date(package[key], last_modified=last_modified)
except:
log.error(
u'Incorrect date metadata_created format in Package {0}: {1}'.format(package['name'], package[key])
)
return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
def _convert_date_resource_handling_error(self, resource, key, package_name, last_modified=False):
"""
        Function: Convert a resource date field to the CKAN date format
        Return: <string>
"""
try:
return self._convert_date(resource[key], last_modified=last_modified)
except:
log.error(
u'Incorrect date last_modified format in Package: {0}, Source: {1} Date: {2}'.format(package_name, resource['name'], resource[key])
)
return datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")
def _convert_date(self, date, last_modified=False):
"""
        Function: Convert a DKAN-formatted date string to the CKAN date format
        Return: <string>
"""
try:
date_object = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%f")
return date
except:
pass
try:
date_object = datetime.datetime.strptime(date, "%Y-%m-%dT%H:%M:%S")
return date
except:
pass
date_correct_format = date.replace('Date changed\t', '')[4:].lstrip() if last_modified else date[4:].lstrip()
date_object = datetime.datetime.strptime(date_correct_format, '%m/%d/%Y - %H:%M:%S')
return date_object.strftime("%Y-%m-%dT%H:%M:%S.%f")
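    # Illustrative conversions (example values, derived from the parsing rules
    # above) showing what _convert_date() accepts:
    #
    #     '2011-02-21T13:45:00'               -> returned unchanged
    #     'Mon, 02/21/2011 - 13:45:00'        -> '2011-02-21T13:45:00.000000'
    #     'Date changed\tMon, 02/21/2011 - 13:45:00' with last_modified=True
    #                                         -> '2011-02-21T13:45:00.000000'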
def _fix_tags(self, package_dict):
"""
        Function: Strip vocabulary ids from the package tags
        Return: <dict>
"""
tags = []
tag_aux = None
for tag in package_dict.get('tags', []):
tag_aux = tag
if 'vocabulary_id' in tag_aux:
tag_aux['vocabulary_id'] = None
tags.append(tag_aux)
package_dict['tags'] = tags
return package_dict
class ContentFetchError(Exception):
pass
class ContentNotFoundError(ContentFetchError):
pass
class RemoteResourceError(Exception):
pass
class SearchError(Exception):
pass
| agpl-3.0 | 8,176,333,278,479,023,000 | 40.656376 | 147 | 0.526165 | false |
TeamExodus/external_chromium_org | third_party/cython/src/Cython/Debugger/Tests/test_libpython_in_gdb.py | 110 | 3979 | # -*- coding: UTF-8 -*-
"""
Test libpython.py. This is already partly tested by test_libcython_in_gdb and
Lib/test/test_gdb.py in the Python source. These tests are run in gdb and
called from test_libcython_in_gdb.main()
"""
import os
import sys
import gdb
from Cython.Debugger import libcython
from Cython.Debugger import libpython
import test_libcython_in_gdb
from test_libcython_in_gdb import _debug, inferior_python_version
class TestPrettyPrinters(test_libcython_in_gdb.DebugTestCase):
"""
Test whether types of Python objects are correctly inferred and that
the right libpython.PySomeTypeObjectPtr classes are instantiated.
Also test whether values are appropriately formatted (don't be too
laborious as Lib/test/test_gdb.py already covers this extensively).
Don't take care of decreffing newly allocated objects as a new
interpreter is started for every test anyway.
"""
def setUp(self):
super(TestPrettyPrinters, self).setUp()
self.break_and_run('b = c = d = 0')
def get_pyobject(self, code):
value = gdb.parse_and_eval(code)
assert libpython.pointervalue(value) != 0
return value
def pyobject_fromcode(self, code, gdbvar=None):
if gdbvar is not None:
d = {'varname':gdbvar, 'code':code}
gdb.execute('set $%(varname)s = %(code)s' % d)
code = '$' + gdbvar
return libpython.PyObjectPtr.from_pyobject_ptr(self.get_pyobject(code))
def get_repr(self, pyobject):
return pyobject.get_truncated_repr(libpython.MAX_OUTPUT_LEN)
def alloc_bytestring(self, string, gdbvar=None):
if inferior_python_version < (3, 0):
funcname = 'PyString_FromStringAndSize'
else:
funcname = 'PyBytes_FromStringAndSize'
assert '"' not in string
# ensure double quotes
code = '(PyObject *) %s("%s", %d)' % (funcname, string, len(string))
return self.pyobject_fromcode(code, gdbvar=gdbvar)
def alloc_unicodestring(self, string, gdbvar=None):
self.alloc_bytestring(string.encode('UTF-8'), gdbvar='_temp')
postfix = libpython.get_inferior_unicode_postfix()
funcname = 'PyUnicode%s_FromEncodedObject' % (postfix,)
return self.pyobject_fromcode(
'(PyObject *) %s($_temp, "UTF-8", "strict")' % funcname,
gdbvar=gdbvar)
def test_bytestring(self):
bytestring = self.alloc_bytestring("spam")
if inferior_python_version < (3, 0):
bytestring_class = libpython.PyStringObjectPtr
expected = repr("spam")
else:
bytestring_class = libpython.PyBytesObjectPtr
expected = "b'spam'"
self.assertEqual(type(bytestring), bytestring_class)
self.assertEqual(self.get_repr(bytestring), expected)
def test_unicode(self):
unicode_string = self.alloc_unicodestring(u"spam ἄλφα")
expected = "'spam ἄλφα'"
if inferior_python_version < (3, 0):
expected = 'u' + expected
self.assertEqual(type(unicode_string), libpython.PyUnicodeObjectPtr)
self.assertEqual(self.get_repr(unicode_string), expected)
def test_int(self):
if inferior_python_version < (3, 0):
intval = self.pyobject_fromcode('PyInt_FromLong(100)')
self.assertEqual(type(intval), libpython.PyIntObjectPtr)
self.assertEqual(self.get_repr(intval), '100')
def test_long(self):
longval = self.pyobject_fromcode('PyLong_FromLong(200)',
gdbvar='longval')
assert gdb.parse_and_eval('$longval->ob_type == &PyLong_Type')
self.assertEqual(type(longval), libpython.PyLongObjectPtr)
self.assertEqual(self.get_repr(longval), '200')
def test_frame_type(self):
frame = self.pyobject_fromcode('PyEval_GetFrame()')
self.assertEqual(type(frame), libpython.PyFrameObjectPtr)
| bsd-3-clause | -1,975,595,020,977,279,000 | 33.513043 | 79 | 0.64651 | false |
cs243iitg/vehicle-webapp | webapp/vms/views.py | 1 | 12301 | from django.shortcuts import render, render_to_response
from django.http import HttpResponse, HttpResponseRedirect
from django.views import generic
from django.core.context_processors import csrf
from django.views.decorators.csrf import csrf_protect
from django.contrib import auth
from django.contrib import messages
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import get_object_or_404
from .forms import TheftForm, StudentVehicleForm, SuspiciousVehicleForm
from .models import TheftReport, StudentVehicle, BusTiming, EmployeeVehicle, SuspiciousVehicle, Guard, ParkingSlot, StudentCycle, OnDutyGuard, PersonPass
from datetime import datetime
import requests, threading
from vms import pdf
def login(request):
"""
Displays login page at the start
"""
c = {}
c.update(csrf(request))
if request.method == 'POST':
        return render_to_response('vms/login.html', {
            'form_errors': "Invalid username or password.",
        })
else:
return render_to_response('vms/login.html', c)
@login_required(login_url="/vms")
def logout(request):
"""
Logs the user out, if he is logged in.
"""
auth.logout(request)
return HttpResponseRedirect('/vms/', {
'form_errors': "You've succesfully logged out."
})
def auth_view(request):
"""
Authenticates user from the username and password from POST -- REQUIRES CHANGES DEPENDING ON MODEL
"""
username = request.POST.get('username', '')
password = request.POST.get('password', '')
user = auth.authenticate(username=username, password=password)
if user is not None:
auth.login(request, user)
return HttpResponseRedirect('/vms/users/dashboard') #CHANGE THIS!! -- SHOULD WORK ACCORDING TO USER
else:
return HttpResponseRedirect('/vms/')
#------------------------------------------------------------
# Theft Reporting for User
#------------------------------------------------------------
# @login_required(login_url="/vms/")
# def home(request):
# """
# Home page for user, with his previous tasks
# """
# today = str.lower(datetime.now().strftime("%A"))
# buses = sorted(j for j in BusTiming.objects.all() if (j.from_time >= datetime.now().time() and filter(lambda x: str(x).lower() == today, j.availability.all()) ))
# return render(request, 'vms/dashboard.html',{
# 'username': request.user.first_name,
# 'is_user': True,
# 'user': request.user,
# 'buses': buses[0:3],
# })
@login_required(login_url="/vms/")
def home(request):
"""
Home page for user, with his previous tasks
"""
today = str.lower(datetime.now().strftime("%A"))
# buses = sorted(j for j in BusTiming.objects.all() if (j.from_time >= datetime.now().time() and filter(lambda x: str(x).lower() == today, j.availability.all()) ))
if not request.user.is_superuser == True:
num_suspicious = len(SuspiciousVehicle.objects.filter(reporter=request.user))
x1 = [j for j in StudentVehicle.objects.all() if (j.user == request.user and j.registered_with_security_section==None)]
num_pending = len(x1)
x2 = [j.available_slots for j in ParkingSlot.objects.all()]
num_guards = sum(x2)
x3 = [j for j in TheftReport.objects.all() if j.reporter==request.user]
num_thefts = len(x3)
return render(request, 'vms/dashboard.html',{
'username': request.user.first_name,
'user': request.user,
# 'buses': buses[0:3],
'num_suspicious': num_suspicious,
'num_pending': num_pending,
'num_guards': num_guards,
'num_thefts': num_thefts,
'user_thefts': x3,
})
else:
num_suspicious = len(SuspiciousVehicle.objects.all())
num_pending = len(StudentVehicle.objects.filter(registered_with_security_section=None)) + len(EmployeeVehicle.objects.filter(registered_with_security_section=None))
num_approved = len(StudentVehicle.objects.filter(registered_with_security_section=True)) + len(EmployeeVehicle.objects.filter(registered_with_security_section=True))
num_denied = len(StudentVehicle.objects.filter(registered_with_security_section=False)) + len(EmployeeVehicle.objects.filter(registered_with_security_section=False))
num_guards = len(OnDutyGuard.objects.all())
num_thefts = len(TheftReport.objects.filter(status="Submitted"))
passes=PersonPass.objects.all()
total_blocked = len(passes.filter(is_blocked=True))
total_issued = len(passes.filter(is_blocked=False))
x = [j for j in passes if j.expiry_date < datetime.now().date()]
total_expired = len(x)
return render(request, 'vms/dashboard.html',{
'username': request.user.first_name,
'is_user': True,
'user': request.user,
# 'buses': buses[0:3],
'num_suspicious': num_suspicious,
'num_pending': num_pending,
'num_guards': num_guards,
'num_thefts': num_thefts,
'num_approved': num_approved,
'num_denied': num_denied,
'total_issued': total_issued,
'total_expired': total_expired,
'total_blocked': total_blocked,
})
@login_required(login_url="/vms/")
def busdetails(request):
return render(request, 'vms/busdetails.html')
#Ayush Mananiya
#----------thread function for sending sms---------------------------------------------
def send_sms(message, numbers):
proxy = "http://sumeet.ranka:weh,hftg@202.141.80.24:3128" #change the username and password
status1=''
for i in numbers:
response = requests.get("https://site2sms.p.mashape.com/index.php?msg="+message+"&phone="+str(i)+"&pwd=CS243iitg&uid=8011035945",headers={"X-Mashape-Key": "CW4gX5MRw2mshX6uxzLHMxEVoB0Op1v4cMrjsnZoeRXbk3LD46", "Accept": "application/json"},proxies={"http":proxy,"https":proxy,"ftp":proxy},)
#-------------------------end-----------------------------------------------------
@login_required(login_url="/vms/")
def theft_report_form(request):
"""
Displays theft report form for user -- NOTE: This form is common to admin and user
"""
if request.method == 'POST':
form = TheftForm(request.POST)
if form.is_valid():
task = form.save(commit = False)
task.reporter = request.user
if request.user.user.is_student:
vehicles=StudentVehicle.objects.filter(user=request.user)
cycles=StudentCycle.objects.filter(user=request.user)
                vehicle = None
                cycle = 0
                try:
                    vehicle = StudentVehicle.objects.get(vehicle_pass_no=task.vehicle_pass_no)
                except:
                    try:
                        vehicle = StudentCycle.objects.get(cycle_pass_no=task.vehicle_pass_no)
                        cycle = 1
                    except:
                        vehicle = None
                if vehicle is None:
                    message = "Vehicle does not belong to you."
                    return render(request, "vms/theft.html", {
                        'message': message,
                        'user': request.user,
                        'form': form,
                    })
else:
vehicles=EmployeeVehicle.objects.filter(user=request.user)
try:
vehicle = EmployeeVehicle.objects.get(vehicle_pass_no=task.vehicle_pass_no)
cycle=0
except:
vehicle = None
message = "Vehicle does not belong to you."
return render(request, "vms/theft.html",{
'message':message,
'user':request.user,
'form':form,
})
if vehicle != None and vehicle in vehicles:
if request.user.user.is_student:
task.stud_vehicle=vehicle
else:
task.emp_vehicle=vehicle
#ayush Mananiya
#my funct started--------------------------------------------------------------------------
if cycle == 0:
message = vehicle.make_and_model +' '+ task.vehicle_pass_no + ' is stolen from ' + task.theft_place +' at '+ str(task.theft_time.strftime('%d-%b-%Y %H:%M')) #extract the form fields and generate message text
else:
message = vehicle.cycle_model+ ' '+vehicle.cycle_color+' '+' '+'is stolen from '+task.theft_place+' at '+ str(task.theft_time)
numbers = list(Guard.objects.values_list('guard_phone_number', flat=True)) #retrieves the phone numbers of all the guards
sms_thread = threading.Thread(target=send_sms, args=(message, numbers)) #threading
sms_thread.start()
#ended here--------------------------------------------------------------------------------------------------------------------------
task.save()
messages.success(request, 'Your theft report is submitted.')
return render(request, "vms/theft.html",{
'message':"Theft Report successfully submitted.",
'user':request.user,
'form':form,
'success':True,
'id':task.id,
})
else:
form = TheftForm()
return render(request, "vms/theft.html", {
'form':form,
'user':request.user,
})
@login_required(login_url="/vms/")
def generate_report(request, report_id):
rep = TheftReport.objects.filter(id=report_id)
if len(rep) > 0:
# print rep[0].theft_time
return pdf.pdf_gen(rep[0])
return HttpResponse("done")
@login_required(login_url="/vms/")
def vehicles_missing(request):
"""
Displays to users their theft reports
"""
reports = TheftReport.objects.all()
return render(request, "vms/theft_reports.html", {
'reports': reports,
})
@login_required(login_url="/vms/")
def parking_slot_availability(request):
"""
Function to serve the parking spaces that are available
"""
return render(request, 'users/parking.html', {
'pslots': ParkingSlot.objects.all(),
})
@login_required(login_url="/vms/")
def suspicious_vehicle_report_form(request):
"""
Function to report suspicious vehicles
"""
if request.method == 'POST':
form = SuspiciousVehicleForm(request.POST, request.FILES)
if form.is_valid():
task = form.save(commit = False)
task.reporter=request.user
task.save()
return render(request, 'vms/suspicious.html',{
'user':request.user,
'form':form,
'message':"Vehicle has been reported. Thanks for the caution."
})
else:
form=SuspiciousVehicleForm()
return render(request, 'vms/suspicious.html', {
'user': request.user,
'form':form,
})
@login_required(login_url="/vms/")
def suspicious_vehicles(request):
"""
Function to allow users to view all suspicious reported activity
"""
str1=""
if request.POST:
SuspiciousVehicle.objects.get(id=request.POST['Delete']).delete()
vehicles = SuspiciousVehicle.objects.all()
messages.success(request,"Report for suspicious activity is deleted")
return render(request, 'vms/suspicious_vehicles.html',{
'user':request.user,
'vehicles':vehicles,
})
else:
vehicles = SuspiciousVehicle.objects.all()
return render(request, 'vms/suspicious_vehicles.html', {
'user': request.user,
'vehicles':vehicles,
})
@login_required(login_url="/vms/")
def delete_suspicious_vehicles(request, suspicious_vehicle_id):
SuspiciousVehicle.objects.get(id=suspicious_vehicle_id).delete()
pass
| mit | -7,569,760,152,077,840,000 | 39.463816 | 297 | 0.576538 | false |
jambonrose/DjangoUnleashed-1.8 | blog/migrations/0002_post_data.py | 1 | 4440 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from django.db import migrations, models
POSTS = [
{
"title": "Django 1.0 Release",
"slug": "django-10-released",
"pub_date": date(2008, 9, 3),
"startups": [],
"tags": ["django", "python", "web"],
"text": "THE Web Framework.",
},
{
"title": "Simple Robots for Sale",
"slug": "simple-robots-for-sale",
"pub_date": date(2011, 2, 21),
"startups": ["simple-robots"],
"tags": ["augmented-reality", "python"],
"text":
"If only they would make "
"spider bots.",
},
{
"title": "Django Training",
"slug": "django-training",
"pub_date": date(2013, 1, 18),
"startups": ["jambon-software"],
"tags": ["django"],
"text":
"Want to learn Django in a class "
"setting? JamBon Software offers "
"hands-on courses in the web "
"framework. Just looking for help? "
"They'll consult on your web and "
"mobile products and can also be "
"hired for end-to-end development.",
},
{
"title": "Django 1.8 Release",
"slug": "django-18-released",
"pub_date": date(2015, 4, 1),
"startups": [],
"tags": ["django", "python", "web"],
"text": "Django 1.8 is Django's newest "
"version, and the next version "
"slated for Long-Term Support "
"(LTS). LTS means that Django 1.8 "
"will be supported for longer than "
"regular versions: Django core "
"developers will specify a single "
"release as LTS, and then continue "
"to update that version regardless "
"of the usual release cycle. This "
"will last until they pick a new "
"LTS version, which typically "
"happens every 3 to 4 years. The "
"last LTS version was 1.4, "
"released in March 2012, which "
"will stop being supported in "
"October 2015.\n\n"
"For more information: \n"
"http://andrewsforge.com/article/"
"upgrading-django-to-17/part-1-"
"introduction-and-django-releases/",
},
{
"title": "More Django Info",
"slug": "more-django-info",
"pub_date": date(2015, 4, 8),
"startups": ["jambon-software"],
"tags": ["django", "web"],
"text":
"Remember that the official websites "
"for Django and this book contain a "
"number of extra resources.\n\n"
"https://djangoproject.com\n"
"https://django-unleashed.com\n\n"
"Want more Django info? "
"There's always my personal blog!\n\n"
"https://AndrewsForge.com",
},
{
"title": "New Django Version",
"slug": "new-django-version",
"pub_date": date(2020, 5, 15),
"startups": [],
"tags": ["django", "python", "web"],
"text":
"Better integration with "
"HTML Boilerstrap 9.",
},
]
def add_post_data(apps, schema_editor):
Post = apps.get_model('blog', 'Post')
Startup = apps.get_model(
'organizer', 'Startup')
Tag = apps.get_model('organizer', 'Tag')
for post_dict in POSTS:
post = Post.objects.create(
title=post_dict['title'],
slug=post_dict['slug'],
text=post_dict['text'])
post.pub_date = post_dict['pub_date']
post.save()
for tag_slug in post_dict['tags']:
post.tags.add(
Tag.objects.get(
slug=tag_slug))
for startup_slug in post_dict['startups']:
post.startups.add(
Startup.objects.get(
slug=startup_slug))
def remove_post_data(apps, schema_editor):
Post = apps.get_model('blog', 'Post')
for post_dict in POSTS:
post = Post.objects.get(
slug=post_dict['slug'])
post.delete()
class Migration(migrations.Migration):
dependencies = [
('blog', '0001_initial'),
('organizer', '0003_startup_data'),
]
operations = [
migrations.RunPython(
add_post_data,
remove_post_data)
]
| bsd-2-clause | -5,321,216,935,047,657,000 | 30.714286 | 50 | 0.510135 | false |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/event_handling/zoom_window.py | 1 | 2014 | """
===========
Zoom Window
===========
This example shows how to connect events in one window, for example, a mouse
press, to another figure window.
If you click on a point in the first window, the x and y limits of the
second will be adjusted so that the center of the zoom in the second
window will be the x,y coordinates of the clicked point.
Note that the sizes of the circles in the scatter are defined in
points**2, so their size is independent of the zoom.
"""
import matplotlib.pyplot as plt #import figure, show
import numpy as np
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
figsrc = plt.figure()
figzoom = plt.figure()
axsrc = figsrc.add_subplot(111, xlim=(0, 1), ylim=(0, 1), autoscale_on=False)
axzoom = figzoom.add_subplot(111, xlim=(0.45, 0.55), ylim=(0.4, .6),
autoscale_on=False)
axsrc.set_title('Click to zoom')
axzoom.set_title('zoom window')
x, y, s, c = np.random.rand(4, 200)
s *= 200
axsrc.scatter(x, y, s, c)
axzoom.scatter(x, y, s, c)
def onpress(event):
if event.button != 1:
return
x, y = event.xdata, event.ydata
axzoom.set_xlim(x - 0.1, x + 0.1)
axzoom.set_ylim(y - 0.1, y + 0.1)
figzoom.canvas.draw()
figsrc.canvas.mpl_connect('button_press_event', onpress)
pltshow(plt)
| mit | -4,583,119,347,413,692,400 | 24.493671 | 82 | 0.621152 | false |
Perferom/android_external_chromium_org | chrome/test/pyautolib/remote_host.py | 80 | 3108 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import cStringIO
import os
import pickle
import socket
import sys
import pyauto
class RemoteHost(object):
"""Class used as a host for tests that use the PyAuto RemoteProxy.
This class fires up a listener which waits for a connection from a RemoteProxy
and receives method call requests. Run python remote_host.py
remote_host.RemoteHost.RunHost to start up a PyAuto remote instance that you
can connect to and automate using pyauto.RemoteProxy.
"""
def __init__(self, host, *args, **kwargs):
self.StartSocketServer(host)
def StartSocketServer(self, host):
listening_socket = socket.socket()
listening_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listening_socket.bind(host)
listening_socket.listen(1)
print 'Listening for incoming connections on port %d.' % host[1]
self._socket, address = listening_socket.accept()
print 'Accepted connection from %s:%d.' % address
while self.Connected():
self._HandleRPC()
def StopSocketServer(self):
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except socket.error:
pass
self._socket = None
def Connected(self):
return self._socket
def CreateTarget(self, target_class):
"""Creates an instance of the specified class to serve as the RPC target.
RPC calls can be made on the target.
"""
self.target = target_class()
def _HandleRPC(self):
"""Receives a method call request over the socket and executes the method.
This method captures stdout and stderr for the duration of the method call,
and sends those, the return value, and any thrown exceptions back to the
RemoteProxy.
"""
# Receive request.
request = self._socket.recv(4096)
if not request:
self.StopSocketServer()
return
request = pickle.loads(request)
# Redirect output to strings.
old_stdout = sys.stdout
old_stderr = sys.stderr
sys.stdout = stdout = cStringIO.StringIO()
sys.stderr = stderr = cStringIO.StringIO()
# Make requested method call.
result = None
exception = None
try:
if getattr(self, request[0], None):
result = getattr(self, request[0])(*request[1], **request[2])
else:
result = getattr(self.target, request[0])(*request[1], **request[2])
except BaseException, e:
exception = (e.__class__.__name__, str(e))
# Put output back to the way it was before.
sys.stdout = old_stdout
sys.stderr = old_stderr
# Package up and send the result of the method call.
response = pickle.dumps((result, stdout.getvalue(), stderr.getvalue(),
exception))
if self._socket.send(response) != len(response):
self.StopSocketServer()
if __name__ == '__main__':
pyauto_suite = pyauto.PyUITestSuite(sys.argv)
RemoteHost(('', 7410))
del pyauto_suite
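# Hedged illustration (added; not part of the original Chromium file): a
# minimal client for the pickle-over-socket protocol handled by _HandleRPC
# above. The method name 'GetBrowserInfo' is only a placeholder for whatever
# methods the RPC target actually exposes.
#
#   import pickle, socket
#   s = socket.socket()
#   s.connect(('localhost', 7410))
#   s.send(pickle.dumps(('GetBrowserInfo', [], {})))
#   result, out_text, err_text, exception = pickle.loads(s.recv(4096))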
| bsd-3-clause | -9,016,086,451,230,564,000 | 29.772277 | 80 | 0.673745 | false |
HybridF5/jacket | jacket/storage/keymgr/conf_key_mgr.py | 1 | 4888 | # Copyright (c) 2013 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
An implementation of a key manager that reads its key from the project's
configuration options.
This key manager implementation provides limited security, assuming that the
key remains secret. Using the volume encryption feature as an example,
encryption provides protection against a lost or stolen disk, assuming that
the configuration file that contains the key is not stored on the disk.
Encryption also protects the confidentiality of data as it is transmitted via
iSCSI from the compute host to the storage host (again assuming that an
attacker who intercepts the data does not know the secret key).
Because this implementation uses a single, fixed key, it proffers no
protection once that key is compromised. In particular, different volumes
encrypted with a key provided by this key manager actually share the same
encryption key so *any* volume can be decrypted once the fixed key is known.
"""
import array
import binascii
from oslo_config import cfg
from oslo_log import log as logging
from jacket.storage import exception
from jacket.storage.i18n import _, _LW
from jacket.storage.keymgr import key
from jacket.storage.keymgr import key_mgr
key_mgr_opts = [
cfg.StrOpt('fixed_key',
help='Fixed key returned by key manager, specified in hex'),
]
CONF = cfg.CONF
CONF.register_opts(key_mgr_opts, group='storage_keymgr')
LOG = logging.getLogger(__name__)
class ConfKeyManager(key_mgr.KeyManager):
"""Key Manager that supports one key defined by the fixed_key conf option.
This key manager implementation supports all the methods specified by the
key manager interface. This implementation creates a single key in response
to all invocations of create_key. Side effects (e.g., raising exceptions)
for each method are handled as specified by the key manager interface.
"""
def __init__(self):
super(ConfKeyManager, self).__init__()
self.key_id = '00000000-0000-0000-0000-000000000000'
def _generate_key(self, **kwargs):
_hex = self._generate_hex_key(**kwargs)
key_list = array.array('B', binascii.unhexlify(_hex)).tolist()
return key.SymmetricKey('AES', key_list)
def _generate_hex_key(self, **kwargs):
if CONF.storage_keymgr.fixed_key is None:
LOG.warning(
_LW('config option storage_keymgr.fixed_key has not been defined:'
' some operations may fail unexpectedly'))
raise ValueError(_('storage_keymgr.fixed_key not defined'))
return CONF.storage_keymgr.fixed_key
def create_key(self, ctxt, **kwargs):
"""Creates a key.
This implementation returns a UUID for the created key. A
NotAuthorized exception is raised if the specified context is None.
"""
if ctxt is None:
raise exception.NotAuthorized()
return self.key_id
def store_key(self, ctxt, key, **kwargs):
"""Stores (i.e., registers) a key with the key manager."""
if ctxt is None:
raise exception.NotAuthorized()
if key != self._generate_key():
raise exception.KeyManagerError(
reason="cannot store arbitrary keys")
return self.key_id
def copy_key(self, ctxt, key_id, **kwargs):
if ctxt is None:
raise exception.NotAuthorized()
return self.key_id
def get_key(self, ctxt, key_id, **kwargs):
"""Retrieves the key identified by the specified id.
This implementation returns the key that is associated with the
specified UUID. A NotAuthorized exception is raised if the specified
context is None; a KeyError is raised if the UUID is invalid.
"""
if ctxt is None:
raise exception.NotAuthorized()
if key_id != self.key_id:
raise KeyError(key_id)
return self._generate_key()
def delete_key(self, ctxt, key_id, **kwargs):
if ctxt is None:
raise exception.NotAuthorized()
if key_id != self.key_id:
raise exception.KeyManagerError(
reason="cannot delete non-existent key")
LOG.warning(_LW("Not deleting key %s"), key_id)
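# Hedged usage sketch (added for illustration; not in the original file).
# Assuming the fixed key has been configured as hex in the [storage_keymgr]
# section (fixed_key = 1a2b3c...), every call hands back the same key:
#
#   mgr = ConfKeyManager()
#   key_id = mgr.create_key(ctxt)        # always the all-zero UUID
#   sym_key = mgr.get_key(ctxt, key_id)  # AES SymmetricKey built from fixed_key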
| apache-2.0 | -2,527,113,439,486,799,400 | 34.941176 | 82 | 0.683715 | false |
mancoast/CPythonPyc_test | cpython/263_test_nis.py | 58 | 1317 | from test import test_support
import unittest
import nis
class NisTests(unittest.TestCase):
def test_maps(self):
try:
maps = nis.maps()
except nis.error, msg:
# NIS is probably not active, so this test isn't useful
if test_support.verbose:
print "Test Skipped:", msg
            # Can't raise TestSkipped as regrtest only recognizes the exception
            # at import time.
return
try:
# On some systems, this map is only accessible to the
# super user
maps.remove("passwd.adjunct.byname")
except ValueError:
pass
done = 0
for nismap in maps:
mapping = nis.cat(nismap)
for k, v in mapping.items():
if not k:
continue
if nis.match(k, nismap) != v:
self.fail("NIS match failed for key `%s' in map `%s'" % (k, nismap))
else:
# just test the one key, otherwise this test could take a
# very long time
done = 1
break
if done:
break
def test_main():
test_support.run_unittest(NisTests)
if __name__ == '__main__':
test_main()
| gpl-3.0 | -118,805,081,679,479,950 | 29.627907 | 88 | 0.493546 | false |
numo16/wesnoth | data/tools/terrain2wiki.py | 25 | 3386 | #!/usr/bin/python
# -*- coding:utf-8 -*-
"""
A script to create the "Terrain Table" on the TerrainCodeTableWML wiki page.
Add the output to the wiki whenever a new terrain is added to mainline.
"""
from __future__ import with_statement # For python < 2.6
import os
import sys
import re
try:
import argparse
except ImportError:
print('Please install argparse by running "easy_install argparse"')
sys.exit(1)
# Where to get terrain images
terrain_url = "https://raw.github.com/wesnoth/wesnoth/master/data/core/images/terrain/%s.png"
def parse_terrain(data):
"""
Parses the terrains. Input looks like this:
[terrain_type]
symbol_image=water/ocean-grey-tile
id=deep_water_gray
editor_name= _ "Gray Deep Water"
string=Wog
aliasof=Wo
submerge=0.5
editor_group=water
[/terrain_type]
Output is a text in wiki format.
"""
# Remove all comments.
data = "\n".join([i for i in data.split("\n") if not i.startswith("#")])
terrains = re.compile("\[terrain_type\](.*?)\[\/terrain_type\]", re.DOTALL).findall(data)
data = """{{AutogeneratedWML}}{| border="1"
!terrain
!name
!string
!alias of
!editor group
"""
for i in terrains:
# Strip unneeded things.
i = i[5:]
i = i.split("\n ")
# Don't parse special files that are hacks. They shouldn't be used
# directly. (They're only there to make aliasing work.)
if i[0].startswith(" "):
continue
# This avoids problems due to additional = in strings. Exact string
# removal does not matter as long as we do not print help_topic_text
# in the wiki page.
removeus = ("<italic>text='", "'</italic>", "<ref>dst='", "text='", "'</ref>")
for text in removeus:
i = [a.replace(text, "") for a in i]
# Create a dictionary of key and values
content = dict([v.strip().split("=") for v in i])
# Hidden things shouldn't be displayed
if 'hidden' in content:
continue
data += """|-
| %s
| %s
| <code>%s</code>
| <code>%s</code>
| %s
""" % (
terrain_url % (content['editor_image'] if 'editor_image' in content else content['symbol_image']),
content['editor_name'][4:-1] if 'editor_name' in content else content['name'][4:-1],
content['string'].replace("# wmllint: ignore", "").replace("|", "|"),
content['aliasof'].replace("|", "|") if 'aliasof' in content else "",
content['editor_group'])
data += "|}"
return data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='terrain2wiki is a tool to\
convert the terrain codes located in terrain.cfg to wiki formatted text.')
parser.add_argument('-f', '--file', default='data/core/terrain.cfg',
dest='path', help="The location of the terrain.cfg file.")
parser.add_argument('-o', '--output', default='/tmp/TerrainCodeTableWML',
dest='output_path', help="The location of the output file.")
args = parser.parse_args()
path = args.path
output_path = args.output_path
if not os.path.exists(path) or not path.endswith('.cfg'):
print("Invalid path: '%s' does not exist or not a .cfg file.") % path
sys.exit(1)
with open(path, "r") as input_file:
data = input_file.read()
data = parse_terrain(data)
with open(output_path, "w") as output:
output.write(data)
| gpl-2.0 | -6,830,693,503,334,181,000 | 30.351852 | 98 | 0.619905 | false |
talele08/appengine-mapreduce | python/src/mapreduce/property_range.py | 48 | 12527 | #!/usr/bin/env python
"""A class representing entity property range."""
# pylint: disable=g-bad-name
# pylint: disable=g-import-not-at-top
import datetime
from google.appengine.ext import ndb
from google.appengine.ext import db
from mapreduce import errors
from mapreduce import util
__all__ = [
"should_shard_by_property_range",
"PropertyRange"]
def should_shard_by_property_range(filters):
"""Returns whether these filters suggests sharding by property range.
Args:
filters: user supplied filters. Each filter should be a list or tuple of
format (<property_name_as_str>, <query_operator_as_str>,
<value_of_certain_type>). Value type is up to the property's type.
Returns:
    True if these filters suggest sharding by property range. False
    otherwise.
"""
if not filters:
return False
for f in filters:
if f[1] != "=":
return True
return False
class PropertyRange(object):
"""A class that represents a range on a db.Model's property.
It supports splitting the range into n shards and generating a query that
returns entities within that range.
"""
def __init__(self,
filters,
model_class_path):
"""Init.
Args:
filters: user supplied filters. Each filter should be a list or tuple of
format (<property_name_as_str>, <query_operator_as_str>,
<value_of_certain_type>). Value type should satisfy the property's type.
model_class_path: full path to the model class in str.
"""
self.filters = filters
self.model_class_path = model_class_path
self.model_class = util.for_name(self.model_class_path)
self.prop, self.start, self.end = self._get_range_from_filters(
self.filters, self.model_class)
@classmethod
def _get_range_from_filters(cls, filters, model_class):
"""Get property range from filters user provided.
This method also validates there is one and only one closed range on a
single property.
Args:
filters: user supplied filters. Each filter should be a list or tuple of
format (<property_name_as_str>, <query_operator_as_str>,
<value_of_certain_type>). Value type should satisfy the property's type.
model_class: the model class for the entity type to apply filters on.
Returns:
a tuple of (property, start_filter, end_filter). property is the model's
field that the range is about. start_filter and end_filter define the
start and the end of the range. (None, None, None) if no range is found.
Raises:
BadReaderParamsError: if any filter is invalid in any way.
"""
if not filters:
return None, None, None
range_property = None
start_val = None
end_val = None
start_filter = None
end_filter = None
for f in filters:
prop, op, val = f
if op in [">", ">=", "<", "<="]:
if range_property and range_property != prop:
raise errors.BadReaderParamsError(
"Range on only one property is supported.")
range_property = prop
if val is None:
raise errors.BadReaderParamsError(
"Range can't be None in filter %s", f)
if op in [">", ">="]:
if start_val is not None:
raise errors.BadReaderParamsError(
"Operation %s is specified more than once.", op)
start_val = val
start_filter = f
else:
if end_val is not None:
raise errors.BadReaderParamsError(
"Operation %s is specified more than once.", op)
end_val = val
end_filter = f
elif op != "=":
raise errors.BadReaderParamsError(
"Only < <= > >= = are supported as operation. Got %s", op)
if not range_property:
return None, None, None
if start_val is None or end_val is None:
raise errors.BadReaderParamsError(
"Filter should contains a complete range on property %s",
range_property)
if issubclass(model_class, db.Model):
property_obj = model_class.properties()[range_property]
else:
property_obj = (
model_class._properties[ # pylint: disable=protected-access
range_property])
supported_properties = (
_DISCRETE_PROPERTY_SPLIT_FUNCTIONS.keys() +
_CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS.keys())
if not isinstance(property_obj, tuple(supported_properties)):
raise errors.BadReaderParamsError(
"Filtered property %s is not supported by sharding.", range_property)
if not start_val < end_val:
raise errors.BadReaderParamsError(
"Start value %s should be smaller than end value %s",
start_val, end_val)
return property_obj, start_filter, end_filter
def split(self, n):
"""Evenly split this range into contiguous, non overlapping subranges.
Args:
n: number of splits.
Returns:
      a list of contiguous, non overlapping sub PropertyRanges. May contain
      fewer than n when the range cannot be split that finely.
"""
new_range_filters = []
name = self.start[0]
prop_cls = self.prop.__class__
if prop_cls in _DISCRETE_PROPERTY_SPLIT_FUNCTIONS:
splitpoints = _DISCRETE_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
self.start[2], self.end[2], n,
self.start[1] == ">=", self.end[1] == "<=")
start_filter = (name, ">=", splitpoints[0])
for p in splitpoints[1:]:
end_filter = (name, "<", p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, ">=", p)
else:
splitpoints = _CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS[prop_cls](
self.start[2], self.end[2], n)
start_filter = self.start
for p in splitpoints:
end_filter = (name, "<", p)
new_range_filters.append([start_filter, end_filter])
start_filter = (name, ">=", p)
new_range_filters.append([start_filter, self.end])
for f in new_range_filters:
f.extend(self._equality_filters)
return [self.__class__(f, self.model_class_path) for f in new_range_filters]
def make_query(self, ns):
"""Make a query of entities within this range.
Query options are not supported. They should be specified when the query
is run.
Args:
ns: namespace of this query.
Returns:
a db.Query or ndb.Query, depends on the model class's type.
"""
if issubclass(self.model_class, db.Model):
query = db.Query(self.model_class, namespace=ns)
for f in self.filters:
query.filter("%s %s" % (f[0], f[1]), f[2])
else:
query = self.model_class.query(namespace=ns)
for f in self.filters:
query = query.filter(ndb.FilterNode(*f))
return query
@property
def _equality_filters(self):
return [f for f in self.filters if f[1] == "="]
def to_json(self):
return {"filters": self.filters,
"model_class_path": self.model_class_path}
@classmethod
def from_json(cls, json):
return cls(json["filters"], json["model_class_path"])
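# Hedged usage sketch (added; model and property names are illustrative only):
#   filters = [("created", ">=", datetime.datetime(2015, 1, 1)),
#              ("created", "<", datetime.datetime(2015, 2, 1)),
#              ("status", "=", "active")]
#   prange = PropertyRange(filters, "models.Entry")
#   for sub in prange.split(4):         # 4 contiguous, non overlapping shards
#       query = sub.make_query(ns="")   # db.Query/ndb.Query limited to that slice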
def _split_datetime_property(start, end, n, include_start, include_end):
  # Datastore stores datetimes with microsecond precision.
if not include_start:
start += datetime.timedelta(microseconds=1)
if include_end:
end += datetime.timedelta(microseconds=1)
delta = end - start
stride = delta // n
if stride <= datetime.timedelta():
raise ValueError("Range too small to split: start %r end %r", start, end)
splitpoints = [start]
previous = start
for _ in range(n-1):
point = previous + stride
if point == previous or point > end:
continue
previous = point
splitpoints.append(point)
if end not in splitpoints:
splitpoints.append(end)
return splitpoints
def _split_float_property(start, end, n):
delta = float(end - start)
stride = delta / n
if stride <= 0:
raise ValueError("Range too small to split: start %r end %r", start, end)
splitpoints = []
for i in range(1, n):
splitpoints.append(start + i * stride)
return splitpoints
def _split_integer_property(start, end, n, include_start, include_end):
if not include_start:
start += 1
if include_end:
end += 1
delta = float(end - start)
stride = delta / n
if stride <= 0:
raise ValueError("Range too small to split: start %r end %r", start, end)
splitpoints = [start]
previous = start
for i in range(1, n):
point = start + int(round(i * stride))
if point == previous or point > end:
continue
previous = point
splitpoints.append(point)
if end not in splitpoints:
splitpoints.append(end)
return splitpoints
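# Worked example (added for illustration, not in the original source):
#   _split_integer_property(0, 10, 3, True, False) -> [0, 3, 7, 10]
# i.e. three include-start/exclude-end ranges [0, 3), [3, 7) and [7, 10).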
def _split_string_property(start, end, n, include_start, include_end):
try:
start = start.encode("ascii")
end = end.encode("ascii")
except UnicodeEncodeError, e:
raise ValueError("Only ascii str is supported.", e)
return _split_byte_string_property(start, end, n, include_start, include_end)
# The alphabet splitting supports.
_ALPHABET = "".join(chr(i) for i in range(128))
# String length determines how many unique strings we can choose from.
# We can't split into more shards than this: len(_ALPHABET)^_STRING_LENGTH
_STRING_LENGTH = 4
def _split_byte_string_property(start, end, n, include_start, include_end):
# Get prefix, suffix, and the real start/end to split on.
i = 0
for i, (s, e) in enumerate(zip(start, end)):
if s != e:
break
common_prefix = start[:i]
start_suffix = start[i+_STRING_LENGTH:]
end_suffix = end[i+_STRING_LENGTH:]
start = start[i:i+_STRING_LENGTH]
end = end[i:i+_STRING_LENGTH]
# Convert str to ord.
weights = _get_weights(_STRING_LENGTH)
start_ord = _str_to_ord(start, weights)
if not include_start:
start_ord += 1
end_ord = _str_to_ord(end, weights)
if include_end:
end_ord += 1
# Do split.
stride = (end_ord - start_ord) / float(n)
if stride <= 0:
raise ValueError("Range too small to split: start %s end %s", start, end)
splitpoints = [_ord_to_str(start_ord, weights)]
previous = start_ord
for i in range(1, n):
point = start_ord + int(round(stride * i))
if point == previous or point > end_ord:
continue
previous = point
splitpoints.append(_ord_to_str(point, weights))
end_str = _ord_to_str(end_ord, weights)
if end_str not in splitpoints:
splitpoints.append(end_str)
# Append suffix.
splitpoints[0] += start_suffix
splitpoints[-1] += end_suffix
return [common_prefix + point for point in splitpoints]
def _get_weights(max_length):
"""Get weights for each offset in str of certain max length.
Args:
max_length: max length of the strings.
Returns:
A list of ints as weights.
Example:
If max_length is 2 and alphabet is "ab", then we have order "", "a", "aa",
"ab", "b", "ba", "bb". So the weight for the first char is 3.
"""
weights = [1]
for i in range(1, max_length):
weights.append(weights[i-1] * len(_ALPHABET) + 1)
weights.reverse()
return weights
def _str_to_ord(content, weights):
"""Converts a string to its lexicographical order.
Args:
content: the string to convert. Of type str.
weights: weights from _get_weights.
Returns:
an int or long that represents the order of this string. "" has order 0.
"""
ordinal = 0
for i, c in enumerate(content):
ordinal += weights[i] * _ALPHABET.index(c) + 1
return ordinal
def _ord_to_str(ordinal, weights):
"""Reverse function of _str_to_ord."""
chars = []
for weight in weights:
if ordinal == 0:
return "".join(chars)
ordinal -= 1
index, ordinal = divmod(ordinal, weight)
chars.append(_ALPHABET[index])
return "".join(chars)
# discrete property split functions all have the same interface.
# They take start, end, shard_number n, include_start, include_end.
# They return at most n+1 points, forming n ranges.
# Each range should be include_start, exclude_end.
_DISCRETE_PROPERTY_SPLIT_FUNCTIONS = {
db.DateTimeProperty: _split_datetime_property,
db.IntegerProperty: _split_integer_property,
db.StringProperty: _split_string_property,
db.ByteStringProperty: _split_byte_string_property,
# ndb.
ndb.DateTimeProperty: _split_datetime_property,
ndb.IntegerProperty: _split_integer_property,
ndb.StringProperty: _split_string_property,
ndb.BlobProperty: _split_byte_string_property
}
_CONTINUOUS_PROPERTY_SPLIT_FUNCTIONS = {
db.FloatProperty: _split_float_property,
# ndb.
ndb.FloatProperty: _split_float_property,
}
| apache-2.0 | 1,427,884,614,381,187,000 | 29.703431 | 80 | 0.648918 | false |
telerik/cloudbase-init | cloudbaseinit/tests/plugins/windows/userdataplugins/test_urldownload.py | 1 | 2120 | # Copyright 2013 Mirantis Inc.
# Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import unittest
from cloudbaseinit.openstack.common import cfg
from cloudbaseinit.plugins.windows.userdataplugins import urldownload
CONF = cfg.CONF
class UrlDownloadHandlerTests(unittest.TestCase):
def setUp(self):
self._urldownload = urldownload.URLDownloadPlugin()
@mock.patch('cloudbaseinit.plugins.windows.userdatautils'
'.execute_user_data_script')
def _test_process(self, mock_execute_user_data_script, filename):
mock_part = mock.MagicMock()
mock_part.get_filename.return_value = filename
response = self._urldownload.process(mock_part)
mock_part.get_filename.assert_called_with()
if filename:
mock_execute_user_data_script.assert_called_with(
mock_part.get_payload())
self.assertEqual(response, mock_execute_user_data_script())
else:
self.assertTrue(response is None)
response = self._urldownload.process(mock_part)
mock_part.get_filename.assert_called_with()
if filename:
mock_execute_user_data_script.assert_called_with(
mock_part.get_payload())
self.assertEqual(response, mock_execute_user_data_script())
else:
self.assertTrue(response is None)
def test_process(self):
self._test_process(filename='cfn-userdata')
def test_process_content_not_supported(self):
self._test_process(filename=None)
| apache-2.0 | 9,086,089,707,488,034,000 | 34.333333 | 78 | 0.684906 | false |
thepiper/standoff | venv/lib/python2.7/site-packages/werkzeug/contrib/profiler.py | 362 | 5151 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.profiler
~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides a simple WSGI profiler middleware for finding
bottlenecks in web application. It uses the :mod:`profile` or
:mod:`cProfile` module to do the profiling and writes the stats to the
stream provided (defaults to stderr).
Example usage::
from werkzeug.contrib.profiler import ProfilerMiddleware
app = ProfilerMiddleware(app)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
import time
import os.path
try:
try:
from cProfile import Profile
except ImportError:
from profile import Profile
from pstats import Stats
available = True
except ImportError:
available = False
class MergeStream(object):
"""An object that redirects `write` calls to multiple streams.
Use this to log to both `sys.stdout` and a file::
f = open('profiler.log', 'w')
stream = MergeStream(sys.stdout, f)
profiler = ProfilerMiddleware(app, stream)
"""
def __init__(self, *streams):
if not streams:
raise TypeError('at least one stream must be given')
self.streams = streams
def write(self, data):
for stream in self.streams:
stream.write(data)
class ProfilerMiddleware(object):
"""Simple profiler middleware. Wraps a WSGI application and profiles
a request. This intentionally buffers the response so that timings are
more exact.
By giving the `profile_dir` argument, pstat.Stats files are saved to that
directory, one file per request. Without it, a summary is printed to
`stream` instead.
For the exact meaning of `sort_by` and `restrictions` consult the
:mod:`profile` documentation.
.. versionadded:: 0.9
Added support for `restrictions` and `profile_dir`.
:param app: the WSGI application to profile.
:param stream: the stream for the profiled stats. defaults to stderr.
:param sort_by: a tuple of columns to sort the result by.
    :param restrictions: a tuple of profiling restrictions, not used if dumping
to `profile_dir`.
:param profile_dir: directory name to save pstat files
"""
def __init__(self, app, stream=None,
sort_by=('time', 'calls'), restrictions=(), profile_dir=None):
if not available:
raise RuntimeError('the profiler is not available because '
'profile or pstat is not installed.')
self._app = app
self._stream = stream or sys.stdout
self._sort_by = sort_by
self._restrictions = restrictions
self._profile_dir = profile_dir
def __call__(self, environ, start_response):
response_body = []
def catching_start_response(status, headers, exc_info=None):
start_response(status, headers, exc_info)
return response_body.append
def runapp():
appiter = self._app(environ, catching_start_response)
response_body.extend(appiter)
if hasattr(appiter, 'close'):
appiter.close()
p = Profile()
start = time.time()
p.runcall(runapp)
body = b''.join(response_body)
elapsed = time.time() - start
if self._profile_dir is not None:
prof_filename = os.path.join(self._profile_dir,
'%s.%s.%06dms.%d.prof' % (
environ['REQUEST_METHOD'],
environ.get('PATH_INFO').strip(
'/').replace('/', '.') or 'root',
elapsed * 1000.0,
time.time()
))
p.dump_stats(prof_filename)
else:
stats = Stats(p, stream=self._stream)
stats.sort_stats(*self._sort_by)
self._stream.write('-' * 80)
self._stream.write('\nPATH: %r\n' % environ.get('PATH_INFO'))
stats.print_stats(*self._restrictions)
self._stream.write('-' * 80 + '\n\n')
return [body]
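# Hedged usage sketch (added for illustration; `make_app` is a placeholder
# application factory):
#
#   app = ProfilerMiddleware(make_app(), sort_by=('cumulative', 'calls'))
#   # or write one pstats file per request instead of printing a summary:
#   app = ProfilerMiddleware(make_app(), profile_dir='/tmp/profiles')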
def make_action(app_factory, hostname='localhost', port=5000,
threaded=False, processes=1, stream=None,
sort_by=('time', 'calls'), restrictions=()):
"""Return a new callback for :mod:`werkzeug.script` that starts a local
server with the profiler enabled.
::
from werkzeug.contrib import profiler
action_profile = profiler.make_action(make_app)
"""
def action(hostname=('h', hostname), port=('p', port),
threaded=threaded, processes=processes):
"""Start a new development server."""
from werkzeug.serving import run_simple
app = ProfilerMiddleware(app_factory(), stream, sort_by, restrictions)
run_simple(hostname, port, app, False, None, threaded, processes)
return action
| gpl-3.0 | 7,953,734,890,017,996,000 | 34.040816 | 82 | 0.580858 | false |
bcb/qutebrowser | tests/unit/misc/test_split.py | 1 | 6937 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Tests for qutebrowser.misc.split."""
import collections
import pytest
from qutebrowser.misc import split
# Most tests copied from Python's shlex.
# The original test data set was from shellwords, by Hartmut Goebel.
# Format: input/split|output|without|keep/split|output|with|keep/
test_data_str = r"""
one two/one|two/one| two/
one "two three" four/one|two three|four/one| "two three"| four/
one 'two three' four/one|two three|four/one| 'two three'| four/
one "two\" three" four/one|two" three|four/one| "two\" three"| four/
one 'two'\'' three' four/one|two' three|four/one| 'two'\'' three'| four/
one "two three/one|two three/one| "two three/
one 'two three/one|two three/one| 'two three/
one\/one\/one\/
one "two\/one|two\/one| "two\/
one /one/one| /
open -t i/open|-t|i/open| -t| i/
foo bar/foo|bar/foo| bar/
foo bar/foo|bar/ foo| bar/
foo bar /foo|bar/ foo| bar| /
foo bar bla fasel/foo|bar|bla|fasel/foo| bar| bla| fasel/
x y z xxxx/x|y|z|xxxx/x| y| z| xxxx/
\x bar/x|bar/\x| bar/
\ x bar/ x|bar/\ x| bar/
\ bar/ bar/\ bar/
foo \x bar/foo|x|bar/foo| \x| bar/
foo \ x bar/foo| x|bar/foo| \ x| bar/
foo \ bar/foo| bar/foo| \ bar/
foo "bar" bla/foo|bar|bla/foo| "bar"| bla/
"foo" "bar" "bla"/foo|bar|bla/"foo"| "bar"| "bla"/
"foo" bar "bla"/foo|bar|bla/"foo"| bar| "bla"/
"foo" bar bla/foo|bar|bla/"foo"| bar| bla/
foo 'bar' bla/foo|bar|bla/foo| 'bar'| bla/
'foo' 'bar' 'bla'/foo|bar|bla/'foo'| 'bar'| 'bla'/
'foo' bar 'bla'/foo|bar|bla/'foo'| bar| 'bla'/
'foo' bar bla/foo|bar|bla/'foo'| bar| bla/
blurb foo"bar"bar"fasel" baz/blurb|foobarbarfasel|baz/blurb| foo"bar"bar"fasel"| baz/
blurb foo'bar'bar'fasel' baz/blurb|foobarbarfasel|baz/blurb| foo'bar'bar'fasel'| baz/
""//""/
''//''/
foo "" bar/foo||bar/foo| ""| bar/
foo '' bar/foo||bar/foo| ''| bar/
foo "" "" "" bar/foo||||bar/foo| ""| ""| ""| bar/
foo '' '' '' bar/foo||||bar/foo| ''| ''| ''| bar/
\"/"/\"/
"\""/"/"\""/
"foo\ bar"/foo\ bar/"foo\ bar"/
"foo\\ bar"/foo\ bar/"foo\\ bar"/
"foo\\ bar\""/foo\ bar"/"foo\\ bar\""/
"foo\\" bar\"/foo\|bar"/"foo\\"| bar\"/
"foo\\ bar\" dfadf"/foo\ bar" dfadf/"foo\\ bar\" dfadf"/
"foo\\\ bar\" dfadf"/foo\\ bar" dfadf/"foo\\\ bar\" dfadf"/
"foo\\\x bar\" dfadf"/foo\\x bar" dfadf/"foo\\\x bar\" dfadf"/
"foo\x bar\" dfadf"/foo\x bar" dfadf/"foo\x bar\" dfadf"/
\'/'/\'/
'foo\ bar'/foo\ bar/'foo\ bar'/
'foo\\ bar'/foo\\ bar/'foo\\ bar'/
"foo\\\x bar\" df'a\ 'df"/foo\\x bar" df'a\ 'df/"foo\\\x bar\" df'a\ 'df"/
\"foo/"foo/\"foo/
\"foo\x/"foox/\"foo\x/
"foo\x"/foo\x/"foo\x"/
"foo\ "/foo\ /"foo\ "/
foo\ xx/foo xx/foo\ xx/
foo\ x\x/foo xx/foo\ x\x/
foo\ x\x\"/foo xx"/foo\ x\x\"/
"foo\ x\x"/foo\ x\x/"foo\ x\x"/
"foo\ x\x\\"/foo\ x\x\/"foo\ x\x\\"/
"foo\ x\x\\""foobar"/foo\ x\x\foobar/"foo\ x\x\\""foobar"/
"foo\ x\x\\"\'"foobar"/foo\ x\x\'foobar/"foo\ x\x\\"\'"foobar"/
"foo\ x\x\\"\'"fo'obar"/foo\ x\x\'fo'obar/"foo\ x\x\\"\'"fo'obar"/
"foo\ x\x\\"\'"fo'obar" 'don'\''t'/foo\ x\x\'fo'obar|don't/"foo\ x\x\\"\'"fo'obar"| 'don'\''t'/
"foo\ x\x\\"\'"fo'obar" 'don'\''t' \\/foo\ x\x\'fo'obar|don't|\/"foo\ x\x\\"\'"fo'obar"| 'don'\''t'| \\/
foo\ bar/foo bar/foo\ bar/
:-) ;-)/:-)|;-)/:-)| ;-)/
áéíóú/áéíóú/áéíóú/
"""
def _parse_split_test_data_str():
"""
Parse the test data set into a namedtuple to use in tests.
Returns:
A list of namedtuples with str attributes: input, keep, no_keep
"""
tuple_class = collections.namedtuple('TestCase', 'input, keep, no_keep')
for line in test_data_str.splitlines():
if not line:
continue
data = line.split('/')
item = tuple_class(input=data[0], keep=data[1].split('|'),
no_keep=data[2].split('|'))
yield item
yield tuple_class(input='', keep=[], no_keep=[])
class TestSplit:
"""Test split."""
@pytest.fixture(params=_parse_split_test_data_str(), ids=lambda e: e.input)
def split_test_case(self, request):
"""Fixture to automatically parametrize all depending tests.
It will use the test data from test_data_str, parsed using
_parse_split_test_data_str().
"""
return request.param
def test_split(self, split_test_case):
"""Test splitting."""
items = split.split(split_test_case.input)
assert items == split_test_case.keep
def test_split_keep_original(self, split_test_case):
"""Test if splitting with keep=True yields the original string."""
items = split.split(split_test_case.input, keep=True)
assert ''.join(items) == split_test_case.input
def test_split_keep(self, split_test_case):
"""Test splitting with keep=True."""
items = split.split(split_test_case.input, keep=True)
assert items == split_test_case.no_keep
class TestSimpleSplit:
"""Test simple_split."""
TESTS = {
' foo bar': [' foo', ' bar'],
'foobar': ['foobar'],
' foo bar baz ': [' foo', ' bar', ' baz', ' '],
'f\ti\ts\th': ['f', '\ti', '\ts', '\th'],
'foo\nbar': ['foo', '\nbar'],
}
@pytest.mark.parametrize('test', TESTS, ids=repr)
def test_str_split(self, test):
"""Test if the behavior matches str.split."""
assert split.simple_split(test) == test.rstrip().split()
@pytest.mark.parametrize('s, maxsplit',
[("foo bar baz", 1), (" foo bar baz ", 0)],
ids=repr)
def test_str_split_maxsplit(self, s, maxsplit):
"""Test if the behavior matches str.split with given maxsplit."""
actual = split.simple_split(s, maxsplit=maxsplit)
expected = s.rstrip().split(maxsplit=maxsplit)
assert actual == expected
@pytest.mark.parametrize('test, expected', TESTS.items(), ids=repr)
def test_split_keep(self, test, expected):
"""Test splitting with keep=True."""
assert split.simple_split(test, keep=True) == expected
def test_maxsplit_0_keep(self):
"""Test special case with maxsplit=0 and keep=True."""
s = "foo bar"
assert split.simple_split(s, keep=True, maxsplit=0) == [s]
| gpl-3.0 | 6,512,470,582,566,713,000 | 35.819149 | 104 | 0.595926 | false |
repotvsupertuga/tvsupertuga.repository | instal/script.module.resolveurl/lib/resolveurl/plugins/prostream.py | 2 | 1310 | """
Plugin for ResolveUrl
Copyright (C) 2020 gujal
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from __resolve_generic__ import ResolveGeneric
from lib import helpers
class ProStreamResolver(ResolveGeneric):
name = "prostream.to"
domains = ['prostream.to']
pattern = r'(?://|\.)(prostream\.to)/(?:embed-)?([0-9a-zA-Z]+)'
def get_media_url(self, host, media_id):
return helpers.get_media_url(self.get_url(host, media_id),
patterns=[r'''sources:\s*\["\s*(?P<url>[^"]+)'''],
generic_patterns=False)
def get_url(self, host, media_id):
return self._default_get_url(host, media_id, template='https://{host}/embed-{media_id}.html')
| gpl-2.0 | 4,390,770,347,793,681,400 | 37.529412 | 101 | 0.677863 | false |
ppwwyyxx/tensorflow | tensorflow/lite/testing/op_tests/unpack.py | 3 | 2091 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for unpack."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_unpack_tests(options):
"""Make a set of tests to do unpack."""
test_parameters = [{
"base_shape": [[3, 4, 3], [3, 4], [5, 6, 7, 8]],
"axis": [0, 1, 2, 3],
}]
def get_valid_axis(parameters):
"""Return a tweaked version of 'axis'."""
axis = parameters["axis"]
shape = parameters["base_shape"][:]
while axis > len(shape) - 1:
axis -= 1
return axis
def build_graph(parameters):
input_tensor = tf.compat.v1.placeholder(
dtype=tf.float32, name=("input"), shape=parameters["base_shape"])
outs = tf.unstack(input_tensor, axis=get_valid_axis(parameters))
return [input_tensor], [outs[0]]
def build_inputs(parameters, sess, inputs, outputs):
input_value = create_tensor_data(np.float32, shape=parameters["base_shape"])
return [input_value], sess.run(
outputs, feed_dict=dict(zip(inputs, [input_value])))
make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| apache-2.0 | -8,705,402,902,339,652,000 | 37.018182 | 80 | 0.678623 | false |
google-code-export/tvstreamrecord | cherrypy/scaffold/__init__.py | 80 | 1859 | """<MyProject>, a CherryPy application.
Use this as a base for creating new CherryPy applications. When you want
to make a new app, copy and paste this folder to some other location
(maybe site-packages) and rename it to the name of your project,
then tweak as desired.
Even before any tweaking, this should serve a few demonstration pages.
Change to this directory and run:
../cherryd -c site.conf
"""
import cherrypy
from cherrypy import tools, url
import os
local_dir = os.path.join(os.getcwd(), os.path.dirname(__file__))
class Root:
_cp_config = {'tools.log_tracebacks.on': True,
}
def index(self):
return """<html>
<body>Try some <a href='%s?a=7'>other</a> path,
or a <a href='%s?n=14'>default</a> path.<br />
Or, just look at the pretty picture:<br />
<img src='%s' />
</body></html>""" % (url("other"), url("else"),
url("files/made_with_cherrypy_small.png"))
index.exposed = True
def default(self, *args, **kwargs):
return "args: %s kwargs: %s" % (args, kwargs)
default.exposed = True
def other(self, a=2, b='bananas', c=None):
cherrypy.response.headers['Content-Type'] = 'text/plain'
if c is None:
return "Have %d %s." % (int(a), b)
else:
return "Have %d %s, %s." % (int(a), b, c)
other.exposed = True
files = cherrypy.tools.staticdir.handler(
section="/files",
dir=os.path.join(local_dir, "static"),
# Ignore .php files, etc.
match=r'\.(css|gif|html?|ico|jpe?g|js|png|swf|xml)$',
)
root = Root()
# Uncomment the following to use your own favicon instead of CP's default.
#favicon_path = os.path.join(local_dir, "favicon.ico")
#root.favicon_ico = tools.staticfile.handler(filename=favicon_path)
| gpl-3.0 | -2,239,561,753,261,844,500 | 29.47541 | 74 | 0.601399 | false |
tboyce021/home-assistant | homeassistant/components/mcp23017/switch.py | 8 | 2770 | """Support for switch sensor using I2C MCP23017 chip."""
from adafruit_mcp230xx.mcp23017 import MCP23017 # pylint: disable=import-error
import board # pylint: disable=import-error
import busio # pylint: disable=import-error
import digitalio # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
CONF_INVERT_LOGIC = "invert_logic"
CONF_I2C_ADDRESS = "i2c_address"
CONF_PINS = "pins"
CONF_PULL_MODE = "pull_mode"
DEFAULT_INVERT_LOGIC = False
DEFAULT_I2C_ADDRESS = 0x20
_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PINS): _SWITCHES_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
}
)
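# Hedged configuration sketch (added for illustration; pin numbers and entity
# names are made up):
#
#   switch:
#     - platform: mcp23017
#       i2c_address: 0x20
#       invert_logic: false
#       pins:
#         11: Fan Office
#         12: Light Desk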
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the MCP23017 devices."""
invert_logic = config.get(CONF_INVERT_LOGIC)
i2c_address = config.get(CONF_I2C_ADDRESS)
i2c = busio.I2C(board.SCL, board.SDA)
mcp = MCP23017(i2c, address=i2c_address)
switches = []
pins = config.get(CONF_PINS)
for pin_num, pin_name in pins.items():
pin = mcp.get_pin(pin_num)
switches.append(MCP23017Switch(pin_name, pin, invert_logic))
add_entities(switches)
class MCP23017Switch(ToggleEntity):
"""Representation of a MCP23017 output pin."""
def __init__(self, name, pin, invert_logic):
"""Initialize the pin."""
self._name = name or DEVICE_DEFAULT_NAME
self._pin = pin
self._invert_logic = invert_logic
self._state = False
self._pin.direction = digitalio.Direction.OUTPUT
self._pin.value = self._invert_logic
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def assumed_state(self):
"""Return true if optimistic updates are used."""
return True
def turn_on(self, **kwargs):
"""Turn the device on."""
self._pin.value = not self._invert_logic
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
self._pin.value = self._invert_logic
self._state = False
self.schedule_update_ha_state()
| apache-2.0 | -4,846,330,605,321,315,000 | 29.43956 | 85 | 0.661372 | false |
cryptica/slapnet | benchmarks/scalable/LeaderElectionCR79/make_net.py | 1 | 2184 | #!/usr/bin/python3
import sys
import random
def make_net(n,order):
def previous(i):
return (((i-2) % n) + 1)
print('petri net "leader election %i" {' % n)
print(' places {')
for i in range(1,n+1):
print(' ', end='')
for j in range(1,n+1):
print('s%in%i ' % (i,j), end='')
print()
print(' ', end='')
for j in range(1,n+1):
print('s%im%i ' % (i,j), end='')
print()
print()
print(' lead')
print(' }')
print(' transitions {')
for i in range(1,n+1):
print(' ', end='')
for j in range(1,n+1):
print('s%isend%i ' % (i,j), end='')
print()
print(' ', end='')
for j in range(1,n+1):
if j < i:
print('s%idisc%i ' % (i,j), end='')
elif i == j:
print('s%iacpt%i ' % (i,j), end='')
else:
print('s%ipass%i ' % (i,j), end='')
print()
print()
print(' newleader')
print(' }')
print(' arcs {')
for i in range(1,n+1):
for j in range(1,n+1):
print(' s%in%i -> s%isend%i -> s%im%i' % (i,j,i,j,i,j))
print()
for j in range(1,n+1):
print(' s%im%i -> ' % (previous(i),j), end='')
if j < i:
print('s%idisc%i ' % (i,j))
elif i == j:
print('s%iacpt%i -> lead' % (i,j))
else:
print('s%ipass%i -> s%im%i' % (i,j,i,j))
print()
print()
print(' lead -> newleader -> { ', end='')
for i in range(1,n+1):
print('s%in%i ' % (i,order[i-1]), end='')
print('}')
print(' }')
print(' initial { ', end='')
for i in range(1,n+1):
print('s%in%i ' % (i,order[i-1]), end='')
print('}')
print('}')
#print('safety property {')
#print(' lead >= 2')
#print('}')
print('liveness property {')
print(' newleader = 0')
print('}')
n = int(sys.argv[1])
o = sys.argv[2]
order = list(range(1,n+1))
if o == 'rand':
random.shuffle(order)
elif o == 'rev':
order.reverse()
make_net(n,order)
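# Usage sketch (added for illustration): generate the net for 5 processes with
# a randomly shuffled initial ordering and save it to a file:
#   ./make_net.py 5 rand > leader5.net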
| gpl-3.0 | -3,174,904,058,223,468,500 | 25.962963 | 70 | 0.408425 | false |
Distrotech/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/db/models/manager.py | 505 | 3578 | from django.db.models.manager import Manager
from django.contrib.gis.db.models.query import GeoQuerySet
class GeoManager(Manager):
"Overrides Manager to return Geographic QuerySets."
# This manager should be used for queries on related fields
# so that geometry columns on Oracle and MySQL are selected
# properly.
use_for_related_fields = True
def get_query_set(self):
return GeoQuerySet(self.model, using=self._db)
def area(self, *args, **kwargs):
return self.get_query_set().area(*args, **kwargs)
def centroid(self, *args, **kwargs):
return self.get_query_set().centroid(*args, **kwargs)
def collect(self, *args, **kwargs):
return self.get_query_set().collect(*args, **kwargs)
def difference(self, *args, **kwargs):
return self.get_query_set().difference(*args, **kwargs)
def distance(self, *args, **kwargs):
return self.get_query_set().distance(*args, **kwargs)
def envelope(self, *args, **kwargs):
return self.get_query_set().envelope(*args, **kwargs)
def extent(self, *args, **kwargs):
return self.get_query_set().extent(*args, **kwargs)
def extent3d(self, *args, **kwargs):
return self.get_query_set().extent3d(*args, **kwargs)
def force_rhr(self, *args, **kwargs):
return self.get_query_set().force_rhr(*args, **kwargs)
def geohash(self, *args, **kwargs):
return self.get_query_set().geohash(*args, **kwargs)
def geojson(self, *args, **kwargs):
return self.get_query_set().geojson(*args, **kwargs)
def gml(self, *args, **kwargs):
return self.get_query_set().gml(*args, **kwargs)
def intersection(self, *args, **kwargs):
return self.get_query_set().intersection(*args, **kwargs)
def kml(self, *args, **kwargs):
return self.get_query_set().kml(*args, **kwargs)
def length(self, *args, **kwargs):
return self.get_query_set().length(*args, **kwargs)
def make_line(self, *args, **kwargs):
return self.get_query_set().make_line(*args, **kwargs)
def mem_size(self, *args, **kwargs):
return self.get_query_set().mem_size(*args, **kwargs)
def num_geom(self, *args, **kwargs):
return self.get_query_set().num_geom(*args, **kwargs)
def num_points(self, *args, **kwargs):
return self.get_query_set().num_points(*args, **kwargs)
def perimeter(self, *args, **kwargs):
return self.get_query_set().perimeter(*args, **kwargs)
def point_on_surface(self, *args, **kwargs):
return self.get_query_set().point_on_surface(*args, **kwargs)
def reverse_geom(self, *args, **kwargs):
return self.get_query_set().reverse_geom(*args, **kwargs)
def scale(self, *args, **kwargs):
return self.get_query_set().scale(*args, **kwargs)
def snap_to_grid(self, *args, **kwargs):
return self.get_query_set().snap_to_grid(*args, **kwargs)
def svg(self, *args, **kwargs):
return self.get_query_set().svg(*args, **kwargs)
def sym_difference(self, *args, **kwargs):
return self.get_query_set().sym_difference(*args, **kwargs)
def transform(self, *args, **kwargs):
return self.get_query_set().transform(*args, **kwargs)
def translate(self, *args, **kwargs):
return self.get_query_set().translate(*args, **kwargs)
def union(self, *args, **kwargs):
return self.get_query_set().union(*args, **kwargs)
def unionagg(self, *args, **kwargs):
return self.get_query_set().unionagg(*args, **kwargs)
| apache-2.0 | -7,499,098,740,079,269,000 | 33.737864 | 69 | 0.625769 | false |
xinwu/horizon | openstack_dashboard/dashboards/project/networks/views.py | 43 | 5560 | # Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Networks.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from horizon import workflows
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks \
import forms as project_forms
from openstack_dashboard.dashboards.project.networks.ports \
import tables as port_tables
from openstack_dashboard.dashboards.project.networks.subnets \
import tables as subnet_tables
from openstack_dashboard.dashboards.project.networks \
import tables as project_tables
from openstack_dashboard.dashboards.project.networks \
import workflows as project_workflows
class IndexView(tables.DataTableView):
table_class = project_tables.NetworksTable
template_name = 'project/networks/index.html'
page_title = _("Networks")
def get_data(self):
try:
tenant_id = self.request.user.tenant_id
networks = api.neutron.network_list_for_tenant(self.request,
tenant_id)
except Exception:
networks = []
msg = _('Network list can not be retrieved.')
exceptions.handle(self.request, msg)
return networks
class CreateView(workflows.WorkflowView):
workflow_class = project_workflows.CreateNetwork
ajax_template_name = 'project/networks/create.html'
class UpdateView(forms.ModalFormView):
context_object_name = 'network'
form_class = project_forms.UpdateNetwork
form_id = "update_network_form"
modal_header = _("Edit Network")
submit_label = _("Save Changes")
submit_url = "horizon:project:networks:update"
success_url = reverse_lazy("horizon:project:networks:index")
template_name = 'project/networks/update.html'
page_title = _("Update Network")
def get_context_data(self, **kwargs):
context = super(UpdateView, self).get_context_data(**kwargs)
args = (self.kwargs['network_id'],)
context["network_id"] = self.kwargs['network_id']
context["submit_url"] = reverse(self.submit_url, args=args)
return context
@memoized.memoized_method
def _get_object(self, *args, **kwargs):
network_id = self.kwargs['network_id']
try:
return api.neutron.network_get(self.request, network_id)
except Exception:
redirect = self.success_url
msg = _('Unable to retrieve network details.')
exceptions.handle(self.request, msg, redirect=redirect)
def get_initial(self):
network = self._get_object()
return {'network_id': network['id'],
'tenant_id': network['tenant_id'],
'name': network['name'],
'admin_state': network['admin_state_up']}
class DetailView(tables.MultiTableView):
table_classes = (subnet_tables.SubnetsTable, port_tables.PortsTable)
template_name = 'project/networks/detail.html'
page_title = _("Network Details: {{ network.name }}")
def get_subnets_data(self):
try:
network = self._get_data()
subnets = api.neutron.subnet_list(self.request,
network_id=network.id)
except Exception:
subnets = []
msg = _('Subnet list can not be retrieved.')
exceptions.handle(self.request, msg)
return subnets
def get_ports_data(self):
try:
network_id = self.kwargs['network_id']
ports = api.neutron.port_list(self.request, network_id=network_id)
except Exception:
ports = []
msg = _('Port list can not be retrieved.')
exceptions.handle(self.request, msg)
return ports
@memoized.memoized_method
def _get_data(self):
try:
network_id = self.kwargs['network_id']
network = api.neutron.network_get(self.request, network_id)
network.set_id_as_name_if_empty(length=0)
except Exception:
msg = _('Unable to retrieve details for network "%s".') \
% (network_id)
exceptions.handle(self.request, msg,
redirect=self.get_redirect_url())
return network
def get_context_data(self, **kwargs):
context = super(DetailView, self).get_context_data(**kwargs)
network = self._get_data()
context["network"] = network
table = project_tables.NetworksTable(self.request)
context["url"] = self.get_redirect_url()
context["actions"] = table.render_row_actions(network)
return context
@staticmethod
def get_redirect_url():
return reverse_lazy('horizon:project:networks:index')
| apache-2.0 | 8,858,201,523,746,321,000 | 36.066667 | 78 | 0.643525 | false |
kmackenzieii/marauders-map | capture/make_fingerprint.py | 1 | 1582 | import re
import os
import kirk
import numpy as n
import pickle
def trimmean(arr, percent):
	# Use a distinct local name so numpy (imported above as `n`) is not
	# shadowed; otherwise the n.mean() call below would fail.
	length = len(arr)
	k = int(round(length*(float(percent)/100)/2))
	return n.mean(arr[k+1:length-k])
File = kirk.File
width = kirk.width
height = kirk.height
box_size = kirk.box_size
#Dictionary data structure to hold our parsed data
#For each MAC address there is a multidimensional array of size [x][y]
#In each of those arrays is a list of RSSI values found at that location
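#e.g. rssi["aa:bb:cc:dd:ee:ff"][3][2] == [-62, -60, -65]  (hypothetical values)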
rssi = {}
#Loop through every file in our data directory and extract data into rssi
for filename in os.listdir('./fingerprint'):
data = re.split('_',filename)
x = int(data[0])
y = int(data[1])
f = open('./fingerprint/'+filename)
for line in f:
read = line.split()
if len(read)==3 and read[0] == read[1]:
mac = read[0]
if read[2] != '':
strength = int(read[2].strip())
if mac in rssi:
rssi[mac][x][y].append(strength)
else:
if mac != "48:5a:3f:45:21:0f": #Filter out my cellphone
arr = [[[] for _ in range(kirk.x)] for _ in range(kirk.y)]
rssi.update({mac:arr})
rssi[mac][x][y].append(strength)
#Now that we have the data, calculate averages for each location
fingerprint = {}
for mac in rssi:
avg = [[None for _ in range(kirk.x)] for _ in range(kirk.y)]
for x in range(len(rssi[mac])):
for y in range(len(rssi[mac][x])):
l = rssi[mac][x][y]
if len(l) > 0:
avg[x][y] = n.mean(l)
#avg[x][y] = trimmean(l, 80)
fingerprint.update({mac:avg})
finger_file = open(r'fingerprint.pkl', 'wb')
pickle.dump(fingerprint, finger_file)
finger_file.close()
| mit | 3,963,946,510,713,891,300 | 27.763636 | 73 | 0.64665 | false |
cwu2011/seaborn | seaborn/timeseries.py | 6 | 13239 | """Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function can take data specified either as a long-form (tidy)
DataFrame or as an ndarray with dimensions for sampling unit, time, and
(optionally) condition. The interpretation of some of the other parameters
changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
{ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100]
        Confidence interval size(s). If a list, it will stack the error
plots for each confidence interval. Only relevant for error styles
with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
Function to determine central tendency and to pass to bootstrap
must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
err_palette: seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
        Keyword argument dictionary passed through to the matplotlib function
        generating the error plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
"""
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(np.ones(len(data)))
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
# Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
current_palette = mpl.rcParams["axes.color_cycle"]
if len(current_palette) < n_cond:
colors = color_palette("husl", n_cond)
else:
colors = color_palette(n_colors=n_cond)
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
x = df_c.columns.values.astype(np.float)
# Bootstrap the data for confidence intervals
boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [utils.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
kwargs.setdefault("marker", "" if interpolate else "o")
ls = kwargs.pop("ls", "-" if interpolate else "")
kwargs.setdefault("linestyle", ls)
label = cond if legend else "_nolegend_"
ax.plot(x, central_data, color=color, label=label, **kwargs)
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, color=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
"""Plot the kernal density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
"""Plot the kernal density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
| bsd-3-clause | 1,424,119,876,327,610,600 | 36.610795 | 79 | 0.59642 | false |
dllsf/odootest | addons/l10n_be_invoice_bba/invoice.py | 11 | 12783 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re, time, random
from openerp import api
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
"""
account.invoice object:
- Add support for Belgian structured communication
- Rename 'reference' field labels to 'Communication'
"""
class account_invoice(osv.osv):
_inherit = 'account.invoice'
@api.cr_uid_context
def _get_reference_type(self, cursor, user, context=None):
"""Add BBA Structured Communication Type and change labels from 'reference' into 'communication' """
res = super(account_invoice, self)._get_reference_type(cursor, user,
context=context)
res[[i for i,x in enumerate(res) if x[0] == 'none'][0]] = ('none', 'Free Communication')
res.append(('bba', 'BBA Structured Communication'))
#l_logger.warning('reference_type = %s' %res )
return res
def check_bbacomm(self, val):
supported_chars = '0-9+*/ '
pattern = re.compile('[^' + supported_chars + ']')
if pattern.findall(val or ''):
return False
bbacomm = re.sub('\D', '', val or '')
if len(bbacomm) == 12:
base = int(bbacomm[:10])
mod = base % 97 or 97
if mod == int(bbacomm[-2:]):
return True
return False
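    # Worked example of the mod-97 check above (illustrative number, not from the
    # original module): '+++123/4567/89002+++' reduces to the digits '123456789002';
    # base = 1234567890, and 1234567890 % 97 == 2, which matches the final two
    # digits '02', so the communication is accepted.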
def _check_communication(self, cr, uid, ids):
for inv in self.browse(cr, uid, ids):
if inv.reference_type == 'bba':
return self.check_bbacomm(inv.reference)
return True
def onchange_partner_id(self, cr, uid, ids, type, partner_id,
date_invoice=False, payment_term=False,
partner_bank_id=False, company_id=False,
context=None):
result = super(account_invoice, self).onchange_partner_id(cr, uid, ids, type, partner_id,
date_invoice, payment_term, partner_bank_id, company_id, context)
# reference_type = self.default_get(cr, uid, ['reference_type'])['reference_type']
# _logger.warning('partner_id %s' % partner_id)
reference = False
reference_type = 'none'
if partner_id:
if (type == 'out_invoice'):
reference_type = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).out_inv_comm_type
if reference_type:
reference = self.generate_bbacomm(cr, uid, ids, type, reference_type, partner_id, '', context=context)['value']['reference']
res_update = {
'reference_type': reference_type or 'none',
'reference': reference,
}
result['value'].update(res_update)
return result
def generate_bbacomm(self, cr, uid, ids, type, reference_type, partner_id, reference, context=None):
partner_obj = self.pool.get('res.partner')
reference = reference or ''
algorithm = False
if partner_id:
algorithm = partner_obj.browse(cr, uid, partner_id, context=context).out_inv_comm_algorithm
algorithm = algorithm or 'random'
if (type == 'out_invoice'):
if reference_type == 'bba':
if algorithm == 'date':
if not self.check_bbacomm(reference):
doy = time.strftime('%j')
year = time.strftime('%Y')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (doy, year))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = doy + year + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (doy, year, seq, mod)
elif algorithm == 'partner_ref':
if not self.check_bbacomm(reference):
partner_ref = self.pool.get('res.partner').browse(cr, uid, partner_id).ref
partner_ref_nr = re.sub('\D', '', partner_ref or '')
if (len(partner_ref_nr) < 3) or (len(partner_ref_nr) > 7):
raise osv.except_osv(_('Warning!'),
_('The Partner should have a 3-7 digit Reference Number for the generation of BBA Structured Communications!' \
'\nPlease correct the Partner record.'))
else:
partner_ref_nr = partner_ref_nr.ljust(7, '0')
seq = '001'
seq_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', 'like', '+++%s/%s/%%' % (partner_ref_nr[:3], partner_ref_nr[3:]))], order='reference')
if seq_ids:
prev_seq = int(self.browse(cr, uid, seq_ids[-1]).reference[12:15])
if prev_seq < 999:
seq = '%03d' % (prev_seq + 1)
else:
raise osv.except_osv(_('Warning!'),
_('The daily maximum of outgoing invoices with an automatically generated BBA Structured Communications has been exceeded!' \
'\nPlease create manually a unique BBA Structured Communication.'))
bbacomm = partner_ref_nr + seq
base = int(bbacomm)
mod = base % 97 or 97
reference = '+++%s/%s/%s%02d+++' % (partner_ref_nr[:3], partner_ref_nr[3:], seq, mod)
elif algorithm == 'random':
if not self.check_bbacomm(reference):
base = random.randint(1, 9999999999)
bbacomm = str(base).rjust(10, '0')
base = int(bbacomm)
mod = base % 97 or 97
mod = str(mod).rjust(2, '0')
reference = '+++%s/%s/%s%s+++' % (bbacomm[:3], bbacomm[3:7], bbacomm[7:], mod)
else:
raise osv.except_osv(_('Error!'),
_("Unsupported Structured Communication Type Algorithm '%s' !" \
"\nPlease contact your OpenERP support channel.") % algorithm)
return {'value': {'reference': reference}}
def create(self, cr, uid, vals, context=None):
reference = vals.get('reference', False)
reference_type = vals.get('reference_type', False)
if vals.get('type') == 'out_invoice' and not reference_type:
# fallback on default communication type for partner
reference_type = self.pool.get('res.partner').browse(cr, uid, vals['partner_id']).out_inv_comm_type
if reference_type == 'bba':
reference = self.generate_bbacomm(cr, uid, [], vals['type'], reference_type, vals['partner_id'], '', context={})['value']['reference']
vals.update({
'reference_type': reference_type or 'none',
'reference': reference,
})
if reference_type == 'bba':
if not reference:
raise osv.except_osv(_('Warning!'),
_('Empty BBA Structured Communication!' \
'\nPlease fill in a unique BBA Structured Communication.'))
if self.check_bbacomm(reference):
reference = re.sub('\D', '', reference)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('type', '=', 'out_invoice'), ('reference_type', '=', 'bba'),
('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).create(cr, uid, vals, context=context)
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
for inv in self.browse(cr, uid, ids, context):
if vals.has_key('reference_type'):
reference_type = vals['reference_type']
else:
reference_type = inv.reference_type or ''
if reference_type == 'bba':
if vals.has_key('reference'):
bbacomm = vals['reference']
else:
bbacomm = inv.reference or ''
if self.check_bbacomm(bbacomm):
reference = re.sub('\D', '', bbacomm)
vals['reference'] = '+++' + reference[0:3] + '/' + reference[3:7] + '/' + reference[7:] + '+++'
same_ids = self.search(cr, uid,
[('id', '!=', inv.id), ('type', '=', 'out_invoice'),
('reference_type', '=', 'bba'), ('reference', '=', vals['reference'])])
if same_ids:
raise osv.except_osv(_('Warning!'),
_('The BBA Structured Communication has already been used!' \
'\nPlease create manually a unique BBA Structured Communication.'))
return super(account_invoice, self).write(cr, uid, ids, vals, context)
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
invoice = self.browse(cr, uid, id, context=context)
if invoice.type in ['out_invoice']:
reference_type = invoice.reference_type or 'none'
default['reference_type'] = reference_type
if reference_type == 'bba':
partner = invoice.partner_id
default['reference'] = self.generate_bbacomm(cr, uid, id,
invoice.type, reference_type,
partner.id, '', context=context)['value']['reference']
return super(account_invoice, self).copy(cr, uid, id, default, context=context)
_columns = {
'reference': fields.char('Communication', help="The partner reference of this invoice."),
'reference_type': fields.selection(_get_reference_type, 'Communication Type',
required=True),
}
_constraints = [
(_check_communication, 'Invalid BBA Structured Communication !', ['Communication']),
]
account_invoice()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,424,724,559,644,794,400 | 51.710084 | 165 | 0.500665 | false |
WillieMaddox/numpy | numpy/linalg/tests/test_regression.py | 78 | 3097 | """ Test functions for linalg module
"""
from __future__ import division, absolute_import, print_function
import numpy as np
from numpy import linalg, arange, float64, array, dot, transpose
from numpy.testing import (
TestCase, run_module_suite, assert_equal, assert_array_equal,
assert_array_almost_equal, assert_array_less
)
rlevel = 1
class TestRegression(TestCase):
def test_eig_build(self, level=rlevel):
# Ticket #652
rva = array([1.03221168e+02 + 0.j,
-1.91843603e+01 + 0.j,
-6.04004526e-01 + 15.84422474j,
-6.04004526e-01 - 15.84422474j,
-1.13692929e+01 + 0.j,
-6.57612485e-01 + 10.41755503j,
-6.57612485e-01 - 10.41755503j,
1.82126812e+01 + 0.j,
1.06011014e+01 + 0.j,
7.80732773e+00 + 0.j,
-7.65390898e-01 + 0.j,
1.51971555e-15 + 0.j,
-1.51308713e-15 + 0.j])
a = arange(13 * 13, dtype=float64)
a.shape = (13, 13)
a = a % 17
va, ve = linalg.eig(a)
va.sort()
rva.sort()
assert_array_almost_equal(va, rva)
def test_eigh_build(self, level=rlevel):
# Ticket 662.
rvals = [68.60568999, 89.57756725, 106.67185574]
cov = array([[77.70273908, 3.51489954, 15.64602427],
[3.51489954, 88.97013878, -1.07431931],
[15.64602427, -1.07431931, 98.18223512]])
vals, vecs = linalg.eigh(cov)
assert_array_almost_equal(vals, rvals)
def test_svd_build(self, level=rlevel):
# Ticket 627.
a = array([[0., 1.], [1., 1.], [2., 1.], [3., 1.]])
m, n = a.shape
u, s, vh = linalg.svd(a)
b = dot(transpose(u[:, n:]), a)
assert_array_almost_equal(b, np.zeros((2, 2)))
def test_norm_vector_badarg(self):
# Regression for #786: Froebenius norm for vectors raises
# TypeError.
self.assertRaises(ValueError, linalg.norm, array([1., 2., 3.]), 'fro')
def test_lapack_endian(self):
# For bug #1482
a = array([[5.7998084, -2.1825367],
[-2.1825367, 9.85910595]], dtype='>f8')
b = array(a, dtype='<f8')
ap = linalg.cholesky(a)
bp = linalg.cholesky(b)
assert_array_equal(ap, bp)
def test_large_svd_32bit(self):
# See gh-4442, 64bit would require very large/slow matrices.
x = np.eye(1000, 66)
np.linalg.svd(x)
def test_svd_no_uv(self):
# gh-4733
for shape in (3, 4), (4, 4), (4, 3):
for t in float, complex:
a = np.ones(shape, dtype=t)
w = linalg.svd(a, compute_uv=False)
c = np.count_nonzero(np.absolute(w) > 0.5)
assert_equal(c, 1)
assert_equal(np.linalg.matrix_rank(a), 1)
assert_array_less(1, np.linalg.norm(a, ord=2))
if __name__ == '__main__':
run_module_suite()
| bsd-3-clause | -3,510,802,748,468,733,400 | 31.6 | 78 | 0.513077 | false |
pcostell/apitools | apitools/base/py/exceptions.py | 8 | 4111 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions for generated client libraries."""
class Error(Exception):
"""Base class for all exceptions."""
class TypecheckError(Error, TypeError):
"""An object of an incorrect type is provided."""
class NotFoundError(Error):
"""A specified resource could not be found."""
class UserError(Error):
"""Base class for errors related to user input."""
class InvalidDataError(Error):
"""Base class for any invalid data error."""
class CommunicationError(Error):
"""Any communication error talking to an API server."""
class HttpError(CommunicationError):
"""Error making a request. Soon to be HttpError."""
def __init__(self, response, content, url,
method_config=None, request=None):
super(HttpError, self).__init__()
self.response = response
self.content = content
self.url = url
self.method_config = method_config
self.request = request
def __str__(self):
content = self.content
if isinstance(content, bytes):
content = self.content.decode('ascii', 'replace')
return 'HttpError accessing <%s>: response: <%s>, content <%s>' % (
self.url, self.response, content)
@property
def status_code(self):
# TODO(craigcitro): Turn this into something better than a
# KeyError if there is no status.
return int(self.response['status'])
@classmethod
def FromResponse(cls, http_response):
return cls(http_response.info, http_response.content,
http_response.request_url)
class InvalidUserInputError(InvalidDataError):
"""User-provided input is invalid."""
class InvalidDataFromServerError(InvalidDataError, CommunicationError):
"""Data received from the server is malformed."""
class BatchError(Error):
"""Error generated while constructing a batch request."""
class ConfigurationError(Error):
"""Base class for configuration errors."""
class GeneratedClientError(Error):
"""The generated client configuration is invalid."""
class ConfigurationValueError(UserError):
"""Some part of the user-specified client configuration is invalid."""
class ResourceUnavailableError(Error):
"""User requested an unavailable resource."""
class CredentialsError(Error):
"""Errors related to invalid credentials."""
class TransferError(CommunicationError):
"""Errors related to transfers."""
class TransferRetryError(TransferError):
"""Retryable errors related to transfers."""
class TransferInvalidError(TransferError):
"""The given transfer is invalid."""
class RequestError(CommunicationError):
"""The request was not successful."""
class RetryAfterError(HttpError):
"""The response contained a retry-after header."""
def __init__(self, response, content, url, retry_after):
super(RetryAfterError, self).__init__(response, content, url)
self.retry_after = int(retry_after)
@classmethod
def FromResponse(cls, http_response):
return cls(http_response.info, http_response.content,
http_response.request_url, http_response.retry_after)
class BadStatusCodeError(HttpError):
"""The request completed but returned a bad status code."""
class NotYetImplementedError(GeneratedClientError):
"""This functionality is not yet implemented."""
class StreamExhausted(Error):
"""Attempted to read more bytes from a stream than were available."""
| apache-2.0 | -7,502,974,637,699,833,000 | 23.470238 | 75 | 0.691073 | false |
akretion/odoo | addons/stock/tests/common.py | 15 | 5095 | # -*- coding: utf-8 -*-
from odoo.tests import common
class TestStockCommon(common.TransactionCase):
def setUp(self):
super(TestStockCommon, self).setUp()
self.ProductObj = self.env['product.product']
self.UomObj = self.env['uom.uom']
self.PartnerObj = self.env['res.partner']
self.ModelDataObj = self.env['ir.model.data']
self.StockPackObj = self.env['stock.move.line']
self.StockQuantObj = self.env['stock.quant']
self.PickingObj = self.env['stock.picking']
self.MoveObj = self.env['stock.move']
self.InvObj = self.env['stock.inventory']
self.InvLineObj = self.env['stock.inventory.line']
self.LotObj = self.env['stock.production.lot']
# Model Data
self.partner_agrolite_id = self.ModelDataObj.xmlid_to_res_id('base.res_partner_2')
self.partner_delta_id = self.ModelDataObj.xmlid_to_res_id('base.res_partner_4')
self.picking_type_in = self.ModelDataObj.xmlid_to_res_id('stock.picking_type_in')
self.picking_type_out = self.ModelDataObj.xmlid_to_res_id('stock.picking_type_out')
self.supplier_location = self.ModelDataObj.xmlid_to_res_id('stock.stock_location_suppliers')
self.stock_location = self.ModelDataObj.xmlid_to_res_id('stock.stock_location_stock')
pack_location = self.env.ref('stock.location_pack_zone')
pack_location.active = True
self.pack_location = pack_location.id
output_location = self.env.ref('stock.stock_location_output')
output_location.active = True
self.output_location = output_location.id
self.customer_location = self.ModelDataObj.xmlid_to_res_id('stock.stock_location_customers')
self.categ_unit = self.ModelDataObj.xmlid_to_res_id('uom.product_uom_categ_unit')
self.categ_kgm = self.ModelDataObj.xmlid_to_res_id('uom.product_uom_categ_kgm')
# Product Created A, B, C, D
self.productA = self.ProductObj.create({'name': 'Product A', 'type': 'product'})
self.productB = self.ProductObj.create({'name': 'Product B', 'type': 'product'})
self.productC = self.ProductObj.create({'name': 'Product C', 'type': 'product'})
self.productD = self.ProductObj.create({'name': 'Product D', 'type': 'product'})
self.productE = self.ProductObj.create({'name': 'Product E', 'type': 'product'})
# Configure unit of measure.
self.uom_kg = self.env['uom.uom'].search([('category_id', '=', self.categ_kgm), ('uom_type', '=', 'reference')], limit=1)
self.uom_kg.write({
'name': 'Test-KG',
'rounding': 0.000001})
self.uom_tone = self.UomObj.create({
'name': 'Test-Tone',
'category_id': self.categ_kgm,
'uom_type': 'bigger',
'factor_inv': 1000.0,
'rounding': 0.001})
self.uom_gm = self.UomObj.create({
'name': 'Test-G',
'category_id': self.categ_kgm,
'uom_type': 'smaller',
'factor': 1000.0,
'rounding': 0.001})
self.uom_mg = self.UomObj.create({
'name': 'Test-MG',
'category_id': self.categ_kgm,
'uom_type': 'smaller',
'factor': 100000.0,
'rounding': 0.001})
# Check Unit
self.uom_unit = self.env['uom.uom'].search([('category_id', '=', self.categ_unit), ('uom_type', '=', 'reference')], limit=1)
self.uom_unit.write({
'name': 'Test-Unit',
'rounding': 1.0})
self.uom_dozen = self.UomObj.create({
'name': 'Test-DozenA',
'category_id': self.categ_unit,
'factor_inv': 12,
'uom_type': 'bigger',
'rounding': 0.001})
self.uom_sdozen = self.UomObj.create({
'name': 'Test-SDozenA',
'category_id': self.categ_unit,
'factor_inv': 144,
'uom_type': 'bigger',
'rounding': 0.001})
self.uom_sdozen_round = self.UomObj.create({
'name': 'Test-SDozenA Round',
'category_id': self.categ_unit,
'factor_inv': 144,
'uom_type': 'bigger',
'rounding': 1.0})
# Product for different unit of measure.
self.DozA = self.ProductObj.create({'name': 'Dozon-A', 'type': 'product', 'uom_id': self.uom_dozen.id, 'uom_po_id': self.uom_dozen.id})
self.SDozA = self.ProductObj.create({'name': 'SuperDozon-A', 'type': 'product', 'uom_id': self.uom_sdozen.id, 'uom_po_id': self.uom_sdozen.id})
self.SDozARound = self.ProductObj.create({'name': 'SuperDozenRound-A', 'type': 'product', 'uom_id': self.uom_sdozen_round.id, 'uom_po_id': self.uom_sdozen_round.id})
self.UnitA = self.ProductObj.create({'name': 'Unit-A', 'type': 'product'})
self.kgB = self.ProductObj.create({'name': 'kg-B', 'type': 'product', 'uom_id': self.uom_kg.id, 'uom_po_id': self.uom_kg.id})
self.gB = self.ProductObj.create({'name': 'g-B', 'type': 'product', 'uom_id': self.uom_gm.id, 'uom_po_id': self.uom_gm.id})
| agpl-3.0 | 7,401,060,998,887,452,000 | 49.95 | 173 | 0.58371 | false |
boundary/boundary-plugin-aws-elb | boundary_aws_plugin/cloudwatch_plugin.py | 8 | 4606 | from __future__ import (absolute_import, division, print_function, unicode_literals)
import logging
import datetime
import time
from . import boundary_plugin
from . import status_store
"""
If getting statistics from CloudWatch fails, we will retry up to this number of times before
giving up and aborting the plugin. Use 0 for unlimited retries.
"""
PLUGIN_RETRY_COUNT = 0
"""
If getting statistics from CloudWatch fails, we will wait this long (in seconds) before retrying.
This value must not be greater than 30 seconds, because the Boundary Relay will think we've
timed out and terminate us after 30 seconds of inactivity.
"""
PLUGIN_RETRY_DELAY = 5
class CloudwatchPlugin(object):
def __init__(self, cloudwatch_metrics_type, boundary_metric_prefix, status_store_filename):
self.cloudwatch_metrics_type = cloudwatch_metrics_type
self.boundary_metric_prefix = boundary_metric_prefix
self.status_store_filename = status_store_filename
def get_metric_data_with_retries(self, *args, **kwargs):
"""
Calls the get_metric_data function, taking into account retry configuration.
"""
        # iter(int, 1) never reaches its sentinel (int() always returns 0), so it
        # acts as an infinite iterator when unlimited retries are configured.
        retry_range = xrange(PLUGIN_RETRY_COUNT) if PLUGIN_RETRY_COUNT > 0 else iter(int, 1)
for _ in retry_range:
try:
return self.cloudwatch_metrics.get_metric_data(*args, **kwargs)
except Exception as e:
logging.error("Error retrieving CloudWatch data: %s" % e)
boundary_plugin.report_alive()
time.sleep(PLUGIN_RETRY_DELAY)
boundary_plugin.report_alive()
logging.fatal("Max retries exceeded retrieving CloudWatch data")
raise Exception("Max retries exceeded retrieving CloudWatch data")
def handle_metrics(self, data, reported_metrics):
# Data format:
# (RegionId, EntityName, MetricName) -> [(Timestamp, Value, Statistic), (Timestamp, Value, Statistic), ...]
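        # Illustrative example of a single entry (the region, names and values are
        # made up): ('us-east-1', 'my-load-balancer', 'Latency') ->
        #     [(datetime.datetime(2015, 6, 1, 12, 0), 0.042, 'Average'), ...]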
for metric_key, metric_list in data.items():
region_id, entity_name, metric_name = metric_key
for metric_list_item in metric_list:
# Do not report duplicate or past samples (note: we are comparing tuples here, which
# amounts to comparing their timestamps).
if reported_metrics.get(metric_key, (datetime.datetime.min,)) >= metric_list_item:
continue
metric_timestamp, metric_value, metric_statistic = metric_list_item
boundary_plugin.boundary_report_metric(self.boundary_metric_prefix + metric_name,
metric_value, entity_name, metric_timestamp)
reported_metrics[metric_key] = metric_list_item
status_store.save_status_store(self.status_store_filename, reported_metrics)
def main(self):
settings = boundary_plugin.parse_params()
reported_metrics = status_store.load_status_store(self.status_store_filename) or dict()
logging.basicConfig(level=logging.ERROR, filename=settings.get('log_file', None))
reports_log = settings.get('report_log_file', None)
if reports_log:
boundary_plugin.log_metrics_to_file(reports_log)
boundary_plugin.start_keepalive_subprocess()
self.cloudwatch_metrics = self.cloudwatch_metrics_type(settings['access_key_id'], settings['secret_key'])
# Bring us up to date! Get all data since the last time we know we reported valid data
# (minus 20 minutes as a buffer), and report it now, so that we report data on any time
# this plugin was down for any reason.
try:
earliest_timestamp = max(reported_metrics.values(), key=lambda v: v[0])[0] - datetime.timedelta(minutes=20)
except ValueError:
# Probably first run or someone deleted our status store file - just start from now
logging.error("No status store data; starting data collection from now")
pass
else:
logging.error("Starting historical data collection from %s" % earliest_timestamp)
data = self.get_metric_data_with_retries(only_latest=False,
start_time=earliest_timestamp, end_time=datetime.datetime.utcnow())
self.handle_metrics(data, reported_metrics)
logging.error("Historical data collection complete")
while True:
data = self.get_metric_data_with_retries()
self.handle_metrics(data, reported_metrics)
boundary_plugin.sleep_interval()
| apache-2.0 | -1,106,427,700,143,722,400 | 46.979167 | 120 | 0.65241 | false |
ryfeus/lambda-packs | Tensorflow/source/tensorflow/python/keras/wrappers/scikit_learn/__init__.py | 73 | 1062 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras scikit-learn API wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras._impl.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.python.keras._impl.keras.wrappers.scikit_learn import KerasRegressor
del absolute_import
del division
del print_function
| mit | 5,678,663,099,695,419,000 | 39.846154 | 85 | 0.721281 | false |
Lonchadepavo/EstacionCisma | tools/expand_filedir_paths.py | 166 | 3839 | #!/usr/bin/env python
import re, os, sys, fnmatch
# Regex pattern to extract the directory path in a #define FILE_DIR
filedir_pattern = re.compile(r'^#define\s*FILE_DIR\s*"(.*?)"')
# Regex pattern to extract any single quoted piece of text. This can also
# match single quoted strings inside of double quotes, which is part of a
# regular text string and should not be replaced. The replacement function
# however leaves unchanged any match that doesn't appear to be an indexed
# filename, so these extra matches should not be a problem.
rename_pattern = re.compile(r"'(.+?)'")
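# Illustrative effect of the rewrite performed below (the filename and FILE_DIR
# path are made up): a line containing 'computer.dmi' would become
# 'icons/obj/computer.dmi' if that file were indexed under the icons/obj FILE_DIR.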
# Only filenames matching this pattern will have their resources renamed
source_pattern = re.compile(r"^.*?\.(dm|dmm)$")
# Open the .dme file and return a list of all FILE_DIR paths in it
def read_filedirs(filename):
result = []
dme_file = file(filename, "rt")
# Read each line from the file and check for regex pattern match
for row in dme_file:
match = filedir_pattern.match(row)
if match:
result.append(match.group(1))
dme_file.close()
return result
# Search through a list of directories, and build a dictionary which
# maps every file to its full pathname (relative to the .dme file)
# If the same filename appears in more than one directory, the earlier
# directory in the list takes preference.
def index_files(file_dirs):
result = {}
# Reverse the directory list so the earlier directories take precedence
# by replacing the previously indexed file of the same name
for directory in reversed(file_dirs):
for name in os.listdir(directory):
# Replace backslash path separators on Windows with forward slash
# Force "name" to lowercase when used as a key since BYOND resource
# names are case insensitive, even on Linux.
if name.find(".") == -1:
continue
result[name.lower()] = directory.replace('\\', '/') + '/' + name
return result
# Recursively search for every .dm/.dmm file in the .dme file directory. For
# each file, search it for any resource names in single quotes, and replace
# them with the full path previously found by index_files()
def rewrite_sources(resources):
# Create a closure for the regex replacement function to capture the
# resources dictionary which can't be passed directly to this function
def replace_func(name):
key = name.group(1).lower()
if key in resources:
replacement = resources[key]
else:
replacement = name.group(1)
return "'" + replacement + "'"
# Search recursively for all .dm and .dmm files
for (dirpath, dirs, files) in os.walk("."):
for name in files:
if source_pattern.match(name):
path = dirpath + '/' + name
source_file = file(path, "rt")
output_file = file(path + ".tmp", "wt")
# Read file one line at a time and perform replacement of all
# single quoted resource names with the fullpath to that resource
# file. Write the updated text back out to a temporary file.
for row in source_file:
row = rename_pattern.sub(replace_func, row)
output_file.write(row)
output_file.close()
source_file.close()
# Delete original source file and replace with the temporary
# output. On Windows, an atomic rename() operation is not
# possible like it is under POSIX.
os.remove(path)
os.rename(path + ".tmp", path)
dirs = read_filedirs("tgstation.dme")
resources = index_files(dirs)
rewrite_sources(resources)
| gpl-3.0 | 7,966,900,155,057,483,000 | 39.27957 | 81 | 0.622818 | false |
fishscene/streamlink | src/streamlink/plugins/ustreamtv.py | 3 | 20932 | import re
from collections import namedtuple
from functools import partial
from random import randint
from time import sleep
from streamlink.compat import urlparse, urljoin, range
from streamlink.exceptions import StreamError, PluginError, NoStreamsError
from streamlink.plugin import Plugin, PluginOptions
from streamlink.plugin.api import http, validate
from streamlink.stream import RTMPStream, HLSStream, HTTPStream, Stream
from streamlink.stream.flvconcat import FLVTagConcat
from streamlink.stream.segmented import (
SegmentedStreamReader, SegmentedStreamWriter, SegmentedStreamWorker
)
try:
import librtmp
HAS_LIBRTMP = True
except ImportError:
HAS_LIBRTMP = False
_url_re = re.compile("""
http(s)?://(www\.)?ustream.tv
(?:
(/embed/|/channel/id/)(?P<channel_id>\d+)
)?
(?:
/recorded/(?P<video_id>\d+)
)?
""", re.VERBOSE)
_channel_id_re = re.compile("\"channelId\":(\d+)")
HLS_PLAYLIST_URL = (
"http://iphone-streaming.ustream.tv"
"/uhls/{0}/streams/live/iphone/playlist.m3u8"
)
RECORDED_URL = "http://tcdn.ustream.tv/video/{0}"
RTMP_URL = "rtmp://r{0}-1-{1}-channel-live.ums.ustream.tv:1935/ustream"
SWF_URL = "http://static-cdn1.ustream.tv/swf/live/viewer.rsl:505.swf"
_module_info_schema = validate.Schema(
list,
validate.length(1),
validate.get(0),
dict
)
_amf3_array = validate.Schema(
validate.any(
validate.all(
{int: object},
validate.transform(lambda a: list(a.values())),
),
list
)
)
_recorded_schema = validate.Schema({
validate.optional("stream"): validate.all(
_amf3_array,
[{
"name": validate.text,
"streams": validate.all(
_amf3_array,
[{
"streamName": validate.text,
"bitrate": float,
}],
),
validate.optional("url"): validate.text,
}]
)
})
_stream_schema = validate.Schema(
validate.any({
"name": validate.text,
"url": validate.text,
"streams": validate.all(
_amf3_array,
[{
"chunkId": validate.any(int, float),
"chunkRange": {validate.text: validate.text},
"chunkTime": validate.any(int, float),
"offset": validate.any(int, float),
"offsetInMs": validate.any(int, float),
"streamName": validate.text,
validate.optional("bitrate"): validate.any(int, float),
validate.optional("height"): validate.any(int, float),
validate.optional("description"): validate.text,
validate.optional("isTranscoded"): bool
}],
)
},
{
"name": validate.text,
"varnishUrl": validate.text
})
)
_channel_schema = validate.Schema({
validate.optional("stream"): validate.any(
validate.all(
_amf3_array,
[_stream_schema],
),
"offline"
)
})
Chunk = namedtuple("Chunk", "num url offset")
if HAS_LIBRTMP:
from io import BytesIO
from time import time
from librtmp.rtmp import RTMPTimeoutError, PACKET_TYPE_INVOKE
from streamlink.packages.flashmedia.types import AMF0Value
def decode_amf(body):
def generator():
fd = BytesIO(body)
while True:
try:
yield AMF0Value.read(fd)
except IOError:
break
return list(generator())
class FlashmediaRTMP(librtmp.RTMP):
"""RTMP connection using python-flashmedia's AMF decoder.
TODO: Move to python-librtmp instead.
"""
def process_packets(self, transaction_id=None, invoked_method=None,
timeout=None):
start = time()
while self.connected and transaction_id not in self._invoke_results:
if timeout and (time() - start) >= timeout:
raise RTMPTimeoutError("Timeout")
packet = self.read_packet()
if packet.type == PACKET_TYPE_INVOKE:
try:
decoded = decode_amf(packet.body)
except IOError:
continue
try:
method, transaction_id_, obj = decoded[:3]
args = decoded[3:]
except ValueError:
continue
if method == "_result":
if len(args) > 0:
result = args[0]
else:
result = None
self._invoke_results[transaction_id_] = result
else:
handler = self._invoke_handlers.get(method)
if handler:
res = handler(*args)
if res is not None:
self.call("_result", res,
transaction_id=transaction_id_)
if method == invoked_method:
self._invoke_args[invoked_method] = args
break
if transaction_id_ == 1.0:
self._connect_result = packet
else:
self.handle_packet(packet)
else:
self.handle_packet(packet)
if transaction_id:
result = self._invoke_results.pop(transaction_id, None)
return result
if invoked_method:
args = self._invoke_args.pop(invoked_method, None)
return args
def create_ums_connection(app, media_id, page_url, password,
exception=PluginError):
url = RTMP_URL.format(randint(0, 0xffffff), media_id)
params = {
"application": app,
"media": str(media_id),
"password": password
}
conn = FlashmediaRTMP(url,
swfurl=SWF_URL,
pageurl=page_url,
connect_data=params)
try:
conn.connect()
except librtmp.RTMPError:
raise exception("Failed to connect to RTMP server")
return conn
class UHSStreamWriter(SegmentedStreamWriter):
def __init__(self, *args, **kwargs):
SegmentedStreamWriter.__init__(self, *args, **kwargs)
self.concater = FLVTagConcat(flatten_timestamps=True,
sync_headers=True)
def fetch(self, chunk, retries=None):
if not retries or self.closed:
return
try:
params = {}
if chunk.offset:
params["start"] = chunk.offset
return http.get(chunk.url,
timeout=self.timeout,
params=params,
exception=StreamError)
except StreamError as err:
self.logger.error("Failed to open chunk {0}: {1}", chunk.num, err)
return self.fetch(chunk, retries - 1)
def write(self, chunk, res, chunk_size=8192):
try:
for data in self.concater.iter_chunks(buf=res.content,
skip_header=not chunk.offset):
self.reader.buffer.write(data)
if self.closed:
break
else:
self.logger.debug("Download of chunk {0} complete", chunk.num)
except IOError as err:
self.logger.error("Failed to read chunk {0}: {1}", chunk.num, err)
class UHSStreamWorker(SegmentedStreamWorker):
def __init__(self, *args, **kwargs):
SegmentedStreamWorker.__init__(self, *args, **kwargs)
self.chunk_ranges = {}
self.chunk_id = None
self.chunk_id_max = None
self.chunks = []
self.filename_format = ""
self.module_info_reload_time = 2
self.process_module_info()
def fetch_module_info(self):
self.logger.debug("Fetching module info")
conn = create_ums_connection("channel",
self.stream.channel_id,
self.stream.page_url,
self.stream.password,
exception=StreamError)
try:
result = conn.process_packets(invoked_method="moduleInfo",
timeout=10)
except (IOError, librtmp.RTMPError) as err:
raise StreamError("Failed to get module info: {0}".format(err))
finally:
conn.close()
result = _module_info_schema.validate(result)
return _channel_schema.validate(result, "module info")
def process_module_info(self):
if self.closed:
return
try:
result = self.fetch_module_info()
except PluginError as err:
self.logger.error("{0}", err)
return
providers = result.get("stream")
if not providers or providers == "offline":
self.logger.debug("Stream went offline")
self.close()
return
for provider in providers:
if provider.get("name") == self.stream.provider:
break
else:
return
try:
stream = provider["streams"][self.stream.stream_index]
except IndexError:
self.logger.error("Stream index not in result")
return
filename_format = stream["streamName"].replace("%", "%s")
filename_format = urljoin(provider["url"], filename_format)
self.filename_format = filename_format
self.update_chunk_info(stream)
def update_chunk_info(self, result):
chunk_range = result["chunkRange"]
if not chunk_range:
return
chunk_id = int(result["chunkId"])
chunk_offset = int(result["offset"])
chunk_range = dict(map(partial(map, int), chunk_range.items()))
self.chunk_ranges.update(chunk_range)
self.chunk_id_min = sorted(chunk_range)[0]
self.chunk_id_max = int(result["chunkId"])
self.chunks = [Chunk(i, self.format_chunk_url(i),
not self.chunk_id and i == chunk_id and chunk_offset)
for i in range(self.chunk_id_min, self.chunk_id_max + 1)]
if self.chunk_id is None and self.chunks:
self.chunk_id = chunk_id
def format_chunk_url(self, chunk_id):
chunk_hash = ""
for chunk_start in sorted(self.chunk_ranges):
if chunk_id >= chunk_start:
chunk_hash = self.chunk_ranges[chunk_start]
return self.filename_format % (chunk_id, chunk_hash)
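    # Illustrative example (all values made up): with chunk_ranges {100: 'abc',
    # 200: 'def'} and a filename_format of 'http://cdn.example.com/stream_%s_%s.flv',
    # chunk 150 falls in the range starting at 100 and maps to
    # 'http://cdn.example.com/stream_150_abc.flv'.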
def valid_chunk(self, chunk):
return self.chunk_id and chunk.num >= self.chunk_id
def iter_segments(self):
while not self.closed:
for chunk in filter(self.valid_chunk, self.chunks):
self.logger.debug("Adding chunk {0} to queue", chunk.num)
yield chunk
# End of stream
if self.closed:
return
self.chunk_id = chunk.num + 1
if self.wait(self.module_info_reload_time):
try:
self.process_module_info()
except StreamError as err:
self.logger.warning("Failed to process module info: {0}", err)
class UHSStreamReader(SegmentedStreamReader):
__worker__ = UHSStreamWorker
__writer__ = UHSStreamWriter
def __init__(self, stream, *args, **kwargs):
self.logger = stream.session.logger.new_module("stream.uhs")
SegmentedStreamReader.__init__(self, stream, *args, **kwargs)
class UHSStream(Stream):
__shortname__ = "uhs"
def __init__(self, session, channel_id, page_url, provider,
stream_index, password=""):
Stream.__init__(self, session)
self.channel_id = channel_id
self.page_url = page_url
self.provider = provider
self.stream_index = stream_index
self.password = password
def __repr__(self):
return "<UHSStream({0!r}, {1!r}, {2!r}, {3!r}, {4!r})>".format(
self.channel_id, self.page_url, self.provider,
self.stream_index, self.password
)
def __json__(self):
json = Stream.__json__(self)
json.update({
"channel_id": self.channel_id,
"page_url": self.page_url,
"provider": self.provider,
"stream_index": self.stream_index,
"password": self.password
})
return json
def open(self):
reader = UHSStreamReader(self)
reader.open()
return reader
class UStreamTV(Plugin):
options = PluginOptions({
"password": ""
})
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
@classmethod
def stream_weight(cls, stream):
match = re.match("mobile_(\w+)", stream)
if match:
weight, group = Plugin.stream_weight(match.group(1))
weight -= 1
group = "mobile_ustream"
elif stream == "recorded":
weight, group = 720, "ustream"
else:
weight, group = Plugin.stream_weight(stream)
return weight, group
def _get_channel_id(self):
res = http.get(self.url)
match = _channel_id_re.search(res.text)
if match:
return int(match.group(1))
def _get_hls_streams(self, channel_id, wait_for_transcode=False):
# HLS streams are created on demand, so we may have to wait
# for a transcode to be started.
attempts = wait_for_transcode and 10 or 1
playlist_url = HLS_PLAYLIST_URL.format(channel_id)
streams = {}
while attempts and not streams:
try:
streams = HLSStream.parse_variant_playlist(self.session,
playlist_url,
nameprefix="mobile_")
except IOError:
# Channel is probably offline
break
attempts -= 1
sleep(3)
return streams
def _create_rtmp_stream(self, cdn, stream_name):
parsed = urlparse(cdn)
params = {
"rtmp": cdn,
"app": parsed.path[1:],
"playpath": stream_name,
"pageUrl": self.url,
"swfUrl": SWF_URL,
"live": True
}
return RTMPStream(self.session, params)
def _get_module_info(self, app, media_id, password="", schema=None):
self.logger.debug("Waiting for moduleInfo invoke")
conn = create_ums_connection(app, media_id, self.url, password)
attempts = 3
while conn.connected and attempts:
try:
result = conn.process_packets(invoked_method="moduleInfo",
timeout=10)
except (IOError, librtmp.RTMPError) as err:
raise PluginError("Failed to get stream info: {0}".format(err))
try:
result = _module_info_schema.validate(result)
break
except PluginError:
attempts -= 1
conn.close()
if schema:
result = schema.validate(result)
return result
def _get_desktop_streams(self, channel_id):
password = self.options.get("password")
channel = self._get_module_info("channel", channel_id, password,
schema=_channel_schema)
if not isinstance(channel.get("stream"), list):
raise NoStreamsError(self.url)
streams = {}
for provider in channel["stream"]:
if provider["name"] == u"uhs_akamai": # not heavily tested, but got a stream working
continue
provider_url = provider["url"]
provider_name = provider["name"]
for stream_index, stream_info in enumerate(provider["streams"]):
stream = None
stream_height = int(stream_info.get("height", 0))
stream_name = stream_info.get("description")
if not stream_name:
if stream_height > 0:
if not stream_info.get("isTranscoded"):
stream_name = "{0}p+".format(stream_height)
else:
stream_name = "{0}p".format(stream_height)
else:
stream_name = "live"
if stream_name in streams:
provider_name_clean = provider_name.replace("uhs_", "")
stream_name += "_alt_{0}".format(provider_name_clean)
if provider_name.startswith("uhs_"):
stream = UHSStream(self.session, channel_id,
self.url, provider_name,
stream_index, password)
elif provider_url.startswith("rtmp"):
playpath = stream_info["streamName"]
stream = self._create_rtmp_stream(provider_url,
playpath)
if stream:
streams[stream_name] = stream
return streams
def _get_live_streams(self, channel_id):
has_desktop_streams = False
if HAS_LIBRTMP:
try:
streams = self._get_desktop_streams(channel_id)
# TODO: Replace with "yield from" when dropping Python 2.
for stream in streams.items():
has_desktop_streams = True
yield stream
except PluginError as err:
self.logger.error("Unable to fetch desktop streams: {0}", err)
except NoStreamsError:
pass
else:
self.logger.warning(
"python-librtmp is not installed, but is needed to access "
"the desktop streams"
)
try:
streams = self._get_hls_streams(channel_id,
wait_for_transcode=not has_desktop_streams)
# TODO: Replace with "yield from" when dropping Python 2.
for stream in streams.items():
yield stream
except PluginError as err:
self.logger.error("Unable to fetch mobile streams: {0}", err)
except NoStreamsError:
pass
def _get_recorded_streams(self, video_id):
if HAS_LIBRTMP:
recording = self._get_module_info("recorded", video_id,
schema=_recorded_schema)
if not isinstance(recording.get("stream"), list):
return
for provider in recording["stream"]:
base_url = provider.get("url")
for stream_info in provider["streams"]:
bitrate = int(stream_info.get("bitrate", 0))
stream_name = (bitrate > 0 and "{0}k".format(bitrate) or
"recorded")
url = stream_info["streamName"]
if base_url:
url = base_url + url
if url.startswith("http"):
yield stream_name, HTTPStream(self.session, url)
elif url.startswith("rtmp"):
params = dict(rtmp=url, pageUrl=self.url)
yield stream_name, RTMPStream(self.session, params)
else:
self.logger.warning(
"The proper API could not be used without python-librtmp "
"installed. Stream URL is not guaranteed to be valid"
)
url = RECORDED_URL.format(video_id)
random_hash = "{0:02x}{1:02x}".format(randint(0, 255),
randint(0, 255))
params = dict(hash=random_hash)
stream = HTTPStream(self.session, url, params=params)
yield "recorded", stream
def _get_streams(self):
match = _url_re.match(self.url)
video_id = match.group("video_id")
if video_id:
return self._get_recorded_streams(video_id)
channel_id = match.group("channel_id") or self._get_channel_id()
if channel_id:
return self._get_live_streams(channel_id)
__plugin__ = UStreamTV
| bsd-2-clause | 9,127,619,429,026,349,000 | 32.4377 | 97 | 0.516577 | false |
khara914/cf-phpbuildpack | lib/build_pack_utils/downloads.py | 15 | 4096 | import os
import urllib2
import re
import logging
from subprocess import Popen
from subprocess import PIPE
class Downloader(object):
def __init__(self, config):
self._ctx = config
self._log = logging.getLogger('downloads')
self._init_proxy()
def _init_proxy(self):
handlers = {}
for key in self._ctx.keys():
if key.lower().endswith('_proxy'):
handlers[key.split('_')[0]] = self._ctx[key]
self._log.debug('Loaded proxy handlers [%s]', handlers)
openers = []
if handlers:
openers.append(urllib2.ProxyHandler(handlers))
for handler in handlers.values():
if '@' in handler:
openers.append(urllib2.ProxyBasicAuthHandler())
opener = urllib2.build_opener(*openers)
urllib2.install_opener(opener)
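    # Illustrative context this expects (hypothetical values): keys ending in
    # '_proxy' such as {'http_proxy': 'http://proxy.example.com:8080',
    # 'https_proxy': 'http://user:pass@proxy.example.com:8080'} become urllib2
    # proxy handlers; a credentialed proxy URL also adds a ProxyBasicAuthHandler.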
def download(self, url, toFile):
path_to_download_executable = os.path.join(
self._ctx['BP_DIR'],
'compile-extensions',
'bin',
'download_dependency')
command_arguments = [
path_to_download_executable,
url,
toFile]
process = Popen(command_arguments, stdout=PIPE)
exit_code = process.wait()
translated_uri = process.stdout.read().rstrip()
if exit_code == 0:
print "Downloaded [%s] to [%s]" % (translated_uri, toFile)
elif exit_code == 1:
raise RuntimeError("Could not download dependency: %s" % url)
elif exit_code == 3:
raise RuntimeError("MD5 of downloaded dependency does not match expected value")
def custom_extension_download(self, url, toFile):
res = urllib2.urlopen(url)
with open(toFile, 'w') as f:
f.write(res.read())
print 'Downloaded [%s] to [%s]' % (url, toFile)
self._log.info('Downloaded [%s] to [%s]', url, toFile)
def download_direct(self, url):
buf = urllib2.urlopen(url).read()
self._log.info('Downloaded [%s] to memory', url)
self._log.debug("Downloaded [%s] [%s]", url, buf)
return buf
class CurlDownloader(object):
def __init__(self, config):
self._ctx = config
self._status_pattern = re.compile(r'^(.*)<!-- Status: (\d+) -->$',
re.DOTALL)
self._log = logging.getLogger('downloads')
def download(self, url, toFile):
cmd = ["curl", "-s",
"-o", toFile,
"-w", '%{http_code}']
for key in self._ctx.keys():
if key.lower().endswith('_proxy'):
cmd.extend(['-x', self._ctx[key]])
cmd.append(url)
self._log.debug("Running [%s]", cmd)
proc = Popen(cmd, stdout=PIPE)
output, unused_err = proc.communicate()
proc.poll()
self._log.debug("Curl returned [%s]", output)
if output and \
(output.startswith('4') or
output.startswith('5')):
raise RuntimeError("curl says [%s]" % output)
print 'Downloaded [%s] to [%s]' % (url, toFile)
self._log.info('Downloaded [%s] to [%s]', url, toFile)
def download_direct(self, url):
cmd = ["curl", "-s",
"-w", '<!-- Status: %{http_code} -->']
for key in self._ctx.keys():
if key.lower().endswith('_proxy'):
cmd.extend(['-x', self._ctx[key]])
cmd.append(url)
self._log.debug("Running [%s]", cmd)
proc = Popen(cmd, stdout=PIPE)
output, unused_err = proc.communicate()
proc.poll()
m = self._status_pattern.match(output)
if m:
resp = m.group(1)
code = m.group(2)
self._log.debug("Curl returned [%s]", code)
if (code.startswith('4') or code.startswith('5')):
raise RuntimeError("curl says [%s]" % output)
self._log.info('Downloaded [%s] to memory', url)
self._log.debug('Downloaded [%s] [%s]', url, resp)
return resp
| apache-2.0 | 5,014,073,709,486,819,000 | 34.310345 | 92 | 0.526367 | false |
SelenaProject/selena | app/core/modules/weather/weather.py | 1 | 5028 | # !/usr/bin/env python3
import threading
import time
import urllib.request
import json
from .. import modulebase
weather_check_interval = 60 # check every minute
city = 'Kanata,ON'
cur_weather_url = ('http://api.openweathermap.org/data/2.5/weather?q=%s&units=metric') % (city)
forecast_url = ('http://api.openweathermap.org/data/2.5/forecast?q=%s&units=metric') % (city)
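# Illustrative shape of the current-weather response consumed below (field names
# inferred from the parsing code; the values are made up):
#   {"main": {"temp": 21.5, "humidity": 40},
#    "weather": [{"id": 800, "main": "Clear"}],
#    "clouds": {"all": 10}}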
class weather(modulebase.ModuleBase):
data = None
encode = lambda x : json.dumps(x).encode('utf-8')
def __init__(self) :
weather.data = WeatherData()
def deinit(self) :
pass
def GET_temperature(self):
data = {
'temp' : weather.data.cur_temp()
}
return weather.encode(data)
def GET_current(self) :
wd = weather.data
data = {
'city' : city,
'temp' : wd.cur_temp(),
'weather' : wd.cur_weather(),
'humidity' : wd.cur_humidity(),
'clouds' : wd.cur_clouds(),
'timestamp' : wd.timestamp()
}
return weather.encode(data)
def GET_forecast(self) :
data = weather.data.forecast()
return weather.encode(data)
def POST_test(self) :
return "Good!"
class WeatherData :
def __init__(self) :
self.__cur_temp = -1
self.__humidity = -1
self.__clouds = -1
self.__cur_weather = {}
self.__forecast = []
self.__timestamp = 0
self.__lock = threading.Lock()
self.__start_checker()
'''
Public getters
'''
def cur_temp(self) :
with self.__lock :
return self.__cur_temp
def cur_weather(self) :
with self.__lock :
return self.__cur_weather
def cur_humidity(self) :
with self.__lock :
return self.__humidity
def cur_clouds(self) :
with self.__lock :
return self.__clouds
def forecast(self) :
with self.__lock :
return self.__forecast
def timestamp(self) :
with self.__lock :
return self.__timestamp
'''
Private setters
'''
def __set_cur_temp(self, temp) :
with self.__lock :
self.__cur_temp = temp
def __set_cur_weather(self, weather_id, weather_descr) :
with self.__lock :
self.__cur_weather['id'] = weather_id
self.__cur_weather['descr'] = weather_descr
def __set_cur_humidity(self, hum) :
with self.__lock :
self.__humidity = hum
def __set_cur_clouds(self, clouds) :
with self.__lock :
self.__clouds = clouds
def __set_forecast(self, forecast) :
with self.__lock :
self.__forecast = forecast
def __set_timestamp(self, timestamp) :
with self.__lock :
self.__timestamp = timestamp
'''
Threading
'''
def __start_checker(self) :
print('Starting weather checker...')
self.__checker = threading.Thread(target=self.__check_weather)
self.__checker.daemon = True
self.__checker.start()
def __check_weather(self) :
while True :
print('Checking weather...')
response = urllib.request.urlopen( urllib.request.Request(url=cur_weather_url) )
json_obj = json.loads(response.read().decode('utf-8'))
print (str(json_obj))
self.__set_timestamp(int(time.time()))
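# 'main' carries temperature and humidity; 'weather' carries the condition id and description.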
main = json_obj.get('main', {})
temp = main.get('temp', -1)
hum = main.get('humidity', -1)
self.__set_cur_temp(temp)
self.__set_cur_humidity(hum)
weather = json_obj.get('weather', [])
if len(weather) > 0 :
wthr_id = weather[0].get('id', 0)
wthr_descr = weather[0].get('main', '')
self.__set_cur_weather(wthr_id, wthr_descr)
clouds = json_obj.get('clouds', {}).get('all', -1)
self.__set_cur_clouds(clouds)
# get forecast
response = urllib.request.urlopen( urllib.request.Request(url=forecast_url) )
json_obj = json.loads(response.read().decode('utf-8'))
# extract data
data_list = json_obj.get('list', [])
fc_data = []
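# Keep the first 8 forecast entries (the feed's 3-hour steps, roughly the next 24 hours).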
for list_item in data_list[:8] :
fc_item = {}
fc_item['timestamp'] = list_item.get('dt', 0)
fc_main = list_item.get('main', {})
fc_item['temp'] = fc_main.get('temp', -1)
fc_item['humidity'] = fc_main.get('humidity', -1)
fc_weather = list_item.get('weather', [])
fc_item['weather'] = {
'id' : fc_weather[0].get('id', 0),
'descr' : fc_weather[0].get('main', '')
} if len(fc_weather) > 0 else { 'id' : 0, 'descr': '' }
fc_data.append(fc_item)
self.__set_forecast(fc_data)
time.sleep(weather_check_interval)
| apache-2.0 | 7,716,334,859,486,772,000 | 26.626374 | 96 | 0.513325 | false |
sunsrising/xnhb | contrib/devtools/fix-copyright-headers.py | 80 | 1348 | #!/usr/bin/env python
'''
Run this script to update all the copyright headers of files
that were changed this year.
For example:
// Copyright (c) 2009-2012 The Bitcoin Core developers
it will change it to
// Copyright (c) 2009-2015 The Bitcoin Core developers
'''
import os
import time
import re
year = time.gmtime()[0]
CMD_GIT_DATE = 'git log --format=@%%at -1 %s | date +"%%Y" -u -f -'
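# CMD_GIT_DATE pipes the last-commit unix timestamp into date(1) to get the commit year in UTC.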
CMD_REGEX= "perl -pi -e 's/(20\d\d)(?:-20\d\d)? The Bitcoin/$1-%s The Bitcoin/' %s"
REGEX_CURRENT= re.compile("%s The Bitcoin" % year)
CMD_LIST_FILES= "find %s | grep %s"
FOLDERS = ["./qa", "./src"]
EXTENSIONS = [".cpp",".h", ".py"]
def get_git_date(file_path):
r = os.popen(CMD_GIT_DATE % file_path)
for l in r:
# Result is one line, so just return
return l.replace("\n","")
return ""
n=1
for folder in FOLDERS:
for extension in EXTENSIONS:
for file_path in os.popen(CMD_LIST_FILES % (folder, extension)):
file_path = os.getcwd() + file_path[1:-1]
if file_path.endswith(extension):
git_date = get_git_date(file_path)
if str(year) == git_date:
# Only update if current year is not found
if REGEX_CURRENT.search(open(file_path, "r").read()) is None:
print n,"Last git edit", git_date, "-", file_path
os.popen(CMD_REGEX % (year,file_path))
n = n + 1
| mit | 5,409,074,151,913,464,000 | 28.304348 | 83 | 0.615727 | false |
malexzx/grpc | examples/python/multiplex/helloworld_pb2.py | 29 | 6763 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: helloworld.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='helloworld.proto',
package='helloworld',
syntax='proto3',
serialized_pb=_b('\n\x10helloworld.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2I\n\x07Greeter\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x42\x36\n\x1bio.grpc.examples.helloworldB\x0fHelloWorldProtoP\x01\xa2\x02\x03HLWb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_HELLOREQUEST = _descriptor.Descriptor(
name='HelloRequest',
full_name='helloworld.HelloRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='helloworld.HelloRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=32,
serialized_end=60,
)
_HELLOREPLY = _descriptor.Descriptor(
name='HelloReply',
full_name='helloworld.HelloReply',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='message', full_name='helloworld.HelloReply.message', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=62,
serialized_end=91,
)
DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST
DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY
HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), dict(
DESCRIPTOR = _HELLOREQUEST,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloRequest)
))
_sym_db.RegisterMessage(HelloRequest)
HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), dict(
DESCRIPTOR = _HELLOREPLY,
__module__ = 'helloworld_pb2'
# @@protoc_insertion_point(class_scope:helloworld.HelloReply)
))
_sym_db.RegisterMessage(HelloReply)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\033io.grpc.examples.helloworldB\017HelloWorldProtoP\001\242\002\003HLW'))
import grpc
from grpc.beta import implementations as beta_implementations
from grpc.beta import interfaces as beta_interfaces
from grpc.framework.common import cardinality
from grpc.framework.interfaces.face import utilities as face_utilities
class GreeterStub(object):
"""The greeting service definition.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.SayHello = channel.unary_unary(
'/helloworld.Greeter/SayHello',
request_serializer=HelloRequest.SerializeToString,
response_deserializer=HelloReply.FromString,
)
class GreeterServicer(object):
"""The greeting service definition.
"""
def SayHello(self, request, context):
"""Sends a greeting
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_GreeterServicer_to_server(servicer, server):
rpc_method_handlers = {
'SayHello': grpc.unary_unary_rpc_method_handler(
servicer.SayHello,
request_deserializer=HelloRequest.FromString,
response_serializer=HelloReply.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'helloworld.Greeter', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
class BetaGreeterServicer(object):
"""The greeting service definition.
"""
def SayHello(self, request, context):
"""Sends a greeting
"""
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
class BetaGreeterStub(object):
"""The greeting service definition.
"""
def SayHello(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
"""Sends a greeting
"""
raise NotImplementedError()
SayHello.future = None
def beta_create_Greeter_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
request_deserializers = {
('helloworld.Greeter', 'SayHello'): HelloRequest.FromString,
}
response_serializers = {
('helloworld.Greeter', 'SayHello'): HelloReply.SerializeToString,
}
method_implementations = {
('helloworld.Greeter', 'SayHello'): face_utilities.unary_unary_inline(servicer.SayHello),
}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
def beta_create_Greeter_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
request_serializers = {
('helloworld.Greeter', 'SayHello'): HelloRequest.SerializeToString,
}
response_deserializers = {
('helloworld.Greeter', 'SayHello'): HelloReply.FromString,
}
cardinalities = {
'SayHello': cardinality.Cardinality.UNARY_UNARY,
}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'helloworld.Greeter', cardinalities, options=stub_options)
# @@protoc_insertion_point(module_scope)
| bsd-3-clause | -2,592,863,832,961,628,700 | 32.151961 | 369 | 0.732663 | false |
sve-odoo/odoo | addons/website_sale/models/sale_order.py | 26 | 10438 | # -*- coding: utf-8 -*-
import random
from openerp import SUPERUSER_ID
from openerp.osv import osv, orm, fields
from openerp.addons.web.http import request
class payment_transaction(orm.Model):
_inherit = 'payment.transaction'
_columns = {
# link with the sale order
'sale_order_id': fields.many2one('sale.order', 'Sale Order'),
}
class sale_order(osv.Model):
_inherit = "sale.order"
def _cart_qty(self, cr, uid, ids, field_name, arg, context=None):
res = dict()
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = int(sum(l.product_uom_qty for l in (order.website_order_line or [])))
return res
_columns = {
'website_order_line': fields.one2many(
'sale.order.line', 'order_id',
string='Order Lines displayed on Website', readonly=True,
help='Order Lines to be displayed on the website. They should not be used for computation purpose.',
),
'cart_quantity': fields.function(_cart_qty, type='integer', string='Cart Quantity'),
'payment_acquirer_id': fields.many2one('payment.acquirer', 'Payment Acquirer', on_delete='set null'),
'payment_tx_id': fields.many2one('payment.transaction', 'Transaction', on_delete='set null'),
}
def _get_errors(self, cr, uid, order, context=None):
return []
def _get_website_data(self, cr, uid, order, context):
return {
'partner': order.partner_id.id,
'order': order
}
def _cart_find_product_line(self, cr, uid, ids, product_id=None, line_id=None, context=None, **kwargs):
for so in self.browse(cr, uid, ids, context=context):
domain = [('order_id', '=', so.id), ('product_id', '=', product_id)]
if line_id:
domain += [('id', '=', line_id)]
return self.pool.get('sale.order.line').search(cr, SUPERUSER_ID, domain, context=context)
def _website_product_id_change(self, cr, uid, ids, order_id, product_id, line_id=None, context=None):
so = self.pool.get('sale.order').browse(cr, uid, order_id, context=context)
values = self.pool.get('sale.order.line').product_id_change(cr, SUPERUSER_ID, [],
pricelist=so.pricelist_id.id,
product=product_id,
partner_id=so.partner_id.id,
context=context
)['value']
if line_id:
line = self.pool.get('sale.order.line').browse(cr, SUPERUSER_ID, line_id, context=context)
values['name'] = line.name
else:
product = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
values['name'] = product.description_sale or product.name
values['product_id'] = product_id
values['order_id'] = order_id
if values.get('tax_id') != None:
values['tax_id'] = [(6, 0, values['tax_id'])]
return values
def _cart_update(self, cr, uid, ids, product_id=None, line_id=None, add_qty=0, set_qty=0, context=None, **kwargs):
""" Add or set product quantity, add_qty can be negative """
sol = self.pool.get('sale.order.line')
quantity = 0
for so in self.browse(cr, uid, ids, context=context):
if line_id != False:
line_ids = so._cart_find_product_line(product_id, line_id, context=context, **kwargs)
if line_ids:
line_id = line_ids[0]
# Create line if no line with product_id can be located
if not line_id:
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, context=context)
line_id = sol.create(cr, SUPERUSER_ID, values, context=context)
if add_qty:
add_qty -= 1
# compute new quantity
if set_qty:
quantity = set_qty
elif add_qty != None:
quantity = sol.browse(cr, SUPERUSER_ID, line_id, context=context).product_uom_qty + (add_qty or 0)
# Remove zero or negative quantity lines
if quantity <= 0:
sol.unlink(cr, SUPERUSER_ID, [line_id], context=context)
else:
# update line
values = self._website_product_id_change(cr, uid, ids, so.id, product_id, line_id, context=context)
values['product_uom_qty'] = quantity
sol.write(cr, SUPERUSER_ID, [line_id], values, context=context)
return {'line_id': line_id, 'quantity': quantity}
def _cart_accessories(self, cr, uid, ids, context=None):
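# Suggest accessories: collect accessory products of the cart lines, drop ones already in the cart, and sample up to three.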
for order in self.browse(cr, uid, ids, context=context):
s = set(j.id for l in (order.website_order_line or []) for j in (l.product_id.accessory_product_ids or []))
s -= set(l.product_id.id for l in order.order_line)
product_ids = random.sample(s, min(len(s),3))
return self.pool['product.product'].browse(cr, uid, product_ids, context=context)
class website(orm.Model):
_inherit = 'website'
_columns = {
'pricelist_id': fields.related('user_id','partner_id','property_product_pricelist',
type='many2one', relation='product.pricelist', string='Default Pricelist'),
'currency_id': fields.related('pricelist_id','currency_id',
type='many2one', relation='res.currency', string='Default Currency'),
}
def sale_product_domain(self, cr, uid, ids, context=None):
return [("sale_ok", "=", True)]
def sale_get_order(self, cr, uid, ids, force_create=False, code=None, update_pricelist=None, context=None):
sale_order_obj = self.pool['sale.order']
sale_order_id = request.session.get('sale_order_id')
sale_order = None
# create so if needed
if not sale_order_id and (force_create or code):
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
for w in self.browse(cr, uid, ids):
values = {
'user_id': w.user_id.id,
'partner_id': partner.id,
'pricelist_id': partner.property_product_pricelist.id,
'section_id': self.pool.get('ir.model.data').get_object_reference(cr, uid, 'website', 'salesteam_website_sales')[1],
}
sale_order_id = sale_order_obj.create(cr, SUPERUSER_ID, values, context=context)
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [], partner.id, context=context)['value']
sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)
request.session['sale_order_id'] = sale_order_id
if sale_order_id:
# TODO cache partner_id session
partner = self.pool['res.users'].browse(cr, SUPERUSER_ID, uid, context=context).partner_id
sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order_id, context=context)
if not sale_order.exists():
request.session['sale_order_id'] = None
return None
# check for change of pricelist with a coupon
if code and code != sale_order.pricelist_id.code:
pricelist_ids = self.pool['product.pricelist'].search(cr, SUPERUSER_ID, [('code', '=', code)], context=context)
if pricelist_ids:
pricelist_id = pricelist_ids[0]
request.session['sale_order_code_pricelist_id'] = pricelist_id
update_pricelist = True
request.session['sale_order_code_pricelist_id'] = False
pricelist_id = request.session.get('sale_order_code_pricelist_id') or partner.property_product_pricelist.id
# check for change of partner_id ie after signup
if sale_order.partner_id.id != partner.id and request.website.partner_id.id != partner.id:
flag_pricelist = False
if pricelist_id != sale_order.pricelist_id.id:
flag_pricelist = True
fiscal_position = sale_order.fiscal_position and sale_order.fiscal_position.id or False
values = sale_order_obj.onchange_partner_id(cr, SUPERUSER_ID, [sale_order_id], partner.id, context=context)['value']
if values.get('fiscal_position'):
order_lines = map(int,sale_order.order_line)
values.update(sale_order_obj.onchange_fiscal_position(cr, SUPERUSER_ID, [],
values['fiscal_position'], [[6, 0, order_lines]], context=context)['value'])
values['partner_id'] = partner.id
sale_order_obj.write(cr, SUPERUSER_ID, [sale_order_id], values, context=context)
if flag_pricelist or values.get('fiscal_position') != fiscal_position:
update_pricelist = True
# update the pricelist
if update_pricelist:
values = {'pricelist_id': pricelist_id}
values.update(sale_order.onchange_pricelist_id(pricelist_id, None)['value'])
sale_order.write(values)
for line in sale_order.order_line:
sale_order._cart_update(product_id=line.product_id.id, add_qty=0)
# update browse record
if (code and code != sale_order.pricelist_id.code) or sale_order.partner_id.id != partner.id:
sale_order = sale_order_obj.browse(cr, SUPERUSER_ID, sale_order.id, context=context)
return sale_order
def sale_get_transaction(self, cr, uid, ids, context=None):
transaction_obj = self.pool.get('payment.transaction')
tx_id = request.session.get('sale_transaction_id')
if tx_id:
tx_ids = transaction_obj.search(cr, uid, [('id', '=', tx_id), ('state', 'not in', ['cancel'])], context=context)
if tx_ids:
return transaction_obj.browse(cr, uid, tx_ids[0], context=context)
else:
request.session['sale_transaction_id'] = False
return False
def sale_reset(self, cr, uid, ids, context=None):
request.session.update({
'sale_order_id': False,
'sale_transaction_id': False,
'sale_order_code_pricelist_id': False,
})
| agpl-3.0 | 7,735,798,088,223,761,000 | 46.018018 | 136 | 0.581816 | false |
saurabh6790/omnitech-libs | core/doctype/custom_script/custom_script.py | 34 | 1208 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cstr
class DocType:
def __init__(self, d, dl):
self.doc, self.doclist = d, dl
def autoname(self):
self.doc.name = self.doc.dt + "-" + self.doc.script_type
def on_update(self):
webnotes.clear_cache(doctype=self.doc.dt)
def on_trash(self):
webnotes.clear_cache(doctype=self.doc.dt)
def make_custom_server_script_file(doctype, script=None):
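# Writes a plugin module for the given DocType containing a CustomDocType subclass that wraps the script body.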
import os
from webnotes.plugins import get_path
file_path = get_path(None, "DocType", doctype)
if os.path.exists(file_path):
raise IOError(file_path + " already exists")
# create folder if not exists
webnotes.create_folder(os.path.dirname(file_path))
# create file
custom_script = """from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint, cstr, flt
from webnotes.model.doc import Document
from webnotes.model.code import get_obj
from webnotes import msgprint, _
class CustomDocType(DocType):
{script}""".format(script=script or "\tpass")
with open(file_path, "w") as f:
f.write(custom_script.encode("utf-8")) | mit | 7,915,574,700,104,366,000 | 27.116279 | 71 | 0.730132 | false |
BenSto/pybikes | pybikes/bysykkel.py | 4 | 1909 | # -*- coding: utf-8 -*-
import json
from .base import BikeShareSystem, BikeShareStation
from . import utils
class BySykkel(BikeShareSystem):
authed = True
meta = {
'system': 'BySykkel',
'company': ['Urban Infrastructure Partner']
}
def __init__(self, tag, meta, feed_url, feed_details_url, key):
super(BySykkel, self).__init__(tag, meta)
self.feed_url = feed_url
self.feed_details_url = feed_details_url
self.key = key
def update(self, scraper=None):
if scraper is None:
scraper = utils.PyBikesScraper()
scraper.headers['Client-Identifier'] = self.key
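# The feed expects the access key in the Client-Identifier header on every request.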
self.stations = []
stations_data = json.loads(scraper.request(self.feed_url))
details_data = json.loads(scraper.request(self.feed_details_url))
# Aggregate status and information by uid
stations_data = {s['id']: s for s in stations_data['stations']}
details_data = {s['id']: s for s in details_data['stations']}
# Pair each station's info record with its status record by station id
stations = [
(stations_data[id], details_data[id])
for id in stations_data.keys()
]
# Merge each status record into its info dict and build the station objects
for info, status in stations:
info.update(status)
station = BySykkelStation(info)
self.stations.append(station)
class BySykkelStation(BikeShareStation):
def __init__(self, info):
super(BySykkelStation, self).__init__()
self.name = info['title']
self.longitude = float(info['center']['longitude'])
self.latitude = float(info['center']['latitude'])
self.bikes = info['availability']['bikes']
self.free = info['availability']['locks']
self.extra = {
'uid': info['id'],
'placement': info['subtitle'],
}
| lgpl-3.0 | -2,898,308,452,387,493,000 | 26.271429 | 77 | 0.587742 | false |
yourcelf/btb | scanblog/profiles/models.py | 1 | 18215 | import os
import datetime
import itertools
import string
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User, Group
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.template.defaultfilters import slugify
from django.conf import settings
from scanning.models import Document
from comments.models import Comment
from btb.utils import OrgManager, OrgQuerySet
class ProfileManager(OrgManager):
"""
For statuses based on letters (e.g. invited, waitlisted, etc.), any letter,
whether sent or not, considers the status fulfilled. That is, one is
"invited" if an Letter(type='invited') has been created for the person,
whether or not it was sent. Creating a Letter is a contract to send it.
This differs from the v1 implementation.
"""
def active(self):
""" Everyone that hasn't been removed. """
return self.filter(user__is_active=True)
def inactive(self):
""" They have been removed for whatever reason. """
return self.filter(user__is_active=False)
def inactive_commenters(self):
return self.filter(user__is_active=False, blogger=False)
def inactive_bloggers(self):
return self.filter(user__is_active=False, blogger=True)
def active_and_inactive_commenters(self):
return self.filter(blogger=False)
def commenters(self):
""" They are not in prison. """
return self.active().filter(blogger=False)
def bloggers(self):
""" They are in prison. """
return self.active().filter(blogger=True)
def bloggers_with_posts(self):
return self.bloggers().select_related('user').filter(
user__documents_authored__status="published",
user__documents_authored__type="post",
).annotate(
authored_posts_count=models.Count('user__documents_authored'),
latest_post=models.Max(
'user__documents_authored__date_written'
),
).order_by('display_name')
def bloggers_with_profiles(self):
return self.bloggers().select_related('user').filter(
user__documents_authored__status="published",
user__documents_authored__type="profile",
).annotate(
authored_posts_count=models.Count('user__documents_authored'),
latest_post=models.Max(
'user__documents_authored__date_written'
),
).order_by('display_name')
def bloggers_with_just_profiles(self):
return self.bloggers().select_related('user').filter(
user__documents_authored__status="published",
user__documents_authored__type="profile",
).exclude(
user__documents_authored__type="post",
user__documents_authored__status="published",
).order_by('display_name')
def bloggers_with_published_content(self):
return self.bloggers().select_related('user').filter(
Q(user__documents_authored__status="published",
user__documents_authored__type="profile") |
Q(user__documents_authored__status="published",
user__documents_authored__type="post")
).distinct().order_by('display_name')
def enrolled(self):
""" They have returned a consent form. """
return self.bloggers().filter(consent_form_received=True)
def enrolled_in_contact(self):
""" They have returned a consent form, and we haven't lost contact. """
return self.enrolled().filter(lost_contact=False)
#
# Letter-based statuses
#
def invitable(self):
"""
No invitation letter has been created for them.
"""
return self.bloggers().filter(
consent_form_received=False
).exclude(
user__received_letters__type="consent_form"
)
def invited(self):
"""
An invitation letter has been created, but not returned.
"""
return self.bloggers().filter(
consent_form_received=False
).filter(
user__received_letters__type="consent_form"
)
def waitlistable(self):
"""
They have not been sent a consent form or waitlist postcard, and we
haven't received a consent form.
"""
return self.bloggers().filter(
consent_form_received=False,
).exclude(
user__received_letters__type="waitlist",
).exclude(
user__received_letters__type="consent_form",
)
def waitlisted(self):
"""
No invitation letter has been created, and a waitlist postcard has been
created.
"""
return self.bloggers().filter(
consent_form_received=False
).filter(
user__received_letters__type="waitlist"
).exclude(
user__received_letters__type="consent_form"
)
def needs_signup_complete_letter(self):
return self.enrolled().exclude(user__received_letters__type="signup_complete")
def needs_first_post_letter(self):
return (
self.enrolled().filter(user__documents_authored__status="published")
).exclude(user__received_letters__type="first_post")
def needs_comments_letter(self):
# Couldn't figure out how to make this a flat ORM query. Using two
# queries and custom SQL instead.
pks = Comment.objects.unmailed().values_list('document__author__pk', flat=True)
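# Authors of documents with unmailed comments; matched against enrolled profiles with the raw WHERE clause below.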
if pks:
where = '"{0}"."{1}" in ({2})'.format(
Profile._meta.db_table,
Profile.user.field.get_attname_column()[0],
",".join("%s" for i in pks),
)
return self.enrolled().extra(
where=[where],
params=pks
)
return self.none()
def recently_active(self, days=2*365):
"""
All bloggers with whom we haven't lost contact, are enrolled or have
been invited, and have sent us something within the last N days.
"""
cutoff = datetime.datetime.now() - datetime.timedelta(days=days)
return self.bloggers().filter(
lost_contact=False
).filter(
Q(consent_form_received=True) |
Q(user__received_letters__type="consent_form")
).filter(
user__documents_authored__created__gte=cutoff
).distinct()
class Profile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL, primary_key=True)
display_name = models.CharField(max_length=50)
show_adult_content = models.BooleanField(
default=False,
help_text=_('Show posts and comments that have been marked as adult?')
)
blogger = models.BooleanField(default=False)
managed = models.BooleanField(default=False)
lost_contact = models.BooleanField(default=False)
blog_name = models.CharField(blank=True, default="", max_length=255)
comments_disabled = models.BooleanField(default=False)
mailing_address = models.TextField(blank=True, default="")
special_mail_handling = models.TextField(blank=True, default="")
consent_form_received = models.BooleanField(default=False)
objects = ProfileManager()
class QuerySet(OrgQuerySet):
orgs = ["user__organization"]
def light_dict(self):
return {
'id': self.pk,
'username': self.user.username,
'email': self.user.email,
'is_active': self.user.is_active,
'date_joined': self.user.date_joined.isoformat(),
'blogger': self.blogger,
'managed': self.managed,
'lost_contact': self.lost_contact,
'comments_disabled': self.comments_disabled,
'blog_name': self.blog_name,
'display_name': self.display_name,
'mailing_address': self.mailing_address,
'special_mail_handling': self.special_mail_handling,
'consent_form_received': self.consent_form_received,
'blog_url': self.get_blog_url(),
'profile_url': self.get_absolute_url(),
'edit_url': self.get_edit_url(),
'is_public': self.is_public(),
}
def to_dict(self):
scans_authored = getattr(self, "user__scans_authored", None)
dct = self.light_dict()
dct.update({
u'organizations': [o.light_dict() for o in self.user.organization_set.all()],
u'invited': Profile.objects.invited().filter(pk=self.pk).exists(),
u'waitlisted': Profile.objects.waitlisted().filter(pk=self.pk).exists(),
u'waitlistable': Profile.objects.waitlistable().filter(pk=self.pk).exists(),
u'scans_authored': scans_authored,
u'has_public_profile': self.has_public_profile(),
})
return dct
def save(self, *args, **kwargs):
if not self.display_name:
self.display_name = self.user.username
super(Profile, self).save(*args, **kwargs)
# Since profile status (active/license) can impact publicness of
# documents, we need to bump the documents if we save profiles.
for doc in self.user.documents_authored.all():
doc.set_publicness()
def __unicode__(self):
return self.display_name
def get_absolute_url(self):
return reverse('profiles.profile_show', args=[self.pk])
def get_user_edit_url(self):
return reverse('profiles.profile_edit', args=[self.pk])
def get_edit_url(self):
return "%s#/users/%s" % (reverse('moderation.home'), self.pk)
def get_blog_url(self):
return reverse('blogs.blog_show', args=[self.pk, self.get_blog_slug()])
def get_bare_blog_url(self):
return reverse('blogs.blog_show', args=[self.pk, ""])
def get_blog_slug(self):
return slugify(self.display_name)
def full_address(self):
return "\n".join((
self.display_name,
self.mailing_address
))
def is_public(self):
return self.user.is_active and ((not self.blogger) or self.consent_form_received)
def has_public_profile(self):
return Document.objects.filter(author__pk=self.pk, type="profile",
status="published").exists()
def has_blog_posts(self):
return Document.objects.filter(author__pk=self.pk, type="post",
status="published").exists()
def set_random_password(self):
"""
Set a random password on our associated user object. Does not save the user.
"""
chars = set(string.ascii_uppercase + string.digits)
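# Rejection-sample single bytes from os.urandom until 32 of them fall in the allowed charset.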
char_gen = (c for c in itertools.imap(os.urandom, itertools.repeat(1)) if c in chars)
self.user.set_password(''.join(itertools.islice(char_gen, None, 32)))
def all_published_posts_as_latex_list(self):
from correspondence.utils import tex_escape
posts = self.user.documents_authored.public().order_by('date_written')
parts = [ur'\begin{itemize}']
for post in posts:
if post.in_reply_to:
try:
orig = posts.get(reply_code=post.in_reply_to)
except Document.DoesNotExist:
title = post.get_title()
else:
title = u'{} (in reply to {})'.format(
post.get_title(),
orig.get_title()
)
else:
title = post.get_title()
parts.append(ur' \item %s (\emph{%s})' % (
tex_escape(title),
post.date_written.strftime('%Y-%m-%d')
))
parts.append(ur'\end{itemize}')
return u"\n".join(parts)
class OrganizationManager(OrgManager):
def public(self):
return self.filter(public=True)
class Organization(models.Model):
name = models.CharField(max_length=255, unique=True)
slug = models.SlugField(unique=True)
personal_contact = models.CharField(max_length=255, blank=True)
public = models.BooleanField(
default=False,
help_text="Check to make this organization appear in the 'Groups' tab"
)
custom_intro_packet = models.FileField(upload_to=settings.UPLOAD_TO + "/org_intro_packets",
help_text="Leave blank to use the default packet, formatted with your address.",
blank=True, null=True)
mailing_address = models.TextField()
outgoing_mail_handled_by = models.ForeignKey('self', blank=True, null=True)
about = models.TextField(
blank=True,
help_text="Main text that will appear on the groups page.",
)
footer = models.TextField(
blank=True,
help_text="Additional text that will appear at the bottom of each post by a member of this organization.",
)
members = models.ManyToManyField(settings.AUTH_USER_MODEL, blank=True)
moderators = models.ManyToManyField(settings.AUTH_USER_MODEL,
related_name="organizations_moderated",
blank=True
)
objects = OrganizationManager()
class QuerySet(OrgQuerySet):
orgs = [""]
def to_dict(self):
dct = self.light_dict()
dct['moderators'] = [u.profile.light_dict() for u in self.moderators.select_related('profile').all()]
dct['members'] = [u.profile.light_dict() for u in self.members.select_related('profile').all()]
dct['about'] = self.about
dct['footer'] = self.footer
dct['mailing_address'] = self.mailing_address
dct['personal_contact'] = self.personal_contact
if self.custom_intro_packet:
dct['custom_intro_packet_url'] = self.custom_intro_packet.url
else:
dct['custom_intro_packet_url'] = None
if self.outgoing_mail_handled_by:
dct['outgoing_mail_handled_by'] = self.outgoing_mail_handled_by.light_dict()
else:
dct['outgoing_mail_handled_by'] = {}
return dct
def light_dict(self):
return {
u'id': self.pk,
u'slug': self.slug,
u'name': self.name,
u'public': self.public,
u'mailing_address': self.mailing_address,
}
def members_count(self):
return self.members.count()
def moderators_list(self):
return ", ".join(unicode(u.profile) for u in self.moderators.all())
def get_absolute_url(self):
return reverse("profiles.profile_list", kwargs={'org_slug': self.slug})
def __unicode__(self):
return self.name
class AffiliationManager(OrgManager):
def public(self): return self.all().public()
def private(self): return self.all().private()
class Affiliation(models.Model):
"""
Affiliations are like a "super tag" for posts, which:
1. can append additional HTML to the top of list and detail views
2. is limited to use by particular org's.
"""
title = models.CharField(max_length=255)
slug = models.SlugField(max_length=255, unique=True,
help_text="Use this to identify this affiliation when editing documents.")
logo = models.ImageField(upload_to="public/uploads/affiliations/",
blank=True, null=True)
list_body = models.TextField(
help_text="HTML for the top of the group page.")
detail_body = models.TextField(
help_text="HTML to append to individual posts for this group.")
organizations = models.ManyToManyField(Organization,
help_text="Which organizations are allowed to mark posts"
" as belonging to this affiliation?")
public = models.BooleanField(
default=False,
help_text="If false, the affiliation won't be listed publicly.")
order = models.IntegerField(
default=0,
help_text="Use to set the order for the list of affiliations on"
" the categories view. Lower numbers come first.")
created = models.DateTimeField(default=datetime.datetime.now)
modified = models.DateTimeField(blank=True)
objects = AffiliationManager()
class QuerySet(OrgQuerySet):
orgs = ["organizations"]
def public(self):
return self.filter(public=True)
def private(self):
return self.filter(public=False)
class Meta:
ordering = ['order', '-created']
def to_dict(self):
return {
u'id': self.pk,
u'title': self.title,
u'slug': self.slug,
u'logo_url': self.logo.url if self.logo else None,
u'list_body': self.list_body,
u'detail_body': self.detail_body,
u'organizations': [o.light_dict() for o in self.organizations.all()],
u'public': self.public,
u'order': self.order,
}
def total_num_responses(self):
return self.document_set.count()
def get_absolute_url(self):
return reverse("blogs.show_affiliation", args=[self.slug])
def save(self, *args, **kwargs):
self.modified = datetime.datetime.now()
return super(Affiliation, self).save(*args, **kwargs)
def __unicode__(self):
return self.slug
@receiver(post_save, sender=User)
def create_profile(sender, instance=None, **kwargs):
"""
Creates a profile on the User's save signal, so we know every user has one.
Add the user to the "readers" group.
"""
if instance is None:
return
profile, created = Profile.objects.get_or_create(user=instance)
readers, created = Group.objects.get_or_create(name="readers")
profile.user.groups.add(readers)
| agpl-3.0 | 809,162,240,108,021,900 | 36.32582 | 114 | 0.596596 | false |
astropy/astropy | astropy/io/ascii/tests/test_html.py | 7 | 22379 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module tests some of the methods related to the ``HTML``
reader/writer and aims to document its functionality.
Requires `BeautifulSoup <http://www.crummy.com/software/BeautifulSoup/>`_
to be installed.
"""
from io import StringIO
from astropy.io.ascii import html
from astropy.io.ascii import core
from astropy.table import Table
import pytest
import numpy as np
from .common import setup_function, teardown_function # noqa
from astropy.io import ascii
from astropy.utils.compat.optional_deps import HAS_BLEACH, HAS_BS4 # noqa
if HAS_BS4:
from bs4 import BeautifulSoup, FeatureNotFound
@pytest.mark.skipif('not HAS_BS4')
def test_soupstring():
"""
Test to make sure the class SoupString behaves properly.
"""
soup = BeautifulSoup('<html><head></head><body><p>foo</p></body></html>',
'html.parser')
soup_str = html.SoupString(soup)
assert isinstance(soup_str, str)
assert isinstance(soup_str, html.SoupString)
assert soup_str == '<html><head></head><body><p>foo</p></body></html>'
assert soup_str.soup is soup
def test_listwriter():
"""
Test to make sure the class ListWriter behaves properly.
"""
lst = []
writer = html.ListWriter(lst)
for i in range(5):
writer.write(i)
for ch in 'abcde':
writer.write(ch)
assert lst == [0, 1, 2, 3, 4, 'a', 'b', 'c', 'd', 'e']
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table():
"""
Test to make sure that identify_table() returns whether the
given BeautifulSoup tag is the correct table to process.
"""
# Should return False on non-<table> tags and None
soup = BeautifulSoup('<html><body></body></html>', 'html.parser')
assert html.identify_table(soup, {}, 0) is False
assert html.identify_table(None, {}, 0) is False
soup = BeautifulSoup('<table id="foo"><tr><th>A</th></tr><tr>'
'<td>B</td></tr></table>', 'html.parser').table
assert html.identify_table(soup, {}, 2) is False
assert html.identify_table(soup, {}, 1) is True # Default index of 1
# Same tests, but with explicit parameter
assert html.identify_table(soup, {'table_id': 2}, 1) is False
assert html.identify_table(soup, {'table_id': 1}, 1) is True
# Test identification by string ID
assert html.identify_table(soup, {'table_id': 'bar'}, 1) is False
assert html.identify_table(soup, {'table_id': 'foo'}, 1) is True
@pytest.mark.skipif('not HAS_BS4')
def test_missing_data():
"""
Test reading a table with missing data
"""
# First with default where blank => '0'
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td></td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
# Now with a specific value '...' => missing
table_in = ['<table>',
'<tr><th>A</th></tr>',
'<tr><td>...</td></tr>',
'<tr><td>1</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html', fill_values=[('...', '0')])
assert dat.masked is False
assert np.all(dat['A'].mask == [True, False])
assert dat['A'].dtype.kind == 'i'
@pytest.mark.skipif('not HAS_BS4')
def test_rename_cols():
"""
Test reading a table and renaming cols
"""
table_in = ['<table>',
'<tr><th>A</th> <th>B</th></tr>',
'<tr><td>1</td><td>2</td></tr>',
'</table>']
# Swap column names
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'])
assert dat.colnames == ['B', 'A']
assert len(dat) == 1
# Swap column names and only include A (the renamed version)
dat = Table.read(table_in, format='ascii.html', names=['B', 'A'], include_names=['A'])
assert dat.colnames == ['A']
assert len(dat) == 1
assert np.all(dat['A'] == 2)
@pytest.mark.skipif('not HAS_BS4')
def test_no_names():
"""
Test reading a table with no column header
"""
table_in = ['<table>',
'<tr><td>1</td></tr>',
'<tr><td>2</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert dat.colnames == ['col1']
assert len(dat) == 2
dat = Table.read(table_in, format='ascii.html', names=['a'])
assert dat.colnames == ['a']
assert len(dat) == 2
@pytest.mark.skipif('not HAS_BS4')
def test_identify_table_fail():
"""
Raise an exception with an informative error message if table_id
is not found.
"""
table_in = ['<table id="foo"><tr><th>A</th></tr>',
'<tr><td>B</td></tr></table>']
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 'bad_id'},
guess=False)
assert err.match("ERROR: HTML table id 'bad_id' not found$")
with pytest.raises(core.InconsistentTableError) as err:
Table.read(table_in, format='ascii.html', htmldict={'table_id': 3},
guess=False)
assert err.match("ERROR: HTML table number 3 not found$")
@pytest.mark.skipif('not HAS_BS4')
def test_backend_parsers():
"""
Make sure the user can specify which back-end parser to use
and that an error is raised if the parser is invalid.
"""
for parser in ('lxml', 'xml', 'html.parser', 'html5lib'):
try:
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': parser}, guess=False)
except FeatureNotFound:
if parser == 'html.parser':
raise
# otherwise ignore if the dependency isn't present
# reading should fail if the parser is invalid
with pytest.raises(FeatureNotFound):
Table.read('data/html2.html', format='ascii.html',
htmldict={'parser': 'foo'}, guess=False)
@pytest.mark.skipif('HAS_BS4')
def test_htmlinputter_no_bs4():
"""
This should return an OptionalTableImportError if BeautifulSoup
is not installed.
"""
inputter = html.HTMLInputter()
with pytest.raises(core.OptionalTableImportError):
inputter.process_lines([])
@pytest.mark.skipif('not HAS_BS4')
def test_htmlinputter():
"""
Test to ensure that HTMLInputter correctly converts input
into a list of SoupStrings representing table elements.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
# In absence of table_id, defaults to the first table
expected = ['<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>',
'<tr><td>1</td><td>a</td><td>1.05</td></tr>',
'<tr><td>2</td><td>b</td><td>2.75</td></tr>',
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Should raise an InconsistentTableError if the table is not found
inputter.html = {'table_id': 4}
with pytest.raises(core.InconsistentTableError):
inputter.get_lines(table)
# Identification by string ID
inputter.html['table_id'] = 'second'
expected = ['<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>',
'<tr><td>4</td><td>d</td><td>10.5</td></tr>',
'<tr><td>5</td><td>e</td><td>27.5</td></tr>',
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
# Identification by integer index
inputter.html['table_id'] = 3
expected = ['<tr><th>C1</th><th>C2</th><th>C3</th></tr>',
'<tr><td>7</td><td>g</td><td>105.0</td></tr>',
'<tr><td>8</td><td>h</td><td>275.0</td></tr>',
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>']
assert [str(x) for x in inputter.get_lines(table)] == expected
@pytest.mark.skipif('not HAS_BS4')
def test_htmlsplitter():
"""
Test to make sure that HTMLSplitter correctly takes lines
of type SoupString and returns a generator that yields all
header and data elements.
"""
splitter = html.HTMLSplitter()
lines = [html.SoupString(BeautifulSoup('<table><tr><th>Col 1</th><th>Col 2</th></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<table><tr><td>Data 1</td><td>Data 2</td></tr></table>',
'html.parser').tr)]
expected_data = [['Col 1', 'Col 2'], ['Data 1', 'Data 2']]
assert list(splitter(lines)) == expected_data
# Make sure the presence of a non-SoupString triggers a TypeError
lines.append('<tr><td>Data 3</td><td>Data 4</td></tr>')
with pytest.raises(TypeError):
list(splitter(lines))
# Make sure that passing an empty list triggers an error
with pytest.raises(core.InconsistentTableError):
list(splitter([]))
@pytest.mark.skipif('not HAS_BS4')
def test_htmlheader_start():
"""
Test to ensure that the start_line method of HTMLHeader
returns the first line of header data. Uses data/html.html
for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
header = html.HTMLHeader()
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column 1</th><th>Column 2</th><th>Column 3</th></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>Column A</th><th>Column B</th><th>Column C</th></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[header.start_line(lines)]) == \
'<tr><th>C1</th><th>C2</th><th>C3</th></tr>'
# start_line should return None if no valid header is found
lines = [html.SoupString(BeautifulSoup('<table><tr><td>Data</td></tr></table>',
'html.parser').tr),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
assert header.start_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><th>Header</th></tr>')
with pytest.raises(TypeError):
header.start_line(lines)
@pytest.mark.skipif('not HAS_BS4')
def test_htmldata():
"""
Test to ensure that the start_line and end_line methods
of HTMLData return the first and last lines of table data. Uses
data/html.html for sample input.
"""
f = 'data/html.html'
with open(f) as fd:
table = fd.read()
inputter = html.HTMLInputter()
inputter.html = {}
data = html.HTMLData()
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>1</td><td>a</td><td>1.05</td></tr>'
# end_line returns the index of the last data element + 1
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>3</td><td>c</td><td>-1.25</td></tr>'
inputter.html['table_id'] = 'second'
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>4</td><td>d</td><td>10.5</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>6</td><td>f</td><td>-12.5</td></tr>'
inputter.html['table_id'] = 3
lines = inputter.get_lines(table)
assert str(lines[data.start_line(lines)]) == \
'<tr><td>7</td><td>g</td><td>105.0</td></tr>'
assert str(lines[data.end_line(lines) - 1]) == \
'<tr><td>9</td><td>i</td><td>-125.0</td></tr>'
# start_line should raise an error if no table data exists
lines = [html.SoupString(BeautifulSoup('<div></div>', 'html.parser').div),
html.SoupString(BeautifulSoup('<p>Text</p>', 'html.parser').p)]
with pytest.raises(core.InconsistentTableError):
data.start_line(lines)
# end_line should return None if no table data exists
assert data.end_line(lines) is None
# Should raise an error if a non-SoupString is present
lines.append('<tr><td>Data</td></tr>')
with pytest.raises(TypeError):
data.start_line(lines)
with pytest.raises(TypeError):
data.end_line(lines)
def test_multicolumn_write():
"""
Test to make sure that the HTML writer writes multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td>a</td>
<td>a</td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td>b</td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML().write(table)[0].strip()
assert out == expected.strip()
@pytest.mark.skipif('not HAS_BLEACH')
def test_multicolumn_write_escape():
"""
Test to make sure that the HTML writer writes multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>, and that raw HTML columns pass through unescaped.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('<a></a>', '<a></a>', 'a'), ('<b></b>', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th colspan="2">C2</th>
<th colspan="3">C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0</td>
<td>1.0</td>
<td><a></a></td>
<td><a></a></td>
<td>a</td>
</tr>
<tr>
<td>2</td>
<td>2.0</td>
<td>2.0</td>
<td><b></b></td>
<td>b</td>
<td>b</td>
</tr>
<tr>
<td>3</td>
<td>3.0</td>
<td>3.0</td>
<td>c</td>
<td>c</td>
<td>c</td>
</tr>
</table>
</body>
</html>
"""
out = html.HTML(htmldict={'raw_html_cols': 'C3'}).write(table)[0].strip()
assert out == expected.strip()
def test_write_no_multicols():
"""
Test to make sure that the HTML writer will not use
multi-dimensional columns if the multicol parameter
is False.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
table = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
expected = """\
<html>
<head>
<meta charset="utf-8"/>
<meta content="text/html;charset=UTF-8" http-equiv="Content-type"/>
</head>
<body>
<table>
<thead>
<tr>
<th>C1</th>
<th>C2</th>
<th>C3</th>
</tr>
</thead>
<tr>
<td>1</td>
<td>1.0 .. 1.0</td>
<td>a .. a</td>
</tr>
<tr>
<td>2</td>
<td>2.0 .. 2.0</td>
<td>b .. b</td>
</tr>
<tr>
<td>3</td>
<td>3.0 .. 3.0</td>
<td>c .. c</td>
</tr>
</table>
</body>
</html>
"""
assert html.HTML({'multicol': False}).write(table)[0].strip() == \
expected.strip()
@pytest.mark.skipif('not HAS_BS4')
def test_multicolumn_read():
"""
Test to make sure that the HTML reader inputs multidimensional
columns (those with iterable elements) using the colspan
attribute of <th>.
Ensure that any string element within a multidimensional column
casts all elements to string prior to type conversion operations.
"""
table = Table.read('data/html2.html', format='ascii.html')
str_type = np.dtype((str, 21))
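# The string element forces the whole multidimensional column to a fixed-width 21-character string dtype.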
expected = Table(np.array([(['1', '2.5000000000000000001'], 3),
(['1a', '1'], 3.5)],
dtype=[('A', str_type, (2,)), ('B', '<f8')]))
assert np.all(table == expected)
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write():
"""
Test that columns can contain raw HTML which is not escaped.
"""
t = Table([['<em>x</em>'], ['<em>y</em>']], names=['a', 'b'])
# One column contains raw HTML (string input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': 'a'})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# One column contains raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a']})
assert expected in out.getvalue()
# Two columns contains raw HTML (list input)
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': ['a', 'b']})
expected = """\
<tr>
<td><em>x</em></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
@pytest.mark.skipif('not HAS_BLEACH')
def test_raw_html_write_clean():
"""
Test that raw HTML columns are cleaned with bleach, escaping disallowed tags while keeping whitelisted ones.
"""
import bleach # noqa
t = Table([['<script>x</script>'], ['<p>y</p>'], ['<em>y</em>']], names=['a', 'b', 'c'])
# Confirm that <script> and <p> get escaped but not <em>
out = StringIO()
t.write(out, format='ascii.html', htmldict={'raw_html_cols': t.colnames})
expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
# Confirm that we can whitelist <p>
out = StringIO()
t.write(out, format='ascii.html',
htmldict={'raw_html_cols': t.colnames,
'raw_html_clean_kwargs': {'tags': bleach.ALLOWED_TAGS + ['p']}})
expected = """\
<tr>
<td><script>x</script></td>
<td><p>y</p></td>
<td><em>y</em></td>
</tr>"""
assert expected in out.getvalue()
def test_write_table_html_fill_values():
"""
Test that passing fill_values replaces any matching values
"""
buffer_output = StringIO()
t = Table([[1], [2]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world'),
format='html')
t_expected = Table([['Hello world'], [2]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_optional_columns():
"""
Test that passing an optional column in fill_values only replaces
values in the matching columns
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'))
ascii.write(t, buffer_output, fill_values=('1', 'Hello world', 'b'),
format='html')
t_expected = Table([[1], ['Hello world']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_write_table_html_fill_values_masked():
"""
Test that passing masked values in fill_values should only replace
masked columns or values
"""
buffer_output = StringIO()
t = Table([[1], [1]], names=('a', 'b'), masked=True, dtype=('i4', 'i8'))
t['a'] = np.ma.masked
ascii.write(t, buffer_output, fill_values=(ascii.masked, 'TEST'),
format='html')
t_expected = Table([['TEST'], [1]], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multicolumn_table_html_fill_values():
"""
Test to make sure that the HTML writer writes multidimensional
columns with correctly replaced fill_values.
"""
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('a', 'a', 'a'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_output = StringIO()
t = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t, buffer_output, fill_values=('a', 'z'),
format='html')
col1 = [1, 2, 3]
col2 = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]
col3 = [('z', 'z', 'z'), ('b', 'b', 'b'), ('c', 'c', 'c')]
buffer_expected = StringIO()
t_expected = Table([col1, col2, col3], names=('C1', 'C2', 'C3'))
ascii.write(t_expected, buffer_expected, format='html')
assert buffer_output.getvalue() == buffer_expected.getvalue()
def test_multi_column_write_table_html_fill_values_masked():
"""
Test that passing masked values in fill_values should only replace
masked columns or values for multidimensional tables
"""
buffer_output = StringIO()
t = Table([[1, 2, 3, 4], ['--', 'a', '--', 'b']], names=('a', 'b'), masked=True)
t['a'][0:2] = np.ma.masked
t['b'][0:2] = np.ma.masked
ascii.write(t, buffer_output, fill_values=[(ascii.masked, 'MASKED')],
format='html')
t_expected = Table([['MASKED', 'MASKED', 3, 4], [
'MASKED', 'MASKED', '--', 'b']], names=('a', 'b'))
buffer_expected = StringIO()
ascii.write(t_expected, buffer_expected, format='html')
print(buffer_expected.getvalue())
assert buffer_output.getvalue() == buffer_expected.getvalue()
@pytest.mark.skipif('not HAS_BS4')
def test_read_html_unicode():
"""
Test reading an HTML table with unicode values
"""
table_in = ['<table>',
'<tr><td>Δ</td></tr>',
'<tr><td>Δ</td></tr>',
'</table>']
dat = Table.read(table_in, format='ascii.html')
assert np.all(dat['col1'] == ['Δ', 'Δ'])
| bsd-3-clause | 9,105,242,105,700,878,000 | 29.568306 | 100 | 0.562433 | false |
idxos/python-clblob | clblob/event.py | 3 | 8978 | # Copyright 2013 craigslist
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''craigslist blob event module.
This should only be used internally by the client module.'''
import hashlib
import clblob
import clcommon.anybase
import clcommon.profile
class Event(object):
'''Base class for various events used in the client.'''
params = []
def __init__(self, client, method, name, timeout, http_method=None):
self._client = client
if method not in self._client.events:
self._client.events[method] = dict(current=0, max=0, total=0)
self._client.events[method]['total'] += 1
self._client.events[method]['current'] += 1
current = self._client.events[method]['current']
if current > self._client.events[method]['max']:
self._client.events[method]['max'] = current
self.method = method
self.name = name
self.timeout = timeout
self.http_method = http_method or method.upper()
self.parse_response = True
self.profile = clcommon.profile.Profile()
self.data = None
self.modified = None
self.deleted = None
self.modified_deleted = None
self.index_id = None
self.store_id = None
self.encoded = None
self._buckets = None
self._replicas = None
self._is_local = None
def __del__(self):
if hasattr(self, '_client'):
self._client.events[self.method]['current'] -= 1
if hasattr(self, 'profile') and len(self.profile.marks) > 0:
self._client.log.info('profile %s', self.profile)
@property
def url(self):
'''Make a URL for this event.'''
url = '/%s' % self.name
separator = '?'
for param in self.params:
value = getattr(self, param)
if value is not None:
url = '%s%s%s=%s' % (url, separator, param, value)
separator = '&'
return url
def buckets(self, buckets=None):
'''Get or set the buckets for this event.'''
if buckets is not None:
self._buckets = buckets
return
if self._buckets is not None:
return self._buckets
self._buckets = {}
if self.encoded is not False and self._client.config['encode_name']:
self._get_encoded_buckets()
else:
self._get_buckets()
return self._buckets
def _get_buckets(self):
'''Get buckets for a name.'''
name_hash = hashlib.md5(self.name).hexdigest() # pylint: disable=E1101
name_hash = int(name_hash[:8], 16)
for cluster in xrange(len(self._client.weighted_clusters)):
weighted_cluster = self._client.weighted_clusters[cluster]
bucket = weighted_cluster[name_hash % len(weighted_cluster)]
self._buckets[cluster] = bucket
def _get_encoded_buckets(self):
'''Get buckets for an encoded name.'''
if clcommon.anybase.decode(self.name[0], 62) != 0:
raise clblob.InvalidRequest(_('Name version not valid: %s') %
self.name)
buckets = self.name[1:].split('_', 1)[0]
if len(buckets) % 2 != 0:
raise clblob.InvalidRequest(_('Name bucket list corrupt: %s') %
self.name)
buckets = [buckets[offset:offset + 2]
for offset in xrange(0, len(buckets), 2)]
for cluster, bucket in enumerate(buckets):
self._buckets[cluster] = clcommon.anybase.decode(bucket, 62)
def replicas(self, replicas=None):
'''Get or set the replicas for this event.'''
if replicas is not None:
self._replicas = replicas
return
if self._replicas is None:
self._get_replicas()
return self._replicas
def _get_replicas(self):
'''Get a preferred list of replicas for the given buckets. This
will ignore replicas in other clusters if a cluster is configured,
as well as the local replica if the client is a replica.'''
self._replicas = {}
self._is_local = False
for cluster, bucket in self.buckets().iteritems():
if self._client.cluster is None or self._client.cluster == cluster:
if self._client.bucket == bucket:
self._is_local = True
bucket = self._client.config['clusters'][cluster][bucket]
for replica in bucket['replicas']:
if self._client.replica != replica:
self._replicas[replica] = True
@property
def is_local(self):
'''Check to see if the local replica can handle this event.'''
if self._is_local is None:
self._get_replicas()
return self._is_local
@property
def info(self):
'''Make an info dictionary for responses.'''
return dict(name=self.name, modified=self.modified,
deleted=self.deleted, modified_deleted=self.modified_deleted,
buckets=self.buckets())
class Get(Event):
'''Event for tracking getting a blob.'''
params = ['response']
def __init__(self, client, name, response):
super(Get, self).__init__(client, 'get', name,
client.config['request_timeout'])
self.response = response
if response == 'data':
self.parse_response = False
class Delete(Event):
'''Event for tracking deleting a blob.'''
params = ['deleted', 'modified_deleted', 'replicate']
def __init__(self, client, name, replicate):
super(Delete, self).__init__(client, 'delete', name,
client.config['request_timeout'])
self.replicate = replicate
class Put(Event):
'''Event for tracking putting a blob.'''
params = ['modified', 'deleted', 'modified_deleted', 'replicate',
'encoded']
def __init__(self, client, name, replicate, encoded):
super(Put, self).__init__(client, 'put', name,
client.config['request_timeout'])
self.replicate = replicate
self.encoded = encoded
if encoded is False and client.config['encode_name']:
self._encode_name()
def _encode_name(self, version=0):
'''Make a name encoded with clusters and buckets.'''
encoded = [clcommon.anybase.encode(version, 62)]
for _cluster, bucket in sorted(self.buckets().iteritems()):
encoded.append(clcommon.anybase.encode(bucket, 62).zfill(2))
self.name = '%s_%s' % (''.join(encoded), self.name)
self.encoded = True
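# Illustrative sketch (added note, not part of the original module): the shape
# of a name produced by Put._encode_name above. The cluster-to-bucket mapping
# and the blob name are made-up example values.
def _example_encoded_name():
    buckets = {0: 7, 1: 42}  # cluster -> bucket, as event.buckets() would return
    encoded = [clcommon.anybase.encode(0, 62)]  # leading version character
    for _cluster, bucket in sorted(buckets.iteritems()):
        encoded.append(clcommon.anybase.encode(bucket, 62).zfill(2))
    # Result looks like '<version><two chars per cluster>_<original name>'.
    return '%s_%s' % (''.join(encoded), 'photo1')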
class Admin(Event):
'''Event for tracking various admin tasks.'''
def __init__(self, client, method, replica=None):
replica = replica or client.replica
if replica is None:
raise clblob.RequestError(_('Must give replica'))
elif replica not in client.config['replicas']:
raise clblob.RequestError(_('Unknown replica: %s') % replica)
super(Admin, self).__init__(client, method,
'_%s/%s' % (method, replica), client.config['admin_timeout'],
'GET')
self.replica = replica
class ConfigCheck(Event):
'''Event for tracking configcheck requests.'''
params = ['brief', 'tolerance']
def __init__(self, client, replica=None):
replica = replica or client.replica
if replica is not None and replica not in client.config['replicas']:
raise clblob.RequestError(_('Unknown replica: %s') % replica)
super(ConfigCheck, self).__init__(client, 'configcheck',
'_configcheck/%s' % replica, client.config['request_timeout'],
'PUT')
self.replica = replica
self.brief = None
self.tolerance = None
class List(Admin):
'''Event for tracking list requests.'''
params = ['modified_start', 'modified_stop', 'checksum', 'checksum_modulo']
def __init__(self, client, replica=None):
super(List, self).__init__(client, 'list', replica)
self.modified_start = None
self.modified_stop = None
self.checksum = None
self.checksum_modulo = None
class Sync(Admin):
'''Event for tracking list requests.'''
params = ['source', 'modified_start', 'modified_stop']
def __init__(self, client, replica=None):
super(Sync, self).__init__(client, 'sync', replica)
self.source = None
self.modified_start = None
self.modified_stop = None
| apache-2.0 | -6,370,264,113,194,574,000 | 34.626984 | 79 | 0.596124 | false |
kans/birgo | deps/breakpad/src/third_party/protobuf/protobuf/python/google/protobuf/internal/wire_format_test.py | 571 | 10848 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.internal.wire_format."""
__author__ = 'robinson@google.com (Will Robinson)'
import unittest
from google.protobuf import message
from google.protobuf.internal import wire_format
class WireFormatTest(unittest.TestCase):
def testPackTag(self):
field_number = 0xabc
tag_type = 2
self.assertEqual((field_number << 3) | tag_type,
wire_format.PackTag(field_number, tag_type))
PackTag = wire_format.PackTag
# Number too high.
self.assertRaises(message.EncodeError, PackTag, field_number, 6)
# Number too low.
self.assertRaises(message.EncodeError, PackTag, field_number, -1)
def testUnpackTag(self):
# Test field numbers that will require various varint sizes.
for expected_field_number in (1, 15, 16, 2047, 2048):
for expected_wire_type in range(6): # Highest-numbered wiretype is 5.
field_number, wire_type = wire_format.UnpackTag(
wire_format.PackTag(expected_field_number, expected_wire_type))
self.assertEqual(expected_field_number, field_number)
self.assertEqual(expected_wire_type, wire_type)
self.assertRaises(TypeError, wire_format.UnpackTag, None)
self.assertRaises(TypeError, wire_format.UnpackTag, 'abc')
self.assertRaises(TypeError, wire_format.UnpackTag, 0.0)
self.assertRaises(TypeError, wire_format.UnpackTag, object())
def testZigZagEncode(self):
Z = wire_format.ZigZagEncode
self.assertEqual(0, Z(0))
self.assertEqual(1, Z(-1))
self.assertEqual(2, Z(1))
self.assertEqual(3, Z(-2))
self.assertEqual(4, Z(2))
self.assertEqual(0xfffffffe, Z(0x7fffffff))
self.assertEqual(0xffffffff, Z(-0x80000000))
self.assertEqual(0xfffffffffffffffe, Z(0x7fffffffffffffff))
self.assertEqual(0xffffffffffffffff, Z(-0x8000000000000000))
self.assertRaises(TypeError, Z, None)
self.assertRaises(TypeError, Z, 'abcd')
self.assertRaises(TypeError, Z, 0.0)
self.assertRaises(TypeError, Z, object())
def testZigZagDecode(self):
Z = wire_format.ZigZagDecode
self.assertEqual(0, Z(0))
self.assertEqual(-1, Z(1))
self.assertEqual(1, Z(2))
self.assertEqual(-2, Z(3))
self.assertEqual(2, Z(4))
self.assertEqual(0x7fffffff, Z(0xfffffffe))
self.assertEqual(-0x80000000, Z(0xffffffff))
self.assertEqual(0x7fffffffffffffff, Z(0xfffffffffffffffe))
self.assertEqual(-0x8000000000000000, Z(0xffffffffffffffff))
self.assertRaises(TypeError, Z, None)
self.assertRaises(TypeError, Z, 'abcd')
self.assertRaises(TypeError, Z, 0.0)
self.assertRaises(TypeError, Z, object())
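  def zigzagReference(self, value):
    # Illustrative reference (added note, not part of the original test): the
    # ZigZag mapping interleaves values as 0, -1, 1, -2, 2, ... so small
    # magnitudes become small unsigned varints; this expression reproduces the
    # expectations asserted in the two tests above.
    return ((value << 1) ^ (value >> 63)) & 0xffffffffffffffff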
def NumericByteSizeTestHelper(self, byte_size_fn, value, expected_value_size):
# Use field numbers that cause various byte sizes for the tag information.
for field_number, tag_bytes in ((15, 1), (16, 2), (2047, 2), (2048, 3)):
expected_size = expected_value_size + tag_bytes
actual_size = byte_size_fn(field_number, value)
self.assertEqual(expected_size, actual_size,
'byte_size_fn: %s, field_number: %d, value: %r\n'
'Expected: %d, Actual: %d'% (
byte_size_fn, field_number, value, expected_size, actual_size))
def testByteSizeFunctions(self):
# Test all numeric *ByteSize() functions.
NUMERIC_ARGS = [
# Int32ByteSize().
[wire_format.Int32ByteSize, 0, 1],
[wire_format.Int32ByteSize, 127, 1],
[wire_format.Int32ByteSize, 128, 2],
[wire_format.Int32ByteSize, -1, 10],
# Int64ByteSize().
[wire_format.Int64ByteSize, 0, 1],
[wire_format.Int64ByteSize, 127, 1],
[wire_format.Int64ByteSize, 128, 2],
[wire_format.Int64ByteSize, -1, 10],
# UInt32ByteSize().
[wire_format.UInt32ByteSize, 0, 1],
[wire_format.UInt32ByteSize, 127, 1],
[wire_format.UInt32ByteSize, 128, 2],
[wire_format.UInt32ByteSize, wire_format.UINT32_MAX, 5],
# UInt64ByteSize().
[wire_format.UInt64ByteSize, 0, 1],
[wire_format.UInt64ByteSize, 127, 1],
[wire_format.UInt64ByteSize, 128, 2],
[wire_format.UInt64ByteSize, wire_format.UINT64_MAX, 10],
# SInt32ByteSize().
[wire_format.SInt32ByteSize, 0, 1],
[wire_format.SInt32ByteSize, -1, 1],
[wire_format.SInt32ByteSize, 1, 1],
[wire_format.SInt32ByteSize, -63, 1],
[wire_format.SInt32ByteSize, 63, 1],
[wire_format.SInt32ByteSize, -64, 1],
[wire_format.SInt32ByteSize, 64, 2],
# SInt64ByteSize().
[wire_format.SInt64ByteSize, 0, 1],
[wire_format.SInt64ByteSize, -1, 1],
[wire_format.SInt64ByteSize, 1, 1],
[wire_format.SInt64ByteSize, -63, 1],
[wire_format.SInt64ByteSize, 63, 1],
[wire_format.SInt64ByteSize, -64, 1],
[wire_format.SInt64ByteSize, 64, 2],
# Fixed32ByteSize().
[wire_format.Fixed32ByteSize, 0, 4],
[wire_format.Fixed32ByteSize, wire_format.UINT32_MAX, 4],
# Fixed64ByteSize().
[wire_format.Fixed64ByteSize, 0, 8],
[wire_format.Fixed64ByteSize, wire_format.UINT64_MAX, 8],
# SFixed32ByteSize().
[wire_format.SFixed32ByteSize, 0, 4],
[wire_format.SFixed32ByteSize, wire_format.INT32_MIN, 4],
[wire_format.SFixed32ByteSize, wire_format.INT32_MAX, 4],
# SFixed64ByteSize().
[wire_format.SFixed64ByteSize, 0, 8],
[wire_format.SFixed64ByteSize, wire_format.INT64_MIN, 8],
[wire_format.SFixed64ByteSize, wire_format.INT64_MAX, 8],
# FloatByteSize().
[wire_format.FloatByteSize, 0.0, 4],
[wire_format.FloatByteSize, 1000000000.0, 4],
[wire_format.FloatByteSize, -1000000000.0, 4],
# DoubleByteSize().
[wire_format.DoubleByteSize, 0.0, 8],
[wire_format.DoubleByteSize, 1000000000.0, 8],
[wire_format.DoubleByteSize, -1000000000.0, 8],
# BoolByteSize().
[wire_format.BoolByteSize, False, 1],
[wire_format.BoolByteSize, True, 1],
# EnumByteSize().
[wire_format.EnumByteSize, 0, 1],
[wire_format.EnumByteSize, 127, 1],
[wire_format.EnumByteSize, 128, 2],
[wire_format.EnumByteSize, wire_format.UINT32_MAX, 5],
]
for args in NUMERIC_ARGS:
self.NumericByteSizeTestHelper(*args)
# Test strings and bytes.
for byte_size_fn in (wire_format.StringByteSize, wire_format.BytesByteSize):
# 1 byte for tag, 1 byte for length, 3 bytes for contents.
self.assertEqual(5, byte_size_fn(10, 'abc'))
# 2 bytes for tag, 1 byte for length, 3 bytes for contents.
self.assertEqual(6, byte_size_fn(16, 'abc'))
# 2 bytes for tag, 2 bytes for length, 128 bytes for contents.
self.assertEqual(132, byte_size_fn(16, 'a' * 128))
# Test UTF-8 string byte size calculation.
# 1 byte for tag, 1 byte for length, 8 bytes for content.
self.assertEqual(10, wire_format.StringByteSize(
5, unicode('\xd0\xa2\xd0\xb5\xd1\x81\xd1\x82', 'utf-8')))
class MockMessage(object):
def __init__(self, byte_size):
self.byte_size = byte_size
def ByteSize(self):
return self.byte_size
message_byte_size = 10
mock_message = MockMessage(byte_size=message_byte_size)
# Test groups.
# (2 * 1) bytes for begin and end tags, plus message_byte_size.
self.assertEqual(2 + message_byte_size,
wire_format.GroupByteSize(1, mock_message))
# (2 * 2) bytes for begin and end tags, plus message_byte_size.
self.assertEqual(4 + message_byte_size,
wire_format.GroupByteSize(16, mock_message))
# Test messages.
# 1 byte for tag, plus 1 byte for length, plus contents.
self.assertEqual(2 + mock_message.byte_size,
wire_format.MessageByteSize(1, mock_message))
# 2 bytes for tag, plus 1 byte for length, plus contents.
self.assertEqual(3 + mock_message.byte_size,
wire_format.MessageByteSize(16, mock_message))
# 2 bytes for tag, plus 2 bytes for length, plus contents.
mock_message.byte_size = 128
self.assertEqual(4 + mock_message.byte_size,
wire_format.MessageByteSize(16, mock_message))
# Test message set item byte size.
# 4 bytes for tags, plus 1 byte for length, plus 1 byte for type_id,
# plus contents.
mock_message.byte_size = 10
self.assertEqual(mock_message.byte_size + 6,
wire_format.MessageSetItemByteSize(1, mock_message))
# 4 bytes for tags, plus 2 bytes for length, plus 1 byte for type_id,
# plus contents.
mock_message.byte_size = 128
self.assertEqual(mock_message.byte_size + 7,
wire_format.MessageSetItemByteSize(1, mock_message))
# 4 bytes for tags, plus 2 bytes for length, plus 2 byte for type_id,
# plus contents.
self.assertEqual(mock_message.byte_size + 8,
wire_format.MessageSetItemByteSize(128, mock_message))
# Too-long varint.
self.assertRaises(message.EncodeError,
wire_format.UInt64ByteSize, 1, 1 << 128)
if __name__ == '__main__':
unittest.main()
| apache-2.0 | 6,601,034,554,819,583,000 | 41.87747 | 80 | 0.664731 | false |
tempbottle/kbengine | kbe/res/scripts/common/Lib/test/test_urllib2net.py | 60 | 12676 | import unittest
from test import support
from test.test_urllib2 import sanepathname2url
import os
import socket
import urllib.error
import urllib.request
import sys
try:
import ssl
except ImportError:
ssl = None
support.requires("network")
TIMEOUT = 60 # seconds
def _retry_thrice(func, exc, *args, **kwargs):
for i in range(3):
try:
return func(*args, **kwargs)
except exc as e:
last_exc = e
continue
except:
raise
raise last_exc
def _wrap_with_retry_thrice(func, exc):
def wrapped(*args, **kwargs):
return _retry_thrice(func, exc, *args, **kwargs)
return wrapped
# Connecting to remote hosts is flaky. Make it more robust by retrying
# the connection several times.
_urlopen_with_retry = _wrap_with_retry_thrice(urllib.request.urlopen,
urllib.error.URLError)
class AuthTests(unittest.TestCase):
"""Tests urllib2 authentication features."""
## Disabled at the moment since there is no page under python.org which
    ## could be used for HTTP authentication.
#
# def test_basic_auth(self):
# import http.client
#
# test_url = "http://www.python.org/test/test_urllib2/basic_auth"
# test_hostport = "www.python.org"
# test_realm = 'Test Realm'
# test_user = 'test.test_urllib2net'
# test_password = 'blah'
#
# # failure
# try:
# _urlopen_with_retry(test_url)
# except urllib2.HTTPError, exc:
# self.assertEqual(exc.code, 401)
# else:
# self.fail("urlopen() should have failed with 401")
#
# # success
# auth_handler = urllib2.HTTPBasicAuthHandler()
# auth_handler.add_password(test_realm, test_hostport,
# test_user, test_password)
# opener = urllib2.build_opener(auth_handler)
# f = opener.open('http://localhost/')
# response = _urlopen_with_retry("http://www.python.org/")
#
# # The 'userinfo' URL component is deprecated by RFC 3986 for security
# # reasons, let's not implement it! (it's already implemented for proxy
# # specification strings (that is, URLs or authorities specifying a
# # proxy), so we must keep that)
# self.assertRaises(http.client.InvalidURL,
# urllib2.urlopen, "http://evil:thing@example.com")
class CloseSocketTest(unittest.TestCase):
def test_close(self):
# calling .close() on urllib2's response objects should close the
# underlying socket
url = "http://www.example.com/"
with support.transient_internet(url):
response = _urlopen_with_retry(url)
sock = response.fp
self.assertFalse(sock.closed)
response.close()
self.assertTrue(sock.closed)
class OtherNetworkTests(unittest.TestCase):
def setUp(self):
if 0: # for debugging
import logging
logger = logging.getLogger("test_urllib2net")
logger.addHandler(logging.StreamHandler())
# XXX The rest of these tests aren't very good -- they don't check much.
# They do sometimes catch some major disasters, though.
def test_ftp(self):
urls = [
'ftp://ftp.debian.org/debian/README',
('ftp://ftp.debian.org/debian/non-existent-file',
None, urllib.error.URLError),
'ftp://gatekeeper.research.compaq.com/pub/DEC/SRC'
'/research-reports/00README-Legal-Rules-Regs',
]
self._test_urls(urls, self._extra_handlers())
def test_file(self):
TESTFN = support.TESTFN
f = open(TESTFN, 'w')
try:
f.write('hi there\n')
f.close()
urls = [
'file:' + sanepathname2url(os.path.abspath(TESTFN)),
('file:///nonsensename/etc/passwd', None,
urllib.error.URLError),
]
self._test_urls(urls, self._extra_handlers(), retry=True)
finally:
os.remove(TESTFN)
self.assertRaises(ValueError, urllib.request.urlopen,'./relative_path/to/file')
# XXX Following test depends on machine configurations that are internal
# to CNRI. Need to set up a public server with the right authentication
# configuration for test purposes.
## def test_cnri(self):
## if socket.gethostname() == 'bitdiddle':
## localhost = 'bitdiddle.cnri.reston.va.us'
## elif socket.gethostname() == 'bitdiddle.concentric.net':
## localhost = 'localhost'
## else:
## localhost = None
## if localhost is not None:
## urls = [
## 'file://%s/etc/passwd' % localhost,
## 'http://%s/simple/' % localhost,
## 'http://%s/digest/' % localhost,
## 'http://%s/not/found.h' % localhost,
## ]
## bauth = HTTPBasicAuthHandler()
## bauth.add_password('basic_test_realm', localhost, 'jhylton',
## 'password')
## dauth = HTTPDigestAuthHandler()
## dauth.add_password('digest_test_realm', localhost, 'jhylton',
## 'password')
## self._test_urls(urls, self._extra_handlers()+[bauth, dauth])
def test_urlwithfrag(self):
urlwith_frag = "https://docs.python.org/2/glossary.html#glossary"
with support.transient_internet(urlwith_frag):
req = urllib.request.Request(urlwith_frag)
res = urllib.request.urlopen(req)
self.assertEqual(res.geturl(),
"https://docs.python.org/2/glossary.html#glossary")
def test_redirect_url_withfrag(self):
redirect_url_with_frag = "http://bit.ly/1iSHToT"
with support.transient_internet(redirect_url_with_frag):
req = urllib.request.Request(redirect_url_with_frag)
res = urllib.request.urlopen(req)
self.assertEqual(res.geturl(),
"https://docs.python.org/3.4/glossary.html#term-global-interpreter-lock")
def test_custom_headers(self):
url = "http://www.example.com"
with support.transient_internet(url):
opener = urllib.request.build_opener()
request = urllib.request.Request(url)
self.assertFalse(request.header_items())
opener.open(request)
self.assertTrue(request.header_items())
self.assertTrue(request.has_header('User-agent'))
request.add_header('User-Agent','Test-Agent')
opener.open(request)
self.assertEqual(request.get_header('User-agent'),'Test-Agent')
def test_sites_no_connection_close(self):
# Some sites do not send Connection: close header.
# Verify that those work properly. (#issue12576)
URL = 'http://www.imdb.com' # mangles Connection:close
with support.transient_internet(URL):
try:
with urllib.request.urlopen(URL) as res:
pass
except ValueError as e:
self.fail("urlopen failed for site not sending \
Connection:close")
else:
self.assertTrue(res)
req = urllib.request.urlopen(URL)
res = req.read()
self.assertTrue(res)
def _test_urls(self, urls, handlers, retry=True):
import time
import logging
debug = logging.getLogger("test_urllib2").debug
urlopen = urllib.request.build_opener(*handlers).open
if retry:
urlopen = _wrap_with_retry_thrice(urlopen, urllib.error.URLError)
for url in urls:
with self.subTest(url=url):
if isinstance(url, tuple):
url, req, expected_err = url
else:
req = expected_err = None
with support.transient_internet(url):
try:
f = urlopen(url, req, TIMEOUT)
except OSError as err:
if expected_err:
msg = ("Didn't get expected error(s) %s for %s %s, got %s: %s" %
(expected_err, url, req, type(err), err))
self.assertIsInstance(err, expected_err, msg)
else:
raise
except urllib.error.URLError as err:
                        if isinstance(err.reason, socket.timeout):
print("<timeout: %s>" % url, file=sys.stderr)
continue
else:
raise
else:
try:
with support.time_out, \
support.socket_peer_reset, \
support.ioerror_peer_reset:
buf = f.read()
debug("read %d bytes" % len(buf))
except socket.timeout:
print("<timeout: %s>" % url, file=sys.stderr)
f.close()
time.sleep(0.1)
def _extra_handlers(self):
handlers = []
cfh = urllib.request.CacheFTPHandler()
self.addCleanup(cfh.clear_cache)
cfh.setTimeout(1)
handlers.append(cfh)
return handlers
class TimeoutTest(unittest.TestCase):
def test_http_basic(self):
self.assertIsNone(socket.getdefaulttimeout())
url = "http://www.example.com"
with support.transient_internet(url, timeout=None):
u = _urlopen_with_retry(url)
self.addCleanup(u.close)
self.assertIsNone(u.fp.raw._sock.gettimeout())
def test_http_default_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
url = "http://www.example.com"
with support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(url)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(u.fp.raw._sock.gettimeout(), 60)
def test_http_no_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
url = "http://www.example.com"
with support.transient_internet(url):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(url, timeout=None)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(u.fp.raw._sock.gettimeout())
def test_http_timeout(self):
url = "http://www.example.com"
with support.transient_internet(url):
u = _urlopen_with_retry(url, timeout=120)
self.addCleanup(u.close)
self.assertEqual(u.fp.raw._sock.gettimeout(), 120)
FTP_HOST = "ftp://ftp.mirror.nl/pub/gnu/"
def test_ftp_basic(self):
self.assertIsNone(socket.getdefaulttimeout())
with support.transient_internet(self.FTP_HOST, timeout=None):
u = _urlopen_with_retry(self.FTP_HOST)
self.addCleanup(u.close)
self.assertIsNone(u.fp.fp.raw._sock.gettimeout())
def test_ftp_default_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
with support.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(self.FTP_HOST)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
def test_ftp_no_timeout(self):
self.assertIsNone(socket.getdefaulttimeout())
with support.transient_internet(self.FTP_HOST):
socket.setdefaulttimeout(60)
try:
u = _urlopen_with_retry(self.FTP_HOST, timeout=None)
self.addCleanup(u.close)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(u.fp.fp.raw._sock.gettimeout())
def test_ftp_timeout(self):
with support.transient_internet(self.FTP_HOST):
u = _urlopen_with_retry(self.FTP_HOST, timeout=60)
self.addCleanup(u.close)
self.assertEqual(u.fp.fp.raw._sock.gettimeout(), 60)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 | 7,038,316,231,340,377,000 | 36.064327 | 93 | 0.556011 | false |
ssaavedra/liquidhaskell | tests/regrtest.py | 8 | 6362 | #!/usr/bin/python
# Copyright (c) 2009 The Regents of the University of California. All rights reserved.
#
# Permission is hereby granted, without written agreement and without
# license or royalty fees, to use, copy, modify, and distribute this
# software and its documentation for any purpose, provided that the
# above copyright notice and the following two paragraphs appear in
# all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY
# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
# ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
# IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION
# TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
import time, subprocess, optparse, sys, socket, os
sys.path.append("../")
import rtest as rtest
solve = "liquid ".split()
null = open("/dev/null", "w")
now = (time.asctime(time.localtime(time.time()))).replace(" ","_")
logfile = "../tests/logs/regrtest_results_%s_%s" % (socket.gethostname (), now)
argcomment = "--! run with "
liquidcomment = "{--! run liquid with "
endcomment = "-}"
def logged_sys_call(args, out=None, err=None, dir=None):
print "exec: " + " ".join(args)
return subprocess.call(args, stdout=out, stderr=err, cwd=dir)
def solve_quals(dir,file,bare,time,quiet,flags,lflags):
if quiet: out = null
else: out = None
if time: time = ["time"]
else: time = []
if lflags: lflags = ["--" + f for f in lflags]
hygiene_flags = []
(dn, bn) = os.path.split(file)
try:
os.makedirs(os.path.join(dir,dn,".liquid"))
except OSError:
pass
out = open(os.path.join(dir,dn,".liquid",bn) + ".log", "w")
rv = logged_sys_call(time + solve + flags + lflags + hygiene_flags + [file],
out=None, err=subprocess.STDOUT, dir=dir)
out.close()
return rv
def run_script(file,quiet):
if quiet: out = null
else: out = None
return logged_sys_call(file, out)
def getfileargs(file):
f = open(file)
l = f.readline()
f.close()
if l.startswith(argcomment):
return l[len(argcomment):].strip().split(" ")
else:
return []
def getliquidargs(file):
f = open(file)
l = f.readline()
f.close()
if l.startswith(liquidcomment):
return [arg for arg in l[len(liquidcomment):].strip().split(" ")
if arg!=endcomment]
else:
return []
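# Illustrative sketch (added note, not part of the original script): how a
# flag comment on the first line of a test file is parsed by getliquidargs;
# the flag name below is a made-up example.
def _example_liquid_arg_comment():
    first_line = '{--! run liquid with no-termination -}\n'
    if first_line.startswith(liquidcomment):
        return [arg for arg in first_line[len(liquidcomment):].strip().split(" ")
                if arg != endcomment]  # -> ['no-termination']
    return []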
class Config (rtest.TestConfig):
def __init__ (self, dargs, testdirs, logfile, threadcount):
rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount)
self.dargs = dargs
def run_test (self, dir, file):
path = os.path.join(dir,file)
if self.is_test(file):
lflags = getliquidargs(path)
fargs = getfileargs(path)
fargs = self.dargs + fargs
return solve_quals(dir, file, True, False, True, fargs, lflags)
elif file.endswith(".sh"):
return run_script(path, True)
def is_test (self, file):
return file.endswith(".hs") # or file.endswith(".lhs")
#####################################################################################
#DEFAULT
textIgnored = { "Data/Text/Axioms.hs"
, "Data/Text/Encoding/Error.hs"
, "Data/Text/Encoding/Fusion.hs"
, "Data/Text/Encoding/Fusion/Common.hs"
, "Data/Text/Encoding/Utf16.hs"
, "Data/Text/Encoding/Utf32.hs"
, "Data/Text/Encoding/Utf8.hs"
, "Data/Text/Fusion/CaseMapping.hs"
, "Data/Text/Fusion/Common.hs"
, "Data/Text/Fusion/Internal.hs"
, "Data/Text/IO.hs"
, "Data/Text/IO/Internal.hs"
, "Data/Text/Lazy/Builder/Functions.hs"
, "Data/Text/Lazy/Builder/Int.hs"
, "Data/Text/Lazy/Builder/Int/Digits.hs"
, "Data/Text/Lazy/Builder/Internal.hs"
, "Data/Text/Lazy/Builder/RealFloat.hs"
, "Data/Text/Lazy/Builder/RealFloat/Functions.hs"
, "Data/Text/Lazy/Encoding/Fusion.hs"
, "Data/Text/Lazy/IO.hs"
, "Data/Text/Lazy/Read.hs"
, "Data/Text/Read.hs"
, "Data/Text/Unsafe/Base.hs"
, "Data/Text/UnsafeShift.hs"
, "Data/Text/Util.hs"
}
demosIgnored = { "Composition.hs"
, "Eval.hs"
, "Inductive.hs"
, "Loop.hs"
, "TalkingAboutSets.hs"
, "refinements101reax.hs"
}
regtestdirs = [ ("pos", {}, 0)
, ("neg", {}, 1)
, ("crash", {}, 2)
, ("parser/pos", {}, 0)
, ("error_messages/pos", {}, 0)
, ("error_messages/crash", {}, 2)
]
benchtestdirs = [ ("../web/demos", demosIgnored, 0)
, ("../benchmarks/esop2013-submission", {"Base0.hs"}, 0)
, ("../benchmarks/bytestring-0.9.2.1", {}, 0)
, ("../benchmarks/text-0.11.2.3", textIgnored, 0)
, ("../benchmarks/vector-algorithms-0.5.4.2", {}, 0)
, ("../benchmarks/hscolour-1.20.0.0", {}, 0)
]
parser = optparse.OptionParser()
parser.add_option("-a", "--all", action="store_true", dest="alltests", help="run all tests")
parser.add_option("-t", "--threads", dest="threadcount", default=1, type=int, help="spawn n threads")
parser.add_option("-o", "--opts", dest="opts", default=[], action='append', type=str, help="additional arguments to liquid")
parser.disable_interspersed_args()
options, args = parser.parse_args()
print "options =", options
print "args =", args
def testdirs():
global testdirs
if options.alltests:
return regtestdirs + benchtestdirs
else:
return regtestdirs
testdirs = testdirs()
clean = os.path.abspath("../cleanup")
[os.system(("cd %s; %s; cd ../" % (d,clean))) for (d,_,_) in testdirs]
runner = rtest.TestRunner (Config (options.opts, testdirs, logfile, options.threadcount))
sys.exit(runner.run())
| bsd-3-clause | -6,117,226,347,591,025,000 | 35.147727 | 124 | 0.593838 | false |
revmischa/boto | boto/s3/__init__.py | 114 | 2836 | # Copyright (c) 2006-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
# Copyright (c) 2014, Steven Richards <sbrichards@mit.edu>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
class S3RegionInfo(RegionInfo):
def connect(self, **kw_params):
"""
Connect to this Region's endpoint. Returns an connection
object pointing to the endpoint associated with this region.
You may pass any of the arguments accepted by the connection
class's constructor as keyword arguments and they will be
passed along to the connection object.
:rtype: Connection object
:return: The connection to this regions endpoint
"""
if self.connection_cls:
return self.connection_cls(host=self.endpoint, **kw_params)
def regions():
"""
Get all available regions for the Amazon S3 service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.s3.connection import S3Connection
return get_regions(
's3',
region_cls=S3RegionInfo,
connection_cls=S3Connection
)
def connect_to_region(region_name, **kw_params):
for region in regions():
if 'host' in kw_params.keys():
            # Make sure the host specified is not empty
if kw_params['host'] not in ['', None]:
region.endpoint = kw_params['host']
del kw_params['host']
return region.connect(**kw_params)
# If it is nothing then remove it from kw_params and proceed with default
else:
del kw_params['host']
if region.name == region_name:
return region.connect(**kw_params)
return None
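# Illustrative usage sketch (added note, not part of the original module): the
# region name is an example and credentials are assumed to come from the usual
# boto configuration sources.
def _example_connect():
    # Without a 'host' keyword the named region's default endpoint is used; a
    # non-empty 'host' argument would override that endpoint instead.
    return connect_to_region('us-west-2')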
| mit | -6,787,125,218,032,336,000 | 37.324324 | 85 | 0.683004 | false |
girving/tensorflow | tensorflow/python/framework/graph_io.py | 6 | 2539 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for reading/writing graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
from google.protobuf import text_format
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.util.tf_export import tf_export
@tf_export('io.write_graph', 'train.write_graph')
def write_graph(graph_or_graph_def, logdir, name, as_text=True):
"""Writes a graph proto to a file.
The graph is written as a text proto unless `as_text` is `False`.
```python
v = tf.Variable(0, name='my_variable')
sess = tf.Session()
tf.train.write_graph(sess.graph_def, '/tmp/my-model', 'train.pbtxt')
```
or
```python
v = tf.Variable(0, name='my_variable')
sess = tf.Session()
tf.train.write_graph(sess.graph, '/tmp/my-model', 'train.pbtxt')
```
Args:
graph_or_graph_def: A `Graph` or a `GraphDef` protocol buffer.
logdir: Directory where to write the graph. This can refer to remote
filesystems, such as Google Cloud Storage (GCS).
name: Filename for the graph.
as_text: If `True`, writes the graph as an ASCII proto.
Returns:
The path of the output proto file.
"""
if isinstance(graph_or_graph_def, ops.Graph):
graph_def = graph_or_graph_def.as_graph_def()
else:
graph_def = graph_or_graph_def
# gcs does not have the concept of directory at the moment.
if not file_io.file_exists(logdir) and not logdir.startswith('gs:'):
file_io.recursive_create_dir(logdir)
path = os.path.join(logdir, name)
if as_text:
file_io.atomic_write_string_to_file(path,
text_format.MessageToString(graph_def))
else:
file_io.atomic_write_string_to_file(path, graph_def.SerializeToString())
return path
| apache-2.0 | 7,503,392,494,628,514,000 | 33.310811 | 80 | 0.684128 | false |
zak-k/cis | cis/test/plot_tests/idiff.py | 3 | 2350 | #!/usr/bin/env python
# (C) British Crown Copyright 2010 - 2014, Met Office
#
# This file was heavily influenced by a similar file in the iris package.
"""
Provides "diff-like" comparison of images.
Currently relies on matplotlib for image processing so limited to PNG format.
"""
from __future__ import (absolute_import, division, print_function)
import os.path
import shutil
import matplotlib.pyplot as plt
import matplotlib.image as mimg
import matplotlib.widgets as mwidget
def diff_viewer(expected_fname, result_fname, diff_fname):
plt.figure(figsize=(16, 16))
plt.suptitle(os.path.basename(expected_fname))
ax = plt.subplot(221)
ax.imshow(mimg.imread(expected_fname))
ax = plt.subplot(222, sharex=ax, sharey=ax)
ax.imshow(mimg.imread(result_fname))
ax = plt.subplot(223, sharex=ax, sharey=ax)
ax.imshow(mimg.imread(diff_fname))
def accept(event):
        # removes the expected result, and moves the most recent result in
print('ACCEPTED NEW FILE: %s' % (os.path.basename(expected_fname), ))
os.remove(expected_fname)
shutil.copy2(result_fname, expected_fname)
os.remove(diff_fname)
plt.close()
def reject(event):
print('REJECTED: %s' % (os.path.basename(expected_fname), ))
plt.close()
ax_accept = plt.axes([0.6, 0.35, 0.1, 0.075])
ax_reject = plt.axes([0.71, 0.35, 0.1, 0.075])
bnext = mwidget.Button(ax_accept, 'Accept change')
bnext.on_clicked(accept)
bprev = mwidget.Button(ax_reject, 'Reject')
bprev.on_clicked(reject)
plt.show()
def step_over_diffs():
import cis.test.plot_tests
image_dir = os.path.join(os.path.dirname(cis.test.plot_tests.__file__),
'reference', 'visual_tests')
diff_dir = os.path.join(os.path.dirname(cis.test.plot_tests.__file__),
'result_image_comparison')
for expected_fname in sorted(os.listdir(image_dir)):
result_path = os.path.join(diff_dir, expected_fname)
diff_path = result_path[:-4] + '-failed-diff.png'
# if the test failed, there will be a diff file
if os.path.exists(diff_path):
expected_path = os.path.join(image_dir, expected_fname)
diff_viewer(expected_path, result_path, diff_path)
if __name__ == '__main__':
step_over_diffs()
| gpl-3.0 | 691,823,414,193,231,400 | 31.638889 | 77 | 0.648511 | false |
macndesign/lettuce_webdriver | lettuce_webdriver/screenshot.py | 4 | 2043 | """Steps and utility functions for taking screenshots."""
import uuid
from lettuce import (
after,
step,
world,
)
import os.path
import json
import time
def set_save_directory(base, source):
"""Sets the root save directory for saving screenshots.
Screenshots will be saved in subdirectories under this directory by
browser window size. """
root = os.path.join(base, source)
if not os.path.isdir(root):
os.makedirs(root)
world.screenshot_root = root
def resolution_path(world):
window_size = world.browser.get_window_size()
return os.path.join(
world.screenshot_root,
'{}x{}'.format(window_size['width'], window_size['height']),
)
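# Illustrative sketch (added note, not part of the original module): the
# directory layout the two helpers above produce; the base directory, source
# name and window size are assumed example values.
def _example_screenshot_dir():
    root = os.path.join('/tmp/screenshots', 'firefox')    # set_save_directory('/tmp/screenshots', 'firefox')
    return os.path.join(root, '{}x{}'.format(1024, 768))  # resolution_path() for a 1024x768 window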
@step(r'I capture a screenshot$')
def capture_screenshot(step):
feature = step.scenario.feature
step.shot_name = '{}.png'.format(uuid.uuid4())
if getattr(feature, 'dir_path', None) is None:
feature.dir_path = resolution_path(world)
if not os.path.isdir(feature.dir_path):
os.makedirs(feature.dir_path)
filename = os.path.join(
feature.dir_path,
step.shot_name,
)
world.browser.get_screenshot_as_file(filename)
@step(r'I capture a screenshot after (\d+) seconds?$')
def capture_screenshot_delay(step, delay):
    time.sleep(int(delay))
    capture_screenshot(step)
@after.each_feature
def record_run_feature_report(feature):
if getattr(feature, 'dir_path', None) is None:
return
feature_name_json = '{}.json'.format(os.path.splitext(
os.path.basename(feature.described_at.file)
)[0])
report = {}
for scenario in feature.scenarios:
scenario_report = []
for step in scenario.steps:
shot_name = getattr(step, 'shot_name', None)
if shot_name is not None:
scenario_report.append(shot_name)
if scenario_report:
report[scenario.name] = scenario_report
if report:
with open(os.path.join(feature.dir_path, feature_name_json), 'w') as f:
json.dump(report, f)
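# Illustrative note (added, not part of the original module): the report
# written above maps scenario names to the screenshot file names captured in
# that scenario, for example (made-up values):
#
#   {"Search for a product": ["1f0e7e0a-9c2d-4b19-8a3e-0f1d2c3b4a5e.png"]}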
| mit | -1,381,010,187,349,928,400 | 26.986301 | 79 | 0.640724 | false |
rohitwaghchaure/erpnext-receipher | erpnext/patches/v5_0/update_item_and_description_again.py | 102 | 1661 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import frappe
from frappe.utils import cstr
import re
def execute():
item_details = frappe._dict()
for d in frappe.db.sql("select name, description from `tabItem`", as_dict=1):
description = cstr(d.description).strip()
new_desc = extract_description(description)
item_details.setdefault(d.name, frappe._dict({
"old_description": description,
"new_description": new_desc
}))
dt_list= ["Purchase Order Item","Supplier Quotation Item", "BOM", "BOM Explosion Item" , \
"BOM Item", "Opportunity Item" , "Quotation Item" , "Sales Order Item" , "Delivery Note Item" , \
"Material Request Item" , "Purchase Receipt Item" , "Stock Entry Detail"]
for dt in dt_list:
frappe.reload_doctype(dt)
records = frappe.db.sql("""select name, `{0}` as item_code, description from `tab{1}`
where description is not null and description like '%%<table%%'"""
.format("item" if dt=="BOM" else "item_code", dt), as_dict=1)
count = 1
for d in records:
if d.item_code and item_details.get(d.item_code) \
and cstr(d.description) == item_details.get(d.item_code).old_description:
desc = item_details.get(d.item_code).new_description
else:
desc = extract_description(cstr(d.description))
frappe.db.sql("""update `tab{0}` set description = %s
where name = %s """.format(dt), (desc, d.name))
count += 1
if count % 500 == 0:
frappe.db.commit()
def extract_description(desc):
for tag in ("img", "table", "tr", "td"):
desc = re.sub("\</*{0}[^>]*\>".format(tag), "", desc)
return desc
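# Illustrative sketch (added note, not part of the original patch): what
# extract_description does to a stored HTML description; the sample markup is
# a made-up example.
def _example_extract_description():
    sample = '<table><tr><td><img src="pipe.png">Steel Pipe</td></tr></table>'
    # The img/table/tr/td tags are stripped, leaving only the text 'Steel Pipe'.
    return extract_description(sample)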
| agpl-3.0 | 8,965,949,781,607,168,000 | 32.897959 | 98 | 0.666466 | false |
homeworkprod/byceps | byceps/services/ticketing/models/ticket_event.py | 1 | 1337 | """
byceps.services.ticketing.models.ticket_event
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2019 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from datetime import datetime
from typing import Any, Dict
from ....database import db, generate_uuid
from ....util.instances import ReprBuilder
from ..transfer.models import TicketID
TicketEventData = Dict[str, Any]
class TicketEvent(db.Model):
"""An event that refers to a ticket."""
__tablename__ = 'ticket_events'
id = db.Column(db.Uuid, default=generate_uuid, primary_key=True)
occurred_at = db.Column(db.DateTime, nullable=False)
event_type = db.Column(db.UnicodeText, index=True, nullable=False)
ticket_id = db.Column(db.Uuid, db.ForeignKey('tickets.id'), index=True, nullable=False)
data = db.Column(db.JSONB)
def __init__(self, occurred_at: datetime, event_type: str,
ticket_id: TicketID, data: TicketEventData) -> None:
self.occurred_at = occurred_at
self.event_type = event_type
self.ticket_id = ticket_id
self.data = data
def __repr__(self) -> str:
return ReprBuilder(self) \
.add_custom(repr(self.event_type)) \
.add_with_lookup('ticket_id') \
.add_with_lookup('data') \
.build()
| bsd-3-clause | -2,172,358,764,212,808,200 | 30.093023 | 91 | 0.631264 | false |
sinkuri256/python-for-android | python3-alpha/python3-src/Lib/distutils/tests/test_install_lib.py | 47 | 3460 | """Tests for distutils.command.install_data."""
import sys
import os
import unittest
from distutils.command.install_lib import install_lib
from distutils.extension import Extension
from distutils.tests import support
from distutils.errors import DistutilsOptionError
from test.support import run_unittest
class InstallLibTestCase(support.TempdirManager,
support.LoggingSilencer,
support.EnvironGuard,
unittest.TestCase):
def test_finalize_options(self):
pkg_dir, dist = self.create_dist()
cmd = install_lib(dist)
cmd.finalize_options()
self.assertEqual(cmd.compile, 1)
self.assertEqual(cmd.optimize, 0)
# optimize must be 0, 1, or 2
cmd.optimize = 'foo'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.optimize = '4'
self.assertRaises(DistutilsOptionError, cmd.finalize_options)
cmd.optimize = '2'
cmd.finalize_options()
self.assertEqual(cmd.optimize, 2)
@unittest.skipUnless(not sys.dont_write_bytecode,
'byte-compile not supported')
def test_byte_compile(self):
pkg_dir, dist = self.create_dist()
cmd = install_lib(dist)
cmd.compile = cmd.optimize = 1
f = os.path.join(pkg_dir, 'foo.py')
self.write_file(f, '# python file')
cmd.byte_compile([f])
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'foo.pyc')))
self.assertTrue(os.path.exists(os.path.join(pkg_dir, 'foo.pyo')))
def test_get_outputs(self):
pkg_dir, dist = self.create_dist()
cmd = install_lib(dist)
# setting up a dist environment
cmd.compile = cmd.optimize = 1
cmd.install_dir = pkg_dir
f = os.path.join(pkg_dir, 'foo.py')
self.write_file(f, '# python file')
cmd.distribution.py_modules = [pkg_dir]
cmd.distribution.ext_modules = [Extension('foo', ['xxx'])]
cmd.distribution.packages = [pkg_dir]
cmd.distribution.script_name = 'setup.py'
# get_output should return 4 elements
self.assertTrue(len(cmd.get_outputs()) >= 2)
def test_get_inputs(self):
pkg_dir, dist = self.create_dist()
cmd = install_lib(dist)
# setting up a dist environment
cmd.compile = cmd.optimize = 1
cmd.install_dir = pkg_dir
f = os.path.join(pkg_dir, 'foo.py')
self.write_file(f, '# python file')
cmd.distribution.py_modules = [pkg_dir]
cmd.distribution.ext_modules = [Extension('foo', ['xxx'])]
cmd.distribution.packages = [pkg_dir]
cmd.distribution.script_name = 'setup.py'
# get_input should return 2 elements
self.assertEqual(len(cmd.get_inputs()), 2)
def test_dont_write_bytecode(self):
# makes sure byte_compile is not used
pkg_dir, dist = self.create_dist()
cmd = install_lib(dist)
cmd.compile = 1
cmd.optimize = 1
old_dont_write_bytecode = sys.dont_write_bytecode
sys.dont_write_bytecode = True
try:
cmd.byte_compile([])
finally:
sys.dont_write_bytecode = old_dont_write_bytecode
self.assertTrue('byte-compiling is disabled' in self.logs[0][1])
def test_suite():
return unittest.makeSuite(InstallLibTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| apache-2.0 | 5,612,818,793,482,029,000 | 32.921569 | 73 | 0.613584 | false |
jalavik/invenio | invenio/legacy/bibingest/ingestion_package_interface.py | 13 | 17273 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""The ingestion package interface."""
__revision__ = "$Id$"
from datetime import datetime
try:
from hashlib import md5
except:
import md5
from .config import CFG_BIBINGEST_VERSIONING, \
CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE
# ********************
# Validation functions
# ********************
class IngestionFieldValidationError(Exception):
"""
Custom Exception class when field validation errors occur.
"""
pass
def positive_int(fieldname, value):
    try:
        value = int(value)
    except (TypeError, ValueError):
        msg = "For field name %s, received a non integer, expected a positive integer" % (fieldname,)
        raise IngestionFieldValidationError(msg)
    if value > 0:
        return value
    else:
        msg = "For field name %s, received a non-positive integer, expected a positive integer" % (fieldname,)
        raise IngestionFieldValidationError(msg)
def valid_string(fieldname, value):
if not value or not (isinstance(value, str) or isinstance(value, unicode)):
msg = "For field name %s, received an invalid or zero length string, expected a non zero length string" % (fieldname,)
raise IngestionFieldValidationError(msg)
else:
return value
_STANDARD_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
def valid_date(fieldname, value):
if isinstance(value, datetime):
return str(value.strftime(_STANDARD_TIME_FORMAT))
else:
try:
if isinstance(datetime.strptime(value , _STANDARD_TIME_FORMAT), datetime):
return value
except:
msg = "For field name %s, received an unrecognizable datetime format '%s', expected a string like '2002-04-18 14:57:11' or an instance of datetime.datetime" % (fieldname, str(value))
raise IngestionFieldValidationError(msg)
def valid_bit(dummy, value):
if value:
return 1
return 0
_ACCEPTED_FIELD_NAMES = {
# Don't use underscores ('_') for the field names.
# example
# 'fieldname': (default_value, validation_function),
# the ingestion package submission ID
'subid' : (lambda:'0', valid_string),
# the ingestion package record ID
'recid' : (lambda:0, positive_int),
# the date on which the ingestion package was submitted
'date' : (lambda:datetime.now().strftime(_STANDARD_TIME_FORMAT), valid_date),
# the ingestion package itself
'content' : (lambda:None, valid_string),
# the source of the ingestion package
'source' : (lambda:None, valid_string),
}
if CFG_BIBINGEST_VERSIONING:
version = {
# the version of the ingestion package
'version' : (lambda:1, valid_bit),
}
_ACCEPTED_FIELD_NAMES.update(version)
_ACCEPTED_FIELD_OPERATORS = (
#'value', # When no operator is used, the "value" keyword is reserved
# values greater than this
'from',
# values greater or equal than this
'and_from',
# values lower than this
'to',
# values lower or equal than this
'and_to',
# this value should be treated as a regular expression
'regex',
# range of values
'in',
# sort results ascending
'sort_by_asc',
# sort results descending
'sort_by_desc',
# group results
'group_by',
    # limit the number of results
'limit',
# skip this number of results from the beginning
'skip',
)
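# Illustrative note (added, not part of the original module): field names and
# operators combine in the keyword arguments passed to the CRUD methods of
# IngestionPackage below; the sample values are made up.
#
#   get_many(recid=5)                          -> {'recid': {'value': 5}, ...}
#   get_many(date_from='2002-04-18 14:57:11')  -> {'date': {'from': '2002-04-18 14:57:11'}, ...}
#   get_many(_limit=10, _sort_by_desc='date')  -> adds {'_operators': {'limit': 10, 'sort_by_desc': 'date'}}
#
# (With CFG_BIBINGEST_VERSIONING enabled, a default {'version': {'value': 1}}
# entry is also included.)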
class IngestionPackage(object):
"""The Ingestion Package default class"""
def __init__(self, storage_engine_instance, storage_engine_settings = None):
"""
The constructor.
"""
self._accepted_field_names = _ACCEPTED_FIELD_NAMES
self._accepted_field_operators = _ACCEPTED_FIELD_OPERATORS
self._storage_engine = storage_engine_instance
self._storage_engine_settings = storage_engine_settings
if self._storage_engine_settings is not None:
self.reconfigure_storage_engine()
def reconfigure_storage_engine(self):
"""
Reconfigures the storage engine according to the given settings.
"""
self._storage_engine.reconfigure(self._storage_engine_settings)
# Helper functions
def _parse_kwargs(self, kwargs):
"""
Parses the given kwargs based on the list of accepted field names and
operators and returns a dictionary.
"""
parsed_kwargs = {}
if kwargs is None:
return parsed_kwargs
for kwarg_key, kwarg_value in kwargs.iteritems():
# Split the fieldname from the operator
kwarg_key_field_name_and_operator = kwarg_key.split('_', 1)
if len(kwarg_key_field_name_and_operator) == 1:
# Received a fieldname without any operators
kwarg_key_field_name, kwarg_key_field_operator = (kwarg_key_field_name_and_operator[0], '')
else:
# Received a fieldname with an operator
kwarg_key_field_name, kwarg_key_field_operator = kwarg_key_field_name_and_operator
if kwarg_key_field_name:
                # Check if this field name is in the list of accepted field names
if kwarg_key_field_name in self._accepted_field_names.keys():
# Check if this field name is already in the list of parsed keyword arguments
if parsed_kwargs.has_key(kwarg_key_field_name):
# Check if there is an operator set for this field name
if kwarg_key_field_operator:
# Check if the operator is in the list of accepted field operators
if kwarg_key_field_operator in self._accepted_field_operators:
# Add this field operator and its value to the parsed keyword arguments
parsed_kwargs[kwarg_key_field_name][kwarg_key_field_operator] = kwarg_value
else:
# No field operator was set, therefore add the value for this field
                            # to the parsed keyword arguments
kwarg_value = self._accepted_field_names[kwarg_key_field_name][1](kwarg_key_field_name, kwarg_value)
parsed_kwargs[kwarg_key_field_name]['value'] = kwarg_value
else:
# This is a new field name. Check if an operator was set for this field name
if kwarg_key_field_operator:
# Check if the operator is in the list of accepted field operators
if kwarg_key_field_operator in self._accepted_field_operators:
# Add this field operator and its value to the parsed keyword arguments
parsed_kwargs[kwarg_key_field_name] = { kwarg_key_field_operator : kwarg_value }
else:
# No field operator was set, therefore add the value for this field
                            # to the parsed keyword arguments
kwarg_value = self._accepted_field_names[kwarg_key_field_name][1](kwarg_key_field_name, kwarg_value)
parsed_kwargs[kwarg_key_field_name] = { 'value' : kwarg_value }
else:
# The kwarg_key_field_name is empty, it means we have
# an operator like filter, skip, etc
if kwarg_key_field_operator in self._accepted_field_operators:
if parsed_kwargs.has_key('_operators'):
parsed_kwargs['_operators'][kwarg_key_field_operator] = kwarg_value
else:
parsed_kwargs['_operators'] = { kwarg_key_field_operator : kwarg_value }
if CFG_BIBINGEST_VERSIONING:
# Set the latest version, unless it has been explicitly set
version_field_name = 'version'
version_default_value = self._accepted_field_names[version_field_name][0]()
parsed_kwargs.setdefault(version_field_name, { 'value' : version_default_value })
return parsed_kwargs
def _complete_parsed_kwargs(self, parsed_kwargs):
"""
Completes the dictionary of parsed_kwargs with the necessary default values.
"""
for items in self._accepted_field_names.iteritems():
fieldname = items[0]
default_value = items[1][0]()
if fieldname not in parsed_kwargs.keys() and default_value is not None:
parsed_kwargs[fieldname] = { 'value' : default_value }
return parsed_kwargs
# Implement all the CRUD functions: create, read, update and delete
# Read one
def get_one(self, unique_id):
"""
Retrieves the ingestion package from the database given its unique ID.
"""
# TODO: what if we have concurrent requests and the storage engine
# gets reconfigured before actually executing the query?
if CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE:
self.reconfigure_storage_engine()
return self._storage_engine.get_one(unique_id)
# Read many
def get_many(self, **kwargs):
"""
Retrieves all the ingestion packages from the database that match the given
arguments. Arguments must comply to a specified list of argument names.
"""
parsed_kwargs = self._parse_kwargs(kwargs)
if CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE:
self.reconfigure_storage_engine()
return self._storage_engine.get_many(parsed_kwargs)
# Create one
def store_one(self, **kwargs):
"""
Stores the ingestion package into the database.
Returns the id of the ingestion_package in the storage engine.
"""
parsed_kwargs = self._parse_kwargs(kwargs)
parsed_kwargs = self._complete_parsed_kwargs(parsed_kwargs)
if CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE:
self.reconfigure_storage_engine()
# TODO: add optional check to make sure we don't store duplicates
# could do a get_many before storing to check if any results come up
return self._storage_engine.store_one(parsed_kwargs)
# Create many
def store_many(self, ingestion_packages):
"""
Stores the ingestion packages into the database.
Must be given an iterable of dictionaries as input.
Each dictionary must contain "key: value" pairs containing field names and
their values as they would have been give to the store_ingestion_package
function.
"""
data = []
for ingestion_package in ingestion_packages:
parsed_kwargs = self._parse_kwargs(ingestion_package)
parsed_kwargs = self._complete_parsed_kwargs(parsed_kwargs)
data.append(parsed_kwargs)
if CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE:
self.reconfigure_storage_engine()
# TODO: add optional check to make sure we don't store duplicates
# could do a get_many before storing to check if any results come up
return self._storage_engine.store_many(data)
# Delete one
def remove_one(self, unique_id):
"""
Removes the ingestion package given its unique ID.
"""
if CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE:
self.reconfigure_storage_engine()
return self._storage_engine.remove_one(unique_id)
# Delete many
def remove_many(self, **kwargs):
"""
Removes the ingestion packages based on the given arguments.
"""
parsed_kwargs = self._parse_kwargs(kwargs)
if CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE:
self.reconfigure_storage_engine()
if CFG_BIBINGEST_VERSIONING:
# MAYBE: check if version is set as 0 (old versions) and don't continue?
version_field_name = 'version'
version_default_value = self._accepted_field_names[version_field_name][0]()
#changes = { version_field_name : int( not version_default_value ) }
#parsed_changes = self._parse_kwargs(changes)
parsed_changes = { version_field_name : { 'value' : int( not version_default_value ) } }
return self._storage_engine.update_many(parsed_changes, parsed_kwargs)
else:
return self._storage_engine.remove_many(parsed_kwargs)
# Update one
def update_one(self, changes = None, **kwargs):
"""
Updates one ingestion package (the first one found) matching the kwargs
according to the changes dictionary.
The changes dictionary must contain "key: value" pairs containing field names
and their values as they would have been given to the
store_ingestion_package function.
"""
parsed_kwargs = self._parse_kwargs(kwargs)
if CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE:
self.reconfigure_storage_engine()
if CFG_BIBINGEST_VERSIONING:
version_field_name = 'version'
version_default_value = self._accepted_field_names[version_field_name][0]()
matching_entries = self._storage_engine.get_many(parsed_kwargs)
for matching_entry in matching_entries:
matching_entry.update({ version_field_name : int( not version_default_value ) })
parsed_matching_entry = self._parse_kwargs(matching_entry)
self._storage_engine.store_one(parsed_matching_entry)
break
date_field_name = 'date'
date_now_value = datetime.now().strftime(_STANDARD_TIME_FORMAT)
date_changes = { date_field_name : date_now_value }
changes.update(date_changes)
parsed_changes = self._parse_kwargs(changes)
return self._storage_engine.update_one(parsed_changes, parsed_kwargs)
# Update many
def update_many(self, changes = None, **kwargs):
"""
        Updates all the ingestion packages matching the kwargs
according to the changes dictionary.
The changes dictionary must contain "key: value" pairs containing field names
and their values as they would have been given to the
store_ingestion_package function.
"""
parsed_kwargs = self._parse_kwargs(kwargs)
if CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE:
self.reconfigure_storage_engine()
if CFG_BIBINGEST_VERSIONING:
version_field_name = 'version'
version_default_value = self._accepted_field_names[version_field_name][0]()
matching_entries = self._storage_engine.get_many(parsed_kwargs)
# TODO: make this more efficient. Gather all the matching entries,
# change 'version' for all of them and then run store_many
# for all of them together
for matching_entry in matching_entries:
matching_entry.update({ version_field_name : int( not version_default_value ) })
parsed_matching_entry = self._parse_kwargs(matching_entry)
self._storage_engine.store_one(parsed_matching_entry)
date_field_name = 'date'
date_now_value = datetime.now().strftime(_STANDARD_TIME_FORMAT)
date_changes = { date_field_name : date_now_value }
changes.update(date_changes)
parsed_changes = self._parse_kwargs(changes)
return self._storage_engine.update_many(parsed_changes, parsed_kwargs)
# Other functions
def count(self):
"""
        Returns the total count of ingestion package entries.
"""
if CFG_BIBINGEST_ONE_STORAGE_ENGINE_INSTANCE_PER_STORAGE_ENGINE:
self.reconfigure_storage_engine()
return self._storage_engine.count()
# Validate
def validate(self, content, md5_hash):
"""
Validates the ingestion package by checking its md5 hash.
"""
        try:
            # On Python >= 2.5 md5 comes from hashlib and is callable directly;
            # on older versions the md5 module is used instead, hence md5.new below.
            content_md5_hash = md5(content).hexdigest()
        except TypeError:
            content_md5_hash = md5.new(content).hexdigest()
return content_md5_hash == md5_hash
| gpl-2.0 | 1,866,466,573,543,977,000 | 38.891455 | 194 | 0.62178 | false |
lbeltrame/bcbio-nextgen | bcbio/ngsalign/star.py | 2 | 14590 | import os
import sys
import shutil
import subprocess
import contextlib
from collections import namedtuple
import bcbio.bed as bed
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.utils import (safe_makedir, file_exists, is_gzipped)
from bcbio.provenance import do
from bcbio import utils
from bcbio.log import logger
from bcbio.pipeline import datadict as dd
from bcbio.ngsalign import postalign
from bcbio.bam import fastq
from bcbio.heterogeneity import chromhacks
CLEANUP_FILES = ["Aligned.out.sam", "Log.out", "Log.progress.out"]
ALIGN_TAGS = ["NH", "HI", "NM", "MD", "AS"]
def align(fastq_file, pair_file, ref_file, names, align_dir, data):
if not ref_file:
logger.error("STAR index not found. We don't provide the STAR indexes "
"by default because they are very large. You can install "
"the index for your genome with: bcbio_nextgen.py upgrade "
"--aligners star --genomes genome-build-name --data")
sys.exit(1)
max_hits = 10
    srna = data["analysis"].lower().startswith("smallrna-seq")
srna_opts = ""
if srna:
max_hits = 1000
srna_opts = "--alignIntronMax 1"
config = data["config"]
star_dirs = _get_star_dirnames(align_dir, data, names)
if file_exists(star_dirs.final_out):
data = _update_data(star_dirs.final_out, star_dirs.out_dir, names, data)
out_log_file = os.path.join(align_dir, dd.get_lane(data) + "Log.final.out")
data = dd.update_summary_qc(data, "star", base=out_log_file)
return data
star_path = config_utils.get_program("STAR", config)
def _unpack_fastq(f):
"""Use process substitution instead of readFilesCommand for gzipped inputs.
Prevents issues on shared filesystems that don't support FIFO:
https://github.com/alexdobin/STAR/issues/143
"""
if f and is_gzipped(f):
return "<(gunzip -c %s)" % f
else:
return f
fastq_files = (" ".join([_unpack_fastq(fastq_file), _unpack_fastq(pair_file)])
if pair_file else _unpack_fastq(fastq_file))
num_cores = dd.get_num_cores(data)
gtf_file = dd.get_transcriptome_gtf(data, default=dd.get_gtf_file(data))
if ref_file.endswith("chrLength"):
ref_file = os.path.dirname(ref_file)
if index_has_alts(ref_file):
logger.error(
"STAR is being run on an index with ALTs which STAR is not "
"designed for. Please remake your STAR index or use an ALT-aware "
"aligner like hisat2")
sys.exit(1)
with file_transaction(data, align_dir) as tx_align_dir:
tx_1pass_dir = tx_align_dir + "1pass"
tx_star_dirnames = _get_star_dirnames(tx_1pass_dir, data, names)
tx_out_dir, tx_out_file, tx_out_prefix, tx_final_out = tx_star_dirnames
safe_makedir(tx_1pass_dir)
safe_makedir(tx_out_dir)
cmd = ("{star_path} --genomeDir {ref_file} --readFilesIn {fastq_files} "
"--runThreadN {num_cores} --outFileNamePrefix {tx_out_prefix} "
"--outReadsUnmapped Fastx --outFilterMultimapNmax {max_hits} "
"--outStd BAM_Unsorted {srna_opts} "
"--limitOutSJcollapsed 2000000 "
"--outSAMtype BAM Unsorted "
"--outSAMmapqUnique 60 "
"--outSAMunmapped Within --outSAMattributes %s " % " ".join(ALIGN_TAGS))
cmd += _add_sj_index_commands(fastq_file, ref_file, gtf_file) if not srna else ""
cmd += _read_group_option(names)
if dd.get_fusion_caller(data):
if "arriba" in dd.get_fusion_caller(data):
cmd += (
"--chimSegmentMin 10 --chimOutType WithinBAM "
"--chimJunctionOverhangMin 10 --chimScoreMin 1 --chimScoreDropMax 30 "
"--chimScoreJunctionNonGTAG 0 --chimScoreSeparation 1 "
"--alignSJstitchMismatchNmax 5 -1 5 5 "
"--chimSegmentReadGapMax 3 "
"--peOverlapNbasesMin 10 "
"--alignSplicedMateMapLminOverLmate 0.5 ")
else:
cmd += (" --chimSegmentMin 12 --chimJunctionOverhangMin 12 "
"--chimScoreDropMax 30 --chimSegmentReadGapMax 5 "
"--chimScoreSeparation 5 ")
if "oncofuse" in dd.get_fusion_caller(data):
cmd += "--chimOutType Junctions "
else:
cmd += "--chimOutType WithinBAM "
strandedness = utils.get_in(data, ("config", "algorithm", "strandedness"),
"unstranded").lower()
if strandedness == "unstranded" and not srna:
cmd += " --outSAMstrandField intronMotif "
if not srna:
cmd += " --quantMode TranscriptomeSAM "
resources = config_utils.get_resources("star", data["config"])
if resources.get("options", []):
cmd += " " + " ".join([str(x) for x in resources.get("options", [])])
cmd += " | " + postalign.sam_to_sortbam_cl(data, tx_final_out)
cmd += " > {tx_final_out} "
run_message = "Running 1st pass of STAR aligner on %s and %s" % (fastq_file, ref_file)
do.run(cmd.format(**locals()), run_message, None)
sjfile = get_splicejunction_file(tx_out_dir, data)
sjflag = f"--sjdbFileChrStartEnd {sjfile}" if sjfile else ""
tx_star_dirnames = _get_star_dirnames(tx_align_dir, data, names)
tx_out_dir, tx_out_file, tx_out_prefix, tx_final_out = tx_star_dirnames
safe_makedir(tx_align_dir)
safe_makedir(tx_out_dir)
cmd = ("{star_path} --genomeDir {ref_file} --readFilesIn {fastq_files} "
"--runThreadN {num_cores} --outFileNamePrefix {tx_out_prefix} "
"--outReadsUnmapped Fastx --outFilterMultimapNmax {max_hits} "
"--outStd BAM_Unsorted {srna_opts} "
"--limitOutSJcollapsed 2000000 "
"{sjflag} "
"--outSAMtype BAM Unsorted "
"--outSAMmapqUnique 60 "
"--outSAMunmapped Within --outSAMattributes %s " % " ".join(ALIGN_TAGS))
cmd += _add_sj_index_commands(fastq_file, ref_file, gtf_file) if not srna else ""
cmd += _read_group_option(names)
if dd.get_fusion_caller(data):
if "arriba" in dd.get_fusion_caller(data):
cmd += (
"--chimSegmentMin 10 --chimOutType WithinBAM SoftClip Junctions "
"--chimJunctionOverhangMin 10 --chimScoreMin 1 --chimScoreDropMax 30 "
"--chimScoreJunctionNonGTAG 0 --chimScoreSeparation 1 "
"--alignSJstitchMismatchNmax 5 -1 5 5 "
"--chimSegmentReadGapMax 3 ")
else:
cmd += (" --chimSegmentMin 12 --chimJunctionOverhangMin 12 "
"--chimScoreDropMax 30 --chimSegmentReadGapMax 5 "
"--chimScoreSeparation 5 ")
if "oncofuse" in dd.get_fusion_caller(data):
cmd += "--chimOutType Junctions "
else:
cmd += "--chimOutType WithinBAM "
strandedness = utils.get_in(data, ("config", "algorithm", "strandedness"),
"unstranded").lower()
if strandedness == "unstranded" and not srna:
cmd += " --outSAMstrandField intronMotif "
if not srna:
cmd += " --quantMode TranscriptomeSAM "
resources = config_utils.get_resources("star", data["config"])
if resources.get("options", []):
cmd += " " + " ".join([str(x) for x in resources.get("options", [])])
cmd += " | " + postalign.sam_to_sortbam_cl(data, tx_final_out)
cmd += " > {tx_final_out} "
run_message = "Running 2nd pass of STAR aligner on %s and %s" % (fastq_file, ref_file)
do.run(cmd.format(**locals()), run_message, None)
data = _update_data(star_dirs.final_out, star_dirs.out_dir, names, data)
out_log_file = os.path.join(align_dir, dd.get_lane(data) + "Log.final.out")
data = dd.update_summary_qc(data, "star", base=out_log_file)
return data
StarOutDirs = namedtuple(
'StarOutDirs',
['out_dir', 'out_file', 'out_prefix', 'final_out']
)
def _get_star_dirnames(align_dir, data, names):
ALIGNED_OUT_FILE = "Aligned.out.sam"
out_prefix = os.path.join(align_dir, dd.get_lane(data))
out_file = out_prefix + ALIGNED_OUT_FILE
out_dir = os.path.join(align_dir, "%s_star" % dd.get_lane(data))
final_out = os.path.join(out_dir, "{0}.bam".format(names["sample"]))
return StarOutDirs(out_dir, out_file, out_prefix, final_out)
def _add_sj_index_commands(fq1, ref_file, gtf_file):
"""
    newer versions of STAR can generate splice junction databases on the fly;
    this is preferable since we can tailor it to the read lengths
"""
if _has_sj_index(ref_file):
return ""
else:
rlength = fastq.estimate_maximum_read_length(fq1)
cmd = " --sjdbGTFfile %s " % gtf_file
cmd += " --sjdbOverhang %s " % str(rlength - 1)
return cmd
def _has_sj_index(ref_file):
"""this file won't exist if we can do on the fly splice junction indexing"""
return (file_exists(os.path.join(ref_file, "sjdbInfo.txt")) and
(file_exists(os.path.join(ref_file, "transcriptInfo.tab"))))
def _update_data(align_file, out_dir, names, data):
data = dd.set_work_bam(data, align_file)
data = dd.set_align_bam(data, align_file)
transcriptome_file = _move_transcriptome_file(out_dir, names)
data = dd.set_transcriptome_bam(data, transcriptome_file)
sjfile = get_splicejunction_file(out_dir, data)
if sjfile:
data = dd.set_starjunction(data, sjfile)
sjbed = junction2bed(sjfile)
data = dd.set_junction_bed(data, sjbed)
sjchimfile = get_chimericjunction_file(out_dir, data)
data = dd.set_chimericjunction(data, sjchimfile)
return data
def _move_transcriptome_file(out_dir, names):
out_file = os.path.join(out_dir, "{0}.transcriptome.bam".format(names["sample"]))
star_file = os.path.join(out_dir, os.pardir,
"{0}Aligned.toTranscriptome.out.bam".format(names["lane"]))
# if the out_file or the star_file doesn't exist, we didn't run the
# transcriptome mapping
if not file_exists(out_file):
if not file_exists(star_file):
return None
else:
shutil.move(star_file, out_file)
return out_file
def _read_group_option(names):
rg_id = names["rg"]
rg_sample = names["sample"]
rg_library = names["pl"]
rg_platform_unit = names["pu"]
rg_lb = ("LB:%s " % names.get("lb")) if names.get("lb") else ""
return (" --outSAMattrRGline ID:{rg_id} PL:{rg_library} "
"PU:{rg_platform_unit} SM:{rg_sample} {rg_lb}").format(**locals())
def remap_index_fn(ref_file):
"""Map sequence references to equivalent star indexes
"""
return os.path.join(os.path.dirname(os.path.dirname(ref_file)), "star")
def index(ref_file, out_dir, data):
"""Create a STAR index in the defined reference directory.
"""
(ref_dir, local_file) = os.path.split(ref_file)
gtf_file = dd.get_transcriptome_gtf(data, dd.get_gtf_file(data))
if not utils.file_exists(gtf_file):
raise ValueError("%s not found, could not create a star index." % (gtf_file))
if not utils.file_exists(out_dir):
with tx_tmpdir(data, os.path.dirname(out_dir)) as tx_out_dir:
num_cores = dd.get_cores(data)
cmd = ("STAR --genomeDir {tx_out_dir} --genomeFastaFiles {ref_file} "
"--runThreadN {num_cores} "
"--runMode genomeGenerate --sjdbOverhang 99 --sjdbGTFfile {gtf_file}")
do.run(cmd.format(**locals()), "Index STAR")
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.move(tx_out_dir, out_dir)
return out_dir
def get_star_version(data):
star_path = config_utils.get_program("STAR", dd.get_config(data))
cmd = "%s --version" % star_path
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
with contextlib.closing(subp.stdout) as stdout:
for line in stdout:
if "STAR_" in line:
version = line.split("STAR_")[1].strip()
return version
def get_chimericjunction_file(out_dir, data):
"""
locate the chimeric splice junction file starting from the alignment directory
"""
samplename = dd.get_sample_name(data)
sjfile = os.path.join(out_dir, os.pardir, f"{samplename}Chimeric.out.junction")
if file_exists(sjfile):
return sjfile
else:
return None
def get_splicejunction_file(out_dir, data):
"""
locate the splicejunction file starting from the alignment directory
"""
samplename = dd.get_sample_name(data)
sjfile = os.path.join(out_dir, os.pardir, f"{samplename}SJ.out.tab")
if file_exists(sjfile):
return sjfile
else:
return None
def junction2bed(junction_file):
"""
reformat the STAR junction file to BED3 format, one end of the splice junction per line
"""
base, _ = os.path.splitext(junction_file)
out_file = base + "-minimized.bed"
if file_exists(out_file):
return out_file
if not file_exists(junction_file):
return None
with file_transaction(out_file) as tx_out_file:
with open(junction_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
tokens = line.split()
chrom, sj1, sj2 = tokens[0:3]
if int(sj1) > int(sj2):
                        sj1, sj2 = sj2, sj1
out_handle.write("\t".join([chrom, sj1, sj1]) + "\n")
out_handle.write("\t".join([chrom, sj2, sj2]) + "\n")
minimize = bed.minimize(tx_out_file)
minimize.saveas(tx_out_file)
return out_file
def index_has_alts(ref_file):
name_file = os.path.join(os.path.dirname(ref_file), "chrName.txt")
with open(name_file) as in_handle:
names = [x.strip() for x in in_handle.readlines()]
has_alts = [chromhacks.is_alt(chrom) for chrom in names]
return any(has_alts)
| mit | -5,362,305,257,976,909,000 | 42.813814 | 94 | 0.59438 | false |
francisco-dlp/hyperspy | hyperspy/tests/__init__.py | 4 | 1870 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
import os
import warnings
from hyperspy.defaults_parser import preferences
preferences.General.show_progressbar = False
# Check if we should fail on external deprecation messages
fail_on_external = os.environ.pop('FAIL_ON_EXTERNAL_DEPRECATION', False)
if isinstance(fail_on_external, str):
fail_on_external = (fail_on_external.lower() in
['true', 't', '1', 'yes', 'y', 'set'])
if fail_on_external:
warnings.filterwarnings(
'error', category=DeprecationWarning)
# Travis setup has these warnings, so ignore:
warnings.filterwarnings(
'ignore',
r"BaseException\.message has been deprecated as of Python 2\.6",
DeprecationWarning)
# Don't care about warnings in hyperspy in this mode!
warnings.filterwarnings('default', module="hyperspy")
else:
# Fall-back filter: Error
warnings.simplefilter('error')
warnings.filterwarnings(
'ignore', "Failed to import the optional scikit image package",
UserWarning)
    # We allow external warnings:
warnings.filterwarnings('default',
module="(?!hyperspy)")
| gpl-3.0 | 2,832,677,484,817,261,000 | 37.163265 | 72 | 0.702674 | false |
dablak/boto | boto/resultset.py | 20 | 6557 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.s3.user import User
class ResultSet(list):
"""
The ResultSet is used to pass results back from the Amazon services
    to the client. It is a light wrapper around Python's :py:class:`list` class,
with some additional methods for parsing XML results from AWS.
Because I don't really want any dependencies on external libraries,
I'm using the standard SAX parser that comes with Python. The good news is
that it's quite fast and efficient but it makes some things rather
difficult.
You can pass in, as the marker_elem parameter, a list of tuples.
Each tuple contains a string as the first element which represents
the XML element that the resultset needs to be on the lookout for
and a Python class as the second element of the tuple. Each time the
specified element is found in the XML, a new instance of the class
will be created and popped onto the stack.
:ivar str next_token: A hash used to assist in paging through very long
result sets. In most cases, passing this value to certain methods
will give you another 'page' of results.
"""
def __init__(self, marker_elem=None):
list.__init__(self)
if isinstance(marker_elem, list):
self.markers = marker_elem
else:
self.markers = []
self.marker = None
self.key_marker = None
self.next_marker = None # avail when delimiter used
self.next_key_marker = None
self.next_upload_id_marker = None
self.next_version_id_marker = None
        self.next_generation_marker = None
self.version_id_marker = None
self.is_truncated = False
self.next_token = None
self.status = True
def startElement(self, name, attrs, connection):
for t in self.markers:
if name == t[0]:
obj = t[1](connection)
self.append(obj)
return obj
if name == 'Owner':
# Makes owner available for get_service and
# perhaps other lists where not handled by
# another element.
self.owner = User()
return self.owner
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'IsTruncated':
self.is_truncated = self.to_boolean(value)
elif name == 'Marker':
self.marker = value
elif name == 'KeyMarker':
self.key_marker = value
elif name == 'NextMarker':
self.next_marker = value
elif name == 'NextKeyMarker':
self.next_key_marker = value
elif name == 'VersionIdMarker':
self.version_id_marker = value
elif name == 'NextVersionIdMarker':
self.next_version_id_marker = value
elif name == 'NextGenerationMarker':
self.next_generation_marker = value
elif name == 'UploadIdMarker':
self.upload_id_marker = value
elif name == 'NextUploadIdMarker':
self.next_upload_id_marker = value
elif name == 'Bucket':
self.bucket = value
elif name == 'MaxUploads':
self.max_uploads = int(value)
elif name == 'MaxItems':
self.max_items = int(value)
elif name == 'Prefix':
self.prefix = value
elif name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'ItemName':
self.append(value)
elif name == 'NextToken':
self.next_token = value
elif name == 'nextToken':
self.next_token = value
# Code exists which expects nextToken to be available, so we
            # set it here to remain backwards-compatible.
self.nextToken = value
elif name == 'BoxUsage':
try:
connection.box_usage += float(value)
            except (AttributeError, TypeError, ValueError):
pass
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
else:
setattr(self, name, value)
class BooleanResult(object):
def __init__(self, marker_elem=None):
self.status = True
self.request_id = None
self.box_usage = None
def __repr__(self):
if self.status:
return 'True'
else:
return 'False'
def __nonzero__(self):
return self.status
def startElement(self, name, attrs, connection):
return None
def to_boolean(self, value, true_value='true'):
if value == true_value:
return True
else:
return False
def endElement(self, name, value, connection):
if name == 'return':
self.status = self.to_boolean(value)
elif name == 'StatusCode':
self.status = self.to_boolean(value, 'Success')
elif name == 'IsValid':
self.status = self.to_boolean(value, 'True')
elif name == 'RequestId':
self.request_id = value
elif name == 'requestId':
self.request_id = value
elif name == 'BoxUsage':
            self.box_usage = value
else:
setattr(self, name, value)
| mit | -7,485,846,445,197,036,000 | 36.683908 | 79 | 0.606375 | false |
windinthew/audacity | lib-src/lv2/lv2/plugins/eg-fifths.lv2/waflib/Logs.py | 196 | 4755 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re,traceback,sys
_nocolor=os.environ.get('NOCOLOR','no')not in('no','0','false')
try:
if not _nocolor:
import waflib.ansiterm
except ImportError:
pass
try:
import threading
except ImportError:
if not'JOBS'in os.environ:
os.environ['JOBS']='1'
else:
wlock=threading.Lock()
class sync_stream(object):
def __init__(self,stream):
self.stream=stream
self.encoding=self.stream.encoding
def write(self,txt):
try:
wlock.acquire()
self.stream.write(txt)
self.stream.flush()
finally:
wlock.release()
def fileno(self):
return self.stream.fileno()
def flush(self):
self.stream.flush()
def isatty(self):
return self.stream.isatty()
if not os.environ.get('NOSYNC',False):
if id(sys.stdout)==id(sys.__stdout__):
sys.stdout=sync_stream(sys.stdout)
sys.stderr=sync_stream(sys.stderr)
import logging
LOG_FORMAT="%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s"
HOUR_FORMAT="%H:%M:%S"
zones=''
verbose=0
colors_lst={'USE':True,'BOLD':'\x1b[01;1m','RED':'\x1b[01;31m','GREEN':'\x1b[32m','YELLOW':'\x1b[33m','PINK':'\x1b[35m','BLUE':'\x1b[01;34m','CYAN':'\x1b[36m','NORMAL':'\x1b[0m','cursor_on':'\x1b[?25h','cursor_off':'\x1b[?25l',}
got_tty=not os.environ.get('TERM','dumb')in['dumb','emacs']
if got_tty:
try:
got_tty=sys.stderr.isatty()and sys.stdout.isatty()
except AttributeError:
got_tty=False
if(not got_tty and os.environ.get('TERM','dumb')!='msys')or _nocolor:
colors_lst['USE']=False
def get_term_cols():
return 80
try:
import struct,fcntl,termios
except ImportError:
pass
else:
if got_tty:
def get_term_cols_real():
dummy_lines,cols=struct.unpack("HHHH",fcntl.ioctl(sys.stderr.fileno(),termios.TIOCGWINSZ,struct.pack("HHHH",0,0,0,0)))[:2]
return cols
try:
get_term_cols_real()
except Exception:
pass
else:
get_term_cols=get_term_cols_real
get_term_cols.__doc__="""
Get the console width in characters.
:return: the number of characters per line
:rtype: int
"""
def get_color(cl):
if not colors_lst['USE']:return''
return colors_lst.get(cl,'')
class color_dict(object):
def __getattr__(self,a):
return get_color(a)
def __call__(self,a):
return get_color(a)
colors=color_dict()
re_log=re.compile(r'(\w+): (.*)',re.M)
class log_filter(logging.Filter):
def __init__(self,name=None):
pass
def filter(self,rec):
rec.c1=colors.PINK
rec.c2=colors.NORMAL
rec.zone=rec.module
if rec.levelno>=logging.INFO:
if rec.levelno>=logging.ERROR:
rec.c1=colors.RED
elif rec.levelno>=logging.WARNING:
rec.c1=colors.YELLOW
else:
rec.c1=colors.GREEN
return True
m=re_log.match(rec.msg)
if m:
rec.zone=m.group(1)
rec.msg=m.group(2)
if zones:
return getattr(rec,'zone','')in zones or'*'in zones
elif not verbose>2:
return False
return True
class formatter(logging.Formatter):
def __init__(self):
logging.Formatter.__init__(self,LOG_FORMAT,HOUR_FORMAT)
def format(self,rec):
if rec.levelno>=logging.WARNING or rec.levelno==logging.INFO:
try:
msg=rec.msg.decode('utf-8')
except Exception:
msg=rec.msg
return'%s%s%s'%(rec.c1,msg,rec.c2)
return logging.Formatter.format(self,rec)
log=None
def debug(*k,**kw):
if verbose:
k=list(k)
k[0]=k[0].replace('\n',' ')
global log
log.debug(*k,**kw)
def error(*k,**kw):
global log
log.error(*k,**kw)
if verbose>2:
st=traceback.extract_stack()
if st:
st=st[:-1]
buf=[]
for filename,lineno,name,line in st:
buf.append(' File "%s", line %d, in %s'%(filename,lineno,name))
if line:
buf.append(' %s'%line.strip())
if buf:log.error("\n".join(buf))
def warn(*k,**kw):
global log
log.warn(*k,**kw)
def info(*k,**kw):
global log
log.info(*k,**kw)
def init_log():
global log
log=logging.getLogger('waflib')
log.handlers=[]
log.filters=[]
hdlr=logging.StreamHandler()
hdlr.setFormatter(formatter())
log.addHandler(hdlr)
log.addFilter(log_filter())
log.setLevel(logging.DEBUG)
def make_logger(path,name):
logger=logging.getLogger(name)
hdlr=logging.FileHandler(path,'w')
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.DEBUG)
return logger
def make_mem_logger(name,to_log,size=10000):
from logging.handlers import MemoryHandler
logger=logging.getLogger(name)
hdlr=MemoryHandler(size,target=to_log)
formatter=logging.Formatter('%(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.memhandler=hdlr
logger.setLevel(logging.DEBUG)
return logger
def pprint(col,str,label='',sep='\n'):
sys.stderr.write("%s%s%s %s%s"%(colors(col),str,colors.NORMAL,label,sep))
| gpl-2.0 | 1,384,677,174,145,374,700 | 25.864407 | 228 | 0.683281 | false |
bob-white/UnityIronPythonConsole | Assets/IronPythonConsole/Plugins/Lib/repr.py | 417 | 4296 | """Redo the builtin repr() (representation) but with limits on most sizes."""
__all__ = ["Repr","repr"]
import __builtin__
from itertools import islice
class Repr:
def __init__(self):
self.maxlevel = 6
self.maxtuple = 6
self.maxlist = 6
self.maxarray = 5
self.maxdict = 4
self.maxset = 6
self.maxfrozenset = 6
self.maxdeque = 6
self.maxstring = 30
self.maxlong = 40
self.maxother = 20
def repr(self, x):
return self.repr1(x, self.maxlevel)
def repr1(self, x, level):
typename = type(x).__name__
if ' ' in typename:
parts = typename.split()
typename = '_'.join(parts)
if hasattr(self, 'repr_' + typename):
return getattr(self, 'repr_' + typename)(x, level)
else:
s = __builtin__.repr(x)
if len(s) > self.maxother:
i = max(0, (self.maxother-3)//2)
j = max(0, self.maxother-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _repr_iterable(self, x, level, left, right, maxiter, trail=''):
n = len(x)
if level <= 0 and n:
s = '...'
else:
newlevel = level - 1
repr1 = self.repr1
pieces = [repr1(elem, newlevel) for elem in islice(x, maxiter)]
if n > maxiter: pieces.append('...')
s = ', '.join(pieces)
if n == 1 and trail: right = trail + right
return '%s%s%s' % (left, s, right)
def repr_tuple(self, x, level):
return self._repr_iterable(x, level, '(', ')', self.maxtuple, ',')
def repr_list(self, x, level):
return self._repr_iterable(x, level, '[', ']', self.maxlist)
def repr_array(self, x, level):
header = "array('%s', [" % x.typecode
return self._repr_iterable(x, level, header, '])', self.maxarray)
def repr_set(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'set([', '])', self.maxset)
def repr_frozenset(self, x, level):
x = _possibly_sorted(x)
return self._repr_iterable(x, level, 'frozenset([', '])',
self.maxfrozenset)
def repr_deque(self, x, level):
return self._repr_iterable(x, level, 'deque([', '])', self.maxdeque)
def repr_dict(self, x, level):
n = len(x)
if n == 0: return '{}'
if level <= 0: return '{...}'
newlevel = level - 1
repr1 = self.repr1
pieces = []
for key in islice(_possibly_sorted(x), self.maxdict):
keyrepr = repr1(key, newlevel)
valrepr = repr1(x[key], newlevel)
pieces.append('%s: %s' % (keyrepr, valrepr))
if n > self.maxdict: pieces.append('...')
s = ', '.join(pieces)
return '{%s}' % (s,)
def repr_str(self, x, level):
s = __builtin__.repr(x[:self.maxstring])
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = __builtin__.repr(x[:i] + x[len(x)-j:])
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_long(self, x, level):
s = __builtin__.repr(x) # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)//2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def repr_instance(self, x, level):
try:
s = __builtin__.repr(x)
# Bugs in x.__repr__() can cause arbitrary
# exceptions -- then make up something
except Exception:
return '<%s instance at %x>' % (x.__class__.__name__, id(x))
if len(s) > self.maxstring:
i = max(0, (self.maxstring-3)//2)
j = max(0, self.maxstring-3-i)
s = s[:i] + '...' + s[len(s)-j:]
return s
def _possibly_sorted(x):
# Since not all sequences of items can be sorted and comparison
# functions may raise arbitrary exceptions, return an unsorted
# sequence in that case.
try:
return sorted(x)
except Exception:
return list(x)
aRepr = Repr()
repr = aRepr.repr
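# A brief usage sketch (output shown is indicative of the default limits):
#
#     from repr import repr as limited_repr
#     limited_repr(range(1000))  # -> '[0, 1, 2, 3, 4, 5, ...]'
#     limited_repr('x' * 100)    # -> repr truncated to roughly maxstring chars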
| mpl-2.0 | 4,721,990,324,382,316,000 | 31.545455 | 77 | 0.499302 | false |
matt-bernhardt/trapp | trapp/check_games.py | 1 | 1339 | # -*- coding: utf-8 -*-
from trapp.checker import Checker
from trapp.competition import Competition
class CheckerGames(Checker):
def reviewCompetition(self, competition, year):
self.log.message('Reviewing competition ' + str(competition))
# Get years this competition was held
sql = ("SELECT DISTINCT(YEAR(MatchTime)) AS MatchYear, "
" COUNT(ID) AS Games "
"FROM tbl_games "
"WHERE MatchTypeID = %s AND YEAR(MatchTime) >= %s "
"GROUP BY YEAR(MatchTime) "
"ORDER BY MatchYear ASC")
rs = self.db.query(sql, (competition, year, ))
if (rs.with_rows):
records = rs.fetchall()
for index, item in enumerate(records):
self.output.message(str(competition) + ',' +
str(item[0]) + ',' +
str(item[1]))
def checkGames(self):
# What year are we starting our checks
startYear = 1990
# Label Columns
self.output.message('Competition,Year,Games')
# Get Competitions list
c = Competition()
c.connectDB()
competitions = c.loadAll()
# Do work
        for record in competitions:
            self.reviewCompetition(record['CompetitionID'], startYear)
| gpl-2.0 | -4,675,455,301,226,018,000 | 30.139535 | 69 | 0.554145 | false |
jorik041/plaso | plaso/parsers/winreg_plugins/mrulistex.py | 1 | 18044 | # -*- coding: utf-8 -*-
"""This file contains MRUListEx Windows Registry plugins."""
import abc
import logging
import construct
from plaso.events import windows_events
from plaso.lib import binary
from plaso.parsers import winreg
from plaso.parsers.shared import shell_items
from plaso.parsers.winreg_plugins import interface
# A mixin class is used here to not to have the duplicate functionality
# to parse the MRUListEx Registry values. However multiple inheritance
# and thus mixins are to be used sparsely in this codebase, hence we need
# to find a better solution in not needing to distinguish between key and
# value plugins.
# TODO: refactor Registry key and value plugin to rid ourselves of the mixin.
class MRUListExPluginMixin(object):
"""Class for common MRUListEx Windows Registry plugin functionality."""
_MRULISTEX_STRUCT = construct.Range(
1, 500, construct.ULInt32(u'entry_number'))
@abc.abstractmethod
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, **kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
Returns:
A string containing the value.
"""
def _ParseMRUListExValue(self, key):
"""Parses the MRUListEx value in a given Registry key.
Args:
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
Returns:
A MRUListEx value generator, which returns the MRU index number
and entry value.
"""
mru_list_value = key.GetValue(u'MRUListEx')
# The key exists but does not contain a value named "MRUListEx".
if not mru_list_value:
return enumerate([])
try:
mru_list = self._MRULISTEX_STRUCT.parse(mru_list_value.data)
except construct.FieldError:
logging.warning(u'[{0:s}] Unable to parse the MRU key: {1:s}'.format(
self.NAME, key.path))
return enumerate([])
return enumerate(mru_list)
def _ParseMRUListExKey(
self, parser_mediator, key, registry_type=None, codepage=u'cp1252'):
"""Extract event objects from a MRUListEx Registry key.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey).
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
text_dict = {}
for entry_index, entry_number in self._ParseMRUListExValue(key):
# TODO: detect if list ends prematurely.
# MRU lists are terminated with 0xffffffff (-1).
if entry_number == 0xffffffff:
break
value_string = self._ParseMRUListExEntryValue(
parser_mediator, key, entry_index, entry_number, codepage=codepage)
value_text = u'Index: {0:d} [MRU Value {1:d}]'.format(
entry_index + 1, entry_number)
text_dict[value_text] = value_string
event_object = windows_events.WindowsRegistryEvent(
key.last_written_timestamp, key.path, text_dict,
offset=key.offset, registry_type=registry_type,
source_append=': MRUListEx')
parser_mediator.ProduceEvent(event_object)
class MRUListExStringPlugin(interface.ValuePlugin, MRUListExPluginMixin):
"""Windows Registry plugin to parse a string MRUListEx."""
NAME = u'mrulistex_string'
DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'
REG_TYPE = u'any'
REG_VALUES = frozenset([u'MRUListEx', u'0'])
URLS = [
u'http://forensicartifacts.com/2011/02/recentdocs/',
u'https://github.com/libyal/winreg-kb/wiki/MRU-keys']
_STRING_STRUCT = construct.Struct(
u'string_and_shell_item',
construct.RepeatUntil(
lambda obj, ctx: obj == b'\x00\x00', construct.Field(u'string', 2)))
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, **unused_kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
Returns:
A string containing the value.
"""
value_string = u''
value = key.GetValue(u'{0:d}'.format(entry_number))
if value is None:
logging.debug(
u'[{0:s}] Missing MRUListEx entry value: {1:d} in key: {2:s}.'.format(
self.NAME, entry_number, key.path))
elif value.DataIsString():
value_string = value.data
elif value.DataIsBinaryData():
logging.debug((
u'[{0:s}] Non-string MRUListEx entry value: {1:d} parsed as string '
u'in key: {2:s}.').format(self.NAME, entry_number, key.path))
utf16_stream = binary.ByteStreamCopyToUtf16Stream(value.data)
try:
value_string = utf16_stream.decode(u'utf-16-le')
except UnicodeDecodeError as exception:
value_string = binary.HexifyBuffer(utf16_stream)
logging.warning((
u'[{0:s}] Unable to decode UTF-16 stream: {1:s} in MRUListEx entry '
u'value: {2:d} in key: {3:s} with error: {4:s}').format(
self.NAME, value_string, entry_number, key.path, exception))
return value_string
def GetEntries(
self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
**kwargs):
"""Extract event objects from a Registry key containing a MRUListEx value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
self._ParseMRUListExKey(
parser_mediator, key, registry_type=registry_type, codepage=codepage)
def Process(self, parser_mediator, key=None, codepage=u'cp1252', **kwargs):
"""Determine if we can process this Registry key or not.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Windows Registry key (instance of WinRegKey).
The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
# Prevent this plugin triggering on sub paths of non-string MRUListEx
# values.
if (u'BagMRU' in key.path or u'Explorer\\StreamMRU' in key.path or
u'\\Explorer\\ComDlg32\\OpenSavePidlMRU' in key.path):
return
super(MRUListExStringPlugin, self).Process(
parser_mediator, key=key, codepage=codepage)
class MRUListExShellItemListPlugin(interface.KeyPlugin, MRUListExPluginMixin):
"""Windows Registry plugin to parse a shell item list MRUListEx."""
NAME = u'mrulistex_shell_item_list'
DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'
REG_TYPE = u'any'
REG_KEYS = frozenset([
# The regular expression indicated a file extension (.jpg) or '*'.
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ComDlg32\\'
u'OpenSavePidlMRU'),
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\StreamMRU'])
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, codepage=u'cp1252',
**unused_kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
codepage: Optional extended ASCII string codepage. The default is cp1252.
Returns:
A string containing the value.
"""
value_string = u''
value = key.GetValue(u'{0:d}'.format(entry_number))
if value is None:
logging.debug(
u'[{0:s}] Missing MRUListEx entry value: {1:d} in key: {2:s}.'.format(
self.NAME, entry_number, key.path))
elif not value.DataIsBinaryData():
logging.debug((
u'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
u'{2:s}.').format(self.NAME, entry_number, key.path))
elif value.data:
shell_items_parser = shell_items.ShellItemsParser(key.path)
shell_items_parser.UpdateChainAndParse(
parser_mediator, value.data, None, codepage=codepage)
value_string = u'Shell item path: {0:s}'.format(
shell_items_parser.CopyToPath())
return value_string
def GetEntries(
self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
**kwargs):
"""Extract event objects from a Registry key containing a MRUListEx value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
if key.name != u'OpenSavePidlMRU':
self._ParseMRUListExKey(
parser_mediator, key, registry_type=registry_type, codepage=codepage)
if key.name == u'OpenSavePidlMRU':
# For the OpenSavePidlMRU MRUListEx we also need to parse its subkeys
# since the Registry key path does not support wildcards yet.
for subkey in key.GetSubkeys():
self._ParseMRUListExKey(
parser_mediator, subkey, registry_type=registry_type,
codepage=codepage)
class MRUListExStringAndShellItemPlugin(
interface.KeyPlugin, MRUListExPluginMixin):
"""Windows Registry plugin to parse a string and shell item MRUListEx."""
NAME = u'mrulistex_string_and_shell_item'
DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'
REG_TYPE = u'any'
REG_KEYS = frozenset([
u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\RecentDocs'])
_STRING_AND_SHELL_ITEM_STRUCT = construct.Struct(
u'string_and_shell_item',
construct.RepeatUntil(
lambda obj, ctx: obj == b'\x00\x00', construct.Field(u'string', 2)),
construct.Anchor(u'shell_item'))
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, codepage=u'cp1252',
**unused_kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
codepage: Optional extended ASCII string codepage. The default is cp1252.
Returns:
A string containing the value.
"""
value_string = u''
value = key.GetValue(u'{0:d}'.format(entry_number))
if value is None:
logging.debug(
u'[{0:s}] Missing MRUListEx entry value: {1:d} in key: {2:s}.'.format(
self.NAME, entry_number, key.path))
elif not value.DataIsBinaryData():
logging.debug((
u'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
u'{2:s}.').format(self.NAME, entry_number, key.path))
elif value.data:
value_struct = self._STRING_AND_SHELL_ITEM_STRUCT.parse(value.data)
try:
# The struct includes the end-of-string character that we need
# to strip off.
path = b''.join(value_struct.string).decode(u'utf16')[:-1]
except UnicodeDecodeError as exception:
logging.warning((
u'[{0:s}] Unable to decode string MRUListEx entry value: {1:d} '
u'in key: {2:s} with error: {3:s}').format(
self.NAME, entry_number, key.path, exception))
path = u''
if path:
shell_item_list_data = value.data[value_struct.shell_item:]
if not shell_item_list_data:
logging.debug((
u'[{0:s}] Missing shell item in MRUListEx entry value: {1:d}'
u'in key: {2:s}').format(self.NAME, entry_number, key.path))
value_string = u'Path: {0:s}'.format(path)
else:
shell_items_parser = shell_items.ShellItemsParser(key.path)
shell_items_parser.UpdateChainAndParse(
parser_mediator, shell_item_list_data, None, codepage=codepage)
value_string = u'Path: {0:s}, Shell item: [{1:s}]'.format(
path, shell_items_parser.CopyToPath())
return value_string
def GetEntries(
self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
**kwargs):
"""Extract event objects from a Registry key containing a MRUListEx value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
self._ParseMRUListExKey(
parser_mediator, key, registry_type=registry_type, codepage=codepage)
if key.name == u'RecentDocs':
# For the RecentDocs MRUListEx we also need to parse its subkeys
# since the Registry key path does not support wildcards yet.
for subkey in key.GetSubkeys():
self._ParseMRUListExKey(
parser_mediator, subkey, registry_type=registry_type,
codepage=codepage)
class MRUListExStringAndShellItemListPlugin(
interface.KeyPlugin, MRUListExPluginMixin):
"""Windows Registry plugin to parse a string and shell item list MRUListEx."""
NAME = u'mrulistex_string_and_shell_item_list'
DESCRIPTION = u'Parser for Most Recently Used (MRU) Registry data.'
REG_TYPE = u'any'
REG_KEYS = frozenset([
(u'\\Software\\Microsoft\\Windows\\CurrentVersion\\Explorer\\ComDlg32\\'
u'LastVisitedPidlMRU')])
_STRING_AND_SHELL_ITEM_LIST_STRUCT = construct.Struct(
u'string_and_shell_item',
construct.RepeatUntil(
lambda obj, ctx: obj == b'\x00\x00', construct.Field(u'string', 2)),
construct.Anchor(u'shell_item_list'))
def _ParseMRUListExEntryValue(
self, parser_mediator, key, entry_index, entry_number, codepage=u'cp1252',
**unused_kwargs):
"""Parses the MRUListEx entry value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: the Registry key (instance of winreg.WinRegKey) that contains
the MRUListEx value.
entry_index: integer value representing the MRUListEx entry index.
entry_number: integer value representing the entry number.
codepage: Optional extended ASCII string codepage. The default is cp1252.
Returns:
A string containing the value.
"""
value_string = u''
value = key.GetValue(u'{0:d}'.format(entry_number))
if value is None:
logging.debug(
u'[{0:s}] Missing MRUListEx entry value: {1:d} in key: {2:s}.'.format(
self.NAME, entry_number, key.path))
elif not value.DataIsBinaryData():
logging.debug((
u'[{0:s}] Non-binary MRUListEx entry value: {1:d} in key: '
u'{2:s}.').format(self.NAME, entry_number, key.path))
elif value.data:
value_struct = self._STRING_AND_SHELL_ITEM_LIST_STRUCT.parse(value.data)
try:
# The struct includes the end-of-string character that we need
# to strip off.
path = b''.join(value_struct.string).decode(u'utf16')[:-1]
except UnicodeDecodeError as exception:
logging.warning((
u'[{0:s}] Unable to decode string MRUListEx entry value: {1:d} '
u'in key: {2:s} with error: {3:s}').format(
self.NAME, entry_number, key.path, exception))
path = u''
if path:
shell_item_list_data = value.data[value_struct.shell_item_list:]
if not shell_item_list_data:
logging.debug((
u'[{0:s}] Missing shell item in MRUListEx entry value: {1:d}'
u'in key: {2:s}').format(self.NAME, entry_number, key.path))
value_string = u'Path: {0:s}'.format(path)
else:
shell_items_parser = shell_items.ShellItemsParser(key.path)
shell_items_parser.UpdateChainAndParse(
parser_mediator, shell_item_list_data, None, codepage=codepage)
value_string = u'Path: {0:s}, Shell item path: {1:s}'.format(
path, shell_items_parser.CopyToPath())
return value_string
def GetEntries(
self, parser_mediator, key=None, registry_type=None, codepage=u'cp1252',
**kwargs):
"""Extract event objects from a Registry key containing a MRUListEx value.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: Optional Registry key (instance of winreg.WinRegKey).
The default is None.
registry_type: Optional Registry type string. The default is None.
codepage: Optional extended ASCII string codepage. The default is cp1252.
"""
self._ParseMRUListExKey(
parser_mediator, key, registry_type=registry_type, codepage=codepage)
winreg.WinRegistryParser.RegisterPlugins([
MRUListExStringPlugin, MRUListExShellItemListPlugin,
MRUListExStringAndShellItemPlugin, MRUListExStringAndShellItemListPlugin])
| apache-2.0 | 6,099,554,587,811,214,000 | 37.228814 | 80 | 0.667867 | false |
kivio/pysllo | docs/conf.py | 2 | 9751 | # -*- coding: utf-8 -*-
#
# Pysllo documentation build configuration file, created by
# sphinx-quickstart on Tue May 31 19:45:48 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']
autodoc_member_order = 'bysource'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pysllo'
copyright = u'2016, Marcin Karkocha'
author = u'Marcin Karkocha'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'manni'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'fixed_sidebar': True,
'analytics_id': 'UA-79713650-1',
'github_user': 'kivio',
'github_repo': 'pysllo',
'github_banner': True
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Pysllo v0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = "pysllo2.png"
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pysllodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Pysllo.tex', u'Pysllo Documentation',
u'Marcin', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pysllo', u'Pysllo Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Pysllo', u'Pysllo Documentation',
author, 'Pysllo', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| bsd-3-clause | 4,232,071,813,671,519,000 | 27.679412 | 79 | 0.690698 | false |
cgcgbcbc/django-xadmin | xadmin/plugins/chart.py | 17 | 5683 |
import datetime
import decimal
import calendar
from django.template import loader
from django.http import HttpResponseNotFound
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponse
from django.utils.encoding import smart_unicode
from django.db import models
from django.utils.http import urlencode
from django.utils.translation import ugettext_lazy as _, ugettext
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.views.dashboard import ModelBaseWidget, widget_manager
from xadmin.util import lookup_field, label_for_field, force_unicode, json
@widget_manager.register
class ChartWidget(ModelBaseWidget):
widget_type = 'chart'
description = _('Show models simple chart.')
template = 'xadmin/widgets/chart.html'
widget_icon = 'fa fa-bar-chart-o'
def convert(self, data):
self.list_params = data.pop('params', {})
self.chart = data.pop('chart', None)
def setup(self):
super(ChartWidget, self).setup()
self.charts = {}
self.one_chart = False
model_admin = self.admin_site._registry[self.model]
chart = self.chart
if hasattr(model_admin, 'data_charts'):
if chart and chart in model_admin.data_charts:
self.charts = {chart: model_admin.data_charts[chart]}
self.one_chart = True
if self.title is None:
self.title = model_admin.data_charts[chart].get('title')
else:
self.charts = model_admin.data_charts
if self.title is None:
self.title = ugettext(
"%s Charts") % self.model._meta.verbose_name_plural
def filte_choices_model(self, model, modeladmin):
return bool(getattr(modeladmin, 'data_charts', None)) and \
super(ChartWidget, self).filte_choices_model(model, modeladmin)
def get_chart_url(self, name, v):
return self.model_admin_url('chart', name) + "?" + urlencode(self.list_params)
def context(self, context):
context.update({
'charts': [{"name": name, "title": v['title'], 'url': self.get_chart_url(name, v)} for name, v in self.charts.items()],
})
# Media
def media(self):
return self.vendor('flot.js', 'xadmin.plugin.charts.js')
class JSONEncoder(DjangoJSONEncoder):
def default(self, o):
if isinstance(o, (datetime.date, datetime.datetime)):
return calendar.timegm(o.timetuple()) * 1000
elif isinstance(o, decimal.Decimal):
return str(o)
else:
try:
return super(JSONEncoder, self).default(o)
except Exception:
return smart_unicode(o)
class ChartsPlugin(BaseAdminPlugin):
data_charts = {}
def init_request(self, *args, **kwargs):
return bool(self.data_charts)
def get_chart_url(self, name, v):
return self.admin_view.model_admin_url('chart', name) + self.admin_view.get_query_string()
# Media
def get_media(self, media):
return media + self.vendor('flot.js', 'xadmin.plugin.charts.js')
# Block Views
def block_results_top(self, context, nodes):
context.update({
'charts': [{"name": name, "title": v['title'], 'url': self.get_chart_url(name, v)} for name, v in self.data_charts.items()],
})
nodes.append(loader.render_to_string('xadmin/blocks/model_list.results_top.charts.html', context_instance=context))
class ChartsView(ListAdminView):
data_charts = {}
def get_ordering(self):
if 'order' in self.chart:
return self.chart['order']
else:
return super(ChartsView, self).get_ordering()
def get(self, request, name):
if name not in self.data_charts:
return HttpResponseNotFound()
self.chart = self.data_charts[name]
self.x_field = self.chart['x-field']
y_fields = self.chart['y-field']
self.y_fields = (
y_fields,) if type(y_fields) not in (list, tuple) else y_fields
datas = [{"data":[], "label": force_unicode(label_for_field(
i, self.model, model_admin=self))} for i in self.y_fields]
self.make_result_list()
for obj in self.result_list:
xf, attrs, value = lookup_field(self.x_field, obj, self)
for i, yfname in enumerate(self.y_fields):
yf, yattrs, yv = lookup_field(yfname, obj, self)
datas[i]["data"].append((value, yv))
option = {'series': {'lines': {'show': True}, 'points': {'show': False}},
'grid': {'hoverable': True, 'clickable': True}}
try:
xfield = self.opts.get_field(self.x_field)
if type(xfield) in (models.DateTimeField, models.DateField, models.TimeField):
option['xaxis'] = {'mode': "time", 'tickLength': 5}
if type(xfield) is models.DateField:
option['xaxis']['timeformat'] = "%y/%m/%d"
elif type(xfield) is models.TimeField:
option['xaxis']['timeformat'] = "%H:%M:%S"
else:
option['xaxis']['timeformat'] = "%y/%m/%d %H:%M:%S"
except Exception:
pass
option.update(self.chart.get('option', {}))
content = {'data': datas, 'option': option}
result = json.dumps(content, cls=JSONEncoder, ensure_ascii=False)
return HttpResponse(result)
site.register_plugin(ChartsPlugin, ListAdminView)
site.register_modelview(r'^chart/(.+)/$', ChartsView, name='%s_%s_chart')
| bsd-3-clause | -6,073,751,838,963,960,000 | 34.742138 | 136 | 0.604082 | false |
omarkhan/ansible-modules-core | cloud/google/gce_pd.py | 130 | 9532 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
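
# Hedged sketch (not from the original module docs): detach the same disk from the
# instance without destroying it, using only options documented above
# (instance_name, name, detach_only, state).
- local_action:
    module: gce_pd
    instance_name: notlocalhost
    name: pd
    detach_only: yes
    state: absent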
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
module = AnsibleModule(
argument_spec = dict(
detach_only = dict(type='bool'),
instance_name = dict(),
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
disk_type = dict(default='pd-standard'),
image = dict(),
snapshot = dict(),
state = dict(default='present'),
zone = dict(default='us-central1-b'),
service_account_email = dict(),
pem_file = dict(),
project_id = dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.13.3+) is required for this module')
gce = gce_connect(module)
detach_only = module.params.get('detach_only')
instance_name = module.params.get('instance_name')
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
disk_type = module.params.get('disk_type')
image = module.params.get('image')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
zone = module.params.get('zone')
if detach_only and not instance_name:
module.fail_json(
msg='Must specify an instance name when detaching a disk',
changed=False)
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
if instance_name:
# user wants to attach/detach from an existing instance
try:
inst = gce.ex_get_node(instance_name, zone)
# is the disk attached?
for d in inst.extra['disks']:
if d['deviceName'] == name:
is_attached = True
json_output['attached_mode'] = d['mode']
json_output['attached_to_instance'] = inst.name
except:
pass
# find disk if it already exists
try:
disk = gce.ex_get_volume(name)
json_output['size_gb'] = int(disk.size)
except ResourceNotFoundError:
pass
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants a disk to exist. If "instance_name" is supplied the user
# also wants it attached
if state in ['active', 'present']:
if not size_gb:
module.fail_json(msg="Must supply a size_gb", changed=False)
try:
size_gb = int(round(float(size_gb)))
if size_gb < 1:
raise Exception
except:
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
changed=False)
if instance_name and inst is None:
module.fail_json(msg='Instance %s does not exist in zone %s' % (
instance_name, zone), changed=False)
if not disk:
if image is not None and snapshot is not None:
module.fail_json(
msg='Cannot give both image (%s) and snapshot (%s)' % (
image, snapshot), changed=False)
lc_image = None
lc_snapshot = None
if image is not None:
lc_image = gce.ex_get_image(image)
elif snapshot is not None:
lc_snapshot = gce.ex_get_snapshot(snapshot)
try:
disk = gce.create_volume(
size_gb, name, location=zone, image=lc_image,
snapshot=lc_snapshot, ex_disk_type=disk_type)
except ResourceExistsError:
pass
except QuotaExceededError:
module.fail_json(msg='Requested disk size exceeds quota',
changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['size_gb'] = size_gb
if image is not None:
json_output['image'] = image
if snapshot is not None:
json_output['snapshot'] = snapshot
changed = True
if inst and not is_attached:
try:
gce.attach_volume(inst, disk, device=name, ex_mode=mode)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['attached_to_instance'] = inst.name
json_output['attached_mode'] = mode
changed = True
# user wants to delete a disk (or perhaps just detach it).
if state in ['absent', 'deleted'] and disk:
if inst and is_attached:
try:
gce.detach_volume(disk, ex_node=inst)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
if not detach_only:
try:
gce.destroy_volume(disk)
except ResourceInUseError, e:
module.fail_json(msg=str(e.value), changed=False)
except Exception, e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
json_output['changed'] = changed
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
| gpl-3.0 | -3,526,913,295,690,532,400 | 31.202703 | 95 | 0.597881 | false |
pfmooney/dd-agent | checks.d/mesos.py | 22 | 5151 | # stdlib
from hashlib import md5
import time
# 3rd party
import requests
# project
from checks import AgentCheck
class Mesos(AgentCheck):
SERVICE_CHECK_NAME = "mesos.can_connect"
def check(self, instance):
"""
DEPRECATED:
This generic Mesosphere check is deprecated not actively developed anymore. It will be
removed in a future version of the Datadog Agent.
Please head over to the Mesosphere master and slave specific checks.
"""
self.warning("This check is deprecated in favor of Mesos master and slave specific checks."
" It will be removed in a future version of the Datadog Agent.")
if 'url' not in instance:
raise Exception('Mesos instance missing "url" value.')
# Load values from the instance config
url = instance['url']
instance_tags = instance.get('tags', [])
default_timeout = self.init_config.get('default_timeout', 5)
timeout = float(instance.get('timeout', default_timeout))
response = self.get_master_roles(url, timeout)
if response is not None:
for role in response['roles']:
tags = ['role:' + role['name']] + instance_tags
self.gauge('mesos.role.frameworks', len(role['frameworks']), tags=tags)
self.gauge('mesos.role.weight', role['weight'], tags=tags)
resources = role['resources']
for attr in ['cpus','mem']:
if attr in resources:
self.gauge('mesos.role.' + attr, resources[attr], tags=tags)
response = self.get_master_stats(url, timeout)
if response is not None:
tags = instance_tags
for key in iter(response):
self.gauge('mesos.stats.' + key, response[key], tags=tags)
response = self.get_master_state(url, timeout)
if response is not None:
tags = instance_tags
for attr in ['deactivated_slaves','failed_tasks','finished_tasks','killed_tasks','lost_tasks','staged_tasks','started_tasks']:
self.gauge('mesos.state.' + attr, response[attr], tags=tags)
for framework in response['frameworks']:
tags = ['framework:' + framework['id']] + instance_tags
resources = framework['resources']
for attr in ['cpus','mem']:
if attr in resources:
self.gauge('mesos.state.framework.' + attr, resources[attr], tags=tags)
for slave in response['slaves']:
tags = ['mesos','slave:' + slave['id']] + instance_tags
resources = slave['resources']
for attr in ['cpus','mem','disk']:
if attr in resources:
self.gauge('mesos.state.slave.' + attr, resources[attr], tags=tags)
def get_master_roles(self, url, timeout):
return self.get_json(url + "/master/roles.json", timeout)
def get_master_stats(self, url, timeout):
return self.get_json(url + "/master/stats.json", timeout)
def get_master_state(self, url, timeout):
return self.get_json(url + "/master/state.json", timeout)
def get_json(self, url, timeout):
# Use a hash of the URL as an aggregation key
aggregation_key = md5(url).hexdigest()
tags = ["url:%s" % url]
msg = None
status = None
try:
r = requests.get(url, timeout=timeout)
if r.status_code != 200:
self.status_code_event(url, r, aggregation_key)
status = AgentCheck.CRITICAL
msg = "Got %s when hitting %s" % (r.status_code, url)
else:
status = AgentCheck.OK
msg = "Mesos master instance detected at %s " % url
except requests.exceptions.Timeout as e:
# If there's a timeout
self.timeout_event(url, timeout, aggregation_key)
msg = "%s seconds timeout when hitting %s" % (timeout, url)
status = AgentCheck.CRITICAL
except Exception as e:
msg = str(e)
status = AgentCheck.CRITICAL
finally:
self.service_check(self.SERVICE_CHECK_NAME, status, tags=tags, message=msg)
if status is AgentCheck.CRITICAL:
self.warning(msg)
return None
return r.json()
def timeout_event(self, url, timeout, aggregation_key):
self.event({
'timestamp': int(time.time()),
'event_type': 'http_check',
'msg_title': 'URL timeout',
'msg_text': '%s timed out after %s seconds.' % (url, timeout),
'aggregation_key': aggregation_key
})
def status_code_event(self, url, r, aggregation_key):
self.event({
'timestamp': int(time.time()),
'event_type': 'http_check',
'msg_title': 'Invalid reponse code for %s' % url,
'msg_text': '%s returned a status of %s' % (url, r.status_code),
'aggregation_key': aggregation_key
})
| bsd-3-clause | 6,378,490,254,408,713,000 | 39.559055 | 138 | 0.562997 | false |
40223137/cdag7test37 | static/Brython3.1.3-20150514-095342/Lib/weakref.py | 769 | 11495 | """Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet, _IterationGuard
import collections # Import after _weakref to avoid circular import.
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary",
"WeakSet"]
class WeakValueDictionary(collections.MutableMapping):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
del self.data[wr.key]
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
self.data = d = {}
self.update(*args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
del d[l.pop()]
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError(key)
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
# This should only happen
return default
else:
return o
def items(self):
with _IterationGuard(self):
for k, wr in self.data.items():
v = wr()
if v is not None:
yield k, v
def keys(self):
with _IterationGuard(self):
for k, wr in self.data.items():
if wr() is not None:
yield k
__iter__ = keys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.values():
yield wr
def values(self):
with _IterationGuard(self):
for wr in self.data.values():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while True:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError(key)
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return list(self.data.values())
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super().__init__(ob, callback)
class WeakKeyDictionary(collections.MutableMapping):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return False
return wr in self.data
def items(self):
with _IterationGuard(self):
for wr, value in self.data.items():
key = wr()
if key is not None:
yield key, value
def keys(self):
with _IterationGuard(self):
for wr in self.data:
obj = wr()
if obj is not None:
yield obj
__iter__ = keys
def values(self):
with _IterationGuard(self):
for wr, value in self.data.items():
if wr() is not None:
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return list(self.data)
def popitem(self):
while True:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
| gpl-3.0 | -8,913,955,414,854,984,000 | 28.857143 | 76 | 0.551196 | false |
cloudwork/bigcouch | couchjs/scons/scons-local-2.0.1/SCons/Scanner/Fortran.py | 61 | 14347 | """SCons.Scanner.Fortran
This module implements the dependency scanner for Fortran code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Fortran.py 5134 2010/08/16 23:02:40 bdeegan"
import re
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
class F90Scanner(SCons.Scanner.Classic):
"""
A Classic Scanner subclass for Fortran source files which takes
into account both USE and INCLUDE statements. This scanner will
work for both F77 and F90 (and beyond) compilers.
Currently, this scanner assumes that the include files do not contain
USE statements. To enable the ability to deal with USE statements
in include files, add logic right after the module names are found
to loop over each include file, search for and locate each USE
statement, and append each module name to the list of dependencies.
Caching the search results in a common dictionary somewhere so that
the same include file is not searched multiple times would be a
smart thing to do.
"""
def __init__(self, name, suffixes, path_variable,
use_regex, incl_regex, def_regex, *args, **kw):
self.cre_use = re.compile(use_regex, re.M)
self.cre_incl = re.compile(incl_regex, re.M)
self.cre_def = re.compile(def_regex, re.M)
def _scan(node, env, path, self=self):
node = node.rfile()
if not node.exists():
return []
return self.scan(node, env, path)
kw['function'] = _scan
kw['path_function'] = SCons.Scanner.FindPathDirs(path_variable)
kw['recursive'] = 1
kw['skeys'] = suffixes
kw['name'] = name
SCons.Scanner.Current.__init__(self, *args, **kw)
def scan(self, node, env, path=()):
# cache the includes list in node so we only scan it once:
if node.includes != None:
mods_and_includes = node.includes
else:
# retrieve all included filenames
includes = self.cre_incl.findall(node.get_text_contents())
# retrieve all USE'd module names
modules = self.cre_use.findall(node.get_text_contents())
# retrieve all defined module names
defmodules = self.cre_def.findall(node.get_text_contents())
# Remove all USE'd module names that are defined in the same file
# (case-insensitively)
d = {}
for m in defmodules:
d[m.lower()] = 1
modules = [m for m in modules if m.lower() not in d]
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX')
modules = [x.lower() + suffix for x in modules]
# Remove unique items from the list
mods_and_includes = SCons.Util.unique(includes+modules)
node.includes = mods_and_includes
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
        # of the file as specified on the USE or INCLUDE line, which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for dep in mods_and_includes:
n, i = self.find_include(dep, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (referenced by: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(dep)
nodes.append((sortkey, n))
return [pair[1] for pair in sorted(nodes)]
def FortranScan(path_variable="FORTRANPATH"):
"""Return a prototype Scanner instance for scanning source files
for Fortran USE & INCLUDE statements"""
# The USE statement regex matches the following:
#
# USE module_name
# USE :: module_name
# USE, INTRINSIC :: module_name
# USE, NON_INTRINSIC :: module_name
#
# Limitations
#
# -- While the regex can handle multiple USE statements on one line,
# it cannot properly handle them if they are commented out.
# In either of the following cases:
#
# ! USE mod_a ; USE mod_b [entire line is commented out]
# USE mod_a ! ; USE mod_b [in-line comment of second USE statement]
#
# the second module name (mod_b) will be picked up as a dependency
# even though it should be ignored. The only way I can see
# to rectify this would be to modify the scanner to eliminate
# the call to re.findall, read in the contents of the file,
# treating the comment character as an end-of-line character
# in addition to the normal linefeed, loop over each line,
# weeding out the comments, and looking for the USE statements.
# One advantage to this is that the regex passed to the scanner
# would no longer need to match a semicolon.
#
# -- I question whether or not we need to detect dependencies to
# INTRINSIC modules because these are built-in to the compiler.
# If we consider them a dependency, will SCons look for them, not
# find them, and kill the build? Or will we there be standard
# compiler-specific directories we will need to point to so the
# compiler and SCons can locate the proper object and mod files?
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^ : start of line
# (?: : group a collection of regex symbols without saving the match as a "group"
# ^|; : matches either the start of the line or a semicolon - semicolon
# ) : end the unsaved grouping
# \s* : any amount of white space
# USE : match the string USE, case insensitive
# (?: : group a collection of regex symbols without saving the match as a "group"
# \s+| : match one or more whitespace OR .... (the next entire grouped set of regex symbols)
# (?: : group a collection of regex symbols without saving the match as a "group"
# (?: : establish another unsaved grouping of regex symbols
# \s* : any amount of white space
# , : match a comma
# \s* : any amount of white space
# (?:NON_)? : optionally match the prefix NON_, case insensitive
# INTRINSIC : match the string INTRINSIC, case insensitive
# )? : optionally match the ", INTRINSIC/NON_INTRINSIC" grouped expression
# \s* : any amount of white space
# :: : match a double colon that must appear after the INTRINSIC/NON_INTRINSIC attribute
# ) : end the unsaved grouping
# ) : end the unsaved grouping
# \s* : match any amount of white space
# (\w+) : match the module name that is being USE'd
#
#
use_regex = "(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"
# The INCLUDE statement regex matches the following:
#
# INCLUDE 'some_Text'
# INCLUDE "some_Text"
# INCLUDE "some_Text" ; INCLUDE "some_Text"
# INCLUDE kind_"some_Text"
# INCLUDE kind_'some_Text"
#
# where some_Text can include any alphanumeric and/or special character
# as defined by the Fortran 2003 standard.
#
# Limitations:
#
# -- The Fortran standard dictates that a " or ' in the INCLUDE'd
# string must be represented as a "" or '', if the quotes that wrap
# the entire string are either a ' or ", respectively. While the
# regular expression below can detect the ' or " characters just fine,
# the scanning logic, presently is unable to detect them and reduce
# them to a single instance. This probably isn't an issue since,
# in practice, ' or " are not generally used in filenames.
#
# -- This regex will not properly deal with multiple INCLUDE statements
# when the entire line has been commented out, ala
#
# ! INCLUDE 'some_file' ; INCLUDE 'some_file'
#
# In such cases, it will properly ignore the first INCLUDE file,
# but will actually still pick up the second. Interestingly enough,
# the regex will properly deal with these cases:
#
# INCLUDE 'some_file'
# INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# To get around the above limitation, the FORTRAN programmer could
# simply comment each INCLUDE statement separately, like this
#
# ! INCLUDE 'some_file' !; INCLUDE 'some_file'
#
# The way I see it, the only way to get around this limitation would
# be to modify the scanning logic to replace the calls to re.findall
# with a custom loop that processes each line separately, throwing
# away fully commented out lines before attempting to match against
# the INCLUDE syntax.
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# (?: : begin a non-saving group that matches the following:
# ^ : either the start of the line
# | : or
# ['">]\s*; : a semicolon that follows a single quote,
# double quote or greater than symbol (with any
# amount of whitespace in between). This will
# allow the regex to match multiple INCLUDE
# statements per line (although it also requires
# the positive lookahead assertion that is
# used below). It will even properly deal with
# (i.e. ignore) cases in which the additional
# INCLUDES are part of an in-line comment, ala
# " INCLUDE 'someFile' ! ; INCLUDE 'someFile2' "
# ) : end of non-saving group
# \s* : any amount of white space
# INCLUDE : match the string INCLUDE, case insensitive
# \s+ : match one or more white space characters
#    (?:\w+_)? : match the optional "kind-param _" prefix allowed by the standard
# [<"'] : match the include delimiter - an apostrophe, double quote, or less than symbol
# (.+?) : match one or more characters that make up
# the included path and file name and save it
# in a group. The Fortran standard allows for
# any non-control character to be used. The dot
# operator will pick up any character, including
# control codes, but I can't conceive of anyone
# putting control codes in their file names.
# The question mark indicates it is non-greedy so
# that regex will match only up to the next quote,
# double quote, or greater than symbol
# (?=["'>]) : positive lookahead assertion to match the include
# delimiter - an apostrophe, double quote, or
# greater than symbol. This level of complexity
# is required so that the include delimiter is
# not consumed by the match, thus allowing the
# sub-regex discussed above to uniquely match a
# set of semicolon-separated INCLUDE statements
# (as allowed by the F2003 standard)
include_regex = """(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""
# The MODULE statement regex finds module definitions by matching
# the following:
#
# MODULE module_name
#
# but *not* the following:
#
# MODULE PROCEDURE procedure_name
#
# Here is a breakdown of the regex:
#
# (?i) : regex is case insensitive
# ^\s* : any amount of white space
# MODULE : match the string MODULE, case insensitive
# \s+ : match one or more white space characters
# (?!PROCEDURE) : but *don't* match if the next word matches
# PROCEDURE (negative lookahead assertion),
# case insensitive
# (\w+) : match one or more alphanumeric characters
# that make up the defined module name and
# save it in a group
def_regex = """(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""
scanner = F90Scanner("FortranScan",
"$FORTRANSUFFIXES",
path_variable,
use_regex,
include_regex,
def_regex)
return scanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 | -8,389,844,272,599,720,000 | 44.401899 | 125 | 0.599777 | false |
LordDamionDevil/Lony | lib/discord/member.py | 14 | 8164 | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2015-2016 Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .user import User
from .game import Game
from .permissions import Permissions
from . import utils
from .enums import Status, ChannelType
from .colour import Colour
import copy
class VoiceState:
"""Represents a Discord user's voice state.
Attributes
------------
deaf: bool
Indicates if the user is currently deafened by the server.
mute: bool
Indicates if the user is currently muted by the server.
self_mute: bool
Indicates if the user is currently muted by their own accord.
self_deaf: bool
Indicates if the user is currently deafened by their own accord.
is_afk: bool
Indicates if the user is currently in the AFK channel in the server.
voice_channel: Optional[Union[:class:`Channel`, :class:`PrivateChannel`]]
The voice channel that the user is currently connected to. None if the user
is not currently in a voice channel.
"""
__slots__ = [ 'session_id', 'deaf', 'mute', 'self_mute',
'self_deaf', 'is_afk', 'voice_channel' ]
def __init__(self, **kwargs):
self.session_id = kwargs.get('session_id')
self._update_voice_state(**kwargs)
def _update_voice_state(self, **kwargs):
self.self_mute = kwargs.get('self_mute', False)
self.self_deaf = kwargs.get('self_deaf', False)
self.is_afk = kwargs.get('suppress', False)
self.mute = kwargs.get('mute', False)
self.deaf = kwargs.get('deaf', False)
self.voice_channel = kwargs.get('voice_channel')
def flatten_voice_states(cls):
for attr in VoiceState.__slots__:
def getter(self, x=attr):
return getattr(self.voice, x)
setattr(cls, attr, property(getter))
return cls
@flatten_voice_states
class Member(User):
"""Represents a Discord member to a :class:`Server`.
This is a subclass of :class:`User` that extends more functionality
that server members have such as roles and permissions.
Attributes
----------
voice: :class:`VoiceState`
The member's voice state. Properties are defined to mirror access of the attributes.
        e.g. ``Member.is_afk`` is equivalent to ``Member.voice.is_afk``.
roles
A list of :class:`Role` that the member belongs to. Note that the first element of this
list is always the default '@everyone' role.
joined_at : `datetime.datetime`
A datetime object that specifies the date and time in UTC that the member joined the server for
the first time.
status : :class:`Status`
The member's status. There is a chance that the status will be a ``str``
if it is a value that is not recognised by the enumerator.
game : :class:`Game`
The game that the user is currently playing. Could be None if no game is being played.
server : :class:`Server`
The server that the member belongs to.
nick : Optional[str]
The server specific nickname of the user.
"""
__slots__ = [ 'roles', 'joined_at', 'status', 'game', 'server', 'nick', 'voice' ]
def __init__(self, **kwargs):
super().__init__(**kwargs.get('user'))
self.voice = VoiceState(**kwargs)
self.joined_at = utils.parse_time(kwargs.get('joined_at'))
self.roles = kwargs.get('roles', [])
self.status = Status.offline
game = kwargs.get('game', {})
self.game = Game(**game) if game else None
self.server = kwargs.get('server', None)
self.nick = kwargs.get('nick', None)
def _update_voice_state(self, **kwargs):
self.voice.self_mute = kwargs.get('self_mute', False)
self.voice.self_deaf = kwargs.get('self_deaf', False)
self.voice.is_afk = kwargs.get('suppress', False)
self.voice.mute = kwargs.get('mute', False)
self.voice.deaf = kwargs.get('deaf', False)
old_channel = getattr(self, 'voice_channel', None)
vc = kwargs.get('voice_channel')
if old_channel is None and vc is not None:
# we joined a channel
vc.voice_members.append(self)
elif old_channel is not None:
try:
# we either left a channel or we switched channels
old_channel.voice_members.remove(self)
except ValueError:
pass
finally:
# we switched channels
if vc is not None:
vc.voice_members.append(self)
self.voice.voice_channel = vc
def _copy(self):
ret = copy.copy(self)
ret.voice = copy.copy(self.voice)
return ret
@property
def colour(self):
"""A property that returns a :class:`Colour` denoting the rendered colour
for the member. If the default colour is the one rendered then an instance
of :meth:`Colour.default` is returned.
There is an alias for this under ``color``.
"""
default_colour = Colour.default()
# highest order of the colour is the one that gets rendered.
# if the highest is the default colour then the next one with a colour
# is chosen instead
if self.roles:
roles = sorted(self.roles, key=lambda r: r.position, reverse=True)
for role in roles:
if role.colour == default_colour:
continue
else:
return role.colour
return default_colour
color = colour
@property
def mention(self):
if self.nick:
return '<@!{}>'.format(self.id)
return '<@{}>'.format(self.id)
def mentioned_in(self, message):
mentioned = super().mentioned_in(message)
if mentioned:
return True
for role in message.role_mentions:
has_role = utils.get(self.roles, id=role.id) is not None
if has_role:
return True
return False
@property
def top_role(self):
"""Returns the member's highest role.
This is useful for figuring where a member stands in the role
hierarchy chain.
"""
if self.roles:
roles = sorted(self.roles, reverse=True)
return roles[0]
return None
@property
def server_permissions(self):
"""Returns the member's server permissions.
This only takes into consideration the server permissions
and not most of the implied permissions or any of the
channel permission overwrites. For 100% accurate permission
calculation, please use either :meth:`permissions_in` or
:meth:`Channel.permissions_for`.
This does take into consideration server ownership and the
administrator implication.
"""
if self.server.owner == self:
return Permissions.all()
base = Permissions.none()
for r in self.roles:
base.value |= r.permissions.value
if base.administrator:
return Permissions.all()
return base
| gpl-3.0 | -7,428,569,703,087,051,000 | 34.650655 | 103 | 0.630696 | false |
JeffsFernandes/cuidando2 | projeto/projeto/views.py | 2 | 32828 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pyramid.view import view_config
from .models import Cidadao, Cidadao_twitter,Atividade, Atividade_cidadao, Atividade_orcamento, Dados_site, Midia, Midia_comentario, Midia_video, Denuncia, Midia_foto
#from .models import Cidadao, UsrTree, Atividade_cidadao
#from .models import Cidadao, MyModel, UsrTree
#why MyModel?
from beaker.middleware import SessionMiddleware
from datetime import datetime
import itertools
from BTrees.OOBTree import OOBTree
import tweepy
import facebook
import urllib
from pyramid_mailer import get_mailer
from pyramid_mailer.message import Message
from pyramid_mailer.mailer import Mailer
#from facebook import Facebook
from pyramid.httpexceptions import (
HTTPFound,
HTTPNotFound,
#HTTPForbidden,
)
from pyramid.security import (
remember,
forget,
authenticated_userid,
)
from forms import (
merge_session_with_post,
record_to_appstruct,
FormCadastrar,
FormConfigurar,
FormContato,
FormLogin,
FormMapa,
FormInserirP,
FormOrcamento,
FormOrcamentoResp,
FormRecadSenha,
FormRSenha,
FormPesqMapa,
FormOrcFoto,
FormOrcVideo,
FormSeguirAtv,
FormDenuncia,
)
import deform
import transaction
@view_config(route_name='inicial', renderer='inicial.slim')
def my_view(request):
"""
    Home page: fetches the statistics counters and other site data
"""
#del request.db["atualAtv"]
if not "dadosSite" in request.db:
request.db["dadosSite"] = Dados_site()
atualiz_atv = request.db['dadosSite'].atualiz_atv
qtde_atv_orc = request.db['dadosSite'].qtde_atv_orc
qtde_atv_usr = request.db['dadosSite'].qtde_atv_usr
qtde_usr = request.db['dadosSite'].qtde_usr
qtde_fotos = request.db['dadosSite'].qtde_fotos
qtde_videos = request.db['dadosSite'].qtde_videos
qtde_coment = request.db['dadosSite'].qtde_coment
destaque_atv = request.db['dadosSite'].destaque_atv
return {
'atualAtv': atualiz_atv,
'qtdeAtvOrc': qtde_atv_orc,
'qtdeAtvUsr': qtde_atv_usr,
'qtdeUsr': qtde_usr,
'qtdeFotos': qtde_fotos,
'qtdeVideos': qtde_videos,
'qtdeComent': qtde_coment,
'destaqueAtv': destaque_atv,
}
@view_config(
route_name='listaUsr',
renderer='listaUsuarios.slim',
permission='comum'
)
def listaUsr(request):
"""
    Page listing registered users
"""
cidadaos = request.db['usrTree'].values()
return {
'cidadaos': cidadaos,
}
@view_config(
route_name='listaAtv',
renderer='listaAtividades.slim',
permission='comum'
)
def listaAtv(request):
"""
    Page listing activities
"""
atividades = request.db['atvTree'].values()
return {
'atividades': atividades,
}
@view_config(route_name='cadastro', renderer='cadastro.slim')
def cadastro(request):
"""Cadastro de usuário"""
    # this only runs once... should it go into the configuration or something like that?...
    # Ensure that a 'userdb' key is present
# in the root
if not request.db.has_key("usrTree"):
request.db["usrTree"] = OOBTree()
esquema = FormCadastrar().bind(request=request)
esquema.title = "Cadastrar novo usuário"
form = deform.Form(esquema, buttons=('Cadastrar',))
if 'Cadastrar' in request.POST:
        # Form validation
try:
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #logging out the current user, if any
headers = forget(request)
        # Creation and insertion
cidadao = Cidadao("","")
cidadao = merge_session_with_post(cidadao, request.POST.items())
        #goodbye, list
#request.db['cidadaos'][cidadao.email] = cidadao
request.db['usrTree'][cidadao.email] = cidadao
dadosSite = request.db['dadosSite']
        #call the function to update the counters
Dados_site.addUsr(dadosSite)
transaction.commit()
request.session.flash(u"Usuário registrado com sucesso.")
request.session.flash(u"Agora você já pode logar com ele.")
return HTTPFound(location=request.route_url('inicial'), headers = headers)
else:
        # Render the form
return {'form': form.render()}
@view_config(
route_name='configuracao',
renderer='configuracao.slim',
permission='basica'
)
def configuracao(request):
"""Configuração de usuário"""
cidadao = Cidadao("","")
cidadao = request.db["usrTree"][authenticated_userid(request)]
    #check whether the citizen record is filled in
appstruct = record_to_appstruct(cidadao)
esquema = FormConfigurar().bind(request=request)
esquema.title = "Configuração de usuário"
form = deform.Form(esquema, buttons=('Salvar', 'Excluir'))
if 'Salvar' in request.POST:
        # Form validation
cidadao = merge_session_with_post(cidadao, request.POST.items())
appstruct = record_to_appstruct(cidadao)
try:
esquema = FormConfigurar().bind(request=request)
esquema.title = "Configuração de usuário"
form = deform.Form(esquema, buttons=('Salvar', 'Excluir'))
form.render(appstruct)
appstruct = form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
transaction.commit()
return HTTPFound(location=request.route_url('usuario'))
elif 'Excluir' in request.POST:
del request.db["usrTree"][authenticated_userid(request)]
transaction.commit()
headers = forget(request)
return HTTPFound(location=request.route_url('inicial'))
else:
        # Render the form
return{'form':form.render(appstruct)}
@view_config(route_name='contato', renderer='contato.slim')
def contato(request):
"""Contato"""
# Import smtplib for the actual sending function
import smtplib
esquema = FormContato().bind(request=request)
esquema.title = "Entre em contato com o Cuidando"
form = deform.Form(esquema, buttons=('Enviar',))
if 'Enviar' in request.POST:
        # Form validation
try:
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
#sender = request.POST.get("email")
#receivers = ['silvailziane@yahoo.com.br']
#message = request.POST.get("assunto")
try:
#s = smtplib.SMTP( [host [, port [, local_hostname]]] )
#s = smtplib.SMTP('pop.mail.yahoo.com.br',587)
#smtpObj.sendmail(sender, receivers, message)
#s.quit()
#mailer = get_mailer(request)
mailer = Mailer()
message = Message(
subject=request.POST.get("assunto"),
sender= request.POST.get("email"), #"admin@cuidando.org",
recipients=['silvailziane@yahoo.com.br'],
body=request.POST.get("mensagem")
)
mailer.send(message)
transaction.commit()
print "Successfully sent email"
#except SMTPException:
except:
print "Error: unable to send email"
return HTTPFound(location=request.route_url('inicial'))
else:
        # Render the form
return {'form': form.render()}
@view_config(route_name='logout')
def logout(request):
"""Página para logout"""
headers = forget(request)
request.session.flash(u"Você foi deslogado.")
#request.session.pop_flash()
return HTTPFound(location=request.route_url('inicial'), headers=headers)
@view_config(route_name='loginTwitterAuth', renderer='loginTwitterAuth.slim',permission='comum')
def loginTwitterAuth(request):
"""
    Logs the user in with an already authorized Twitter account: called from login = authTwitterAcc
    testing with zi's Twitter account
"""
auth = tweepy.OAuthHandler("MBX41ZNwjzKMObK8AHHfQ", "56hnTS8qMDg623XAIw4vdYEGpZFJtzS82VrXhNrILQ")
verifier = request.GET.get('oauth_verifier')
token = request.session.get('request_token')
#request.session.delete('request_token')
auth.set_request_token(token[0], token[1])
print '=============='
print token[0]
#auth.set_access_token(cidadao.twitter_key, cidadao.twitter_secret)
    #test with zi's Twitter account - permanent access to the account
#auth.set_access_token("91435451-hhGY5e7Ga2c3viHCV26kVN1vgLWQm0gJMvJHYOsbh", "rEeRld6tM4V45T1fKX6abNc8BMC7hDF1n6q0tuOKfi2ME")
auth.get_access_token(verifier)
twitterApi = tweepy.API(auth)
if twitterApi:
#cidadao = request.db["twtTree"][token[0]]
userInfo = twitterApi.me()
print userInfo.screen_name
cidadao = Cidadao_twitter()
#cidadao = [] #[str(userInfo.screen_name)]
if not userInfo.screen_name in request.db["twtTree"]:
#cidadao = Cidadao_twitter()
cidadao.nomeUsr = userInfo.screen_name
request.db['twtTree'][cidadao.nomeUsr] = cidadao
dadosSite = request.db['dadosSite']
            #call the function to update the counters
Dados_site.addUsr(dadosSite)
transaction.commit()
request.session.flash(u"Usuário registrado com sucesso.")
request.session.flash(u"Agora você já pode logar com ele.")
#print userInfo.__getstate__()
#print userInfo.email
headers = remember(request, userInfo.screen_name)
#headers = remember(request, "ilzi@testecorp.com")
request.session.flash(u"Logado com twitter")
return HTTPFound(location=request.route_url('usuario'), headers=headers)
else:
request.session.flash(u"Erro ao logar com twitter")
return HTTPFound(location=request.route_url('login'))
@view_config(route_name='authTwitter', renderer='authTwitter.slim',permission='comum')
def authTwitter(request):
"""
    Authorizes Twitter for the logged-in user's account
    called from the settings page
"""
auth = tweepy.OAuthHandler("MBX41ZNwjzKMObK8AHHfQ", "56hnTS8qMDg623XAIw4vdYEGpZFJtzS82VrXhNrILQ")
    #application token and secret -> get them from Twitter
verifier = request.GET.get('oauth_verifier')
token = request.session.get('request_token')
#request.session.delete('request_token')
auth.set_request_token(token[0], token[1])
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
print 'Error! Failed to get access token.'
auth.set_access_token(auth.access_token.key, auth.access_token.secret)
#auth.set_access_token("91435451-hhGY5e7Ga2c3viHCV26kVN1vgLWQm0gJMvJHYOsbh", "rEeRld6tM4V45T1fKX6abNc8BMC7hDF1n6q0tuOKfi2ME")
twitterApi = tweepy.API(auth)
if twitterApi:
userInfo = twitterApi.me()
cidadao = request.db["usrTree"][authenticated_userid(request)]
cidadao.twitter_key = auth.access_token.key
cidadao.twitter_secret = auth.access_token.secret
cidadao.login_twitter = userInfo.screen_name
transaction.commit()
#headers = remember(request, "teste@testecorp.com")
#headers = remember(request, "ilzi@testecorp.com")
request.session.flash(u"Sua conta do twitter foi conectada ao Cuidando")
return HTTPFound(location=request.route_url('usuario'), headers=headers)
else:
request.session.flash(u"Erro ao conectar com twitter")
return HTTPFound(location=request.route_url('login'))
@view_config(route_name='authTwitterAcc', renderer='authTwitterAcc.slim',permission='comum')
def authTwitterAcc(request):
"""
    Just authorizes and redirects the user to Twitter
"""
    #OAuth authorization
auth = tweepy.OAuthHandler("MBX41ZNwjzKMObK8AHHfQ", "56hnTS8qMDg623XAIw4vdYEGpZFJtzS82VrXhNrILQ", request.route_url('loginTwitterAuth'))
    #application token and secret -> get them from Twitter
authUrl = auth.get_authorization_url(True)
request.session['request_token'] = (auth.request_token.key, auth.request_token.secret)
request.session.save()
try:
return HTTPFound(authUrl)
except tweepy.TweepError:
print 'Error! Failed to get request token.'
@view_config(route_name='loginTwitter', renderer='loginTwitter.slim',permission='comum')
def loginTwitter(request):
"""
    Login with Twitter:
    - check whether the app has already been authorized
    - store the access token somewhere
    - allow access to the site with this new object....
"""
auth = tweepy.OAuthHandler("MBX41ZNwjzKMObK8AHHfQ", "56hnTS8qMDg623XAIw4vdYEGpZFJtzS82VrXhNrILQ")
    #application token and secret -> obtained from twitter
verifier = request.GET.get('oauth_verifier')
token = request.session.get('request_token')
#request.session.delete('request_token')
auth.set_request_token(token[0], token[1])
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
print 'Error! Failed to get access token.'
auth.set_access_token(auth.access_token.key, auth.access_token.secret)
#auth.set_access_token("91435451-hhGY5e7Ga2c3viHCV26kVN1vgLWQm0gJMvJHYOsbh", "rEeRld6tM4V45T1fKX6abNc8BMC7hDF1n6q0tuOKfi2ME")
twitterApi = tweepy.API(auth)
if twitterApi:
userInfo = twitterApi.me()
cidadao = request.db["usrTree"][authenticated_userid(request)]
cidadao.twitter_key = auth.access_token.key
cidadao.twitter_secret = auth.access_token.secret
cidadao.login_twitter = userInfo.screen_name
transaction.commit()
#headers = remember(request, "teste@testecorp.com")
#headers = remember(request, "ilzi@testecorp.com")
request.session.flash(u"Usuário logado com twitter")
        headers = remember(request, authenticated_userid(request))
        return HTTPFound(location=request.route_url('usuario'), headers=headers)
else:
request.session.flash(u"Erro ao logar com twitter")
return HTTPFound(location=request.route_url('login'))
@view_config(route_name='authFacebook', renderer='authFacebook.slim',permission='comum')
def authFacebook(request):
"""
    Only authorizes and redirects the user to facebook
"""
    #OAuth authorization
#fbApi = Facebook("473549246060347", "ba198578f77ea264f8ed4053dd323054")
    #application token and secret -> obtained from facebook
args = dict(client_id="473549246060347", redirect_uri=request.route_url('loginAuthFace'))
try:
return HTTPFound("https://graph.facebook.com/oauth/authorize?" + urllib.urlencode(args))
except:
print 'Error! Failed to get request token.'
return HTTPFound(request.route_url('login'))
@view_config(route_name='loginFacebook', renderer='loginFacebook.slim',permission='comum')
def loginFacebook(request):
try:
return HTTPFound(request.route_url('login'))
except:
print 'Error! Failed to get request token.'
@view_config(route_name='loginAuthFace', renderer='loginAuthFace.slim',permission='comum')
def loginAuthFace(request):
try:
return HTTPFound(request.route_url('login'))
except:
print 'Error! Failed to get request token.'
@view_config(route_name='login', renderer='login.slim')
def login(request):
"""
    Login page: site, facebook and twitter
"""
esquema = FormLogin().bind(request=request)
esquema.title = "Login"
    #buttons do not accept phrases like "esqueci a senha" ("I forgot my password") as a label
form = deform.Form(esquema, buttons=('Entrar', 'Esqueci a senha'))
#form = deform.Form(esquema, buttons=('Entrar', 'Esqueci'))
if authenticated_userid(request):
request.session.flash(u"Usuário já está logado, caso queira entrar com usuário diferente, faça o logout.")
return HTTPFound(location=request.route_url('usuario'))
if 'Entrar' in request.POST:
try:
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
email = request.POST.get("email")
senha = request.POST.get("senha")
if email in request.db["usrTree"]:
cidadao = Cidadao("","")
cidadao = request.db["usrTree"][email]
if cidadao.senha == senha:
headers = remember(request, email)
next = request.route_url('usuario')
request.session.flash(u"Usuário logado")
return HTTPFound(location=next, headers=headers)
else:
request.session.flash(u"Email ou senha inválidos")
else:
request.session.flash(u"Email ou senha inválidos")
return {'form': form.render()}
    #this elif branch is never reached
#elif 'Esqueci' in request.POST:
elif 'Esqueci_a_senha' in request.POST:
return HTTPFound(location=request.route_url('r_senha'))
else:
return {'form': form.render()}
@view_config(route_name='usuario', renderer='usuario.slim', permission='basica')
def usuario(request):
"""
    User profile page
"""
cidadao = Cidadao("","")
    if not authenticated_userid(request) in request.db["usrTree"]:
        cidadao = request.db["twtTree"][authenticated_userid(request)]
    else:
        cidadao = request.db["usrTree"][authenticated_userid(request)]
return {
'cidadao': cidadao
}
@view_config(route_name='perfilUsr', renderer='usuario.slim', permission='comum')
def perfilUsuario(request):
"""
    User profile page
"""
cidadao = request.db["twtTree"][request.matchdict['id']]
return {
'cidadao': cidadao
}
@view_config(route_name='sobre', renderer='sobre.slim')
def sobre(request):
"""
    About page
"""
return {}
@view_config(route_name='mapa', renderer='mapa.slim')
def mapa(request):
"""
    Page with the mapped budget activities
"""
esquemaPesq = FormPesqMapa().bind(request=request)
esquemaPesq.title = "Pesquisa"
formPesq = deform.Form(esquemaPesq, buttons=('Pesquisar',))
esquema = FormMapa().bind(request=request)
esquema.title = "Mapa"
    #button label - insert point
form = deform.Form(esquema, buttons=('Inserir',))
if 'Pesquisar' in request.POST:
try:
formPesq.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
return HTTPFound(location=request.route_url('lista'))
elif 'Inserir' in request.POST:
return HTTPFound(location=request.route_url('inserir_ponto'))
else:
# values passed to template for rendering
return {
'form':form.render(),
'formPesq':formPesq.render(),
'showmenu':True,
}
@view_config(route_name='orcamentoId', renderer='orcamento.slim')
def orcamento(request):
"""
    Page for a single budget activity
"""
id = int(request.matchdict['id'])
esquemaFoto = FormOrcFoto().bind(request=request)
esquemaFoto.title = "Foto"
formFoto = deform.Form(esquemaFoto, buttons=('Upload Foto',))
esquemaVideo = FormOrcVideo().bind(request=request)
esquemaVideo.title = "Video"
formVideo = deform.Form(esquemaVideo, buttons=('Upload Video',))
esquemaSeguir = FormSeguirAtv().bind(request=request)
esquemaSeguir.title = "Seguir atualizações"
formSeguir = deform.Form(esquemaSeguir, buttons=('Salvar',))
esquema = FormOrcamento().bind(request=request)
#esquema.title = "Comentários"
form = deform.Form(esquema, buttons=('Enviar',))
esquemaResp = FormOrcamentoResp().bind(request=request)
#esquema.title = "Resposta"
formResp = deform.Form(esquemaResp, buttons=('Responder',))
#atv_orc = Atividade_orcamento("","")
atv_orc = Atividade()
    #select the budget activity to be displayed on the page
atv_orc = request.db["atvTree"][id]
    #activity coming from the map
#atv_orc = request.db["orcTree"]
#atv_orc = request.db["atvTree"]
    #scheme to put an id on the reply forms
    # sends a list of reply forms to the template
i = 0
formsResps = []
    #create reply forms for the already existing comments
    #each form has an id, so we can tell which comment a given reply belongs to
for coment in atv_orc.midia_coment:
formResp = deform.Form(esquemaResp, buttons=('Responder',), formid=str(i))
formsResps.append(formResp.render())
i = i + 1
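    # Note (illustrative): each reply form above is built with an explicit formid, and the
    # 'Responder' branch further down reads request.POST['__formid__'] to find out which of
    # these forms was submitted, i.e. which parent comment the reply belongs to. A minimal
    # sketch of the idea, with hypothetical names:
    #   forms = [deform.Form(schema, buttons=('Responder',), formid=str(n))
    #            for n, _ in enumerate(comments)]
    #   posted = int(request.POST['__formid__'])   # id of the form that was actually submitted
    #   parent = comments[posted]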
cidadao = Cidadao("","")
if (authenticated_userid(request)):
cidadao = request.db["usrTree"][authenticated_userid(request)]
if 'Upload_Foto' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
formFoto.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #the 3 lines below are repeated for the 3 forms.... how to optimize??
dadosSite = request.db['dadosSite']
        #call the function to insert into the updates list
Dados_site.addAtual(dadosSite, atv_orc)
Dados_site.addFoto(dadosSite)
foto = Midia_foto(request.POST.get('foto'), datetime.now(), authenticated_userid(request))
Atividade_cidadao.addFoto(atv_orc, foto)
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
elif 'Upload_Video' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
formVideo.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #wrap this in a try/except block
        #the 3 lines below are repeated for the 3 forms.... how to optimize??
dadosSite = request.db['dadosSite']
        #call the function to insert into the updates list
Dados_site.addAtual(dadosSite, atv_orc)
Dados_site.addVideo(dadosSite)
video = Midia_video(request.POST.get('video'), datetime.now(), authenticated_userid(request))
        #come up with some link validation?
        #move these functions to the model
        #convert a standard YouTube watch URL into its embed form
        video.link = video.linkOrig.replace('watch?v=','embed/')
Atividade_cidadao.addVideo(atv_orc, video)
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
elif 'Enviar' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
esquema = FormOrcamento().bind(request=request)
form = deform.Form(esquema, buttons=('Enviar',))
form.render()
form.validate(request.POST.items())
except deform.ValidationFailure as e:
print "form de comentário deu erro"
return {'form': e.render()}
        #the 3 lines below are repeated for the 3 forms.... how to optimize??
dadosSite = request.db['dadosSite']
        #call the function to insert into the updates list
Dados_site.addAtual(dadosSite, atv_orc)
Dados_site.addComent(dadosSite)
coment = Midia_comentario(request.POST.get('comentario'), datetime.now(), authenticated_userid(request))
Atividade_cidadao.addComent(atv_orc, coment)
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
elif 'Responder' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
esquemaResp = FormOrcamentoResp().bind(request=request)
formResp = deform.Form(esquemaResp, buttons=('Responder',))
formResp.render()
formResp.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #get the id of the form that submitted the comment reply
posted_formid = int(request.POST['__formid__'])
        #the 3 lines below are repeated for the 3 forms.... how to optimize??
dadosSite = request.db['dadosSite']
        #call the function to insert into the updates list
Dados_site.addAtual(dadosSite, atv_orc)
Dados_site.addComent(dadosSite)
coment = Midia_comentario(request.POST.get('resposta'), datetime.now(), authenticated_userid(request))
transaction.commit()
        #add the reply to the parent comment, according to the reply form id
comentPai = atv_orc.midia_coment[posted_formid]
comentPai.respostas.append(coment)
comentPai._p_changed = 1
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
elif 'Salvar' in request.POST:
if (not authenticated_userid(request)):
request.session.flash(u"Você deve estar logado para inserir conteúdos no site")
return HTTPFound(location=request.route_url('login'))
try:
formSeguir.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
Cidadao.addSeguir(cidadao, atv_orc, request.POST.get('seguir'))
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
else:
seguirAtv = cidadao.pontos_a_seguir
        #check whether the logged-in user is following the activity
if atv_orc.atividade in seguirAtv:
appstruct = {'seguir':True,}
else:
appstruct = {'seguir':False,}
appstructOrc = record_to_appstruct(atv_orc)
return {
'orcamento': atv_orc,
'form': form.render(appstruct=appstructOrc),
'coments': atv_orc.midia_coment,
'formResp': formsResps,
'formVideo': formVideo.render(),
'videos': atv_orc.midia_video,
'fotos': atv_orc.midia_foto,
'formFoto': formFoto.render(),
'formSeguir': formSeguir.render(appstruct=appstruct),
            #send the midia_foto items as soon as they are stored in the database
}
@view_config(route_name='inserir_ponto', renderer='inserir_ponto.slim', permission='basica')
def inserir_ponto(request):
"""
    Page for the user to insert new points/activities on the map
"""
esquema = FormInserirP().bind(request=request)
esquema.title = "Inserir ponto no mapa"
form = deform.Form(esquema, buttons=('Inserir', 'Cancelar'))
    #not sure whether this belongs here or in the models
if not request.db.has_key("atvTree"):
request.db["atvTree"] = OOBTree()
if 'Inserir' in request.POST:
try:
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
if(authenticated_userid(request)):
dadosSite = request.db['dadosSite']
            # Creation and insertion
atividade = Atividade_cidadao()
atividade = merge_session_with_post(atividade, request.POST.items())
            #assign an id to the activity?
atividade.data = datetime.now()
atividade.cidadao = authenticated_userid(request)
atividade.id = dadosSite.proxId
request.db['atvTree'][atividade.id] = atividade
dadosSite.proxId = dadosSite.proxId + 1
            #call the function to insert into the updates list
Dados_site.addAtual(dadosSite, atividade)
Dados_site.addAtvUsr(dadosSite)
transaction.commit()
request.session.flash(u"Atividade de usuário cadastrada com sucesso.")
            #return -> carry along the inserted activity
return HTTPFound(location=request.route_url('orcamentoId', id = atividade.id))
else:
return {'form': form.render()}
@view_config(route_name='privacidade', renderer='privacidade.slim')
def privacidade(request):
"""
    Page with the site's privacy policy
"""
return {}
@view_config(route_name='termos', renderer='termos.slim')
def termos(request):
"""
    Page with the site's terms and conditions of use
"""
return {}
@view_config(
route_name='rcad_senha',
renderer='rcad_senha.slim',
permission='basica'
)
def rcad_senha(request):
    """Reset the user's password"""
esquema = FormRecadSenha().bind(request=request)
esquema.title = "Redefinir senha"
cidadao = Cidadao("","")
form = deform.Form(esquema, buttons=('Salvar',))
if 'Salvar' in request.POST:
        # Form validation
try:
appstruct = form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
        #validate the token; if ok, merge session
cidadao = merge_session_with_post(cidadao, request.POST.items())
transaction.commit()
return HTTPFound(location=request.route_url('usuario'))
else:
return{'form':form.render()}
@view_config(
route_name='r_senha',
renderer='r_senha.slim',
permission='comum'
)
def r_senha(request):
"""
    User password reset
    Sends a token to the user's email
"""
esquema = FormRSenha().bind(request=request)
esquema.title = "Reenviar senha"
form = deform.Form(esquema, buttons=('Enviar',))
if 'Enviar' in request.POST:
        # Form validation
try:
appstruct = form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
email = request.POST.get("email")
if email in request.db["usrTree"]:
            #send an email with a token, store that token
headers = remember(request, email)
return HTTPFound(location=request.route_url('rcad_senha'), headers=headers)
else:
            request.session.flash(u"Email ou senha inválidos")
return HTTPFound(location=request.route_url('rcad_senha'))
else:
return {'form': form.render()}
@view_config(
route_name='denuncia',
renderer='denuncia.slim',
permission='basica'
)
def denunciar(request):
"""
    Form to submit a media abuse report
"""
id = int(request.matchdict['id'])
tipoMidia = request.matchdict['tmidia']
idMidia = int(request.matchdict['idM'])
atividade = Atividade()
atividade = request.db["atvTree"][id]
if tipoMidia == 'foto':
midia = atividade.midia_foto[idMidia]
elif tipoMidia == 'video':
midia = atividade.midia_video[idMidia]
elif tipoMidia == 'comentario':
midia = atividade.midia_coment[idMidia]
esquema = FormDenuncia().bind(request=request)
esquema.title = "Denunciar mídia"
#midia = Midia("", "")
    #somehow select this media item coming from a link
form = deform.Form(esquema, buttons=('Enviar',))
if 'Enviar' in request.POST:
        # Form validation
try:
esquema = FormDenuncia().bind(request=request)
esquema.title = "Denunciar mídia"
form = deform.Form(esquema, buttons=('Enviar',))
form.render()
form.validate(request.POST.items())
except deform.ValidationFailure as e:
return {'form': e.render()}
denuncia = Denuncia(request.POST.get("motivo"), authenticated_userid(request))
midia.addDenuncia(denuncia)
atividade.delMidiaDen()
cidadao = request.db["usrTree"][authenticated_userid(request)]
cidadao.addDenuncia(denuncia)
transaction.commit()
return HTTPFound(location=request.route_url('orcamentoId', id=id))
else:
return {'form': form.render()}
| gpl-3.0 | 1,547,272,856,669,598,200 | 34.836443 | 166 | 0.63968 | false |
jalavik/invenio | invenio/modules/upgrader/upgrades/invenio_2013_06_20_new_bibcheck_rules_table.py | 15 | 1210 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2013 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from invenio.legacy.dbquery import run_sql
depends_on = ['invenio_release_1_1_0']
def info():
return "New bibcheck_rules table"
def do_upgrade():
run_sql("""
CREATE TABLE IF NOT EXISTS bibcheck_rules (
name varchar(150) NOT NULL,
last_run datetime NOT NULL default '0000-00-00',
PRIMARY KEY (name)
) ENGINE=MyISAM;
""")
def estimate():
""" Estimate running time of upgrade in seconds (optional). """
return 1
| gpl-2.0 | 8,150,274,435,427,106,000 | 30.025641 | 74 | 0.721488 | false |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_signal.py | 79 | 33870 | import unittest
from test import support
from contextlib import closing
import gc
import pickle
import select
import signal
import struct
import subprocess
import traceback
import sys, os, time, errno
from test.script_helper import assert_python_ok, spawn_python
try:
import threading
except ImportError:
threading = None
class HandlerBCalled(Exception):
pass
def exit_subprocess():
"""Use os._exit(0) to exit the current subprocess.
Otherwise, the test catches the SystemExit and continues executing
in parallel with the original test, so you wind up with an
exponential number of tests running concurrently.
"""
os._exit(0)
def ignoring_eintr(__func, *args, **kwargs):
try:
return __func(*args, **kwargs)
except OSError as e:
if e.errno != errno.EINTR:
raise
return None
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class InterProcessSignalTests(unittest.TestCase):
MAX_DURATION = 20 # Entire test should last at most 20 sec.
def setUp(self):
self.using_gc = gc.isenabled()
gc.disable()
def tearDown(self):
if self.using_gc:
gc.enable()
def format_frame(self, frame, limit=None):
return ''.join(traceback.format_stack(frame, limit=limit))
def handlerA(self, signum, frame):
self.a_called = True
def handlerB(self, signum, frame):
self.b_called = True
raise HandlerBCalled(signum, self.format_frame(frame))
def wait(self, child):
"""Wait for child to finish, ignoring EINTR."""
while True:
try:
child.wait()
return
except OSError as e:
if e.errno != errno.EINTR:
raise
def run_test(self):
# Install handlers. This function runs in a sub-process, so we
# don't worry about re-setting the default handlers.
signal.signal(signal.SIGHUP, self.handlerA)
signal.signal(signal.SIGUSR1, self.handlerB)
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGALRM, signal.default_int_handler)
# Variables the signals will modify:
self.a_called = False
self.b_called = False
# Let the sub-processes know who to send signals to.
pid = os.getpid()
child = ignoring_eintr(subprocess.Popen, ['kill', '-HUP', str(pid)])
if child:
self.wait(child)
if not self.a_called:
time.sleep(1) # Give the signal time to be delivered.
self.assertTrue(self.a_called)
self.assertFalse(self.b_called)
self.a_called = False
# Make sure the signal isn't delivered while the previous
# Popen object is being destroyed, because __del__ swallows
# exceptions.
del child
try:
child = subprocess.Popen(['kill', '-USR1', str(pid)])
# This wait should be interrupted by the signal's exception.
self.wait(child)
time.sleep(1) # Give the signal time to be delivered.
self.fail('HandlerBCalled exception not raised')
except HandlerBCalled:
self.assertTrue(self.b_called)
self.assertFalse(self.a_called)
child = ignoring_eintr(subprocess.Popen, ['kill', '-USR2', str(pid)])
if child:
self.wait(child) # Nothing should happen.
try:
signal.alarm(1)
# The race condition in pause doesn't matter in this case,
# since alarm is going to raise a KeyboardException, which
# will skip the call.
signal.pause()
# But if another signal arrives before the alarm, pause
# may return early.
time.sleep(1)
except KeyboardInterrupt:
pass
except:
self.fail("Some other exception woke us from pause: %s" %
traceback.format_exc())
else:
self.fail("pause returned of its own accord, and the signal"
" didn't arrive after another second.")
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform=='freebsd6',
'inter process signals not reliable (do not mix well with threading) '
'on freebsd6')
def test_main(self):
# This function spawns a child process to insulate the main
# test-running process from all the signals. It then
# communicates with that child process over a pipe and
# re-raises information about any exceptions the child
# raises. The real work happens in self.run_test().
os_done_r, os_done_w = os.pipe()
with closing(os.fdopen(os_done_r, 'rb')) as done_r, \
closing(os.fdopen(os_done_w, 'wb')) as done_w:
child = os.fork()
if child == 0:
# In the child process; run the test and report results
# through the pipe.
try:
done_r.close()
# Have to close done_w again here because
# exit_subprocess() will skip the enclosing with block.
with closing(done_w):
try:
self.run_test()
except:
pickle.dump(traceback.format_exc(), done_w)
else:
pickle.dump(None, done_w)
except:
print('Uh oh, raised from pickle.')
traceback.print_exc()
finally:
exit_subprocess()
done_w.close()
# Block for up to MAX_DURATION seconds for the test to finish.
r, w, x = select.select([done_r], [], [], self.MAX_DURATION)
if done_r in r:
tb = pickle.load(done_r)
if tb:
self.fail(tb)
else:
os.kill(child, signal.SIGKILL)
self.fail('Test deadlocked after %d seconds.' %
self.MAX_DURATION)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class PosixTests(unittest.TestCase):
def trivial_signal_handler(self, *args):
pass
def test_out_of_range_signal_number_raises_error(self):
self.assertRaises(ValueError, signal.getsignal, 4242)
self.assertRaises(ValueError, signal.signal, 4242,
self.trivial_signal_handler)
def test_setting_signal_handler_to_none_raises_error(self):
self.assertRaises(TypeError, signal.signal,
signal.SIGUSR1, None)
def test_getsignal(self):
hup = signal.signal(signal.SIGHUP, self.trivial_signal_handler)
self.assertEqual(signal.getsignal(signal.SIGHUP),
self.trivial_signal_handler)
signal.signal(signal.SIGHUP, hup)
self.assertEqual(signal.getsignal(signal.SIGHUP), hup)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
class WindowsSignalTests(unittest.TestCase):
def test_issue9324(self):
# Updated for issue #10003, adding SIGBREAK
handler = lambda x, y: None
checked = set()
for sig in (signal.SIGABRT, signal.SIGBREAK, signal.SIGFPE,
signal.SIGILL, signal.SIGINT, signal.SIGSEGV,
signal.SIGTERM):
# Set and then reset a handler for signals that work on windows.
# Issue #18396, only for signals without a C-level handler.
if signal.getsignal(sig) is not None:
signal.signal(sig, signal.signal(sig, handler))
checked.add(sig)
# Issue #18396: Ensure the above loop at least tested *something*
self.assertTrue(checked)
with self.assertRaises(ValueError):
signal.signal(-1, handler)
with self.assertRaises(ValueError):
signal.signal(7, handler)
class WakeupFDTests(unittest.TestCase):
def test_invalid_fd(self):
fd = support.make_bad_fd()
self.assertRaises(ValueError, signal.set_wakeup_fd, fd)
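# Illustrative sketch (not part of the test suite) of the wakeup-fd mechanism exercised by
# WakeupSignalTests below: each delivered signal writes its number as one byte to the wakeup
# fd, so a poll/select loop can watch the read end of a pipe. The helper name is hypothetical.
def _wakeup_fd_sketch():
    import fcntl, os, select, signal, time
    read_fd, write_fd = os.pipe()
    for fd in (read_fd, write_fd):
        flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
        fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
    signal.set_wakeup_fd(write_fd)
    signal.signal(signal.SIGALRM, lambda signum, frame: None)
    signal.alarm(1)
    time.sleep(2)                              # the alarm fires while we sleep
    select.select([read_fd], [], [], 0)        # the read end is already readable here
    pending = list(os.read(read_fd, 64))       # one byte per delivered signal number
    signal.set_wakeup_fd(-1)
    os.close(read_fd)
    os.close(write_fd)
    return pending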
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class WakeupSignalTests(unittest.TestCase):
def check_wakeup(self, test_body, *signals, ordered=True):
# use a subprocess to have only one thread
code = """if 1:
import fcntl
import os
import signal
import struct
signals = {!r}
def handler(signum, frame):
pass
def check_signum(signals):
data = os.read(read, len(signals)+1)
raised = struct.unpack('%uB' % len(data), data)
if not {!r}:
raised = set(raised)
signals = set(signals)
if raised != signals:
raise Exception("%r != %r" % (raised, signals))
{}
signal.signal(signal.SIGALRM, handler)
read, write = os.pipe()
for fd in (read, write):
flags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)
flags = flags | os.O_NONBLOCK
fcntl.fcntl(fd, fcntl.F_SETFL, flags)
signal.set_wakeup_fd(write)
test()
check_signum(signals)
os.close(read)
os.close(write)
""".format(signals, ordered, test_body)
assert_python_ok('-c', code)
def test_wakeup_write_error(self):
# Issue #16105: write() errors in the C signal handler should not
# pass silently.
# Use a subprocess to have only one thread.
code = """if 1:
import errno
import fcntl
import os
import signal
import sys
import time
from test.support import captured_stderr
def handler(signum, frame):
1/0
signal.signal(signal.SIGALRM, handler)
r, w = os.pipe()
flags = fcntl.fcntl(r, fcntl.F_GETFL, 0)
fcntl.fcntl(r, fcntl.F_SETFL, flags | os.O_NONBLOCK)
# Set wakeup_fd a read-only file descriptor to trigger the error
signal.set_wakeup_fd(r)
try:
with captured_stderr() as err:
signal.alarm(1)
time.sleep(5.0)
except ZeroDivisionError:
# An ignored exception should have been printed out on stderr
err = err.getvalue()
if ('Exception ignored when trying to write to the signal wakeup fd'
not in err):
raise AssertionError(err)
if ('OSError: [Errno %d]' % errno.EBADF) not in err:
raise AssertionError(err)
else:
raise AssertionError("ZeroDivisionError not raised")
"""
r, w = os.pipe()
try:
os.write(r, b'x')
except OSError:
pass
else:
self.skipTest("OS doesn't report write() error on the read end of a pipe")
finally:
os.close(r)
os.close(w)
assert_python_ok('-c', code)
def test_wakeup_fd_early(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the sleep,
# before select is called
time.sleep(TIMEOUT_FULL)
mid_time = time.time()
dt = mid_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
select.select([read], [], [], TIMEOUT_FULL)
after_time = time.time()
dt = after_time - mid_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_wakeup_fd_during(self):
self.check_wakeup("""def test():
import select
import time
TIMEOUT_FULL = 10
TIMEOUT_HALF = 5
signal.alarm(1)
before_time = time.time()
# We attempt to get a signal during the select call
try:
select.select([read], [], [], TIMEOUT_FULL)
except OSError:
pass
else:
raise Exception("OSError not raised")
after_time = time.time()
dt = after_time - before_time
if dt >= TIMEOUT_HALF:
raise Exception("%s >= %s" % (dt, TIMEOUT_HALF))
""", signal.SIGALRM)
def test_signum(self):
self.check_wakeup("""def test():
signal.signal(signal.SIGUSR1, handler)
os.kill(os.getpid(), signal.SIGUSR1)
os.kill(os.getpid(), signal.SIGALRM)
""", signal.SIGUSR1, signal.SIGALRM)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pending(self):
self.check_wakeup("""def test():
signum1 = signal.SIGUSR1
signum2 = signal.SIGUSR2
signal.signal(signum1, handler)
signal.signal(signum2, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, (signum1, signum2))
os.kill(os.getpid(), signum1)
os.kill(os.getpid(), signum2)
# Unblocking the 2 signals calls the C signal handler twice
signal.pthread_sigmask(signal.SIG_UNBLOCK, (signum1, signum2))
""", signal.SIGUSR1, signal.SIGUSR2, ordered=False)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class SiginterruptTest(unittest.TestCase):
def readpipe_interrupted(self, interrupt):
"""Perform a read during which a signal will arrive. Return True if the
read is interrupted by the signal and raises an exception. Return False
if it returns normally.
"""
# use a subprocess to have only one thread, to have a timeout on the
# blocking read and to not touch signal handling in this process
code = """if 1:
import errno
import os
import signal
import sys
interrupt = %r
r, w = os.pipe()
def handler(signum, frame):
pass
signal.signal(signal.SIGALRM, handler)
if interrupt is not None:
signal.siginterrupt(signal.SIGALRM, interrupt)
print("ready")
sys.stdout.flush()
# run the test twice
for loop in range(2):
# send a SIGALRM in a second (during the read)
signal.alarm(1)
try:
# blocking call: read from a pipe without data
os.read(r, 1)
except OSError as err:
if err.errno != errno.EINTR:
raise
else:
sys.exit(2)
sys.exit(3)
""" % (interrupt,)
with spawn_python('-c', code) as process:
try:
# wait until the child process is loaded and has started
first_line = process.stdout.readline()
stdout, stderr = process.communicate(timeout=5.0)
except subprocess.TimeoutExpired:
process.kill()
return False
else:
stdout = first_line + stdout
exitcode = process.wait()
if exitcode not in (2, 3):
raise Exception("Child error (exit code %s): %r"
% (exitcode, stdout))
return (exitcode == 3)
def test_without_siginterrupt(self):
# If a signal handler is installed and siginterrupt is not called
# at all, when that signal arrives, it interrupts a syscall that's in
# progress.
interrupted = self.readpipe_interrupted(None)
self.assertTrue(interrupted)
def test_siginterrupt_on(self):
# If a signal handler is installed and siginterrupt is called with
# a true value for the second argument, when that signal arrives, it
# interrupts a syscall that's in progress.
interrupted = self.readpipe_interrupted(True)
self.assertTrue(interrupted)
def test_siginterrupt_off(self):
# If a signal handler is installed and siginterrupt is called with
# a false value for the second argument, when that signal arrives, it
# does not interrupt a syscall that's in progress.
interrupted = self.readpipe_interrupted(False)
self.assertFalse(interrupted)
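# Rough sketch (not a test) of the behaviour SiginterruptTest checks above: after
# signal.siginterrupt(sig, False) the kernel restarts a blocking call such as os.read() once
# the signal has been handled, whereas siginterrupt(sig, True) lets the call fail with EINTR.
# The helper name and the timer-based writer are illustrative only.
def _siginterrupt_sketch(restart_syscalls=True):
    import os, signal, threading
    r, w = os.pipe()
    signal.signal(signal.SIGALRM, lambda signum, frame: None)
    signal.siginterrupt(signal.SIGALRM, not restart_syscalls)
    writer = threading.Timer(2, os.write, (w, b'!'))   # lets a restarted read() complete
    writer.start()
    signal.alarm(1)                                    # fires while os.read() is blocked
    try:
        os.read(r, 1)
        return 'restarted'
    except InterruptedError:
        return 'interrupted'       # EINTR reached Python because restarts were disabled
    finally:
        signal.alarm(0)
        writer.cancel()
        writer.join()
        os.close(r)
        os.close(w)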
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
class ItimerTest(unittest.TestCase):
def setUp(self):
self.hndl_called = False
self.hndl_count = 0
self.itimer = None
self.old_alarm = signal.signal(signal.SIGALRM, self.sig_alrm)
def tearDown(self):
signal.signal(signal.SIGALRM, self.old_alarm)
if self.itimer is not None: # test_itimer_exc doesn't change this attr
# just ensure that itimer is stopped
signal.setitimer(self.itimer, 0)
def sig_alrm(self, *args):
self.hndl_called = True
def sig_vtalrm(self, *args):
self.hndl_called = True
if self.hndl_count > 3:
# it shouldn't be here, because it should have been disabled.
raise signal.ItimerError("setitimer didn't disable ITIMER_VIRTUAL "
"timer.")
elif self.hndl_count == 3:
# disable ITIMER_VIRTUAL, this function shouldn't be called anymore
signal.setitimer(signal.ITIMER_VIRTUAL, 0)
self.hndl_count += 1
def sig_prof(self, *args):
self.hndl_called = True
signal.setitimer(signal.ITIMER_PROF, 0)
def test_itimer_exc(self):
# XXX I'm assuming -1 is an invalid itimer, but maybe some platform
# defines it ?
self.assertRaises(signal.ItimerError, signal.setitimer, -1, 0)
# Negative times are treated as zero on some platforms.
if 0:
self.assertRaises(signal.ItimerError,
signal.setitimer, signal.ITIMER_REAL, -1)
def test_itimer_real(self):
self.itimer = signal.ITIMER_REAL
signal.setitimer(self.itimer, 1.0)
signal.pause()
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform in ('freebsd6', 'netbsd5'),
'itimer not reliable (does not mix well with threading) on some BSDs.')
def test_itimer_virtual(self):
self.itimer = signal.ITIMER_VIRTUAL
signal.signal(signal.SIGVTALRM, self.sig_vtalrm)
signal.setitimer(self.itimer, 0.3, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# use up some virtual time by doing real work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_vtalrm handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# virtual itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
# Issue 3864, unknown if this affects earlier versions of freebsd also
@unittest.skipIf(sys.platform=='freebsd6',
'itimer not reliable (does not mix well with threading) on freebsd6')
def test_itimer_prof(self):
self.itimer = signal.ITIMER_PROF
signal.signal(signal.SIGPROF, self.sig_prof)
signal.setitimer(self.itimer, 0.2, 0.2)
start_time = time.time()
while time.time() - start_time < 60.0:
# do some work
_ = pow(12345, 67890, 10000019)
if signal.getitimer(self.itimer) == (0.0, 0.0):
break # sig_prof handler stopped this itimer
else: # Issue 8424
self.skipTest("timeout: likely cause: machine too slow or load too "
"high")
# profiling itimer should be (0.0, 0.0) now
self.assertEqual(signal.getitimer(self.itimer), (0.0, 0.0))
# and the handler should have been called
self.assertEqual(self.hndl_called, True)
class PendingSignalsTests(unittest.TestCase):
"""
Test pthread_sigmask(), pthread_kill(), sigpending() and sigwait()
functions.
"""
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending_empty(self):
self.assertEqual(signal.sigpending(), set())
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipUnless(hasattr(signal, 'sigpending'),
'need signal.sigpending()')
def test_sigpending(self):
code = """if 1:
import os
import signal
def handler(signum, frame):
1/0
signum = signal.SIGUSR1
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
os.kill(os.getpid(), signum)
pending = signal.sigpending()
if pending != {signum}:
raise Exception('%s != {%s}' % (pending, signum))
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill(self):
code = """if 1:
import signal
import threading
import sys
signum = signal.SIGUSR1
def handler(signum, frame):
1/0
signal.signal(signum, handler)
if sys.platform == 'freebsd6':
# Issue #12392 and #12469: send a signal to the main thread
# doesn't work before the creation of the first thread on
# FreeBSD 6
def noop():
pass
thread = threading.Thread(target=noop)
thread.start()
thread.join()
tid = threading.get_ident()
try:
signal.pthread_kill(tid, signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
"""
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def wait_helper(self, blocked, test):
"""
test: body of the "def test(signum):" function.
blocked: number of the blocked signal
"""
code = '''if 1:
import signal
import sys
def handler(signum, frame):
1/0
%s
blocked = %s
signum = signal.SIGALRM
# child: block and wait the signal
try:
signal.signal(signum, handler)
signal.pthread_sigmask(signal.SIG_BLOCK, [blocked])
# Do the tests
test(signum)
# The handler must not be called on unblock
try:
signal.pthread_sigmask(signal.SIG_UNBLOCK, [blocked])
except ZeroDivisionError:
print("the signal handler has been called",
file=sys.stderr)
sys.exit(1)
except BaseException as err:
print("error: {}".format(err), file=sys.stderr)
sys.stderr.flush()
sys.exit(1)
''' % (test.strip(), blocked)
# sig*wait* must be called with the signal blocked: since the current
# process might have several threads running, use a subprocess to have
# a single thread.
assert_python_ok('-c', code)
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
def test_sigwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
received = signal.sigwait([signum])
if received != signum:
raise Exception('received %s, not %s' % (received, signum))
''')
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
def test_sigwaitinfo(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigwaitinfo([signum])
if info.si_signo != signum:
raise Exception("info.si_signo != %s" % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
signal.alarm(1)
info = signal.sigtimedwait([signum], 10.1000)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_poll(self):
# check that polling with sigtimedwait works
self.wait_helper(signal.SIGALRM, '''
def test(signum):
import os
os.kill(os.getpid(), signum)
info = signal.sigtimedwait([signum], 0)
if info.si_signo != signum:
raise Exception('info.si_signo != %s' % signum)
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_timeout(self):
self.wait_helper(signal.SIGALRM, '''
def test(signum):
received = signal.sigtimedwait([signum], 1.0)
if received is not None:
raise Exception("received=%r" % (received,))
''')
@unittest.skipUnless(hasattr(signal, 'sigtimedwait'),
'need signal.sigtimedwait()')
def test_sigtimedwait_negative_timeout(self):
signum = signal.SIGALRM
self.assertRaises(ValueError, signal.sigtimedwait, [signum], -1.0)
@unittest.skipUnless(hasattr(signal, 'sigwaitinfo'),
'need signal.sigwaitinfo()')
# Issue #18238: sigwaitinfo() can be interrupted on Linux (raises
# InterruptedError), but not on AIX
@unittest.skipIf(sys.platform.startswith("aix"),
'signal.sigwaitinfo() cannot be interrupted on AIX')
def test_sigwaitinfo_interrupted(self):
self.wait_helper(signal.SIGUSR1, '''
def test(signum):
import errno
hndl_called = True
def alarm_handler(signum, frame):
hndl_called = False
signal.signal(signal.SIGALRM, alarm_handler)
signal.alarm(1)
try:
signal.sigwaitinfo([signal.SIGUSR1])
except OSError as e:
if e.errno == errno.EINTR:
if not hndl_called:
raise Exception("SIGALRM handler not called")
else:
raise Exception("Expected EINTR to be raised by sigwaitinfo")
else:
raise Exception("Expected EINTR to be raised by sigwaitinfo")
''')
@unittest.skipUnless(hasattr(signal, 'sigwait'),
'need signal.sigwait()')
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
@unittest.skipIf(threading is None, "test needs threading module")
def test_sigwait_thread(self):
# Check that calling sigwait() from a thread doesn't suspend the whole
# process. A new interpreter is spawned to avoid problems when mixing
# threads and fork(): only async-safe functions are allowed between
# fork() and exec().
assert_python_ok("-c", """if True:
import os, threading, sys, time, signal
# the default handler terminates the process
signum = signal.SIGUSR1
def kill_later():
# wait until the main thread is waiting in sigwait()
time.sleep(1)
os.kill(os.getpid(), signum)
# the signal must be blocked by all the threads
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
killer = threading.Thread(target=kill_later)
killer.start()
received = signal.sigwait([signum])
if received != signum:
print("sigwait() received %s, not %s" % (received, signum),
file=sys.stderr)
sys.exit(1)
killer.join()
# unblock the signal, which should have been cleared by sigwait()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
""")
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask_arguments(self):
self.assertRaises(TypeError, signal.pthread_sigmask)
self.assertRaises(TypeError, signal.pthread_sigmask, 1)
self.assertRaises(TypeError, signal.pthread_sigmask, 1, 2, 3)
self.assertRaises(OSError, signal.pthread_sigmask, 1700, [])
@unittest.skipUnless(hasattr(signal, 'pthread_sigmask'),
'need signal.pthread_sigmask()')
def test_pthread_sigmask(self):
code = """if 1:
import signal
import os; import threading
def handler(signum, frame):
1/0
def kill(signum):
os.kill(os.getpid(), signum)
def read_sigmask():
return signal.pthread_sigmask(signal.SIG_BLOCK, [])
signum = signal.SIGUSR1
# Install our signal handler
old_handler = signal.signal(signum, handler)
# Unblock SIGUSR1 (and copy the old mask) to test our signal handler
old_mask = signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Block and then raise SIGUSR1. The signal is blocked: the signal
# handler is not called, and the signal is now pending
signal.pthread_sigmask(signal.SIG_BLOCK, [signum])
kill(signum)
# Check the new mask
blocked = read_sigmask()
if signum not in blocked:
raise Exception("%s not in %s" % (signum, blocked))
if old_mask ^ blocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (old_mask, blocked, signum))
# Unblock SIGUSR1
try:
# unblock the pending signal calls immediately the signal handler
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signum])
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
try:
kill(signum)
except ZeroDivisionError:
pass
else:
raise Exception("ZeroDivisionError not raised")
# Check the new mask
unblocked = read_sigmask()
if signum in unblocked:
raise Exception("%s in %s" % (signum, unblocked))
if blocked ^ unblocked != {signum}:
raise Exception("%s ^ %s != {%s}" % (blocked, unblocked, signum))
if old_mask != unblocked:
raise Exception("%s != %s" % (old_mask, unblocked))
"""
assert_python_ok('-c', code)
@unittest.skipIf(sys.platform == 'freebsd6',
"issue #12392: send a signal to the main thread doesn't work "
"before the creation of the first thread on FreeBSD 6")
@unittest.skipUnless(hasattr(signal, 'pthread_kill'),
'need signal.pthread_kill()')
def test_pthread_kill_main_thread(self):
# Test that a signal can be sent to the main thread with pthread_kill()
# before any other thread has been created (see issue #12392).
code = """if True:
import threading
import signal
import sys
def handler(signum, frame):
sys.exit(3)
signal.signal(signal.SIGUSR1, handler)
signal.pthread_kill(threading.get_ident(), signal.SIGUSR1)
sys.exit(2)
"""
with spawn_python('-c', code) as process:
stdout, stderr = process.communicate()
exitcode = process.wait()
if exitcode != 3:
raise Exception("Child error (exit code %s): %s" %
(exitcode, stdout))
def test_main():
try:
support.run_unittest(PosixTests, InterProcessSignalTests,
WakeupFDTests, WakeupSignalTests,
SiginterruptTest, ItimerTest, WindowsSignalTests,
PendingSignalsTests)
finally:
support.reap_children()
if __name__ == "__main__":
test_main()
| gpl-2.0 | -6,147,222,488,262,197,000 | 35.031915 | 86 | 0.561884 | false |
harryliu/edwin | edwinAgent/site_packages/dbRowFactory/pyDbRowFactory.py | 4 | 14255 | # -*- coding: utf-8 -*-
'''
#@summary: DbRowFactory is one common factory to convert db row tuple into user-defined class object.
It is supported SqlAlchemy, and any database modules conformed to Python Database API
Specification v2.0. e.g. cx_Oracle, zxJDBC
#@note:
Note 1: The DbRowFactory will create one row instance based on row class binding,
and try to assign all fields' value to the new object.
The DbRowFactory maps field and class setter_method/attribute
by matching names. If both a setter_method and an attribute match
the same field, the setter_method will be chosen eventually.
Note 2: __init__() of the class must no other arguments rather than self
#@see: http://www.python.org/dev/peps/pep-0249/
#Tested under: Python 2.7, Jython2.5.2
#Change log:
version 1.0.1, 09 Nov. 2011, initial version
version 1.0.2, 16 Feb. 2012, use pyObjectCreator to instantiate rowClass
version 1.0.3, 08 Mar. 2012, fromSqlAlchemyResultProxy(), fetchAllRowObjects() functions added
version 1.0.4, 31 May. 2013, bug fixed version, disable auto-close cursor if not created by SqlAlchemy
version 1.0.5, 04 Feb. 2014, import pyObjectCreator in explicit relative importing
##====================sample begin=======
#sample code , file: OracleJdbcSample.py
from __future__ import with_statement
from com.ziclix.python.sql import zxJDBC
from pyDbRowFactory import DbRowFactory
class rowClass2(object):
def __init__(self):
self.owner=None
self.tablename=None
def setOWNER(self, value):
self.owner=value
def print2(self):
print("ownerName="+self.owner+",tablename="+self.tablename)
if __name__=="__main__":
#DB API 2.0 cursor sample
jdbc_url="jdbc:oracle:thin:@127.0.0.1:1521:orcl";
username = "user1"
password = "pwd1"
driver = "oracle.jdbc.driver.OracleDriver"
with zxJDBC.connect(jdbc_url, username, password, driver) as conn:
with conn.cursor() as cursor :
cursor.execute("""select tbl.owner, tbl.table_name tablename,
tbl.tablespace_name from all_tables tbl""")
#use DbRowFactory to bind rowClass2 class defined in pkg1.OracleJdbcSample.py
rowFactory=DbRowFactory(cursor, "pkg1.OracleJdbcSample.rowClass2")
for rowObject in rowFactory.fetchAllRowObjects():
rowObject.print2()
#sqlalchemy sample
from sqlalchemy import create_engine
engine=create_engine("sqlite:///:memory:", echo=True)
sql="""select tbl.owner, tbl.table_name tablename,
tbl.tablespace_name from all_tables tbl"""
resultProxy=engine.execute(sql)
rowFactory=DbRowFactory.fromSqlAlchemyResultProxy(resultProxy, "pkg1.OracleJdbcSample.rowClass2")
for rowObject in rowFactory.fetchAllRowObjects():
rowObject.print2()
##====================sample end=======
'''
import inspect
import sys
__author__ = 'Harry Liu, <harrychinese@gmail.com>'
__version__= '1.0.5'
class DbRowFactory(object):
'''
    #@summary: DbRowFactory is a common row factory for any database
    module conforming to the Python Database API Specification
    v2.0, e.g. cx_Oracle, zxJDBC
#@note:
Note 1: The DbRowFactory will create one row instance based on row class binding,
and try to assign all fields' value to the new object.
The DbRowFactory maps field and class setter_method/attribute
by matching names. If both a setter_method and an attribute match
the same field, the setter_method will be chosen eventually.
Note 2: __init__() of the class must no other arguments rather than self
#@see: http://www.python.org/dev/peps/pep-0249/
#@author: Harry Liu, harrychinese@gmail.com
'''
FIELD_TO_SETTER=1
FIELD_TO_ATTRIBUTE=2
FIELD_TO_NONE=0
def __init__(self, cursor, rowClassFullName, setterPrefix="set", caseSensitive=False):
'''
##@summary: Constructor of DbRowFactory
[arguments]
cursor: Db API 2.0 cursor object
        rowClassFullName: full class name that you want to instantiate, including the package and module name if any
        setterPrefix: setter method prefix
        caseSensitive: match fieldname with class setter_method/attribute case-sensitively or not
'''
self._cursor=cursor
self._setterPrefix=setterPrefix
self._caseSensitive=caseSensitive
self._fieldMemeberMapped=False
self._allMethods=[]
self._allAttributes=[]
self._fieldMapList={}
self._rowClassMeta = getClassMeta(rowClassFullName)
self._resultProxy=None
@classmethod
def fromSqlAlchemyResultProxy(cls, resultProxy, rowClassFullName, setterPrefix="set", caseSensitive=False):
'''
##@summary: another constructor of DbRowFactory
[arguments]
        resultProxy: SqlAlchemyResultProxy object, e.g. the one returned by engine.execute("select 1")
        rowClassFullName: full class name that you want to instantiate, including the package and module name if any
        setterPrefix: setter method prefix
        caseSensitive: match fieldname with class setter_method/attribute case-sensitively or not
'''
factory= cls(resultProxy.cursor, rowClassFullName, setterPrefix, caseSensitive)
factory._resultProxy=resultProxy
return factory
def createRowInstance(self, row ,*args,**kwargs):
'''
#@summary: create one instance object, and try to assign all fields' value to the new object
[arguments]
row: row tuple in a _cursor
*args: list style arguments in class constructor related to rowClassFullName
*kwargs: dict style arguments in class constructor related to rowClassFullName
'''
#step 1: initialize rowInstance before finding attributes.
rowObject = self._rowClassMeta(*args,**kwargs)
#mapping process run only once in order to gain better performance
if self._fieldMemeberMapped==False:
#dir() cannot list attributes before one class instantiation
self._allAttributes=self._getAllMembers(rowObject)
self._allMethods=self._getAllMembers(rowObject)
self._fieldMapList=self._mapFieldAndMember()
self._fieldMemeberMapped=True
#step 2: assign field values
i=0
#self._fieldMapList is [{Field1:(member1Flag,member1)},{Field2:(member2Flag,member2)}]
for fieldMemberDict in self._fieldMapList:
for field in fieldMemberDict:
member=fieldMemberDict[field]
if member[0]==self.FIELD_TO_NONE:
pass
else:
fieldValue=row[i]
if member[0]==self.FIELD_TO_SETTER:
m=getattr(rowObject, member[1])
m(fieldValue)
elif member[0]==self.FIELD_TO_ATTRIBUTE:
setattr(rowObject, member[1], fieldValue)
i=i+1
return rowObject
def _getAllMembers(self,clazz) :
'''
#@summary: extract all user-defined methods in given class
        #@param clazz: class object
'''
members=[member for member in dir(clazz)]
sysMemberList=['__class__','__doc__','__init__','__new__','__subclasshook__','__dict__', '__module__','__delattr__', '__getattribute__', '__hash__', '__repr__', '__setattr__', '__str__','__format__', '__reduce__', '__reduce_ex__', '__sizeof__', '__weakref__']
members=[member for member in members if str(member) not in sysMemberList]
return members
def _mapFieldAndMember(self):
'''
        #@summary: create mapping between field and class setter_method/attribute, setter_method is preferred over attribute
        #fields can be extracted from cursor.description, e.g.
sql: select 1 a, sysdate dt from dual
cursor.description:
[(u'A', 2, 22, None, 0, 0, 1), (u'DT', 91, 7, None, None, None, 1)]
'''
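        #illustrative example (hypothetical row class): for the description above and a class
        #exposing a setA() setter plus a dt attribute, the returned list would look like
        #[{u'A': (self.FIELD_TO_SETTER, 'setA')}, {u'DT': (self.FIELD_TO_ATTRIBUTE, 'dt')}]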
#print(self._cursor.description)
fields=[f[0] for f in self._cursor.description]
mapList=[]
#result is [{Field1:(member1Flag,member1)},{Field2:(member2Flag,member2)}]
for f in fields:
m= self._getSetterMethod(f)
key=f
if m:
value=(self.FIELD_TO_SETTER,m)
else:
m= self._getAttribute(f)
if m:
value=(self.FIELD_TO_ATTRIBUTE,m)
else:
value=(self.FIELD_TO_NONE,None)
mapList.append({key:value})
return mapList
def _getAttribute(self, fieldName):
'''
#@summary: get related attribute to given fieldname
'''
if self._caseSensitive:
if fieldName in self._allAttributes:
return fieldName
else:
fieldNameUpper=fieldName.upper()
allAttributesMap={} # attributeUpper=attribute
for attr in self._allAttributes:
allAttributesMap[attr.upper()]=attr
if fieldNameUpper in allAttributesMap:
return allAttributesMap[fieldNameUpper]
def _getSetterMethod(self, fieldName):
'''
##@summary: get related setter method to given fieldname
'''
if self._caseSensitive:
setter=self._setterPrefix+fieldName
if setter in self._allMethods:
return setter
else:
setterUpper=self._setterPrefix+fieldName
setterUpper=setterUpper.upper()
allMethodMap={} # methodUpper=method
for method in self._allMethods:
allMethodMap[method.upper()]=method
if setterUpper in allMethodMap:
return allMethodMap[setterUpper]
def _closeResultProxy(self):
if self._resultProxy is not None:
if self._resultProxy.closed==False:
self._resultProxy.close()
def _createdBySqlAlchemy(self):
return self._resultProxy!=None
def fetchAllRowObjects(self):
"""Fetch all rows, just like DB-API ``cursor.fetchall()``.
the cursor is automatically closed after this is called
"""
result=[]
rows=self._cursor.fetchall()
for row in rows:
rowObject=self.createRowInstance(row)
result.append(rowObject)
if self._createdBySqlAlchemy():
self._cursor.close()
self._closeResultProxy()
return result
def fetchManyRowObjects(self, size=None):
"""Fetch many rows, just like DB-API
``cursor.fetchmany(size=cursor.arraysize)``.
If rows are present, the cursor remains open after this is called.
Else the cursor is automatically closed and an empty list is returned.
"""
result=[]
rows=self._cursor.fetchmany(size)
for row in rows:
rowObject=self.createRowInstance(row)
result.append(rowObject)
if self._createdBySqlAlchemy():
if len(rows)==0:
self._cursor.close()
self._closeResultProxy()
return result
def fetchOneRowObject(self):
"""Fetch one row, just like DB-API ``cursor.fetchone()``.
If a row is present, the cursor remains open after this is called.
Else the cursor is automatically closed and None is returned.
"""
result=None
row = self._cursor.fetchone()
if row is not None:
result=self.createRowInstance(row)
else:
if self._createdBySqlAlchemy():
self._cursor.close()
self._closeResultProxy()
return result
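##@note: minimal usage sketch added for illustration only; sqlite3 is just one DB-API 2.0
# module, the in-memory table is hypothetical, and rowClassFullName must name a class
# reachable by getClassMeta() whose setters/attributes match the selected column names.
def _dbRowFactoryUsageSketch(rowClassFullName):
    import sqlite3
    conn = sqlite3.connect(":memory:")
    conn.execute("create table all_tables (owner text, tablename text)")
    conn.execute("insert into all_tables values ('user1', 'tab1')")
    cursor = conn.cursor()
    cursor.execute("select owner, tablename from all_tables")
    factory = DbRowFactory(cursor, rowClassFullName)
    rowObjects = factory.fetchAllRowObjects()
    cursor.close()
    conn.close()
    return rowObjects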
##reference doc
#http://www.cnblogs.com/sevenyuan/archive/2010/12/06/1898056.html
#http://stackoverflow.com/questions/4513192/python-dynamic-class-names
#http://stackoverflow.com/questions/1796180/python-get-list-of-al-classes-within-current-module
def createInstance(full_class_name,*args,**kwargs):
'''
instantiate class dynamically
[arguments]
    full_class_name: full class name that you want to instantiate, including the package and module name if any
*args: list style arguments in class constructor
*kwargs: dict style arguments in class constructor
[return]
an instance of this full_class_name
[example]
import pyObjectCreator
full_class_name="knightmade.logging.Logger"
logger=pyObjectCreator.createInstance(full_class_name,'logname')
'''
class_meta=getClassMeta(full_class_name)
if class_meta!=None:
obj=class_meta(*args,**kwargs)
else:
obj=None
return obj
def getClassMeta(full_class_name):
'''
get class meta object of full_class_name, then we can use this meta object to instantiate full_class_name
[arguments]
    full_class_name: full class name that you want to instantiate, including the package and module name if any
[return]
    the class object (not an instance) of full_class_name
[example]
import pyObjectCreator
full_class_name="knightmade.logging.Logger"
loggerMeta=pyObjectCreator.getClassMeta(full_class_name)
'''
namespace=full_class_name.strip().rsplit('.',1)
if len(namespace)==1:
class_name=namespace[0]
class_meta=_getClassMetaFromCurrModule(class_name)
else:
module_name=namespace[0]
class_name=namespace[1]
class_meta=_getClassMetaFromOtherModule(class_name,module_name)
return class_meta
def _getClassMetaFromCurrModule(class_name):
result=None
module_name="__main__"
for name, obj in inspect.getmembers(sys.modules[module_name]):
if inspect.isclass(obj):
if name==class_name:
result=obj
break
return result
def _getClassMetaFromOtherModule(class_name, module_name):
module_meta=__import__(module_name,globals(), locals(),[class_name])
if module_meta!=None:
class_meta=getattr(module_meta,class_name)
else:
class_meta=None
return class_meta | apache-2.0 | 2,534,508,222,375,085,600 | 35 | 267 | 0.631428 | false |
spierepf/mpf | mpf/modes/tilt/code/tilt.py | 1 | 7456 | """Contains the Tilt mode code"""
# tilt.py
# Mission Pinball Framework
# Written by Brian Madden & Gabe Knuth
# Released under the MIT License. (See license info at the end of this file.)
# Documentation and more info at http://missionpinball.com/mpf
from mpf.system.config import CaseInsensitiveDict
from mpf.system.mode import Mode
from mpf.system.timing import Timing
class Tilt(Mode):
def mode_init(self):
self._balls_to_collect = 0
self._last_warning_tick = 0
self.ball_ending_tilted_queue = None
self.tilt_event_handlers = set()
self.last_tilt_warning_switch_tick = 0
self.tilt_config = self.machine.config_processor.process_config2(
config_spec='tilt',
source=self._get_merged_settings('tilt'),
section_name='tilt')
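    # Sketch of the config keys this mode pulls out of the merged 'tilt' section above; the
    # key names are the ones referenced in this file, the values are examples only:
    #   tilt:
    #     tilt_warning_switch_tag: tilt_warning
    #     tilt_switch_tag: tilt
    #     slam_tilt_switch_tag: slam_tilt
    #     warnings_to_tilt: 3
    #     multiple_hit_window: 300ms
    #     tilt_warnings_player_var: tilt_warnings
    #     reset_warnings_events: ball_ending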
def mode_start(self, **kwargs):
self._register_switch_handlers()
for event in self.tilt_config['reset_warnings_events']:
self.add_mode_event_handler(event, self.reset_warnings)
def mode_stop(self, **kwargs):
self._remove_switch_handlers()
self.reset_warnings_handlers = set()
def _register_switch_handlers(self):
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_warning_switch_tag']):
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self._tilt_warning_switch_handler)
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_switch_tag']):
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self.tilt)
for switch in self.machine.switches.items_tagged(
self.tilt_config['slam_tilt_switch_tag']):
self.machine.switch_controller.add_switch_handler(
switch_name=switch.name,
callback=self.slam_tilt)
def _remove_switch_handlers(self):
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_warning_switch_tag']):
self.machine.switch_controller.remove_switch_handler(
switch_name=switch.name,
callback=self._tilt_warning_switch_handler)
for switch in self.machine.switches.items_tagged(
self.tilt_config['tilt_switch_tag']):
self.machine.switch_controller.remove_switch_handler(
switch_name=switch.name,
callback=self.tilt)
for switch in self.machine.switches.items_tagged(
self.tilt_config['slam_tilt_switch_tag']):
self.machine.switch_controller.remove_switch_handler(
switch_name=switch.name,
callback=self.slam_tilt)
def tilt_warning(self):
"""Processes a tilt warning. If the number of warnings is the number to
cause a tilt, a tilt will be processed.
"""
self.last_tilt_warning_switch_tick = self.machine.tick_num
if not self.player:
return
self.log.debug("Tilt Warning")
self._last_warning_tick = self.machine.tick_num
self.player[self.tilt_config['tilt_warnings_player_var']] += 1
warnings = self.player[self.tilt_config['tilt_warnings_player_var']]
if warnings >= self.tilt_config['warnings_to_tilt']:
self.tilt()
else:
self.machine.events.post('tilt_warning',
warnings=warnings,
warnings_remaining=(self.tilt_config['warnings_to_tilt'] -
warnings))
self.machine.events.post('tilt_warning_{}'.format(warnings))
def reset_warnings(self, **kwargs):
"""Resets the tilt warnings for the current player."""
try:
self.player[self.tilt_config['tilt_warnings_player_var']] = 0
except AttributeError:
pass
def tilt(self, **kwargs):
"""Causes the ball to tilt."""
if not self.machine.game:
return
self._balls_to_collect = self.machine.playfield.balls
# todo use collection
self.log.debug("Processing Tilt. Balls to collect: %s",
self._balls_to_collect)
self.machine.game.tilted = True
self.machine.events.post('tilt')
self._disable_autofires()
self._disable_flippers()
self.tilt_event_handlers.add(
self.machine.events.add_handler('ball_ending',
self._ball_ending_tilted))
for device in self.machine.ball_devices:
if 'drain' in device.tags:
self.tilt_event_handlers.add(
self.machine.events.add_handler(
'balldevice_{}_ball_enter'.format(device.name),
self._tilted_ball_drain))
else:
self.tilt_event_handlers.add(
self.machine.events.add_handler(
'balldevice_{}_ball_enter'.format(device.name),
self._tilted_ball_entered_non_drain_device))
self.machine.game.ball_ending()
def _disable_flippers(self):
for flipper in self.machine.flippers:
flipper.disable()
def _disable_autofires(self):
for autofire in self.machine.autofires:
autofire.disable()
def _tilted_ball_drain(self, new_balls, unclaimed_balls, device):
self._balls_to_collect -= unclaimed_balls
self.log.debug("Tilted ball drain. Balls to collect: %s",
self._balls_to_collect)
if self._balls_to_collect <= 0:
self._tilt_done()
return {'unclaimed_balls': 0}
def _tilted_ball_entered_non_drain_device(self, new_balls, unclaimed_balls,
device):
return {'unclaimed_balls': unclaimed_balls}
def _tilt_switch_handler(self):
self.tilt()
def _tilt_warning_switch_handler(self):
if (self._last_warning_tick + self.tilt_config['multiple_hit_window']
<= self.machine.tick_num):
self.tilt_warning()
def _ball_ending_tilted(self, queue):
self.ball_ending_tilted_queue = queue
queue.wait()
if not self._balls_to_collect:
self._tilt_done()
def _tilt_done(self):
if self.tilt_settle_ms_remaining():
self.delay.reset(ms=self.tilt_settle_ms_remaining(),
callback=self._tilt_done,
name='tilt')
else:
self.machine.game.tilted = False
self.machine.events.post('tilt_clear')
self.ball_ending_tilted_queue.clear()
self.machine.events.remove_handlers_by_keys(self.tilt_event_handlers)
self.tilt_event_handlers = set()
def tilt_settle_ms_remaining(self):
"""Returns the amount of milliseconds remaining until the tilt settle
time has cleared.
"""
ticks = (self.machine.tick_num - self.last_tilt_warning_switch_tick -
self.tilt_config['settle_time'])
if ticks >= 0:
return 0
else:
return abs(ticks * Timing.ms_per_tick)
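    # Worked example of the arithmetic above (illustrative values): with a
    # settle_time of 5 ticks, a last warning on tick 100 and tick_num 102,
    # ticks = 102 - 100 - 5 = -3, so 3 * Timing.ms_per_tick is returned.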
def slam_tilt(self):
self.machine.events.post('slam_tilt')
self.game_ended() | mit | -623,721,192,207,639,700 | 34.174528 | 81 | 0.583691 | false |
jkleckner/ansible | docsite/conf.py | 3 | 6348 | # -*- coding: utf-8 -*-
#
# documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 27 13:23:22 2008-2009.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed
# automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys
import os
# pip install sphinx_rtd_theme
#import sphinx_rtd_theme
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
#
sys.path.insert(0, os.path.join('ansible', 'lib'))
sys.path.append(os.path.abspath('_themes'))
VERSION='0.01'
AUTHOR='AnsibleWorks'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible Documentation'
copyright = "2013 AnsibleWorks"
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
#exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
exclude_patterns = ['modules']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
html_theme_path = ['_themes']
html_theme = 'srtd'
html_short_title = 'Ansible Documentation'
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 1.2 Documentation',
AUTHOR, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
autoclass_content = 'both'
| gpl-3.0 | 4,186,485,526,585,211,000 | 29.228571 | 79 | 0.721014 | false |
wlanslovenija/django-tastypie | tastypie/throttle.py | 5 | 4861 | from __future__ import unicode_literals
import time
from django.core.cache import cache
class BaseThrottle(object):
"""
A simplified, swappable base class for throttling.
Does nothing save for simulating the throttling API and implementing
some common bits for the subclasses.
Accepts a number of optional kwargs::
* ``throttle_at`` - the number of requests at which the user should
be throttled. Default is 150 requests.
* ``timeframe`` - the length of time (in seconds) in which the user
make up to the ``throttle_at`` requests. Default is 3600 seconds (
1 hour).
* ``expiration`` - the length of time to retain the times the user
has accessed the api in the cache. Default is 604800 (1 week).
"""
def __init__(self, throttle_at=150, timeframe=3600, expiration=None):
self.throttle_at = throttle_at
# In seconds, please.
self.timeframe = timeframe
if expiration is None:
# Expire in a week.
expiration = 604800
self.expiration = int(expiration)
def convert_identifier_to_key(self, identifier):
"""
Takes an identifier (like a username or IP address) and converts it
into a key usable by the cache system.
"""
bits = []
for char in identifier:
if char.isalnum() or char in ['_', '.', '-']:
bits.append(char)
safe_string = ''.join(bits)
return "%s_accesses" % safe_string
def should_be_throttled(self, identifier, **kwargs):
"""
Returns whether or not the user has exceeded their throttle limit. If
        throttled, can return either True, an int specifying the number of
seconds to wait, or a datetime object specifying when to retry the request.
Always returns ``False``, as this implementation does not actually
throttle the user.
"""
return False
def accessed(self, identifier, **kwargs):
"""
Handles recording the user's access.
Does nothing in this implementation.
"""
pass
class CacheThrottle(BaseThrottle):
"""
A throttling mechanism that uses just the cache.
"""
def should_be_throttled(self, identifier, **kwargs):
"""
Returns whether or not the user has exceeded their throttle limit. If
        throttled, can return either True, an int specifying the number of
seconds to wait, or a datetime object specifying when to retry the request.
Maintains a list of timestamps when the user accessed the api within
the cache.
Returns ``False`` if the user should NOT be throttled or ``True`` if
the user should be throttled.
"""
key = self.convert_identifier_to_key(identifier)
# Weed out anything older than the timeframe.
now = int(time.time())
timeframe = int(self.timeframe)
throttle_at = int(self.throttle_at)
minimum_time = now - timeframe
times_accessed = [access for access in cache.get(key, []) if access >= minimum_time]
cache.set(key, times_accessed, self.expiration)
if len(times_accessed) >= throttle_at:
# Throttle them.
return timeframe - (now - times_accessed[-throttle_at])
# Let them through.
return False
def accessed(self, identifier, **kwargs):
"""
Handles recording the user's access.
Stores the current timestamp in the "accesses" list within the cache.
"""
key = self.convert_identifier_to_key(identifier)
times_accessed = cache.get(key, [])
times_accessed.append(int(time.time()))
cache.set(key, times_accessed, self.expiration)
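# A minimal sketch of how a throttle is typically driven by calling code; the
# identifier and URL below are illustrative, not part of the tastypie API.
def _example_throttle_check(identifier='127.0.0.1'):
    throttle = CacheThrottle(throttle_at=150, timeframe=3600)
    wait = throttle.should_be_throttled(identifier, url='/api/v1/entry/')
    if not wait:
        throttle.accessed(identifier, url='/api/v1/entry/', request_method='get')
    return wait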
class CacheDBThrottle(CacheThrottle):
"""
A throttling mechanism that uses the cache for actual throttling but
writes-through to the database.
This is useful for tracking/aggregating usage through time, to possibly
build a statistics interface or a billing mechanism.
"""
def accessed(self, identifier, **kwargs):
"""
Handles recording the user's access.
Does everything the ``CacheThrottle`` class does, plus logs the
access within the database using the ``ApiAccess`` model.
"""
# Do the import here, instead of top-level, so that the model is
# only required when using this throttling mechanism.
from tastypie.models import ApiAccess
super(CacheDBThrottle, self).accessed(identifier, **kwargs)
# Write out the access to the DB for logging purposes.
ApiAccess.objects.create(
identifier=identifier,
url=kwargs.get('url', ''),
request_method=kwargs.get('request_method', '')
)
| bsd-3-clause | 645,301,260,950,032,600 | 34.224638 | 92 | 0.627443 | false |
keyurpatel076/MissionPlannerGit | packages/IronPython.StdLib.2.7.4/content/Lib/timeit.py | 76 | 12059 | #! /usr/bin/env python
"""Tool for measuring execution time of small code snippets.
This module avoids a number of common traps for measuring execution
times. See also Tim Peters' introduction to the Algorithms chapter in
the Python Cookbook, published by O'Reilly.
Library usage: see the Timer class.
Command line usage:
python timeit.py [-n N] [-r N] [-s S] [-t] [-c] [-h] [--] [statement]
Options:
-n/--number N: how many times to execute 'statement' (default: see below)
-r/--repeat N: how many times to repeat the timer (default 3)
-s/--setup S: statement to be executed once initially (default 'pass')
-t/--time: use time.time() (default on Unix)
-c/--clock: use time.clock() (default on Windows)
-v/--verbose: print raw timing results; repeat for more digits precision
-h/--help: print this usage message and exit
--: separate options from statement, use when statement starts with -
statement: statement to be timed (default 'pass')
A multi-line statement may be given by specifying each line as a
separate argument; indented lines are possible by enclosing an
argument in quotes and using leading spaces. Multiple -s options are
treated similarly.
If -n is not given, a suitable number of loops is calculated by trying
successive powers of 10 until the total time is at least 0.2 seconds.
The difference in default timer function is because on Windows,
clock() has microsecond granularity but time()'s granularity is 1/60th
of a second; on Unix, clock() has 1/100th of a second granularity and
time() is much more precise. On either platform, the default timer
functions measure wall clock time, not the CPU time. This means that
other processes running on the same computer may interfere with the
timing. The best thing to do when accurate timing is necessary is to
repeat the timing a few times and use the best time. The -r option is
good for this; the default of 3 repetitions is probably enough in most
cases. On Unix, you can use clock() to measure CPU time.
Note: there is a certain baseline overhead associated with executing a
pass statement. The code here doesn't try to hide it, but you should
be aware of it. The baseline overhead can be measured by invoking the
program without arguments.
The baseline overhead differs between Python versions! Also, to
fairly compare older Python versions to Python 2.3, you may want to
use python -O for the older versions to avoid timing SET_LINENO
instructions.
"""
import gc
import sys
import time
try:
import itertools
except ImportError:
# Must be an older Python version (see timeit() below)
itertools = None
__all__ = ["Timer"]
dummy_src_name = "<timeit-src>"
default_number = 1000000
default_repeat = 3
if sys.platform == "win32":
# On Windows, the best timer is time.clock()
default_timer = time.clock
else:
# On most other platforms the best timer is time.time()
default_timer = time.time
# Don't change the indentation of the template; the reindent() calls
# in Timer.__init__() depend on setup being indented 4 spaces and stmt
# being indented 8 spaces.
template = """
def inner(_it, _timer):
%(setup)s
_t0 = _timer()
for _i in _it:
%(stmt)s
_t1 = _timer()
return _t1 - _t0
"""
def reindent(src, indent):
"""Helper to reindent a multi-line statement."""
return src.replace("\n", "\n" + " "*indent)
def _template_func(setup, func):
"""Create a timer function. Used if the "statement" is a callable."""
def inner(_it, _timer, _func=func):
setup()
_t0 = _timer()
for _i in _it:
_func()
_t1 = _timer()
return _t1 - _t0
return inner
class Timer:
"""Class for timing execution speed of small code snippets.
The constructor takes a statement to be timed, an additional
statement used for setup, and a timer function. Both statements
default to 'pass'; the timer function is platform-dependent (see
module doc string).
To measure the execution time of the first statement, use the
timeit() method. The repeat() method is a convenience to call
timeit() multiple times and return a list of results.
The statements may contain newlines, as long as they don't contain
multi-line string literals.
"""
def __init__(self, stmt="pass", setup="pass", timer=default_timer):
"""Constructor. See class doc string."""
self.timer = timer
ns = {}
if isinstance(stmt, basestring):
stmt = reindent(stmt, 8)
if isinstance(setup, basestring):
setup = reindent(setup, 4)
src = template % {'stmt': stmt, 'setup': setup}
elif hasattr(setup, '__call__'):
src = template % {'stmt': stmt, 'setup': '_setup()'}
ns['_setup'] = setup
else:
raise ValueError("setup is neither a string nor callable")
self.src = src # Save for traceback display
code = compile(src, dummy_src_name, "exec")
exec code in globals(), ns
self.inner = ns["inner"]
elif hasattr(stmt, '__call__'):
self.src = None
if isinstance(setup, basestring):
_setup = setup
def setup():
exec _setup in globals(), ns
elif not hasattr(setup, '__call__'):
raise ValueError("setup is neither a string nor callable")
self.inner = _template_func(setup, stmt)
else:
raise ValueError("stmt is neither a string nor callable")
def print_exc(self, file=None):
"""Helper to print a traceback from the timed code.
Typical use:
t = Timer(...) # outside the try/except
try:
t.timeit(...) # or t.repeat(...)
except:
t.print_exc()
The advantage over the standard traceback is that source lines
in the compiled template will be displayed.
The optional file argument directs where the traceback is
sent; it defaults to sys.stderr.
"""
import linecache, traceback
if self.src is not None:
linecache.cache[dummy_src_name] = (len(self.src),
None,
self.src.split("\n"),
dummy_src_name)
# else the source is already stored somewhere else
traceback.print_exc(file=file)
def timeit(self, number=default_number):
"""Time 'number' executions of the main statement.
To be precise, this executes the setup statement once, and
then returns the time it takes to execute the main statement
a number of times, as a float measured in seconds. The
argument is the number of times through the loop, defaulting
to one million. The main statement, the setup statement and
the timer function to be used are passed to the constructor.
"""
if itertools:
it = itertools.repeat(None, number)
else:
it = [None] * number
gcold = gc.isenabled()
gc.disable()
timing = self.inner(it, self.timer)
if gcold:
gc.enable()
return timing
def repeat(self, repeat=default_repeat, number=default_number):
"""Call timeit() a few times.
This is a convenience function that calls the timeit()
repeatedly, returning a list of results. The first argument
specifies how many times to call timeit(), defaulting to 3;
        the second argument specifies the number argument, defaulting
to one million.
Note: it's tempting to calculate mean and standard deviation
from the result vector and report these. However, this is not
very useful. In a typical case, the lowest value gives a
lower bound for how fast your machine can run the given code
snippet; higher values in the result vector are typically not
caused by variability in Python's speed, but by other
processes interfering with your timing accuracy. So the min()
of the result is probably the only number you should be
interested in. After that, you should look at the entire
vector and apply common sense rather than statistics.
"""
r = []
for i in range(repeat):
t = self.timeit(number)
r.append(t)
return r
def timeit(stmt="pass", setup="pass", timer=default_timer,
number=default_number):
"""Convenience function to create Timer object and call timeit method."""
return Timer(stmt, setup, timer).timeit(number)
def repeat(stmt="pass", setup="pass", timer=default_timer,
repeat=default_repeat, number=default_number):
"""Convenience function to create Timer object and call repeat method."""
return Timer(stmt, setup, timer).repeat(repeat, number)
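# A minimal library-usage sketch for the Timer class and helpers above; the timed
# statement is illustrative and any valid Python statement string may be used.
def _example_usage():
    t = Timer("'-'.join(str(n) for n in range(100))")
    best = min(t.repeat(repeat=3, number=10000))
    return best / 10000  # seconds per single execution of the statement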
def main(args=None):
"""Main program, used when run as a script.
The optional argument specifies the command line to be parsed,
defaulting to sys.argv[1:].
The return value is an exit code to be passed to sys.exit(); it
may be None to indicate success.
When an exception happens during timing, a traceback is printed to
stderr and the return value is 1. Exceptions at other times
(including the template compilation) are not caught.
"""
if args is None:
args = sys.argv[1:]
import getopt
try:
opts, args = getopt.getopt(args, "n:s:r:tcvh",
["number=", "setup=", "repeat=",
"time", "clock", "verbose", "help"])
except getopt.error, err:
print err
print "use -h/--help for command line help"
return 2
timer = default_timer
stmt = "\n".join(args) or "pass"
number = 0 # auto-determine
setup = []
repeat = default_repeat
verbose = 0
precision = 3
for o, a in opts:
if o in ("-n", "--number"):
number = int(a)
if o in ("-s", "--setup"):
setup.append(a)
if o in ("-r", "--repeat"):
repeat = int(a)
if repeat <= 0:
repeat = 1
if o in ("-t", "--time"):
timer = time.time
if o in ("-c", "--clock"):
timer = time.clock
if o in ("-v", "--verbose"):
if verbose:
precision += 1
verbose += 1
if o in ("-h", "--help"):
print __doc__,
return 0
setup = "\n".join(setup) or "pass"
# Include the current directory, so that local imports work (sys.path
# contains the directory of this script, rather than the current
# directory)
import os
sys.path.insert(0, os.curdir)
t = Timer(stmt, setup, timer)
if number == 0:
# determine number so that 0.2 <= total time < 2.0
for i in range(1, 10):
number = 10**i
try:
x = t.timeit(number)
except:
t.print_exc()
return 1
if verbose:
print "%d loops -> %.*g secs" % (number, precision, x)
if x >= 0.2:
break
try:
r = t.repeat(repeat, number)
except:
t.print_exc()
return 1
best = min(r)
if verbose:
print "raw times:", " ".join(["%.*g" % (precision, x) for x in r])
print "%d loops," % number,
usec = best * 1e6 / number
if usec < 1000:
print "best of %d: %.*g usec per loop" % (repeat, precision, usec)
else:
msec = usec / 1000
if msec < 1000:
print "best of %d: %.*g msec per loop" % (repeat, precision, msec)
else:
sec = msec / 1000
print "best of %d: %.*g sec per loop" % (repeat, precision, sec)
return None
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 | -2,558,819,460,344,612,000 | 35.765244 | 78 | 0.604279 | false |
kuredatan/taxocluster | featuresVector.py | 2 | 2904 | import sys as s
from parsingMatch import parseAllFact
from parsingFasta import parseFasta
def sanitizeNode(node):
if not node or not (len(node) == 2):
#It means this node cannot appear in the taxonomic tree
return None
else:
return node
#@allMatches is a dictionary of (key=sample ID,value=list of sequences ID matching a read in this sample)
#@idSequences is a dictionary of (key=identifier of node,value=(name,rank of node))
#@filenames is the list of .match file names == list of samples ID /!\
#Returns a dictionary of (key=sample ID,value=list of nodes (name,rank) matching a read in this sample)
def getMatchingNodes(allMatches,idSequences,filenames):
matchingNodes = dict.fromkeys(filenames)
for sample in filenames:
matchingSequencesID = allMatches.get(sample)
matchingNodesInThisSample = []
if not (matchingSequencesID == None):
for sequenceID in matchingSequencesID:
node = idSequences.get(sequenceID)
cleanNode = sanitizeNode(node)
if cleanNode:
matchingNodesInThisSample.append(cleanNode)
matchingNodes[sample] = matchingNodesInThisSample
else:
print "The sample \'",sample,"\' could not be processed."
return matchingNodes
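#Illustrative sketch (toy data, not real parser output) of the mapping performed by
#getMatchingNodes: sequence IDs per sample are translated into (name,rank) nodes.
def exampleGetMatchingNodes():
  allMatches={'sampleA.match':['seq1','seq2']}
  idSequences={'seq1':('Bacteria','kingdom'),'seq2':('Firmicutes','phylum')}
  #Expected result: {'sampleA.match':[('Bacteria','kingdom'),('Firmicutes','phylum')]}
  return getMatchingNodes(allMatches,idSequences,['sampleA.match'])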
#Returns @matchingNodes, dictionary of (key=sample ID,value=list of nodes matched in this sample -i.e. at least in one read of this sample), and @idSequences, which is a dictionary of (key=identifier of sequence,value=(name,rank) of the node associated to this sequence)
#@filenames is the list of .match file names == list of samples ID /!\
#@fastaFileName is a string of the .fasta file name
#@sampleIDList is the list of samples ID
def featuresCreate(filenames,fastaFileName):
print "/!\ Parsing .match files"
print "[ You may have to wait a few seconds... ]"
#@allMatches is a dictionary of (key=sample ID,value=list of sequences ID matching a read in this sample)
import time
start = time.time()
allMatches = parseAllFact(filenames)
end = time.time()
print "TIME:",(end-start),"sec"
print "/!\ Parsing .fasta files"
print "[ You may have to wait a few seconds... ]"
try:
#@idSequences is a dictionary of (key=identifier,value=((name,rank))
#@paths is the list of paths from root to leaves
#@nodesListTree is the list of all nodes (internal nodes and leaves) in the tree
    #We do not care about the OTU for now
idSequences,paths,nodesListTree,_ = parseFasta(fastaFileName)
except IOError:
print "\nERROR: Maybe the filename",fastaFileName,".fasta does not exist in \"meta\" folder\n"
s.exit(0)
matchingNodes = getMatchingNodes(allMatches,idSequences,filenames)
print "/!\ Matching nodes list done."
return matchingNodes,idSequences,paths,nodesListTree
| mit | 3,946,318,179,366,013,000 | 48.220339 | 270 | 0.689738 | false |
opencloudinfra/orchestrator | venv/Lib/distutils/__init__.py | 1211 | 3983 | import os
import sys
import warnings
import imp
import opcode # opcode is not a virtualenv module, so we can use it to find the stdlib
# Important! To work on pypy, this must be a module that resides in the
# lib-python/modified-x.y.z directory
dirname = os.path.dirname
distutils_path = os.path.join(os.path.dirname(opcode.__file__), 'distutils')
if os.path.normpath(distutils_path) == os.path.dirname(os.path.normpath(__file__)):
warnings.warn(
"The virtualenv distutils package at %s appears to be in the same location as the system distutils?")
else:
__path__.insert(0, distutils_path)
real_distutils = imp.load_module("_virtualenv_distutils", None, distutils_path, ('', '', imp.PKG_DIRECTORY))
# Copy the relevant attributes
try:
__revision__ = real_distutils.__revision__
except AttributeError:
pass
__version__ = real_distutils.__version__
from distutils import dist, sysconfig
try:
basestring
except NameError:
basestring = str
## patch build_ext (distutils doesn't know how to get the libs directory
## path on windows - it hardcodes the paths around the patched sys.prefix)
if sys.platform == 'win32':
from distutils.command.build_ext import build_ext as old_build_ext
class build_ext(old_build_ext):
def finalize_options (self):
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, basestring):
self.library_dirs = self.library_dirs.split(os.pathsep)
self.library_dirs.insert(0, os.path.join(sys.real_prefix, "Libs"))
old_build_ext.finalize_options(self)
from distutils.command import build_ext as build_ext_module
build_ext_module.build_ext = build_ext
## distutils.dist patches:
old_find_config_files = dist.Distribution.find_config_files
def find_config_files(self):
found = old_find_config_files(self)
system_distutils = os.path.join(distutils_path, 'distutils.cfg')
#if os.path.exists(system_distutils):
# found.insert(0, system_distutils)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
user_filename = os.path.join(sys.prefix, user_filename)
if os.path.isfile(user_filename):
for item in list(found):
if item.endswith('pydistutils.cfg'):
found.remove(item)
found.append(user_filename)
return found
dist.Distribution.find_config_files = find_config_files
## distutils.sysconfig patches:
old_get_python_inc = sysconfig.get_python_inc
def sysconfig_get_python_inc(plat_specific=0, prefix=None):
if prefix is None:
prefix = sys.real_prefix
return old_get_python_inc(plat_specific, prefix)
sysconfig_get_python_inc.__doc__ = old_get_python_inc.__doc__
sysconfig.get_python_inc = sysconfig_get_python_inc
old_get_python_lib = sysconfig.get_python_lib
def sysconfig_get_python_lib(plat_specific=0, standard_lib=0, prefix=None):
if standard_lib and prefix is None:
prefix = sys.real_prefix
return old_get_python_lib(plat_specific, standard_lib, prefix)
sysconfig_get_python_lib.__doc__ = old_get_python_lib.__doc__
sysconfig.get_python_lib = sysconfig_get_python_lib
old_get_config_vars = sysconfig.get_config_vars
def sysconfig_get_config_vars(*args):
real_vars = old_get_config_vars(*args)
if sys.platform == 'win32':
lib_dir = os.path.join(sys.real_prefix, "libs")
if isinstance(real_vars, dict) and 'LIBDIR' not in real_vars:
real_vars['LIBDIR'] = lib_dir # asked for all
elif isinstance(real_vars, list) and 'LIBDIR' in args:
real_vars = real_vars + [lib_dir] # asked for list
return real_vars
sysconfig_get_config_vars.__doc__ = old_get_config_vars.__doc__
sysconfig.get_config_vars = sysconfig_get_config_vars
| gpl-3.0 | 8,522,154,490,537,783,000 | 38.435644 | 112 | 0.674868 | false |
vmarkovtsev/django | tests/modeladmin/models.py | 130 | 1603 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Band(models.Model):
name = models.CharField(max_length=100)
bio = models.TextField()
sign_date = models.DateField()
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Concert(models.Model):
main_band = models.ForeignKey(Band, models.CASCADE, related_name='main_concerts')
opening_band = models.ForeignKey(Band, models.CASCADE, related_name='opening_concerts',
blank=True)
day = models.CharField(max_length=3, choices=((1, 'Fri'), (2, 'Sat')))
transport = models.CharField(max_length=100, choices=(
(1, 'Plane'),
(2, 'Train'),
(3, 'Bus')
), blank=True)
class ValidationTestModel(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
users = models.ManyToManyField(User)
state = models.CharField(max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington")))
is_active = models.BooleanField(default=False)
pub_date = models.DateTimeField()
band = models.ForeignKey(Band, models.CASCADE)
# This field is intentionally 2 characters long (#16080).
no = models.IntegerField(verbose_name="Number", blank=True, null=True)
def decade_published_in(self):
return self.pub_date.strftime('%Y')[:3] + "0's"
class ValidationTestInlineModel(models.Model):
parent = models.ForeignKey(ValidationTestModel, models.CASCADE)
| bsd-3-clause | 2,076,538,974,243,730,700 | 32.395833 | 94 | 0.675608 | false |
wreckJ/intellij-community | python/helpers/pydev/runfiles.py | 43 | 10205 | import os
def main():
import sys
#Separate the nose params and the pydev params.
pydev_params = []
other_test_framework_params = []
found_other_test_framework_param = None
NOSE_PARAMS = '--nose-params'
PY_TEST_PARAMS = '--py-test-params'
for arg in sys.argv[1:]:
if not found_other_test_framework_param and arg != NOSE_PARAMS and arg != PY_TEST_PARAMS:
pydev_params.append(arg)
else:
if not found_other_test_framework_param:
found_other_test_framework_param = arg
else:
other_test_framework_params.append(arg)
#Here we'll run either with nose or with the pydev_runfiles.
import pydev_runfiles
import pydev_runfiles_xml_rpc
import pydevd_constants
from pydevd_file_utils import _NormFile
DEBUG = 0
if DEBUG:
sys.stdout.write('Received parameters: %s\n' % (sys.argv,))
sys.stdout.write('Params for pydev: %s\n' % (pydev_params,))
if found_other_test_framework_param:
sys.stdout.write('Params for test framework: %s, %s\n' % (found_other_test_framework_param, other_test_framework_params))
try:
configuration = pydev_runfiles.parse_cmdline([sys.argv[0]] + pydev_params)
except:
sys.stderr.write('Command line received: %s\n' % (sys.argv,))
raise
pydev_runfiles_xml_rpc.InitializeServer(configuration.port) #Note that if the port is None, a Null server will be initialized.
NOSE_FRAMEWORK = 1
PY_TEST_FRAMEWORK = 2
try:
if found_other_test_framework_param:
test_framework = 0 #Default (pydev)
if found_other_test_framework_param == NOSE_PARAMS:
import nose
test_framework = NOSE_FRAMEWORK
elif found_other_test_framework_param == PY_TEST_PARAMS:
import pytest
test_framework = PY_TEST_FRAMEWORK
else:
raise ImportError()
else:
raise ImportError()
except ImportError:
if found_other_test_framework_param:
sys.stderr.write('Warning: Could not import the test runner: %s. Running with the default pydev unittest runner instead.\n' % (
found_other_test_framework_param,))
test_framework = 0
#Clear any exception that may be there so that clients don't see it.
#See: https://sourceforge.net/tracker/?func=detail&aid=3408057&group_id=85796&atid=577329
if hasattr(sys, 'exc_clear'):
sys.exc_clear()
if test_framework == 0:
return pydev_runfiles.main(configuration) #Note: still doesn't return a proper value.
else:
#We'll convert the parameters to what nose or py.test expects.
#The supported parameters are:
#runfiles.py --config-file|-t|--tests <Test.test1,Test2> dirs|files --nose-params xxx yyy zzz
#(all after --nose-params should be passed directly to nose)
#In java:
#--tests = Constants.ATTR_UNITTEST_TESTS
#--config-file = Constants.ATTR_UNITTEST_CONFIGURATION_FILE
#The only thing actually handled here are the tests that we want to run, which we'll
#handle and pass as what the test framework expects.
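        #For example (illustrative names), tests ['MyTest.test_ok'] on file 'tests/test_mod.py'
        #become the nose argument 'tests/test_mod.py:MyTest.test_ok', while for py.test the
        #tests go into py_test_accept_filter and only the file path is appended to argv.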
py_test_accept_filter = {}
files_to_tests = configuration.files_to_tests
if files_to_tests:
#Handling through the file contents (file where each line is a test)
files_or_dirs = []
for file, tests in files_to_tests.items():
if test_framework == NOSE_FRAMEWORK:
for test in tests:
files_or_dirs.append(file + ':' + test)
elif test_framework == PY_TEST_FRAMEWORK:
file = _NormFile(file)
py_test_accept_filter[file] = tests
files_or_dirs.append(file)
else:
raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
else:
if configuration.tests:
#Tests passed (works together with the files_or_dirs)
files_or_dirs = []
for file in configuration.files_or_dirs:
if test_framework == NOSE_FRAMEWORK:
for t in configuration.tests:
files_or_dirs.append(file + ':' + t)
elif test_framework == PY_TEST_FRAMEWORK:
file = _NormFile(file)
py_test_accept_filter[file] = configuration.tests
files_or_dirs.append(file)
else:
raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
else:
#Only files or dirs passed (let it do the test-loading based on those paths)
files_or_dirs = configuration.files_or_dirs
argv = other_test_framework_params + files_or_dirs
if test_framework == NOSE_FRAMEWORK:
#Nose usage: http://somethingaboutorange.com/mrl/projects/nose/0.11.2/usage.html
#show_stdout_option = ['-s']
#processes_option = ['--processes=2']
argv.insert(0, sys.argv[0])
if DEBUG:
sys.stdout.write('Final test framework args: %s\n' % (argv[1:],))
import pydev_runfiles_nose
PYDEV_NOSE_PLUGIN_SINGLETON = pydev_runfiles_nose.StartPydevNosePluginSingleton(configuration)
argv.append('--with-pydevplugin')
# Return 'not' because it will return 'success' (so, exit == 0 if success)
return not nose.run(argv=argv, addplugins=[PYDEV_NOSE_PLUGIN_SINGLETON])
elif test_framework == PY_TEST_FRAMEWORK:
if DEBUG:
sys.stdout.write('Final test framework args: %s\n' % (argv,))
sys.stdout.write('py_test_accept_filter: %s\n' % (py_test_accept_filter,))
import os
try:
xrange
except:
xrange = range
for i in xrange(len(argv)):
arg = argv[i]
#Workaround bug in py.test: if we pass the full path it ends up importing conftest
#more than once (so, always work with relative paths).
if os.path.isfile(arg) or os.path.isdir(arg):
from pydev_imports import relpath
arg = relpath(arg)
argv[i] = arg
d = os.path.dirname(__file__)
if d not in sys.path:
sys.path.insert(0, d)
import pickle, zlib, base64
# Update environment PYTHONPATH so that it finds our plugin if using xdist.
os.environ['PYTHONPATH'] = os.pathsep.join(sys.path)
# Set what should be skipped in the plugin through an environment variable
s = base64.b64encode(zlib.compress(pickle.dumps(py_test_accept_filter)))
if pydevd_constants.IS_PY3K:
s = s.decode('ascii') # Must be str in py3.
os.environ['PYDEV_PYTEST_SKIP'] = s
# Identifies the main pid (i.e.: if it's not the main pid it has to connect back to the
# main pid to give xml-rpc notifications).
os.environ['PYDEV_MAIN_PID'] = str(os.getpid())
os.environ['PYDEV_PYTEST_SERVER'] = str(configuration.port)
argv.append('-p')
argv.append('pydev_runfiles_pytest2')
if 'unittest' in sys.modules or 'unittest2' in sys.modules:
sys.stderr.write('pydev test runner error: imported unittest before running pytest.main\n')
return pytest.main(argv)
else:
raise AssertionError('Cannot handle test framework: %s at this point.' % (test_framework,))
if __name__ == '__main__':
try:
main()
finally:
try:
#The server is not a daemon thread, so, we have to ask for it to be killed!
import pydev_runfiles_xml_rpc
pydev_runfiles_xml_rpc.forceServerKill()
except:
pass #Ignore any errors here
import sys
import threading
if hasattr(sys, '_current_frames') and hasattr(threading, 'enumerate'):
import time
import traceback
class DumpThreads(threading.Thread):
def run(self):
time.sleep(10)
thread_id_to_name = {}
try:
for t in threading.enumerate():
thread_id_to_name[t.ident] = '%s (daemon: %s)' % (t.name, t.daemon)
except:
pass
stack_trace = [
'===============================================================================',
'pydev pyunit runner: Threads still found running after tests finished',
'================================= Thread Dump =================================']
for thread_id, stack in sys._current_frames().items():
stack_trace.append('\n-------------------------------------------------------------------------------')
stack_trace.append(" Thread %s" % thread_id_to_name.get(thread_id, thread_id))
stack_trace.append('')
if 'self' in stack.f_locals:
sys.stderr.write(str(stack.f_locals['self'])+'\n')
for filename, lineno, name, line in traceback.extract_stack(stack):
stack_trace.append(' File "%s", line %d, in %s' % (filename, lineno, name))
if line:
stack_trace.append(" %s" % (line.strip()))
stack_trace.append('\n=============================== END Thread Dump ===============================')
sys.stderr.write('\n'.join(stack_trace))
dump_current_frames_thread = DumpThreads()
dump_current_frames_thread.setDaemon(True) # Daemon so that this thread doesn't halt it!
dump_current_frames_thread.start()
| apache-2.0 | 8,427,868,121,583,086,000 | 39.496032 | 139 | 0.546399 | false |
jzbontar/orange-tree | Orange/canvas/gui/splashscreen.py | 16 | 3975 | """
A splash screen widget with support for positioning of the message text.
"""
from PyQt4.QtGui import (
QSplashScreen, QWidget, QPixmap, QPainter, QTextDocument,
QTextBlockFormat, QTextCursor, QApplication
)
from PyQt4.QtCore import Qt
from .utils import is_transparency_supported
class SplashScreen(QSplashScreen):
"""
Splash screen widget.
Parameters
----------
parent : :class:`QWidget`
Parent widget
pixmap : :class:`QPixmap`
Splash window pixmap.
textRect : :class:`QRect`
Bounding rectangle of the shown message on the widget.
"""
def __init__(self, parent=None, pixmap=None, textRect=None, **kwargs):
QSplashScreen.__init__(self, parent, **kwargs)
self.__textRect = textRect
self.__message = ""
self.__color = Qt.black
self.__alignment = Qt.AlignLeft
if pixmap is None:
pixmap = QPixmap()
self.setPixmap(pixmap)
self.setAutoFillBackground(False)
        # Also set FramelessWindowHint (if not already set)
self.setWindowFlags(self.windowFlags() | Qt.FramelessWindowHint)
def setTextRect(self, rect):
"""
Set the rectangle (:class:`QRect`) in which to show the message text.
"""
if self.__textRect != rect:
self.__textRect = rect
self.update()
def textRect(self):
"""
Return the text message rectangle.
"""
return self.__textRect
def showEvent(self, event):
QSplashScreen.showEvent(self, event)
# Raise to top on show.
self.raise_()
def drawContents(self, painter):
"""
Reimplementation of drawContents to limit the drawing
        inside `textRect`.
"""
painter.setPen(self.__color)
painter.setFont(self.font())
if self.__textRect:
rect = self.__textRect
else:
rect = self.rect().adjusted(5, 5, -5, -5)
if Qt.mightBeRichText(self.__message):
doc = QTextDocument()
doc.setHtml(self.__message)
doc.setTextWidth(rect.width())
cursor = QTextCursor(doc)
cursor.select(QTextCursor.Document)
fmt = QTextBlockFormat()
fmt.setAlignment(self.__alignment)
cursor.mergeBlockFormat(fmt)
painter.save()
painter.translate(rect.topLeft())
doc.drawContents(painter)
painter.restore()
else:
painter.drawText(rect, self.__alignment, self.__message)
def showMessage(self, message, alignment=Qt.AlignLeft, color=Qt.black):
"""
Show the `message` with `color` and `alignment`.
"""
# Need to store all this arguments for drawContents (no access
# methods)
self.__alignment = alignment
self.__color = color
self.__message = message
QSplashScreen.showMessage(self, message, alignment, color)
QApplication.instance().processEvents()
# Reimplemented to allow graceful fall back if the windowing system
# does not support transparency.
def setPixmap(self, pixmap):
self.setAttribute(Qt.WA_TranslucentBackground,
pixmap.hasAlpha() and \
is_transparency_supported())
self.__pixmap = pixmap
QSplashScreen.setPixmap(self, pixmap)
if pixmap.hasAlpha() and not is_transparency_supported():
self.setMask(pixmap.createHeuristicMask())
def repaint(self):
QWidget.repaint(self)
QApplication.flush()
def event(self, event):
if event.type() == event.Paint:
pixmap = self.__pixmap
painter = QPainter(self)
if not pixmap.isNull():
painter.drawPixmap(0, 0, pixmap)
self.drawContents(painter)
return True
return QSplashScreen.event(self, event)
| gpl-3.0 | -4,653,576,518,878,900,000 | 29.113636 | 77 | 0.590692 | false |
Andrew-McNab-UK/DIRAC | tests/Integration/DataManagementSystem/FC_scaling_test.py | 3 | 10751 | ########################################################################
# File : FC_Scaling_test
# Author : Andrei Tsaregorodtsev
########################################################################
"""
Test suite for a generic File Catalog scalability tests
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Base import Script
from DIRAC import S_OK
import sys, pprint, os, numpy
Script.setUsageMessage( """
Test suite for a generic File Catalog scalability tests
""" )
testType = 'noTest'
def setTestType( value ):
global testType
testType = value
return S_OK()
testDir = ''
def setTestDirectory( value ):
global testDir
testDir = value
return S_OK()
nClients = 1
def setNumberOfClients( value ):
global nClients
nClients = int( value )
return S_OK()
nQueries = 100
def setNumberOfQueries( value ):
global nQueries
nQueries = int( value )
return S_OK()
lfnListFile = 'lfns_100.txt'
def setLFNListFile( value ):
global lfnListFile
lfnListFile = value
return S_OK()
outputFile = "output.txt"
def setOutputFile( value ):
global outputFile
outputFile = value
return S_OK()
catalog = 'AugerTestFileCatalog'
def setCatalog( value ):
global catalog
catalog = value
return S_OK()
fullTest = False
def setFullTest( value ):
global fullTest
fullTest = True
return S_OK()
shortRange = False
def setShortRange( value ):
global shortRange
shortRange = True
return S_OK()
verbosity = 0
def setVerbosity( value ):
global verbosity
verbosity += 1
return S_OK()
Script.registerSwitch( "t:", "type=", "test type", setTestType )
Script.registerSwitch( "D:", "directory=", "test directory", setTestDirectory )
Script.registerSwitch( "N:", "clients=", "number of parallel clients", setNumberOfClients )
Script.registerSwitch( "Q:", "queries=", "number of queries in one test", setNumberOfQueries )
Script.registerSwitch( "C:", "catalog=", "catalog to use", setCatalog )
Script.registerSwitch( "L:", "lfnList=", "file with a list of LFNs", setLFNListFile )
Script.registerSwitch( "F", "fullTest", "run the full test", setFullTest )
Script.registerSwitch( "O:", "output=", "file with output result", setOutputFile )
Script.registerSwitch( "v", "verbose", "file with output result", setVerbosity )
Script.registerSwitch( "S", "shortRange", "run short parameter range", setShortRange )
Script.parseCommandLine( ignoreErrors = True )
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
from DIRAC.Core.Utilities.ProcessPool import ProcessPool
from DIRAC import S_OK
import time
fc = FileCatalog( catalogs=[catalog] )
resultTest = []
def listDirectory( n_queries ):
global testDir
start = time.time()
sCount = 0
fCount = 0
resultList = []
startTotal = time.time()
for i in xrange( n_queries ) :
start = time.time()
result = fc.listDirectory( testDir )
resultList.append( time.time() - start )
if result['OK']:
sCount += 1
else:
fCount += 1
total = time.time() - startTotal
average, error = doStats( resultList )
if verbosity >= 1:
print "getReplicas: Total time", total, 'Success', sCount, 'Failure', \
fCount, 'Average', average, 'Stdvar', error
result = S_OK( (resultList, sCount, fCount) )
return result
def getBulkReplicas( n_queries ):
global lfnListFile, verbosity
lFile = open(lfnListFile)
lfnList = [ l.strip().replace('//','/') for l in lFile.read().strip().split() ]
lFile.close()
start = time.time()
sCount = 0
fCount = 0
resultList = []
startTotal = time.time()
for i in xrange( n_queries ) :
start = time.time()
result = fc.getReplicas( lfnList )
resultList.append( time.time() - start )
if verbosity >= 2:
print "getReplicas: received lfns", len(result['Value']['Successful'])
for lfn in result['Value']['Successful']:
print result['Value']['Successful'][lfn]
if verbosity >= 3:
for lfn,res in result['Value']['Successful'].items():
print lfn
print res
break
if result['OK']:
sCount += 1
else:
fCount += 1
total = time.time() - startTotal
average, error = doStats( resultList )
if verbosity >= 1:
print "getReplicas: Total time", total, 'Success', sCount, 'Failure', \
fCount, 'Average', average, 'Stdvar', error
result = S_OK( (resultList, sCount, fCount) )
return result
def getDirectoryReplicas( n_queries ):
global testDir, verbosity
sCount = 0
fCount = 0
resultList = []
startTotal = time.time()
for i in xrange( n_queries ) :
start = time.time()
result = fc.getDirectoryReplicas( testDir )
resultList.append( time.time() - start )
if verbosity >= 2:
print "Returned values", len(result['Value']['Successful'][testDir])
for lfn,res in result['Value']['Successful'][testDir].items():
print lfn
print res
break
if result['OK']:
sCount += 1
else:
fCount += 1
total = time.time() - startTotal
average, error = doStats( resultList )
if verbosity >= 1:
print "getDirectoryReplicas: Total time", total, 'Success', sCount, 'Failure', \
fCount, '\nAverage', average, 'Stdvar', error
result = S_OK( (resultList, sCount, fCount) )
return result
def finalize(task,result):
global resultTest, verbosity
if verbosity >= 2:
if result['OK']:
print "Test time ", result['Value'], task.getTaskID()
else:
print "Error:", result['Message']
resultTest.append( result['Value'] )
def doException( expt ):
print "Exception", expt
def runTest( ):
global nClients, nQueries, testType, resultTest, testDir, lfnListFile
resultTest = []
pp = ProcessPool( nClients )
testFunction = eval( testType )
for c in xrange( nClients ):
pp.createAndQueueTask( testFunction, [nQueries],
callback=finalize,
exceptionCallback=doException )
pp.processAllResults(3600)
pp.finalize(0)
timeResult = []
for testTime,success,failure in resultTest:
#print testTime,success,failure
timeResult += testTime
averageTime, errorTime = doStats( timeResult )
rateResult = [ nClients/t for t in timeResult ]
averageRate, errorRate = doStats( rateResult )
if testDir:
print "\nTest results for clients %d, %s" % ( nClients, testDir )
else:
print "\nTest results for clients %d, %s" % ( nClients, lfnListFile )
print "Query time: %.2f +/- %.2f" % (averageTime, errorTime)
print "Query rate: %.2f +/- %.2f" % (averageRate, errorRate)
return( (averageTime, errorTime), (averageRate, errorRate) )
def doStats( testArray ):
array = list( testArray )
# Delete min and max value first
del array[ array.index(max(array)) ]
del array[ array.index(min(array)) ]
numArray = numpy.array( array )
average = numpy.mean( numArray )
stddev = numpy.std( numArray )
return (average, stddev)
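# Worked example of doStats (illustrative numbers): for [1.0, 1.2, 0.9, 5.0, 1.1]
# the max (5.0) and min (0.9) are dropped first, so the mean is taken over
# [1.0, 1.2, 1.1], giving roughly 1.1 with a small standard deviation.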
numberOfFilesList = [ 10, 100, 500, 1000, 2000, 5000, 10000, 15000, 20000 ]
numberOfFilesList_short = [ 100, 1000, 5000, 10000, 20000 ]
numberOfClientsList = [1,2,3,5,7,10,12,15,20,30,50,75]
numberOfClientsList_short = [1,5,10,20]
directoriesList = [ (35455, "/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en18.000/th0.65/2008/11/12"),
(24024, "/auger/prod/QGSjetII_gr20/2008/09/04/en17.500/th0.65"),
#(15205, "/auger/generated/2012-09-03"),
(18391,"/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en17.500/th0.65/2008/11/11"),
(9907, "/auger/prod/QGSjetII_gr20/2008/09/03/en17.500/th0.65"),
(5157, "/auger/prod/QGSjetII_gr20/2008/09/04/en20.000/th0.65"),
(2538, "/auger/prod/QGSjetII_gr21/2009/01/12/en18.500/th0.65"),
(1500, "/auger/prod/epos_gr03_sim/en17.500/th26.000"),
(502, "/auger/prod/REPLICATED20081014/epos_gr08/en21.250/th26.000")
]
directoriesList_short = [ (35455, "/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en18.000/th0.65/2008/11/12"),
(18391,"/auger/prod/QGSjetII_gr20_simADSTv2r5p1/en17.500/th0.65/2008/11/11"),
(5157, "/auger/prod/QGSjetII_gr20/2008/09/04/en20.000/th0.65"),
(1000, "/auger/prod/PhotonLib_gr22/2009/02/27/en17.500/th26.000")
]
directoriesList.reverse()
directoriesList_short.reverse()
def executeTest( nc, nf, queryDict, rateDict, queryDict_r, rateDict_r ):
global nClients
nClients = nc
t1,t2 = runTest()
query,querys = t1
rate, rates = t2
fileLabel = "%d files" % nf
queryDict.setdefault( fileLabel, {} )
queryDict[fileLabel][nc] = (query,querys)
rateDict.setdefault( fileLabel, {} )
rateDict[fileLabel][nc] = (rate,rates)
clientLabel = "%d clients" % nc
queryDict_r.setdefault( clientLabel, {} )
queryDict_r[clientLabel][nf] = (query,querys)
rateDict_r.setdefault( clientLabel, {} )
rateDict_r[clientLabel][nf] = (rate,rates)
def runFullTest():
global outputFile, nClients, testDir, lfnListFile, shortRange
queryDict = {}
rateDict = {}
queryDict_r = {}
rateDict_r = {}
ncList = numberOfClientsList
if shortRange:
ncList = numberOfClientsList_short
nfList = numberOfFilesList
if shortRange:
nfList = numberOfFilesList_short
ndList = directoriesList
if shortRange:
ndList = directoriesList_short
for nc in ncList:
if testType in ['getBulkReplicas']:
for nf in nfList:
lfnListFile = "lfns_%d.txt" % nf
executeTest( nc, nf, queryDict, rateDict, queryDict_r, rateDict_r )
elif testType in ['getDirectoryReplicas', "listDirectory"]:
for nf, directory in ndList:
testDir = directory
executeTest( nc, nf, queryDict, rateDict, queryDict_r, rateDict_r )
# Writing out result
outFile = open( outputFile, "w" )
outFile.write( "Test type %s \n" % testType )
outFile.write( "Number of queries per unit test %d \n" % nQueries )
outFile.write( "Results: \n\n\n" )
outFile.write( 'data_f = ' + str( queryDict ) + '\n\n\n' )
outFile.write( 'data_f_r = ' + str( rateDict ) + '\n\n\n' )
outFile.write( 'data_c = ' + str( queryDict_r ) + '\n\n\n' )
outFile.write( 'data_c_r = ' + str( rateDict_r ) + '\n\n\n' )
outFile.close()
pprint.pprint( queryDict )
pprint.pprint( rateDict )
pprint.pprint( queryDict_r )
pprint.pprint( rateDict_r )
#########################################################################
if os.path.exists( outputFile ):
print "Output file %s already exists, exiting ..."
sys.exit(-1)
if fullTest:
runFullTest()
else:
runTest()
| gpl-3.0 | 3,981,916,478,692,358,000 | 26.780362 | 104 | 0.628407 | false |
trabacus-softapps/openerp-8.0-cc | openerp/addons/survey/wizard/survey_print_statistics.py | 4 | 1888 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class survey_print_statistics(osv.osv_memory):
_name = 'survey.print.statistics'
_columns = {
'survey_ids': fields.many2many('survey', string="Survey", required="1"),
}
def action_next(self, cr, uid, ids, context=None):
"""
Print Survey Statistics in pdf format.
"""
if context is None:
context = {}
datas = {'ids': context.get('active_ids', [])}
res = self.read(cr, uid, ids, ['survey_ids'], context=context)
res = res and res[0] or {}
datas['form'] = res
datas['model'] = 'survey.print.statistics'
return {
'type': 'ir.actions.report.xml',
'report_name': 'survey.analysis',
'datas': datas,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -749,390,944,300,806,000 | 37.530612 | 80 | 0.587924 | false |
FFMG/myoddweb.piger | monitor/api/python/Python-3.7.2/Lib/test/test_int_literal.py | 21 | 7053 | """Test correct treatment of hex/oct constants.
This is complex because of changes due to PEP 237.
"""
import unittest
class TestHexOctBin(unittest.TestCase):
def test_hex_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0x0, 0X0)
self.assertEqual(0x1, 0X1)
self.assertEqual(0x123456789abcdef, 0X123456789abcdef)
# Baseline tests
self.assertEqual(0x0, 0)
self.assertEqual(0x10, 16)
self.assertEqual(0x7fffffff, 2147483647)
self.assertEqual(0x7fffffffffffffff, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x0), 0)
self.assertEqual(-(0x10), -16)
self.assertEqual(-(0x7fffffff), -2147483647)
self.assertEqual(-(0x7fffffffffffffff), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0x0, 0)
self.assertEqual(-0x10, -16)
self.assertEqual(-0x7fffffff, -2147483647)
self.assertEqual(-0x7fffffffffffffff, -9223372036854775807)
def test_hex_unsigned(self):
# Positive constants
self.assertEqual(0x80000000, 2147483648)
self.assertEqual(0xffffffff, 4294967295)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x80000000), -2147483648)
self.assertEqual(-(0xffffffff), -4294967295)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x80000000, -2147483648)
self.assertEqual(-0xffffffff, -4294967295)
# Positive constants
self.assertEqual(0x8000000000000000, 9223372036854775808)
self.assertEqual(0xffffffffffffffff, 18446744073709551615)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0x8000000000000000), -9223372036854775808)
self.assertEqual(-(0xffffffffffffffff), -18446744073709551615)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0x8000000000000000, -9223372036854775808)
self.assertEqual(-0xffffffffffffffff, -18446744073709551615)
def test_oct_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0o0, 0O0)
self.assertEqual(0o1, 0O1)
self.assertEqual(0o1234567, 0O1234567)
# Baseline tests
self.assertEqual(0o0, 0)
self.assertEqual(0o20, 16)
self.assertEqual(0o17777777777, 2147483647)
self.assertEqual(0o777777777777777777777, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o0), 0)
self.assertEqual(-(0o20), -16)
self.assertEqual(-(0o17777777777), -2147483647)
self.assertEqual(-(0o777777777777777777777), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0o0, 0)
self.assertEqual(-0o20, -16)
self.assertEqual(-0o17777777777, -2147483647)
self.assertEqual(-0o777777777777777777777, -9223372036854775807)
def test_oct_unsigned(self):
# Positive constants
self.assertEqual(0o20000000000, 2147483648)
self.assertEqual(0o37777777777, 4294967295)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o20000000000), -2147483648)
self.assertEqual(-(0o37777777777), -4294967295)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o20000000000, -2147483648)
self.assertEqual(-0o37777777777, -4294967295)
# Positive constants
self.assertEqual(0o1000000000000000000000, 9223372036854775808)
self.assertEqual(0o1777777777777777777777, 18446744073709551615)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0o1000000000000000000000), -9223372036854775808)
self.assertEqual(-(0o1777777777777777777777), -18446744073709551615)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0o1000000000000000000000, -9223372036854775808)
self.assertEqual(-0o1777777777777777777777, -18446744073709551615)
def test_bin_baseline(self):
# A few upper/lowercase tests
self.assertEqual(0b0, 0B0)
self.assertEqual(0b1, 0B1)
self.assertEqual(0b10101010101, 0B10101010101)
# Baseline tests
self.assertEqual(0b0, 0)
self.assertEqual(0b10000, 16)
self.assertEqual(0b1111111111111111111111111111111, 2147483647)
self.assertEqual(0b111111111111111111111111111111111111111111111111111111111111111, 9223372036854775807)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b0), 0)
self.assertEqual(-(0b10000), -16)
self.assertEqual(-(0b1111111111111111111111111111111), -2147483647)
self.assertEqual(-(0b111111111111111111111111111111111111111111111111111111111111111), -9223372036854775807)
# Ditto with a minus sign and NO parentheses
self.assertEqual(-0b0, 0)
self.assertEqual(-0b10000, -16)
self.assertEqual(-0b1111111111111111111111111111111, -2147483647)
self.assertEqual(-0b111111111111111111111111111111111111111111111111111111111111111, -9223372036854775807)
def test_bin_unsigned(self):
# Positive constants
self.assertEqual(0b10000000000000000000000000000000, 2147483648)
self.assertEqual(0b11111111111111111111111111111111, 4294967295)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b10000000000000000000000000000000), -2147483648)
self.assertEqual(-(0b11111111111111111111111111111111), -4294967295)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b10000000000000000000000000000000, -2147483648)
self.assertEqual(-0b11111111111111111111111111111111, -4294967295)
# Positive constants
self.assertEqual(0b1000000000000000000000000000000000000000000000000000000000000000, 9223372036854775808)
self.assertEqual(0b1111111111111111111111111111111111111111111111111111111111111111, 18446744073709551615)
# Ditto with a minus sign and parentheses
self.assertEqual(-(0b1000000000000000000000000000000000000000000000000000000000000000), -9223372036854775808)
self.assertEqual(-(0b1111111111111111111111111111111111111111111111111111111111111111), -18446744073709551615)
# Ditto with a minus sign and NO parentheses
# This failed in Python 2.2 through 2.2.2 and in 2.3a1
self.assertEqual(-0b1000000000000000000000000000000000000000000000000000000000000000, -9223372036854775808)
self.assertEqual(-0b1111111111111111111111111111111111111111111111111111111111111111, -18446744073709551615)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 1,412,140,431,842,650,000 | 48.321678 | 118 | 0.713313 | false |
androomerrill/scikit-nano | sknano/generators/__init__.py | 2 | 1771 | # -*- coding: utf-8 -*-
"""
======================================================================
Structure generators (:mod:`sknano.generators`)
======================================================================
.. currentmodule:: sknano.generators
Contents
========
Nanostructure generators
------------------------
.. autosummary::
:toctree: generated/
GeneratorBase
FullereneGenerator
GrapheneGenerator
PrimitiveCellGrapheneGenerator
ConventionalCellGrapheneGenerator
BilayerGrapheneGenerator
MWNTGenerator
MWNTBundleGenerator
SWNTGenerator
SWNTBundleGenerator
UnrolledSWNTGenerator
Bulk structure generators
--------------------------
.. autosummary::
:toctree: generated/
BulkGeneratorBase
AlphaQuartzGenerator
GoldGenerator
CopperGenerator
MoS2Generator
CaesiumChlorideStructureGenerator
DiamondStructureGenerator
BCCStructureGenerator
FCCStructureGenerator
RocksaltStructureGenerator
ZincblendeStructureGenerator
Other
-----
.. autodata:: STRUCTURE_GENERATORS
:annotation: = tuple of recognized generator classes.
"""
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
__docformat__ = 'restructuredtext en'
from ._base import *
from ._bulk_structure_generator import *
from ._mixins import *
from ._fullerene_generator import *
from ._graphene_generator import *
from ._bilayer_graphene_generator import *
from ._swnt_generator import *
from ._mwnt_generator import *
from ._nanotube_bundle_generator import *
from ._swnt_bundle_generator import *
from ._mwnt_bundle_generator import *
from ._unrolled_swnt_generator import *
# from ._defect_generators import *
__all__ = [s for s in dir() if not s.startswith('_')]
| bsd-2-clause | -6,295,112,794,061,266,000 | 23.260274 | 70 | 0.67476 | false |
v-iam/azure-sdk-for-python | azure-batch/azure/batch/models/node_file.py | 3 | 1414 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NodeFile(Model):
"""Information about a file or directory on a compute node.
:param name: The file path.
:type name: str
:param url: The URL of the file.
:type url: str
:param is_directory: Whether the object represents a directory.
:type is_directory: bool
:param properties: The file properties.
:type properties: :class:`FileProperties
<azure.batch.models.FileProperties>`
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'url': {'key': 'url', 'type': 'str'},
'is_directory': {'key': 'isDirectory', 'type': 'bool'},
'properties': {'key': 'properties', 'type': 'FileProperties'},
}
def __init__(self, name=None, url=None, is_directory=None, properties=None):
self.name = name
self.url = url
self.is_directory = is_directory
self.properties = properties
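# Hedged usage sketch (not part of the generated model code): instances are
# normally produced by the service deserializer, but they can also be built
# directly, e.g. for tests. The values below are placeholders.
#
#     >>> stdout_file = NodeFile(name='wd/stdout.txt',
#     ...                        url='https://example.invalid/stdout.txt',
#     ...                        is_directory=False)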
| mit | 7,177,161,634,673,814,000 | 34.35 | 80 | 0.580622 | false |