repo_name (stringlengths 5-100) | ref (stringlengths 12-67) | path (stringlengths 4-244) | copies (stringlengths 1-8) | content (stringlengths 0-1.05M, ⌀ = null) |
---|---|---|---|---|
maxamillion/ansible | refs/heads/devel | test/units/module_utils/basic/test_run_command.py | 17 | # -*- coding: utf-8 -*-
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
from itertools import product
from io import BytesIO
import pytest
from ansible.module_utils._text import to_native
from ansible.module_utils.six import PY2
from ansible.module_utils.compat import selectors
class OpenBytesIO(BytesIO):
"""BytesIO with dummy close() method
So that you can inspect the content after close() was called.
"""
def close(self):
pass
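# A quick sketch of what the mock_os fixture below provides (drawn from how the
# tests consume it): chdir() fails only for '/inaccessible', abspath() resolves
# relative paths against the mocked getcwd(), expanduser/expandvars are identity
# functions, and PATH is pinned to '/bin'.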
@pytest.fixture
def mock_os(mocker):
def mock_os_chdir(path):
if path == '/inaccessible':
raise OSError(errno.EPERM, "Permission denied: '/inaccessible'")
def mock_os_abspath(path):
if path.startswith('/'):
return path
else:
return os.getcwd.return_value + '/' + path
os = mocker.patch('ansible.module_utils.basic.os')
os.path.expandvars.side_effect = lambda x: x
os.path.expanduser.side_effect = lambda x: x
os.environ = {'PATH': '/bin'}
os.getcwd.return_value = '/home/foo'
os.path.isdir.return_value = True
os.chdir.side_effect = mock_os_chdir
os.path.abspath.side_effect = mock_os_abspath
yield os
class DummyFileObj():
def __init__(self, fileobj):
self.fileobj = fileobj
class SpecialBytesIO(BytesIO):
def __init__(self, *args, **kwargs):
fh = kwargs.pop('fh', None)
super(SpecialBytesIO, self).__init__(*args, **kwargs)
self.fh = fh
def fileno(self):
return self.fh
# We need this because some tests swap in new values for stdout and stderr.
# The new value changes the bytes the fake subprocess returns, but by the time
# the test gets there it is too late to replace the SpecialBytesIO objects
# that subprocess.Popen already returned for stdout and stderr, so comparing
# by fileno lets the old and new objects match. If we could figure out how to
# replace those as well, this would not be needed.
def __eq__(self, other):
if id(self) == id(other) or self.fh == other.fileno():
return True
return False
class DummyKey:
def __init__(self, fileobj):
self.fileobj = fileobj
@pytest.fixture
def mock_subprocess(mocker):
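# run_command polls the child's stdout/stderr through selectors.DefaultSelector;
# MockSelector below replaces it so that every select() call immediately reports
# the canned SpecialBytesIO buffers as readable, looked up by their fake fileno.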
class MockSelector(selectors.BaseSelector):
def __init__(self):
super(MockSelector, self).__init__()
self._file_objs = []
def register(self, fileobj, events, data=None):
self._file_objs.append(fileobj)
def unregister(self, fileobj):
self._file_objs.remove(fileobj)
def select(self, timeout=None):
ready = []
for file_obj in self._file_objs:
ready.append((DummyKey(subprocess._output[file_obj.fileno()]), selectors.EVENT_READ))
return ready
def get_map(self):
return self._file_objs
def close(self):
super(MockSelector, self).close()
self._file_objs = []
selectors.DefaultSelector = MockSelector
subprocess = mocker.patch('ansible.module_utils.basic.subprocess')
subprocess._output = {mocker.sentinel.stdout: SpecialBytesIO(b'', fh=mocker.sentinel.stdout),
mocker.sentinel.stderr: SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
cmd = mocker.MagicMock()
cmd.returncode = 0
cmd.stdin = OpenBytesIO()
cmd.stdout = subprocess._output[mocker.sentinel.stdout]
cmd.stderr = subprocess._output[mocker.sentinel.stderr]
subprocess.Popen.return_value = cmd
yield subprocess
@pytest.fixture()
def rc_am(mocker, am, mock_os, mock_subprocess):
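# 'am' is the AnsibleModule fixture (assumed to be provided by the shared test
# conftest); here it is wired up with the mocked os/subprocess and a fail_json
# that raises SystemExit, so tests can assert failures via pytest.raises(SystemExit).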
am.fail_json = mocker.MagicMock(side_effect=SystemExit)
am._os = mock_os
am._subprocess = mock_subprocess
yield am
class TestRunCommandArgs:
# Format is command as passed to run_command, command to Popen as list, command to Popen as string
ARGS_DATA = (
(['/bin/ls', 'a', 'b', 'c'], [b'/bin/ls', b'a', b'b', b'c'], b'/bin/ls a b c'),
('/bin/ls a " b" "c "', [b'/bin/ls', b'a', b' b', b'c '], b'/bin/ls a " b" "c "'),
)
# pylint bug: https://github.com/PyCQA/pylint/issues/511
# pylint: disable=undefined-variable
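# The generator below pairs each ARGS_DATA entry with shell in (True, False):
# with use_unsafe_shell=True run_command is expected to hand Popen the raw
# command string, otherwise the argv list (a reading of the test data, noted
# here for clarity).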
@pytest.mark.parametrize('cmd, expected, shell, stdin',
((arg, cmd_str if sh else cmd_lst, sh, {})
for (arg, cmd_lst, cmd_str), sh in product(ARGS_DATA, (True, False))),
indirect=['stdin'])
def test_args(self, cmd, expected, shell, rc_am):
rc_am.run_command(cmd, use_unsafe_shell=shell)
assert rc_am._subprocess.Popen.called
args, kwargs = rc_am._subprocess.Popen.call_args
assert args == (expected, )
assert kwargs['shell'] == shell
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_tuple_as_args(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command(('ls', '/'))
assert rc_am.fail_json.called
class TestRunCommandCwd:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd(self, mocker, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am.run_command('/bin/ls', cwd='/new')
assert rc_am._os.chdir.mock_calls == [mocker.call(b'/new'), mocker.call('/old'), ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd_relative_path(self, mocker, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am.run_command('/bin/ls', cwd='sub-dir')
assert rc_am._os.chdir.mock_calls == [mocker.call(b'/old/sub-dir'), mocker.call('/old'), ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd_not_a_dir(self, mocker, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am._os.path.isdir.side_effect = lambda d: d != '/not-a-dir'
rc_am.run_command('/bin/ls', cwd='/not-a-dir')
assert rc_am._os.chdir.mock_calls == [mocker.call('/old'), ]
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_cwd_not_a_dir_noignore(self, rc_am):
rc_am._os.getcwd.return_value = '/old'
rc_am._os.path.isdir.side_effect = lambda d: d != '/not-a-dir'
with pytest.raises(SystemExit):
rc_am.run_command('/bin/ls', cwd='/not-a-dir', ignore_invalid_cwd=False)
assert rc_am.fail_json.called
class TestRunCommandPrompt:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_bad_regex(self, rc_am):
with pytest.raises(SystemExit):
rc_am.run_command('foo', prompt_regex='[pP)assword:')
assert rc_am.fail_json.called
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_no_match(self, mocker, rc_am):
rc_am._os._cmd_out[mocker.sentinel.stdout] = BytesIO(b'hello')
(rc, _, _) = rc_am.run_command('foo', prompt_regex='[pP]assword:')
assert rc == 0
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_prompt_match_wo_data(self, mocker, rc_am):
rc_am._subprocess._output = {mocker.sentinel.stdout:
SpecialBytesIO(b'Authentication required!\nEnter password: ',
fh=mocker.sentinel.stdout),
mocker.sentinel.stderr:
SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
(rc, _, _) = rc_am.run_command('foo', prompt_regex=r'[pP]assword:', data=None)
assert rc == 257
class TestRunCommandRc:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_check_rc_false(self, rc_am):
rc_am._subprocess.Popen.return_value.returncode = 1
(rc, _, _) = rc_am.run_command('/bin/false', check_rc=False)
assert rc == 1
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_check_rc_true(self, rc_am):
rc_am._subprocess.Popen.return_value.returncode = 1
with pytest.raises(SystemExit):
rc_am.run_command('/bin/false', check_rc=True)
assert rc_am.fail_json.called
args, kwargs = rc_am.fail_json.call_args
assert kwargs['rc'] == 1
class TestRunCommandOutput:
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_text_stdin(self, rc_am):
(rc, stdout, stderr) = rc_am.run_command('/bin/foo', data='hello world')
assert rc_am._subprocess.Popen.return_value.stdin.getvalue() == b'hello world\n'
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_ascii_stdout(self, mocker, rc_am):
rc_am._subprocess._output = {mocker.sentinel.stdout:
SpecialBytesIO(b'hello', fh=mocker.sentinel.stdout),
mocker.sentinel.stderr:
SpecialBytesIO(b'', fh=mocker.sentinel.stderr)}
(rc, stdout, stderr) = rc_am.run_command('/bin/cat hello.txt')
assert rc == 0
# module_utils function: on py3 it returns text and on py2 it returns
# bytes, because run_command returns native strings
assert stdout == 'hello'
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_utf8_output(self, mocker, rc_am):
rc_am._subprocess._output = {mocker.sentinel.stdout:
SpecialBytesIO(u'Žarn§'.encode('utf-8'),
fh=mocker.sentinel.stdout),
mocker.sentinel.stderr:
SpecialBytesIO(u'لرئيسية'.encode('utf-8'),
fh=mocker.sentinel.stderr)}
(rc, stdout, stderr) = rc_am.run_command('/bin/something_ugly')
assert rc == 0
# module_utils function: on py3 it returns text and on py2 it returns
# bytes, because run_command returns native strings
assert stdout == to_native(u'Žarn§')
assert stderr == to_native(u'لرئيسية')
@pytest.mark.parametrize('stdin', [{}], indirect=['stdin'])
def test_run_command_fds(mocker, rc_am):
subprocess_mock = mocker.patch('ansible.module_utils.basic.subprocess')
subprocess_mock.Popen.side_effect = AssertionError
try:
rc_am.run_command('synchronize', pass_fds=(101, 42))
except SystemExit:
pass
if PY2:
assert subprocess_mock.Popen.call_args[1]['close_fds'] is False
assert 'pass_fds' not in subprocess_mock.Popen.call_args[1]
else:
assert subprocess_mock.Popen.call_args[1]['pass_fds'] == (101, 42)
assert subprocess_mock.Popen.call_args[1]['close_fds'] is True
|
lucalianas/ProMort | refs/heads/develop | promort/slides_manager/management/commands/link_cases_to_lab.py | 2 | # Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from csv import DictReader
from django.core.management.base import BaseCommand
from slides_manager.models import Laboratory, Case
import logging
logger = logging.getLogger('promort_commands')
class Command(BaseCommand):
help = """
"""
def add_arguments(self, parser):
parser.add_argument('--cases-map', dest='cases_map', type=str, required=True,
help='the CSV file containing the mapping of the cases to related laboratories')
def _get_cases_map(self, cases_file):
with open(cases_file) as cf:
reader = DictReader(cf, ['case', 'laboratory'])
cases_map = {}
for row in reader:
cases_map.setdefault(row['laboratory'], []).append(row['case'])
return cases_map
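# Illustrative CSV input (hypothetical file, no header row):
#   case_0001,LAB_A
#   case_0002,LAB_A
#   case_0003,LAB_B
# _get_cases_map turns this into
#   {'LAB_A': ['case_0001', 'case_0002'], 'LAB_B': ['case_0003']}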
def _get_laboratory(self, lab_label):
try:
return Laboratory.objects.get(label__iexact=lab_label)
except Laboratory.DoesNotExist:
logger.warning('Laboratory %s does not exist' % lab_label)
return None
def _get_case(self, case_id):
try:
return Case.objects.get(id=case_id)
except Case.DoesNotExist:
logger.warning('Case %s does not exist' % case_id)
return None
def _update_case(self, case_obj, laboratory_obj):
case_obj.laboratory = laboratory_obj
case_obj.save()
def handle(self, *args, **opts):
logger.info('=== Starting update job ===')
cases_map = self._get_cases_map(opts['cases_map'])
for lab, cases in cases_map.items():
lab_obj = self._get_laboratory(lab)
if lab_obj:
logger.info('Processing cases for laboratory %s', lab)
for case in cases:
case_obj = self._get_case(case)
if case_obj:
logger.info('Updating case %s' % case)
self._update_case(case_obj, lab_obj)
logger.info('=== Update job completed ===')
|
tucan21/python_zadania | refs/heads/master | test/test_fill_contacts.py | 1 | from model.contact import Contact
def test_fill_contacts(app, db):
contacts_ui = app.contact.get_contact_list()
contacts_db = db.get_contact_list()
assert sorted(contacts_ui, key=Contact.id_or_max) == sorted(contacts_db, key=Contact.id_or_max) |
FranMachio/plugin.video.Machio.fran | refs/heads/master | servers/servertools.py | 24 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Utilities to detect videos from the different connectors
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
#LvX Edited Patched
import re,sys
from core import scrapertools
from core import config
from core import logger
# Server lists used at playback time to explain to the user why a video cannot be played
# List of the servers that can be watched without any kind of premium account
FREE_SERVERS = []
FREE_SERVERS.extend(['directo','allmyvideos','adnstream','bliptv','divxstage','facebook','fourshared', 'hulkshare', 'twitvid'])
FREE_SERVERS.extend(['googlevideo','gigabyteupload','mediafire','moevideos','movshare','novamov']) #,'putlocker'
FREE_SERVERS.extend(['royalvids','sockshare','stagevu','tutv','userporn','veoh','videobam'])
FREE_SERVERS.extend(['vidbux','videoweed','vimeo','vk','watchfreeinhd','youtube'])#,'videobeer','nowdownload'
FREE_SERVERS.extend(['jumbofiles','nowvideo','streamcloud', 'zinwa', 'dailymotion','justintv', 'vidbull'])
FREE_SERVERS.extend(['vureel','nosvideo','videopremium','movreel','flashx','upafile'])
FREE_SERVERS.extend(['fileflyer','playedto','tunepk','powvideo','videomega','mega','vidspot','netutv','rutube'])
FREE_SERVERS.extend(['videozed','documentary','hugefiles', 'firedrive','videott','tumitv','gamovideo'])
FREE_SERVERS.extend(['torrent','video4you','mailru','streaminto','backin','akstream', 'speedvideo', 'junkyvideo', 'rapidvideo'])
# List of ALL the servers that work with an individual premium account
PREMIUM_SERVERS = ['uploadedto','nowvideo']
# List of ALL the servers supported by Filenium
FILENIUM_SERVERS = []
FILENIUM_SERVERS.extend(['linkto','uploadedto','gigasize','youtube','filepost','hotfile','rapidshare','turbobit','mediafire','bitshare','depositfiles'])
FILENIUM_SERVERS.extend(['oron','allmyvideos','novamov','videoweed','movshare','letitbit','shareonline','shareflare','rapidgator'])
FILENIUM_SERVERS.extend(['filefactory','netload','nowdownload','filevelocity','freakshare','userporn','divxstage','putlocker','extabit','vidxden'])
FILENIUM_SERVERS.extend(['vimeo','dailymotion','jumbofiles','zippyshare','glumbouploads','bayfiles','twoshared', 'fourshared','crocko','fiberupload'])
FILENIUM_SERVERS.extend(['ifile','megashares','slingfile','uploading','vipfile','filenium','movreel','one80upload','flashx','nowvideo','vk','moevideos'])
FILENIUM_SERVERS.extend(['cloudzer','filecloudio','luckyshare','lumfile','playedto','ryushare','streamcloud','videozed','xenubox','filesmonster'])
#wupload,fileserve
# List of ALL the servers supported by Real-Debrid
REALDEBRID_SERVERS = ['one80upload','tenupload','onefichier','onehostclick','twoshared','fourfastfile','fourshared','abc','asfile','badongo','bayfiles','bitshare','cbscom','cloudzer','cramit','crocko','cwtv','dailymotion','dateito',
'dengee','diglo','extabit','fiberupload','filedino','filefactory','fileflyer','filekeen','filemade','filemates','fileover','filepost',
'filesend','filesmonster','filevelocity','freakshare','free','furk','fyels','gigasize','gigaup','glumbouploads','goldfile','hitfile','hipfile','hostingbulk',
'hotfile','hulkshare','hulu','ifile','jakfile','jumbofiles','justintv','letitbit','loadto','mediafire','mega','megashare','megashares','mixturevideo','muchshare','netload',
'novafile','nowdownload','purevid','putbit','putlocker','redtube','rapidgator','rapidshare','rutube','ryushare','scribd','sendspace','sharebees','shareflare','shragle','slingfile','sockshare',
'soundcloud','speedyshare','turbobit','unibytes','uploadc','uploadedto','uploading','uploadspace','uptobox',
'userporn','veevr','vidbux','vidhog','vidxden','vimeo','vipfile','wattv','xfileshare','youporn','youtube','yunfile','zippyshare','justintv','nowvideo','ultramegabit','filesmonster','oboom']
#wupload,fileserve
ALLDEBRID_SERVERS = ['one80upload','onefichier','twoshared','fourfastfile','fourshared','albafile','bayfiles','bitshare','cloudzer','cramit','crocko','cyberlocker','dailymotion','dengee',
'depfile','dlfree','extabit','extmatrix','filefactory','fileflyer','filegag','filehost','fileover','filepost','filerio','filesabc',
'filesend','filesmonster','filestay','freakshare','gigasize','hotfile','hulkshare','jumbofiles','letitbit','loadto','mediafire','megashares','mixturevideo','netload',
'nitrobits','oteupload','purevid','putlocker','rapidgator','rapidshare','redtube','scribd','secureupload','sharebees','shareflare','slingfile','sockshare',
'soundcloud','speedload','speedyshare','turbobit', 'uloadto', 'uploadc','uploadedto','uploading','uptobox',
'userporn','vimeo','vipfile','youporn','youtube','yunfile','zippyshare','lumfile','ultramegabit','filesmonster']
# Complete list of all the servers supported by pelisalacarta, used to search for patterns
ALL_SERVERS = list( set(FREE_SERVERS) | set(FILENIUM_SERVERS) | set(REALDEBRID_SERVERS) | set(ALLDEBRID_SERVERS) )
ALL_SERVERS.sort()
# Generic function to find videos in a page
def find_video_items(item=None, data=None, channel=""):
logger.info("[launcher.py] findvideos")
# Download the page
if data is None:
from core import scrapertools
data = scrapertools.cache_page(item.url)
#logger.info(data)
# Search for the links to the videos
from core.item import Item
from servers import servertools
listavideos = servertools.findvideos(data)
if item is None:
item = Item()
itemlist = []
for video in listavideos:
scrapedtitle = item.title.strip() + " - " + video[0].strip()
scrapedurl = video[1]
server = video[2]
itemlist.append( Item(channel=item.channel, title=scrapedtitle , action="play" , server=server, page=item.page, url=scrapedurl, thumbnail=item.thumbnail, show=item.show , plot=item.plot , folder=False) )
return itemlist
def findvideosbyserver(data, serverid):
logger.info("[servertools.py] findvideos")
encontrados = set()
devuelve = []
try:
exec "from servers import "+serverid
exec "devuelve.extend("+serverid+".find_videos(data))"
except ImportError:
logger.info("No existe conector para "+serverid)
except:
logger.info("Error en el conector "+serverid)
import traceback,sys
from pprint import pprint
exc_type, exc_value, exc_tb = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_tb)
for line in lines:
line_splits = line.split("\n")
for line_split in line_splits:
logger.error(line_split)
return devuelve
def findvideos(data):
logger.info("[servertools.py] findvideos")
encontrados = set()
devuelve = []
# Run findvideos on each server
for serverid in ALL_SERVERS:
try:
# Replaces the code with a "Plex compatible" alternative
#exec "from servers import "+serverid
#exec "devuelve.extend("+serverid+".find_videos(data))"
servers_module = __import__("servers."+serverid)
server_module = getattr(servers_module,serverid)
devuelve.extend( server_module.find_videos(data) )
except ImportError:
logger.info("No existe conector para "+serverid)
except:
logger.info("Error en el conector "+serverid)
import traceback,sys
from pprint import pprint
exc_type, exc_value, exc_tb = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_tb)
for line in lines:
line_splits = line.split("\n")
for line_split in line_splits:
logger.error(line_split)
return devuelve
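# Each entry appended by a connector is a (title, url, server_id) tuple;
# find_video_items above consumes video[0], video[1] and video[2] accordingly.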
def get_video_urls(server,url):
'''
servers_module = __import__("servers."+server)
server_module = getattr(servers_module,server)
return server_module.get_video_url( page_url=url)
'''
video_urls,puede,motivo = resolve_video_urls_for_playing(server,url)
return video_urls
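# Hedged usage sketch (the URL is a placeholder):
#   video_urls = get_video_urls("youtube", "http://example.com/watch?v=...")
#   # -> list of [label, media_url] pairs, e.g. [[".mp4 [youtube]", "http://..."]]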
def get_channel_module(channel_name):
channels_module = __import__("channels."+channel_name)
channel_module = getattr(channels_module,channel_name)
return channel_module
def get_server_from_url(url):
encontrado = findvideos(url)
if len(encontrado)>0:
devuelve = encontrado[0][2]
else:
devuelve = "directo"
return devuelve
def resolve_video_urls_for_playing(server,url,video_password="",muestra_dialogo=False):
logger.info("[servertools.py] resolve_video_urls_for_playing, server="+server+", url="+url)
video_urls = []
torrent = False
server = server.lower()
# If the video is "directo" (direct), there is nothing more to look up
if server=="directo" or server=="local":
logger.info("[servertools.py] server=directo, la url es la buena")
try:
import urlparse
parsed_url = urlparse.urlparse(url)
logger.info("parsed_url="+str(parsed_url))
extension = parsed_url.path[-4:]
except:
extension = url[-4:]
video_urls = [[ "%s [%s]" % (extension,server) , url ]]
return video_urls,True,""
# Find out the video URLs
else:
#if server=="torrent":
# server="filenium"
# torrent = True
# Load the connector
try:
# Show a progress dialog
if muestra_dialogo:
import xbmcgui
progreso = xbmcgui.DialogProgress()
progreso.create( "pelisalacarta" , "Conectando con "+server)
# Replaces the code with a "Plex compatible" alternative
#exec "from servers import "+server+" as server_connector"
servers_module = __import__("servers."+server)
server_connector = getattr(servers_module,server)
logger.info("[servertools.py] servidor de "+server+" importado")
if muestra_dialogo:
progreso.update( 20 , "Conectando con "+server)
# If the connector has a function to check whether the video exists, check it now
if hasattr(server_connector, 'test_video_exists'):
logger.info("[servertools.py] invocando a "+server+".test_video_exists")
puedes,motivo = server_connector.test_video_exists( page_url=url )
# If the function says the video does not exist, stop here
if not puedes:
logger.info("[servertools.py] test_video_exists dice que el video no existe")
if muestra_dialogo: progreso.close()
return video_urls,puedes,motivo
else:
logger.info("[servertools.py] test_video_exists dice que el video SI existe")
# Get free links
if server in FREE_SERVERS:
logger.info("[servertools.py] invocando a "+server+".get_video_url")
video_urls = server_connector.get_video_url( page_url=url , video_password=video_password )
# If no videos are found in free mode, the video does not exist
if len(video_urls)==0:
if muestra_dialogo: progreso.close()
return video_urls,False,"No se puede encontrar el vídeo en "+server
# Get premium links if you have an account on the server
if server in PREMIUM_SERVERS and config.get_setting(server+"premium")=="true":
video_urls = server_connector.get_video_url( page_url=url , premium=(config.get_setting(server+"premium")=="true") , user=config.get_setting(server+"user") , password=config.get_setting(server+"password"), video_password=video_password )
# If no videos are found in direct premium mode, the video does not exist
if len(video_urls)==0:
if muestra_dialogo: progreso.close()
return video_urls,False,"No se puede encontrar el vídeo en "+server
# Get Filenium links if you have an account
if server in FILENIUM_SERVERS and config.get_setting("fileniumpremium")=="true":
# Show a progress dialog
if muestra_dialogo:
progreso.update( 40 , "Conectando con Filenium")
from servers import filenium as gen_conector
video_gen = gen_conector.get_video_url( page_url=url , premium=(config.get_setting("fileniumpremium")=="true") , user=config.get_setting("fileniumuser") , password=config.get_setting("fileniumpassword"), video_password=video_password )
extension = gen_conector.get_file_extension(video_gen)
logger.info("[xbmctools.py] filenium url="+video_gen)
video_urls.append( [ extension+" ["+server+"][filenium]", video_gen ] )
# Get Real-Debrid links if you have an account
if server in REALDEBRID_SERVERS and config.get_setting("realdebridpremium")=="true":
# Show a progress dialog
if muestra_dialogo:
progreso.update( 60 , "Conectando con Real-Debrid")
from servers import realdebrid as gen_conector
video_gen = gen_conector.get_video_url( page_url=url , premium=(config.get_setting("realdebridpremium")=="true") , user=config.get_setting("realdebriduser") , password=config.get_setting("realdebridpassword"), video_password=video_password )
logger.info("[xbmctools.py] realdebrid url="+video_gen)
if not "REAL-DEBRID" in video_gen:
video_urls.append( [ "."+video_gen.rsplit('.',1)[1]+" [realdebrid]", video_gen ] )
else:
if muestra_dialogo: progreso.close()
# If Real-Debrid fails but you already have a valid link, stay silent
if len(video_urls)==0:
return video_urls,False,video_gen
# Get All-Debrid links if you have an account
if server in ALLDEBRID_SERVERS and config.get_setting("alldebridpremium")=="true":
# Show a progress dialog
if muestra_dialogo:
progreso.update( 80 , "Conectando con All-Debrid")
from servers import alldebrid as gen_conector
video_gen = gen_conector.get_video_url( page_url=url , premium=(config.get_setting("alldebridpremium")=="true") , user=config.get_setting("alldebriduser") , password=config.get_setting("alldebridpassword"), video_password=video_password )
logger.info("[xbmctools.py] alldebrid url="+video_gen)
if video_gen.startswith("http"):
video_urls.append( [ "."+video_gen.rsplit('.',1)[1]+" [alldebrid]", video_gen ] )
else:
# If All-Debrid fails but you already have a valid link, stay silent
if len(video_urls)==0:
return [],False,video_gen.strip()
if muestra_dialogo:
progreso.update( 100 , "Proceso finalizado")
# Close the progress dialog
if muestra_dialogo: progreso.close()
# If you reach this point with no links at all, the video cannot be played
if len(video_urls)==0:
# What is the reason?
# 1) The video does not exist -> already handled above
# 2) You are missing one of the compatible premium accounts
# List of the accounts that support this server
listapremium = ""
if server in ALLDEBRID_SERVERS: listapremium+="All-Debrid o "
if server in FILENIUM_SERVERS: listapremium+="Filenium o "
if server in REALDEBRID_SERVERS: listapremium+="Real-Debrid o "
if server in PREMIUM_SERVERS: listapremium+=server+" o "
listapremium = listapremium[:-3]
return video_urls,False,"Para ver un vídeo en "+server+" necesitas<br/>una cuenta en "+listapremium
except:
if muestra_dialogo: progreso.close()
import traceback
from pprint import pprint
exc_type, exc_value, exc_tb = sys.exc_info()
lines = traceback.format_exception(exc_type, exc_value, exc_tb)
for line in lines:
line_splits = line.split("\n")
for line_split in line_splits:
logger.error(line_split)
return video_urls,False,"Se ha producido un error en<br/>el conector con "+server
return video_urls,True,"" |
keisuke-umezawa/chainer | refs/heads/master | tests/chainer_tests/function_hooks_tests/test_timer.py | 3 | import os
import time
import unittest
import numpy
import six
import chainer
from chainer.backends import cuda
from chainer import function_hooks
from chainer import functions
from chainer.functions.math import basic_math
from chainer import testing
from chainer.testing import attr
try:
_get_time = time.perf_counter
except AttributeError:
if os.name == 'nt':
_get_time = time.clock
else:
_get_time = time.time
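# _get_time resolves to the most precise clock available: time.perf_counter on
# Python >= 3.3, time.clock on Windows under older Pythons, time.time elsewhere.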
def check_history(self, t, function_type, return_type):
func_name = t[0]
assert func_name == function_type.__name__
assert isinstance(t[1], return_type)
class SimpleLink(chainer.Link):
def __init__(self):
super(SimpleLink, self).__init__()
with self.init_scope():
init_w = numpy.random.uniform(-1, 1, (3, 5)).astype(
numpy.float32)
self.w = chainer.Parameter(init_w)
def forward(self, x):
return self.w * x
class TestTimerHookToLink(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.layer = SimpleLink()
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def test_name(self):
assert self.h.name == 'TimerHook'
def check_forward(self, x):
with self.h:
self.layer(chainer.Variable(x))
assert len(self.h.call_history) == 1
check_history(self, self.h.call_history[0], basic_math.Mul, float)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.layer.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.layer(x)
y.grad = gy
with self.h:
y.backward()
# It includes forward of + that accumulates gradients to W and b
assert len(self.h.call_history) == 3
for entry in self.h.call_history:
if entry[0] == 'Add':
continue
check_history(self, entry, basic_math.Mul, float)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.layer.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
class TestTimerHookToFunction(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.f = functions.math.exponential.Exp()
self.f.add_hook(self.h)
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
self.gy = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def check_forward(self, x):
self.f.apply((chainer.Variable(x),))
assert len(self.h.call_history) == 1
check_history(self, self.h.call_history[0],
functions.math.exponential.Exp, float)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x, gy):
x = chainer.Variable(x)
y = self.f.apply((x,))[0]
y.grad = gy
y.backward()
assert len(self.h.call_history) == 2
check_history(self, self.h.call_history[1],
functions.math.exponential.Exp, float)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def test_reentrant(self):
# In/grad data are random; these do not simulate the actually possible
# cases.
# any function other than Exp is ok
g = functions.math.identity.Identity()
self.h.backward_preprocess(self.f, (self.x,), (self.gy,))
t1 = _get_time()
time.sleep(0.001) # longer than each hook call
self.h.forward_preprocess(g, (self.x,))
self.h.forward_postprocess(g, (self.x,))
t2 = _get_time()
self.h.backward_postprocess(self.f, (self.x,), (self.gy,))
history = dict(self.h.call_history)
assert len(history) == 2
assert self.f._impl_name in history
assert g._impl_name in history
f_time = history[self.f._impl_name]
g_time = history[g._impl_name]
assert g_time <= t2 - t1
assert f_time >= t2 - t1
def test_reentrant_total_time(self):
g = functions.math.identity.Identity()
t0 = _get_time()
self.h.backward_preprocess(self.f, (self.x,), (self.gy,))
t1 = _get_time()
self.h.forward_preprocess(g, (self.x,))
time.sleep(0.001)
self.h.forward_postprocess(g, (self.x,))
t2 = _get_time()
self.h.backward_postprocess(self.f, (self.x,), (self.gy,))
t3 = _get_time()
assert self.h.total_time() <= t3 - t0
assert self.h.total_time() >= t2 - t1
@testing.parameterize(
{'unit': 'sec'},
{'unit': 'ms'},
{'unit': 'us'},
{'unit': 'ns'},
{'unit': 'auto'},
{'unit': 'auto_foreach'},
)
class TestTimerPrintReport(unittest.TestCase):
def setUp(self):
self.h = function_hooks.TimerHook()
self.f = functions.math.exponential.Exp()
self.f.add_hook(self.h)
self.x = numpy.random.uniform(-0.1, 0.1, (3, 5)).astype(numpy.float32)
def test_summary(self):
x = self.x
self.f.apply((chainer.Variable(x),))
self.f.apply((chainer.Variable(x),))
assert len(self.h.call_history) == 2
assert len(self.h.summary()) == 1
def test_print_report(self):
x = self.x
self.f.apply((chainer.Variable(x),))
self.f.apply((chainer.Variable(x),))
io = six.StringIO()
self.h.print_report(unit=self.unit, file=io)
expect = r'''\AFunctionName +ElapsedTime +Occurrence
+Exp +[0-9.\-e]+(.s|sec) +[0-9]+
\Z'''
actual = io.getvalue()
six.assertRegex(self, actual, expect)
testing.run_module(__name__, __file__)
|
Tagar/incubator-airflow | refs/heads/master | tests/contrib/hooks/test_slack_webhook_hook.py | 15 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
from airflow import configuration, models
from airflow.utils import db
from airflow.contrib.hooks.slack_webhook_hook import SlackWebhookHook
class TestSlackWebhookHook(unittest.TestCase):
_config = {
'http_conn_id': 'slack-webhook-default',
'webhook_token': 'manual_token',
'message': 'Awesome message to put on Slack',
'channel': '#general',
'username': 'SlackMcSlackFace',
'icon_emoji': ':hankey:',
'link_names': True,
'proxy': 'https://my-horrible-proxy.proxyist.com:8080'
}
expected_message_dict = {'channel': _config['channel'],
'username': _config['username'],
'icon_emoji': _config['icon_emoji'],
'link_names': 1,
'text': _config['message']
}
expected_message = json.dumps(expected_message_dict)
def setUp(self):
configuration.load_test_config()
db.merge_conn(
models.Connection(
conn_id='slack-webhook-default',
extra='{"webhook_token": "your_token_here"}')
)
def test_get_token_manual_token(self):
# Given
manual_token = 'manual_token_here'
hook = SlackWebhookHook(webhook_token=manual_token)
# When
webhook_token = hook._get_token(manual_token, None)
# Then
self.assertEqual(webhook_token, manual_token)
def test_get_token_conn_id(self):
# Given
conn_id = 'slack-webhook-default'
hook = SlackWebhookHook(http_conn_id=conn_id)
expected_webhook_token = 'your_token_here'
# When
webhook_token = hook._get_token(None, conn_id)
# Then
self.assertEqual(webhook_token, expected_webhook_token)
def test_build_slack_message(self):
# Given
hook = SlackWebhookHook(**self._config)
# When
message = hook._build_slack_message()
# Then
self.assertEqual(self.expected_message, message)
if __name__ == '__main__':
unittest.main()
|
xyb/micropython | refs/heads/master | tests/basics/del_local.py | 118 | # delete local then try to reference it
def f():
x = 1
y = 2
print(x, y)
del x
print(y)
try:
print(x)
except NameError:
print("NameError");
f()
# delete local then try to delete it again
def g():
x = 3
y = 4
print(x, y)
del x
print(y)
try:
del x
except NameError:
print("NameError");
g()
|
haydnKing/waistcoat | refs/heads/master | test/postprocess_test.py | 1 | import unittest, testcases, tempfile, shutil, os.path
from waistcoat import postprocess, statistics
DATA_DIR = os.path.join(os.path.split(__file__)[0], "data/postprocess/")
class TestPostprocess(testcases.TestSamfile):
def setUp(self):
self.tempdir = tempfile.mkdtemp(prefix='postprocesstest')
statistics.recording = False
def tearDown(self):
shutil.rmtree(self.tempdir)
statistics.recording = True
def test_accepted_hits(self):
"""Test the postprocessing"""
sample = "sampleName"
tophatDir = os.path.join(self.tempdir, sample)
os.mkdir(tophatDir)
shutil.copy(os.path.join(DATA_DIR, 'accepted_hits.bam'), tophatDir)
shutil.copy(os.path.join(DATA_DIR, 'accepted_hits.bam.bai'), tophatDir)
postprocess.run(self.tempdir, sample,
os.path.join(DATA_DIR, 'genome.fa'),
extend=True)
output = [
'GTCCGTAGTCCTAGTCGTCATCCCCGTA',
'ACTGGACTATTTAGGACGATCGGACTGA',
]
self.assertBAM(os.path.join(self.tempdir, '{}.bam'.format(sample)),
os.path.join(DATA_DIR, 'genome.fa'))
|
openhdf/enigma2-wetek | refs/heads/master | lib/python/Screens/PVRState.py | 70 | from Screen import Screen
from Components.Label import Label
from Components.Pixmap import Pixmap, MultiPixmap
class PVRState(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self["eventname"] = Label()
self["state"] = Label()
self["speed"] = Label()
self["statusicon"] = MultiPixmap()
class TimeshiftState(Screen):
def __init__(self, session):
Screen.__init__(self, session)
self["eventname"] = Label()
self["state"] = Label()
self["speed"] = Label()
self["statusicon"] = MultiPixmap()
self["PTSSeekBack"] = Pixmap()
self["PTSSeekPointer"] = Pixmap()
|
mostafa-z/Gabriel_MM | refs/heads/stockbase | tools/perf/scripts/python/sctop.py | 11180 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
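# Example invocations (matching the usage string below):
#   perf script -s sctop.py             # all comms, refresh every 3 seconds
#   perf script -s sctop.py firefox 5   # only syscalls made by firefox, every 5s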
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
|
chensuchun/fas | refs/heads/master | fas/openid_samadhi.py | 3 | # -*- coding: utf-8 -*-
# Copyright 2008 by Jeffrey C. Ollie
# Copyright 2009 by Red Hat, Inc.
#
# This file is part of Samadhi.
#
# Samadhi is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License.
#
# Samadhi is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Samadhi. If not, see <http://www.gnu.org/licenses/>.
from turbogears import controllers, expose, flash, validators, validate, error_handler
from fas import model
from turbogears import identity, redirect, config
from turbogears import url as tg_url
from cherrypy import request, response, session
# from samadhi import json
import logging
log = logging.getLogger("samadhi.controllers")
import openid
from openid.extensions import sreg
from openid.server import server
from openid.consumer import discover
from urlparse import urljoin
from urllib import unquote
from fas.user import KnownUser
from fas.model import People
from fas.auth import cla_done
def build_url(newpath):
base_url = config.get('samadhi.baseurl')
return urljoin(base_url, tg_url(newpath))
login_url = build_url('/login')
id_base_url = build_url('/openid/id')
endpoint_url = build_url('/openid/server')
yadis_base_url = build_url('/openid/yadis')
class UserID(validators.Schema):
username = KnownUser
class OpenID(controllers.Controller):
def __init__(self, *args, **kw):
super(OpenID, self).__init__(*args, **kw)
store = model.SamadhiStore()
self.openid = server.Server(store, endpoint_url)
@expose(template="fas.templates.openid.welcome")
def index(self):
return dict(login_url = login_url, id_base_url = id_base_url)
@expose(template="fas.templates.error")
def error(self, tg_errors=None):
'''Show a friendly error message'''
if not tg_errors:
redirect('/')
return dict(tg_errors=tg_errors)
@validate(validators=UserID())
@error_handler(error)
@expose(template="fas.templates.openid.id")
def id(self, username):
person = People.by_username(username)
if not cla_done(person):
flash(_('This OpenID will not be active until the user has signed the CLA.'))
person = person.filter_private()
results = dict(endpoint_url = endpoint_url,
yadis_url = build_url(yadis_base_url + '/' + username),
user_url = build_url(id_base_url + '/' + username),
person=person)
return results
@expose(template="fas.templates.openid.yadis", format="xml", content_type="application/xrds+xml")
def yadis(self, username=None):
results = dict(discover = discover,
endpoint_url = endpoint_url,
yadis_url = build_url(yadis_base_url + '/' + username),
user_url = None)
if username:
results['user_url'] = build_url(id_base_url + '/' + username)
return results
@expose()
def server(self, *args, **kw):
try:
openid_request = self.openid.decodeRequest(request.params_backup)
except server.ProtocolError, openid_error:
return self.respond(openid_error)
if openid_request is None:
return dict(tg_template = "fas.templates.openid.about",
endpoint_url = endpoint_url)
elif openid_request.mode in ["checkid_immediate", "checkid_setup"]:
return self.checkidrequest(openid_request)
else:
return self.respond(self.openid.handleRequest(openid_request))
def isauthorized(self, openid_identity, openid_trust_root):
if identity.current.anonymous:
return False
username = identity.current.user.username
person = People.by_username(username)
if not cla_done(person):
return False
if build_url(id_base_url + '/' + identity.current.user_name) != openid_identity:
return False
key = (openid_identity, openid_trust_root)
return session.get(key)
def checkidrequest(self, openid_request):
isauthorized = self.isauthorized(openid_request.identity, openid_request.trust_root)
trust_root = openid_request.trust_root
request_dict = {trust_root: openid_request}
if 'last_request' in session:
session['last_request'].update(request_dict)
else:
session['last_request'] = request_dict
if identity.current.anonymous:
return redirect('login', trust_root=trust_root)
elif isauthorized == False:
return self.respond(openid_request.answer(False))
elif isauthorized == 'always':
return self.respond(openid_request.answer(True))
elif openid_request.immediate or isauthorized == 'never':
return self.respond(openid_request.answer(False))
else:
return self.showdecidepage(openid_request)
def showdecidepage(self, openid_request):
sreg_req = sreg.SRegRequest.fromOpenIDRequest(openid_request)
return dict(tg_template='fas.templates.openid.authorizesite',
identity = openid_request.identity,
trust_root = openid_request.trust_root,
sreg_req = sreg_req,
data_fields = sreg.data_fields)
def respond(self, openid_response):
try:
webresponse = self.openid.encodeResponse(openid_response)
response.status = webresponse.code
response.headers.update(webresponse.headers)
if webresponse.body:
return webresponse.body
return ''
except server.EncodingError, why:
text = why.response.encodeToKVForm()
response.status = 400
response.headers['Content-type'] = 'text/plain; charset=UTF-8'
return text
@expose()
def allow(self, *args, **kw):
trust_root = kw['trust_root']
openid_request = self.request_from_session(trust_root)
if not openid_request:
flash(_('Your last OpenID request could not be retrieved. Try re-authenticating to the OpenID consumer.'))
return redirect('/')
remember_value = ''
if 'yes' in kw:
openid_response = openid_request.answer(True)
remember_value = 'always'
sreg_req = sreg.SRegRequest.fromOpenIDRequest(openid_request)
fields = sreg_req.allRequestedFields()
values = {}
send_values = {}
if 'sreg' in kw and 'send' in kw['sreg']:
values = {
'nickname': identity.current.user.username,
'email': identity.current.user.email,
'fullname': identity.current.user.human_name,
'timezone': identity.current.user.timezone,
'country': identity.current.user.country_code,
}
for field in [f for f in kw['sreg']['send'] if kw['sreg']['send'][f] == 'yes']:
if field in values:
send_values[field] = values[field]
sreg_resp = sreg.SRegResponse.extractResponse(sreg_req, send_values)
openid_response.addExtension(sreg_resp)
elif 'no' in kw:
openid_response = openid_request.answer(False)
remember_value = 'never'
else:
assert False, 'strange allow post %s' % kw
if kw.get('remember', 'no') == 'yes':
session[(openid_request.identity, openid_request.trust_root)] = remember_value
return self.respond(openid_response)
@identity.require(identity.not_anonymous())
@expose()
def login(self, trust_root):
"""Force the user to login, then go back to checkidrequest"""
openid_request = self.request_from_session(trust_root)
if not openid_request:
flash(_('Your last OpenID request could not be retrieved. Try re-authenticating to the OpenID consumer.'))
return redirect('/')
return self.checkidrequest(openid_request)
def request_from_session(self, trust_root):
trust_root = unquote(trust_root)
if 'last_request' in session:
if trust_root in session['last_request']:
return session['last_request'][trust_root]
return None
|
adam111316/SickGear | refs/heads/master | lib/hachoir_parser/archive/rpm.py | 95 | """
RPM archive parser.
Author: Victor Stinner, 1st December 2005.
"""
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, ParserError,
UInt8, UInt16, UInt32, UInt64, Enum,
NullBytes, Bytes, RawBytes, SubFile,
Character, CString, String)
from hachoir_core.endian import BIG_ENDIAN
from hachoir_parser.archive.gzip_parser import GzipParser
from hachoir_parser.archive.bzip2_parser import Bzip2Parser
class ItemContent(FieldSet):
format_type = {
0: UInt8,
1: Character,
2: UInt8,
3: UInt16,
4: UInt32,
5: UInt64,
6: CString,
7: RawBytes,
8: CString,
9: CString
}
def __init__(self, parent, name, item):
FieldSet.__init__(self, parent, name, item.description)
self.related_item = item
self._name = "content_%s" % item.name
def createFields(self):
item = self.related_item
type = item["type"].value
cls = self.format_type[type]
count = item["count"].value
if cls is RawBytes: # or type == 8:
if cls is RawBytes:
args = (self, "value", count)
else:
args = (self, "value") # cls is CString
count = 1
else:
if 1 < count:
args = (self, "value[]")
else:
args = (self, "value")
for index in xrange(count):
yield cls(*args)
class Item(FieldSet):
type_name = {
0: "NULL",
1: "CHAR",
2: "INT8",
3: "INT16",
4: "INT32",
5: "INT64",
6: "CSTRING",
7: "BIN",
8: "CSTRING_ARRAY",
9: "CSTRING?"
}
tag_name = {
1000: "File size",
1001: "(Broken) MD5 signature",
1002: "PGP 2.6.3 signature",
1003: "(Broken) MD5 signature",
1004: "MD5 signature",
1005: "GnuPG signature",
1006: "PGP5 signature",
1007: "Uncompressed payload size (bytes)",
256+8: "Broken SHA1 header digest",
256+9: "Broken SHA1 header digest",
256+13: "Broken SHA1 header digest",
256+11: "DSA header signature",
256+12: "RSA header signature"
}
def __init__(self, parent, name, description=None, tag_name_dict=None):
FieldSet.__init__(self, parent, name, description)
if tag_name_dict is None:
tag_name_dict = Item.tag_name
self.tag_name_dict = tag_name_dict
def createFields(self):
yield Enum(UInt32(self, "tag", "Tag"), self.tag_name_dict)
yield Enum(UInt32(self, "type", "Type"), Item.type_name)
yield UInt32(self, "offset", "Offset")
yield UInt32(self, "count", "Count")
def createDescription(self):
return "Item: %s (%s)" % (self["tag"].display, self["type"].display)
class ItemHeader(Item):
tag_name = {
61: "Current image",
62: "Signatures",
63: "Immutable",
64: "Regions",
100: "I18N string locales",
1000: "Name",
1001: "Version",
1002: "Release",
1003: "Epoch",
1004: "Summary",
1005: "Description",
1006: "Build time",
1007: "Build host",
1008: "Install time",
1009: "Size",
1010: "Distribution",
1011: "Vendor",
1012: "Gif",
1013: "Xpm",
1014: "Licence",
1015: "Packager",
1016: "Group",
1017: "Changelog",
1018: "Source",
1019: "Patch",
1020: "Url",
1021: "OS",
1022: "Arch",
1023: "Prein",
1024: "Postin",
1025: "Preun",
1026: "Postun",
1027: "Old filenames",
1028: "File sizes",
1029: "File states",
1030: "File modes",
1031: "File uids",
1032: "File gids",
1033: "File rdevs",
1034: "File mtimes",
1035: "File MD5s",
1036: "File link to's",
1037: "File flags",
1038: "Root",
1039: "File username",
1040: "File groupname",
1043: "Icon",
1044: "Source rpm",
1045: "File verify flags",
1046: "Archive size",
1047: "Provide name",
1048: "Require flags",
1049: "Require name",
1050: "Require version",
1051: "No source",
1052: "No patch",
1053: "Conflict flags",
1054: "Conflict name",
1055: "Conflict version",
1056: "Default prefix",
1057: "Build root",
1058: "Install prefix",
1059: "Exclude arch",
1060: "Exclude OS",
1061: "Exclusive arch",
1062: "Exclusive OS",
1064: "RPM version",
1065: "Trigger scripts",
1066: "Trigger name",
1067: "Trigger version",
1068: "Trigger flags",
1069: "Trigger index",
1079: "Verify script",
#TODO: Finish the list (id 1070..1162 using rpm library source code)
}
def __init__(self, parent, name, description=None):
Item.__init__(self, parent, name, description, self.tag_name)
def sortRpmItem(a,b):
return int( a["offset"].value - b["offset"].value )
class PropertySet(FieldSet):
def __init__(self, parent, name, *args):
FieldSet.__init__(self, parent, name, *args)
self._size = self["content_item[1]"].address + self["size"].value * 8
def createFields(self):
# Read chunk header
yield Bytes(self, "signature", 3, r"Property signature (\x8E\xAD\xE8)")
if self["signature"].value != "\x8E\xAD\xE8":
raise ParserError("Invalid property signature")
yield UInt8(self, "version", "Signature version")
yield NullBytes(self, "reserved", 4, "Reserved")
yield UInt32(self, "count", "Count")
yield UInt32(self, "size", "Size")
# Read item header
items = []
for i in range(0, self["count"].value):
item = ItemHeader(self, "item[]")
yield item
items.append(item)
# Sort items by their offset
items.sort( sortRpmItem )
# Read item content
start = self.current_size/8
for item in items:
offset = item["offset"].value
diff = offset - (self.current_size/8 - start)
if 0 < diff:
yield NullBytes(self, "padding[]", diff)
yield ItemContent(self, "content[]", item)
size = start + self["size"].value - self.current_size/8
if 0 < size:
yield NullBytes(self, "padding[]", size)
class RpmFile(Parser):
PARSER_TAGS = {
"id": "rpm",
"category": "archive",
"file_ext": ("rpm",),
"mime": (u"application/x-rpm",),
"min_size": (96 + 16 + 16)*8, # file header + checksum + content header
"magic": (('\xED\xAB\xEE\xDB', 0),),
"description": "RPM package"
}
TYPE_NAME = {
0: "Binary",
1: "Source"
}
endian = BIG_ENDIAN
def validate(self):
if self["signature"].value != '\xED\xAB\xEE\xDB':
return "Invalid signature"
if self["major_ver"].value != 3:
return "Unknown major version (%u)" % self["major_ver"].value
if self["type"].value not in self.TYPE_NAME:
return "Invalid RPM type"
return True
def createFields(self):
yield Bytes(self, "signature", 4, r"RPM file signature (\xED\xAB\xEE\xDB)")
yield UInt8(self, "major_ver", "Major version")
yield UInt8(self, "minor_ver", "Minor version")
yield Enum(UInt16(self, "type", "RPM type"), RpmFile.TYPE_NAME)
yield UInt16(self, "architecture", "Architecture")
yield String(self, "name", 66, "Archive name", strip="\0", charset="ASCII")
yield UInt16(self, "os", "OS")
yield UInt16(self, "signature_type", "Type of signature")
yield NullBytes(self, "reserved", 16, "Reserved")
yield PropertySet(self, "checksum", "Checksum (signature)")
yield PropertySet(self, "header", "Header")
if self._size is None: # TODO: is it possible to handle piped input?
raise NotImplementedError
size = (self._size - self.current_size) // 8
if size:
if 3 <= size and self.stream.readBytes(self.current_size, 3) == "BZh":
yield SubFile(self, "content", size, "bzip2 content", parser=Bzip2Parser)
else:
yield SubFile(self, "content", size, "gzip content", parser=GzipParser)
|
Sweetgrassbuffalo/ReactionSweeGrass-v2 | refs/heads/master | .meteor/local/dev_bundle/python/Lib/test/test_gzip.py | 19 | """Test script for the gzip module.
"""
import unittest
from test import test_support
import os
import io
import struct
gzip = test_support.import_module('gzip')
data1 = """ int length=DEFAULTALLOC, err = Z_OK;
PyObject *RetVal;
int flushmode = Z_FINISH;
unsigned long start_total_out;
"""
data2 = """/* zlibmodule.c -- gzip-compatible data compression */
/* See http://www.gzip.org/zlib/
/* See http://www.winimage.com/zLibDll for Windows */
"""
class TestGzip(unittest.TestCase):
filename = test_support.TESTFN
def setUp(self):
test_support.unlink(self.filename)
def tearDown(self):
test_support.unlink(self.filename)
def write_and_read_back(self, data, mode='b'):
b_data = memoryview(data).tobytes()
with gzip.GzipFile(self.filename, 'w'+mode) as f:
l = f.write(data)
self.assertEqual(l, len(b_data))
with gzip.GzipFile(self.filename, 'r'+mode) as f:
self.assertEqual(f.read(), b_data)
@test_support.requires_unicode
def test_unicode_filename(self):
unicode_filename = test_support.TESTFN_UNICODE
try:
unicode_filename.encode(test_support.TESTFN_ENCODING)
except (UnicodeError, TypeError):
self.skipTest("Requires unicode filenames support")
self.filename = unicode_filename
with gzip.GzipFile(unicode_filename, "wb") as f:
f.write(data1 * 50)
with gzip.GzipFile(unicode_filename, "rb") as f:
self.assertEqual(f.read(), data1 * 50)
# Sanity check that we are actually operating on the right file.
with open(unicode_filename, 'rb') as fobj, \
gzip.GzipFile(fileobj=fobj, mode="rb") as f:
self.assertEqual(f.read(), data1 * 50)
def test_write(self):
with gzip.GzipFile(self.filename, 'wb') as f:
f.write(data1 * 50)
# Try flush and fileno.
f.flush()
f.fileno()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
f.close()
# Test multiple close() calls.
f.close()
# The following test_write_xy methods test that write accepts
# the corresponding bytes-like object type as input
# and that the data written equals bytes(xy) in all cases.
def test_write_memoryview(self):
self.write_and_read_back(memoryview(data1 * 50))
def test_write_incompatible_type(self):
# Test that non-bytes-like types raise TypeError.
# Issue #21560: attempts to write incompatible types
# should not affect the state of the fileobject
with gzip.GzipFile(self.filename, 'wb') as f:
with self.assertRaises(UnicodeEncodeError):
f.write(u'\xff')
with self.assertRaises(TypeError):
f.write([1])
f.write(data1)
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertEqual(f.read(), data1)
def test_read(self):
self.test_write()
# Try reading.
with gzip.GzipFile(self.filename, 'r') as f:
d = f.read()
self.assertEqual(d, data1*50)
def test_read_universal_newlines(self):
# Issue #5148: Reading breaks when mode contains 'U'.
self.test_write()
with gzip.GzipFile(self.filename, 'rU') as f:
d = f.read()
self.assertEqual(d, data1*50)
def test_io_on_closed_object(self):
# Test that I/O operations on closed GzipFile objects raise a
# ValueError, just like the corresponding functions on file objects.
# Write to a file, open it for reading, then close it.
self.test_write()
f = gzip.GzipFile(self.filename, 'r')
f.close()
with self.assertRaises(ValueError):
f.read(1)
with self.assertRaises(ValueError):
f.seek(0)
with self.assertRaises(ValueError):
f.tell()
# Open the file for writing, then close it.
f = gzip.GzipFile(self.filename, 'w')
f.close()
with self.assertRaises(ValueError):
f.write('')
with self.assertRaises(ValueError):
f.flush()
def test_append(self):
self.test_write()
# Append to the previous file
with gzip.GzipFile(self.filename, 'ab') as f:
f.write(data2 * 15)
with gzip.GzipFile(self.filename, 'rb') as f:
d = f.read()
self.assertEqual(d, (data1*50) + (data2*15))
def test_many_append(self):
# Bug #1074261 was triggered when reading a file that contained
# many, many members. Create such a file and verify that reading it
# works.
with gzip.open(self.filename, 'wb', 9) as f:
f.write('a')
for i in range(0, 200):
with gzip.open(self.filename, "ab", 9) as f: # append
f.write('a')
# Try reading the file
with gzip.open(self.filename, "rb") as zgfile:
contents = ""
while 1:
ztxt = zgfile.read(8192)
contents += ztxt
if not ztxt: break
self.assertEqual(contents, 'a'*201)
def test_buffered_reader(self):
# Issue #7471: a GzipFile can be wrapped in a BufferedReader for
# performance.
self.test_write()
with gzip.GzipFile(self.filename, 'rb') as f:
with io.BufferedReader(f) as r:
lines = [line for line in r]
self.assertEqual(lines, 50 * data1.splitlines(True))
def test_readline(self):
self.test_write()
# Try .readline() with varying line lengths
with gzip.GzipFile(self.filename, 'rb') as f:
line_length = 0
while 1:
L = f.readline(line_length)
if not L and line_length != 0: break
self.assertTrue(len(L) <= line_length)
line_length = (line_length + 1) % 50
def test_readlines(self):
self.test_write()
# Try .readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
L = f.readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
while 1:
L = f.readlines(150)
if L == []: break
def test_seek_read(self):
self.test_write()
# Try seek, read test
with gzip.GzipFile(self.filename) as f:
while 1:
oldpos = f.tell()
line1 = f.readline()
if not line1: break
newpos = f.tell()
f.seek(oldpos) # negative seek
if len(line1)>10:
amount = 10
else:
amount = len(line1)
line2 = f.read(amount)
self.assertEqual(line1[:amount], line2)
f.seek(newpos) # positive seek
def test_seek_whence(self):
self.test_write()
# Try seek(whence=1), read test
with gzip.GzipFile(self.filename) as f:
f.read(10)
f.seek(10, whence=1)
y = f.read(10)
self.assertEqual(y, data1[20:30])
def test_seek_write(self):
# Try seek, write test
with gzip.GzipFile(self.filename, 'w') as f:
for pos in range(0, 256, 16):
f.seek(pos)
f.write('GZ\n')
def test_mode(self):
self.test_write()
with gzip.GzipFile(self.filename, 'r') as f:
self.assertEqual(f.myfileobj.mode, 'rb')
def test_1647484(self):
for mode in ('wb', 'rb'):
with gzip.GzipFile(self.filename, mode) as f:
self.assertTrue(hasattr(f, "name"))
self.assertEqual(f.name, self.filename)
def test_mtime(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with gzip.GzipFile(self.filename) as fRead:
dataRead = fRead.read()
self.assertEqual(dataRead, data1)
self.assertTrue(hasattr(fRead, 'mtime'))
self.assertEqual(fRead.mtime, mtime)
def test_metadata(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with open(self.filename, 'rb') as fRead:
# see RFC 1952: http://www.faqs.org/rfcs/rfc1952.html
idBytes = fRead.read(2)
self.assertEqual(idBytes, '\x1f\x8b') # gzip ID
cmByte = fRead.read(1)
self.assertEqual(cmByte, '\x08') # deflate
flagsByte = fRead.read(1)
self.assertEqual(flagsByte, '\x08') # only the FNAME flag is set
mtimeBytes = fRead.read(4)
self.assertEqual(mtimeBytes, struct.pack('<i', mtime)) # little-endian
xflByte = fRead.read(1)
self.assertEqual(xflByte, '\x02') # maximum compression
osByte = fRead.read(1)
self.assertEqual(osByte, '\xff') # OS "unknown" (OS-independent)
# Since the FNAME flag is set, the zero-terminated filename follows.
# RFC 1952 specifies that this is the name of the input file, if any.
# However, the gzip module defaults to storing the name of the output
# file in this field.
expected = self.filename.encode('Latin-1') + '\x00'
nameBytes = fRead.read(len(expected))
self.assertEqual(nameBytes, expected)
# Since no other flags were set, the header ends here.
# Rather than process the compressed data, let's seek to the trailer.
fRead.seek(os.stat(self.filename).st_size - 8)
crc32Bytes = fRead.read(4) # CRC32 of uncompressed data [data1]
self.assertEqual(crc32Bytes, '\xaf\xd7d\x83')
isizeBytes = fRead.read(4)
self.assertEqual(isizeBytes, struct.pack('<i', len(data1)))
def test_with_open(self):
# GzipFile supports the context management protocol
with gzip.GzipFile(self.filename, "wb") as f:
f.write(b"xxx")
f = gzip.GzipFile(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with gzip.GzipFile(self.filename, "wb") as f:
1 // 0
except ZeroDivisionError:
pass
else:
self.fail("1 // 0 didn't raise an exception")
def test_zero_padded_file(self):
with gzip.GzipFile(self.filename, "wb") as f:
f.write(data1 * 50)
# Pad the file with zeroes
with open(self.filename, "ab") as f:
f.write("\x00" * 50)
with gzip.GzipFile(self.filename, "rb") as f:
d = f.read()
self.assertEqual(d, data1 * 50, "Incorrect data in file")
def test_fileobj_from_fdopen(self):
# Issue #13781: Creating a GzipFile using a fileobj from os.fdopen()
# should not embed the fake filename "<fdopen>" in the output file.
fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT)
with os.fdopen(fd, "wb") as f:
with gzip.GzipFile(fileobj=f, mode="w") as g:
self.assertEqual(g.name, "")
def test_read_with_extra(self):
# Gzip data with an extra field
gzdata = (b'\x1f\x8b\x08\x04\xb2\x17cQ\x02\xff'
b'\x05\x00Extra'
b'\x0bI-.\x01\x002\xd1Mx\x04\x00\x00\x00')
with gzip.GzipFile(fileobj=io.BytesIO(gzdata)) as f:
self.assertEqual(f.read(), b'Test')
def test_main(verbose=None):
test_support.run_unittest(TestGzip)
if __name__ == "__main__":
test_main(verbose=True)
|
emanuelschuetze/OpenSlides | refs/heads/master | tests/integration/motions/test_views.py | 2 | from django.test.client import Client
from openslides.core.config import config
from openslides.motions.models import Motion
from openslides.utils.test import TestCase
class AnonymousRequests(TestCase):
"""
Tests requests from the anonymous user.
"""
def setUp(self):
self.client = Client()
config["general_system_enable_anonymous"] = True
def test_motion_detail(self):
Motion.objects.create(title="test_motion")
response = self.client.get("/motions/1/")
self.assertEqual(response.status_code, 200)
|
LoicTouzard/MapIf | refs/heads/master | core/modules/emails.py | 2 | #!/usr/bin/env python3
# -!- encoding:utf8 -!-
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# file: emails.py
# date: 2017-09-22
# authors: david.wobrock, paul.dautry, ...
# purpose:
# Mailing module based on Elasticemail API
# license:
# MapIF - Where are INSA de Lyon IF students right now ?
# Copyright (C) 2017 Loic Touzard
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#===============================================================================
# IMPORTS
#===============================================================================
import re
import html
from flask import escape
from flask import render_template
from core.modules import ini
from core.modules import logger
from core.modules import request
#===============================================================================
# GLOBALS
#===============================================================================
# See http://api.elasticemail.com/public/help#Email_Send for documentation
_APIURL_ = 'https://api.elasticemail.com/v2/email/send'
_APIKEY_ = None
_EMAIL_TEMPLATE_DIR_ = 'src/templates/emails/'
_EMAIL_ENCODING_ = 'utf-8'
_SENDER_EMAIL_ = 'noreply@mapif.com'
_SENDER_NAME_ = 'MapIf'
modlgr = logger.get('mapif.email')
#===============================================================================
# FUNCTIONS
#===============================================================================
#-------------------------------------------------------------------------------
# init_emails
#-------------------------------------------------------------------------------
def init():
global _APIKEY_
try:
_APIKEY_ = ini.config('EMAILS', 'elasticmail_apikey', default=None)
if _APIKEY_ is None or _APIKEY_ == '':
modlgr.warning('No Email APIKEY found')
else:
modlgr.debug('Email APIKEY: {0}'.format(_APIKEY_))
except Exception as ex:
modlgr.exception("Could not load ElasticEmail API KEY")
return False
return True
#-------------------------------------------------------------------------------
# send_email
#-------------------------------------------------------------------------------
def send_email(to, subject, template, template_params):
global _APIKEY_
global _APIURL_
# prepare parameters
template_params = {k: html.escape(v) for k, v in template_params.items()}
# try to send mail
email_html = render_template(template, **template_params)
# print(len(email_html))
email_html = re.sub(r">\s+<", r"><", email_html)
# print(len(email_html))
params = {
'apikey': _APIKEY_,
'bodyHtml': email_html,
'to': to,
'charset': _EMAIL_ENCODING_,
'from': _SENDER_EMAIL_,
'fromName': _SENDER_NAME_,
'subject': '[MapIf] - '+subject
}
# print(params)
response = request.get(_APIURL_, params=params)
# print(response.url)
# print(response)
resp_json = response.json()
# print(resp_json)
if resp_json['success']:
modlgr.debug('Sent email successfully to {0}'.format(to))
else:
modlgr.error(resp_json['error'])
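# Usage sketch (hypothetical address and template; templates follow the same
# 'emails/...' convention as send_password_reset_mail below):
#
#   send_email('user@example.com', 'Bienvenue', 'emails/welcome.html',
#              {'firstname': 'Ada'})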
#-------------------------------------------------------------------------------
# send_password_reset_mail
#-------------------------------------------------------------------------------
def send_password_reset_mail(email, firstname, token):
params = {
'firstname': firstname,
'token': token,
'email': email
}
send_email(email, 'Mot de passe oublié', 'emails/password_reset_simple.html', params)
#===============================================================================
# TESTS
#===============================================================================
def test():
raise NotImplementedError
|
bubenkoff/Arkestra | refs/heads/develop | example/example/arkestra_settings.py | 2 | # Before we do anything else, get some default settings built into Arkestra.
# They are not just Arkestra settings, but settings for other applications
# that Arkestra requires to be just so.
# These are the only settings you really need.
# If you need to modify other aspects of Arkestra's behaviour, see the
# settings that are available in arkestra_utilities.settings; copy them
# here and modify them here (not there)
# ------------------------ Arkestra settings
# must match the id of your base or default entity
ARKESTRA_BASE_ENTITY = 1
# MULTIPLE_ENTITY_MODE is for projects hosting the site of more than one entity
# This does not necessarily entail a site for complex organisation,
# or for a number of different organisations - being able to redirect
# news and events items to particular entities for example requires
# MULTIPLE_ENTITY_MODE to be True
MULTIPLE_ENTITY_MODE = True
# How will video be encoded? By a thread? That's OK as a proof of concept,
# but not really viable for anything else. We can use Celery instead - but
# you have to set it up - see the Django Celery section in settings
USE_CELERY_FOR_VIDEO_ENCODING = False
# ------------------------ Semantic editor
import os
from settings import STATIC_URL
SEMANTICEDITOR_MEDIA_URL = os.path.join(STATIC_URL, "semanticeditor/")
# ------------------------ Link system
# what filetypes can the user provide links to?
PERMITTED_FILETYPES = {
"pdf": "Portable Document Format",
"txt": "Plain text",
"doc": "MS Word (avoid using if possible)",
"rtf": "Rich Text Format",
"csv": "Comma-separated values",
}
# -------- Headings ----------------------
# global value for the heading level for page titles (e.g. entity names in
# entity pages)
PAGE_TITLE_HEADING_LEVEL = 1
# The default (typically, the next down from the PAGE_TITLE_HEADING_LEVEL)
IN_BODY_HEADING_LEVEL = 2
PLUGIN_HEADING_LEVEL_DEFAULT = 2
|
ychfan/tensorflow | refs/heads/master | tensorflow/python/util/protobuf/compare.py | 113 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for comparing proto2 messages in Python.
ProtoEq() compares two proto2 messages for equality.
NormalizeNumberFields() normalizes the types and precision of number fields,
which is useful for comparing protocol buffers whose numeric values differ
only in representation (e.g. int vs. long, or 32-bit floats stored as 64-bit).
assertProtoEqual() is useful for unit tests. It produces much more helpful
output than assertEqual() for proto2 messages, e.g. this:
outer {
inner {
- strings: "x"
? ^
+ strings: "y"
? ^
}
}
...compared to the default output from assertEqual() that looks like this:
AssertionError: <my.Msg object at 0x9fb353c> != <my.Msg object at 0x9fb35cc>
Call it inside your unit test's googletest.TestCase subclasses like this:
from tensorflow.python.util.protobuf import compare
class MyTest(googletest.TestCase):
...
def testXXX(self):
...
compare.assertProtoEqual(self, a, b)
Alternatively:
from tensorflow.python.util.protobuf import compare
class MyTest(compare.ProtoAssertions, googletest.TestCase):
...
def testXXX(self):
...
self.assertProtoEqual(a, b)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from google.protobuf import descriptor
from google.protobuf import descriptor_pool
from google.protobuf import message
from google.protobuf import text_format
def assertProtoEqual(self, a, b, check_initialized=True, # pylint: disable=invalid-name
normalize_numbers=False, msg=None):
"""Fails with a useful error if a and b aren't equal.
Comparison of repeated fields matches the semantics of
unittest.TestCase.assertEqual(), ie order and extra duplicates fields matter.
Args:
self: googletest.TestCase
a: proto2 PB instance, or text string representing one.
b: proto2 PB instance -- message.Message or subclass thereof.
check_initialized: boolean, whether to fail if either a or b isn't
initialized.
normalize_numbers: boolean, whether to normalize types and precision of
numbers before comparison.
msg: if specified, is used as the error message on failure.
"""
pool = descriptor_pool.Default()
if isinstance(a, six.string_types):
a = text_format.Merge(a, b.__class__(), descriptor_pool=pool)
for pb in a, b:
if check_initialized:
errors = pb.FindInitializationErrors()
if errors:
self.fail('Initialization errors: %s\n%s' % (errors, pb))
if normalize_numbers:
NormalizeNumberFields(pb)
self.assertMultiLineEqual(
text_format.MessageToString(a, descriptor_pool=pool),
text_format.MessageToString(b, descriptor_pool=pool),
msg=msg)
def NormalizeNumberFields(pb):
"""Normalizes types and precisions of number fields in a protocol buffer.
Due to subtleties in the python protocol buffer implementation, it is possible
for values to have different types and precision depending on whether they
were set and retrieved directly or deserialized from a protobuf. This function
normalizes integer values to ints and longs based on width, 32-bit floats to
five digits of precision to account for python always storing them as 64-bit,
and ensures doubles are floating point for when they're set to integers.
Modifies pb in place. Recurses into nested objects.
Args:
pb: proto2 message.
Returns:
the given pb, modified in place.
"""
for desc, values in pb.ListFields():
is_repeated = True
if desc.label is not descriptor.FieldDescriptor.LABEL_REPEATED:
is_repeated = False
values = [values]
normalized_values = None
# We force 32-bit values to int and 64-bit values to long to make
# alternate implementations where the distinction is more significant
# (e.g. the C++ implementation) simpler.
if desc.type in (descriptor.FieldDescriptor.TYPE_INT64,
descriptor.FieldDescriptor.TYPE_UINT64,
descriptor.FieldDescriptor.TYPE_SINT64):
normalized_values = [int(x) for x in values]
elif desc.type in (descriptor.FieldDescriptor.TYPE_INT32,
descriptor.FieldDescriptor.TYPE_UINT32,
descriptor.FieldDescriptor.TYPE_SINT32,
descriptor.FieldDescriptor.TYPE_ENUM):
normalized_values = [int(x) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_FLOAT:
normalized_values = [round(x, 6) for x in values]
elif desc.type == descriptor.FieldDescriptor.TYPE_DOUBLE:
normalized_values = [round(float(x), 7) for x in values]
if normalized_values is not None:
if is_repeated:
pb.ClearField(desc.name)
getattr(pb, desc.name).extend(normalized_values)
else:
setattr(pb, desc.name, normalized_values[0])
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE or
desc.type == descriptor.FieldDescriptor.TYPE_GROUP):
if (desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE and
desc.message_type.has_options and
desc.message_type.GetOptions().map_entry):
# This is a map, only recurse if the values have a message type.
if (desc.message_type.fields_by_number[2].type ==
descriptor.FieldDescriptor.TYPE_MESSAGE):
for v in six.itervalues(values):
NormalizeNumberFields(v)
else:
for v in values:
# recursive step
NormalizeNumberFields(v)
return pb
def _IsMap(value):
return isinstance(value, collections.Mapping)
def _IsRepeatedContainer(value):
if isinstance(value, six.string_types):
return False
try:
iter(value)
return True
except TypeError:
return False
def ProtoEq(a, b):
"""Compares two proto2 objects for equality.
Recurses into nested messages. Uses list (not set) semantics for comparing
repeated fields, ie duplicates and order matter.
Args:
a: A proto2 message or a primitive.
b: A proto2 message or a primitive.
Returns:
`True` if the messages are equal.
"""
def Format(pb):
"""Returns a dictionary or unchanged pb bases on its type.
Specifically, this function returns a dictionary that maps tag
number (for messages) or element index (for repeated fields) to
value, or just pb unchanged if it's neither.
Args:
pb: A proto2 message or a primitive.
Returns:
A dict or unchanged pb.
"""
if isinstance(pb, message.Message):
return dict((desc.number, value) for desc, value in pb.ListFields())
elif _IsMap(pb):
return dict(pb.items())
elif _IsRepeatedContainer(pb):
return dict(enumerate(list(pb)))
else:
return pb
a, b = Format(a), Format(b)
# Base case
if not isinstance(a, dict) or not isinstance(b, dict):
return a == b
# This list performs double duty: it compares two messages by tag value *or*
# two repeated fields by element, in order. the magic is in the format()
# function, which converts them both to the same easily comparable format.
for tag in sorted(set(a.keys()) | set(b.keys())):
if tag not in a or tag not in b:
return False
else:
# Recursive step
if not ProtoEq(a[tag], b[tag]):
return False
# Didn't find any values that differed, so they're equal!
return True
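# A quick illustration of the list semantics above (plain Python containers
# exercise the same Format() code paths as repeated proto fields):
#
#   ProtoEq([1, 2], [1, 2])  # -> True
#   ProtoEq([1, 2], [2, 1])  # -> False: order and duplicates matter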
class ProtoAssertions(object):
"""Mix this into a googletest.TestCase class to get proto2 assertions.
Usage:
class SomeTestCase(compare.ProtoAssertions, googletest.TestCase):
...
def testSomething(self):
...
self.assertProtoEqual(a, b)
See module-level definitions for method documentation.
"""
# pylint: disable=invalid-name
def assertProtoEqual(self, *args, **kwargs):
return assertProtoEqual(self, *args, **kwargs)
|
avanov/django | refs/heads/master | tests/template_backends/apps/good/templatetags/subpackage/tags.py | 1426 | from django.template import Library
register = Library()
|
Acidburn0zzz/servo | refs/heads/master | tests/wpt/webgl/tests/deqp/functional/gles3/textureshadow/textureshadow_test_generator.py | 51 | #!/usr/bin/env python
# Copyright (c) 2016 The Khronos Group Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and/or associated documentation files (the
# "Materials"), to deal in the Materials without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Materials, and to
# permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
#
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
"""
Generator for textureshadow* tests.
This file needs to be run in its folder.
"""
import sys
_DO_NOT_EDIT_WARNING = """<!--
This file is auto-generated from textureshadow_test_generator.py
DO NOT EDIT!
-->
"""
_HTML_TEMPLATE = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>WebGL Texture Shadow Conformance Tests</title>
<link rel="stylesheet" href="../../../../resources/js-test-style.css"/>
<script src="../../../../js/js-test-pre.js"></script>
<script src="../../../../js/webgl-test-utils.js"></script>
<script src="../../../../closure-library/closure/goog/base.js"></script>
<script src="../../../deqp-deps.js"></script>
<script>goog.require('functional.gles3.es3fTextureShadowTests');</script>
</head>
<body>
<div id="description"></div>
<div id="console"></div>
<canvas id="canvas" width="200" height="100"> </canvas>
<script>
var wtu = WebGLTestUtils;
var gl = wtu.create3DContext('canvas', null, 2);
functional.gles3.es3fTextureShadowTests.run(gl, [%(start)s, %(end)s]);
</script>
</body>
</html>
"""
_TARGETS = [
'2d',
'cube',
'2d_array',
]
_FILTERS = [
'nearest',
'linear',
'nearest_mipmap_nearest',
'linear_mipmap_nearest',
'nearest_mipmap_linear',
'linear_mipmap_linear',
]
_COMPARE_FUNCS = [
'less_or_equal',
'greater_or_equal',
'less',
'greater',
'equal',
'not_equal',
'always',
'never',
]
def GenerateFilename(group):
"""Generate test filename."""
filename = group
filename += ".html"
return filename
def WriteTest(filename, start, end):
    """Write one test."""
    # A context manager guarantees the file is flushed and closed on exit.
    with open(filename, "wb") as out:
        out.write(_DO_NOT_EDIT_WARNING)
        out.write(_HTML_TEMPLATE % {
            'start': start,
            'end': end
        })
def GenerateTests():
"""Generate all tests."""
filelist = []
ii = 0
for iTarget in range(len(_TARGETS)):
for iFilter in range(len(_FILTERS)):
for iFunc in range(len(_COMPARE_FUNCS)):
item = _TARGETS[iTarget] + '_' + _FILTERS[iFilter] + '_' + _COMPARE_FUNCS[iFunc]
filename = GenerateFilename(item)
filelist.append(filename)
WriteTest(filename, ii, ii + 1)
ii = ii + 1
return filelist
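# For illustration: with the tables above this produces 3 * 6 * 8 = 144 tests;
# the first is '2d_nearest_less_or_equal.html' (case range [0, 1]) and the
# last is '2d_array_linear_mipmap_linear_never.html'.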
def GenerateTestList(filelist):
    """Write the 00_test_list.txt manifest, one test filename per line."""
    with open("00_test_list.txt", "wb") as out:
        out.write('\n'.join(filelist))
def main(argv):
"""This is the main function."""
filelist = GenerateTests()
GenerateTestList(filelist)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
catherinezucker/dustcurve | refs/heads/master | dustcurve/__init__.py | 1 | import numpy as np
def fillerfunc():
return 'This is a filler function'
|
bperreault-va/eloworld | refs/heads/master | src/lib/werkzeug/__init__.py | 26 | # -*- coding: utf-8 -*-
"""
werkzeug
~~~~~~~~
Werkzeug is the Swiss Army knife of Python web development.
It provides useful classes and functions for any WSGI application to make
the life of a python web developer much easier. All of the provided
classes are independent from each other so you can mix it with any other
library.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from types import ModuleType
import sys
from werkzeug._compat import iteritems
__version__ = '0.12.1'
# This import magic raises concerns quite often which is why the implementation
# and motivation is explained here in detail now.
#
# The majority of the functions and classes provided by Werkzeug work on the
# HTTP and WSGI layer. There is no useful grouping for those which is why
# they are all importable from "werkzeug" instead of the modules where they are
# implemented. The downside of that is, that now everything would be loaded at
# once, even if unused.
#
# The implementation of a lazy-loading module in this file replaces the
# werkzeug package when imported from within. Attribute access to the werkzeug
# module will then lazily import from the modules that implement the objects.
# import mapping to objects in other modules
all_by_module = {
'werkzeug.debug': ['DebuggedApplication'],
'werkzeug.local': ['Local', 'LocalManager', 'LocalProxy', 'LocalStack',
'release_local'],
'werkzeug.serving': ['run_simple'],
'werkzeug.test': ['Client', 'EnvironBuilder', 'create_environ',
'run_wsgi_app'],
'werkzeug.testapp': ['test_app'],
'werkzeug.exceptions': ['abort', 'Aborter'],
'werkzeug.urls': ['url_decode', 'url_encode', 'url_quote',
'url_quote_plus', 'url_unquote', 'url_unquote_plus',
'url_fix', 'Href', 'iri_to_uri', 'uri_to_iri'],
'werkzeug.formparser': ['parse_form_data'],
'werkzeug.utils': ['escape', 'environ_property', 'append_slash_redirect',
'redirect', 'cached_property', 'import_string',
'dump_cookie', 'parse_cookie', 'unescape',
'format_string', 'find_modules', 'header_property',
'html', 'xhtml', 'HTMLBuilder', 'validate_arguments',
'ArgumentValidationError', 'bind_arguments',
'secure_filename'],
'werkzeug.wsgi': ['get_current_url', 'get_host', 'pop_path_info',
'peek_path_info', 'SharedDataMiddleware',
'DispatcherMiddleware', 'ClosingIterator', 'FileWrapper',
'make_line_iter', 'LimitedStream', 'responder',
'wrap_file', 'extract_path_info'],
'werkzeug.datastructures': ['MultiDict', 'CombinedMultiDict', 'Headers',
'EnvironHeaders', 'ImmutableList',
'ImmutableDict', 'ImmutableMultiDict',
'TypeConversionDict',
'ImmutableTypeConversionDict', 'Accept',
'MIMEAccept', 'CharsetAccept',
'LanguageAccept', 'RequestCacheControl',
'ResponseCacheControl', 'ETags', 'HeaderSet',
'WWWAuthenticate', 'Authorization',
'FileMultiDict', 'CallbackDict', 'FileStorage',
'OrderedMultiDict', 'ImmutableOrderedMultiDict'
],
'werkzeug.useragents': ['UserAgent'],
'werkzeug.http': ['parse_etags', 'parse_date', 'http_date', 'cookie_date',
'parse_cache_control_header', 'is_resource_modified',
'parse_accept_header', 'parse_set_header', 'quote_etag',
'unquote_etag', 'generate_etag', 'dump_header',
'parse_list_header', 'parse_dict_header',
'parse_authorization_header',
'parse_www_authenticate_header', 'remove_entity_headers',
'is_entity_header', 'remove_hop_by_hop_headers',
'parse_options_header', 'dump_options_header',
'is_hop_by_hop_header', 'unquote_header_value',
'quote_header_value', 'HTTP_STATUS_CODES'],
'werkzeug.wrappers': ['BaseResponse', 'BaseRequest', 'Request', 'Response',
'AcceptMixin', 'ETagRequestMixin',
'ETagResponseMixin', 'ResponseStreamMixin',
'CommonResponseDescriptorsMixin', 'UserAgentMixin',
'AuthorizationMixin', 'WWWAuthenticateMixin',
'CommonRequestDescriptorsMixin'],
'werkzeug.security': ['generate_password_hash', 'check_password_hash'],
# the undocumented easteregg ;-)
'werkzeug._internal': ['_easteregg']
}
# modules that should be imported when accessed as attributes of werkzeug
attribute_modules = frozenset(['exceptions', 'routing', 'script'])
object_origins = {}
for module, items in iteritems(all_by_module):
for item in items:
object_origins[item] = module
class module(ModuleType):
"""Automatically import objects from the modules."""
def __getattr__(self, name):
if name in object_origins:
module = __import__(object_origins[name], None, None, [name])
for extra_name in all_by_module[module.__name__]:
setattr(self, extra_name, getattr(module, extra_name))
return getattr(module, name)
elif name in attribute_modules:
__import__('werkzeug.' + name)
return ModuleType.__getattribute__(self, name)
def __dir__(self):
"""Just show what we want to show."""
result = list(new_module.__all__)
result.extend(('__file__', '__path__', '__doc__', '__all__',
'__docformat__', '__name__', '__path__',
'__package__', '__version__'))
return result
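# For example, the first attribute access below triggers the lazy import
# (`redirect` is mapped to werkzeug.utils in all_by_module above):
#
#   import werkzeug
#   response = werkzeug.redirect('https://example.com/')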
# keep a reference to this module so that it's not garbage collected
old_module = sys.modules['werkzeug']
# setup the new module and patch it into the dict of loaded modules
new_module = sys.modules['werkzeug'] = module('werkzeug')
new_module.__dict__.update({
'__file__': __file__,
'__package__': 'werkzeug',
'__path__': __path__,
'__doc__': __doc__,
'__version__': __version__,
'__all__': tuple(object_origins) + tuple(attribute_modules),
'__docformat__': 'restructuredtext en'
})
# Due to bootstrapping issues we need to import exceptions here.
# Don't ask :-(
__import__('werkzeug.exceptions')
|
Laimiux/mydeatree | refs/heads/master | django/dispatch/__init__.py | 571 | """Multi-consumer multi-producer dispatching mechanism
Originally based on pydispatch (BSD) http://pypi.python.org/pypi/PyDispatcher/2.0.1
See license.txt for original license.
Heavily modified for Django's purposes.
"""
from django.dispatch.dispatcher import Signal, receiver |
msabramo/ansible | refs/heads/devel | lib/ansible/modules/network/nxos/nxos_snmp_community.py | 19 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: nxos_snmp_community
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages SNMP community configs.
description:
- Manages SNMP community configuration.
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
options:
community:
description:
- Case-sensitive community string.
required: true
access:
description:
- Access type for community.
required: false
default: null
choices: ['ro','rw']
group:
description:
- Group to which the community belongs.
required: false
default: null
acl:
description:
- ACL name to filter snmp requests.
required: false
default: 1
state:
description:
- Manage the state of the resource.
required: true
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp community is configured
- nxos_snmp_community:
community: TESTING7
group: network-operator
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"group": "network-operator"}
existing:
    description: k/v pairs of existing snmp community
    returned: always
    type: dict
    sample: {}
end_state:
description: k/v pairs of snmp community after module execution
returned: always
type: dict or null
sample: {"acl": "None", "group": "network-operator"}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server community TESTING7 group network-operator"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.nxos import get_config, load_config, run_commands
from ansible.module_utils.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netcfg import CustomNetworkConfig
import re
def execute_show_command(command, module, command_type='cli_show'):
if module.params['transport'] == 'cli':
if 'show run' not in command:
command += ' | json'
cmds = [command]
body = run_commands(module, cmds)
elif module.params['transport'] == 'nxapi':
cmds = [command]
body = run_commands(module, cmds)
return body
def apply_key_map(key_map, table):
new_dict = {}
for key, value in table.items():
new_key = key_map.get(key)
if new_key:
value = table.get(key)
if value:
new_dict[new_key] = str(value)
else:
new_dict[new_key] = value
return new_dict
def flatten_list(command_lists):
flat_command_list = []
for command in command_lists:
if isinstance(command, list):
flat_command_list.extend(command)
else:
flat_command_list.append(command)
return flat_command_list
def get_snmp_groups(module):
command = 'show snmp group'
data = execute_show_command(command, module)[0]
group_list = []
try:
group_table = data['TABLE_role']['ROW_role']
for group in group_table:
group_list.append(group['role_name'])
except (KeyError, AttributeError):
return group_list
return group_list
def get_snmp_community(module, find_filter=None):
command = 'show snmp community'
data = execute_show_command(command, module)[0]
community_dict = {}
community_map = {
'grouporaccess': 'group',
'aclfilter': 'acl'
}
try:
community_table = data['TABLE_snmp_community']['ROW_snmp_community']
for each in community_table:
community = apply_key_map(community_map, each)
key = each['community_name']
community_dict[key] = community
except (KeyError, AttributeError):
return community_dict
if find_filter:
find = community_dict.get(find_filter, None)
if find_filter is None or find is None:
return {}
else:
fix_find = {}
for (key, value) in find.items():
if isinstance(value, str):
fix_find[key] = value.strip()
else:
fix_find[key] = value
return fix_find
def config_snmp_community(delta, community):
CMDS = {
'group': 'snmp-server community {0} group {group}',
'acl': 'snmp-server community {0} use-acl {acl}'
}
commands = []
for k, v in delta.items():
cmd = CMDS.get(k).format(community, **delta)
if cmd:
commands.append(cmd)
cmd = None
return commands
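# For example, config_snmp_community({'group': 'network-operator'}, 'TESTING7')
# returns ['snmp-server community TESTING7 group network-operator'], matching
# the sample shown in the RETURN block above.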
def main():
argument_spec = dict(
community=dict(required=True, type='str'),
access=dict(choices=['ro', 'rw']),
group=dict(type='str'),
acl=dict(type='str'),
state=dict(choices=['absent', 'present'], default='present'),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=[['access', 'group']],
mutually_exclusive=[['access', 'group']],
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
access = module.params['access']
group = module.params['group']
community = module.params['community']
acl = module.params['acl']
state = module.params['state']
if access:
if access == 'ro':
group = 'network-operator'
elif access == 'rw':
group = 'network-admin'
# group check - ensure group being configured exists on the device
configured_groups = get_snmp_groups(module)
if group not in configured_groups:
        module.fail_json(msg="group not on switch. "
                         "Please add before moving forward.")
existing = get_snmp_community(module, community)
args = dict(group=group, acl=acl)
proposed = dict((k, v) for k, v in args.items() if v is not None)
delta = dict(set(proposed.items()).difference(existing.items()))
changed = False
end_state = existing
commands = []
if state == 'absent':
if existing:
command = "no snmp-server community {0}".format(community)
commands.append(command)
cmds = flatten_list(commands)
elif state == 'present':
if delta:
command = config_snmp_community(dict(delta), community)
commands.append(command)
cmds = flatten_list(commands)
if cmds:
if module.check_mode:
module.exit_json(changed=True, commands=cmds)
else:
changed = True
load_config(module, cmds)
end_state = get_snmp_community(module, community)
if 'configure' in cmds:
cmds.pop(0)
results = {}
results['proposed'] = proposed
results['existing'] = existing
results['end_state'] = end_state
results['updates'] = cmds
results['changed'] = changed
results['warnings'] = warnings
module.exit_json(**results)
if __name__ == '__main__':
main()
|
lukeiwanski/tensorflow-opencl | refs/heads/master | tensorflow/python/platform/sysconfig.py | 74 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""System configuration library.
@@get_include
@@get_lib
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path as _os_path
from tensorflow.python.util.all_util import remove_undocumented
# pylint: disable=g-import-not-at-top
def get_include():
"""Get the directory containing the TensorFlow C++ header files.
Returns:
The directory as string.
"""
# Import inside the function.
# sysconfig is imported from the tensorflow module, so having this
# import at the top would cause a circular import, resulting in
# the tensorflow module missing symbols that come after sysconfig.
import tensorflow as tf
return _os_path.join(_os_path.dirname(tf.__file__), 'include')
def get_lib():
"""Get the directory containing the TensorFlow framework library.
Returns:
The directory as string.
"""
import tensorflow as tf
return _os_path.join(_os_path.dirname(tf.__file__), 'core')
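# A typical use is passing these paths to a compiler when building a custom
# op (a sketch only; flags vary by toolchain and zero_out.cc is a
# hypothetical source file):
#
#   TF_INC=$(python -c 'import tensorflow as tf; print(tf.sysconfig.get_include())')
#   g++ -std=c++11 -shared zero_out.cc -o zero_out.so -fPIC -I $TF_INC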
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
|
txm/make-good | refs/heads/master | django/contrib/localflavor/pl/forms.py | 273 | """
Polish-specific form helpers
"""
import re
from django.forms import ValidationError
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
from django.core.validators import EMPTY_VALUES
class PLProvinceSelect(Select):
"""
A select widget with list of Polish administrative provinces as choices.
"""
def __init__(self, attrs=None):
from pl_voivodeships import VOIVODESHIP_CHOICES
super(PLProvinceSelect, self).__init__(attrs, choices=VOIVODESHIP_CHOICES)
class PLCountySelect(Select):
"""
A select widget with list of Polish administrative units as choices.
"""
def __init__(self, attrs=None):
from pl_administrativeunits import ADMINISTRATIVE_UNIT_CHOICES
super(PLCountySelect, self).__init__(attrs, choices=ADMINISTRATIVE_UNIT_CHOICES)
class PLPESELField(RegexField):
"""
A form field that validates as Polish Identification Number (PESEL).
Checks the following rules:
* the length consist of 11 digits
* has a valid checksum
The algorithm is documented at http://en.wikipedia.org/wiki/PESEL.
"""
default_error_messages = {
'invalid': _(u'National Identification Number consists of 11 digits.'),
'checksum': _(u'Wrong checksum for the National Identification Number.'),
}
def __init__(self, *args, **kwargs):
super(PLPESELField, self).__init__(r'^\d{11}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLPESELField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1)
result = 0
for i in range(len(number)):
result += int(number[i]) * multiple_table[i]
return result % 10 == 0
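        # Worked example (44051401359 is the sample number from the Wikipedia
        # article cited above): sum(digit * weight) = 4*1 + 4*3 + 0*7 + 5*9 +
        # 1*1 + 4*3 + 0*7 + 1*9 + 3*1 + 5*3 + 9*1 = 110, and 110 % 10 == 0,
        # so the checksum is valid.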
class PLNIPField(RegexField):
"""
A form field that validates as Polish Tax Number (NIP).
Valid forms are: XXX-XXX-YY-YY or XX-XX-YYY-YYY.
Checksum algorithm based on documentation at
http://wipos.p.lodz.pl/zylla/ut/nip-rego.html
"""
default_error_messages = {
'invalid': _(u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX or XX-XX-XXX-XXX.'),
'checksum': _(u'Wrong checksum for the Tax Number (NIP).'),
}
def __init__(self, *args, **kwargs):
super(PLNIPField, self).__init__(r'^\d{3}-\d{3}-\d{2}-\d{2}$|^\d{2}-\d{2}-\d{3}-\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLNIPField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub("[-]", "", value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (6, 5, 7, 2, 3, 4, 5, 6, 7)
result = 0
for i in range(len(number)-1):
result += int(number[i]) * multiple_table[i]
result %= 11
if result == int(number[-1]):
return True
else:
return False
class PLREGONField(RegexField):
"""
A form field that validates its input is a REGON number.
Valid regon number consists of 9 or 14 digits.
See http://www.stat.gov.pl/bip/regon_ENG_HTML.htm for more information.
"""
default_error_messages = {
'invalid': _(u'National Business Register Number (REGON) consists of 9 or 14 digits.'),
'checksum': _(u'Wrong checksum for the National Business Register Number (REGON).'),
}
def __init__(self, *args, **kwargs):
super(PLREGONField, self).__init__(r'^\d{9,14}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLREGONField, self).clean(value)
if value in EMPTY_VALUES:
return u''
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
weights = (
(8, 9, 2, 3, 4, 5, 6, 7, -1),
(2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8, -1),
(8, 9, 2, 3, 4, 5, 6, 7, -1, 0, 0, 0, 0, 0),
)
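        # The first table validates 9-digit numbers and the second 14-digit
        # numbers; the third re-checks the 9-digit REGON embedded at the start
        # of a 14-digit number (hence the trailing zero weights).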
weights = [table for table in weights if len(table) == len(number)]
for table in weights:
checksum = sum([int(n) * w for n, w in zip(number, table)])
if checksum % 11 % 10:
return False
return bool(weights)
class PLPostalCodeField(RegexField):
"""
A form field that validates as Polish postal code.
Valid code is XX-XXX where X is digit.
"""
default_error_messages = {
'invalid': _(u'Enter a postal code in the format XX-XXX.'),
}
def __init__(self, *args, **kwargs):
super(PLPostalCodeField, self).__init__(r'^\d{2}-\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
|
tri-CSI/Bioinfo | refs/heads/master | projects/mirem/server_additional_scripts/populate_tables.py | 1 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Load hsa and mmu miRNA database on to sqlite.
#
# Author: TRAN MINH TRI
# Date: 26 Nov 2015
#
CREATE_SCRIPT='''
DROP TABLE IF EXISTS Mirna;
CREATE TABLE Mirna (
id INTEGER PRIMARY KEY AUTOINCREMENT,
mirna TEXT UNIQUE,
sequence TEXT
);
DROP TABLE IF EXISTS Genes;
CREATE TABLE Genes (
id INTEGER PRIMARY KEY AUTOINCREMENT,
genename TEXT,
species TEXT,
UNIQUE (genename, species)
);
DROP TABLE IF EXISTS Databases;
CREATE TABLE Databases (
id INTEGER PRIMARY KEY AUTOINCREMENT,
dbname TEXT UNIQUE,
desc TEXT
);
DROP TABLE IF EXISTS GeneMirnaRec;
CREATE TABLE GeneMirnaRec (
mirna_id INTEGER,
database_id INTEGER,
gene_id INTEGER,
FOREIGN KEY (mirna_id) REFERENCES Mirna(id),
FOREIGN KEY (database_id) REFERENCES Databases(id),
FOREIGN KEY (gene_id) REFERENCES Genes(id),
PRIMARY KEY (mirna_id, database_id, gene_id)
);
'''
import sqlite3 as lite
import argparse
# Get filenames to build database
parser = argparse.ArgumentParser( description="Load hsa and mmu miRNA database on to sqlite." )
parser.add_argument( 'human', metavar="<hsa-mir-ref>", type = str, help='human miRNA ref file' )
parser.add_argument( 'mouse', metavar="<mmu-mir-ref>", type = str, help='mouse miRNA ref file' )
parser.add_argument( 'humandb', metavar="<hsa-ref>", type = str, help='human miRNA dblist file' )
parser.add_argument( 'mousedb', metavar="<mmu-ref>", type = str, help='mouse miRNA dblist file' )
args = parser.parse_args()
fileList = {}
# A helper function
def load_db ( dblist ):
with open ( dblist ) as ref:
for line in ref:
if '#' in line: continue
f, dbname = line.strip().split(":")
fileList [ dbname ] = f
# cur.execute( "INSERT OR IGNORE INTO Databases VALUES (null, ?, null)", (dbname, ) )
def insert_one_db ( db, species ):
with open ( fileList [ db ] ) as dbfile:
for line in dbfile:
try:
gene, mirna = line.strip().split("\t")
except:
continue
cur.execute( "INSERT OR IGNORE INTO Genes VALUES (null, ?, ?)", (gene, species) )
cur.execute( "SELECT id FROM Genes WHERE genename = ? ", (gene, ) )
gene_id = cur.fetchone()[0]
cur.execute( "SELECT id FROM Mirna WHERE mirna = ? ", (mirna, ) )
mirna = cur.fetchone()
if mirna is None:
continue
else:
mirna_id = mirna[0]
cur.execute( "SELECT id FROM Databases WHERE dbname = ? ", (db, ) )
db_id = cur.fetchone()[0]
cur.execute( "INSERT OR IGNORE INTO GeneMirnaRec VALUES (?, ?, ?)", (mirna_id, db_id, gene_id) )
def insert_mirnas( table, refFile ):
query = "INSERT INTO " + table + " VALUES (null,?,?) "
with open( refFile ) as ref:
for line in ref:
mirna, dbs, seq = line.strip().split(";")
cur.execute( query, (mirna, seq) )
# start the connection
con = lite.connect('mirem.db')
with con:
cur = con.cursor()
# cur.executescript( CREATE_SCRIPT )
# insert_mirnas( "Mirna", args.human )
# insert_mirnas( "Mirna", args.mouse )
# load_db( args.humandb )
#
#for db in fileList:
# with con:
# cur = con.cursor()
# insert_one_db ( db, "hsa" )
load_db( args.mousedb )
for db in fileList:
with con:
cur = con.cursor()
insert_one_db ( db, "mmu" )
|
AnishShah/tensorflow | refs/heads/master | tensorflow/python/platform/benchmark.py | 10 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to run benchmarks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import os
import re
import sys
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import test_log_pb2
from tensorflow.python.client import timeline
from tensorflow.python.platform import app
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export
# When a subclass of the Benchmark class is created, it is added to
# the registry automatically
GLOBAL_BENCHMARK_REGISTRY = set()
# Environment variable that determines whether benchmarks are written.
# See also tensorflow/core/util/reporter.h TestReporter::kTestReporterEnv.
TEST_REPORTER_TEST_ENV = "TEST_REPORT_FILE_PREFIX"
def _global_report_benchmark(
name, iters=None, cpu_time=None, wall_time=None,
throughput=None, extras=None):
"""Method for recording a benchmark directly.
Args:
name: The BenchmarkEntry name.
iters: (optional) How many iterations were run
cpu_time: (optional) Total cpu time in seconds
wall_time: (optional) Total wall time in seconds
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
Raises:
TypeError: if extras is not a dict.
IOError: if the benchmark output file already exists.
"""
if extras is not None:
if not isinstance(extras, dict):
raise TypeError("extras must be a dict")
logging.info("Benchmark [%s] iters: %d, wall_time: %g, cpu_time: %g,"
"throughput: %g %s", name, iters if iters is not None else -1,
wall_time if wall_time is not None else -1, cpu_time if
cpu_time is not None else -1, throughput if
throughput is not None else -1, str(extras) if extras else "")
entries = test_log_pb2.BenchmarkEntries()
entry = entries.entry.add()
entry.name = name
if iters is not None:
entry.iters = iters
if cpu_time is not None:
entry.cpu_time = cpu_time
if wall_time is not None:
entry.wall_time = wall_time
if throughput is not None:
entry.throughput = throughput
if extras is not None:
for (k, v) in extras.items():
if isinstance(v, numbers.Number):
entry.extras[k].double_value = v
else:
entry.extras[k].string_value = str(v)
test_env = os.environ.get(TEST_REPORTER_TEST_ENV, None)
if test_env is None:
# Reporting was not requested, just print the proto
print(str(entries))
return
serialized_entry = entries.SerializeToString()
mangled_name = name.replace("/", "__")
output_path = "%s%s" % (test_env, mangled_name)
if gfile.Exists(output_path):
raise IOError("File already exists: %s" % output_path)
with gfile.GFile(output_path, "wb") as out:
out.write(serialized_entry)
class _BenchmarkRegistrar(type):
"""The Benchmark class registrar. Used by abstract Benchmark class."""
def __new__(mcs, clsname, base, attrs):
newclass = super(mcs, _BenchmarkRegistrar).__new__(
mcs, clsname, base, attrs)
if not newclass.is_abstract():
GLOBAL_BENCHMARK_REGISTRY.add(newclass)
return newclass
class Benchmark(six.with_metaclass(_BenchmarkRegistrar, object)):
"""Abstract class that provides helper functions for running benchmarks.
Any class subclassing this one is immediately registered in the global
benchmark registry.
Only methods whose names start with the word "benchmark" will be run during
benchmarking.
"""
@classmethod
def is_abstract(cls):
# mro: (_BenchmarkRegistrar, Benchmark) means this is Benchmark
return len(cls.mro()) <= 2
def _get_name(self, overwrite_name=None):
"""Returns full name of class and method calling report_benchmark."""
# Find the caller method (outermost Benchmark class)
stack = tf_inspect.stack()
calling_class = None
name = None
for frame in stack[::-1]:
f_locals = frame[0].f_locals
f_self = f_locals.get("self", None)
if isinstance(f_self, Benchmark):
calling_class = f_self # Get the outermost stack Benchmark call
name = frame[3] # Get the method name
break
if calling_class is None:
raise ValueError("Unable to determine calling Benchmark class.")
# Use the method name, or overwrite_name is provided.
name = overwrite_name or name
# Prefix the name with the class name.
class_name = type(calling_class).__name__
name = "%s.%s" % (class_name, name)
return name
def report_benchmark(
self,
iters=None,
cpu_time=None,
wall_time=None,
throughput=None,
extras=None,
name=None):
"""Report a benchmark.
Args:
iters: (optional) How many iterations were run
cpu_time: (optional) median or mean cpu time in seconds.
wall_time: (optional) median or mean wall time in seconds.
throughput: (optional) Throughput (in MB/s)
extras: (optional) Dict mapping string keys to additional benchmark info.
Values may be either floats or values that are convertible to strings.
name: (optional) Override the BenchmarkEntry name with `name`.
Otherwise it is inferred from the top-level method name.
"""
name = self._get_name(overwrite_name=name)
_global_report_benchmark(
name=name, iters=iters, cpu_time=cpu_time, wall_time=wall_time,
throughput=throughput, extras=extras)
@tf_export("test.Benchmark")
class TensorFlowBenchmark(Benchmark):
"""Abstract class that provides helpers for TensorFlow benchmarks."""
@classmethod
def is_abstract(cls):
# mro: (_BenchmarkRegistrar, Benchmark, TensorFlowBenchmark) means
# this is TensorFlowBenchmark.
return len(cls.mro()) <= 3
def run_op_benchmark(self,
sess,
op_or_tensor,
feed_dict=None,
burn_iters=2,
min_iters=10,
store_trace=False,
store_memory_usage=True,
name=None,
extras=None,
mbs=0):
"""Run an op or tensor in the given session. Report the results.
Args:
sess: `Session` object to use for timing.
op_or_tensor: `Operation` or `Tensor` to benchmark.
feed_dict: A `dict` of values to feed for each op iteration (see the
`feed_dict` parameter of `Session.run`).
burn_iters: Number of burn-in iterations to run.
min_iters: Minimum number of iterations to use for timing.
store_trace: Boolean, whether to run an extra untimed iteration and
store the trace of iteration in returned extras.
The trace will be stored as a string in Google Chrome trace format
in the extras field "full_trace_chrome_format". Note that trace
will not be stored in test_log_pb2.TestResults proto.
store_memory_usage: Boolean, whether to run an extra untimed iteration,
calculate memory usage, and store that in extras fields.
name: (optional) Override the BenchmarkEntry name with `name`.
Otherwise it is inferred from the top-level method name.
extras: (optional) Dict mapping string keys to additional benchmark info.
Values may be either floats or values that are convertible to strings.
mbs: (optional) The number of megabytes moved by this op, used to
calculate the ops throughput.
Returns:
A `dict` containing the key-value pairs that were passed to
`report_benchmark`. If `store_trace` option is used, then
`full_chrome_trace_format` will be included in return dictionary even
though it is not passed to `report_benchmark` with `extras`.
"""
for _ in range(burn_iters):
sess.run(op_or_tensor, feed_dict=feed_dict)
deltas = [None] * min_iters
for i in range(min_iters):
start_time = time.time()
sess.run(op_or_tensor, feed_dict=feed_dict)
end_time = time.time()
delta = end_time - start_time
deltas[i] = delta
extras = extras if extras is not None else {}
unreported_extras = {}
if store_trace or store_memory_usage:
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
sess.run(op_or_tensor, feed_dict=feed_dict,
options=run_options, run_metadata=run_metadata)
tl = timeline.Timeline(run_metadata.step_stats)
if store_trace:
unreported_extras["full_trace_chrome_format"] = (
tl.generate_chrome_trace_format())
if store_memory_usage:
step_stats_analysis = tl.analyze_step_stats(show_memory=True)
allocator_maximums = step_stats_analysis.allocator_maximums
for k, v in allocator_maximums.items():
extras["allocator_maximum_num_bytes_%s" % k] = v.num_bytes
def _median(x):
if not x:
return -1
s = sorted(x)
l = len(x)
lm1 = l - 1
return (s[l//2] + s[lm1//2]) / 2.0
median_delta = _median(deltas)
benchmark_values = {
"iters": min_iters,
"wall_time": median_delta,
"extras": extras,
"name": name,
"throughput": mbs / median_delta
}
self.report_benchmark(**benchmark_values)
benchmark_values["extras"].update(unreported_extras)
return benchmark_values
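# A minimal usage sketch (hypothetical benchmark class; `tf.add(1, 2)` stands
# in for whatever op you want to time, and only methods whose names start
# with "benchmark" are picked up by the runner below):
#
#   class MyAddBenchmark(TensorFlowBenchmark):
#
#     def benchmarkAdd(self):
#       with tf.Session() as sess:
#         op = tf.add(1, 2)
#         self.run_op_benchmark(sess, op, min_iters=100, name="add_1_2")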
def _run_benchmarks(regex):
"""Run benchmarks that match regex `regex`.
This function goes through the global benchmark registry, and matches
benchmark class and method names of the form
`module.name.BenchmarkClass.benchmarkMethod` to the given regex.
If a method matches, it is run.
Args:
regex: The string regular expression to match Benchmark classes against.
"""
registry = list(GLOBAL_BENCHMARK_REGISTRY)
# Match benchmarks in registry against regex
for benchmark in registry:
benchmark_name = "%s.%s" % (benchmark.__module__, benchmark.__name__)
attrs = dir(benchmark)
# Don't instantiate the benchmark class unless necessary
benchmark_instance = None
for attr in attrs:
if not attr.startswith("benchmark"):
continue
candidate_benchmark_fn = getattr(benchmark, attr)
if not callable(candidate_benchmark_fn):
continue
full_benchmark_name = "%s.%s" % (benchmark_name, attr)
if regex == "all" or re.search(regex, full_benchmark_name):
# Instantiate the class if it hasn't been instantiated
benchmark_instance = benchmark_instance or benchmark()
# Get the method tied to the class
instance_benchmark_fn = getattr(benchmark_instance, attr)
# Call the instance method
instance_benchmark_fn()
def benchmarks_main(true_main, argv=None):
"""Run benchmarks as declared in argv.
Args:
true_main: True main function to run if benchmarks are not requested.
argv: the command line arguments (if None, uses sys.argv).
"""
if argv is None:
argv = sys.argv
found_arg = [arg for arg in argv
if arg.startswith("--benchmarks=")
or arg.startswith("-benchmarks=")]
if found_arg:
# Remove --benchmarks arg from sys.argv
argv.remove(found_arg[0])
regex = found_arg[0].split("=")[1]
app.run(lambda _: _run_benchmarks(regex), argv=argv)
else:
true_main()
|
mozillazg/Unidecode | refs/heads/master | unidecode/x000.py | 113 | data = (
# Code points u+007f and below are equivalent to ASCII and are handled by a
# special case in the code. Hence they are not present in this table.
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
' ', # 0xa0
'!', # 0xa1
'C/', # 0xa2
# Not "GBP" - Pound Sign is used for more than just British Pounds.
'PS', # 0xa3
'$?', # 0xa4
'Y=', # 0xa5
'|', # 0xa6
'SS', # 0xa7
'"', # 0xa8
'(c)', # 0xa9
'a', # 0xaa
'<<', # 0xab
'!', # 0xac
'', # 0xad
'(r)', # 0xae
'-', # 0xaf
'deg', # 0xb0
'+-', # 0xb1
# These might be combined with other superscript digits (u+2070 - u+2079)
'2', # 0xb2
'3', # 0xb3
'\'', # 0xb4
'u', # 0xb5
'P', # 0xb6
'*', # 0xb7
',', # 0xb8
'1', # 0xb9
'o', # 0xba
'>>', # 0xbb
'1/4', # 0xbc
'1/2', # 0xbd
'3/4', # 0xbe
'?', # 0xbf
'A', # 0xc0
'A', # 0xc1
'A', # 0xc2
'A', # 0xc3
# Not "AE" - used in languages other than German
'A', # 0xc4
'A', # 0xc5
'AE', # 0xc6
'C', # 0xc7
'E', # 0xc8
'E', # 0xc9
'E', # 0xca
'E', # 0xcb
'I', # 0xcc
'I', # 0xcd
'I', # 0xce
'I', # 0xcf
'D', # 0xd0
'N', # 0xd1
'O', # 0xd2
'O', # 0xd3
'O', # 0xd4
'O', # 0xd5
# Not "OE" - used in languages other than German
'O', # 0xd6
'x', # 0xd7
'O', # 0xd8
'U', # 0xd9
'U', # 0xda
'U', # 0xdb
# Not "UE" - used in languages other than German
'U', # 0xdc
'Y', # 0xdd
'Th', # 0xde
'ss', # 0xdf
'a', # 0xe0
'a', # 0xe1
'a', # 0xe2
'a', # 0xe3
# Not "ae" - used in languages other than German
'a', # 0xe4
'a', # 0xe5
'ae', # 0xe6
'c', # 0xe7
'e', # 0xe8
'e', # 0xe9
'e', # 0xea
'e', # 0xeb
'i', # 0xec
'i', # 0xed
'i', # 0xee
'i', # 0xef
'd', # 0xf0
'n', # 0xf1
'o', # 0xf2
'o', # 0xf3
'o', # 0xf4
'o', # 0xf5
# Not "oe" - used in languages other than German
'o', # 0xf6
'/', # 0xf7
'o', # 0xf8
'u', # 0xf9
'u', # 0xfa
'u', # 0xfb
# Not "ue" - used in languages other than German
'u', # 0xfc
'y', # 0xfd
'th', # 0xfe
'y', # 0xff
)
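if __name__ == '__main__':
    # Hedged illustration (not part of the original table): entries are
    # indexed by code point, so Latin-1 characters map directly. Real
    # lookups go through the unidecode() API rather than this tuple.
    assert data[0xe9] == 'e'    # u+00e9 LATIN SMALL LETTER E WITH ACUTE
    assert data[0xdf] == 'ss'   # u+00df LATIN SMALL LETTER SHARP S
    assert data[0xbd] == '1/2'  # u+00bd VULGAR FRACTION ONE HALF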
|
hehongliang/tensorflow | refs/heads/master | tensorflow/python/saved_model/constants.py | 2 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Constants for SavedModel save and restore operations.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.util.tf_export import tf_export
# Subdirectory name containing the asset files.
ASSETS_DIRECTORY = "assets"
tf_export(
"saved_model.ASSETS_DIRECTORY",
v1=[
"saved_model.ASSETS_DIRECTORY", "saved_model.constants.ASSETS_DIRECTORY"
]).export_constant(__name__, "ASSETS_DIRECTORY")
# CollectionDef key containing SavedModel assets.
ASSETS_KEY = "saved_model_assets"
tf_export(
"saved_model.ASSETS_KEY",
v1=["saved_model.ASSETS_KEY",
"saved_model.constants.ASSETS_KEY"]).export_constant(
__name__, "ASSETS_KEY")
# CollectionDef key for the legacy init op.
LEGACY_INIT_OP_KEY = "legacy_init_op"
tf_export(
v1=[
"saved_model.LEGACY_INIT_OP_KEY",
"saved_model.constants.LEGACY_INIT_OP_KEY"
]).export_constant(__name__, "LEGACY_INIT_OP_KEY")
# CollectionDef key for the SavedModel main op.
MAIN_OP_KEY = "saved_model_main_op"
tf_export(
"saved_model.MAIN_OP_KEY",
v1=["saved_model.MAIN_OP_KEY",
"saved_model.constants.MAIN_OP_KEY"]).export_constant(
__name__, "MAIN_OP_KEY")
# CollectionDef key for the SavedModel train op.
# Not exported while export_all_saved_models is in contrib.
TRAIN_OP_KEY = "saved_model_train_op"
# Schema version for SavedModel.
SAVED_MODEL_SCHEMA_VERSION = 1
tf_export(
"saved_model.SAVED_MODEL_SCHEMA_VERSION",
v1=[
"saved_model.SAVED_MODEL_SCHEMA_VERSION",
"saved_model.constants.SAVED_MODEL_SCHEMA_VERSION"
]).export_constant(__name__, "SAVED_MODEL_SCHEMA_VERSION")
# File name for SavedModel protocol buffer.
SAVED_MODEL_FILENAME_PB = "saved_model.pb"
tf_export(
"saved_model.SAVED_MODEL_FILENAME_PB",
v1=[
"saved_model.SAVED_MODEL_FILENAME_PB",
"saved_model.constants.SAVED_MODEL_FILENAME_PB"
]).export_constant(__name__, "SAVED_MODEL_FILENAME_PB")
# File name for text version of SavedModel protocol buffer.
SAVED_MODEL_FILENAME_PBTXT = "saved_model.pbtxt"
tf_export(
"saved_model.SAVED_MODEL_FILENAME_PBTXT",
v1=[
"saved_model.SAVED_MODEL_FILENAME_PBTXT",
"saved_model.constants.SAVED_MODEL_FILENAME_PBTXT"
]).export_constant(__name__, "SAVED_MODEL_FILENAME_PBTXT")
# File name for json format of SavedModel.
# Not exported while keras_saved_model is in contrib.
SAVED_MODEL_FILENAME_JSON = "saved_model.json"
# Subdirectory name containing the variables/checkpoint files.
VARIABLES_DIRECTORY = "variables"
tf_export(
"saved_model.VARIABLES_DIRECTORY",
v1=[
"saved_model.VARIABLES_DIRECTORY",
"saved_model.constants.VARIABLES_DIRECTORY"
]).export_constant(__name__, "VARIABLES_DIRECTORY")
# File name used for variables.
VARIABLES_FILENAME = "variables"
tf_export(
"saved_model.VARIABLES_FILENAME",
v1=[
"saved_model.VARIABLES_FILENAME",
"saved_model.constants.VARIABLES_FILENAME"
]).export_constant(__name__, "VARIABLES_FILENAME")
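if __name__ == '__main__':
    # Hedged illustration (not part of the original module): the constants
    # above are typically joined onto an export directory to locate the
    # SavedModel artifacts. The path below is hypothetical.
    import os
    export_dir = '/tmp/my_model'
    print(os.path.join(export_dir, SAVED_MODEL_FILENAME_PB))
    print(os.path.join(export_dir, VARIABLES_DIRECTORY, VARIABLES_FILENAME))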
|
ddayguerrero/blogme | refs/heads/master | flask/lib/python3.4/site-packages/sqlalchemy/dialects/mysql/pymysql.py | 59 | # mysql/pymysql.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pymysql
:name: PyMySQL
:dbapi: pymysql
:connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
[?<options>]
:url: http://www.pymysql.org/
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply
to the pymysql driver as well.
"""
from .mysqldb import MySQLDialect_mysqldb
from ...util import py3k
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
driver = 'pymysql'
description_encoding = None
# generally, these two values should be both True
# or both False. PyMySQL unicode tests pass all the way back
# to 0.4 either way. See [ticket:3337]
supports_unicode_statements = True
supports_unicode_binds = True
@classmethod
def dbapi(cls):
return __import__('pymysql')
if py3k:
def _extract_error_code(self, exception):
if isinstance(exception.args[0], Exception):
exception = exception.args[0]
return exception.args[0]
dialect = MySQLDialect_pymysql
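if __name__ == '__main__':
    # Hedged usage sketch (not part of the original module): the documented
    # connectstring selects this dialect. Credentials and host below are
    # hypothetical; no connection is opened until the engine is first used,
    # though pymysql must be importable.
    from sqlalchemy import create_engine
    engine = create_engine('mysql+pymysql://scott:tiger@localhost/test')
    print(engine.dialect.driver)  # 'pymysql'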
|
jmesteve/medical | refs/heads/master | openerp/oemedical/web_doc_oemedical/controllers/main.py | 5 | # -*- coding: utf-8 -*-
import ast
import base64
import csv
import glob
import itertools
import logging
import operator
import datetime
import hashlib
import os
import re
import simplejson
import time
import urllib2
import xmlrpclib
import zlib
from xml.etree import ElementTree
from cStringIO import StringIO
import babel.messages.pofile
import werkzeug.utils
import werkzeug.wrappers
import openerp
import web
def read_base_doc(name):
    '''
    Resolve ``name`` against this module's directory so relative document
    paths become absolute, and return the file's contents.
    TODO: It is not finished yet
    '''
    whereami = os.path.dirname(os.path.realpath(__file__))
    path = os.path.join(whereami, name)
    if not os.path.exists(path):
        return ''
    with open(path) as doc:
        return doc.read()
class OeMedicalDoc(web.http.Controller):
_cp_path='/oemedicaldoc'
@web.http.httprequest
def index(self, req, s_action=None, **kw):
        html = read_base_doc("index.html")
return html
|
BertieJim/thefuck | refs/heads/master | tests/rules/test_heroku_not_command.py | 16 | import pytest
from tests.utils import Command
from thefuck.rules.heroku_not_command import match, get_new_command
def suggest_stderr(cmd):
return ''' ! `{}` is not a heroku command.
! Perhaps you meant `logs`, `pg`.
! See `heroku help` for a list of available commands.'''.format(cmd)
no_suggest_stderr = ''' ! `aaaaa` is not a heroku command.
! See `heroku help` for a list of available commands.'''
@pytest.mark.parametrize('cmd', ['log', 'pge'])
def test_match(cmd):
assert match(
Command('heroku {}'.format(cmd), stderr=suggest_stderr(cmd)))
@pytest.mark.parametrize('script, stderr', [
('cat log', suggest_stderr('log')),
('heroku aaa', no_suggest_stderr)])
def test_not_match(script, stderr):
assert not match(Command(script, stderr=stderr))
@pytest.mark.parametrize('cmd, result', [
('log', ['heroku logs', 'heroku pg']),
('pge', ['heroku pg', 'heroku logs'])])
def test_get_new_command(cmd, result):
command = Command('heroku {}'.format(cmd), stderr=suggest_stderr(cmd))
assert get_new_command(command) == result
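# For orientation only — a minimal, hedged sketch of the kind of rule these
# tests exercise. The real implementation lives in
# thefuck/rules/heroku_not_command.py and may differ in detail; note that
# suggestions must be ranked by similarity to the mistyped subcommand for
# the parametrized orderings above to hold.
#
#   import difflib
#   import re
#
#   def match(command):
#       return ('heroku' in command.script
#               and 'is not a heroku command' in command.stderr
#               and 'Perhaps you meant' in command.stderr)
#
#   def get_new_command(command):
#       wrong = command.script.split()[1]
#       suggestions = re.findall(r'`([^`]+)`', command.stderr.split('\n')[1])
#       suggestions.sort(key=lambda s: -difflib.SequenceMatcher(None, wrong, s).ratio())
#       return ['heroku {}'.format(s) for s in suggestions]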
|
ingted/voltdb | refs/heads/master | tests/test_apps/adhocbenchmark/scripts/generate.py | 7 | #!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2015 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import sys, os
from xml.etree.ElementTree import ElementTree
root_dir = os.path.dirname(os.path.dirname(__file__))
config_path = os.path.join(root_dir, 'config.xml')
project_path = os.path.join(root_dir, 'project.xml')
ddl_path = os.path.join(root_dir, 'ddl.sql')
ddl_start = '-- DDL generated for ad hoc benchmark - this will be overwritten'
ddl_end = None
ddl_pk = 'CONSTRAINT PK_%s PRIMARY KEY (%s)'
ddl_partition = '-- PARTITION BY ( %s )'
table_start = '\nCREATE TABLE %s\n('
table_end = ');'
project_start = '''\
<?xml version="1.0"?>
<project>
<info>
<name>Ad Hoc Benchmark</name>
<version>1.0</version>
<description>Analyzes ad hoc query overhead.</description>
</info>
<database>
<schemas>
<schema path='ddl.sql' />
</schemas>
<partitions>'''
project_partition=" <partition table='%s' column='%s' />"
project_end = '''\
</partitions>
</database>
</project>'''
announcement_separator = '-' * 42
class Fatal(Exception):
def __init__(self, *msgs):
self.msgs = msgs
Exception.__init__(self)
def display(tag, *msgs):
for msg in msgs:
if tag:
sys.stderr.write('%s: ' % tag)
sys.stderr.write(str(msg))
sys.stderr.write('\n')
def announce(*msgs):
print(announcement_separator)
for msg in msgs:
print(msg)
print(announcement_separator)
class Column(object):
def __init__(self, name, type, modifiers):
self.name = name
self.type = type
self.modifiers = modifiers
class Table(object):
def __init__(self, name, nvariations, columns, ipartcol, ipkcol):
self.name = name
self.nvariations = nvariations
self.columns = columns
self.partcol = ipartcol
self.pkcol = ipkcol
def create_table(name, prefix, nvariations, ncolumns, ipartcol, ipkcol):
    # Add an 'id' column usable as a primary/partition key and a 'parent_id'
    # column usable as a foreign/non-partition key.
columns = [Column('id', 'bigint', 'NOT NULL'), Column('parent_id', 'bigint', None)]
for i in range(ncolumns):
columns.append(Column('%s_%d' % (prefix, i+1), 'varchar(32)', None))
return Table(name, nvariations, columns, ipartcol, ipkcol)
def get_tables():
print 'Reading %s...' % config_path
tables = []
et = ElementTree()
try:
xmlschema = et.parse(config_path).find('schema')
if xmlschema is None:
raise Fatal('Missing <schema> element')
xmltables = xmlschema.findall('table')
if xmltables is None or len(xmltables) == 0:
raise Fatal('No <table> elements found')
for xmltable in xmltables:
name = xmltable.attrib['name']
if not name:
raise Fatal('Table name is required')
prefix = xmltable.attrib['prefix']
if not prefix:
raise Fatal('Column prefix is required')
nvariations = int(xmltable.attrib.get('variations', 1))
if nvariations < 1:
raise Fatal('Bad variations value: %d' % nvariations)
ncolumns = int(xmltable.attrib['columns'])
if ncolumns < 1:
raise Fatal('Bad columns value: %d' % ncolumns)
try:
ipartcol = int(xmltable.attrib['partitioncolumn'])
if ipartcol < 0 or ipartcol >= ncolumns:
raise Fatal('Bad partition column index value: %d' % ipartcol)
except KeyError, e:
ipartcol = None # The partitioncolumn attribute is optional, tables default to replicated
try:
ipkcol = int(xmltable.attrib['primarykey'])
if ipkcol < 0 or ipkcol >= ncolumns:
raise Fatal('Bad primary key column index value: %d' % ipkcol)
except KeyError, e:
ipkcol = None # The primarykey attribute is optional, tables default to having no pk index
tables.append(create_table(name, prefix, nvariations, ncolumns, ipartcol, ipkcol))
except (OSError, IOError), e:
raise Fatal('Failed to parse %s' % config_path, e)
except KeyError, e:
raise Fatal('Missing table attribute', e)
except ValueError, e:
raise Fatal('Bad table attribute value', e)
return tables
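# A hedged example of the config.xml shape get_tables() expects, assuming a
# root element such as <config>; the attribute names come from the parsing
# above, the values are purely illustrative:
#
#   <config>
#     <schema>
#       <table name="narrow" prefix="n" variations="2" columns="4"
#              partitioncolumn="0" primarykey="0"/>
#       <table name="wide_replicated" prefix="w" columns="8"/>
#     </schema>
#   </config>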
def generate_project(tables):
yield project_start
for table in tables:
if table.partcol is not None:
for variation in range(table.nvariations):
yield project_partition % ('%s_%d' % (table.name, variation+1),
table.columns[table.partcol].name)
yield project_end
def generate_comma_separated_list(generator, indent, comment):
first = True
for line in generator:
if first:
preamble = indent
first = False
elif comment is not None and line.startswith(comment):
preamble = indent
else:
preamble = ',%s' % indent[1:]
yield '%s%s' % (preamble, line)
def generate_table_ddl_lines(table, name):
for column in table.columns:
if column.modifiers:
modifiers = ' %s' % column.modifiers
else:
modifiers = ''
yield '%s %s%s' % (column.name, column.type, modifiers)
if table.pkcol is not None:
yield ddl_pk % (name, table.columns[table.pkcol].name)
if table.partcol is not None:
yield ddl_partition % (table.columns[table.partcol].name)
def generate_ddl(tables):
yield ddl_start
for table in tables:
for variation in range(table.nvariations):
name = '%s_%d' % (table.name, variation + 1)
yield table_start % name
generator = generate_table_ddl_lines(table, name)
for line in generate_comma_separated_list(generator, ' ', '--'):
yield line
yield table_end
yield ddl_end
def generate_file(path, generator):
print 'Generating %s...' % path
try:
try:
f = open(path, 'w')
except (OSError, IOError), e:
raise Fatal('Failed to open %s' % path, e)
for text_block in generator:
if text_block is not None:
try:
f.write('%s\n' % text_block)
except (OSError, IOError), e:
raise Fatal('Failed to write to %s' % path, e)
finally:
f.close()
def main():
tables = get_tables()
generate_file(project_path, generate_project(tables))
generate_file(ddl_path, generate_ddl(tables))
if __name__ == '__main__':
try:
main()
announce('Schema/project generation succeeded.')
except Fatal, e:
display('FATAL', *e.msgs)
announce('Schema/project generation failed.')
sys.exit(1)
|
ity/pants | refs/heads/master | src/python/pants/option/errors.py | 12 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.option.arg_splitter import GLOBAL_SCOPE
class OptionsError(Exception):
"""An options system-related error."""
class RegistrationError(OptionsError):
"""An error at option registration time."""
def __init__(self, msg, scope, option):
super(RegistrationError, self).__init__(
'{} [option {} in {}].'.format(msg, option,
'global scope' if scope == GLOBAL_SCOPE else 'scope {}'.format(scope)))
class ParseError(OptionsError):
"""An error at flag parsing time."""
# Subclasses of RegistrationError. The distinction between them is useful mainly for testing
# that the error we get is the one we expect.
# TODO: Similar thing for ParseError.
def mk_registration_error(msg):
class Anon(RegistrationError):
def __init__(self, scope, option, **msg_format_args):
super(Anon, self).__init__(msg.format(**msg_format_args), scope, option)
return Anon
BooleanOptionNameWithNo = mk_registration_error('Boolean option names cannot start with --no.')
FrozenRegistration = mk_registration_error('Cannot register an option on a scope after registering '
'on any of its inner scopes.')
ImplicitValIsNone = mk_registration_error('Implicit value cannot be None.')
InvalidKwarg = mk_registration_error('Invalid registration kwarg {kwarg}.')
InvalidMemberType = mk_registration_error('member_type {member_type} not allowed.')
MemberTypeNotAllowed = mk_registration_error('member_type not allowed on option with type {type_}. '
'It may only be specified if type=list.')
NoOptionNames = mk_registration_error('No option names provided.')
OptionAlreadyRegistered = mk_registration_error('An option with this name was already registered '
'on this scope.')
OptionNameDash = mk_registration_error('Option name must begin with a dash.')
OptionNameDoubleDash = mk_registration_error('Long option name must begin with a double-dash.')
RecursiveSubsystemOption = mk_registration_error("Subsystem option cannot specify 'recursive'. "
"Subsystem options are always recursive.")
Shadowing = mk_registration_error('Option shadows an option in scope {outer_scope}')
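if __name__ == '__main__':
    # Hedged demo (not part of the original module): each generated class is
    # raised with (scope, option) plus the kwargs its message template needs.
    try:
        raise InvalidKwarg('compile.java', '--jvm-options', kwarg='bogus')
    except RegistrationError as e:
        print(e)  # Invalid registration kwarg bogus. [option --jvm-options in scope compile.java].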
|
raymondxyang/tensorflow | refs/heads/master | tensorflow/compiler/aot/tests/make_test_graphs.py | 43 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate tensorflow graphs for testing tfcompile."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import app
from tensorflow.python.training import saver as saver_lib
FLAGS = None
def tfadd(_):
x = constant_op.constant([1], name='x_const')
y = constant_op.constant([2], name='y_const')
math_ops.add(x, y, name='x_y_sum')
def tfadd_with_ckpt(out_dir):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = variables.Variable(constant_op.constant([0]), name='y_saved')
math_ops.add(x, y, name='x_y_sum')
init_op = variables.initialize_all_variables()
saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V1)
with session.Session() as sess:
sess.run(init_op)
sess.run(y.assign(y + 42))
# Without the checkpoint, the variable won't be set to 42.
ckpt = '%s/test_graph_tfadd_with_ckpt.ckpt' % out_dir
saver.save(sess, ckpt)
def tfadd_with_ckpt_saver(out_dir):
x = array_ops.placeholder(dtypes.int32, name='x_hold')
y = variables.Variable(constant_op.constant([0]), name='y_saved')
math_ops.add(x, y, name='x_y_sum')
init_op = variables.initialize_all_variables()
saver = saver_lib.Saver(name='abcprefix', write_version=saver_pb2.SaverDef.V1)
with session.Session() as sess:
sess.run(init_op)
sess.run(y.assign(y + 42))
# Without the checkpoint, the variable won't be set to 42.
ckpt_file = '%s/test_graph_tfadd_with_ckpt_saver.ckpt' % out_dir
saver.save(sess, ckpt_file)
# Without the SaverDef, the restore op won't be named correctly.
saver_file = '%s/test_graph_tfadd_with_ckpt_saver.saver' % out_dir
with open(saver_file, 'wb') as f:
f.write(saver.as_saver_def().SerializeToString())
def tfgather(_):
params = array_ops.placeholder(dtypes.float32, name='params')
indices = array_ops.placeholder(dtypes.int32, name='indices')
array_ops.gather(params, indices, name='gather_output')
def tfmatmul(_):
x = array_ops.placeholder(dtypes.float32, name='x_hold')
y = array_ops.placeholder(dtypes.float32, name='y_hold')
math_ops.matmul(x, y, name='x_y_prod')
def tfmatmulandadd(_):
# This tests multiple outputs.
x = array_ops.placeholder(dtypes.float32, name='x_hold')
y = array_ops.placeholder(dtypes.float32, name='y_hold')
math_ops.matmul(x, y, name='x_y_prod')
math_ops.add(x, y, name='x_y_sum')
def tffunction(_):
@function.Defun(dtypes.int32, dtypes.int32)
def test_func(a, b):
return a + b
x = constant_op.constant([1], name='x_const')
y = constant_op.constant([2], name='y_const')
test_func(x, y, name='func_call') # pylint: disable=unexpected-keyword-arg
def tfsplits(_):
"""A more complex graph, including splits."""
x = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='x')
y = array_ops.placeholder(dtypes.float32, shape=[2, 2], name='y')
for _ in range(3):
x0, x1 = array_ops.split(x, 2, 0)
y0, y1 = array_ops.split(y, 2, 0)
x0 += 1
y0 += 1
z = math_ops.matmul(x, y, name='x_y_prod')
a = array_ops.concat([x0, y1], axis=0, name='concat_x0_y1')
b = array_ops.concat([y0, x1], axis=0, name='concat_y0_x1')
x = math_ops.matmul(a, b, name='a_b')
y = math_ops.add(x, z)
array_ops.identity(y, name='result')
def write_graph(build_graph, out_dir):
"""Build a graph using build_graph and write it out."""
g = ops.Graph()
with g.as_default():
build_graph(out_dir)
filename = '%s/test_graph_%s.pb' % (out_dir, build_graph.__name__)
with open(filename, 'wb') as f:
f.write(g.as_graph_def().SerializeToString())
def main(_):
write_graph(tfadd, FLAGS.out_dir)
write_graph(tfadd_with_ckpt, FLAGS.out_dir)
write_graph(tfadd_with_ckpt_saver, FLAGS.out_dir)
write_graph(tfgather, FLAGS.out_dir)
write_graph(tfmatmul, FLAGS.out_dir)
write_graph(tfmatmulandadd, FLAGS.out_dir)
write_graph(tffunction, FLAGS.out_dir)
write_graph(tfsplits, FLAGS.out_dir)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register('type', 'bool', lambda v: v.lower() == 'true')
parser.add_argument(
'--out_dir',
type=str,
default='',
help='Output directory for graphs, checkpoints and savers.')
FLAGS, unparsed = parser.parse_known_args()
app.run(main=main, argv=[sys.argv[0]] + unparsed)
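# Hedged CLI sketch: invoking this script writes one serialized GraphDef per
# builder into --out_dir, e.g.
#
#   python make_test_graphs.py --out_dir=/tmp/tfcompile_graphs
#
# yielding files such as test_graph_tfmatmul.pb, plus the checkpoint and
# saver artifacts produced by the *_with_ckpt builders. The directory name
# is illustrative.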
|
resmo/ansible | refs/heads/devel | lib/ansible/modules/remote_management/redfish/redfish_info.py | 6 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017-2018 Dell EMC Inc.
# GNU General Public License v3.0+ (see LICENSE or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: redfish_info
version_added: "2.7"
short_description: Manages Out-Of-Band controllers using Redfish APIs
description:
- Builds Redfish URIs locally and sends them to remote OOB controllers to
get information back.
- Information retrieved is placed in a location specified by the user.
- This module was called C(redfish_facts) before Ansible 2.9, returning C(ansible_facts).
Note that the M(redfish_info) module no longer returns C(ansible_facts)!
options:
category:
required: false
description:
- List of categories to execute on OOB controller
default: ['Systems']
type: list
command:
required: false
description:
- List of commands to execute on OOB controller
type: list
baseuri:
required: true
description:
- Base URI of OOB controller
type: str
username:
required: true
description:
- User for authentication with OOB controller
type: str
version_added: "2.8"
password:
required: true
description:
- Password for authentication with OOB controller
type: str
timeout:
description:
- Timeout in seconds for URL requests to OOB controller
default: 10
type: int
version_added: '2.8'
author: "Jose Delarosa (@jose-delarosa)"
'''
EXAMPLES = '''
- name: Get CPU inventory
redfish_info:
category: Systems
command: GetCpuInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.cpu.entries | to_nice_json }}"
- name: Get CPU model
redfish_info:
category: Systems
command: GetCpuInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.cpu.entries.0.Model }}"
- name: Get memory inventory
redfish_info:
category: Systems
command: GetMemoryInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- name: Get fan inventory with a timeout of 20 seconds
redfish_info:
category: Chassis
command: GetFanInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
timeout: 20
register: result
- name: Get Virtual Media information
redfish_info:
category: Manager
command: GetVirtualMedia
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.virtual_media.entries | to_nice_json }}"
- name: Get Volume Inventory
redfish_info:
category: Systems
command: GetVolumeInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.volume.entries | to_nice_json }}"
- name: Get Session information
redfish_info:
category: Sessions
command: GetSessions
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts.session.entries | to_nice_json }}"
- name: Get default inventory information
redfish_info:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
register: result
- debug:
msg: "{{ result.redfish_facts | to_nice_json }}"
- name: Get several inventories
redfish_info:
category: Systems
command: GetNicInventory,GetBiosAttributes
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get default system inventory and user information
redfish_info:
category: Systems,Accounts
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get default system, user and firmware information
redfish_info:
category: ["Systems", "Accounts", "Update"]
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get Manager NIC inventory information
redfish_info:
category: Manager
command: GetManagerNicInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get boot override information
redfish_info:
category: Systems
command: GetBootOverride
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get chassis inventory
redfish_info:
category: Chassis
command: GetChassisInventory
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get all information available in the Manager category
redfish_info:
category: Manager
command: all
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get firmware update capability information
redfish_info:
category: Update
command: GetFirmwareUpdateCapabilities
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
- name: Get all information available in all categories
redfish_info:
category: all
command: all
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
'''
RETURN = '''
result:
description: different results depending on task
returned: always
type: dict
sample: List of CPUs on system
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.redfish_utils import RedfishUtils
CATEGORY_COMMANDS_ALL = {
"Systems": ["GetSystemInventory", "GetPsuInventory", "GetCpuInventory",
"GetMemoryInventory", "GetNicInventory",
"GetStorageControllerInventory", "GetDiskInventory", "GetVolumeInventory",
"GetBiosAttributes", "GetBootOrder", "GetBootOverride"],
"Chassis": ["GetFanInventory", "GetPsuInventory", "GetChassisPower", "GetChassisThermals", "GetChassisInventory"],
"Accounts": ["ListUsers"],
"Sessions": ["GetSessions"],
"Update": ["GetFirmwareInventory", "GetFirmwareUpdateCapabilities"],
"Manager": ["GetManagerNicInventory", "GetVirtualMedia", "GetLogs"],
}
CATEGORY_COMMANDS_DEFAULT = {
"Systems": "GetSystemInventory",
"Chassis": "GetFanInventory",
"Accounts": "ListUsers",
"Update": "GetFirmwareInventory",
"Sessions": "GetSessions",
"Manager": "GetManagerNicInventory"
}
def main():
result = {}
category_list = []
module = AnsibleModule(
argument_spec=dict(
category=dict(type='list', default=['Systems']),
command=dict(type='list'),
baseuri=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10)
),
supports_check_mode=False
)
is_old_facts = module._name == 'redfish_facts'
if is_old_facts:
module.deprecate("The 'redfish_facts' module has been renamed to 'redfish_info', "
"and the renamed one no longer returns ansible_facts", version='2.13')
# admin credentials used for authentication
creds = {'user': module.params['username'],
'pswd': module.params['password']}
# timeout
timeout = module.params['timeout']
# Build root URI
root_uri = "https://" + module.params['baseuri']
rf_utils = RedfishUtils(creds, root_uri, timeout, module)
# Build Category list
if "all" in module.params['category']:
for entry in CATEGORY_COMMANDS_ALL:
category_list.append(entry)
else:
# one or more categories specified
category_list = module.params['category']
for category in category_list:
command_list = []
# Build Command list for each Category
if category in CATEGORY_COMMANDS_ALL:
if not module.params['command']:
                # No command specified --> use the default for this category
command_list.append(CATEGORY_COMMANDS_DEFAULT[category])
elif "all" in module.params['command']:
for entry in range(len(CATEGORY_COMMANDS_ALL[category])):
command_list.append(CATEGORY_COMMANDS_ALL[category][entry])
# one or more commands
else:
command_list = module.params['command']
# Verify that all commands are valid
for cmd in command_list:
# Fail if even one command given is invalid
if cmd not in CATEGORY_COMMANDS_ALL[category]:
module.fail_json(msg="Invalid Command: %s" % cmd)
else:
# Fail if even one category given is invalid
module.fail_json(msg="Invalid Category: %s" % category)
# Organize by Categories / Commands
if category == "Systems":
# execute only if we find a Systems resource
resource = rf_utils._find_systems_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetSystemInventory":
result["system"] = rf_utils.get_multi_system_inventory()
elif command == "GetCpuInventory":
result["cpu"] = rf_utils.get_multi_cpu_inventory()
elif command == "GetMemoryInventory":
result["memory"] = rf_utils.get_multi_memory_inventory()
elif command == "GetNicInventory":
result["nic"] = rf_utils.get_multi_nic_inventory(category)
elif command == "GetStorageControllerInventory":
result["storage_controller"] = rf_utils.get_multi_storage_controller_inventory()
elif command == "GetDiskInventory":
result["disk"] = rf_utils.get_multi_disk_inventory()
elif command == "GetVolumeInventory":
result["volume"] = rf_utils.get_multi_volume_inventory()
elif command == "GetBiosAttributes":
result["bios_attribute"] = rf_utils.get_multi_bios_attributes()
elif command == "GetBootOrder":
result["boot_order"] = rf_utils.get_multi_boot_order()
elif command == "GetBootOverride":
result["boot_override"] = rf_utils.get_multi_boot_override()
elif category == "Chassis":
# execute only if we find Chassis resource
resource = rf_utils._find_chassis_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetFanInventory":
result["fan"] = rf_utils.get_fan_inventory()
elif command == "GetPsuInventory":
result["psu"] = rf_utils.get_psu_inventory()
elif command == "GetChassisThermals":
result["thermals"] = rf_utils.get_chassis_thermals()
elif command == "GetChassisPower":
result["chassis_power"] = rf_utils.get_chassis_power()
elif command == "GetChassisInventory":
result["chassis"] = rf_utils.get_chassis_inventory()
elif category == "Accounts":
# execute only if we find an Account service resource
resource = rf_utils._find_accountservice_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "ListUsers":
result["user"] = rf_utils.list_users()
elif category == "Update":
# execute only if we find UpdateService resources
resource = rf_utils._find_updateservice_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetFirmwareInventory":
result["firmware"] = rf_utils.get_firmware_inventory()
elif command == "GetFirmwareUpdateCapabilities":
result["firmware_update_capabilities"] = rf_utils.get_firmware_update_capabilities()
elif category == "Sessions":
# execute only if we find SessionService resources
resource = rf_utils._find_sessionservice_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetSessions":
result["session"] = rf_utils.get_sessions()
elif category == "Manager":
# execute only if we find a Manager service resource
resource = rf_utils._find_managers_resource()
if resource['ret'] is False:
module.fail_json(msg=resource['msg'])
for command in command_list:
if command == "GetManagerNicInventory":
result["manager_nics"] = rf_utils.get_multi_nic_inventory(category)
elif command == "GetVirtualMedia":
result["virtual_media"] = rf_utils.get_multi_virtualmedia()
elif command == "GetLogs":
result["log"] = rf_utils.get_logs()
# Return data back
if is_old_facts:
module.exit_json(ansible_facts=dict(redfish_facts=result))
else:
module.exit_json(redfish_facts=result)
if __name__ == '__main__':
main()
|
twoporeguys/librpc | refs/heads/master | tools/rpcdoc/librpc_doc/__init__.py | 1 | #!/usr/bin/env python3
#
# Copyright 2017 Two Pore Guys, Inc.
# All rights reserved
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES LOSS OF USE, DATA, OR PROFITS OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import os
import argparse
import glob
import shutil
import mako
import mako.template
import mako.lookup
import librpc
from pkg_resources import resource_string
curdir = os.path.abspath(os.path.dirname(__file__))
templates_path = os.path.join(curdir, 'templates')
lookup = mako.lookup.TemplateLookup(directories=[templates_path])
CLASS_NAMES = {
librpc.TypeClass.STRUCT: 'struct',
librpc.TypeClass.UNION: 'union',
librpc.TypeClass.ENUM: 'enum',
librpc.TypeClass.TYPEDEF: 'type',
librpc.TypeClass.BUILTIN: 'builtin'
}
def generate_index(name, typing):
entries = typing.types
types = (t for t in entries if t.is_builtin)
typedefs = (t for t in entries if t.is_typedef)
structures = (t for t in entries if t.is_struct or t.is_union or t.is_enum)
t = lookup.get_template('index.mako')
return t.render(
name=name,
interfaces=sorted(typing.interfaces, key=lambda t: t.name),
types=sorted(types, key=lambda t: t.name),
typedefs=sorted(typedefs, key=lambda t: t.name),
structs=sorted(structures, key=lambda t: t.name)
)
def generate_interface(name, iface):
methods = (m for m in iface.members if type(m) is librpc.Method)
properties = (m for m in iface.members if type(m) is librpc.Property)
events = ()
t = lookup.get_template('interface.mako')
return t.render(
name=name,
iface=iface,
methods=methods,
properties=properties,
events=events
)
def generate_type(name, typ):
t = lookup.get_template('type.mako')
return t.render(name=name, t=typ)
def generate_file(outdir, name, contents):
with open(os.path.join(outdir, name), 'w') as f:
f.write(contents)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--name', metavar='NAME', help='Project name', required=True)
parser.add_argument('--system', action='store_true', help='Use system types')
parser.add_argument('-f', metavar='FILE', action='append', help='IDL file')
parser.add_argument('-d', metavar='DIRECTORY', action='append', help='IDL directory')
parser.add_argument('-o', metavar='DIRECTORY', help='Output directory', required=True)
args = parser.parse_args()
typing = librpc.Typing()
outdir = args.o
paths = []
if args.system:
for p in ('/usr/share/idl', '/usr/local/share/idl'):
for f in glob.iglob('{0}/*.yaml'.format(p)):
typing.read_file(f)
paths.append(f)
for f in args.f or []:
typing.read_file(f)
paths.append(f)
for d in args.d or []:
for f in glob.iglob('{0}/**/*.yaml'.format(d), recursive=True):
try:
typing.read_file(f)
paths.append(f)
except librpc.LibException as err:
print('Processing {0} failed: {1}'.format(f, str(err)))
continue
for p in paths:
typing.load_types(p)
if not os.path.exists(args.o):
os.makedirs(args.o, exist_ok=True)
# Copy the CSS file
shutil.copy(os.path.join(curdir, 'assets/main.css'), outdir)
for t in typing.types:
generate_file(outdir, 'type-{0}.html'.format(t.name), generate_type(args.name, t))
for i in typing.interfaces:
generate_file(outdir, 'interface-{0}.html'.format(i.name), generate_interface(args.name, i))
generate_file(outdir, 'index.html', generate_index(args.name, typing))
if __name__ == '__main__':
main()
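# Hedged CLI sketch — the installed entry-point name is an assumption:
#
#   librpc-doc --name "My Project" --system -d idl/ -o build/doc
#
# This renders index.html, one interface-<name>.html per interface and one
# type-<name>.html per type into build/doc, alongside the copied main.css.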
|
Snergster/virl-salt | refs/heads/master | virl/files/virl_setup.py | 1 | #! /usr/bin/env python
# CLI menu for VIRL setup
# Copyright (c) 2017, Cisco Systems, Inc.
# All rights reserved.
from __future__ import print_function
import sys
import subprocess
import signal
import os
import datetime
import netaddr
import configparser
import re
import tempfile
import shutil
VINSTALL_CFG = '/etc/virl.ini'
OPENSTACK_SERVICES = [
'nova-api.service',
'nova-compute.service',
'nova-consoleauth.service',
'nova-cert.service',
'nova-conductor.service',
'nova-novncproxy.service',
'nova-serialproxy.service',
'neutron-dhcp-agent.service',
    'neutron-linuxbridge-cleanup.service',
'neutron-server.service',
'neutron-l3-agent.service',
'neutron-metadata-agent.service',
'neutron-linuxbridge-agent.service',
'glance-api.service',
'glance-registry.service',
'keystone.service',
    'mysql.service',
'rabbitmq-server.service'
]
VIRL_SERVICES = [
'virl-std.service',
'virl-tap-counter.service',
'redis.service',
'redis-server.service',
'virl-uwm.service',
'virl-webmux.service',
'ank-cisco-webserver.service',
'virl-vis-mux.service',
'virl-vis-processor.service',
'virl-vis-webserver.service'
]
LOG_PATHS = [
'/var/local/virl/logs/std_server.log',
'/var/local/virl/logs/uwm_server.log',
'/var/log/virl_tap_counter.log'
]
UKSM_KERNEL_PATH = '/sys/kernel/mm/uksm/run'
class InvalidState(Exception):
pass
class Config(object):
""" Handler for configuration files """
# TODO: missing section support, works only for default section
def __init__(self, path, default_section=None):
self._default_section = default_section if default_section else 'DEFAULT'
self._path = path
self._config = {
self._default_section:
self.read_vinstall_configuration(
path=self.path,
section=self.default_section,
),
}
def read_vinstall_configuration(self, path, section):
safeparser = configparser.ConfigParser()
safeparser.optionxform = str # Preserve keys' case
safeparser.read(path)
return dict(safeparser.items(section))
def update_vinstall_configuration(self, path, fields):
"""Update the VIRL installer config file
:param fields: fields and their values to be set
:type fields: dict (str to str)
"""
with open(path) as virl_cfg:
virl_cfg = virl_cfg.read()
written = set()
for field, value in fields.iteritems():
            pattern = '^(?:#|\\s)*' + re.escape(field) + '\\s*:.*?$'
            # Replace the first occurrence of the field (even a commented-out
            # one) in place; later duplicates of the same field are removed.
            sub = (lambda match: written.add(field) or '%s: %s' % (field, value) if field not in written else '')
            virl_cfg = re.sub(pattern, sub, virl_cfg, flags=re.M)
for field, value in fields.iteritems():
if field not in written:
virl_cfg += '\n# Appended by Setup Tool\n%s: %s\n' % (field, value)
with tempfile.NamedTemporaryFile() as temp_file:
temp_file.write(virl_cfg)
temp_file.flush()
shutil.copy(temp_file.name, path)
@property
def path(self):
return self._path
@property
def default_section(self):
return self._default_section
@property
def config(self):
return self._config
def get_section(self, section=None):
section = section if section else self.default_section
return self.config[section]
def get(self, field, section=None, default=None):
section = section if section else self.default_section
return self.config.get(section, {}).get(field, default)
def set(self, field, value, section=None):
section = section if section else self.default_section
if section not in self.config:
self.config[section] = {}
self.config[section][field] = value
def has_section(self, section):
return section in self.config
def has(self, field, section=None):
section = section if section else self.default_section
return self.has_section(section) and field in self.config[section][field]
def delete(self, field, section=None):
section = section if section else self.default_section
del self.config[section][field]
def write(self, section=None):
section = section if section else self.default_section
# TODO: missing support for sections
self.update_vinstall_configuration(
path=self.path,
fields=self.config[section],
)
def user_input(self, field, prompt, default, validator=None):
if not validator:
validator = lambda value: (True, value)
while True:
current = self.get(field=field) or default
value = raw_input("%s (default: %s): " % (prompt, current)) or current
(is_valid, value) = validator(value)
if is_valid:
self.set(field=field, value=value)
return value
else:
print("{value} is not a valid value for {field}".format(
value=value,
field=field)
)
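# Hedged usage sketch for Config (values are illustrative):
#
#   config = Config(path=VINSTALL_CFG)
#   ntp = config.user_input(field='ntp_server', prompt='NTP Server',
#                           default='ntp.ubuntu.com')
#   config.write()  # persists the DEFAULT section back to /etc/virl.ini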
class Validators(object):
@staticmethod
def is_ipv4_addr(addr):
try:
addr = str(netaddr.IPAddress(addr))
return (True, addr)
except:
return False, addr
@staticmethod
def is_ipv4_netmask(netmask):
if "." not in netmask:
            # Convert a prefix length (e.g. "24") to dotted-quad notation.
            netmask = '.'.join([str((0xffffffff << (32 - int(netmask)) >> i) & 0xff) for i in [24, 16, 8, 0]])
try:
netmask = netaddr.IPAddress(netmask)
str(netmask)
return (True , str(netmask))
except:
return (False, netmask)
@staticmethod
def is_in_network(addr, network):
if addr.value >= network.first and addr.value <= network.last:
return True
else:
return False
@staticmethod
def is_broadcast(addr, network):
if network.broadcast == addr:
return True
return False
def ask_if_permanent():
print('Make this change permanent ? y/N')
inp = str(raw_input()).lower() or 'n'
if inp not in {'y', 'n'}:
raise InvalidState
return inp == 'y'
def is_sudo():
return os.getuid() == 0
def uksm_enabled_kernel():
return os.path.exists(UKSM_KERNEL_PATH)
def uksm_enabled():
    # Note: '1' in the run file means UKSM is active, so this returns True
    # when UKSM is currently inactive (the menu then offers to enable it).
    with open(UKSM_KERNEL_PATH, 'r') as uksm_run_file:
        uksm_state = uksm_run_file.read().strip()
    return uksm_state != '1'
def run_command(command, on_success_msg=''):
print('')
print('running command {}'.format(command))
try:
subprocess.check_call(command, shell=True)
except subprocess.CalledProcessError as exc:
print('failed with stderr: {}'.format(exc.output))
else:
if on_success_msg:
print(on_success_msg)
def run_salt_state(state):
print('')
print('running salt state {}'.format(state))
cmd = 'salt-call --local state.sls {} --state_verbose=False --state-output=terse'.format(state)
success_msg = ''
run_command(cmd, success_msg)
def press_return_to_continue(next_state=''):
print('')
print('press return to continue')
raw_input()
return next_state
def read_next_state(previous_state, default='0'):
print('Select state (default {}): '.format(default), end='')
inp = raw_input() or default
if len(previous_state) >= 1:
next_state = '{}.{}'.format(previous_state, inp)
else:
next_state = '{}'.format(inp)
print('next_state {}'.format(next_state))
if next_state in STATES:
return next_state
else:
raise InvalidState
def restart_docker():
print('')
print('restarting docker registry')
cmd = 'docker restart registry'
success_msg = 'docker registry restarted'
run_command(cmd, success_msg)
def restart_service(name):
print('')
print('restarting {}'.format(name))
cmd = 'systemctl restart {}'.format(name)
success_msg = '{} restarted'.format(name)
run_command(cmd, success_msg)
def show_status(name):
print('')
print('Status of service {}'.format(name))
cmd = "systemctl --lines=0 --output=short status {} | grep 'Active:\|Memory:\|CPU:'".format(name)
run_command(cmd)
def handle_start():
current_state = ''
print('****** Main menu ******')
print('')
print('1. Network configuration')
print('2. Maintenance')
print('3. Diagnostic')
print('')
print('0. Exit')
return read_next_state(current_state)
def handle_0():
sys.exit()
def handle_1():
current_state = '1'
print('***** Network configuration *****')
print('')
print('*** WARNING: It is recommended that primary interface ***')
print('*** WARNING: configuration is performed on the console ***')
print('*** WARNING: and not over an SSH connection. ***')
print('')
print('1. Switch primary interface')
print('2. Run DHCP on primary interface')
print('3. Static IP configuration on primary interface')
print('4. DNS configuration')
print('5. NTP server')
print('')
print('0. Back')
return read_next_state(current_state)
def handle_1_1():
config = Config(path=VINSTALL_CFG)
config.user_input(
field='public_port',
prompt="Interface",
default='eth0'
)
config.write()
return press_return_to_continue('1')
def handle_1_2():
config = Config(VINSTALL_CFG)
config.set(field='using_dhcp_on_the_public_port', value='True')
config.write()
    run_command('vinstall salt')
run_salt_state('virl.host')
run_salt_state('virl.hostname')
run_salt_state('virl.network.system')
return press_return_to_continue('1')
def handle_1_3():
config = Config(VINSTALL_CFG)
config.set(field='using_dhcp_on_the_public_port', value='False')
# static ip
static_ip = config.user_input(
field='Static_IP',
prompt='Static IP',
default='172.16.6.250',
validator=Validators.is_ipv4_addr
)
# public_netmask
netmask = config.user_input(
field='public_netmask',
prompt='Netmask or netmask bits',
default='24',
validator=Validators.is_ipv4_netmask
)
# public network
netmask_bits = netaddr.IPAddress(netmask).netmask_bits()
network = netaddr.IPNetwork(
"{network}/{netmask_bits}".format(
network=static_ip,
netmask_bits=netmask_bits)
)
config.set(field="public_network", value=str(network.network))
# public_gateway:
gateway = config.user_input(
field='public_gateway',
prompt='Public gateway',
default='172.16.6.1',
validator=Validators.is_ipv4_addr,
)
if not Validators.is_in_network(
addr=netaddr.IPAddress(static_ip),
network=network,
):
print("Incorrect settings: IP address {ip} is not valid for the network {net}/{bits}".format(ip=static_ip, net=str(network.network), bits=netmask_bits))
return press_return_to_continue('1')
if not Validators.is_in_network(
addr=netaddr.IPAddress(gateway),
network=network,
):
print("Incorrect settings: Gateway IP address {ip} is not valid for the network {net}/{bits}".format(ip=gateway, net=str(network), bits=netmask_bits))
return press_return_to_continue('1')
if Validators.is_broadcast(
addr=netaddr.IPAddress(static_ip),
network=network,
):
print("Incorrect settings: IP address {ip} is broadcast address of the network {net}/{bits}".format(ip=static_ip, net=str(network), bits=netmask_bits))
return press_return_to_continue('1')
if Validators.is_broadcast(
addr=netaddr.IPAddress(gateway),
network=network,
):
print("Incorrect settings: Gateway IP address {ip} is broadcast address of the network {net}/{bits}".format(ip=gateway, net=str(network), bits=netmask_bits))
return press_return_to_continue('1')
config.write()
run_command('vinstall salt')
run_salt_state('virl.host')
run_salt_state('virl.hostname')
run_salt_state('virl.network.system')
return press_return_to_continue('1')
def handle_1_4():
config = Config(VINSTALL_CFG)
# first nameserver
config.user_input(
field='first_nameserver',
prompt='First nameserver',
default='8.8.8.8',
validator=Validators.is_ipv4_addr,
)
# second nameserver
config.user_input(
field='second_nameserver',
prompt='Second nameserver',
default='8.8.4.4',
validator=Validators.is_ipv4_addr
)
config.write()
run_command('vinstall salt')
run_salt_state('virl.host')
run_salt_state('virl.network.system')
return press_return_to_continue('1')
def handle_1_5():
config = Config(VINSTALL_CFG)
config.user_input(
field='ntp_server',
prompt='NTP Server',
default='ntp.ubuntu.com'
)
config.write()
run_command('vinstall salt')
run_salt_state('virl.ntp')
return press_return_to_continue('1')
def handle_2():
current_state = '2'
print('***** Maintenance ******')
print('')
print('1. Restart VIRL services')
print('2. Restart OpenStack services')
print('3. Reset OpenStack worker pools')
if uksm_enabled_kernel():
if uksm_enabled():
print('4. Enable UKSM')
else:
print('4. Disable UKSM')
else:
print('4. ! A UKSM-enabled kernel is not running')
print('')
print('0. Back')
return read_next_state(current_state)
def handle_2_1():
current_state = '2.1'
print('***** Maintenance - VIRL services restart *****')
print('')
print('1. All VIRL services')
print('2. STD services (includes redis)')
print('3. UWM services (includes webmux)')
print('4. ANK services')
print('5. Live visualisation services')
print('6. Docker registry')
print('')
print('0. Back')
return read_next_state(current_state)
def handle_2_1_1():
print('***** Restarting all VIRL services *****')
for service in VIRL_SERVICES:
restart_service(service)
restart_docker()
return press_return_to_continue('2.1')
def handle_2_1_2():
print('***** Restarting STD services *****')
restart_service('virl-std.service')
restart_service('virl-tap-counter.service')
restart_service('redis.service')
restart_service('redis-server.service')
return press_return_to_continue('2.1')
def handle_2_1_3():
print('***** Restarting UWM services ******')
restart_service('virl-uwm.service')
restart_service('virl-webmux.service')
return press_return_to_continue('2.1')
def handle_2_1_4():
print('***** Restarting ANK services *****')
restart_service('ank-cisco-webserver.service')
return press_return_to_continue('2.1')
def handle_2_1_5():
print('***** Restarting live visualisation services *****')
restart_service('virl-vis-mux.service')
restart_service('virl-vis-processor.service')
restart_service('virl-vis-webserver.service')
return press_return_to_continue('2.1')
def handle_2_1_6():
print('***** Restarting docker registry *****')
print('')
restart_docker()
return press_return_to_continue('2.1')
def handle_2_2():
current_state = '2.2'
print('***** Maintenance - OpenStack services restart *****')
print('')
print('1. All OpenStack and infrastructure services')
print('2. All Compute Services')
print('3. All Networking Services')
print('4. All Image Services')
print('5. Identity service')
print('6. Infrastructure services (MySql, RabbitMQ)')
print('')
print('0. Back')
return read_next_state(current_state)
def handle_2_2_1():
print('***** Restarting OpenStack services *****')
for service in OPENSTACK_SERVICES:
restart_service(service)
return press_return_to_continue('2.2')
def handle_2_2_2():
print('***** Restarting OpenStack Compute services *****')
restart_service('nova-api.service')
restart_service('nova-compute.service')
restart_service('nova-consoleauth.service')
restart_service('nova-cert.service')
restart_service('nova-conductor.service')
restart_service('nova-novncproxy.service')
restart_service('nova-serialproxy.service')
return press_return_to_continue('2.2')
def handle_2_2_3():
print('***** Restarting OpenStack Networking services *****')
restart_service('neutron-dhcp-agent.service')
    restart_service('neutron-linuxbridge-cleanup.service')
restart_service('neutron-server.service')
restart_service('neutron-l3-agent.service')
restart_service('neutron-metadata-agent.service')
restart_service('neutron-linuxbridge-agent.service')
restart_service('neutron-ovs-cleanup.service')
return press_return_to_continue('2.2')
def handle_2_2_4():
print('***** Restarting OpenStack Image services *****')
restart_service('glance-api.service')
restart_service('glance-registry.service')
return press_return_to_continue('2.2')
def handle_2_2_5():
print('***** Restarting OpenStack Identity services *****')
restart_service('keystone.service')
return press_return_to_continue('2.2')
def handle_2_2_6():
print('***** Restarting OpenStack Infrastructure services *****')
    restart_service('mysql.service')
restart_service('rabbitmq-server.service')
return press_return_to_continue('2.2')
def handle_2_3():
    print('***** Resetting OpenStack worker pools *****')
print('')
print('This may take some time')
print('')
print('Restarting OpenStack workers')
run_salt_state('openstack.worker_pool')
return press_return_to_continue('2')
def handle_2_4():
print('***** UKSM kernel changes *****')
if uksm_enabled_kernel():
if uksm_enabled():
run_command('echo 1 > {}'.format(UKSM_KERNEL_PATH))
if ask_if_permanent():
run_command('sed -i \'s|echo 0> {}|echo 1> {}|\' /etc/rc.local'
.format(UKSM_KERNEL_PATH, UKSM_KERNEL_PATH))
else:
run_command('echo 0 > {}'.format(UKSM_KERNEL_PATH))
if ask_if_permanent():
                run_command('sed -i \'s|echo 1> {}|echo 0> {}|\' /etc/rc.local'
                            .format(UKSM_KERNEL_PATH, UKSM_KERNEL_PATH))
return '2'
def handle_3():
current_state = '3'
print('****** Diagnostics *****')
print('')
print('1. VIRL services status')
print('2. Openstack services status')
print('3. Health check')
print('4. Collect logs')
print('')
print('0. Back')
return read_next_state(current_state)
def handle_3_1():
print('***** VIRL services status *****')
for service in VIRL_SERVICES:
show_status(service)
return press_return_to_continue('3')
def handle_3_2():
print('***** OpenStack services status *****')
for service in OPENSTACK_SERVICES:
show_status(service)
return press_return_to_continue('3')
def handle_3_3():
print('***** Health check *****')
print('')
run_command('virl_health_status')
return press_return_to_continue('3')
def handle_3_4():
print('***** Collecting logs *****')
print('')
print('collecting logs')
files = ''
for log_file in LOG_PATHS:
if os.path.isfile(log_file):
files += ' '
files += log_file
output_file = 'virl_logs_' + datetime.datetime.now().isoformat()
output_path = '/var/www/virl_logs/'
if not os.path.exists(output_path):
os.makedirs(output_path)
run_command('tar -zcvf {}{}.tgz {} 1>/dev/null 2>/dev/null'.format(output_path, output_file, files))
print('logs are in {}{}.tgz'.format(output_path, output_file))
print('you can also access logs on http://<server ip>/diagnostics')
return press_return_to_continue('3')
def fsm():
global STATES
current_state = ''
next_state = ''
subprocess.call('clear')
print('')
print('')
while True:
try:
current_state = next_state
next_state = STATES[current_state]()
except InvalidState:
next_state = current_state
subprocess.call('clear')
            print('Invalid command!')
print('')
else:
subprocess.call('clear')
print('')
print('')
def sigint_handler(signal, frame):
    print('\nCtrl+C detected, exiting.')
sys.exit()
def main():
if not is_sudo():
        print('You must run this script as root: \'sudo virl_setup\'')
handle_0()
signal.signal(signal.SIGINT, sigint_handler)
fsm()
STATES = {
'': handle_start,
'0': handle_0,
'1': handle_1,
'2': handle_2,
'3': handle_3,
'1.0': handle_start,
'1.1': handle_1_1,
'1.2': handle_1_2,
'1.3': handle_1_3,
'1.4': handle_1_4,
'1.5': handle_1_5,
'2.0': handle_start,
'2.1': handle_2_1,
'2.2': handle_2_2,
'2.3': handle_2_3,
'2.4': handle_2_4,
'3.0': handle_start,
'3.1': handle_3_1,
'3.2': handle_3_2,
'3.3': handle_3_3,
'3.4': handle_3_4,
'2.1.0': handle_2,
'2.1.1': handle_2_1_1,
'2.1.2': handle_2_1_2,
'2.1.3': handle_2_1_3,
'2.1.4': handle_2_1_4,
'2.1.5': handle_2_1_5,
'2.1.6': handle_2_1_6,
'2.2.0': handle_2,
'2.2.1': handle_2_2_1,
'2.2.2': handle_2_2_2,
'2.2.3': handle_2_2_3,
'2.2.4': handle_2_2_4,
'2.2.5': handle_2_2_5,
'2.2.6': handle_2_2_6
}
if __name__ == '__main__':
main()
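

# Illustrative sketch (not part of the original script): the menu system above
# is a finite-state machine driven by a dispatch table -- each handler draws a
# screen and returns the key of the next handler to run. The state names below
# are hypothetical and this demo function is never called.
def _demo_fsm():
    def start():
        return 'menu'

    def menu():
        return 'quit'

    def quit_():
        raise SystemExit

    demo_states = {'start': start, 'menu': menu, 'quit': quit_}
    state = 'start'
    while True:
        state = demo_states[state]()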
|
SoftwareMaven/django | refs/heads/master | tests/postgres_tests/test_functions.py | 351 | from datetime import datetime
from time import sleep
from django.contrib.postgres.functions import TransactionNow
from . import PostgreSQLTestCase
from .models import NowTestModel
class TestTransactionNow(PostgreSQLTestCase):
def test_transaction_now(self):
"""
The test case puts everything under a transaction, so two models
updated with a short gap should have the same time.
"""
m1 = NowTestModel.objects.create()
m2 = NowTestModel.objects.create()
NowTestModel.objects.filter(id=m1.id).update(when=TransactionNow())
sleep(0.1)
NowTestModel.objects.filter(id=m2.id).update(when=TransactionNow())
m1.refresh_from_db()
m2.refresh_from_db()
self.assertIsInstance(m1.when, datetime)
self.assertEqual(m1.when, m2.when)
|
mhbu50/erpnext | refs/heads/develop | erpnext/shopping_cart/utils.py | 48 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from erpnext.shopping_cart.doctype.shopping_cart_settings.shopping_cart_settings import is_cart_enabled
def show_cart_count():
if (is_cart_enabled() and
frappe.db.get_value("User", frappe.session.user, "user_type") == "Website User"):
return True
return False
def set_cart_count(login_manager):
role, parties = check_customer_or_supplier()
if role == 'Supplier': return
if show_cart_count():
from erpnext.shopping_cart.cart import set_cart_count
set_cart_count()
def clear_cart_count(login_manager):
if show_cart_count():
frappe.local.cookie_manager.delete_cookie("cart_count")
def update_website_context(context):
cart_enabled = is_cart_enabled()
context["shopping_cart_enabled"] = cart_enabled
def check_customer_or_supplier():
if frappe.session.user:
contact_name = frappe.get_value("Contact", {"email_id": frappe.session.user})
if contact_name:
contact = frappe.get_doc('Contact', contact_name)
for link in contact.links:
if link.link_doctype in ('Customer', 'Supplier'):
return link.link_doctype, link.link_name
return 'Customer', None |
skycucumber/restful | refs/heads/master | python/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py | 168 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
_cfg_read = False
def _ensure_cfg_read():
global _cfg_read
if not _cfg_read:
from distlib.resources import finder
backport_package = __name__.rsplit('.', 1)[0]
_finder = finder(backport_package)
_cfgfile = _finder.find('sysconfig.cfg')
assert _cfgfile, 'sysconfig.cfg exists'
with _cfgfile.as_stream() as s:
_SCHEMES.readfp(s)
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_SCHEMES.set(scheme, 'include', '{srcdir}/Include')
_SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
_cfg_read = True
_SCHEMES = configparser.RawConfigParser()
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
_ensure_cfg_read()
if config.has_section('globals'):
globals = config.items('globals')
else:
globals = tuple()
sections = config.sections()
for section in sections:
if section == 'globals':
continue
for option, value in globals:
if config.has_option(section, option):
continue
config.set(section, option, value)
config.remove_section('globals')
# now expanding local variables defined in the cfg file
#
for section in config.sections():
variables = dict(config.items(section))
def _replacer(matchobj):
name = matchobj.group(1)
if name in variables:
return variables[name]
return matchobj.group(0)
for option, value in config.items(section):
config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
"""In the string `path`, replace tokens like {some.thing} with the
corresponding value from the map `local_vars`.
If there is no corresponding value, leave the token unchanged.
"""
def _replacer(matchobj):
name = matchobj.group(1)
if name in local_vars:
return local_vars[name]
elif name in os.environ:
return os.environ[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, path)
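

# Illustration (not part of the upstream module): tokens found in the map are
# replaced, tokens found in neither the map nor os.environ stay as-is.
#
#     >>> _subst_vars('{base}/lib/{unknown}', {'base': '/usr'})
#     '/usr/lib/{unknown}'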
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _SCHEMES.items(scheme):
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def format_value(value, vars):
def _replacer(matchobj):
name = matchobj.group(1)
if name in vars:
return vars[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
    _variable_rx = re.compile(r"([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if (name.startswith('PY_') and
name[3:] in renamed_variables):
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
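

# Illustrative example (not part of the upstream module): given a makefile
# fragment such as
#
#     CC=gcc
#     CFLAGS=-O2
#     ALLFLAGS=$(CFLAGS) -Wall
#
# _parse_makefile() returns {'CC': 'gcc', 'CFLAGS': '-O2',
# 'ALLFLAGS': '-O2 -Wall'}: $(VAR) and ${VAR} references are resolved from
# the file itself, falling back to os.environ for names it cannot find.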
def get_makefile_filename():
"""Return the path of the Makefile."""
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
if hasattr(sys, 'abiflags'):
config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
else:
config_dir_name = 'config'
return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
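

# Usage sketch (not part of the upstream module): parse_config_h() accepts any
# file-like object, so a string can be fed through io.StringIO (use a unicode
# literal on Python 2):
#
#     import io
#     parse_config_h(io.StringIO(u'#define HAVE_UNISTD_H 1\n'
#                                u'/* #undef HAVE_FOO */\n'))
#     # -> {'HAVE_UNISTD_H': 1, 'HAVE_FOO': 0}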
def get_config_h_filename():
"""Return the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Return a tuple containing the schemes names."""
return tuple(sorted(_SCHEMES.sections()))
def get_path_names():
"""Return a tuple containing the paths names."""
# xxx see if we want a static list
return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
_ensure_cfg_read()
if expand:
return _expand_vars(scheme, vars)
else:
return dict(_SCHEMES.items(scheme))
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
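

# Usage sketch (illustrative; the actual value varies per system): look up a
# single location from the active install scheme, e.g. where pure-Python
# modules are installed:
#
#     >>> get_path('purelib')                    # doctest: +SKIP
#     '/usr/lib/python2.7/site-packages'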
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# distutils2 module.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
_CONFIG_VARS['abiflags'] = ''
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
if sys.version >= '2.6':
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
else:
_CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
                    flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
                    flags = re.sub(r'-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
                        flags = re.sub(r'-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
# compiles an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
# The major usecase for this is users using a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
                m = re.search(r'-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
                            flags = re.sub(r'-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
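

# Usage sketch (illustrative; actual values vary per system):
#
#     >>> get_config_vars('prefix', 'exec_prefix')   # doctest: +SKIP
#     ['/usr', '/usr']
#     >>> get_config_var('EXE')                      # doctest: +SKIP
#     ''                                             # '.exe' on Windows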
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile(r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if True:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if ((macrelease + '.') >= '10.4.' and
'-arch' in get_config_vars().get('CFLAGS', '').strip()):
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
            archs = re.findall(r'-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
"""Display all information sysconfig detains."""
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars())
if __name__ == '__main__':
_main()
|
eleonrk/SickRage | refs/heads/master | lib/future/backports/_markupbase.py | 84 | """Shared support for scanning document type declarations in HTML and XHTML.
Backported for python-future from Python 3.3. Reason: ParserBase is an
old-style class in the Python 2.7 source of markupbase.py, which I suspect
might be the cause of sporadic unit-test failures on travis-ci.org with
test_htmlparser.py. The test failures look like this:
======================================================================
ERROR: test_attr_entity_replacement (future.tests.test_htmlparser.AttributesStrictTestCase)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 661, in test_attr_entity_replacement
[("starttag", "a", [("b", "&><\"'")])])
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 93, in _run_check
collector = self.get_collector()
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 617, in get_collector
return EventCollector(strict=True)
File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 27, in __init__
html.parser.HTMLParser.__init__(self, *args, **kw)
File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 135, in __init__
self.reset()
File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 143, in reset
_markupbase.ParserBase.reset(self)
TypeError: unbound method reset() must be called with ParserBase instance as first argument (got EventCollector instance instead)
This module is used as a foundation for the html.parser module. It has no
documented public API and should not be used directly.
"""
import re
_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
_commentclose = re.compile(r'--\s*>')
_markedsectionclose = re.compile(r']\s*]\s*>')
# An analysis of the MS-Word extensions is available at
# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
_msmarkedsectionclose = re.compile(r']\s*>')
del re
class ParserBase(object):
"""Parser base class which provides some common support methods used
by the SGML/HTML and XHTML parsers."""
def __init__(self):
if self.__class__ is ParserBase:
raise RuntimeError(
"_markupbase.ParserBase must be subclassed")
def error(self, message):
raise NotImplementedError(
"subclasses of ParserBase must override error()")
def reset(self):
self.lineno = 1
self.offset = 0
def getpos(self):
"""Return current line number and offset."""
return self.lineno, self.offset
# Internal -- update line number and offset. This should be
# called for each piece of data exactly once, in order -- in other
# words the concatenation of all the input strings to this
# function should be exactly the entire input.
def updatepos(self, i, j):
if i >= j:
return j
rawdata = self.rawdata
nlines = rawdata.count("\n", i, j)
if nlines:
self.lineno = self.lineno + nlines
pos = rawdata.rindex("\n", i, j) # Should not fail
self.offset = j-(pos+1)
else:
self.offset = self.offset + j-i
return j
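
    # Illustration (not part of the upstream module): for rawdata = 'ab\ncd',
    # updatepos(0, 5) counts one newline, so lineno becomes 2 and offset
    # becomes 2 -- the number of characters after the last '\n'.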
_decl_otherchars = ''
# Internal -- parse declaration (for use by subclasses).
def parse_declaration(self, i):
# This is some sort of declaration; in "HTML as
# deployed," this should only be the document type
# declaration ("<!DOCTYPE html...>").
# ISO 8879:1986, however, has more complex
# declaration syntax for elements in <!...>, including:
# --comment--
# [marked section]
# name in the following list: ENTITY, DOCTYPE, ELEMENT,
# ATTLIST, NOTATION, SHORTREF, USEMAP,
# LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
rawdata = self.rawdata
j = i + 2
assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
if rawdata[j:j+1] == ">":
# the empty comment <!>
return j + 1
if rawdata[j:j+1] in ("-", ""):
# Start of comment followed by buffer boundary,
# or just a buffer boundary.
return -1
# A simple, practical version could look like: ((name|stringlit) S*) + '>'
n = len(rawdata)
if rawdata[j:j+2] == '--': #comment
# Locate --.*-- as the body of the comment
return self.parse_comment(i)
elif rawdata[j] == '[': #marked section
# Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
# Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
# Note that this is extended by Microsoft Office "Save as Web" function
# to include [if...] and [endif].
return self.parse_marked_section(i)
else: #all other declaration elements
decltype, j = self._scan_name(j, i)
if j < 0:
return j
if decltype == "doctype":
self._decl_otherchars = ''
while j < n:
c = rawdata[j]
if c == ">":
# end of declaration syntax
data = rawdata[i+2:j]
if decltype == "doctype":
self.handle_decl(data)
else:
# According to the HTML5 specs sections "8.2.4.44 Bogus
# comment state" and "8.2.4.45 Markup declaration open
# state", a comment token should be emitted.
# Calling unknown_decl provides more flexibility though.
self.unknown_decl(data)
return j + 1
if c in "\"'":
m = _declstringlit_match(rawdata, j)
if not m:
return -1 # incomplete
j = m.end()
elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
name, j = self._scan_name(j, i)
elif c in self._decl_otherchars:
j = j + 1
elif c == "[":
# this could be handled in a separate doctype parser
if decltype == "doctype":
j = self._parse_doctype_subset(j + 1, i)
elif decltype in set(["attlist", "linktype", "link", "element"]):
# must tolerate []'d groups in a content model in an element declaration
# also in data attribute specifications of attlist declaration
# also link type declaration subsets in linktype declarations
# also link attribute specification lists in link declarations
self.error("unsupported '[' char in %s declaration" % decltype)
else:
self.error("unexpected '[' char in declaration")
else:
self.error(
"unexpected %r char in declaration" % rawdata[j])
if j < 0:
return j
return -1 # incomplete
# Internal -- parse a marked section
# Override this to handle MS-word extension syntax <![if word]>content<![endif]>
def parse_marked_section(self, i, report=1):
rawdata= self.rawdata
assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
sectName, j = self._scan_name( i+3, i )
if j < 0:
return j
if sectName in set(["temp", "cdata", "ignore", "include", "rcdata"]):
# look for standard ]]> ending
match= _markedsectionclose.search(rawdata, i+3)
elif sectName in set(["if", "else", "endif"]):
# look for MS Office ]> ending
match= _msmarkedsectionclose.search(rawdata, i+3)
else:
self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
if not match:
return -1
if report:
j = match.start(0)
self.unknown_decl(rawdata[i+3: j])
return match.end(0)
# Internal -- parse comment, return length or -1 if not terminated
def parse_comment(self, i, report=1):
rawdata = self.rawdata
if rawdata[i:i+4] != '<!--':
self.error('unexpected call to parse_comment()')
match = _commentclose.search(rawdata, i+4)
if not match:
return -1
if report:
j = match.start(0)
self.handle_comment(rawdata[i+4: j])
return match.end(0)
# Internal -- scan past the internal subset in a <!DOCTYPE declaration,
# returning the index just past any whitespace following the trailing ']'.
def _parse_doctype_subset(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
j = i
while j < n:
c = rawdata[j]
if c == "<":
s = rawdata[j:j+2]
if s == "<":
# end of buffer; incomplete
return -1
if s != "<!":
self.updatepos(declstartpos, j + 1)
self.error("unexpected char in internal subset (in %r)" % s)
if (j + 2) == n:
# end of buffer; incomplete
return -1
if (j + 4) > n:
# end of buffer; incomplete
return -1
if rawdata[j:j+4] == "<!--":
j = self.parse_comment(j, report=0)
if j < 0:
return j
continue
name, j = self._scan_name(j + 2, declstartpos)
if j == -1:
return -1
if name not in set(["attlist", "element", "entity", "notation"]):
self.updatepos(declstartpos, j + 2)
self.error(
"unknown declaration %r in internal subset" % name)
# handle the individual names
meth = getattr(self, "_parse_doctype_" + name)
j = meth(j, declstartpos)
if j < 0:
return j
elif c == "%":
# parameter entity reference
if (j + 1) == n:
# end of buffer; incomplete
return -1
s, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
if rawdata[j] == ";":
j = j + 1
elif c == "]":
j = j + 1
while j < n and rawdata[j].isspace():
j = j + 1
if j < n:
if rawdata[j] == ">":
return j
self.updatepos(declstartpos, j)
self.error("unexpected char after internal subset")
else:
return -1
elif c.isspace():
j = j + 1
else:
self.updatepos(declstartpos, j)
self.error("unexpected char %r in internal subset" % c)
# end of buffer reached
return -1
# Internal -- scan past <!ELEMENT declarations
def _parse_doctype_element(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j == -1:
return -1
# style content model; just skip until '>'
rawdata = self.rawdata
if '>' in rawdata[j:]:
return rawdata.find(">", j) + 1
return -1
# Internal -- scan past <!ATTLIST declarations
def _parse_doctype_attlist(self, i, declstartpos):
rawdata = self.rawdata
name, j = self._scan_name(i, declstartpos)
c = rawdata[j:j+1]
if c == "":
return -1
if c == ">":
return j + 1
while 1:
# scan a series of attribute descriptions; simplified:
# name type [value] [#constraint]
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if c == "":
return -1
if c == "(":
# an enumerated type; look for ')'
if ")" in rawdata[j:]:
j = rawdata.find(")", j) + 1
else:
return -1
while rawdata[j:j+1].isspace():
j = j + 1
if not rawdata[j:]:
# end of buffer, incomplete
return -1
else:
name, j = self._scan_name(j, declstartpos)
c = rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1
c = rawdata[j:j+1]
if not c:
return -1
if c == "#":
if rawdata[j:] == "#":
# end of buffer
return -1
name, j = self._scan_name(j + 1, declstartpos)
if j < 0:
return j
c = rawdata[j:j+1]
if not c:
return -1
if c == '>':
# all done
return j + 1
# Internal -- scan past <!NOTATION declarations
def _parse_doctype_notation(self, i, declstartpos):
name, j = self._scan_name(i, declstartpos)
if j < 0:
return j
rawdata = self.rawdata
while 1:
c = rawdata[j:j+1]
if not c:
# end of buffer; incomplete
return -1
if c == '>':
return j + 1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if not m:
return -1
j = m.end()
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
# Internal -- scan past <!ENTITY declarations
def _parse_doctype_entity(self, i, declstartpos):
rawdata = self.rawdata
if rawdata[i:i+1] == "%":
j = i + 1
while 1:
c = rawdata[j:j+1]
if not c:
return -1
if c.isspace():
j = j + 1
else:
break
else:
j = i
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
while 1:
c = self.rawdata[j:j+1]
if not c:
return -1
if c in "'\"":
m = _declstringlit_match(rawdata, j)
if m:
j = m.end()
else:
return -1 # incomplete
elif c == ">":
return j + 1
else:
name, j = self._scan_name(j, declstartpos)
if j < 0:
return j
    # Internal -- scan a name token; return the lowercased name and the new
    # position, or (None, -1) if we've reached the end of the buffer.
def _scan_name(self, i, declstartpos):
rawdata = self.rawdata
n = len(rawdata)
if i == n:
return None, -1
m = _declname_match(rawdata, i)
if m:
s = m.group()
name = s.strip()
if (i + len(s)) == n:
return None, -1 # end of buffer
return name.lower(), m.end()
else:
self.updatepos(declstartpos, i)
self.error("expected name token at %r"
% rawdata[declstartpos:declstartpos+20])
# To be overridden -- handlers for unknown objects
def unknown_decl(self, data):
pass
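

# Minimal usage sketch (illustrative; this module has no documented public API
# and is normally consumed through html.parser): a subclass supplies rawdata,
# an error() hook and the handle_*/unknown_* callbacks before calling the
# parse_* helpers.
#
#     class _DemoParser(ParserBase):
#         def __init__(self):
#             ParserBase.__init__(self)
#             self.reset()
#             self.rawdata = ''
#
#         def error(self, message):
#             raise RuntimeError(message)
#
#         def handle_comment(self, data):
#             print('comment: %r' % data)
#
#     p = _DemoParser()
#     p.rawdata = '<!-- hi -->'
#     p.parse_comment(0)    # calls handle_comment(' hi '), returns 11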
|
ctbrizhp/swift1.9.2 | refs/heads/master | test/unit/common/middleware/test_acl.py | 12 | # Copyright (c) 2010-2012 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from swift.common.middleware import acl
class TestACL(unittest.TestCase):
def test_clean_acl(self):
value = acl.clean_acl('header', '.r:*')
self.assertEquals(value, '.r:*')
value = acl.clean_acl('header', '.r:specific.host')
self.assertEquals(value, '.r:specific.host')
value = acl.clean_acl('header', '.r:.ending.with')
self.assertEquals(value, '.r:.ending.with')
value = acl.clean_acl('header', '.r:*.ending.with')
self.assertEquals(value, '.r:.ending.with')
value = acl.clean_acl('header', '.r:-*.ending.with')
self.assertEquals(value, '.r:-.ending.with')
value = acl.clean_acl('header', '.r:one,.r:two')
self.assertEquals(value, '.r:one,.r:two')
value = acl.clean_acl('header', '.r:*,.r:-specific.host')
self.assertEquals(value, '.r:*,.r:-specific.host')
value = acl.clean_acl('header', '.r:*,.r:-.ending.with')
self.assertEquals(value, '.r:*,.r:-.ending.with')
value = acl.clean_acl('header', '.r:one,.r:-two')
self.assertEquals(value, '.r:one,.r:-two')
value = acl.clean_acl('header', '.r:one,.r:-two,account,account:user')
self.assertEquals(value, '.r:one,.r:-two,account,account:user')
value = acl.clean_acl('header', 'TEST_account')
self.assertEquals(value, 'TEST_account')
value = acl.clean_acl('header', '.ref:*')
self.assertEquals(value, '.r:*')
value = acl.clean_acl('header', '.referer:*')
self.assertEquals(value, '.r:*')
value = acl.clean_acl('header', '.referrer:*')
self.assertEquals(value, '.r:*')
value = acl.clean_acl('header',
' .r : one , ,, .r:two , .r : - three ')
self.assertEquals(value, '.r:one,.r:two,.r:-three')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.unknown:test')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:*.')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r : * . ')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:-*.')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r : - * . ')
self.assertRaises(ValueError, acl.clean_acl, 'header', ' .r : ')
self.assertRaises(ValueError, acl.clean_acl, 'header', 'user , .r : ')
self.assertRaises(ValueError, acl.clean_acl, 'header', '.r:-')
self.assertRaises(ValueError, acl.clean_acl, 'header', ' .r : - ')
self.assertRaises(ValueError, acl.clean_acl, 'header',
'user , .r : - ')
self.assertRaises(ValueError, acl.clean_acl, 'write-header', '.r:r')
def test_parse_acl(self):
self.assertEquals(acl.parse_acl(None), ([], []))
self.assertEquals(acl.parse_acl(''), ([], []))
self.assertEquals(acl.parse_acl('.r:ref1'), (['ref1'], []))
self.assertEquals(acl.parse_acl('.r:-ref1'), (['-ref1'], []))
self.assertEquals(acl.parse_acl('account:user'),
([], ['account:user']))
self.assertEquals(acl.parse_acl('account'), ([], ['account']))
self.assertEquals(acl.parse_acl('acc1,acc2:usr2,.r:ref3,.r:-ref4'),
(['ref3', '-ref4'], ['acc1', 'acc2:usr2']))
self.assertEquals(acl.parse_acl(
'acc1,acc2:usr2,.r:ref3,acc3,acc4:usr4,.r:ref5,.r:-ref6'),
(['ref3', 'ref5', '-ref6'],
['acc1', 'acc2:usr2', 'acc3', 'acc4:usr4']))
def test_referrer_allowed(self):
self.assert_(not acl.referrer_allowed('host', None))
self.assert_(not acl.referrer_allowed('host', []))
self.assert_(acl.referrer_allowed(None, ['*']))
self.assert_(acl.referrer_allowed('', ['*']))
self.assert_(not acl.referrer_allowed(None, ['specific.host']))
self.assert_(not acl.referrer_allowed('', ['specific.host']))
self.assert_(acl.referrer_allowed('http://www.example.com/index.html',
['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user@www.example.com/index.html', ['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user:pass@www.example.com/index.html', ['.example.com']))
self.assert_(acl.referrer_allowed(
'http://www.example.com:8080/index.html', ['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user@www.example.com:8080/index.html', ['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user:pass@www.example.com:8080/index.html',
['.example.com']))
self.assert_(acl.referrer_allowed(
'http://user:pass@www.example.com:8080', ['.example.com']))
self.assert_(acl.referrer_allowed('http://www.example.com',
['.example.com']))
self.assert_(not acl.referrer_allowed('http://thief.example.com',
['.example.com', '-thief.example.com']))
self.assert_(not acl.referrer_allowed('http://thief.example.com',
['*', '-thief.example.com']))
self.assert_(acl.referrer_allowed('http://www.example.com',
['.other.com', 'www.example.com']))
self.assert_(acl.referrer_allowed('http://www.example.com',
['-.example.com', 'www.example.com']))
# This is considered a relative uri to the request uri, a mode not
# currently supported.
self.assert_(not acl.referrer_allowed('www.example.com',
['.example.com']))
self.assert_(not acl.referrer_allowed('../index.html',
['.example.com']))
self.assert_(acl.referrer_allowed('www.example.com', ['*']))
if __name__ == '__main__':
unittest.main()
|
kasparg/lawcats | refs/heads/master | libs/bs4/tests/test_lxml.py | 115 | """Tests to ensure that the lxml tree builder generates good trees."""
import re
import warnings
try:
import lxml.etree
LXML_PRESENT = True
LXML_VERSION = lxml.etree.LXML_VERSION
except ImportError, e:
LXML_PRESENT = False
LXML_VERSION = (0,)
if LXML_PRESENT:
from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML
from bs4 import (
BeautifulSoup,
BeautifulStoneSoup,
)
from bs4.element import Comment, Doctype, SoupStrainer
from bs4.testing import skipIf
from bs4.tests import test_htmlparser
from bs4.testing import (
HTMLTreeBuilderSmokeTest,
XMLTreeBuilderSmokeTest,
SoupTest,
skipIf,
)
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its tree builder.")
class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilder()
def test_out_of_range_entity(self):
self.assertSoupEquals(
"<p>foo�bar</p>", "<p>foobar</p>")
self.assertSoupEquals(
"<p>foo�bar</p>", "<p>foobar</p>")
self.assertSoupEquals(
"<p>foo�bar</p>", "<p>foobar</p>")
# In lxml < 2.3.5, an empty doctype causes a segfault. Skip this
# test if an old version of lxml is installed.
@skipIf(
not LXML_PRESENT or LXML_VERSION < (2,3,5,0),
"Skipping doctype test for old version of lxml to avoid segfault.")
def test_empty_doctype(self):
soup = self.soup("<!DOCTYPE>")
doctype = soup.contents[0]
self.assertEqual("", doctype.strip())
def test_beautifulstonesoup_is_xml_parser(self):
# Make sure that the deprecated BSS class uses an xml builder
# if one is installed.
with warnings.catch_warnings(record=True) as w:
soup = BeautifulStoneSoup("<b />")
self.assertEqual(u"<b/>", unicode(soup.b))
self.assertTrue("BeautifulStoneSoup class is deprecated" in str(w[0].message))
@skipIf(
not LXML_PRESENT,
"lxml seems not to be present, not testing its XML tree builder.")
class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
"""See ``HTMLTreeBuilderSmokeTest``."""
@property
def default_builder(self):
return LXMLTreeBuilderForXML()
|
WarboyX/MK4_Kernel | refs/heads/master | tools/perf/python/twatch.py | 7370 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
    evsel.open(cpus = cpus, threads = threads)
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
|
esri-es/awesome-arcgis | refs/heads/master | node_modules/gitbook/node_modules/npm/node_modules/node-gyp/gyp/gyp_main.py | 1452 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
# Make sure we're using the version of pylib in this repo, not one installed
# elsewhere on the system.
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
import gyp
if __name__ == '__main__':
sys.exit(gyp.script_main())
|
TileHalo/servo | refs/heads/master | tests/wpt/css-tests/tools/pywebsocket/src/test/mux_client_for_testing.py | 457 | #!/usr/bin/env python
#
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket client utility for testing mux extension.
This code should be independent from mod_pywebsocket. See the comment of
client_for_testing.py.
NOTE: This code is far from robust like client_for_testing.py.
"""
import Queue
import base64
import collections
import email
import email.parser
import logging
import math
import os
import random
import socket
import struct
import threading
from mod_pywebsocket import util
from test import client_for_testing
_CONTROL_CHANNEL_ID = 0
_DEFAULT_CHANNEL_ID = 1
_MUX_OPCODE_ADD_CHANNEL_REQUEST = 0
_MUX_OPCODE_ADD_CHANNEL_RESPONSE = 1
_MUX_OPCODE_FLOW_CONTROL = 2
_MUX_OPCODE_DROP_CHANNEL = 3
_MUX_OPCODE_NEW_CHANNEL_SLOT = 4
class _ControlBlock:
def __init__(self, opcode):
self.opcode = opcode
def _parse_handshake_response(response):
status_line, header_lines = response.split('\r\n', 1)
words = status_line.split(' ')
if len(words) < 3:
raise ValueError('Bad Status-Line syntax %r' % status_line)
[version, response_code] = words[:2]
if version != 'HTTP/1.1':
raise ValueError('Bad response version %r' % version)
if response_code != '101':
raise ValueError('Bad response code %r ' % response_code)
headers = email.parser.Parser().parsestr(header_lines)
return headers
def _parse_channel_id(data, offset=0):
length = len(data)
remaining = length - offset
if remaining <= 0:
raise Exception('No channel id found')
channel_id = ord(data[offset])
channel_id_length = 1
if channel_id & 0xe0 == 0xe0:
if remaining < 4:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!L',
data[offset:offset+4])[0] & 0x1fffffff
channel_id_length = 4
elif channel_id & 0xc0 == 0xc0:
if remaining < 3:
raise Exception('Invalid channel id format')
channel_id = (((channel_id & 0x1f) << 16) +
struct.unpack('!H', data[offset+1:offset+3])[0])
channel_id_length = 3
elif channel_id & 0x80 == 0x80:
if remaining < 2:
raise Exception('Invalid channel id format')
channel_id = struct.unpack('!H', data[offset:offset+2])[0] & 0x3fff
channel_id_length = 2
return channel_id, channel_id_length
def _parse_number(data, offset=0):
first_byte = ord(data[offset])
if (first_byte & 0x80) != 0:
raise Exception('The MSB of number field must be unset')
first_byte = first_byte & 0x7f
if first_byte == 127:
if offset + 9 > len(data):
raise Exception('Invalid number')
return struct.unpack('!Q', data[offset+1:offset+9])[0], 9
if first_byte == 126:
if offset + 3 > len(data):
raise Exception('Invalid number')
return struct.unpack('!H', data[offset+1:offset+3])[0], 3
return first_byte, 1
def _parse_size_and_contents(data, offset=0):
size, advance = _parse_number(data, offset)
start_position = offset + advance
end_position = start_position + size
if len(data) < end_position:
raise Exception('Invalid size of control block (%d < %d)' % (
len(data), end_position))
return data[start_position:end_position], size + advance
def _parse_control_blocks(data):
blocks = []
length = len(data)
pos = 0
while pos < length:
first_byte = ord(data[pos])
pos += 1
opcode = (first_byte >> 5) & 0x7
block = _ControlBlock(opcode)
# TODO(bashi): Support more opcode
if opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
block.encode = first_byte & 3
block.rejected = (first_byte >> 4) & 1
channel_id, advance = _parse_channel_id(data, pos)
block.channel_id = channel_id
pos += advance
encoded_handshake, advance = _parse_size_and_contents(data, pos)
block.encoded_handshake = encoded_handshake
pos += advance
blocks.append(block)
elif opcode == _MUX_OPCODE_DROP_CHANNEL:
block.mux_error = (first_byte >> 4) & 1
channel_id, advance = _parse_channel_id(data, pos)
block.channel_id = channel_id
pos += advance
reason, advance = _parse_size_and_contents(data, pos)
if len(reason) == 0:
block.drop_code = None
block.drop_message = ''
elif len(reason) >= 2:
block.drop_code = struct.unpack('!H', reason[:2])[0]
block.drop_message = reason[2:]
else:
raise Exception('Invalid DropChannel')
pos += advance
blocks.append(block)
elif opcode == _MUX_OPCODE_FLOW_CONTROL:
channel_id, advance = _parse_channel_id(data, pos)
block.channel_id = channel_id
pos += advance
send_quota, advance = _parse_number(data, pos)
block.send_quota = send_quota
pos += advance
blocks.append(block)
elif opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
fallback = first_byte & 1
slots, advance = _parse_number(data, pos)
pos += advance
send_quota, advance = _parse_number(data, pos)
pos += advance
if fallback == 1 and (slots != 0 or send_quota != 0):
raise Exception('slots and send_quota must be zero if F bit '
'is set')
block.fallback = fallback
block.slots = slots
block.send_quota = send_quota
blocks.append(block)
else:
raise Exception(
'Unsupported mux opcode %d received' % opcode)
return blocks
def _encode_channel_id(channel_id):
if channel_id < 0:
raise ValueError('Channel id %d must not be negative' % channel_id)
if channel_id < 2 ** 7:
return chr(channel_id)
if channel_id < 2 ** 14:
return struct.pack('!H', 0x8000 + channel_id)
if channel_id < 2 ** 21:
first = chr(0xc0 + (channel_id >> 16))
return first + struct.pack('!H', channel_id & 0xffff)
if channel_id < 2 ** 29:
return struct.pack('!L', 0xe0000000 + channel_id)
raise ValueError('Channel id %d is too large' % channel_id)
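

# Worked examples (illustrative) of the variable-length encoding above: ids
# below 2**7 take one byte, below 2**14 two bytes prefixed with bit 0x80,
# below 2**21 three bytes prefixed 0xc0, and below 2**29 four bytes prefixed
# 0xe0. _parse_channel_id() is the exact inverse.
#
#     _encode_channel_id(5)     == '\x05'
#     _encode_channel_id(300)   == '\x81\x2c'             # 0x8000 | 300
#     _encode_channel_id(70000) == '\xc1\x11\x70'
#     _encode_channel_id(2**28) == '\xf0\x00\x00\x00'     # 0xe0000000 + id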
def _encode_number(number):
if number <= 125:
return chr(number)
elif number < (1 << 16):
return chr(0x7e) + struct.pack('!H', number)
elif number < (1 << 63):
return chr(0x7f) + struct.pack('!Q', number)
else:
raise Exception('Invalid number')
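

# Worked examples (illustrative) of the length encoding above, which mirrors
# the WebSocket payload-length format: values up to 125 fit in one byte, 0x7e
# flags a 16-bit extension and 0x7f a 64-bit one. _parse_number() reverses it.
#
#     _encode_number(100)   == '\x64'
#     _encode_number(1000)  == '\x7e\x03\xe8'
#     _encode_number(2**20) == '\x7f' + struct.pack('!Q', 2**20)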
def _create_add_channel_request(channel_id, encoded_handshake,
encoding=0):
length = len(encoded_handshake)
handshake_length = _encode_number(length)
first_byte = (_MUX_OPCODE_ADD_CHANNEL_REQUEST << 5) | encoding
return (chr(first_byte) + _encode_channel_id(channel_id) +
handshake_length + encoded_handshake)
def _create_flow_control(channel_id, replenished_quota):
first_byte = (_MUX_OPCODE_FLOW_CONTROL << 5)
return (chr(first_byte) + _encode_channel_id(channel_id) +
_encode_number(replenished_quota))
class _MuxReaderThread(threading.Thread):
"""Mux reader thread.
Reads frames and passes them to the mux client. This thread accesses
private functions/variables of the mux client.
"""
def __init__(self, mux):
threading.Thread.__init__(self)
self.setDaemon(True)
self._mux = mux
self._stop_requested = False
def _receive_message(self):
first_opcode = None
pending_payload = []
while not self._stop_requested:
fin, rsv1, rsv2, rsv3, opcode, payload_length = (
client_for_testing.read_frame_header(self._mux._socket))
if not first_opcode:
if opcode == client_for_testing.OPCODE_TEXT:
raise Exception('Received a text message on physical '
'connection')
if opcode == client_for_testing.OPCODE_CONTINUATION:
raise Exception('Received an intermediate frame but '
'fragmentation was not started')
if (opcode == client_for_testing.OPCODE_BINARY or
                    opcode == client_for_testing.OPCODE_PING or
                    opcode == client_for_testing.OPCODE_PONG or
opcode == client_for_testing.OPCODE_CLOSE):
first_opcode = opcode
else:
raise Exception('Received an undefined opcode frame: %d' %
opcode)
elif opcode != client_for_testing.OPCODE_CONTINUATION:
raise Exception('Received a new opcode before '
'terminating fragmentation')
payload = client_for_testing.receive_bytes(
self._mux._socket, payload_length)
if self._mux._incoming_frame_filter is not None:
payload = self._mux._incoming_frame_filter.filter(payload)
pending_payload.append(payload)
if fin:
break
if self._stop_requested:
return None, None
message = ''.join(pending_payload)
return first_opcode, message
def request_stop(self):
self._stop_requested = True
def run(self):
try:
while not self._stop_requested:
# opcode is OPCODE_BINARY or control opcodes when a message
                # is successfully received.
opcode, message = self._receive_message()
if not opcode:
return
if opcode == client_for_testing.OPCODE_BINARY:
channel_id, advance = _parse_channel_id(message)
self._mux._dispatch_frame(channel_id, message[advance:])
else:
self._mux._process_control_message(opcode, message)
finally:
self._mux._notify_reader_done()
class _InnerFrame(object):
def __init__(self, fin, rsv1, rsv2, rsv3, opcode, payload):
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.payload = payload
class _LogicalChannelData(object):
def __init__(self):
self.queue = Queue.Queue()
self.send_quota = 0
self.receive_quota = 0
class MuxClient(object):
"""WebSocket mux client.
Note that this class is NOT thread-safe. Do not access an instance of this
class from multiple threads at a same time.
"""
def __init__(self, options):
self._logger = util.get_class_logger(self)
self._options = options
self._options.enable_mux()
self._stream = None
self._socket = None
self._handshake = client_for_testing.WebSocketHandshake(self._options)
self._incoming_frame_filter = None
self._outgoing_frame_filter = None
self._is_active = False
self._read_thread = None
self._control_blocks_condition = threading.Condition()
self._control_blocks = []
self._channel_slots = collections.deque()
        self._logical_channels_condition = threading.Condition()
self._logical_channels = {}
self._timeout = 2
self._physical_connection_close_event = None
self._physical_connection_close_message = None
def _parse_inner_frame(self, data):
if len(data) == 0:
raise Exception('Invalid encapsulated frame received')
first_byte = ord(data[0])
        fin = (first_byte >> 7) & 1
        rsv1 = (first_byte >> 6) & 1
        rsv2 = (first_byte >> 5) & 1
        rsv3 = (first_byte >> 4) & 1
opcode = first_byte & 0xf
if self._outgoing_frame_filter:
payload = self._outgoing_frame_filter.filter(
data[1:])
else:
payload = data[1:]
return _InnerFrame(fin, rsv1, rsv2, rsv3, opcode, payload)
def _process_mux_control_blocks(self):
for block in self._control_blocks:
if block.opcode == _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
# AddChannelResponse will be handled in add_channel().
continue
elif block.opcode == _MUX_OPCODE_FLOW_CONTROL:
try:
self._logical_channels_condition.acquire()
                    if block.channel_id not in self._logical_channels:
raise Exception('Invalid flow control received for '
'channel id %d' % block.channel_id)
self._logical_channels[block.channel_id].send_quota += (
block.send_quota)
self._logical_channels_condition.notify()
finally:
self._logical_channels_condition.release()
elif block.opcode == _MUX_OPCODE_NEW_CHANNEL_SLOT:
self._channel_slots.extend([block.send_quota] * block.slots)
def _dispatch_frame(self, channel_id, payload):
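        # Frames addressed to the control channel carry mux control blocks;
        # anything else is an encapsulated logical frame for an already
        # established channel.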
if channel_id == _CONTROL_CHANNEL_ID:
try:
self._control_blocks_condition.acquire()
self._control_blocks += _parse_control_blocks(payload)
self._process_mux_control_blocks()
self._control_blocks_condition.notify()
finally:
self._control_blocks_condition.release()
else:
try:
self._logical_channels_condition.acquire()
                if channel_id not in self._logical_channels:
raise Exception('Received logical frame on channel id '
'%d, which is not established' %
channel_id)
inner_frame = self._parse_inner_frame(payload)
self._logical_channels[channel_id].receive_quota -= (
len(inner_frame.payload))
if self._logical_channels[channel_id].receive_quota < 0:
raise Exception('The server violates quota on '
'channel id %d' % channel_id)
finally:
self._logical_channels_condition.release()
self._logical_channels[channel_id].queue.put(inner_frame)
def _process_control_message(self, opcode, message):
# Ping/Pong are not supported.
if opcode == client_for_testing.OPCODE_CLOSE:
self._physical_connection_close_message = message
if self._is_active:
self._stream.send_close(
code=client_for_testing.STATUS_NORMAL_CLOSURE, reason='')
self._read_thread.request_stop()
if self._physical_connection_close_event:
self._physical_connection_close_event.set()
def _notify_reader_done(self):
self._logger.debug('Read thread terminated.')
self.close_socket()
def _assert_channel_slot_available(self):
try:
self._control_blocks_condition.acquire()
if len(self._channel_slots) == 0:
# Wait once
self._control_blocks_condition.wait(timeout=self._timeout)
finally:
self._control_blocks_condition.release()
if len(self._channel_slots) == 0:
raise Exception('Failed to receive NewChannelSlot')
def _assert_send_quota_available(self, channel_id):
try:
self._logical_channels_condition.acquire()
if self._logical_channels[channel_id].send_quota == 0:
# Wait once
self._logical_channels_condition.wait(timeout=self._timeout)
finally:
self._logical_channels_condition.release()
if self._logical_channels[channel_id].send_quota == 0:
raise Exception('Failed to receive FlowControl for channel id %d' %
channel_id)
def connect(self):
self._socket = client_for_testing.connect_socket_with_retry(
self._options.server_host,
self._options.server_port,
self._options.socket_timeout,
self._options.use_tls)
self._handshake.handshake(self._socket)
self._stream = client_for_testing.WebSocketStream(
self._socket, self._handshake)
self._logical_channels[_DEFAULT_CHANNEL_ID] = _LogicalChannelData()
self._read_thread = _MuxReaderThread(self)
self._read_thread.start()
self._assert_channel_slot_available()
self._assert_send_quota_available(_DEFAULT_CHANNEL_ID)
self._is_active = True
self._logger.info('Connection established')
def add_channel(self, channel_id, options):
if not self._is_active:
raise Exception('Mux client is not active')
if channel_id in self._logical_channels:
raise Exception('Channel id %d already exists' % channel_id)
try:
send_quota = self._channel_slots.popleft()
        except IndexError as e:
raise Exception('No channel slots: %r' % e)
# Create AddChannel request
request_line = 'GET %s HTTP/1.1\r\n' % options.resource
fields = []
if options.server_port == client_for_testing.DEFAULT_PORT:
fields.append('Host: %s\r\n' % options.server_host.lower())
else:
fields.append('Host: %s:%d\r\n' % (options.server_host.lower(),
options.server_port))
fields.append('Origin: %s\r\n' % options.origin.lower())
fields.append('Connection: Upgrade\r\n')
if len(options.extensions) > 0:
fields.append('Sec-WebSocket-Extensions: %s\r\n' %
', '.join(options.extensions))
handshake = request_line + ''.join(fields) + '\r\n'
add_channel_request = _create_add_channel_request(
channel_id, handshake)
payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + add_channel_request
self._stream.send_binary(payload)
        # Wait for AddChannelResponse
        self._logger.debug('Waiting for AddChannelResponse for the request...')
response = None
try:
self._control_blocks_condition.acquire()
while True:
for block in self._control_blocks:
if block.opcode != _MUX_OPCODE_ADD_CHANNEL_RESPONSE:
continue
if block.channel_id == channel_id:
response = block
self._control_blocks.remove(response)
break
if response:
break
self._control_blocks_condition.wait(self._timeout)
if not self._is_active:
raise Exception('AddChannelRequest timed out')
finally:
self._control_blocks_condition.release()
# Validate AddChannelResponse
if response.rejected:
raise Exception('The server rejected AddChannelRequest')
fields = _parse_handshake_response(response.encoded_handshake)
# Should we reject when Upgrade, Connection, or Sec-WebSocket-Accept
# headers exist?
        try:
            self._logical_channels_condition.acquire()
            self._logical_channels[channel_id] = _LogicalChannelData()
            self._logical_channels[channel_id].send_quota = send_quota
        finally:
            self._logical_channels_condition.release()
self._logger.debug('Logical channel %d established' % channel_id)
def _check_logical_channel_is_opened(self, channel_id):
if not self._is_active:
raise Exception('Mux client is not active')
        if channel_id not in self._logical_channels:
            raise Exception('Logical channel %d is not established.' %
                            channel_id)
def drop_channel(self, channel_id):
# TODO(bashi): Implement
pass
def send_flow_control(self, channel_id, replenished_quota):
self._check_logical_channel_is_opened(channel_id)
flow_control = _create_flow_control(channel_id, replenished_quota)
payload = _encode_channel_id(_CONTROL_CHANNEL_ID) + flow_control
# Replenish receive quota
try:
self._logical_channels_condition.acquire()
self._logical_channels[channel_id].receive_quota += (
replenished_quota)
finally:
self._logical_channels_condition.release()
self._stream.send_binary(payload)
def send_message(self, channel_id, message, end=True, binary=False):
self._check_logical_channel_is_opened(channel_id)
if binary:
first_byte = (end << 7) | client_for_testing.OPCODE_BINARY
else:
first_byte = (end << 7) | client_for_testing.OPCODE_TEXT
message = message.encode('utf-8')
try:
self._logical_channels_condition.acquire()
if self._logical_channels[channel_id].send_quota < len(message):
raise Exception('Send quota violation: %d < %d' % (
self._logical_channels[channel_id].send_quota,
len(message)))
self._logical_channels[channel_id].send_quota -= len(message)
finally:
self._logical_channels_condition.release()
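        # Logical frame wire format: encoded channel id, then one byte
        # packing FIN (bit 7) with the opcode, then the payload bytes.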
payload = _encode_channel_id(channel_id) + chr(first_byte) + message
self._stream.send_binary(payload)
def assert_receive(self, channel_id, payload, binary=False):
self._check_logical_channel_is_opened(channel_id)
try:
inner_frame = self._logical_channels[channel_id].queue.get(
timeout=self._timeout)
        except Queue.Empty:
raise Exception('Cannot receive message from channel id %d' %
channel_id)
if binary:
opcode = client_for_testing.OPCODE_BINARY
else:
opcode = client_for_testing.OPCODE_TEXT
        if inner_frame.opcode != opcode:
            raise Exception('Unexpected opcode received (%r != %r)' %
                            (opcode, inner_frame.opcode))
if inner_frame.payload != payload:
raise Exception('Unexpected payload received')
def send_close(self, channel_id, code=None, reason=''):
self._check_logical_channel_is_opened(channel_id)
if code is not None:
body = struct.pack('!H', code) + reason.encode('utf-8')
else:
body = ''
first_byte = (1 << 7) | client_for_testing.OPCODE_CLOSE
payload = _encode_channel_id(channel_id) + chr(first_byte) + body
self._stream.send_binary(payload)
def assert_receive_close(self, channel_id):
self._check_logical_channel_is_opened(channel_id)
try:
inner_frame = self._logical_channels[channel_id].queue.get(
timeout=self._timeout)
        except Queue.Empty:
raise Exception('Cannot receive message from channel id %d' %
channel_id)
if inner_frame.opcode != client_for_testing.OPCODE_CLOSE:
raise Exception('Didn\'t receive close frame')
def send_physical_connection_close(self, code=None, reason=''):
self._physical_connection_close_event = threading.Event()
self._stream.send_close(code, reason)
# This method can be used only after calling
# send_physical_connection_close().
def assert_physical_connection_receive_close(
self, code=client_for_testing.STATUS_NORMAL_CLOSURE, reason=''):
self._physical_connection_close_event.wait(timeout=self._timeout)
if (not self._physical_connection_close_event.isSet() or
not self._physical_connection_close_message):
raise Exception('Didn\'t receive closing handshake')
def close_socket(self):
self._is_active = False
self._socket.close()
|
shawnferry/ansible | refs/heads/devel | v1/tests/TestModuleUtilsBasic.py | 103 | import os
import tempfile
import unittest
from nose.tools import raises
from nose.tools import timed
from ansible import errors
from ansible.module_common import ModuleReplacer
from ansible.module_utils.basic import heuristic_log_sanitize
from ansible.utils import checksum as utils_checksum
TEST_MODULE_DATA = """
from ansible.module_utils.basic import *
def get_module():
return AnsibleModule(
argument_spec = dict(),
supports_check_mode = True,
no_log = True,
)
get_module()
"""
class TestModuleUtilsBasic(unittest.TestCase):
def cleanup_temp_file(self, fd, path):
try:
os.close(fd)
os.remove(path)
except:
pass
def cleanup_temp_dir(self, path):
try:
os.rmdir(path)
except:
pass
def setUp(self):
# create a temporary file for the test module
# we're about to generate
self.tmp_fd, self.tmp_path = tempfile.mkstemp()
os.write(self.tmp_fd, TEST_MODULE_DATA)
# template the module code and eval it
module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {})
d = {}
exec(module_data, d, d)
self.module = d['get_module']()
# module_utils/basic.py screws with CWD, let's save it and reset
self.cwd = os.getcwd()
def tearDown(self):
self.cleanup_temp_file(self.tmp_fd, self.tmp_path)
# Reset CWD back to what it was before basic.py changed it
os.chdir(self.cwd)
#################################################################################
# run_command() tests
# test run_command with a string command
def test_run_command_string(self):
(rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'")
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
(rc, out, err) = self.module.run_command("/bin/echo -n 'foo bar'", use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
# test run_command with an array of args (with both use_unsafe_shell=True|False)
def test_run_command_args(self):
(rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"])
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
(rc, out, err) = self.module.run_command(['/bin/echo', '-n', "foo bar"], use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
# test run_command with leading environment variables
@raises(SystemExit)
def test_run_command_string_with_env_variables(self):
self.module.run_command('FOO=bar /bin/echo -n "foo bar"')
@raises(SystemExit)
def test_run_command_args_with_env_variables(self):
self.module.run_command(['FOO=bar', '/bin/echo', '-n', 'foo bar'])
def test_run_command_string_unsafe_with_env_variables(self):
(rc, out, err) = self.module.run_command('FOO=bar /bin/echo -n "foo bar"', use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar')
# test run_command with a command pipe (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_pipe(self):
(rc, out, err) = self.module.run_command('echo "foo bar" | cat', use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar\n')
# test run_command with a shell redirect in (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_redirect_in(self):
(rc, out, err) = self.module.run_command('cat << EOF\nfoo bar\nEOF', use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar\n')
# test run_command with a shell redirect out (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_redirect_out(self):
tmp_fd, tmp_path = tempfile.mkstemp()
try:
(rc, out, err) = self.module.run_command('echo "foo bar" > %s' % tmp_path, use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertTrue(os.path.exists(tmp_path))
checksum = utils_checksum(tmp_path)
self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
except:
raise
finally:
self.cleanup_temp_file(tmp_fd, tmp_path)
# test run_command with a double shell redirect out (append) (with both use_unsafe_shell=True|False)
def test_run_command_string_unsafe_with_double_redirect_out(self):
tmp_fd, tmp_path = tempfile.mkstemp()
try:
(rc, out, err) = self.module.run_command('echo "foo bar" >> %s' % tmp_path, use_unsafe_shell=True)
self.assertEqual(rc, 0)
self.assertTrue(os.path.exists(tmp_path))
checksum = utils_checksum(tmp_path)
self.assertEqual(checksum, 'd53a205a336e07cf9eac45471b3870f9489288ec')
except:
raise
finally:
self.cleanup_temp_file(tmp_fd, tmp_path)
# test run_command with data
def test_run_command_string_with_data(self):
(rc, out, err) = self.module.run_command('cat', data='foo bar')
self.assertEqual(rc, 0)
self.assertEqual(out, 'foo bar\n')
# test run_command with binary data
def test_run_command_string_with_binary_data(self):
(rc, out, err) = self.module.run_command('cat', data='\x41\x42\x43\x44', binary_data=True)
self.assertEqual(rc, 0)
self.assertEqual(out, 'ABCD')
# test run_command with a cwd set
def test_run_command_string_with_cwd(self):
tmp_path = tempfile.mkdtemp()
try:
(rc, out, err) = self.module.run_command('pwd', cwd=tmp_path)
self.assertEqual(rc, 0)
self.assertTrue(os.path.exists(tmp_path))
self.assertEqual(out.strip(), os.path.realpath(tmp_path))
except:
raise
finally:
self.cleanup_temp_dir(tmp_path)
class TestModuleUtilsBasicHelpers(unittest.TestCase):
''' Test some implementation details of AnsibleModule
    Some pieces of AnsibleModule are implementation details, but they have
    potential corner cases that we need to check. Go ahead and test at this
    level that the functions are behaving correctly, even though their API
    may change and we'd have to rewrite these tests; that way we know to
    check for those problems in any rewrite.
In the future we might want to restructure higher level code to be
friendlier to unittests so that we can test at the level that the public
is interacting with the APIs.
'''
MANY_RECORDS = 7000
URL_SECRET = 'http://username:pas:word@foo.com/data'
SSH_SECRET = 'username:pas:word@foo.com/data'
def cleanup_temp_file(self, fd, path):
try:
os.close(fd)
os.remove(path)
except:
pass
def cleanup_temp_dir(self, path):
try:
os.rmdir(path)
except:
pass
def _gen_data(self, records, per_rec, top_level, secret_text):
hostvars = {'hostvars': {}}
        for i in range(1, records):
host_facts = {'host%s' % i:
{'pstack':
{'running': '875.1',
'symlinked': '880.0',
'tars': [],
'versions': ['885.0']},
}}
if per_rec:
host_facts['host%s' % i]['secret'] = secret_text
hostvars['hostvars'].update(host_facts)
if top_level:
hostvars['secret'] = secret_text
return hostvars
def setUp(self):
self.many_url = repr(self._gen_data(self.MANY_RECORDS, True, True,
self.URL_SECRET))
self.many_ssh = repr(self._gen_data(self.MANY_RECORDS, True, True,
self.SSH_SECRET))
self.one_url = repr(self._gen_data(self.MANY_RECORDS, False, True,
self.URL_SECRET))
self.one_ssh = repr(self._gen_data(self.MANY_RECORDS, False, True,
self.SSH_SECRET))
self.zero_secrets = repr(self._gen_data(self.MANY_RECORDS, False,
False, ''))
self.few_url = repr(self._gen_data(2, True, True, self.URL_SECRET))
self.few_ssh = repr(self._gen_data(2, True, True, self.SSH_SECRET))
# create a temporary file for the test module
# we're about to generate
self.tmp_fd, self.tmp_path = tempfile.mkstemp()
os.write(self.tmp_fd, TEST_MODULE_DATA)
# template the module code and eval it
module_data, module_style, shebang = ModuleReplacer().modify_module(self.tmp_path, {}, "", {})
d = {}
exec(module_data, d, d)
self.module = d['get_module']()
# module_utils/basic.py screws with CWD, let's save it and reset
self.cwd = os.getcwd()
def tearDown(self):
self.cleanup_temp_file(self.tmp_fd, self.tmp_path)
# Reset CWD back to what it was before basic.py changed it
os.chdir(self.cwd)
#################################################################################
#
# Speed tests
#
# Previously, we used regexes which had some pathologically slow cases for
# parameters with large amounts of data with many ':' but no '@'. The
# present function gets slower when there are many replacements so we may
# want to explore regexes in the future (for the speed when substituting
# or flexibility). These speed tests will hopefully tell us if we're
# introducing code that has cases that are simply too slow.
#
# Some regex notes:
# * re.sub() is faster than re.match() + str.join().
# * We may be able to detect a large number of '@' symbols and then use
# a regex else use the present function.
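    # As a reference point for what these speed tests exercise: given input
    # containing 'http://username:pas:word@foo.com/data',
    # heuristic_log_sanitize() is expected to mask the password portion
    # (the correctness test below asserts that ':********@foo.com/data'
    # appears in the sanitized output).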
@timed(5)
def test_log_sanitize_speed_many_url(self):
heuristic_log_sanitize(self.many_url)
@timed(5)
def test_log_sanitize_speed_many_ssh(self):
heuristic_log_sanitize(self.many_ssh)
@timed(5)
def test_log_sanitize_speed_one_url(self):
heuristic_log_sanitize(self.one_url)
@timed(5)
def test_log_sanitize_speed_one_ssh(self):
heuristic_log_sanitize(self.one_ssh)
@timed(5)
def test_log_sanitize_speed_zero_secrets(self):
heuristic_log_sanitize(self.zero_secrets)
#
# Test that the password obfuscation sanitizes somewhat cleanly.
#
def test_log_sanitize_correctness(self):
url_data = repr(self._gen_data(3, True, True, self.URL_SECRET))
ssh_data = repr(self._gen_data(3, True, True, self.SSH_SECRET))
url_output = heuristic_log_sanitize(url_data)
ssh_output = heuristic_log_sanitize(ssh_data)
# Basic functionality: Successfully hid the password
try:
self.assertNotIn('pas:word', url_output)
self.assertNotIn('pas:word', ssh_output)
# Slightly more advanced, we hid all of the password despite the ":"
self.assertNotIn('pas', url_output)
self.assertNotIn('pas', ssh_output)
except AttributeError:
# python2.6 or less's unittest
self.assertFalse('pas:word' in url_output, '%s is present in %s' % ('"pas:word"', url_output))
self.assertFalse('pas:word' in ssh_output, '%s is present in %s' % ('"pas:word"', ssh_output))
self.assertFalse('pas' in url_output, '%s is present in %s' % ('"pas"', url_output))
self.assertFalse('pas' in ssh_output, '%s is present in %s' % ('"pas"', ssh_output))
# In this implementation we replace the password with 8 "*" which is
# also the length of our password. The url fields should be able to
# accurately detect where the password ends so the length should be
# the same:
self.assertEqual(len(url_output), len(url_data))
# ssh checking is harder as the heuristic is overzealous in many
# cases. Since the input will have at least one ":" present before
# the password we can tell some things about the beginning and end of
# the data, though:
self.assertTrue(ssh_output.startswith("{'"))
self.assertTrue(ssh_output.endswith("}"))
try:
self.assertIn(":********@foo.com/data'", ssh_output)
except AttributeError:
# python2.6 or less's unittest
self.assertTrue(":********@foo.com/data'" in ssh_output, '%s is not present in %s' % (":********@foo.com/data'", ssh_output))
# The overzealous-ness here may lead to us changing the algorithm in
# the future. We could make it consume less of the data (with the
# possibility of leaving partial passwords exposed) and encourage
# people to use no_log instead of relying on this obfuscation.
|
rogers0/namebench | refs/heads/master | third_party/__init__.py | 176 | import os.path
import sys
# This bit of evil should inject third_party into the path for relative imports.
sys.path.insert(1, os.path.dirname(__file__))
|
PrincetonUniversity/AdvNet-OF_Scripts | refs/heads/master | evaluation/switch/flowmod_test/pox/pox/messenger/log_service.py | 26 | # Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
This is a messenger service for working with the log.
It does two things:
a) Listen on the "log" channel. You can send messages to this
channel with keys lowerLevels/RaiseLevels/setLevels to adjust
global log levels. See _process_commands() for more info.
b) You can join any channel named log_<something> (your session
ID is a good choice for the something), and a LogBot will
join it. This will result in receiving log messages. In
your join message (or afterwards), you can configure levels,
the message formats, etc. See LogService for more details.
"""
from pox.core import core
from pox.messenger import *
from pox.lib.revent.revent import autoBindEvents
import logging
import traceback
log = core.getLogger()
# These attributes are copied verbatim from the log record
_attributes = [
'created','filename','funcName','levelname','levelno','lineno',
'module','msecs','name','pathname','process','processName',
'relativeCreated','thread','threadName','args',
]
class LogFilter (object):
"""
Filters messages from the web server component
It's a nasty situation when you're using the HTTP messenger transport
to view the log when in debug mode, as every webserver log message
creates a messenger message which creates a webserver message, ...
This just turns off debug messages from the webserver.
"""
def filter (self, record):
if record.levelno != logging.DEBUG: return True
if record.name == "web.webcore.server": return False
return True
class LogHandler (logging.Handler):
"""
A Python logging.Handler for the messenger
Accepts dictionaries with configuration info:
KEY VALUE
level Minimum log level to output (probably one of CRITICAL,
ERROR, WARNING, INFO or DEBUG)
format fmt argument to logging.Formatter
dateFormat datefmt argument to logging.Formatter
json true if you want a bunch of attributes from the LogRecord to
be included. In some cases these are stringized since the
originals are objects and we don't pickle/jsonpickle them.
subsystems A list of logger names to listen to. A "null"/None entry in
the list means the root logger (which is also the default).
add_subsystems A list of ADDITIONAL subsystems to listen to.
"""
#NOTE: We take advantage of the fact that the default value for the
# argument to getLogger() is None. This is currently true, but
# isn't documented, so it might change in the future (though I
# don't see why it would!). Doing this "the right way" results
# in much uglier code.
def __init__ (self, channel, params):
logging.Handler.__init__(self)
self._channel = channel
self.addFilter(LogFilter())
self._json = False
self._format = False # Not valid, should never be set
self._dateFormat = None
self.subsystems = []
if "format" not in params:
params["format"] = None # Force update
if 'subsystems' not in params:
self._add_subsystems([None])
self._process_parameters(params)
def _add_subsystems (self, subsystems):
"""
Add log subsystems to listen to
"""
for subsystem in subsystems:
if subsystem in self.subsystems: continue
try:
logging.getLogger(subsystem).addHandler(self)
self.subsystems.append(subsystem)
except:
pass
def _drop_subsystems (self):
"""
Stop listening to all log subsystems
"""
for subsystem in self.subsystems:
logging.getLogger(subsystem).removeHandler(self)
self.subsystems = []
def _process_parameters (self, params):
if "level" in params:
self.setLevel(params["level"])
if "subsystems" in params:
self._drop_subsystems()
self._add_subsystems(params['subsystems'])
if 'add_subsystems' in params:
self._add_subsystems(params['add_subsystems'])
if 'remove_subsystems' in params:
#TODO
log.error('remove_subsystems unimplemented')
if "json" in params:
self._json = params['json']
if "setLevels" in params:
levels = params['setLevels']
if isinstance(levels, dict):
for k,v in levels.iteritems():
l = core.getLogger(k)
l.setLevel(v)
else:
core.getLogger().setLevel(levels)
doFormat = False
if "format" in params:
fmt = params['format']
if fmt is not self._format:
self._format = fmt
doFormat = True
if "dateFormat" in params:
dateFormat = params['dateFormat']
if dateFormat is not self._dateFormat:
self._dateFormat = dateFormat
doFormat = True
if doFormat:
self.setFormatter(logging.Formatter(self._format, self._dateFormat))
def _close (self):
self._drop_subsystems()
def emit (self, record):
o = {'message' : self.format(record)}
#o['message'] = record.getMessage()
if self._json:
for attr in _attributes:
o[attr] = getattr(record, attr)
o['asctime'] = self.formatter.formatTime(record, self._dateFormat)
if record.exc_info:
o['exc_info'] = [str(record.exc_info[0]),
str(record.exc_info[1]),
traceback.format_tb(record.exc_info[2],1)]
o['exc'] = traceback.format_exception(*record.exc_info)
self._channel.send(o)
def _process_commands (msg):
"""
Processes logger commands
"""
def get (key):
r = msg.get(key)
if r is not None:
      if not isinstance(r, dict):
r = {None:r}
else:
return {}
return r
lowerLevels = get("lowerLevels") # less verbose
raiseLevels = get("raiseLevels") # more verbose
setLevels = get("setLevels")
  for k,v in lowerLevels.iteritems():
    logger = core.getLogger(k)
    level = logging._checkLevel(v)
    if not logger.isEnabledFor(level+1):
      logger.setLevel(v)
  for k,v in raiseLevels.iteritems():
    logger = core.getLogger(k)
    level = logging._checkLevel(v)
    if not logger.isEnabledFor(level):
      logger.setLevel(v)
for k,v in setLevels.iteritems():
logger = core.getLogger(k)
logger.setLevel(v)
message = msg.get("message", None)
if message:
level = msg.get("level", "DEBUG")
if isinstance(level, basestring):
if not level.isalpha():
level = logging.DEBUG
else:
level = level.upper()
level = getattr(logging, level, logging.DEBUG)
sub = msg.get("subsystem", "<external>")
logging.getLogger(sub).log(level, message)
class LogBot (ChannelBot):
def _init (self, extra):
self._handler = None
def _join (self, event, con, msg):
#self.reply(event, hello = "Hello, %s!" % (con,))
if self._handler is not None:
log.warning("Multiple clients on channel " + self.channel.name)
else:
self._handler = LogHandler(self.channel, msg)
def _leave (self, con, empty):
if empty:
self._handler._close()
self._handler = None
def _unhandled (self, event):
_process_commands(event.msg)
self._handler._process_parameters(event.msg)
def _handle_new_channel (event):
if event.channel.name.startswith("log_"):
# New channel named log_<something>? Add a log bot.
LogBot(event.channel)
def launch (nexus = "MessengerNexus"):
def start (nexus):
# One bot for default log channel
real_nexus = core.components[nexus]
LogBot(real_nexus.get_channel('log'))
# This will create new channels on demand
real_nexus.addListener(ChannelCreate, _handle_new_channel)
core.call_when_ready(start, nexus, args=[nexus])
|
Archassault/archassaultweb | refs/heads/master | releng/urls.py | 2 | from django.conf.urls import include, patterns
from .views import ReleaseListView, ReleaseDetailView
feedback_patterns = patterns('releng.views',
(r'^$', 'test_results_overview', {}, 'releng-test-overview'),
(r'^submit/$', 'submit_test_result', {}, 'releng-test-submit'),
(r'^thanks/$', 'submit_test_thanks', {}, 'releng-test-thanks'),
(r'^iso/(?P<iso_id>\d+)/$', 'test_results_iso', {}, 'releng-results-iso'),
(r'^(?P<option>.+)/(?P<value>\d+)/$','test_results_for', {}, 'releng-results-for'),
(r'^iso/overview/$', 'iso_overview', {}, 'releng-iso-overview'),
)
releases_patterns = patterns('releng.views',
(r'^$',
ReleaseListView.as_view(), {}, 'releng-release-list'),
(r'^(?P<version>[-.\w]+)/$',
ReleaseDetailView.as_view(), {}, 'releng-release-detail'),
(r'^(?P<version>[-.\w]+)/torrent/$',
'release_torrent', {}, 'releng-release-torrent'),
)
urlpatterns = patterns('',
(r'^feedback/', include(feedback_patterns)),
(r'^releases/', include(releases_patterns)),
)
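# A sketch of resolving the named routes above (assuming this module is
# mounted under a /releng/ prefix; the version value is hypothetical):
#
#   from django.core.urlresolvers import reverse
#   reverse('releng-release-detail', kwargs={'version': '2014.08.01'})
#   # -> '/releng/releases/2014.08.01/'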
# vim: set ts=4 sw=4 et:
|
romain-li/edx-platform | refs/heads/master | common/test/acceptance/tests/lms/test_lms.py | 1 | # -*- coding: utf-8 -*-
"""
End-to-end tests for the LMS.
"""
from datetime import datetime, timedelta
from flaky import flaky
from textwrap import dedent
from unittest import skip
from nose.plugins.attrib import attr
import pytz
import urllib
from bok_choy.promise import EmptyPromise
from common.test.acceptance.tests.helpers import (
UniqueCourseTest,
EventsTestMixin,
load_data_str,
generate_course_key,
select_option_by_value,
element_has_text,
select_option_by_text,
get_selected_option_text
)
from common.test.acceptance.pages.common.logout import LogoutPage
from common.test.acceptance.pages.lms import BASE_URL
from common.test.acceptance.pages.lms.account_settings import AccountSettingsPage
from common.test.acceptance.pages.lms.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.bookmarks import BookmarksPage
from common.test.acceptance.pages.lms.create_mode import ModeCreationPage
from common.test.acceptance.pages.lms.course_home import CourseHomePage
from common.test.acceptance.pages.lms.course_info import CourseInfoPage
from common.test.acceptance.pages.lms.course_wiki import (
CourseWikiPage, CourseWikiEditPage, CourseWikiHistoryPage, CourseWikiChildrenPage
)
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.dashboard import DashboardPage
from common.test.acceptance.pages.lms.login_and_register import CombinedLoginAndRegisterPage, ResetPasswordPage
from common.test.acceptance.pages.lms.pay_and_verify import PaymentAndVerificationFlow, FakePaymentPage
from common.test.acceptance.pages.lms.progress import ProgressPage
from common.test.acceptance.pages.lms.problem import ProblemPage
from common.test.acceptance.pages.lms.tab_nav import TabNavPage
from common.test.acceptance.pages.lms.track_selection import TrackSelectionPage
from common.test.acceptance.pages.lms.video.video import VideoPage
from common.test.acceptance.pages.studio.settings import SettingsPage
from common.test.acceptance.fixtures.course import CourseFixture, XBlockFixtureDesc, CourseUpdateDesc
@attr(shard=8)
class ForgotPasswordPageTest(UniqueCourseTest):
"""
    Test that the forgot password form is rendered if the URL contains
    'forgot-password-modal' in its hash.
"""
def setUp(self):
""" Initialize the page object """
super(ForgotPasswordPageTest, self).setUp()
self.user_info = self._create_user()
self.reset_password_page = ResetPasswordPage(self.browser)
def _create_user(self):
"""
Create a unique user
"""
auto_auth = AutoAuthPage(self.browser).visit()
user_info = auto_auth.user_info
LogoutPage(self.browser).visit()
return user_info
def test_reset_password_form_visibility(self):
# Navigate to the password reset page
self.reset_password_page.visit()
# Expect that reset password form is visible on the page
self.assertTrue(self.reset_password_page.is_form_visible())
def test_reset_password_confirmation_box_visibility(self):
# Navigate to the password reset page
self.reset_password_page.visit()
# Navigate to the password reset form and try to submit it
self.reset_password_page.fill_password_reset_form(self.user_info['email'])
self.reset_password_page.is_success_visible(".submission-success")
# Expect that we're shown a success message
self.assertIn("Check Your Email", self.reset_password_page.get_success_message())
@attr(shard=8)
class LoginFromCombinedPageTest(UniqueCourseTest):
"""Test that we can log in using the combined login/registration page.
Also test that we can request a password reset from the combined
login/registration page.
"""
def setUp(self):
"""Initialize the page objects and create a test course. """
super(LoginFromCombinedPageTest, self).setUp()
self.login_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="login",
course_id=self.course_id
)
self.dashboard_page = DashboardPage(self.browser)
# Create a course to enroll in
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_login_success(self):
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page and try to log in
self.login_page.visit().login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
def test_login_failure(self):
# Navigate to the login page
self.login_page.visit()
# User account does not exist
self.login_page.login(email="nobody@nowhere.com", password="password")
# Verify that an error is displayed
self.assertIn("Email or password is incorrect.", self.login_page.wait_for_errors())
def test_toggle_to_register_form(self):
self.login_page.visit().toggle_form()
self.assertEqual(self.login_page.current_form, "register")
@flaky # ECOM-1165
def test_password_reset_success(self):
# Create a user account
email, password = self._create_unique_user() # pylint: disable=unused-variable
# Navigate to the password reset form and try to submit it
self.login_page.visit().password_reset(email=email)
# Expect that we're shown a success message
self.assertIn("Check Your Email", self.login_page.wait_for_success())
def test_password_reset_no_user(self):
# Navigate to the password reset form
self.login_page.visit()
# User account does not exist
self.login_page.password_reset(email="nobody@nowhere.com")
# Expect that we're shown a success message
self.assertIn("Check Your Email", self.login_page.wait_for_success())
def test_third_party_login(self):
"""
Test that we can login using third party credentials, and that the
third party account gets linked to the edX account.
"""
# Create a user account
email, password = self._create_unique_user()
# Navigate to the login page
self.login_page.visit()
# Baseline screen-shots are different for chrome and firefox.
#self.assertScreenshot('#login .login-providers', 'login-providers-{}'.format(self.browser.name), .25)
#The line above is commented out temporarily see SOL-1937
# Try to log in using "Dummy" provider
self.login_page.click_third_party_dummy_provider()
# The user will be redirected somewhere and then back to the login page:
msg_text = self.login_page.wait_for_auth_status_message()
self.assertIn("You have successfully signed into Dummy", msg_text)
self.assertIn(
u"To link your accounts, sign in now using your édX password",
msg_text
)
# Now login with username and password:
self.login_page.login(email=email, password=password)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
try:
# Now logout and check that we can log back in instantly (because the account is linked):
LogoutPage(self.browser).visit()
self.login_page.visit()
self.login_page.click_third_party_dummy_provider()
self.dashboard_page.wait_for_page()
finally:
self._unlink_dummy_account()
def test_hinted_login(self):
""" Test the login page when coming from course URL that specified which third party provider to use """
# Create a user account and link it to third party auth with the dummy provider:
AutoAuthPage(self.browser, course_id=self.course_id).visit()
self._link_dummy_account()
try:
LogoutPage(self.browser).visit()
# When not logged in, try to load a course URL that includes the provider hint ?tpa_hint=...
course_page = CoursewarePage(self.browser, self.course_id)
self.browser.get(course_page.url + '?tpa_hint=oa2-dummy')
# We should now be redirected to the login page
self.login_page.wait_for_page()
self.assertIn(
"Would you like to sign in using your Dummy credentials?",
self.login_page.hinted_login_prompt
)
# Baseline screen-shots are different for chrome and firefox.
#self.assertScreenshot('#hinted-login-form', 'hinted-login-{}'.format(self.browser.name), .25)
#The line above is commented out temporarily see SOL-1937
self.login_page.click_third_party_dummy_provider()
# We should now be redirected to the course page
course_page.wait_for_page()
finally:
self._unlink_dummy_account()
def _link_dummy_account(self):
""" Go to Account Settings page and link the user's account to the Dummy provider """
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Link Your Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
# make sure we are on "Linked Accounts" tab after the account settings
# page is reloaded
account_settings.switch_account_settings_tabs('accounts-tab')
account_settings.wait_for_link_title_for_link_field(field_id, "Unlink This Account")
def _unlink_dummy_account(self):
""" Verify that the 'Dummy' third party auth provider is linked, then unlink it """
# This must be done after linking the account, or we'll get cross-test side effects
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Unlink This Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_message(field_id, "Successfully unlinked")
def _create_unique_user(self):
"""
Create a new user with a unique name and email.
"""
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
password = "password"
# Create the user (automatically logs us in)
AutoAuthPage(
self.browser,
username=username,
email=email,
password=password
).visit()
# Log out
LogoutPage(self.browser).visit()
return (email, password)
@attr(shard=8)
class RegisterFromCombinedPageTest(UniqueCourseTest):
"""Test that we can register a new user from the combined login/registration page. """
def setUp(self):
"""Initialize the page objects and create a test course. """
super(RegisterFromCombinedPageTest, self).setUp()
self.register_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="register",
course_id=self.course_id
)
self.dashboard_page = DashboardPage(self.browser)
# Create a course to enroll in
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
def test_register_success(self):
# Navigate to the registration page
self.register_page.visit()
# Fill in the form and submit it
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.register_page.register(
email=email,
password="password",
username=username,
full_name="Test User",
country="US",
favorite_movie="Mad Max: Fury Road",
terms_of_service=True
)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
def test_register_failure(self):
# Navigate to the registration page
self.register_page.visit()
# Enter a blank for the username field, which is required
# Don't agree to the terms of service / honor code.
# Don't specify a country code, which is required.
# Don't specify a favorite movie.
username = "test_{uuid}".format(uuid=self.unique_id[0:6])
email = "{user}@example.com".format(user=username)
self.register_page.register(
email=email,
password="password",
username="",
full_name="Test User",
terms_of_service=False
)
# Verify that the expected errors are displayed.
errors = self.register_page.wait_for_errors()
self.assertIn(u'Please enter your Public username.', errors)
self.assertIn(
u'You must agree to the édX Terms of Service and Honor Code',
errors
)
self.assertIn(u'Please select your Country.', errors)
self.assertIn(u'Please tell us your favorite movie.', errors)
def test_toggle_to_login_form(self):
self.register_page.visit().toggle_form()
self.assertEqual(self.register_page.current_form, "login")
def test_third_party_register(self):
"""
Test that we can register using third party credentials, and that the
third party account gets linked to the edX account.
"""
# Navigate to the register page
self.register_page.visit()
# Baseline screen-shots are different for chrome and firefox.
#self.assertScreenshot('#register .login-providers', 'register-providers-{}'.format(self.browser.name), .25)
# The line above is commented out temporarily see SOL-1937
# Try to authenticate using the "Dummy" provider
self.register_page.click_third_party_dummy_provider()
# The user will be redirected somewhere and then back to the register page:
msg_text = self.register_page.wait_for_auth_status_message()
self.assertEqual(self.register_page.current_form, "register")
self.assertIn("You've successfully signed into Dummy", msg_text)
self.assertIn("We just need a little more information", msg_text)
# Now the form should be pre-filled with the data from the Dummy provider:
self.assertEqual(self.register_page.email_value, "adama@fleet.colonies.gov")
self.assertEqual(self.register_page.full_name_value, "William Adama")
self.assertIn("Galactica1", self.register_page.username_value)
# Set country, accept the terms, and submit the form:
self.register_page.register(country="US", favorite_movie="Battlestar Galactica", terms_of_service=True)
# Expect that we reach the dashboard and we're auto-enrolled in the course
course_names = self.dashboard_page.wait_for_page().available_courses
self.assertIn(self.course_info["display_name"], course_names)
# Now logout and check that we can log back in instantly (because the account is linked):
LogoutPage(self.browser).visit()
login_page = CombinedLoginAndRegisterPage(self.browser, start_page="login")
login_page.visit()
login_page.click_third_party_dummy_provider()
self.dashboard_page.wait_for_page()
# Now unlink the account (To test the account settings view and also to prevent cross-test side effects)
account_settings = AccountSettingsPage(self.browser).visit()
# switch to "Linked Accounts" tab
account_settings.switch_account_settings_tabs('accounts-tab')
field_id = "auth-oa2-dummy"
account_settings.wait_for_field(field_id)
self.assertEqual("Unlink This Account", account_settings.link_title_for_link_field(field_id))
account_settings.click_on_link_in_link_field(field_id)
account_settings.wait_for_message(field_id, "Successfully unlinked")
@attr(shard=8)
class PayAndVerifyTest(EventsTestMixin, UniqueCourseTest):
"""Test that we can proceed through the payment and verification flow."""
def setUp(self):
"""Initialize the test.
Create the necessary page objects, create a test course and configure its modes,
create a user and log them in.
"""
super(PayAndVerifyTest, self).setUp()
self.track_selection_page = TrackSelectionPage(self.browser, self.course_id)
self.payment_and_verification_flow = PaymentAndVerificationFlow(self.browser, self.course_id)
self.immediate_verification_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='verify-now')
self.upgrade_page = PaymentAndVerificationFlow(self.browser, self.course_id, entry_point='upgrade')
self.fake_payment_page = FakePaymentPage(self.browser, self.course_id)
self.dashboard_page = DashboardPage(self.browser)
# Create a course
CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
).install()
# Add an honor mode to the course
ModeCreationPage(self.browser, self.course_id).visit()
# Add a verified mode to the course
ModeCreationPage(self.browser, self.course_id, mode_slug=u'verified', mode_display_name=u'Verified Certificate', min_price=10, suggested_prices='10,20').visit()
@skip("Flaky 02/02/2015")
def test_immediate_verification_enrollment(self):
# Create a user and log them in
student_id = AutoAuthPage(self.browser).visit().get_user_id()
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
# Proceed to verification
self.payment_and_verification_flow.immediate_verification()
# Take face photo and proceed to the ID photo step
self.payment_and_verification_flow.webcam_capture()
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Take ID photo and proceed to the review photos step
self.payment_and_verification_flow.webcam_capture()
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Submit photos and proceed to the enrollment confirmation step
self.payment_and_verification_flow.next_verification_step(self.immediate_verification_page)
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
def test_deferred_verification_enrollment(self):
# Create a user and log them in
student_id = AutoAuthPage(self.browser).visit().get_user_id()
# Navigate to the track selection page
self.track_selection_page.visit()
# Enter the payment and verification flow by choosing to enroll as verified
self.track_selection_page.enroll('verified')
# Proceed to the fake payment page
self.payment_and_verification_flow.proceed_to_payment()
# Submit payment
self.fake_payment_page.submit_payment()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
def test_enrollment_upgrade(self):
# Create a user, log them in, and enroll them in the honor mode
student_id = AutoAuthPage(self.browser, course_id=self.course_id).visit().get_user_id()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as honor in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'honor')
# Click the upsell button on the dashboard
self.dashboard_page.upgrade_enrollment(self.course_info["display_name"], self.upgrade_page)
# Select the first contribution option appearing on the page
self.upgrade_page.indicate_contribution()
# Proceed to the fake payment page
self.upgrade_page.proceed_to_payment()
def only_enrollment_events(event):
"""Filter out all non-enrollment events."""
return event['event_type'].startswith('edx.course.enrollment.')
expected_events = [
{
'event_type': 'edx.course.enrollment.mode_changed',
'event': {
'user_id': int(student_id),
'mode': 'verified',
}
}
]
with self.assert_events_match_during(event_filter=only_enrollment_events, expected_events=expected_events):
# Submit payment
self.fake_payment_page.submit_payment()
# Navigate to the dashboard
self.dashboard_page.visit()
# Expect that we're enrolled as verified in the course
enrollment_mode = self.dashboard_page.get_enrollment_mode(self.course_info["display_name"])
self.assertEqual(enrollment_mode, 'verified')
class CourseWikiTest(UniqueCourseTest):
"""
Tests that verify the course wiki.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(CourseWikiTest, self).setUp()
# self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
self.course_info['number'] = self.unique_id[0:6]
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.course_wiki_page = CourseWikiPage(self.browser, self.course_id)
self.course_wiki_edit_page = CourseWikiEditPage(self.browser, self.course_id, self.course_info)
self.tab_nav = TabNavPage(self.browser)
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
# Access course wiki page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Wiki')
def _open_editor(self):
self.course_wiki_page.open_editor()
self.course_wiki_edit_page.wait_for_page()
@attr(shard=1)
def test_edit_course_wiki(self):
"""
Wiki page by default is editable for students.
After accessing the course wiki,
Replace the content of the default page
Confirm new content has been saved
"""
content = "hello"
self._open_editor()
self.course_wiki_edit_page.replace_wiki_content(content)
self.course_wiki_edit_page.save_wiki_content()
actual_content = unicode(self.course_wiki_page.q(css='.wiki-article p').text[0])
self.assertEqual(content, actual_content)
@attr('a11y')
def test_view_a11y(self):
"""
Verify the basic accessibility of the wiki page as initially displayed.
"""
self.course_wiki_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
def test_edit_a11y(self):
"""
Verify the basic accessibility of edit wiki page.
"""
self._open_editor()
self.course_wiki_edit_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
def test_changes_a11y(self):
"""
        Verify the basic accessibility of the wiki changes (history) page.
"""
self.course_wiki_page.show_history()
history_page = CourseWikiHistoryPage(self.browser, self.course_id, self.course_info)
history_page.wait_for_page()
history_page.a11y_audit.check_for_accessibility_errors()
@attr('a11y')
def test_children_a11y(self):
"""
        Verify the basic accessibility of the wiki children page.
"""
self.course_wiki_page.show_children()
children_page = CourseWikiChildrenPage(self.browser, self.course_id, self.course_info)
children_page.wait_for_page()
children_page.a11y_audit.check_for_accessibility_errors()
class HighLevelTabTest(UniqueCourseTest):
"""
Tests that verify each of the high-level tabs available within a course.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(HighLevelTabTest, self).setUp()
# self.course_info['number'] must be shorter since we are accessing the wiki. See TNL-1751
self.course_info['number'] = self.unique_id[0:6]
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.progress_page = ProgressPage(self.browser, self.course_id)
self.course_home_page = CourseHomePage(self.browser, self.course_id)
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
self.video = VideoPage(self.browser)
# Install a course with sections/problems, tabs, updates, and handouts
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
course_fix.add_handout('demoPDF.pdf')
course_fix.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab', data=r"static tab data with mathjax \(E=mc^2\)"),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
),
XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection 2'),
XBlockFixtureDesc('sequential', 'Test Subsection 3').add_children(
XBlockFixtureDesc('problem', 'Test Problem A', data=load_data_str('multiple_choice.xml'))
),
)
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
@attr(shard=1)
def test_course_info(self):
"""
Navigate to the course info page.
"""
# Navigate to the course info page from the progress page
self.progress_page.visit()
self.tab_nav.go_to_tab('Home')
# Expect just one update
self.assertEqual(self.course_info_page.num_updates, 1)
# Expect a link to the demo handout pdf
handout_links = self.course_info_page.handout_links
self.assertEqual(len(handout_links), 1)
self.assertIn('demoPDF.pdf', handout_links[0])
@attr(shard=1)
def test_progress(self):
"""
Navigate to the progress page.
"""
# Navigate to the progress page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Progress')
# We haven't answered any problems yet, so assume scores are zero
# Only problems should have scores; so there should be 2 scores.
CHAPTER = 'Test Section'
SECTION = 'Test Subsection'
EXPECTED_SCORES = [(0, 3), (0, 1)]
actual_scores = self.progress_page.scores(CHAPTER, SECTION)
self.assertEqual(actual_scores, EXPECTED_SCORES)
@attr(shard=1)
def test_static_tab(self):
"""
Navigate to a static tab (course content)
"""
# From the course info page, navigate to the static tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Test Static Tab')
self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))
@attr(shard=1)
def test_static_tab_with_mathjax(self):
"""
Navigate to a static tab (course content)
"""
# From the course info page, navigate to the static tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Test Static Tab')
self.assertTrue(self.tab_nav.is_on_tab('Test Static Tab'))
# Verify that Mathjax has rendered
self.tab_nav.mathjax_has_rendered()
@attr(shard=1)
def test_wiki_tab_first_time(self):
"""
Navigate to the course wiki tab. When the wiki is accessed for
the first time, it is created on the fly.
"""
course_wiki = CourseWikiPage(self.browser, self.course_id)
# From the course info page, navigate to the wiki tab
self.course_info_page.visit()
self.tab_nav.go_to_tab('Wiki')
self.assertTrue(self.tab_nav.is_on_tab('Wiki'))
# Assert that a default wiki is created
expected_article_name = "{org}.{course_number}.{course_run}".format(
org=self.course_info['org'],
course_number=self.course_info['number'],
course_run=self.course_info['run']
)
self.assertEqual(expected_article_name, course_wiki.article_name)
# TODO: TNL-6546: This whole function will be able to go away, replaced by test_course_home below.
@attr(shard=1)
def test_courseware_nav(self):
"""
Navigate to a particular unit in the course.
"""
# Navigate to the course page from the info page
self.course_info_page.visit()
self.tab_nav.go_to_tab('Course')
# Check that the course navigation appears correctly
EXPECTED_SECTIONS = {
'Test Section': ['Test Subsection'],
'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']
}
actual_sections = self.courseware_page.nav.sections
for section, subsections in EXPECTED_SECTIONS.iteritems():
self.assertIn(section, actual_sections)
self.assertEqual(actual_sections[section], EXPECTED_SECTIONS[section])
# Navigate to a particular section
self.courseware_page.nav.go_to_section('Test Section', 'Test Subsection')
# Check the sequence items
EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']
actual_items = self.courseware_page.nav.sequence_items
self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))
for expected in EXPECTED_ITEMS:
self.assertIn(expected, actual_items)
# Navigate to a particular section other than the default landing section.
self.courseware_page.nav.go_to_section('Test Section 2', 'Test Subsection 3')
self.assertTrue(self.courseware_page.nav.is_on_section('Test Section 2', 'Test Subsection 3'))
@attr(shard=1)
def test_course_home(self):
"""
Navigate to the course home page using the tab.
Includes smoke test of course outline, courseware page, and breadcrumbs.
"""
# TODO: TNL-6546: Use tab navigation and remove course_home_page.visit().
#self.course_info_page.visit()
#self.tab_nav.go_to_tab('Course')
self.course_home_page.visit()
# TODO: TNL-6546: Remove unified_course_view.
self.course_home_page.unified_course_view = True
self.courseware_page.nav.unified_course_view = True
# Check that the tab lands on the course home page.
self.assertTrue(self.course_home_page.is_browser_on_page())
# Check that the course navigation appears correctly
EXPECTED_SECTIONS = {
'Test Section': ['Test Subsection'],
'Test Section 2': ['Test Subsection 2', 'Test Subsection 3']
}
actual_sections = self.course_home_page.outline.sections
for section, subsections in EXPECTED_SECTIONS.iteritems():
self.assertIn(section, actual_sections)
self.assertEqual(actual_sections[section], EXPECTED_SECTIONS[section])
# Navigate to a particular section
self.course_home_page.outline.go_to_section('Test Section', 'Test Subsection')
# Check the sequence items on the courseware page
EXPECTED_ITEMS = ['Test Problem 1', 'Test Problem 2', 'Test HTML']
actual_items = self.courseware_page.nav.sequence_items
self.assertEqual(len(actual_items), len(EXPECTED_ITEMS))
for expected in EXPECTED_ITEMS:
self.assertIn(expected, actual_items)
# Use outline breadcrumb to get back to course home page.
self.courseware_page.nav.go_to_outline()
# Navigate to a particular section other than the default landing section.
self.course_home_page.outline.go_to_section('Test Section 2', 'Test Subsection 3')
self.assertTrue(self.courseware_page.nav.is_on_section('Test Section 2', 'Test Subsection 3'))
# Verify that we can navigate to the bookmarks page
self.course_home_page.visit()
self.course_home_page.click_bookmarks_button()
bookmarks_page = BookmarksPage(self.browser, self.course_id)
self.assertTrue(bookmarks_page.is_browser_on_page())
# Test "Resume Course" button from header
self.course_home_page.visit()
self.course_home_page.resume_course_from_header()
self.assertTrue(self.courseware_page.nav.is_on_section('Test Section 2', 'Test Subsection 3'))
# Test "Resume Course" button from within outline
self.course_home_page.visit()
self.course_home_page.outline.resume_course_from_outline()
self.assertTrue(self.courseware_page.nav.is_on_section('Test Section 2', 'Test Subsection 3'))
@attr('a11y')
def test_course_home_a11y(self):
self.course_home_page.visit()
self.course_home_page.a11y_audit.check_for_accessibility_errors()
@attr(shard=1)
class PDFTextBooksTabTest(UniqueCourseTest):
"""
Tests that verify each of the textbook tabs available within a course.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(PDFTextBooksTabTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
# Install a course with TextBooks
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
# Add PDF textbooks to course fixture.
for i in range(1, 3):
course_fix.add_textbook("PDF Book {}".format(i), [{"title": "Chapter Of Book {}".format(i), "url": ""}])
course_fix.install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_verify_textbook_tabs(self):
"""
Test that multiple PDF textbooks load correctly in the LMS.
"""
self.course_info_page.visit()
# Visit each PDF textbook tab; this will fail if the correct tab does not load.
for i in range(1, 3):
self.tab_nav.go_to_tab("PDF Book {}".format(i))
@attr(shard=1)
class VisibleToStaffOnlyTest(UniqueCourseTest):
"""
Tests that content with visible_to_staff_only set to True cannot be viewed by students.
"""
def setUp(self):
super(VisibleToStaffOnlyTest, self).setUp()
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Subsection With Locked Unit').add_children(
XBlockFixtureDesc('vertical', 'Locked Unit', metadata={'visible_to_staff_only': True}).add_children(
XBlockFixtureDesc('html', 'Html Child in locked unit', data="<html>Visible only to staff</html>"),
),
XBlockFixtureDesc('vertical', 'Unlocked Unit').add_children(
XBlockFixtureDesc('html', 'Html Child in unlocked unit', data="<html>Visible to all</html>"),
)
),
XBlockFixtureDesc('sequential', 'Unlocked Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc('html', 'Html Child in visible unit', data="<html>Visible to all</html>"),
)
),
XBlockFixtureDesc('sequential', 'Locked Subsection', metadata={'visible_to_staff_only': True}).add_children(
XBlockFixtureDesc('vertical', 'Test Unit').add_children(
XBlockFixtureDesc(
'html', 'Html Child in locked subsection', data="<html>Visible only to staff</html>"
)
)
)
)
).install()
self.course_home_page = CourseHomePage(self.browser, self.course_id)
self.courseware_page = CoursewarePage(self.browser, self.course_id)
def test_visible_to_staff(self):
"""
Scenario: All content is visible for a user marked is_staff (different from course staff)
Given some of the course content has been marked 'visible_to_staff_only'
And I am logged on with an account marked 'is_staff'
Then I can see all course content
"""
AutoAuthPage(self.browser, username="STAFF_TESTER", email="johndoe_staff@example.com",
course_id=self.course_id, staff=True).visit()
self.course_home_page.visit()
self.assertEqual(3, len(self.course_home_page.outline.sections['Test Section']))
self.course_home_page.outline.go_to_section("Test Section", "Subsection With Locked Unit")
self.courseware_page.wait_for_page()
self.assertEqual([u'Locked Unit', u'Unlocked Unit'], self.courseware_page.nav.sequence_items)
self.course_home_page.visit()
self.course_home_page.outline.go_to_section("Test Section", "Unlocked Subsection")
self.courseware_page.wait_for_page()
self.assertEqual([u'Test Unit'], self.courseware_page.nav.sequence_items)
self.course_home_page.visit()
self.course_home_page.outline.go_to_section("Test Section", "Locked Subsection")
self.courseware_page.wait_for_page()
self.assertEqual([u'Test Unit'], self.courseware_page.nav.sequence_items)
def test_visible_to_student(self):
"""
Scenario: Content marked 'visible_to_staff_only' is not visible for students in the course
Given some of the course content has been marked 'visible_to_staff_only'
And I am logged on with an authorized student account
Then I can only see content without 'visible_to_staff_only' set to True
"""
AutoAuthPage(self.browser, username="STUDENT_TESTER", email="johndoe_student@example.com",
course_id=self.course_id, staff=False).visit()
self.course_home_page.visit()
self.assertEqual(2, len(self.course_home_page.outline.sections['Test Section']))
self.course_home_page.outline.go_to_section("Test Section", "Subsection With Locked Unit")
self.courseware_page.wait_for_page()
self.assertEqual([u'Unlocked Unit'], self.courseware_page.nav.sequence_items)
self.course_home_page.visit()
self.course_home_page.outline.go_to_section("Test Section", "Unlocked Subsection")
self.courseware_page.wait_for_page()
self.assertEqual([u'Test Unit'], self.courseware_page.nav.sequence_items)
@attr(shard=1)
class TooltipTest(UniqueCourseTest):
"""
Tests that tooltips are displayed
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(TooltipTest, self).setUp()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_children(
XBlockFixtureDesc('static_tab', 'Test Static Tab'),
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
XBlockFixtureDesc('problem', 'Test Problem 2', data=load_data_str('formula_problem.xml')),
XBlockFixtureDesc('html', 'Test HTML'),
)
)
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_tooltip(self):
"""
Verify that tooltips are displayed when you hover over the sequence nav bar.
"""
self.courseware_page.visit()
self.courseware_page.verify_tooltips_displayed()
@attr(shard=1)
class PreRequisiteCourseTest(UniqueCourseTest):
"""
Tests that pre-requisite course messages are displayed
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(PreRequisiteCourseTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
self.prc_info = {
'org': 'test_org',
'number': self.unique_id,
'run': 'prc_test_run',
'display_name': 'PR Test Course' + self.unique_id
}
CourseFixture(
self.prc_info['org'], self.prc_info['number'],
self.prc_info['run'], self.prc_info['display_name']
).install()
pre_requisite_course_key = generate_course_key(
self.prc_info['org'],
self.prc_info['number'],
self.prc_info['run']
)
self.pre_requisite_course_id = unicode(pre_requisite_course_key)
self.dashboard_page = DashboardPage(self.browser)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_dashboard_message(self):
"""
Scenario: For any course with a pre-requisite course set, the student dashboard should show
appropriate messaging.
Given that I am on the student dashboard
When I view a course with a pre-requisite course set
Then at the bottom of the course I should see a course requirements message.
"""
# Visit the dashboard page and make sure there is no pre-requisite course message.
self.dashboard_page.visit()
self.assertFalse(self.dashboard_page.pre_requisite_message_displayed())
# Logout and login as a staff.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
# visit course settings page and set pre-requisite course
self.settings_page.visit()
self._set_pre_requisite_course()
# Logout and login as a student.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()
# Visit the dashboard page again; it should now show the pre-requisite course message.
self.dashboard_page.visit()
EmptyPromise(lambda: self.dashboard_page.available_courses > 0, 'Dashboard page loaded').fulfill()
self.assertTrue(self.dashboard_page.pre_requisite_message_displayed())
def _set_pre_requisite_course(self):
"""
Set the pre-requisite course in the course settings.
"""
select_option_by_value(self.settings_page.pre_requisite_course_options, self.pre_requisite_course_id)
self.settings_page.save_changes()
@attr(shard=1)
class ProblemExecutionTest(UniqueCourseTest):
"""
Tests of problems.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(ProblemExecutionTest, self).setUp()
self.course_home_page = CourseHomePage(self.browser, self.course_id)
self.tab_nav = TabNavPage(self.browser)
# Install a course with sections and problems.
course_fix = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
course_fix.add_asset(['python_lib.zip'])
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('problem', 'Python Problem', data=dedent(
"""\
<problem>
<script type="loncapa/python">
from number_helpers import seventeen, fortytwo
oneseven = seventeen()
def check_function(expect, ans):
if int(ans) == fortytwo(-22):
return True
else:
return False
</script>
<p>What is the sum of $oneseven and 3?</p>
<customresponse expect="20" cfn="check_function">
<textline/>
</customresponse>
</problem>
"""
))
)
)
).install()
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_python_execution_in_problem(self):
# Navigate to the problem page
self.course_home_page.visit()
self.course_home_page.outline.go_to_section('Test Section', 'Test Subsection')
problem_page = ProblemPage(self.browser)
self.assertEqual(problem_page.problem_name.upper(), 'PYTHON PROBLEM')
# Does the page have computation results?
self.assertIn("What is the sum of 17 and 3?", problem_page.problem_text)
# Fill in the answer correctly.
problem_page.fill_answer("20")
problem_page.click_submit()
self.assertTrue(problem_page.is_correct())
# Fill in the answer incorrectly.
problem_page.fill_answer("4")
problem_page.click_submit()
self.assertFalse(problem_page.is_correct())
@attr(shard=1)
class EntranceExamTest(UniqueCourseTest):
"""
Tests that course has an entrance exam.
"""
def setUp(self):
"""
Initialize pages and install a course fixture.
"""
super(EntranceExamTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
).install()
self.courseware_page = CoursewarePage(self.browser, self.course_id)
self.settings_page = SettingsPage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
# Auto-auth register for the course
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_entrance_exam_section(self):
"""
Scenario: Any course that has an entrance exam enabled should show an entrance exam chapter on the
course page.
Given that I am on the course page
When I view a course that has an entrance exam
Then there should be an "Entrance Exam" chapter.
"""
entrance_exam_link_selector = '.accordion .course-navigation .chapter .group-heading'
# Visit the course page and make sure there is no entrance exam chapter.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
self.assertFalse(element_has_text(
page=self.courseware_page,
css_selector=entrance_exam_link_selector,
text='Entrance Exam'
))
# Logout and login as a staff.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=True).visit()
# Visit the course settings page and enable the entrance exam for the course.
self.settings_page.visit()
self.settings_page.entrance_exam_field.click()
self.settings_page.save_changes()
# Logout and login as a student.
LogoutPage(self.browser).visit()
AutoAuthPage(self.browser, course_id=self.course_id, staff=False).visit()
# Visit the courseware page and make sure there is an "Entrance Exam" section.
self.courseware_page.visit()
self.courseware_page.wait_for_page()
self.assertTrue(element_has_text(
page=self.courseware_page,
css_selector=entrance_exam_link_selector,
text='Entrance Exam'
))
@attr(shard=1)
class NotLiveRedirectTest(UniqueCourseTest):
"""
Test that a banner is shown when the user is redirected to
the dashboard from a non-live course.
"""
def setUp(self):
"""Create a course that isn't live yet and enroll for it."""
super(NotLiveRedirectTest, self).setUp()
CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name'],
start_date=datetime(year=2099, month=1, day=1)
).install()
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_redirect_banner(self):
"""
Navigate to the course info page, then check that we're on the
dashboard page with the appropriate message.
"""
url = BASE_URL + "/courses/" + self.course_id + "/" + 'info'
self.browser.get(url)
page = DashboardPage(self.browser)
page.wait_for_page()
self.assertIn(
'The course you are looking for does not start until',
page.banner_text
)
@attr(shard=1)
class EnrollmentClosedRedirectTest(UniqueCourseTest):
"""
Test that a banner is shown when the user is redirected to the
dashboard after trying to view the track selection page for a
course after enrollment has ended.
"""
def setUp(self):
"""Create a course that is closed for enrollment, and sign in as a user."""
super(EnrollmentClosedRedirectTest, self).setUp()
course = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
now = datetime.now(pytz.UTC)
course.add_course_details({
'enrollment_start': (now - timedelta(days=30)).isoformat(),
'enrollment_end': (now - timedelta(days=1)).isoformat()
})
course.install()
# Add an honor mode to the course
ModeCreationPage(self.browser, self.course_id).visit()
# Add a verified mode to the course
ModeCreationPage(
self.browser,
self.course_id,
mode_slug=u'verified',
mode_display_name=u'Verified Certificate',
min_price=10,
suggested_prices='10,20'
).visit()
def _assert_dashboard_message(self):
"""
Assert that the 'closed for enrollment' text is present on the
dashboard.
"""
page = DashboardPage(self.browser)
page.wait_for_page()
self.assertIn(
'The course you are looking for is closed for enrollment',
page.banner_text
)
def test_redirect_banner(self):
"""
Navigate to the course info page, then check that we're on the
dashboard page with the appropriate message.
"""
AutoAuthPage(self.browser).visit()
url = BASE_URL + "/course_modes/choose/" + self.course_id
self.browser.get(url)
self._assert_dashboard_message()
def test_login_redirect(self):
"""
Test that the user is correctly redirected after logistration when
attempting to enroll in a closed course.
"""
url = '{base_url}/register?{params}'.format(
base_url=BASE_URL,
params=urllib.urlencode({
'course_id': self.course_id,
'enrollment_action': 'enroll',
'email_opt_in': 'false'
})
)
self.browser.get(url)
register_page = CombinedLoginAndRegisterPage(
self.browser,
start_page="register",
course_id=self.course_id
)
register_page.wait_for_page()
register_page.register(
email="email@example.com",
password="password",
username="username",
full_name="Test User",
country="US",
favorite_movie="Mad Max: Fury Road",
terms_of_service=True
)
self._assert_dashboard_message()
@attr(shard=1)
class LMSLanguageTest(UniqueCourseTest):
""" Test suite for the LMS Language """
def setUp(self):
super(LMSLanguageTest, self).setUp()
self.dashboard_page = DashboardPage(self.browser)
self.account_settings = AccountSettingsPage(self.browser)
AutoAuthPage(self.browser).visit()
def test_lms_language_change(self):
"""
Scenario: Ensure that language selection works correctly.
First I go to the user dashboard page in LMS. I can see 'English' is selected by default.
Then I choose 'Dummy Language' from drop down (at top of the page).
Then I visit the student account settings page and I can see the language has been updated to 'Dummy Language'
in both drop downs.
After that I select the 'English' language and visit the dashboard page again.
Then I can see that the top-level language selector persists its value as 'English'.
"""
self.dashboard_page.visit()
language_selector = self.dashboard_page.language_selector
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
select_option_by_text(language_selector, 'Dummy Language (Esperanto)')
self.dashboard_page.wait_for_ajax()
self.account_settings.visit()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'Dummy Language (Esperanto)')
self.assertEqual(
get_selected_option_text(language_selector),
u'Dummy Language (Esperanto)'
)
# Change back to English.
select_option_by_text(language_selector, 'English')
self.account_settings.wait_for_ajax()
self.assertEqual(self.account_settings.value_for_dropdown_field('pref-lang'), u'English')
self.dashboard_page.visit()
self.assertEqual(
get_selected_option_text(language_selector),
u'English'
)
@attr('a11y')
class CourseInfoA11yTest(UniqueCourseTest):
"""Accessibility test for course home/info page."""
def setUp(self):
super(CourseInfoA11yTest, self).setUp()
self.course_fixture = CourseFixture(
self.course_info['org'], self.course_info['number'],
self.course_info['run'], self.course_info['display_name']
)
self.course_fixture.add_update(
CourseUpdateDesc(date='January 29, 2014', content='Test course update1')
)
self.course_fixture.add_update(
CourseUpdateDesc(date='February 5th, 2014', content='Test course update2')
)
self.course_fixture.add_update(
CourseUpdateDesc(date='March 31st, 2014', content='Test course update3')
)
self.course_fixture.install()
self.course_info_page = CourseInfoPage(self.browser, self.course_id)
AutoAuthPage(self.browser, course_id=self.course_id).visit()
def test_course_info_a11y(self):
self.course_info_page.visit()
self.course_info_page.a11y_audit.check_for_accessibility_errors()
|
ka7eh/django-oscar | refs/heads/master | src/oscar/apps/dashboard/pages/config.py | 58 | from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class PagesDashboardConfig(AppConfig):
label = 'pages_dashboard'
name = 'oscar.apps.dashboard.pages'
verbose_name = _('Pages dashboard')
|
chishaku/alembic | refs/heads/master | tests/test_offline_environment.py | 3 | from alembic.testing.fixtures import TestBase, capture_context_buffer
from alembic import command, util
from alembic.testing import assert_raises_message
from alembic.testing.env import staging_env, _no_sql_testing_config, \
three_rev_fixture, clear_staging_env, env_file_fixture
import re
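# Revision identifiers populated by the three-revision fixture in setUp().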
a = b = c = None
class OfflineEnvironmentTest(TestBase):
def setUp(self):
staging_env()
self.cfg = _no_sql_testing_config()
global a, b, c
a, b, c = three_rev_fixture(self.cfg)
def tearDown(self):
clear_staging_env()
def test_not_requires_connection(self):
env_file_fixture("""
assert not context.requires_connection()
""")
command.upgrade(self.cfg, a, sql=True)
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True)
def test_requires_connection(self):
env_file_fixture("""
assert context.requires_connection()
""")
command.upgrade(self.cfg, a)
command.downgrade(self.cfg, a)
def test_starting_rev_post_context(self):
env_file_fixture("""
context.configure(dialect_name='sqlite', starting_rev='x')
assert context.get_starting_revision_argument() == 'x'
""")
command.upgrade(self.cfg, a, sql=True)
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True)
command.current(self.cfg)
command.stamp(self.cfg, a)
def test_starting_rev_pre_context(self):
env_file_fixture("""
assert context.get_starting_revision_argument() == 'x'
""")
command.upgrade(self.cfg, "x:y", sql=True)
command.downgrade(self.cfg, "x:y", sql=True)
def test_starting_rev_pre_context_cmd_w_no_startrev(self):
env_file_fixture("""
assert context.get_starting_revision_argument() == 'x'
""")
assert_raises_message(
util.CommandError,
"No starting revision argument is available.",
command.current, self.cfg)
def test_starting_rev_current_pre_context(self):
env_file_fixture("""
assert context.get_starting_revision_argument() is None
""")
assert_raises_message(
util.CommandError,
"No starting revision argument is available.",
command.current, self.cfg
)
def test_destination_rev_pre_context(self):
env_file_fixture("""
assert context.get_revision_argument() == '%s'
""" % b)
command.upgrade(self.cfg, b, sql=True)
command.stamp(self.cfg, b, sql=True)
command.downgrade(self.cfg, "%s:%s" % (c, b), sql=True)
def test_destination_rev_post_context(self):
env_file_fixture("""
context.configure(dialect_name='sqlite')
assert context.get_revision_argument() == '%s'
""" % b)
command.upgrade(self.cfg, b, sql=True)
command.downgrade(self.cfg, "%s:%s" % (c, b), sql=True)
command.stamp(self.cfg, b, sql=True)
def test_head_rev_pre_context(self):
env_file_fixture("""
assert context.get_head_revision() == '%s'
""" % c)
command.upgrade(self.cfg, b, sql=True)
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True)
command.stamp(self.cfg, b, sql=True)
command.current(self.cfg)
def test_head_rev_post_context(self):
env_file_fixture("""
context.configure(dialect_name='sqlite')
assert context.get_head_revision() == '%s'
""" % c)
command.upgrade(self.cfg, b, sql=True)
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True)
command.stamp(self.cfg, b, sql=True)
command.current(self.cfg)
def test_tag_pre_context(self):
env_file_fixture("""
assert context.get_tag_argument() == 'hi'
""")
command.upgrade(self.cfg, b, sql=True, tag='hi')
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True, tag='hi')
def test_tag_pre_context_None(self):
env_file_fixture("""
assert context.get_tag_argument() is None
""")
command.upgrade(self.cfg, b, sql=True)
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True)
def test_tag_cmd_arg(self):
env_file_fixture("""
context.configure(dialect_name='sqlite')
assert context.get_tag_argument() == 'hi'
""")
command.upgrade(self.cfg, b, sql=True, tag='hi')
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True, tag='hi')
def test_tag_cfg_arg(self):
env_file_fixture("""
context.configure(dialect_name='sqlite', tag='there')
assert context.get_tag_argument() == 'there'
""")
command.upgrade(self.cfg, b, sql=True, tag='hi')
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True, tag='hi')
def test_tag_None(self):
env_file_fixture("""
context.configure(dialect_name='sqlite')
assert context.get_tag_argument() is None
""")
command.upgrade(self.cfg, b, sql=True)
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True)
def test_downgrade_wo_colon(self):
env_file_fixture("""
context.configure(dialect_name='sqlite')
""")
assert_raises_message(
util.CommandError,
"downgrade with --sql requires <fromrev>:<torev>",
command.downgrade,
self.cfg, b, sql=True
)
def test_upgrade_with_output_encoding(self):
env_file_fixture("""
url = config.get_main_option('sqlalchemy.url')
context.configure(url=url, output_encoding='utf-8')
assert not context.requires_connection()
""")
command.upgrade(self.cfg, a, sql=True)
command.downgrade(self.cfg, "%s:%s" % (b, a), sql=True)
def test_running_comments_not_in_sql(self):
message = "this is a very long \nand multiline\nmessage"
d = command.revision(self.cfg, message=message)
with capture_context_buffer(transactional_ddl=True) as buf:
command.upgrade(self.cfg, "%s:%s" % (a, d.revision), sql=True)
assert not re.match(r".*-- .*and multiline", buf.getvalue(), re.S | re.M)
def test_starting_rev_pre_context_abbreviated(self):
env_file_fixture("""
assert context.get_starting_revision_argument() == '%s'
""" % b[0:4])
command.upgrade(self.cfg, "%s:%s" % (b[0:4], c), sql=True)
command.stamp(self.cfg, "%s:%s" % (b[0:4], c), sql=True)
command.downgrade(self.cfg, "%s:%s" % (b[0:4], a), sql=True)
def test_destination_rev_pre_context_abbreviated(self):
env_file_fixture("""
assert context.get_revision_argument() == '%s'
""" % b[0:4])
command.upgrade(self.cfg, "%s:%s" % (a, b[0:4]), sql=True)
command.stamp(self.cfg, b[0:4], sql=True)
command.downgrade(self.cfg, "%s:%s" % (c, b[0:4]), sql=True)
def test_starting_rev_context_runs_abbreviated(self):
env_file_fixture("""
context.configure(dialect_name='sqlite')
context.run_migrations()
""")
command.upgrade(self.cfg, "%s:%s" % (b[0:4], c), sql=True)
command.downgrade(self.cfg, "%s:%s" % (b[0:4], a), sql=True)
def test_destination_rev_context_runs_abbreviated(self):
env_file_fixture("""
context.configure(dialect_name='sqlite')
context.run_migrations()
""")
command.upgrade(self.cfg, "%s:%s" % (a, b[0:4]), sql=True)
command.stamp(self.cfg, b[0:4], sql=True)
command.downgrade(self.cfg, "%s:%s" % (c, b[0:4]), sql=True)
|
xionzz/earthquake | refs/heads/master | venv/lib/python2.7/site-packages/pip/_vendor/html5lib/treewalkers/genshistream.py | 1730 | from __future__ import absolute_import, division, unicode_literals
from genshi.core import QName
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT
from genshi.core import START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from . import _base
from ..constants import voidElements, namespaces
class TreeWalker(_base.TreeWalker):
def __iter__(self):
# Buffer the events so we can pass in the following one
previous = None
for event in self.tree:
if previous is not None:
for token in self.tokens(previous, event):
yield token
previous = event
# Don't forget the final event!
if previous is not None:
for token in self.tokens(previous, None):
yield token
def tokens(self, event, next):
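        # Translate one genshi stream event (kind, data, pos) into html5lib
        # treewalker tokens; `next` (the following event) is used to detect
        # void elements emitted as a START immediately followed by its END.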
kind, data, pos = event
if kind == START:
tag, attribs = data
name = tag.localname
namespace = tag.namespace
converted_attribs = {}
for k, v in attribs:
if isinstance(k, QName):
converted_attribs[(k.namespace, k.localname)] = v
else:
converted_attribs[(None, k)] = v
if namespace == namespaces["html"] and name in voidElements:
for token in self.emptyTag(namespace, name, converted_attribs,
not next or next[0] != END
or next[1] != tag):
yield token
else:
yield self.startTag(namespace, name, converted_attribs)
elif kind == END:
name = data.localname
namespace = data.namespace
if name not in voidElements:
yield self.endTag(namespace, name)
elif kind == COMMENT:
yield self.comment(data)
elif kind == TEXT:
for token in self.text(data):
yield token
elif kind == DOCTYPE:
yield self.doctype(*data)
elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS,
START_CDATA, END_CDATA, PI):
pass
else:
yield self.unknown(kind)
|
ionelmc/python-cogen | refs/heads/master | cogen/core/__init__.py | 4 | '''
This is a library for network oriented, coroutine based programming.
The interfaces and events/operations aim to mimic some of the regular thread
and socket features.
cogen uses the `enhanced generators <http://www.python.org/dev/peps/pep-0342/>`_
in python 2.5. These generators are bidirectional: they allow passing values in
and out of the generator. The whole framework is based on this.
The generator yields a `Operation` instance and will receive the result from
that yield when the operation is ready.
Example::
@coroutine
def mycoro(bla):
yield <operation>
yield <operation>
* the `operation` instructs the scheduler what to do with the
coroutine: suspend it till something happens, add another coro in
the scheduler, raise a event and so on.
* the `operations` are split up in 2 modules: events and sockets
* the `operations` from sockets are related to network, like reading and
writing, and these are done asynchronously but your code in the
coroutine will see them as a regular synchronous or blocking call.
* the `operations` from events are related to signals and
coroutine/scheduler management.
* if an `operation` has a result associated then the yield will return that
result (eg. a string or a (connection, address) tuple) otherwise it will
return the operation instance.
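A hedged sketch of what a coroutine looks like in practice (``sockets.Read``
and ``sockets.Write`` stand in for the socket operations described above;
treat the exact names and signatures as illustrative, not authoritative)::

    @coroutine
    def echo_once(sock):
        # the yield suspends echo_once until the read completes,
        # then returns the received bytes as the yield's result
        data = yield sockets.Read(sock, 1024)
        yield sockets.Write(sock, data)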
::
Roughly, the cogen internals work like this:
+------------------------+
| @coroutine |
| def foo(): |
| ... | op.process(sched, coro)
| +->result = yield op--|----------------+------------+
| | ... | | |
+--|---------------------+ +---------------+ +---------------------+
| | the operation | | the operation can't |
result = op.finalize() | is ready | | complete right now |
| +------|--------+ +----------|----------+
scheduler runs foo | |
| | |
foo gets in the active | |
coroutines queue | |
| | |
+----------------------<----------+ |
| depending on the op
op.run() +---------+
| socket is ready add it in | |
+-------------<------------ ...... the proactor <--+ |
| later |
+------<------------------- ...... add it in some other <-+
some event decides queue for later run
this op is ready
The scheduler basically does 3 things:
- runs active (coroutine, operation) pairs -- calls process on the op
- runs the proactor
- checks for timeouts
The proactor basically does 2 things:
- calls the system to check what descriptors are ready
- runs the operations that have ready descriptors
The operation does most of the work (via the process, finalize, cleanup, run methods):
- adds itself to the proactor (if it's a socket operation)
- adds itself to some structure to be activated later by some other event
- adds itself and the coro to the scheduler's active coroutines queue
The coroutine decorator wraps foo in a Coroutine class that does some
niceties like exception handling, getting the result from finalize() etc.
'''
|
2013Commons/HUE-SHARK | refs/heads/master | desktop/core/ext-py/Django-1.2.3/build/lib.linux-i686-2.7/django/contrib/sessions/backends/base.py | 90 | import base64
import os
import random
import sys
import time
from datetime import datetime, timedelta
try:
import cPickle as pickle
except ImportError:
import pickle
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.hashcompat import md5_constructor
# Use the system (hardware-based) random number generator if it exists.
if hasattr(random, 'SystemRandom'):
randrange = random.SystemRandom().randrange
else:
randrange = random.randrange
MAX_SESSION_KEY = 18446744073709551616L # 2 << 63
class CreateError(Exception):
"""
Used internally as a consistent exception type to catch from save (see the
docstring for SessionBase.save() for details).
"""
pass
class SessionBase(object):
"""
Base class for all Session classes.
"""
TEST_COOKIE_NAME = 'testcookie'
TEST_COOKIE_VALUE = 'worked'
def __init__(self, session_key=None):
self._session_key = session_key
self.accessed = False
self.modified = False
def __contains__(self, key):
return key in self._session
def __getitem__(self, key):
return self._session[key]
def __setitem__(self, key, value):
self._session[key] = value
self.modified = True
def __delitem__(self, key):
del self._session[key]
self.modified = True
def keys(self):
return self._session.keys()
def items(self):
return self._session.items()
def get(self, key, default=None):
return self._session.get(key, default)
def pop(self, key, *args):
self.modified = self.modified or key in self._session
return self._session.pop(key, *args)
def setdefault(self, key, value):
if key in self._session:
return self._session[key]
else:
self.modified = True
self._session[key] = value
return value
def set_test_cookie(self):
self[self.TEST_COOKIE_NAME] = self.TEST_COOKIE_VALUE
def test_cookie_worked(self):
return self.get(self.TEST_COOKIE_NAME) == self.TEST_COOKIE_VALUE
def delete_test_cookie(self):
del self[self.TEST_COOKIE_NAME]
def encode(self, session_dict):
"Returns the given session dictionary pickled and encoded as a string."
pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
return base64.encodestring(pickled + pickled_md5)
def decode(self, session_data):
encoded_data = base64.decodestring(session_data)
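        # Layout of the decoded blob: <pickled session dict> followed by a
        # 32-character hex MD5 digest of (pickled + SECRET_KEY), used below
        # as a tamper check.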
pickled, tamper_check = encoded_data[:-32], encoded_data[-32:]
if md5_constructor(pickled + settings.SECRET_KEY).hexdigest() != tamper_check:
raise SuspiciousOperation("User tampered with session cookie.")
try:
return pickle.loads(pickled)
# Unpickling can cause a variety of exceptions. If something happens,
# just return an empty dictionary (an empty session).
except:
return {}
def update(self, dict_):
self._session.update(dict_)
self.modified = True
def has_key(self, key):
return self._session.has_key(key)
def values(self):
return self._session.values()
def iterkeys(self):
return self._session.iterkeys()
def itervalues(self):
return self._session.itervalues()
def iteritems(self):
return self._session.iteritems()
def clear(self):
# To avoid unnecessary persistent storage accesses, we set up the
# internals directly (loading data wastes time, since we are going to
# set it to an empty dict anyway).
self._session_cache = {}
self.accessed = True
self.modified = True
def _get_new_session_key(self):
"Returns session key that isn't being used."
# The random module is seeded when this Apache child is created.
# Use settings.SECRET_KEY as added salt.
try:
pid = os.getpid()
except AttributeError:
# No getpid() in Jython, for example
pid = 1
while 1:
session_key = md5_constructor("%s%s%s%s"
% (randrange(0, MAX_SESSION_KEY), pid, time.time(),
settings.SECRET_KEY)).hexdigest()
if not self.exists(session_key):
break
return session_key
def _get_session_key(self):
if self._session_key:
return self._session_key
else:
self._session_key = self._get_new_session_key()
return self._session_key
def _set_session_key(self, session_key):
self._session_key = session_key
session_key = property(_get_session_key, _set_session_key)
def _get_session(self, no_load=False):
"""
Lazily loads session from storage (unless "no_load" is True, when only
an empty dict is stored) and stores it in the current instance.
"""
self.accessed = True
try:
return self._session_cache
except AttributeError:
if self._session_key is None or no_load:
self._session_cache = {}
else:
self._session_cache = self.load()
return self._session_cache
_session = property(_get_session)
def get_expiry_age(self):
"""Get the number of seconds until the session expires."""
expiry = self.get('_session_expiry')
if not expiry: # Checks both None and 0 cases
return settings.SESSION_COOKIE_AGE
if not isinstance(expiry, datetime):
return expiry
delta = expiry - datetime.now()
return delta.days * 86400 + delta.seconds
def get_expiry_date(self):
"""Get session the expiry date (as a datetime object)."""
expiry = self.get('_session_expiry')
if isinstance(expiry, datetime):
return expiry
if not expiry: # Checks both None and 0 cases
expiry = settings.SESSION_COOKIE_AGE
return datetime.now() + timedelta(seconds=expiry)
def set_expiry(self, value):
"""
Sets a custom expiration for the session. ``value`` can be an integer,
a Python ``datetime`` or ``timedelta`` object or ``None``.
If ``value`` is an integer, the session will expire after that many
seconds of inactivity. If set to ``0`` then the session will expire on
browser close.
If ``value`` is a ``datetime`` or ``timedelta`` object, the session
will expire at that specific future time.
If ``value`` is ``None``, the session uses the global session expiry
policy.
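        A minimal sketch of each form (``session`` stands for any concrete
        ``SessionBase`` subclass instance; ``timedelta`` is already imported
        in this module)::

            session.set_expiry(300)                # 300 seconds of inactivity
            session.set_expiry(timedelta(days=7))  # a specific future time
            session.set_expiry(0)                  # expire on browser close
            session.set_expiry(None)               # fall back to the global policy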
"""
if value is None:
# Remove any custom expiration for this session.
try:
del self['_session_expiry']
except KeyError:
pass
return
if isinstance(value, timedelta):
value = datetime.now() + value
self['_session_expiry'] = value
def get_expire_at_browser_close(self):
"""
Returns ``True`` if the session is set to expire when the browser
closes, and ``False`` if there's an expiry date. Use
``get_expiry_date()`` or ``get_expiry_age()`` to find the actual expiry
date/age, if there is one.
"""
if self.get('_session_expiry') is None:
return settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
return self.get('_session_expiry') == 0
def flush(self):
"""
Removes the current session data from the database and regenerates the
key.
"""
self.clear()
self.delete()
self.create()
def cycle_key(self):
"""
Creates a new session key, whilst retaining the current session data.
"""
data = self._session_cache
key = self.session_key
self.create()
self._session_cache = data
self.delete(key)
# Methods that child classes must implement.
def exists(self, session_key):
"""
Returns True if the given session_key already exists.
"""
raise NotImplementedError
def create(self):
"""
Creates a new session instance. Guaranteed to create a new object with
a unique key and will have saved the result once (with empty data)
before the method returns.
"""
raise NotImplementedError
def save(self, must_create=False):
"""
Saves the session data. If 'must_create' is True, a new session object
is created (otherwise a CreateError exception is raised). Otherwise,
save() can update an existing object with the same key.
"""
raise NotImplementedError
def delete(self, session_key=None):
"""
Deletes the session data under this key. If the key is None, the
current session key value is used.
"""
raise NotImplementedError
def load(self):
"""
Loads the session data and returns a dictionary.
"""
raise NotImplementedError
|
berkeleydeeprlcourse/homework | refs/heads/master | hw5/meta/replay_buffer.py | 1 | import numpy as np
class ReplayBuffer(object):
'''
minimalistic replay buffer
a sample consists of
- observation
- action
- reward
- terminal
- hidden state for recurrent policy
it is memory inefficient to store windowed observations this way
so do not run on tasks with large observations (e.g. from vision)
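    a minimal usage sketch (dimensions are illustrative, and ob, ac, re, te,
    hi, task are assumed to come from an environment rollout):
        buf = ReplayBuffer(10000, ob_dim=(4,), ac_dim=(2,), hidden_dim=32, task_dim=1)
        buf.add_sample(ob, ac, re, te, hi, task)
        batch = buf.random_batch(64)  # dict of observations, actions, rewards, ...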
'''
def __init__(self, max_size, ob_dim, ac_dim, hidden_dim, task_dim):
self.max_size = max_size
self.ob_dim = ob_dim
self.ac_dim = ac_dim
self.hidden_dim = hidden_dim
self.task_dim = task_dim
self.flush()
def flush(self):
'''
set buffer to empty
'''
self._observations = np.zeros((self.max_size, *self.ob_dim))
self._actions = np.zeros((self.max_size, *self.ac_dim))
self._rewards = np.zeros((self.max_size, 1))
self._terminals = np.zeros((self.max_size, 1))
self._hiddens = np.zeros((self.max_size, self.hidden_dim))
self._tasks = np.zeros((self.max_size, self.task_dim))
self._top = 0
self._size = 0
def _advance(self):
'''
move pointer to top of buffer
if end of buffer is reached, overwrite oldest data
'''
self._top = (self._top + 1) % self.max_size
if self._size < self.max_size:
self._size += 1
def add_sample(self, ob, ac, re, te, hi, task):
'''
add sample to buffer
'''
self._observations[self._top] = ob
self._actions[self._top] = ac
self._rewards[self._top] = re
self._terminals[self._top] = te
self._hiddens[self._top] = hi
self._tasks[self._top] = task
self._advance()
def get_samples(self, indices):
'''
return buffer data indexed by `indices`
'''
return dict(
observations=self._observations[indices],
actions=self._actions[indices],
rewards=self._rewards[indices],
terminals=self._terminals[indices],
hiddens=self._hiddens[indices],
tasks=self._tasks[indices],
)
def random_batch(self, batch_size):
'''
return random sample of `batch_size` transitions
'''
indices = np.random.randint(0, self._size, batch_size)
return self.get_samples(indices)
def all_batch(self):
'''
return all data in the buffer
'''
indices = list(range(self._size))
return self.get_samples(indices)
def num_steps_can_sample(self):
return self._size
class PPOReplayBuffer(object):
'''
replay buffer for PPO algorithm
store fixed log probs, advantages, and returns for use in multiple updates
n.b. samples must be added as a batch, and we assume that the
batch is the same size as that of the simple buffer
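    a minimal usage sketch (batch size is illustrative):
        ppo_buf = PPOReplayBuffer(simple_buffer)
        ppo_buf.add_samples(log_probs, advantages, returns)
        batch = ppo_buf.random_batch(64)  # merges simple-buffer and PPO fields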
'''
def __init__(self, simple_buffer):
self.simple_buffer = simple_buffer
self.max_size = self.simple_buffer.max_size
self.flush()
def flush(self):
self.simple_buffer.flush()
self._log_probs = np.zeros((self.max_size, 1))
self._advantages = np.zeros((self.max_size, 1))
self._returns = np.zeros((self.max_size, 1))
def add_samples(self, lp, adv, ret):
self._log_probs = lp
self._advantages = adv
self._returns = ret
def get_samples(self, indices):
return dict(
log_probs = self._log_probs[indices],
advantages = self._advantages[indices],
returns = self._returns[indices],
)
def random_batch(self, batch_size):
indices = np.random.randint(0, self.simple_buffer._size, batch_size)
simple = self.simple_buffer.get_samples(indices)
ppo = self.get_samples(indices)
return {**simple, **ppo}
|
xodus7/tensorflow | refs/heads/master | tensorflow/python/ops/clip_ops.py | 3 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for clipping (gradient, weight) tensors to min/max values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import numerics
from tensorflow.python.util.tf_export import tf_export
@tf_export("clip_by_value")
def clip_by_value(t, clip_value_min, clip_value_max,
name=None):
"""Clips tensor values to a specified min and max.
Given a tensor `t`, this operation returns a tensor of the same type and
shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`.
Any values less than `clip_value_min` are set to `clip_value_min`. Any values
greater than `clip_value_max` are set to `clip_value_max`.
Note: `clip_value_min` needs to be smaller than or equal to `clip_value_max` for
correct results.
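  For example, a small illustrative case:

  ```python
  t = tf.constant([-1.0, 0.5, 3.0])
  tf.clip_by_value(t, 0.0, 1.0)  # evaluates to [0.0, 0.5, 1.0]
  ```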
Args:
t: A `Tensor`.
clip_value_min: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The minimum value to clip by.
clip_value_max: A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape
as `t`. The maximum value to clip by.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
Raises:
ValueError: If the clip tensors would trigger array broadcasting
that would make the returned tensor larger than the input.
"""
with ops.name_scope(name, "clip_by_value",
[t, clip_value_min, clip_value_max]) as name:
t = ops.convert_to_tensor(t, name="t")
    # Clip in two steps: first cap values above clip_value_max, then raise
    # values below clip_value_min
t_min = math_ops.minimum(t, clip_value_max)
# Assert that the shape is compatible with the initial shape,
# to prevent unintentional broadcasting.
_ = t.shape.merge_with(t_min.shape)
t_max = math_ops.maximum(t_min, clip_value_min, name=name)
_ = t.shape.merge_with(t_max.shape)
return t_max
# TODO(scottzhu): switch to use new implementation in 2 weeks.
# return gen_math_ops.clip_by_value(
# t, clip_value_min, clip_value_max, name=name)
# TODO(scottzhu): switch to use new implementation in 2 weeks.
# @ops.RegisterGradient("ClipByValue")
def _clip_by_value_grad(op, grad):
"""Returns grad of clip_by_value."""
x = op.inputs[0]
y = op.inputs[1]
z = op.inputs[2]
gdtype = grad.dtype
sx = array_ops.shape(x)
sy = array_ops.shape(y)
sz = array_ops.shape(z)
gradshape = array_ops.shape(grad)
zeros = array_ops.zeros(gradshape, gdtype)
xymask = math_ops.less(x, y)
xzmask = math_ops.greater(x, z)
rx, ry = gen_array_ops.broadcast_gradient_args(sx, sy)
rx, rz = gen_array_ops.broadcast_gradient_args(sx, sz)
xgrad = array_ops.where(math_ops.logical_or(xymask, xzmask), zeros, grad)
ygrad = array_ops.where(xymask, grad, zeros)
zgrad = array_ops.where(xzmask, grad, zeros)
gx = array_ops.reshape(math_ops.reduce_sum(xgrad, rx), sx)
gy = array_ops.reshape(math_ops.reduce_sum(ygrad, ry), sy)
gz = array_ops.reshape(math_ops.reduce_sum(zgrad, rz), sz)
return (gx, gy, gz)
@tf_export("clip_by_norm")
def clip_by_norm(t, clip_norm, axes=None, name=None):
"""Clips tensor values to a maximum L2-norm.
Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
normalizes `t` so that its L2-norm is less than or equal to `clip_norm`,
along the dimensions given in `axes`. Specifically, in the default case
where all dimensions are used for calculation, if the L2-norm of `t` is
already less than or equal to `clip_norm`, then `t` is not modified. If
the L2-norm is greater than `clip_norm`, then this operation returns a
tensor of the same type and shape as `t` with its values set to:
`t * clip_norm / l2norm(t)`
In this case, the L2-norm of the output tensor is `clip_norm`.
As another example, if `t` is a matrix and `axes == [1]`, then each row
of the output will have L2-norm equal to `clip_norm`. If `axes == [0]`
instead, each column of the output will be clipped.
This operation is typically used to clip gradients before applying them with
an optimizer.
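  For example, a small illustrative case:

  ```python
  t = tf.constant([3.0, 4.0])  # L2-norm is 5.0
  tf.clip_by_norm(t, 2.5)      # scaled by 0.5, evaluates to [1.5, 2.0]
  ```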
Args:
t: A `Tensor`.
clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
axes: A 1-D (vector) `Tensor` of type int32 containing the dimensions
to use for computing the L2-norm. If `None` (the default), uses all
dimensions.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_norm", [t, clip_norm]) as name:
t = ops.convert_to_tensor(t, name="t")
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
l2sum = math_ops.reduce_sum(t * t, axes, keepdims=True)
pred = l2sum > 0
# Two-tap tf.where trick to bypass NaN gradients
l2sum_safe = array_ops.where(pred, l2sum, array_ops.ones_like(l2sum))
l2norm = array_ops.where(pred, math_ops.sqrt(l2sum_safe), l2sum)
intermediate = t * clip_norm
# Assert that the shape is compatible with the initial shape,
# to prevent unintentional broadcasting.
_ = t.shape.merge_with(intermediate.shape)
tclip = array_ops.identity(
intermediate / math_ops.maximum(l2norm, clip_norm), name=name)
return tclip
@tf_export("global_norm")
def global_norm(t_list, name=None):
"""Computes the global norm of multiple tensors.
Given a tuple or list of tensors `t_list`, this operation returns the
global norm of the elements in all tensors in `t_list`. The global norm is
computed as:
`global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`
Any entries in `t_list` that are of type None are ignored.
Args:
t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
name: A name for the operation (optional).
Returns:
A 0-D (scalar) `Tensor` of type `float`.
Raises:
TypeError: If `t_list` is not a sequence.
"""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
with ops.name_scope(name, "global_norm", t_list) as name:
values = [
ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
half_squared_norms = []
for v in values:
if v is not None:
with ops.colocate_with(v):
half_squared_norms.append(gen_nn_ops.l2_loss(v))
half_squared_norm = math_ops.reduce_sum(array_ops.stack(half_squared_norms))
norm = math_ops.sqrt(
half_squared_norm *
constant_op.constant(2.0, dtype=half_squared_norm.dtype),
name="global_norm")
return norm
@tf_export("clip_by_global_norm")
def clip_by_global_norm(t_list, clip_norm, use_norm=None, name=None):
"""Clips values of multiple tensors by the ratio of the sum of their norms.
Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,
this operation returns a list of clipped tensors `list_clipped`
and the global norm (`global_norm`) of all tensors in `t_list`. Optionally,
if you've already computed the global norm for `t_list`, you can specify
the global norm with `use_norm`.
To perform the clipping, the values `t_list[i]` are set to:
t_list[i] * clip_norm / max(global_norm, clip_norm)
where:
global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))
If `clip_norm > global_norm` then the entries in `t_list` remain as they are,
otherwise they're all shrunk by the global ratio.
Any of the entries of `t_list` that are of type `None` are ignored.
This is the correct way to perform gradient clipping (for example, see
[Pascanu et al., 2012](http://arxiv.org/abs/1211.5063)
([pdf](http://arxiv.org/pdf/1211.5063.pdf))).
However, it is slower than `clip_by_norm()` because all the parameters must be
ready before the clipping operation can be performed.
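  A typical training-loop sketch (`loss` and `optimizer` are assumed to be
  defined elsewhere; this is one common pattern, not the only one):

  ```python
  params = tf.trainable_variables()
  grads = tf.gradients(loss, params)
  clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)
  train_op = optimizer.apply_gradients(zip(clipped, params))
  ```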
Args:
t_list: A tuple or list of mixed `Tensors`, `IndexedSlices`, or None.
clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.
use_norm: A 0-D (scalar) `Tensor` of type `float` (optional). The global
norm to use. If not provided, `global_norm()` is used to compute the norm.
name: A name for the operation (optional).
Returns:
list_clipped: A list of `Tensors` of the same type as `list_t`.
global_norm: A 0-D (scalar) `Tensor` representing the global norm.
Raises:
TypeError: If `t_list` is not a sequence.
InvalidArgumentError: If global norm is not finite.
"""
if (not isinstance(t_list, collections.Sequence)
or isinstance(t_list, six.string_types)):
raise TypeError("t_list should be a sequence")
t_list = list(t_list)
if use_norm is None:
use_norm = global_norm(t_list, name)
use_norm = numerics.verify_tensor_all_finite(use_norm,
"Found Inf or NaN global norm.")
with ops.name_scope(name, "clip_by_global_norm",
t_list + [clip_norm]) as name:
# Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
scale = clip_norm * math_ops.minimum(
1.0 / use_norm,
constant_op.constant(1.0, dtype=use_norm.dtype) / clip_norm)
values = [
ops.convert_to_tensor(
t.values if isinstance(t, ops.IndexedSlices) else t,
name="t_%d" % i)
if t is not None else t
for i, t in enumerate(t_list)]
values_clipped = []
for i, v in enumerate(values):
if v is None:
values_clipped.append(None)
else:
with ops.colocate_with(v):
values_clipped.append(
array_ops.identity(v * scale, name="%s_%d" % (name, i)))
list_clipped = [
ops.IndexedSlices(c_v, t.indices, t.dense_shape)
if isinstance(t, ops.IndexedSlices)
else c_v
for (c_v, t) in zip(values_clipped, t_list)]
return list_clipped, use_norm
@tf_export("clip_by_average_norm")
def clip_by_average_norm(t, clip_norm, name=None):
"""Clips tensor values to a maximum average L2-norm.
Given a tensor `t`, and a maximum clip value `clip_norm`, this operation
normalizes `t` so that its average L2-norm is less than or equal to
`clip_norm`. Specifically, if the average L2-norm is already less than or
equal to `clip_norm`, then `t` is not modified. If the average L2-norm is
greater than `clip_norm`, then this operation returns a tensor of the same
type and shape as `t` with its values set to:
`t * clip_norm / l2norm_avg(t)`
In this case, the average L2-norm of the output tensor is `clip_norm`.
This operation is typically used to clip gradients before applying them with
an optimizer.
Args:
t: A `Tensor`.
clip_norm: A 0-D (scalar) `Tensor` > 0. A maximum clipping value.
name: A name for the operation (optional).
Returns:
A clipped `Tensor`.
"""
with ops.name_scope(name, "clip_by_average_norm", [t, clip_norm]) as name:
t = ops.convert_to_tensor(t, name="t")
# Calculate L2-norm per element, clip elements by ratio of clip_norm to
# L2-norm per element
n_element = math_ops.cast(array_ops.size(t), dtypes.float32)
l2norm_inv = math_ops.rsqrt(
math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t))))
tclip = array_ops.identity(
t * clip_norm * math_ops.minimum(
l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm),
name=name)
return tclip
|
kjagoo/wger_stark | refs/heads/master | wger/manager/tests/test_workout.py | 2 | # This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Workout Manager. If not, see <http://www.gnu.org/licenses/>.
import datetime
from django.core.urlresolvers import reverse
from wger.core.tests import api_base_test
from wger.core.tests.base_testcase import WorkoutManagerDeleteTestCase
from wger.core.tests.base_testcase import WorkoutManagerEditTestCase
from wger.core.tests.base_testcase import WorkoutManagerTestCase
from wger.manager.models import Workout
class WorkoutShareButtonTestCase(WorkoutManagerTestCase):
'''
Test that the share button is correctly displayed and hidden
'''
def test_share_button(self):
workout = Workout.objects.get(pk=1)
response = self.client.get(workout.get_absolute_url())
self.assertFalse(response.context['show_shariff'])
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertTrue(response.context['show_shariff'])
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertFalse(response.context['show_shariff'])
class WorkoutAccessTestCase(WorkoutManagerTestCase):
'''
Test accessing the workout page
'''
def test_access_shared(self):
'''
Test accessing the URL of a shared workout
'''
workout = Workout.objects.get(pk=1)
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
def test_access_not_shared(self):
'''
Test accessing the URL of a private workout
'''
workout = Workout.objects.get(pk=3)
self.user_login('admin')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 403)
self.user_login('test')
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 200)
self.user_logout()
response = self.client.get(workout.get_absolute_url())
self.assertEqual(response.status_code, 403)
class AddWorkoutTestCase(WorkoutManagerTestCase):
'''
Tests adding a Workout
'''
def create_workout(self):
'''
Helper function to test creating workouts
'''
# Create a workout
count_before = Workout.objects.count()
response = self.client.get(reverse('manager:workout:add'))
count_after = Workout.objects.count()
# There is always a redirect
self.assertEqual(response.status_code, 302)
# Test creating workout
self.assertGreater(count_after, count_before)
# Test accessing workout
response = self.client.get(reverse('manager:workout:view', kwargs={'pk': 1}))
workout = Workout.objects.get(pk=1)
self.assertEqual(response.context['workout'], workout)
self.assertEqual(response.status_code, 200)
def test_create_workout_logged_in(self):
'''
        Test creating a workout as a logged in user
'''
self.user_login()
self.create_workout()
self.user_logout()
class DeleteTestWorkoutTestCase(WorkoutManagerDeleteTestCase):
'''
Tests deleting a Workout
'''
object_class = Workout
url = 'manager:workout:delete'
pk = 3
user_success = 'test'
user_fail = 'admin'
class EditWorkoutTestCase(WorkoutManagerEditTestCase):
'''
Tests editing a Workout
'''
object_class = Workout
url = 'manager:workout:edit'
pk = 3
user_success = 'test'
user_fail = 'admin'
data = {'comment': 'A new comment'}
class WorkoutOverviewTestCase(WorkoutManagerTestCase):
'''
Tests the workout overview
'''
def get_workout_overview(self):
'''
Helper function to test the workout overview
'''
response = self.client.get(reverse('manager:workout:overview'))
# Page exists
self.assertEqual(response.status_code, 200)
self.assertEqual(len(response.context['workouts']), 2)
def test_dashboard_logged_in(self):
'''
        Test accessing the workout overview as a logged in user
'''
self.user_login()
self.get_workout_overview()
class WorkoutModelTestCase(WorkoutManagerTestCase):
'''
Tests other functionality from the model
'''
def test_unicode(self):
'''
Test the unicode representation
'''
workout = Workout()
workout.creation_date = datetime.date.today()
self.assertEqual('{0}'.format(workout),
'{0} ({1})'.format(u'Workout', datetime.date.today()))
workout.comment = u'my description'
self.assertEqual('{0}'.format(workout), u'my description')
class WorkoutApiTestCase(api_base_test.ApiBaseResourceTestCase):
'''
Tests the workout overview resource
'''
pk = 3
resource = Workout
private_resource = True
special_endpoints = ('canonical_representation',)
data = {'comment': 'A new comment'}
|
lucuma/sqlalchemy-wrapper | refs/heads/master | sqla_wrapper/__init__.py | 1 | from .core import SQLAlchemy # noqa
from .base_query import BaseQuery # noqa
from .model import Model, DefaultMeta # noqa
from .paginator import Paginator, sanitize_page_number # noqa
__version__ = '2.0.1'
|
ziel5122/rl-cache | refs/heads/master | cache/test_opt.py | 1 | from opt import OPTCache
from unittest import main, TestCase
class TestConstructor(TestCase):
    def test_init_error(self):
        # OPTCache requires both maxsize and accesses; constructing it with
        # no arguments must raise a TypeError
        self.assertRaises(TypeError, OPTCache)
def test_init(self):
maxsize = 3
accesses = [3, 4, 5]
cache = OPTCache(maxsize, accesses)
        self.assertEqual(cache.get_access_count(), 0, '__access_count nonzero')
        self.assertEqual(cache.get_accesses(), accesses, '__accesses incorrect')
self.assertEqual(cache.get_data(), {}, '__data nonempty')
self.assertEqual(cache.get_maxsize(), maxsize, '__maxsize incorrect')
self.assertEqual(cache.get_size(), 0, '__size nonzero')
class TestMethods(TestCase):
def setUp(self):
self.cache = OPTCache(3, [1, 2, 3, 4, 1, 2])
def test_set(self):
self.cache[1] = 17
self.cache[2] = 66
self.cache[3] = 54
temp = self.cache.get_data().copy()
self.cache[4] = 7
self.assertNotEqual(self.cache.get_data(), temp, 'cache contents unchanged')
self.assertEqual(self.cache.get_size(), self.cache.get_maxsize())
self.assertEqual(self.cache.get_size(), 3)
def test_replacement(self):
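        # with future accesses [1, 2, 3, 4, 1, 2], keys 1 and 2 are reused
        # after the miss on 4 while key 3 never is, so Belady's OPT policy
        # must evict 3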
self.cache[1] = 17
self.cache[2] = 66
self.cache[3] = 54
self.cache[4] = 7
        self.assertEqual(self.cache.get_data(), {1: 17, 2: 66, 4: 7},
'incorrect key removed')
if __name__ == '__main__':
main()
|
elfnor/sverchok | refs/heads/master | node_scripts/SN2-templates/test.py | 4 |
class MyScript(SvScript):
    inputs = [("s", "List")]
    outputs = [("s", "Sum")]
def process(self):
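        # sum the values of each incoming object and emit one total per object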
data = self.node.inputs[0].sv_get()
out = []
for obj in data:
out.append(sum(obj))
self.node.outputs[0].sv_set(out)
|
openmicroscopy/snoopycrimecop | refs/heads/master | test/integration/test_check_milestone.py | 2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 University of Dundee & Open Microscopy Environment
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import absolute_import
import pytest
from yaclifw.framework import main, Stop
from scc.git import CheckMilestone
from .Sandbox import SandboxTest
class TestCheckMilestone(SandboxTest):
def check_milestone(self, *args):
args = ["check-milestone", "--no-ask"] + list(args)
main("scc", args=args, items=[(CheckMilestone.NAME, CheckMilestone)])
def testNonExistingStartTag(self):
with pytest.raises(Stop):
self.check_milestone("0.0.0", "1.0.0")
def testNonExistingEndTag(self):
with pytest.raises(Stop):
self.check_milestone("1.0.0", "0.0.0")
def testNonExistingMilestone(self):
with pytest.raises(Stop):
self.check_milestone("1.0.0", "HEAD", "--set", "0.0.0")
def testCheckMilestone(self):
self.check_milestone("1.0.0", "1.1.1-TEST")
|
rockneurotiko/django | refs/heads/master | django/contrib/sessions/backends/db.py | 227 | import logging
from django.contrib.sessions.backends.base import CreateError, SessionBase
from django.core.exceptions import SuspiciousOperation
from django.db import IntegrityError, router, transaction
from django.utils import timezone
from django.utils.encoding import force_text
from django.utils.functional import cached_property
class SessionStore(SessionBase):
"""
Implements database session store.
"""
def __init__(self, session_key=None):
super(SessionStore, self).__init__(session_key)
@classmethod
def get_model_class(cls):
# Avoids a circular import and allows importing SessionStore when
# django.contrib.sessions is not in INSTALLED_APPS.
from django.contrib.sessions.models import Session
return Session
@cached_property
def model(self):
return self.get_model_class()
def load(self):
try:
s = self.model.objects.get(
session_key=self.session_key,
expire_date__gt=timezone.now()
)
return self.decode(s.session_data)
except (self.model.DoesNotExist, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self._session_key = None
return {}
def exists(self, session_key):
return self.model.objects.filter(session_key=session_key).exists()
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
# Save immediately to ensure we have a unique entry in the
# database.
self.save(must_create=True)
except CreateError:
# Key wasn't unique. Try again.
continue
self.modified = True
return
def create_model_instance(self, data):
"""
Return a new instance of the session model object, which represents the
current session state. Intended to be used for saving the session data
to the database.
"""
return self.model(
session_key=self._get_or_create_session_key(),
session_data=self.encode(data),
expire_date=self.get_expiry_date(),
)
def save(self, must_create=False):
"""
Saves the current session data to the database. If 'must_create' is
True, a database error will be raised if the saving operation doesn't
create a *new* entry (as opposed to possibly updating an existing
entry).
"""
if self.session_key is None:
return self.create()
data = self._get_session(no_load=must_create)
obj = self.create_model_instance(data)
using = router.db_for_write(self.model, instance=obj)
try:
with transaction.atomic(using=using):
obj.save(force_insert=must_create, using=using)
except IntegrityError:
if must_create:
raise CreateError
raise
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
self.model.objects.get(session_key=session_key).delete()
except self.model.DoesNotExist:
pass
@classmethod
def clear_expired(cls):
cls.get_model_class().objects.filter(expire_date__lt=timezone.now()).delete()
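# Usage sketch (illustrative, not part of the upstream module); assumes a
# configured Django project with django.contrib.sessions migrated:
#   store = SessionStore()
#   store['cart'] = [1, 2, 3]
#   store.save()                              # allocates a fresh session_key
#   SessionStore(store.session_key)['cart']   # -> [1, 2, 3]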
|
francbartoli/geonode | refs/heads/master | geonode/social/views.py | 2 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import logging
from actstream.models import Action
from django.views.generic import ListView
from django.shortcuts import get_object_or_404
from django.core.exceptions import PermissionDenied
from geonode.base.models import ResourceBase
logger = logging.getLogger(__name__)
class RecentActivity(ListView):
"""
Returns recent public activity.
"""
model = Action
template_name = 'social/activity_list.html'
def get_context_data(self, *args, **kwargs):
context = super(RecentActivity, self).get_context_data(*args, **kwargs)
def _filter_actions(action, request):
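            """
            Return the ids of the latest public actions, optionally limited
            to one content type, whose resource the requesting user is
            allowed to view.
            """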
if action == 'all':
_actions = Action.objects.filter(public=True)[:100]
else:
_actions = Action.objects.filter(
public=True, action_object_content_type__model=action)[:100]
_filtered_actions = []
for _action in _actions:
                if _action.target_object_id:
                    action_object_filter = {
                        'id': _action.target_object_id
                    }
                elif _action.action_object_object_id:
                    action_object_filter = {
                        'id': _action.action_object_object_id
                    }
                else:
                    # nothing to permission-check for this action
                    continue
try:
obj = get_object_or_404(ResourceBase, **action_object_filter)
resource = obj.get_self_resource()
user = request.user
if user.has_perm('base.view_resourcebase', resource) or \
user.has_perm('view_resourcebase', resource):
_filtered_actions.append(_action.id)
except ResourceBase.DoesNotExist:
_filtered_actions.append(_action.id)
                except Exception as e:
logger.debug(e)
return _filtered_actions
context['action_list'] = Action.objects.filter(
id__in=_filter_actions('all', self.request))[:15]
context['action_list_layers'] = Action.objects.filter(
id__in=_filter_actions('layer', self.request))[:15]
context['action_list_maps'] = Action.objects.filter(
id__in=_filter_actions('map', self.request))[:15]
context['action_list_documents'] = Action.objects.filter(
id__in=_filter_actions('document', self.request))[:15]
context['action_list_comments'] = Action.objects.filter(
id__in=_filter_actions('comment', self.request))[:15]
return context
class UserActivity(ListView):
"""
Returns recent user activity.
"""
context_object_name = 'action_list'
template_name = 'actstream/actor.html'
def get_queryset(self):
# There's no generic foreign key for 'actor', so can't filter directly
# Hence the code below is essentially applying the filter afterwards
return [x for x in Action.objects.filter(public=True)[:15]
if x and x.actor and x.actor.username == self.kwargs['actor']]
def get_context_data(self, *args, **kwargs):
context = super(UserActivity, self).get_context_data(*args, **kwargs)
context['actor'] = self.kwargs['actor']
return context
|
cpcloud/numpy | refs/heads/master | numpy/polynomial/tests/test_hermite_e.py | 123 | """Tests for hermite_e module.
"""
from __future__ import division, absolute_import, print_function
import numpy as np
import numpy.polynomial.hermite_e as herme
from numpy.polynomial.polynomial import polyval
from numpy.testing import (
TestCase, assert_almost_equal, assert_raises,
assert_equal, assert_, run_module_suite)
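# Coefficient arrays (lowest degree first) of the probabilists' Hermite
# polynomials He_0..He_9; they satisfy the recurrence
# He_{n+1}(x) = x*He_n(x) - n*He_{n-1}(x), e.g. He3 = x*He2 - 2*He1 = x**3 - 3*x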
He0 = np.array([1])
He1 = np.array([0, 1])
He2 = np.array([-1, 0, 1])
He3 = np.array([0, -3, 0, 1])
He4 = np.array([3, 0, -6, 0, 1])
He5 = np.array([0, 15, 0, -10, 0, 1])
He6 = np.array([-15, 0, 45, 0, -15, 0, 1])
He7 = np.array([0, -105, 0, 105, 0, -21, 0, 1])
He8 = np.array([105, 0, -420, 0, 210, 0, -28, 0, 1])
He9 = np.array([0, 945, 0, -1260, 0, 378, 0, -36, 0, 1])
Helist = [He0, He1, He2, He3, He4, He5, He6, He7, He8, He9]
def trim(x):
return herme.hermetrim(x, tol=1e-6)
class TestConstants(TestCase):
def test_hermedomain(self):
assert_equal(herme.hermedomain, [-1, 1])
def test_hermezero(self):
assert_equal(herme.hermezero, [0])
def test_hermeone(self):
assert_equal(herme.hermeone, [1])
def test_hermex(self):
assert_equal(herme.hermex, [0, 1])
class TestArithmetic(TestCase):
x = np.linspace(-3, 3, 100)
def test_hermeadd(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] += 1
res = herme.hermeadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermesub(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
tgt = np.zeros(max(i, j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = herme.hermesub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_hermemulx(self):
assert_equal(herme.hermemulx([0]), [0])
assert_equal(herme.hermemulx([1]), [0, 1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [i, 0, 1]
assert_equal(herme.hermemulx(ser), tgt)
def test_hermemul(self):
# check values of result
for i in range(5):
pol1 = [0]*i + [1]
val1 = herme.hermeval(self.x, pol1)
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
pol2 = [0]*j + [1]
val2 = herme.hermeval(self.x, pol2)
pol3 = herme.hermemul(pol1, pol2)
val3 = herme.hermeval(self.x, pol3)
assert_(len(pol3) == i + j + 1, msg)
assert_almost_equal(val3, val1*val2, err_msg=msg)
def test_hermediv(self):
for i in range(5):
for j in range(5):
msg = "At i=%d, j=%d" % (i, j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = herme.hermeadd(ci, cj)
quo, rem = herme.hermediv(tgt, ci)
res = herme.hermeadd(herme.hermemul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
class TestEvaluation(TestCase):
    # He-series coefficients of 1 + 2*x + 3*x**2 (note He2 = x**2 - 1)
c1d = np.array([4., 2., 3.])
c2d = np.einsum('i,j->ij', c1d, c1d)
c3d = np.einsum('i,j,k->ijk', c1d, c1d, c1d)
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
y = polyval(x, [1., 2., 3.])
def test_hermeval(self):
#check empty input
assert_equal(herme.hermeval([], [1]).size, 0)
        #check normal input
x = np.linspace(-1, 1)
y = [polyval(x, c) for c in Helist]
for i in range(10):
msg = "At i=%d" % i
tgt = y[i]
res = herme.hermeval(x, [0]*i + [1])
assert_almost_equal(res, tgt, err_msg=msg)
#check that shape is preserved
for i in range(3):
dims = [2]*i
x = np.zeros(dims)
assert_equal(herme.hermeval(x, [1]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0]).shape, dims)
assert_equal(herme.hermeval(x, [1, 0, 0]).shape, dims)
def test_hermeval2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d)
#test values
tgt = y1*y2
res = herme.hermeval2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermeval2d(z, z, self.c2d)
assert_(res.shape == (2, 3))
def test_hermeval3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test exceptions
assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d)
#test values
tgt = y1*y2*y3
res = herme.hermeval3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermeval3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3))
def test_hermegrid2d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j->ij', y1, y2)
res = herme.hermegrid2d(x1, x2, self.c2d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermegrid2d(z, z, self.c2d)
assert_(res.shape == (2, 3)*2)
def test_hermegrid3d(self):
x1, x2, x3 = self.x
y1, y2, y3 = self.y
#test values
tgt = np.einsum('i,j,k->ijk', y1, y2, y3)
res = herme.hermegrid3d(x1, x2, x3, self.c3d)
assert_almost_equal(res, tgt)
#test shape
z = np.ones((2, 3))
res = herme.hermegrid3d(z, z, z, self.c3d)
assert_(res.shape == (2, 3)*3)
class TestIntegral(TestCase):
def test_hermeint(self):
# check exceptions
assert_raises(ValueError, herme.hermeint, [0], .5)
assert_raises(ValueError, herme.hermeint, [0], -1)
assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
# test integration of zero polynomial
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = herme.hermeint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
# check single integration with integration constant
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i])
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check single integration with integration constant and lbnd
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], lbnd=-1)
assert_almost_equal(herme.hermeval(-1, hermeint), i)
# check single integration with integration constant and scaling
for i in range(5):
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
hermepol = herme.poly2herme(pol)
hermeint = herme.hermeint(hermepol, m=1, k=[i], scl=2)
res = herme.herme2poly(hermeint)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with default k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1)
res = herme.hermeint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with defined k
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k])
res = herme.hermeint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with lbnd
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], lbnd=-1)
res = herme.hermeint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
# check multiple integrations with scaling
for i in range(5):
for j in range(2, 5):
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j):
tgt = herme.hermeint(tgt, m=1, k=[k], scl=2)
res = herme.hermeint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeint_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeint(c) for c in c2d.T]).T
res = herme.hermeint(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c) for c in c2d])
res = herme.hermeint(c2d, axis=1)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeint(c, k=3) for c in c2d])
res = herme.hermeint(c2d, k=3, axis=1)
assert_almost_equal(res, tgt)
class TestDerivative(TestCase):
def test_hermeder(self):
# check exceptions
assert_raises(ValueError, herme.hermeder, [0], .5)
assert_raises(ValueError, herme.hermeder, [0], -1)
        # check that zeroth derivative does nothing
for i in range(5):
tgt = [0]*i + [1]
res = herme.hermeder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
# check that derivation is the inverse of integration
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herme.hermeder(herme.hermeint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
# check derivation with scaling
for i in range(5):
for j in range(2, 5):
tgt = [0]*i + [1]
res = herme.hermeder(
herme.hermeint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
def test_hermeder_axis(self):
# check that axis keyword works
c2d = np.random.random((3, 4))
tgt = np.vstack([herme.hermeder(c) for c in c2d.T]).T
res = herme.hermeder(c2d, axis=0)
assert_almost_equal(res, tgt)
tgt = np.vstack([herme.hermeder(c) for c in c2d])
res = herme.hermeder(c2d, axis=1)
assert_almost_equal(res, tgt)
class TestVander(TestCase):
# some random values in [-1, 1)
x = np.random.random((3, 5))*2 - 1
def test_hermevander(self):
# check for 1d x
x = np.arange(3)
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
# check for 2d x
x = np.array([[1, 2], [3, 4], [5, 6]])
v = herme.hermevander(x, 3)
assert_(v.shape == (3, 2, 4))
for i in range(4):
coef = [0]*i + [1]
assert_almost_equal(v[..., i], herme.hermeval(x, coef))
def test_hermevander2d(self):
# also tests hermeval2d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3))
van = herme.hermevander2d(x1, x2, [1, 2])
tgt = herme.hermeval2d(x1, x2, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander2d([x1], [x2], [1, 2])
assert_(van.shape == (1, 5, 6))
def test_hermevander3d(self):
# also tests hermeval3d for non-square coefficient array
x1, x2, x3 = self.x
c = np.random.random((2, 3, 4))
van = herme.hermevander3d(x1, x2, x3, [1, 2, 3])
tgt = herme.hermeval3d(x1, x2, x3, c)
res = np.dot(van, c.flat)
assert_almost_equal(res, tgt)
# check shape
van = herme.hermevander3d([x1], [x2], [x3], [1, 2, 3])
assert_(van.shape == (1, 5, 24))
class TestFitting(TestCase):
def test_hermefit(self):
def f(x):
return x*(x - 1)*(x - 2)
# Test exceptions
assert_raises(ValueError, herme.hermefit, [1], [1], -1)
assert_raises(TypeError, herme.hermefit, [[1]], [1], 0)
assert_raises(TypeError, herme.hermefit, [], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [[[1]]], 0)
assert_raises(TypeError, herme.hermefit, [1, 2], [1], 0)
assert_raises(TypeError, herme.hermefit, [1], [1, 2], 0)
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, herme.hermefit, [1], [1], 0, w=[1, 1])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = herme.hermefit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(herme.hermeval(x, coef3), y)
#
coef4 = herme.hermefit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(herme.hermeval(x, coef4), y)
#
coef2d = herme.hermefit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
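        # zero the samples that carry no weight; the weighted fit below must
        # still recover coef3 from the odd-index samples alone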
        yw[0::2] = 0
wcoef3 = herme.hermefit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = herme.hermefit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
        # test scaling with complex x values whose squares sum to zero
x = [1, 1j, -1, -1j]
assert_almost_equal(herme.hermefit(x, x, 1), [0, 1])
class TestCompanion(TestCase):
def test_raises(self):
assert_raises(ValueError, herme.hermecompanion, [])
assert_raises(ValueError, herme.hermecompanion, [1])
def test_dimensions(self):
for i in range(1, 5):
coef = [0]*i + [1]
assert_(herme.hermecompanion(coef).shape == (i, i))
def test_linear_root(self):
assert_(herme.hermecompanion([1, 2])[0, 0] == -.5)
class TestGauss(TestCase):
def test_100(self):
x, w = herme.hermegauss(100)
# test orthogonality. Note that the results need to be normalized,
# otherwise the huge values that can arise from fast growing
# functions like Laguerre can be very confusing.
v = herme.hermevander(x, 99)
vv = np.dot(v.T * w, v)
vd = 1/np.sqrt(vv.diagonal())
vv = vd[:, None] * vv * vd
assert_almost_equal(vv, np.eye(100))
# check that the integral of 1 is correct
tgt = np.sqrt(2*np.pi)
assert_almost_equal(w.sum(), tgt)
class TestMisc(TestCase):
def test_hermefromroots(self):
res = herme.hermefromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
pol = herme.hermefromroots(roots)
res = herme.hermeval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herme.herme2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermeroots(self):
assert_almost_equal(herme.hermeroots([1]), [])
assert_almost_equal(herme.hermeroots([1, 1]), [-1])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = herme.hermeroots(herme.hermefromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermetrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herme.hermetrim, coef, -1)
# Test results
assert_equal(herme.hermetrim(coef), coef[:-1])
assert_equal(herme.hermetrim(coef, 1), coef[:-3])
assert_equal(herme.hermetrim(coef, 2), [0])
def test_hermeline(self):
assert_equal(herme.hermeline(3, 4), [3, 4])
def test_herme2poly(self):
for i in range(10):
assert_almost_equal(herme.herme2poly([0]*i + [1]), Helist[i])
def test_poly2herme(self):
for i in range(10):
assert_almost_equal(herme.poly2herme(Helist[i]), [0]*i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-.5*x**2)
res = herme.hermeweight(x)
assert_almost_equal(res, tgt)
if __name__ == "__main__":
run_module_suite()
|
bjolivot/ansible | refs/heads/devel | lib/ansible/compat/__init__.py | 332 | # (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat library for ansible. This contains compatibility definitions for older
python versions. When we need to import a module differently depending on the
python version, do it here; the rest of the code can then simply import from
compat to get what it wants.
'''
|
pcu4dros/pandora-core | refs/heads/master | workspace/lib/python3.5/site-packages/pip/_vendor/distlib/_backport/tarfile.py | 422 | #-------------------------------------------------------------------
# tarfile.py
#-------------------------------------------------------------------
# Copyright (C) 2002 Lars Gustaebel <lars@gustaebel.de>
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import print_function
"""Read from and write to tar format archives.
"""
__version__ = "$Revision$"
version = "0.9.0"
__author__ = "Lars Gust\u00e4bel (lars@gustaebel.de)"
__date__ = "$Date: 2011-02-25 17:42:01 +0200 (Fri, 25 Feb 2011) $"
__cvsid__ = "$Id: tarfile.py 88586 2011-02-25 15:42:01Z marc-andre.lemburg $"
__credits__ = "Gustavo Niemeyer, Niels Gust\u00e4bel, Richard Townsend."
#---------
# Imports
#---------
import sys
import os
import stat
import errno
import time
import struct
import copy
import re
try:
import grp, pwd
except ImportError:
grp = pwd = None
# os.symlink on Windows prior to 6.0 raises NotImplementedError
symlink_exception = (AttributeError, NotImplementedError)
try:
# WindowsError (1314) will be raised if the caller does not hold the
# SeCreateSymbolicLinkPrivilege privilege
symlink_exception += (WindowsError,)
except NameError:
pass
# from tarfile import *
__all__ = ["TarFile", "TarInfo", "is_tarfile", "TarError"]
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
_open = builtins.open # Since 'open' is TarFile.open
#---------------------------------------------------------
# tar constants
#---------------------------------------------------------
NUL = b"\0" # the null character
BLOCKSIZE = 512 # length of processing blocks
RECORDSIZE = BLOCKSIZE * 20 # length of records
GNU_MAGIC = b"ustar \0" # magic gnu tar string
POSIX_MAGIC = b"ustar\x0000" # magic posix tar string
LENGTH_NAME = 100 # maximum length of a filename
LENGTH_LINK = 100 # maximum length of a linkname
LENGTH_PREFIX = 155 # maximum length of the prefix field
REGTYPE = b"0" # regular file
AREGTYPE = b"\0" # regular file
LNKTYPE = b"1" # link (inside tarfile)
SYMTYPE = b"2" # symbolic link
CHRTYPE = b"3" # character special device
BLKTYPE = b"4" # block special device
DIRTYPE = b"5" # directory
FIFOTYPE = b"6" # fifo special device
CONTTYPE = b"7" # contiguous file
GNUTYPE_LONGNAME = b"L" # GNU tar longname
GNUTYPE_LONGLINK = b"K" # GNU tar longlink
GNUTYPE_SPARSE = b"S" # GNU tar sparse file
XHDTYPE = b"x" # POSIX.1-2001 extended header
XGLTYPE = b"g" # POSIX.1-2001 global header
SOLARIS_XHDTYPE = b"X" # Solaris extended header
USTAR_FORMAT = 0 # POSIX.1-1988 (ustar) format
GNU_FORMAT = 1 # GNU tar format
PAX_FORMAT = 2 # POSIX.1-2001 (pax) format
DEFAULT_FORMAT = GNU_FORMAT
#---------------------------------------------------------
# tarfile constants
#---------------------------------------------------------
# File types that tarfile supports:
SUPPORTED_TYPES = (REGTYPE, AREGTYPE, LNKTYPE,
SYMTYPE, DIRTYPE, FIFOTYPE,
CONTTYPE, CHRTYPE, BLKTYPE,
GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# File types that will be treated as a regular file.
REGULAR_TYPES = (REGTYPE, AREGTYPE,
CONTTYPE, GNUTYPE_SPARSE)
# File types that are part of the GNU tar format.
GNU_TYPES = (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK,
GNUTYPE_SPARSE)
# Fields from a pax header that override a TarInfo attribute.
PAX_FIELDS = ("path", "linkpath", "size", "mtime",
"uid", "gid", "uname", "gname")
# Fields from a pax header that are affected by hdrcharset.
PAX_NAME_FIELDS = set(("path", "linkpath", "uname", "gname"))
# Fields in a pax header that are numbers, all other fields
# are treated as strings.
PAX_NUMBER_FIELDS = {
"atime": float,
"ctime": float,
"mtime": float,
"uid": int,
"gid": int,
"size": int
}
#---------------------------------------------------------
# Bits used in the mode field, values in octal.
#---------------------------------------------------------
S_IFLNK = 0o120000 # symbolic link
S_IFREG = 0o100000 # regular file
S_IFBLK = 0o060000 # block device
S_IFDIR = 0o040000 # directory
S_IFCHR = 0o020000 # character device
S_IFIFO = 0o010000 # fifo
TSUID = 0o4000 # set UID on execution
TSGID = 0o2000 # set GID on execution
TSVTX = 0o1000 # reserved
TUREAD = 0o400 # read by owner
TUWRITE = 0o200 # write by owner
TUEXEC = 0o100 # execute/search by owner
TGREAD = 0o040 # read by group
TGWRITE = 0o020 # write by group
TGEXEC = 0o010 # execute/search by group
TOREAD = 0o004 # read by other
TOWRITE = 0o002 # write by other
TOEXEC = 0o001 # execute/search by other
#---------------------------------------------------------
# initialization
#---------------------------------------------------------
if os.name in ("nt", "ce"):
ENCODING = "utf-8"
else:
ENCODING = sys.getfilesystemencoding()
#---------------------------------------------------------
# Some useful functions
#---------------------------------------------------------
def stn(s, length, encoding, errors):
"""Convert a string to a null-terminated bytes object.
"""
s = s.encode(encoding, errors)
return s[:length] + (length - len(s)) * NUL
def nts(s, encoding, errors):
"""Convert a null-terminated bytes object to a string.
"""
p = s.find(b"\0")
if p != -1:
s = s[:p]
return s.decode(encoding, errors)
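# Illustrative round trip: stn() truncates or NUL-pads to the field width and
# nts() reads back up to the first NUL, e.g.
#   stn("abc", 5, "utf-8", "strict")        -> b"abc\x00\x00"
#   nts(b"abc\x00\x00", "utf-8", "strict")  -> "abc"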
def nti(s):
"""Convert a number field to a python number.
"""
# There are two possible encodings for a number field, see
# itn() below.
if s[0] != chr(0o200):
try:
n = int(nts(s, "ascii", "strict") or "0", 8)
except ValueError:
raise InvalidHeaderError("invalid header")
else:
n = 0
for i in range(len(s) - 1):
n <<= 8
n += ord(s[i + 1])
return n
def itn(n, digits=8, format=DEFAULT_FORMAT):
"""Convert a python number to a number field.
"""
# POSIX 1003.1-1988 requires numbers to be encoded as a string of
# octal digits followed by a null-byte, this allows values up to
# (8**(digits-1))-1. GNU tar allows storing numbers greater than
# that if necessary. A leading 0o200 byte indicates this particular
# encoding, the following digits-1 bytes are a big-endian
# representation. This allows values up to (256**(digits-1))-1.
if 0 <= n < 8 ** (digits - 1):
s = ("%0*o" % (digits - 1, n)).encode("ascii") + NUL
else:
if format != GNU_FORMAT or n >= 256 ** (digits - 1):
raise ValueError("overflow in number field")
if n < 0:
# XXX We mimic GNU tar's behaviour with negative numbers,
# this could raise OverflowError.
n = struct.unpack("L", struct.pack("l", n))[0]
s = bytearray()
for i in range(digits - 1):
s.insert(0, n & 0o377)
n >>= 8
s.insert(0, 0o200)
return s
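# Worked example (illustrative): itn(511) -> b"0000777\x00" (seven octal
# digits plus NUL), while itn(8**7) no longer fits seven octal digits and
# falls back to the GNU base-256 form b"\x80\x00\x00\x00\x00\x20\x00\x00".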
def calc_chksums(buf):
"""Calculate the checksum for a member's header by summing up all
characters except for the chksum field which is treated as if
it was filled with spaces. According to the GNU tar sources,
some tars (Sun and NeXT) calculate chksum with signed char,
which will be different if there are chars in the buffer with
the high bit set. So we calculate two checksums, unsigned and
signed.
"""
unsigned_chksum = 256 + sum(struct.unpack("148B", buf[:148]) + struct.unpack("356B", buf[156:512]))
signed_chksum = 256 + sum(struct.unpack("148b", buf[:148]) + struct.unpack("356b", buf[156:512]))
return unsigned_chksum, signed_chksum
def copyfileobj(src, dst, length=None):
"""Copy length bytes from fileobj src to fileobj dst.
If length is None, copy the entire content.
"""
if length == 0:
return
if length is None:
while True:
buf = src.read(16*1024)
if not buf:
break
dst.write(buf)
return
BUFSIZE = 16 * 1024
blocks, remainder = divmod(length, BUFSIZE)
for b in range(blocks):
buf = src.read(BUFSIZE)
if len(buf) < BUFSIZE:
raise IOError("end of file reached")
dst.write(buf)
if remainder != 0:
buf = src.read(remainder)
if len(buf) < remainder:
raise IOError("end of file reached")
dst.write(buf)
return
filemode_table = (
((S_IFLNK, "l"),
(S_IFREG, "-"),
(S_IFBLK, "b"),
(S_IFDIR, "d"),
(S_IFCHR, "c"),
(S_IFIFO, "p")),
((TUREAD, "r"),),
((TUWRITE, "w"),),
((TUEXEC|TSUID, "s"),
(TSUID, "S"),
(TUEXEC, "x")),
((TGREAD, "r"),),
((TGWRITE, "w"),),
((TGEXEC|TSGID, "s"),
(TSGID, "S"),
(TGEXEC, "x")),
((TOREAD, "r"),),
((TOWRITE, "w"),),
((TOEXEC|TSVTX, "t"),
(TSVTX, "T"),
(TOEXEC, "x"))
)
def filemode(mode):
"""Convert a file's mode to a string of the form
-rwxrwxrwx.
Used by TarFile.list()
"""
perm = []
for table in filemode_table:
for bit, char in table:
if mode & bit == bit:
perm.append(char)
break
else:
perm.append("-")
return "".join(perm)
class TarError(Exception):
"""Base exception."""
pass
class ExtractError(TarError):
"""General exception for extract errors."""
pass
class ReadError(TarError):
"""Exception for unreadable tar archives."""
pass
class CompressionError(TarError):
"""Exception for unavailable compression methods."""
pass
class StreamError(TarError):
"""Exception for unsupported operations on stream-like TarFiles."""
pass
class HeaderError(TarError):
"""Base exception for header errors."""
pass
class EmptyHeaderError(HeaderError):
"""Exception for empty headers."""
pass
class TruncatedHeaderError(HeaderError):
"""Exception for truncated headers."""
pass
class EOFHeaderError(HeaderError):
"""Exception for end of file headers."""
pass
class InvalidHeaderError(HeaderError):
"""Exception for invalid headers."""
pass
class SubsequentHeaderError(HeaderError):
"""Exception for missing and invalid extended headers."""
pass
#---------------------------
# internal stream interface
#---------------------------
class _LowLevelFile(object):
"""Low-level file object. Supports reading and writing.
It is used instead of a regular file object for streaming
access.
"""
def __init__(self, name, mode):
mode = {
"r": os.O_RDONLY,
"w": os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
}[mode]
if hasattr(os, "O_BINARY"):
mode |= os.O_BINARY
self.fd = os.open(name, mode, 0o666)
def close(self):
os.close(self.fd)
def read(self, size):
return os.read(self.fd, size)
def write(self, s):
os.write(self.fd, s)
class _Stream(object):
"""Class that serves as an adapter between TarFile and
a stream-like object. The stream-like object only
needs to have a read() or write() method and is accessed
blockwise. Use of gzip or bzip2 compression is possible.
A stream-like object could be for example: sys.stdin,
sys.stdout, a socket, a tape device etc.
_Stream is intended to be used only internally.
"""
def __init__(self, name, mode, comptype, fileobj, bufsize):
"""Construct a _Stream object.
"""
self._extfileobj = True
if fileobj is None:
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if comptype == '*':
# Enable transparent compression detection for the
# stream interface
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = name or ""
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b""
self.pos = 0
self.closed = False
try:
if comptype == "gz":
try:
import zlib
except ImportError:
raise CompressionError("zlib module is not available")
self.zlib = zlib
self.crc = zlib.crc32(b"")
if mode == "r":
self._init_read_gz()
else:
self._init_write_gz()
if comptype == "bz2":
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if mode == "r":
self.dbuf = b""
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
def __del__(self):
if hasattr(self, "closed") and not self.closed:
self.close()
def _init_write_gz(self):
"""Initialize for writing with gzip compression.
"""
self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED,
-self.zlib.MAX_WBITS,
self.zlib.DEF_MEM_LEVEL,
0)
timestamp = struct.pack("<L", int(time.time()))
self.__write(b"\037\213\010\010" + timestamp + b"\002\377")
if self.name.endswith(".gz"):
self.name = self.name[:-3]
# RFC1952 says we must use ISO-8859-1 for the FNAME field.
self.__write(self.name.encode("iso-8859-1", "replace") + NUL)
def write(self, s):
"""Write string s to the stream.
"""
if self.comptype == "gz":
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if self.comptype != "tar":
s = self.cmp.compress(s)
self.__write(s)
def __write(self, s):
"""Write string s to the stream if a whole new block
is ready to be written.
"""
self.buf += s
while len(self.buf) > self.bufsize:
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
def close(self):
"""Close the _Stream object. No operation should be
done on it afterwards.
"""
if self.closed:
return
if self.mode == "w" and self.comptype != "tar":
self.buf += self.cmp.flush()
if self.mode == "w" and self.buf:
self.fileobj.write(self.buf)
self.buf = b""
if self.comptype == "gz":
# The native zlib crc is an unsigned 32-bit integer, but
# the Python wrapper implicitly casts that to a signed C
# long. So, on a 32-bit box self.crc may "look negative",
# while the same crc on a 64-bit box may "look positive".
# To avoid irksome warnings from the `struct` module, force
# it to look positive on all boxes.
self.fileobj.write(struct.pack("<L", self.crc & 0xffffffff))
self.fileobj.write(struct.pack("<L", self.pos & 0xffffFFFF))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def _init_read_gz(self):
"""Initialize for reading a gzip compressed fileobj.
"""
self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b""
# taken from gzip.GzipFile with some alterations
if self.__read(2) != b"\037\213":
raise ReadError("not a gzip file")
if self.__read(1) != b"\010":
raise CompressionError("unsupported compression method")
flag = ord(self.__read(1))
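        # gzip FLG bits (RFC 1952): 2 = FHCRC, 4 = FEXTRA, 8 = FNAME,
        # 16 = FCOMMENT; the optional fields are parsed and discarded below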
self.__read(6)
if flag & 4:
xlen = ord(self.__read(1)) + 256 * ord(self.__read(1))
self.read(xlen)
if flag & 8:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 16:
while True:
s = self.__read(1)
if not s or s == NUL:
break
if flag & 2:
self.__read(2)
def tell(self):
"""Return the stream's file pointer position.
"""
return self.pos
def seek(self, pos=0):
"""Set the stream's file pointer to pos. Negative seeking
is forbidden.
"""
if pos - self.pos >= 0:
blocks, remainder = divmod(pos - self.pos, self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError("seeking backwards is not allowed")
return self.pos
def read(self, size=None):
"""Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.
"""
if size is None:
t = []
while True:
buf = self._read(self.bufsize)
if not buf:
break
t.append(buf)
buf = "".join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
def _read(self, size):
"""Return size bytes from the stream.
"""
if self.comptype == "tar":
return self.__read(size)
c = len(self.dbuf)
while c < size:
buf = self.__read(self.bufsize)
if not buf:
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError("invalid compressed data")
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
def __read(self, size):
"""Return size bytes from stream. If internal buffer is empty,
read another block from the stream.
"""
c = len(self.buf)
while c < size:
buf = self.fileobj.read(self.bufsize)
if not buf:
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
# class _Stream
class _StreamProxy(object):
"""Small proxy class that enables transparent compression
detection for the Stream interface (mode 'r|*').
"""
def __init__(self, fileobj):
self.fileobj = fileobj
self.buf = self.fileobj.read(BLOCKSIZE)
def read(self, size):
self.read = self.fileobj.read
return self.buf
def getcomptype(self):
if self.buf.startswith(b"\037\213\010"):
return "gz"
if self.buf.startswith(b"BZh91"):
return "bz2"
return "tar"
def close(self):
self.fileobj.close()
# class StreamProxy
class _BZ2Proxy(object):
"""Small proxy class that enables external file object
support for "r:bz2" and "w:bz2" modes. This is actually
a workaround for a limitation in bz2 module's BZ2File
class which (unlike gzip.GzipFile) has no support for
a file object argument.
"""
blocksize = 16 * 1024
def __init__(self, fileobj, mode):
self.fileobj = fileobj
self.mode = mode
self.name = getattr(self.fileobj, "name", None)
self.init()
def init(self):
import bz2
self.pos = 0
if self.mode == "r":
self.bz2obj = bz2.BZ2Decompressor()
self.fileobj.seek(0)
self.buf = b""
else:
self.bz2obj = bz2.BZ2Compressor()
def read(self, size):
x = len(self.buf)
while x < size:
raw = self.fileobj.read(self.blocksize)
if not raw:
break
data = self.bz2obj.decompress(raw)
self.buf += data
x += len(data)
buf = self.buf[:size]
self.buf = self.buf[size:]
self.pos += len(buf)
return buf
def seek(self, pos):
if pos < self.pos:
self.init()
self.read(pos - self.pos)
def tell(self):
return self.pos
def write(self, data):
self.pos += len(data)
raw = self.bz2obj.compress(data)
self.fileobj.write(raw)
def close(self):
if self.mode == "w":
raw = self.bz2obj.flush()
self.fileobj.write(raw)
# class _BZ2Proxy
#------------------------
# Extraction file object
#------------------------
class _FileInFile(object):
"""A thin wrapper around an existing file object that
provides a part of its data as an individual file
object.
"""
def __init__(self, fileobj, offset, size, blockinfo=None):
self.fileobj = fileobj
self.offset = offset
self.size = size
self.position = 0
if blockinfo is None:
blockinfo = [(0, size)]
# Construct a map with data and zero blocks.
self.map_index = 0
self.map = []
lastpos = 0
realpos = self.offset
for offset, size in blockinfo:
if offset > lastpos:
self.map.append((False, lastpos, offset, None))
self.map.append((True, offset, offset + size, realpos))
realpos += size
lastpos = offset + size
if lastpos < self.size:
self.map.append((False, lastpos, self.size, None))
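        # Illustrative example: offset=1024, size=10, blockinfo=[(0, 4), (8, 2)]
        # yields [(True, 0, 4, 1024), (False, 4, 8, None), (True, 8, 10, 1028)];
        # data segments point into the real file, holes read back as NULs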
def seekable(self):
if not hasattr(self.fileobj, "seekable"):
# XXX gzip.GzipFile and bz2.BZ2File
return True
return self.fileobj.seekable()
def tell(self):
"""Return the current file position.
"""
return self.position
def seek(self, position):
"""Seek to a position in the file.
"""
self.position = position
def read(self, size=None):
"""Read data from the file.
"""
if size is None:
size = self.size - self.position
else:
size = min(size, self.size - self.position)
buf = b""
while size > 0:
while True:
data, start, stop, offset = self.map[self.map_index]
if start <= self.position < stop:
break
else:
self.map_index += 1
if self.map_index == len(self.map):
self.map_index = 0
length = min(size, stop - self.position)
if data:
self.fileobj.seek(offset + (self.position - start))
buf += self.fileobj.read(length)
else:
buf += NUL * length
size -= length
self.position += length
return buf
#class _FileInFile
class ExFileObject(object):
"""File-like object for reading an archive member.
Is returned by TarFile.extractfile().
"""
blocksize = 1024
def __init__(self, tarfile, tarinfo):
self.fileobj = _FileInFile(tarfile.fileobj,
tarinfo.offset_data,
tarinfo.size,
tarinfo.sparse)
self.name = tarinfo.name
self.mode = "r"
self.closed = False
self.size = tarinfo.size
self.position = 0
self.buffer = b""
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return self.fileobj.seekable()
def read(self, size=None):
"""Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
buf = b""
if self.buffer:
if size is None:
buf = self.buffer
self.buffer = b""
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if size is None:
buf += self.fileobj.read()
else:
buf += self.fileobj.read(size - len(buf))
self.position += len(buf)
return buf
# XXX TextIOWrapper uses the read1() method.
read1 = read
def readline(self, size=-1):
"""Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
while True:
buf = self.fileobj.read(self.blocksize)
self.buffer += buf
if not buf or b"\n" in buf:
pos = self.buffer.find(b"\n") + 1
if pos == 0:
# no newline found.
pos = len(self.buffer)
break
if size != -1:
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
def readlines(self):
"""Return a list with all remaining lines.
"""
result = []
while True:
line = self.readline()
if not line: break
result.append(line)
return result
def tell(self):
"""Return the current file position.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
return self.position
def seek(self, pos, whence=os.SEEK_SET):
"""Seek to a position in the file.
"""
if self.closed:
raise ValueError("I/O operation on closed file")
if whence == os.SEEK_SET:
self.position = min(max(pos, 0), self.size)
elif whence == os.SEEK_CUR:
if pos < 0:
self.position = max(self.position + pos, 0)
else:
self.position = min(self.position + pos, self.size)
elif whence == os.SEEK_END:
self.position = max(min(self.size + pos, self.size), 0)
else:
raise ValueError("Invalid argument")
self.buffer = b""
self.fileobj.seek(self.position)
def close(self):
"""Close the file object.
"""
self.closed = True
def __iter__(self):
"""Get an iterator over the file's lines.
"""
while True:
line = self.readline()
if not line:
break
yield line
#class ExFileObject
#------------------
# Exported Classes
#------------------
class TarInfo(object):
"""Informational class which holds the details about an
archive member given by a tar header block.
TarInfo objects are returned by TarFile.getmember(),
TarFile.getmembers() and TarFile.gettarinfo() and are
usually created internally.
"""
__slots__ = ("name", "mode", "uid", "gid", "size", "mtime",
"chksum", "type", "linkname", "uname", "gname",
"devmajor", "devminor",
"offset", "offset_data", "pax_headers", "sparse",
"tarfile", "_sparse_structs", "_link_target")
def __init__(self, name=""):
"""Construct a TarInfo object. name is the optional name
of the member.
"""
self.name = name # member name
self.mode = 0o644 # file permissions
self.uid = 0 # user id
self.gid = 0 # group id
self.size = 0 # file size
self.mtime = 0 # modification time
self.chksum = 0 # header checksum
self.type = REGTYPE # member type
self.linkname = "" # link name
self.uname = "" # user name
self.gname = "" # group name
self.devmajor = 0 # device major number
self.devminor = 0 # device minor number
self.offset = 0 # the tar header starts here
self.offset_data = 0 # the file's data starts here
self.sparse = None # sparse member information
self.pax_headers = {} # pax header information
# In pax headers the "name" and "linkname" field are called
# "path" and "linkpath".
def _getpath(self):
return self.name
def _setpath(self, name):
self.name = name
path = property(_getpath, _setpath)
def _getlinkpath(self):
return self.linkname
def _setlinkpath(self, linkname):
self.linkname = linkname
linkpath = property(_getlinkpath, _setlinkpath)
def __repr__(self):
return "<%s %r at %#x>" % (self.__class__.__name__,self.name,id(self))
def get_info(self):
"""Return the TarInfo's attributes as a dictionary.
"""
info = {
"name": self.name,
"mode": self.mode & 0o7777,
"uid": self.uid,
"gid": self.gid,
"size": self.size,
"mtime": self.mtime,
"chksum": self.chksum,
"type": self.type,
"linkname": self.linkname,
"uname": self.uname,
"gname": self.gname,
"devmajor": self.devmajor,
"devminor": self.devminor
}
if info["type"] == DIRTYPE and not info["name"].endswith("/"):
info["name"] += "/"
return info
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors="surrogateescape"):
"""Return a tar header as a string of 512 byte blocks.
"""
info = self.get_info()
if format == USTAR_FORMAT:
return self.create_ustar_header(info, encoding, errors)
elif format == GNU_FORMAT:
return self.create_gnu_header(info, encoding, errors)
elif format == PAX_FORMAT:
return self.create_pax_header(info, encoding)
else:
raise ValueError("invalid format")
def create_ustar_header(self, info, encoding, errors):
"""Return the object as a ustar header block.
"""
info["magic"] = POSIX_MAGIC
if len(info["linkname"]) > LENGTH_LINK:
raise ValueError("linkname is too long")
if len(info["name"]) > LENGTH_NAME:
info["prefix"], info["name"] = self._posix_split_name(info["name"])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
def create_gnu_header(self, info, encoding, errors):
"""Return the object as a GNU header block sequence.
"""
info["magic"] = GNU_MAGIC
buf = b""
if len(info["linkname"]) > LENGTH_LINK:
buf += self._create_gnu_long_header(info["linkname"], GNUTYPE_LONGLINK, encoding, errors)
if len(info["name"]) > LENGTH_NAME:
buf += self._create_gnu_long_header(info["name"], GNUTYPE_LONGNAME, encoding, errors)
return buf + self._create_header(info, GNU_FORMAT, encoding, errors)
def create_pax_header(self, info, encoding):
"""Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.
"""
info["magic"] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
# Test string fields for values that exceed the field length or cannot
# be represented in ASCII encoding.
for name, hname, length in (
("name", "path", LENGTH_NAME), ("linkname", "linkpath", LENGTH_LINK),
("uname", "uname", 32), ("gname", "gname", 32)):
if hname in pax_headers:
# The pax header has priority.
continue
# Try to encode the string as ASCII.
try:
info[name].encode("ascii", "strict")
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if len(info[name]) > length:
pax_headers[hname] = info[name]
# Test number fields for values that exceed the field limit or values
# that like to be stored as float.
for name, digits in (("uid", 8), ("gid", 8), ("size", 12), ("mtime", 12)):
if name in pax_headers:
# The pax header has priority. Avoid overflow.
info[name] = 0
continue
val = info[name]
if not 0 <= val < 8 ** (digits - 1) or isinstance(val, float):
pax_headers[name] = str(val)
info[name] = 0
# Create a pax extended header if necessary.
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b""
return buf + self._create_header(info, USTAR_FORMAT, "ascii", "replace")
@classmethod
def create_pax_global_header(cls, pax_headers):
"""Return the object as a pax global header block sequence.
"""
return cls._create_pax_generic_header(pax_headers, XGLTYPE, "utf8")
def _posix_split_name(self, name):
"""Split a name longer than 100 chars into a prefix
and a name part.
"""
prefix = name[:LENGTH_PREFIX + 1]
while prefix and prefix[-1] != "/":
prefix = prefix[:-1]
name = name[len(prefix):]
prefix = prefix[:-1]
if not prefix or len(name) > LENGTH_NAME:
raise ValueError("name is too long")
return prefix, name
@staticmethod
def _create_header(info, format, encoding, errors):
"""Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.
"""
parts = [
stn(info.get("name", ""), 100, encoding, errors),
itn(info.get("mode", 0) & 0o7777, 8, format),
itn(info.get("uid", 0), 8, format),
itn(info.get("gid", 0), 8, format),
itn(info.get("size", 0), 12, format),
itn(info.get("mtime", 0), 12, format),
b" ", # checksum field
info.get("type", REGTYPE),
stn(info.get("linkname", ""), 100, encoding, errors),
info.get("magic", POSIX_MAGIC),
stn(info.get("uname", ""), 32, encoding, errors),
stn(info.get("gname", ""), 32, encoding, errors),
itn(info.get("devmajor", 0), 8, format),
itn(info.get("devminor", 0), 8, format),
stn(info.get("prefix", ""), 155, encoding, errors)
]
buf = struct.pack("%ds" % BLOCKSIZE, b"".join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
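        # splice the checksum into the field at offset 148 (== 512 - 364) as
        # six octal digits plus a NUL; the eighth byte at offset 155 keeps the
        # space written above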
buf = buf[:-364] + ("%06o\0" % chksum).encode("ascii") + buf[-357:]
return buf
@staticmethod
def _create_payload(payload):
"""Return the string payload filled with zero bytes
up to the next 512 byte border.
"""
blocks, remainder = divmod(len(payload), BLOCKSIZE)
if remainder > 0:
payload += (BLOCKSIZE - remainder) * NUL
return payload
@classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
"""Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.
"""
name = name.encode(encoding, errors) + NUL
info = {}
info["name"] = "././@LongLink"
info["type"] = type
info["size"] = len(name)
info["magic"] = GNU_MAGIC
# create extended header + name blocks.
return cls._create_header(info, USTAR_FORMAT, encoding, errors) + \
cls._create_payload(name)
@classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
"""Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.
"""
# Check if one of the fields contains surrogate characters and thereby
# forces hdrcharset=BINARY, see _proc_pax() for more information.
binary = False
for keyword, value in pax_headers.items():
try:
value.encode("utf8", "strict")
except UnicodeEncodeError:
binary = True
break
records = b""
if binary:
# Put the hdrcharset field at the beginning of the header.
records += b"21 hdrcharset=BINARY\n"
for keyword, value in pax_headers.items():
keyword = keyword.encode("utf8")
if binary:
# Try to restore the original byte representation of `value'.
                # Needless to say, the encoding must match the string.
value = value.encode(encoding, "surrogateescape")
else:
value = value.encode("utf8")
l = len(keyword) + len(value) + 3 # ' ' + '=' + '\n'
n = p = 0
while True:
n = l + len(str(p))
if n == p:
break
p = n
records += bytes(str(p), "ascii") + b" " + keyword + b"=" + value + b"\n"
# We use a hardcoded "././@PaxHeader" name like star does
# instead of the one that POSIX recommends.
info = {}
info["name"] = "././@PaxHeader"
info["type"] = type
info["size"] = len(records)
info["magic"] = POSIX_MAGIC
# Create pax header + record blocks.
return cls._create_header(info, USTAR_FORMAT, "ascii", "replace") + \
cls._create_payload(records)
@classmethod
def frombuf(cls, buf, encoding, errors):
"""Construct a TarInfo object from a 512 byte bytes object.
"""
if len(buf) == 0:
raise EmptyHeaderError("empty header")
if len(buf) != BLOCKSIZE:
raise TruncatedHeaderError("truncated header")
if buf.count(NUL) == BLOCKSIZE:
raise EOFHeaderError("end of file header")
chksum = nti(buf[148:156])
if chksum not in calc_chksums(buf):
raise InvalidHeaderError("bad checksum")
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
# Old V7 tar format represents a directory as a regular
# file with a trailing slash.
if obj.type == AREGTYPE and obj.name.endswith("/"):
obj.type = DIRTYPE
# The old GNU sparse format occupies some of the unused
# space in the buffer for up to 4 sparse structures.
        # Save them for later processing in _proc_sparse().
if obj.type == GNUTYPE_SPARSE:
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
# Remove redundant slashes from directories.
if obj.isdir():
obj.name = obj.name.rstrip("/")
# Reconstruct a ustar longname.
if prefix and obj.type not in GNU_TYPES:
obj.name = prefix + "/" + obj.name
return obj
@classmethod
def fromtarfile(cls, tarfile):
"""Return the next TarInfo object from TarFile object
tarfile.
"""
buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
return obj._proc_member(tarfile)
#--------------------------------------------------------------------------
# The following are methods that are called depending on the type of a
# member. The entry point is _proc_member() which can be overridden in a
# subclass to add custom _proc_*() methods. A _proc_*() method MUST
# implement the following
# operations:
# 1. Set self.offset_data to the position where the data blocks begin,
# if there is data that follows.
# 2. Set tarfile.offset to the position where the next member's header will
# begin.
# 3. Return self or another valid TarInfo object.
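    #
    # A minimal sketch of such a hook in a subclass (MyTarInfo and the
    # b"X" type flag are hypothetical, purely for illustration):
    #
    #   class MyTarInfo(TarInfo):
    #       def _proc_member(self, tarfile):
    #           if self.type == b"X":
    #               self.offset_data = tarfile.fileobj.tell()         # 1.
    #               tarfile.offset = self.offset_data + \
    #                   self._block(self.size)                        # 2.
    #               return self                                       # 3.
    #           return TarInfo._proc_member(self, tarfile)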
def _proc_member(self, tarfile):
"""Choose the right processing method depending on
the type and call it.
"""
if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
return self._proc_gnulong(tarfile)
elif self.type == GNUTYPE_SPARSE:
return self._proc_sparse(tarfile)
elif self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
def _proc_builtin(self, tarfile):
"""Process a builtin type or an unknown type which
will be treated as a regular file.
"""
self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if self.isreg() or self.type not in SUPPORTED_TYPES:
# Skip the following data blocks.
offset += self._block(self.size)
tarfile.offset = offset
# Patch the TarInfo object with saved global
# header information.
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
def _proc_gnulong(self, tarfile):
"""Process the blocks that hold a GNU longname
or longlink member.
"""
buf = tarfile.fileobj.read(self._block(self.size))
# Fetch the next header and process it.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Patch the TarInfo object from the next header with
# the longname information.
next.offset = self.offset
if self.type == GNUTYPE_LONGNAME:
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif self.type == GNUTYPE_LONGLINK:
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
def _proc_sparse(self, tarfile):
"""Process a GNU sparse header plus extra headers.
"""
# We already collected some sparse structures in frombuf().
structs, isextended, origsize = self._sparse_structs
del self._sparse_structs
# Collect sparse structures from extended header blocks.
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:pos + 12])
numbytes = nti(buf[pos + 12:pos + 24])
except ValueError:
break
if offset and numbytes:
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = self.offset_data + self._block(self.size)
self.size = origsize
return self
def _proc_pax(self, tarfile):
"""Process an extended or global header as described in
POSIX.1-2008.
"""
# Read the header information.
buf = tarfile.fileobj.read(self._block(self.size))
# A pax header stores supplemental information for either
# the following file (extended) or all following files
# (global).
if self.type == XGLTYPE:
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
# Check if the pax header contains a hdrcharset field. This tells us
# the encoding of the path, linkpath, uname and gname fields. Normally,
        # these fields are UTF-8 encoded, but POSIX.1-2008 allows tar
        # implementations to store them as raw binary strings if the
        # translation to UTF-8 fails.
match = re.search(br"\d+ hdrcharset=([^\n]+)\n", buf)
if match is not None:
pax_headers["hdrcharset"] = match.group(1).decode("utf8")
# For the time being, we don't care about anything other than "BINARY".
# The only other value that is currently allowed by the standard is
# "ISO-IR 10646 2000 UTF-8" in other words UTF-8.
hdrcharset = pax_headers.get("hdrcharset")
if hdrcharset == "BINARY":
encoding = tarfile.encoding
else:
encoding = "utf8"
# Parse pax header information. A record looks like that:
# "%d %s=%s\n" % (length, keyword, value). length is the size
# of the complete record including the length field itself and
# the newline. keyword and value are both UTF-8 encoded strings.
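        # Worked example (hypothetical record): for keyword "path" and
        # value "foo" the length converges to 12, giving "12 path=foo\n",
        # which is indeed 12 bytes long counting the length field itself.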
regex = re.compile(br"(\d+) ([^=]+)=")
pos = 0
while True:
match = regex.match(buf, pos)
if not match:
break
length, keyword = match.groups()
length = int(length)
value = buf[match.end(2) + 1:match.start(1) + length - 1]
# Normally, we could just use "utf8" as the encoding and "strict"
            # as the error handler, but we'd better not take the risk. For
# example, GNU tar <= 1.23 is known to store filenames it cannot
# translate to UTF-8 as raw strings (unfortunately without a
# hdrcharset=BINARY header).
# We first try the strict standard encoding, and if that fails we
# fall back on the user's encoding and error handler.
keyword = self._decode_pax_field(keyword, "utf8", "utf8",
tarfile.errors)
if keyword in PAX_NAME_FIELDS:
value = self._decode_pax_field(value, encoding, tarfile.encoding,
tarfile.errors)
else:
value = self._decode_pax_field(value, "utf8", "utf8",
tarfile.errors)
pax_headers[keyword] = value
pos += length
# Fetch the next header.
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError("missing or bad subsequent header")
# Process GNU sparse information.
if "GNU.sparse.map" in pax_headers:
# GNU extended sparse format version 0.1.
self._proc_gnusparse_01(next, pax_headers)
elif "GNU.sparse.size" in pax_headers:
# GNU extended sparse format version 0.0.
self._proc_gnusparse_00(next, pax_headers, buf)
elif pax_headers.get("GNU.sparse.major") == "1" and pax_headers.get("GNU.sparse.minor") == "0":
# GNU extended sparse format version 1.0.
self._proc_gnusparse_10(next, pax_headers, tarfile)
if self.type in (XHDTYPE, SOLARIS_XHDTYPE):
# Patch the TarInfo object with the extended header info.
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if "size" in pax_headers:
# If the extended header replaces the size field,
# we need to recalculate the offset where the next
# header starts.
offset = next.offset_data
if next.isreg() or next.type not in SUPPORTED_TYPES:
offset += next._block(next.size)
tarfile.offset = offset
return next
def _proc_gnusparse_00(self, next, pax_headers, buf):
"""Process a GNU tar extended sparse header, version 0.0.
"""
offsets = []
for match in re.finditer(br"\d+ GNU.sparse.offset=(\d+)\n", buf):
offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br"\d+ GNU.sparse.numbytes=(\d+)\n", buf):
numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
def _proc_gnusparse_01(self, next, pax_headers):
"""Process a GNU tar extended sparse header, version 0.1.
"""
sparse = [int(x) for x in pax_headers["GNU.sparse.map"].split(",")]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _proc_gnusparse_10(self, next, pax_headers, tarfile):
"""Process a GNU tar extended sparse header, version 1.0.
"""
fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b"\n", 1)
fields = int(fields)
while len(sparse) < fields * 2:
if b"\n" not in buf:
buf += tarfile.fileobj.read(BLOCKSIZE)
number, buf = buf.split(b"\n", 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
def _apply_pax_info(self, pax_headers, encoding, errors):
"""Replace fields with supplemental information from a previous
pax extended or global header.
"""
for keyword, value in pax_headers.items():
if keyword == "GNU.sparse.name":
setattr(self, "path", value)
elif keyword == "GNU.sparse.size":
setattr(self, "size", int(value))
elif keyword == "GNU.sparse.realsize":
setattr(self, "size", int(value))
elif keyword in PAX_FIELDS:
if keyword in PAX_NUMBER_FIELDS:
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if keyword == "path":
value = value.rstrip("/")
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
"""Decode a single field from a pax record.
"""
try:
return value.decode(encoding, "strict")
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
def _block(self, count):
"""Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.
"""
blocks, remainder = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return blocks * BLOCKSIZE
def isreg(self):
return self.type in REGULAR_TYPES
def isfile(self):
return self.isreg()
def isdir(self):
return self.type == DIRTYPE
def issym(self):
return self.type == SYMTYPE
def islnk(self):
return self.type == LNKTYPE
def ischr(self):
return self.type == CHRTYPE
def isblk(self):
return self.type == BLKTYPE
def isfifo(self):
return self.type == FIFOTYPE
def issparse(self):
return self.sparse is not None
def isdev(self):
return self.type in (CHRTYPE, BLKTYPE, FIFOTYPE)
# class TarInfo
class TarFile(object):
"""The TarFile Class provides an interface to tar archives.
"""
debug = 0 # May be set from 0 (no msgs) to 3 (all msgs)
dereference = False # If true, add content of linked file to the
# tar file, else the link.
ignore_zeros = False # If true, skips empty or invalid blocks and
# continues processing.
errorlevel = 1 # If 0, fatal errors only appear in debug
# messages (if debug >= 0). If > 0, errors
# are passed to the caller as exceptions.
format = DEFAULT_FORMAT # The format to use when creating an archive.
encoding = ENCODING # Encoding for 8-bit character strings.
errors = None # Error handler for unicode conversion.
tarinfo = TarInfo # The default TarInfo class to use.
fileobject = ExFileObject # The default ExFileObject class to use.
def __init__(self, name=None, mode="r", fileobj=None, format=None,
tarinfo=None, dereference=None, ignore_zeros=None, encoding=None,
errors="surrogateescape", pax_headers=None, debug=None, errorlevel=None):
"""Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
read from an existing archive, 'a' to append data to an existing
file or 'w' to create a new file overwriting an existing one. `mode'
defaults to 'r'.
If `fileobj' is given, it is used for reading or writing data. If it
can be determined, `mode' is overridden by `fileobj's mode.
        `fileobj' is not closed when TarFile is closed.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {"r": "rb", "a": "r+b", "w": "wb"}[mode]
if not fileobj:
if self.mode == "a" and not os.path.exists(name):
# Create nonexistent files in append mode.
self.mode = "w"
self._mode = "wb"
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if name is None and hasattr(fileobj, "name"):
name = fileobj.name
if hasattr(fileobj, "mode"):
self._mode = fileobj.mode
self._extfileobj = True
self.name = os.path.abspath(name) if name else None
self.fileobj = fileobj
# Init attributes.
if format is not None:
self.format = format
if tarinfo is not None:
self.tarinfo = tarinfo
if dereference is not None:
self.dereference = dereference
if ignore_zeros is not None:
self.ignore_zeros = ignore_zeros
if encoding is not None:
self.encoding = encoding
self.errors = errors
if pax_headers is not None and self.format == PAX_FORMAT:
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if debug is not None:
self.debug = debug
if errorlevel is not None:
self.errorlevel = errorlevel
# Init datastructures.
self.closed = False
self.members = [] # list of members as TarInfo objects
self._loaded = False # flag if all members have been read
self.offset = self.fileobj.tell()
# current position in the archive file
self.inodes = {} # dictionary caching the inodes of
# archive members already added
try:
if self.mode == "r":
self.firstmember = None
self.firstmember = self.next()
if self.mode == "a":
# Move to the end of the archive,
# before the first empty block.
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if self.mode in "aw":
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if not self._extfileobj:
self.fileobj.close()
self.closed = True
raise
#--------------------------------------------------------------------------
# Below are the classmethods which act as alternate constructors to the
# TarFile class. The open() method is the only one that is needed for
# public use; it is the "super"-constructor and is able to select an
# adequate "sub"-constructor for a particular compression using the mapping
# from OPEN_METH.
#
# This concept allows one to subclass TarFile without losing the comfort of
# the super-constructor. A sub-constructor is registered and made available
# by adding it to the mapping in OPEN_METH.
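    #
    # A sketch of what such a registration could look like (the "xz" name
    # and xzopen method below are hypothetical, not provided here):
    #
    #   class XZTarFile(TarFile):
    #       @classmethod
    #       def xzopen(cls, name, mode="r", fileobj=None, **kwargs):
    #           ...  # wrap fileobj in an LZMA stream, then call taropen()
    #
    #   XZTarFile.OPEN_METH = dict(TarFile.OPEN_METH, xz="xzopen")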
@classmethod
def open(cls, name=None, mode="r", fileobj=None, bufsize=RECORDSIZE, **kwargs):
"""Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
'r' or 'r:*' open for reading with transparent compression
'r:' open for reading exclusively uncompressed
'r:gz' open for reading with gzip compression
'r:bz2' open for reading with bzip2 compression
'a' or 'a:' open for appending, creating the file if necessary
'w' or 'w:' open for writing without compression
'w:gz' open for writing with gzip compression
'w:bz2' open for writing with bzip2 compression
'r|*' open a stream of tar blocks with transparent compression
'r|' open an uncompressed stream of tar blocks for reading
'r|gz' open a gzip compressed stream of tar blocks
'r|bz2' open a bzip2 compressed stream of tar blocks
'w|' open an uncompressed stream for writing
'w|gz' open a gzip compressed stream for writing
'w|bz2' open a bzip2 compressed stream for writing
"""
if not name and not fileobj:
raise ValueError("nothing to open")
if mode in ("r", "r:*"):
# Find out which *open() is appropriate for opening the file.
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if fileobj is not None:
saved_pos = fileobj.tell()
try:
return func(name, "r", fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if fileobj is not None:
fileobj.seek(saved_pos)
continue
raise ReadError("file could not be opened successfully")
elif ":" in mode:
filemode, comptype = mode.split(":", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
# Select the *open() function according to
# given compression.
if comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError("unknown compression type %r" % comptype)
return func(name, filemode, fileobj, **kwargs)
elif "|" in mode:
filemode, comptype = mode.split("|", 1)
filemode = filemode or "r"
comptype = comptype or "tar"
if filemode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif mode in "aw":
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError("undiscernible mode")
@classmethod
def taropen(cls, name, mode="r", fileobj=None, **kwargs):
"""Open uncompressed tar archive name for reading or writing.
"""
if len(mode) > 1 or mode not in "raw":
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
@classmethod
def gzopen(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError("gzip module is not available")
extfileobj = fileobj is not None
try:
fileobj = gzip.GzipFile(name, mode + "b", compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if not extfileobj and fileobj is not None:
fileobj.close()
if fileobj is None:
raise
raise ReadError("not a gzip file")
except:
if not extfileobj and fileobj is not None:
fileobj.close()
raise
t._extfileobj = extfileobj
return t
@classmethod
def bz2open(cls, name, mode="r", fileobj=None, compresslevel=9, **kwargs):
"""Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.
"""
if len(mode) > 1 or mode not in "rw":
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError("bz2 module is not available")
if fileobj is not None:
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError("not a bzip2 file")
t._extfileobj = False
return t
# All *open() methods are registered here.
OPEN_METH = {
"tar": "taropen", # uncompressed tar
"gz": "gzopen", # gzip compressed tar
"bz2": "bz2open" # bzip2 compressed tar
}
#--------------------------------------------------------------------------
# The public methods which TarFile provides:
def close(self):
"""Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.
"""
if self.closed:
return
if self.mode in "aw":
self.fileobj.write(NUL * (BLOCKSIZE * 2))
self.offset += (BLOCKSIZE * 2)
# fill up the end with zero-blocks
# (like option -b20 for tar does)
blocks, remainder = divmod(self.offset, RECORDSIZE)
if remainder > 0:
self.fileobj.write(NUL * (RECORDSIZE - remainder))
if not self._extfileobj:
self.fileobj.close()
self.closed = True
def getmember(self, name):
"""Return a TarInfo object for member `name'. If `name' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.
"""
tarinfo = self._getmember(name)
if tarinfo is None:
raise KeyError("filename %r not found" % name)
return tarinfo
def getmembers(self):
"""Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.
"""
self._check()
if not self._loaded: # if we want to obtain a list of
self._load() # all members, we first have to
# scan the whole archive.
return self.members
def getnames(self):
"""Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().
"""
return [tarinfo.name for tarinfo in self.getmembers()]
def gettarinfo(self, name=None, arcname=None, fileobj=None):
"""Create a TarInfo object for either the file `name' or the file
object `fileobj' (using os.fstat on its file descriptor). You can
modify some of the TarInfo's attributes before you add it using
addfile(). If given, `arcname' specifies an alternative name for the
file in the archive.
"""
self._check("aw")
# When fileobj is given, replace name by
# fileobj's real name.
if fileobj is not None:
name = fileobj.name
        # Build the name of the member in the archive.
        # Backward slashes are converted to forward slashes,
        # and absolute paths are turned into relative paths.
if arcname is None:
arcname = name
drv, arcname = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, "/")
arcname = arcname.lstrip("/")
# Now, fill the TarInfo object with
# information specific for the file.
tarinfo = self.tarinfo()
tarinfo.tarfile = self
# Use os.stat or os.lstat, depending on platform
# and if symlinks shall be resolved.
if fileobj is None:
if hasattr(os, "lstat") and not self.dereference:
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ""
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if not self.dereference and statres.st_nlink > 1 and \
inode in self.inodes and arcname != self.inodes[inode]:
# Is it a hardlink to an already
# archived file?
type = LNKTYPE
linkname = self.inodes[inode]
else:
                # The inode is added only if it's valid.
# For win32 it is always 0.
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
# Fill the TarInfo object with all
# information we can get.
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if type == REGTYPE:
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if type in (CHRTYPE, BLKTYPE):
if hasattr(os, "major") and hasattr(os, "minor"):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
def list(self, verbose=True):
"""Print a table of contents to sys.stdout. If `verbose' is False, only
the names of the members are printed. If it is True, an `ls -l'-like
output is produced.
"""
self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print("%s/%s" % (tarinfo.uname or tarinfo.uid,
tarinfo.gname or tarinfo.gid), end=' ')
if tarinfo.ischr() or tarinfo.isblk():
print("%10s" % ("%d,%d" \
% (tarinfo.devmajor, tarinfo.devminor)), end=' ')
else:
print("%10d" % tarinfo.size, end=' ')
print("%d-%02d-%02d %02d:%02d:%02d" \
% time.localtime(tarinfo.mtime)[:6], end=' ')
print(tarinfo.name + ("/" if tarinfo.isdir() else ""), end=' ')
if verbose:
if tarinfo.issym():
print("->", tarinfo.linkname, end=' ')
if tarinfo.islnk():
print("link to", tarinfo.linkname, end=' ')
print()
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
"""Add the file `name' to the archive. `name' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive' to False. `exclude' is a function that should
return True for each filename to be excluded. `filter' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.
"""
self._check("aw")
if arcname is None:
arcname = name
# Exclude pathnames.
if exclude is not None:
import warnings
warnings.warn("use the filter argument instead",
DeprecationWarning, 2)
if exclude(name):
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Skip if somebody tries to archive the archive...
if self.name is not None and os.path.abspath(name) == self.name:
self._dbg(2, "tarfile: Skipped %r" % name)
return
self._dbg(1, name)
# Create a TarInfo object from the file.
tarinfo = self.gettarinfo(name, arcname)
if tarinfo is None:
self._dbg(1, "tarfile: Unsupported type %r" % name)
return
# Change or exclude the TarInfo object.
if filter is not None:
tarinfo = filter(tarinfo)
if tarinfo is None:
self._dbg(2, "tarfile: Excluded %r" % name)
return
# Append the tar header and data to the archive.
if tarinfo.isreg():
f = bltn_open(name, "rb")
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f),
recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
def addfile(self, tarinfo, fileobj=None):
"""Add the TarInfo object `tarinfo' to the archive. If `fileobj' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
        On Windows platforms, `fileobj' should always be opened with mode
        'rb' to avoid size mismatches caused by newline translation.
"""
self._check("aw")
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
# If there's data to follow, append it.
if fileobj is not None:
copyfileobj(fileobj, self.fileobj, tarinfo.size)
blocks, remainder = divmod(tarinfo.size, BLOCKSIZE)
if remainder > 0:
self.fileobj.write(NUL * (BLOCKSIZE - remainder))
blocks += 1
self.offset += blocks * BLOCKSIZE
self.members.append(tarinfo)
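    # A sketch of the gettarinfo()/addfile() pairing described above
    # ("notes.txt" and "out.tar" are hypothetical paths):
    #
    #   tar = TarFile.open("out.tar", "w")
    #   with bltn_open("notes.txt", "rb") as f:
    #       tarinfo = tar.gettarinfo(fileobj=f)
    #       tar.addfile(tarinfo, f)
    #   tar.close()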
def extractall(self, path=".", members=None):
"""Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path' specifies a different directory
to extract to. `members' is optional and must be a subset of the
list returned by getmembers().
"""
directories = []
if members is None:
members = self
for tarinfo in members:
if tarinfo.isdir():
# Extract directories with a safe mode.
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
# Do not set_attrs directories, as we will do that further down
self.extract(tarinfo, path, set_attrs=not tarinfo.isdir())
# Reverse sort directories.
directories.sort(key=lambda a: a.name)
directories.reverse()
# Set correct owner, mtime and filemode on directories.
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extract(self, member, path="", set_attrs=True):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
# Prepare the link target for makelink().
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
set_attrs=set_attrs)
except EnvironmentError as e:
if self.errorlevel > 0:
raise
else:
if e.filename is None:
self._dbg(1, "tarfile: %s" % e.strerror)
else:
self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
except ExtractError as e:
if self.errorlevel > 1:
raise
else:
self._dbg(1, "tarfile: %s" % e)
def extractfile(self, member):
"""Extract a member from the archive as a file object. `member' may be
a filename or a TarInfo object. If `member' is a regular file, a
file-like object is returned. If `member' is a link, a file-like
object is constructed from the link's target. If `member' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()
"""
self._check("r")
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif tarinfo.type not in SUPPORTED_TYPES:
# If a member's type is unknown, it is treated as a
# regular file.
return self.fileobject(self, tarinfo)
elif tarinfo.islnk() or tarinfo.issym():
if isinstance(self.fileobj, _Stream):
# A small but ugly workaround for the case that someone tries
# to extract a (sym)link as a file-object from a non-seekable
# stream of tar blocks.
raise StreamError("cannot extract (sym)link as file object")
else:
# A (sym)link's file object is its target's file object.
return self.extractfile(self._find_link_target(tarinfo))
else:
# If there's no data associated with the member (directory, chrdev,
# blkdev, etc.), return None instead of a file object.
return None
def _extract_member(self, tarinfo, targetpath, set_attrs=True):
"""Extract the TarInfo object tarinfo to a physical
file called targetpath.
"""
# Fetch the TarInfo object for the given name
# and build the destination pathname, replacing
# forward slashes to platform specific separators.
targetpath = targetpath.rstrip("/")
targetpath = targetpath.replace("/", os.sep)
# Create all upper directories.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
# Create directories that are not part of the archive with
# default permissions.
os.makedirs(upperdirs)
if tarinfo.islnk() or tarinfo.issym():
self._dbg(1, "%s -> %s" % (tarinfo.name, tarinfo.linkname))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif tarinfo.ischr() or tarinfo.isblk():
self.makedev(tarinfo, targetpath)
elif tarinfo.islnk() or tarinfo.issym():
self.makelink(tarinfo, targetpath)
elif tarinfo.type not in SUPPORTED_TYPES:
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if not tarinfo.issym():
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
#--------------------------------------------------------------------------
# Below are the different file methods. They are called via
# _extract_member() when extract() is called. They can be replaced in a
# subclass to implement other functionality.
def makedir(self, tarinfo, targetpath):
"""Make a directory called targetpath.
"""
try:
# Use a safe mode for the directory, the real mode is set
# later in _extract_member().
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if e.errno != errno.EEXIST:
raise
def makefile(self, tarinfo, targetpath):
"""Make a file called targetpath.
"""
source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, "wb")
if tarinfo.sparse is not None:
for offset, size in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
def makeunknown(self, tarinfo, targetpath):
"""Make a file from a TarInfo object with an unknown type
at targetpath.
"""
self.makefile(tarinfo, targetpath)
self._dbg(1, "tarfile: Unknown file type %r, " \
"extracted as regular file." % tarinfo.type)
def makefifo(self, tarinfo, targetpath):
"""Make a fifo called targetpath.
"""
if hasattr(os, "mkfifo"):
os.mkfifo(targetpath)
else:
raise ExtractError("fifo not supported by system")
def makedev(self, tarinfo, targetpath):
"""Make a character or block device called targetpath.
"""
if not hasattr(os, "mknod") or not hasattr(os, "makedev"):
raise ExtractError("special devices not supported by system")
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode,
os.makedev(tarinfo.devmajor, tarinfo.devminor))
def makelink(self, tarinfo, targetpath):
"""Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.
"""
try:
# For systems that support symbolic and hard links.
if tarinfo.issym():
os.symlink(tarinfo.linkname, targetpath)
else:
# See extract().
if os.path.exists(tarinfo._link_target):
os.link(tarinfo._link_target, targetpath)
else:
self._extract_member(self._find_link_target(tarinfo),
targetpath)
        except symlink_exception:
            # The platform cannot create the (sym)link, so fall back to
            # extracting a copy of the file the link points to.
            try:
                self._extract_member(self._find_link_target(tarinfo),
                                     targetpath)
            except KeyError:
                raise ExtractError("unable to resolve link inside archive")
def chown(self, tarinfo, targetpath):
"""Set owner of targetpath according to tarinfo.
"""
if pwd and hasattr(os, "geteuid") and os.geteuid() == 0:
# We have to be root to do so.
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if tarinfo.issym() and hasattr(os, "lchown"):
os.lchown(targetpath, u, g)
else:
if sys.platform != "os2emx":
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError("could not change owner")
def chmod(self, tarinfo, targetpath):
"""Set file permissions of targetpath according to tarinfo.
"""
if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError("could not change mode")
def utime(self, tarinfo, targetpath):
"""Set modification time of targetpath according to tarinfo.
"""
if not hasattr(os, 'utime'):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError("could not change modification time")
#--------------------------------------------------------------------------
def next(self):
"""Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there is no more
available.
"""
self._check("ra")
if self.firstmember is not None:
m = self.firstmember
self.firstmember = None
return m
# Read the next block.
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, "0x%X: %s" % (self.offset, e))
self.offset += BLOCKSIZE
continue
elif self.offset == 0:
raise ReadError(str(e))
except EmptyHeaderError:
if self.offset == 0:
raise ReadError("empty file")
except TruncatedHeaderError as e:
if self.offset == 0:
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if tarinfo is not None:
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
#--------------------------------------------------------------------------
# Little helper methods:
def _getmember(self, name, tarinfo=None, normalize=False):
"""Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.
"""
# Ensure that all members have been loaded.
members = self.getmembers()
# Limit the member search list up to tarinfo.
if tarinfo is not None:
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if name == member_name:
return member
def _load(self):
"""Read through the entire archive file and look for readable
members.
"""
while True:
tarinfo = self.next()
if tarinfo is None:
break
self._loaded = True
def _check(self, mode=None):
"""Check if TarFile is still open, and if the operation's mode
corresponds to TarFile's mode.
"""
if self.closed:
raise IOError("%s is closed" % self.__class__.__name__)
if mode is not None and self.mode not in mode:
raise IOError("bad operation for mode %r" % self.mode)
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def __iter__(self):
"""Provide an iterator object.
"""
if self._loaded:
return iter(self.members)
else:
return TarIter(self)
def _dbg(self, level, msg):
"""Write debugging output to sys.stderr.
"""
if level <= self.debug:
print(msg, file=sys.stderr)
def __enter__(self):
self._check()
return self
def __exit__(self, type, value, traceback):
if type is None:
self.close()
else:
# An exception occurred. We must not call close() because
# it would try to write end-of-archive blocks and padding.
if not self._extfileobj:
self.fileobj.close()
self.closed = True
# class TarFile
class TarIter(object):
"""Iterator Class.
for tarinfo in TarFile(...):
suite...
"""
def __init__(self, tarfile):
"""Construct a TarIter object.
"""
self.tarfile = tarfile
self.index = 0
def __iter__(self):
"""Return iterator object.
"""
return self
def __next__(self):
"""Return the next item using TarFile's next() method.
When all members have been read, set TarFile as _loaded.
"""
# Fix for SF #1100429: Under rare circumstances it can
# happen that getmembers() is called during iteration,
# which will cause TarIter to stop prematurely.
if not self.tarfile._loaded:
tarinfo = self.tarfile.next()
if not tarinfo:
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
next = __next__ # for Python 2.x
#--------------------
# exported functions
#--------------------
def is_tarfile(name):
"""Return True if name points to a tar archive that we
are able to handle, else return False.
"""
try:
t = open(name)
t.close()
return True
except TarError:
return False
bltn_open = open
open = TarFile.open
|
ebuchman/daoist_protocol | refs/heads/master | features/steps/nibble_packing.py | 4 | from behave import register_type
from .utils import parse_py
from pyethereum import trie
register_type(Py=parse_py)
@given(u'nibbles: {src_nibbles:Py}') # noqa
def step_impl(context, src_nibbles):
context.src_nibbles = src_nibbles
@when(u'append a terminator') # noqa
def step_impl(context):
context.src_nibbles.append(trie.NIBBLE_TERMINATOR)
@when(u'packed to binary') # noqa
def step_impl(context):
context.dst_binary = trie.pack_nibbles(context.src_nibbles)
@then(u'in the binary, the first nibbles should be {first_nibble:Py}') # noqa
def step_impl(context, first_nibble):
assert ord(context.dst_binary[0]) & 0xF0 == first_nibble << 4
context.prefix_nibbles_count = 1
@then(u'the second nibbles should be {second_nibble:Py}') # noqa
def step_impl(context, second_nibble):
assert ord(context.dst_binary[0]) & 0x0F == second_nibble
context.prefix_nibbles_count = 2
@then(u'nibbles after should equal to the original nibbles') # noqa
def step_impl(context):
dst_nibbles = trie.unpack_to_nibbles(context.dst_binary)
assert dst_nibbles == context.src_nibbles
@then(u'unpack the binary will get the original nibbles') # noqa
def step_impl(context):
assert context.src_nibbles == trie.unpack_to_nibbles(context.dst_binary)
@then(u'the packed result will be {dst:Py}') # noqa
def step_impl(context, dst):
assert context.dst_binary.encode('hex') == dst
@given(u'to be packed nibbles: {nibbles:Py} and terminator: {term:Py}') # noqa
def step_impl(context, nibbles, term):
context.src_nibbles = nibbles
if term:
context.src_nibbles.append(trie.NIBBLE_TERMINATOR)
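# Worked example (a sketch of the hex-prefix encoding these steps exercise,
# assuming trie.NIBBLE_TERMINATOR == 16): packing the odd-length nibbles
# [1, 2, 3] without a terminator sets the flag nibble to 1 (odd), giving
# pack_nibbles([1, 2, 3]) == '\x11\x23'; with the terminator appended the
# flag nibble becomes 3, giving '\x31\x23'.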
|
darkleons/odoo | refs/heads/master | addons/Back/point_of_sale/controllers/main.py | 43 | # -*- coding: utf-8 -*-
import logging
import simplejson
import os
import openerp
import time
import random
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import module_boot, login_redirect
_logger = logging.getLogger(__name__)
class PosController(http.Controller):
@http.route('/pos/web', type='http', auth='user')
def a(self, debug=False, **k):
cr, uid, context, session = request.cr, request.uid, request.context, request.session
if not session.uid:
return login_redirect()
PosSession = request.registry['pos.session']
pos_session_ids = PosSession.search(cr, uid, [('state','=','opened'),('user_id','=',session.uid)], context=context)
PosSession.login(cr,uid,pos_session_ids,context=context)
modules = simplejson.dumps(module_boot(request.db))
init = """
var wc = new s.web.WebClient();
wc.show_application = function(){
wc.action_manager.do_action("pos.ui");
};
wc.setElement($(document.body));
wc.start();
"""
html = request.registry.get('ir.ui.view').render(cr, session.uid,'point_of_sale.index',{
'modules': modules,
'init': init,
})
return html
|
zeqing-guo/CaptchaReader | refs/heads/master | src/Control/Identify.py | 2 | '''
Created on Aug 28, 2014
@author: Jason Guo E-mail: zqguo@zqguo.com
'''
import cStringIO
import datetime
from sklearn.externals import joblib
from PIL import Image
import config
class Identify():
'''
    Usage: identify a captcha.
    Input: the raw byte string of the captcha image
    Output: the decoded captcha text
'''
def __init__(self, captcha):
'''
Constructor
'''
self.captcha = captcha
# Get the module
self.clf = joblib.load(config.DATA_FILE_NAME)
def captcha_reader(self):
starttime = datetime.datetime.now()
# Binarization
captcha_file = cStringIO.StringIO(self.captcha)
img = Image.open(captcha_file)
img = img.convert("RGBA")
black_pix = range(img.size[0])
pixdata = img.load()
for y in xrange(img.size[1]):
for x in xrange(img.size[0]):
if pixdata[x, y][0] < 90 or pixdata[x, y][1] < 162:
pixdata[x, y] = (0, 0, 0, 255)
if pixdata[x, y][2] > 0:
pixdata[x, y] = (255, 255, 255, 255)
else:
pixdata[x, y] = (0, 0, 0, 255)
endtime = datetime.datetime.now()
interval = endtime - starttime
print "%.5f," % (interval.seconds + interval.microseconds / 1000000.0),
starttime = datetime.datetime.now()
# Split figure
for x in xrange(img.size[0]):
row_black_pix = 0
for y in xrange(img.size[1]):
if pixdata[x, y] == (0, 0, 0, 255):
row_black_pix += 1
black_pix[x] = row_black_pix
split_position = []
for i in xrange(1, img.size[0]):
if black_pix[i] != 0 and black_pix[i - 1] == 0:
if len(split_position) % 2 == 0:
split_position.append(i)
elif black_pix[i] == 0 and black_pix[i - 1] != 0:
if i - 1 - split_position[-1] >= 6:
split_position.append(i - 1)
if split_position[1] > 17:
self.insert_index(1, 10, 16, black_pix, split_position)
if split_position[3] > 27:
self.insert_index(3, 20, 26, black_pix, split_position)
if split_position[5] > 37:
self.insert_index(5, 30, 36, black_pix, split_position)
if len(split_position) != 8:
return "alreadfail"
endtime = datetime.datetime.now()
interval = endtime - starttime
print "%.5f," % (interval.seconds + interval.microseconds / 1000000.0),
starttime = datetime.datetime.now()
# Identify figure
result = ""
identify_list = black_pix[split_position[0] : split_position[1] + 1]
identity_len = len(identify_list)
while identity_len < 11:
identify_list.append(0)
identity_len += 1
while identity_len > 11:
identify_list.pop()
identity_len -= 1
result += self.clf.predict([identify_list])[0]
identify_list = black_pix[split_position[2] : split_position[3] + 1]
identity_len = len(identify_list)
while identity_len < 11:
identify_list.append(0)
identity_len += 1
while identity_len > 11:
identify_list.pop()
identity_len -= 1
result += self.clf.predict([identify_list])[0]
identify_list = black_pix[split_position[4] : split_position[5] + 1]
identity_len = len(identify_list)
while identity_len < 11:
identify_list.append(0)
identity_len += 1
while identity_len > 11:
identify_list.pop()
identity_len -= 1
result += self.clf.predict([identify_list])[0]
identify_list = black_pix[split_position[6] : split_position[7] + 1]
identity_len = len(identify_list)
while identity_len < 11:
identify_list.append(0)
identity_len += 1
while identity_len > 11:
identify_list.pop()
identity_len -= 1
result += self.clf.predict([identify_list])[0]
endtime = datetime.datetime.now()
interval = endtime - starttime
print "%.5f," % (interval.seconds + interval.microseconds / 1000000.0),
return result
def insert_index(self, index, low, high, black_pix, split_position):
min_index = 0
min_value = 25
if split_position[index] > high:
for i in range(low, high):
if min_value > black_pix[i]:
min_value = black_pix[i]
min_index = i
split_position.insert(index, min_index)
split_position.insert(index + 1, min_index + 1)
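if __name__ == "__main__":
    # Minimal usage sketch ("captcha.jpg" is a hypothetical path; a trained
    # model is assumed to exist at config.DATA_FILE_NAME).
    with open("captcha.jpg", "rb") as f:
        print Identify(f.read()).captcha_reader()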
|
ptressel/sahana-eden-madpub | refs/heads/master | tests/logout.py | 7 | from selenium import selenium
import unittest, time, re
class NewTest(unittest.TestCase):
def setUp(self):
self.verificationErrors = []
self.selenium = selenium("localhost", 4444, "*chrome", "http://change-this-to-the-site-you-are-testing/")
self.selenium.start()
    def test_new(self):
        sel = self.selenium  # TODO: recorded Selenium steps go here
def tearDown(self):
self.selenium.stop()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
collective/collective.subsitebehaviors | refs/heads/master | src/collective/subsitebehaviors/__init__.py | 1 | # -*- coding: utf-8 -*-
"""Init and utils."""
from zope.i18nmessageid import MessageFactory
_ = MessageFactory('collective.subsitebehaviors')
|
keedio/hue | refs/heads/master | desktop/core/ext-py/python-ldap-2.3.13/Lib/ldap/__init__.py | 44 | """
ldap - base module
See http://www.python-ldap.org/ for details.
$Id: __init__.py,v 1.70 2011/02/19 14:36:53 stroeder Exp $
"""
# This is also the overall release version number
__version__ = '2.3.13'
import sys
if __debug__:
# Tracing is only supported in debugging mode
import traceback
_trace_level = 0
_trace_file = sys.stderr
_trace_stack_limit = None
from _ldap import *
class DummyLock:
"""Define dummy class with methods compatible to threading.Lock"""
def __init__(self):
pass
def acquire(self):
pass
def release(self):
pass
try:
# Check if Python installation was build with thread support
import thread
except ImportError:
LDAPLockBaseClass = DummyLock
else:
import threading
LDAPLockBaseClass = threading.Lock
class LDAPLock:
"""
Mainly a wrapper class to log all locking events.
Note that this cumbersome approach with _lock attribute was taken
since threading.Lock is not suitable for sub-classing.
"""
_min_trace_level = 2
def __init__(self,lock_class=None,desc=''):
"""
lock_class
Class compatible to threading.Lock
desc
Description shown in debug log messages
"""
self._desc = desc
self._lock = (lock_class or LDAPLockBaseClass)()
def acquire(self):
if __debug__:
if _trace_level>=self._min_trace_level:
_trace_file.write('***%s %s.acquire()\n' % (self._desc,self.__class__.__name__))
return self._lock.acquire()
def release(self):
if __debug__:
if _trace_level>=self._min_trace_level:
_trace_file.write('***%s %s.release()\n' % (self._desc,self.__class__.__name__))
return self._lock.release()
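# A usage sketch (names are hypothetical): serialize access to a shared
# resource while logging every locking event at trace level >= 2.
#
#   _my_lock = LDAPLock(desc='my shared resource')
#   _my_lock.acquire()
#   try:
#       pass  # work with the shared resource
#   finally:
#       _my_lock.release()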
# Create module-wide lock for serializing all calls into underlying LDAP lib
_ldap_module_lock = LDAPLock(desc='Module wide')
from functions import open,initialize,init,get_option,set_option
from ldap.dn import explode_dn,explode_rdn,str2dn,dn2str
del str2dn
del dn2str
# More constants
# For compability of 2.3 and 2.4 OpenLDAP API
OPT_DIAGNOSTIC_MESSAGE = OPT_ERROR_STRING
|
Savaged-Zen/Savaged-Zen-Inc | refs/heads/master | scripts/tracing/draw_functrace.py | 14679 | #!/usr/bin/python
"""
Copyright 2008 (c) Frederic Weisbecker <fweisbec@gmail.com>
Licensed under the terms of the GNU GPL License version 2
This script parses a trace provided by the function tracer in
kernel/trace/trace_functions.c
The resulting trace is processed into a tree to produce a more readable
view of the call stack, drawn as a textual but hierarchical tree of
calls. Only the function names and the call times are provided.
Usage:
Be sure that you have CONFIG_FUNCTION_TRACER
# mount -t debugfs nodev /sys/kernel/debug
# echo function > /sys/kernel/debug/tracing/current_tracer
$ cat /sys/kernel/debug/tracing/trace_pipe > ~/raw_trace_func
Wait for a while, but not too long: the script is a bit slow.
Break the pipe (Ctrl + Z)
$ scripts/draw_functrace.py < raw_trace_func > draw_functrace
Then you have your drawn trace in draw_functrace
"""
import sys, re
class CallTree:
""" This class provides a tree representation of the functions
call stack. If a function has no parent in the kernel (interrupt,
syscall, kernel thread...) then it is attached to a virtual parent
called ROOT.
"""
ROOT = None
def __init__(self, func, time = None, parent = None):
self._func = func
self._time = time
if parent is None:
self._parent = CallTree.ROOT
else:
self._parent = parent
self._children = []
def calls(self, func, calltime):
""" If a function calls another one, call this method to insert it
into the tree at the appropriate place.
@return: A reference to the newly created child node.
"""
child = CallTree(func, calltime, self)
self._children.append(child)
return child
def getParent(self, func):
""" Retrieve the last parent of the current node that
has the name given by func. If this function is not
on a parent, then create it as new child of root
@return: A reference to the parent.
"""
tree = self
while tree != CallTree.ROOT and tree._func != func:
tree = tree._parent
if tree == CallTree.ROOT:
child = CallTree.ROOT.calls(func, None)
return child
return tree
def __repr__(self):
return self.__toString("", True)
def __toString(self, branch, lastChild):
if self._time is not None:
s = "%s----%s (%s)\n" % (branch, self._func, self._time)
else:
s = "%s----%s\n" % (branch, self._func)
i = 0
if lastChild:
branch = branch[:-1] + " "
while i < len(self._children):
if i != len(self._children) - 1:
s += "%s" % self._children[i].__toString(branch +\
" |", False)
else:
s += "%s" % self._children[i].__toString(branch +\
" |", True)
i += 1
return s
class BrokenLineException(Exception):
"""If the last line is not complete because of the pipe breakage,
we want to stop the processing and ignore this line.
"""
pass
class CommentLineException(Exception):
""" If the line is a comment (as in the beginning of the trace file),
just ignore it.
"""
pass
def parseLine(line):
line = line.strip()
if line.startswith("#"):
raise CommentLineException
m = re.match("[^]]+?\\] +([0-9.]+): (\\w+) <-(\\w+)", line)
if m is None:
raise BrokenLineException
return (m.group(1), m.group(2), m.group(3))
def main():
CallTree.ROOT = CallTree("Root (Nowhere)", None, None)
tree = CallTree.ROOT
for line in sys.stdin:
try:
calltime, callee, caller = parseLine(line)
except BrokenLineException:
break
except CommentLineException:
continue
tree = tree.getParent(caller)
tree = tree.calls(callee, calltime)
print CallTree.ROOT
if __name__ == "__main__":
main()
|
Pulgama/supriya | refs/heads/master | supriya/ugens/RunningMax.py | 1 | import collections
from supriya import CalculationRate
from supriya.ugens.Peak import Peak
class RunningMax(Peak):
"""
Tracks maximum signal amplitude.
::
>>> source = supriya.ugens.In.ar(0)
>>> trigger = supriya.ugens.Impulse.kr(1)
>>> running_max = supriya.ugens.RunningMax.ar(
... source=source,
... trigger=0,
... )
>>> running_max
RunningMax.ar()
"""
### CLASS VARIABLES ###
__documentation_section__ = "Trigger Utility UGens"
_ordered_input_names = collections.OrderedDict([("source", None), ("trigger", 0)])
_valid_calculation_rates = (CalculationRate.AUDIO, CalculationRate.CONTROL)
|
shahbazn/neutron | refs/heads/master | neutron/cmd/eventlet/agents/metadata_proxy.py | 58 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.agent.metadata import namespace_proxy
def main():
namespace_proxy.main()
|
jmesteve/openerpseda | refs/heads/master | openerp/addons/tr_barcode_field/__openerp__.py | 1 | # -*- coding: utf-8 -*-
#################################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Julius Network Solutions SARL <contact@julius.fr>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#################################################################################
{
"name" : "Barcode field Module",
"version" : "1.1",
"author" : "Julius Network Solutions",
"description" : """
Presentation:
This module adds a field to make a link between the product and the barcode.
""",
"website" : "http://www.julius.fr",
"depends" : [
"tr_barcode",
],
"category" : "Warehouse Management",
"demo" : [],
"data" : [],
'test': [],
'installable': True,
'active': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Yen-Chung-En/2015cdb_W12 | refs/heads/master | static/Brython3.1.3-20150514-095342/Lib/_codecs.py | 107 |
def ascii_decode(*args,**kw):
pass
def ascii_encode(*args,**kw):
pass
def charbuffer_encode(*args,**kw):
pass
def charmap_build(*args,**kw):
pass
def charmap_decode(*args,**kw):
pass
def charmap_encode(*args,**kw):
pass
def decode(*args,**kw):
"""decode(obj, [encoding[,errors]]) -> object
Decodes obj using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a ValueError. Other possible values are 'ignore' and 'replace'
as well as any other name registered with codecs.register_error that is
able to handle ValueErrors."""
pass
def encode(*args,**kw):
"""encode(obj, [encoding[,errors]]) -> object
Encodes obj using the codec registered for encoding. encoding defaults
to the default encoding. errors may be given to set a different error
handling scheme. Default is 'strict' meaning that encoding errors raise
a ValueError. Other possible values are 'ignore', 'replace' and
'xmlcharrefreplace' as well as any other name registered with
codecs.register_error that can handle ValueErrors."""
obj = args[0]
if len(args)==2:
encoding = args[1]
else:
encoding = 'utf-8'
if isinstance(obj, str):
return obj.encode(encoding)
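# Illustrative behaviour of the partial encode() above, assuming a str input
# (a sketch, not part of the original source):
#
#   encode('abc')            # -> b'abc' (encoding defaults to 'utf-8')
#   encode('abc', 'ascii')   # -> b'abc'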
def escape_decode(*args,**kw):
pass
def escape_encode(*args,**kw):
pass
def latin_1_decode(*args,**kw):
pass
def latin_1_encode(*args,**kw):
pass
def lookup(encoding):
"""lookup(encoding) -> CodecInfo
Looks up a codec tuple in the Python codec registry and returns
a CodecInfo object."""
if encoding in ('utf-8', 'utf_8'):
from javascript import console
console.log('encoding', encoding)
import encodings.utf_8
return encodings.utf_8.getregentry()
    raise LookupError(encoding)
def lookup_error(*args,**kw):
"""lookup_error(errors) -> handler
Return the error handler for the specified error handling name
or raise a LookupError, if no handler exists under this name."""
pass
def mbcs_decode(*args,**kw):
pass
def mbcs_encode(*args,**kw):
pass
def raw_unicode_escape_decode(*args,**kw):
pass
def raw_unicode_escape_encode(*args,**kw):
pass
def readbuffer_encode(*args,**kw):
pass
def register(*args,**kw):
"""register(search_function)
Register a codec search function. Search functions are expected to take
one argument, the encoding name in all lower case letters, and return
a tuple of functions (encoder, decoder, stream_reader, stream_writer)
(or a CodecInfo object)."""
pass
def register_error(*args,**kw):
"""register_error(errors, handler)
Register the specified error handler under the name
errors. handler must be a callable object, that
will be called with an exception instance containing
information about the location of the encoding/decoding
error and must return a (replacement, new position) tuple."""
pass
def unicode_escape_decode(*args,**kw):
pass
def unicode_escape_encode(*args,**kw):
pass
def unicode_internal_decode(*args,**kw):
pass
def unicode_internal_encode(*args,**kw):
pass
def utf_16_be_decode(*args,**kw):
pass
def utf_16_be_encode(*args,**kw):
pass
def utf_16_decode(*args,**kw):
pass
def utf_16_encode(*args,**kw):
pass
def utf_16_ex_decode(*args,**kw):
pass
def utf_16_le_decode(*args,**kw):
pass
def utf_16_le_encode(*args,**kw):
pass
def utf_32_be_decode(*args,**kw):
pass
def utf_32_be_encode(*args,**kw):
pass
def utf_32_decode(*args,**kw):
pass
def utf_32_encode(*args,**kw):
pass
def utf_32_ex_decode(*args,**kw):
pass
def utf_32_le_decode(*args,**kw):
pass
def utf_32_le_encode(*args,**kw):
pass
def utf_7_decode(*args,**kw):
pass
def utf_7_encode(*args,**kw):
pass
def utf_8_decode(*args,**kw):
pass
def utf_8_encode(*args,**kw):
input=args[0]
if len(args) == 2:
errors = args[1]
else:
errors=kw.get('errors', 'strict')
#todo need to deal with errors, but for now assume all is well.
return (bytes([_f for _f in input], 'utf-8'), len(input))
|
hdinsight/hue | refs/heads/master | desktop/core/ext-py/Django-1.6.10/tests/proxy_model_inheritance/app1/models.py | 150 | from __future__ import absolute_import
# TODO: why can't I make this ..app2
from app2.models import NiceModel
class ProxyModel(NiceModel):
class Meta:
proxy = True
|
joshavenue/python_notebook | refs/heads/master | insert.py | 1 | x = [1,2,3]
x.insert(1, 1)  # insert the value 1 at index 1
# x is now [1, 1, 2, 3]
|
uranusjr/django | refs/heads/master | tests/view_tests/tests/test_default_content_type.py | 60 | import sys
from types import ModuleType
from django.conf import Settings
from django.test import SimpleTestCase, ignore_warnings
from django.utils.deprecation import RemovedInDjango30Warning
class DefaultContentTypeTests(SimpleTestCase):
msg = 'The DEFAULT_CONTENT_TYPE setting is deprecated.'
@ignore_warnings(category=RemovedInDjango30Warning)
def test_default_content_type_is_text_html(self):
"""
Content-Type of the default error responses is text/html. Refs #20822.
"""
with self.settings(DEFAULT_CONTENT_TYPE='text/xml'):
response = self.client.get('/raises400/')
self.assertEqual(response['Content-Type'], 'text/html')
response = self.client.get('/raises403/')
self.assertEqual(response['Content-Type'], 'text/html')
response = self.client.get('/nonexistent_url/')
self.assertEqual(response['Content-Type'], 'text/html')
response = self.client.get('/server_error/')
self.assertEqual(response['Content-Type'], 'text/html')
def test_override_settings_warning(self):
with self.assertRaisesMessage(RemovedInDjango30Warning, self.msg):
with self.settings(DEFAULT_CONTENT_TYPE='text/xml'):
pass
def test_settings_init_warning(self):
settings_module = ModuleType('fake_settings_module')
settings_module.DEFAULT_CONTENT_TYPE = 'text/xml'
settings_module.SECRET_KEY = 'abc'
sys.modules['fake_settings_module'] = settings_module
try:
with self.assertRaisesMessage(RemovedInDjango30Warning, self.msg):
Settings('fake_settings_module')
finally:
del sys.modules['fake_settings_module']
|
AladdinSonni/phantomjs | refs/heads/master | src/breakpad/src/tools/gyp/pylib/gyp/MSVSProject.py | 137 | #!/usr/bin/python2.4
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import common
import xml.dom
import xml.dom.minidom
import MSVSNew
#------------------------------------------------------------------------------
class Tool(object):
"""Visual Studio tool."""
def __init__(self, name, attrs=None):
"""Initializes the tool.
Args:
name: Tool name.
attrs: Dict of tool attributes; may be None.
"""
self.name = name
self.attrs = attrs or {}
def CreateElement(self, doc):
"""Creates an element for the tool.
Args:
doc: xml.dom.Document object to use for node creation.
Returns:
A new xml.dom.Element for the tool.
"""
node = doc.createElement('Tool')
node.setAttribute('Name', self.name)
for k, v in self.attrs.items():
node.setAttribute(k, v)
return node
class Filter(object):
"""Visual Studio filter - that is, a virtual folder."""
def __init__(self, name, contents=None):
"""Initializes the folder.
Args:
name: Filter (folder) name.
contents: List of filenames and/or Filter objects contained.
"""
self.name = name
self.contents = list(contents or [])
#------------------------------------------------------------------------------
class Writer(object):
"""Visual Studio XML project writer."""
def __init__(self, project_path, version):
"""Initializes the project.
Args:
project_path: Path to the project file.
version: Format version to emit.
"""
self.project_path = project_path
self.doc = None
self.version = version
def Create(self, name, guid=None, platforms=None):
"""Creates the project document.
Args:
name: Name of the project.
guid: GUID to use for project, if not None.
"""
self.name = name
self.guid = guid or MSVSNew.MakeGuid(self.project_path)
# Default to Win32 for platforms.
if not platforms:
platforms = ['Win32']
# Create XML doc
xml_impl = xml.dom.getDOMImplementation()
self.doc = xml_impl.createDocument(None, 'VisualStudioProject', None)
# Add attributes to root element
self.n_root = self.doc.documentElement
self.n_root.setAttribute('ProjectType', 'Visual C++')
self.n_root.setAttribute('Version', self.version.ProjectVersion())
self.n_root.setAttribute('Name', self.name)
self.n_root.setAttribute('ProjectGUID', self.guid)
self.n_root.setAttribute('RootNamespace', self.name)
self.n_root.setAttribute('Keyword', 'Win32Proj')
# Add platform list
n_platform = self.doc.createElement('Platforms')
self.n_root.appendChild(n_platform)
for platform in platforms:
n = self.doc.createElement('Platform')
n.setAttribute('Name', platform)
n_platform.appendChild(n)
# Add tool files section
self.n_tool_files = self.doc.createElement('ToolFiles')
self.n_root.appendChild(self.n_tool_files)
# Add configurations section
self.n_configs = self.doc.createElement('Configurations')
self.n_root.appendChild(self.n_configs)
# Add empty References section
self.n_root.appendChild(self.doc.createElement('References'))
# Add files section
self.n_files = self.doc.createElement('Files')
self.n_root.appendChild(self.n_files)
# Keep a dict keyed on filename to speed up access.
self.n_files_dict = dict()
# Add empty Globals section
self.n_root.appendChild(self.doc.createElement('Globals'))
def AddToolFile(self, path):
"""Adds a tool file to the project.
Args:
path: Relative path from project to tool file.
"""
n_tool = self.doc.createElement('ToolFile')
n_tool.setAttribute('RelativePath', path)
self.n_tool_files.appendChild(n_tool)
def _AddConfigToNode(self, parent, config_type, config_name, attrs=None,
tools=None):
"""Adds a configuration to the parent node.
Args:
parent: Destination node.
config_type: Type of configuration node.
config_name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
# Handle defaults
if not attrs:
attrs = {}
if not tools:
tools = []
# Add configuration node and its attributes
n_config = self.doc.createElement(config_type)
n_config.setAttribute('Name', config_name)
for k, v in attrs.items():
n_config.setAttribute(k, v)
parent.appendChild(n_config)
# Add tool nodes and their attributes
if tools:
for t in tools:
if isinstance(t, Tool):
n_config.appendChild(t.CreateElement(self.doc))
else:
n_config.appendChild(Tool(t).CreateElement(self.doc))
def AddConfig(self, name, attrs=None, tools=None):
"""Adds a configuration to the project.
Args:
name: Configuration name.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
"""
self._AddConfigToNode(self.n_configs, 'Configuration', name, attrs, tools)
def _AddFilesToNode(self, parent, files):
"""Adds files and/or filters to the parent node.
Args:
parent: Destination node
files: A list of Filter objects and/or relative paths to files.
Will call itself recursively, if the files list contains Filter objects.
"""
for f in files:
if isinstance(f, Filter):
node = self.doc.createElement('Filter')
node.setAttribute('Name', f.name)
self._AddFilesToNode(node, f.contents)
else:
node = self.doc.createElement('File')
node.setAttribute('RelativePath', f)
self.n_files_dict[f] = node
parent.appendChild(node)
def AddFiles(self, files):
"""Adds files to the project.
Args:
files: A list of Filter objects and/or relative paths to files.
This makes a copy of the file/filter tree at the time of this call. If you
later add files to a Filter object which was passed into a previous call
to AddFiles(), it will not be reflected in this project.
"""
self._AddFilesToNode(self.n_files, files)
# TODO(rspangler) This also doesn't handle adding files to an existing
# filter. That is, it doesn't merge the trees.
def AddFileConfig(self, path, config, attrs=None, tools=None):
"""Adds a configuration to a file.
Args:
path: Relative path to the file.
config: Name of configuration to add.
attrs: Dict of configuration attributes; may be None.
tools: List of tools (strings or Tool objects); may be None.
Raises:
ValueError: Relative path does not match any file added via AddFiles().
"""
# Find the file node with the right relative path
parent = self.n_files_dict.get(path)
if not parent:
raise ValueError('AddFileConfig: file "%s" not in project.' % path)
# Add the config to the file node
self._AddConfigToNode(parent, 'FileConfiguration', config, attrs, tools)
def Write(self, writer=common.WriteOnDiff):
"""Writes the project file."""
f = writer(self.project_path)
self.doc.writexml(f, encoding='Windows-1252', addindent=' ', newl='\r\n')
f.close()
#------------------------------------------------------------------------------
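# A hypothetical usage sketch (not part of the original module); 'version' is
# assumed to be an MSVS version object providing ProjectVersion():
#
#   writer = Writer('demo.vcproj', version)
#   writer.Create('demo', platforms=['Win32'])
#   writer.AddConfig('Debug|Win32', tools=['VCCLCompilerTool'])
#   writer.AddFiles(['main.cc', Filter('headers', ['main.h'])])
#   writer.Write()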
|
stclair/wes-cms | refs/heads/master | django/views/generic/__init__.py | 493 | from django.views.generic.base import View, TemplateView, RedirectView
from django.views.generic.dates import (ArchiveIndexView, YearArchiveView, MonthArchiveView,
WeekArchiveView, DayArchiveView, TodayArchiveView,
DateDetailView)
from django.views.generic.detail import DetailView
from django.views.generic.edit import FormView, CreateView, UpdateView, DeleteView
from django.views.generic.list import ListView
class GenericViewError(Exception):
"""A problem in a generic view."""
pass
|
collex100/odoo | refs/heads/8.0 | addons/resource/faces/resource.py | 433 | #@+leo-ver=4
#@+node:@file resource.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
#@<< Imports >>
#@+node:<< Imports >>
import pcalendar
import datetime
import utils
import string
import bisect
import plocale
#@-node:<< Imports >>
#@nl
_is_source = True
_to_datetime = pcalendar.to_datetime
_ = plocale.get_gettext()
#@+others
#@+node:_isattrib
#@+doc
#@nonl
# is used to find snapshot attributes
#@-doc
#@@code
def _isattrib(obj, a):
return a[0] != "_" \
and not callable(getattr(obj, a)) \
and not a.endswith("_members") \
and a not in ("name")
#@-node:_isattrib
#@+node:class ResourceCalendar
class ResourceCalendar(object):
"""
    The resource calendar records the load of a resource over time.
    It is a sequence of time intervals with loads. An example of
such a sequence is:
[ (datetime.min, 0),
(2006/1/1, 1.0),
(2006/1/10, 0.5),
(2006/1/15, 0) ]
    That means the resource:
    is free until January 1st, 2006
    is fully booked from January 1st to January 10th
    is half booked from January 10th to January 15th
    is free from January 15th onwards
"""
#@ @+others
#@+node:__init__
def __init__(self, src=None):
if src:
self.bookings = list(src.bookings)
else:
self.bookings = [ (datetime.datetime.min, 0) ]
#@-node:__init__
#@+node:__str__
def __str__(self):
return str(self.bookings)
#@-node:__str__
#@+node:__repr__
def __repr__(self):
return "<ResourceCalendar %s>" % (str(self))
#@-node:__repr__
#@+node:add_load
def add_load(self, start, end, load):
start = _to_datetime(start)
end = _to_datetime(end)
bookings = self.bookings
        # the load is converted to an integer to avoid
        # rounding problems
load = int(load * 10000)
start_item = (start, 0)
start_pos = bisect.bisect_left(bookings, start_item)
left_load = 0
left_load = bookings[start_pos - 1][1]
if start_pos < len(bookings) and bookings[start_pos][0] == start:
prev_load = bookings[start_pos][1]
if prev_load + load == left_load:
del bookings[start_pos]
else:
bookings[start_pos] = (start, prev_load + load)
start_pos += 1
else:
bookings.insert(start_pos, (start, load + left_load))
start_pos += 1
item = (datetime.datetime.min, 0)
for i in range(start_pos, len(bookings)):
end_pos = i
item = bookings[i]
if item[0] >= end: break
bookings[i] = (item[0], item[1] + load)
else:
end_pos = len(bookings)
left_load = bookings[end_pos - 1][1]
if item[0] == end:
if item[1] == left_load:
del bookings[end_pos]
else:
bookings.insert(end_pos, (end, left_load - load))
#@-node:add_load
#@+node:end_of_booking_interval
def end_of_booking_interval(self, date):
date = _to_datetime(date)
bookings = self.bookings
date_item = (date, 999999)
date_pos = bisect.bisect_left(bookings, date_item) - 1
next_date = datetime.datetime.max
load = 0
try:
book_item = bookings[date_pos]
load = bookings[date_pos][1] / 10000.0
next_date = bookings[date_pos + 1][0]
except:
pass
return next_date, load
#@-node:end_of_booking_interval
#@+node:find_free_time
def find_free_time(self, start, length, load, max_load):
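        # Scan the booking intervals for the first gap, at or after 'start',
        # that is at least 'length' long and in which adding 'load' keeps
        # the total load at or below 'max_load'.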
bookings = self.bookings
if isinstance(start, datetime.datetime):
adjust_date = _to_datetime
else:
adjust_date = start.calendar.EndDate
start = _to_datetime(start)
load = int(load * 10000)
max_load = int(max_load * 10000)
lb = len(bookings)
def next_possible(index):
while index < lb:
sd, lo = bookings[index]
if lo + load <= max_load:
break
index += 1
sd = adjust_date(max(start, sd))
ed = sd + length
end = _to_datetime(ed)
index += 1
while index < lb:
date, lo = bookings[index]
if date >= end:
#I found a good start date
return None, sd
if lo + load > max_load:
return index + 1, None
index += 1
return None, sd
start_item = (start, 1000000)
i = bisect.bisect_left(bookings, start_item) - 1
next_start = None
while not next_start and i < lb:
i, next_start = next_possible(i)
assert(next_start is not None)
return next_start
#@-node:find_free_time
#@+node:get_bookings
def get_bookings(self, start, end):
start = _to_datetime(start)
end = _to_datetime(end)
bookings = self.bookings
start_item = (start, 0)
start_pos = bisect.bisect_left(bookings, start_item)
if start_pos >= len(bookings) or bookings[start_pos][0] > start:
start_pos -= 1
end_item = (end, 0)
end_pos = bisect.bisect_left(bookings, end_item)
return start_pos, end_pos, bookings
#@-node:get_bookings
#@+node:get_load
def get_load(self, date):
date = _to_datetime(date)
bookings = self.bookings
item = (date, 100000)
pos = bisect.bisect_left(bookings, item) - 1
return bookings[pos][1] / 10000.0
#@-node:get_load
#@-others
#@-node:class ResourceCalendar
#@+node:class _ResourceBase
class _ResourceBase(object):
pass
#@-node:class _ResourceBase
#@+node:class _MetaResource
class _MetaResource(type):
doc_template = """
A resource class. The resources default attributes can
be changed when the class ist instanciated, i.e.
%(name)s(max_load=2.0)
@var max_load:
Specify the maximal allowed load sum of all simultaneously
allocated tasks of a resource. A ME{max_load} of 1.0 (default)
means the resource may be fully allocated. A ME{max_load} of 1.3
means the resource may be allocated with 30%% overtime.
@var title:
        Specifies an alternative, more descriptive name for the resource.
@var efficiency:
The efficiency of a resource can be used for two purposes. First
you can use it as a crude way to model a team. A team of 5 people
should have an efficiency of 5.0. Keep in mind that you cannot
track the member of the team individually if you use this
feature. The other use is to model performance variations between
your resources.
@var vacation:
Specifies the vacation of the resource. This attribute is
specified as a list of date literals or date literal intervals.
Be aware that the end of an interval is excluded, i.e. it is
the first working date.
"""
#@ @+others
#@+node:__init__
def __init__(self, name, bases, dict_):
super(_MetaResource, self).__init__(name, bases, dict_)
self.name = name
self.title = dict_.get("title", name)
self._calendar = { None: ResourceCalendar() }
self._tasks = { }
self.__set_vacation()
self.__add_resource(bases[0])
self.__doc__ = dict_.get("__doc__", self.doc_template) % locals()
#@-node:__init__
#@+node:__or__
def __or__(self, other):
return self().__or__(other)
#@-node:__or__
#@+node:__and__
def __and__(self, other):
return self().__and__(other)
#@-node:__and__
#@+node:__cmp__
def __cmp__(self, other):
return cmp(self.name, getattr(other, "name", None))
#@-node:__cmp__
#@+node:__repr__
def __repr__(self):
return "<Resource %s>" % self.name
#@-node:__repr__
#@+node:__str__
def __str__(self):
return repr(self)
#@-node:__str__
#@+node:__set_vacation
def __set_vacation(self):
vacation = self.vacation
if isinstance(vacation, (tuple, list)):
for v in vacation:
if isinstance(v, (tuple, list)):
self.add_vacation(v[0], v[1])
else:
self.add_vacation(v)
else:
self.add_vacation(vacation)
#@-node:__set_vacation
#@+node:__add_resource
def __add_resource(self, base):
if issubclass(base, _ResourceBase):
members = getattr(base, base.__name__ + "_members", [])
members.append(self)
setattr(base, base.__name__ + "_members", members)
#@-node:__add_resource
#@+node:get_members
def get_members(self):
return getattr(self, self.__name__ + "_members", [])
#@-node:get_members
#@+node:add_vacation
def add_vacation(self, start, end=None):
start_date = _to_datetime(start)
if not end:
end_date = start_date.replace(hour=23, minute=59)
else:
end_date = _to_datetime(end)
for cal in self._calendar.itervalues():
cal.add_load(start_date, end_date, 1)
tp = Booking()
tp.start = start_date
tp.end = end_date
tp.book_start = start_date
tp.book_end = end_date
tp.work_time = end_date - start_date
tp.load = 1.0
tp.name = tp.title = _("(vacation)")
tp._id = ""
self._tasks.setdefault("", []).append(tp)
#@-node:add_vacation
#@+node:calendar
def calendar(self, scenario):
try:
return self._calendar[scenario]
except KeyError:
cal = self._calendar[scenario] = ResourceCalendar(self._calendar[None])
return cal
#@-node:calendar
#@-others
#@-node:class _MetaResource
#@+node:make_team
def make_team(resource):
members = resource.get_members()
if not members:
return resource
result = make_team(members[0])
for r in members[1:]:
result = result & make_team(r)
return result
#@-node:make_team
#@+node:class Booking
class Booking(object):
"""
A booking unit for a task.
"""
#@ << declarations >>
#@+node:<< declarations >>
book_start = datetime.datetime.min
book_end = datetime.datetime.max
actual = False
_id = ""
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
def __init__(self, task=None):
self.__task = task
#@-node:__init__
#@+node:__cmp__
def __cmp__(self, other):
return cmp(self._id, other._id)
#@-node:__cmp__
#@+node:path
def path(self):
first_dot = self._id.find(".")
return "root" + self._id[first_dot:]
path = property(path)
#@nonl
#@-node:path
#@+node:_idendity_
def _idendity_(self):
return self._id
#@-node:_idendity_
#@+node:__getattr__
def __getattr__(self, name):
if self.__task:
return getattr(self.__task, name)
raise AttributeError("'%s' is not a valid attribute" % (name))
#@-node:__getattr__
#@-others
#@-node:class Booking
#@+node:class ResourceList
class ResourceList(list):
#@ @+others
#@+node:__init__
def __init__(self, *args):
if args: self.extend(args)
#@-node:__init__
#@-others
#@-node:class ResourceList
#@+node:class Resource
class Resource(_ResourceBase):
#@ << declarations >>
#@+node:<< declarations >>
__metaclass__ = _MetaResource
__attrib_completions__ = {\
"max_load": 'max_load = ',
"title": 'title = "|"',
"efficiency": 'efficiency = ',
"vacation": 'vacation = [("|2002-02-01", "2002-02-05")]' }
__type_image__ = "resource16"
max_load = None # the maximum sum load for all task
vacation = ()
efficiency = 1.0
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
setattr(self, k, v)
#@-node:__init__
#@+node:_idendity_
def _idendity_(cls):
return "resource:" + cls.__name__
_idendity_ = classmethod(_idendity_)
#@-node:_idendity_
#@+node:__repr__
def __repr__(self):
return "<Resource %s>" % self.__class__.__name__
#@-node:__repr__
#@+node:__str__
def __str__(self):
return repr(self)
#@-node:__str__
#@+node:__call__
def __call__(self):
return self
#@-node:__call__
#@+node:__hash__
def __hash__(self):
return hash(self.__class__)
#@-node:__hash__
#@+node:__cmp__
def __cmp__(self, other):
return cmp(self.name, other.name)
#@-node:__cmp__
#@+node:__or__
def __or__(self, other):
if type(other) is _MetaResource:
other = other()
result = Resource()
result._subresource = _OrResourceGroup(self, other)
return result
#@-node:__or__
#@+node:__and__
def __and__(self, other):
if type(other) is _MetaResource:
other = other()
result = Resource()
result._subresource = _AndResourceGroup(self, other)
return result
#@-node:__and__
#@+node:_permutation_count
def _permutation_count(self):
if hasattr(self, "_subresource"):
return self._subresource._permutation_count()
return 1
#@-node:_permutation_count
#@+node:_get_resources
def _get_resources(self, state):
if hasattr(self, "_subresource"):
result = self._subresource._get_resources(state)
if self.name != "Resource":
result.name = self.name
if self.title != "Resource":
result.title = self.title
return result
result = ResourceList(self)
return result
#@-node:_get_resources
#@+node:all_members
def all_members(self):
if hasattr(self, "_subresource"):
return self._subresource.all_members()
return [ self.__class__ ]
#@-node:all_members
#@+node:unbook_tasks_of_project
def unbook_tasks_of_project(cls, project_id, scenario):
try:
task_list = cls._tasks[scenario]
except KeyError:
return
add_load = cls.calendar(scenario).add_load
for task_id, bookings in task_list.items():
if task_id.startswith(project_id):
for item in bookings:
add_load(item.book_start, item.book_end, -item.load)
del task_list[task_id]
if not task_list:
del cls._tasks[scenario]
unbook_tasks_of_project = classmethod(unbook_tasks_of_project)
#@-node:unbook_tasks_of_project
#@+node:unbook_task
def unbook_task(cls, task):
identdity = task._idendity_()
scenario = task.scenario
try:
task_list = cls._tasks[scenario]
bookings = task_list[identdity]
except KeyError:
return
add_load = cls.calendar(scenario).add_load
for b in bookings:
add_load(b.book_start, b.book_end, -b.load)
del task_list[identdity]
if not task_list:
del cls._tasks[scenario]
unbook_task = classmethod(unbook_task)
#@-node:unbook_task
#@+node:correct_bookings
def correct_bookings(cls, task):
#correct the booking data with the actual task data
try:
tasks = cls._tasks[task.scenario][task._idendity_()]
except KeyError:
return
for t in tasks:
t.start = task.start.to_datetime()
t.end = task.end.to_datetime()
correct_bookings = classmethod(correct_bookings)
#@-node:correct_bookings
#@+node:book_task
def book_task(cls, task, start, end, load, work_time, actual):
if not work_time: return
start = _to_datetime(start)
end = _to_datetime(end)
identdity = task._idendity_()
task_list = cls._tasks.setdefault(task.scenario, {})
bookings = task_list.setdefault(identdity, [])
add_load = cls.calendar(task.scenario).add_load
tb = Booking(task)
tb.book_start = start
tb.book_end = end
tb._id = identdity
tb.load = load
tb.start = _to_datetime(task.start)
tb.end = _to_datetime(task.end)
tb.title = task.title
tb.name = task.name
tb.work_time = int(work_time)
tb.actual = actual
bookings.append(tb)
result = add_load(start, end, load)
return result
book_task = classmethod(book_task)
#@-node:book_task
#@+node:length_of
def length_of(cls, task):
cal = task.root.calendar
bookings = cls.get_bookings(task)
return sum(map(lambda b: task._to_delta(b.work_time).round(), bookings))
length_of = classmethod(length_of)
#@-node:length_of
#@+node:done_of
def done_of(self, task):
cal = task.root.calendar
now = cal.now
bookings = self.get_bookings(task)
if task.__dict__.has_key("effort"):
efficiency = self.efficiency * task.efficiency
else:
efficiency = 1
def book_done(booking):
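            # Prorate the booking linearly when 'now' falls inside it;
            # bookings that lie entirely in the past count in full.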
if booking.book_start >= now:
return 0
factor = 1
if booking.book_end > now:
start = task._to_start(booking.book_start)
end = task._to_end(booking.book_end)
cnow = task._to_start(now)
factor = float(cnow - start) / ((end - start) or 1)
return factor * booking.work_time * efficiency
return task._to_delta(sum(map(book_done, bookings)))
#@-node:done_of
#@+node:todo_of
def todo_of(self, task):
cal = task.root.calendar
now = cal.now
bookings = self.get_bookings(task)
if task.__dict__.has_key("effort"):
efficiency = self.efficiency * task.efficiency
else:
efficiency = 1
def book_todo(booking):
if booking.book_end <= now:
return 0
factor = 1
if booking.book_start < now:
start = task._to_start(booking.book_start)
end = task._to_end(booking.book_end)
cnow = task._to_start(now)
factor = float(end - cnow) / ((end - start) or 1)
return factor * booking.work_time * efficiency
return task._to_delta(sum(map(book_todo, bookings)))
#@-node:todo_of
#@+node:get_bookings
def get_bookings(cls, task):
return cls._tasks.get(task.scenario, {}).get(task._idendity_(), ())
get_bookings = classmethod(get_bookings)
#@-node:get_bookings
#@+node:get_bookings_at
def get_bookings_at(cls, start, end, scenario):
result = []
try:
items = cls._tasks[scenario].iteritems()
except KeyError:
return ()
for task_id, bookings in items:
result += [ booking for booking in bookings
if booking.book_start < end
and booking.book_end > start ]
vacations = cls._tasks.get("", ())
result += [ booking for booking in vacations
if booking.book_start < end
and booking.book_end > start ]
return result
get_bookings_at = classmethod(get_bookings_at)
#@-node:get_bookings_at
#@+node:find_free_time
def find_free_time(cls, start, length, load, max_load, scenario):
return cls.calendar(scenario).find_free_time(start, length, load, max_load)
find_free_time = classmethod(find_free_time)
#@-node:find_free_time
#@+node:get_load
def get_load(cls, date, scenario):
return cls.calendar(scenario).get_load(date)
get_load = classmethod(get_load)
#@-node:get_load
#@+node:end_of_booking_interval
def end_of_booking_interval(cls, date, task):
return cls.calendar(task.scenario).end_of_booking_interval(date)
end_of_booking_interval = classmethod(end_of_booking_interval)
#@-node:end_of_booking_interval
#@+node:snapshot
def snapshot(self):
from task import _as_string
def isattrib(a):
if a == "max_load" and self.max_load is None: return False
if a in ("name", "title", "vacation"): return False
return _isattrib(self, a)
attribs = filter(isattrib, dir(self))
attribs = map(lambda a: "%s=%s" % (a, _as_string(getattr(self, a))),
attribs)
return self.name + "(%s)" % ", ".join(attribs)
#@-node:snapshot
#@-others
#@-node:class Resource
#@+node:class _ResourceGroup
class _ResourceGroup(object):
#@ @+others
#@+node:__init__
def __init__(self, *args):
self.resources = []
for a in args:
self.__append(a)
#@-node:__init__
#@+node:all_members
def all_members(self):
group = reduce(lambda a, b: a + b.all_members(),
self.resources, [])
group = map(lambda r: (r, True), group)
group = dict(group)
group = group.keys()
return group
#@-node:all_members
#@+node:_permutation_count
def _permutation_count(self):
        abstract  # intentionally undefined: referencing it raises NameError unless overridden
#@-node:_permutation_count
#@+node:_refactor
def _refactor(self, arg):
pass
#@-node:_refactor
#@+node:__append
def __append(self, arg):
if isinstance(arg, self.__class__):
self.resources += arg.resources
for r in arg.resources:
self._refactor(r)
return
elif isinstance(arg, Resource):
subresources = getattr(arg, "_subresource", None)
if subresources:
self.__append(subresources)
return
else:
self.resources.append(arg)
else:
assert(isinstance(arg, _ResourceGroup))
self.resources.append(arg)
self._refactor(arg)
#@-node:__append
#@+node:__str__
def __str__(self):
        op = self.__class__.__name__[0:-13].lower()
return "(" + \
string.join([str(r) for r in self.resources],
" " + op + " ") + \
")"
#@-node:__str__
#@-others
#@-node:class _ResourceGroup
#@+node:class _OrResourceGroup
class _OrResourceGroup(_ResourceGroup):
#@ @+others
#@+node:_get_resources
def _get_resources(self, state):
for r in self.resources:
c = r._permutation_count()
if c <= state:
state -= c
else:
return r._get_resources(state)
assert(0)
#@-node:_get_resources
#@+node:_permutation_count
def _permutation_count(self):
return sum([ r._permutation_count() for r in self.resources])
#@-node:_permutation_count
#@-others
#@-node:class _OrResourceGroup
#@+node:class _AndResourceGroup
class _AndResourceGroup(_ResourceGroup):
#@ @+others
#@+node:__init__
def __init__(self, *args):
self.factors = [ 1 ]
_ResourceGroup.__init__(self, *args)
#@-node:__init__
#@+node:_refactor
def _refactor(self, arg):
count = arg._permutation_count()
self.factors = [ count * f for f in self.factors ]
self.factors.append(1)
#@-node:_refactor
#@+node:_permutation_count
#print "AndResourceGroup", count, arg, self.factors
def _permutation_count(self):
return self.factors[0]
#@-node:_permutation_count
#@+node:_get_resources
def _get_resources(self, state):
"""delivers None when there are duplicate resources"""
result = []
for i in range(1, len(self.factors)):
f = self.factors[i]
substate = state / f
state %= f
result.append(self.resources[i - 1]._get_resources(substate))
result = ResourceList(*list(utils.flatten(result)))
dupl_test = { }
for r in result:
if dupl_test.has_key(r):
return None
else:
dupl_test[r] = 1
return result
#@-node:_get_resources
#@+node:_has_duplicates
def _has_duplicates(self, state):
resources = self._get_resources(state)
tmp = { }
for r in resources:
if tmp.has_key(r):
return True
tmp[r] = 1
return False
#@-node:_has_duplicates
#@-others
#@-node:class _AndResourceGroup
#@-others
#@-node:@file resource.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
adelina-t/nova | refs/heads/master | nova/api/openstack/compute/schemas/v3/reset_server_state.py | 110 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
reset_state = {
'type': 'object',
'properties': {
'os-resetState': {
'type': 'object',
'properties': {
'state': {
'type': 'string',
'enum': ['active', 'error'],
},
},
'required': ['state'],
'additionalProperties': False,
},
},
'required': ['os-resetState'],
'additionalProperties': False,
}
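# A minimal request body that validates against this schema (illustrative
# sketch, not part of the original source):
#
#   {"os-resetState": {"state": "error"}}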
|
OpenCobolIDE/OpenCobolIDE | refs/heads/master | open_cobol_ide/extlibs/pygments/styles/manni.py | 50 | # -*- coding: utf-8 -*-
"""
pygments.styles.manni
~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
This is a port of the style used in the `php port`_ of pygments
by Manni. The style is called 'default' there.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ManniStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
background_color = '#f0f3f3'
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #0099FF',
Comment.Preproc: 'noitalic #009999',
Comment.Special: 'bold',
Keyword: 'bold #006699',
Keyword.Pseudo: 'nobold',
Keyword.Type: '#007788',
Operator: '#555555',
Operator.Word: 'bold #000000',
Name.Builtin: '#336666',
Name.Function: '#CC00FF',
Name.Class: 'bold #00AA88',
Name.Namespace: 'bold #00CCFF',
Name.Exception: 'bold #CC0000',
Name.Variable: '#003333',
Name.Constant: '#336600',
Name.Label: '#9999FF',
Name.Entity: 'bold #999999',
Name.Attribute: '#330099',
Name.Tag: 'bold #330099',
Name.Decorator: '#9999FF',
String: '#CC3300',
String.Doc: 'italic',
String.Interpol: '#AA0000',
String.Escape: 'bold #CC3300',
String.Regex: '#33AAAA',
String.Symbol: '#FFCC33',
String.Other: '#CC3300',
Number: '#FF6600',
Generic.Heading: 'bold #003300',
Generic.Subheading: 'bold #003300',
Generic.Deleted: 'border:#CC0000 bg:#FFCCCC',
Generic.Inserted: 'border:#00CC00 bg:#CCFFCC',
Generic.Error: '#FF0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: 'bold #000099',
Generic.Output: '#AAAAAA',
Generic.Traceback: '#99CC66',
Error: 'bg:#FFAAAA #AA0000'
}
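# An illustrative usage sketch (not part of the original source), assuming the
# standard pygments package layout:
#
#   from pygments import highlight
#   from pygments.lexers import PythonLexer
#   from pygments.formatters import HtmlFormatter
#   print(highlight('x = 1', PythonLexer(), HtmlFormatter(style='manni')))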
|
lshain-android-source/external-chromium_org | refs/heads/master | chrome/test/chromedriver/test/run_java_tests.py | 23 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs the WebDriver Java acceptance tests.
This script is called from chrome/test/chromedriver/run_all_tests.py and reports
results using the buildbot annotation scheme.
For ChromeDriver documentation, refer to http://code.google.com/p/chromedriver.
"""
import optparse
import os
import shutil
import sys
import xml.dom.minidom as minidom
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(1, os.path.join(_THIS_DIR, os.pardir))
import chrome_paths
import test_environment
import util
class TestResult(object):
"""A result for an attempted single test case."""
def __init__(self, name, time, failure):
"""Initializes a test result.
Args:
name: the full name of the test.
time: the amount of time the test ran, in seconds.
failure: the test error or failure message, or None if the test passed.
"""
self._name = name
self._time = time
self._failure = failure
def GetName(self):
"""Returns the test name."""
return self._name
def GetTime(self):
"""Returns the time it took to run the test."""
return self._time
def IsPass(self):
"""Returns whether the test passed."""
return self._failure is None
def GetFailureMessage(self):
"""Returns the test failure message, or None if the test passed."""
return self._failure
def _Run(java_tests_src_dir, test_filter,
chromedriver_path, chrome_path, android_package,
verbose, debug):
"""Run the WebDriver Java tests and return the test results.
Args:
java_tests_src_dir: the java test source code directory.
test_filter: the filter to use when choosing tests to run. Format is same
as Google C++ Test format.
chromedriver_path: path to ChromeDriver exe.
chrome_path: path to Chrome exe.
android_package: name of Chrome's Android package.
verbose: whether the output should be verbose.
debug: whether the tests should wait until attached by a debugger.
Returns:
A list of |TestResult|s.
"""
test_dir = util.MakeTempDir()
keystore_path = ('java', 'client', 'test', 'keystore')
required_dirs = [keystore_path[:-1],
('javascript',),
('third_party', 'closure', 'goog'),
('third_party', 'js')]
for required_dir in required_dirs:
os.makedirs(os.path.join(test_dir, *required_dir))
test_jar = 'test-standalone.jar'
class_path = test_jar
shutil.copyfile(os.path.join(java_tests_src_dir, 'keystore'),
os.path.join(test_dir, *keystore_path))
util.Unzip(os.path.join(java_tests_src_dir, 'common.zip'), test_dir)
shutil.copyfile(os.path.join(java_tests_src_dir, test_jar),
os.path.join(test_dir, test_jar))
sys_props = ['selenium.browser=chrome',
'webdriver.chrome.driver=' + os.path.abspath(chromedriver_path)]
if chrome_path is not None:
sys_props += ['webdriver.chrome.binary=' + os.path.abspath(chrome_path)]
if android_package is not None:
sys_props += ['webdriver.chrome.android_package=' + android_package]
if test_filter:
# Test jar actually takes a regex. Convert from glob.
test_filter = test_filter.replace('*', '.*')
sys_props += ['filter=' + test_filter]
jvm_args = []
if debug:
jvm_args += ['-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,'
'address=33081']
# Unpack the sources into the test directory and add to the class path
# for ease of debugging, particularly with jdb.
util.Unzip(os.path.join(java_tests_src_dir, 'test-nodeps-srcs.jar'),
test_dir)
class_path += ':' + test_dir
return _RunAntTest(
test_dir, 'org.openqa.selenium.chrome.ChromeDriverTests',
class_path, sys_props, jvm_args, verbose)
def _RunAntTest(test_dir, test_class, class_path, sys_props, jvm_args, verbose):
"""Runs a single Ant JUnit test suite and returns the |TestResult|s.
Args:
test_dir: the directory to run the tests in.
test_class: the name of the JUnit test suite class to run.
class_path: the Java class path used when running the tests, colon delimited
sys_props: Java system properties to set when running the tests.
jvm_args: Java VM command line args to use.
verbose: whether the output should be verbose.
Returns:
A list of |TestResult|s.
"""
def _CreateBuildConfig(test_name, results_file, class_path, junit_props,
sys_props, jvm_args):
def _SystemPropToXml(prop):
key, value = prop.split('=')
return '<sysproperty key="%s" value="%s"/>' % (key, value)
def _JvmArgToXml(arg):
return '<jvmarg value="%s"/>' % arg
return '\n'.join([
'<project>',
' <target name="test">',
' <junit %s>' % ' '.join(junit_props),
' <formatter type="xml"/>',
' <classpath>',
' <pathelement path="%s"/>' % class_path,
' </classpath>',
' ' + '\n '.join(map(_SystemPropToXml, sys_props)),
' ' + '\n '.join(map(_JvmArgToXml, jvm_args)),
' <test name="%s" outfile="%s"/>' % (test_name, results_file),
' </junit>',
' </target>',
'</project>'])
def _ProcessResults(results_path):
doc = minidom.parse(results_path)
tests = []
for test in doc.getElementsByTagName('testcase'):
name = test.getAttribute('classname') + '.' + test.getAttribute('name')
time = test.getAttribute('time')
failure = None
error_nodes = test.getElementsByTagName('error')
failure_nodes = test.getElementsByTagName('failure')
if error_nodes:
failure = error_nodes[0].childNodes[0].nodeValue
elif failure_nodes:
failure = failure_nodes[0].childNodes[0].nodeValue
tests += [TestResult(name, time, failure)]
return tests
junit_props = ['printsummary="yes"',
'fork="yes"',
'haltonfailure="no"',
'haltonerror="no"']
if verbose:
junit_props += ['showoutput="yes"']
ant_file = open(os.path.join(test_dir, 'build.xml'), 'w')
ant_file.write(_CreateBuildConfig(
test_class, 'results', class_path, junit_props, sys_props, jvm_args))
ant_file.close()
if util.IsWindows():
ant_name = 'ant.bat'
else:
ant_name = 'ant'
code = util.RunCommand([ant_name, 'test'], cwd=test_dir)
if code != 0:
print 'FAILED to run java tests of %s through ant' % test_class
return
return _ProcessResults(os.path.join(test_dir, 'results.xml'))
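# For reference, _CreateBuildConfig above emits a build.xml of roughly this
# shape (values elided):
#
#   <project>
#     <target name="test">
#       <junit printsummary="yes" fork="yes" ...>
#         <formatter type="xml"/>
#         <classpath><pathelement path="..."/></classpath>
#         <sysproperty key="..." value="..."/>
#         <test name="..." outfile="results"/>
#       </junit>
#     </target>
#   </project>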
def PrintTestResults(results):
"""Prints the given results in a format recognized by the buildbot."""
failures = []
failure_names = []
for result in results:
if not result.IsPass():
failures += [result]
failure_names += ['.'.join(result.GetName().split('.')[-2:])]
print 'Ran %s tests' % len(results)
print 'Failed %s:' % len(failures)
util.AddBuildStepText('failed %s/%s' % (len(failures), len(results)))
for result in failures:
print '=' * 80
print '=' * 10, result.GetName(), '(%ss)' % result.GetTime()
print result.GetFailureMessage()
if len(failures) < 10:
util.AddBuildStepText('.'.join(result.GetName().split('.')[-2:]))
print 'Rerun failing tests with filter:', ':'.join(failure_names)
return len(failures)
def main():
parser = optparse.OptionParser()
parser.add_option(
'', '--verbose', action='store_true', default=False,
help='Whether output should be verbose')
parser.add_option(
'', '--debug', action='store_true', default=False,
help='Whether to wait to be attached by a debugger')
parser.add_option(
'', '--chromedriver', type='string', default=None,
help='Path to a build of the chromedriver library(REQUIRED!)')
parser.add_option(
'', '--chrome', type='string', default=None,
help='Path to a build of the chrome binary')
parser.add_option(
'', '--chrome-version', default='HEAD',
help='Version of chrome. Default is \'HEAD\'')
parser.add_option(
'', '--android-package', type='string', default=None,
help='Name of Chrome\'s Android package')
parser.add_option(
'', '--filter', type='string', default=None,
help='Filter for specifying what tests to run, "*" will run all. E.g., '
'*testShouldReturnTitleOfPageIfSet')
parser.add_option(
'', '--also-run-disabled-tests', action='store_true', default=False,
help='Include disabled tests while running the tests')
parser.add_option(
'', '--isolate-tests', action='store_true', default=False,
help='Relaunch the jar test harness after each test')
options, _ = parser.parse_args()
if options.chromedriver is None or not os.path.exists(options.chromedriver):
parser.error('chromedriver is required or the given path is invalid.' +
'Please run "%s --help" for help' % __file__)
if options.android_package is not None:
if options.chrome_version != 'HEAD':
parser.error('Android does not support the --chrome-version argument.')
environment = test_environment.AndroidTestEnvironment()
else:
environment = test_environment.DesktopTestEnvironment(
options.chrome_version)
try:
environment.GlobalSetUp()
# Run passed tests when filter is not provided.
if options.isolate_tests:
test_filters = environment.GetPassedJavaTests()
else:
if options.filter:
test_filter = options.filter
else:
test_filter = '*'
if not options.also_run_disabled_tests:
if '-' in test_filter:
test_filter += ':'
else:
test_filter += '-'
test_filter += ':'.join(environment.GetDisabledJavaTestMatchers())
test_filters = [test_filter]
java_tests_src_dir = os.path.join(chrome_paths.GetSrc(), 'chrome', 'test',
'chromedriver', 'third_party',
'java_tests')
if (not os.path.exists(java_tests_src_dir) or
not os.listdir(java_tests_src_dir)):
java_tests_url = ('http://src.chromium.org/svn/trunk/deps/third_party'
'/webdriver')
print ('"%s" is empty or it doesn\'t exist. ' % java_tests_src_dir +
'Need to map <chrome-svn>/trunk/deps/third_party/webdriver to '
'chrome/test/chromedriver/third_party/java_tests in .gclient.\n'
'Alternatively, do:\n'
' $ cd chrome/test/chromedriver/third_party\n'
' $ svn co %s java_tests' % java_tests_url)
return 1
results = []
for filter in test_filters:
results += _Run(
java_tests_src_dir=java_tests_src_dir,
test_filter=filter,
chromedriver_path=options.chromedriver,
chrome_path=options.chrome,
android_package=options.android_package,
verbose=options.verbose,
debug=options.debug)
return PrintTestResults(results)
finally:
environment.GlobalTearDown()
if __name__ == '__main__':
sys.exit(main())
|
jimi-c/ansible | refs/heads/devel | lib/ansible/modules/files/assemble.py | 7 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2012, Stephen Fromm <sfromm@gmail.com>
# Copyright: (c) 2016, Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: assemble
short_description: Assembles a configuration file from fragments
description:
- Assembles a configuration file from fragments. Often a particular
    program takes a single configuration file and does not support a
C(conf.d) style structure where it is easy to build up the configuration
from multiple sources. C(assemble) will take a directory of files that can be
local or have already been transferred to the system, and concatenate them
together to produce a destination file. Files are assembled in string sorting order.
Puppet calls this idea I(fragments).
version_added: "0.5"
options:
src:
description:
- An already existing directory full of source files.
required: true
dest:
description:
- A file to create using the concatenation of all of the source files.
required: true
backup:
description:
- Create a backup file (if C(yes)), including the timestamp information so
you can get the original file back if you somehow clobbered it
incorrectly.
type: bool
default: 'no'
delimiter:
description:
- A delimiter to separate the file contents.
version_added: "1.4"
remote_src:
description:
      - If False, it will search for src on the originating/master machine; if True, it
        will go to the remote/target machine for the src. Default is True.
type: bool
default: 'yes'
version_added: "1.4"
regexp:
description:
      - Assemble files only if C(regexp) matches the filename. If not set,
        all files are assembled. Any "\\" (backslash) must be escaped as
        "\\\\" to comply with YAML syntax. Uses Python regular expressions; see
U(http://docs.python.org/2/library/re.html).
ignore_hidden:
description:
- A boolean that controls if files that start with a '.' will be included or not.
type: bool
default: 'no'
version_added: "2.0"
validate:
description:
- The validation command to run before copying into place. The path to the file to
validate is passed in via '%s' which must be present as in the sshd example below.
The command is passed securely so shell features like expansion and pipes won't work.
version_added: "2.0"
author:
- Stephen Fromm (@sfromm)
extends_documentation_fragment:
- files
- decrypt
'''
EXAMPLES = '''
# Example from Ansible Playbooks
- assemble:
src: /etc/someapp/fragments
dest: /etc/someapp/someapp.conf
# When a delimiter is specified, it will be inserted in between each fragment
- assemble:
src: /etc/someapp/fragments
dest: /etc/someapp/someapp.conf
delimiter: '### START FRAGMENT ###'
# Copy a new "sshd_config" file into place, after passing validation with sshd
- assemble:
src: /etc/ssh/conf.d/
dest: /etc/ssh/sshd_config
validate: '/usr/sbin/sshd -t -f %s'
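# Hypothetical example (not part of the original docs): assemble only the
# fragments whose names end in "conf", skipping hidden files
- assemble:
    src: /etc/someapp/fragments
    dest: /etc/someapp/someapp.conf
    regexp: 'conf$'
    ignore_hidden: yes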
'''
import codecs
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import b
from ansible.module_utils._text import to_native
def assemble_from_fragments(src_path, delimiter=None, compiled_regexp=None, ignore_hidden=False, tmpdir=None):
''' assemble a file from a directory of fragments '''
tmpfd, temp_path = tempfile.mkstemp(dir=tmpdir)
tmp = os.fdopen(tmpfd, 'wb')
delimit_me = False
add_newline = False
for f in sorted(os.listdir(src_path)):
if compiled_regexp and not compiled_regexp.search(f):
continue
fragment = os.path.join(src_path, f)
if not os.path.isfile(fragment) or (ignore_hidden and os.path.basename(fragment).startswith('.')):
continue
fragment_content = open(fragment, 'rb').read()
# always put a newline between fragments if the previous fragment didn't end with a newline.
if add_newline:
tmp.write(b('\n'))
# delimiters should only appear between fragments
if delimit_me:
if delimiter:
# un-escape anything like newlines
delimiter = codecs.escape_decode(delimiter)[0]
tmp.write(delimiter)
# always make sure there's a newline after the
# delimiter, so lines don't run together
if delimiter[-1] != b('\n'):
tmp.write(b('\n'))
tmp.write(fragment_content)
delimit_me = True
if fragment_content.endswith(b('\n')):
add_newline = False
else:
add_newline = True
tmp.close()
return temp_path
def cleanup(path, result=None):
# cleanup just in case
if os.path.exists(path):
try:
os.remove(path)
except (IOError, OSError) as e:
# don't error on possible race conditions, but keep warning
if result is not None:
result['warnings'] = ['Unable to remove temp file (%s): %s' % (path, to_native(e))]
def main():
module = AnsibleModule(
# not checking because of daisy chain to file module
argument_spec=dict(
src=dict(required=True, type='path'),
delimiter=dict(required=False),
dest=dict(required=True, type='path'),
backup=dict(default=False, type='bool'),
remote_src=dict(default=False, type='bool'),
regexp=dict(required=False),
ignore_hidden=dict(default=False, type='bool'),
validate=dict(required=False, type='str'),
),
add_file_common_args=True,
)
changed = False
path_hash = None
dest_hash = None
src = module.params['src']
dest = module.params['dest']
backup = module.params['backup']
delimiter = module.params['delimiter']
regexp = module.params['regexp']
compiled_regexp = None
ignore_hidden = module.params['ignore_hidden']
validate = module.params.get('validate', None)
result = dict(src=src, dest=dest)
if not os.path.exists(src):
module.fail_json(msg="Source (%s) does not exist" % src)
if not os.path.isdir(src):
module.fail_json(msg="Source (%s) is not a directory" % src)
if regexp is not None:
try:
compiled_regexp = re.compile(regexp)
except re.error as e:
module.fail_json(msg="Invalid Regexp (%s) in \"%s\"" % (to_native(e), regexp))
if validate and "%s" not in validate:
module.fail_json(msg="validate must contain %%s: %s" % validate)
path = assemble_from_fragments(src, delimiter, compiled_regexp, ignore_hidden, module.tmpdir)
path_hash = module.sha1(path)
result['checksum'] = path_hash
# Backwards compat. This won't return data if FIPS mode is active
try:
pathmd5 = module.md5(path)
except ValueError:
pathmd5 = None
result['md5sum'] = pathmd5
if os.path.exists(dest):
dest_hash = module.sha1(dest)
if path_hash != dest_hash:
if validate:
(rc, out, err) = module.run_command(validate % path)
result['validation'] = dict(rc=rc, stdout=out, stderr=err)
if rc != 0:
cleanup(path)
module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc, err))
if backup and dest_hash is not None:
result['backup_file'] = module.backup_local(dest)
module.atomic_move(path, dest, unsafe_writes=module.params['unsafe_writes'])
changed = True
cleanup(path, result)
# handle file permissions
file_args = module.load_file_common_arguments(module.params)
result['changed'] = module.set_fs_attributes_if_different(file_args, changed)
# Mission complete
result['msg'] = "OK"
module.exit_json(**result)
if __name__ == '__main__':
main()
|
nesdis/djongo | refs/heads/master | tests/django_tests/tests/v21/tests/i18n/sampleproject/update_catalogs.py | 344 | #!/usr/bin/env python
"""
Helper script to update sampleproject's translation catalogs.
When a bug has been identified related to i18n, this helps capture the issue
by using catalogs created from management commands.
Example:
The string "Two %% Three %%%" renders differently using trans and blocktrans.
This issue is difficult to debug, it could be a problem with extraction,
interpolation, or both.
How this script helps:
* Add {% trans "Two %% Three %%%" %} and blocktrans equivalent to templates.
* Run this script.
* Test extraction - verify the new msgid in sampleproject's django.po.
* Add a translation to sampleproject's django.po.
* Run this script.
* Test interpolation - verify templatetag rendering, test each in a template
that is rendered using an activated language from sampleproject's locale.
* Tests should fail, issue captured.
* Fix issue.
* Run this script.
* Tests all pass.
"""
import os
import re
import sys
proj_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(proj_dir, '..', '..', '..')))
def update_translation_catalogs():
"""Run makemessages and compilemessages in sampleproject."""
from django.core.management import call_command
prev_cwd = os.getcwd()
os.chdir(proj_dir)
call_command('makemessages')
call_command('compilemessages')
# keep the diff friendly - remove 'POT-Creation-Date'
pofile = os.path.join(proj_dir, 'locale', 'fr', 'LC_MESSAGES', 'django.po')
with open(pofile) as f:
content = f.read()
content = re.sub(r'^"POT-Creation-Date.+$\s', '', content, flags=re.MULTILINE)
with open(pofile, 'w') as f:
f.write(content)
os.chdir(prev_cwd)
if __name__ == "__main__":
update_translation_catalogs()
|
open-cloud/xos | refs/heads/master | lib/xos-genx/xos-genx-tests/test_optimize.py | 2 | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import unittest
from xosgenx.jinja2_extensions.fol2 import FOL2Python
class XProtoOptimizeTest(unittest.TestCase):
def setUp(self):
self.f2p = FOL2Python()
self.maxDiff = None
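    # hoist_outer pulls subexpressions that do not depend on the bound
    # variable out of exists/forall, e.g. rewriting
    #   exists X. (X.foo | y)
    # into
    #   y | (not y & exists X. X.foo)
    # as the tests below verify.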
def test_constant(self):
input = "True"
output = self.f2p.hoist_outer(input)
self.assertEqual(output, input)
def test_exists(self):
input = {"exists": ["X", {"|": ["X.foo", "y"]}]}
output = self.f2p.hoist_outer(input)
expected = {"|": ["y", {"&": [{"not": "y"}, {"exists": ["X", "X.foo"]}]}]}
self.assertEqual(output, expected)
def test_exists_implies(self):
input = {
"exists": [
"Foo",
{
"&": [
{"=": ("Foo.a", "1")},
{"->": ["write_access", {"=": ("Foo.b", "1")}]},
]
},
]
}
output = self.f2p.hoist_outer(input)
expected = {
"|": [
{
"&": [
"write_access",
{
"exists": [
"Foo",
{"&": [{"=": ["Foo.a", "1"]}, {"=": ["Foo.b", "1"]}]},
]
},
]
},
{
"&": [
{"not": "write_access"},
{"exists": ["Foo", {"=": ["Foo.a", "1"]}]},
]
},
]
}
self.assertEqual(output, expected)
def test_forall(self):
input = {"forall": ["X", {"|": ["X.foo", "y"]}]}
output = self.f2p.hoist_outer(input)
expected = {"|": ["y", {"&": [{"not": "y"}, {"forall": ["X", "X.foo"]}]}]}
self.assertEqual(output, expected)
def test_exists_embedded(self):
input = {"&": ["True", {"exists": ["X", {"|": ["X.foo", "y"]}]}]}
output = self.f2p.hoist_outer(input)
expected = {"|": ["y", {"&": [{"not": "y"}, {"exists": ["X", "X.foo"]}]}]}
self.assertEqual(output, expected)
def test_exists_equals(self):
input = {"&": ["True", {"exists": ["X", {"|": ["X.foo", {"=": ["y", "z"]}]}]}]}
output = self.f2p.hoist_outer(input)
expected = {
"|": [
{"=": ["y", "z"]},
{"&": [{"not": {"=": ["y", "z"]}}, {"exists": ["X", "X.foo"]}]},
]
}
self.assertEqual(output, expected)
def test_exists_nested_constant(self):
input = {"&": ["True", {"exists": ["X", {"|": ["y", {"=": ["y", "X.foo"]}]}]}]}
output = self.f2p.hoist_outer(input)
expected = {
"|": [
"y",
{"&": [{"not": "y"}, {"exists": ["X", {"=": ["False", "X.foo"]}]}]},
]
}
self.assertEqual(output, expected)
def test_exists_nested(self):
input = {"exists": ["X", {"exists": ["Y", {"=": ["Y.foo", "X.foo"]}]}]}
output = self.f2p.hoist_outer(input)
expected = input
        self.assertEqual(output, expected)
def test_exists_nested2(self):
input = {"exists": ["X", {"exists": ["Y", {"=": ["Z", "Y"]}]}]}
output = self.f2p.hoist_outer(input)
expected = {"exists": ["Y", {"=": ["Z", "Y"]}]}
self.assertEqual(output, expected)
def test_exists_nested3(self):
input = {"exists": ["X", {"exists": ["Y", {"=": ["Z", "X"]}]}]}
output = self.f2p.hoist_outer(input)
expected = {"exists": ["X", {"=": ["Z", "X"]}]}
self.assertEqual(output, expected)
if __name__ == "__main__":
unittest.main()
|
guorendong/iridium-browser-ubuntu | refs/heads/ubuntu/precise | third_party/cython/src/Cython/Debugger/libpython.py | 101 | #!/usr/bin/python
# NOTE: this file is taken from the Python source distribution
# It can be found under Tools/gdb/libpython.py. It is shipped with Cython
# because it's not installed as a python module, and because changes are only
# merged into new python versions (v3.2+).
'''
From gdb 7 onwards, gdb's build can be configured --with-python, allowing gdb
to be extended with Python code e.g. for library-specific data visualizations,
such as for the C++ STL types. Documentation on this API can be seen at:
http://sourceware.org/gdb/current/onlinedocs/gdb/Python-API.html
This python module deals with the case when the process being debugged (the
"inferior process" in gdb parlance) is itself python, or more specifically,
linked against libpython. In this situation, almost every item of data is a
(PyObject*), and having the debugger merely print their addresses is not very
enlightening.
This module embeds knowledge about the implementation details of libpython so
that we can emit useful visualizations e.g. a string, a list, a dict, a frame
giving file/line information and the state of local variables.
In particular, given a gdb.Value corresponding to a PyObject* in the inferior
process, we can generate a "proxy value" within the gdb process. For example,
given a PyObject* in the inferior process that is in fact a PyListObject*
holding three PyObject* that turn out to be PyStringObject* instances, we can
generate a proxy value within the gdb process that is a list of strings:
["foo", "bar", "baz"]
Doing so can be expensive for complicated graphs of objects, and could take
some time, so we also have a "write_repr" method that writes a representation
of the data to a file-like object. This allows us to stop the traversal by
having the file-like object raise an exception if it gets too much data.
With both "proxyval" and "write_repr" we keep track of the set of all addresses
visited so far in the traversal, to avoid infinite recursion due to cycles in
the graph of object references.
We try to defer gdb.lookup_type() invocations for python types until as late as
possible: for a dynamically linked python binary, when the process starts in
the debugger, the libpython.so hasn't been dynamically loaded yet, so none of
the type names are known to the debugger.
The module also extends gdb with some python-specific commands.
'''
from __future__ import with_statement
import os
import re
import sys
import struct
import locale
import atexit
import warnings
import tempfile
import textwrap
import itertools
import gdb
if sys.version_info[0] < 3:
# I think this is the only way to fix this bug :'(
# http://sourceware.org/bugzilla/show_bug.cgi?id=12285
out, err = sys.stdout, sys.stderr
reload(sys).setdefaultencoding('UTF-8')
sys.stdout = out
sys.stderr = err
# Look up the gdb.Type for some standard types:
_type_char_ptr = gdb.lookup_type('char').pointer() # char*
_type_unsigned_char_ptr = gdb.lookup_type('unsigned char').pointer()
_type_void_ptr = gdb.lookup_type('void').pointer() # void*
SIZEOF_VOID_P = _type_void_ptr.sizeof
Py_TPFLAGS_HEAPTYPE = (1L << 9)
Py_TPFLAGS_INT_SUBCLASS = (1L << 23)
Py_TPFLAGS_LONG_SUBCLASS = (1L << 24)
Py_TPFLAGS_LIST_SUBCLASS = (1L << 25)
Py_TPFLAGS_TUPLE_SUBCLASS = (1L << 26)
Py_TPFLAGS_STRING_SUBCLASS = (1L << 27)
Py_TPFLAGS_BYTES_SUBCLASS = (1L << 27)
Py_TPFLAGS_UNICODE_SUBCLASS = (1L << 28)
Py_TPFLAGS_DICT_SUBCLASS = (1L << 29)
Py_TPFLAGS_BASE_EXC_SUBCLASS = (1L << 30)
Py_TPFLAGS_TYPE_SUBCLASS = (1L << 31)
MAX_OUTPUT_LEN = 1024
hexdigits = "0123456789abcdef"
ENCODING = locale.getpreferredencoding()
class NullPyObjectPtr(RuntimeError):
pass
def safety_limit(val):
    # Given an integer value from the process being debugged, limit it to some
# safety threshold so that arbitrary breakage within said process doesn't
# break the gdb process too much (e.g. sizes of iterations, sizes of lists)
return min(val, 1000)
def safe_range(val):
# As per range, but don't trust the value too much: cap it to a safety
# threshold in case the data was corrupted
return xrange(safety_limit(val))
def write_unicode(file, text):
# Write a byte or unicode string to file. Unicode strings are encoded to
# ENCODING encoding with 'backslashreplace' error handler to avoid
# UnicodeEncodeError.
if isinstance(text, unicode):
text = text.encode(ENCODING, 'backslashreplace')
file.write(text)
def os_fsencode(filename):
if not isinstance(filename, unicode):
return filename
encoding = sys.getfilesystemencoding()
if encoding == 'mbcs':
# mbcs doesn't support surrogateescape
return filename.encode(encoding)
encoded = []
for char in filename:
# surrogateescape error handler
if 0xDC80 <= ord(char) <= 0xDCFF:
byte = chr(ord(char) - 0xDC00)
else:
byte = char.encode(encoding)
encoded.append(byte)
return ''.join(encoded)
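# For example (illustrative): a lone surrogate u'\udcff' produced by
# surrogateescape decoding maps back to the raw byte '\xff', while ordinary
# characters are simply encoded with the filesystem encoding.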
class StringTruncated(RuntimeError):
pass
class TruncatedStringIO(object):
'''Similar to cStringIO, but can truncate the output by raising a
StringTruncated exception'''
def __init__(self, maxlen=None):
self._val = ''
self.maxlen = maxlen
def write(self, data):
if self.maxlen:
if len(data) + len(self._val) > self.maxlen:
# Truncation:
self._val += data[0:self.maxlen - len(self._val)]
raise StringTruncated()
self._val += data
def getvalue(self):
return self._val
# pretty printer lookup
all_pretty_typenames = set()
class PrettyPrinterTrackerMeta(type):
def __init__(self, name, bases, dict):
super(PrettyPrinterTrackerMeta, self).__init__(name, bases, dict)
all_pretty_typenames.add(self._typename)
class PyObjectPtr(object):
"""
    Class wrapping a gdb.Value that's either a (PyObject*) within the
inferior process, or some subclass pointer e.g. (PyStringObject*)
There will be a subclass for every refined PyObject type that we care
about.
Note that at every stage the underlying pointer could be NULL, point
to corrupt data, etc; this is the debugger, after all.
"""
__metaclass__ = PrettyPrinterTrackerMeta
_typename = 'PyObject'
def __init__(self, gdbval, cast_to=None):
if cast_to:
self._gdbval = gdbval.cast(cast_to)
else:
self._gdbval = gdbval
def field(self, name):
'''
Get the gdb.Value for the given field within the PyObject, coping with
some python 2 versus python 3 differences.
Various libpython types are defined using the "PyObject_HEAD" and
"PyObject_VAR_HEAD" macros.
        In Python 2, these are defined so that "ob_type" and (for a var
        object) "ob_size" are fields of the type in question.
        In Python 3, this is defined as an embedded PyVarObject type thus:
          PyVarObject ob_base;
        so that the "ob_size" field is located inside the "ob_base" field, and
the "ob_type" is most easily accessed by casting back to a (PyObject*).
'''
if self.is_null():
raise NullPyObjectPtr(self)
if name == 'ob_type':
pyo_ptr = self._gdbval.cast(PyObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
if name == 'ob_size':
pyo_ptr = self._gdbval.cast(PyVarObjectPtr.get_gdb_type())
return pyo_ptr.dereference()[name]
# General case: look it up inside the object:
return self._gdbval.dereference()[name]
def pyop_field(self, name):
'''
Get a PyObjectPtr for the given PyObject* field within this PyObject,
coping with some python 2 versus python 3 differences.
'''
return PyObjectPtr.from_pyobject_ptr(self.field(name))
def write_field_repr(self, name, out, visited):
'''
Extract the PyObject* field named "name", and write its representation
to file-like object "out"
'''
field_obj = self.pyop_field(name)
field_obj.write_repr(out, visited)
def get_truncated_repr(self, maxlen):
'''
Get a repr-like string for the data, but truncate it at "maxlen" bytes
(ending the object graph traversal as soon as you do)
'''
out = TruncatedStringIO(maxlen)
try:
self.write_repr(out, set())
except StringTruncated:
# Truncation occurred:
return out.getvalue() + '...(truncated)'
# No truncation occurred:
return out.getvalue()
def type(self):
return PyTypeObjectPtr(self.field('ob_type'))
def is_null(self):
return 0 == long(self._gdbval)
def is_optimized_out(self):
'''
Is the value of the underlying PyObject* visible to the debugger?
This can vary with the precise version of the compiler used to build
Python, and the precise version of gdb.
See e.g. https://bugzilla.redhat.com/show_bug.cgi?id=556975 with
PyEval_EvalFrameEx's "f"
'''
return self._gdbval.is_optimized_out
def safe_tp_name(self):
try:
return self.type().field('tp_name').string()
except NullPyObjectPtr:
# NULL tp_name?
return 'unknown'
except RuntimeError:
# Can't even read the object at all?
return 'unknown'
def proxyval(self, visited):
'''
Scrape a value from the inferior process, and try to represent it
within the gdb process, whilst (hopefully) avoiding crashes when
the remote data is corrupt.
Derived classes will override this.
For example, a PyIntObject* with ob_ival 42 in the inferior process
should result in an int(42) in this process.
visited: a set of all gdb.Value pyobject pointers already visited
whilst generating this value (to guard against infinite recursion when
visiting object graphs with loops). Analogous to Py_ReprEnter and
Py_ReprLeave
'''
class FakeRepr(object):
"""
Class representing a non-descript PyObject* value in the inferior
process for when we don't have a custom scraper, intended to have
a sane repr().
"""
def __init__(self, tp_name, address):
self.tp_name = tp_name
self.address = address
def __repr__(self):
# For the NULL pointer, we have no way of knowing a type, so
# special-case it as per
# http://bugs.python.org/issue8032#msg100882
if self.address == 0:
return '0x0'
return '<%s at remote 0x%x>' % (self.tp_name, self.address)
return FakeRepr(self.safe_tp_name(),
long(self._gdbval))
def write_repr(self, out, visited):
'''
Write a string representation of the value scraped from the inferior
process to "out", a file-like object.
'''
# Default implementation: generate a proxy value and write its repr
# However, this could involve a lot of work for complicated objects,
# so for derived classes we specialize this
return out.write(repr(self.proxyval(visited)))
@classmethod
def subclass_from_type(cls, t):
'''
Given a PyTypeObjectPtr instance wrapping a gdb.Value that's a
(PyTypeObject*), determine the corresponding subclass of PyObjectPtr
to use
Ideally, we would look up the symbols for the global types, but that
isn't working yet:
(gdb) python print gdb.lookup_symbol('PyList_Type')[0].value
Traceback (most recent call last):
File "<string>", line 1, in <module>
NotImplementedError: Symbol type not yet supported in Python scripts.
Error while executing Python code.
For now, we use tp_flags, after doing some string comparisons on the
tp_name for some special-cases that don't seem to be visible through
flags
'''
try:
tp_name = t.field('tp_name').string()
tp_flags = int(t.field('tp_flags'))
except RuntimeError:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
return cls
#print 'tp_flags = 0x%08x' % tp_flags
#print 'tp_name = %r' % tp_name
name_map = {'bool': PyBoolObjectPtr,
'classobj': PyClassObjectPtr,
'instance': PyInstanceObjectPtr,
'NoneType': PyNoneStructPtr,
'frame': PyFrameObjectPtr,
'set' : PySetObjectPtr,
'frozenset' : PySetObjectPtr,
'builtin_function_or_method' : PyCFunctionObjectPtr,
}
if tp_name in name_map:
return name_map[tp_name]
if tp_flags & (Py_TPFLAGS_HEAPTYPE|Py_TPFLAGS_TYPE_SUBCLASS):
return PyTypeObjectPtr
if tp_flags & Py_TPFLAGS_INT_SUBCLASS:
return PyIntObjectPtr
if tp_flags & Py_TPFLAGS_LONG_SUBCLASS:
return PyLongObjectPtr
if tp_flags & Py_TPFLAGS_LIST_SUBCLASS:
return PyListObjectPtr
if tp_flags & Py_TPFLAGS_TUPLE_SUBCLASS:
return PyTupleObjectPtr
if tp_flags & Py_TPFLAGS_STRING_SUBCLASS:
try:
gdb.lookup_type('PyBytesObject')
return PyBytesObjectPtr
except RuntimeError:
return PyStringObjectPtr
if tp_flags & Py_TPFLAGS_UNICODE_SUBCLASS:
return PyUnicodeObjectPtr
if tp_flags & Py_TPFLAGS_DICT_SUBCLASS:
return PyDictObjectPtr
if tp_flags & Py_TPFLAGS_BASE_EXC_SUBCLASS:
return PyBaseExceptionObjectPtr
# Use the base class:
return cls
@classmethod
def from_pyobject_ptr(cls, gdbval):
'''
Try to locate the appropriate derived class dynamically, and cast
the pointer accordingly.
'''
try:
p = PyObjectPtr(gdbval)
cls = cls.subclass_from_type(p.type())
return cls(gdbval, cast_to=cls.get_gdb_type())
except RuntimeError, exc:
# Handle any kind of error e.g. NULL ptrs by simply using the base
# class
pass
return cls(gdbval)
@classmethod
def get_gdb_type(cls):
return gdb.lookup_type(cls._typename).pointer()
def as_address(self):
return long(self._gdbval)
class PyVarObjectPtr(PyObjectPtr):
_typename = 'PyVarObject'
class ProxyAlreadyVisited(object):
'''
Placeholder proxy to use when protecting against infinite recursion due to
loops in the object graph.
Analogous to the values emitted by the users of Py_ReprEnter and Py_ReprLeave
'''
def __init__(self, rep):
self._rep = rep
def __repr__(self):
return self._rep
def _write_instance_repr(out, visited, name, pyop_attrdict, address):
'''Shared code for use by old-style and new-style classes:
write a representation to file-like object "out"'''
out.write('<')
out.write(name)
# Write dictionary of instance attributes:
if isinstance(pyop_attrdict, PyDictObjectPtr):
out.write('(')
first = True
for pyop_arg, pyop_val in pyop_attrdict.iteritems():
if not first:
out.write(', ')
first = False
out.write(pyop_arg.proxyval(visited))
out.write('=')
pyop_val.write_repr(out, visited)
out.write(')')
out.write(' at remote 0x%x>' % address)
class InstanceProxy(object):
def __init__(self, cl_name, attrdict, address):
self.cl_name = cl_name
self.attrdict = attrdict
self.address = address
def __repr__(self):
if isinstance(self.attrdict, dict):
kwargs = ', '.join(["%s=%r" % (arg, val)
for arg, val in self.attrdict.iteritems()])
return '<%s(%s) at remote 0x%x>' % (self.cl_name,
kwargs, self.address)
else:
return '<%s at remote 0x%x>' % (self.cl_name,
self.address)
def _PyObject_VAR_SIZE(typeobj, nitems):
return ( ( typeobj.field('tp_basicsize') +
nitems * typeobj.field('tp_itemsize') +
(SIZEOF_VOID_P - 1)
) & ~(SIZEOF_VOID_P - 1)
).cast(gdb.lookup_type('size_t'))
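# Worked example (illustrative): with tp_basicsize=16, tp_itemsize=4,
# nitems=3 and SIZEOF_VOID_P=8, the raw size 16 + 3*4 = 28 is rounded up to
# the next multiple of the pointer size: (28 + 7) & ~7 == 32.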
class PyTypeObjectPtr(PyObjectPtr):
_typename = 'PyTypeObject'
def get_attr_dict(self):
'''
Get the PyDictObject ptr representing the attribute dictionary
(or None if there's a problem)
'''
try:
typeobj = self.type()
dictoffset = int_from_int(typeobj.field('tp_dictoffset'))
if dictoffset != 0:
if dictoffset < 0:
type_PyVarObject_ptr = gdb.lookup_type('PyVarObject').pointer()
tsize = int_from_int(self._gdbval.cast(type_PyVarObject_ptr)['ob_size'])
if tsize < 0:
tsize = -tsize
size = _PyObject_VAR_SIZE(typeobj, tsize)
dictoffset += size
assert dictoffset > 0
assert dictoffset % SIZEOF_VOID_P == 0
dictptr = self._gdbval.cast(_type_char_ptr) + dictoffset
PyObjectPtrPtr = PyObjectPtr.get_gdb_type().pointer()
dictptr = dictptr.cast(PyObjectPtrPtr)
return PyObjectPtr.from_pyobject_ptr(dictptr.dereference())
except RuntimeError:
# Corrupt data somewhere; fail safe
pass
# Not found, or some kind of error:
return None
def proxyval(self, visited):
'''
Support for new-style classes.
Currently we just locate the dictionary using a transliteration to
python of _PyObject_GetDictPtr, ignoring descriptors
'''
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
pyop_attr_dict = self.get_attr_dict()
if pyop_attr_dict:
attr_dict = pyop_attr_dict.proxyval(visited)
else:
attr_dict = {}
tp_name = self.safe_tp_name()
# New-style class:
return InstanceProxy(tp_name, attr_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
try:
tp_name = self.field('tp_name').string()
except RuntimeError:
tp_name = 'unknown'
out.write('<type %s at remote 0x%x>' % (tp_name,
self.as_address()))
# pyop_attrdict = self.get_attr_dict()
# _write_instance_repr(out, visited,
# self.safe_tp_name(), pyop_attrdict, self.as_address())
class ProxyException(Exception):
def __init__(self, tp_name, args):
self.tp_name = tp_name
self.args = args
def __repr__(self):
return '%s%r' % (self.tp_name, self.args)
class PyBaseExceptionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBaseExceptionObject* i.e. an exception
within the process being debugged.
"""
_typename = 'PyBaseExceptionObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
arg_proxy = self.pyop_field('args').proxyval(visited)
return ProxyException(self.safe_tp_name(),
arg_proxy)
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write(self.safe_tp_name())
self.write_field_repr('args', out, visited)
class PyClassObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyClassObject* i.e. a <classobj>
instance within the process being debugged.
"""
_typename = 'PyClassObject'
class BuiltInFunctionProxy(object):
def __init__(self, ml_name):
self.ml_name = ml_name
def __repr__(self):
return "<built-in function %s>" % self.ml_name
class BuiltInMethodProxy(object):
def __init__(self, ml_name, pyop_m_self):
self.ml_name = ml_name
self.pyop_m_self = pyop_m_self
def __repr__(self):
return ('<built-in method %s of %s object at remote 0x%x>'
% (self.ml_name,
self.pyop_m_self.safe_tp_name(),
self.pyop_m_self.as_address())
)
class PyCFunctionObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCFunctionObject*
(see Include/methodobject.h and Objects/methodobject.c)
"""
_typename = 'PyCFunctionObject'
def proxyval(self, visited):
m_ml = self.field('m_ml') # m_ml is a (PyMethodDef*)
ml_name = m_ml['ml_name'].string()
pyop_m_self = self.pyop_field('m_self')
if pyop_m_self.is_null():
return BuiltInFunctionProxy(ml_name)
else:
return BuiltInMethodProxy(ml_name, pyop_m_self)
class PyCodeObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyCodeObject* i.e. a <code> instance
within the process being debugged.
"""
_typename = 'PyCodeObject'
def addr2line(self, addrq):
'''
Get the line number for a given bytecode offset
Analogous to PyCode_Addr2Line; translated from pseudocode in
Objects/lnotab_notes.txt
'''
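        # Worked example (illustrative): with co_firstlineno=1 and lnotab
        # pairs [(6, 1), (8, 2)], bytecode offset 5 maps to line 1, while
        # offset 6 maps to line 2.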
co_lnotab = self.pyop_field('co_lnotab').proxyval(set())
# Initialize lineno to co_firstlineno as per PyCode_Addr2Line
# not 0, as lnotab_notes.txt has it:
lineno = int_from_int(self.field('co_firstlineno'))
addr = 0
for addr_incr, line_incr in zip(co_lnotab[::2], co_lnotab[1::2]):
addr += ord(addr_incr)
if addr > addrq:
return lineno
lineno += ord(line_incr)
return lineno
class PyDictObjectPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyDictObject* i.e. a dict instance
within the process being debugged.
"""
_typename = 'PyDictObject'
def iteritems(self):
'''
Yields a sequence of (PyObjectPtr key, PyObjectPtr value) pairs,
        analogous to dict.iteritems()
'''
for i in safe_range(self.field('ma_mask') + 1):
ep = self.field('ma_table') + i
pyop_value = PyObjectPtr.from_pyobject_ptr(ep['me_value'])
if not pyop_value.is_null():
pyop_key = PyObjectPtr.from_pyobject_ptr(ep['me_key'])
yield (pyop_key, pyop_value)
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('{...}')
visited.add(self.as_address())
result = {}
for pyop_key, pyop_value in self.iteritems():
proxy_key = pyop_key.proxyval(visited)
proxy_value = pyop_value.proxyval(visited)
result[proxy_key] = proxy_value
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('{...}')
return
visited.add(self.as_address())
out.write('{')
first = True
for pyop_key, pyop_value in self.iteritems():
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write(': ')
pyop_value.write_repr(out, visited)
out.write('}')
class PyInstanceObjectPtr(PyObjectPtr):
_typename = 'PyInstanceObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('<...>')
visited.add(self.as_address())
# Get name of class:
in_class = self.pyop_field('in_class')
cl_name = in_class.pyop_field('cl_name').proxyval(visited)
# Get dictionary of instance attributes:
in_dict = self.pyop_field('in_dict').proxyval(visited)
# Old-style class:
return InstanceProxy(cl_name, in_dict, long(self._gdbval))
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('<...>')
return
visited.add(self.as_address())
# Old-style class:
# Get name of class:
in_class = self.pyop_field('in_class')
cl_name = in_class.pyop_field('cl_name').proxyval(visited)
# Get dictionary of instance attributes:
pyop_in_dict = self.pyop_field('in_dict')
_write_instance_repr(out, visited,
cl_name, pyop_in_dict, self.as_address())
class PyIntObjectPtr(PyObjectPtr):
_typename = 'PyIntObject'
def proxyval(self, visited):
result = int_from_int(self.field('ob_ival'))
return result
class PyListObjectPtr(PyObjectPtr):
_typename = 'PyListObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('[...]')
visited.add(self.as_address())
result = [PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))]
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('[...]')
return
visited.add(self.as_address())
out.write('[')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
out.write(']')
class PyLongObjectPtr(PyObjectPtr):
_typename = 'PyLongObject'
def proxyval(self, visited):
'''
        Python's Include/longintrepr.h has this declaration:
struct _longobject {
PyObject_VAR_HEAD
digit ob_digit[1];
};
with this description:
The absolute value of a number is equal to
SUM(for i=0 through abs(ob_size)-1) ob_digit[i] * 2**(SHIFT*i)
Negative numbers are represented with ob_size < 0;
zero is represented by ob_size == 0.
where SHIFT can be either:
#define PyLong_SHIFT 30
#define PyLong_SHIFT 15
'''
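        # Worked example (illustrative): with SHIFT=15, ob_size=2 and
        # ob_digit=[1, 2], the value is 1*2**0 + 2*2**15 == 65537.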
ob_size = long(self.field('ob_size'))
if ob_size == 0:
return 0L
ob_digit = self.field('ob_digit')
if gdb.lookup_type('digit').sizeof == 2:
SHIFT = 15L
else:
SHIFT = 30L
digits = [long(ob_digit[i]) * 2**(SHIFT*i)
for i in safe_range(abs(ob_size))]
result = sum(digits)
if ob_size < 0:
result = -result
return result
def write_repr(self, out, visited):
# Write this out as a Python 3 int literal, i.e. without the "L" suffix
proxy = self.proxyval(visited)
out.write("%s" % proxy)
class PyBoolObjectPtr(PyLongObjectPtr):
"""
Class wrapping a gdb.Value that's a PyBoolObject* i.e. one of the two
<bool> instances (Py_True/Py_False) within the process being debugged.
"""
_typename = 'PyBoolObject'
def proxyval(self, visited):
castto = gdb.lookup_type('PyLongObject').pointer()
self._gdbval = self._gdbval.cast(castto)
return bool(PyLongObjectPtr(self._gdbval).proxyval(visited))
class PyNoneStructPtr(PyObjectPtr):
"""
Class wrapping a gdb.Value that's a PyObject* pointing to the
singleton (we hope) _Py_NoneStruct with ob_type PyNone_Type
"""
_typename = 'PyObject'
def proxyval(self, visited):
return None
class PyFrameObjectPtr(PyObjectPtr):
_typename = 'PyFrameObject'
def __init__(self, gdbval, cast_to=None):
PyObjectPtr.__init__(self, gdbval, cast_to)
if not self.is_optimized_out():
self.co = PyCodeObjectPtr.from_pyobject_ptr(self.field('f_code'))
self.co_name = self.co.pyop_field('co_name')
self.co_filename = self.co.pyop_field('co_filename')
self.f_lineno = int_from_int(self.field('f_lineno'))
self.f_lasti = int_from_int(self.field('f_lasti'))
self.co_nlocals = int_from_int(self.co.field('co_nlocals'))
self.co_varnames = PyTupleObjectPtr.from_pyobject_ptr(self.co.field('co_varnames'))
def iter_locals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the local variables of this frame
'''
if self.is_optimized_out():
return
f_localsplus = self.field('f_localsplus')
for i in safe_range(self.co_nlocals):
pyop_value = PyObjectPtr.from_pyobject_ptr(f_localsplus[i])
if not pyop_value.is_null():
pyop_name = PyObjectPtr.from_pyobject_ptr(self.co_varnames[i])
yield (pyop_name, pyop_value)
def iter_globals(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the global variables of this frame
'''
if self.is_optimized_out():
return
pyop_globals = self.pyop_field('f_globals')
return pyop_globals.iteritems()
def iter_builtins(self):
'''
Yield a sequence of (name,value) pairs of PyObjectPtr instances, for
the builtin variables
'''
if self.is_optimized_out():
return
pyop_builtins = self.pyop_field('f_builtins')
return pyop_builtins.iteritems()
def get_var_by_name(self, name):
'''
Look for the named local variable, returning a (PyObjectPtr, scope) pair
where scope is a string 'local', 'global', 'builtin'
If not found, return (None, None)
'''
for pyop_name, pyop_value in self.iter_locals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'local'
for pyop_name, pyop_value in self.iter_globals():
if name == pyop_name.proxyval(set()):
return pyop_value, 'global'
for pyop_name, pyop_value in self.iter_builtins():
if name == pyop_name.proxyval(set()):
return pyop_value, 'builtin'
return None, None
def filename(self):
'''Get the path of the current Python source file, as a string'''
if self.is_optimized_out():
return '(frame information optimized out)'
return self.co_filename.proxyval(set())
def current_line_num(self):
'''Get current line number as an integer (1-based)
Translated from PyFrame_GetLineNumber and PyCode_Addr2Line
See Objects/lnotab_notes.txt
'''
if self.is_optimized_out():
return None
f_trace = self.field('f_trace')
if long(f_trace) != 0:
# we have a non-NULL f_trace:
return self.f_lineno
else:
#try:
return self.co.addr2line(self.f_lasti)
#except ValueError:
# return self.f_lineno
def current_line(self):
'''Get the text of the current source line as a string, with a trailing
newline character'''
if self.is_optimized_out():
return '(frame information optimized out)'
filename = self.filename()
with open(os_fsencode(filename), 'r') as f:
all_lines = f.readlines()
# Convert from 1-based current_line_num to 0-based list offset:
return all_lines[self.current_line_num()-1]
def write_repr(self, out, visited):
if self.is_optimized_out():
out.write('(frame information optimized out)')
return
out.write('Frame 0x%x, for file %s, line %i, in %s ('
% (self.as_address(),
self.co_filename.proxyval(visited),
self.current_line_num(),
self.co_name.proxyval(visited)))
first = True
for pyop_name, pyop_value in self.iter_locals():
if not first:
out.write(', ')
first = False
out.write(pyop_name.proxyval(visited))
out.write('=')
pyop_value.write_repr(out, visited)
out.write(')')
class PySetObjectPtr(PyObjectPtr):
_typename = 'PySetObject'
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('%s(...)' % self.safe_tp_name())
visited.add(self.as_address())
members = []
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
key_proxy = PyObjectPtr.from_pyobject_ptr(key).proxyval(visited)
if key_proxy != '<dummy key>':
members.append(key_proxy)
if self.safe_tp_name() == 'frozenset':
return frozenset(members)
else:
return set(members)
def write_repr(self, out, visited):
# Emulate Python 3's set_repr
tp_name = self.safe_tp_name()
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
# Python 3's set_repr special-cases the empty set:
if not self.field('used'):
out.write(tp_name)
out.write('()')
return
# Python 3 uses {} for set literals:
if tp_name != 'set':
out.write(tp_name)
out.write('(')
out.write('{')
first = True
table = self.field('table')
for i in safe_range(self.field('mask')+1):
setentry = table[i]
key = setentry['key']
if key != 0:
pyop_key = PyObjectPtr.from_pyobject_ptr(key)
key_proxy = pyop_key.proxyval(visited) # FIXME!
if key_proxy != '<dummy key>':
if not first:
out.write(', ')
first = False
pyop_key.write_repr(out, visited)
out.write('}')
if tp_name != 'set':
out.write(')')
class PyBytesObjectPtr(PyObjectPtr):
_typename = 'PyBytesObject'
def __str__(self):
field_ob_size = self.field('ob_size')
field_ob_sval = self.field('ob_sval')
return ''.join(struct.pack('b', field_ob_sval[i])
for i in safe_range(field_ob_size))
def proxyval(self, visited):
return str(self)
def write_repr(self, out, visited, py3=True):
# Write this out as a Python 3 bytes literal, i.e. with a "b" prefix
# Get a PyStringObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Objects/bytesobject.c:PyBytes_Repr
# to Python 2 code:
quote = "'"
if "'" in proxy and not '"' in proxy:
quote = '"'
if py3:
out.write('b')
out.write(quote)
for byte in proxy:
if byte == quote or byte == '\\':
out.write('\\')
out.write(byte)
elif byte == '\t':
out.write('\\t')
elif byte == '\n':
out.write('\\n')
elif byte == '\r':
out.write('\\r')
elif byte < ' ' or ord(byte) >= 0x7f:
out.write('\\x')
out.write(hexdigits[(ord(byte) & 0xf0) >> 4])
out.write(hexdigits[ord(byte) & 0xf])
else:
out.write(byte)
out.write(quote)
class PyStringObjectPtr(PyBytesObjectPtr):
_typename = 'PyStringObject'
def write_repr(self, out, visited):
return super(PyStringObjectPtr, self).write_repr(out, visited, py3=False)
class PyTupleObjectPtr(PyObjectPtr):
_typename = 'PyTupleObject'
def __getitem__(self, i):
# Get the gdb.Value for the (PyObject*) with the given index:
field_ob_item = self.field('ob_item')
return field_ob_item[i]
def proxyval(self, visited):
# Guard against infinite loops:
if self.as_address() in visited:
return ProxyAlreadyVisited('(...)')
visited.add(self.as_address())
result = tuple([PyObjectPtr.from_pyobject_ptr(self[i]).proxyval(visited)
for i in safe_range(int_from_int(self.field('ob_size')))])
return result
def write_repr(self, out, visited):
# Guard against infinite loops:
if self.as_address() in visited:
out.write('(...)')
return
visited.add(self.as_address())
out.write('(')
for i in safe_range(int_from_int(self.field('ob_size'))):
if i > 0:
out.write(', ')
element = PyObjectPtr.from_pyobject_ptr(self[i])
element.write_repr(out, visited)
if self.field('ob_size') == 1:
out.write(',)')
else:
out.write(')')
def _unichr_is_printable(char):
# Logic adapted from Python 3's Tools/unicode/makeunicodedata.py
if char == u" ":
return True
import unicodedata
return unicodedata.category(char) not in ("C", "Z")
if sys.maxunicode >= 0x10000:
_unichr = unichr
else:
# Needed for proper surrogate support if sizeof(Py_UNICODE) is 2 in gdb
def _unichr(x):
if x < 0x10000:
return unichr(x)
x -= 0x10000
ch1 = 0xD800 | (x >> 10)
ch2 = 0xDC00 | (x & 0x3FF)
return unichr(ch1) + unichr(ch2)
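# Worked example (illustrative): _unichr(0x1F600) on a narrow build returns
# the surrogate pair u'\ud83d\ude00', since 0x1F600 - 0x10000 == 0xF600,
# 0xD800 | (0xF600 >> 10) == 0xD83D and 0xDC00 | (0xF600 & 0x3FF) == 0xDE00.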
class PyUnicodeObjectPtr(PyObjectPtr):
_typename = 'PyUnicodeObject'
def char_width(self):
_type_Py_UNICODE = gdb.lookup_type('Py_UNICODE')
return _type_Py_UNICODE.sizeof
def proxyval(self, visited):
# From unicodeobject.h:
# Py_ssize_t length; /* Length of raw Unicode data in buffer */
# Py_UNICODE *str; /* Raw Unicode buffer */
field_length = long(self.field('length'))
field_str = self.field('str')
# Gather a list of ints from the Py_UNICODE array; these are either
# UCS-2 or UCS-4 code points:
if self.char_width() > 2:
Py_UNICODEs = [int(field_str[i]) for i in safe_range(field_length)]
else:
# A more elaborate routine if sizeof(Py_UNICODE) is 2 in the
# inferior process: we must join surrogate pairs.
Py_UNICODEs = []
i = 0
limit = safety_limit(field_length)
while i < limit:
ucs = int(field_str[i])
i += 1
if ucs < 0xD800 or ucs >= 0xDC00 or i == field_length:
Py_UNICODEs.append(ucs)
continue
# This could be a surrogate pair.
ucs2 = int(field_str[i])
if ucs2 < 0xDC00 or ucs2 > 0xDFFF:
continue
code = (ucs & 0x03FF) << 10
code |= ucs2 & 0x03FF
code += 0x00010000
Py_UNICODEs.append(code)
i += 1
# Convert the int code points to unicode characters, and generate a
# local unicode instance.
# This splits surrogate pairs if sizeof(Py_UNICODE) is 2 here (in gdb).
result = u''.join([_unichr(ucs) for ucs in Py_UNICODEs])
return result
def write_repr(self, out, visited):
# Get a PyUnicodeObject* within the Python 2 gdb process:
proxy = self.proxyval(visited)
# Transliteration of Python 3's Object/unicodeobject.c:unicode_repr
# to Python 2:
try:
gdb.parse_and_eval('PyString_Type')
except RuntimeError:
# Python 3, don't write 'u' as prefix
pass
else:
# Python 2, write the 'u'
out.write('u')
if "'" in proxy and '"' not in proxy:
quote = '"'
else:
quote = "'"
out.write(quote)
i = 0
while i < len(proxy):
ch = proxy[i]
i += 1
# Escape quotes and backslashes
if ch == quote or ch == '\\':
out.write('\\')
out.write(ch)
# Map special whitespace to '\t', \n', '\r'
elif ch == '\t':
out.write('\\t')
elif ch == '\n':
out.write('\\n')
elif ch == '\r':
out.write('\\r')
# Map non-printable US ASCII to '\xhh' */
elif ch < ' ' or ch == 0x7F:
out.write('\\x')
out.write(hexdigits[(ord(ch) >> 4) & 0x000F])
out.write(hexdigits[ord(ch) & 0x000F])
# Copy ASCII characters as-is
elif ord(ch) < 0x7F:
out.write(ch)
# Non-ASCII characters
else:
ucs = ch
ch2 = None
if sys.maxunicode < 0x10000:
# If sizeof(Py_UNICODE) is 2 here (in gdb), join
# surrogate pairs before calling _unichr_is_printable.
if (i < len(proxy)
and 0xD800 <= ord(ch) < 0xDC00 \
and 0xDC00 <= ord(proxy[i]) <= 0xDFFF):
ch2 = proxy[i]
ucs = ch + ch2
i += 1
                # Unfortunately, Python 2's unicode type doesn't seem
# to expose the "isprintable" method
printable = _unichr_is_printable(ucs)
if printable:
try:
ucs.encode(ENCODING)
except UnicodeEncodeError:
printable = False
# Map Unicode whitespace and control characters
# (categories Z* and C* except ASCII space)
if not printable:
if ch2 is not None:
# Match Python 3's representation of non-printable
# wide characters.
code = (ord(ch) & 0x03FF) << 10
code |= ord(ch2) & 0x03FF
code += 0x00010000
else:
code = ord(ucs)
# Map 8-bit characters to '\\xhh'
if code <= 0xff:
out.write('\\x')
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
# Map 21-bit characters to '\U00xxxxxx'
elif code >= 0x10000:
out.write('\\U')
out.write(hexdigits[(code >> 28) & 0x0000000F])
out.write(hexdigits[(code >> 24) & 0x0000000F])
out.write(hexdigits[(code >> 20) & 0x0000000F])
out.write(hexdigits[(code >> 16) & 0x0000000F])
out.write(hexdigits[(code >> 12) & 0x0000000F])
out.write(hexdigits[(code >> 8) & 0x0000000F])
out.write(hexdigits[(code >> 4) & 0x0000000F])
out.write(hexdigits[code & 0x0000000F])
# Map 16-bit characters to '\uxxxx'
else:
out.write('\\u')
out.write(hexdigits[(code >> 12) & 0x000F])
out.write(hexdigits[(code >> 8) & 0x000F])
out.write(hexdigits[(code >> 4) & 0x000F])
out.write(hexdigits[code & 0x000F])
else:
# Copy characters as-is
out.write(ch)
if ch2 is not None:
out.write(ch2)
out.write(quote)
def __unicode__(self):
return self.proxyval(set())
def __str__(self):
# In Python 3, everything is unicode (including attributes of e.g.
# code objects, such as function names). The Python 2 debugger code
        # uses PyUnicodeObjectPtr objects to format strings etc, whereas with a
# Python 2 debuggee we'd get PyStringObjectPtr instances with __str__.
# Be compatible with that.
return unicode(self).encode('UTF-8')
def int_from_int(gdbval):
return int(str(gdbval))
def stringify(val):
# TODO: repr() puts everything on one line; pformat can be nicer, but
# can lead to v.long results; this function isolates the choice
if True:
return repr(val)
else:
from pprint import pformat
return pformat(val)
class PyObjectPtrPrinter:
"Prints a (PyObject*)"
def __init__ (self, gdbval):
self.gdbval = gdbval
def to_string (self):
pyop = PyObjectPtr.from_pyobject_ptr(self.gdbval)
if True:
return pyop.get_truncated_repr(MAX_OUTPUT_LEN)
else:
# Generate full proxy value then stringify it.
# Doing so could be expensive
proxyval = pyop.proxyval(set())
return stringify(proxyval)
def pretty_printer_lookup(gdbval):
type = gdbval.type.unqualified()
if type.code == gdb.TYPE_CODE_PTR:
type = type.target().unqualified()
if str(type) in all_pretty_typenames:
return PyObjectPtrPrinter(gdbval)
"""
During development, I've been manually invoking the code in this way:
(gdb) python
import sys
sys.path.append('/home/david/coding/python-gdb')
import libpython
end
then reloading it after each edit like this:
(gdb) python reload(libpython)
The following code should ensure that the prettyprinter is registered
if the code is autoloaded by gdb when visiting libpython.so, provided
that this python file is installed to the same path as the library (or its
.debug file) plus a "-gdb.py" suffix, e.g:
/usr/lib/libpython2.6.so.1.0-gdb.py
/usr/lib/debug/usr/lib/libpython2.6.so.1.0.debug-gdb.py
"""
def register (obj):
    if obj is None:
obj = gdb
# Wire up the pretty-printer
obj.pretty_printers.append(pretty_printer_lookup)
register (gdb.current_objfile ())
# Unfortunately, the exact API exposed by the gdb module varies somewhat
# from build to build
# See http://bugs.python.org/issue8279?#msg102276
class Frame(object):
'''
Wrapper for gdb.Frame, adding various methods
'''
def __init__(self, gdbframe):
self._gdbframe = gdbframe
def older(self):
older = self._gdbframe.older()
if older:
return Frame(older)
else:
return None
def newer(self):
newer = self._gdbframe.newer()
if newer:
return Frame(newer)
else:
return None
def select(self):
'''If supported, select this frame and return True; return False if unsupported
Not all builds have a gdb.Frame.select method; seems to be present on Fedora 12
onwards, but absent on Ubuntu buildbot'''
if not hasattr(self._gdbframe, 'select'):
print ('Unable to select frame: '
'this build of gdb does not expose a gdb.Frame.select method')
return False
self._gdbframe.select()
return True
def get_index(self):
'''Calculate index of frame, starting at 0 for the newest frame within
this thread'''
index = 0
# Go down until you reach the newest frame:
iter_frame = self
while iter_frame.newer():
index += 1
iter_frame = iter_frame.newer()
return index
def is_evalframeex(self):
'''Is this a PyEval_EvalFrameEx frame?'''
if self._gdbframe.name() == 'PyEval_EvalFrameEx':
'''
I believe we also need to filter on the inline
struct frame_id.inline_depth, only regarding frames with
an inline depth of 0 as actually being this function
So we reject those with type gdb.INLINE_FRAME
'''
if self._gdbframe.type() == gdb.NORMAL_FRAME:
# We have a PyEval_EvalFrameEx frame:
return True
return False
def read_var(self, varname):
"""
read_var with respect to code blocks (gdbframe.read_var works with
respect to the most recent block)
        Note: this function appears to be unreliable, as it sometimes reads
        variables from other frames.
"""
block = self._gdbframe.block()
var = None
while block and var is None:
try:
var = self._gdbframe.read_var(varname, block)
except ValueError:
pass
block = block.superblock
return var
def get_pyop(self):
try:
# self.read_var does not always work properly, so select our frame
# and restore the previously selected frame
selected_frame = gdb.selected_frame()
self._gdbframe.select()
f = gdb.parse_and_eval('f')
selected_frame.select()
except RuntimeError:
return None
else:
return PyFrameObjectPtr.from_pyobject_ptr(f)
@classmethod
def get_selected_frame(cls):
_gdbframe = gdb.selected_frame()
if _gdbframe:
return Frame(_gdbframe)
return None
@classmethod
def get_selected_python_frame(cls):
'''Try to obtain the Frame for the python code in the selected frame,
or None'''
frame = cls.get_selected_frame()
while frame:
if frame.is_evalframeex():
return frame
frame = frame.older()
# Not found:
return None
def print_summary(self):
if self.is_evalframeex():
pyop = self.get_pyop()
if pyop:
line = pyop.get_truncated_repr(MAX_OUTPUT_LEN)
write_unicode(sys.stdout, '#%i %s\n' % (self.get_index(), line))
sys.stdout.write(pyop.current_line())
else:
sys.stdout.write('#%i (unable to read python frame information)\n' % self.get_index())
else:
sys.stdout.write('#%i\n' % self.get_index())
class PyList(gdb.Command):
'''List the current Python source code, if any
Use
py-list START
to list at a different line number within the python source.
Use
py-list START, END
to list a specific range of lines within the python source.
'''
def __init__(self):
gdb.Command.__init__ (self,
"py-list",
gdb.COMMAND_FILES,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
import re
start = None
end = None
m = re.match(r'\s*(\d+)\s*', args)
if m:
start = int(m.group(0))
end = start + 10
m = re.match(r'\s*(\d+)\s*,\s*(\d+)\s*', args)
if m:
start, end = map(int, m.groups())
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop = frame.get_pyop()
if not pyop:
print 'Unable to read information on python frame'
return
filename = pyop.filename()
lineno = pyop.current_line_num()
if start is None:
start = lineno - 5
end = lineno + 5
if start<1:
start = 1
with open(os_fsencode(filename), 'r') as f:
all_lines = f.readlines()
# start and end are 1-based, all_lines is 0-based;
# so [start-1:end] as a python slice gives us [start, end] as a
# closed interval
for i, line in enumerate(all_lines[start-1:end]):
linestr = str(i+start)
# Highlight current line:
if i + start == lineno:
linestr = '>' + linestr
sys.stdout.write('%4s %s' % (linestr, line))
# ...and register the command:
PyList()
def move_in_stack(move_up):
'''Move up or down the stack (for the py-up/py-down command)'''
frame = Frame.get_selected_python_frame()
while frame:
if move_up:
iter_frame = frame.older()
else:
iter_frame = frame.newer()
if not iter_frame:
break
if iter_frame.is_evalframeex():
# Result:
if iter_frame.select():
iter_frame.print_summary()
return
frame = iter_frame
if move_up:
print 'Unable to find an older python frame'
else:
print 'Unable to find a newer python frame'
class PyUp(gdb.Command):
'Select and print the python stack frame that called this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-up",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=True)
class PyDown(gdb.Command):
'Select and print the python stack frame called by this one (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-down",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
move_in_stack(move_up=False)
# Not all builds of gdb have gdb.Frame.select
if hasattr(gdb.Frame, 'select'):
PyUp()
PyDown()
class PyBacktrace(gdb.Command):
'Display the current python frame and all the frames within its call stack (if any)'
def __init__(self):
gdb.Command.__init__ (self,
"py-bt",
gdb.COMMAND_STACK,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
frame = Frame.get_selected_python_frame()
while frame:
if frame.is_evalframeex():
frame.print_summary()
frame = frame.older()
PyBacktrace()
class PyPrint(gdb.Command):
'Look up the given python variable name, and print it'
def __init__(self):
gdb.Command.__init__ (self,
"py-print",
gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print 'Unable to read information on python frame'
return
pyop_var, scope = pyop_frame.get_var_by_name(name)
if pyop_var:
print ('%s %r = %s'
% (scope,
name,
pyop_var.get_truncated_repr(MAX_OUTPUT_LEN)))
else:
print '%r not found' % name
PyPrint()
class PyLocals(gdb.Command):
    'List all the locals in the currently selected Python frame'
def invoke(self, args, from_tty):
name = str(args)
frame = Frame.get_selected_python_frame()
if not frame:
print 'Unable to locate python frame'
return
pyop_frame = frame.get_pyop()
if not pyop_frame:
print 'Unable to read information on python frame'
return
namespace = self.get_namespace(pyop_frame)
namespace = [(name.proxyval(set()), val) for name, val in namespace]
if namespace:
name, val = max(namespace, key=lambda (name, val): len(name))
max_name_length = len(name)
for name, pyop_value in namespace:
value = pyop_value.get_truncated_repr(MAX_OUTPUT_LEN)
print ('%-*s = %s' % (max_name_length, name, value))
def get_namespace(self, pyop_frame):
return pyop_frame.iter_locals()
class PyGlobals(PyLocals):
    'List all the globals in the currently selected Python frame'
def get_namespace(self, pyop_frame):
return pyop_frame.iter_globals()
PyLocals("py-locals", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
PyGlobals("py-globals", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
class PyNameEquals(gdb.Function):
def _get_pycurframe_attr(self, attr):
frame = Frame(gdb.selected_frame())
if frame.is_evalframeex():
pyframe = frame.get_pyop()
if pyframe is None:
warnings.warn("Use a Python debug build, Python breakpoints "
"won't work otherwise.")
return None
return getattr(pyframe, attr).proxyval(set())
return None
def invoke(self, funcname):
attr = self._get_pycurframe_attr('co_name')
return attr is not None and attr == funcname.string()
PyNameEquals("pyname_equals")
class PyModEquals(PyNameEquals):
def invoke(self, modname):
attr = self._get_pycurframe_attr('co_filename')
if attr is not None:
filename, ext = os.path.splitext(os.path.basename(attr))
return filename == modname.string()
return False
PyModEquals("pymod_equals")
class PyBreak(gdb.Command):
"""
Set a Python breakpoint. Examples:
Break on any function or method named 'func' in module 'modname'
py-break modname.func
Break on any function or method named 'func'
py-break func
"""
def invoke(self, funcname, from_tty):
if '.' in funcname:
modname, dot, funcname = funcname.rpartition('.')
cond = '$pyname_equals("%s") && $pymod_equals("%s")' % (funcname,
modname)
else:
cond = '$pyname_equals("%s")' % funcname
gdb.execute('break PyEval_EvalFrameEx if ' + cond)
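# For example (illustrative), "py-break modname.func" executes:
#   break PyEval_EvalFrameEx if $pyname_equals("func") && $pymod_equals("modname")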
PyBreak("py-break", gdb.COMMAND_RUNNING, gdb.COMPLETE_NONE)
class _LoggingState(object):
"""
State that helps to provide a reentrant gdb.execute() function.
"""
def __init__(self):
self.fd, self.filename = tempfile.mkstemp()
self.file = os.fdopen(self.fd, 'r+')
_execute("set logging file %s" % self.filename)
self.file_position_stack = []
atexit.register(os.close, self.fd)
atexit.register(os.remove, self.filename)
def __enter__(self):
if not self.file_position_stack:
_execute("set logging redirect on")
_execute("set logging on")
_execute("set pagination off")
self.file_position_stack.append(os.fstat(self.fd).st_size)
return self
def getoutput(self):
gdb.flush()
self.file.seek(self.file_position_stack[-1])
result = self.file.read()
return result
def __exit__(self, exc_type, exc_val, tb):
startpos = self.file_position_stack.pop()
self.file.seek(startpos)
self.file.truncate()
if not self.file_position_stack:
_execute("set logging off")
_execute("set logging redirect off")
_execute("set pagination on")
def execute(command, from_tty=False, to_string=False):
"""
Replace gdb.execute() with this function and have it accept a 'to_string'
argument (new in 7.2). Have it properly capture stderr also. Ensure
reentrancy.
"""
if to_string:
with _logging_state as state:
_execute(command, from_tty)
return state.getoutput()
else:
_execute(command, from_tty)
_execute = gdb.execute
gdb.execute = execute
_logging_state = _LoggingState()
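# With the wrapper installed, command output can be captured as a string:
#   listing = gdb.execute('info breakpoints', to_string=True)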
def get_selected_inferior():
"""
Return the selected inferior in gdb.
"""
    # Woooh, another bug in gdb! Is there an end in sight?
    # http://sourceware.org/bugzilla/show_bug.cgi?id=12212
    # Work around it by always returning the first inferior; the
    # thread-matching code below is intentionally left (unreachable) for
    # when the bug is fixed.
    return gdb.inferiors()[0]
selected_thread = gdb.selected_thread()
for inferior in gdb.inferiors():
for thread in inferior.threads():
if thread == selected_thread:
return inferior
def source_gdb_script(script_contents, to_string=False):
"""
Source a gdb script with script_contents passed as a string. This is useful
to provide defines for py-step and py-next to make them repeatable (this is
not possible with gdb.execute()). See
http://sourceware.org/bugzilla/show_bug.cgi?id=12216
"""
fd, filename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
f.write(script_contents)
f.close()
gdb.execute("source %s" % filename, to_string=to_string)
os.remove(filename)
def register_defines():
source_gdb_script(textwrap.dedent("""\
define py-step
-py-step
end
define py-next
-py-next
end
document py-step
%s
end
document py-next
%s
end
""") % (PyStep.__doc__, PyNext.__doc__))
def stackdepth(frame):
"Tells the stackdepth of a gdb frame."
depth = 0
while frame:
frame = frame.older()
depth += 1
return depth
class ExecutionControlCommandBase(gdb.Command):
"""
Superclass for language specific execution control. Language specific
features should be implemented by lang_info using the LanguageInfo
interface. 'name' is the name of the command.
"""
def __init__(self, name, lang_info):
super(ExecutionControlCommandBase, self).__init__(
name, gdb.COMMAND_RUNNING, gdb.COMPLETE_NONE)
self.lang_info = lang_info
def install_breakpoints(self):
all_locations = itertools.chain(
self.lang_info.static_break_functions(),
self.lang_info.runtime_break_functions())
for location in all_locations:
result = gdb.execute('break %s' % location, to_string=True)
yield re.search(r'Breakpoint (\d+)', result).group(1)
def delete_breakpoints(self, breakpoint_list):
for bp in breakpoint_list:
gdb.execute("delete %s" % bp)
def filter_output(self, result):
reflags = re.MULTILINE
output_on_halt = [
(r'^Program received signal .*', reflags|re.DOTALL),
(r'.*[Ww]arning.*', 0),
(r'^Program exited .*', reflags),
]
output_always = [
# output when halting on a watchpoint
(r'^(Old|New) value = .*', reflags),
# output from the 'display' command
(r'^\d+: \w+ = .*', reflags),
]
def filter_output(regexes):
output = []
for regex, flags in regexes:
for match in re.finditer(regex, result, flags):
output.append(match.group(0))
return '\n'.join(output)
# Filter the return value output of the 'finish' command
match_finish = re.search(r'^Value returned is \$\d+ = (.*)', result,
re.MULTILINE)
if match_finish:
finish_output = 'Value returned: %s\n' % match_finish.group(1)
else:
finish_output = ''
return (filter_output(output_on_halt),
finish_output + filter_output(output_always))
def stopped(self):
return get_selected_inferior().pid == 0
def finish_executing(self, result):
"""
After doing some kind of code running in the inferior, print the line
of source code or the result of the last executed gdb command (passed
in as the `result` argument).
"""
output_on_halt, output_always = self.filter_output(result)
if self.stopped():
print output_always
print output_on_halt
else:
frame = gdb.selected_frame()
source_line = self.lang_info.get_source_line(frame)
if self.lang_info.is_relevant_function(frame):
raised_exception = self.lang_info.exc_info(frame)
if raised_exception:
print raised_exception
if source_line:
if output_always.rstrip():
print output_always.rstrip()
print source_line
else:
print result
def _finish(self):
"""
Execute until the function returns (or until something else makes it
stop)
"""
if gdb.selected_frame().older() is not None:
return gdb.execute('finish', to_string=True)
else:
# outermost frame, continue
return gdb.execute('cont', to_string=True)
def _finish_frame(self):
"""
Execute until the function returns to a relevant caller.
"""
while True:
result = self._finish()
try:
frame = gdb.selected_frame()
except RuntimeError:
break
hitbp = re.search(r'Breakpoint (\d+)', result)
is_relevant = self.lang_info.is_relevant_function(frame)
if hitbp or is_relevant or self.stopped():
break
return result
def finish(self, *args):
"Implements the finish command."
result = self._finish_frame()
self.finish_executing(result)
def step(self, stepinto, stepover_command='next'):
"""
Do a single step or step-over. Returns the result of the last gdb
command that made execution stop.
This implementation, for stepping, sets (conditional) breakpoints for
all functions that are deemed relevant. It then does a step over until
either something halts execution, or until the next line is reached.
If, however, stepover_command is given, it should be a string gdb
command that continues execution in some way. The idea is that the
caller has set a (conditional) breakpoint or watchpoint that can work
more efficiently than the step-over loop. For Python this means setting
a watchpoint for f->f_lasti, which means we can then subsequently
"finish" frames.
We want f->f_lasti instead of f->f_lineno, because the latter only
works properly with local trace functions, see
PyFrameObjectPtr.current_line_num and PyFrameObjectPtr.addr2line.
"""
if stepinto:
breakpoint_list = list(self.install_breakpoints())
beginframe = gdb.selected_frame()
if self.lang_info.is_relevant_function(beginframe):
# If we start in a relevant frame, initialize stuff properly. If
# we don't start in a relevant frame, the loop will halt
# immediately. So don't call self.lang_info.lineno() as it may
# raise for irrelevant frames.
beginline = self.lang_info.lineno(beginframe)
if not stepinto:
depth = stackdepth(beginframe)
newframe = beginframe
while True:
if self.lang_info.is_relevant_function(newframe):
result = gdb.execute(stepover_command, to_string=True)
else:
result = self._finish_frame()
if self.stopped():
break
newframe = gdb.selected_frame()
is_relevant_function = self.lang_info.is_relevant_function(newframe)
try:
framename = newframe.name()
except RuntimeError:
framename = None
m = re.search(r'Breakpoint (\d+)', result)
if m:
if is_relevant_function and m.group(1) in breakpoint_list:
# although we hit a breakpoint, we still need to check
# that the function, in case hit by a runtime breakpoint,
# is in the right context
break
if newframe != beginframe:
# new function
if not stepinto:
# see if we returned to the caller
newdepth = stackdepth(newframe)
is_relevant_function = (newdepth < depth and
is_relevant_function)
if is_relevant_function:
break
else:
# newframe equals beginframe, check for a difference in the
# line number
lineno = self.lang_info.lineno(newframe)
if lineno and lineno != beginline:
break
if stepinto:
self.delete_breakpoints(breakpoint_list)
self.finish_executing(result)
def run(self, args, from_tty):
self.finish_executing(gdb.execute('run ' + args, to_string=True))
def cont(self, *args):
self.finish_executing(gdb.execute('cont', to_string=True))
class LanguageInfo(object):
"""
This class defines the interface that ExecutionControlCommandBase needs to
provide language-specific execution control.
Classes that implement this interface should implement:
lineno(frame)
Tells the current line number (only called for a relevant frame).
If lineno is a false value it is not checked for a difference.
is_relevant_function(frame)
tells whether we care about frame 'frame'
get_source_line(frame)
get the line of source code for the current line (only called for a
relevant frame). If the source code cannot be retrieved this
function should return None
exc_info(frame) -- optional
tells whether an exception was raised, if so, it should return a
string representation of the exception value, None otherwise.
static_break_functions()
returns an iterable of function names that are considered relevant
        and should halt step-into execution. This is needed to implement
        step-into with reasonable performance.
runtime_break_functions() -- optional
list of functions that we should break into depending on the
context
"""
def exc_info(self, frame):
"See this class' docstring."
def runtime_break_functions(self):
"""
Implement this if the list of step-into functions depends on the
context.
"""
return ()
class PythonInfo(LanguageInfo):
def pyframe(self, frame):
pyframe = Frame(frame).get_pyop()
if pyframe:
return pyframe
else:
raise gdb.RuntimeError(
"Unable to find the Python frame, run your code with a debug "
"build (configure with --with-pydebug or compile with -g).")
def lineno(self, frame):
return self.pyframe(frame).current_line_num()
def is_relevant_function(self, frame):
return Frame(frame).is_evalframeex()
def get_source_line(self, frame):
try:
pyframe = self.pyframe(frame)
return '%4d %s' % (pyframe.current_line_num(),
pyframe.current_line().rstrip())
except IOError, e:
return None
def exc_info(self, frame):
try:
tstate = frame.read_var('tstate').dereference()
if gdb.parse_and_eval('tstate->frame == f'):
# tstate local variable initialized, check for an exception
inf_type = tstate['curexc_type']
inf_value = tstate['curexc_value']
if inf_type:
return 'An exception was raised: %s' % (inf_value,)
except (ValueError, RuntimeError), e:
            # Could not read the variable tstate or its memory; that's ok
pass
def static_break_functions(self):
yield 'PyEval_EvalFrameEx'
class PythonStepperMixin(object):
"""
Make this a mixin so CyStep can also inherit from this and use a
CythonCodeStepper at the same time.
"""
def python_step(self, stepinto):
"""
Set a watchpoint on the Python bytecode instruction pointer and try
to finish the frame
"""
output = gdb.execute('watch f->f_lasti', to_string=True)
watchpoint = int(re.search(r'[Ww]atchpoint (\d+):', output).group(1))
self.step(stepinto=stepinto, stepover_command='finish')
gdb.execute('delete %s' % watchpoint)
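    # A note on the gdb interaction assumed above: 'watch' typically reports
    # something like
    #     Hardware watchpoint 2: f->f_lasti
    # (or "Watchpoint 2: ..." for a software watchpoint), which is why the
    # regex accepts both spellings before extracting the watchpoint number.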
class PyStep(ExecutionControlCommandBase, PythonStepperMixin):
"Step through Python code."
stepinto = True
def invoke(self, args, from_tty):
self.python_step(stepinto=self.stepinto)
class PyNext(PyStep):
"Step-over Python code."
stepinto = False
class PyFinish(ExecutionControlCommandBase):
"Execute until function returns to a caller."
invoke = ExecutionControlCommandBase.finish
class PyRun(ExecutionControlCommandBase):
"Run the program."
invoke = ExecutionControlCommandBase.run
class PyCont(ExecutionControlCommandBase):
invoke = ExecutionControlCommandBase.cont
def _pointervalue(gdbval):
"""
    Return the value of the pointer as a Python int.
gdbval.type must be a pointer type
"""
# don't convert with int() as it will raise a RuntimeError
if gdbval.address is not None:
return long(gdbval.address)
else:
# the address attribute is None sometimes, in which case we can
# still convert the pointer to an int
return long(gdbval)
def pointervalue(gdbval):
pointer = _pointervalue(gdbval)
try:
if pointer < 0:
raise gdb.GdbError("Negative pointer value, presumably a bug "
"in gdb, aborting.")
except RuntimeError:
# work around yet another bug in gdb where you get random behaviour
# and tracebacks
pass
return pointer
def get_inferior_unicode_postfix():
try:
gdb.parse_and_eval('PyUnicode_FromEncodedObject')
except RuntimeError:
try:
gdb.parse_and_eval('PyUnicodeUCS2_FromEncodedObject')
except RuntimeError:
return 'UCS4'
else:
return 'UCS2'
else:
return ''
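# Illustration (typical build assumptions, not exhaustive): a narrow-unicode
# Python 2 inferior exports PyUnicodeUCS2_* symbols, so the postfix resolves
# to 'UCS2' and callers construct names like 'PyUnicodeUCS2_FromStringAndSize';
# on Python 3 the plain PyUnicode_* symbols exist and the postfix is ''.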
class PythonCodeExecutor(object):
Py_single_input = 256
Py_file_input = 257
Py_eval_input = 258
def malloc(self, size):
chunk = (gdb.parse_and_eval("(void *) malloc((size_t) %d)" % size))
pointer = pointervalue(chunk)
if pointer == 0:
raise gdb.GdbError("No memory could be allocated in the inferior.")
return pointer
def alloc_string(self, string):
pointer = self.malloc(len(string))
get_selected_inferior().write_memory(pointer, string)
return pointer
def alloc_pystring(self, string):
stringp = self.alloc_string(string)
PyString_FromStringAndSize = 'PyString_FromStringAndSize'
try:
gdb.parse_and_eval(PyString_FromStringAndSize)
except RuntimeError:
# Python 3
PyString_FromStringAndSize = ('PyUnicode%s_FromStringAndSize' %
(get_inferior_unicode_postfix(),))
try:
result = gdb.parse_and_eval(
'(PyObject *) %s((char *) %d, (size_t) %d)' % (
PyString_FromStringAndSize, stringp, len(string)))
finally:
self.free(stringp)
pointer = pointervalue(result)
if pointer == 0:
raise gdb.GdbError("Unable to allocate Python string in "
"the inferior.")
return pointer
def free(self, pointer):
gdb.parse_and_eval("free((void *) %d)" % pointer)
def incref(self, pointer):
"Increment the reference count of a Python object in the inferior."
gdb.parse_and_eval('Py_IncRef((PyObject *) %d)' % pointer)
def xdecref(self, pointer):
"Decrement the reference count of a Python object in the inferior."
# Py_DecRef is like Py_XDECREF, but a function. So we don't have
# to check for NULL. This should also decref all our allocated
# Python strings.
gdb.parse_and_eval('Py_DecRef((PyObject *) %d)' % pointer)
def evalcode(self, code, input_type, global_dict=None, local_dict=None):
"""
Evaluate python code `code` given as a string in the inferior and
return the result as a gdb.Value. Returns a new reference in the
inferior.
Of course, executing any code in the inferior may be dangerous and may
        leave the debuggee in an unsafe state or terminate it altogether.
"""
if '\0' in code:
raise gdb.GdbError("String contains NUL byte.")
code += '\0'
pointer = self.alloc_string(code)
globalsp = pointervalue(global_dict)
localsp = pointervalue(local_dict)
if globalsp == 0 or localsp == 0:
raise gdb.GdbError("Unable to obtain or create locals or globals.")
code = """
PyRun_String(
(char *) %(code)d,
(int) %(start)d,
                (PyObject *) %(globals)d,
(PyObject *) %(locals)d)
""" % dict(code=pointer, start=input_type,
globals=globalsp, locals=localsp)
with FetchAndRestoreError():
try:
pyobject_return_value = gdb.parse_and_eval(code)
finally:
self.free(pointer)
return pyobject_return_value
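# A minimal usage sketch for evalcode() (hypothetical gdb session, assuming a
# live inferior stopped inside the interpreter with valid globals/locals):
#
#   executor = PythonCodeExecutor()
#   globals_p = gdb.parse_and_eval('PyEval_GetGlobals()')
#   locals_p = gdb.parse_and_eval('PyEval_GetLocals()')
#   result = executor.evalcode('1 + 1', PythonCodeExecutor.Py_eval_input,
#                              globals_p, locals_p)
#   executor.xdecref(pointervalue(result))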
class FetchAndRestoreError(PythonCodeExecutor):
"""
Context manager that fetches the error indicator in the inferior and
restores it on exit.
"""
def __init__(self):
self.sizeof_PyObjectPtr = gdb.lookup_type('PyObject').pointer().sizeof
self.pointer = self.malloc(self.sizeof_PyObjectPtr * 3)
type = self.pointer
value = self.pointer + self.sizeof_PyObjectPtr
traceback = self.pointer + self.sizeof_PyObjectPtr * 2
self.errstate = type, value, traceback
def __enter__(self):
gdb.parse_and_eval("PyErr_Fetch(%d, %d, %d)" % self.errstate)
def __exit__(self, *args):
if gdb.parse_and_eval("(int) PyErr_Occurred()"):
gdb.parse_and_eval("PyErr_Print()")
pyerr_restore = ("PyErr_Restore("
"(PyObject *) *%d,"
"(PyObject *) *%d,"
"(PyObject *) *%d)")
try:
gdb.parse_and_eval(pyerr_restore % self.errstate)
finally:
self.free(self.pointer)
class FixGdbCommand(gdb.Command):
def __init__(self, command, actual_command):
super(FixGdbCommand, self).__init__(command, gdb.COMMAND_DATA,
gdb.COMPLETE_NONE)
self.actual_command = actual_command
def fix_gdb(self):
"""
        It seems that invoking either 'cy exec' or 'py-exec' works perfectly
        fine, but afterwards gdb's python API is entirely broken.
Maybe some uncleared exception value is still set?
sys.exc_clear() didn't help. A demonstration:
(gdb) cy exec 'hello'
'hello'
(gdb) python gdb.execute('cont')
RuntimeError: Cannot convert value to int.
Error while executing Python code.
(gdb) python gdb.execute('cont')
[15148 refs]
Program exited normally.
"""
warnings.filterwarnings('ignore', r'.*', RuntimeWarning,
re.escape(__name__))
try:
long(gdb.parse_and_eval("(void *) 0")) == 0
except RuntimeError:
pass
# warnings.resetwarnings()
def invoke(self, args, from_tty):
self.fix_gdb()
try:
gdb.execute('%s %s' % (self.actual_command, args))
except RuntimeError, e:
raise gdb.GdbError(str(e))
self.fix_gdb()
def _evalcode_python(executor, code, input_type):
"""
Execute Python code in the most recent stack frame.
"""
global_dict = gdb.parse_and_eval('PyEval_GetGlobals()')
local_dict = gdb.parse_and_eval('PyEval_GetLocals()')
if (pointervalue(global_dict) == 0 or pointervalue(local_dict) == 0):
raise gdb.GdbError("Unable to find the locals or globals of the "
"most recent Python function (relative to the "
"selected frame).")
return executor.evalcode(code, input_type, global_dict, local_dict)
class PyExec(gdb.Command):
def readcode(self, expr):
if expr:
return expr, PythonCodeExecutor.Py_single_input
else:
lines = []
while True:
try:
line = raw_input('>')
except EOFError:
break
else:
if line.rstrip() == 'end':
break
lines.append(line)
return '\n'.join(lines), PythonCodeExecutor.Py_file_input
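    # A hypothetical interactive session (prompt and 'end' terminator as
    # implemented above; the 'py-exec' name assumes the module-level
    # registration below):
    #
    #   (gdb) py-exec
    #   >import sys
    #   >print sys.version_info
    #   >end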
def invoke(self, expr, from_tty):
expr, input_type = self.readcode(expr)
executor = PythonCodeExecutor()
        executor.xdecref(_evalcode_python(executor, expr, input_type))
gdb.execute('set breakpoint pending on')
if hasattr(gdb, 'GdbError'):
# Wrap py-step and py-next in gdb defines to make them repeatable.
py_step = PyStep('-py-step', PythonInfo())
py_next = PyNext('-py-next', PythonInfo())
register_defines()
py_finish = PyFinish('py-finish', PythonInfo())
py_run = PyRun('py-run', PythonInfo())
py_cont = PyCont('py-cont', PythonInfo())
py_exec = FixGdbCommand('py-exec', '-py-exec')
_py_exec = PyExec("-py-exec", gdb.COMMAND_DATA, gdb.COMPLETE_NONE)
else:
warnings.warn("Use gdb 7.2 or higher to use the py-exec command.")
|
hevalberknevruz/oclapi | refs/heads/master | django-nonrel/ocl/orgs/search_indexes.py | 5 | from haystack import indexes
from oclapi.search_backends import SortOrFilterField
from oclapi.search_indexes import OCLSearchIndex
from orgs.models import Organization
__author__ = 'misternando'
class OrganizationIndex(OCLSearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
name = SortOrFilterField(model_attr='name', indexed=True, stored=True)
company = SortOrFilterField(model_attr='company', null=True, indexed=True, stored=True)
location = SortOrFilterField(model_attr='location', null=True, indexed=True, stored=True)
lastUpdate = indexes.DateTimeField(model_attr='updated_at', indexed=True, stored=True)
is_active = indexes.BooleanField(model_attr='is_active', indexed=True, stored=True)
def get_model(self):
return Organization
|
mattrobenolt/django | refs/heads/master | tests/migrations/test_migrations_squashed_complex/7_auto.py | 770 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [("migrations", "6_auto")]
operations = [
migrations.RunPython(migrations.RunPython.noop)
]
|
DestrinStorm/deep-learning | refs/heads/master | tv-script-generation/problem_unittests.py | 68 | import numpy as np
import tensorflow as tf
from tensorflow.contrib import rnn
def _print_success_message():
print('Tests Passed')
def test_create_lookup_tables(create_lookup_tables):
with tf.Graph().as_default():
test_text = '''
Moe_Szyslak Moe's Tavern Where the elite meet to drink
Bart_Simpson Eh yeah hello is Mike there Last name Rotch
Moe_Szyslak Hold on I'll check Mike Rotch Mike Rotch Hey has anybody seen Mike Rotch lately
Moe_Szyslak Listen you little puke One of these days I'm gonna catch you and I'm gonna carve my name on your back with an ice pick
Moe_Szyslak Whats the matter Homer You're not your normal effervescent self
Homer_Simpson I got my problems Moe Give me another one
Moe_Szyslak Homer hey you should not drink to forget your problems
Barney_Gumble Yeah you should only drink to enhance your social skills'''
test_text = test_text.lower()
test_text = test_text.split()
vocab_to_int, int_to_vocab = create_lookup_tables(test_text)
# Check types
assert isinstance(vocab_to_int, dict),\
'vocab_to_int is not a dictionary.'
assert isinstance(int_to_vocab, dict),\
'int_to_vocab is not a dictionary.'
# Compare lengths of dicts
assert len(vocab_to_int) == len(int_to_vocab),\
'Length of vocab_to_int and int_to_vocab don\'t match. ' \
'vocab_to_int is length {}. int_to_vocab is length {}'.format(len(vocab_to_int), len(int_to_vocab))
# Make sure the dicts have the same words
vocab_to_int_word_set = set(vocab_to_int.keys())
int_to_vocab_word_set = set(int_to_vocab.values())
assert not (vocab_to_int_word_set - int_to_vocab_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_set - int_to_vocab_word_set)
assert not (int_to_vocab_word_set - vocab_to_int_word_set),\
'vocab_to_int and int_to_vocab don\'t have the same words.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_set - vocab_to_int_word_set)
# Make sure the dicts have the same word ids
vocab_to_int_word_id_set = set(vocab_to_int.values())
int_to_vocab_word_id_set = set(int_to_vocab.keys())
assert not (vocab_to_int_word_id_set - int_to_vocab_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in vocab_to_int, but not in int_to_vocab'.format(vocab_to_int_word_id_set - int_to_vocab_word_id_set)
assert not (int_to_vocab_word_id_set - vocab_to_int_word_id_set),\
'vocab_to_int and int_to_vocab don\'t contain the same word ids.' \
'{} found in int_to_vocab, but not in vocab_to_int'.format(int_to_vocab_word_id_set - vocab_to_int_word_id_set)
# Make sure the dicts make the same lookup
        mismatches = [(word, id, id, int_to_vocab[id]) for word, id in vocab_to_int.items() if int_to_vocab[id] != word]
        assert not mismatches,\
            'Found {} mismatch(es). First mismatch: vocab_to_int[{}] = {} and int_to_vocab[{}] = {}'.format(
                len(mismatches),
                *mismatches[0])
assert len(vocab_to_int) > len(set(test_text))/2,\
'The length of vocab seems too small. Found a length of {}'.format(len(vocab_to_int))
_print_success_message()
def test_get_batches(get_batches):
with tf.Graph().as_default():
test_batch_size = 128
test_seq_length = 5
test_int_text = list(range(1000*test_seq_length))
batches = get_batches(test_int_text, test_batch_size, test_seq_length)
# Check type
assert isinstance(batches, np.ndarray),\
'Batches is not a Numpy array'
# Check shape
assert batches.shape == (7, 2, 128, 5),\
'Batches returned wrong shape. Found {}'.format(batches.shape)
for x in range(batches.shape[2]):
assert np.array_equal(batches[0,0,x], np.array(range(x * 35, x * 35 + batches.shape[3]))),\
'Batches returned wrong contents. For example, input sequence {} in the first batch was {}'.format(x, batches[0,0,x])
assert np.array_equal(batches[0,1,x], np.array(range(x * 35 + 1, x * 35 + 1 + batches.shape[3]))),\
'Batches returned wrong contents. For example, target sequence {} in the first batch was {}'.format(x, batches[0,1,x])
last_seq_target = (test_batch_size-1) * 35 + 31
last_seq = np.array(range(last_seq_target, last_seq_target+ batches.shape[3]))
last_seq[-1] = batches[0,0,0,0]
assert np.array_equal(batches[-1,1,-1], last_seq),\
'The last target of the last batch should be the first input of the first batch. Found {} but expected {}'.format(batches[-1,1,-1], last_seq)
_print_success_message()
def test_tokenize(token_lookup):
with tf.Graph().as_default():
symbols = set(['.', ',', '"', ';', '!', '?', '(', ')', '--', '\n'])
token_dict = token_lookup()
# Check type
assert isinstance(token_dict, dict), \
'Returned type is {}.'.format(type(token_dict))
# Check symbols
missing_symbols = symbols - set(token_dict.keys())
unknown_symbols = set(token_dict.keys()) - symbols
assert not missing_symbols, \
'Missing symbols: {}'.format(missing_symbols)
assert not unknown_symbols, \
'Unknown symbols: {}'.format(unknown_symbols)
# Check values type
bad_value_type = [type(val) for val in token_dict.values() if not isinstance(val, str)]
assert not bad_value_type,\
'Found token as {} type.'.format(bad_value_type[0])
# Check for spaces
key_has_spaces = [k for k in token_dict.keys() if ' ' in k]
val_has_spaces = [val for val in token_dict.values() if ' ' in val]
assert not key_has_spaces,\
'The key "{}" includes spaces. Remove spaces from keys and values'.format(key_has_spaces[0])
assert not val_has_spaces,\
'The value "{}" includes spaces. Remove spaces from keys and values'.format(val_has_spaces[0])
# Check for symbols in values
symbol_val = ()
for symbol in symbols:
for val in token_dict.values():
if symbol in val:
symbol_val = (symbol, val)
assert not symbol_val,\
'Don\'t use a symbol that will be replaced in your tokens. Found the symbol {} in value {}'.format(*symbol_val)
_print_success_message()
def test_get_inputs(get_inputs):
with tf.Graph().as_default():
input_data, targets, lr = get_inputs()
# Check type
assert input_data.op.type == 'Placeholder',\
'Input not a Placeholder.'
assert targets.op.type == 'Placeholder',\
'Targets not a Placeholder.'
assert lr.op.type == 'Placeholder',\
'Learning Rate not a Placeholder.'
# Check name
assert input_data.name == 'input:0',\
'Input has bad name. Found name {}'.format(input_data.name)
# Check rank
input_rank = 0 if input_data.get_shape() == None else len(input_data.get_shape())
targets_rank = 0 if targets.get_shape() == None else len(targets.get_shape())
lr_rank = 0 if lr.get_shape() == None else len(lr.get_shape())
assert input_rank == 2,\
'Input has wrong rank. Rank {} found.'.format(input_rank)
assert targets_rank == 2,\
'Targets has wrong rank. Rank {} found.'.format(targets_rank)
assert lr_rank == 0,\
'Learning Rate has wrong rank. Rank {} found'.format(lr_rank)
_print_success_message()
def test_get_init_cell(get_init_cell):
with tf.Graph().as_default():
test_batch_size_ph = tf.placeholder(tf.int32)
test_rnn_size = 256
cell, init_state = get_init_cell(test_batch_size_ph, test_rnn_size)
# Check type
assert isinstance(cell, tf.contrib.rnn.MultiRNNCell),\
'Cell is wrong type. Found {} type'.format(type(cell))
# Check for name attribute
assert hasattr(init_state, 'name'),\
'Initial state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
# Check name
assert init_state.name == 'initial_state:0',\
'Initial state doesn\'t have the correct name. Found the name {}'.format(init_state.name)
_print_success_message()
def test_get_embed(get_embed):
with tf.Graph().as_default():
embed_shape = [50, 5, 256]
test_input_data = tf.placeholder(tf.int32, embed_shape[:2])
test_vocab_size = 27
test_embed_dim = embed_shape[2]
embed = get_embed(test_input_data, test_vocab_size, test_embed_dim)
# Check shape
assert embed.shape == embed_shape,\
'Wrong shape. Found shape {}'.format(embed.shape)
_print_success_message()
def test_build_rnn(build_rnn):
with tf.Graph().as_default():
test_rnn_size = 256
test_rnn_layer_size = 2
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])
test_inputs = tf.placeholder(tf.float32, [None, None, test_rnn_size])
outputs, final_state = build_rnn(test_cell, test_inputs)
# Check name
assert hasattr(final_state, 'name'),\
'Final state doesn\'t have the "name" attribute. Try using `tf.identity` to set the name.'
assert final_state.name == 'final_state:0',\
'Final state doesn\'t have the correct name. Found the name {}'.format(final_state.name)
# Check shape
assert outputs.get_shape().as_list() == [None, None, test_rnn_size],\
'Outputs has wrong shape. Found shape {}'.format(outputs.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size],\
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_build_nn(build_nn):
with tf.Graph().as_default():
test_input_data_shape = [128, 5]
test_input_data = tf.placeholder(tf.int32, test_input_data_shape)
test_rnn_size = 256
test_embed_dim = 300
test_rnn_layer_size = 2
test_vocab_size = 27
test_cell = rnn.MultiRNNCell([rnn.BasicLSTMCell(test_rnn_size) for _ in range(test_rnn_layer_size)])
logits, final_state = build_nn(test_cell, test_rnn_size, test_input_data, test_vocab_size, test_embed_dim)
# Check name
assert hasattr(final_state, 'name'), \
'Final state doesn\'t have the "name" attribute. Are you using build_rnn?'
assert final_state.name == 'final_state:0', \
'Final state doesn\'t have the correct name. Found the name {}. Are you using build_rnn?'.format(final_state.name)
# Check Shape
assert logits.get_shape().as_list() == test_input_data_shape + [test_vocab_size], \
'Outputs has wrong shape. Found shape {}'.format(logits.get_shape())
assert final_state.get_shape().as_list() == [test_rnn_layer_size, 2, None, test_rnn_size], \
'Final state wrong shape. Found shape {}'.format(final_state.get_shape())
_print_success_message()
def test_get_tensors(get_tensors):
test_graph = tf.Graph()
with test_graph.as_default():
test_input = tf.placeholder(tf.int32, name='input')
test_initial_state = tf.placeholder(tf.int32, name='initial_state')
test_final_state = tf.placeholder(tf.int32, name='final_state')
test_probs = tf.placeholder(tf.float32, name='probs')
input_text, initial_state, final_state, probs = get_tensors(test_graph)
# Check correct tensor
assert input_text == test_input,\
'Test input is wrong tensor'
assert initial_state == test_initial_state, \
'Initial state is wrong tensor'
assert final_state == test_final_state, \
'Final state is wrong tensor'
assert probs == test_probs, \
'Probabilities is wrong tensor'
_print_success_message()
def test_pick_word(pick_word):
with tf.Graph().as_default():
test_probabilities = np.array([0.1, 0.8, 0.05, 0.05])
test_int_to_vocab = {word_i: word for word_i, word in enumerate(['this', 'is', 'a', 'test'])}
pred_word = pick_word(test_probabilities, test_int_to_vocab)
# Check type
assert isinstance(pred_word, str),\
'Predicted word is wrong type. Found {} type.'.format(type(pred_word))
# Check word is from vocab
assert pred_word in test_int_to_vocab.values(),\
'Predicted word not found in int_to_vocab.'
_print_success_message()
|
py-geek/City-Air | refs/heads/master | venv/lib/python2.7/site-packages/oauthlib/oauth1/rfc5849/utils.py | 93 | # -*- coding: utf-8 -*-
"""
oauthlib.utils
~~~~~~~~~~~~~~
This module contains utility methods used by various parts of the OAuth
spec.
"""
from __future__ import absolute_import, unicode_literals
try:
import urllib2
except ImportError:
import urllib.request as urllib2
from oauthlib.common import quote, unquote, bytes_type, unicode_type
UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
'0123456789')
def filter_params(target):
"""Decorator which filters params to remove non-oauth_* parameters
Assumes the decorated method takes a params dict or list of tuples as its
first argument.
"""
def wrapper(params, *args, **kwargs):
params = filter_oauth_params(params)
return target(params, *args, **kwargs)
wrapper.__doc__ = target.__doc__
return wrapper
def filter_oauth_params(params):
"""Removes all non oauth parameters from a dict or a list of params."""
is_oauth = lambda kv: kv[0].startswith("oauth_")
if isinstance(params, dict):
return list(filter(is_oauth, list(params.items())))
else:
return list(filter(is_oauth, params))
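# A small usage sketch (hypothetical values):
#
#   params = [('oauth_nonce', 'abc'), ('realm', 'photos'), ('oauth_token', 't')]
#   filter_oauth_params(params)
#   # -> [('oauth_nonce', 'abc'), ('oauth_token', 't')]
#
# Through the filter_params decorator above, any helper that takes params as
# its first argument sees only the oauth_* pairs.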
def escape(u):
"""Escape a unicode string in an OAuth-compatible fashion.
Per `section 3.6`_ of the spec.
.. _`section 3.6`: http://tools.ietf.org/html/rfc5849#section-3.6
"""
if not isinstance(u, unicode_type):
raise ValueError('Only unicode objects are escapable. ' +
'Got %s of type %s.' % (u, type(u)))
# Letters, digits, and the characters '_.-' are already treated as safe
# by urllib.quote(). We need to add '~' to fully support rfc5849.
return quote(u, safe=b'~')
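# For example (values chosen to show the rfc5849 rules):
#
#   escape(u'hello world~')   # -> u'hello%20world~' (space encoded, '~' kept)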
def unescape(u):
if not isinstance(u, unicode_type):
raise ValueError('Only unicode objects are unescapable.')
return unquote(u)
def parse_keqv_list(l):
"""A unicode-safe version of urllib2.parse_keqv_list"""
# With Python 2.6, parse_http_list handles unicode fine
return urllib2.parse_keqv_list(l)
def parse_http_list(u):
"""A unicode-safe version of urllib2.parse_http_list"""
# With Python 2.6, parse_http_list handles unicode fine
return urllib2.parse_http_list(u)
def parse_authorization_header(authorization_header):
"""Parse an OAuth authorization header into a list of 2-tuples"""
auth_scheme = 'OAuth '.lower()
if authorization_header[:len(auth_scheme)].lower().startswith(auth_scheme):
items = parse_http_list(authorization_header[len(auth_scheme):])
try:
return list(parse_keqv_list(items).items())
except (IndexError, ValueError):
pass
raise ValueError('Malformed authorization header')
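# A hedged example (header value assumed; ordering follows dict iteration
# order and is therefore not guaranteed):
#
#   parse_authorization_header(
#       'OAuth realm="photos", oauth_nonce="abc", oauth_version="1.0"')
#   # -> [('realm', 'photos'), ('oauth_nonce', 'abc'), ('oauth_version', '1.0')]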
|
TeamSWAP/swap | refs/heads/master | external/pyinstaller/PyInstaller/hooks/hook-xml.dom.ext.py | 10 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
attrs = [('Node',0),
('NodeFilter',0),
('XML_NAMESPACE',0),
('XMLNS_NAMESPACE',0),
('DOMException',0),
('HTML_4_TRANSITIONAL_INLINE',0),
('IsDOMString',0),
('FtDomException',0),
('NodeTypeDict',0),
('NodeTypeToClassName',0),
('Print',0),
('PrettyPrint',0),
('XHtmlPrettyPrint',0),
('XHtmlPrint',0),
('ReleaseNode',0),
('StripHtml',0),
('StripXml',0),
('GetElementById',0),
('XmlSpaceState',0),
('GetAllNs',0),
('SplitQName',0),
('SeekNss',0),
]
|
chris-chris/tensorflow | refs/heads/master | tensorflow/tools/api/tests/api_compatibility_test.py | 20 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
"""TensorFlow API compatibility tests.
This test ensures all changes to the public API of TensorFlow are intended.
If this test fails, it means a change has been made to the public API. Backwards
incompatible changes are not allowed. You can run the test with the
"--update_goldens" flag set to "True" to update goldens when making changes to
the public TF python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import re
import sys
import unittest
import tensorflow as tf
from google.protobuf import text_format
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging as logging
from tensorflow.tools.api.lib import api_objects_pb2
from tensorflow.tools.api.lib import python_object_to_proto_visitor
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
# FLAGS defined at the bottom:
FLAGS = None
# DEFINE_boolean, update_goldens, default False:
_UPDATE_GOLDENS_HELP = """
Update stored golden files if API is updated. WARNING: All API changes
have to be authorized by TensorFlow leads.
"""
# DEFINE_boolean, verbose_diffs, default False:
_VERBOSE_DIFFS_HELP = """
If set to true, print line by line diffs on all libraries. If set to
false, only print which libraries have differences.
"""
_API_GOLDEN_FOLDER = 'tensorflow/tools/api/golden'
_TEST_README_FILE = 'tensorflow/tools/api/tests/README.txt'
_UPDATE_WARNING_FILE = 'tensorflow/tools/api/tests/API_UPDATE_WARNING.txt'
def _KeyToFilePath(key):
"""From a given key, construct a filepath."""
def _ReplaceCapsWithDash(matchobj):
match = matchobj.group(0)
return '-%s' % (match.lower())
case_insensitive_key = re.sub('([A-Z]{1})', _ReplaceCapsWithDash, key)
return os.path.join(_API_GOLDEN_FOLDER, '%s.pbtxt' % case_insensitive_key)
def _FileNameToKey(filename):
"""From a given filename, construct a key we use for api objects."""
def _ReplaceDashWithCaps(matchobj):
match = matchobj.group(0)
return match[1].upper()
base_filename = os.path.basename(filename)
base_filename_without_ext = os.path.splitext(base_filename)[0]
api_object_key = re.sub(
'((-[a-z]){1})', _ReplaceDashWithCaps, base_filename_without_ext)
return api_object_key
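# Round-trip illustration (key chosen for illustration only):
#
#   _KeyToFilePath('tensorflow.TFRecordReader')
#   # -> 'tensorflow/tools/api/golden/tensorflow.-t-f-record-reader.pbtxt'
#   _FileNameToKey('tensorflow/tools/api/golden/tensorflow.-t-f-record-reader.pbtxt')
#   # -> 'tensorflow.TFRecordReader'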
class ApiCompatibilityTest(test.TestCase):
def __init__(self, *args, **kwargs):
super(ApiCompatibilityTest, self).__init__(*args, **kwargs)
golden_update_warning_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_UPDATE_WARNING_FILE)
self._update_golden_warning = file_io.read_file_to_string(
golden_update_warning_filename)
test_readme_filename = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_TEST_README_FILE)
self._test_readme_message = file_io.read_file_to_string(
test_readme_filename)
def _AssertProtoDictEquals(self,
expected_dict,
actual_dict,
verbose=False,
update_goldens=False):
"""Diff given dicts of protobufs and report differences a readable way.
Args:
expected_dict: a dict of TFAPIObject protos constructed from golden
files.
      actual_dict: a dict of TFAPIObject protos constructed by reading from the
TF package linked to the test.
verbose: Whether to log the full diffs, or simply report which files were
different.
update_goldens: Whether to update goldens when there are diffs found.
"""
diffs = []
verbose_diffs = []
expected_keys = set(expected_dict.keys())
actual_keys = set(actual_dict.keys())
only_in_expected = expected_keys - actual_keys
only_in_actual = actual_keys - expected_keys
all_keys = expected_keys | actual_keys
# This will be populated below.
updated_keys = []
for key in all_keys:
diff_message = ''
verbose_diff_message = ''
# First check if the key is not found in one or the other.
if key in only_in_expected:
diff_message = 'Object %s expected but not found (removed).' % key
verbose_diff_message = diff_message
elif key in only_in_actual:
diff_message = 'New object %s found (added).' % key
verbose_diff_message = diff_message
else:
# Now we can run an actual proto diff.
try:
self.assertProtoEquals(expected_dict[key], actual_dict[key])
except AssertionError as e:
updated_keys.append(key)
diff_message = 'Change detected in python object: %s.' % key
verbose_diff_message = str(e)
# All difference cases covered above. If any difference found, add to the
# list.
if diff_message:
diffs.append(diff_message)
verbose_diffs.append(verbose_diff_message)
# If diffs are found, handle them based on flags.
if diffs:
diff_count = len(diffs)
logging.error(self._test_readme_message)
logging.error('%d differences found between API and golden.', diff_count)
messages = verbose_diffs if verbose else diffs
for i in range(diff_count):
logging.error('Issue %d\t: %s', i + 1, messages[i])
if update_goldens:
# Write files if requested.
logging.warning(self._update_golden_warning)
# If the keys are only in expected, some objects are deleted.
# Remove files.
for key in only_in_expected:
filepath = _KeyToFilePath(key)
file_io.delete_file(filepath)
# If the files are only in actual (current library), these are new
# modules. Write them to files. Also record all updates in files.
for key in only_in_actual | set(updated_keys):
filepath = _KeyToFilePath(key)
file_io.write_string_to_file(
filepath, text_format.MessageToString(actual_dict[key]))
else:
# Fail if we cannot fix the test by updating goldens.
self.fail('%d differences found between API and golden.' % diff_count)
else:
logging.info('No differences found between API and golden.')
@unittest.skipUnless(
sys.version_info.major == 2 and os.uname()[0] == 'Linux',
      'API compatibility test goldens are generated using python2 on Linux.')
def testAPIBackwardsCompatibility(self):
# Extract all API stuff.
visitor = python_object_to_proto_visitor.PythonObjectToProtoVisitor()
public_api_visitor = public_api.PublicAPIVisitor(visitor)
public_api_visitor.do_not_descend_map[''].append('contrib')
traverse.traverse(tf, public_api_visitor)
proto_dict = visitor.GetProtos()
# Read all golden files.
expression = os.path.join(
resource_loader.get_root_dir_with_all_resources(),
_KeyToFilePath('*'))
golden_file_list = file_io.get_matching_files(expression)
def _ReadFileToProto(filename):
"""Read a filename, create a protobuf from its contents."""
ret_val = api_objects_pb2.TFAPIObject()
text_format.Merge(file_io.read_file_to_string(filename), ret_val)
return ret_val
golden_proto_dict = {
_FileNameToKey(filename): _ReadFileToProto(filename)
for filename in golden_file_list
}
# Diff them. Do not fail if called with update.
# If the test is run to update goldens, only report diffs but do not fail.
self._AssertProtoDictEquals(
golden_proto_dict,
proto_dict,
verbose=FLAGS.verbose_diffs,
update_goldens=FLAGS.update_goldens)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--update_goldens', type=bool, default=False, help=_UPDATE_GOLDENS_HELP)
parser.add_argument(
'--verbose_diffs', type=bool, default=False, help=_VERBOSE_DIFFS_HELP)
FLAGS, unparsed = parser.parse_known_args()
# Now update argv, so that unittest library does not get confused.
sys.argv = [sys.argv[0]] + unparsed
test.main()
|
lkuchenb/shogun | refs/heads/develop | tests/unit/base/clone_unittest.cc.py | 26 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Written (W) 2013 Viktor Gal
# Copyright (C) 2013 Viktor Gal
def get_class_list_content(class_list_file):
f = open(class_list_file, 'r')
content = f.readlines()
f.close()
return content
def get_class_list(class_list_content):
import re
in_class_list = False
classes = []
template_classes = []
for line in class_list_content:
if line == '\n':
continue
l = [l.strip() for l in line.split()]
if 'class_list[]' in l:
in_class_list = True
continue
if in_class_list:
if '};' in l:
in_class_list = False
continue
result = re.match(r"{\"(?P<sho_class>\w+)\"", l[0])
if result:
sho_class=result.group('sho_class')
#print l[1]
if l[1] == 'SHOGUN_BASIC_CLASS':
classes.append(sho_class)
if l[1] == 'SHOGUN_TEMPLATE_CLASS':
template_classes.append(sho_class)
return classes, template_classes
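# The parser above assumes class_list.cpp entries shaped roughly like this
# (an illustration, not copied from the real file):
#
#   static class_list_entry_t class_list[] = {
#       {"AveragedPerceptron", SHOGUN_BASIC_CLASS __new_CAveragedPerceptron},
#       {"DenseFeatures", SHOGUN_TEMPLATE_CLASS __new_CDenseFeatures},
#       {NULL, NULL}
#   };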
def entry(templateFile, class_list_file):
templateLoader = jinja2.FileSystemLoader(searchpath="/")
templateEnv = jinja2.Environment(loader=templateLoader)
template = templateEnv.get_template(templateFile)
# get the content of class_list.cpp
class_list_content = get_class_list_content(class_list_file)
classes, template_classes = get_class_list(class_list_content)
types = ['PT_BOOL', 'PT_CHAR', 'PT_INT8', 'PT_UINT8', 'PT_INT16', 'PT_UINT16', 'PT_INT32',
'PT_UINT32', 'PT_INT64', 'PT_UINT64', 'PT_FLOAT32', 'PT_FLOAT64', 'PT_FLOATMAX']
templateVars = {"classes" : classes, "template_classes" : template_classes, "types" : types}
return template.render(templateVars)
# execution
# ./clone_unittest.cc.py <template file> <output file name> <extra args...>
import sys
TEMPLATE_FILE = sys.argv[1]
output_file = sys.argv[2]
class_list_file = sys.argv[3]
try:
import jinja2
outputText = entry(TEMPLATE_FILE, class_list_file)
except ImportError:
import os
basename = os.path.basename(output_file)
basename = basename.replace('.cc', '')
print("Please install jinja2 for clone unit-tests");
outputText = ['''#include <gtest/gtest.h>
TEST(Dummy, %s_dummy)
{
}''' % (basename)]
f = open(output_file, 'w')
f.writelines(outputText)
f.close()
|