repo_name | path | copies | size | content | license
---|---|---|---|---|---
Phonemetra/TurboCoin | test/functional/test_framework/test_node.py | 1 | 23598 | #!/usr/bin/env python3
# Copyright (c) 2017-2019 TurboCoin
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for turbocoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .util import (
append_config,
delete_cookie_file,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class FailedToStartError(Exception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
FULL_TEXT = 1
FULL_REGEX = 2
PARTIAL_REGEX = 3
class TestNode():
"""A class for representing a turbocoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, datadir, *, rpchost, timewait, turbocoind, turbocoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False):
"""
Kwargs:
start_perf (bool): If True, begin profiling the node with `perf` as soon as
the node starts.
"""
self.index = i
self.datadir = datadir
self.stdout_dir = os.path.join(self.datadir, "stdout")
self.stderr_dir = os.path.join(self.datadir, "stderr")
self.rpchost = rpchost
self.rpc_timeout = timewait
self.binary = turbocoind
self.coverage_dir = coverage_dir
self.cwd = cwd
if extra_conf is not None:
append_config(datadir, extra_conf)
# Most callers will just need to add extra args to the standard list below.
# For those callers that need more flexibility, they can just set the args property directly.
# Note that common args are set in the config file (see initialize_datadir)
self.extra_args = extra_args
# Configuration for logging is set as command-line args rather than in the turbocoin.conf file.
# This means that starting a turbocoind using the temp dir to debug a failed test won't
# spam debug.log.
self.args = [
self.binary,
"-datadir=" + self.datadir,
"-logtimemicros",
"-logthreadnames",
"-debug",
"-debugexclude=libevent",
"-debugexclude=leveldb",
"-uacomment=testnode%d" % i,
]
self.cli = TestNodeCLI(turbocoin_cli, self.datadir)
self.use_cli = use_cli
self.start_perf = start_perf
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.cleanup_on_exit = True # Whether to kill the node when this object goes away
# Cache perf subprocesses here by their data output filename.
self.perf_subprocesses = {}
self.p2ps = []
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'),
AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'),
AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'),
AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'),
AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'),
AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'),
AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'),
AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'),
AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'),
]
return PRIV_KEYS[self.index]
def get_mem_rss_kilobytes(self):
"""Get the memory usage (RSS) per `ps`.
Returns None if `ps` is unavailable.
"""
assert self.running
try:
return int(subprocess.check_output(
["ps", "h", "-o", "rss", "{}".format(self.process.pid)],
stderr=subprocess.DEVNULL).split()[-1])
# Avoid failing on platforms where ps isn't installed.
#
# We could later use something like `psutils` to work across platforms.
except (FileNotFoundError, subprocess.SubprocessError):
self.log.exception("Unable to get memory usage")
return None
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
def __del__(self):
# Ensure that we don't leave any turbocoind processes lying around after
# the test ends
if self.process and self.cleanup_on_exit:
# Should only happen on test failure
# Avoid using logger, as that may have already been shutdown when
# this destructor is called.
print(self._node_msg("Cleaning up leftover process"))
self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
# Add a new stdout and stderr file each time turbocoind is started
if stderr is None:
stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
if stdout is None:
stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
self.stderr = stderr
self.stdout = stdout
if cwd is None:
cwd = self.cwd
# Delete any existing cookie file -- if such a file exists (eg due to
# unclean shutdown), it will get overwritten anyway by turbocoind, and
# potentially interfere with our attempt to authenticate
delete_cookie_file(self.datadir)
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
self.running = True
self.log.debug("turbocoind started, waiting for RPC to come up")
if self.start_perf:
self._start_perf()
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the turbocoind process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
if self.process.poll() is not None:
raise FailedToStartError(self._node_msg(
'turbocoind exited with status {} during initialization'.format(self.process.returncode)))
try:
rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.log.debug("RPC successfully started")
if self.use_cli:
return
self.rpc = rpc
self.rpc_connected = True
self.url = self.rpc.url
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcpassword; turbocoind still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to connect to turbocoind")
def generate(self, nblocks, maxtries=1000000):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
return self.cli("-rpcwallet={}".format(wallet_name))
else:
assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
return self.rpc / wallet_path
def stop_node(self, expected_stderr='', wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop(wait=wait)
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
# If there are any running perf processes, stop them.
for profile_name in tuple(self.perf_subprocesses.keys()):
self._stop_perf(profile_name)
# Check that stderr is as expected
self.stderr.seek(0)
stderr = self.stderr.read().decode('utf-8').strip()
if stderr != expected_stderr:
raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
self.stdout.close()
self.stderr.close()
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert return_code == 0, self._node_msg(
"Node returned non-zero exit code (%d) when stopping" % return_code)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs):
debug_log = os.path.join(self.datadir, 'regtest', 'debug.log')
with open(debug_log, encoding='utf-8') as dl:
dl.seek(0, 2)
prev_size = dl.tell()
try:
yield
finally:
with open(debug_log, encoding='utf-8') as dl:
dl.seek(prev_size)
log = dl.read()
print_log = " - " + "\n - ".join(log.splitlines())
for expected_msg in expected_msgs:
if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
self._raise_assertion_error('Expected message "{}" does not partially match log:\n\n{}\n\n'.format(expected_msg, print_log))
@contextlib.contextmanager
def assert_memory_usage_stable(self, *, increase_allowed=0.03):
"""Context manager that allows the user to assert that a node's memory usage (RSS)
hasn't increased beyond some threshold percentage.
Args:
increase_allowed (float): the fractional increase in memory allowed until failure;
e.g. `0.12` for up to 12% increase allowed.
"""
before_memory_usage = self.get_mem_rss_kilobytes()
yield
after_memory_usage = self.get_mem_rss_kilobytes()
if not (before_memory_usage and after_memory_usage):
self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.")
return
perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1
if perc_increase_memory_usage > increase_allowed:
self._raise_assertion_error(
"Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format(
increase_allowed * 100, before_memory_usage, after_memory_usage,
perc_increase_memory_usage * 100))
@contextlib.contextmanager
def profile_with_perf(self, profile_name):
"""
Context manager that allows easy profiling of node activity using `perf`.
See `test/functional/README.md` for details on perf usage.
Args:
profile_name (str): This string will be appended to the
profile data filename generated by perf.
"""
subp = self._start_perf(profile_name)
yield
if subp:
self._stop_perf(profile_name)
def _start_perf(self, profile_name=None):
"""Start a perf process to profile this node.
Returns the subprocess running perf."""
subp = None
def test_success(cmd):
return subprocess.call(
# shell=True required for pipe use below
cmd, shell=True,
stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
if not sys.platform.startswith('linux'):
self.log.warning("Can't profile with perf; only available on Linux platforms")
return None
if not test_success('which perf'):
self.log.warning("Can't profile with perf; must install perf-tools")
return None
if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
self.log.warning(
"perf output won't be very useful without debug symbols compiled into turbocoind")
output_path = tempfile.NamedTemporaryFile(
dir=self.datadir,
prefix="{}.perf.data.".format(profile_name or 'test'),
delete=False,
).name
cmd = [
'perf', 'record',
'-g', # Record the callgraph.
'--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer.
'-F', '101', # Sampling frequency in Hz.
'-p', str(self.process.pid),
'-o', output_path,
]
subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.perf_subprocesses[profile_name] = subp
return subp
def _stop_perf(self, profile_name):
"""Stop (and pop) a perf subprocess."""
subp = self.perf_subprocesses.pop(profile_name)
output_path = subp.args[subp.args.index('-o') + 1]
subp.terminate()
subp.wait(timeout=10)
stderr = subp.stderr.read().decode()
if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
self.log.warning(
"perf couldn't collect data! Try "
"'sudo sysctl -w kernel.perf_event_paranoid=-1'")
else:
report_cmd = "perf report -i {}".format(output_path)
self.log.info("See perf output by running '{}'".format(report_cmd))
def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
"""Attempt to start the node and expect it to raise an error.
extra_args: extra arguments to pass through to turbocoind
expected_msg: regex that stderr should match when turbocoind fails
Will throw if turbocoind starts without an error.
Will throw if an expected_msg is provided and it does not match turbocoind's stderr."""
with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
self.wait_for_rpc_connection()
self.stop_node()
self.wait_until_stopped()
except FailedToStartError as e:
self.log.debug('turbocoind failed to start: %s', e)
self.running = False
self.process = None
# Check stderr for expected message
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8').strip()
if match == ErrorMatch.PARTIAL_REGEX:
if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
self._raise_assertion_error(
'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_REGEX:
if re.fullmatch(expected_msg, stderr) is None:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
elif match == ErrorMatch.FULL_TEXT:
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
else:
if expected_msg is None:
assert_msg = "turbocoind should have exited with an error"
else:
assert_msg = "turbocoind should have exited with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
p2p_conn.peer_connect(**kwargs)()
self.p2ps.append(p2p_conn)
if wait_for_verack:
p2p_conn.wait_for_verack()
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, self._node_msg("No p2p connection")
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
p.peer_disconnect()
del self.p2ps[:]
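# Hypothetical usage sketch (paths and binary names are illustrative, not part
# of the original file). The framework normally constructs TestNode itself;
# driving one by hand would look roughly like this:
#
#   node = TestNode(0, "/tmp/node0", rpchost=None, timewait=60,
#                   turbocoind="turbocoind", turbocoin_cli="turbocoin-cli",
#                   coverage_dir=None, cwd="/tmp")
#   node.start()
#   node.wait_for_rpc_connection()
#   node.getblockcount()   # unrecognised attributes dispatch to the RPC proxy
#   node.stop_node()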
class TestNodeCLIAttr:
def __init__(self, cli, command):
self.cli = cli
self.command = command
def __call__(self, *args, **kwargs):
return self.cli.send_cli(self.command, *args, **kwargs)
def get_request(self, *args, **kwargs):
return lambda: self(*args, **kwargs)
def arg_to_cli(arg):
if isinstance(arg, bool):
return str(arg).lower()
elif isinstance(arg, dict) or isinstance(arg, list):
return json.dumps(arg)
else:
return str(arg)
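# Illustrative examples (assuming arg_to_cli above): Python values are mapped
# onto the string forms expected on the turbocoin-cli command line.
#
#   arg_to_cli(True)         -> "true"
#   arg_to_cli({"vin": []})  -> '{"vin": []}'   (JSON-encoded)
#   arg_to_cli(21000000)     -> "21000000"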
class TestNodeCLI():
"""Interface to turbocoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.options = []
self.binary = binary
self.datadir = datadir
self.input = None
self.log = logging.getLogger('TestFramework.turbocoincli')
def __call__(self, *options, input=None):
# TestNodeCLI is callable with turbocoin-cli command-line options
cli = TestNodeCLI(self.binary, self.datadir)
cli.options = [str(o) for o in options]
cli.input = input
return cli
def __getattr__(self, command):
return TestNodeCLIAttr(self, command)
def batch(self, requests):
results = []
for request in requests:
try:
results.append(dict(result=request()))
except JSONRPCException as e:
results.append(dict(error=e))
return results
def send_cli(self, command=None, *args, **kwargs):
"""Run turbocoin-cli command. Deserializes returned string as python object."""
pos_args = [arg_to_cli(arg) for arg in args]
named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same turbocoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.options
if named_args:
p_args += ["-named"]
if command is not None:
p_args += [command]
p_args += pos_args + named_args
self.log.debug("Running turbocoin-cli command: %s" % command)
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
if match:
code, message = match.groups()
raise JSONRPCException(dict(code=int(code), message=message))
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
except json.JSONDecodeError:
return cli_stdout.rstrip("\n")
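# Hypothetical usage sketch (the datadir is illustrative): attribute access on
# TestNodeCLI builds a turbocoin-cli invocation, and send_cli() deserializes
# the JSON result.
#
#   cli = TestNodeCLI("turbocoin-cli", "/tmp/node0")
#   cli.getblockcount()                 # runs: turbocoin-cli -datadir=... getblockcount
#   cli("-rpcwallet=w1").getbalance()   # per-call options via __call__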
| mit |
sclabs/sccms-nonrel | django/contrib/sites/managers.py | 491 | 1985 | from django.conf import settings
from django.db import models
from django.db.models.fields import FieldDoesNotExist
class CurrentSiteManager(models.Manager):
"Use this to limit objects to those associated with the current site."
def __init__(self, field_name=None):
super(CurrentSiteManager, self).__init__()
self.__field_name = field_name
self.__is_validated = False
def _validate_field_name(self):
field_names = self.model._meta.get_all_field_names()
# If a custom name is provided, make sure the field exists on the model
if self.__field_name is not None and self.__field_name not in field_names:
raise ValueError("%s couldn't find a field named %s in %s." % \
(self.__class__.__name__, self.__field_name, self.model._meta.object_name))
# Otherwise, see if there is a field called either 'site' or 'sites'
else:
for potential_name in ['site', 'sites']:
if potential_name in field_names:
self.__field_name = potential_name
self.__is_validated = True
break
# Now do a type check on the field (FK or M2M only)
try:
field = self.model._meta.get_field(self.__field_name)
if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
raise TypeError("%s must be a ForeignKey or ManyToManyField." % self.__field_name)
except FieldDoesNotExist:
raise ValueError("%s couldn't find a field named %s in %s." % \
(self.__class__.__name__, self.__field_name, self.model._meta.object_name))
self.__is_validated = True
def get_query_set(self):
if not self.__is_validated:
self._validate_field_name()
return super(CurrentSiteManager, self).get_query_set().filter(**{self.__field_name + '__id__exact': settings.SITE_ID})
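# Minimal usage sketch (model and field names are hypothetical): attach the
# manager to a model with a 'site' ForeignKey and its queries are filtered by
# settings.SITE_ID automatically.
#
#   from django.contrib.sites.models import Site
#
#   class Article(models.Model):
#       site = models.ForeignKey(Site)
#       objects = models.Manager()
#       on_site = CurrentSiteManager()
#
#   Article.on_site.all()   # == Article.objects.filter(site__id__exact=settings.SITE_ID)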
| bsd-3-clause |
Cito/sqlalchemy | lib/sqlalchemy/util/langhelpers.py | 3 | 36689 | # util/langhelpers.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from functools import update_wrapper
from .. import exc
import hashlib
from . import compat
from . import _collections
def md5_hex(x):
if compat.py3k:
x = x.encode('utf-8')
m = hashlib.md5()
m.update(x)
return m.hexdigest()
class safe_reraise(object):
"""Reraise an exception after invoking some
handler code.
Stores the existing exception info before
invoking so that it is maintained across a potential
coroutine context switch.
e.g.::
try:
sess.commit()
except:
with safe_reraise():
sess.rollback()
"""
def __enter__(self):
self._exc_info = sys.exc_info()
def __exit__(self, type_, value, traceback):
# see #2703 for notes
if type_ is None:
exc_type, exc_value, exc_tb = self._exc_info
self._exc_info = None # remove potential circular references
compat.reraise(exc_type, exc_value, exc_tb)
else:
self._exc_info = None # remove potential circular references
compat.reraise(type_, value, traceback)
def decode_slice(slc):
"""decode a slice object as sent to __getitem__.
takes into account the 2.5 __index__() method, basically.
"""
ret = []
for x in slc.start, slc.stop, slc.step:
if hasattr(x, '__index__'):
x = x.__index__()
ret.append(x)
return tuple(ret)
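# Examples (assuming decode_slice above): each slice component is normalized
# through __index__() where available; None passes through unchanged.
#
#   decode_slice(slice(1, 10, 2))   -> (1, 10, 2)
#   decode_slice(slice(None, 5))    -> (None, 5, None)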
def _unique_symbols(used, *bases):
used = set(used)
for base in bases:
pool = itertools.chain((base,),
compat.itertools_imap(lambda i: base + str(i),
range(1000)))
for sym in pool:
if sym not in used:
used.add(sym)
yield sym
break
else:
raise NameError("exhausted namespace for symbol base %s" % base)
def decorator(target):
"""A signature-matching decorator factory."""
def decorate(fn):
if not inspect.isfunction(fn):
raise Exception("not a decoratable function")
spec = compat.inspect_getfullargspec(fn)
names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
targ_name, fn_name = _unique_symbols(names, 'target', 'fn')
metadata = dict(target=targ_name, fn=fn_name)
metadata.update(format_argspec_plus(spec, grouped=False))
metadata['name'] = fn.__name__
code = """\
def %(name)s(%(args)s):
return %(target)s(%(fn)s, %(apply_kw)s)
""" % metadata
decorated = _exec_code_in_env(code,
{targ_name: target, fn_name: fn},
fn.__name__)
decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
return update_wrapper(decorated, fn)
return update_wrapper(decorate, target)
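# Usage sketch (names are illustrative; assumes `import time`): a target
# wrapped with @decorator receives the decorated function as its first
# argument, and the generated wrapper preserves the decorated function's
# exact signature rather than a generic *args/**kw one.
#
#   @decorator
#   def timed(fn, *args, **kw):
#       start = time.time()
#       try:
#           return fn(*args, **kw)
#       finally:
#           print("%s took %.3fs" % (fn.__name__, time.time() - start))
#
#   @timed
#   def lookup(name, default=None):
#       ...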
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
def public_factory(target, location):
"""Produce a wrapping function for the given cls or classmethod.
Rationale here is so that the __init__ method of the
class can serve as documentation for the function.
"""
if isinstance(target, type):
fn = target.__init__
callable_ = target
doc = "Construct a new :class:`.%s` object. \n\n"\
"This constructor is mirrored as a public API function; see :func:`~%s` "\
"for a full usage and argument description." % (
target.__name__, location, )
else:
fn = callable_ = target
doc = "This function is mirrored; see :func:`~%s` "\
"for a description of arguments." % location
location_name = location.split(".")[-1]
spec = compat.inspect_getfullargspec(fn)
del spec[0][0]
metadata = format_argspec_plus(spec, grouped=False)
metadata['name'] = location_name
code = """\
def %(name)s(%(args)s):
return cls(%(apply_kw)s)
""" % metadata
env = {'cls': callable_, 'symbol': symbol}
exec(code, env)
decorated = env[location_name]
decorated.__doc__ = fn.__doc__
if compat.py2k or hasattr(fn, '__func__'):
fn.__func__.__doc__ = doc
else:
fn.__doc__ = doc
return decorated
class PluginLoader(object):
def __init__(self, group, auto_fn=None):
self.group = group
self.impls = {}
self.auto_fn = auto_fn
def load(self, name):
if name in self.impls:
return self.impls[name]()
if self.auto_fn:
loader = self.auto_fn(name)
if loader:
self.impls[name] = loader
return loader()
try:
import pkg_resources
except ImportError:
pass
else:
for impl in pkg_resources.iter_entry_points(
self.group, name):
self.impls[name] = impl.load
return impl.load()
raise exc.NoSuchModuleError(
"Can't load plugin: %s:%s" %
(self.group, name))
def register(self, name, modulepath, objname):
def load():
mod = compat.import_(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
def get_cls_kwargs(cls, _set=None):
"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a \**kwargs catch-all, then the constructor is presumed to
pass along unrecognized keywords to its base classes, and the collection
process is repeated recursively on each of the bases.
Uses a subset of inspect.getargspec() to cut down on method overhead.
No anonymous tuple arguments, please!
"""
toplevel = _set is None
if toplevel:
_set = set()
ctr = cls.__dict__.get('__init__', False)
has_init = ctr and isinstance(ctr, types.FunctionType) and \
isinstance(ctr.__code__, types.CodeType)
if has_init:
names, has_kw = inspect_func_args(ctr)
_set.update(names)
if not has_kw and not toplevel:
return None
if not has_init or has_kw:
for c in cls.__bases__:
if get_cls_kwargs(c, _set) is None:
break
_set.discard('self')
return _set
try:
# TODO: who doesn't have this constant?
from inspect import CO_VARKEYWORDS
def inspect_func_args(fn):
co = fn.__code__
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
has_kw = bool(co.co_flags & CO_VARKEYWORDS)
return args, has_kw
except ImportError:
def inspect_func_args(fn):
names, _, has_kw, _ = inspect.getargspec(fn)
return names, bool(has_kw)
def get_func_kwargs(func):
"""Return the set of legal kwargs for the given `func`.
Uses getargspec so is safe to call for methods, functions,
etc.
"""
return compat.inspect_getargspec(func)[0]
def get_callable_argspec(fn, no_self=False):
if isinstance(fn, types.FunctionType):
return compat.inspect_getargspec(fn)
elif isinstance(fn, types.MethodType) and no_self:
spec = compat.inspect_getargspec(fn.__func__)
return compat.ArgSpec(spec.args[1:], spec.varargs, spec.keywords, spec.defaults)
elif hasattr(fn, '__func__'):
return compat.inspect_getargspec(fn.__func__)
elif hasattr(fn, '__call__') and \
not hasattr(fn.__call__, '__call__'): # functools.partial does this;
# not much we can do
return get_callable_argspec(fn.__call__)
else:
raise ValueError("Can't inspect function: %s" % fn)
def format_argspec_plus(fn, grouped=True):
"""Returns a dictionary of formatted, introspected function arguments.
An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
"""
if compat.callable(fn):
spec = compat.inspect_getfullargspec(fn)
else:
# we accept an existing argspec...
spec = fn
args = inspect.formatargspec(*spec)
if spec[0]:
self_arg = spec[0][0]
elif spec[1]:
self_arg = '%s[0]' % spec[1]
else:
self_arg = None
if compat.py3k:
apply_pos = inspect.formatargspec(spec[0], spec[1],
spec[2], None, spec[4])
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
if spec[4]:
num_defaults += len(spec[4])
name_args = spec[0] + spec[4]
else:
apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
name_args = spec[0]
if num_defaults:
defaulted_vals = name_args[0 - num_defaults:]
else:
defaulted_vals = ()
apply_kw = inspect.formatargspec(name_args, spec[1], spec[2],
defaulted_vals,
formatvalue=lambda x: '=' + x)
if grouped:
return dict(args=args, self_arg=self_arg,
apply_pos=apply_pos, apply_kw=apply_kw)
else:
return dict(args=args[1:-1], self_arg=self_arg,
apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return format_argspec_plus(method, grouped=grouped)
except TypeError:
if method is object.__init__:
args = grouped and '(self)' or 'self'
else:
args = (grouped and '(self, *args, **kwargs)'
or 'self, *args, **kwargs')
return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return inspect.getargspec(method)
except TypeError:
if method is object.__init__:
return (['self'], None, None, None)
else:
return (['self'], 'args', 'kwargs', None)
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not
required.
"""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__:
return func_or_cls.__func__
else:
return func_or_cls
def generic_repr(obj, additional_kw=(), to_inspect=None):
"""Produce a __repr__() based on direct association of the __init__()
specification vs. same-named attributes present.
"""
if to_inspect is None:
to_inspect = [obj]
else:
to_inspect = _collections.to_list(to_inspect)
missing = object()
pos_args = []
kw_args = _collections.OrderedDict()
vargs = None
for i, insp in enumerate(to_inspect):
try:
(_args, _vargs, vkw, defaults) = \
inspect.getargspec(insp.__init__)
except TypeError:
continue
else:
default_len = defaults and len(defaults) or 0
if i == 0:
if _vargs:
vargs = _vargs
if default_len:
pos_args.extend(_args[1:-default_len])
else:
pos_args.extend(_args[1:])
else:
kw_args.update([
(arg, missing) for arg in _args[1:-default_len]
])
if default_len:
kw_args.update([
(arg, default)
for arg, default
in zip(_args[-default_len:], defaults)
])
output = []
output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)
if vargs is not None and hasattr(obj, vargs):
output.extend([repr(val) for val in getattr(obj, vargs)])
for arg, defval in kw_args.items():
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except:
pass
if additional_kw:
for arg, defval in additional_kw:
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except:
pass
return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
class portable_instancemethod(object):
"""Turn an instancemethod into a (parent, name) pair
to produce a serializable callable.
"""
def __init__(self, meth):
self.target = meth.__self__
self.name = meth.__name__
def __call__(self, *arg, **kw):
return getattr(self.target, self.name)(*arg, **kw)
def class_hierarchy(cls):
"""Return an unordered sequence of all classes related to cls.
Traverses diamond hierarchies.
Fibs slightly: subclasses of builtin types are not returned. Thus
class_hierarchy(class A(object)) returns (A, object), not A plus every
class systemwide that derives from object.
Old-style classes are discarded and hierarchies rooted on them
will not be descended.
"""
if compat.py2k:
if isinstance(cls, types.ClassType):
return list()
hier = set([cls])
process = list(cls.__mro__)
while process:
c = process.pop()
if compat.py2k:
if isinstance(c, types.ClassType):
continue
bases = (_ for _ in c.__bases__
if _ not in hier and not isinstance(_, types.ClassType))
else:
bases = (_ for _ in c.__bases__ if _ not in hier)
for b in bases:
process.append(b)
hier.add(b)
if compat.py3k:
if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'):
continue
else:
if c.__module__ == '__builtin__' or not hasattr(c, '__subclasses__'):
continue
for s in [_ for _ in c.__subclasses__() if _ not in hier]:
process.append(s)
hier.add(s)
return list(hier)
def iterate_attributes(cls):
"""iterate all the keys and attributes associated
with a class, without using getattr().
Does not use getattr() so that class-sensitive
descriptors (i.e. property.__get__()) are not called.
"""
keys = dir(cls)
for key in keys:
for c in cls.__mro__:
if key in c.__dict__:
yield (key, c.__dict__[key])
break
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
name='self.proxy', from_instance=None):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = ('__slots__', '__del__', '__getattribute__',
'__metaclass__', '__getstate__', '__setstate__')
dunders = [m for m in dir(from_cls)
if (m.startswith('__') and m.endswith('__') and
not hasattr(into_cls, m) and m not in skip)]
for method in dunders:
try:
fn = getattr(from_cls, method)
if not hasattr(fn, '__call__'):
continue
fn = getattr(fn, 'im_func', fn)
except AttributeError:
continue
try:
spec = inspect.getargspec(fn)
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except TypeError:
fn_args = '(self, *args, **kw)'
d_args = '(*args, **kw)'
py = ("def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals())
env = from_instance is not None and {name: from_instance} or {}
compat.exec_(py, env)
try:
env[method].__defaults__ = fn.__defaults__
except AttributeError:
pass
setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
"""Return True if the two methods are the same implementation."""
return getattr(meth1, '__func__', meth1) is getattr(meth2, '__func__', meth2)
def as_interface(obj, cls=None, methods=None, required=None):
"""Ensure basic interface compliance for an instance or dict of callables.
Checks that ``obj`` implements public methods of ``cls`` or has members
listed in ``methods``. If ``required`` is not supplied, implementing at
least one interface method is sufficient. Methods present on ``obj`` that
are not in the interface are ignored.
If ``obj`` is a dict and ``dict`` does not meet the interface
requirements, the keys of the dictionary are inspected. Keys present in
``obj`` that are not in the interface will raise TypeErrors.
Raises TypeError if ``obj`` does not meet the interface criteria.
In all passing cases, an object with callable members is returned. In the
simple case, ``obj`` is returned as-is; if dict processing kicks in then
an anonymous class is returned.
obj
A type, instance, or dictionary of callables.
cls
Optional, a type. All public methods of cls are considered the
interface. An ``obj`` instance of cls will always pass, ignoring
``required``.
methods
Optional, a sequence of method names to consider as the interface.
required
Optional, a sequence of mandatory implementations. If omitted, an
``obj`` that provides at least one interface method is considered
sufficient. As a convenience, required may be a type, in which case
all public methods of the type are required.
"""
if not cls and not methods:
raise TypeError('a class or collection of method names are required')
if isinstance(cls, type) and isinstance(obj, cls):
return obj
interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
implemented = set(dir(obj))
complies = operator.ge
if isinstance(required, type):
required = interface
elif not required:
required = set()
complies = operator.gt
else:
required = set(required)
if complies(implemented.intersection(interface), required):
return obj
# No dict duck typing here.
if not type(obj) is dict:
qualifier = complies is operator.gt and 'any of' or 'all of'
raise TypeError("%r does not implement %s: %s" % (
obj, qualifier, ', '.join(interface)))
class AnonymousInterface(object):
"""A callable-holding shell."""
if cls:
AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
found = set()
for method, impl in dictlike_iteritems(obj):
if method not in interface:
raise TypeError("%r: unknown in this interface" % method)
if not compat.callable(impl):
raise TypeError("%r=%r is not callable" % (method, impl))
setattr(AnonymousInterface, method, staticmethod(impl))
found.add(method)
if complies(found, required):
return AnonymousInterface
raise TypeError("dictionary does not contain required keys %s" %
', '.join(required - found))
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
def _reset(self, obj):
memoized_property.reset(obj, self.__name__)
@classmethod
def reset(cls, obj, name):
obj.__dict__.pop(name, None)
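# Usage sketch (class and attribute names are illustrative):
#
#   class Mapper(object):
#       @memoized_property
#       def primary_key(self):
#           return self._compute_primary_key()   # runs once per instance
#
# After the first access the computed value is stored in the instance
# __dict__ under the same name, shadowing this non-data descriptor on
# subsequent reads.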
class memoized_instancemethod(object):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
def oneshot(*args, **kw):
result = self.fget(obj, *args, **kw)
memo = lambda *a, **kw: result
memo.__name__ = self.__name__
memo.__doc__ = self.__doc__
obj.__dict__[self.__name__] = memo
return result
oneshot.__name__ = self.__name__
oneshot.__doc__ = self.__doc__
return oneshot
class group_expirable_memoized_property(object):
"""A family of @memoized_properties that can be expired in tandem."""
def __init__(self, attributes=()):
self.attributes = []
if attributes:
self.attributes.extend(attributes)
def expire_instance(self, instance):
"""Expire all memoized properties for *instance*."""
stash = instance.__dict__
for attribute in self.attributes:
stash.pop(attribute, None)
def __call__(self, fn):
self.attributes.append(fn.__name__)
return memoized_property(fn)
def method(self, fn):
self.attributes.append(fn.__name__)
return memoized_instancemethod(fn)
def dependency_for(modulename):
def decorate(obj):
# TODO: would be nice to improve on this import silliness,
# unfortunately importlib doesn't work that great either
tokens = modulename.split(".")
mod = compat.import_(".".join(tokens[0:-1]), globals(), locals(), tokens[-1])
mod = getattr(mod, tokens[-1])
setattr(mod, obj.__name__, obj)
return obj
return decorate
class dependencies(object):
"""Apply imported dependencies as arguments to a function.
E.g.::
@util.dependencies(
"sqlalchemy.sql.widget",
"sqlalchemy.engine.default"
)
def some_func(self, widget, default, arg1, arg2, **kw):
# ...
Rationale is so that the impact of a dependency cycle can be
associated directly with the few functions that cause the cycle,
and not pollute the module-level namespace.
"""
def __init__(self, *deps):
self.import_deps = []
for dep in deps:
tokens = dep.split(".")
self.import_deps.append(
dependencies._importlater(
".".join(tokens[0:-1]),
tokens[-1]
)
)
def __call__(self, fn):
import_deps = self.import_deps
spec = compat.inspect_getfullargspec(fn)
spec_zero = list(spec[0])
hasself = spec_zero[0] in ('self', 'cls')
for i in range(len(import_deps)):
spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i
inner_spec = format_argspec_plus(spec, grouped=False)
for impname in import_deps:
del spec_zero[1 if hasself else 0]
spec[0][:] = spec_zero
outer_spec = format_argspec_plus(spec, grouped=False)
code = 'lambda %(args)s: fn(%(apply_kw)s)' % {
"args": outer_spec['args'],
"apply_kw": inner_spec['apply_kw']
}
decorated = eval(code, locals())
decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
return update_wrapper(decorated, fn)
@classmethod
def resolve_all(cls, path):
for m in list(dependencies._unresolved):
if m._full_path.startswith(path):
m._resolve()
_unresolved = set()
_by_key = {}
class _importlater(object):
_unresolved = set()
_by_key = {}
def __new__(cls, path, addtl):
key = path + "." + addtl
if key in dependencies._by_key:
return dependencies._by_key[key]
else:
dependencies._by_key[key] = imp = object.__new__(cls)
return imp
def __init__(self, path, addtl):
self._il_path = path
self._il_addtl = addtl
dependencies._unresolved.add(self)
@property
def _full_path(self):
return self._il_path + "." + self._il_addtl
@memoized_property
def module(self):
if self in dependencies._unresolved:
raise ImportError(
"importlater.resolve_all() hasn't "
"been called (this is %s %s)"
% (self._il_path, self._il_addtl))
return getattr(self._initial_import, self._il_addtl)
def _resolve(self):
dependencies._unresolved.discard(self)
self._initial_import = compat.import_(
self._il_path, globals(), locals(),
[self._il_addtl])
def __getattr__(self, key):
if key == 'module':
raise ImportError("Could not resolve module %s"
% self._full_path)
try:
attr = getattr(self.module, key)
except AttributeError:
raise AttributeError(
"Module %s has no attribute '%s'" %
(self._full_path, key)
)
self.__dict__[key] = attr
return attr
# from paste.deploy.converters
def asbool(obj):
if isinstance(obj, compat.string_types):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
return bool(obj)
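# Examples (assuming asbool above):
#
#   asbool(" Yes ")   -> True     # case- and whitespace-insensitive
#   asbool("0")       -> False
#   asbool(2)         -> True     # non-strings fall through to bool()
#   asbool("maybe")   # raises ValueError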
def bool_or_str(*text):
"""Return a callable that will evaulate a string as
boolean, or one of a set of "alternate" string values.
"""
def bool_or_value(obj):
if obj in text:
return obj
else:
return asbool(obj)
return bool_or_value
def asint(value):
"""Coerce to integer."""
if value is None:
return value
return int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if key in kw and type(kw[key]) is not type_ and kw[key] is not None:
if type_ is bool and flexi_bool:
kw[key] = asbool(kw[key])
else:
kw[key] = type_(kw[key])
def constructor_copy(obj, cls, **kw):
"""Instantiate cls using the __dict__ of obj as constructor arguments.
Uses inspect to match the named arguments of ``cls``.
"""
names = get_cls_kwargs(cls)
kw.update((k, obj.__dict__[k]) for k in names if k in obj.__dict__)
return cls(**kw)
def counter():
"""Return a threadsafe counter function."""
lock = compat.threading.Lock()
counter = itertools.count(1)
# avoid the 2to3 "next" transformation...
def _next():
lock.acquire()
try:
return next(counter)
finally:
lock.release()
return _next
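# Example (assuming counter above): each call hands out the next integer,
# with a lock serializing access across threads.
#
#   next_id = counter()
#   next_id()   # -> 1
#   next_id()   # -> 2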
def duck_type_collection(specimen, default=None):
"""Given an instance or class, guess if it is or is acting as one of
the basic collection types: list, set and dict. If the __emulates__
property is present, return that preferentially.
"""
if hasattr(specimen, '__emulates__'):
# canonicalize set vs sets.Set to a standard: the builtin set
if (specimen.__emulates__ is not None and
issubclass(specimen.__emulates__, set)):
return set
else:
return specimen.__emulates__
isa = isinstance(specimen, type) and issubclass or isinstance
if isa(specimen, list):
return list
elif isa(specimen, set):
return set
elif isa(specimen, dict):
return dict
if hasattr(specimen, 'append'):
return list
elif hasattr(specimen, 'add'):
return set
elif hasattr(specimen, 'set'):
return dict
else:
return default
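# Examples (assuming duck_type_collection above):
#
#   duck_type_collection([])            -> list
#   duck_type_collection(set)           -> set   # classes checked via issubclass
#   duck_type_collection(object(), 42)  -> 42    # no match, default returned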
def assert_arg_type(arg, argtype, name):
if isinstance(arg, argtype):
return arg
else:
if isinstance(argtype, tuple):
raise exc.ArgumentError(
"Argument '%s' is expected to be one of type %s, got '%s'" %
(name, ' or '.join("'%s'" % a for a in argtype), type(arg)))
else:
raise exc.ArgumentError(
"Argument '%s' is expected to be of type '%s', got '%s'" %
(name, argtype, type(arg)))
def dictlike_iteritems(dictlike):
"""Return a (key, value) iterator for almost any dict-like object."""
if compat.py3k:
if hasattr(dictlike, 'items'):
return list(dictlike.items())
else:
if hasattr(dictlike, 'iteritems'):
return dictlike.iteritems()
elif hasattr(dictlike, 'items'):
return iter(dictlike.items())
getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
if getter is None:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
if hasattr(dictlike, 'iterkeys'):
def iterator():
for key in dictlike.iterkeys():
yield key, getter(key)
return iterator()
elif hasattr(dictlike, 'keys'):
return iter((key, getter(key)) for key in dictlike.keys())
else:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
class classproperty(property):
"""A decorator that behaves like @property except that operates
on classes rather than instances.
The decorator is currently special when using the declarative
module, but note that the
:class:`~.sqlalchemy.ext.declarative.declared_attr`
decorator should be used for this purpose with declarative.
"""
def __init__(self, fget, *arg, **kw):
super(classproperty, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
class hybridmethod(object):
"""Decorate a function as cls- or instance- level."""
def __init__(self, func, expr=None):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self.func.__get__(owner, owner.__class__)
else:
return self.func.__get__(instance, owner)
class _symbol(int):
def __new__(self, name, doc=None, canonical=None):
"""Construct a new named symbol."""
assert isinstance(name, compat.string_types)
if canonical is None:
canonical = hash(name)
v = int.__new__(_symbol, canonical)
v.name = name
if doc:
v.__doc__ = doc
return v
def __reduce__(self):
return symbol, (self.name, "x", int(self))
def __str__(self):
return repr(self)
def __repr__(self):
return "symbol(%r)" % self.name
_symbol.__name__ = 'symbol'
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
<symbol 'foo>
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
The optional ``doc`` argument assigns to ``__doc__``. This
is strictly so that Sphinx autoattr picks up the docstring we want
(it doesn't appear to pick up the in-module docstring if the datamember
is in a different module - autoattribute also blows up completely).
If Sphinx fixes/improves this then we would no longer need
``doc`` here.
"""
symbols = {}
_lock = compat.threading.Lock()
def __new__(cls, name, doc=None, canonical=None):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name, doc, canonical)
return sym
finally:
symbol._lock.release()
_creation_order = 1
def set_creation_order(instance):
"""Assign a '_creation_order' sequence to the given instance.
This allows multiple instances to be sorted in order of creation
(typically within a single thread; the counter is not particularly
threadsafe).
"""
global _creation_order
instance._creation_order = _creation_order
_creation_order += 1
def warn_exception(func, *args, **kwargs):
"""executes the given function, catches all exceptions and converts to
a warning.
"""
try:
return func(*args, **kwargs)
except:
warn("%s('%s') ignored" % sys.exc_info()[0:2])
def warn(msg, stacklevel=3):
"""Issue a warning.
If msg is a string, :class:`.exc.SAWarning` is used as
the category.
.. note::
This function is swapped out when the test suite
runs, with a compatible version that uses
warnings.warn_explicit, so that the warnings registry can
be controlled.
"""
if isinstance(msg, compat.string_types):
warnings.warn(msg, exc.SAWarning, stacklevel=stacklevel)
else:
warnings.warn(msg, stacklevel=stacklevel)
def only_once(fn):
"""Decorate the given function to be a no-op after it is called exactly
once."""
once = [fn]
def go(*arg, **kw):
if once:
once_fn = once.pop()
return once_fn(*arg, **kw)
return update_wrapper(go, fn)
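# Example (assuming only_once above): the wrapped function runs on the first
# call only; later calls are no-ops returning None.
#
#   @only_once
#   def emit_startup_warning():
#       warn("this codepath is deprecated")
#
#   emit_startup_warning()   # issues the warning
#   emit_startup_warning()   # does nothing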
_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')
def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
"""Chop extraneous lines off beginning and end of a traceback.
:param tb:
a list of traceback lines as returned by ``traceback.format_stack()``
:param exclude_prefix:
a regular expression object matching lines to skip at beginning of ``tb``
:param exclude_suffix:
a regular expression object matching lines to skip at end of ``tb``
"""
start = 0
end = len(tb) - 1
while start <= end and exclude_prefix.search(tb[start]):
start += 1
while start <= end and exclude_suffix.search(tb[end]):
end -= 1
return tb[start:end + 1]
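# Example (assuming chop_traceback above, with hypothetical traceback lines):
#
#   tb = ['unittest2/case.py line ...',
#         'myapp/model.py line ...',
#         'sqlalchemy/orm/query.py line ...']
#   chop_traceback(tb)   # -> ['myapp/model.py line ...']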
NoneType = type(None)
| mit |
honggyukim/uftrace | tests/t247_graph_srcline.py | 2 | 2622 | #!/usr/bin/env python
from runtest import TestBase
class TestCase(TestBase):
def __init__(self):
TestBase.__init__(self, 'sort', result="""
# Function Call Graph for 'main' (session: de27777d0a966d5a)
=============== BACKTRACE ===============
backtrace #0: hit 1, time 13.120 ms
[0] main (0x56366ebab7fc)
========== FUNCTION CALL GRAPH ==========
# TOTAL TIME FUNCTION [SOURCE]
13.120 ms : (1) main
694.492 us : +-(2) foo [/home/eslee/soft/uftrace/tests/s-sort.c:10]
688.800 us : | (6) loop [/home/eslee/soft/uftrace/tests/s-sort.c:3]
: |
10.748 ms : +-(1) bar [/home/eslee/soft/uftrace/tests/s-sort.c:17]
10.183 ms : (1) usleep
""", sort='graph', cflags='-g')
def build(self, name, cflags='', ldflags=''):
if not 'dwarf' in self.feature:
return TestBase.TEST_SKIP
return TestBase.build(self, name, cflags, ldflags)
def prepare(self):
self.subcmd = 'record'
self.option = '--srcline'
self.exearg = 't-' + self.name
return self.runcmd()
def setup(self):
self.subcmd = 'graph'
self.option = '--srcline'
self.exearg = 'main'
def sort(self, output):
""" This function post-processes output of the test to be compared.
It ignores blank and comment (#) lines and header lines. """
result = []
mode = 0
for ln in output.split('\n'):
if ln.strip() == '' or ln.startswith('#'):
continue
# A graph result consists of backtrace and calling functions
if ln.startswith('=============== BACKTRACE ==============='):
mode = 1
continue
if ln.startswith('========== FUNCTION CALL GRAPH =========='):
mode = 2
continue
if mode == 1:
if ln.startswith(' backtrace #'):
result.append(ln.split(',')[0]) # remove time part
if ln.startswith(' ['):
result.append(ln.split('(')[0]) # remove '(addr)' part
if mode == 2:
if " : " in ln:
func = ln.split(':', 1)[1].split('[') # remove time part
if len(func) < 2 :
result.append('%s' % (func[-1]))
else :
# extract basename and line number of source location
result.append('%s %s' % (func[-2], func[-1][0:-1].split('/')[-1]))
else:
result.append(ln)
return '\n'.join(result)
| gpl-2.0 |
vincepandolfo/django | django/conf/__init__.py | 84 | 6865 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import importlib
import os
import time
from django.conf import global_settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.functional import LazyObject, empty
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self, name=None):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
if not settings_module:
desc = ("setting %s" % name) if name else "settings"
raise ImproperlyConfigured(
"Requested %s, but settings are not configured. "
"You must either define the environment variable %s "
"or call settings.configure() before accessing settings."
% (desc, ENVIRONMENT_VARIABLE))
self._wrapped = Settings(settings_module)
def __repr__(self):
# Hardcode the class name as otherwise it yields 'Settings'.
if self._wrapped is empty:
return '<LazySettings [Unevaluated]>'
return '<LazySettings "%(settings_module)s">' % {
'settings_module': self._wrapped.SETTINGS_MODULE,
}
def __getattr__(self, name):
if self._wrapped is empty:
self._setup(name)
return getattr(self._wrapped, name)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped is not empty:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
@property
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return self._wrapped is not empty
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
raise ImproperlyConfigured("If set, %s must end with a slash" % name)
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting.isupper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
mod = importlib.import_module(self.SETTINGS_MODULE)
tuple_settings = (
"INSTALLED_APPS",
"TEMPLATE_DIRS",
"LOCALE_PATHS",
)
self._explicit_settings = set()
for setting in dir(mod):
if setting.isupper():
setting_value = getattr(mod, setting)
if (setting in tuple_settings and
not isinstance(setting_value, (list, tuple))):
raise ImproperlyConfigured("The %s setting must be a list or a tuple. "
"Please fix your settings." % setting)
setattr(self, setting, setting_value)
self._explicit_settings.add(setting)
if not self.SECRET_KEY:
raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
def is_overridden(self, setting):
return setting in self._explicit_settings
def __repr__(self):
return '<%(cls)s "%(settings_module)s">' % {
'cls': self.__class__.__name__,
'settings_module': self.SETTINGS_MODULE,
}
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.__dict__['_deleted'] = set()
self.default_settings = default_settings
def __getattr__(self, name):
if name in self._deleted:
raise AttributeError
return getattr(self.default_settings, name)
def __setattr__(self, name, value):
self._deleted.discard(name)
super(UserSettingsHolder, self).__setattr__(name, value)
def __delattr__(self, name):
self._deleted.add(name)
if hasattr(self, name):
super(UserSettingsHolder, self).__delattr__(name)
def __dir__(self):
return list(self.__dict__) + dir(self.default_settings)
def is_overridden(self, setting):
deleted = (setting in self._deleted)
set_locally = (setting in self.__dict__)
set_on_default = getattr(self.default_settings, 'is_overridden', lambda s: False)(setting)
return (deleted or set_locally or set_on_default)
def __repr__(self):
return '<%(cls)s>' % {
'cls': self.__class__.__name__,
}
settings = LazySettings()
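# Illustrative usage sketch (the setting values are arbitrary examples):
# standalone scripts that cannot set DJANGO_SETTINGS_MODULE can call
# configure() once, before the first setting is accessed.
#
#   from django.conf import settings
#   if not settings.configured:
#       settings.configure(DEBUG=True, TIME_ZONE='UTC')
#   settings.DEBUG        # -> True, served by UserSettingsHolder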
| bsd-3-clause |
rcharp/toyota-flask | venv/lib/python2.7/site-packages/setuptools/command/rotate.py | 461 | 2038 | from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import os
from setuptools import Command
from setuptools.compat import basestring
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError:
raise DistutilsOptionError("--keep must be an integer")
if isinstance(self.match, basestring):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name() + '*' + pattern
files = glob(os.path.join(self.dist_dir, pattern))
files = [(os.path.getmtime(f), f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t, f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
os.unlink(f)
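# Illustrative invocation sketch (the patterns and count are arbitrary
# examples), run from a directory containing a setup.py:
#
#   python setup.py rotate --match=.egg,.zip --keep=2
#
# This keeps the two newest distributions matching each pattern in the
# dist directory and deletes the rest (unless --dry-run is given).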
| apache-2.0 |
mm112287/2015cda_g8_0421 | static/Brython3.1.1-20150328-091302/Lib/tempfile.py | 728 | 22357 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import warnings as _warnings
import sys as _sys
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
try:
import fcntl as _fcntl
except ImportError:
def _set_cloexec(fd):
pass
else:
def _set_cloexec(fd):
try:
flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
except OSError:
pass
else:
# flags read successfully, modify
flags |= _fcntl.FD_CLOEXEC
_fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
import _thread
except ImportError:
import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
_text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
_text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
_bin_openflags |= _os.O_BINARY
if hasattr(_os, 'TMP_MAX'):
TMP_MAX = _os.TMP_MAX
else:
TMP_MAX = 10000
# Although it does not have an underscore for historical reasons, this
# variable is an internal implementation detail (see issue 10354).
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
f = open(fn)
f.close()
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in "123456"]
return ''.join(letters)
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
"""Calculate the default directory to use for temporary files.
This routine should be called exactly once.
We determine whether or not a candidate temp dir is usable by
trying to create and write to a file in that directory. If this
is successful, the test file is deleted. To prevent denial of
service, the name of the test file must be randomized."""
namer = _RandomNameSequence()
dirlist = _candidate_tempdir_list()
for dir in dirlist:
if dir != _os.curdir:
dir = _os.path.normcase(_os.path.abspath(dir))
# Try only a few names per directory.
for seq in range(100):
name = next(namer)
filename = _os.path.join(dir, name)
try:
fd = _os.open(filename, _bin_openflags, 0o600)
try:
try:
with _io.open(fd, 'wb', closefd=False) as fp:
fp.write(b'blat')
finally:
_os.close(fd)
finally:
_os.unlink(filename)
return dir
except FileExistsError:
pass
except OSError:
break # no point trying more names in this directory
raise FileNotFoundError(_errno.ENOENT,
"No usable temporary directory found in %s" %
dirlist)
_name_sequence = None
def _get_candidate_names():
"""Common setup sequence for all user-callable interfaces."""
global _name_sequence
if _name_sequence is None:
_once_lock.acquire()
try:
if _name_sequence is None:
_name_sequence = _RandomNameSequence()
finally:
_once_lock.release()
return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
"""Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, pre + name + suf)
try:
fd = _os.open(file, flags, 0o600)
_set_cloexec(fd)
return (fd, _os.path.abspath(file))
except FileExistsError:
continue # try again
except PermissionError:
# This exception is thrown when a directory with the chosen name
# already exists on windows.
if _os.name == 'nt':
continue
else:
raise
raise FileExistsError(_errno.EEXIST,
"No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
"""Accessor for tempdir.template."""
return template
tempdir = None
def gettempdir():
"""Accessor for tempfile.tempdir."""
global tempdir
if tempdir is None:
_once_lock.acquire()
try:
if tempdir is None:
tempdir = _get_default_tempdir()
finally:
_once_lock.release()
return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
"""User-callable function to create and return a unique temporary
file. The return value is a pair (fd, name) where fd is the
file descriptor returned by os.open, and name is the filename.
If 'suffix' is specified, the file name will end with that suffix,
otherwise there will be no suffix.
If 'prefix' is specified, the file name will begin with that prefix,
otherwise a default prefix is used.
If 'dir' is specified, the file will be created in that directory,
otherwise a default directory is used.
If 'text' is specified and true, the file is opened in text
mode. Else (the default) the file is opened in binary mode. On
some operating systems, this makes no difference.
The file is readable and writable only by the creating user ID.
If the operating system uses permission bits to indicate whether a
file is executable, the file is executable by no one. The file
descriptor is not inherited by children of this process.
Caller is responsible for deleting the file when done with it.
"""
if dir is None:
dir = gettempdir()
if text:
flags = _text_openflags
else:
flags = _bin_openflags
return _mkstemp_inner(dir, prefix, suffix, flags)
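# Illustrative usage sketch: mkstemp hands back a raw OS-level descriptor,
# so the caller is responsible for closing it and unlinking the file.
#
#   fd, path = mkstemp(suffix='.txt')
#   try:
#       with _io.open(fd, 'w') as fp:   # closing fp also closes fd
#           fp.write('scratch data')
#   finally:
#       _os.unlink(path)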
def mkdtemp(suffix="", prefix=template, dir=None):
"""User-callable function to create and return a unique temporary
directory. The return value is the pathname of the directory.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
The directory is readable, writable, and searchable only by the
creating user.
Caller is responsible for deleting the directory when done with it.
"""
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
try:
_os.mkdir(file, 0o700)
return file
except FileExistsError:
continue # try again
raise FileExistsError(_errno.EEXIST,
"No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
"""User-callable function to return a unique temporary file name. The
file is not created.
Arguments are as for mkstemp, except that the 'text' argument is
not accepted.
This function is unsafe and should not be used. The file name
refers to a file that did not exist at some point, but by the time
you get around to creating it, someone else may have beaten you to
the punch.
"""
## from warnings import warn as _warn
## _warn("mktemp is a potential security risk to your program",
## RuntimeWarning, stacklevel=2)
if dir is None:
dir = gettempdir()
names = _get_candidate_names()
for seq in range(TMP_MAX):
name = next(names)
file = _os.path.join(dir, prefix + name + suffix)
if not _exists(file):
return file
raise FileExistsError(_errno.EEXIST,
"No usable temporary filename found")
class _TemporaryFileWrapper:
"""Temporary file wrapper
This class provides a wrapper around files opened for
temporary use. In particular, it seeks to automatically
remove the file when it is no longer needed.
"""
def __init__(self, file, name, delete=True):
self.file = file
self.name = name
self.close_called = False
self.delete = delete
def __getattr__(self, name):
# Attribute lookups are delegated to the underlying file
# and cached for non-numeric results
# (i.e. methods are cached, closed and friends are not)
file = self.__dict__['file']
a = getattr(file, name)
if not isinstance(a, int):
setattr(self, name, a)
return a
# The underlying __enter__ method returns the wrong object
# (self.file) so override it to return the wrapper
def __enter__(self):
self.file.__enter__()
return self
# iter() doesn't use __getattr__ to find the __iter__ method
def __iter__(self):
return iter(self.file)
# NT provides delete-on-close as a primitive, so we don't need
# the wrapper to do anything special. We still use it so that
# file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
if _os.name != 'nt':
# Cache the unlinker so we don't get spurious errors at
# shutdown when the module-level "os" is None'd out. Note
# that this must be referenced as self.unlink, because the
# name TemporaryFileWrapper may also get None'd out before
# __del__ is called.
unlink = _os.unlink
def close(self):
if not self.close_called:
self.close_called = True
self.file.close()
if self.delete:
self.unlink(self.name)
def __del__(self):
self.close()
# Need to trap __exit__ as well to ensure the file gets
# deleted when used in a with statement
def __exit__(self, exc, value, tb):
result = self.file.__exit__(exc, value, tb)
self.close()
return result
else:
def __exit__(self, exc, value, tb):
self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix="", prefix=template,
dir=None, delete=True):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
'delete' -- whether the file is deleted on close (default True).
The file is created as mkstemp() would do it.
Returns an object with a file-like interface; the name of the file
is accessible as file.name. The file will be automatically deleted
when it is closed unless the 'delete' argument is set to False.
"""
if dir is None:
dir = gettempdir()
flags = _bin_openflags
# Setting O_TEMPORARY in the flags causes the OS to delete
# the file when it is closed. This is only supported by Windows.
if _os.name == 'nt' and delete:
flags |= _os.O_TEMPORARY
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
file = _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
return _TemporaryFileWrapper(file, name, delete)
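# Illustrative usage sketch: by default the wrapper unlinks the file when
# it is closed; pass delete=False to keep it around for inspection.
#
#   with NamedTemporaryFile(suffix='.log') as f:
#       f.write(b'hello')
#       f.flush()
#       f.name            # a real on-disk path while the file is open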
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
# On non-POSIX and Cygwin systems, assume that we cannot unlink a file
# while it is open.
TemporaryFile = NamedTemporaryFile
else:
def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
newline=None, suffix="", prefix=template,
dir=None):
"""Create and return a temporary file.
Arguments:
'prefix', 'suffix', 'dir' -- as for mkstemp.
'mode' -- the mode argument to io.open (default "w+b").
'buffering' -- the buffer size argument to io.open (default -1).
'encoding' -- the encoding argument to io.open (default None)
'newline' -- the newline argument to io.open (default None)
The file is created as mkstemp() would do it.
Returns an object with a file-like interface. The file has no
name, and will cease to exist when it is closed.
"""
if dir is None:
dir = gettempdir()
flags = _bin_openflags
(fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
try:
_os.unlink(name)
return _io.open(fd, mode, buffering=buffering,
newline=newline, encoding=encoding)
except:
_os.close(fd)
raise
class SpooledTemporaryFile:
"""Temporary file wrapper, specialized to switch from BytesIO
or StringIO to a real file when it exceeds a certain size or
when a fileno is needed.
"""
_rolled = False
def __init__(self, max_size=0, mode='w+b', buffering=-1,
encoding=None, newline=None,
suffix="", prefix=template, dir=None):
if 'b' in mode:
self._file = _io.BytesIO()
else:
# Setting newline="\n" avoids newline translation;
# this is important because otherwise on Windows we'd
            # get double newline translation upon rollover().
self._file = _io.StringIO(newline="\n")
self._max_size = max_size
self._rolled = False
self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
'suffix': suffix, 'prefix': prefix,
'encoding': encoding, 'newline': newline,
'dir': dir}
def _check(self, file):
if self._rolled: return
max_size = self._max_size
if max_size and file.tell() > max_size:
self.rollover()
def rollover(self):
if self._rolled: return
file = self._file
newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
del self._TemporaryFileArgs
newfile.write(file.getvalue())
newfile.seek(file.tell(), 0)
self._rolled = True
# The method caching trick from NamedTemporaryFile
# won't work here, because _file may change from a
# BytesIO/StringIO instance to a real file. So we list
# all the methods directly.
# Context management protocol
def __enter__(self):
if self._file.closed:
raise ValueError("Cannot enter context with closed file")
return self
def __exit__(self, exc, value, tb):
self._file.close()
# file protocol
def __iter__(self):
return self._file.__iter__()
def close(self):
self._file.close()
@property
def closed(self):
return self._file.closed
@property
def encoding(self):
try:
return self._file.encoding
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['encoding']
def fileno(self):
self.rollover()
return self._file.fileno()
def flush(self):
self._file.flush()
def isatty(self):
return self._file.isatty()
@property
def mode(self):
try:
return self._file.mode
except AttributeError:
return self._TemporaryFileArgs['mode']
@property
def name(self):
try:
return self._file.name
except AttributeError:
return None
@property
def newlines(self):
try:
return self._file.newlines
except AttributeError:
if 'b' in self._TemporaryFileArgs['mode']:
raise
return self._TemporaryFileArgs['newline']
def read(self, *args):
return self._file.read(*args)
def readline(self, *args):
return self._file.readline(*args)
def readlines(self, *args):
return self._file.readlines(*args)
def seek(self, *args):
self._file.seek(*args)
@property
def softspace(self):
return self._file.softspace
def tell(self):
return self._file.tell()
def truncate(self, size=None):
if size is None:
self._file.truncate()
else:
if size > self._max_size:
self.rollover()
self._file.truncate(size)
def write(self, s):
file = self._file
rv = file.write(s)
self._check(file)
return rv
def writelines(self, iterable):
file = self._file
rv = file.writelines(iterable)
self._check(file)
return rv
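# Illustrative usage sketch: data is buffered in BytesIO/StringIO until
# max_size is exceeded (or fileno() is requested), at which point
# rollover() moves it into a real TemporaryFile.
#
#   f = SpooledTemporaryFile(max_size=16)
#   f.write(b'x' * 8)     # still in memory
#   f.write(b'x' * 16)    # tell() passes max_size -> rolled over to disk
#   f._rolled             # -> True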
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everything contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self._closed = False
self.name = None # Handle mkdtemp raising an exception
self.name = mkdtemp(suffix, prefix, dir)
def __repr__(self):
return "<{} {!r}>".format(self.__class__.__name__, self.name)
def __enter__(self):
return self.name
def cleanup(self, _warn=False):
if self.name and not self._closed:
try:
self._rmtree(self.name)
except (TypeError, AttributeError) as ex:
# Issue #10188: Emit a warning on stderr
# if the directory could not be cleaned
# up due to missing globals
if "None" not in str(ex):
raise
print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
file=_sys.stderr)
return
self._closed = True
if _warn:
self._warn("Implicitly cleaning up {!r}".format(self),
ResourceWarning)
def __exit__(self, exc, value, tb):
self.cleanup()
def __del__(self):
# Issue a ResourceWarning if implicit cleanup needed
self.cleanup(_warn=True)
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_islink = staticmethod(_os.path.islink)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = OSError
_warn = _warnings.warn
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname) and not self._islink(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
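# Illustrative usage sketch: the directory and everything inside it
# disappear when the context exits.
#
#   with TemporaryDirectory() as tmpdir:
#       name = _os.path.join(tmpdir, 'scratch.txt')
#       with _io.open(name, 'w') as fp:
#           fp.write('gone when the block exits')
#   _os.path.exists(tmpdir)   # -> False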
| gpl-3.0 |
antepsis/anteplahmacun | sympy/matrices/expressions/tests/test_blockmatrix.py | 94 | 6835 | from sympy.matrices.expressions.blockmatrix import (block_collapse, bc_matmul,
bc_block_plus_ident, BlockDiagMatrix, BlockMatrix, bc_dist, bc_matadd,
bc_transpose, blockcut, reblock_2x2, deblock)
from sympy.matrices.expressions import (MatrixSymbol, Identity,
Inverse, trace, Transpose, det)
from sympy.matrices import Matrix, ImmutableMatrix
from sympy.core import Tuple, symbols, Expr
from sympy.core.compatibility import range
from sympy.functions import transpose
i, j, k, l, m, n, p = symbols('i:n, p', integer=True)
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, n)
C = MatrixSymbol('C', n, n)
D = MatrixSymbol('D', n, n)
G = MatrixSymbol('G', n, n)
H = MatrixSymbol('H', n, n)
b1 = BlockMatrix([[G, H]])
b2 = BlockMatrix([[G], [H]])
def test_bc_matmul():
assert bc_matmul(H*b1*b2*G) == BlockMatrix([[(H*G*G + H*H*H)*G]])
def test_bc_matadd():
assert bc_matadd(BlockMatrix([[G, H]]) + BlockMatrix([[H, H]])) == \
BlockMatrix([[G+H, H+H]])
def test_bc_transpose():
assert bc_transpose(Transpose(BlockMatrix([[A, B], [C, D]]))) == \
BlockMatrix([[A.T, C.T], [B.T, D.T]])
def test_bc_dist_diag():
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', m, m)
C = MatrixSymbol('C', l, l)
X = BlockDiagMatrix(A, B, C)
assert bc_dist(X+X).equals(BlockDiagMatrix(2*A, 2*B, 2*C))
def test_block_plus_ident():
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, m)
C = MatrixSymbol('C', m, n)
D = MatrixSymbol('D', m, m)
X = BlockMatrix([[A, B], [C, D]])
assert bc_block_plus_ident(X+Identity(m+n)) == \
BlockDiagMatrix(Identity(n), Identity(m)) + X
def test_BlockMatrix():
A = MatrixSymbol('A', n, m)
B = MatrixSymbol('B', n, k)
C = MatrixSymbol('C', l, m)
D = MatrixSymbol('D', l, k)
M = MatrixSymbol('M', m + k, p)
N = MatrixSymbol('N', l + n, k + m)
X = BlockMatrix(Matrix([[A, B], [C, D]]))
assert X.__class__(*X.args) == X
# block_collapse does nothing on normal inputs
E = MatrixSymbol('E', n, m)
assert block_collapse(A + 2*E) == A + 2*E
F = MatrixSymbol('F', m, m)
assert block_collapse(E.T*A*F) == E.T*A*F
assert X.shape == (l + n, k + m)
assert X.blockshape == (2, 2)
assert transpose(X) == BlockMatrix(Matrix([[A.T, C.T], [B.T, D.T]]))
assert transpose(X).shape == X.shape[::-1]
# Test that BlockMatrices and MatrixSymbols can still mix
assert (X*M).is_MatMul
assert X._blockmul(M).is_MatMul
assert (X*M).shape == (n + l, p)
assert (X + N).is_MatAdd
assert X._blockadd(N).is_MatAdd
assert (X + N).shape == X.shape
E = MatrixSymbol('E', m, 1)
F = MatrixSymbol('F', k, 1)
Y = BlockMatrix(Matrix([[E], [F]]))
assert (X*Y).shape == (l + n, 1)
assert block_collapse(X*Y).blocks[0, 0] == A*E + B*F
assert block_collapse(X*Y).blocks[1, 0] == C*E + D*F
# block_collapse passes down into container objects, transposes, and inverse
assert block_collapse(transpose(X*Y)) == transpose(block_collapse(X*Y))
assert block_collapse(Tuple(X*Y, 2*X)) == (
block_collapse(X*Y), block_collapse(2*X))
# Make sure that MatrixSymbols will enter 1x1 BlockMatrix if it simplifies
Ab = BlockMatrix([[A]])
Z = MatrixSymbol('Z', *A.shape)
assert block_collapse(Ab + Z) == A + Z
def test_BlockMatrix_trace():
A, B, C, D = [MatrixSymbol(s, 3, 3) for s in 'ABCD']
X = BlockMatrix([[A, B], [C, D]])
assert trace(X) == trace(A) + trace(D)
def test_BlockMatrix_Determinant():
A, B, C, D = [MatrixSymbol(s, 3, 3) for s in 'ABCD']
X = BlockMatrix([[A, B], [C, D]])
from sympy import assuming, Q
with assuming(Q.invertible(A)):
assert det(X) == det(A) * det(D - C*A.I*B)
assert isinstance(det(X), Expr)
def test_squareBlockMatrix():
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, m)
C = MatrixSymbol('C', m, n)
D = MatrixSymbol('D', m, m)
X = BlockMatrix([[A, B], [C, D]])
Y = BlockMatrix([[A]])
assert X.is_square
assert (block_collapse(X + Identity(m + n)) ==
BlockMatrix([[A + Identity(n), B], [C, D + Identity(m)]]))
Q = X + Identity(m + n)
assert (X + MatrixSymbol('Q', n + m, n + m)).is_MatAdd
assert (X * MatrixSymbol('Q', n + m, n + m)).is_MatMul
assert block_collapse(Y.I) == A.I
assert block_collapse(X.inverse()) == BlockMatrix([
[(-B*D.I*C + A).I, -A.I*B*(D + -C*A.I*B).I],
[-(D - C*A.I*B).I*C*A.I, (D - C*A.I*B).I]])
assert isinstance(X.inverse(), Inverse)
assert not X.is_Identity
Z = BlockMatrix([[Identity(n), B], [C, D]])
assert not Z.is_Identity
def test_BlockDiagMatrix():
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', m, m)
C = MatrixSymbol('C', l, l)
M = MatrixSymbol('M', n + m + l, n + m + l)
X = BlockDiagMatrix(A, B, C)
Y = BlockDiagMatrix(A, 2*B, 3*C)
assert X.blocks[1, 1] == B
assert X.shape == (n + m + l, n + m + l)
assert all(X.blocks[i, j].is_ZeroMatrix if i != j else X.blocks[i, j] in [A, B, C]
for i in range(3) for j in range(3))
assert X.__class__(*X.args) == X
assert isinstance(block_collapse(X.I * X), Identity)
assert bc_matmul(X*X) == BlockDiagMatrix(A*A, B*B, C*C)
assert block_collapse(X*X) == BlockDiagMatrix(A*A, B*B, C*C)
#XXX: should be == ??
assert block_collapse(X + X).equals(BlockDiagMatrix(2*A, 2*B, 2*C))
assert block_collapse(X*Y) == BlockDiagMatrix(A*A, 2*B*B, 3*C*C)
assert block_collapse(X + Y) == BlockDiagMatrix(2*A, 3*B, 4*C)
# Ensure that BlockDiagMatrices can still interact with normal MatrixExprs
assert (X*(2*M)).is_MatMul
assert (X + (2*M)).is_MatAdd
assert (X._blockmul(M)).is_MatMul
assert (X._blockadd(M)).is_MatAdd
def test_blockcut():
A = MatrixSymbol('A', n, m)
B = blockcut(A, (n/2, n/2), (m/2, m/2))
assert A[i, j] == B[i, j]
assert B == BlockMatrix([[A[:n/2, :m/2], A[:n/2, m/2:]],
[A[n/2:, :m/2], A[n/2:, m/2:]]])
M = ImmutableMatrix(4, 4, range(16))
B = blockcut(M, (2, 2), (2, 2))
assert M == ImmutableMatrix(B)
B = blockcut(M, (1, 3), (2, 2))
assert ImmutableMatrix(B.blocks[0, 1]) == ImmutableMatrix([[2, 3]])
def test_reblock_2x2():
B = BlockMatrix([[MatrixSymbol('A_%d%d'%(i,j), 2, 2)
for j in range(3)]
for i in range(3)])
assert B.blocks.shape == (3, 3)
BB = reblock_2x2(B)
assert BB.blocks.shape == (2, 2)
assert B.shape == BB.shape
assert B.as_explicit() == BB.as_explicit()
def test_deblock():
B = BlockMatrix([[MatrixSymbol('A_%d%d'%(i,j), n, n)
for j in range(4)]
for i in range(4)])
assert deblock(reblock_2x2(B)) == B
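# Quick illustrative session mirroring the assertions above (a sketch; the
# exact printed form may differ between sympy versions):
#
#   >>> X = BlockMatrix([[A, B], [C, D]])
#   >>> block_collapse(X * BlockMatrix([[A], [C]])).blocks[0, 0]
#   A*A + B*C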
| bsd-3-clause |
juanyaw/python | cpython/Lib/encodings/ascii.py | 858 | 1248 | """ Python 'ascii' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.ascii_encode
decode = codecs.ascii_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.ascii_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.ascii_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
class StreamConverter(StreamWriter,StreamReader):
encode = codecs.ascii_decode
decode = codecs.ascii_encode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='ascii',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
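# Illustrative usage sketch: this codec is normally reached through the
# codecs registry rather than imported directly.
#
#   import codecs
#   info = codecs.lookup('ascii')
#   info.encode(u'abc')   # -> (b'abc', 3)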
| bsd-3-clause |
orangeduck/PyAutoC | Python27/Lib/token.py | 178 | 2944 | #! /usr/bin/env python
"""Token constants (from "token.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
ERRORTOKEN = 52
N_TOKENS = 53
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
del _name, _value
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
def main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError, err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = tokens.keys()
keys.sort()
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
main()
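# Illustrative usage sketch: tok_name maps the numeric constants back to
# their symbolic names, and the predicates classify token values.
#
#   >>> tok_name[NAME]
#   'NAME'
#   >>> ISTERMINAL(NAME), ISNONTERMINAL(NT_OFFSET + 1), ISEOF(ENDMARKER)
#   (True, True, True)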
| bsd-2-clause |
EmreAtes/spack | var/spack/repos/builtin/packages/octave-struct/package.py | 5 | 1581 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class OctaveStruct(OctavePackage):
"""Additional structure manipulation functions for Octave."""
homepage = "https://octave.sourceforge.io/struct/"
url = "https://downloads.sourceforge.net/octave/struct-1.0.14.tar.gz"
version('1.0.14', '3589d5eb8000f18426e2178587eb82f4')
extends('octave@2.9.7:')
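# Illustrative install command (assumes a working Spack installation):
#
#   $ spack install octave-struct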
| lgpl-2.1 |
bdoin/GCompris | src/lightsoff-activity/lightsoff.py | 4 | 22329 | # gcompris - lightsoff.py
#
# Copyright (C) 2010 Bruno and Clement Coudoin
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# lightsoff Board module
import gtk
import gtk.gdk
import gcompris
import gcompris.utils
import gcompris.skin
import gcompris.bonus
import goocanvas
from gcompris import gcompris_gettext as _
class Gcompris_lightsoff:
"""Empty gcompris python class"""
def __init__(self, gcomprisBoard):
# Save the gcomprisBoard, it defines everything we need
# to know from the core
self.gcomprisBoard = gcomprisBoard
self.rootitem = None
# These are used to let us restart only after the bonus is displayed.
# When the bonus is displayed, it call us first with pause(1) and then with pause(0)
self.board_paused = False;
self.gamewon = False;
self.data = [
[[0,0,0,0,0],
[0,0,1,0,0],
[0,1,1,1,0],
[0,0,1,0,0],
[0,0,0,0,0]],
[[1,1,0,0,0],
[1,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0]],
[[0,0,0,0,0],
[0,0,0,0,0],
[0,0,0,0,0],
[0,0,1,0,0],
[0,1,1,1,0]],
[[1,1,0,1,1],
[1,0,0,0,1],
[0,0,0,0,0],
[0,0,0,1,0],
[0,0,1,1,1]],
[[0,1,1,1,0],
[1,0,1,0,1],
[1,1,0,1,1],
[1,0,1,0,1],
[0,1,1,1,0]],
[[1,1,0,1,1],
[1,0,1,0,1],
[0,1,1,1,0],
[1,0,1,0,1],
[1,1,0,1,1]],
[[0,1,0,1,0],
[1,1,0,1,1],
[0,1,0,1,0],
[1,0,1,0,1],
[1,0,1,0,1]],
[[1,0,0,0,1],
[1,1,0,1,1],
[0,0,1,0,0],
[1,0,1,0,0],
[1,0,1,1,0]],
[[1,1,0,1,1],
[0,0,0,0,0],
[1,1,0,1,1],
[0,0,0,0,1],
[1,1,0,0,0]],
# 5
[[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1],
[1,1,1,1,1]],
# 6
[[0,0,0,1,1],
[0,0,0,1,1],
[0,0,0,0,0],
[1,1,0,0,0],
[1,1,0,0,0]],
# 7
[[0,0,0,0,0],
[0,1,1,1,0],
[1,1,1,1,1],
[0,1,1,1,0],
[0,0,0,0,0]],
# 8
[[0,0,0,0,0],
[0,1,1,1,0],
[0,1,1,1,0],
[0,1,1,1,0],
[0,0,0,0,0]],
# 9
[[1,1,0,1,1],
[1,1,0,1,1],
[0,0,0,0,0],
[1,1,0,1,1],
[1,1,0,1,1]],
# 10
[[1,1,1,1,1],
[0,1,1,1,0],
[0,0,1,0,0],
[0,1,1,1,0],
[1,1,1,1,1]],
# 11
[[1,1,1,1,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,0,0,0,1],
[1,1,1,1,1]],
# 12
[[0,0,1,1,1],
[0,0,0,1,1],
[1,0,0,0,1],
[1,1,0,0,0],
[1,1,1,0,0]],
# 13
[[1,0,0,0,1],
[0,1,0,1,0],
[0,0,1,0,0],
[0,1,0,1,0],
[1,0,0,0,1]],
# 14
[[1,1,1,1,1],
[1,0,1,0,1],
[1,1,1,1,1],
[1,0,1,0,1],
[1,1,1,1,1]],
# 15
[[1,0,0,0,0],
[1,1,1,1,0],
[1,1,1,1,0],
[1,1,1,1,0],
[1,1,1,1,1]],
# 16
[[1,1,1,1,1],
[1,1,1,1,1],
[1,1,0,1,1],
[1,1,1,1,1],
[1,1,1,1,1]],
# 17
[[1,0,1,0,1],
[0,1,0,1,0],
[0,0,1,0,0],
[0,1,0,1,0],
[1,0,1,0,1]],
# 18
[[1,1,1,0,1],
[1,1,1,0,1],
[0,0,0,0,0],
[1,0,1,1,1],
[1,0,1,1,1]],
# 19
[[1,1,0,1,1],
[1,1,0,1,1],
[1,1,0,1,1],
[1,1,0,1,1],
[1,1,0,1,1]],
# 20
[[1,1,1,1,1],
[1,0,0,0,1],
[1,1,0,1,1],
[1,1,0,1,1],
[1,1,1,1,1]],
# 21
[[1,1,1,1,1],
[1,1,1,1,1],
[0,0,0,1,1],
[0,0,0,1,1],
[0,0,0,1,1]],
# 22
[[1,1,1,0,1],
[1,1,1,0,0],
[1,1,1,0,0],
[1,1,1,0,0],
[1,1,1,0,1]],
# 23
[[1,1,1,1,1],
[0,0,1,0,0],
[0,0,1,0,0],
[0,0,1,0,0],
[1,1,1,1,1]],
# 24
[[0,0,0,0,0],
[0,1,1,0,0],
[0,1,1,1,0],
[0,0,1,1,0],
[0,0,0,0,0]],
# 25
[[0,0,0,1,1],
[0,0,1,1,1],
[0,0,1,0,0],
[1,1,1,0,0],
[1,1,0,0,0]],
# 26
[[0,0,0,1,1],
[1,1,0,1,1],
[1,1,0,0,0],
[1,1,0,0,0],
[1,1,0,1,1]],
# 27
[[1,0,0,0,1],
[0,1,1,1,0],
[0,1,1,1,0],
[0,1,1,1,0],
[1,0,0,0,1]],
# 28
[[1,0,1,0,1],
[0,1,0,1,0],
[1,0,1,0,1],
[0,1,0,1,0],
[1,0,1,0,1]],
# 29
[[0,0,1,0,0],
[1,0,0,1,0],
[0,1,1,1,1],
[1,0,0,0,0],
[1,1,0,1,0]],
# 30
[[0,0,0,0,1],
[0,0,0,1,1],
[0,0,1,0,1],
[0,1,0,0,1],
[1,1,1,1,1]],
# 31
[[1,1,0,1,1],
[0,1,0,1,0],
[1,1,1,1,1],
[1,1,0,1,1],
[1,0,0,0,1]],
# 32
[[0,1,1,0,0],
[0,1,1,0,1],
[0,1,0,0,1],
[1,1,0,0,0],
[1,1,1,1,0]],
# 33
[[0,0,0,0,1],
[1,1,1,0,0],
[1,0,1,1,1],
[1,1,1,1,0],
[1,0,0,1,0]],
# 34
[[1,0,1,1,1],
[0,0,1,0,1],
[0,0,0,0,0],
[1,1,1,1,0],
[1,1,0,1,0]],
# 35
[[1,1,0,1,1],
[0,1,0,1,1],
[0,0,0,1,0],
[1,1,0,0,0],
[1,1,1,1,0]],
# 36
[[1,1,1,1,1],
[0,0,0,1,0],
[0,1,0,1,1],
[1,1,1,0,1],
[1,0,1,0,0]],
# 37
[[0,0,0,1,1],
[1,0,1,1,0],
[0,0,1,0,0],
[0,1,1,1,1],
[1,0,0,1,0]],
# 38
[[0,0,0,0,1],
[0,0,1,1,1],
[1,1,0,0,1],
[1,1,1,0,0],
[0,0,1,0,0]],
# 39
[[0,0,1,1,1],
[1,0,1,1,1],
[1,1,1,0,0],
[0,0,1,0,0],
[1,1,0,1,1]],
# 40
[[0,1,1,1,1],
[0,0,1,1,1],
[0,0,1,1,1],
[1,1,1,1,0],
[0,0,0,1,0]],
# 41
[[1,1,1,1,1],
[1,0,0,0,0],
[0,1,0,0,1],
[0,0,0,1,1],
[1,1,1,1,1]],
# 42
[[1,1,1,1,1],
[1,0,0,0,0],
[0,0,1,0,0],
[0,1,1,1,0],
[0,1,0,0,1]],
# 43
[[0,0,0,0,0],
[0,0,0,1,0],
[1,1,0,1,1],
[0,1,1,0,0],
[1,1,1,1,1]],
# 44
[[0,0,0,1,1],
[0,1,1,0,0],
[0,1,0,0,0],
[1,1,1,1,0],
[1,1,1,1,0]],
# 45
[[0,0,0,1,0],
[1,1,1,1,1],
[0,0,0,0,0],
[0,0,1,0,0],
[1,1,1,1,0]],
# 46
[[0,1,0,1,0],
[0,0,0,1,0],
[0,1,0,1,0],
[0,0,1,0,0],
[0,1,1,1,0]],
# 47
[[1,0,0,1,0],
[0,0,0,0,1],
[0,1,0,0,0],
[0,0,0,0,0],
[1,0,1,0,0]],
# 48
[[1,1,0,0,1],
[0,1,0,0,1],
[0,1,1,1,1],
[0,1,0,1,0],
[1,1,1,1,1]],
# 49
[[1,1,1,1,1],
[0,0,1,0,0],
[0,1,1,0,0],
[0,0,1,1,0],
[1,1,1,0,1]],
# 50
[[1,0,1,0,1],
[1,0,1,0,0],
[0,0,0,1,1],
[0,1,0,1,0],
[1,0,0,1,0]],
# 51
[[0,1,0,1,0],
[1,0,0,1,0],
[0,1,1,1,1],
[1,0,1,0,0],
[0,1,1,0,0]],
# 52
[[1,1,1,1,1],
[1,1,0,0,0],
[0,0,0,1,1],
[0,1,1,1,0],
[0,0,1,0,0]],
]
def start(self):
self.gcomprisBoard.level=1
self.gcomprisBoard.maxlevel=len(self.data)
self.gcomprisBoard.sublevel=1
self.gcomprisBoard.number_of_sublevel=1
# Create our rootitem. We put each canvas item in it so at the end we
# only have to kill it. The canvas deletes all the items it contains
    # automatically.
self.backroot = goocanvas.Group(parent = \
self.gcomprisBoard.canvas.get_root_item())
# A color changing background
self.background = goocanvas.Rect(
parent = self.backroot,
x = 0,
y = 0,
width = gcompris.BOARD_WIDTH,
height = 200,
fill_color_rgba = 0xFFFFFFFFL
)
svghandle = gcompris.utils.load_svg("lightsoff/back.svgz")
goocanvas.Svg(
parent = self.backroot,
svg_handle = svghandle,
svg_id = "#BACKGROUND",
pointer_events = goocanvas.EVENTS_NONE
)
# The Sun
self.sunitem = goocanvas.Svg(
parent = self.backroot,
svg_handle = svghandle,
svg_id = "#SUN",
)
self.sunitem_offset = 0
# TUX
self.tuxitem = goocanvas.Svg(
parent = self.backroot,
svg_handle = svghandle,
svg_id = "#TUX",
)
self.tuxitem.connect("button_press_event", self.solve_event)
gcompris.utils.item_focus_init(self.tuxitem, None)
    # The Bubble
self.bubbleitem = goocanvas.Svg(
parent = self.backroot,
svg_handle = svghandle,
svg_id = "#BUBBLE1",
)
self.textitem = goocanvas.Text(
parent = self.backroot,
x = gcompris.BOARD_WIDTH/2 + 80,
y = gcompris.BOARD_HEIGHT - 80,
width = 400,
font = gcompris.skin.get_font("gcompris/content"),
text = _("Switch off all the lights, I have to go to sleep.\n"
"If you need help, click on me."),
fill_color = "black",
anchor = gtk.ANCHOR_CENTER
)
# The Tipi
self.tipiitem = goocanvas.Svg(
parent = self.backroot,
svg_handle = svghandle,
svg_id = "#TIPI",
)
# The image foreground
goocanvas.Svg(
parent = self.backroot,
svg_handle = svghandle,
svg_id = "#FOREGROUND",
pointer_events = goocanvas.EVENTS_NONE
)
# Set the buttons we want in the bar
gcompris.bar_set(gcompris.BAR_LEVEL|gcompris.BAR_REPEAT)
gcompris.bar_location(gcompris.BOARD_WIDTH/2 - 90, -1, 0.6)
gcompris.bar_set_level(self.gcomprisBoard)
self.display_game()
def end(self):
self.backroot.remove()
self.backroot = None
self.rootitem = None
def ok(self):
print("lightsoff ok.")
def repeat(self):
self.display_game()
def config(self):
print("lightsoff config.")
def key_press(self, keyval, commit_str, preedit_str):
pass
def pause(self, pause):
self.board_paused = pause
    # When the bonus is displayed, it calls us first
# with pause(1) and then with pause(0)
# the game is won
if(self.gamewon == True and pause == False):
self.gamewon = False
if(self.increment_level()):
self.display_game()
def set_level(self, level):
self.gcomprisBoard.level = level
gcompris.bar_set_level(self.gcomprisBoard)
self.display_game()
  # Code that increments the sublevel and level.
  # The level is clamped at maxlevel instead of bailing out,
  # so in practice this always returns 1 to continue.
def increment_level(self):
self.gcomprisBoard.sublevel += 1
if(self.gcomprisBoard.sublevel>self.gcomprisBoard.number_of_sublevel):
# Try the next level
self.gcomprisBoard.sublevel=1
self.gcomprisBoard.level += 1
gcompris.bar_set_level(self.gcomprisBoard)
if(self.gcomprisBoard.level>self.gcomprisBoard.maxlevel):
self.gcomprisBoard.level = self.gcomprisBoard.maxlevel
return 1
def create_empty_list(self):
items = []
for x in range(5):
items.append(range(5))
for y in range(len(items)):
for x in range(len(items[0])):
items[y][x] = 0
return items
# Display the board game
def display_game(self):
# The grid we display
# It contains all the graphic items
self.items = self.create_empty_list()
# The grid of hints items
self.hints = self.create_empty_list()
# Do we display the hints
self.hints_mode = False
if self.rootitem:
self.rootitem.remove()
self.tipiitem.props.visibility = goocanvas.ITEM_INVISIBLE
self.textitem.props.visibility = goocanvas.ITEM_VISIBLE
self.tuxitem.props.visibility = goocanvas.ITEM_VISIBLE
self.bubbleitem.props.visibility = goocanvas.ITEM_VISIBLE
# Create our rootitem. We put each canvas item in it so at the end we
# only have to kill it. The canvas deletes all the items it contains
    # automatically.
self.rootitem = \
goocanvas.Group(parent = self.backroot)
svghandle = gcompris.utils.load_svg("lightsoff/onoff.svgz")
iwidth = svghandle.props.width
iheight = svghandle.props.height
gap = 10
x_start = (gcompris.BOARD_WIDTH - len(self.items) * (iwidth + gap) ) / 2
y_start = (gcompris.BOARD_HEIGHT - len(self.items[0]) \
* (iheight + gap) ) / 2 - 40
goocanvas.Rect(
parent = self.rootitem,
x = x_start - gap,
y = y_start - gap,
width = len(self.items) * (iwidth + gap) + gap,
height = len(self.items[0]) * (iheight + gap) + gap,
fill_color_rgba = 0x445533AAL,
stroke_color_rgba = 0xC0C0C0AAL,
radius_x = 10,
radius_y = 10,
line_width = 2
)
data = self.data[self.gcomprisBoard.level - 1]
for y in range(len(self.items)):
for x in range(len(self.items[0])):
item = goocanvas.Rect(
parent = self.rootitem,
x = x_start + (iwidth + gap) * x - gap/2,
y = y_start + (iheight + gap) * y - gap/2,
width = iwidth + gap,
height = iheight + gap,
stroke_color_rgba = 0xC0C0C0FAL,
fill_color_rgba = 0x5050509AL,
line_width = 2,
radius_x = 10,
radius_y = 10,
)
self.hints[y][x] = item
item.props.visibility = goocanvas.ITEM_INVISIBLE
state = data[y][x]
svg_id = "#off"
if state:
svg_id = "#on"
item = goocanvas.Svg(
parent = self.rootitem,
svg_handle = svghandle,
svg_id = svg_id,
)
item.set_data("state", state)
item.translate(x_start + (iwidth + gap) * x,
y_start + (iheight + gap) * y)
item.connect("button_press_event", self.button_press_event, [y,x])
self.items[y][x] = item
self.solve_it()
def is_on(self, item):
return item.get_data("state")
def switch(self, item):
if not item:
return
mystate = self.is_on(item)
if mystate == False:
item.set_properties(svg_id = "#on")
else:
item.set_properties(svg_id = "#off")
item.set_data("state", not mystate)
def get_item_up(self, y, x):
if y == 0:
return None
return self.items[y-1][x]
def get_item_down(self, y, x):
if y == len(self.items[0])-1:
return None
return self.items[y+1][x]
def get_item_left(self, y, x):
if x == 0:
return None
return self.items[y][x-1]
def get_item_right(self, y, x):
if x == len(self.items)-1:
return None
return self.items[y][x+1]
# Returns True when complete
def is_done(self):
for y in range(len(self.items)):
for x in range(len(self.items[0])):
if self.is_on(self.items[y][x]):
return False
return True
def button_press_event(self, widget, target, event, spot):
self.switch(target)
self.switch(self.get_item_up(spot[0], spot[1]))
self.switch(self.get_item_left(spot[0], spot[1]))
self.switch(self.get_item_right(spot[0], spot[1]))
self.switch(self.get_item_down(spot[0], spot[1]))
self.solve_it()
if self.is_done():
self.tipiitem.props.visibility = goocanvas.ITEM_VISIBLE
self.textitem.props.visibility = goocanvas.ITEM_INVISIBLE
self.tuxitem.props.visibility = goocanvas.ITEM_INVISIBLE
self.bubbleitem.props.visibility = goocanvas.ITEM_INVISIBLE
self.gamewon = True
gcompris.bonus.display(gcompris.bonus.WIN, gcompris.bonus.FLOWER)
def solution_switch(self, items, clicks, y, x):
items[y][x] = not items[y][x]
clicks[y][x] = not clicks[y][x]
if y >= 1:
items[y-1][x] = not items[y-1][x]
if y <= 3:
items[y+1][x] = not items[y+1][x]
if x >= 1:
items[y][x-1] = not items[y][x-1]
if x <= 3:
items[y][x+1] = not items[y][x+1]
def chase_light(self, items, clicks):
for y in range(1, len(items)):
for x in range(len(items[0])):
if items[y-1][x]:
self.solution_switch(items, clicks, y, x)
def is_solution_pattern(self, s, a, b, c, d, e):
if s[4][0] == a and \
s[4][1] == b and \
s[4][2] == c and \
s[4][3] == d and \
s[4][4] == e:
return True
return False
# Return False if the is no solution
def solution_wrap(self, solution, clicks):
if self.is_solution_pattern(solution, 1, 0, 0 , 0, 1):
self.solution_switch(solution, clicks, 0, 0)
self.solution_switch(solution, clicks, 0, 1)
elif self.is_solution_pattern(solution, 0, 1, 0, 1, 0):
self.solution_switch(solution, clicks, 0, 0)
self.solution_switch(solution, clicks, 0, 3)
elif self.is_solution_pattern(solution, 1, 1, 1, 0, 0):
self.solution_switch(solution, clicks, 0, 1)
elif self.is_solution_pattern(solution, 0, 0, 1, 1 , 1):
self.solution_switch(solution, clicks, 0, 3)
elif self.is_solution_pattern(solution, 1, 0, 1, 1, 0):
self.solution_switch(solution, clicks, 0, 4)
elif self.is_solution_pattern(solution, 0, 1, 1, 0, 1):
self.solution_switch(solution, clicks, 0, 0)
elif self.is_solution_pattern(solution, 1, 1, 0, 1, 1):
self.solution_switch(solution, clicks, 0, 2)
else:
return False
return True
def items2list(self, items):
list = []
for y in range(len(items[0])):
line = []
for x in range(len(items)):
if self.is_on(items[y][x]):
line.append(1)
else:
line.append(0)
list.append(line)
return list
# We check only the last line
def solution_found(self, solution):
for x in range(len(solution[0])):
if solution[4][x]:
return False
return True
def solution_length(self, clicks):
click = 0
for y in range(0, len(clicks)):
for x in range(len(clicks[0])):
if clicks[y][x]:
click += 1
return click
def solve_one(self, solution, clicks):
found = False
for index in range(0, 5):
self.chase_light( solution, clicks )
if self.solution_found(solution):
found = True
break
if not self.solution_wrap(solution, clicks):
break
if found:
return clicks
else:
return None
# Solving algorithm is the one described here:
# http://www.haar.clara.co.uk/Lights/solving.html To begin, you turn
# out all the lights on the top row, by pressing the buttons on the
# second row that are directly underneath any lit buttons on the top
  # row. The top row will then have all its lights off. Repeat this
  # step for the second, third and fourth rows (i.e. chase the lights
  # all the way down to the bottom row). This may have solved the
  # puzzle already, but it is more likely that there will now be some
  # lights left on in the bottom row. If so, there are only 7 possible
  # configurations. Depending on
# which configuration you are left with, you need to press some
# buttons in the top row. You can determine which buttons you need
# to press from the following table.
# Light on bottom row Press on this on top row
# 10001 11000
# 01010 10010
# 11100 01000
# 00111 00010
# 10110 00001
# 01101 10000
# 11011 00100
def solve_it(self):
clicks = None
# Our solving algorithm does not find the shortest solution. We
# don't really care but we'd like to keep the proposed solution
# stable (not propose a complete new solution when one light
# changes). To achieve this (closely), we test here all the
    # combinations of the first line, trying to find the shortest
# solution.
for x in range(64):
solution = self.items2list(self.items)
clicks2 = self.create_empty_list()
if x & 1:
self.solution_switch(solution, clicks2, 0, 0)
if x & 2:
self.solution_switch(solution, clicks2, 0, 1)
if x & 4:
self.solution_switch(solution, clicks2, 0, 2)
if x & 8:
self.solution_switch(solution, clicks2, 0, 3)
if x & 16:
self.solution_switch(solution, clicks2, 0, 4)
clicks2 = self.solve_one(solution, clicks2)
if clicks == None and clicks2:
clicks = clicks2
elif clicks2 and \
self.solution_length(clicks2) < self.solution_length(clicks):
clicks = clicks2
if self.hints_mode:
self.show_hints(clicks)
self.update_background(clicks)
def solve_event(self, widget, target, event):
clicks = self.create_empty_list()
self.hints_mode = not self.hints_mode
if not self.hints_mode:
self.show_hints(clicks)
else:
self.solve_it()
def update_background(self, clicks):
length = self.solution_length(clicks)
c = int(length * 0xFF / 18.0)
color = 0X33 << 24 | 0x11 << 16 | c << 8 | 0xFFL
self.background.set_properties(fill_color_rgba = color)
self.sunitem.translate(0, self.sunitem_offset)
self.sunitem_offset = length * 10
self.sunitem.translate(0, self.sunitem_offset * -1)
def show_hints(self, clicks):
for y in range(len(clicks)):
for x in range(len(clicks[0])):
if clicks[y][x]:
self.hints[y][x].props.visibility = goocanvas.ITEM_VISIBLE
else:
self.hints[y][x].props.visibility = goocanvas.ITEM_INVISIBLE
def print_sol(self, clicks):
for y in range(len(clicks)):
s = ""
for x in range(len(clicks[0])):
if clicks[y][x]:
s += "1"
else:
s += "0"
print s
print ""
| gpl-3.0 |
daikeren/opbeat_python | opbeat/utils/wrapt/importer.py | 9 | 6636 | """This module implements a post import hook mechanism styled after what is
described in PEP-369. Note that it doesn't cope with modules being reloaded.
"""
import sys
import threading
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
import importlib
from .decorators import synchronized
# The dictionary registering any post import hooks to be triggered once
# the target module has been imported. Once a module has been imported
# and the hooks fired, the list of hooks recorded against the target
# module will be truncacted but the list left in the dictionary. This
# acts as a flag to indicate that the module had already been imported.
_post_import_hooks = {}
_post_import_hooks_init = False
_post_import_hooks_lock = threading.RLock()
# Register a new post import hook for the target module name.
@synchronized(_post_import_hooks_lock)
def register_post_import_hook(hook, name):
# Automatically install the import hook finder if it has not already
# been installed.
global _post_import_hooks_init
if not _post_import_hooks_init:
_post_import_hooks_init = True
sys.meta_path.insert(0, ImportHookFinder())
# Determine if any prior registration of a post import hook for
# the target modules has occurred and act appropriately.
hooks = _post_import_hooks.get(name, None)
if hooks is None:
# No prior registration of post import hooks for the target
# module. We need to check whether the module has already been
# imported. If it has we fire the hook immediately and add an
# empty list to the registry to indicate that the module has
# already been imported and hooks have fired. Otherwise add
# the post import hook to the registry.
module = sys.modules.get(name, None)
if module is not None:
_post_import_hooks[name] = []
hook(module)
else:
_post_import_hooks[name] = [hook]
elif hooks == []:
        # A prior registration of post import hooks for the target
        # module was done and the hooks already fired. Fire the hook
        # immediately since the module has already been imported.
        module = sys.modules[name]
        hook(module)
else:
        # A prior registration of post import hooks for the target
# module was done but the module has not yet been imported.
_post_import_hooks[name].append(hook)
# Register post import hooks defined as package entry points.
def discover_post_import_hooks(group):
try:
import pkg_resources
except ImportError:
return
for entrypoint in pkg_resources.iter_entry_points(group=group):
        # Bind the loop variable as a default argument so each proxy keeps
        # its own entrypoint instead of closing over the last one.
        def proxy_post_import_hook(module, entrypoint=entrypoint):
__import__(entrypoint.module_name)
callback = sys.modules[entrypoint.module_name]
            for attr in entrypoint.attrs:
callback = getattr(callback, attr)
return callback(module)
register_post_import_hook(proxy_post_import_hook, entrypoint.name)
# Indicate that a module has been loaded. Any post import hooks which
# were registered against the target module will be invoked. If an
# exception is raised in any of the post import hooks, that will cause
# the import of the target module to fail.
@synchronized(_post_import_hooks_lock)
def notify_module_loaded(module):
name = getattr(module, '__name__', None)
hooks = _post_import_hooks.get(name, None)
if hooks:
_post_import_hooks[name] = []
for hook in hooks:
hook(module)
# A custom module import finder. This intercepts attempts to import
# modules and watches out for attempts to import target modules of
# interest. When a module of interest is imported, then any post import
# hooks which are registered will be invoked.
class _ImportHookLoader:
def load_module(self, fullname):
module = sys.modules[fullname]
notify_module_loaded(module)
return module
class _ImportHookChainedLoader:
def __init__(self, loader):
self.loader = loader
def load_module(self, fullname):
module = self.loader.load_module(fullname)
notify_module_loaded(module)
return module
class ImportHookFinder:
def __init__(self):
self.in_progress = {}
@synchronized(_post_import_hooks_lock)
def find_module(self, fullname, path=None):
# If the module being imported is not one we have registered
# post import hooks for, we can return immediately. We will
# take no further part in the importing of this module.
if not fullname in _post_import_hooks:
return None
# When we are interested in a specific module, we will call back
# into the import system a second time to defer to the import
# finder that is supposed to handle the importing of the module.
# We set an in progress flag for the target module so that on
# the second time through we don't trigger another call back
# into the import system and cause a infinite loop.
if fullname in self.in_progress:
return None
self.in_progress[fullname] = True
# Now call back into the import system again.
try:
if PY3:
# For Python 3 we need to use find_loader() from
# the importlib module. It doesn't actually
# import the target module and only finds the
# loader. If a loader is found, we need to return
# our own loader which will then in turn call the
# real loader to import the module and invoke the
# post import hooks.
loader = importlib.find_loader(fullname, path)
if loader:
return _ImportHookChainedLoader(loader)
else:
# For Python 2 we don't have much choice but to
# call back in to __import__(). This will
# actually cause the module to be imported. If no
# module could be found then ImportError will be
# raised. Otherwise we return a loader which
# returns the already loaded module and invokes
# the post import hooks.
__import__(fullname)
return _ImportHookLoader()
finally:
del self.in_progress[fullname]
# Decorator for marking that a function should be called as a post
# import hook when the target module is imported.
def when_imported(name):
def register(hook):
register_post_import_hook(hook, name)
return hook
return register
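# Illustrative usage sketch (the import path depends on where this vendored
# copy lives; the module name is an arbitrary example):
#
#   @when_imported('socket')
#   def _on_socket_import(module):
#       # patch or instrument the freshly imported module here
#       print('socket imported: %r' % module)
#
#   import socket   # fires _on_socket_import exactly once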
| bsd-3-clause |
dominjune/LintCode | Backpack.py | 3 | 2464 | """
Given n items with size A[i] and an integer m that denotes the size of a backpack, how full can you fill this backpack?
"""
__author__ = 'Danyang'
class Solution_TLE:
def backPack(self, m, A):
"""
search, brute force
:param m: An integer m denotes the size of a backpack
:param A: Given n items with size A[i]
:return: The maximum size
"""
result = [0]
self.dfs(A, 0, m, result)
return result[0]
def dfs(self, seq, cur, m, result):
if cur > m:
return
result[0] = max(result[0], cur)
if seq:
self.dfs(seq[1:], cur+seq[0], m, result)
self.dfs(seq[1:], cur, m, result)
class Solution_MLE:
def backPack(self, m, A):
"""
dp
        f[i][v] = max{f[i-1][v], f[i-1][v-A[i]] + A[i]}
:param m: An integer m denotes the size of a backpack
:param A: Given n items with size A[i]
:return: The maximum size
"""
n = len(A)
f = [[0 for _ in xrange(m+1)] for _ in xrange(n+1)] # plus 1 for dummy
for i in xrange(1, n+1):
for j in xrange(1, m+1):
# decide whether to put A[i-1]
if j-A[i-1] >= 0:
f[i][j] = max(f[i-1][j], f[i-1][j-A[i-1]]+A[i-1])
else:
f[i][j] = f[i-1][j]
return f[n][m]
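# Note: the full (n+1) x (m+1) table costs O(n*m) memory, which is what
# exceeds the memory limit (hence the _MLE suffix); the next class keeps
# only a single row.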
class Solution:
def backPack(self, m, A):
"""
        Let f[i][c] represent the maximum total size achievable using the first i items with a bag of capacity c.
        dp:
        f[i][c]=max{f[i-1][c],  # do not choose the i-th item
                    f[i-1][c-A[i]] + A[i]  # choose the i-th item
        }
        The table is then optimized to a single row:
        f[c]=max{f[c], f[c-A[i]]+A[i]}
        NEED TO KEEP A COPY OF THE (i-1) ROW, since f[c-A[i]] must be read from the previous row, not the current one.
:param m: An integer m denotes the size of a backpack
:param A: Given n items with size A[i]
:return: The maximum size
"""
n = len(A)
f = [0 for _ in xrange(m+1)] # plus 1 for dummy
for i in xrange(1, n+1):
copy = list(f)
for j in xrange(1, m+1):
# decide whether to put A[i-1]
if j-A[i-1] >= 0:
f[j] = max(copy[j], copy[j-A[i-1]]+A[i-1])
else:
f[j] = copy[j]
return f[m]
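# Note: an equivalent, common space optimization (not the version above)
# avoids copying the row by iterating capacities downward, so each item is
# still used at most once per row:
#
#     for i in xrange(1, n + 1):
#         for j in xrange(m, A[i - 1] - 1, -1):
#             f[j] = max(f[j], f[j - A[i - 1]] + A[i - 1])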
if __name__ == "__main__":
print Solution().backPack(11, [2, 3, 5, 7]) | apache-2.0 |
ChrisYammine/ChrisYammine.github.io | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/functional.py | 197 | 113704 | # -*- coding: utf-8 -*-
"""
pygments.lexers.functional
~~~~~~~~~~~~~~~~~~~~~~~~~~
Lexers for functional languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, \
String, Number, Punctuation, Literal, Generic, Error
__all__ = ['RacketLexer', 'SchemeLexer', 'CommonLispLexer', 'HaskellLexer',
'AgdaLexer', 'LiterateHaskellLexer', 'LiterateAgdaLexer',
'SMLLexer', 'OcamlLexer', 'ErlangLexer', 'ErlangShellLexer',
'OpaLexer', 'CoqLexer', 'NewLispLexer', 'ElixirLexer',
'ElixirConsoleLexer', 'KokaLexer']
line_re = re.compile('.*?\n')
class RacketLexer(RegexLexer):
"""
Lexer for `Racket <http://racket-lang.org/>`_ source code (formerly known as
PLT Scheme).
*New in Pygments 1.6.*
"""
name = 'Racket'
aliases = ['racket', 'rkt']
filenames = ['*.rkt', '*.rktl']
mimetypes = ['text/x-racket', 'application/x-racket']
# From namespace-mapped-symbols
keywords = [
'#%app', '#%datum', '#%expression', '#%module-begin',
'#%plain-app', '#%plain-lambda', '#%plain-module-begin',
'#%provide', '#%require', '#%stratified-body', '#%top',
'#%top-interaction', '#%variable-reference', '...', 'and', 'begin',
'begin-for-syntax', 'begin0', 'case', 'case-lambda', 'cond',
'datum->syntax-object', 'define', 'define-for-syntax',
'define-struct', 'define-syntax', 'define-syntax-rule',
'define-syntaxes', 'define-values', 'define-values-for-syntax',
'delay', 'do', 'expand-path', 'fluid-let', 'hash-table-copy',
'hash-table-count', 'hash-table-for-each', 'hash-table-get',
'hash-table-iterate-first', 'hash-table-iterate-key',
'hash-table-iterate-next', 'hash-table-iterate-value',
'hash-table-map', 'hash-table-put!', 'hash-table-remove!',
'hash-table?', 'if', 'lambda', 'let', 'let*', 'let*-values',
'let-struct', 'let-syntax', 'let-syntaxes', 'let-values', 'let/cc',
'let/ec', 'letrec', 'letrec-syntax', 'letrec-syntaxes',
'letrec-syntaxes+values', 'letrec-values', 'list-immutable',
'make-hash-table', 'make-immutable-hash-table', 'make-namespace',
'module', 'module-identifier=?', 'module-label-identifier=?',
'module-template-identifier=?', 'module-transformer-identifier=?',
'namespace-transformer-require', 'or', 'parameterize',
'parameterize*', 'parameterize-break', 'provide',
'provide-for-label', 'provide-for-syntax', 'quasiquote',
'quasisyntax', 'quasisyntax/loc', 'quote', 'quote-syntax',
'quote-syntax/prune', 'require', 'require-for-label',
'require-for-syntax', 'require-for-template', 'set!',
'set!-values', 'syntax', 'syntax-case', 'syntax-case*',
'syntax-id-rules', 'syntax-object->datum', 'syntax-rules',
'syntax/loc', 'time', 'transcript-off', 'transcript-on', 'unless',
'unquote', 'unquote-splicing', 'unsyntax', 'unsyntax-splicing',
'when', 'with-continuation-mark', 'with-handlers',
'with-handlers*', 'with-syntax', 'λ'
]
# From namespace-mapped-symbols
builtins = [
'*', '+', '-', '/', '<', '<=', '=', '>', '>=',
'abort-current-continuation', 'abs', 'absolute-path?', 'acos',
'add1', 'alarm-evt', 'always-evt', 'andmap', 'angle', 'append',
'apply', 'arithmetic-shift', 'arity-at-least',
'arity-at-least-value', 'arity-at-least?', 'asin', 'assoc', 'assq',
'assv', 'atan', 'banner', 'bitwise-and', 'bitwise-bit-field',
'bitwise-bit-set?', 'bitwise-ior', 'bitwise-not', 'bitwise-xor',
'boolean?', 'bound-identifier=?', 'box', 'box-immutable', 'box?',
'break-enabled', 'break-thread', 'build-path',
'build-path/convention-type', 'byte-pregexp', 'byte-pregexp?',
'byte-ready?', 'byte-regexp', 'byte-regexp?', 'byte?', 'bytes',
'bytes->immutable-bytes', 'bytes->list', 'bytes->path',
'bytes->path-element', 'bytes->string/latin-1',
'bytes->string/locale', 'bytes->string/utf-8', 'bytes-append',
'bytes-close-converter', 'bytes-convert', 'bytes-convert-end',
'bytes-converter?', 'bytes-copy', 'bytes-copy!', 'bytes-fill!',
'bytes-length', 'bytes-open-converter', 'bytes-ref', 'bytes-set!',
'bytes-utf-8-index', 'bytes-utf-8-length', 'bytes-utf-8-ref',
'bytes<?', 'bytes=?', 'bytes>?', 'bytes?', 'caaaar', 'caaadr',
'caaar', 'caadar', 'caaddr', 'caadr', 'caar', 'cadaar', 'cadadr',
'cadar', 'caddar', 'cadddr', 'caddr', 'cadr',
'call-in-nested-thread', 'call-with-break-parameterization',
'call-with-composable-continuation',
'call-with-continuation-barrier', 'call-with-continuation-prompt',
'call-with-current-continuation', 'call-with-escape-continuation',
'call-with-exception-handler',
'call-with-immediate-continuation-mark', 'call-with-input-file',
'call-with-output-file', 'call-with-parameterization',
'call-with-semaphore', 'call-with-semaphore/enable-break',
'call-with-values', 'call/cc', 'call/ec', 'car', 'cdaaar',
'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar', 'cddaar',
'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr', 'cdr',
'ceiling', 'channel-get', 'channel-put', 'channel-put-evt',
'channel-try-get', 'channel?', 'chaperone-box', 'chaperone-evt',
'chaperone-hash', 'chaperone-of?', 'chaperone-procedure',
'chaperone-struct', 'chaperone-struct-type', 'chaperone-vector',
'chaperone?', 'char->integer', 'char-alphabetic?', 'char-blank?',
'char-ci<=?', 'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?',
'char-downcase', 'char-foldcase', 'char-general-category',
'char-graphic?', 'char-iso-control?', 'char-lower-case?',
'char-numeric?', 'char-punctuation?', 'char-ready?',
'char-symbolic?', 'char-title-case?', 'char-titlecase',
'char-upcase', 'char-upper-case?', 'char-utf-8-length',
'char-whitespace?', 'char<=?', 'char<?', 'char=?', 'char>=?',
'char>?', 'char?', 'check-duplicate-identifier',
'checked-procedure-check-and-extract', 'choice-evt',
'cleanse-path', 'close-input-port', 'close-output-port',
'collect-garbage', 'collection-file-path', 'collection-path',
'compile', 'compile-allow-set!-undefined',
'compile-context-preservation-enabled',
'compile-enforce-module-constants', 'compile-syntax',
'compiled-expression?', 'compiled-module-expression?',
'complete-path?', 'complex?', 'cons',
'continuation-mark-set->context', 'continuation-mark-set->list',
'continuation-mark-set->list*', 'continuation-mark-set-first',
'continuation-mark-set?', 'continuation-marks',
'continuation-prompt-available?', 'continuation-prompt-tag?',
'continuation?', 'copy-file', 'cos',
'current-break-parameterization', 'current-code-inspector',
'current-command-line-arguments', 'current-compile',
'current-continuation-marks', 'current-custodian',
'current-directory', 'current-drive', 'current-error-port',
'current-eval', 'current-evt-pseudo-random-generator',
'current-gc-milliseconds', 'current-get-interaction-input-port',
'current-inexact-milliseconds', 'current-input-port',
'current-inspector', 'current-library-collection-paths',
'current-load', 'current-load-extension',
'current-load-relative-directory', 'current-load/use-compiled',
'current-locale', 'current-memory-use', 'current-milliseconds',
'current-module-declare-name', 'current-module-declare-source',
'current-module-name-resolver', 'current-namespace',
'current-output-port', 'current-parameterization',
'current-preserved-thread-cell-values', 'current-print',
'current-process-milliseconds', 'current-prompt-read',
'current-pseudo-random-generator', 'current-read-interaction',
'current-reader-guard', 'current-readtable', 'current-seconds',
'current-security-guard', 'current-subprocess-custodian-mode',
'current-thread', 'current-thread-group',
'current-thread-initial-stack-size',
'current-write-relative-directory', 'custodian-box-value',
'custodian-box?', 'custodian-limit-memory',
'custodian-managed-list', 'custodian-memory-accounting-available?',
'custodian-require-memory', 'custodian-shutdown-all', 'custodian?',
'custom-print-quotable-accessor', 'custom-print-quotable?',
'custom-write-accessor', 'custom-write?', 'date', 'date*',
'date*-nanosecond', 'date*-time-zone-name', 'date*?', 'date-day',
'date-dst?', 'date-hour', 'date-minute', 'date-month',
'date-second', 'date-time-zone-offset', 'date-week-day',
'date-year', 'date-year-day', 'date?', 'datum-intern-literal',
'default-continuation-prompt-tag', 'delete-directory',
'delete-file', 'denominator', 'directory-exists?',
'directory-list', 'display', 'displayln', 'dump-memory-stats',
'dynamic-require', 'dynamic-require-for-syntax', 'dynamic-wind',
'eof', 'eof-object?', 'ephemeron-value', 'ephemeron?', 'eprintf',
'eq-hash-code', 'eq?', 'equal-hash-code',
'equal-secondary-hash-code', 'equal?', 'equal?/recur',
'eqv-hash-code', 'eqv?', 'error', 'error-display-handler',
'error-escape-handler', 'error-print-context-length',
'error-print-source-location', 'error-print-width',
'error-value->string-handler', 'eval', 'eval-jit-enabled',
'eval-syntax', 'even?', 'evt?', 'exact->inexact', 'exact-integer?',
'exact-nonnegative-integer?', 'exact-positive-integer?', 'exact?',
'executable-yield-handler', 'exit', 'exit-handler', 'exn',
'exn-continuation-marks', 'exn-message', 'exn:break',
'exn:break-continuation', 'exn:break?', 'exn:fail',
'exn:fail:contract', 'exn:fail:contract:arity',
'exn:fail:contract:arity?', 'exn:fail:contract:continuation',
'exn:fail:contract:continuation?',
'exn:fail:contract:divide-by-zero',
'exn:fail:contract:divide-by-zero?',
'exn:fail:contract:non-fixnum-result',
'exn:fail:contract:non-fixnum-result?',
'exn:fail:contract:variable', 'exn:fail:contract:variable-id',
'exn:fail:contract:variable?', 'exn:fail:contract?',
'exn:fail:filesystem', 'exn:fail:filesystem:exists',
'exn:fail:filesystem:exists?', 'exn:fail:filesystem:version',
'exn:fail:filesystem:version?', 'exn:fail:filesystem?',
'exn:fail:network', 'exn:fail:network?', 'exn:fail:out-of-memory',
'exn:fail:out-of-memory?', 'exn:fail:read',
'exn:fail:read-srclocs', 'exn:fail:read:eof', 'exn:fail:read:eof?',
'exn:fail:read:non-char', 'exn:fail:read:non-char?',
'exn:fail:read?', 'exn:fail:syntax', 'exn:fail:syntax-exprs',
'exn:fail:syntax:unbound', 'exn:fail:syntax:unbound?',
'exn:fail:syntax?', 'exn:fail:unsupported',
'exn:fail:unsupported?', 'exn:fail:user', 'exn:fail:user?',
'exn:fail?', 'exn:srclocs-accessor', 'exn:srclocs?', 'exn?', 'exp',
'expand', 'expand-once', 'expand-syntax', 'expand-syntax-once',
'expand-syntax-to-top-form', 'expand-to-top-form',
'expand-user-path', 'expt', 'file-exists?',
'file-or-directory-identity', 'file-or-directory-modify-seconds',
'file-or-directory-permissions', 'file-position', 'file-size',
'file-stream-buffer-mode', 'file-stream-port?',
'filesystem-root-list', 'find-executable-path',
'find-library-collection-paths', 'find-system-path', 'fixnum?',
'floating-point-bytes->real', 'flonum?', 'floor', 'flush-output',
'for-each', 'force', 'format', 'fprintf', 'free-identifier=?',
'gcd', 'generate-temporaries', 'gensym', 'get-output-bytes',
'get-output-string', 'getenv', 'global-port-print-handler',
'guard-evt', 'handle-evt', 'handle-evt?', 'hash', 'hash-equal?',
'hash-eqv?', 'hash-has-key?', 'hash-placeholder?', 'hash-ref!',
'hasheq', 'hasheqv', 'identifier-binding',
'identifier-label-binding', 'identifier-prune-lexical-context',
'identifier-prune-to-source-module',
'identifier-remove-from-definition-context',
'identifier-template-binding', 'identifier-transformer-binding',
'identifier?', 'imag-part', 'immutable?', 'impersonate-box',
'impersonate-hash', 'impersonate-procedure', 'impersonate-struct',
'impersonate-vector', 'impersonator-of?',
'impersonator-prop:application-mark',
'impersonator-property-accessor-procedure?',
'impersonator-property?', 'impersonator?', 'inexact->exact',
'inexact-real?', 'inexact?', 'input-port?', 'inspector?',
'integer->char', 'integer->integer-bytes',
'integer-bytes->integer', 'integer-length', 'integer-sqrt',
'integer-sqrt/remainder', 'integer?',
'internal-definition-context-seal', 'internal-definition-context?',
'keyword->string', 'keyword<?', 'keyword?', 'kill-thread', 'lcm',
'length', 'liberal-define-context?', 'link-exists?', 'list',
'list*', 'list->bytes', 'list->string', 'list->vector', 'list-ref',
'list-tail', 'list?', 'load', 'load-extension',
'load-on-demand-enabled', 'load-relative',
'load-relative-extension', 'load/cd', 'load/use-compiled',
'local-expand', 'local-expand/capture-lifts',
'local-transformer-expand',
'local-transformer-expand/capture-lifts', 'locale-string-encoding',
'log', 'magnitude', 'make-arity-at-least', 'make-bytes',
'make-channel', 'make-continuation-prompt-tag', 'make-custodian',
'make-custodian-box', 'make-date', 'make-date*',
'make-derived-parameter', 'make-directory', 'make-ephemeron',
'make-exn', 'make-exn:break', 'make-exn:fail',
'make-exn:fail:contract', 'make-exn:fail:contract:arity',
'make-exn:fail:contract:continuation',
'make-exn:fail:contract:divide-by-zero',
'make-exn:fail:contract:non-fixnum-result',
'make-exn:fail:contract:variable', 'make-exn:fail:filesystem',
'make-exn:fail:filesystem:exists',
'make-exn:fail:filesystem:version', 'make-exn:fail:network',
'make-exn:fail:out-of-memory', 'make-exn:fail:read',
'make-exn:fail:read:eof', 'make-exn:fail:read:non-char',
'make-exn:fail:syntax', 'make-exn:fail:syntax:unbound',
'make-exn:fail:unsupported', 'make-exn:fail:user',
'make-file-or-directory-link', 'make-hash-placeholder',
'make-hasheq-placeholder', 'make-hasheqv',
'make-hasheqv-placeholder', 'make-immutable-hasheqv',
'make-impersonator-property', 'make-input-port', 'make-inspector',
'make-known-char-range-list', 'make-output-port', 'make-parameter',
'make-pipe', 'make-placeholder', 'make-polar',
'make-prefab-struct', 'make-pseudo-random-generator',
'make-reader-graph', 'make-readtable', 'make-rectangular',
'make-rename-transformer', 'make-resolved-module-path',
'make-security-guard', 'make-semaphore', 'make-set!-transformer',
'make-shared-bytes', 'make-sibling-inspector',
'make-special-comment', 'make-srcloc', 'make-string',
'make-struct-field-accessor', 'make-struct-field-mutator',
'make-struct-type', 'make-struct-type-property',
'make-syntax-delta-introducer', 'make-syntax-introducer',
'make-thread-cell', 'make-thread-group', 'make-vector',
'make-weak-box', 'make-weak-hasheqv', 'make-will-executor', 'map',
'max', 'mcar', 'mcdr', 'mcons', 'member', 'memq', 'memv', 'min',
'module->exports', 'module->imports', 'module->language-info',
'module->namespace', 'module-compiled-exports',
'module-compiled-imports', 'module-compiled-language-info',
'module-compiled-name', 'module-path-index-join',
'module-path-index-resolve', 'module-path-index-split',
'module-path-index?', 'module-path?', 'module-predefined?',
'module-provide-protected?', 'modulo', 'mpair?', 'nack-guard-evt',
'namespace-attach-module', 'namespace-attach-module-declaration',
'namespace-base-phase', 'namespace-mapped-symbols',
'namespace-module-identifier', 'namespace-module-registry',
'namespace-require', 'namespace-require/constant',
'namespace-require/copy', 'namespace-require/expansion-time',
'namespace-set-variable-value!', 'namespace-symbol->identifier',
'namespace-syntax-introduce', 'namespace-undefine-variable!',
'namespace-unprotect-module', 'namespace-variable-value',
'namespace?', 'negative?', 'never-evt', 'newline',
'normal-case-path', 'not', 'null', 'null?', 'number->string',
'number?', 'numerator', 'object-name', 'odd?', 'open-input-bytes',
'open-input-file', 'open-input-output-file', 'open-input-string',
'open-output-bytes', 'open-output-file', 'open-output-string',
'ormap', 'output-port?', 'pair?', 'parameter-procedure=?',
'parameter?', 'parameterization?', 'path->bytes',
'path->complete-path', 'path->directory-path', 'path->string',
'path-add-suffix', 'path-convention-type', 'path-element->bytes',
'path-element->string', 'path-for-some-system?',
'path-list-string->path-list', 'path-replace-suffix',
'path-string?', 'path?', 'peek-byte', 'peek-byte-or-special',
'peek-bytes', 'peek-bytes!', 'peek-bytes-avail!',
'peek-bytes-avail!*', 'peek-bytes-avail!/enable-break',
'peek-char', 'peek-char-or-special', 'peek-string', 'peek-string!',
'pipe-content-length', 'placeholder-get', 'placeholder-set!',
'placeholder?', 'poll-guard-evt', 'port-closed-evt',
'port-closed?', 'port-commit-peeked', 'port-count-lines!',
'port-count-lines-enabled', 'port-display-handler',
'port-file-identity', 'port-file-unlock', 'port-next-location',
'port-print-handler', 'port-progress-evt',
'port-provides-progress-evts?', 'port-read-handler',
'port-try-file-lock?', 'port-write-handler', 'port-writes-atomic?',
'port-writes-special?', 'port?', 'positive?',
'prefab-key->struct-type', 'prefab-struct-key', 'pregexp',
'pregexp?', 'primitive-closure?', 'primitive-result-arity',
'primitive?', 'print', 'print-as-expression',
'print-boolean-long-form', 'print-box', 'print-graph',
'print-hash-table', 'print-mpair-curly-braces',
'print-pair-curly-braces', 'print-reader-abbreviations',
'print-struct', 'print-syntax-width', 'print-unreadable',
'print-vector-length', 'printf', 'procedure->method',
'procedure-arity', 'procedure-arity-includes?', 'procedure-arity?',
'procedure-closure-contents-eq?', 'procedure-extract-target',
'procedure-reduce-arity', 'procedure-rename',
'procedure-struct-type?', 'procedure?', 'promise?',
'prop:arity-string', 'prop:checked-procedure',
'prop:custom-print-quotable', 'prop:custom-write',
'prop:equal+hash', 'prop:evt', 'prop:exn:srclocs',
'prop:impersonator-of', 'prop:input-port',
'prop:liberal-define-context', 'prop:output-port',
'prop:procedure', 'prop:rename-transformer',
'prop:set!-transformer', 'pseudo-random-generator->vector',
'pseudo-random-generator-vector?', 'pseudo-random-generator?',
'putenv', 'quotient', 'quotient/remainder', 'raise',
'raise-arity-error', 'raise-mismatch-error', 'raise-syntax-error',
'raise-type-error', 'raise-user-error', 'random', 'random-seed',
'rational?', 'rationalize', 'read', 'read-accept-bar-quote',
'read-accept-box', 'read-accept-compiled', 'read-accept-dot',
'read-accept-graph', 'read-accept-infix-dot', 'read-accept-lang',
'read-accept-quasiquote', 'read-accept-reader', 'read-byte',
'read-byte-or-special', 'read-bytes', 'read-bytes!',
'read-bytes-avail!', 'read-bytes-avail!*',
'read-bytes-avail!/enable-break', 'read-bytes-line',
'read-case-sensitive', 'read-char', 'read-char-or-special',
'read-curly-brace-as-paren', 'read-decimal-as-inexact',
'read-eval-print-loop', 'read-language', 'read-line',
'read-on-demand-source', 'read-square-bracket-as-paren',
'read-string', 'read-string!', 'read-syntax',
'read-syntax/recursive', 'read/recursive', 'readtable-mapping',
'readtable?', 'real->double-flonum', 'real->floating-point-bytes',
'real->single-flonum', 'real-part', 'real?', 'regexp',
'regexp-match', 'regexp-match-peek', 'regexp-match-peek-immediate',
'regexp-match-peek-positions',
'regexp-match-peek-positions-immediate',
'regexp-match-peek-positions-immediate/end',
'regexp-match-peek-positions/end', 'regexp-match-positions',
'regexp-match-positions/end', 'regexp-match/end', 'regexp-match?',
'regexp-max-lookbehind', 'regexp-replace', 'regexp-replace*',
'regexp?', 'relative-path?', 'remainder',
'rename-file-or-directory', 'rename-transformer-target',
'rename-transformer?', 'resolve-path', 'resolved-module-path-name',
'resolved-module-path?', 'reverse', 'round', 'seconds->date',
'security-guard?', 'semaphore-peek-evt', 'semaphore-post',
'semaphore-try-wait?', 'semaphore-wait',
'semaphore-wait/enable-break', 'semaphore?',
'set!-transformer-procedure', 'set!-transformer?', 'set-box!',
'set-mcar!', 'set-mcdr!', 'set-port-next-location!',
'shared-bytes', 'shell-execute', 'simplify-path', 'sin',
'single-flonum?', 'sleep', 'special-comment-value',
'special-comment?', 'split-path', 'sqrt', 'srcloc',
'srcloc-column', 'srcloc-line', 'srcloc-position', 'srcloc-source',
'srcloc-span', 'srcloc?', 'string', 'string->bytes/latin-1',
'string->bytes/locale', 'string->bytes/utf-8',
'string->immutable-string', 'string->keyword', 'string->list',
'string->number', 'string->path', 'string->path-element',
'string->symbol', 'string->uninterned-symbol',
'string->unreadable-symbol', 'string-append', 'string-ci<=?',
'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?',
'string-copy', 'string-copy!', 'string-downcase', 'string-fill!',
'string-foldcase', 'string-length', 'string-locale-ci<?',
'string-locale-ci=?', 'string-locale-ci>?',
'string-locale-downcase', 'string-locale-upcase',
'string-locale<?', 'string-locale=?', 'string-locale>?',
'string-normalize-nfc', 'string-normalize-nfd',
'string-normalize-nfkc', 'string-normalize-nfkd', 'string-ref',
'string-set!', 'string-titlecase', 'string-upcase',
'string-utf-8-length', 'string<=?', 'string<?', 'string=?',
'string>=?', 'string>?', 'string?', 'struct->vector',
'struct-accessor-procedure?', 'struct-constructor-procedure?',
'struct-info', 'struct-mutator-procedure?',
'struct-predicate-procedure?', 'struct-type-info',
'struct-type-make-constructor', 'struct-type-make-predicate',
'struct-type-property-accessor-procedure?',
'struct-type-property?', 'struct-type?', 'struct:arity-at-least',
'struct:date', 'struct:date*', 'struct:exn', 'struct:exn:break',
'struct:exn:fail', 'struct:exn:fail:contract',
'struct:exn:fail:contract:arity',
'struct:exn:fail:contract:continuation',
'struct:exn:fail:contract:divide-by-zero',
'struct:exn:fail:contract:non-fixnum-result',
'struct:exn:fail:contract:variable', 'struct:exn:fail:filesystem',
'struct:exn:fail:filesystem:exists',
'struct:exn:fail:filesystem:version', 'struct:exn:fail:network',
'struct:exn:fail:out-of-memory', 'struct:exn:fail:read',
'struct:exn:fail:read:eof', 'struct:exn:fail:read:non-char',
'struct:exn:fail:syntax', 'struct:exn:fail:syntax:unbound',
'struct:exn:fail:unsupported', 'struct:exn:fail:user',
'struct:srcloc', 'struct?', 'sub1', 'subbytes', 'subprocess',
'subprocess-group-enabled', 'subprocess-kill', 'subprocess-pid',
'subprocess-status', 'subprocess-wait', 'subprocess?', 'substring',
'symbol->string', 'symbol-interned?', 'symbol-unreadable?',
'symbol?', 'sync', 'sync/enable-break', 'sync/timeout',
'sync/timeout/enable-break', 'syntax->list', 'syntax-arm',
'syntax-column', 'syntax-disarm', 'syntax-e', 'syntax-line',
'syntax-local-bind-syntaxes', 'syntax-local-certifier',
'syntax-local-context', 'syntax-local-expand-expression',
'syntax-local-get-shadower', 'syntax-local-introduce',
'syntax-local-lift-context', 'syntax-local-lift-expression',
'syntax-local-lift-module-end-declaration',
'syntax-local-lift-provide', 'syntax-local-lift-require',
'syntax-local-lift-values-expression',
'syntax-local-make-definition-context',
'syntax-local-make-delta-introducer',
'syntax-local-module-defined-identifiers',
'syntax-local-module-exports',
'syntax-local-module-required-identifiers', 'syntax-local-name',
'syntax-local-phase-level',
'syntax-local-transforming-module-provides?', 'syntax-local-value',
'syntax-local-value/immediate', 'syntax-original?',
'syntax-position', 'syntax-property',
'syntax-property-symbol-keys', 'syntax-protect', 'syntax-rearm',
'syntax-recertify', 'syntax-shift-phase-level', 'syntax-source',
'syntax-source-module', 'syntax-span', 'syntax-taint',
'syntax-tainted?', 'syntax-track-origin',
'syntax-transforming-module-expression?', 'syntax-transforming?',
'syntax?', 'system-big-endian?', 'system-idle-evt',
'system-language+country', 'system-library-subpath',
'system-path-convention-type', 'system-type', 'tan',
'tcp-abandon-port', 'tcp-accept', 'tcp-accept-evt',
'tcp-accept-ready?', 'tcp-accept/enable-break', 'tcp-addresses',
'tcp-close', 'tcp-connect', 'tcp-connect/enable-break',
'tcp-listen', 'tcp-listener?', 'tcp-port?', 'terminal-port?',
'thread', 'thread-cell-ref', 'thread-cell-set!', 'thread-cell?',
'thread-dead-evt', 'thread-dead?', 'thread-group?',
'thread-resume', 'thread-resume-evt', 'thread-rewind-receive',
'thread-running?', 'thread-suspend', 'thread-suspend-evt',
'thread-wait', 'thread/suspend-to-kill', 'thread?', 'time-apply',
'truncate', 'udp-addresses', 'udp-bind!', 'udp-bound?',
'udp-close', 'udp-connect!', 'udp-connected?', 'udp-open-socket',
'udp-receive!', 'udp-receive!*', 'udp-receive!-evt',
'udp-receive!/enable-break', 'udp-receive-ready-evt', 'udp-send',
'udp-send*', 'udp-send-evt', 'udp-send-ready-evt', 'udp-send-to',
'udp-send-to*', 'udp-send-to-evt', 'udp-send-to/enable-break',
'udp-send/enable-break', 'udp?', 'unbox',
'uncaught-exception-handler', 'use-collection-link-paths',
'use-compiled-file-paths', 'use-user-specific-search-paths',
'values', 'variable-reference->empty-namespace',
'variable-reference->module-base-phase',
'variable-reference->module-declaration-inspector',
'variable-reference->module-source',
'variable-reference->namespace', 'variable-reference->phase',
'variable-reference->resolved-module-path',
'variable-reference-constant?', 'variable-reference?', 'vector',
'vector->immutable-vector', 'vector->list',
'vector->pseudo-random-generator',
'vector->pseudo-random-generator!', 'vector->values',
'vector-fill!', 'vector-immutable', 'vector-length', 'vector-ref',
'vector-set!', 'vector-set-performance-stats!', 'vector?',
'version', 'void', 'void?', 'weak-box-value', 'weak-box?',
'will-execute', 'will-executor?', 'will-register',
'will-try-execute', 'with-input-from-file', 'with-output-to-file',
'wrap-evt', 'write', 'write-byte', 'write-bytes',
'write-bytes-avail', 'write-bytes-avail*', 'write-bytes-avail-evt',
'write-bytes-avail/enable-break', 'write-char', 'write-special',
'write-special-avail*', 'write-special-evt', 'write-string', 'zero?'
]
# From SchemeLexer
valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+'
tokens = {
'root' : [
(r';.*$', Comment.Single),
(r'#\|[^|]+\|#', Comment.Multiline),
# whitespaces - usually not relevant
(r'\s+', Text),
## numbers: Keep in mind Racket reader hash prefixes,
## which can denote the base or the type. These don't map
## neatly onto pygments token types; some judgment calls
## here. Note that none of these regexps attempt to
## exclude identifiers that start with a number, such as a
## variable named "100-Continue".
# #b
(r'#b[-+]?[01]+\.[01]+', Number.Float),
(r'#b[01]+e[-+]?[01]+', Number.Float),
(r'#b[-+]?[01]/[01]+', Number),
(r'#b[-+]?[01]+', Number.Integer),
(r'#b\S*', Error),
# #d OR no hash prefix
(r'(#d)?[-+]?\d+\.\d+', Number.Float),
(r'(#d)?\d+e[-+]?\d+', Number.Float),
(r'(#d)?[-+]?\d+/\d+', Number),
(r'(#d)?[-+]?\d+', Number.Integer),
(r'#d\S*', Error),
# #e
(r'#e[-+]?\d+\.\d+', Number.Float),
(r'#e\d+e[-+]?\d+', Number.Float),
(r'#e[-+]?\d+/\d+', Number),
(r'#e[-+]?\d+', Number),
(r'#e\S*', Error),
# #i is always inexact-real, i.e. float
(r'#i[-+]?\d+\.\d+', Number.Float),
(r'#i\d+e[-+]?\d+', Number.Float),
(r'#i[-+]?\d+/\d+', Number.Float),
(r'#i[-+]?\d+', Number.Float),
(r'#i\S*', Error),
# #o
(r'#o[-+]?[0-7]+\.[0-7]+', Number.Oct),
(r'#o[0-7]+e[-+]?[0-7]+', Number.Oct),
(r'#o[-+]?[0-7]+/[0-7]+', Number.Oct),
(r'#o[-+]?[0-7]+', Number.Oct),
(r'#o\S*', Error),
# #x
(r'#x[-+]?[0-9a-fA-F]+\.[0-9a-fA-F]+', Number.Hex),
# the exponent variation (e.g. #x1e1) is N/A
(r'#x[-+]?[0-9a-fA-F]+/[0-9a-fA-F]+', Number.Hex),
(r'#x[-+]?[0-9a-fA-F]+', Number.Hex),
(r'#x\S*', Error),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\"._!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char),
(r'#rx".+"', String.Regex),
(r'#px".+"', String.Regex),
# constants
(r'(#t|#f)', Name.Constant),
# keyword argument names (e.g. #:keyword)
(r'#:\S+', Keyword.Declaration),
# #lang
(r'#lang \S+', Keyword.Namespace),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join([
re.escape(entry) + ' ' for entry in keywords]),
Keyword
),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join([
re.escape(entry) + ' ' for entry in builtins]),
Name.Builtin
),
# the remaining functions; handle both ( and [
(r'(?<=(\(|\[|\{))' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\)|\[|\]|\{|\})', Punctuation),
],
}
class SchemeLexer(RegexLexer):
"""
A Scheme lexer, parsing a stream and outputting the tokens
needed to highlight scheme code.
    This lexer could most probably easily be subclassed to parse
    other LISP dialects like Common Lisp, Emacs Lisp or AutoLisp.
This parser is checked with pastes from the LISP pastebin
at http://paste.lisp.org/ to cover as much syntax as possible.
It supports the full Scheme syntax as defined in R5RS.
*New in Pygments 0.6.*
"""
name = 'Scheme'
aliases = ['scheme', 'scm']
filenames = ['*.scm', '*.ss']
mimetypes = ['text/x-scheme', 'application/x-scheme']
    # list of known keywords and builtins taken from vim 6.4 scheme.vim
# syntax file.
keywords = [
'lambda', 'define', 'if', 'else', 'cond', 'and', 'or', 'case', 'let',
'let*', 'letrec', 'begin', 'do', 'delay', 'set!', '=>', 'quote',
'quasiquote', 'unquote', 'unquote-splicing', 'define-syntax',
'let-syntax', 'letrec-syntax', 'syntax-rules'
]
builtins = [
'*', '+', '-', '/', '<', '<=', '=', '>', '>=', 'abs', 'acos', 'angle',
'append', 'apply', 'asin', 'assoc', 'assq', 'assv', 'atan',
'boolean?', 'caaaar', 'caaadr', 'caaar', 'caadar', 'caaddr', 'caadr',
'caar', 'cadaar', 'cadadr', 'cadar', 'caddar', 'cadddr', 'caddr',
'cadr', 'call-with-current-continuation', 'call-with-input-file',
'call-with-output-file', 'call-with-values', 'call/cc', 'car',
'cdaaar', 'cdaadr', 'cdaar', 'cdadar', 'cdaddr', 'cdadr', 'cdar',
'cddaar', 'cddadr', 'cddar', 'cdddar', 'cddddr', 'cdddr', 'cddr',
'cdr', 'ceiling', 'char->integer', 'char-alphabetic?', 'char-ci<=?',
'char-ci<?', 'char-ci=?', 'char-ci>=?', 'char-ci>?', 'char-downcase',
'char-lower-case?', 'char-numeric?', 'char-ready?', 'char-upcase',
'char-upper-case?', 'char-whitespace?', 'char<=?', 'char<?', 'char=?',
'char>=?', 'char>?', 'char?', 'close-input-port', 'close-output-port',
'complex?', 'cons', 'cos', 'current-input-port', 'current-output-port',
'denominator', 'display', 'dynamic-wind', 'eof-object?', 'eq?',
'equal?', 'eqv?', 'eval', 'even?', 'exact->inexact', 'exact?', 'exp',
'expt', 'floor', 'for-each', 'force', 'gcd', 'imag-part',
'inexact->exact', 'inexact?', 'input-port?', 'integer->char',
'integer?', 'interaction-environment', 'lcm', 'length', 'list',
'list->string', 'list->vector', 'list-ref', 'list-tail', 'list?',
'load', 'log', 'magnitude', 'make-polar', 'make-rectangular',
'make-string', 'make-vector', 'map', 'max', 'member', 'memq', 'memv',
'min', 'modulo', 'negative?', 'newline', 'not', 'null-environment',
'null?', 'number->string', 'number?', 'numerator', 'odd?',
'open-input-file', 'open-output-file', 'output-port?', 'pair?',
'peek-char', 'port?', 'positive?', 'procedure?', 'quotient',
'rational?', 'rationalize', 'read', 'read-char', 'real-part', 'real?',
'remainder', 'reverse', 'round', 'scheme-report-environment',
'set-car!', 'set-cdr!', 'sin', 'sqrt', 'string', 'string->list',
'string->number', 'string->symbol', 'string-append', 'string-ci<=?',
'string-ci<?', 'string-ci=?', 'string-ci>=?', 'string-ci>?',
'string-copy', 'string-fill!', 'string-length', 'string-ref',
'string-set!', 'string<=?', 'string<?', 'string=?', 'string>=?',
'string>?', 'string?', 'substring', 'symbol->string', 'symbol?',
'tan', 'transcript-off', 'transcript-on', 'truncate', 'values',
'vector', 'vector->list', 'vector-fill!', 'vector-length',
'vector-ref', 'vector-set!', 'vector?', 'with-input-from-file',
'with-output-to-file', 'write', 'write-char', 'zero?'
]
# valid names for identifiers
    # well, names just can't consist entirely of numbers
# but this should be good enough for now
valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~|-]+'
tokens = {
'root' : [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
#(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"#\\([()/'\"._!§$%& ?=+-]{1}|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join([
re.escape(entry) + ' ' for entry in keywords]),
Keyword
),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join([
re.escape(entry) + ' ' for entry in builtins]),
Name.Builtin
),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# the famous parentheses!
(r'(\(|\))', Punctuation),
(r'(\[|\])', Punctuation),
],
}
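# A minimal usage sketch (not part of the vendored file): these RegexLexer
# subclasses plug into the standard Pygments highlight() pipeline.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print highlight("(define (sq x) (* x x))", SchemeLexer(),
#                     TerminalFormatter())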
class CommonLispLexer(RegexLexer):
"""
A Common Lisp lexer.
*New in Pygments 0.9.*
"""
name = 'Common Lisp'
aliases = ['common-lisp', 'cl', 'lisp']
filenames = ['*.cl', '*.lisp', '*.el'] # use for Elisp too
mimetypes = ['text/x-common-lisp']
flags = re.IGNORECASE | re.MULTILINE
### couple of useful regexes
# characters that are not macro-characters and can be used to begin a symbol
nonmacro = r'\\.|[a-zA-Z0-9!$%&*+-/<=>?@\[\]^_{}~]'
constituent = nonmacro + '|[#.:]'
terminated = r'(?=[ "()\'\n,;`])' # whitespace or terminating macro characters
### symbol token, reverse-engineered from hyperspec
# Take a deep breath...
symbol = r'(\|[^|]+\||(?:%s)(?:%s)*)' % (nonmacro, constituent)
def __init__(self, **options):
from pygments.lexers._clbuiltins import BUILTIN_FUNCTIONS, \
SPECIAL_FORMS, MACROS, LAMBDA_LIST_KEYWORDS, DECLARATIONS, \
BUILTIN_TYPES, BUILTIN_CLASSES
self.builtin_function = BUILTIN_FUNCTIONS
self.special_forms = SPECIAL_FORMS
self.macros = MACROS
self.lambda_list_keywords = LAMBDA_LIST_KEYWORDS
self.declarations = DECLARATIONS
self.builtin_types = BUILTIN_TYPES
self.builtin_classes = BUILTIN_CLASSES
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
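        # Post-process the raw token stream: identifiers tagged as plain
        # Name.Variable by the regex rules are re-tagged with a more
        # specific token type when they match a known builtin function,
        # special form, macro, lambda-list keyword, declaration, builtin
        # type or builtin class.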
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Variable:
if value in self.builtin_function:
yield index, Name.Builtin, value
continue
if value in self.special_forms:
yield index, Keyword, value
continue
if value in self.macros:
yield index, Name.Builtin, value
continue
if value in self.lambda_list_keywords:
yield index, Keyword, value
continue
if value in self.declarations:
yield index, Keyword, value
continue
if value in self.builtin_types:
yield index, Keyword.Type, value
continue
if value in self.builtin_classes:
yield index, Name.Class, value
continue
yield index, token, value
tokens = {
'root' : [
('', Text, 'body'),
],
'multiline-comment' : [
(r'#\|', Comment.Multiline, '#push'), # (cf. Hyperspec 2.4.8.19)
(r'\|#', Comment.Multiline, '#pop'),
(r'[^|#]+', Comment.Multiline),
(r'[|#]', Comment.Multiline),
],
'commented-form' : [
(r'\(', Comment.Preproc, '#push'),
(r'\)', Comment.Preproc, '#pop'),
(r'[^()]+', Comment.Preproc),
],
'body' : [
# whitespace
(r'\s+', Text),
# single-line comment
(r';.*$', Comment.Single),
# multi-line comment
(r'#\|', Comment.Multiline, 'multiline-comment'),
# encoding comment (?)
(r'#\d*Y.*$', Comment.Special),
# strings and characters
(r'"(\\.|\\\n|[^"\\])*"', String),
# quoting
(r":" + symbol, String.Symbol),
(r"::" + symbol, String.Symbol),
(r":#" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
# decimal numbers
(r'[-+]?\d+\.?' + terminated, Number.Integer),
(r'[-+]?\d+/\d+' + terminated, Number),
(r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' \
+ terminated, Number.Float),
# sharpsign strings and characters
(r"#\\." + terminated, String.Char),
(r"#\\" + symbol, String.Char),
# vector
(r'#\(', Operator, 'body'),
# bitstring
(r'#\d*\*[01]*', Literal.Other),
# uninterned symbol
(r'#:' + symbol, String.Symbol),
# read-time and load-time evaluation
(r'#[.,]', Operator),
# function shorthand
(r'#\'', Name.Function),
# binary rational
(r'#[bB][+-]?[01]+(/[01]+)?', Number),
# octal rational
(r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct),
# hex rational
(r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex),
# radix rational
(r'#\d+[rR][+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number),
# complex
(r'(#[cC])(\()', bygroups(Number, Punctuation), 'body'),
# array
(r'(#\d+[aA])(\()', bygroups(Literal.Other, Punctuation), 'body'),
# structure
(r'(#[sS])(\()', bygroups(Literal.Other, Punctuation), 'body'),
# path
(r'#[pP]?"(\\.|[^"])*"', Literal.Other),
# reference
(r'#\d+=', Operator),
(r'#\d+#', Operator),
# read-time comment
            (r'#+nil' + terminated + r'\s*\(', Comment.Preproc, 'commented-form'),
# read-time conditional
(r'#[+-]', Operator),
# special operators that should have been parsed already
(r'(,@|,|\.)', Operator),
# special constants
(r'(t|nil)' + terminated, Name.Constant),
# functions and variables
            (r'\*' + symbol + r'\*', Name.Variable.Global),
(symbol, Name.Variable),
# parentheses
(r'\(', Punctuation, 'body'),
(r'\)', Punctuation, '#pop'),
],
}
class HaskellLexer(RegexLexer):
"""
A Haskell lexer based on the lexemes defined in the Haskell 98 Report.
*New in Pygments 0.8.*
"""
name = 'Haskell'
aliases = ['haskell', 'hs']
filenames = ['*.hs']
mimetypes = ['text/x-haskell']
reserved = ['case','class','data','default','deriving','do','else',
'if','in','infix[lr]?','instance',
'let','newtype','of','then','type','where','_']
ascii = ['NUL','SOH','[SE]TX','EOT','ENQ','ACK',
'BEL','BS','HT','LF','VT','FF','CR','S[OI]','DLE',
'DC[1-4]','NAK','SYN','ETB','CAN',
'EM','SUB','ESC','[FGRU]S','SP','DEL']
tokens = {
'root': [
# Whitespace:
(r'\s+', Text),
#(r'--\s*|.*$', Comment.Doc),
(r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
# Lexemes:
# Identifiers
(r'\bimport\b', Keyword.Reserved, 'import'),
(r'\bmodule\b', Keyword.Reserved, 'module'),
(r'\berror\b', Name.Exception),
(r'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'^[_a-z][\w\']*', Name.Function),
(r"'?[_a-z][\w']*", Name),
(r"('')?[A-Z][\w\']*", Keyword.Type),
# Operators
(r'\\(?![:!#$%&*+.\\/<=>?@^|~-]+)', Name.Function), # lambda operator
(r'(<-|::|->|=>|=)(?![:!#$%&*+.\\/<=>?@^|~-]+)', Operator.Word), # specials
(r':[:!#$%&*+.\\/<=>?@^|~-]*', Keyword.Type), # Constructor operators
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator), # Other operators
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Character/String Literals
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
# Special
(r'\[\]', Keyword.Type),
(r'\(\)', Name.Builtin),
(r'[][(),;`{}]', Punctuation),
],
'import': [
# Import statements
(r'\s+', Text),
(r'"', String, 'string'),
# after "funclist" state
(r'\)', Punctuation, '#pop'),
(r'qualified\b', Keyword),
# import X as Y
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(as)(\s+)([A-Z][a-zA-Z0-9_.]*)',
bygroups(Name.Namespace, Text, Keyword, Text, Name), '#pop'),
# import X hiding (functions)
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(hiding)(\s+)(\()',
bygroups(Name.Namespace, Text, Keyword, Text, Punctuation), 'funclist'),
# import X (functions)
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
# import X
(r'[a-zA-Z0-9_.]+', Name.Namespace, '#pop'),
],
'module': [
(r'\s+', Text),
(r'([A-Z][a-zA-Z0-9_.]*)(\s+)(\()',
bygroups(Name.Namespace, Text, Punctuation), 'funclist'),
(r'[A-Z][a-zA-Z0-9_.]*', Name.Namespace, '#pop'),
],
'funclist': [
(r'\s+', Text),
(r'[A-Z][a-zA-Z0-9_]*', Keyword.Type),
(r'(_[\w\']+|[a-z][\w\']*)', Name.Function),
(r'--.*$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
(r',', Punctuation),
(r'[:!#$%&*+.\\/<=>?@^|~-]+', Operator),
# (HACK, but it makes sense to push two instances, believe me)
(r'\(', Punctuation, ('funclist', 'funclist')),
(r'\)', Punctuation, '#pop:2'),
],
# NOTE: the next four states are shared in the AgdaLexer; make sure
# any change is compatible with Agda as well or copy over and change
'comment': [
# Multiline Comments
(r'[^-{}]+', Comment.Multiline),
(r'{-', Comment.Multiline, '#push'),
(r'-}', Comment.Multiline, '#pop'),
(r'[-{}]', Comment.Multiline),
],
'character': [
# Allows multi-chars, incorrectly.
(r"[^\\']", String.Char),
(r"\\", String.Escape, 'escape'),
("'", String.Char, '#pop'),
],
'string': [
(r'[^\\"]+', String),
(r"\\", String.Escape, 'escape'),
('"', String, '#pop'),
],
'escape': [
(r'[abfnrtv"\'&\\]', String.Escape, '#pop'),
(r'\^[][A-Z@\^_]', String.Escape, '#pop'),
('|'.join(ascii), String.Escape, '#pop'),
(r'o[0-7]+', String.Escape, '#pop'),
(r'x[\da-fA-F]+', String.Escape, '#pop'),
(r'\d+', String.Escape, '#pop'),
(r'\s+\\', String.Escape, '#pop'),
],
}
class AgdaLexer(RegexLexer):
"""
For the `Agda <http://wiki.portal.chalmers.se/agda/pmwiki.php>`_
dependently typed functional programming language and proof assistant.
*New in Pygments 1.7.*
"""
name = 'Agda'
aliases = ['agda']
filenames = ['*.agda']
mimetypes = ['text/x-agda']
reserved = ['abstract', 'codata', 'coinductive', 'constructor', 'data',
'field', 'forall', 'hiding', 'in', 'inductive', 'infix',
'infixl', 'infixr', 'let', 'open', 'pattern', 'primitive',
'private', 'mutual', 'quote', 'quoteGoal', 'quoteTerm',
'record', 'syntax', 'rewrite', 'unquote', 'using', 'where',
'with']
tokens = {
'root': [
# Declaration
(r'^(\s*)([^\s\(\)\{\}]+)(\s*)(:)(\s*)',
bygroups(Text, Name.Function, Text, Operator.Word, Text)),
# Comments
(r'--(?![!#$%&*+./<=>?@\^|_~:\\]).*?$', Comment.Single),
(r'{-', Comment.Multiline, 'comment'),
# Holes
(r'{!', Comment.Directive, 'hole'),
# Lexemes:
# Identifiers
(ur'\b(%s)(?!\')\b' % '|'.join(reserved), Keyword.Reserved),
(r'(import|module)(\s+)', bygroups(Keyword.Reserved, Text), 'module'),
(r'\b(Set|Prop)\b', Keyword.Type),
# Special Symbols
(r'(\(|\)|\{|\})', Operator),
(ur'(\.{1,3}|\||[\u039B]|[\u2200]|[\u2192]|:|=|->)', Operator.Word),
# Numbers
(r'\d+[eE][+-]?\d+', Number.Float),
(r'\d+\.\d+([eE][+-]?\d+)?', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'\d+', Number.Integer),
# Strings
(r"'", String.Char, 'character'),
(r'"', String, 'string'),
(r'[^\s\(\)\{\}]+', Text),
(r'\s+?', Text), # Whitespace
],
'hole': [
# Holes
(r'[^!{}]+', Comment.Directive),
(r'{!', Comment.Directive, '#push'),
(r'!}', Comment.Directive, '#pop'),
(r'[!{}]', Comment.Directive),
],
'module': [
(r'{-', Comment.Multiline, 'comment'),
(r'[a-zA-Z][a-zA-Z0-9_.]*', Name, '#pop'),
(r'[^a-zA-Z]*', Text)
],
'comment': HaskellLexer.tokens['comment'],
'character': HaskellLexer.tokens['character'],
'string': HaskellLexer.tokens['string'],
'escape': HaskellLexer.tokens['escape']
}
class LiterateLexer(Lexer):
"""
Base class for lexers of literate file formats based on LaTeX or Bird-style
(prefixing each code line with ">").
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
"""
bird_re = re.compile(r'(>[ \t]*)(.*\n)')
def __init__(self, baselexer, **options):
self.baselexer = baselexer
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
style = self.options.get('litstyle')
if style is None:
            style = 'latex' if text.lstrip()[0:1] in '%\\' else 'bird'
code = ''
insertions = []
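        # `insertions` collects (offset, tokens) pairs for the non-code
        # (prose) parts; do_insertions() below splices them back into the
        # base lexer's token stream at the matching offsets within `code`.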
if style == 'bird':
# bird-style
for match in line_re.finditer(text):
line = match.group()
m = self.bird_re.match(line)
if m:
insertions.append((len(code),
[(0, Comment.Special, m.group(1))]))
code += m.group(2)
else:
insertions.append((len(code), [(0, Text, line)]))
else:
# latex-style
from pygments.lexers.text import TexLexer
lxlexer = TexLexer(**self.options)
codelines = 0
latex = ''
for match in line_re.finditer(text):
line = match.group()
if codelines:
if line.lstrip().startswith('\\end{code}'):
codelines = 0
latex += line
else:
code += line
elif line.lstrip().startswith('\\begin{code}'):
codelines = 1
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
latex = ''
else:
latex += line
insertions.append((len(code),
list(lxlexer.get_tokens_unprocessed(latex))))
for item in do_insertions(insertions, self.baselexer.get_tokens_unprocessed(code)):
yield item
class LiterateHaskellLexer(LiterateLexer):
"""
For Literate Haskell (Bird-style or LaTeX) source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
*New in Pygments 0.9.*
"""
name = 'Literate Haskell'
aliases = ['lhs', 'literate-haskell', 'lhaskell']
filenames = ['*.lhs']
mimetypes = ['text/x-literate-haskell']
def __init__(self, **options):
hslexer = HaskellLexer(**options)
LiterateLexer.__init__(self, hslexer, **options)
class LiterateAgdaLexer(LiterateLexer):
"""
For Literate Agda source.
Additional options accepted:
`litstyle`
If given, must be ``"bird"`` or ``"latex"``. If not given, the style
is autodetected: if the first non-whitespace character in the source
is a backslash or percent character, LaTeX is assumed, else Bird.
*New in Pygments 1.7.*
"""
name = 'Literate Agda'
aliases = ['lagda', 'literate-agda']
filenames = ['*.lagda']
mimetypes = ['text/x-literate-agda']
def __init__(self, **options):
agdalexer = AgdaLexer(**options)
LiterateLexer.__init__(self, agdalexer, litstyle='latex', **options)
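        # Unlike LiterateHaskellLexer, litstyle is forced to 'latex' here,
        # bypassing LiterateLexer's Bird-style autodetection.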
class SMLLexer(RegexLexer):
"""
For the Standard ML language.
*New in Pygments 1.5.*
"""
name = 'Standard ML'
aliases = ['sml']
filenames = ['*.sml', '*.sig', '*.fun',]
mimetypes = ['text/x-standardml', 'application/x-standardml']
alphanumid_reserved = [
# Core
'abstype', 'and', 'andalso', 'as', 'case', 'datatype', 'do', 'else',
'end', 'exception', 'fn', 'fun', 'handle', 'if', 'in', 'infix',
'infixr', 'let', 'local', 'nonfix', 'of', 'op', 'open', 'orelse',
'raise', 'rec', 'then', 'type', 'val', 'with', 'withtype', 'while',
# Modules
'eqtype', 'functor', 'include', 'sharing', 'sig', 'signature',
'struct', 'structure', 'where',
]
symbolicid_reserved = [
# Core
':', '\|', '=', '=>', '->', '#',
# Modules
':>',
]
nonid_reserved = [ '(', ')', '[', ']', '{', '}', ',', ';', '...', '_' ]
alphanumid_re = r"[a-zA-Z][a-zA-Z0-9_']*"
symbolicid_re = r"[!%&$#+\-/:<=>?@\\~`^|*]+"
# A character constant is a sequence of the form #s, where s is a string
# constant denoting a string of size one character. This setup just parses
# the entire string as either a String.Double or a String.Char (depending
    # on the argument), even if the String.Char is an erroneous
# multiple-character string.
def stringy (whatkind):
return [
(r'[^"\\]', whatkind),
(r'\\[\\\"abtnvfr]', String.Escape),
# Control-character notation is used for codes < 32,
# where \^@ == \000
(r'\\\^[\x40-\x5e]', String.Escape),
# Docs say 'decimal digits'
(r'\\[0-9]{3}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
(r'\\\s+\\', String.Interpol),
(r'"', whatkind, '#pop'),
]
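    # stringy() builds the shared rule list backing both the 'char' and
    # 'string' states below; only the token type for ordinary characters
    # differs.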
# Callbacks for distinguishing tokens and reserved words
def long_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved: token = Error
else: token = Name.Namespace
yield match.start(1), token, match.group(1)
yield match.start(2), Punctuation, match.group(2)
def end_id_callback(self, match):
if match.group(1) in self.alphanumid_reserved: token = Error
elif match.group(1) in self.symbolicid_reserved: token = Error
else: token = Name
yield match.start(1), token, match.group(1)
def id_callback(self, match):
str = match.group(1)
if str in self.alphanumid_reserved: token = Keyword.Reserved
elif str in self.symbolicid_reserved: token = Punctuation
else: token = Name
yield match.start(1), token, str
tokens = {
# Whitespace and comments are (almost) everywhere
'whitespace': [
(r'\s+', Text),
(r'\(\*', Comment.Multiline, 'comment'),
],
'delimiters': [
# This lexer treats these delimiters specially:
# Delimiters define scopes, and the scope is how the meaning of
# the `|' is resolved - is it a case/handle expression, or function
# definition by cases? (This is not how the Definition works, but
# it's how MLton behaves, see http://mlton.org/SMLNJDeviations)
(r'\(|\[|{', Punctuation, 'main'),
(r'\)|\]|}', Punctuation, '#pop'),
(r'\b(let|if|local)\b(?!\')', Keyword.Reserved, ('main', 'main')),
(r'\b(struct|sig|while)\b(?!\')', Keyword.Reserved, 'main'),
(r'\b(do|else|end|in|then)\b(?!\')', Keyword.Reserved, '#pop'),
],
'core': [
# Punctuation that doesn't overlap symbolic identifiers
(r'(%s)' % '|'.join([re.escape(z) for z in nonid_reserved]),
Punctuation),
# Special constants: strings, floats, numbers in decimal and hex
(r'#"', String.Char, 'char'),
(r'"', String.Double, 'string'),
(r'~?0x[0-9a-fA-F]+', Number.Hex),
(r'0wx[0-9a-fA-F]+', Number.Hex),
(r'0w\d+', Number.Integer),
(r'~?\d+\.\d+[eE]~?\d+', Number.Float),
(r'~?\d+\.\d+', Number.Float),
(r'~?\d+[eE]~?\d+', Number.Float),
(r'~?\d+', Number.Integer),
# Labels
(r'#\s*[1-9][0-9]*', Name.Label),
(r'#\s*(%s)' % alphanumid_re, Name.Label),
(r'#\s+(%s)' % symbolicid_re, Name.Label),
# Some reserved words trigger a special, local lexer state change
(r'\b(datatype|abstype)\b(?!\')', Keyword.Reserved, 'dname'),
(r'(?=\b(exception)\b(?!\'))', Text, ('ename')),
(r'\b(functor|include|open|signature|structure)\b(?!\')',
Keyword.Reserved, 'sname'),
(r'\b(type|eqtype)\b(?!\')', Keyword.Reserved, 'tname'),
# Regular identifiers, long and otherwise
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'(%s)(\.)' % alphanumid_re, long_id_callback, "dotted"),
(r'(%s)' % alphanumid_re, id_callback),
(r'(%s)' % symbolicid_re, id_callback),
],
'dotted': [
(r'(%s)(\.)' % alphanumid_re, long_id_callback),
(r'(%s)' % alphanumid_re, end_id_callback, "#pop"),
(r'(%s)' % symbolicid_re, end_id_callback, "#pop"),
(r'\s+', Error),
(r'\S+', Error),
],
# Main parser (prevents errors in files that have scoping errors)
'root': [ (r'', Text, 'main') ],
# In this scope, I expect '|' to not be followed by a function name,
# and I expect 'and' to be followed by a binding site
'main': [
include('whitespace'),
# Special behavior of val/and/fun
(r'\b(val|and)\b(?!\')', Keyword.Reserved, 'vname'),
(r'\b(fun)\b(?!\')', Keyword.Reserved,
('#pop', 'main-fun', 'fname')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# In this scope, I expect '|' and 'and' to be followed by a function
'main-fun': [
include('whitespace'),
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
# Special behavior of val/and/fun
(r'\b(fun|and)\b(?!\')', Keyword.Reserved, 'fname'),
(r'\b(val)\b(?!\')', Keyword.Reserved,
('#pop', 'main', 'vname')),
# Special behavior of '|' and '|'-manipulating keywords
(r'\|', Punctuation, 'fname'),
(r'\b(case|handle)\b(?!\')', Keyword.Reserved,
('#pop', 'main')),
include('delimiters'),
include('core'),
(r'\S+', Error),
],
# Character and string parsers
'char': stringy(String.Char),
'string': stringy(String.Double),
'breakout': [
(r'(?=\b(%s)\b(?!\'))' % '|'.join(alphanumid_reserved), Text, '#pop'),
],
# Dealing with what comes after module system keywords
'sname': [
include('whitespace'),
include('breakout'),
(r'(%s)' % alphanumid_re, Name.Namespace),
(r'', Text, '#pop'),
],
# Dealing with what comes after the 'fun' (or 'and' or '|') keyword
'fname': [
include('whitespace'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)' % alphanumid_re, Name.Function, '#pop'),
(r'(%s)' % symbolicid_re, Name.Function, '#pop'),
# Ignore interesting function declarations like "fun (x + y) = ..."
(r'', Text, '#pop'),
],
# Dealing with what comes after the 'val' (or 'and') keyword
'vname': [
include('whitespace'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(%s)(\s*)(=(?!%s))' % (alphanumid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)(\s*)(=(?!%s))' % (symbolicid_re, symbolicid_re),
bygroups(Name.Variable, Text, Punctuation), '#pop'),
(r'(%s)' % alphanumid_re, Name.Variable, '#pop'),
(r'(%s)' % symbolicid_re, Name.Variable, '#pop'),
# Ignore interesting patterns like 'val (x, y)'
(r'', Text, '#pop'),
],
# Dealing with what comes after the 'type' (or 'and') keyword
'tname': [
include('whitespace'),
include('breakout'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'=(?!%s)' % symbolicid_re, Punctuation, ('#pop', 'typbind')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# A type binding includes most identifiers
'typbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
include('breakout'),
include('core'),
(r'\S+', Error, '#pop'),
],
# Dealing with what comes after the 'datatype' (or 'and') keyword
'dname': [
include('whitespace'),
include('breakout'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(r'\(', Punctuation, 'tyvarseq'),
(r'(=)(\s*)(datatype)',
bygroups(Punctuation, Text, Keyword.Reserved), '#pop'),
(r'=(?!%s)' % symbolicid_re, Punctuation,
('#pop', 'datbind', 'datcon')),
(r'(%s)' % alphanumid_re, Keyword.Type),
(r'(%s)' % symbolicid_re, Keyword.Type),
(r'\S+', Error, '#pop'),
],
# common case - A | B | C of int
'datbind': [
include('whitespace'),
(r'\b(and)\b(?!\')', Keyword.Reserved, ('#pop', 'dname')),
(r'\b(withtype)\b(?!\')', Keyword.Reserved, ('#pop', 'tname')),
(r'\b(of)\b(?!\')', Keyword.Reserved),
(r'(\|)(\s*)(%s)' % alphanumid_re,
bygroups(Punctuation, Text, Name.Class)),
(r'(\|)(\s+)(%s)' % symbolicid_re,
bygroups(Punctuation, Text, Name.Class)),
include('breakout'),
include('core'),
(r'\S+', Error),
],
# Dealing with what comes after an exception
'ename': [
include('whitespace'),
(r'(exception|and)\b(\s+)(%s)' % alphanumid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'(exception|and)\b(\s*)(%s)' % symbolicid_re,
bygroups(Keyword.Reserved, Text, Name.Class)),
(r'\b(of)\b(?!\')', Keyword.Reserved),
include('breakout'),
include('core'),
(r'\S+', Error),
],
'datcon': [
include('whitespace'),
(r'(%s)' % alphanumid_re, Name.Class, '#pop'),
(r'(%s)' % symbolicid_re, Name.Class, '#pop'),
(r'\S+', Error, '#pop'),
],
# Series of type variables
'tyvarseq': [
(r'\s', Text),
(r'\(\*', Comment.Multiline, 'comment'),
(r'\'[0-9a-zA-Z_\']*', Name.Decorator),
(alphanumid_re, Name),
(r',', Punctuation),
(r'\)', Punctuation, '#pop'),
(symbolicid_re, Name),
],
'comment': [
(r'[^(*)]', Comment.Multiline),
(r'\(\*', Comment.Multiline, '#push'),
(r'\*\)', Comment.Multiline, '#pop'),
(r'[(*)]', Comment.Multiline),
],
}
class OcamlLexer(RegexLexer):
"""
For the OCaml language.
*New in Pygments 0.7.*
"""
name = 'OCaml'
aliases = ['ocaml']
filenames = ['*.ml', '*.mli', '*.mll', '*.mly']
mimetypes = ['text/x-ocaml']
keywords = [
'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
'downto', 'else', 'end', 'exception', 'external', 'false',
'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
'type', 'value', 'val', 'virtual', 'when', 'while', 'with',
]
keyopts = [
'!=','#','&','&&','\(','\)','\*','\+',',','-',
'-\.','->','\.','\.\.',':','::',':=',':>',';',';;','<',
'<-','=','>','>]','>}','\?','\?\?','\[','\[<','\[>','\[\|',
']','_','`','{','{<','\|','\|]','}','~'
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list', 'array']
tokens = {
'escape-sequence': [
(r'\\[\\\"\'ntbr]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class),
(r'\(\*(?![)])', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
            (r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
(r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
(r'0[oO][0-7][0-7_]*', Number.Oct),
(r'0[bB][01][01_]*', Number.Binary),
(r'\d[\d_]*', Number.Integer),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^\\"]+', String.Double),
include('escape-sequence'),
(r'\\\n', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'),
(r'[a-z_][A-Za-z0-9_\']*', Name, '#pop'),
],
}
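# --- Illustrative sketch (not part of the original module) ---
# Minimal usage of the lexer above, assuming this module's Pygments imports
# are available. The 'comment' state's '#push'/'#pop' pair is what lets
# nested (* ... *) comments lex as a single Comment run:
if __name__ == '__main__':
    for tok, text in OcamlLexer().get_tokens("(* a (* nested *) b *) let x = 1"):
        print(tok, repr(text))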
class ErlangLexer(RegexLexer):
"""
For the Erlang functional programming language.
Blame Jeremy Thurgood (http://jerith.za.net/).
*New in Pygments 0.9.*
"""
name = 'Erlang'
aliases = ['erlang']
filenames = ['*.erl', '*.hrl', '*.es', '*.escript']
mimetypes = ['text/x-erlang']
keywords = [
'after', 'begin', 'case', 'catch', 'cond', 'end', 'fun', 'if',
'let', 'of', 'query', 'receive', 'try', 'when',
]
builtins = [ # See erlang(3) man page
'abs', 'append_element', 'apply', 'atom_to_list', 'binary_to_list',
'bitstring_to_list', 'binary_to_term', 'bit_size', 'bump_reductions',
'byte_size', 'cancel_timer', 'check_process_code', 'delete_module',
'demonitor', 'disconnect_node', 'display', 'element', 'erase', 'exit',
'float', 'float_to_list', 'fun_info', 'fun_to_list',
'function_exported', 'garbage_collect', 'get', 'get_keys',
'group_leader', 'hash', 'hd', 'integer_to_list', 'iolist_to_binary',
'iolist_size', 'is_atom', 'is_binary', 'is_bitstring', 'is_boolean',
'is_builtin', 'is_float', 'is_function', 'is_integer', 'is_list',
'is_number', 'is_pid', 'is_port', 'is_process_alive', 'is_record',
'is_reference', 'is_tuple', 'length', 'link', 'list_to_atom',
'list_to_binary', 'list_to_bitstring', 'list_to_existing_atom',
'list_to_float', 'list_to_integer', 'list_to_pid', 'list_to_tuple',
'load_module', 'localtime_to_universaltime', 'make_tuple', 'md5',
'md5_final', 'md5_update', 'memory', 'module_loaded', 'monitor',
'monitor_node', 'node', 'nodes', 'open_port', 'phash', 'phash2',
'pid_to_list', 'port_close', 'port_command', 'port_connect',
'port_control', 'port_call', 'port_info', 'port_to_list',
'process_display', 'process_flag', 'process_info', 'purge_module',
'put', 'read_timer', 'ref_to_list', 'register', 'resume_process',
'round', 'send', 'send_after', 'send_nosuspend', 'set_cookie',
'setelement', 'size', 'spawn', 'spawn_link', 'spawn_monitor',
'spawn_opt', 'split_binary', 'start_timer', 'statistics',
'suspend_process', 'system_flag', 'system_info', 'system_monitor',
'system_profile', 'term_to_binary', 'tl', 'trace', 'trace_delivered',
'trace_info', 'trace_pattern', 'trunc', 'tuple_size', 'tuple_to_list',
'universaltime_to_localtime', 'unlink', 'unregister', 'whereis'
]
operators = r'(\+\+?|--?|\*|/|<|>|/=|=:=|=/=|=<|>=|==?|<-|!|\?)'
word_operators = [
'and', 'andalso', 'band', 'bnot', 'bor', 'bsl', 'bsr', 'bxor',
'div', 'not', 'or', 'orelse', 'rem', 'xor'
]
atom_re = r"(?:[a-z][a-zA-Z0-9_]*|'[^\n']*[^\\]')"
variable_re = r'(?:[A-Z_][a-zA-Z0-9_]*)'
escape_re = r'(?:\\(?:[bdefnrstv\'"\\/]|[0-7][0-7]?[0-7]?|\^[a-zA-Z]))'
macro_re = r'(?:'+variable_re+r'|'+atom_re+r')'
base_re = r'(?:[2-9]|[12][0-9]|3[0-6])'
tokens = {
'root': [
(r'\s+', Text),
(r'%.*\n', Comment),
('(' + '|'.join(keywords) + r')\b', Keyword),
('(' + '|'.join(builtins) + r')\b', Name.Builtin),
('(' + '|'.join(word_operators) + r')\b', Operator.Word),
(r'^-', Punctuation, 'directive'),
(operators, Operator),
(r'"', String, 'string'),
(r'<<', Name.Label),
(r'>>', Name.Label),
('(' + atom_re + ')(:)', bygroups(Name.Namespace, Punctuation)),
('(?:^|(?<=:))(' + atom_re + r')(\s*)(\()',
bygroups(Name.Function, Text, Punctuation)),
(r'[+-]?'+base_re+r'#[0-9a-zA-Z]+', Number.Integer),
            (r'[+-]?\d+\.\d+', Number.Float),
            (r'[+-]?\d+', Number.Integer),
(r'[]\[:_@\".{}()|;,]', Punctuation),
(variable_re, Name.Variable),
(atom_re, Name),
(r'\?'+macro_re, Name.Constant),
(r'\$(?:'+escape_re+r'|\\[ %]|[^\\])', String.Char),
(r'#'+atom_re+r'(:?\.'+atom_re+r')?', Name.Label),
],
'string': [
(escape_re, String.Escape),
(r'"', String, '#pop'),
(r'~[0-9.*]*[~#+bBcdefginpPswWxX]', String.Interpol),
(r'[^"\\~]+', String),
(r'~', String),
],
'directive': [
(r'(define)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Constant), '#pop'),
(r'(record)(\s*)(\()('+macro_re+r')',
bygroups(Name.Entity, Text, Punctuation, Name.Label), '#pop'),
(atom_re, Name.Entity, '#pop'),
],
}
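# --- Illustrative sketch (not part of the original module) ---
# A hedged example of the 'directive' state above: a line-leading '-'
# switches into it, so the macro name in '-define(FOO, 1).' lexes as
# Name.Constant:
if __name__ == '__main__':
    for tok, text in ErlangLexer().get_tokens("-define(FOO, 1).\n"):
        print(tok, repr(text))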
class ErlangShellLexer(Lexer):
"""
Shell sessions in erl (for Erlang code).
*New in Pygments 1.1.*
"""
name = 'Erlang erl session'
aliases = ['erl']
filenames = ['*.erl-sh']
mimetypes = ['text/x-erl-shellsession']
_prompt_re = re.compile(r'\d+>(?=\s|\Z)')
def get_tokens_unprocessed(self, text):
erlexer = ErlangLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if line.startswith('*'):
yield match.start(), Generic.Traceback, line
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
erlexer.get_tokens_unprocessed(curcode)):
yield item
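# --- Illustrative sketch (not part of the original module) ---
# The shell lexer splits prompted lines from output: prompt text becomes a
# Generic.Prompt insertion and the code after it is re-lexed by ErlangLexer:
if __name__ == '__main__':
    session = "1> lists:seq(1, 3).\n[1,2,3]\n"
    for pos, tok, text in ErlangShellLexer().get_tokens_unprocessed(session):
        print(pos, tok, repr(text))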
class OpaLexer(RegexLexer):
"""
Lexer for the Opa language (http://opalang.org).
*New in Pygments 1.5.*
"""
name = 'Opa'
aliases = ['opa']
filenames = ['*.opa']
mimetypes = ['text/x-opa']
# most of these aren't strictly keywords
# but if you color only real keywords, you might just
# as well not color anything
keywords = [
'and', 'as', 'begin', 'case', 'client', 'css', 'database', 'db', 'do',
'else', 'end', 'external', 'forall', 'function', 'if', 'import',
'match', 'module', 'or', 'package', 'parser', 'rec', 'server', 'then',
'type', 'val', 'with', 'xml_parser',
]
# matches both stuff and `stuff`
ident_re = r'(([a-zA-Z_]\w*)|(`[^`]*`))'
op_re = r'[.=\-<>,@~%/+?*&^!]'
punc_re = r'[()\[\],;|]' # '{' and '}' are treated elsewhere
# because they are also used for inserts
tokens = {
# copied from the caml lexer, should be adapted
'escape-sequence': [
(r'\\[\\\"\'ntr}]', String.Escape),
(r'\\[0-9]{3}', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
],
# factorizing these rules, because they are inserted many times
'comments': [
(r'/\*', Comment, 'nested-comment'),
(r'//.*?$', Comment),
],
'comments-and-spaces': [
include('comments'),
(r'\s+', Text),
],
'root': [
include('comments-and-spaces'),
# keywords
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
# directives
# we could parse the actual set of directives instead of anything
# starting with @, but this is troublesome
# because it needs to be adjusted all the time
# and assuming we parse only sources that compile, it is useless
(r'@'+ident_re+r'\b', Name.Builtin.Pseudo),
# number literals
            (r'-?\.[\d]+([eE][+\-]?\d+)', Number.Float),
            (r'-?\d+\.\d*([eE][+\-]?\d+)', Number.Float),
(r'-?\d+[eE][+\-]?\d+', Number.Float),
(r'0[xX][\da-fA-F]+', Number.Hex),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Binary),
(r'\d+', Number.Integer),
# color literals
(r'#[\da-fA-F]{3,6}', Number.Integer),
# string literals
(r'"', String.Double, 'string'),
# char literal, should be checked because this is the regexp from
# the caml lexer
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2})|.)'",
String.Char),
# this is meant to deal with embedded exprs in strings
# every time we find a '}' we pop a state so that if we were
# inside a string, we are back in the string state
# as a consequence, we must also push a state every time we find a
# '{' or else we will have errors when parsing {} for instance
(r'{', Operator, '#push'),
(r'}', Operator, '#pop'),
# html literals
            # this is much more strict than the actual parser,
# since a<b would not be parsed as html
# but then again, the parser is way too lax, and we can't hope
# to have something as tolerant
(r'<(?=[a-zA-Z>])', String.Single, 'html-open-tag'),
# db path
# matching the '[_]' in '/a[_]' because it is a part
# of the syntax of the db path definition
            # unfortunately, I don't know how to match the ']' in
# /a[1], so this is somewhat inconsistent
(r'[@?!]?(/\w+)+(\[_\])?', Name.Variable),
# putting the same color on <- as on db path, since
# it can be used only to mean Db.write
(r'<-(?!'+op_re+r')', Name.Variable),
# 'modules'
# although modules are not distinguished by their names as in caml
            # the standard library seems to follow the convention that only
            # modules are capitalized
(r'\b([A-Z]\w*)(?=\.)', Name.Namespace),
# operators
# = has a special role because this is the only
            # way to syntactically distinguish binding constructions
# unfortunately, this colors the equal in {x=2} too
(r'=(?!'+op_re+r')', Keyword),
(r'(%s)+' % op_re, Operator),
(r'(%s)+' % punc_re, Operator),
# coercions
(r':', Operator, 'type'),
# type variables
# we need this rule because we don't parse specially type
# definitions so in "type t('a) = ...", "'a" is parsed by 'root'
("'"+ident_re, Keyword.Type),
# id literal, #something, or #{expr}
(r'#'+ident_re, String.Single),
(r'#(?={)', String.Single),
# identifiers
            # this avoids coloring '2' in 'a2' as an integer
(ident_re, Text),
# default, not sure if that is needed or not
# (r'.', Text),
],
# it is quite painful to have to parse types to know where they end
# this is the general rule for a type
# a type is either:
# * -> ty
# * type-with-slash
# * type-with-slash -> ty
# * type-with-slash (, type-with-slash)+ -> ty
#
# the code is pretty funky in here, but this code would roughly
# translate in caml to:
# let rec type stream =
# match stream with
# | [< "->"; stream >] -> type stream
# | [< ""; stream >] ->
# type_with_slash stream
# type_lhs_1 stream;
# and type_1 stream = ...
'type': [
include('comments-and-spaces'),
(r'->', Keyword.Type),
(r'', Keyword.Type, ('#pop', 'type-lhs-1', 'type-with-slash')),
],
# parses all the atomic or closed constructions in the syntax of type
# expressions: record types, tuple types, type constructors, basic type
# and type variables
'type-1': [
include('comments-and-spaces'),
(r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(r'~?{', Keyword.Type, ('#pop', 'type-record')),
(ident_re+r'\(', Keyword.Type, ('#pop', 'type-tuple')),
(ident_re, Keyword.Type, '#pop'),
("'"+ident_re, Keyword.Type),
# this case is not in the syntax but sometimes
# we think we are parsing types when in fact we are parsing
# some css, so we just pop the states until we get back into
# the root state
(r'', Keyword.Type, '#pop'),
],
# type-with-slash is either:
# * type-1
# * type-1 (/ type-1)+
'type-with-slash': [
include('comments-and-spaces'),
(r'', Keyword.Type, ('#pop', 'slash-type-1', 'type-1')),
],
'slash-type-1': [
include('comments-and-spaces'),
('/', Keyword.Type, ('#pop', 'type-1')),
# same remark as above
(r'', Keyword.Type, '#pop'),
],
# we go in this state after having parsed a type-with-slash
# while trying to parse a type
# and at this point we must determine if we are parsing an arrow
# type (in which case we must continue parsing) or not (in which
# case we stop)
'type-lhs-1': [
include('comments-and-spaces'),
(r'->', Keyword.Type, ('#pop', 'type')),
(r'(?=,)', Keyword.Type, ('#pop', 'type-arrow')),
(r'', Keyword.Type, '#pop'),
],
'type-arrow': [
include('comments-and-spaces'),
# the look ahead here allows to parse f(x : int, y : float -> truc)
# correctly
(r',(?=[^:]*?->)', Keyword.Type, 'type-with-slash'),
(r'->', Keyword.Type, ('#pop', 'type')),
# same remark as above
(r'', Keyword.Type, '#pop'),
],
# no need to do precise parsing for tuples and records
# because they are closed constructions, so we can simply
# find the closing delimiter
        # note that this function would not work if the source
# contained identifiers like `{)` (although it could be patched
# to support it)
'type-tuple': [
include('comments-and-spaces'),
(r'[^\(\)/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'\(', Keyword.Type, '#push'),
(r'\)', Keyword.Type, '#pop'),
],
'type-record': [
include('comments-and-spaces'),
(r'[^{}/*]+', Keyword.Type),
(r'[/*]', Keyword.Type),
(r'{', Keyword.Type, '#push'),
(r'}', Keyword.Type, '#pop'),
],
# 'type-tuple': [
# include('comments-and-spaces'),
# (r'\)', Keyword.Type, '#pop'),
# (r'', Keyword.Type, ('#pop', 'type-tuple-1', 'type-1')),
# ],
# 'type-tuple-1': [
# include('comments-and-spaces'),
# (r',?\s*\)', Keyword.Type, '#pop'), # ,) is a valid end of tuple, in (1,)
# (r',', Keyword.Type, 'type-1'),
# ],
# 'type-record':[
# include('comments-and-spaces'),
# (r'}', Keyword.Type, '#pop'),
# (r'~?(?:\w+|`[^`]*`)', Keyword.Type, 'type-record-field-expr'),
# ],
# 'type-record-field-expr': [
#
# ],
'nested-comment': [
(r'[^/*]+', Comment),
(r'/\*', Comment, '#push'),
(r'\*/', Comment, '#pop'),
(r'[/*]', Comment),
],
# the copy pasting between string and single-string
# is kinda sad. Is there a way to avoid that??
'string': [
(r'[^\\"{]+', String.Double),
(r'"', String.Double, '#pop'),
(r'{', Operator, 'root'),
include('escape-sequence'),
],
'single-string': [
(r'[^\\\'{]+', String.Double),
(r'\'', String.Double, '#pop'),
(r'{', Operator, 'root'),
include('escape-sequence'),
],
# all the html stuff
# can't really reuse some existing html parser
# because we must be able to parse embedded expressions
# we are in this state after someone parsed the '<' that
# started the html literal
'html-open-tag': [
(r'[\w\-:]+', String.Single, ('#pop', 'html-attr')),
(r'>', String.Single, ('#pop', 'html-content')),
],
# we are in this state after someone parsed the '</' that
# started the end of the closing tag
'html-end-tag': [
# this is a star, because </> is allowed
(r'[\w\-:]*>', String.Single, '#pop'),
],
# we are in this state after having parsed '<ident(:ident)?'
# we thus parse a possibly empty list of attributes
'html-attr': [
(r'\s+', Text),
(r'[\w\-:]+=', String.Single, 'html-attr-value'),
(r'/>', String.Single, '#pop'),
(r'>', String.Single, ('#pop', 'html-content')),
],
'html-attr-value': [
(r"'", String.Single, ('#pop', 'single-string')),
(r'"', String.Single, ('#pop', 'string')),
(r'#'+ident_re, String.Single, '#pop'),
(r'#(?={)', String.Single, ('#pop', 'root')),
(r'[^"\'{`=<>]+', String.Single, '#pop'),
(r'{', Operator, ('#pop', 'root')), # this is a tail call!
],
# we should probably deal with '\' escapes here
'html-content': [
(r'<!--', Comment, 'html-comment'),
(r'</', String.Single, ('#pop', 'html-end-tag')),
(r'<', String.Single, 'html-open-tag'),
(r'{', Operator, 'root'),
(r'[^<{]+', String.Single),
],
'html-comment': [
(r'-->', Comment, '#pop'),
(r'[^\-]+|-', Comment),
],
}
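# --- Illustrative sketch (not part of the original module) ---
# The '{'/'}' push/pop trick above in action: the '{' inside the string
# pushes 'root', and the matching '}' pops straight back into 'string':
if __name__ == '__main__':
    for tok, text in OpaLexer().get_tokens('"hello {name}!"'):
        print(tok, repr(text))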
class CoqLexer(RegexLexer):
"""
For the `Coq <http://coq.inria.fr/>`_ theorem prover.
*New in Pygments 1.5.*
"""
name = 'Coq'
aliases = ['coq']
filenames = ['*.v']
mimetypes = ['text/x-coq']
keywords1 = [
# Vernacular commands
'Section', 'Module', 'End', 'Require', 'Import', 'Export', 'Variable',
'Variables', 'Parameter', 'Parameters', 'Axiom', 'Hypothesis',
'Hypotheses', 'Notation', 'Local', 'Tactic', 'Reserved', 'Scope',
'Open', 'Close', 'Bind', 'Delimit', 'Definition', 'Let', 'Ltac',
'Fixpoint', 'CoFixpoint', 'Morphism', 'Relation', 'Implicit',
'Arguments', 'Set', 'Unset', 'Contextual', 'Strict', 'Prenex',
'Implicits', 'Inductive', 'CoInductive', 'Record', 'Structure',
'Canonical', 'Coercion', 'Theorem', 'Lemma', 'Corollary',
'Proposition', 'Fact', 'Remark', 'Example', 'Proof', 'Goal', 'Save',
'Qed', 'Defined', 'Hint', 'Resolve', 'Rewrite', 'View', 'Search',
'Show', 'Print', 'Printing', 'All', 'Graph', 'Projections', 'inside',
'outside',
]
keywords2 = [
# Gallina
'forall', 'exists', 'exists2', 'fun', 'fix', 'cofix', 'struct',
'match', 'end', 'in', 'return', 'let', 'if', 'is', 'then', 'else',
'for', 'of', 'nosimpl', 'with', 'as',
]
keywords3 = [
# Sorts
'Type', 'Prop',
]
keywords4 = [
# Tactics
'pose', 'set', 'move', 'case', 'elim', 'apply', 'clear', 'hnf', 'intro',
'intros', 'generalize', 'rename', 'pattern', 'after', 'destruct',
'induction', 'using', 'refine', 'inversion', 'injection', 'rewrite',
'congr', 'unlock', 'compute', 'ring', 'field', 'replace', 'fold',
'unfold', 'change', 'cutrewrite', 'simpl', 'have', 'suff', 'wlog',
'suffices', 'without', 'loss', 'nat_norm', 'assert', 'cut', 'trivial',
'revert', 'bool_congr', 'nat_congr', 'symmetry', 'transitivity', 'auto',
'split', 'left', 'right', 'autorewrite',
]
keywords5 = [
# Terminators
'by', 'done', 'exact', 'reflexivity', 'tauto', 'romega', 'omega',
'assumption', 'solve', 'contradiction', 'discriminate',
]
keywords6 = [
# Control
'do', 'last', 'first', 'try', 'idtac', 'repeat',
]
# 'as', 'assert', 'begin', 'class', 'constraint', 'do', 'done',
# 'downto', 'else', 'end', 'exception', 'external', 'false',
# 'for', 'fun', 'function', 'functor', 'if', 'in', 'include',
# 'inherit', 'initializer', 'lazy', 'let', 'match', 'method',
# 'module', 'mutable', 'new', 'object', 'of', 'open', 'private',
# 'raise', 'rec', 'sig', 'struct', 'then', 'to', 'true', 'try',
# 'type', 'val', 'virtual', 'when', 'while', 'with'
keyopts = [
'!=', '#', '&', '&&', r'\(', r'\)', r'\*', r'\+', ',', '-',
r'-\.', '->', r'\.', r'\.\.', ':', '::', ':=', ':>', ';', ';;', '<',
'<-', '=', '>', '>]', '>}', r'\?', r'\?\?', r'\[', r'\[<', r'\[>',
r'\[\|', ']', '_', '`', '{', '{<', r'\|', r'\|]', '}', '~', '=>',
r'/\\', r'\\/',
u'Π', u'λ',
]
operators = r'[!$%&*+\./:<=>?@^|~-]'
word_operators = ['and', 'asr', 'land', 'lor', 'lsl', 'lxor', 'mod', 'or']
prefix_syms = r'[!?~]'
infix_syms = r'[=<>@^|&+\*/$%-]'
primitives = ['unit', 'int', 'float', 'bool', 'string', 'char', 'list',
'array']
tokens = {
'root': [
(r'\s+', Text),
(r'false|true|\(\)|\[\]', Name.Builtin.Pseudo),
(r'\(\*', Comment, 'comment'),
(r'\b(%s)\b' % '|'.join(keywords1), Keyword.Namespace),
(r'\b(%s)\b' % '|'.join(keywords2), Keyword),
(r'\b(%s)\b' % '|'.join(keywords3), Keyword.Type),
(r'\b(%s)\b' % '|'.join(keywords4), Keyword),
(r'\b(%s)\b' % '|'.join(keywords5), Keyword.Pseudo),
(r'\b(%s)\b' % '|'.join(keywords6), Keyword.Reserved),
(r'\b([A-Z][A-Za-z0-9_\']*)(?=\s*\.)',
Name.Namespace, 'dotted'),
(r'\b([A-Z][A-Za-z0-9_\']*)', Name.Class),
(r'(%s)' % '|'.join(keyopts[::-1]), Operator),
(r'(%s|%s)?%s' % (infix_syms, prefix_syms, operators), Operator),
(r'\b(%s)\b' % '|'.join(word_operators), Operator.Word),
(r'\b(%s)\b' % '|'.join(primitives), Keyword.Type),
(r"[^\W\d][\w']*", Name),
            (r'-?\d[\d_]*(\.[\d_]*)?([eE][+\-]?\d[\d_]*)', Number.Float),
            (r'0[xX][\da-fA-F][\da-fA-F_]*', Number.Hex),
            (r'0[oO][0-7][0-7_]*', Number.Oct),
            (r'0[bB][01][01_]*', Number.Binary),
            (r'\d[\d_]*', Number.Integer),
(r"'(?:(\\[\\\"'ntbr ])|(\\[0-9]{3})|(\\x[0-9a-fA-F]{2}))'",
String.Char),
(r"'.'", String.Char),
(r"'", Keyword), # a stray quote is another syntax element
(r'"', String.Double, 'string'),
(r'[~?][a-z][\w\']*:', Name.Variable),
],
'comment': [
(r'[^(*)]+', Comment),
(r'\(\*', Comment, '#push'),
(r'\*\)', Comment, '#pop'),
(r'[(*)]', Comment),
],
'string': [
(r'[^"]+', String.Double),
(r'""', String.Double),
(r'"', String.Double, '#pop'),
],
'dotted': [
(r'\s+', Text),
(r'\.', Punctuation),
(r'[A-Z][A-Za-z0-9_\']*(?=\s*\.)', Name.Namespace),
(r'[A-Z][A-Za-z0-9_\']*', Name.Class, '#pop'),
(r'[a-z][a-z0-9_\']*', Name, '#pop'),
(r'', Text, '#pop')
],
}
def analyse_text(text):
if text.startswith('(*'):
return True
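# --- Illustrative sketch (not part of the original module) ---
# analyse_text feeds Pygments' lexer guessing; the lexer metaclass
# normalizes the boolean into a 0.0-1.0 score, so a '(*'-prefixed snippet
# scores highest:
if __name__ == '__main__':
    print(CoqLexer.analyse_text('(* Proof script *)'))
    print(CoqLexer.analyse_text('Theorem t : True.'))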
class NewLispLexer(RegexLexer):
"""
For `newLISP. <www.newlisp.org>`_ source code (version 10.3.0).
*New in Pygments 1.5.*
"""
name = 'NewLisp'
aliases = ['newlisp']
filenames = ['*.lsp', '*.nl']
mimetypes = ['text/x-newlisp', 'application/x-newlisp']
flags = re.IGNORECASE | re.MULTILINE | re.UNICODE
# list of built-in functions for newLISP version 10.3
builtins = [
'^', '--', '-', ':', '!', '!=', '?', '@', '*', '/', '&', '%', '+', '++',
'<', '<<', '<=', '=', '>', '>=', '>>', '|', '~', '$', '$0', '$1', '$10',
'$11', '$12', '$13', '$14', '$15', '$2', '$3', '$4', '$5', '$6', '$7',
'$8', '$9', '$args', '$idx', '$it', '$main-args', 'abort', 'abs',
'acos', 'acosh', 'add', 'address', 'amb', 'and', 'and', 'append-file',
'append', 'apply', 'args', 'array-list', 'array?', 'array', 'asin',
'asinh', 'assoc', 'atan', 'atan2', 'atanh', 'atom?', 'base64-dec',
'base64-enc', 'bayes-query', 'bayes-train', 'begin', 'begin', 'begin',
'beta', 'betai', 'bind', 'binomial', 'bits', 'callback', 'case', 'case',
'case', 'catch', 'ceil', 'change-dir', 'char', 'chop', 'Class', 'clean',
'close', 'command-event', 'cond', 'cond', 'cond', 'cons', 'constant',
'context?', 'context', 'copy-file', 'copy', 'cos', 'cosh', 'count',
'cpymem', 'crc32', 'crit-chi2', 'crit-z', 'current-line', 'curry',
'date-list', 'date-parse', 'date-value', 'date', 'debug', 'dec',
'def-new', 'default', 'define-macro', 'define-macro', 'define',
'delete-file', 'delete-url', 'delete', 'destroy', 'det', 'device',
'difference', 'directory?', 'directory', 'div', 'do-until', 'do-while',
'doargs', 'dolist', 'dostring', 'dotimes', 'dotree', 'dump', 'dup',
'empty?', 'encrypt', 'ends-with', 'env', 'erf', 'error-event',
'eval-string', 'eval', 'exec', 'exists', 'exit', 'exp', 'expand',
'explode', 'extend', 'factor', 'fft', 'file-info', 'file?', 'filter',
'find-all', 'find', 'first', 'flat', 'float?', 'float', 'floor', 'flt',
'fn', 'for-all', 'for', 'fork', 'format', 'fv', 'gammai', 'gammaln',
'gcd', 'get-char', 'get-float', 'get-int', 'get-long', 'get-string',
'get-url', 'global?', 'global', 'if-not', 'if', 'ifft', 'import', 'inc',
'index', 'inf?', 'int', 'integer?', 'integer', 'intersect', 'invert',
'irr', 'join', 'lambda-macro', 'lambda?', 'lambda', 'last-error',
'last', 'legal?', 'length', 'let', 'let', 'let', 'letex', 'letn',
'letn', 'letn', 'list?', 'list', 'load', 'local', 'log', 'lookup',
'lower-case', 'macro?', 'main-args', 'MAIN', 'make-dir', 'map', 'mat',
'match', 'max', 'member', 'min', 'mod', 'module', 'mul', 'multiply',
'NaN?', 'net-accept', 'net-close', 'net-connect', 'net-error',
'net-eval', 'net-interface', 'net-ipv', 'net-listen', 'net-local',
'net-lookup', 'net-packet', 'net-peek', 'net-peer', 'net-ping',
'net-receive-from', 'net-receive-udp', 'net-receive', 'net-select',
'net-send-to', 'net-send-udp', 'net-send', 'net-service',
'net-sessions', 'new', 'nil?', 'nil', 'normal', 'not', 'now', 'nper',
'npv', 'nth', 'null?', 'number?', 'open', 'or', 'ostype', 'pack',
'parse-date', 'parse', 'peek', 'pipe', 'pmt', 'pop-assoc', 'pop',
'post-url', 'pow', 'prefix', 'pretty-print', 'primitive?', 'print',
'println', 'prob-chi2', 'prob-z', 'process', 'prompt-event',
'protected?', 'push', 'put-url', 'pv', 'quote?', 'quote', 'rand',
'random', 'randomize', 'read', 'read-char', 'read-expr', 'read-file',
'read-key', 'read-line', 'read-utf8', 'read', 'reader-event',
'real-path', 'receive', 'ref-all', 'ref', 'regex-comp', 'regex',
'remove-dir', 'rename-file', 'replace', 'reset', 'rest', 'reverse',
'rotate', 'round', 'save', 'search', 'seed', 'seek', 'select', 'self',
'semaphore', 'send', 'sequence', 'series', 'set-locale', 'set-ref-all',
'set-ref', 'set', 'setf', 'setq', 'sgn', 'share', 'signal', 'silent',
'sin', 'sinh', 'sleep', 'slice', 'sort', 'source', 'spawn', 'sqrt',
'starts-with', 'string?', 'string', 'sub', 'swap', 'sym', 'symbol?',
'symbols', 'sync', 'sys-error', 'sys-info', 'tan', 'tanh', 'term',
'throw-error', 'throw', 'time-of-day', 'time', 'timer', 'title-case',
'trace-highlight', 'trace', 'transpose', 'Tree', 'trim', 'true?',
'true', 'unicode', 'unify', 'unique', 'unless', 'unpack', 'until',
'upper-case', 'utf8', 'utf8len', 'uuid', 'wait-pid', 'when', 'while',
'write', 'write-char', 'write-file', 'write-line', 'write',
'xfer-event', 'xml-error', 'xml-parse', 'xml-type-tags', 'zero?',
]
# valid names
valid_name = r'([a-zA-Z0-9!$%&*+.,/<=>?@^_~|-])+|(\[.*?\])+'
tokens = {
'root': [
# shebang
(r'#!(.*?)$', Comment.Preproc),
# comments starting with semicolon
(r';.*$', Comment.Single),
# comments starting with #
(r'#.*$', Comment.Single),
# whitespace
(r'\s+', Text),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
# braces
(r"{", String, "bracestring"),
# [text] ... [/text] delimited strings
            (r'\[text\]', String, "tagstring"),
# 'special' operators...
(r"('|:)", Operator),
# highlight the builtins
('(%s)' % '|'.join(re.escape(entry) + '\\b' for entry in builtins),
Keyword),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Variable),
# the remaining variables
(valid_name, String.Symbol),
# parentheses
(r'(\(|\))', Punctuation),
],
# braced strings...
'bracestring': [
("{", String, "#push"),
("}", String, "#pop"),
("[^{}]+", String),
],
# tagged [text]...[/text] delimited strings...
'tagstring': [
(r'(?s)(.*?)(\[/text\])', String, '#pop'),
],
}
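# --- Illustrative sketch (not part of the original module) ---
# The 'bracestring' state nests via '#push'/'#pop', so braced strings with
# inner braces stay a single String run:
if __name__ == '__main__':
    for tok, text in NewLispLexer().get_tokens("(println {outer {inner} text})"):
        print(tok, repr(text))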
class ElixirLexer(RegexLexer):
"""
For the `Elixir language <http://elixir-lang.org>`_.
*New in Pygments 1.5.*
"""
name = 'Elixir'
aliases = ['elixir', 'ex', 'exs']
filenames = ['*.ex', '*.exs']
mimetypes = ['text/x-elixir']
def gen_elixir_sigil_rules():
states = {}
states['strings'] = [
(r'(%[A-Ba-z])?"""(?:.|\n)*?"""', String.Doc),
(r"'''(?:.|\n)*?'''", String.Doc),
(r'"', String.Double, 'dqs'),
(r"'.*'", String.Single),
            (r'(?<!\w)\?(\\(x\d{1,2}|[\da-fA-F]{1,2}(?![\da-fA-F])\b|0[0-7]{0,2}(?![0-7])\b|'
r'[^x0MC])|(\\[MC]-)+\w|[^\s\\])', String.Other)
]
        for lbrace, rbrace, name in ('\\{', '\\}', 'cb'), \
('\\[', '\\]', 'sb'), \
('\\(', '\\)', 'pa'), \
('\\<', '\\>', 'lt'):
states['strings'] += [
(r'%[a-z]' + lbrace, String.Double, name + 'intp'),
(r'%[A-Z]' + lbrace, String.Double, name + 'no-intp')
]
states[name +'intp'] = [
(r'' + rbrace + '[a-z]*', String.Double, "#pop"),
include('enddoublestr')
]
states[name +'no-intp'] = [
(r'.*' + rbrace + '[a-z]*', String.Double , "#pop")
]
return states
tokens = {
'root': [
(r'\s+', Text),
(r'#.*$', Comment.Single),
(r'\b(case|cond|end|bc|lc|if|unless|try|loop|receive|fn|defmodule|'
r'defp?|defprotocol|defimpl|defrecord|defmacrop?|defdelegate|'
r'defexception|exit|raise|throw|unless|after|rescue|catch|else)\b(?![?!])|'
r'(?<!\.)\b(do|\-\>)\b\s*', Keyword),
(r'\b(import|require|use|recur|quote|unquote|super|refer)\b(?![?!])',
Keyword.Namespace),
(r'(?<!\.)\b(and|not|or|when|xor|in)\b', Operator.Word),
(r'%=|\*=|\*\*=|\+=|\-=|\^=|\|\|=|'
r'<=>|<(?!<|=)|>(?!<|=|>)|<=|>=|===|==|=~|!=|!~|(?=[ \t])\?|'
r'(?<=[ \t])!+|&&|\|\||\^|\*|\+|\-|/|'
r'\||\+\+|\-\-|\*\*|\/\/|\<\-|\<\>|<<|>>|=|\.', Operator),
(r'(?<!:)(:)([a-zA-Z_]\w*([?!]|=(?![>=]))?|\<\>|===?|>=?|<=?|'
r'<=>|&&?|%\(\)|%\[\]|%\{\}|\+\+?|\-\-?|\|\|?|\!|//|[%&`/\|]|'
r'\*\*?|=?~|<\-)|([a-zA-Z_]\w*([?!])?)(:)(?!:)', String.Symbol),
(r':"', String.Symbol, 'interpoling_symbol'),
(r'\b(nil|true|false)\b(?![?!])|\b[A-Z]\w*\b', Name.Constant),
(r'\b(__(FILE|LINE|MODULE|MAIN|FUNCTION)__)\b(?![?!])', Name.Builtin.Pseudo),
(r'[a-zA-Z_!][\w_]*[!\?]?', Name),
(r'[(){};,/\|:\\\[\]]', Punctuation),
(r'@[a-zA-Z_]\w*|&\d', Name.Variable),
(r'\b(0[xX][0-9A-Fa-f]+|\d(_?\d)*(\.(?![^\d\s])'
r'(_?\d)*)?([eE][-+]?\d(_?\d)*)?|0[bB][01]+)\b', Number),
(r'%r\/.*\/', String.Regex),
include('strings'),
],
'dqs': [
(r'"', String.Double, "#pop"),
include('enddoublestr')
],
'interpoling': [
(r'#{', String.Interpol, 'interpoling_string'),
],
'interpoling_string' : [
(r'}', String.Interpol, "#pop"),
include('root')
],
'interpoling_symbol': [
(r'"', String.Symbol, "#pop"),
include('interpoling'),
(r'[^#"]+', String.Symbol),
],
'enddoublestr' : [
include('interpoling'),
(r'[^#"]+', String.Double),
]
}
tokens.update(gen_elixir_sigil_rules())
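# --- Illustrative sketch (not part of the original module) ---
# gen_elixir_sigil_rules() pairs each delimiter with an interpolating
# (%[a-z]) state and a raw (%[A-Z]) state; the lowercase sigil re-enters the
# root rules inside #{...} while the uppercase one swallows its body whole:
if __name__ == '__main__':
    for tok, text in ElixirLexer().get_tokens('%b{interpolated #{x}} %B{raw}'):
        print(tok, repr(text))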
class ElixirConsoleLexer(Lexer):
"""
For Elixir interactive console (iex) output like:
.. sourcecode:: iex
iex> [head | tail] = [1,2,3]
[1,2,3]
iex> head
1
iex> tail
[2,3]
iex> [head | tail]
[1,2,3]
iex> length [head | tail]
3
*New in Pygments 1.5.*
"""
name = 'Elixir iex session'
aliases = ['iex']
mimetypes = ['text/x-elixir-shellsession']
    _prompt_re = re.compile(r'(iex|\.{3})> ')
def get_tokens_unprocessed(self, text):
exlexer = ElixirLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
if line.startswith(u'** '):
insertions.append((len(curcode),
[(0, Generic.Error, line[:-1])]))
curcode += line[-1:]
else:
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
exlexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
exlexer.get_tokens_unprocessed(curcode)):
yield item
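# --- Illustrative sketch (not part of the original module) ---
# Quick check of the iex session lexing: prompts become Generic.Prompt,
# prompted code goes through ElixirLexer, everything else is Generic.Output:
if __name__ == '__main__':
    for pos, tok, text in ElixirConsoleLexer().get_tokens_unprocessed("iex> 1 + 1\n2\n"):
        print(pos, tok, repr(text))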
class KokaLexer(RegexLexer):
"""
Lexer for the `Koka <http://koka.codeplex.com>`_
language.
*New in Pygments 1.6.*
"""
name = 'Koka'
aliases = ['koka']
filenames = ['*.kk', '*.kki']
mimetypes = ['text/x-koka']
keywords = [
'infix', 'infixr', 'infixl',
'type', 'cotype', 'rectype', 'alias',
'struct', 'con',
'fun', 'function', 'val', 'var',
'external',
'if', 'then', 'else', 'elif', 'return', 'match',
'private', 'public', 'private',
'module', 'import', 'as',
'include', 'inline',
'rec',
'try', 'yield', 'enum',
'interface', 'instance',
]
# keywords that are followed by a type
typeStartKeywords = [
'type', 'cotype', 'rectype', 'alias', 'struct', 'enum',
]
# keywords valid in a type
typekeywords = [
'forall', 'exists', 'some', 'with',
]
# builtin names and special names
builtin = [
'for', 'while', 'repeat',
'foreach', 'foreach-indexed',
'error', 'catch', 'finally',
'cs', 'js', 'file', 'ref', 'assigned',
]
# symbols that can be in an operator
    symbols = r'[\$%&\*\+@!/\\\^~=\.:\-\?\|<>]+'
# symbol boundary: an operator keyword should not be followed by any of these
sboundary = '(?!'+symbols+')'
# name boundary: a keyword should not be followed by any of these
    boundary = r'(?![\w/])'
# koka token abstractions
tokenType = Name.Attribute
tokenTypeDef = Name.Class
tokenConstructor = Generic.Emph
# main lexer
tokens = {
'root': [
include('whitespace'),
# go into type mode
(r'::?' + sboundary, tokenType, 'type'),
(r'(alias)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
'alias-type'),
(r'(struct)(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
'struct-type'),
((r'(%s)' % '|'.join(typeStartKeywords)) +
r'(\s+)([a-z]\w*)?', bygroups(Keyword, Text, tokenTypeDef),
'type'),
# special sequences of tokens (we use ?: for non-capturing group as
# required by 'bygroups')
(r'(module)(\s+)(interface\s+)?((?:[a-z]\w*/)*[a-z]\w*)',
bygroups(Keyword, Text, Keyword, Name.Namespace)),
(r'(import)(\s+)((?:[a-z]\w*/)*[a-z]\w*)'
r'(?:(\s*)(=)(\s*)((?:qualified\s*)?)'
r'((?:[a-z]\w*/)*[a-z]\w*))?',
bygroups(Keyword, Text, Name.Namespace, Text, Keyword, Text,
Keyword, Name.Namespace)),
(r'(^(?:(?:public|private)\s*)?(?:function|fun|val))'
r'(\s+)([a-z]\w*|\((?:' + symbols + r'|/)\))',
bygroups(Keyword, Text, Name.Function)),
(r'(^(?:(?:public|private)\s*)?external)(\s+)(inline\s+)?'
r'([a-z]\w*|\((?:' + symbols + r'|/)\))',
bygroups(Keyword, Text, Keyword, Name.Function)),
# keywords
(r'(%s)' % '|'.join(typekeywords) + boundary, Keyword.Type),
(r'(%s)' % '|'.join(keywords) + boundary, Keyword),
(r'(%s)' % '|'.join(builtin) + boundary, Keyword.Pseudo),
(r'::?|:=|\->|[=\.]' + sboundary, Keyword),
# names
(r'((?:[a-z]\w*/)*)([A-Z]\w*)',
bygroups(Name.Namespace, tokenConstructor)),
(r'((?:[a-z]\w*/)*)([a-z]\w*)', bygroups(Name.Namespace, Name)),
(r'((?:[a-z]\w*/)*)(\((?:' + symbols + r'|/)\))',
bygroups(Name.Namespace, Name)),
(r'_\w*', Name.Variable),
# literal string
(r'@"', String.Double, 'litstring'),
# operators
            (symbols + r"|/(?![\*/])", Operator),
(r'`', Operator),
(r'[\{\}\(\)\[\];,]', Punctuation),
# literals. No check for literal characters with len > 1
(r'[0-9]+\.[0-9]+([eE][\-\+]?[0-9]+)?', Number.Float),
(r'0[xX][0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r"'", String.Char, 'char'),
(r'"', String.Double, 'string'),
],
# type started by alias
'alias-type': [
(r'=',Keyword),
include('type')
],
# type started by struct
'struct-type': [
(r'(?=\((?!,*\)))',Punctuation, '#pop'),
include('type')
],
# type started by colon
'type': [
(r'[\(\[<]', tokenType, 'type-nested'),
include('type-content')
],
# type nested in brackets: can contain parameters, comma etc.
'type-nested': [
(r'[\)\]>]', tokenType, '#pop'),
(r'[\(\[<]', tokenType, 'type-nested'),
(r',', tokenType),
(r'([a-z]\w*)(\s*)(:)(?!:)',
bygroups(Name, Text, tokenType)), # parameter name
include('type-content')
],
# shared contents of a type
'type-content': [
include('whitespace'),
# keywords
(r'(%s)' % '|'.join(typekeywords) + boundary, Keyword),
(r'(?=((%s)' % '|'.join(keywords) + boundary + '))',
Keyword, '#pop'), # need to match because names overlap...
# kinds
(r'[EPHVX]' + boundary, tokenType),
# type names
(r'[a-z][0-9]*(?![\w/])', tokenType ),
(r'_\w*', tokenType.Variable), # Generic.Emph
(r'((?:[a-z]\w*/)*)([A-Z]\w*)',
bygroups(Name.Namespace, tokenType)),
(r'((?:[a-z]\w*/)*)([a-z]\w+)',
bygroups(Name.Namespace, tokenType)),
# type keyword operators
(r'::|\->|[\.:|]', tokenType),
#catchall
(r'', Text, '#pop')
],
# comments and literals
'whitespace': [
(r'\n\s*#.*$', Comment.Preproc),
(r'\s+', Text),
(r'/\*', Comment.Multiline, 'comment'),
(r'//.*$', Comment.Single)
],
'comment': [
(r'[^/\*]+', Comment.Multiline),
(r'/\*', Comment.Multiline, '#push'),
(r'\*/', Comment.Multiline, '#pop'),
(r'[\*/]', Comment.Multiline),
],
'litstring': [
(r'[^"]+', String.Double),
(r'""', String.Escape),
(r'"', String.Double, '#pop'),
],
'string': [
(r'[^\\"\n]+', String.Double),
include('escape-sequence'),
(r'["\n]', String.Double, '#pop'),
],
'char': [
(r'[^\\\'\n]+', String.Char),
include('escape-sequence'),
(r'[\'\n]', String.Char, '#pop'),
],
'escape-sequence': [
(r'\\[nrt\\\"\']', String.Escape),
(r'\\x[0-9a-fA-F]{2}', String.Escape),
(r'\\u[0-9a-fA-F]{4}', String.Escape),
# Yes, \U literals are 6 hex digits.
(r'\\U[0-9a-fA-F]{6}', String.Escape)
]
}
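# --- Illustrative sketch (not part of the original module) ---
# The ':' rule above switches into type mode, so the annotations in a
# function header lex with the tokenType (Name.Attribute) family:
if __name__ == '__main__':
    for tok, text in KokaLexer().get_tokens("fun id( x : int ) : int { x }"):
        print(tok, repr(text))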
| mit |
cjaymes/pyscap | src/scap/model/xccdf_1_2/CheckContentRefType.py | 1 | 1395 | # Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.Model import Model, ReferenceException
logger = logging.getLogger(__name__)
class CheckContentRefType(Model):
MODEL_MAP = {
'attributes': {
            'href': {'type': 'AnyUriType', 'required': True},
'name': {'type': 'StringType'},
},
}
def check(self, benchmark, host, exports, import_names):
content = Model.find_content(self.href)
if content is None:
raise ReferenceException(self.href + ' was not loaded')
# find the named content
if self.name is not None:
content = content.find_reference(self.name)
# apply content
return content.check(host, exports, import_names)
| gpl-3.0 |
nhenezi/kuma | vendor/lib/python/south/migration/utils.py | 129 | 2417 | import sys
from collections import deque
from django.utils.datastructures import SortedDict
from django.db import models
from south import exceptions
class SortedSet(SortedDict):
def __init__(self, data=tuple()):
self.extend(data)
def __str__(self):
return "SortedSet(%s)" % list(self)
def add(self, value):
self[value] = True
def remove(self, value):
del self[value]
def extend(self, iterable):
[self.add(k) for k in iterable]
def get_app_label(app):
"""
Returns the _internal_ app label for the given app module.
i.e. for <module django.contrib.auth.models> will return 'auth'
"""
return app.__name__.split('.')[-2]
def app_label_to_app_module(app_label):
"""
Given the app label, returns the module of the app itself (unlike models.get_app,
which returns the models module)
"""
# Get the models module
app = models.get_app(app_label)
module_name = ".".join(app.__name__.split(".")[:-1])
try:
module = sys.modules[module_name]
except KeyError:
__import__(module_name, {}, {}, [''])
module = sys.modules[module_name]
return module
def flatten(*stack):
stack = deque(stack)
while stack:
try:
x = next(stack[0])
except TypeError:
stack[0] = iter(stack[0])
x = next(stack[0])
except StopIteration:
stack.popleft()
continue
if hasattr(x, '__iter__') and not isinstance(x, str):
stack.appendleft(x)
else:
yield x
dependency_cache = {}
def _dfs(start, get_children, path):
if (start, get_children) in dependency_cache:
return dependency_cache[(start, get_children)]
results = []
if start in path:
raise exceptions.CircularDependency(path[path.index(start):] + [start])
path.append(start)
results.append(start)
children = sorted(get_children(start), key=lambda x: str(x))
# We need to apply all the migrations this one depends on
for n in children:
results = _dfs(n, get_children, path) + results
path.pop()
results = list(SortedSet(results))
dependency_cache[(start, get_children)] = results
return results
def dfs(start, get_children):
return _dfs(start, get_children, [])
def depends(start, get_children):
return dfs(start, get_children)
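# --- Illustrative sketch (not part of the original module) ---
# dfs() linearizes a dependency graph depth-first with dependencies first,
# de-duplicating through SortedSet and raising CircularDependency on cycles;
# flatten() lazily walks nested iterables, leaving strings whole:
if __name__ == '__main__':
    graph = {'c': ['b'], 'b': ['a'], 'a': []}
    print(dfs('c', lambda node: graph[node]))  # ['a', 'b', 'c']
    print(list(flatten([1, [2, [3]], 'ab'])))  # [1, 2, 3, 'ab']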
| mpl-2.0 |
dakot/vilay-detect | vilay/detectors/DetectorWrapper.py | 1 | 2000 | from PyQt4 import QtGui
from vilay.core.DescriptionScheme import DescriptionScheme
class DetectorWrapper:
def __init__(self, vd, detectorClass):
self.vd = vd
self.detector = detectorClass
self.tgtRoot = self.vd.data.dsRoot
self.mediaTimes = []
self.detector.initialize()
def startDetector(self):
self.tgtRoot = self.vd.data.dsRoot
self.mediaTimes = [self.vd.mainWin.ui.navigationBar.getPlaceTime()]
items = ["Add new Description Scheme to root"]
# items.extend( self.vd.data.dsRoot.getAllPaths())
# item, ok = QtGui.QInputDialog.getItem (self.vd.mainWin, "Select root Description Scheme", "Select root for newly detected objects.", items, current = 0, editable = False)
        _, ok = QtGui.QInputDialog.getItem (self.vd.mainWin, "Select root Description Scheme", "Select root for newly detected objects.", items, current = 0, editable = False)
# TODO: merge mediaTime
if ok:
newSemObj = DescriptionScheme("New "+self.detector.getName()+" DS")
self.vd.data.dsRoot.addDescriptionScheme(newSemObj)
self.tgtRoot = newSemObj
#TODO: fix selection of DS
# path = list(item.split(" > "))
# if len(path) == 1:
# if not item == self.vd.data.dsRoot.name:
# newSemObj = DescriptionScheme("New "+self.detector.getName()+" DS")
# self.vd.data.dsRoot.addDescriptionScheme(newSemObj)
# self.tgtRoot = newSemObj
# else:
# path.pop(0)
# print path
# newRoot = self.treeRoot.getDescriptionSchemesRecursicely(path)
# self.tgtRoot = newRoot
self.detector.detect(self.mediaTimes, self.tgtRoot, self.vd.data.film, self.vd.data.dsRoot, self.vd.mainWin)
self.vd.mainWin.replot()
self.vd.mainWin.repaint()
| gpl-3.0 |
scylladb/scylla-cluster-tests | sdcm/cdclog_reader_thread.py | 1 | 6943 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2020 ScyllaDB
import logging
import uuid
import pprint
from pathlib import Path
from typing import List, Dict
from sdcm.sct_events.loaders import CDCReaderStressEvent
from sdcm.utils.common import get_docker_stress_image_name
from sdcm.utils.docker_remote import RemoteDocker
from sdcm.stress_thread import format_stress_cmd_error, DockerBasedStressThread
from sdcm.utils.cdc.options import CDC_LOGTABLE_SUFFIX
LOGGER = logging.getLogger(__name__)
CDCLOG_READER_IMAGE = get_docker_stress_image_name(tool_name="cdcstressor")
PP = pprint.PrettyPrinter(indent=2)
class CDCLogReaderThread(DockerBasedStressThread):
def __init__(self, *args, **kwargs):
self.keyspace = kwargs.pop("keyspace_name")
self.cdc_log_table = kwargs.pop("base_table_name") + CDC_LOGTABLE_SUFFIX
self.batching = kwargs.pop("enable_batching")
super().__init__(*args, **kwargs)
def build_stress_command(self, worker_id, worker_count):
node_ips = ",".join([node.ip_address for node in self.node_list])
shards_per_node = self.node_list[0].cpu_cores if self.batching else 1
self.stress_cmd = f"{self.stress_cmd} -keyspace {self.keyspace} -table {self.cdc_log_table} \
-nodes {node_ips} -group-size {shards_per_node} \
-worker-id {worker_id} -worker-count {worker_count}"
def _run_stress(self, loader, loader_idx, cpu_idx): # pylint: disable=unused-argument
loader_node_logdir = Path(loader.logdir)
if not loader_node_logdir.exists():
loader_node_logdir.mkdir()
worker_count = self.max_workers
worker_id = loader_idx * self.stress_num + cpu_idx
log_file_name = loader_node_logdir.joinpath(f'cdclogreader-l{loader_idx}-{worker_id}-{uuid.uuid4()}.log')
LOGGER.debug('cdc-stressor local log: %s', log_file_name)
self.build_stress_command(worker_id, worker_count)
LOGGER.info(self.stress_cmd)
docker = RemoteDocker(loader, CDCLOG_READER_IMAGE,
extra_docker_opts=f'--network=host --label shell_marker={self.shell_marker}')
node_cmd = f'STRESS_TEST_MARKER={self.shell_marker}; {self.stress_cmd}'
CDCReaderStressEvent.start(node=loader, stress_cmd=self.stress_cmd).publish()
try:
result = docker.run(cmd=node_cmd,
timeout=self.timeout + self.shutdown_timeout,
ignore_status=True,
log_file=log_file_name,
verbose=True)
if not result.ok:
CDCReaderStressEvent.error(node=loader,
stress_cmd=self.stress_cmd,
errors=result.stderr.split("\n")).publish()
return result
except Exception as exc: # pylint: disable=broad-except
CDCReaderStressEvent.failure(node=loader,
stress_cmd=self.stress_cmd,
errors=[format_stress_cmd_error(exc), ]).publish()
finally:
CDCReaderStressEvent.finish(node=loader, stress_cmd=self.stress_cmd).publish()
return None
@staticmethod
def _parse_cdcreaderstressor_results(lines: List[str]) -> Dict:
"""parse result of cdcreader results
lines:
Results:
num rows read: 95185
rows read/s: 528.805556/s
polls/s: 3039.144444/s
idle polls: 529041/547046 (96.708686%)
latency min: 0.524288 ms
latency avg: 11.493153 ms
latency median: 8.978431 ms
latency 90%: 22.151167 ms
latency 99%: 56.328191 ms
latency 99.9%: 88.604671 ms
latency max: 156.762111 ms
return
{
"op rate": "1000",
"latency min": "0.5",
"latency max": "10",
"latency mean": "4",
...
}
"""
cdcreader_cs_keys_map = {
# {"num rows read": ["num rows read"]},
"rows read/s": ["partition rate", "row rate"],
"polls/s": ["op rate"],
"latency min": ["latency min"],
"latency avg": ["latency mean"],
"latency median": ["latency median"],
"latency 90%": ["latency 90th percentile"],
"latency 99%": ["latency 99th percentile"],
"latency 99.9%": ["latency 99.9th percentile"],
"latency max": ["latency max"],
}
result = {}
parse_enable = False
for line in lines:
if line.startswith("Results:"):
parse_enable = True
if not parse_enable:
continue
res = line.split(":")
if len(res) < 2:
continue
name = res[0].strip()
value = res[1].strip()
if name in cdcreader_cs_keys_map:
if name in ["rows read/s", "polls/s"]:
for replace_name in cdcreader_cs_keys_map[name]:
result[replace_name] = value.split("/")[0]
else:
for replace_name in cdcreader_cs_keys_map[name]:
result[replace_name] = value.split(" ")[0]
LOGGER.debug(result)
return result
def get_results(self) -> List[Dict]:
"""Return results of cdclog readers
return list of dicts:
[
{
"op rate": "1000",
"latency min": "0.5",
"latency max": "10",
"latency mean": "4",
...
},
{
"op rate": "1000",
"latency min": "0.5",
"latency max": "10",
"latency mean": "4",
...
}
]
"""
results = []
res_stats = []
results = super().get_results()
LOGGER.debug(PP.pformat(results))
for result in results:
res = self._parse_cdcreaderstressor_results(result.stdout.splitlines())
if not res:
LOGGER.warning("Result is empty")
continue
res_stats.append(res)
self.kill()
return res_stats
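# --- Illustrative sketch (not part of the original module) ---
# _parse_cdcreaderstressor_results() in isolation, fed a fragment of the
# sample output from its docstring (running this file still requires the
# sdcm imports at the top to resolve):
if __name__ == '__main__':
    sample = [
        "Results:",
        "num rows read: 95185",
        "rows read/s: 528.805556/s",
        "polls/s: 3039.144444/s",
        "latency min: 0.524288 ms",
        "latency avg: 11.493153 ms",
    ]
    print(CDCLogReaderThread._parse_cdcreaderstressor_results(sample))
    # {'partition rate': '528.805556', 'row rate': '528.805556',
    #  'op rate': '3039.144444', 'latency min': '0.524288',
    #  'latency mean': '11.493153'}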
| agpl-3.0 |
Halfnhav/node-gyp | gyp/pylib/gyp/mac_tool.py | 377 | 19309 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions to perform Xcode-style build steps.
These functions are executed via gyp-mac-tool when using the Makefile generator.
"""
import fcntl
import fnmatch
import glob
import json
import os
import plistlib
import re
import shutil
import string
import subprocess
import sys
import tempfile
def main(args):
executor = MacTool()
exit_code = executor.Dispatch(args)
if exit_code is not None:
sys.exit(exit_code)
class MacTool(object):
"""This class performs all the Mac tooling steps. The methods can either be
executed directly, or dispatched from an argument list."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
return getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecCopyBundleResource(self, source, dest):
"""Copies a resource file to the bundle/Resources directory, performing any
necessary compilation on each resource."""
extension = os.path.splitext(source)[1].lower()
if os.path.isdir(source):
# Copy tree.
# TODO(thakis): This copies file attributes like mtime, while the
# single-file branch below doesn't. This should probably be changed to
# be consistent with the single-file branch.
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(source, dest)
elif extension == '.xib':
return self._CopyXIBFile(source, dest)
elif extension == '.storyboard':
return self._CopyXIBFile(source, dest)
elif extension == '.strings':
self._CopyStringsFile(source, dest)
else:
shutil.copy(source, dest)
def _CopyXIBFile(self, source, dest):
"""Compiles a XIB file with ibtool into a binary plist in the bundle."""
# ibtool sometimes crashes with relative paths. See crbug.com/314728.
base = os.path.dirname(os.path.realpath(__file__))
if os.path.relpath(source):
source = os.path.join(base, source)
if os.path.relpath(dest):
dest = os.path.join(base, dest)
args = ['xcrun', 'ibtool', '--errors', '--warnings', '--notices',
'--output-format', 'human-readable-text', '--compile', dest, source]
ibtool_section_re = re.compile(r'/\*.*\*/')
ibtool_re = re.compile(r'.*note:.*is clipping its content')
ibtoolout = subprocess.Popen(args, stdout=subprocess.PIPE)
current_section_header = None
for line in ibtoolout.stdout:
if ibtool_section_re.match(line):
current_section_header = line
elif not ibtool_re.match(line):
if current_section_header:
sys.stdout.write(current_section_header)
current_section_header = None
sys.stdout.write(line)
return ibtoolout.returncode
def _CopyStringsFile(self, source, dest):
"""Copies a .strings file using iconv to reconvert the input into UTF-16."""
input_code = self._DetectInputEncoding(source) or "UTF-8"
# Xcode's CpyCopyStringsFile / builtin-copyStrings seems to call
# CFPropertyListCreateFromXMLData() behind the scenes; at least it prints
# CFPropertyListCreateFromXMLData(): Old-style plist parser: missing
# semicolon in dictionary.
# on invalid files. Do the same kind of validation.
import CoreFoundation
s = open(source, 'rb').read()
d = CoreFoundation.CFDataCreate(None, s, len(s))
_, error = CoreFoundation.CFPropertyListCreateFromXMLData(None, d, 0, None)
if error:
return
fp = open(dest, 'wb')
fp.write(s.decode(input_code).encode('UTF-16'))
fp.close()
def _DetectInputEncoding(self, file_name):
"""Reads the first few bytes from file_name and tries to guess the text
encoding. Returns None as a guess if it can't detect it."""
fp = open(file_name, 'rb')
try:
header = fp.read(3)
    except Exception:
fp.close()
return None
fp.close()
if header.startswith("\xFE\xFF"):
return "UTF-16"
elif header.startswith("\xFF\xFE"):
return "UTF-16"
elif header.startswith("\xEF\xBB\xBF"):
return "UTF-8"
else:
return None
def ExecCopyInfoPlist(self, source, dest, *keys):
"""Copies the |source| Info.plist to the destination directory |dest|."""
# Read the source Info.plist into memory.
fd = open(source, 'r')
lines = fd.read()
fd.close()
# Insert synthesized key/value pairs (e.g. BuildMachineOSBuild).
plist = plistlib.readPlistFromString(lines)
if keys:
plist = dict(plist.items() + json.loads(keys[0]).items())
lines = plistlib.writePlistToString(plist)
# Go through all the environment variables and replace them as variables in
# the file.
IDENT_RE = re.compile('[/\s]')
for key in os.environ:
if key.startswith('_'):
continue
evar = '${%s}' % key
evalue = os.environ[key]
lines = string.replace(lines, evar, evalue)
# Xcode supports various suffices on environment variables, which are
# all undocumented. :rfc1034identifier is used in the standard project
# template these days, and :identifier was used earlier. They are used to
# convert non-url characters into things that look like valid urls --
# except that the replacement character for :identifier, '_' isn't valid
# in a URL either -- oops, hence :rfc1034identifier was born.
evar = '${%s:identifier}' % key
evalue = IDENT_RE.sub('_', os.environ[key])
lines = string.replace(lines, evar, evalue)
evar = '${%s:rfc1034identifier}' % key
evalue = IDENT_RE.sub('-', os.environ[key])
lines = string.replace(lines, evar, evalue)
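    # Illustrative example (not in the original): with PRODUCT_NAME set to
    # 'My App', '${PRODUCT_NAME:identifier}' expands to 'My_App' and
    # '${PRODUCT_NAME:rfc1034identifier}' to 'My-App'.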
# Remove any keys with values that haven't been replaced.
lines = lines.split('\n')
for i in range(len(lines)):
if lines[i].strip().startswith("<string>${"):
lines[i] = None
lines[i - 1] = None
lines = '\n'.join(filter(lambda x: x is not None, lines))
# Write out the file with variables replaced.
fd = open(dest, 'w')
fd.write(lines)
fd.close()
# Now write out PkgInfo file now that the Info.plist file has been
# "compiled".
self._WritePkgInfo(dest)
def _WritePkgInfo(self, info_plist):
"""This writes the PkgInfo file from the data stored in Info.plist."""
plist = plistlib.readPlist(info_plist)
if not plist:
return
# Only create PkgInfo for executable types.
package_type = plist['CFBundlePackageType']
if package_type != 'APPL':
return
# The format of PkgInfo is eight characters, representing the bundle type
# and bundle signature, each four characters. If that is missing, four
# '?' characters are used instead.
signature_code = plist.get('CFBundleSignature', '????')
if len(signature_code) != 4: # Wrong length resets everything, too.
signature_code = '?' * 4
dest = os.path.join(os.path.dirname(info_plist), 'PkgInfo')
fp = open(dest, 'w')
fp.write('%s%s' % (package_type, signature_code))
fp.close()
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
fd = os.open(lockfile, os.O_RDONLY|os.O_NOCTTY|os.O_CREAT, 0o666)
fcntl.flock(fd, fcntl.LOCK_EX)
return subprocess.call(cmd_list)
def ExecFilterLibtool(self, *cmd_list):
"""Calls libtool and filters out '/path/to/libtool: file: foo.o has no
symbols'."""
libtool_re = re.compile(r'^.*libtool: file: .* has no symbols$')
libtoolout = subprocess.Popen(cmd_list, stderr=subprocess.PIPE)
_, err = libtoolout.communicate()
for line in err.splitlines():
if not libtool_re.match(line):
print >>sys.stderr, line
return libtoolout.returncode
def ExecPackageFramework(self, framework, version):
"""Takes a path to Something.framework and the Current version of that and
sets up all the symlinks."""
# Find the name of the binary based on the part before the ".framework".
binary = os.path.basename(framework).split('.')[0]
CURRENT = 'Current'
RESOURCES = 'Resources'
VERSIONS = 'Versions'
if not os.path.exists(os.path.join(framework, VERSIONS, version, binary)):
# Binary-less frameworks don't seem to contain symlinks (see e.g.
# chromium's out/Debug/org.chromium.Chromium.manifest/ bundle).
return
# Move into the framework directory to set the symlinks correctly.
pwd = os.getcwd()
os.chdir(framework)
# Set up the Current version.
self._Relink(version, os.path.join(VERSIONS, CURRENT))
# Set up the root symlinks.
self._Relink(os.path.join(VERSIONS, CURRENT, binary), binary)
self._Relink(os.path.join(VERSIONS, CURRENT, RESOURCES), RESOURCES)
# Back to where we were before!
os.chdir(pwd)
def _Relink(self, dest, link):
"""Creates a symlink to |dest| named |link|. If |link| already exists,
it is overwritten."""
if os.path.lexists(link):
os.remove(link)
os.symlink(dest, link)
def ExecCodeSignBundle(self, key, resource_rules, entitlements, provisioning):
"""Code sign a bundle.
This function tries to code sign an iOS bundle, following the same
algorithm as Xcode:
1. copy ResourceRules.plist from the user or the SDK into the bundle,
2. pick the provisioning profile that best match the bundle identifier,
and copy it into the bundle as embedded.mobileprovision,
3. copy Entitlements.plist from user or SDK next to the bundle,
4. code sign the bundle.
"""
resource_rules_path = self._InstallResourceRules(resource_rules)
substitutions, overrides = self._InstallProvisioningProfile(
provisioning, self._GetCFBundleIdentifier())
entitlements_path = self._InstallEntitlements(
entitlements, substitutions, overrides)
subprocess.check_call([
'codesign', '--force', '--sign', key, '--resource-rules',
resource_rules_path, '--entitlements', entitlements_path,
os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['FULL_PRODUCT_NAME'])])
def _InstallResourceRules(self, resource_rules):
"""Installs ResourceRules.plist from user or SDK into the bundle.
Args:
resource_rules: string, optional, path to the ResourceRules.plist file
to use, default to "${SDKROOT}/ResourceRules.plist"
Returns:
Path to the copy of ResourceRules.plist into the bundle.
"""
source_path = resource_rules
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'ResourceRules.plist')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'], 'ResourceRules.plist')
shutil.copy2(source_path, target_path)
return target_path
def _InstallProvisioningProfile(self, profile, bundle_identifier):
"""Installs embedded.mobileprovision into the bundle.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple containing two dictionary: variables substitutions and values
to overrides when generating the entitlements file.
"""
source_path, provisioning_data, team_id = self._FindProvisioningProfile(
profile, bundle_identifier)
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['CONTENTS_FOLDER_PATH'],
'embedded.mobileprovision')
shutil.copy2(source_path, target_path)
substitutions = self._GetSubstitutions(bundle_identifier, team_id + '.')
return substitutions, provisioning_data['Entitlements']
def _FindProvisioningProfile(self, profile, bundle_identifier):
"""Finds the .mobileprovision file to use for signing the bundle.
Checks all the installed provisioning profiles (or if the user specified
the PROVISIONING_PROFILE variable, only consult it) and select the most
specific that correspond to the bundle identifier.
Args:
profile: string, optional, short name of the .mobileprovision file
to use, if empty or the file is missing, the best file installed
will be used
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
Returns:
A tuple of the path to the selected provisioning profile, the data of
the embedded plist in the provisioning profile and the team identifier
to use for code signing.
Raises:
SystemExit: if no .mobileprovision can be used to sign the bundle.
"""
profiles_dir = os.path.join(
os.environ['HOME'], 'Library', 'MobileDevice', 'Provisioning Profiles')
if not os.path.isdir(profiles_dir):
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
provisioning_profiles = None
if profile:
profile_path = os.path.join(profiles_dir, profile + '.mobileprovision')
if os.path.exists(profile_path):
provisioning_profiles = [profile_path]
if not provisioning_profiles:
provisioning_profiles = glob.glob(
os.path.join(profiles_dir, '*.mobileprovision'))
valid_provisioning_profiles = {}
for profile_path in provisioning_profiles:
profile_data = self._LoadProvisioningProfile(profile_path)
app_id_pattern = profile_data.get(
'Entitlements', {}).get('application-identifier', '')
for team_identifier in profile_data.get('TeamIdentifier', []):
app_id = '%s.%s' % (team_identifier, bundle_identifier)
if fnmatch.fnmatch(app_id, app_id_pattern):
valid_provisioning_profiles[app_id_pattern] = (
profile_path, profile_data, team_identifier)
if not valid_provisioning_profiles:
print >>sys.stderr, (
'cannot find mobile provisioning for %s' % bundle_identifier)
sys.exit(1)
# If the user has multiple provisioning profiles installed that can be
# used for ${bundle_identifier}, pick the most specific one (ie. the
# provisioning profile whose pattern is the longest).
selected_key = max(valid_provisioning_profiles, key=len)
return valid_provisioning_profiles[selected_key]
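# Example of the selection rule above (hypothetical data): for bundle
# identifier 'com.example.app' and team 'ABCD1234', the app id is
# 'ABCD1234.com.example.app'. If two installed profiles declare the
# application-identifier patterns 'ABCD1234.*' and 'ABCD1234.com.example.*',
# both fnmatch the app id, and the longer (more specific) pattern wins.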
def _LoadProvisioningProfile(self, profile_path):
"""Extracts the plist embedded in a provisioning profile.
Args:
profile_path: string, path to the .mobileprovision file
Returns:
Content of the plist embedded in the provisioning profile as a dictionary.
"""
with tempfile.NamedTemporaryFile() as temp:
subprocess.check_call([
'security', 'cms', '-D', '-i', profile_path, '-o', temp.name])
return self._LoadPlistMaybeBinary(temp.name)
def _LoadPlistMaybeBinary(self, plist_path):
"""Loads into a memory a plist possibly encoded in binary format.
This is a wrapper around plistlib.readPlist that tries to convert the
plist to the XML format if it can't be parsed (assuming that it is in
the binary format).
Args:
plist_path: string, path to a plist file, in XML or binary format
Returns:
Content of the plist as a dictionary.
"""
try:
# First, try to read the file using plistlib that only supports XML,
# and if an exception is raised, convert a temporary copy to XML and
# load that copy.
return plistlib.readPlist(plist_path)
except:
pass
with tempfile.NamedTemporaryFile() as temp:
shutil.copy2(plist_path, temp.name)
subprocess.check_call(['plutil', '-convert', 'xml1', temp.name])
return plistlib.readPlist(temp.name)
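# Sketch of the fallback path (hypothetical file): for a binary
# 'build/Example.app/Info.plist', plistlib.readPlist raises, the file is
# copied to a temporary location, 'plutil -convert xml1 <temp>' rewrites the
# copy as XML in place, and the converted copy then parses to a dictionary
# such as {'CFBundleIdentifier': 'com.example.app', ...}.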
def _GetSubstitutions(self, bundle_identifier, app_identifier_prefix):
"""Constructs a dictionary of variable substitutions for Entitlements.plist.
Args:
bundle_identifier: string, value of CFBundleIdentifier from Info.plist
app_identifier_prefix: string, value for AppIdentifierPrefix
Returns:
Dictionary of substitutions to apply when generating Entitlements.plist.
"""
return {
'CFBundleIdentifier': bundle_identifier,
'AppIdentifierPrefix': app_identifier_prefix,
}
def _GetCFBundleIdentifier(self):
"""Extracts CFBundleIdentifier value from Info.plist in the bundle.
Returns:
Value of CFBundleIdentifier in the Info.plist located in the bundle.
"""
info_plist_path = os.path.join(
os.environ['TARGET_BUILD_DIR'],
os.environ['INFOPLIST_PATH'])
info_plist_data = self._LoadPlistMaybeBinary(info_plist_path)
return info_plist_data['CFBundleIdentifier']
def _InstallEntitlements(self, entitlements, substitutions, overrides):
"""Generates and install the ${BundleName}.xcent entitlements file.
Expands variables "$(variable)" pattern in the source entitlements file,
add extra entitlements defined in the .mobileprovision file and the copy
the generated plist to "${BundlePath}.xcent".
Args:
entitlements: string, optional, path to the Entitlements.plist template
to use, defaults to "${SDKROOT}/Entitlements.plist"
substitutions: dictionary, variable substitutions
overrides: dictionary, values to add to the entitlements
Returns:
Path to the generated entitlements file.
"""
source_path = entitlements
target_path = os.path.join(
os.environ['BUILT_PRODUCTS_DIR'],
os.environ['PRODUCT_NAME'] + '.xcent')
if not source_path:
source_path = os.path.join(
os.environ['SDKROOT'],
'Entitlements.plist')
shutil.copy2(source_path, target_path)
data = self._LoadPlistMaybeBinary(target_path)
data = self._ExpandVariables(data, substitutions)
if overrides:
for key in overrides:
if key not in data:
data[key] = overrides[key]
plistlib.writePlist(data, target_path)
return target_path
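# Example of the merge above (hypothetical values): if the template defines
# {'application-identifier': '$(AppIdentifierPrefix)$(CFBundleIdentifier)'}
# and the profile's Entitlements carry {'get-task-allow': True}, the missing
# key is copied in; keys already present in the template are left untouched
# because of the 'if key not in data' guard.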
def _ExpandVariables(self, data, substitutions):
"""Expands variables "$(variable)" in data.
Args:
data: object, can be either string, list or dictionary
substitutions: dictionary, variable substitutions to perform
Returns:
Copy of data where each reference to "$(variable)" has been replaced
by the corresponding value found in substitutions, or left intact if
the key was not found.
"""
if isinstance(data, str):
for key, value in substitutions.iteritems():
data = data.replace('$(%s)' % key, value)
return data
if isinstance(data, list):
return [self._ExpandVariables(v, substitutions) for v in data]
if isinstance(data, dict):
return dict((k, self._ExpandVariables(data[k],
substitutions)) for k in data)
return data
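# Example expansion (hypothetical values): with substitutions
# {'CFBundleIdentifier': 'com.example.app', 'AppIdentifierPrefix': 'ABCD1234.'},
# the string '$(AppIdentifierPrefix)$(CFBundleIdentifier)' becomes
# 'ABCD1234.com.example.app'; lists and dictionaries are expanded recursively.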
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
daxxi13/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/metacafe.py | 40 | 10132 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
)
class MetacafeIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
_DISCLAIMER = 'http://www.metacafe.com/family_filter/'
_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
IE_NAME = 'metacafe'
_TESTS = [
# Youtube video
{
'add_ie': ['Youtube'],
'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
'info_dict': {
'id': '_aUehQsCQtM',
'ext': 'mp4',
'upload_date': '20090102',
'title': 'The Electric Company | "Short I" | PBS KIDS GO!',
'description': 'md5:2439a8ef6d5a70e380c22f5ad323e5a8',
'uploader': 'PBS',
'uploader_id': 'PBS'
}
},
# Normal metacafe video
{
'url': 'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/',
'md5': '6e0bca200eaad2552e6915ed6fd4d9ad',
'info_dict': {
'id': '11121940',
'ext': 'mp4',
'title': 'News: Stuff You Won\'t Do with Your PlayStation 4',
'uploader': 'ign',
'description': 'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.',
},
},
# AnyClip video
{
'url': 'http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/',
'info_dict': {
'id': 'an-dVVXnuY7Jh77J',
'ext': 'mp4',
'title': 'The Andromeda Strain (1971): Stop the Bomb Part 3',
'uploader': 'anyclip',
'description': 'md5:38c711dd98f5bb87acf973d573442e67',
},
},
# age-restricted video
{
'url': 'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/',
'md5': '98dde7c1a35d02178e8ab7560fe8bd09',
'info_dict': {
'id': '5186653',
'ext': 'mp4',
'title': 'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.',
'uploader': 'Dwayne Pipe',
'description': 'md5:950bf4c581e2c059911fa3ffbe377e4b',
'age_limit': 18,
},
},
# cbs video
{
'url': 'http://www.metacafe.com/watch/cb-8VD4r_Zws8VP/open_this_is_face_the_nation_february_9/',
'info_dict': {
'id': '8VD4r_Zws8VP',
'ext': 'flv',
'title': 'Open: This is Face the Nation, February 9',
'description': 'md5:8a9ceec26d1f7ed6eab610834cc1a476',
'duration': 96,
},
'params': {
# rtmp download
'skip_download': True,
},
},
# Movieclips.com video
{
'url': 'http://www.metacafe.com/watch/mv-Wy7ZU/my_week_with_marilyn_do_you_love_me/',
'info_dict': {
'id': 'mv-Wy7ZU',
'ext': 'mp4',
'title': 'My Week with Marilyn - Do You Love Me?',
'description': 'From the movie My Week with Marilyn - Colin (Eddie Redmayne) professes his love to Marilyn (Michelle Williams) and gets her to promise to return to set and finish the movie.',
'uploader': 'movie_trailers',
'duration': 176,
},
'params': {
'skip_download': 'requires rtmpdump',
}
}
]
def report_disclaimer(self):
self.to_screen('Retrieving disclaimer')
def _real_initialize(self):
# Retrieve disclaimer
self.report_disclaimer()
self._download_webpage(self._DISCLAIMER, None, False, 'Unable to retrieve disclaimer')
# Confirm age
disclaimer_form = {
'filters': '0',
'submit': "Continue - I'm over 18",
}
request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self.report_age_confirmation()
self._download_webpage(request, None, False, 'Unable to confirm age')
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(1)
# the video may come from an external site
m_external = re.match(r'^(\w{2})-(.*)$', video_id)
if m_external is not None:
prefix, ext_id = m_external.groups()
# Check if video comes from YouTube
if prefix == 'yt':
return self.url_result('http://www.youtube.com/watch?v=%s' % ext_id, 'Youtube')
# CBS videos use theplatform.com
if prefix == 'cb':
return self.url_result('theplatform:%s' % ext_id, 'ThePlatform')
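# For example, the test URL above with video id 'yt-_aUehQsCQtM' is
# delegated to http://www.youtube.com/watch?v=_aUehQsCQtM, while an id
# such as 'cb-8VD4r_Zws8VP' is handed off as 'theplatform:8VD4r_Zws8VP'.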
# Retrieve video webpage to extract further information
req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
# AnyClip videos require the flashversion cookie so that we get the link
# to the mp4 file
mobj_an = re.match(r'^an-(.*?)$', video_id)
if mobj_an:
req.headers['Cookie'] = 'flashVersion=0;'
webpage = self._download_webpage(req, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
video_url = None
mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
if mobj is not None:
mediaURL = compat_urllib_parse.unquote(mobj.group(1))
video_ext = mediaURL[-3:]
# Extract gdaKey if available
mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
if mobj is None:
video_url = mediaURL
else:
gdaKey = mobj.group(1)
video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
if video_url is None:
mobj = re.search(r'<video src="([^"]+)"', webpage)
if mobj:
video_url = mobj.group(1)
video_ext = 'mp4'
if video_url is None:
flashvars = self._search_regex(
r' name="flashvars" value="(.*?)"', webpage, 'flashvars',
default=None)
if flashvars:
vardict = compat_parse_qs(flashvars)
if 'mediaData' not in vardict:
raise ExtractorError('Unable to extract media URL')
mobj = re.search(
r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
if mobj is None:
raise ExtractorError('Unable to extract media URL')
mediaURL = mobj.group('mediaURL').replace('\\/', '/')
video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
video_ext = determine_ext(video_url)
if video_url is None:
player_url = self._search_regex(
r"swfobject\.embedSWF\('([^']+)'",
webpage, 'config URL', default=None)
if player_url:
config_url = self._search_regex(
r'config=(.+)$', player_url, 'config URL')
config_doc = self._download_xml(
config_url, video_id,
note='Downloading video config')
smil_url = config_doc.find('.//properties').attrib['smil_file']
smil_doc = self._download_xml(
smil_url, video_id,
note='Downloading SMIL document')
base_url = smil_doc.find('./head/meta').attrib['base']
video_url = []
for vn in smil_doc.findall('.//video'):
br = int(vn.attrib['system-bitrate'])
play_path = vn.attrib['src']
video_url.append({
'format_id': 'smil-%d' % br,
'url': base_url,
'play_path': play_path,
'page_url': url,
'player_url': player_url,
'ext': play_path.partition(':')[0],
})
if video_url is None:
raise ExtractorError('Unsupported video type')
video_title = self._html_search_regex(
r'(?im)<title>(.*) - Video</title>', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
video_uploader = self._html_search_regex(
r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
webpage, 'uploader nickname', fatal=False)
duration = int_or_none(
self._html_search_meta('video:duration', webpage))
age_limit = (
18
if re.search(r'"contentRating":"restricted"', webpage)
else 0)
if isinstance(video_url, list):
formats = video_url
else:
formats = [{
'url': video_url,
'ext': video_ext,
}]
self._sort_formats(formats)
return {
'id': video_id,
'description': description,
'uploader': video_uploader,
'title': video_title,
'thumbnail': thumbnail,
'age_limit': age_limit,
'formats': formats,
'duration': duration,
}
| gpl-3.0 |
rhinstaller/blivet | tests/formats_test/init_test.py | 6 | 1466 | import copy
import unittest
import blivet.formats as formats
class FormatsTestCase(unittest.TestCase):
def test_formats_methods(self):
##
# get_device_format_class
##
format_pairs = {
None: formats.DeviceFormat,
"bogus": None,
"biosboot": formats.biosboot.BIOSBoot,
"BIOS Boot": formats.biosboot.BIOSBoot,
"nodev": formats.fs.NoDevFS
}
format_names = format_pairs.keys()
format_values = [format_pairs[k] for k in format_names]
self.assertEqual(
[formats.get_device_format_class(x) for x in format_names],
format_values)
# A DeviceFormat object is returned if lookup by name fails
for name in format_names:
self.assertIs(formats.get_format(name).__class__,
formats.DeviceFormat if format_pairs[name] is None else format_pairs[name])
# Consecutively constructed DeviceFormat objects have consecutive ids
names = [key for key in format_pairs.keys() if format_pairs[key] is not None]
objs = [formats.get_format(name) for name in names]
ids = [obj.id for obj in objs]
self.assertEqual(ids, list(range(ids[0], ids[0] + len(ids))))
# Copy or deepcopy should preserve the id
self.assertEqual(ids, [copy.copy(obj).id for obj in objs])
self.assertEqual(ids, [copy.deepcopy(obj).id for obj in objs])
| lgpl-2.1 |
OpenMined/PySyft | packages/syft/src/syft/proto/core/io/location_pb2.py | 1 | 3450 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: proto/core/io/location.proto
"""Generated protocol buffer code."""
# third party
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# syft absolute
from syft.proto.core.common import (
common_object_pb2 as proto_dot_core_dot_common_dot_common__object__pb2,
)
DESCRIPTOR = _descriptor.FileDescriptor(
name="proto/core/io/location.proto",
package="syft.core.io",
syntax="proto3",
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1cproto/core/io/location.proto\x12\x0csyft.core.io\x1a%proto/core/common/common_object.proto"C\n\x10SpecificLocation\x12!\n\x02id\x18\x01 \x01(\x0b\x32\x15.syft.core.common.UID\x12\x0c\n\x04name\x18\x02 \x01(\tb\x06proto3',
dependencies=[
proto_dot_core_dot_common_dot_common__object__pb2.DESCRIPTOR,
],
)
_SPECIFICLOCATION = _descriptor.Descriptor(
name="SpecificLocation",
full_name="syft.core.io.SpecificLocation",
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name="id",
full_name="syft.core.io.SpecificLocation.id",
index=0,
number=1,
type=11,
cpp_type=10,
label=1,
has_default_value=False,
default_value=None,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
_descriptor.FieldDescriptor(
name="name",
full_name="syft.core.io.SpecificLocation.name",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=85,
serialized_end=152,
)
_SPECIFICLOCATION.fields_by_name[
"id"
].message_type = proto_dot_core_dot_common_dot_common__object__pb2._UID
DESCRIPTOR.message_types_by_name["SpecificLocation"] = _SPECIFICLOCATION
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SpecificLocation = _reflection.GeneratedProtocolMessageType(
"SpecificLocation",
(_message.Message,),
{
"DESCRIPTOR": _SPECIFICLOCATION,
"__module__": "proto.core.io.location_pb2"
# @@protoc_insertion_point(class_scope:syft.core.io.SpecificLocation)
},
)
_sym_db.RegisterMessage(SpecificLocation)
# @@protoc_insertion_point(module_scope)
| apache-2.0 |
Adenilson/servo | tests/wpt/web-platform-tests/cors/resources/cors-makeheader.py | 79 | 2139 | import json
def main(request, response):
origin = request.GET.first("origin", request.headers.get('origin'))
if "check" in request.GET:
token = request.GET.first("token")
value = request.server.stash.take(token)
if value is not None:
if request.GET.first("check", None) == "keep":
request.server.stash.put(token, value)
body = "1"
else:
body = "0"
return [("Content-Type", "text/plain")], body
if origin != 'none':
response.headers.set("Access-Control-Allow-Origin", origin)
if 'origin2' in request.GET:
response.headers.append("Access-Control-Allow-Origin", request.GET.first('origin2'))
#Preflight
if 'headers' in request.GET:
response.headers.set("Access-Control-Allow-Headers", request.GET.first('headers'))
if 'credentials' in request.GET:
response.headers.set("Access-Control-Allow-Credentials", request.GET.first('credentials'))
if 'methods' in request.GET:
response.headers.set("Access-Control-Allow-Methods", request.GET.first('methods'))
code = request.GET.first('code', None)
if request.method == 'OPTIONS':
#Override the response code if we're in a preflight and one was requested
if 'preflight' in request.GET:
code = int(request.GET.first('preflight'))
#Log that the preflight actually happened if we were given a token
if 'token' in request.GET:
request.server.stash.put(request.GET['token'])
if 'location' in request.GET:
if code is None:
code = 302
if code >= 300 and code < 400:
response.headers.set("Location", request.GET.first('location'))
headers = {}
for name, values in request.headers.iteritems():
if len(values) == 1:
headers[name] = values[0]
else:
#I have no idea, really
headers[name] = values
headers['get_value'] = request.GET.first('get_value', '')
body = json.dumps(headers)
if code:
return (code, "StatusText"), [], body
else:
return body
| mpl-2.0 |
four2five/0.19.2 | src/contrib/hod/support/logcondense.py | 10 | 7097 | #!/bin/sh
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
""":"
work_dir=$(dirname $0)
base_name=$(basename $0)
cd $work_dir
if [ $HOD_PYTHON_HOME ]; then
exec $HOD_PYTHON_HOME -OO -u $base_name ${1+"$@"}
elif [ -e /usr/bin/python ]; then
exec /usr/bin/python -OO -u $base_name ${1+"$@"}
elif [ -e /usr/local/bin/python ]; then
exec /usr/local/bin/python -OO -u $base_name ${1+"$@"}
else
exec python -OO -u $base_name ${1+"$@"}
fi
":"""
from os import popen3
import os, sys
import re
import time
from datetime import datetime
from optparse import OptionParser
myName = os.path.basename(sys.argv[0])
myName = re.sub(".*/", "", myName)
reVersion = re.compile(".*(\d+_\d+).*")
VERSION = '$HeadURL: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.19/src/contrib/hod/support/logcondense.py $'
reMatch = reVersion.match(VERSION)
if reMatch:
VERSION = reMatch.group(1)
VERSION = re.sub("_", ".", VERSION)
else:
VERSION = 'DEV'
options = ( {'short' : "-p",
'long' : "--package",
'type' : "string",
'action' : "store",
'dest' : "package",
'metavar' : " ",
'default' : 'hadoop',
'help' : "Bin file for hadoop"},
{'short' : "-d",
'long' : "--days",
'type' : "int",
'action' : "store",
'dest' : "days",
'metavar' : " ",
'default' : 7,
'help' : "Number of days before logs are deleted"},
{'short' : "-c",
'long' : "--config",
'type' : "string",
'action' : "store",
'dest' : "config",
'metavar' : " ",
'default' : None,
'help' : "config directory for hadoop"},
{'short' : "-l",
'long' : "--logs",
'type' : "string",
'action' : "store",
'dest' : "log",
'metavar' : " ",
'default' : "/user",
'help' : "directory prefix under which logs are stored per user"},
{'short' : "-n",
'long' : "--dynamicdfs",
'type' : "string",
'action' : "store",
'dest' : "dynamicdfs",
'metavar' : " ",
'default' : "false",
'help' : "'true', if the cluster is used to bring up dynamic dfs clusters, 'false' otherwise"}
)
def getDfsCommand(options, args):
if (options.config == None):
cmd = options.package + " " + "dfs " + args
else:
cmd = options.package + " " + "--config " + options.config + " dfs " + args
return cmd
def runcondense():
import shutil
options = process_args()
# if the cluster is used to bring up dynamic dfs, we must leave NameNode and JobTracker logs,
# otherwise only JobTracker logs. Likewise, in the dynamic dfs case, we must also consider
# deleting datanode logs
filteredNames = ['jobtracker']
deletedNamePrefixes = ['*-tasktracker-*']
if options.dynamicdfs == 'true':
filteredNames.append('namenode')
deletedNamePrefixes.append('*-datanode-*')
filepath = '%s/\*/hod-logs/' % (options.log)
cmd = getDfsCommand(options, "-lsr " + filepath)
(stdin, stdout, stderr) = popen3(cmd)
lastjobid = 'none'
toPurge = { }
for line in stdout:
try:
m = re.match("^.*\s(.*)\n$", line)
filename = m.group(1)
# file name format: <prefix>/<user>/hod-logs/<jobid>/[0-9]*-[jobtracker|tasktracker|datanode|namenode|]-hostname-YYYYMMDDtime-random.tar.gz
# first strip prefix:
if filename.startswith(options.log):
# Strip the prefix itself; lstrip() would strip any leading run of the
# prefix's characters, which could also eat into the username.
filename = filename[len(options.log):]
if not filename.startswith('/'):
filename = '/' + filename
else:
continue
# Now get other details from filename.
k = re.match("/(.*)/hod-logs/(.*)/.*-.*-([0-9][0-9][0-9][0-9])([0-9][0-9])([0-9][0-9]).*$", filename)
if k:
username = k.group(1)
jobid = k.group(2)
datetimefile = datetime(int(k.group(3)), int(k.group(4)), int(k.group(5)))
datetimenow = datetime.utcnow()
diff = datetimenow - datetimefile
filedate = k.group(3) + k.group(4) + k.group(5)
newdate = datetimenow.strftime("%Y%m%d")
print "%s %s %s %d" % (filename, filedate, newdate, diff.days)
# if the cluster is used to bring up dynamic dfs, we must also leave NameNode logs.
foundFilteredName = False
for name in filteredNames:
if filename.find(name) >= 0:
foundFilteredName = True
break
if foundFilteredName:
continue
if (diff.days > options.days):
desttodel = filename
if not toPurge.has_key(jobid):
toPurge[jobid] = options.log.rstrip("/") + "/" + username + "/hod-logs/" + jobid
except Exception, e:
print >> sys.stderr, e
for job in toPurge.keys():
try:
for prefix in deletedNamePrefixes:
cmd = getDfsCommand(options, "-rm " + toPurge[job] + '/' + prefix)
print cmd
ret = 0
ret = os.system(cmd)
if (ret != 0):
print >> sys.stderr, "Command failed to delete file " + cmd
except Exception, e:
print >> sys.stderr, e
def process_args():
global options, myName, VERSION
usage = "usage: %s <ARGS>" % (myName)
version = "%s %s" % (myName, VERSION)
argParser = OptionParser(usage=usage, version=VERSION)
for option_element in options:
argParser.add_option(option_element['short'], option_element['long'],
type=option_element['type'], action=option_element['action'],
dest=option_element['dest'], default=option_element['default'],
metavar=option_element['metavar'], help=option_element['help'])
(parsedOptions, args) = argParser.parse_args()
if not os.path.exists(parsedOptions.package):
argParser.error("Could not find path to hadoop binary: %s" % parsedOptions.package)
if not os.path.exists(parsedOptions.config):
argParser.error("Could not find config: %s" % parsedOptions.config)
if parsedOptions.days <= 0:
argParser.error("Invalid number of days specified, must be > 0: %s" % parsedOptions.config)
if parsedOptions.dynamicdfs!='true' and parsedOptions.dynamicdfs!='false':
argParser.error("Invalid option for dynamicdfs, must be true or false: %s" % parsedOptions.dynamicdfs)
return parsedOptions
if __name__ == '__main__':
runcondense()
| apache-2.0 |
andrew-aladev/samba-talloc-debug | python/samba/sites.py | 41 | 4060 | # python site manipulation code
# Copyright Matthieu Patou <mat@matws.net> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Manipulating sites."""
import ldb
from ldb import FLAG_MOD_ADD
class SiteException(Exception):
"""Base element for Sites errors"""
def __init__(self, value):
self.value = value
def __str__(self):
return "SiteException: " + self.value
class SiteNotFoundException(SiteException):
"""Raised when the site is not found and it's expected to exists."""
def __init__(self, value):
self.value = value
def __str__(self):
return "SiteNotFoundException: " + self.value
class SiteAlreadyExistsException(SiteException):
"""Raised when the site is not found and it's expected not to exists."""
def __init__(self, value):
self.value = value
def __str__(self):
return "SiteAlreadyExists: " + self.value
class SiteServerNotEmptyException(SiteException):
"""Raised when the site still has servers attached."""
def __init__(self, value):
self.value = value
def __str__(self):
return "SiteServerNotEmpty: " + self.value
def create_site(samdb, configDn, siteName):
"""
Create a site
:param samdb: A samdb connection
:param configDn: The DN of the configuration partition
:param siteName: Name of the site to create
:return: True upon success
:raise SiteAlreadyExistsException: if the site to be created already exists.
"""
ret = samdb.search(base=configDn, scope=ldb.SCOPE_SUBTREE,
expression='(&(objectclass=Site)(cn=%s))' % siteName)
if len(ret) != 0:
raise SiteAlreadyExistsException('A site with the name %s already exists' % siteName)
m = ldb.Message()
m.dn = ldb.Dn(samdb, "Cn=%s,CN=Sites,%s" % (siteName, str(configDn)))
m["objectclass"] = ldb.MessageElement("site", FLAG_MOD_ADD, "objectclass")
samdb.add(m)
m2 = ldb.Message()
m2.dn = ldb.Dn(samdb, "Cn=NTDS Site Settings,%s" % str(m.dn))
m2["objectclass"] = ldb.MessageElement("nTDSSiteSettings", FLAG_MOD_ADD, "objectclass")
samdb.add(m2)
m3 = ldb.Message()
m3.dn = ldb.Dn(samdb, "Cn=Servers,%s" % str(m.dn))
m3["objectclass"] = ldb.MessageElement("serversContainer", FLAG_MOD_ADD, "objectclass")
samdb.add(m3)
return True
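# Minimal usage sketch (hypothetical DN): assuming an open samdb connection,
# creating a site looks along these lines:
#   create_site(samdb, ldb.Dn(samdb, "CN=Configuration,DC=example,DC=com"),
#               "NewSite")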
def delete_site(samdb, configDn, siteName):
"""
Delete a site
:param samdb: A samdb connection
:param configDn: The DN of the configuration partition
:param siteName: Name of the site to delete
:return: True upon success
:raise SiteNotFoundException: if the site to be deleted does not exist.
:raise SiteServerNotEmptyException: if the site still has servers in it.
"""
dnsites = ldb.Dn(samdb, "CN=Sites,%s" % (str(configDn)))
dnsite = ldb.Dn(samdb, "Cn=%s,CN=Sites,%s" % (siteName, str(configDn)))
dnserver = ldb.Dn(samdb, "Cn=Servers,%s" % str(dnsite))
ret = samdb.search(base=dnsites, scope=ldb.SCOPE_ONELEVEL,
expression='(dn=%s)' % str(dnsite))
if len(ret) != 1:
raise SiteNotFoundException('Site %s does not exist' % siteName)
ret = samdb.search(base=dnserver, scope=ldb.SCOPE_ONELEVEL,
expression='(objectclass=server)')
if len(ret) != 0:
raise SiteServerNotEmptyException('Site %s still has servers in it, move them before removal' % siteName)
samdb.delete(dnsite, ["tree_delete:0"])
return True
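# Likewise (hypothetical DN), removing an empty site would look like:
#   delete_site(samdb, ldb.Dn(samdb, "CN=Configuration,DC=example,DC=com"),
#               "NewSite")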
| gpl-3.0 |
Kudo/mailchimp_manager | mailchimp_manager/tests/test_list_manager.py | 1 | 1550 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_list_manager.py - Integration test for list management of mailchimp_manager
"""
try:
from mailchimp_manager import MailChimpManager
except:
# Local module testing - assuming mailchimp_manager folder put in grandparent folder
import sys, os.path
# Hack to allow importing the module from the grandparent folder
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, os.path.pardir)))
from mailchimp_manager import MailChimpManager
import unittest
TEST_EMAIL = u'john.doe@gmail.com'
class TestMailChimpListManager(unittest.TestCase):
def test_Subscribe_TestEmailInSubscribedList_True(self):
listMgr = MailChimpManager.ListManager()
listMgr.subscribe(TEST_EMAIL)
emails = listMgr.listMembers()
self.assertIn(TEST_EMAIL, emails)
def test_Unsubscribe_TestEmailInSubscribeList_False(self):
listMgr = MailChimpManager.ListManager()
listMgr.unsubscribe(TEST_EMAIL)
emails = listMgr.listMembers()
self.assertNotIn(TEST_EMAIL, emails)
def test_Unsubscribe_TestEmailInUnsubscribeList_True(self):
listMgr = MailChimpManager.ListManager()
listMgr.unsubscribe(TEST_EMAIL)
emails = listMgr.listMembers(MailChimpManager.ListManager.MEMBER_STATUS.UNSUBSCRIBED)
self.assertIn(TEST_EMAIL, emails)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestMailChimpListManager)
unittest.TextTestRunner(verbosity=2).run(suite)
| bsd-3-clause |
afandria/mojo | third_party/protobuf/python/google/protobuf/internal/descriptor_test.py | 261 | 24195 | #! /usr/bin/python
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unittest for google.protobuf.internal.descriptor."""
__author__ = 'robinson@google.com (Will Robinson)'
import unittest
from google.protobuf import unittest_custom_options_pb2
from google.protobuf import unittest_import_pb2
from google.protobuf import unittest_pb2
from google.protobuf import descriptor_pb2
from google.protobuf import descriptor
from google.protobuf import text_format
TEST_EMPTY_MESSAGE_DESCRIPTOR_ASCII = """
name: 'TestEmptyMessage'
"""
class DescriptorTest(unittest.TestCase):
def setUp(self):
self.my_file = descriptor.FileDescriptor(
name='some/filename/some.proto',
package='protobuf_unittest'
)
self.my_enum = descriptor.EnumDescriptor(
name='ForeignEnum',
full_name='protobuf_unittest.ForeignEnum',
filename=None,
file=self.my_file,
values=[
descriptor.EnumValueDescriptor(name='FOREIGN_FOO', index=0, number=4),
descriptor.EnumValueDescriptor(name='FOREIGN_BAR', index=1, number=5),
descriptor.EnumValueDescriptor(name='FOREIGN_BAZ', index=2, number=6),
])
self.my_message = descriptor.Descriptor(
name='NestedMessage',
full_name='protobuf_unittest.TestAllTypes.NestedMessage',
filename=None,
file=self.my_file,
containing_type=None,
fields=[
descriptor.FieldDescriptor(
name='bb',
full_name='protobuf_unittest.TestAllTypes.NestedMessage.bb',
index=0, number=1,
type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None),
],
nested_types=[],
enum_types=[
self.my_enum,
],
extensions=[])
self.my_method = descriptor.MethodDescriptor(
name='Bar',
full_name='protobuf_unittest.TestService.Bar',
index=0,
containing_service=None,
input_type=None,
output_type=None)
self.my_service = descriptor.ServiceDescriptor(
name='TestServiceWithOptions',
full_name='protobuf_unittest.TestServiceWithOptions',
file=self.my_file,
index=0,
methods=[
self.my_method
])
def testEnumValueName(self):
self.assertEqual(self.my_message.EnumValueName('ForeignEnum', 4),
'FOREIGN_FOO')
self.assertEqual(
self.my_message.enum_types_by_name[
'ForeignEnum'].values_by_number[4].name,
self.my_message.EnumValueName('ForeignEnum', 4))
def testEnumFixups(self):
self.assertEqual(self.my_enum, self.my_enum.values[0].type)
def testContainingTypeFixups(self):
self.assertEqual(self.my_message, self.my_message.fields[0].containing_type)
self.assertEqual(self.my_message, self.my_enum.containing_type)
def testContainingServiceFixups(self):
self.assertEqual(self.my_service, self.my_method.containing_service)
def testGetOptions(self):
self.assertEqual(self.my_enum.GetOptions(),
descriptor_pb2.EnumOptions())
self.assertEqual(self.my_enum.values[0].GetOptions(),
descriptor_pb2.EnumValueOptions())
self.assertEqual(self.my_message.GetOptions(),
descriptor_pb2.MessageOptions())
self.assertEqual(self.my_message.fields[0].GetOptions(),
descriptor_pb2.FieldOptions())
self.assertEqual(self.my_method.GetOptions(),
descriptor_pb2.MethodOptions())
self.assertEqual(self.my_service.GetOptions(),
descriptor_pb2.ServiceOptions())
def testSimpleCustomOptions(self):
file_descriptor = unittest_custom_options_pb2.DESCRIPTOR
message_descriptor =\
unittest_custom_options_pb2.TestMessageWithCustomOptions.DESCRIPTOR
field_descriptor = message_descriptor.fields_by_name["field1"]
enum_descriptor = message_descriptor.enum_types_by_name["AnEnum"]
enum_value_descriptor =\
message_descriptor.enum_values_by_name["ANENUM_VAL2"]
service_descriptor =\
unittest_custom_options_pb2.TestServiceWithCustomOptions.DESCRIPTOR
method_descriptor = service_descriptor.FindMethodByName("Foo")
file_options = file_descriptor.GetOptions()
file_opt1 = unittest_custom_options_pb2.file_opt1
self.assertEqual(9876543210, file_options.Extensions[file_opt1])
message_options = message_descriptor.GetOptions()
message_opt1 = unittest_custom_options_pb2.message_opt1
self.assertEqual(-56, message_options.Extensions[message_opt1])
field_options = field_descriptor.GetOptions()
field_opt1 = unittest_custom_options_pb2.field_opt1
self.assertEqual(8765432109, field_options.Extensions[field_opt1])
field_opt2 = unittest_custom_options_pb2.field_opt2
self.assertEqual(42, field_options.Extensions[field_opt2])
enum_options = enum_descriptor.GetOptions()
enum_opt1 = unittest_custom_options_pb2.enum_opt1
self.assertEqual(-789, enum_options.Extensions[enum_opt1])
enum_value_options = enum_value_descriptor.GetOptions()
enum_value_opt1 = unittest_custom_options_pb2.enum_value_opt1
self.assertEqual(123, enum_value_options.Extensions[enum_value_opt1])
service_options = service_descriptor.GetOptions()
service_opt1 = unittest_custom_options_pb2.service_opt1
self.assertEqual(-9876543210, service_options.Extensions[service_opt1])
method_options = method_descriptor.GetOptions()
method_opt1 = unittest_custom_options_pb2.method_opt1
self.assertEqual(unittest_custom_options_pb2.METHODOPT1_VAL2,
method_options.Extensions[method_opt1])
def testDifferentCustomOptionTypes(self):
kint32min = -2**31
kint64min = -2**63
kint32max = 2**31 - 1
kint64max = 2**63 - 1
kuint32max = 2**32 - 1
kuint64max = 2**64 - 1
message_descriptor =\
unittest_custom_options_pb2.CustomOptionMinIntegerValues.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertEqual(False, message_options.Extensions[
unittest_custom_options_pb2.bool_opt])
self.assertEqual(kint32min, message_options.Extensions[
unittest_custom_options_pb2.int32_opt])
self.assertEqual(kint64min, message_options.Extensions[
unittest_custom_options_pb2.int64_opt])
self.assertEqual(0, message_options.Extensions[
unittest_custom_options_pb2.uint32_opt])
self.assertEqual(0, message_options.Extensions[
unittest_custom_options_pb2.uint64_opt])
self.assertEqual(kint32min, message_options.Extensions[
unittest_custom_options_pb2.sint32_opt])
self.assertEqual(kint64min, message_options.Extensions[
unittest_custom_options_pb2.sint64_opt])
self.assertEqual(0, message_options.Extensions[
unittest_custom_options_pb2.fixed32_opt])
self.assertEqual(0, message_options.Extensions[
unittest_custom_options_pb2.fixed64_opt])
self.assertEqual(kint32min, message_options.Extensions[
unittest_custom_options_pb2.sfixed32_opt])
self.assertEqual(kint64min, message_options.Extensions[
unittest_custom_options_pb2.sfixed64_opt])
message_descriptor =\
unittest_custom_options_pb2.CustomOptionMaxIntegerValues.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertEqual(True, message_options.Extensions[
unittest_custom_options_pb2.bool_opt])
self.assertEqual(kint32max, message_options.Extensions[
unittest_custom_options_pb2.int32_opt])
self.assertEqual(kint64max, message_options.Extensions[
unittest_custom_options_pb2.int64_opt])
self.assertEqual(kuint32max, message_options.Extensions[
unittest_custom_options_pb2.uint32_opt])
self.assertEqual(kuint64max, message_options.Extensions[
unittest_custom_options_pb2.uint64_opt])
self.assertEqual(kint32max, message_options.Extensions[
unittest_custom_options_pb2.sint32_opt])
self.assertEqual(kint64max, message_options.Extensions[
unittest_custom_options_pb2.sint64_opt])
self.assertEqual(kuint32max, message_options.Extensions[
unittest_custom_options_pb2.fixed32_opt])
self.assertEqual(kuint64max, message_options.Extensions[
unittest_custom_options_pb2.fixed64_opt])
self.assertEqual(kint32max, message_options.Extensions[
unittest_custom_options_pb2.sfixed32_opt])
self.assertEqual(kint64max, message_options.Extensions[
unittest_custom_options_pb2.sfixed64_opt])
message_descriptor =\
unittest_custom_options_pb2.CustomOptionOtherValues.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertEqual(-100, message_options.Extensions[
unittest_custom_options_pb2.int32_opt])
self.assertAlmostEqual(12.3456789, message_options.Extensions[
unittest_custom_options_pb2.float_opt], 6)
self.assertAlmostEqual(1.234567890123456789, message_options.Extensions[
unittest_custom_options_pb2.double_opt])
self.assertEqual("Hello, \"World\"", message_options.Extensions[
unittest_custom_options_pb2.string_opt])
self.assertEqual("Hello\0World", message_options.Extensions[
unittest_custom_options_pb2.bytes_opt])
dummy_enum = unittest_custom_options_pb2.DummyMessageContainingEnum
self.assertEqual(
dummy_enum.TEST_OPTION_ENUM_TYPE2,
message_options.Extensions[unittest_custom_options_pb2.enum_opt])
message_descriptor =\
unittest_custom_options_pb2.SettingRealsFromPositiveInts.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertAlmostEqual(12, message_options.Extensions[
unittest_custom_options_pb2.float_opt], 6)
self.assertAlmostEqual(154, message_options.Extensions[
unittest_custom_options_pb2.double_opt])
message_descriptor =\
unittest_custom_options_pb2.SettingRealsFromNegativeInts.DESCRIPTOR
message_options = message_descriptor.GetOptions()
self.assertAlmostEqual(-12, message_options.Extensions[
unittest_custom_options_pb2.float_opt], 6)
self.assertAlmostEqual(-154, message_options.Extensions[
unittest_custom_options_pb2.double_opt])
def testComplexExtensionOptions(self):
descriptor =\
unittest_custom_options_pb2.VariousComplexOptions.DESCRIPTOR
options = descriptor.GetOptions()
self.assertEqual(42, options.Extensions[
unittest_custom_options_pb2.complex_opt1].foo)
self.assertEqual(324, options.Extensions[
unittest_custom_options_pb2.complex_opt1].Extensions[
unittest_custom_options_pb2.quux])
self.assertEqual(876, options.Extensions[
unittest_custom_options_pb2.complex_opt1].Extensions[
unittest_custom_options_pb2.corge].qux)
self.assertEqual(987, options.Extensions[
unittest_custom_options_pb2.complex_opt2].baz)
self.assertEqual(654, options.Extensions[
unittest_custom_options_pb2.complex_opt2].Extensions[
unittest_custom_options_pb2.grault])
self.assertEqual(743, options.Extensions[
unittest_custom_options_pb2.complex_opt2].bar.foo)
self.assertEqual(1999, options.Extensions[
unittest_custom_options_pb2.complex_opt2].bar.Extensions[
unittest_custom_options_pb2.quux])
self.assertEqual(2008, options.Extensions[
unittest_custom_options_pb2.complex_opt2].bar.Extensions[
unittest_custom_options_pb2.corge].qux)
self.assertEqual(741, options.Extensions[
unittest_custom_options_pb2.complex_opt2].Extensions[
unittest_custom_options_pb2.garply].foo)
self.assertEqual(1998, options.Extensions[
unittest_custom_options_pb2.complex_opt2].Extensions[
unittest_custom_options_pb2.garply].Extensions[
unittest_custom_options_pb2.quux])
self.assertEqual(2121, options.Extensions[
unittest_custom_options_pb2.complex_opt2].Extensions[
unittest_custom_options_pb2.garply].Extensions[
unittest_custom_options_pb2.corge].qux)
self.assertEqual(1971, options.Extensions[
unittest_custom_options_pb2.ComplexOptionType2
.ComplexOptionType4.complex_opt4].waldo)
self.assertEqual(321, options.Extensions[
unittest_custom_options_pb2.complex_opt2].fred.waldo)
self.assertEqual(9, options.Extensions[
unittest_custom_options_pb2.complex_opt3].qux)
self.assertEqual(22, options.Extensions[
unittest_custom_options_pb2.complex_opt3].complexoptiontype5.plugh)
self.assertEqual(24, options.Extensions[
unittest_custom_options_pb2.complexopt6].xyzzy)
# Check that aggregate options were parsed and saved correctly in
# the appropriate descriptors.
def testAggregateOptions(self):
file_descriptor = unittest_custom_options_pb2.DESCRIPTOR
message_descriptor =\
unittest_custom_options_pb2.AggregateMessage.DESCRIPTOR
field_descriptor = message_descriptor.fields_by_name["fieldname"]
enum_descriptor = unittest_custom_options_pb2.AggregateEnum.DESCRIPTOR
enum_value_descriptor = enum_descriptor.values_by_name["VALUE"]
service_descriptor =\
unittest_custom_options_pb2.AggregateService.DESCRIPTOR
method_descriptor = service_descriptor.FindMethodByName("Method")
# Tests for the different types of data embedded in fileopt
file_options = file_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.fileopt]
self.assertEqual(100, file_options.i)
self.assertEqual("FileAnnotation", file_options.s)
self.assertEqual("NestedFileAnnotation", file_options.sub.s)
self.assertEqual("FileExtensionAnnotation", file_options.file.Extensions[
unittest_custom_options_pb2.fileopt].s)
self.assertEqual("EmbeddedMessageSetElement", file_options.mset.Extensions[
unittest_custom_options_pb2.AggregateMessageSetElement
.message_set_extension].s)
# Simple tests for all the other types of annotations
self.assertEqual(
"MessageAnnotation",
message_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.msgopt].s)
self.assertEqual(
"FieldAnnotation",
field_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.fieldopt].s)
self.assertEqual(
"EnumAnnotation",
enum_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.enumopt].s)
self.assertEqual(
"EnumValueAnnotation",
enum_value_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.enumvalopt].s)
self.assertEqual(
"ServiceAnnotation",
service_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.serviceopt].s)
self.assertEqual(
"MethodAnnotation",
method_descriptor.GetOptions().Extensions[
unittest_custom_options_pb2.methodopt].s)
def testNestedOptions(self):
nested_message =\
unittest_custom_options_pb2.NestedOptionType.NestedMessage.DESCRIPTOR
self.assertEqual(1001, nested_message.GetOptions().Extensions[
unittest_custom_options_pb2.message_opt1])
nested_field = nested_message.fields_by_name["nested_field"]
self.assertEqual(1002, nested_field.GetOptions().Extensions[
unittest_custom_options_pb2.field_opt1])
outer_message =\
unittest_custom_options_pb2.NestedOptionType.DESCRIPTOR
nested_enum = outer_message.enum_types_by_name["NestedEnum"]
self.assertEqual(1003, nested_enum.GetOptions().Extensions[
unittest_custom_options_pb2.enum_opt1])
nested_enum_value = outer_message.enum_values_by_name["NESTED_ENUM_VALUE"]
self.assertEqual(1004, nested_enum_value.GetOptions().Extensions[
unittest_custom_options_pb2.enum_value_opt1])
nested_extension = outer_message.extensions_by_name["nested_extension"]
self.assertEqual(1005, nested_extension.GetOptions().Extensions[
unittest_custom_options_pb2.field_opt2])
def testFileDescriptorReferences(self):
self.assertEqual(self.my_enum.file, self.my_file)
self.assertEqual(self.my_message.file, self.my_file)
def testFileDescriptor(self):
self.assertEqual(self.my_file.name, 'some/filename/some.proto')
self.assertEqual(self.my_file.package, 'protobuf_unittest')
class DescriptorCopyToProtoTest(unittest.TestCase):
"""Tests for CopyTo functions of Descriptor."""
def _AssertProtoEqual(self, actual_proto, expected_class, expected_ascii):
expected_proto = expected_class()
text_format.Merge(expected_ascii, expected_proto)
self.assertEqual(
actual_proto, expected_proto,
'Not equal,\nActual:\n%s\nExpected:\n%s\n'
% (str(actual_proto), str(expected_proto)))
def _InternalTestCopyToProto(self, desc, expected_proto_class,
expected_proto_ascii):
actual = expected_proto_class()
desc.CopyToProto(actual)
self._AssertProtoEqual(
actual, expected_proto_class, expected_proto_ascii)
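# In other words (sketch): CopyToProto serializes a live descriptor back
# into its descriptor_pb2 form, e.g.
#   proto = descriptor_pb2.DescriptorProto()
#   unittest_pb2.TestEmptyMessage.DESCRIPTOR.CopyToProto(proto)
#   # proto now equals the TEST_EMPTY_MESSAGE_DESCRIPTOR_ASCII message above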
def testCopyToProto_EmptyMessage(self):
self._InternalTestCopyToProto(
unittest_pb2.TestEmptyMessage.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_EMPTY_MESSAGE_DESCRIPTOR_ASCII)
def testCopyToProto_NestedMessage(self):
TEST_NESTED_MESSAGE_ASCII = """
name: 'NestedMessage'
field: <
name: 'bb'
number: 1
label: 1 # Optional
type: 5 # TYPE_INT32
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestAllTypes.NestedMessage.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_NESTED_MESSAGE_ASCII)
def testCopyToProto_ForeignNestedMessage(self):
TEST_FOREIGN_NESTED_ASCII = """
name: 'TestForeignNested'
field: <
name: 'foreign_nested'
number: 1
label: 1 # Optional
type: 11 # TYPE_MESSAGE
type_name: '.protobuf_unittest.TestAllTypes.NestedMessage'
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestForeignNested.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_FOREIGN_NESTED_ASCII)
def testCopyToProto_ForeignEnum(self):
TEST_FOREIGN_ENUM_ASCII = """
name: 'ForeignEnum'
value: <
name: 'FOREIGN_FOO'
number: 4
>
value: <
name: 'FOREIGN_BAR'
number: 5
>
value: <
name: 'FOREIGN_BAZ'
number: 6
>
"""
self._InternalTestCopyToProto(
unittest_pb2._FOREIGNENUM,
descriptor_pb2.EnumDescriptorProto,
TEST_FOREIGN_ENUM_ASCII)
def testCopyToProto_Options(self):
TEST_DEPRECATED_FIELDS_ASCII = """
name: 'TestDeprecatedFields'
field: <
name: 'deprecated_int32'
number: 1
label: 1 # Optional
type: 5 # TYPE_INT32
options: <
deprecated: true
>
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestDeprecatedFields.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_DEPRECATED_FIELDS_ASCII)
def testCopyToProto_AllExtensions(self):
TEST_EMPTY_MESSAGE_WITH_EXTENSIONS_ASCII = """
name: 'TestEmptyMessageWithExtensions'
extension_range: <
start: 1
end: 536870912
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestEmptyMessageWithExtensions.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_EMPTY_MESSAGE_WITH_EXTENSIONS_ASCII)
def testCopyToProto_SeveralExtensions(self):
TEST_MESSAGE_WITH_SEVERAL_EXTENSIONS_ASCII = """
name: 'TestMultipleExtensionRanges'
extension_range: <
start: 42
end: 43
>
extension_range: <
start: 4143
end: 4244
>
extension_range: <
start: 65536
end: 536870912
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestMultipleExtensionRanges.DESCRIPTOR,
descriptor_pb2.DescriptorProto,
TEST_MESSAGE_WITH_SEVERAL_EXTENSIONS_ASCII)
def testCopyToProto_FileDescriptor(self):
UNITTEST_IMPORT_FILE_DESCRIPTOR_ASCII = ("""
name: 'google/protobuf/unittest_import.proto'
package: 'protobuf_unittest_import'
dependency: 'google/protobuf/unittest_import_public.proto'
message_type: <
name: 'ImportMessage'
field: <
name: 'd'
number: 1
label: 1 # Optional
type: 5 # TYPE_INT32
>
>
""" +
"""enum_type: <
name: 'ImportEnum'
value: <
name: 'IMPORT_FOO'
number: 7
>
value: <
name: 'IMPORT_BAR'
number: 8
>
value: <
name: 'IMPORT_BAZ'
number: 9
>
>
options: <
java_package: 'com.google.protobuf.test'
optimize_for: 1 # SPEED
>
public_dependency: 0
""")
self._InternalTestCopyToProto(
unittest_import_pb2.DESCRIPTOR,
descriptor_pb2.FileDescriptorProto,
UNITTEST_IMPORT_FILE_DESCRIPTOR_ASCII)
def testCopyToProto_ServiceDescriptor(self):
TEST_SERVICE_ASCII = """
name: 'TestService'
method: <
name: 'Foo'
input_type: '.protobuf_unittest.FooRequest'
output_type: '.protobuf_unittest.FooResponse'
>
method: <
name: 'Bar'
input_type: '.protobuf_unittest.BarRequest'
output_type: '.protobuf_unittest.BarResponse'
>
"""
self._InternalTestCopyToProto(
unittest_pb2.TestService.DESCRIPTOR,
descriptor_pb2.ServiceDescriptorProto,
TEST_SERVICE_ASCII)
class MakeDescriptorTest(unittest.TestCase):
def testMakeDescriptorWithUnsignedIntField(self):
file_descriptor_proto = descriptor_pb2.FileDescriptorProto()
file_descriptor_proto.name = 'Foo'
message_type = file_descriptor_proto.message_type.add()
message_type.name = file_descriptor_proto.name
field = message_type.field.add()
field.number = 1
field.name = 'uint64_field'
field.label = descriptor.FieldDescriptor.LABEL_REQUIRED
field.type = descriptor.FieldDescriptor.TYPE_UINT64
result = descriptor.MakeDescriptor(message_type)
self.assertEqual(result.fields[0].cpp_type,
descriptor.FieldDescriptor.CPPTYPE_UINT64)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
Blizzard/s2protocol | s2protocol/versions/protocol63454.py | 6 | 31078 | # Copyright (c) 2015-2017 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from s2protocol.decoders import *
# Decoding instructions for each protocol type.
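# A rough reading of the entries below (inferred from the decoder names, not
# documented here): each tuple is (decoder, arguments). For instance,
# ('_int',[(0,7)]) reads an integer stored in 7 bits with offset 0, and
# ('_struct',[[('m_userId',2,-1)]]) reads a struct whose m_userId field is
# decoded with typeinfo #2 and carries tag -1.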
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,5)]), #2
('_int',[(0,6)]), #3
('_int',[(0,14)]), #4
('_int',[(0,22)]), #5
('_int',[(0,32)]), #6
('_choice',[(0,2),{0:('m_uint6',3),1:('m_uint14',4),2:('m_uint22',5),3:('m_uint32',6)}]), #7
('_struct',[[('m_userId',2,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',6,4),('m_baseBuild',6,5)]]), #11
('_int',[(0,3)]), #12
('_bool',[]), #13
('_array',[(16,0),10]), #14
('_optional',[14]), #15
('_blob',[(16,0)]), #16
('_struct',[[('m_dataDeprecated',15,0),('m_data',16,1)]]), #17
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',6,3),('m_useScaledTime',13,4),('m_ngdpRootKey',17,5),('m_dataBuildNum',6,6),('m_replayCompatibilityHash',17,7),('m_ngdpRootKeyIsDevData',13,8)]]), #18
('_fourcc',[]), #19
('_blob',[(0,7)]), #20
('_int',[(0,64)]), #21
('_struct',[[('m_region',10,0),('m_programId',19,1),('m_realm',6,2),('m_name',20,3),('m_id',21,4)]]), #22
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #23
('_int',[(0,2)]), #24
('_optional',[10]), #25
('_struct',[[('m_name',9,0),('m_toon',22,1),('m_race',9,2),('m_color',23,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',24,7),('m_result',24,8),('m_workingSetSlotId',25,9),('m_hero',9,10)]]), #26
('_array',[(0,5),26]), #27
('_optional',[27]), #28
('_blob',[(0,10)]), #29
('_blob',[(0,11)]), #30
('_struct',[[('m_file',30,0)]]), #31
('_int',[(-9223372036854775808,64)]), #32
('_optional',[13]), #33
('_blob',[(0,12)]), #34
('_blob',[(40,0)]), #35
('_array',[(0,6),35]), #36
('_optional',[36]), #37
('_array',[(0,6),30]), #38
('_optional',[38]), #39
('_struct',[[('m_playerList',28,0),('m_title',29,1),('m_difficulty',9,2),('m_thumbnail',31,3),('m_isBlizzardMap',13,4),('m_timeUTC',32,5),('m_timeLocalOffset',32,6),('m_restartAsTransitionMap',33,16),('m_disableRecoverGame',13,17),('m_description',34,7),('m_imageFilePath',30,8),('m_campaignIndex',10,15),('m_mapFileName',30,9),('m_cacheHandles',37,10),('m_miniSave',13,11),('m_gameSpeed',12,12),('m_defaultDifficulty',3,13),('m_modPaths',39,14)]]), #40
('_optional',[9]), #41
('_optional',[35]), #42
('_optional',[6]), #43
('_struct',[[('m_race',25,-1)]]), #44
('_struct',[[('m_team',25,-1)]]), #45
('_blob',[(0,9)]), #46
('_int',[(-2147483648,32)]), #47
('_optional',[47]), #48
('_struct',[[('m_name',9,-19),('m_clanTag',41,-18),('m_clanLogo',42,-17),('m_highestLeague',25,-16),('m_combinedRaceLevels',43,-15),('m_randomSeed',6,-14),('m_racePreference',44,-13),('m_teamPreference',45,-12),('m_testMap',13,-11),('m_testAuto',13,-10),('m_examine',13,-9),('m_customInterface',13,-8),('m_testType',6,-7),('m_observe',24,-6),('m_hero',46,-5),('m_skin',46,-4),('m_mount',46,-3),('m_toonHandle',20,-2),('m_scaledRating',48,-1)]]), #49
('_array',[(0,5),49]), #50
('_struct',[[('m_lockTeams',13,-16),('m_teamsTogether',13,-15),('m_advancedSharedControl',13,-14),('m_randomRaces',13,-13),('m_battleNet',13,-12),('m_amm',13,-11),('m_competitive',13,-10),('m_practice',13,-9),('m_cooperative',13,-8),('m_noVictoryOrDefeat',13,-7),('m_heroDuplicatesAllowed',13,-6),('m_fog',24,-5),('m_observers',24,-4),('m_userDifficulty',24,-3),('m_clientDebugFlags',21,-2),('m_buildCoachEnabled',13,-1)]]), #51
('_int',[(1,4)]), #52
('_int',[(1,8)]), #53
('_bitarray',[(0,6)]), #54
('_bitarray',[(0,8)]), #55
('_bitarray',[(0,2)]), #56
('_struct',[[('m_allowedColors',54,-6),('m_allowedRaces',55,-5),('m_allowedDifficulty',54,-4),('m_allowedControls',55,-3),('m_allowedObserveTypes',56,-2),('m_allowedAIBuilds',55,-1)]]), #57
('_array',[(0,5),57]), #58
('_struct',[[('m_randomValue',6,-28),('m_gameCacheName',29,-27),('m_gameOptions',51,-26),('m_gameSpeed',12,-25),('m_gameType',12,-24),('m_maxUsers',2,-23),('m_maxObservers',2,-22),('m_maxPlayers',2,-21),('m_maxTeams',52,-20),('m_maxColors',3,-19),('m_maxRaces',53,-18),('m_maxControls',10,-17),('m_mapSizeX',10,-16),('m_mapSizeY',10,-15),('m_mapFileSyncChecksum',6,-14),('m_mapFileName',30,-13),('m_mapAuthorName',9,-12),('m_modFileSyncChecksum',6,-11),('m_slotDescriptions',58,-10),('m_defaultDifficulty',3,-9),('m_defaultAIBuild',10,-8),('m_cacheHandles',36,-7),('m_hasExtensionMod',13,-6),('m_hasNonBlizzardExtensionMod',13,-5),('m_isBlizzardMap',13,-4),('m_isPremadeFFA',13,-3),('m_isCoopMode',13,-2),('m_isRealtimeMode',13,-1)]]), #59
('_optional',[1]), #60
('_optional',[2]), #61
('_struct',[[('m_color',61,-1)]]), #62
('_array',[(0,4),46]), #63
('_array',[(0,17),6]), #64
('_array',[(0,9),6]), #65
('_array',[(0,3),6]), #66
('_struct',[[('m_key',6,-2),('m_rewards',64,-1)]]), #67
('_array',[(0,17),67]), #68
('_struct',[[('m_control',10,-26),('m_userId',60,-25),('m_teamId',1,-24),('m_colorPref',62,-23),('m_racePref',44,-22),('m_difficulty',3,-21),('m_aiBuild',10,-20),('m_handicap',0,-19),('m_observe',24,-18),('m_logoIndex',6,-17),('m_hero',46,-16),('m_skin',46,-15),('m_mount',46,-14),('m_artifacts',63,-13),('m_workingSetSlotId',25,-12),('m_rewards',64,-11),('m_toonHandle',20,-10),('m_licenses',65,-9),('m_tandemLeaderId',60,-8),('m_commander',46,-7),('m_commanderLevel',6,-6),('m_hasSilencePenalty',13,-5),('m_tandemId',60,-4),('m_commanderMasteryLevel',6,-3),('m_commanderMasteryTalents',66,-2),('m_rewardOverrides',68,-1)]]), #69
('_array',[(0,5),69]), #70
('_struct',[[('m_phase',12,-11),('m_maxUsers',2,-10),('m_maxObservers',2,-9),('m_slots',70,-8),('m_randomSeed',6,-7),('m_hostUserId',60,-6),('m_isSinglePlayer',13,-5),('m_pickedMapTag',10,-4),('m_gameDuration',6,-3),('m_defaultDifficulty',3,-2),('m_defaultAIBuild',10,-1)]]), #71
('_struct',[[('m_userInitialData',50,-3),('m_gameDescription',59,-2),('m_lobbyState',71,-1)]]), #72
('_struct',[[('m_syncLobbyState',72,-1)]]), #73
('_struct',[[('m_name',20,-6)]]), #74
('_blob',[(0,6)]), #75
('_struct',[[('m_name',75,-6)]]), #76
('_struct',[[('m_name',75,-8),('m_type',6,-7),('m_data',20,-6)]]), #77
('_struct',[[('m_type',6,-8),('m_name',75,-7),('m_data',34,-6)]]), #78
('_array',[(0,5),10]), #79
('_struct',[[('m_signature',79,-7),('m_toonHandle',20,-6)]]), #80
('_struct',[[('m_gameFullyDownloaded',13,-19),('m_developmentCheatsEnabled',13,-18),('m_testCheatsEnabled',13,-17),('m_multiplayerCheatsEnabled',13,-16),('m_syncChecksummingEnabled',13,-15),('m_isMapToMapTransition',13,-14),('m_debugPauseEnabled',13,-13),('m_useGalaxyAsserts',13,-12),('m_platformMac',13,-11),('m_cameraFollow',13,-10),('m_baseBuildNum',6,-9),('m_buildNum',6,-8),('m_versionFlags',6,-7),('m_hotkeyProfile',46,-6)]]), #81
('_struct',[[]]), #82
('_int',[(0,16)]), #83
('_struct',[[('x',83,-2),('y',83,-1)]]), #84
('_struct',[[('m_which',12,-7),('m_target',84,-6)]]), #85
('_struct',[[('m_fileName',30,-10),('m_automatic',13,-9),('m_overwrite',13,-8),('m_name',9,-7),('m_description',29,-6)]]), #86
('_struct',[[('m_sequence',6,-6)]]), #87
('_struct',[[('x',47,-2),('y',47,-1)]]), #88
('_struct',[[('m_point',88,-4),('m_time',47,-3),('m_verb',29,-2),('m_arguments',29,-1)]]), #89
('_struct',[[('m_data',89,-6)]]), #90
('_int',[(0,25)]), #91
('_struct',[[('m_abilLink',83,-3),('m_abilCmdIndex',2,-2),('m_abilCmdData',25,-1)]]), #92
('_optional',[92]), #93
('_null',[]), #94
('_int',[(0,20)]), #95
('_struct',[[('x',95,-3),('y',95,-2),('z',47,-1)]]), #96
('_struct',[[('m_targetUnitFlags',83,-7),('m_timer',10,-6),('m_tag',6,-5),('m_snapshotUnitLink',83,-4),('m_snapshotControlPlayerId',60,-3),('m_snapshotUpkeepPlayerId',60,-2),('m_snapshotPoint',96,-1)]]), #97
('_choice',[(0,2),{0:('None',94),1:('TargetPoint',96),2:('TargetUnit',97),3:('Data',6)}]), #98
('_int',[(1,32)]), #99
('_struct',[[('m_cmdFlags',91,-11),('m_abil',93,-10),('m_data',98,-9),('m_sequence',99,-8),('m_otherUnit',43,-7),('m_unitGroup',43,-6)]]), #100
('_int',[(0,9)]), #101
('_bitarray',[(0,9)]), #102
('_array',[(0,9),101]), #103
('_choice',[(0,2),{0:('None',94),1:('Mask',102),2:('OneIndices',103),3:('ZeroIndices',103)}]), #104
('_struct',[[('m_unitLink',83,-4),('m_subgroupPriority',10,-3),('m_intraSubgroupPriority',10,-2),('m_count',101,-1)]]), #105
('_array',[(0,9),105]), #106
('_struct',[[('m_subgroupIndex',101,-4),('m_removeMask',104,-3),('m_addSubgroups',106,-2),('m_addUnitTags',65,-1)]]), #107
('_struct',[[('m_controlGroupId',1,-7),('m_delta',107,-6)]]), #108
('_struct',[[('m_controlGroupIndex',1,-8),('m_controlGroupUpdate',12,-7),('m_mask',104,-6)]]), #109
('_struct',[[('m_count',101,-6),('m_subgroupCount',101,-5),('m_activeSubgroupIndex',101,-4),('m_unitTagsChecksum',6,-3),('m_subgroupIndicesChecksum',6,-2),('m_subgroupsChecksum',6,-1)]]), #110
('_struct',[[('m_controlGroupId',1,-7),('m_selectionSyncData',110,-6)]]), #111
('_array',[(0,3),47]), #112
('_struct',[[('m_recipientId',1,-7),('m_resources',112,-6)]]), #113
('_struct',[[('m_chatMessage',29,-6)]]), #114
('_int',[(-128,8)]), #115
('_struct',[[('x',47,-3),('y',47,-2),('z',47,-1)]]), #116
('_struct',[[('m_beacon',115,-14),('m_ally',115,-13),('m_flags',115,-12),('m_build',115,-11),('m_targetUnitTag',6,-10),('m_targetUnitSnapshotUnitLink',83,-9),('m_targetUnitSnapshotUpkeepPlayerId',115,-8),('m_targetUnitSnapshotControlPlayerId',115,-7),('m_targetPoint',116,-6)]]), #117
('_struct',[[('m_speed',12,-6)]]), #118
('_struct',[[('m_delta',115,-6)]]), #119
('_struct',[[('m_point',88,-14),('m_unit',6,-13),('m_unitLink',83,-12),('m_unitControlPlayerId',60,-11),('m_unitUpkeepPlayerId',60,-10),('m_unitPosition',96,-9),('m_unitIsUnderConstruction',13,-8),('m_pingedMinimap',13,-7),('m_option',47,-6)]]), #120
('_struct',[[('m_verb',29,-7),('m_arguments',29,-6)]]), #121
('_struct',[[('m_alliance',6,-7),('m_control',6,-6)]]), #122
('_struct',[[('m_unitTag',6,-6)]]), #123
('_struct',[[('m_unitTag',6,-7),('m_flags',10,-6)]]), #124
('_struct',[[('m_conversationId',47,-7),('m_replyId',47,-6)]]), #125
('_optional',[20]), #126
('_struct',[[('m_gameUserId',1,-6),('m_observe',24,-5),('m_name',9,-4),('m_toonHandle',126,-3),('m_clanTag',41,-2),('m_clanLogo',42,-1)]]), #127
('_array',[(0,5),127]), #128
('_int',[(0,1)]), #129
('_struct',[[('m_userInfos',128,-7),('m_method',129,-6)]]), #130
('_struct',[[('m_purchaseItemId',47,-6)]]), #131
('_struct',[[('m_difficultyLevel',47,-6)]]), #132
('_choice',[(0,3),{0:('None',94),1:('Checked',13),2:('ValueChanged',6),3:('SelectionChanged',47),4:('TextChanged',30),5:('MouseButton',6)}]), #133
('_struct',[[('m_controlId',47,-8),('m_eventType',47,-7),('m_eventData',133,-6)]]), #134
('_struct',[[('m_soundHash',6,-7),('m_length',6,-6)]]), #135
('_array',[(0,7),6]), #136
('_struct',[[('m_soundHash',136,-2),('m_length',136,-1)]]), #137
('_struct',[[('m_syncInfo',137,-6)]]), #138
('_struct',[[('m_queryId',83,-8),('m_lengthMs',6,-7),('m_finishGameLoop',6,-6)]]), #139
('_struct',[[('m_queryId',83,-7),('m_lengthMs',6,-6)]]), #140
('_struct',[[('m_animWaitQueryId',83,-6)]]), #141
('_struct',[[('m_sound',6,-6)]]), #142
('_struct',[[('m_transmissionId',47,-7),('m_thread',6,-6)]]), #143
('_struct',[[('m_transmissionId',47,-6)]]), #144
('_optional',[84]), #145
('_optional',[83]), #146
('_optional',[115]), #147
('_struct',[[('m_target',145,-11),('m_distance',146,-10),('m_pitch',146,-9),('m_yaw',146,-8),('m_reason',147,-7),('m_follow',13,-6)]]), #148
('_struct',[[('m_skipType',129,-6)]]), #149
('_int',[(0,11)]), #150
('_struct',[[('x',150,-2),('y',150,-1)]]), #151
('_struct',[[('m_button',6,-10),('m_down',13,-9),('m_posUI',151,-8),('m_posWorld',96,-7),('m_flags',115,-6)]]), #152
('_struct',[[('m_posUI',151,-8),('m_posWorld',96,-7),('m_flags',115,-6)]]), #153
('_struct',[[('m_achievementLink',83,-6)]]), #154
('_struct',[[('m_hotkey',6,-7),('m_down',13,-6)]]), #155
('_struct',[[('m_abilLink',83,-8),('m_abilCmdIndex',2,-7),('m_state',115,-6)]]), #156
('_struct',[[('m_soundtrack',6,-6)]]), #157
('_struct',[[('m_planetId',47,-6)]]), #158
('_struct',[[('m_key',115,-7),('m_flags',115,-6)]]), #159
('_struct',[[('m_resources',112,-6)]]), #160
('_struct',[[('m_fulfillRequestId',47,-6)]]), #161
('_struct',[[('m_cancelRequestId',47,-6)]]), #162
('_struct',[[('m_error',47,-7),('m_abil',93,-6)]]), #163
('_struct',[[('m_researchItemId',47,-6)]]), #164
('_struct',[[('m_mercenaryId',47,-6)]]), #165
('_struct',[[('m_battleReportId',47,-7),('m_difficultyLevel',47,-6)]]), #166
('_struct',[[('m_battleReportId',47,-6)]]), #167
('_struct',[[('m_decrementSeconds',47,-6)]]), #168
('_struct',[[('m_portraitId',47,-6)]]), #169
('_struct',[[('m_functionName',20,-6)]]), #170
('_struct',[[('m_result',47,-6)]]), #171
('_struct',[[('m_gameMenuItemIndex',47,-6)]]), #172
('_int',[(-32768,16)]), #173
('_struct',[[('m_wheelSpin',173,-7),('m_flags',115,-6)]]), #174
('_struct',[[('m_purchaseCategoryId',47,-6)]]), #175
('_struct',[[('m_button',83,-6)]]), #176
('_struct',[[('m_cutsceneId',47,-7),('m_bookmarkName',20,-6)]]), #177
('_struct',[[('m_cutsceneId',47,-6)]]), #178
('_struct',[[('m_cutsceneId',47,-8),('m_conversationLine',20,-7),('m_altConversationLine',20,-6)]]), #179
('_struct',[[('m_cutsceneId',47,-7),('m_conversationLine',20,-6)]]), #180
('_struct',[[('m_leaveReason',1,-6)]]), #181
('_struct',[[('m_observe',24,-12),('m_name',9,-11),('m_toonHandle',126,-10),('m_clanTag',41,-9),('m_clanLogo',42,-8),('m_hijack',13,-7),('m_hijackCloneGameUserId',60,-6)]]), #182
('_optional',[99]), #183
('_struct',[[('m_state',24,-7),('m_sequence',183,-6)]]), #184
('_struct',[[('m_target',96,-6)]]), #185
('_struct',[[('m_target',97,-6)]]), #186
('_struct',[[('m_catalog',10,-9),('m_entry',83,-8),('m_field',9,-7),('m_value',9,-6)]]), #187
('_struct',[[('m_index',6,-6)]]), #188
('_struct',[[('m_shown',13,-6)]]), #189
('_struct',[[('m_recipient',12,-3),('m_string',30,-2)]]), #190
('_struct',[[('m_recipient',12,-3),('m_point',88,-2)]]), #191
('_struct',[[('m_progress',47,-2)]]), #192
('_struct',[[('m_status',24,-2)]]), #193
('_struct',[[('m_scoreValueMineralsCurrent',47,0),('m_scoreValueVespeneCurrent',47,1),('m_scoreValueMineralsCollectionRate',47,2),('m_scoreValueVespeneCollectionRate',47,3),('m_scoreValueWorkersActiveCount',47,4),('m_scoreValueMineralsUsedInProgressArmy',47,5),('m_scoreValueMineralsUsedInProgressEconomy',47,6),('m_scoreValueMineralsUsedInProgressTechnology',47,7),('m_scoreValueVespeneUsedInProgressArmy',47,8),('m_scoreValueVespeneUsedInProgressEconomy',47,9),('m_scoreValueVespeneUsedInProgressTechnology',47,10),('m_scoreValueMineralsUsedCurrentArmy',47,11),('m_scoreValueMineralsUsedCurrentEconomy',47,12),('m_scoreValueMineralsUsedCurrentTechnology',47,13),('m_scoreValueVespeneUsedCurrentArmy',47,14),('m_scoreValueVespeneUsedCurrentEconomy',47,15),('m_scoreValueVespeneUsedCurrentTechnology',47,16),('m_scoreValueMineralsLostArmy',47,17),('m_scoreValueMineralsLostEconomy',47,18),('m_scoreValueMineralsLostTechnology',47,19),('m_scoreValueVespeneLostArmy',47,20),('m_scoreValueVespeneLostEconomy',47,21),('m_scoreValueVespeneLostTechnology',47,22),('m_scoreValueMineralsKilledArmy',47,23),('m_scoreValueMineralsKilledEconomy',47,24),('m_scoreValueMineralsKilledTechnology',47,25),('m_scoreValueVespeneKilledArmy',47,26),('m_scoreValueVespeneKilledEconomy',47,27),('m_scoreValueVespeneKilledTechnology',47,28),('m_scoreValueFoodUsed',47,29),('m_scoreValueFoodMade',47,30),('m_scoreValueMineralsUsedActiveForces',47,31),('m_scoreValueVespeneUsedActiveForces',47,32),('m_scoreValueMineralsFriendlyFireArmy',47,33),('m_scoreValueMineralsFriendlyFireEconomy',47,34),('m_scoreValueMineralsFriendlyFireTechnology',47,35),('m_scoreValueVespeneFriendlyFireArmy',47,36),('m_scoreValueVespeneFriendlyFireEconomy',47,37),('m_scoreValueVespeneFriendlyFireTechnology',47,38)]]), #194
('_struct',[[('m_playerId',1,0),('m_stats',194,1)]]), #195
('_optional',[29]), #196
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6),('m_creatorUnitTagIndex',43,7),('m_creatorUnitTagRecycle',43,8),('m_creatorAbilityName',196,9)]]), #197
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_killerPlayerId',60,2),('m_x',10,3),('m_y',10,4),('m_killerUnitTagIndex',43,5),('m_killerUnitTagRecycle',43,6)]]), #198
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_controlPlayerId',1,2),('m_upkeepPlayerId',1,3)]]), #199
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2)]]), #200
('_struct',[[('m_playerId',1,0),('m_upgradeTypeName',29,1),('m_count',47,2)]]), #201
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1),('m_unitTypeName',29,2),('m_controlPlayerId',1,3),('m_upkeepPlayerId',1,4),('m_x',10,5),('m_y',10,6)]]), #202
('_struct',[[('m_unitTagIndex',6,0),('m_unitTagRecycle',6,1)]]), #203
('_array',[(0,10),47]), #204
('_struct',[[('m_firstUnitIndex',6,0),('m_items',204,1)]]), #205
('_struct',[[('m_playerId',1,0),('m_type',6,1),('m_userId',43,2),('m_slotId',43,3)]]), #206
]
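# How to read the typeinfo entries above (informal sketch): each tuple pairs
# a decoder method with its arguments. For instance, entry #21
# ('_int',[(0,64)]) is an integer stored in 64 bits with offset 0, entry #20
# ('_blob',[(0,7)]) is a byte string whose length fits in 7 bits, and in
# entry #31 ('_struct',[[('m_file',30,0)]]) the field 'm_file' is decoded
# with typeinfo #30 and carries protocol field tag 0.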
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (82, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (81, 'NNet.Game.SUserOptionsEvent'),
9: (74, 'NNet.Game.SBankFileEvent'),
10: (76, 'NNet.Game.SBankSectionEvent'),
11: (77, 'NNet.Game.SBankKeyEvent'),
12: (78, 'NNet.Game.SBankValueEvent'),
13: (80, 'NNet.Game.SBankSignatureEvent'),
14: (85, 'NNet.Game.SCameraSaveEvent'),
21: (86, 'NNet.Game.SSaveGameEvent'),
22: (82, 'NNet.Game.SSaveGameDoneEvent'),
23: (82, 'NNet.Game.SLoadGameDoneEvent'),
25: (87, 'NNet.Game.SCommandManagerResetEvent'),
26: (90, 'NNet.Game.SGameCheatEvent'),
27: (100, 'NNet.Game.SCmdEvent'),
28: (108, 'NNet.Game.SSelectionDeltaEvent'),
29: (109, 'NNet.Game.SControlGroupUpdateEvent'),
30: (111, 'NNet.Game.SSelectionSyncCheckEvent'),
31: (113, 'NNet.Game.SResourceTradeEvent'),
32: (114, 'NNet.Game.STriggerChatMessageEvent'),
33: (117, 'NNet.Game.SAICommunicateEvent'),
34: (118, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (119, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
36: (120, 'NNet.Game.STriggerPingEvent'),
37: (121, 'NNet.Game.SBroadcastCheatEvent'),
38: (122, 'NNet.Game.SAllianceEvent'),
39: (123, 'NNet.Game.SUnitClickEvent'),
40: (124, 'NNet.Game.SUnitHighlightEvent'),
41: (125, 'NNet.Game.STriggerReplySelectedEvent'),
43: (130, 'NNet.Game.SHijackReplayGameEvent'),
44: (82, 'NNet.Game.STriggerSkippedEvent'),
45: (135, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (142, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (143, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (144, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (148, 'NNet.Game.SCameraUpdateEvent'),
50: (82, 'NNet.Game.STriggerAbortMissionEvent'),
51: (131, 'NNet.Game.STriggerPurchaseMadeEvent'),
52: (82, 'NNet.Game.STriggerPurchaseExitEvent'),
53: (132, 'NNet.Game.STriggerPlanetMissionLaunchedEvent'),
54: (82, 'NNet.Game.STriggerPlanetPanelCanceledEvent'),
55: (134, 'NNet.Game.STriggerDialogControlEvent'),
56: (138, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (149, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (152, 'NNet.Game.STriggerMouseClickedEvent'),
59: (153, 'NNet.Game.STriggerMouseMovedEvent'),
60: (154, 'NNet.Game.SAchievementAwardedEvent'),
61: (155, 'NNet.Game.STriggerHotkeyPressedEvent'),
62: (156, 'NNet.Game.STriggerTargetModeUpdateEvent'),
63: (82, 'NNet.Game.STriggerPlanetPanelReplayEvent'),
64: (157, 'NNet.Game.STriggerSoundtrackDoneEvent'),
65: (158, 'NNet.Game.STriggerPlanetMissionSelectedEvent'),
66: (159, 'NNet.Game.STriggerKeyPressedEvent'),
67: (170, 'NNet.Game.STriggerMovieFunctionEvent'),
68: (82, 'NNet.Game.STriggerPlanetPanelBirthCompleteEvent'),
69: (82, 'NNet.Game.STriggerPlanetPanelDeathCompleteEvent'),
70: (160, 'NNet.Game.SResourceRequestEvent'),
71: (161, 'NNet.Game.SResourceRequestFulfillEvent'),
72: (162, 'NNet.Game.SResourceRequestCancelEvent'),
73: (82, 'NNet.Game.STriggerResearchPanelExitEvent'),
74: (82, 'NNet.Game.STriggerResearchPanelPurchaseEvent'),
75: (164, 'NNet.Game.STriggerResearchPanelSelectionChangedEvent'),
76: (163, 'NNet.Game.STriggerCommandErrorEvent'),
77: (82, 'NNet.Game.STriggerMercenaryPanelExitEvent'),
78: (82, 'NNet.Game.STriggerMercenaryPanelPurchaseEvent'),
79: (165, 'NNet.Game.STriggerMercenaryPanelSelectionChangedEvent'),
80: (82, 'NNet.Game.STriggerVictoryPanelExitEvent'),
81: (82, 'NNet.Game.STriggerBattleReportPanelExitEvent'),
82: (166, 'NNet.Game.STriggerBattleReportPanelPlayMissionEvent'),
83: (167, 'NNet.Game.STriggerBattleReportPanelPlaySceneEvent'),
84: (167, 'NNet.Game.STriggerBattleReportPanelSelectionChangedEvent'),
85: (132, 'NNet.Game.STriggerVictoryPanelPlayMissionAgainEvent'),
86: (82, 'NNet.Game.STriggerMovieStartedEvent'),
87: (82, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (168, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (169, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (171, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (172, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
92: (174, 'NNet.Game.STriggerMouseWheelEvent'),
93: (131, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseItemChangedEvent'),
94: (175, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseCategoryChangedEvent'),
95: (176, 'NNet.Game.STriggerButtonPressedEvent'),
96: (82, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
97: (177, 'NNet.Game.STriggerCutsceneBookmarkFiredEvent'),
98: (178, 'NNet.Game.STriggerCutsceneEndSceneFiredEvent'),
99: (179, 'NNet.Game.STriggerCutsceneConversationLineEvent'),
100: (180, 'NNet.Game.STriggerCutsceneConversationLineMissingEvent'),
101: (181, 'NNet.Game.SGameUserLeaveEvent'),
102: (182, 'NNet.Game.SGameUserJoinEvent'),
103: (184, 'NNet.Game.SCommandManagerStateEvent'),
104: (185, 'NNet.Game.SCmdUpdateTargetPointEvent'),
105: (186, 'NNet.Game.SCmdUpdateTargetUnitEvent'),
106: (139, 'NNet.Game.STriggerAnimLengthQueryByNameEvent'),
107: (140, 'NNet.Game.STriggerAnimLengthQueryByPropsEvent'),
108: (141, 'NNet.Game.STriggerAnimOffsetEvent'),
109: (187, 'NNet.Game.SCatalogModifyEvent'),
110: (188, 'NNet.Game.SHeroTalentTreeSelectedEvent'),
111: (82, 'NNet.Game.STriggerProfilerLoggingFinishedEvent'),
112: (189, 'NNet.Game.SHeroTalentTreeSelectionPanelToggledEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (190, 'NNet.Game.SChatMessage'),
1: (191, 'NNet.Game.SPingMessage'),
2: (192, 'NNet.Game.SLoadingProgressMessage'),
3: (82, 'NNet.Game.SServerPingMessage'),
4: (193, 'NNet.Game.SReconnectNotifyMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# Map from protocol NNet.Replay.Tracker.*Event eventid to (typeid, name)
tracker_event_types = {
0: (195, 'NNet.Replay.Tracker.SPlayerStatsEvent'),
1: (197, 'NNet.Replay.Tracker.SUnitBornEvent'),
2: (198, 'NNet.Replay.Tracker.SUnitDiedEvent'),
3: (199, 'NNet.Replay.Tracker.SUnitOwnerChangeEvent'),
4: (200, 'NNet.Replay.Tracker.SUnitTypeChangeEvent'),
5: (201, 'NNet.Replay.Tracker.SUpgradeEvent'),
6: (202, 'NNet.Replay.Tracker.SUnitInitEvent'),
7: (203, 'NNet.Replay.Tracker.SUnitDoneEvent'),
8: (205, 'NNet.Replay.Tracker.SUnitPositionsEvent'),
9: (206, 'NNet.Replay.Tracker.SPlayerSetupEvent'),
}
# NOTE: older builds may not support some types and the generated methods
# may fail to function properly; if specific backwards compatibility is
# needed, these values should be tested against None.
# The typeid of the NNet.Replay.Tracker.EEventId enum.
tracker_eventid_typeid = 2
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 7
# The typeid of NNet.Replay.SGameUserId (the type used to encode player ids).
replay_userid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 18
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 40
# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).
replay_initdata_typeid = 73
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for v in value.values():
return v
return 0
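# Illustrative: a decoded SVarUint32 instance is a single-entry dict whose
# key names the chosen width variant, so _varuint32_value({'m_uint14': 5})
# returns 5 (key name shown for illustration); an empty instance yields 0.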
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_user_id):
# Decodes events prefixed with a gameloop and possibly userid
gameloop = 0
while not decoder.done():
start_bits = decoder.used_bits()
# decode the gameloop delta before each event
delta = _varuint32_value(decoder.instance(svaruint32_typeid))
gameloop += delta
# decode the userid before each event
if decode_user_id:
userid = decoder.instance(replay_userid_typeid)
# decode the event id
eventid = decoder.instance(eventid_typeid)
typeid, typename = event_types.get(eventid, (None, None))
if typeid is None:
raise CorruptedError('eventid({}) at {}'.format(eventid, decoder))
# decode the event struct instance
event = decoder.instance(typeid)
event['_event'] = typename
event['_eventid'] = eventid
# insert gameloop and userid
event['_gameloop'] = gameloop
if decode_user_id:
event['_userid'] = userid
# the next event is byte aligned
decoder.byte_align()
# insert bits used in stream
event['_bits'] = decoder.used_bits() - start_bits
yield event
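# Informal note: each event is stored as (gameloop delta, [userid,] eventid,
# payload); the loop above accumulates the deltas, so '_gameloop' is absolute
# game time -- e.g. successive deltas 0, 16, 16 yield gameloops 0, 16, 32.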
def decode_replay_game_events(contents):
"""Decodes and yields each game event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
game_eventid_typeid,
game_event_types,
decode_user_id=True):
yield event
def decode_replay_message_events(contents):
"""Decodes and yields each message event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
message_eventid_typeid,
message_event_types,
decode_user_id=True):
yield event
def decode_replay_tracker_events(contents):
"""Decodes and yields each tracker event from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
tracker_eventid_typeid,
tracker_event_types,
decode_user_id=False):
yield event
def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(replay_header_typeid)
def decode_replay_details(contents):
"""Decodes and returns the game details from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(game_details_typeid)
def decode_replay_initdata(contents):
"""Decodes and return the replay init data from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
return decoder.instance(replay_initdata_typeid)
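# Minimal usage sketch (not part of this module): assuming the replay archive
# is opened with the third-party mpyq reader, and that it contains the
# conventional SC2 sub-files (names below are the customary ones, not
# guaranteed here):
#
#   from mpyq import MPQArchive
#   archive = MPQArchive('example.SC2Replay')  # hypothetical path
#   header = decode_replay_header(
#       archive.header['user_data_header']['content'])
#   details = decode_replay_details(archive.read_file('replay.details'))
#   for event in decode_replay_tracker_events(
#           archive.read_file('replay.tracker.events')):
#       print(event['_event'], event['_gameloop'])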
def decode_replay_attributes_events(contents):
"""Decodes and yields each attribute from the contents byte string."""
buffer = BitPackedBuffer(contents, 'little')
attributes = {}
if not buffer.done():
attributes['source'] = buffer.read_bits(8)
attributes['mapNamespace'] = buffer.read_bits(32)
count = buffer.read_bits(32)
attributes['scopes'] = {}
while not buffer.done():
value = {}
value['namespace'] = buffer.read_bits(32)
value['attrid'] = attrid = buffer.read_bits(32)
scope = buffer.read_bits(8)
value['value'] = buffer.read_aligned_bytes(4)[::-1].strip(b'\x00')
if scope not in attributes['scopes']:
attributes['scopes'][scope] = {}
if attrid not in attributes['scopes'][scope]:
attributes['scopes'][scope][attrid] = []
attributes['scopes'][scope][attrid].append(value)
return attributes
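# Note on the value decoding above: each attribute value is stored as four
# little-endian bytes, so [::-1] reverses them and strip(b'\x00') drops the
# padding -- e.g. (illustrative) raw b'rreT' becomes b'Terr'.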
def unit_tag(unitTagIndex, unitTagRecycle):
return (unitTagIndex << 18) + unitTagRecycle
def unit_tag_index(unitTag):
return (unitTag >> 18) & 0x00003fff
def unit_tag_recycle(unitTag):
return (unitTag) & 0x0003ffff
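# Worked example: tracker events identify a unit by an (index, recycle) pair
# packed into a single tag; unit_tag(280, 1) == (280 << 18) + 1 == 73400321,
# and unit_tag_index(73400321) == 280, unit_tag_recycle(73400321) == 1
# invert the packing (values chosen for illustration).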
| mit |
IPMITMO/statan | coala/coalib/results/ResultFilter.py | 35 | 9630 | import copy
from difflib import SequenceMatcher
from coalib.results.Diff import ConflictError, Diff
from coalib.results.SourceRange import SourceRange
def filter_results(original_file_dict,
modified_file_dict,
original_results,
modified_results):
"""
Filters results, keeping only those that are unique across the file changes
:param original_file_dict: Dict of lists of file contents before changes
:param modified_file_dict: Dict of lists of file contents after changes
:param original_results: List of results of the old files
:param modified_results: List of results of the new files
:return: List of results from new files that are unique
from all those that existed in the old changes
"""
renamed_files = ensure_files_present(original_file_dict,
modified_file_dict)
# diffs_dict[file] is a diff between the original and modified file
diffs_dict = {}
for file in original_file_dict:
diffs_dict[file] = Diff.from_string_arrays(
original_file_dict[file],
modified_file_dict[renamed_files.get(file, file)])
orig_result_diff_dict_dict = remove_result_ranges_diffs(original_results,
original_file_dict)
mod_result_diff_dict_dict = remove_result_ranges_diffs(modified_results,
modified_file_dict)
unique_results = []
for m_r in reversed(modified_results):
unique = True
for o_r in original_results:
if basics_match(o_r, m_r):
if source_ranges_match(original_file_dict,
diffs_dict,
orig_result_diff_dict_dict[o_r],
mod_result_diff_dict_dict[m_r],
renamed_files):
# at least one original result matches completely
unique = False
break
if unique:
unique_results.append(m_r)
return unique_results
def basics_match(original_result,
modified_result):
"""
Checks whether the following properties of two results match:
* origin
* message
* severity
* debug_msg
:param original_result: A result of the old files
:param modified_result: A result of the new files
:return: Boolean value whether or not the properties match
"""
return all(getattr(original_result, member) ==
getattr(modified_result, member)
for member in ['origin', 'message', 'severity', 'debug_msg'])
def source_ranges_match(original_file_dict,
diff_dict,
original_result_diff_dict,
modified_result_diff_dict,
renamed_files):
"""
Checks whether the SourceRanges of two results match
:param original_file_dict: Dict of lists of file contents before changes
:param diff_dict: Dict of diffs describing the changes per file
:param original_result_diff_dict: diff for each file for this result
:param modified_result_diff_dict: diff for each file for the modified result
:param renamed_files: A dictionary containing file renamings across runs
:return: Boolean value whether the SourceRanges match
"""
for file_name in original_file_dict:
try: # fails if the affected range of the result gets modified
original_total_diff = (diff_dict[file_name] +
original_result_diff_dict[file_name])
except ConflictError:
return False
# original file with file_diff and original_diff applied
original_total_file = original_total_diff.modified
# modified file with modified_diff applied
modified_total_file = modified_result_diff_dict[
renamed_files.get(file_name, file_name)].modified
if original_total_file != modified_total_file:
return False
return True
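# Informal intuition for the check above: a result counts as the same across
# a change when deleting its affected code from the old file and replaying
# the file diff produces exactly the text obtained by deleting the
# corresponding code from the new file, i.e. the change did not touch the
# result's own code.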
def remove_range(file_contents, source_range):
"""
Removes the chars covered by the SourceRange from the file.
:param file_contents: list of lines in the file
:param source_range: SourceRange
:return: list of file contents with the specified chars removed
"""
if not file_contents:
return []
newfile = list(file_contents)
# attention: line numbers in the SourceRange are 1-based (human-readable),
# while list indices start with 0
source_range = source_range.expand(file_contents)
if source_range.start.line == source_range.end.line:
# if it's all in one line, replace the line by its beginning and end
newfile[source_range.start.line - 1] = (
newfile[source_range.start.line - 1][:source_range.start.column-1]
+ newfile[source_range.start.line - 1][source_range.end.column:])
if newfile[source_range.start.line - 1] == '':
del newfile[source_range.start.line - 1]
else:
# cut away after start
newfile[source_range.start.line - 1] = (
newfile[source_range.start.line - 1][:source_range.start.column-1])
# cut away before end
newfile[source_range.end.line - 1] = (
newfile[source_range.end.line - 1][source_range.end.column:])
# start: index = first line number ==> line after first line
# end: index = last line -2 ==> line before last line
for i in reversed(range(
source_range.start.line, source_range.end.line - 1)):
del newfile[i]
# remove leftover empty lines
# the first line here is actually the former `source_range.end.line -1`
if newfile[source_range.start.line] == '':
del newfile[source_range.start.line]
if newfile[source_range.start.line - 1] == '':
del newfile[source_range.start.line - 1]
return newfile
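# Illustrative example (lines/columns are 1-based and end-inclusive): removing
# a range covering columns 5-7 of the single line 'foo bar baz' turns it into
# 'foo  baz'; a line that becomes empty afterwards is deleted entirely.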
def remove_result_ranges_diffs(result_list, file_dict):
"""
Calculates the diffs to all files in file_dict that describe the removal of
each respective result's affected code.
:param result_list: list of results
:param file_dict: dict of file contents
:return: returnvalue[result][file] is a diff of the changes the
removal of this result's affected code would cause for
the file.
"""
result_diff_dict_dict = {}
for original_result in result_list:
mod_file_dict = copy.deepcopy(file_dict)
# gather all source ranges from this result
source_ranges = []
# SourceRanges must be sorted backwards and overlaps must be eliminated
# this way, the deletion based on sourceRanges is not offset by
# previous deletions in the same line that invalidate the indices.
previous = None
for source_range in sorted(original_result.affected_code, reverse=True):
# previous exists and overlaps
if previous is not None and source_range.overlaps(previous):
combined_sr = SourceRange.join(previous, source_range)
previous = combined_sr
elif previous is None:
previous = source_range
# previous exists but it doesn't overlap
else:
source_ranges.append(previous)
previous = source_range
# don't forget last entry if there were any:
if previous:
source_ranges.append(previous)
for source_range in source_ranges:
file_name = source_range.file
new_file = remove_range(mod_file_dict[file_name],
source_range)
mod_file_dict[file_name] = new_file
diff_dict = {}
for file_name in file_dict:
diff_dict[file_name] = Diff.from_string_arrays(
file_dict[file_name],
mod_file_dict[file_name])
result_diff_dict_dict[original_result] = diff_dict
return result_diff_dict_dict
def ensure_files_present(original_file_dict, modified_file_dict):
"""
Ensures that all files are available as keys in both dicts.
:param original_file_dict: Dict of lists of file contents before changes
:param modified_file_dict: Dict of lists of file contents after changes
:return: Return a dictionary of renamed files.
"""
original_files = set(original_file_dict.keys())
modified_files = set(modified_file_dict.keys())
affected_files = original_files | modified_files
original_unique_files = affected_files - modified_files
renamed_files_dict = {}
for file in filter(
lambda filter_file: filter_file not in original_files,
affected_files):
for comparable_file in original_unique_files:
s = SequenceMatcher(
None,
''.join(modified_file_dict[file]),
''.join(original_file_dict[comparable_file]))
if s.real_quick_ratio() >= 0.5 and s.ratio() > 0.5:
renamed_files_dict[comparable_file] = file
break
else:
original_file_dict[file] = []
for file in filter(
lambda filter_file: filter_file not in modified_files,
affected_files):
modified_file_dict[file] = []
return renamed_files_dict
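# Illustrative example: if 'a.py' exists only in the original run and 'b.py'
# only in the modified run with more than 50% similar content, the returned
# dict is {'a.py': 'b.py'}; files without such a match are instead added to
# the other run's dict with empty contents.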
| mit |
flashycud/timestack | django/core/management/validation.py | 103 | 19729 | import sys
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
from django.core.management.color import color_style
from django.utils.itercompat import is_iterable
try:
any
except NameError:
from django.utils.itercompat import any
class ModelErrorCollection:
def __init__(self, outfile=sys.stdout):
self.errors = []
self.outfile = outfile
self.style = color_style()
def add(self, context, error):
self.errors.append((context, error))
self.outfile.write(self.style.ERROR("%s: %s\n" % (context, error)))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.conf import settings
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.fields.related import RelatedObject
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app):
opts = cls._meta
# Do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg = '"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places >= max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/ .' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if f.choices:
if isinstance(f.choices, basestring) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if not isinstance(c, (list, tuple)) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Make sure the related field specified by a ForeignKey is unique
if not f.rel.to._meta.get_field(f.rel.field_name).unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.rel.field_name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, basestring):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, basestring):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
elif isinstance(f, GenericRelation):
if not any([isinstance(vfield, GenericForeignKey) for vfield in f.rel.to._meta.virtual_fields]):
e.add(opts, "Model '%s' must have a GenericForeignKey in "
"order to create a GenericRelation that points to it."
% f.rel.to.__name__
)
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is None, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?': continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
for field_name in ut:
try:
f = opts.get_field(field_name, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"unique_together" refers to %s, a field that doesn\'t exist. Check your syntax.' % field_name)
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"unique_together" refers to %s. ManyToManyFields are not supported in unique_together.' % f.name)
if f not in opts.local_fields:
e.add(opts, '"unique_together" refers to %s. This is not in the same model as the unique_together statement.' % f.name)
return len(e.errors)
| mit |
lpsinger/astropy | astropy/io/fits/tests/test_fitsheader.py | 8 | 5967 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from . import FitsTestCase
from astropy.io.fits.scripts import fitsheader
from astropy import __version__ as version
class TestFITSheader_script(FitsTestCase):
def test_help(self):
with pytest.raises(SystemExit) as e:
fitsheader.main(['-h'])
assert e.value.code == 0
def test_version(self, capsys):
with pytest.raises(SystemExit) as e:
fitsheader.main(['--version'])
out = capsys.readouterr()[0]
assert out == f'fitsheader {version}'
assert e.value.code == 0
def test_file_exists(self, capsys):
fitsheader.main([self.data('arange.fits')])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith(
'SIMPLE = T / conforms to FITS standard')
assert err == ''
def test_by_keyword(self, capsys):
fitsheader.main(['-k', 'NAXIS', self.data('arange.fits')])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith(
'NAXIS = 3 / number of array dimensions')
fitsheader.main(['-k', 'NAXIS*', self.data('arange.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 5
assert out[1].startswith('NAXIS')
assert out[2].startswith('NAXIS1')
assert out[3].startswith('NAXIS2')
assert out[4].startswith('NAXIS3')
fitsheader.main(['-k', 'RANDOMKEY', self.data('arange.fits')])
out, err = capsys.readouterr()
assert err.startswith('WARNING') and 'RANDOMKEY' in err
assert not err.startswith('ERROR')
def test_by_extension(self, capsys):
fitsheader.main(['-e', '1', self.data('test0.fits')])
out, err = capsys.readouterr()
assert len(out.splitlines()) == 62
fitsheader.main(['-e', '3', '-k', 'BACKGRND', self.data('test0.fits')])
out, err = capsys.readouterr()
assert out.splitlines()[1].startswith('BACKGRND= 312.')
fitsheader.main(['-e', '0', '-k', 'BACKGRND', self.data('test0.fits')])
out, err = capsys.readouterr()
assert err.startswith('WARNING')
fitsheader.main(['-e', '3', '-k', 'FOO', self.data('test0.fits')])
out, err = capsys.readouterr()
assert err.startswith('WARNING')
def test_table(self, capsys):
fitsheader.main(['-t', '-k', 'BACKGRND', self.data('test0.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 5
assert out[1].endswith('| 1 | BACKGRND | 316.0 |')
assert out[2].endswith('| 2 | BACKGRND | 351.0 |')
assert out[3].endswith('| 3 | BACKGRND | 312.0 |')
assert out[4].endswith('| 4 | BACKGRND | 323.0 |')
fitsheader.main(['-t', '-e', '0', '-k', 'NAXIS',
self.data('arange.fits'),
self.data('ascii.fits'),
self.data('blank.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[1].endswith('| 0 | NAXIS | 3 |')
assert out[2].endswith('| 0 | NAXIS | 0 |')
assert out[3].endswith('| 0 | NAXIS | 2 |')
def test_fitsort(self, capsys):
fitsheader.main(['-e', '0', '-f', '-k', 'EXPSTART', '-k', 'EXPTIME',
self.data('test0.fits'), self.data('test1.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[2].endswith('test0.fits 49491.65366175 0.23')
assert out[3].endswith('test1.fits 49492.65366175 0.22')
fitsheader.main(['-e', '0', '-f', '-k', 'EXPSTART', '-k', 'EXPTIME',
self.data('test0.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 3
assert out[2].endswith('test0.fits 49491.65366175 0.23')
fitsheader.main(['-f', '-k', 'NAXIS',
self.data('tdim.fits'), self.data('test1.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[0].endswith('0:NAXIS 1:NAXIS 2:NAXIS 3:NAXIS 4:NAXIS')
assert out[2].endswith('tdim.fits 0 2 -- -- --')
assert out[3].endswith('test1.fits 0 2 2 2 2')
# check that files without required keyword are present
fitsheader.main(['-f', '-k', 'DATE-OBS',
self.data('table.fits'), self.data('test0.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 4
assert out[2].endswith('table.fits --')
assert out[3].endswith('test0.fits 19/05/94')
# check that COMMENT and HISTORY are excluded
fitsheader.main(['-e', '0', '-f', self.data('tb.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 3
assert out[2].endswith('tb.fits True 16 0 True '
'STScI-STSDAS/TABLES tb.fits 1')
def test_dotkeyword(self, capsys):
fitsheader.main(['-e', '0', '-k', 'ESO DET ID',
self.data('fixed-1890.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 2
assert out[1].strip().endswith("HIERARCH ESO DET ID = 'DV13' / Detector system Id")
fitsheader.main(['-e', '0', '-k', 'ESO.DET.ID',
self.data('fixed-1890.fits')])
out, err = capsys.readouterr()
out = out.splitlines()
assert len(out) == 2
assert out[1].strip().endswith("HIERARCH ESO DET ID = 'DV13' / Detector system Id")
| bsd-3-clause |
goldsborough/.emacs | .emacs.d/.python-environments/default/lib/python3.5/site-packages/setuptools/command/install.py | 529 | 4683 | from distutils.errors import DistutilsArgError
import inspect
import glob
import warnings
import platform
import distutils.command.install as orig
import setuptools
# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for
# now. See https://github.com/pypa/setuptools/issues/199/
_install = orig.install
class install(orig.install):
"""Use easy_install to install the package, w/dependencies"""
user_options = orig.install.user_options + [
('old-and-unmanageable', None, "Try not to use this!"),
('single-version-externally-managed', None,
"used by system package builders to create 'flat' eggs"),
]
boolean_options = orig.install.boolean_options + [
'old-and-unmanageable', 'single-version-externally-managed',
]
new_commands = [
('install_egg_info', lambda self: True),
('install_scripts', lambda self: True),
]
_nc = dict(new_commands)
def initialize_options(self):
orig.install.initialize_options(self)
self.old_and_unmanageable = None
self.single_version_externally_managed = None
def finalize_options(self):
orig.install.finalize_options(self)
if self.root:
self.single_version_externally_managed = True
elif self.single_version_externally_managed:
if not self.root and not self.record:
raise DistutilsArgError(
"You must specify --record or --root when building system"
" packages"
)
def handle_extra_path(self):
if self.root or self.single_version_externally_managed:
# explicit backward-compatibility mode, allow extra_path to work
return orig.install.handle_extra_path(self)
# Ignore extra_path when installing an egg (or being run by another
# command without --root or --single-version-externally-managed)
self.path_file = None
self.extra_dirs = ''
def run(self):
# Explicit request for old-style install? Just do it
if self.old_and_unmanageable or self.single_version_externally_managed:
return orig.install.run(self)
if not self._called_from_setup(inspect.currentframe()):
# Run in backward-compatibility mode to support bdist_* commands.
orig.install.run(self)
else:
self.do_egg_install()
@staticmethod
def _called_from_setup(run_frame):
"""
Attempt to detect whether run() was called from setup() or by another
command. If called by setup(), the parent caller will be the
'run_command' method in 'distutils.dist', and *its* caller will be
the 'run_commands' method. If called any other way, the
immediate caller *might* be 'run_command', but it won't have been
called by 'run_commands'. Return True in that case or if a call stack
is unavailable. Return False otherwise.
"""
if run_frame is None:
msg = "Call stack not available. bdist_* commands may fail."
warnings.warn(msg)
if platform.python_implementation() == 'IronPython':
msg = "For best results, pass -X:Frames to enable call stack."
warnings.warn(msg)
return True
res = inspect.getouterframes(run_frame)[2]
caller, = res[:1]
info = inspect.getframeinfo(caller)
caller_module = caller.f_globals.get('__name__', '')
return (
caller_module == 'distutils.dist'
and info.function == 'run_commands'
)
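# Illustrative call chains (names as in distutils): setup() ->
# Distribution.run_commands() -> run_command() -> install.run() counts as
# "called from setup", whereas a bdist_* command invoking install directly
# does not, because its caller's caller is not 'run_commands'.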
def do_egg_install(self):
easy_install = self.distribution.get_command_class('easy_install')
cmd = easy_install(
self.distribution, args="x", root=self.root, record=self.record,
)
cmd.ensure_finalized() # finalize before bdist_egg munges install cmd
cmd.always_copy_from = '.' # make sure local-dir eggs get installed
# pick up setup-dir .egg files only: no .egg-info
cmd.package_index.scan(glob.glob('*.egg'))
self.run_command('bdist_egg')
args = [self.distribution.get_command_obj('bdist_egg').egg_output]
if setuptools.bootstrap_install_from:
# Bootstrap self-installation of setuptools
args.insert(0, setuptools.bootstrap_install_from)
cmd.args = args
cmd.run()
setuptools.bootstrap_install_from = None
# XXX Python 3.1 doesn't see _nc if this is inside the class
install.sub_commands = (
[cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] +
install.new_commands
)
| mit |
rosshamish/classtime | tests/classtime/brain/remote_db/test_ldapdb.py | 1 | 2787 |
from __future__ import absolute_import
import os
import json
import classtime.brain.remote_db as remote_db
from classtime.brain.institutions import CONFIG_FOLDER_PATH as institution_config_path
class TestRemoteLDAPDatabase(object): # pylint: disable=R0904
@classmethod
def setup_class(cls):
cls.institution_configs = list()
cls.institutions = list()
config_filenames = [os.path.join(institution_config_path, filename)
for filename in os.listdir(institution_config_path)]
assert len(config_filenames) > len(['institutions.json', '__init__.py'])
for config_filename in config_filenames:
if 'json' not in config_filename:
continue
if 'institutions.json' in config_filename:
continue
with open(config_filename, 'r') as config:
institution_config = json.loads(config.read())
assert 'type' in institution_config
if 'ldap' in institution_config.get('type'):
cls.institution_configs.append(institution_config)
@classmethod
def teardown_class(cls):
del cls.institutions
def test_connect(self):
self.institutions = list()
for config in self.institution_configs:
assert 'name' in config
self.institutions.append(
remote_db.RemoteDatabaseFactory.build(config.get('name')))
for institution in self.institutions:
assert_connect(institution)
def test_disconnect(self):
for institution in self.institutions:
assert_disconnect(institution)
def test_search():
ldapdbs = TestRemoteLDAPDatabase()
ldapdbs.test_connect()
for institution, config in zip(ldapdbs.institutions,
ldapdbs.institution_configs):
if 'saved_searches' not in config:
continue
assert isinstance(config.get('saved_searches'), dict)
for search_name, search_config in config.get('saved_searches').items():
print 'search: [{}->{}]'.format(config.get('name'), search_name)
results = institution.search(search_name, limit=10)
yield assert_valid_search_results, results, search_config
ldapdbs.test_disconnect()
def assert_connect(institution):
try:
institution.connect()
except:
assert False
else:
assert True
def assert_disconnect(institution):
try:
institution.disconnect()
except:
assert False
else:
assert True
def assert_valid_search_results(search_results, search_config):
for search_result in search_results:
for attr, _ in search_result.items():
assert attr in search_config['attrs']
| mit |
mm112287/2015cda_g8_0421 | static/Brython3.1.1-20150328-091302/Lib/test/regrtest.py | 718 | 65317 | #! /usr/bin/python3.3
"""
Usage:
python -m test [options] [test_name1 [test_name2 ...]]
python path/to/Lib/test/regrtest.py [options] [test_name1 [test_name2 ...]]
If no arguments or options are provided, finds all files matching
the pattern "test_*" in the Lib/test subdirectory and runs
them in alphabetical order (but see -M and -u, below, for exceptions).
For more rigorous testing, it is useful to use the following
command line:
python -E -Wd -m test [options] [test_name1 ...]
Options:
-h/--help -- print this text and exit
--timeout TIMEOUT
-- dump the traceback and exit if a test takes more
than TIMEOUT seconds; disabled if TIMEOUT is negative
or equals to zero
--wait -- wait for user input, e.g., allow a debugger to be attached
Verbosity
-v/--verbose -- run tests in verbose mode with output to stdout
-w/--verbose2 -- re-run failed tests in verbose mode
-W/--verbose3 -- display test output on failure
-d/--debug -- print traceback for failed tests
-q/--quiet -- no output unless one or more tests fail
-o/--slow -- print the slowest 10 tests
--header -- print header with interpreter info
Selecting tests
-r/--randomize -- randomize test execution order (see below)
--randseed -- pass a random seed to reproduce a previous random run
-f/--fromfile -- read names of tests to run from a file (see below)
-x/--exclude -- arguments are tests to *exclude*
-s/--single -- single step through a set of tests (see below)
-m/--match PAT -- match test cases and methods with glob pattern PAT
-G/--failfast -- fail as soon as a test fails (only with -v or -W)
-u/--use RES1,RES2,...
-- specify which special resource intensive tests to run
-M/--memlimit LIMIT
-- run very large memory-consuming tests
--testdir DIR
-- execute test files in the specified directory (instead
of the Python stdlib test suite)
Special runs
-l/--findleaks -- if GC is available detect tests that leak memory
-L/--runleaks -- run the leaks(1) command just before exit
-R/--huntrleaks RUNCOUNTS
-- search for reference leaks (needs debug build, v. slow)
-j/--multiprocess PROCESSES
-- run PROCESSES processes at once
-T/--coverage -- turn on code coverage tracing using the trace module
-D/--coverdir DIRECTORY
-- Directory where coverage files are put
-N/--nocoverdir -- Put coverage files alongside modules
-t/--threshold THRESHOLD
-- call gc.set_threshold(THRESHOLD)
-n/--nowindows -- suppress error message boxes on Windows
-F/--forever -- run the specified tests in a loop, until an error happens
Additional Option Details:
-r randomizes test execution order. You can use --randseed=int to provide an
int seed value for the randomizer; this is useful for reproducing troublesome
test orders.
-s On the first invocation of regrtest using -s, the first test file found
or the first test file given on the command line is run, and the name of
the next test is recorded in a file named pynexttest. If run from the
Python build directory, pynexttest is located in the 'build' subdirectory,
otherwise it is located in tempfile.gettempdir(). On subsequent runs,
the test in pynexttest is run, and the next test is written to pynexttest.
When the last test has been run, pynexttest is deleted. In this way it
is possible to single step through the test files. This is useful when
doing memory analysis on the Python interpreter, a process which tends to
consume too many resources to run the full regression test non-stop.
-S is used to continue running tests after an aborted run. It will
maintain the order of a standard run (i.e., this assumes -r is not used).
This is useful after the tests have prematurely stopped for some external
reason and you want to start running from where you left off rather
than starting from the beginning.
-f reads the names of tests from the file given as f's argument, one
or more test names per line. Whitespace is ignored. Blank lines and
lines beginning with '#' are ignored. This is especially useful for
whittling down failures involving interactions among tests.
-L causes the leaks(1) command to be run just before exit if it exists.
leaks(1) is available on Mac OS X and presumably on some other
FreeBSD-derived systems.
-R runs each test several times and examines sys.gettotalrefcount() to
see if the test appears to be leaking references. The argument should
be of the form stab:run:fname where 'stab' is the number of times the
test is run to let gettotalrefcount settle down, 'run' is the number
of times further it is run and 'fname' is the name of the file the
reports are written to. These parameters all have defaults (5, 4 and
"reflog.txt" respectively), and the minimal invocation is '-R :'.
-M runs tests that require an exorbitant amount of memory. These tests
typically try to ascertain that containers keep working when containing more than
2 billion objects, which only works on 64-bit systems. There are also some
tests that try to exhaust the address space of the process, which only makes
sense on 32-bit systems with at least 2Gb of memory. The passed-in memlimit,
which is a string in the form of '2.5Gb', determines how much memory the
tests will limit themselves to (but they may go slightly over.) The number
shouldn't be more memory than the machine has (including swap memory). You
should also keep in mind that swap memory is generally much, much slower
than RAM, and setting memlimit to all available RAM or higher will heavily
tax the machine. On the other hand, it is no use running these tests with a
limit of less than 2.5Gb, and many require more than 20Gb. Tests that expect
to use more than memlimit memory will be skipped. The big-memory tests
generally run very, very long.
-u is used to specify which special resource intensive tests to run,
such as those requiring large file support or network connectivity.
The argument is a comma-separated list of words indicating the
resources to test. Currently only the following are defined:
all - Enable all special resources.
none - Disable all special resources (this is the default).
audio - Tests that use the audio device. (There are known
cases of broken audio drivers that can crash Python or
even the Linux kernel.)
curses - Tests that use curses and will modify the terminal's
state and output modes.
largefile - It is okay to run some tests that may create huge
files. These tests can take a long time and may
consume >2GB of disk space temporarily.
network - It is okay to run tests that use external network
resources, e.g. testing SSL support for sockets.
decimal - Test the decimal module against a large suite that
verifies compliance with standards.
cpu - Used for certain CPU-heavy tests.
subprocess - Run all tests for the subprocess module.
urlfetch - It is okay to download files required on testing.
gui - Run tests that require a running GUI.
To enable all resources except one, use '-uall,-<resource>'. For
example, to run all the tests except for the gui tests, give the
option '-uall,-gui'.
"""
# We import importlib *ASAP* in order to test #15386
import importlib
import builtins
import faulthandler
import getopt
import io
import json
import logging
import os
import platform
import random
import re
import shutil
import signal
import sys
import sysconfig
import tempfile
import time
import traceback
import unittest
import warnings
from inspect import isabstract
try:
import threading
except ImportError:
threading = None
try:
import multiprocessing.process
except ImportError:
multiprocessing = None
# Some times __path__ and __file__ are not absolute (e.g. while running from
# Lib/) and, if we change the CWD to run the tests in a temporary dir, some
# imports might fail. This affects only the modules imported before os.chdir().
# These modules are searched first in sys.path[0] (so '' -- the CWD) and if
# they are found in the CWD their __file__ and __path__ will be relative (this
# happens before the chdir). All the modules imported after the chdir, are
# not found in the CWD, and since the other paths in sys.path[1:] are absolute
# (site.py absolutize them), the __file__ and __path__ will be absolute too.
# Therefore it is necessary to absolutize manually the __file__ and __path__ of
# the packages to prevent later imports to fail when the CWD is different.
for module in sys.modules.values():
if hasattr(module, '__path__'):
module.__path__ = [os.path.abspath(path) for path in module.__path__]
if hasattr(module, '__file__'):
module.__file__ = os.path.abspath(module.__file__)
# MacOSX (a.k.a. Darwin) has a default stack size that is too small
# for deeply recursive regular expressions. We see this as crashes in
# the Python test suite when running test_re.py and test_sre.py. The
# fix is to set the stack limit to 2048.
# This approach may also be useful for other Unixy platforms that
# suffer from small default stack limits.
if sys.platform == 'darwin':
try:
import resource
except ImportError:
pass
else:
soft, hard = resource.getrlimit(resource.RLIMIT_STACK)
newsoft = min(hard, max(soft, 1024*2048))
resource.setrlimit(resource.RLIMIT_STACK, (newsoft, hard))
# Test result constants.
PASSED = 1
FAILED = 0
ENV_CHANGED = -1
SKIPPED = -2
RESOURCE_DENIED = -3
INTERRUPTED = -4
CHILD_ERROR = -5 # error in a child process
from test import support
RESOURCE_NAMES = ('audio', 'curses', 'largefile', 'network',
'decimal', 'cpu', 'subprocess', 'urlfetch', 'gui')
TEMPDIR = os.path.abspath(tempfile.gettempdir())
def usage(msg):
print(msg, file=sys.stderr)
print("Use --help for usage", file=sys.stderr)
sys.exit(2)
def main(tests=None, testdir=None, verbose=0, quiet=False,
exclude=False, single=0, randomize=False, fromfile=None,
findleaks=False, use_resources=None, trace=False, coverdir='coverage',
runleaks=False, huntrleaks=False, verbose2=False, print_slow=False,
random_seed=None, use_mp=None, verbose3=False, forever=False,
header=False, failfast=False, match_tests=None):
"""Execute a test suite.
This also parses command-line options and modifies its behavior
accordingly.
tests -- a list of strings containing test names (optional)
testdir -- the directory in which to look for tests (optional)
Users other than the Python test suite will certainly want to
specify testdir; if it's omitted, the directory containing the
Python test suite is searched for.
If the tests argument is omitted, the tests listed on the
command-line will be used. If that's empty, too, then all *.py
files beginning with test_ will be used.
The other default arguments (verbose, quiet, exclude,
single, randomize, findleaks, use_resources, trace, coverdir,
print_slow, and random_seed) allow programmers calling main()
directly to set the values that would normally be set by flags
on the command line.
"""
# Display the Python traceback on fatal errors (e.g. segfault)
faulthandler.enable(all_threads=True)
# Display the Python traceback on SIGALRM or SIGUSR1 signal
signals = []
if hasattr(signal, 'SIGALRM'):
signals.append(signal.SIGALRM)
if hasattr(signal, 'SIGUSR1'):
signals.append(signal.SIGUSR1)
for signum in signals:
faulthandler.register(signum, chain=True)
replace_stdout()
support.record_original_stdout(sys.stdout)
try:
opts, args = getopt.getopt(sys.argv[1:], 'hvqxsoS:rf:lu:t:TD:NLR:FdwWM:nj:Gm:',
['help', 'verbose', 'verbose2', 'verbose3', 'quiet',
'exclude', 'single', 'slow', 'randomize', 'fromfile=', 'findleaks',
'use=', 'threshold=', 'coverdir=', 'nocoverdir',
'runleaks', 'huntrleaks=', 'memlimit=', 'randseed=',
'multiprocess=', 'coverage', 'slaveargs=', 'forever', 'debug',
'start=', 'nowindows', 'header', 'testdir=', 'timeout=', 'wait',
'failfast', 'match=', 'next='])
except getopt.error as msg:
usage(msg)
# Defaults
if random_seed is None:
random_seed = random.randrange(10000000)
if use_resources is None:
use_resources = []
debug = False
start = None
timeout = None
for o, a in opts:
if o in ('-h', '--help'):
print(__doc__)
return
elif o in ('-v', '--verbose'):
verbose += 1
elif o in ('-w', '--verbose2'):
verbose2 = True
elif o in ('-d', '--debug'):
debug = True
elif o in ('-W', '--verbose3'):
verbose3 = True
elif o in ('-G', '--failfast'):
failfast = True
elif o in ('-q', '--quiet'):
            quiet = True
verbose = 0
elif o in ('-x', '--exclude'):
exclude = True
elif o in ('-S', '--start'):
start = a
elif o in ('-s', '--single'):
single = 1
elif o == '--next':
single = int(a)
elif o in ('-o', '--slow'):
print_slow = True
elif o in ('-r', '--randomize'):
randomize = True
elif o == '--randseed':
random_seed = int(a)
elif o in ('-f', '--fromfile'):
fromfile = a
elif o in ('-m', '--match'):
match_tests = a
elif o in ('-l', '--findleaks'):
findleaks = True
elif o in ('-L', '--runleaks'):
runleaks = True
elif o in ('-t', '--threshold'):
import gc
gc.set_threshold(int(a))
elif o in ('-T', '--coverage'):
trace = True
elif o in ('-D', '--coverdir'):
# CWD is replaced with a temporary dir before calling main(), so we
# need join it with the saved CWD so it goes where the user expects.
coverdir = os.path.join(support.SAVEDCWD, a)
elif o in ('-N', '--nocoverdir'):
coverdir = None
elif o in ('-R', '--huntrleaks'):
huntrleaks = a.split(':')
if len(huntrleaks) not in (2, 3):
print(a, huntrleaks)
usage('-R takes 2 or 3 colon-separated arguments')
if not huntrleaks[0]:
huntrleaks[0] = 5
else:
huntrleaks[0] = int(huntrleaks[0])
if not huntrleaks[1]:
huntrleaks[1] = 4
else:
huntrleaks[1] = int(huntrleaks[1])
if len(huntrleaks) == 2 or not huntrleaks[2]:
huntrleaks[2:] = ["reflog.txt"]
# Avoid false positives due to various caches
# filling slowly with random data:
warm_caches()
elif o in ('-M', '--memlimit'):
support.set_memlimit(a)
elif o in ('-u', '--use'):
u = [x.lower() for x in a.split(',')]
for r in u:
if r == 'all':
use_resources[:] = RESOURCE_NAMES
continue
if r == 'none':
del use_resources[:]
continue
remove = False
if r[0] == '-':
remove = True
r = r[1:]
if r not in RESOURCE_NAMES:
usage('Invalid -u/--use option: ' + a)
if remove:
if r in use_resources:
use_resources.remove(r)
elif r not in use_resources:
use_resources.append(r)
elif o in ('-n', '--nowindows'):
import msvcrt
msvcrt.SetErrorMode(msvcrt.SEM_FAILCRITICALERRORS|
msvcrt.SEM_NOALIGNMENTFAULTEXCEPT|
msvcrt.SEM_NOGPFAULTERRORBOX|
msvcrt.SEM_NOOPENFILEERRORBOX)
try:
msvcrt.CrtSetReportMode
except AttributeError:
# release build
pass
else:
for m in [msvcrt.CRT_WARN, msvcrt.CRT_ERROR, msvcrt.CRT_ASSERT]:
msvcrt.CrtSetReportMode(m, msvcrt.CRTDBG_MODE_FILE)
msvcrt.CrtSetReportFile(m, msvcrt.CRTDBG_FILE_STDERR)
elif o in ('-F', '--forever'):
forever = True
elif o in ('-j', '--multiprocess'):
use_mp = int(a)
if use_mp <= 0:
try:
import multiprocessing
# Use all cores + extras for tests that like to sleep
use_mp = 2 + multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
use_mp = 3
if use_mp == 1:
use_mp = None
elif o == '--header':
header = True
elif o == '--slaveargs':
args, kwargs = json.loads(a)
try:
result = runtest(*args, **kwargs)
except KeyboardInterrupt:
result = INTERRUPTED, ''
except BaseException as e:
traceback.print_exc()
result = CHILD_ERROR, str(e)
sys.stdout.flush()
print() # Force a newline (just in case)
print(json.dumps(result))
sys.exit(0)
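            # (Illustrative) the parent process builds roughly:
            #     python -m test.regrtest --slaveargs '[["test_x", 1, false], {...}]'
            # and treats the last line of the child's stdout as the
            # JSON-encoded result tuple printed above.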
elif o == '--testdir':
# CWD is replaced with a temporary dir before calling main(), so we
# join it with the saved CWD so it ends up where the user expects.
testdir = os.path.join(support.SAVEDCWD, a)
elif o == '--timeout':
if hasattr(faulthandler, 'dump_tracebacks_later'):
timeout = float(a)
if timeout <= 0:
timeout = None
else:
print("Warning: The timeout option requires "
"faulthandler.dump_tracebacks_later")
timeout = None
elif o == '--wait':
input("Press any key to continue...")
else:
print(("No handler for option {}. Please report this as a bug "
"at http://bugs.python.org.").format(o), file=sys.stderr)
sys.exit(1)
if single and fromfile:
usage("-s and -f don't go together!")
if use_mp and trace:
usage("-T and -j don't go together!")
if use_mp and findleaks:
usage("-l and -j don't go together!")
if use_mp and support.max_memuse:
usage("-M and -j don't go together!")
if failfast and not (verbose or verbose3):
usage("-G/--failfast needs either -v or -W")
good = []
bad = []
skipped = []
resource_denieds = []
environment_changed = []
interrupted = False
if findleaks:
try:
import gc
except ImportError:
print('No GC available, disabling findleaks.')
findleaks = False
else:
# Uncomment the line below to report garbage that is not
# freeable by reference counting alone. By default only
# garbage that is not collectable by the GC is reported.
#gc.set_debug(gc.DEBUG_SAVEALL)
found_garbage = []
if single:
filename = os.path.join(TEMPDIR, 'pynexttest')
try:
fp = open(filename, 'r')
next_test = fp.read().strip()
tests = [next_test]
fp.close()
except IOError:
pass
if fromfile:
tests = []
fp = open(os.path.join(support.SAVEDCWD, fromfile))
count_pat = re.compile(r'\[\s*\d+/\s*\d+\]')
for line in fp:
line = count_pat.sub('', line)
guts = line.split() # assuming no test has whitespace in its name
if guts and not guts[0].startswith('#'):
tests.extend(guts)
fp.close()
# Strip .py extensions.
removepy(args)
removepy(tests)
stdtests = STDTESTS[:]
nottests = NOTTESTS.copy()
if exclude:
for arg in args:
if arg in stdtests:
stdtests.remove(arg)
nottests.add(arg)
args = []
# For a partial run, we do not need to clutter the output.
if verbose or header or not (quiet or single != 1 or tests or args):
# Print basic platform information
print("==", platform.python_implementation(), *sys.version.split())
print("== ", platform.platform(aliased=True),
"%s-endian" % sys.byteorder)
print("== ", os.getcwd())
print("Testing with flags:", sys.flags)
# if testdir is set, then we are not running the python tests suite, so
# don't add default tests to be executed or skipped (pass empty values)
if testdir:
alltests = findtests(testdir, list(), set())
else:
alltests = findtests(testdir, stdtests, nottests)
selected = tests or args or alltests
if single:
first_selected = selected[0]
index_selected = alltests.index(first_selected)
if index_selected + single > len(alltests):
single = len(alltests) - index_selected
selected = alltests[index_selected:index_selected+single]
try:
next_single_test = alltests[index_selected+single]
except IndexError:
next_single_test = None
# Remove all the selected tests that precede start if it's set.
if start:
try:
del selected[:selected.index(start)]
except ValueError:
print("Couldn't find starting test (%s), using all tests" % start)
if randomize:
random.seed(random_seed)
print("Using random seed", random_seed)
random.shuffle(selected)
if trace:
import trace, tempfile
tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
tempfile.gettempdir()],
trace=False, count=True)
test_times = []
support.verbose = verbose # Tell tests to be moderately quiet
support.use_resources = use_resources
    save_modules = set(sys.modules)  # snapshot; a live keys() view would track later imports
def accumulate_result(test, result):
ok, test_time = result
test_times.append((test_time, test))
if ok == PASSED:
good.append(test)
elif ok == FAILED:
bad.append(test)
elif ok == ENV_CHANGED:
environment_changed.append(test)
elif ok == SKIPPED:
skipped.append(test)
elif ok == RESOURCE_DENIED:
skipped.append(test)
resource_denieds.append(test)
if forever:
def test_forever(tests=list(selected)):
while True:
for test in tests:
yield test
if bad:
return
tests = test_forever()
test_count = ''
test_count_width = 3
else:
tests = iter(selected)
test_count = '/{}'.format(len(selected))
test_count_width = len(test_count) - 1
if use_mp:
try:
from threading import Thread
except ImportError:
print("Multiprocess option requires thread support")
sys.exit(2)
from queue import Queue
from subprocess import Popen, PIPE
debug_output_pat = re.compile(r"\[\d+ refs\]$")
output = Queue()
pending = MultiprocessTests(tests)
opt_args = support.args_from_interpreter_flags()
base_cmd = [sys.executable] + opt_args + ['-m', 'test.regrtest']
def work():
# A worker thread.
try:
while True:
try:
test = next(pending)
except StopIteration:
output.put((None, None, None, None))
return
args_tuple = (
(test, verbose, quiet),
dict(huntrleaks=huntrleaks, use_resources=use_resources,
debug=debug, output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
)
# -E is needed by some tests, e.g. test_import
# Running the child from the same working directory ensures
# that TEMPDIR for the child is the same when
# sysconfig.is_python_build() is true. See issue 15300.
popen = Popen(base_cmd + ['--slaveargs', json.dumps(args_tuple)],
stdout=PIPE, stderr=PIPE,
universal_newlines=True,
close_fds=(os.name != 'nt'),
cwd=support.SAVEDCWD)
stdout, stderr = popen.communicate()
retcode = popen.wait()
# Strip last refcount output line if it exists, since it
# comes from the shutdown of the interpreter in the subcommand.
stderr = debug_output_pat.sub("", stderr)
stdout, _, result = stdout.strip().rpartition("\n")
if retcode != 0:
result = (CHILD_ERROR, "Exit code %s" % retcode)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
return
if not result:
output.put((None, None, None, None))
return
result = json.loads(result)
output.put((test, stdout.rstrip(), stderr.rstrip(), result))
except BaseException:
output.put((None, None, None, None))
raise
workers = [Thread(target=work) for i in range(use_mp)]
for worker in workers:
worker.start()
finished = 0
test_index = 1
try:
while finished < use_mp:
test, stdout, stderr, result = output.get()
if test is None:
finished += 1
continue
accumulate_result(test, result)
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count,
len(bad), test))
if stdout:
print(stdout)
if stderr:
print(stderr, file=sys.stderr)
sys.stdout.flush()
sys.stderr.flush()
if result[0] == INTERRUPTED:
raise KeyboardInterrupt
if result[0] == CHILD_ERROR:
raise Exception("Child error on {}: {}".format(test, result[1]))
test_index += 1
except KeyboardInterrupt:
interrupted = True
pending.interrupted = True
for worker in workers:
worker.join()
else:
for test_index, test in enumerate(tests, 1):
if not quiet:
fmt = "[{1:{0}}{2}/{3}] {4}" if bad else "[{1:{0}}{2}] {4}"
print(fmt.format(
test_count_width, test_index, test_count, len(bad), test))
sys.stdout.flush()
if trace:
# If we're tracing code coverage, then we don't exit with status
# if on a false return value from main.
tracer.runctx('runtest(test, verbose, quiet, timeout=timeout)',
globals=globals(), locals=vars())
else:
try:
result = runtest(test, verbose, quiet, huntrleaks, debug,
output_on_failure=verbose3,
timeout=timeout, failfast=failfast,
match_tests=match_tests)
accumulate_result(test, result)
except KeyboardInterrupt:
interrupted = True
break
except:
raise
if findleaks:
gc.collect()
if gc.garbage:
print("Warning: test created", len(gc.garbage), end=' ')
print("uncollectable object(s).")
# move the uncollectable objects somewhere so we don't see
# them again
found_garbage.extend(gc.garbage)
del gc.garbage[:]
# Unload the newly imported modules (best effort finalization)
            for module in list(sys.modules):  # copy: unload() mutates sys.modules during iteration
                if module not in save_modules and module.startswith("test."):
                    support.unload(module)
if interrupted:
# print a newline after ^C
print()
print("Test suite interrupted by signal SIGINT.")
omitted = set(selected) - set(good) - set(bad) - set(skipped)
print(count(len(omitted), "test"), "omitted:")
printlist(omitted)
if good and not quiet:
if not bad and not skipped and not interrupted and len(good) > 1:
print("All", end=' ')
print(count(len(good), "test"), "OK.")
if print_slow:
test_times.sort(reverse=True)
print("10 slowest tests:")
for time, test in test_times[:10]:
print("%s: %.1fs" % (test, time))
if bad:
bad = sorted(set(bad) - set(environment_changed))
if bad:
print(count(len(bad), "test"), "failed:")
printlist(bad)
if environment_changed:
print("{} altered the execution environment:".format(
count(len(environment_changed), "test")))
printlist(environment_changed)
if skipped and not quiet:
print(count(len(skipped), "test"), "skipped:")
printlist(skipped)
e = _ExpectedSkips()
plat = sys.platform
if e.isvalid():
surprise = set(skipped) - e.getexpected() - set(resource_denieds)
if surprise:
print(count(len(surprise), "skip"), \
"unexpected on", plat + ":")
printlist(surprise)
else:
print("Those skips are all expected on", plat + ".")
else:
print("Ask someone to teach regrtest.py about which tests are")
print("expected to get skipped on", plat + ".")
if verbose2 and bad:
print("Re-running failed tests in verbose mode")
for test in bad:
print("Re-running test %r in verbose mode" % test)
sys.stdout.flush()
try:
verbose = True
ok = runtest(test, True, quiet, huntrleaks, debug, timeout=timeout)
except KeyboardInterrupt:
# print a newline separate from the ^C
print()
break
except:
raise
if single:
if next_single_test:
with open(filename, 'w') as fp:
fp.write(next_single_test + '\n')
else:
os.unlink(filename)
if trace:
r = tracer.results()
r.write_results(show_missing=True, summary=True, coverdir=coverdir)
if runleaks:
os.system("leaks %d" % os.getpid())
sys.exit(len(bad) > 0 or interrupted)
# small set of tests to determine if we have a basically functioning interpreter
# (i.e. if any of these fail, then anything else is likely to follow)
STDTESTS = [
'test_grammar',
'test_opcodes',
'test_dict',
'test_builtin',
'test_exceptions',
'test_types',
'test_unittest',
'test_doctest',
'test_doctest2',
'test_support'
]
# set of tests that we don't want to be executed when using regrtest
NOTTESTS = set()
def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
"""Return a list of all applicable test modules."""
testdir = findtestdir(testdir)
names = os.listdir(testdir)
tests = []
others = set(stdtests) | nottests
for name in names:
mod, ext = os.path.splitext(name)
if mod[:5] == "test_" and ext in (".py", "") and mod not in others:
tests.append(mod)
return stdtests + sorted(tests)
# We do not use a generator so multiple threads can call next().
class MultiprocessTests(object):
"""A thread-safe iterator over tests for multiprocess mode."""
def __init__(self, tests):
self.interrupted = False
self.lock = threading.Lock()
self.tests = tests
def __iter__(self):
return self
def __next__(self):
with self.lock:
if self.interrupted:
raise StopIteration('tests interrupted')
return next(self.tests)
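# Sharing a bare generator between worker threads would not be safe: two
# threads calling next() on the same generator object can raise
# "ValueError: generator already executing", e.g. (illustrative only):
#
#     gen = (t for t in tests)
#     # thread A: next(gen); thread B: next(gen)  -> one may get ValueError
#
# The lock above serializes __next__ (the tests iterable is a generator
# under --forever) and also makes the 'interrupted' check atomic with
# fetching the next test.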
def replace_stdout():
"""Set stdout encoder error handler to backslashreplace (as stderr error
handler) to avoid UnicodeEncodeError when printing a traceback"""
import atexit
stdout = sys.stdout
sys.stdout = open(stdout.fileno(), 'w',
encoding=stdout.encoding,
errors="backslashreplace",
closefd=False,
newline='\n')
def restore_stdout():
sys.stdout.close()
sys.stdout = stdout
atexit.register(restore_stdout)
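# Example: with errors="backslashreplace", printing '\u20ac' to an
# ASCII-encoded stdout writes the six characters \u20ac instead of
# raising UnicodeEncodeError.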
def runtest(test, verbose, quiet,
huntrleaks=False, debug=False, use_resources=None,
output_on_failure=False, failfast=False, match_tests=None,
timeout=None):
"""Run a single test.
test -- the name of the test
verbose -- if true, print more messages
quiet -- if true, don't print 'skipped' messages (probably redundant)
test_times -- a list of (time, test_name) pairs
huntrleaks -- run multiple times to test for leaks; requires a debug
build; a triple corresponding to -R's three arguments
output_on_failure -- if true, display test output on failure
timeout -- dump the traceback and exit if a test takes more than
timeout seconds
Returns one of the test result constants:
INTERRUPTED KeyboardInterrupt when run under -j
RESOURCE_DENIED test skipped because resource denied
SKIPPED test skipped for some other reason
ENV_CHANGED test failed because it changed the execution environment
FAILED test failed
PASSED test passed
"""
if use_resources is not None:
support.use_resources = use_resources
use_timeout = (timeout is not None)
if use_timeout:
faulthandler.dump_tracebacks_later(timeout, exit=True)
try:
support.match_tests = match_tests
if failfast:
support.failfast = True
if output_on_failure:
support.verbose = True
# Reuse the same instance to all calls to runtest(). Some
# tests keep a reference to sys.stdout or sys.stderr
# (eg. test_argparse).
if runtest.stringio is None:
stream = io.StringIO()
runtest.stringio = stream
else:
stream = runtest.stringio
stream.seek(0)
stream.truncate()
orig_stdout = sys.stdout
orig_stderr = sys.stderr
try:
sys.stdout = stream
sys.stderr = stream
result = runtest_inner(test, verbose, quiet, huntrleaks,
debug, display_failure=False)
if result[0] == FAILED:
output = stream.getvalue()
orig_stderr.write(output)
orig_stderr.flush()
finally:
sys.stdout = orig_stdout
sys.stderr = orig_stderr
else:
support.verbose = verbose # Tell tests to be moderately quiet
result = runtest_inner(test, verbose, quiet, huntrleaks, debug,
display_failure=not verbose)
return result
finally:
if use_timeout:
faulthandler.cancel_dump_tracebacks_later()
cleanup_test_droppings(test, verbose)
runtest.stringio = None
# Unit tests are supposed to leave the execution environment unchanged
# once they complete. But sometimes tests have bugs, especially when
# tests fail, and the changes to environment go on to mess up other
# tests. This can cause issues with buildbot stability, since tests
# are run in random order and so problems may appear to come and go.
# There are a few things we can save and restore to mitigate this, and
# the following context manager handles this task.
class saved_test_environment:
"""Save bits of the test environment and restore them at block exit.
with saved_test_environment(testname, verbose, quiet):
#stuff
Unless quiet is True, a warning is printed to stderr if any of
the saved items was changed by the test. The attribute 'changed'
is initially False, but is set to True if a change is detected.
If verbose is more than 1, the before and after state of changed
items is also printed.
"""
changed = False
def __init__(self, testname, verbose=0, quiet=False):
self.testname = testname
self.verbose = verbose
self.quiet = quiet
# To add things to save and restore, add a name XXX to the resources list
# and add corresponding get_XXX/restore_XXX functions. get_XXX should
# return the value to be saved and compared against a second call to the
# get function when test execution completes. restore_XXX should accept
# the saved value and restore the resource using it. It will be called if
# and only if a change in the value is detected.
#
    # Note: XXX will have any '.' replaced with '_' characters when determining
    # the corresponding method names; an illustrative sketch follows the
    # resources tuple below.
resources = ('sys.argv', 'cwd', 'sys.stdin', 'sys.stdout', 'sys.stderr',
'os.environ', 'sys.path', 'sys.path_hooks', '__import__',
'warnings.filters', 'asyncore.socket_map',
'logging._handlers', 'logging._handlerList', 'sys.gettrace',
'sys.warnoptions', 'threading._dangling',
'multiprocessing.process._dangling',
'sysconfig._CONFIG_VARS', 'sysconfig._INSTALL_SCHEMES',
'support.TESTFN',
)
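    # Illustrative sketch of the protocol above, using a hypothetical
    # resource that is not in the tuple: guarding sys.dont_write_bytecode
    # would mean adding 'sys.dont_write_bytecode' to resources and defining
    #
    #     def get_sys_dont_write_bytecode(self):
    #         return sys.dont_write_bytecode
    #     def restore_sys_dont_write_bytecode(self, saved):
    #         sys.dont_write_bytecode = saved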
def get_sys_argv(self):
return id(sys.argv), sys.argv, sys.argv[:]
def restore_sys_argv(self, saved_argv):
sys.argv = saved_argv[1]
sys.argv[:] = saved_argv[2]
def get_cwd(self):
return os.getcwd()
def restore_cwd(self, saved_cwd):
os.chdir(saved_cwd)
def get_sys_stdout(self):
return sys.stdout
def restore_sys_stdout(self, saved_stdout):
sys.stdout = saved_stdout
def get_sys_stderr(self):
return sys.stderr
def restore_sys_stderr(self, saved_stderr):
sys.stderr = saved_stderr
def get_sys_stdin(self):
return sys.stdin
def restore_sys_stdin(self, saved_stdin):
sys.stdin = saved_stdin
def get_os_environ(self):
return id(os.environ), os.environ, dict(os.environ)
def restore_os_environ(self, saved_environ):
os.environ = saved_environ[1]
os.environ.clear()
os.environ.update(saved_environ[2])
def get_sys_path(self):
return id(sys.path), sys.path, sys.path[:]
def restore_sys_path(self, saved_path):
sys.path = saved_path[1]
sys.path[:] = saved_path[2]
def get_sys_path_hooks(self):
return id(sys.path_hooks), sys.path_hooks, sys.path_hooks[:]
def restore_sys_path_hooks(self, saved_hooks):
sys.path_hooks = saved_hooks[1]
sys.path_hooks[:] = saved_hooks[2]
def get_sys_gettrace(self):
return sys.gettrace()
def restore_sys_gettrace(self, trace_fxn):
sys.settrace(trace_fxn)
def get___import__(self):
return builtins.__import__
def restore___import__(self, import_):
builtins.__import__ = import_
def get_warnings_filters(self):
return id(warnings.filters), warnings.filters, warnings.filters[:]
def restore_warnings_filters(self, saved_filters):
warnings.filters = saved_filters[1]
warnings.filters[:] = saved_filters[2]
def get_asyncore_socket_map(self):
asyncore = sys.modules.get('asyncore')
# XXX Making a copy keeps objects alive until __exit__ gets called.
return asyncore and asyncore.socket_map.copy() or {}
def restore_asyncore_socket_map(self, saved_map):
asyncore = sys.modules.get('asyncore')
if asyncore is not None:
asyncore.close_all(ignore_all=True)
asyncore.socket_map.update(saved_map)
def get_shutil_archive_formats(self):
# we could call get_archives_formats() but that only returns the
# registry keys; we want to check the values too (the functions that
# are registered)
return shutil._ARCHIVE_FORMATS, shutil._ARCHIVE_FORMATS.copy()
def restore_shutil_archive_formats(self, saved):
shutil._ARCHIVE_FORMATS = saved[0]
shutil._ARCHIVE_FORMATS.clear()
shutil._ARCHIVE_FORMATS.update(saved[1])
def get_shutil_unpack_formats(self):
return shutil._UNPACK_FORMATS, shutil._UNPACK_FORMATS.copy()
def restore_shutil_unpack_formats(self, saved):
shutil._UNPACK_FORMATS = saved[0]
shutil._UNPACK_FORMATS.clear()
shutil._UNPACK_FORMATS.update(saved[1])
def get_logging__handlers(self):
# _handlers is a WeakValueDictionary
return id(logging._handlers), logging._handlers, logging._handlers.copy()
def restore_logging__handlers(self, saved_handlers):
# Can't easily revert the logging state
pass
def get_logging__handlerList(self):
# _handlerList is a list of weakrefs to handlers
return id(logging._handlerList), logging._handlerList, logging._handlerList[:]
def restore_logging__handlerList(self, saved_handlerList):
# Can't easily revert the logging state
pass
def get_sys_warnoptions(self):
return id(sys.warnoptions), sys.warnoptions, sys.warnoptions[:]
def restore_sys_warnoptions(self, saved_options):
sys.warnoptions = saved_options[1]
sys.warnoptions[:] = saved_options[2]
# Controlling dangling references to Thread objects can make it easier
# to track reference leaks.
def get_threading__dangling(self):
if not threading:
return None
# This copies the weakrefs without making any strong reference
return threading._dangling.copy()
def restore_threading__dangling(self, saved):
if not threading:
return
threading._dangling.clear()
threading._dangling.update(saved)
# Same for Process objects
def get_multiprocessing_process__dangling(self):
if not multiprocessing:
return None
# This copies the weakrefs without making any strong reference
return multiprocessing.process._dangling.copy()
def restore_multiprocessing_process__dangling(self, saved):
if not multiprocessing:
return
multiprocessing.process._dangling.clear()
multiprocessing.process._dangling.update(saved)
def get_sysconfig__CONFIG_VARS(self):
# make sure the dict is initialized
sysconfig.get_config_var('prefix')
return (id(sysconfig._CONFIG_VARS), sysconfig._CONFIG_VARS,
dict(sysconfig._CONFIG_VARS))
def restore_sysconfig__CONFIG_VARS(self, saved):
sysconfig._CONFIG_VARS = saved[1]
sysconfig._CONFIG_VARS.clear()
sysconfig._CONFIG_VARS.update(saved[2])
def get_sysconfig__INSTALL_SCHEMES(self):
return (id(sysconfig._INSTALL_SCHEMES), sysconfig._INSTALL_SCHEMES,
sysconfig._INSTALL_SCHEMES.copy())
def restore_sysconfig__INSTALL_SCHEMES(self, saved):
sysconfig._INSTALL_SCHEMES = saved[1]
sysconfig._INSTALL_SCHEMES.clear()
sysconfig._INSTALL_SCHEMES.update(saved[2])
def get_support_TESTFN(self):
if os.path.isfile(support.TESTFN):
result = 'f'
elif os.path.isdir(support.TESTFN):
result = 'd'
else:
result = None
return result
def restore_support_TESTFN(self, saved_value):
if saved_value is None:
if os.path.isfile(support.TESTFN):
os.unlink(support.TESTFN)
elif os.path.isdir(support.TESTFN):
shutil.rmtree(support.TESTFN)
def resource_info(self):
for name in self.resources:
method_suffix = name.replace('.', '_')
get_name = 'get_' + method_suffix
restore_name = 'restore_' + method_suffix
yield name, getattr(self, get_name), getattr(self, restore_name)
def __enter__(self):
self.saved_values = dict((name, get()) for name, get, restore
in self.resource_info())
return self
def __exit__(self, exc_type, exc_val, exc_tb):
saved_values = self.saved_values
del self.saved_values
for name, get, restore in self.resource_info():
current = get()
original = saved_values.pop(name)
# Check for changes to the resource's value
if current != original:
self.changed = True
restore(original)
if not self.quiet:
print("Warning -- {} was modified by {}".format(
name, self.testname),
file=sys.stderr)
if self.verbose > 1:
print(" Before: {}\n After: {} ".format(
original, current),
file=sys.stderr)
return False
def runtest_inner(test, verbose, quiet,
huntrleaks=False, debug=False, display_failure=True):
support.unload(test)
test_time = 0.0
refleak = False # True if the test leaked references.
try:
if test.startswith('test.'):
abstest = test
else:
# Always import it from the test package
abstest = 'test.' + test
with saved_test_environment(test, verbose, quiet) as environment:
start_time = time.time()
the_package = __import__(abstest, globals(), locals(), [])
the_module = getattr(the_package, test)
# If the test has a test_main, that will run the appropriate
# tests. If not, use normal unittest test loading.
test_runner = getattr(the_module, "test_main", None)
if test_runner is None:
tests = unittest.TestLoader().loadTestsFromModule(the_module)
test_runner = lambda: support.run_unittest(tests)
test_runner()
if huntrleaks:
refleak = dash_R(the_module, test, test_runner,
huntrleaks)
test_time = time.time() - start_time
except support.ResourceDenied as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return RESOURCE_DENIED, test_time
except unittest.SkipTest as msg:
if not quiet:
print(test, "skipped --", msg)
sys.stdout.flush()
return SKIPPED, test_time
except KeyboardInterrupt:
raise
except support.TestFailed as msg:
if display_failure:
print("test", test, "failed --", msg, file=sys.stderr)
else:
print("test", test, "failed", file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
except:
msg = traceback.format_exc()
print("test", test, "crashed --", msg, file=sys.stderr)
sys.stderr.flush()
return FAILED, test_time
else:
if refleak:
return FAILED, test_time
if environment.changed:
return ENV_CHANGED, test_time
return PASSED, test_time
def cleanup_test_droppings(testname, verbose):
import shutil
import stat
import gc
# First kill any dangling references to open files etc.
# This can also issue some ResourceWarnings which would otherwise get
# triggered during the following test run, and possibly produce failures.
gc.collect()
# Try to clean up junk commonly left behind. While tests shouldn't leave
# any files or directories behind, when a test fails that can be tedious
# for it to arrange. The consequences can be especially nasty on Windows,
# since if a test leaves a file open, it cannot be deleted by name (while
# there's nothing we can do about that here either, we can display the
# name of the offending test, which is a real help).
for name in (support.TESTFN,
"db_home",
):
if not os.path.exists(name):
continue
if os.path.isdir(name):
kind, nuker = "directory", shutil.rmtree
elif os.path.isfile(name):
kind, nuker = "file", os.unlink
else:
raise SystemError("os.path says %r exists but is neither "
"directory nor file" % name)
if verbose:
print("%r left behind %s %r" % (testname, kind, name))
try:
# if we have chmod, fix possible permissions problems
# that might prevent cleanup
if (hasattr(os, 'chmod')):
os.chmod(name, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
nuker(name)
except Exception as msg:
print(("%r left behind %s %r and it couldn't be "
"removed: %s" % (testname, kind, name, msg)), file=sys.stderr)
def dash_R(the_module, test, indirect_test, huntrleaks):
"""Run a test multiple times, looking for reference leaks.
Returns:
False if the test didn't leak references; True if we detected refleaks.
"""
# This code is hackish and inelegant, but it seems to do the job.
import copyreg
import collections.abc
if not hasattr(sys, 'gettotalrefcount'):
raise Exception("Tracking reference leaks requires a debug build "
"of Python")
# Save current values for dash_R_cleanup() to restore.
fs = warnings.filters[:]
ps = copyreg.dispatch_table.copy()
pic = sys.path_importer_cache.copy()
try:
import zipimport
except ImportError:
zdc = None # Run unmodified on platforms without zipimport support
else:
zdc = zipimport._zip_directory_cache.copy()
abcs = {}
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
abcs[obj] = obj._abc_registry.copy()
if indirect_test:
def run_the_test():
indirect_test()
else:
def run_the_test():
del sys.modules[the_module.__name__]
exec('import ' + the_module.__name__)
deltas = []
nwarmup, ntracked, fname = huntrleaks
fname = os.path.join(support.SAVEDCWD, fname)
repcount = nwarmup + ntracked
print("beginning", repcount, "repetitions", file=sys.stderr)
print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr)
sys.stderr.flush()
dash_R_cleanup(fs, ps, pic, zdc, abcs)
for i in range(repcount):
rc_before = sys.gettotalrefcount()
run_the_test()
sys.stderr.write('.')
sys.stderr.flush()
dash_R_cleanup(fs, ps, pic, zdc, abcs)
rc_after = sys.gettotalrefcount()
if i >= nwarmup:
deltas.append(rc_after - rc_before)
print(file=sys.stderr)
if any(deltas):
msg = '%s leaked %s references, sum=%s' % (test, deltas, sum(deltas))
print(msg, file=sys.stderr)
sys.stderr.flush()
with open(fname, "a") as refrep:
print(msg, file=refrep)
refrep.flush()
return True
return False
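# Example of the report format above: with huntrleaks=(5, 4, 'reflog.txt'),
# a leaky test might print (illustrative numbers only)
#     test_foo leaked [3, 3, 3, 3] references, sum=12
# where each list entry is rc_after - rc_before for one of the four
# tracked runs; the five warmup runs are not recorded.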
def dash_R_cleanup(fs, ps, pic, zdc, abcs):
import gc, copyreg
import _strptime, linecache
import urllib.parse, urllib.request, mimetypes, doctest
import struct, filecmp, collections.abc
from distutils.dir_util import _path_created
from weakref import WeakSet
# Clear the warnings registry, so they can be displayed again
for mod in sys.modules.values():
if hasattr(mod, '__warningregistry__'):
del mod.__warningregistry__
# Restore some original values.
warnings.filters[:] = fs
copyreg.dispatch_table.clear()
copyreg.dispatch_table.update(ps)
sys.path_importer_cache.clear()
sys.path_importer_cache.update(pic)
try:
import zipimport
except ImportError:
pass # Run unmodified on platforms without zipimport support
else:
zipimport._zip_directory_cache.clear()
zipimport._zip_directory_cache.update(zdc)
# clear type cache
sys._clear_type_cache()
# Clear ABC registries, restoring previously saved ABC registries.
for abc in [getattr(collections.abc, a) for a in collections.abc.__all__]:
if not isabstract(abc):
continue
for obj in abc.__subclasses__() + [abc]:
obj._abc_registry = abcs.get(obj, WeakSet()).copy()
obj._abc_cache.clear()
obj._abc_negative_cache.clear()
# Flush standard output, so that buffered data is sent to the OS and
# associated Python objects are reclaimed.
for stream in (sys.stdout, sys.stderr, sys.__stdout__, sys.__stderr__):
if stream is not None:
stream.flush()
# Clear assorted module caches.
_path_created.clear()
re.purge()
_strptime._regex_cache.clear()
urllib.parse.clear_cache()
urllib.request.urlcleanup()
linecache.clearcache()
mimetypes._default_mime_types()
filecmp._cache.clear()
struct._clearcache()
doctest.master = None
try:
import ctypes
except ImportError:
# Don't worry about resetting the cache if ctypes is not supported
pass
else:
ctypes._reset_cache()
# Collect cyclic trash.
gc.collect()
def warm_caches():
# char cache
s = bytes(range(256))
for i in range(256):
s[i:i+1]
# unicode cache
x = [chr(i) for i in range(256)]
# int cache
x = list(range(-5, 257))
def findtestdir(path=None):
return path or os.path.dirname(__file__) or os.curdir
def removepy(names):
if not names:
return
for idx, name in enumerate(names):
basename, ext = os.path.splitext(name)
if ext == '.py':
names[idx] = basename
def count(n, word):
if n == 1:
return "%d %s" % (n, word)
else:
return "%d %ss" % (n, word)
def printlist(x, width=70, indent=4):
"""Print the elements of iterable x to stdout.
Optional arg width (default 70) is the maximum line length.
Optional arg indent (default 4) is the number of blanks with which to
begin each line.
"""
from textwrap import fill
blanks = ' ' * indent
# Print the sorted list: 'x' may be a '--random' list or a set()
print(fill(' '.join(str(elt) for elt in sorted(x)), width,
initial_indent=blanks, subsequent_indent=blanks))
# Map sys.platform to a string containing the basenames of tests
# expected to be skipped on that platform.
#
# Special cases:
# test_pep277
# The _ExpectedSkips constructor adds this to the set of expected
# skips if not os.path.supports_unicode_filenames.
# test_timeout
# Controlled by test_timeout.skip_expected. Requires the network
# resource and a socket module.
#
# Tests that are expected to be skipped everywhere except on one platform
# are also handled separately.
_expectations = (
('win32',
"""
test__locale
test_crypt
test_curses
test_dbm
test_devpoll
test_fcntl
test_fork1
test_epoll
test_dbm_gnu
test_dbm_ndbm
test_grp
test_ioctl
test_largefile
test_kqueue
test_openpty
test_ossaudiodev
test_pipes
test_poll
test_posix
test_pty
test_pwd
test_resource
test_signal
test_syslog
test_threadsignals
test_wait3
test_wait4
"""),
('linux',
"""
test_curses
test_devpoll
test_largefile
test_kqueue
test_ossaudiodev
"""),
('unixware',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('openunix',
"""
test_epoll
test_largefile
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_sundry
"""),
('sco_sv',
"""
test_asynchat
test_fork1
test_epoll
test_gettext
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_queue
test_sax
test_sundry
test_thread
test_threaded_import
test_threadedtempfile
test_threading
"""),
('darwin',
"""
test__locale
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_gdb
test_largefile
test_locale
test_minidom
test_ossaudiodev
test_poll
"""),
('sunos',
"""
test_curses
test_dbm
test_epoll
test_kqueue
test_dbm_gnu
test_gzip
test_openpty
test_zipfile
test_zlib
"""),
('hp-ux',
"""
test_curses
test_epoll
test_dbm_gnu
test_gzip
test_largefile
test_locale
test_kqueue
test_minidom
test_openpty
test_pyexpat
test_sax
test_zipfile
test_zlib
"""),
('cygwin',
"""
test_curses
test_dbm
test_devpoll
test_epoll
test_ioctl
test_kqueue
test_largefile
test_locale
test_ossaudiodev
test_socketserver
"""),
('os2emx',
"""
test_audioop
test_curses
test_epoll
test_kqueue
test_largefile
test_mmap
test_openpty
test_ossaudiodev
test_pty
test_resource
test_signal
"""),
('freebsd',
"""
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_pty
test_socketserver
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_timeout
test_urllibnet
test_multiprocessing
"""),
('aix',
"""
test_bz2
test_epoll
test_dbm_gnu
test_gzip
test_kqueue
test_ossaudiodev
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_zipimport
test_zlib
"""),
('openbsd',
"""
test_ctypes
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_normalization
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
('netbsd',
"""
test_ctypes
test_curses
test_devpoll
test_epoll
test_dbm_gnu
test_locale
test_ossaudiodev
test_pep277
test_tcl
test_tk
test_ttk_guionly
test_ttk_textonly
test_multiprocessing
"""),
)
class _ExpectedSkips:
def __init__(self):
import os.path
from test import test_timeout
self.valid = False
expected = None
for item in _expectations:
if sys.platform.startswith(item[0]):
expected = item[1]
break
if expected is not None:
self.expected = set(expected.split())
# These are broken tests, for now skipped on every platform.
# XXX Fix these!
self.expected.add('test_nis')
# expected to be skipped on every platform, even Linux
if not os.path.supports_unicode_filenames:
self.expected.add('test_pep277')
# doctest, profile and cProfile tests fail when the codec for the
# fs encoding isn't built in because PyUnicode_Decode() adds two
# calls into Python.
encs = ("utf-8", "latin-1", "ascii", "mbcs", "utf-16", "utf-32")
if sys.getfilesystemencoding().lower() not in encs:
self.expected.add('test_profile')
self.expected.add('test_cProfile')
self.expected.add('test_doctest')
if test_timeout.skip_expected:
self.expected.add('test_timeout')
if sys.platform != "win32":
# test_sqlite is only reliable on Windows where the library
# is distributed with Python
WIN_ONLY = {"test_unicode_file", "test_winreg",
"test_winsound", "test_startfile",
"test_sqlite", "test_msilib"}
self.expected |= WIN_ONLY
if sys.platform != 'sunos5':
self.expected.add('test_nis')
if support.python_is_optimized():
self.expected.add("test_gdb")
self.valid = True
def isvalid(self):
"Return true iff _ExpectedSkips knows about the current platform."
return self.valid
def getexpected(self):
"""Return set of test names we expect to skip on current platform.
self.isvalid() must be true.
"""
assert self.isvalid()
return self.expected
def _make_temp_dir_for_build(TEMPDIR):
# When tests are run from the Python build directory, it is best practice
# to keep the test files in a subfolder. It eases the cleanup of leftover
# files using command "make distclean".
if sysconfig.is_python_build():
TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
TEMPDIR = os.path.abspath(TEMPDIR)
try:
os.mkdir(TEMPDIR)
except FileExistsError:
pass
# Define a writable temp dir that will be used as cwd while running
# the tests. The name of the dir includes the pid to allow parallel
# testing (see the -j option).
TESTCWD = 'test_python_{}'.format(os.getpid())
TESTCWD = os.path.join(TEMPDIR, TESTCWD)
return TEMPDIR, TESTCWD
if __name__ == '__main__':
# Remove regrtest.py's own directory from the module search path. Despite
# the elimination of implicit relative imports, this is still needed to
# ensure that submodules of the test package do not inappropriately appear
# as top-level modules even when people (or buildbots!) invoke regrtest.py
# directly instead of using the -m switch
mydir = os.path.abspath(os.path.normpath(os.path.dirname(sys.argv[0])))
i = len(sys.path)
while i >= 0:
i -= 1
if os.path.abspath(os.path.normpath(sys.path[i])) == mydir:
del sys.path[i]
# findtestdir() gets the dirname out of __file__, so we have to make it
# absolute before changing the working directory.
# For example __file__ may be relative when running trace or profile.
# See issue #9323.
__file__ = os.path.abspath(__file__)
# sanity check
assert __file__ == os.path.abspath(sys.argv[0])
TEMPDIR, TESTCWD = _make_temp_dir_for_build(TEMPDIR)
# Run the tests in a context manager that temporarily changes the CWD to a
# temporary and writable directory. If it's not possible to create or
# change the CWD, the original CWD will be used. The original CWD is
# available from support.SAVEDCWD.
with support.temp_cwd(TESTCWD, quiet=True):
main()
| gpl-3.0 |
JoeCao/shadowsocks | shadowsocks/tcprelay.py | 922 | 28870 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import time
import socket
import errno
import struct
import logging
import traceback
import random
from shadowsocks import encrypt, eventloop, shell, common
from shadowsocks.common import parse_header
# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time
TIMEOUTS_CLEAN_SIZE = 512
MSG_FASTOPEN = 0x20000000
# SOCKS command definition
CMD_CONNECT = 1
CMD_BIND = 2
CMD_UDP_ASSOCIATE = 3
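# A SOCKS5 request (RFC 1928) is laid out as:
#     +-----+-----+-------+------+----------+----------+
#     | VER | CMD |  RSV  | ATYP | DST.ADDR | DST.PORT |
#     +-----+-----+-------+------+----------+----------+
#     |  1  |  1  | X'00' |  1   | Variable |    2     |
#     +-----+-----+-------+------+----------+----------+
# e.g. a CONNECT to example.com:80 by domain name begins
#     b'\x05\x01\x00\x03' + b'\x0bexample.com' + b'\x00\x50'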
# for each opening port, we have a TCP Relay
# for each connection, we have a TCP Relay Handler to handle the connection
# for each handler, we have 2 sockets:
# local: connected to the client
# remote: connected to remote server
# for each handler, it could be at one of several stages:
# as sslocal:
# stage 0 SOCKS hello received from local, send hello to local
# stage 1 addr received from local, query DNS for remote
# stage 2 UDP assoc
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
# as ssserver:
# stage 0 just jump to stage 1
# stage 1 addr received from local, query DNS for remote
# stage 3 DNS resolved, connect to remote
# stage 4 still connecting, more data from local received
# stage 5 remote connected, piping local and remote
STAGE_INIT = 0
STAGE_ADDR = 1
STAGE_UDP_ASSOC = 2
STAGE_DNS = 3
STAGE_CONNECTING = 4
STAGE_STREAM = 5
STAGE_DESTROYED = -1
# for each handler, we have 2 stream directions:
# upstream: from client to server direction
# read local and write to remote
# downstream: from server to client direction
# read remote and write to local
STREAM_UP = 0
STREAM_DOWN = 1
# for each stream, it's waiting for reading, or writing, or both
WAIT_STATUS_INIT = 0
WAIT_STATUS_READING = 1
WAIT_STATUS_WRITING = 2
WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING
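# These are bit flags: WAIT_STATUS_READWRITING == 1 | 2 == 3, and the
# handler tests membership with '&', e.g.
#     if self._upstream_status & WAIT_STATUS_READING:
#         event |= eventloop.POLL_IN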
BUF_SIZE = 32 * 1024
class TCPRelayHandler(object):
def __init__(self, server, fd_to_handlers, loop, local_sock, config,
dns_resolver, is_local):
self._server = server
self._fd_to_handlers = fd_to_handlers
self._loop = loop
self._local_sock = local_sock
self._remote_sock = None
self._config = config
self._dns_resolver = dns_resolver
# TCP Relay works as either sslocal or ssserver
# if is_local, this is sslocal
self._is_local = is_local
self._stage = STAGE_INIT
self._encryptor = encrypt.Encryptor(config['password'],
config['method'])
self._fastopen_connected = False
self._data_to_write_to_local = []
self._data_to_write_to_remote = []
self._upstream_status = WAIT_STATUS_READING
self._downstream_status = WAIT_STATUS_INIT
self._client_address = local_sock.getpeername()[:2]
self._remote_address = None
if 'forbidden_ip' in config:
self._forbidden_iplist = config['forbidden_ip']
else:
self._forbidden_iplist = None
if is_local:
self._chosen_server = self._get_a_server()
fd_to_handlers[local_sock.fileno()] = self
local_sock.setblocking(False)
local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR,
self._server)
self.last_activity = 0
self._update_activity()
def __hash__(self):
# default __hash__ is id / 16
# we want to eliminate collisions
return id(self)
@property
def remote_address(self):
return self._remote_address
def _get_a_server(self):
server = self._config['server']
server_port = self._config['server_port']
if type(server_port) == list:
server_port = random.choice(server_port)
if type(server) == list:
server = random.choice(server)
logging.debug('chosen server: %s:%d', server, server_port)
return server, server_port
def _update_activity(self, data_len=0):
# tell the TCP Relay we have activities recently
# else it will think we are inactive and timed out
self._server.update_activity(self, data_len)
def _update_stream(self, stream, status):
# update a stream to a new waiting status
# check if status is changed
# only update if dirty
dirty = False
if stream == STREAM_DOWN:
if self._downstream_status != status:
self._downstream_status = status
dirty = True
elif stream == STREAM_UP:
if self._upstream_status != status:
self._upstream_status = status
dirty = True
if dirty:
if self._local_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
if self._upstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
self._loop.modify(self._local_sock, event)
if self._remote_sock:
event = eventloop.POLL_ERR
if self._downstream_status & WAIT_STATUS_READING:
event |= eventloop.POLL_IN
if self._upstream_status & WAIT_STATUS_WRITING:
event |= eventloop.POLL_OUT
self._loop.modify(self._remote_sock, event)
def _write_to_sock(self, data, sock):
# write data to sock
# if only some of the data are written, put remaining in the buffer
# and update the stream to wait for writing
if not data or not sock:
return False
uncomplete = False
try:
l = len(data)
s = sock.send(data)
if s < l:
data = data[s:]
uncomplete = True
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
uncomplete = True
else:
shell.print_exception(e)
self.destroy()
return False
if uncomplete:
if sock == self._local_sock:
self._data_to_write_to_local.append(data)
self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING)
elif sock == self._remote_sock:
self._data_to_write_to_remote.append(data)
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
else:
logging.error('write_all_to_sock:unknown socket')
else:
if sock == self._local_sock:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
elif sock == self._remote_sock:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
else:
logging.error('write_all_to_sock:unknown socket')
return True
def _handle_stage_connecting(self, data):
if self._is_local:
data = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data)
if self._is_local and not self._fastopen_connected and \
self._config['fast_open']:
            # for sslocal with fast open enabled, we wait for the first
            # chunk of data and use sendto() with MSG_FASTOPEN to connect
try:
# only connect once
self._fastopen_connected = True
remote_sock = \
self._create_remote_socket(self._chosen_server[0],
self._chosen_server[1])
self._loop.add(remote_sock, eventloop.POLL_ERR, self._server)
data = b''.join(self._data_to_write_to_remote)
l = len(data)
s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server)
if s < l:
data = data[s:]
self._data_to_write_to_remote = [data]
else:
self._data_to_write_to_remote = []
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == errno.EINPROGRESS:
# in this case data is not sent at all
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
elif eventloop.errno_from_exception(e) == errno.ENOTCONN:
logging.error('fast open not supported on this OS')
self._config['fast_open'] = False
self.destroy()
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _handle_stage_addr(self, data):
try:
if self._is_local:
cmd = common.ord(data[1])
if cmd == CMD_UDP_ASSOCIATE:
logging.debug('UDP associate')
if self._local_sock.family == socket.AF_INET6:
header = b'\x05\x00\x00\x04'
else:
header = b'\x05\x00\x00\x01'
addr, port = self._local_sock.getsockname()[:2]
addr_to_send = socket.inet_pton(self._local_sock.family,
addr)
port_to_send = struct.pack('>H', port)
self._write_to_sock(header + addr_to_send + port_to_send,
self._local_sock)
self._stage = STAGE_UDP_ASSOC
# just wait for the client to disconnect
return
elif cmd == CMD_CONNECT:
# just trim VER CMD RSV
data = data[3:]
else:
logging.error('unknown command %d', cmd)
self.destroy()
return
header_result = parse_header(data)
if header_result is None:
raise Exception('can not parse header')
addrtype, remote_addr, remote_port, header_length = header_result
logging.info('connecting %s:%d from %s:%d' %
(common.to_str(remote_addr), remote_port,
self._client_address[0], self._client_address[1]))
self._remote_address = (common.to_str(remote_addr), remote_port)
# pause reading
self._update_stream(STREAM_UP, WAIT_STATUS_WRITING)
self._stage = STAGE_DNS
if self._is_local:
# forward address to remote
self._write_to_sock((b'\x05\x00\x00\x01'
b'\x00\x00\x00\x00\x10\x10'),
self._local_sock)
data_to_send = self._encryptor.encrypt(data)
self._data_to_write_to_remote.append(data_to_send)
                # note: resolve() may invoke _handle_dns_resolved synchronously
self._dns_resolver.resolve(self._chosen_server[0],
self._handle_dns_resolved)
else:
if len(data) > header_length:
self._data_to_write_to_remote.append(data[header_length:])
                # note: resolve() may invoke _handle_dns_resolved synchronously
self._dns_resolver.resolve(remote_addr,
self._handle_dns_resolved)
except Exception as e:
self._log_error(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
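    # For reference, the SOCKS5 request parsed above has this layout
    # (RFC 1928); for CMD_CONNECT the first three bytes (VER, CMD, RSV)
    # are trimmed before the rest is handed to parse_header():
    #
    #   +-----+-----+-----+------+----------+----------+
    #   | VER | CMD | RSV | ATYP | DST.ADDR | DST.PORT |
    #   +-----+-----+-----+------+----------+----------+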
def _create_remote_socket(self, ip, port):
addrs = socket.getaddrinfo(ip, port, 0, socket.SOCK_STREAM,
socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("getaddrinfo failed for %s:%d" % (ip, port))
af, socktype, proto, canonname, sa = addrs[0]
if self._forbidden_iplist:
if common.to_str(sa[0]) in self._forbidden_iplist:
raise Exception('IP %s is in forbidden list, reject' %
common.to_str(sa[0]))
remote_sock = socket.socket(af, socktype, proto)
self._remote_sock = remote_sock
self._fd_to_handlers[remote_sock.fileno()] = self
remote_sock.setblocking(False)
remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
return remote_sock
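    # Illustrative config fragment (hypothetical values): a resolved address
    # that matches an entry below makes _create_remote_socket raise, which in
    # turn destroy()s the connection:
    #
    #   config['forbidden_ip'] = ['127.0.0.1', '::1']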
def _handle_dns_resolved(self, result, error):
if error:
self._log_error(error)
self.destroy()
return
if result:
ip = result[1]
if ip:
try:
self._stage = STAGE_CONNECTING
remote_addr = ip
if self._is_local:
remote_port = self._chosen_server[1]
else:
remote_port = self._remote_address[1]
if self._is_local and self._config['fast_open']:
# for fastopen:
# wait for more data to arrive and send them in one SYN
self._stage = STAGE_CONNECTING
# we don't have to wait for remote since it's not
# created
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
# TODO when there is already data in this packet
else:
# else do connect
remote_sock = self._create_remote_socket(remote_addr,
remote_port)
try:
remote_sock.connect((remote_addr, remote_port))
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) == \
errno.EINPROGRESS:
pass
self._loop.add(remote_sock,
eventloop.POLL_ERR | eventloop.POLL_OUT,
self._server)
self._stage = STAGE_CONNECTING
self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING)
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
return
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
self.destroy()
def _on_local_read(self):
# handle all local read events and dispatch them to methods for
# each stage
if not self._local_sock:
return
is_local = self._is_local
data = None
try:
data = self._local_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if not is_local:
data = self._encryptor.decrypt(data)
if not data:
return
if self._stage == STAGE_STREAM:
if self._is_local:
data = self._encryptor.encrypt(data)
self._write_to_sock(data, self._remote_sock)
return
elif is_local and self._stage == STAGE_INIT:
# TODO check auth method
            self._write_to_sock(b'\x05\x00', self._local_sock)
self._stage = STAGE_ADDR
return
elif self._stage == STAGE_CONNECTING:
self._handle_stage_connecting(data)
elif (is_local and self._stage == STAGE_ADDR) or \
(not is_local and self._stage == STAGE_INIT):
self._handle_stage_addr(data)
def _on_remote_read(self):
# handle all remote read events
data = None
try:
data = self._remote_sock.recv(BUF_SIZE)
except (OSError, IOError) as e:
if eventloop.errno_from_exception(e) in \
(errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK):
return
if not data:
self.destroy()
return
self._update_activity(len(data))
if self._is_local:
data = self._encryptor.decrypt(data)
else:
data = self._encryptor.encrypt(data)
try:
self._write_to_sock(data, self._local_sock)
except Exception as e:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
# TODO use logging when debug completed
self.destroy()
def _on_local_write(self):
# handle local writable event
if self._data_to_write_to_local:
data = b''.join(self._data_to_write_to_local)
self._data_to_write_to_local = []
self._write_to_sock(data, self._local_sock)
else:
self._update_stream(STREAM_DOWN, WAIT_STATUS_READING)
def _on_remote_write(self):
# handle remote writable event
self._stage = STAGE_STREAM
if self._data_to_write_to_remote:
data = b''.join(self._data_to_write_to_remote)
self._data_to_write_to_remote = []
self._write_to_sock(data, self._remote_sock)
else:
self._update_stream(STREAM_UP, WAIT_STATUS_READING)
def _on_local_error(self):
logging.debug('got local error')
if self._local_sock:
logging.error(eventloop.get_sock_error(self._local_sock))
self.destroy()
def _on_remote_error(self):
logging.debug('got remote error')
if self._remote_sock:
logging.error(eventloop.get_sock_error(self._remote_sock))
self.destroy()
def handle_event(self, sock, event):
# handle all events in this handler and dispatch them to methods
if self._stage == STAGE_DESTROYED:
logging.debug('ignore handle_event: destroyed')
return
# order is important
if sock == self._remote_sock:
if event & eventloop.POLL_ERR:
self._on_remote_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_remote_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_remote_write()
elif sock == self._local_sock:
if event & eventloop.POLL_ERR:
self._on_local_error()
if self._stage == STAGE_DESTROYED:
return
if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
self._on_local_read()
if self._stage == STAGE_DESTROYED:
return
if event & eventloop.POLL_OUT:
self._on_local_write()
else:
logging.warn('unknown socket')
def _log_error(self, e):
logging.error('%s when handling connection from %s:%d' %
(e, self._client_address[0], self._client_address[1]))
def destroy(self):
# destroy the handler and release any resources
# promises:
        # 1. destroy() won't make another destroy() call inside itself
        # 2. destroy() releases resources, turning any future call into a no-op
        # 3. destroy() won't raise any exceptions
        # if any of these promises is broken, a bug has been introduced,
        # most likely a memory leak, etc.
if self._stage == STAGE_DESTROYED:
            # by promise 2 above, a second call is a no-op
logging.debug('already destroyed')
return
self._stage = STAGE_DESTROYED
if self._remote_address:
logging.debug('destroy: %s:%d' %
self._remote_address)
else:
logging.debug('destroy')
if self._remote_sock:
logging.debug('destroying remote')
self._loop.remove(self._remote_sock)
del self._fd_to_handlers[self._remote_sock.fileno()]
self._remote_sock.close()
self._remote_sock = None
if self._local_sock:
logging.debug('destroying local')
self._loop.remove(self._local_sock)
del self._fd_to_handlers[self._local_sock.fileno()]
self._local_sock.close()
self._local_sock = None
self._dns_resolver.remove_callback(self._handle_dns_resolved)
self._server.remove_handler(self)
class TCPRelay(object):
def __init__(self, config, dns_resolver, is_local, stat_callback=None):
self._config = config
self._is_local = is_local
self._dns_resolver = dns_resolver
self._closed = False
self._eventloop = None
self._fd_to_handlers = {}
self._timeout = config['timeout']
self._timeouts = [] # a list for all the handlers
        # we trim the timeouts once in a while
self._timeout_offset = 0 # last checked position for timeout
self._handler_to_timeouts = {} # key: handler value: index in timeouts
if is_local:
listen_addr = config['local_address']
listen_port = config['local_port']
else:
listen_addr = config['server']
listen_port = config['server_port']
self._listen_port = listen_port
addrs = socket.getaddrinfo(listen_addr, listen_port, 0,
socket.SOCK_STREAM, socket.SOL_TCP)
if len(addrs) == 0:
raise Exception("can't get addrinfo for %s:%d" %
(listen_addr, listen_port))
af, socktype, proto, canonname, sa = addrs[0]
server_socket = socket.socket(af, socktype, proto)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(sa)
server_socket.setblocking(False)
if config['fast_open']:
            try:
                # 23 is TCP_FASTOPEN on Linux (not exposed by this Python's
                # socket module); 5 is the fast open queue length
                server_socket.setsockopt(socket.SOL_TCP, 23, 5)
except socket.error:
logging.error('warning: fast open is not available')
self._config['fast_open'] = False
server_socket.listen(1024)
self._server_socket = server_socket
self._stat_callback = stat_callback
def add_to_loop(self, loop):
if self._eventloop:
            raise Exception('already added to loop')
if self._closed:
raise Exception('already closed')
self._eventloop = loop
self._eventloop.add(self._server_socket,
eventloop.POLL_IN | eventloop.POLL_ERR, self)
self._eventloop.add_periodic(self.handle_periodic)
def remove_handler(self, handler):
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
del self._handler_to_timeouts[hash(handler)]
def update_activity(self, handler, data_len):
if data_len and self._stat_callback:
self._stat_callback(self._listen_port, data_len)
# set handler to active
now = int(time.time())
if now - handler.last_activity < eventloop.TIMEOUT_PRECISION:
# thus we can lower timeout modification frequency
return
handler.last_activity = now
index = self._handler_to_timeouts.get(hash(handler), -1)
if index >= 0:
# delete is O(n), so we just set it to None
self._timeouts[index] = None
length = len(self._timeouts)
self._timeouts.append(handler)
self._handler_to_timeouts[hash(handler)] = length
def _sweep_timeout(self):
        # tornado's timeout memory management is more flexible than we need;
        # we just need a queue kept sorted by last_activity, which is faster
        # than heapq; in fact we can do O(1) insertion/removal, so we invent
        # our own
if self._timeouts:
logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
now = time.time()
length = len(self._timeouts)
pos = self._timeout_offset
while pos < length:
handler = self._timeouts[pos]
if handler:
if now - handler.last_activity < self._timeout:
break
else:
if handler.remote_address:
logging.warn('timed out: %s:%d' %
handler.remote_address)
else:
logging.warn('timed out')
handler.destroy()
self._timeouts[pos] = None # free memory
pos += 1
else:
pos += 1
if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
                # compact the timeout queue once the swept (dead) prefix
                # exceeds TIMEOUTS_CLEAN_SIZE and covers more than half of it
self._timeouts = self._timeouts[pos:]
for key in self._handler_to_timeouts:
self._handler_to_timeouts[key] -= pos
pos = 0
self._timeout_offset = pos
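    # Worked example (illustrative): with timeout=300 and
    # _timeouts == [None, h1, h2], where h1 went quiet 400s ago and h2 60s
    # ago, a sweep destroys h1, leaves [None, None, h2] behind, and parks
    # _timeout_offset at h2's slot; once the dead prefix passes
    # TIMEOUTS_CLEAN_SIZE and half the list, it is sliced off and every
    # index in _handler_to_timeouts is shifted down by the same amount.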
def handle_event(self, sock, fd, event):
# handle events and dispatch to handlers
if sock:
logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
eventloop.EVENT_NAMES.get(event, event))
if sock == self._server_socket:
if event & eventloop.POLL_ERR:
# TODO
raise Exception('server_socket error')
try:
logging.debug('accept')
conn = self._server_socket.accept()
TCPRelayHandler(self, self._fd_to_handlers,
self._eventloop, conn[0], self._config,
self._dns_resolver, self._is_local)
except (OSError, IOError) as e:
error_no = eventloop.errno_from_exception(e)
if error_no in (errno.EAGAIN, errno.EINPROGRESS,
errno.EWOULDBLOCK):
return
else:
shell.print_exception(e)
if self._config['verbose']:
traceback.print_exc()
else:
if sock:
handler = self._fd_to_handlers.get(fd, None)
if handler:
handler.handle_event(sock, event)
else:
logging.warn('poll removed fd')
def handle_periodic(self):
if self._closed:
if self._server_socket:
self._eventloop.remove(self._server_socket)
self._server_socket.close()
self._server_socket = None
logging.info('closed TCP port %d', self._listen_port)
if not self._fd_to_handlers:
logging.info('stopping')
self._eventloop.stop()
self._sweep_timeout()
def close(self, next_tick=False):
logging.debug('TCP close')
self._closed = True
if not next_tick:
if self._eventloop:
self._eventloop.remove_periodic(self.handle_periodic)
self._eventloop.remove(self._server_socket)
self._server_socket.close()
for handler in list(self._fd_to_handlers.values()):
handler.destroy()
| apache-2.0 |
leeon/annotated-django | tests/migrations/test_optimizer.py | 18 | 12842 | # encoding: utf8
from django.test import TestCase
from django.db.migrations.optimizer import MigrationOptimizer
from django.db import migrations
from django.db import models
class OptimizerTests(TestCase):
"""
    Tests the migration optimizer.
"""
def optimize(self, operations):
"""
Handy shortcut for getting results + number of loops
"""
optimizer = MigrationOptimizer()
return optimizer.optimize(operations), optimizer._iterations
def assertOptimizesTo(self, operations, expected, exact=None, less_than=None):
result, iterations = self.optimize(operations)
self.assertEqual(expected, result)
if exact is not None and iterations != exact:
raise self.failureException("Optimization did not take exactly %s iterations (it took %s)" % (exact, iterations))
if less_than is not None and iterations >= less_than:
raise self.failureException("Optimization did not take less than %s iterations (it took %s)" % (less_than, iterations))
def test_operation_equality(self):
"""
Tests the equality operator on lists of operations.
If this is broken, then the optimizer will get stuck in an
infinite loop, so it's kind of important.
"""
self.assertEqual(
[migrations.DeleteModel("Test")],
[migrations.DeleteModel("Test")],
)
self.assertEqual(
[migrations.CreateModel("Test", [("name", models.CharField(max_length=255))])],
[migrations.CreateModel("Test", [("name", models.CharField(max_length=255))])],
)
self.assertNotEqual(
[migrations.CreateModel("Test", [("name", models.CharField(max_length=255))])],
[migrations.CreateModel("Test", [("name", models.CharField(max_length=100))])],
)
self.assertEqual(
[migrations.AddField("Test", "name", models.CharField(max_length=255))],
[migrations.AddField("Test", "name", models.CharField(max_length=255))],
)
self.assertNotEqual(
[migrations.AddField("Test", "name", models.CharField(max_length=255))],
[migrations.AddField("Test", "name", models.CharField(max_length=100))],
)
self.assertNotEqual(
[migrations.AddField("Test", "name", models.CharField(max_length=255))],
[migrations.AlterField("Test", "name", models.CharField(max_length=255))],
)
def test_single(self):
"""
Tests that the optimizer does nothing on a single operation,
and that it does it in just one pass.
"""
self.assertOptimizesTo(
[migrations.DeleteModel("Foo")],
[migrations.DeleteModel("Foo")],
exact=1,
)
def test_create_delete_model(self):
"""
CreateModel and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.DeleteModel("Foo"),
],
[],
)
def test_create_rename_model(self):
"""
CreateModel should absorb RenameModels.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.RenameModel("Foo", "Bar"),
],
[
migrations.CreateModel("Bar", [("name", models.CharField(max_length=255))]),
],
)
def test_rename_model_self(self):
"""
RenameModels should absorb themselves.
"""
self.assertOptimizesTo(
[
migrations.RenameModel("Foo", "Baa"),
migrations.RenameModel("Baa", "Bar"),
],
[
migrations.RenameModel("Foo", "Bar"),
],
)
def test_create_alter_delete_model(self):
"""
CreateModel, AlterModelTable, AlterUniqueTogether, and DeleteModel should collapse into nothing.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.AlterModelTable("Foo", "woohoo"),
migrations.AlterUniqueTogether("Foo", [["a", "b"]]),
migrations.DeleteModel("Foo"),
],
[],
)
def test_optimize_through_create(self):
"""
We should be able to optimize away create/delete through a create or delete
of a different model, but only if the create operation does not mention the model
at all.
"""
# These should work
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Bar"),
migrations.DeleteModel("Foo"),
],
[],
)
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.DeleteModel("Foo"),
migrations.DeleteModel("Bar"),
],
[],
)
# This should not work - FK should block it
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo"))]),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("other", models.ForeignKey("testapp.Foo"))]),
migrations.DeleteModel("Foo"),
],
)
# This should not work - bases should block it
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())], bases=("testapp.Foo", )),
migrations.DeleteModel("Foo"),
],
)
def test_create_model_add_field(self):
"""
AddField should optimize into CreateModel.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.AddField("Foo", "age", models.IntegerField()),
],
[
migrations.CreateModel("Foo", [
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
]),
],
)
def test_create_model_alter_field(self):
"""
AlterField should optimize into CreateModel.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.AlterField("Foo", "name", models.IntegerField()),
],
[
migrations.CreateModel("Foo", [
("name", models.IntegerField()),
]),
],
)
def test_create_model_rename_field(self):
"""
RenameField should optimize into CreateModel.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.CreateModel("Foo", [
("title", models.CharField(max_length=255)),
]),
],
)
def test_add_field_rename_field(self):
"""
RenameField should optimize into AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
],
[
migrations.AddField("Foo", "title", models.CharField(max_length=255)),
],
)
def test_alter_field_rename_field(self):
"""
RenameField should optimize to the other side of AlterField,
and into itself.
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "name", models.CharField(max_length=255)),
migrations.RenameField("Foo", "name", "title"),
migrations.RenameField("Foo", "title", "nom"),
],
[
migrations.RenameField("Foo", "name", "nom"),
migrations.AlterField("Foo", "nom", models.CharField(max_length=255)),
],
)
def test_create_model_remove_field(self):
"""
RemoveField should optimize into CreateModel.
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [
("name", models.CharField(max_length=255)),
("age", models.IntegerField()),
]),
migrations.RemoveField("Foo", "age"),
],
[
migrations.CreateModel("Foo", [
("name", models.CharField(max_length=255)),
]),
],
)
def test_add_field_alter_field(self):
"""
AlterField should optimize into AddField.
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AlterField("Foo", "age", models.FloatField(default=2.4)),
],
[
migrations.AddField("Foo", "age", models.FloatField(default=2.4)),
],
)
def test_add_field_delete_field(self):
"""
RemoveField should cancel AddField
"""
self.assertOptimizesTo(
[
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[],
)
def test_alter_field_delete_field(self):
"""
RemoveField should absorb AlterField
"""
self.assertOptimizesTo(
[
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RemoveField("Foo", "age"),
],
[
migrations.RemoveField("Foo", "age"),
],
)
def test_optimize_through_fields(self):
"""
Checks that field-level through checking is working.
This should manage to collapse model Foo to nonexistence,
and model Bar to a single IntegerField called "width".
"""
self.assertOptimizesTo(
[
migrations.CreateModel("Foo", [("name", models.CharField(max_length=255))]),
migrations.CreateModel("Bar", [("size", models.IntegerField())]),
migrations.AddField("Foo", "age", models.IntegerField()),
migrations.AddField("Bar", "width", models.IntegerField()),
migrations.AlterField("Foo", "age", models.IntegerField()),
migrations.RenameField("Bar", "size", "dimensions"),
migrations.RemoveField("Foo", "age"),
migrations.RenameModel("Foo", "Phou"),
migrations.RemoveField("Bar", "dimensions"),
migrations.RenameModel("Phou", "Fou"),
migrations.DeleteModel("Fou"),
],
[
migrations.CreateModel("Bar", [("width", models.IntegerField())]),
],
)
| bsd-3-clause |
takaaptech/sky_engine | build/android/pylib/utils/device_temp_file.py | 51 | 1958 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A temp file that automatically gets pushed and deleted from a device."""
# pylint: disable=W0622
import random
import time
from pylib import cmd_helper
from pylib.device import device_errors
class DeviceTempFile(object):
def __init__(self, adb, suffix='', prefix='temp_file', dir='/data/local/tmp'):
"""Find an unused temporary file path in the devices external directory.
When this object is closed, the file will be deleted on the device.
Args:
adb: An instance of AdbWrapper
suffix: The suffix of the name of the temp file.
prefix: The prefix of the name of the temp file.
dir: The directory on the device where to place the temp file.
"""
self._adb = adb
    # make sure that the temp dir exists on the device (test -d)
self._adb.Shell('test -d %s' % cmd_helper.SingleQuote(dir))
while True:
self.name = '{dir}/{prefix}-{time:d}-{nonce:d}{suffix}'.format(
dir=dir, prefix=prefix, time=int(time.time()),
nonce=random.randint(0, 1000000), suffix=suffix)
self.name_quoted = cmd_helper.SingleQuote(self.name)
try:
self._adb.Shell('test -e %s' % self.name_quoted)
except device_errors.AdbCommandFailedError:
break # file does not exist
# Immediately touch the file, so other temp files can't get the same name.
self._adb.Shell('touch %s' % self.name_quoted)
def close(self):
"""Deletes the temporary file from the device."""
# ignore exception if the file is already gone.
try:
self._adb.Shell('rm -f %s' % self.name_quoted)
except device_errors.AdbCommandFailedError:
# file does not exist on Android version without 'rm -f' support (ICS)
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
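# Illustrative usage (not part of the original file): the context-manager
# form guarantees device-side cleanup even if an intermediate command raises;
# `adb` is assumed to be an AdbWrapper instance, as noted in the docstring.
#
#   with DeviceTempFile(adb, suffix='.json') as tmp:
#     adb.Shell('dumpsys package > %s' % tmp.name_quoted)
#     contents = adb.Shell('cat %s' % tmp.name_quoted)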
| bsd-3-clause |
fnouama/intellij-community | python/lib/Lib/site-packages/django/utils/feedgenerator.py | 131 | 14820 | """
Syndication feed generation library -- used for generating RSS, etc.
Sample usage:
>>> from django.utils import feedgenerator
>>> feed = feedgenerator.Rss201rev2Feed(
... title=u"Poynter E-Media Tidbits",
... link=u"http://www.poynter.org/column.asp?id=31",
... description=u"A group Weblog by the sharpest minds in online media/journalism/publishing.",
... language=u"en",
... )
>>> feed.add_item(
... title="Hello",
... link=u"http://www.holovaty.com/test/",
... description="Testing."
... )
>>> fp = open('test.rss', 'w')
>>> feed.write(fp, 'utf-8')
>>> fp.close()
For definitions of the different versions of RSS, see:
http://diveintomark.org/archives/2004/02/04/incompatible-rss
"""
import datetime
import urlparse
from django.utils.xmlutils import SimplerXMLGenerator
from django.utils.encoding import force_unicode, iri_to_uri
def rfc2822_date(date):
    # We do this ourselves because email.Utils is not timezone aware.
if date.tzinfo:
time_str = date.strftime('%a, %d %b %Y %H:%M:%S ')
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds / 60)
hour, minute = divmod(timezone, 60)
return time_str + "%+03d%02d" % (hour, minute)
else:
return date.strftime('%a, %d %b %Y %H:%M:%S -0000')
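# Illustrative behaviour: a naive datetime is rendered with a -0000 offset,
# e.g. rfc2822_date(datetime.datetime(2010, 12, 31, 23, 59, 59))
# == 'Fri, 31 Dec 2010 23:59:59 -0000'.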
def rfc3339_date(date):
if date.tzinfo:
time_str = date.strftime('%Y-%m-%dT%H:%M:%S')
offset = date.tzinfo.utcoffset(date)
timezone = (offset.days * 24 * 60) + (offset.seconds / 60)
hour, minute = divmod(timezone, 60)
return time_str + "%+03d:%02d" % (hour, minute)
else:
return date.strftime('%Y-%m-%dT%H:%M:%SZ')
def get_tag_uri(url, date):
"""
Creates a TagURI.
See http://diveintomark.org/archives/2004/05/28/howto-atom-id
"""
url_split = urlparse.urlparse(url)
# Python 2.4 didn't have named attributes on split results or the hostname.
hostname = getattr(url_split, 'hostname', url_split[1].split(':')[0])
path = url_split[2]
fragment = url_split[5]
d = ''
if date is not None:
d = ',%s' % date.strftime('%Y-%m-%d')
return u'tag:%s%s:%s/%s' % (hostname, d, path, fragment)
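# Illustrative result (hypothetical URL):
#   get_tag_uri(u'http://example.com/a/b#c', datetime.date(2004, 5, 28))
#   -> u'tag:example.com,2004-05-28:/a/b/c'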
class SyndicationFeed(object):
"Base class for all syndication feeds. Subclasses should provide write()"
def __init__(self, title, link, description, language=None, author_email=None,
author_name=None, author_link=None, subtitle=None, categories=None,
feed_url=None, feed_copyright=None, feed_guid=None, ttl=None, **kwargs):
to_unicode = lambda s: force_unicode(s, strings_only=True)
if categories:
categories = [force_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_unicode(ttl)
self.feed = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'language': to_unicode(language),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'subtitle': to_unicode(subtitle),
'categories': categories or (),
'feed_url': iri_to_uri(feed_url),
'feed_copyright': to_unicode(feed_copyright),
'id': feed_guid or link,
'ttl': ttl,
}
self.feed.update(kwargs)
self.items = []
def add_item(self, title, link, description, author_email=None,
author_name=None, author_link=None, pubdate=None, comments=None,
unique_id=None, enclosure=None, categories=(), item_copyright=None,
ttl=None, **kwargs):
"""
Adds an item to the feed. All args are expected to be Python Unicode
objects except pubdate, which is a datetime.datetime object, and
enclosure, which is an instance of the Enclosure class.
"""
to_unicode = lambda s: force_unicode(s, strings_only=True)
if categories:
categories = [to_unicode(c) for c in categories]
if ttl is not None:
# Force ints to unicode
ttl = force_unicode(ttl)
item = {
'title': to_unicode(title),
'link': iri_to_uri(link),
'description': to_unicode(description),
'author_email': to_unicode(author_email),
'author_name': to_unicode(author_name),
'author_link': iri_to_uri(author_link),
'pubdate': pubdate,
'comments': to_unicode(comments),
'unique_id': to_unicode(unique_id),
'enclosure': enclosure,
'categories': categories or (),
'item_copyright': to_unicode(item_copyright),
'ttl': ttl,
}
item.update(kwargs)
self.items.append(item)
def num_items(self):
return len(self.items)
def root_attributes(self):
"""
Return extra attributes to place on the root (i.e. feed/channel) element.
Called from write().
"""
return {}
def add_root_elements(self, handler):
"""
Add elements in the root (i.e. feed/channel) element. Called
from write().
"""
pass
def item_attributes(self, item):
"""
Return extra attributes to place on each item (i.e. item/entry) element.
"""
return {}
def add_item_elements(self, handler, item):
"""
Add elements on each item (i.e. item/entry) element.
"""
pass
def write(self, outfile, encoding):
"""
Outputs the feed in the given encoding to outfile, which is a file-like
object. Subclasses should override this.
"""
raise NotImplementedError
def writeString(self, encoding):
"""
Returns the feed in the given encoding as a string.
"""
from StringIO import StringIO
s = StringIO()
self.write(s, encoding)
return s.getvalue()
def latest_post_date(self):
"""
Returns the latest item's pubdate. If none of them have a pubdate,
this returns the current date/time.
"""
updates = [i['pubdate'] for i in self.items if i['pubdate'] is not None]
if len(updates) > 0:
updates.sort()
return updates[-1]
else:
return datetime.datetime.now()
class Enclosure(object):
"Represents an RSS enclosure"
def __init__(self, url, length, mime_type):
"All args are expected to be Python Unicode objects"
self.length, self.mime_type = length, mime_type
self.url = iri_to_uri(url)
class RssFeed(SyndicationFeed):
mime_type = 'application/rss+xml'
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u"rss", self.rss_attributes())
handler.startElement(u"channel", self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
self.endChannelElement(handler)
handler.endElement(u"rss")
def rss_attributes(self):
return {u"version": self._version,
u"xmlns:atom": u"http://www.w3.org/2005/Atom"}
def write_items(self, handler):
for item in self.items:
handler.startElement(u'item', self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement(u"item")
def add_root_elements(self, handler):
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", self.feed['link'])
handler.addQuickElement(u"description", self.feed['description'])
handler.addQuickElement(u"atom:link", None, {u"rel": u"self", u"href": self.feed['feed_url']})
if self.feed['language'] is not None:
handler.addQuickElement(u"language", self.feed['language'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", cat)
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"copyright", self.feed['feed_copyright'])
handler.addQuickElement(u"lastBuildDate", rfc2822_date(self.latest_post_date()).decode('utf-8'))
if self.feed['ttl'] is not None:
handler.addQuickElement(u"ttl", self.feed['ttl'])
def endChannelElement(self, handler):
handler.endElement(u"channel")
class RssUserland091Feed(RssFeed):
_version = u"0.91"
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
class Rss201rev2Feed(RssFeed):
# Spec: http://blogs.law.harvard.edu/tech/rss
_version = u"2.0"
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", item['link'])
if item['description'] is not None:
handler.addQuickElement(u"description", item['description'])
# Author information.
if item["author_name"] and item["author_email"]:
handler.addQuickElement(u"author", "%s (%s)" % \
(item['author_email'], item['author_name']))
elif item["author_email"]:
handler.addQuickElement(u"author", item["author_email"])
elif item["author_name"]:
handler.addQuickElement(u"dc:creator", item["author_name"], {u"xmlns:dc": u"http://purl.org/dc/elements/1.1/"})
if item['pubdate'] is not None:
handler.addQuickElement(u"pubDate", rfc2822_date(item['pubdate']).decode('utf-8'))
if item['comments'] is not None:
handler.addQuickElement(u"comments", item['comments'])
if item['unique_id'] is not None:
handler.addQuickElement(u"guid", item['unique_id'])
if item['ttl'] is not None:
handler.addQuickElement(u"ttl", item['ttl'])
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"enclosure", '',
{u"url": item['enclosure'].url, u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", cat)
class Atom1Feed(SyndicationFeed):
# Spec: http://atompub.org/2005/07/11/draft-ietf-atompub-format-10.html
mime_type = 'application/atom+xml'
ns = u"http://www.w3.org/2005/Atom"
def write(self, outfile, encoding):
handler = SimplerXMLGenerator(outfile, encoding)
handler.startDocument()
handler.startElement(u'feed', self.root_attributes())
self.add_root_elements(handler)
self.write_items(handler)
handler.endElement(u"feed")
def root_attributes(self):
if self.feed['language'] is not None:
return {u"xmlns": self.ns, u"xml:lang": self.feed['language']}
else:
return {u"xmlns": self.ns}
def add_root_elements(self, handler):
handler.addQuickElement(u"title", self.feed['title'])
handler.addQuickElement(u"link", "", {u"rel": u"alternate", u"href": self.feed['link']})
if self.feed['feed_url'] is not None:
handler.addQuickElement(u"link", "", {u"rel": u"self", u"href": self.feed['feed_url']})
handler.addQuickElement(u"id", self.feed['id'])
handler.addQuickElement(u"updated", rfc3339_date(self.latest_post_date()).decode('utf-8'))
if self.feed['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", self.feed['author_name'])
if self.feed['author_email'] is not None:
handler.addQuickElement(u"email", self.feed['author_email'])
if self.feed['author_link'] is not None:
handler.addQuickElement(u"uri", self.feed['author_link'])
handler.endElement(u"author")
if self.feed['subtitle'] is not None:
handler.addQuickElement(u"subtitle", self.feed['subtitle'])
for cat in self.feed['categories']:
handler.addQuickElement(u"category", "", {u"term": cat})
if self.feed['feed_copyright'] is not None:
handler.addQuickElement(u"rights", self.feed['feed_copyright'])
def write_items(self, handler):
for item in self.items:
handler.startElement(u"entry", self.item_attributes(item))
self.add_item_elements(handler, item)
handler.endElement(u"entry")
def add_item_elements(self, handler, item):
handler.addQuickElement(u"title", item['title'])
handler.addQuickElement(u"link", u"", {u"href": item['link'], u"rel": u"alternate"})
if item['pubdate'] is not None:
handler.addQuickElement(u"updated", rfc3339_date(item['pubdate']).decode('utf-8'))
# Author information.
if item['author_name'] is not None:
handler.startElement(u"author", {})
handler.addQuickElement(u"name", item['author_name'])
if item['author_email'] is not None:
handler.addQuickElement(u"email", item['author_email'])
if item['author_link'] is not None:
handler.addQuickElement(u"uri", item['author_link'])
handler.endElement(u"author")
# Unique ID.
if item['unique_id'] is not None:
unique_id = item['unique_id']
else:
unique_id = get_tag_uri(item['link'], item['pubdate'])
handler.addQuickElement(u"id", unique_id)
# Summary.
if item['description'] is not None:
handler.addQuickElement(u"summary", item['description'], {u"type": u"html"})
# Enclosure.
if item['enclosure'] is not None:
handler.addQuickElement(u"link", '',
{u"rel": u"enclosure",
u"href": item['enclosure'].url,
u"length": item['enclosure'].length,
u"type": item['enclosure'].mime_type})
# Categories.
for cat in item['categories']:
handler.addQuickElement(u"category", u"", {u"term": cat})
# Rights.
if item['item_copyright'] is not None:
handler.addQuickElement(u"rights", item['item_copyright'])
# This isolates the decision of what the system default is, so calling code can
# do "feedgenerator.DefaultFeed" instead of "feedgenerator.Rss201rev2Feed".
DefaultFeed = Rss201rev2Feed
| apache-2.0 |
arpitparmar5739/youtube-dl | youtube_dl/extractor/moevideo.py | 112 | 3732 | # coding: utf-8
from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
)
class MoeVideoIE(InfoExtractor):
IE_DESC = 'LetitBit video services: moevideo.net, playreplay.net and videochart.net'
_VALID_URL = r'''(?x)
https?://(?P<host>(?:www\.)?
(?:(?:moevideo|playreplay|videochart)\.net))/
(?:video|framevideo)/(?P<id>[0-9]+\.[0-9A-Za-z]+)'''
_API_URL = 'http://api.letitbit.net/'
_API_KEY = 'tVL0gjqo5'
_TESTS = [
{
'url': 'http://moevideo.net/video/00297.0036103fe3d513ef27915216fd29',
'md5': '129f5ae1f6585d0e9bb4f38e774ffb3a',
'info_dict': {
'id': '00297.0036103fe3d513ef27915216fd29',
'ext': 'flv',
'title': 'Sink cut out machine',
'description': 'md5:f29ff97b663aefa760bf7ca63c8ca8a8',
'thumbnail': 're:^https?://.*\.jpg$',
'width': 540,
'height': 360,
'duration': 179,
'filesize': 17822500,
}
},
{
'url': 'http://playreplay.net/video/77107.7f325710a627383d40540d8e991a',
'md5': '74f0a014d5b661f0f0e2361300d1620e',
'info_dict': {
'id': '77107.7f325710a627383d40540d8e991a',
'ext': 'flv',
'title': 'Operacion Condor.',
'description': 'md5:7e68cb2fcda66833d5081c542491a9a3',
'thumbnail': 're:^https?://.*\.jpg$',
'width': 480,
'height': 296,
'duration': 6027,
'filesize': 588257923,
},
'skip': 'Video has been removed',
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
webpage = self._download_webpage(
'http://%s/video/%s' % (mobj.group('host'), video_id),
video_id, 'Downloading webpage')
title = self._og_search_title(webpage)
thumbnail = self._og_search_thumbnail(webpage)
description = self._og_search_description(webpage)
r = [
self._API_KEY,
[
'preview/flv_link',
{
'uid': video_id,
},
],
]
r_json = json.dumps(r)
post = compat_urllib_parse.urlencode({'r': r_json})
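        # The form body sent to the letitbit API thus looks like
        # (illustrative, with a placeholder id):
        #   r=["tVL0gjqo5", ["preview/flv_link", {"uid": "<video_id>"}]]
        # URL-encoded under the single parameter 'r'.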
req = compat_urllib_request.Request(self._API_URL, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
response = self._download_json(req, video_id)
if response['status'] != 'OK':
raise ExtractorError(
'%s returned error: %s' % (self.IE_NAME, response['data']),
expected=True
)
item = response['data'][0]
video_url = item['link']
duration = int_or_none(item['length'])
width = int_or_none(item['width'])
height = int_or_none(item['height'])
filesize = int_or_none(item['convert_size'])
formats = [{
'format_id': 'sd',
'http_headers': {'Range': 'bytes=0-'}, # Required to download
'url': video_url,
'width': width,
'height': height,
'filesize': filesize,
}]
return {
'id': video_id,
'title': title,
'thumbnail': thumbnail,
'description': description,
'duration': duration,
'formats': formats,
}
| unlicense |
burstlam/pantech_kernel_A850 | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, watermark = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
neilhan/tensorflow | tensorflow/python/kernel_tests/scan_ops_test.py | 21 | 9061 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for scan ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
def numpy_reverse(x, axis):
length = len(x.shape)
if axis < 0:
axis = length + axis
ix = [slice(None, None, -1)
if i == axis else slice(None) for i in range(length)]
return x[ix]
def handle_options(func, x, axis, exclusive, reverse):
"""Adds tf options to numpy scan ops."""
length = len(x.shape)
if axis < 0:
axis = length + axis
if reverse:
x = numpy_reverse(x, axis)
if exclusive:
ix_head = [slice(0, 1) if i == axis else slice(None)
for i in range(length)]
ix_init = [slice(0, -1) if i == axis else slice(None)
for i in range(length)]
if func == np.cumsum:
init = np.zeros_like(x[ix_head])
elif func == np.cumprod:
init = np.ones_like(x[ix_head])
else:
raise ValueError("Unknown scan function.")
x = np.concatenate([init, func(x[ix_init], axis)], axis=axis)
else:
x = func(x, axis=axis)
if reverse:
x = numpy_reverse(x, axis)
return x
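# Worked example (illustrative): for x = [1, 2, 3] along axis 0,
#   plain cumsum        -> [1, 3, 6]
#   exclusive           -> [0, 1, 3]   (shifted, seeded with 0)
#   reverse             -> [6, 5, 3]
#   exclusive + reverse -> [5, 3, 0]
# (for cumprod the exclusive seed is 1 instead of 0)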
class CumsumTest(tf.test.TestCase):
valid_dtypes = [np.int32, np.int64, np.float16, np.float32,
np.float64, np.complex64, np.complex128]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
with self.test_session(use_gpu=True):
tf_out = tf.cumsum(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = tf.convert_to_tensor(x)
with self.test_session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
tf.cumsum(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
tf.cumsum(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
tf.cumsum(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(0, 50).reshape(shape).astype(np.float64)
with self.test_session(use_gpu=True):
t = tf.convert_to_tensor(x)
result = tf.cumsum(t, axis, exclusive, reverse)
jacob_t, jacob_n = tf.test.compute_gradient(t,
shape,
result,
shape,
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, False)
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, True)
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, False)
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, True)
def testGradient2D(self):
for axis in (-1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([5, 10], axis, exclusive, reverse)
class CumprodTest(tf.test.TestCase):
valid_dtypes = [np.int32, np.int64, np.float16, np.float32,
np.float64, np.complex64, np.complex128]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumprod, x, axis, exclusive, reverse)
with self.test_session(use_gpu=True):
tf_out = tf.cumprod(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 11).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 21).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = tf.convert_to_tensor(x)
with self.test_session(use_gpu=True):
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
tf.cumprod(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
tf.cumprod(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
tf.errors.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
tf.cumprod(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(1, 9).reshape(shape).astype(np.float64)
with self.test_session(use_gpu=True):
t = tf.convert_to_tensor(x)
result = tf.cumprod(t, axis, exclusive, reverse)
jacob_t, jacob_n = tf.test.compute_gradient(t,
shape,
result,
shape,
x_init_value=x,
delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, False)
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, False, True)
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, False)
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([8], axis, True, True)
def testGradient2D(self):
for axis in (-2, -1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([2, 4], axis, exclusive, reverse)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
mcltn/ansible | lib/ansible/plugins/action/pause.py | 107 | 6444 | # Copyright 2012, Tim Bielawa <tbielawa@redhat.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import signal
import sys
import termios
import time
import tty
from os import isatty
from ansible.errors import *
from ansible.plugins.action import ActionBase
class AnsibleTimeoutExceeded(Exception):
pass
def timeout_handler(signum, frame):
raise AnsibleTimeoutExceeded
class ActionModule(ActionBase):
    ''' pauses execution for a length of time, or until input is received '''
PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
BYPASS_HOST_LOOP = True
def run(self, tmp=None, task_vars=dict()):
''' run the pause action module '''
duration_unit = 'minutes'
prompt = None
seconds = None
result = dict(
changed = False,
rc = 0,
stderr = '',
stdout = '',
start = None,
stop = None,
delta = None,
)
        # If 'args' is empty, this is the default prompted pause
if self._task.args is None or len(self._task.args.keys()) == 0:
pause_type = 'prompt'
prompt = "[%s]\nPress enter to continue:" % self._task.get_name().strip()
        # Do 'minutes' or 'seconds' keys exist in 'args'?
elif 'minutes' in self._task.args or 'seconds' in self._task.args:
try:
if 'minutes' in self._task.args:
pause_type = 'minutes'
# The time() command operates in seconds so we need to
# recalculate for minutes=X values.
seconds = int(self._task.args['minutes']) * 60
else:
pause_type = 'seconds'
seconds = int(self._task.args['seconds'])
duration_unit = 'seconds'
except ValueError as e:
return dict(failed=True, msg="non-integer value given for prompt duration:\n%s" % str(e))
# Is 'prompt' a key in 'args'?
elif 'prompt' in self._task.args:
pause_type = 'prompt'
prompt = "[%s]\n%s:" % (self._task.get_name().strip(), self._task.args['prompt'])
else:
# I have no idea what you're trying to do. But it's so wrong.
return dict(failed=True, msg="invalid pause type given. must be one of: %s" % ", ".join(self.PAUSE_TYPES))
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = str(datetime.datetime.now())
result['user_input'] = ''
try:
if seconds is not None:
# setup the alarm handler
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(seconds)
# show the prompt
self._display.display("Pausing for %d seconds" % seconds)
self._display.display("(ctrl+C then 'C' = continue early, ctrl+C then 'A' = abort)\r"),
else:
self._display.display(prompt)
# save the attributes on the existing (duped) stdin so
# that we can restore them later after we set raw mode
fd = self._connection._new_stdin.fileno()
if isatty(fd):
old_settings = termios.tcgetattr(fd)
tty.setraw(fd)
# flush the buffer to make sure no previous key presses
# are read in below
termios.tcflush(self._connection._new_stdin, termios.TCIFLUSH)
while True:
try:
key_pressed = self._connection._new_stdin.read(1)
if key_pressed == '\x03':
raise KeyboardInterrupt
if not seconds:
if not isatty(fd):
self._display.warning("Not waiting from prompt as stdin is not interactive")
break
# read key presses and act accordingly
if key_pressed == '\r':
break
else:
result['user_input'] += key_pressed
except KeyboardInterrupt:
if seconds is not None:
signal.alarm(0)
self._display.display("Press 'C' to continue the play or 'A' to abort \r"),
if self._c_or_a():
break
else:
raise AnsibleError('user requested abort!')
except AnsibleTimeoutExceeded:
# this is the exception we expect when the alarm signal
# fires, so we simply ignore it to move into the cleanup
pass
finally:
# cleanup and save some information
# restore the old settings for the duped stdin fd
if isatty(fd):
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
duration = time.time() - start
result['stop'] = str(datetime.datetime.now())
result['delta'] = int(duration)
if duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
return result
def _c_or_a(self):
while True:
key_pressed = self._connection._new_stdin.read(1)
if key_pressed.lower() == 'a':
return False
elif key_pressed.lower() == 'c':
return True
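# Illustrative playbook usage (hypothetical task names):
#
#   - name: let the operator verify the dashboard
#     pause: prompt="Check the dashboard, then press enter"
#
#   - name: wait for caches to warm
#     pause: minutes=5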
| gpl-3.0 |
MaheshIBM/ursula | library/cinder_volume_type.py | 9 | 7853 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013, Blue Box Group, Inc.
# Copyright 2013, Craig Tracey <craigtracey@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
DOCUMENTATION = '''
---
module: cinder_volume_type
short_description: Create cinder volume types
description:
- cinder_volume_types
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- password of login user
required: true
default: 'password'
login_tenant_id:
description:
- the tenant id of the login user
required: true
default: None
auth_url:
description:
- the keystone url for authentication
required: false
default: 'http://127.0.0.1:5000/v2.0/'
    encryption_type:
        description:
            - flag indicating whether this is an encryption type or not
        required: false
        default: false
    volume_type:
        description:
            - the name of the cinder volume type
        required: true
        default: None
    provider:
        description:
            - the module path to the Nova encryption provider
        required: false
        default: None
    control_location:
        description:
            - the control location to use in the Nova encryption provider
        required: false
        default: None
    cipher:
        description:
            - the cipher to use in the Nova encryption provider
        required: false
        default: None
    key_size:
        description:
            - the key size to use in the Nova encryption provider
        required: false
        default: None
requirements: [ "python-cinderclient", "python-keystoneclient" ]
'''
EXAMPLES = '''
- cinder_volume_type: |
login_username=admin
login_password=password
login_tenant_id=123456789
auth_url=http://keystone:5000/v2.0
volume_type=encrypted-aes-256
'''
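# An encryption-type invocation would additionally pass something like the
# following (illustrative values; the provider path must match the Nova
# encryptor actually deployed):
#
#   encryption_type=true
#   provider=nova.volume.encryptors.luks.LuksEncryptor
#   control_location=front-end cipher=aes-xts-plain64 key_size=256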
try:
from keystoneclient.v2_0 import client as ksclient
from cinderclient.v2 import client
except ImportError as e:
print("failed=True msg='python-cinderclient is required'")
# FIXME(cmt): the fact that we need this is totally ridiculous. cinderclient
# does not accept tenant_name as a parameter. So we are forced to lookup the
# tenant's id. seriously?!
def _get_tenant_id(module, **kwargs):
tenant_id = None
try:
keystone = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
insecure=True,
auth_url=kwargs.get('auth_url'))
for tenant in keystone.tenants.list():
if tenant.name == kwargs.get('login_tenant_name'):
tenant_id = tenant.id
except Exception as e:
module.fail_json(msg="error authenticating to keystone: %s" % str(e))
return tenant_id
def _get_cinderclient(module, **kwargs):
cinderclient = None
tenant_id = _get_tenant_id(module, **kwargs)
try:
cinderclient = client.Client(username=kwargs.get('login_username'),
insecure=True,
api_key=kwargs.get('login_password'),
tenant_id=tenant_id,
auth_url=kwargs.get('auth_url'))
except Exception as e:
module.fail_json(msg="error authenticating to cinder: %s" % str(e))
return cinderclient
def _create_volume_type(module, cinderclient, type_name):
volume_type_id = _get_volume_type_id(cinderclient, type_name)
if volume_type_id:
module.exit_json(changed=False, result="unchanged")
cinderclient.volume_types.create(type_name)
def _get_volume_type_id(cinderclient, type_name):
volume_type_id = None
volume_types = cinderclient.volume_types.list()
for volume_type in volume_types:
if volume_type.name == type_name:
volume_type_id = volume_type.id
return volume_type_id
def _get_encrypted_volume_type_id_name(cinderclient, volume_type_id):
enc_volume_types = cinderclient.volume_encryption_types.list()
for enc_volume_type in enc_volume_types:
if enc_volume_type.volume_type_id == volume_type_id:
return enc_volume_type
return None
def _create_encrypted_volume_type(module, cinderclient, volume_type, provider,
control_location=None, cipher=None,
key_size=None):
volume_type_id = _get_volume_type_id(cinderclient, volume_type)
if not volume_type_id:
_create_volume_type(module, cinderclient, volume_type)
volume_type_id = _get_volume_type_id(cinderclient, volume_type)
if not volume_type_id:
raise ValueError("volume type '%s' not found and could not be created" % volume_type)
enc_volume_type = _get_encrypted_volume_type_id_name(cinderclient,
volume_type_id)
if enc_volume_type:
if (provider == enc_volume_type.provider and
control_location == enc_volume_type.control_location and
cipher == enc_volume_type.cipher and
int(key_size) == enc_volume_type.key_size):
module.exit_json(changed=False, result="unchanged")
# FIXME(cmt) this should not be necessary but seems to be broken
# in cinder itself, so force it here.
possible_control_locs = ('front-end', 'back-end')
if control_location not in possible_control_locs:
raise ValueError("control_location must be one of %s" %
" or ".join(possible_control_locs))
spec = {
'provider': provider,
'control_location': control_location,
'cipher': cipher,
'key_size': int(key_size)
}
cinderclient.volume_encryption_types.create(volume_type_id, spec)
def main():
module = AnsibleModule(
argument_spec=dict(
login_username=dict(default=None),
login_password=dict(default=None),
login_tenant_name=dict(default=None),
auth_url=dict(default='http://127.0.0.1:5000/v2.0/'),
volume_type=dict(required=True),
encryption_type=dict(default=False),
provider=dict(default=None),
cipher=dict(default=None),
key_size=dict(default=None),
control_location=dict(default=None),
)
)
cinderclient = _get_cinderclient(module, **module.params)
try:
if module.params['encryption_type']:
_create_encrypted_volume_type(module, cinderclient,
module.params['volume_type'],
module.params['provider'],
module.params['control_location'],
module.params['cipher'],
module.params['key_size'])
else:
_create_volume_type(module, cinderclient,
module.params['volume_type'])
module.exit_json(changed=True, result="created")
except Exception as e:
module.fail_json(msg="creating the volume type failed: %s" % str(e))
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
| mit |
wavii/dulwich | dulwich/tests/utils.py | 3 | 10899 | # utils.py -- Test utilities for Dulwich.
# Copyright (C) 2010 Google, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2
# of the License or (at your option) any later version of
# the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""Utility functions common to Dulwich tests."""
import datetime
import os
import shutil
import tempfile
import time
import types
from dulwich.index import (
commit_tree,
)
from dulwich.objects import (
FixedSha,
Commit,
)
from dulwich.pack import (
OFS_DELTA,
REF_DELTA,
DELTA_TYPES,
obj_sha,
SHA1Writer,
write_pack_header,
write_pack_object,
create_delta,
)
from dulwich.repo import Repo
from dulwich.tests import (
SkipTest,
)
# Plain files are very frequently used in tests, so let the mode be very short.
F = 0100644 # Shorthand mode for Files.
def open_repo(name):
"""Open a copy of a repo in a temporary directory.
Use this function for accessing repos in dulwich/tests/data/repos to avoid
accidentally or intentionally modifying those repos in place. Use
tear_down_repo to delete any temp files created.
:param name: The name of the repository, relative to
dulwich/tests/data/repos
:returns: An initialized Repo object that lives in a temporary directory.
"""
temp_dir = tempfile.mkdtemp()
repo_dir = os.path.join(os.path.dirname(__file__), 'data', 'repos', name)
temp_repo_dir = os.path.join(temp_dir, name)
shutil.copytree(repo_dir, temp_repo_dir, symlinks=True)
return Repo(temp_repo_dir)
def tear_down_repo(repo):
"""Tear down a test repository."""
temp_dir = os.path.dirname(repo.path.rstrip(os.sep))
shutil.rmtree(temp_dir)
def make_object(cls, **attrs):
"""Make an object for testing and assign some members.
This method creates a new subclass to allow arbitrary attribute
reassignment, which is not otherwise possible with objects having __slots__.
:param attrs: dict of attributes to set on the new object.
:return: A newly initialized object of type cls.
"""
class TestObject(cls):
"""Class that inherits from the given class, but without __slots__.
Note that classes with __slots__ can't have arbitrary attributes monkey-
patched in, so this is a class that is exactly the same only with a
__dict__ instead of __slots__.
"""
pass
obj = TestObject()
for name, value in attrs.iteritems():
if name == 'id':
# id property is read-only, so we overwrite sha instead.
sha = FixedSha(value)
obj.sha = lambda: sha
else:
setattr(obj, name, value)
return obj
def make_commit(**attrs):
"""Make a Commit object with a default set of members.
:param attrs: dict of attributes to overwrite from the default values.
:return: A newly initialized Commit object.
"""
default_time = int(time.mktime(datetime.datetime(2010, 1, 1).timetuple()))
all_attrs = {'author': 'Test Author <test@nodomain.com>',
'author_time': default_time,
'author_timezone': 0,
'committer': 'Test Committer <test@nodomain.com>',
'commit_time': default_time,
'commit_timezone': 0,
'message': 'Test message.',
'parents': [],
'tree': '0' * 40}
all_attrs.update(attrs)
return make_object(Commit, **all_attrs)
def functest_builder(method, func):
"""Generate a test method that tests the given function."""
def do_test(self):
method(self, func)
return do_test
def ext_functest_builder(method, func):
"""Generate a test method that tests the given extension function.
This is intended to generate test methods that test both a pure-Python
version and an extension version using common test code. The extension test
will raise SkipTest if the extension is not found.
Sample usage:
class MyTest(TestCase):
def _do_some_test(self, func_impl):
self.assertEqual('foo', func_impl())
test_foo = functest_builder(_do_some_test, foo_py)
test_foo_extension = ext_functest_builder(_do_some_test, _foo_c)
:param method: The method to run. It must take two parameters, self and the
function implementation to test.
:param func: The function implementation to pass to method.
"""
def do_test(self):
if not isinstance(func, types.BuiltinFunctionType):
raise SkipTest("%s extension not found" % func.func_name)
method(self, func)
return do_test
def build_pack(f, objects_spec, store=None):
"""Write test pack data from a concise spec.
:param f: A file-like object to write the pack to.
:param objects_spec: A list of (type_num, obj). For non-delta types, obj
is the string of that object's data.
For delta types, obj is a tuple of (base, data), where:
* base can be either an index in objects_spec of the base for that
delta; or for a ref delta, a SHA, in which case the resulting pack
will be thin and the base will be an external ref.
* data is a string of the full, non-deltified data for that object.
Note that offsets/refs and deltas are computed within this function.
:param store: An optional ObjectStore for looking up external refs.
:return: A list of tuples in the order specified by objects_spec:
(offset, type num, data, sha, CRC32)
"""
sf = SHA1Writer(f)
num_objects = len(objects_spec)
write_pack_header(sf, num_objects)
full_objects = {}
offsets = {}
crc32s = {}
while len(full_objects) < num_objects:
for i, (type_num, data) in enumerate(objects_spec):
if type_num not in DELTA_TYPES:
full_objects[i] = (type_num, data,
obj_sha(type_num, [data]))
continue
base, data = data
if isinstance(base, int):
if base not in full_objects:
continue
base_type_num, _, _ = full_objects[base]
else:
base_type_num, _ = store.get_raw(base)
full_objects[i] = (base_type_num, data,
obj_sha(base_type_num, [data]))
for i, (type_num, obj) in enumerate(objects_spec):
offset = f.tell()
if type_num == OFS_DELTA:
base_index, data = obj
base = offset - offsets[base_index]
_, base_data, _ = full_objects[base_index]
obj = (base, create_delta(base_data, data))
elif type_num == REF_DELTA:
base_ref, data = obj
if isinstance(base_ref, int):
_, base_data, base = full_objects[base_ref]
else:
base_type_num, base_data = store.get_raw(base_ref)
base = obj_sha(base_type_num, base_data)
obj = (base, create_delta(base_data, data))
crc32 = write_pack_object(sf, type_num, obj)
offsets[i] = offset
crc32s[i] = crc32
expected = []
for i in xrange(num_objects):
type_num, data, sha = full_objects[i]
assert len(sha) == 20
expected.append((offsets[i], type_num, data, sha, crc32s[i]))
sf.write_sha()
f.seek(0)
return expected
def build_commit_graph(object_store, commit_spec, trees=None, attrs=None):
"""Build a commit graph from a concise specification.
Sample usage:
>>> c1, c2, c3 = build_commit_graph(store, [[1], [2, 1], [3, 1, 2]])
>>> store[store[c3].parents[0]] == c1
True
>>> store[store[c3].parents[1]] == c2
True
If not otherwise specified, commits will refer to the empty tree and have
commit times increasing in the same order as the commit spec.
:param object_store: An ObjectStore to commit objects to.
:param commit_spec: An iterable of iterables of ints defining the commit
graph. Each entry defines one commit, and entries must be in topological
order. The first element of each entry is a commit number, and the
remaining elements are its parents. The commit numbers are only
meaningful for the call to build_commit_graph; since real commit objects are
created, they will get created with real, opaque SHAs.
:param trees: An optional dict of commit number -> tree spec for building
trees for commits. The tree spec is an iterable of (path, blob, mode) or
(path, blob) entries; if mode is omitted, it defaults to the normal file
mode (0100644).
:param attrs: A dict of commit number -> (dict of attribute -> value) for
assigning additional values to the commits.
:return: The list of commit objects created.
:raise ValueError: If an undefined commit identifier is listed as a parent.
"""
if trees is None:
trees = {}
if attrs is None:
attrs = {}
commit_time = 0
nums = {}
commits = []
for commit in commit_spec:
commit_num = commit[0]
try:
parent_ids = [nums[pn] for pn in commit[1:]]
except KeyError, e:
missing_parent, = e.args
raise ValueError('Unknown parent %i' % missing_parent)
blobs = []
for entry in trees.get(commit_num, []):
if len(entry) == 2:
path, blob = entry
entry = (path, blob, F)
path, blob, mode = entry
blobs.append((path, blob.id, mode))
object_store.add_object(blob)
tree_id = commit_tree(object_store, blobs)
commit_attrs = {
'message': 'Commit %i' % commit_num,
'parents': parent_ids,
'tree': tree_id,
'commit_time': commit_time,
}
commit_attrs.update(attrs.get(commit_num, {}))
commit_obj = make_commit(**commit_attrs)
# By default, increment the time by a lot. Out-of-order commits should
# be closer together than this because their main cause is clock skew.
commit_time = commit_attrs['commit_time'] + 100
nums[commit_num] = commit_obj.id
object_store.add_object(commit_obj)
commits.append(commit_obj)
return commits
| gpl-2.0 |
shaunwbell/FOCI_Analysis | ReanalysisRetreival_orig/GOA_Winds/NCEP_NARR_comp.py | 1 | 8096 | #!/usr/bin/env
"""
NCEP_NARR_comp.py
NCEP vs NARR side by side comparisons of select fields
Compare NARR Winds with NCEP V2 (with Mooring Winds)
Using Anaconda packaged Python
"""
#System Stack
import datetime
import sys
#Science Stack
import numpy as np
# User Stack
import general_utilities.date2doy as date2doy
from utilities import ncutilities as ncutil
# Visual Stack
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, shiftgrid
from matplotlib.dates import MonthLocator, DateFormatter
__author__ = 'Shaun Bell'
__email__ = 'shaun.bell@noaa.gov'
__created__ = datetime.datetime(2014, 01, 13)
__modified__ = datetime.datetime(2014, 01, 13)
__version__ = "0.1.0"
__status__ = "Development"
"""------------------------General Modules-------------------------------------------"""
def date2pydate(file_time, file_time2=None, file_flag='EPIC'):
if file_flag == 'EPIC':
ref_time_py = datetime.datetime.toordinal(datetime.datetime(1968, 5, 23))
ref_time_epic = 2440000
offset = ref_time_epic - ref_time_py
try: #if input is an array
python_time = [None] * len(file_time)
for i, val in enumerate(file_time):
pyday = file_time[i] - offset
pyfrac = file_time2[i] / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time[i] = (pyday + pyfrac)
except:
pyday = file_time - offset
pyfrac = file_time2 / (1000. * 60. * 60.* 24.) #milliseconds in a day
python_time = (pyday + pyfrac)
elif file_flag == 'NARR':
""" Hours since 1800-1-1"""
base_date = datetime.datetime.strptime('1800-01-01', '%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
elif file_flag == 'NCEP':
""" Hours since 1800-1-1"""
base_date=datetime.datetime.strptime('1800-01-01','%Y-%m-%d').toordinal()
python_time = file_time / 24. + base_date
else:
print "time flag not recognized"
sys.exit()
return np.array(python_time)
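# Illustrative conversion: NCEP times are hours since 1800-01-01, so 48 hours
# maps to the proleptic ordinal of 1800-01-03:
#   date2pydate(np.array([48.]), file_flag='NCEP')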
def hourly2daily(ltbound,utbound, time, data):
tarray = np.arange(ltbound, utbound+1,1.)
dmean = np.zeros_like(tarray)
dstd = np.zeros_like(tarray)
for i, val in enumerate(tarray):
ind = np.where(np.floor(time) == val )
dmean[i] = data[ind].mean()
dstd[i] = data[ind].std()
return {'daily_mean': dmean, 'daily_std': dstd, 'daily_time': tarray}
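# Illustrative usage (array names are assumptions): bin hourly samples into
# daily means/stddevs keyed by floor(time):
#   daily_u = hourly2daily(t.min(), t.max(), t, u)
#   daily_u['daily_mean'][0] # mean of samples where floor(time) == t.min()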
def cart2wind(cart_angle):
""" 0deg is North, rotate clockwise"""
cart_angle = 90. - cart_angle # rotate so N is 0deg
cart_angle = cart_angle % 360. # wrap into [0, 360)
return cart_angle
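# Illustrative check of the convention: a cartesian angle of 90deg (+y axis)
# maps to 0deg (North) in the meteorological frame:
#   cart2wind(np.array([90., 0.])) # -> array([ 0., 90.])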
def from_netcdf(infile):
""" Uses ncreadfile_dic which returns a dictionary of all data from netcdf"""
###nc readin/out
nchandle = ncutil.ncopen(infile)
params = ncutil.get_vars(nchandle) #gets all of them
ncdata = ncutil.ncreadfile_dic(nchandle, params)
ncutil.ncclose(nchandle)
return (ncdata, params)
"""---------------------------- Main Routine-------------------------------------------"""
"""------Ingest Data--------"""
#doy = date2doy.date2doy('2003-06-03')
NCEPV2 = '/Users/bell/Data_Local/Reanalysis_Files/NCEPV2/daily_mean/'
NCEPV2_uwind, NCEPV2_uparams = from_netcdf(NCEPV2 + 'uwnd.10m.gauss.2003.nc')
NCEPV2_vwind, NCEPV2_vparams = from_netcdf(NCEPV2 + 'vwnd.10m.gauss.2003.nc')
NCEPTime = date2pydate(NCEPV2_uwind['time'], file_flag='NCEP')
NARR = '/Users/bell/Data_Local/Reanalysis_Files/NARR/daily/'
NARR_uwind, NARR_uparams = from_netcdf(NARR + 'uwnd.10m.2003.nc')
NARR_vwind, NARR_vparams = from_netcdf(NARR + 'vwnd.10m.2003.nc')
NARRTime = date2pydate(NARR_uwind['time'], file_flag='NCEP')
### NARR Data has the following boundary corners:
# 12.2N;133.5W, 54.5N; 152.9W, 57.3N; 49.4W ,14.3N;65.1W
# Lambert Conformal
#lat/lon is ~ 59N, 149W
MooringFile = '/Users/bell/Data_Local/FOCI/Mooring/2003/globec3/03gbm3a_wpak.nc'
MooringMetData, Mooring_params = from_netcdf(MooringFile)
MooringTime = date2pydate(MooringMetData['time'], MooringMetData['time2'], file_flag='EPIC')
MooringDaily_uwnd = hourly2daily(NARRTime.min(),NARRTime.max(), MooringTime, MooringMetData['WU_422'])
MooringDaily_vwnd = hourly2daily(NARRTime.min(),NARRTime.max(), MooringTime, MooringMetData['WV_423'])
sta_lat = MooringMetData['latitude'][0]
sta_long = MooringMetData['longitude'][0]
"""---------------------------- Data Manipulation Routines-----------------------------"""
#NCEP V2 - vectors given to define grid
#shift grid from 0->360 to -360->0
NCEP_uwnds,lons_ncep = shiftgrid(0.,NCEPV2_uwind['uwnd'],NCEPV2_uwind['lon'],start=False)
NCEP_vwnds,lons_ncep = shiftgrid(0.,NCEPV2_vwind['vwnd'],NCEPV2_vwind['lon'],start=False)
if isinstance(NARR_uwind['uwnd'], np.ma.core.MaskedArray): #masked array handling
NARR_uwind['uwnd'] = NARR_uwind['uwnd'].data
NARR_vwind['vwnd'] = NARR_vwind['vwnd'].data
### Plot
for ind in (range(0,366,1)):
# use for color coding of winds
CmagNARR = np.sqrt(NARR_uwind['uwnd'][ind,:,:]**2. + NARR_vwind['vwnd'][ind,:,:]**2.)
CmagNCEP = np.sqrt(NCEP_uwnds[ind,:,:][0]**2. + NCEP_vwnds[ind,:,:][0]**2.)
fig = plt.figure()
ax = plt.subplot(121)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=65,llcrnrlon=-155,urcrnrlon=-140, lat_ts=45)
#NARR - array given to define grid
x_narr, y_narr = m(NARR_uwind['lon'], NARR_uwind['lat'])
# Mooring Data
x_moor, y_moor = m(-1. * sta_long,sta_lat)
Q1 = m.quiver(x_moor,y_moor,MooringDaily_uwnd['daily_mean'][ind],MooringDaily_vwnd['daily_mean'][ind],scale=100, color='b')
Q = m.quiver(x_narr,y_narr,NARR_uwind['uwnd'][ind,:,:],NARR_vwind['vwnd'][ind,:,:], CmagNARR, cmap='Reds', scale=100)
qk = plt.quiverkey(Q, 0.05, 0.05, 5, '5 m/s', labelpos='S')
m.scatter(x_moor,y_moor,10,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(55,65,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-155,-140,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
ax = plt.subplot(122)
m = Basemap(resolution='i',projection='merc', llcrnrlat=55, \
urcrnrlat=65,llcrnrlon=-155,urcrnrlon=-140, lat_ts=45)
lon_ncep, lat_ncep = np.meshgrid(lons_ncep, NCEPV2_uwind['lat'])
x_ncep, y_ncep = m(lon_ncep, lat_ncep)
# Mooring Data
x_moor, y_moor = m(-1. * sta_long,sta_lat)
Q1 = m.quiver(x_moor,y_moor,MooringDaily_uwnd['daily_mean'][ind],MooringDaily_vwnd['daily_mean'][ind],scale=100, color='b')
Q = m.quiver(x_ncep,y_ncep,NCEP_uwnds[ind,:,:][0],NCEP_vwnds[ind,:,:][0], CmagNCEP, cmap='Reds', scale=100)
#
qk = plt.quiverkey(Q, 0.05, 0.05, 5, '5 m/s', labelpos='S')
m.scatter(x_moor,y_moor,10,marker='o',color='g')
m.drawcountries(linewidth=0.5)
m.drawcoastlines(linewidth=0.5)
m.drawparallels(np.arange(55,65,2.),labels=[1,0,0,0],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw parallels
m.drawmeridians(np.arange(-155,-140,2.),labels=[0,0,0,1],color='black',dashes=[1,1],labelstyle='+/-',linewidth=0.2) # draw meridians
#m.fillcontinents(color='black')
ind_off = ind + 1 #offset array with DOY
str_ind = '%03d' % ind_off # zero-pad day-of-year for filenames
fig.suptitle('NARR (left) vs NCEP V2 (right) DOY:'+str_ind, fontsize=12)
DefaultSize = fig.get_size_inches()
fig.set_size_inches( (DefaultSize[0]*1.25, DefaultSize[1]) )
plt.savefig('images_2003/Globec_region' + str_ind + '.png', bbox_inches='tight', dpi = (100))
plt.close(fig)
print "Finishing Figure: " + str_ind
| mit |
TBits/rolekit | src/rolekit/config/dbus.py | 4 | 1221 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011-2014 Red Hat, Inc.
#
# Authors:
# Thomas Woerner <twoerner@redhat.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
DBUS_INTERFACE_VERSION = 1
DBUS_INTERFACE_REVISION = 1
DBUS_INTERFACE = "org.fedoraproject.rolekit%d" % DBUS_INTERFACE_VERSION
DBUS_INTERFACE_ROLE = DBUS_INTERFACE+".role"
DBUS_INTERFACE_ROLE_INSTANCE = DBUS_INTERFACE_ROLE+".instance"
DBUS_PATH = "/org/fedoraproject/rolekit%d" % DBUS_INTERFACE_VERSION
DBUS_PATH_ROLES = DBUS_PATH + "/roles"
# Polkit actions
_PK_ACTION = "org.fedoraproject.rolekit%d" % DBUS_INTERFACE_VERSION
PK_ACTION_ALL = _PK_ACTION+".all"
| gpl-2.0 |
RitwikGupta/pattern | pattern/text/fr/__init__.py | 21 | 9083 | #### PATTERN | FR ##################################################################################
# -*- coding: utf-8 -*-
# Copyright (c) 2013 University of Antwerp, Belgium
# Copyright (c) 2013 St. Lucas University College of Art & Design, Antwerp.
# Author: Tom De Smedt <tom@organisms.be>
# License: BSD (see LICENSE.txt for details).
# http://www.clips.ua.ac.be/pages/pattern
####################################################################################################
# French linguistical tools using fast regular expressions.
import os
import sys
try:
MODULE = os.path.dirname(os.path.realpath(__file__))
except:
MODULE = ""
sys.path.insert(0, os.path.join(MODULE, "..", "..", "..", ".."))
# Import parser base classes.
from pattern.text import (
Lexicon, Model, Morphology, Context, Parser as _Parser, ngrams, pprint, commandline,
PUNCTUATION
)
# Import parser universal tagset.
from pattern.text import (
penntreebank2universal as _penntreebank2universal,
PTB, PENN, UNIVERSAL,
NOUN, VERB, ADJ, ADV, PRON, DET, PREP, ADP, NUM, CONJ, INTJ, PRT, PUNC, X
)
# Import parse tree base classes.
from pattern.text.tree import (
Tree, Text, Sentence, Slice, Chunk, PNPChunk, Chink, Word, table,
SLASH, WORD, POS, CHUNK, PNP, REL, ANCHOR, LEMMA, AND, OR
)
# Import sentiment analysis base classes.
from pattern.text import (
Sentiment as _Sentiment,
NOUN, VERB, ADJECTIVE, ADVERB,
MOOD, IRONY
)
# Import spelling base class.
from pattern.text import (
Spelling
)
# Import verb tenses.
from pattern.text import (
INFINITIVE, PRESENT, PAST, FUTURE,
FIRST, SECOND, THIRD,
SINGULAR, PLURAL, SG, PL,
INDICATIVE, IMPERATIVE, SUBJUNCTIVE, CONDITIONAL,
IMPERFECTIVE, PERFECTIVE, PROGRESSIVE,
IMPERFECT, PRETERITE,
PARTICIPLE, GERUND
)
# Import inflection functions.
from pattern.text.fr.inflect import (
pluralize, singularize, NOUN, VERB, ADJECTIVE,
verbs, conjugate, lemma, lexeme, tenses,
predicative, attributive
)
# Import all submodules.
from pattern.text.fr import inflect
sys.path.pop(0)
#--- FRENCH PARSER ---------------------------------------------------------------------------------
# The French parser is based on Lefff (Lexique des Formes Fléchies du Français).
# Benoît Sagot, Lionel Clément, Éric Villemonte de la Clergerie, Pierre Boullier.
# The Lefff 2 syntactic lexicon for French: architecture, acquisition.
# http://alpage.inria.fr/~sagot/lefff-en.html
# For words in Lefff that can have different part-of-speech tags,
# we used Lexique to find the most frequent POS-tag:
# http://www.lexique.org/
_subordinating_conjunctions = set((
"afin", "comme", "lorsque", "parce", "puisque", "quand", "que", "quoique", "si"
))
def penntreebank2universal(token, tag):
""" Converts a Penn Treebank II tag to a universal tag.
For example: comme/IN => comme/CONJ
"""
if tag == "IN" and token.lower() in _subordinating_conjunctions:
return CONJ
return _penntreebank2universal(token, tag)
ABBREVIATIONS = set((
u"av.", u"boul.", u"C.-B.", u"c.-à-d.", u"ex.", u"éd.", u"fig.", u"I.-P.-E.", u"J.-C.",
u"Ltee.", u"Ltée.", u"M.", u"Me.","Mlle.", u"Mlles.", u"MM.", u"N.-B.", u"N.-É.", u"p.",
u"S.B.E.", u"Ste.", u"T.-N.", u"t.a.b."
))
# While contractions in English are optional,
# they are required in French:
replacements = {
"l'": "l' ", # le/la
"c'": "c' ", # ce
"d'": "d' ", # de
"j'": "j' ", # je
"m'": "m' ", # me
"n'": "n' ", # ne
"qu'": "qu' ", # que
"s'": "s' ", # se
"t'": "t' ", # te
"jusqu'": "jusqu' ",
"lorsqu'": "lorsqu' ",
"puisqu'": "puisqu' ",
# Same rule for Unicode apostrophe, see also Parser.find_tokens():
ur"(l|c|d|j|m|n|qu|s|t|jusqu|lorsqu|puisqu)’": u"\\1’ "
}
replacements.update(((k.upper(), v.upper()) for k, v in replacements.items()))
def find_lemmata(tokens):
""" Annotates the tokens with lemmata for plural nouns and conjugated verbs,
where each token is a [word, part-of-speech] list.
"""
for token in tokens:
word, pos, lemma = token[0], token[1], token[0]
if pos.startswith(("DT", "PR", "WP")):
lemma = singularize(word, pos=pos)
if pos.startswith(("RB", "IN")) and (word.endswith(("'", u"’")) or word == "du"):
lemma = singularize(word, pos=pos)
if pos.startswith(("JJ",)):
lemma = predicative(word)
if pos == "NNS":
lemma = singularize(word)
if pos.startswith(("VB", "MD")):
lemma = conjugate(word, INFINITIVE) or word
token.append(lemma.lower())
return tokens
class Parser(_Parser):
def find_tokens(self, tokens, **kwargs):
kwargs.setdefault("abbreviations", ABBREVIATIONS)
kwargs.setdefault("replace", replacements)
s = _Parser.find_tokens(self, tokens, **kwargs)
s = [s.replace("&rsquo ;", u"’") if isinstance(s, unicode) else s for s in s]
return s
def find_lemmata(self, tokens, **kwargs):
return find_lemmata(tokens)
def find_tags(self, tokens, **kwargs):
if kwargs.get("tagset") in (PENN, None):
kwargs.setdefault("map", lambda token, tag: (token, tag))
if kwargs.get("tagset") == UNIVERSAL:
kwargs.setdefault("map", lambda token, tag: penntreebank2universal(token, tag))
return _Parser.find_tags(self, tokens, **kwargs)
class Sentiment(_Sentiment):
def load(self, path=None):
_Sentiment.load(self, path)
# Map "précaire" to "precaire" (without diacritics, +1% accuracy).
if not path:
for w, pos in dict.items(self):
w0 = w
if not w.endswith((u"à", u"è", u"é", u"ê", u"ï")):
w = w.replace(u"à", "a")
w = w.replace(u"é", "e")
w = w.replace(u"è", "e")
w = w.replace(u"ê", "e")
w = w.replace(u"ï", "i")
if w != w0:
for pos, (p, s, i) in pos.items():
self.annotate(w, pos, p, s, i)
parser = Parser(
lexicon = os.path.join(MODULE, "fr-lexicon.txt"),
frequency = os.path.join(MODULE, "fr-frequency.txt"),
morphology = os.path.join(MODULE, "fr-morphology.txt"),
context = os.path.join(MODULE, "fr-context.txt"),
default = ("NN", "NNP", "CD"),
language = "fr"
)
lexicon = parser.lexicon # Expose lexicon.
sentiment = Sentiment(
path = os.path.join(MODULE, "fr-sentiment.xml"),
synset = None,
negations = ("n'", "ne", "ni", "non", "pas", "rien", "sans", "aucun", "jamais"),
modifiers = ("RB",),
modifier = lambda w: w.endswith("ment"),
tokenizer = parser.find_tokens,
language = "fr"
)
spelling = Spelling(
path = os.path.join(MODULE, "fr-spelling.txt")
)
def tokenize(s, *args, **kwargs):
""" Returns a list of sentences, where punctuation marks have been split from words.
"""
return parser.find_tokens(s, *args, **kwargs)
def parse(s, *args, **kwargs):
""" Returns a tagged Unicode string.
"""
return parser.parse(s, *args, **kwargs)
def parsetree(s, *args, **kwargs):
""" Returns a parsed Text from the given string.
"""
return Text(parse(s, *args, **kwargs))
def tree(s, token=[WORD, POS, CHUNK, PNP, REL, LEMMA]):
""" Returns a parsed Text from the given parsed string.
"""
return Text(s, token)
def tag(s, tokenize=True, encoding="utf-8", **kwargs):
""" Returns a list of (token, tag)-tuples from the given string.
"""
tags = []
for sentence in parse(s, tokenize, True, False, False, False, encoding, **kwargs).split():
for token in sentence:
tags.append((token[0], token[1]))
return tags
def keywords(s, top=10, **kwargs):
""" Returns a sorted list of keywords in the given string.
"""
return parser.find_keywords(s, **dict({
"frequency": parser.frequency,
"top": top,
"pos": ("NN",),
"ignore": ("rt",)}, **kwargs))
def suggest(w):
""" Returns a list of (word, confidence)-tuples of spelling corrections.
"""
return spelling.suggest(w)
def polarity(s, **kwargs):
""" Returns the sentence polarity (positive/negative) between -1.0 and 1.0.
"""
return sentiment(s, **kwargs)[0]
def subjectivity(s, **kwargs):
""" Returns the sentence subjectivity (objective/subjective) between 0.0 and 1.0.
"""
return sentiment(s, **kwargs)[1]
def positive(s, threshold=0.1, **kwargs):
""" Returns True if the given sentence has a positive sentiment (polarity >= threshold).
"""
return polarity(s, **kwargs) >= threshold
split = tree # Backwards compatibility.
#---------------------------------------------------------------------------------------------------
# python -m pattern.fr xml -s "C'est l'exception qui confirme la règle." -OTCL
if __name__ == "__main__":
commandline(parse) | bsd-3-clause |
surligas/gnuradio | docs/doxygen/doxyxml/generated/index.py | 344 | 1871 | #!/usr/bin/env python
"""
Generated Mon Feb 9 19:08:05 2009 by generateDS.py.
"""
from xml.dom import minidom
import os
import sys
import compound
import indexsuper as supermod
class DoxygenTypeSub(supermod.DoxygenType):
def __init__(self, version=None, compound=None):
supermod.DoxygenType.__init__(self, version, compound)
def find_compounds_and_members(self, details):
"""
Returns a list of all compounds and their members which match details
"""
results = []
for compound in self.compound:
members = compound.find_members(details)
if members:
results.append([compound, members])
else:
if details.match(compound):
results.append([compound, []])
return results
supermod.DoxygenType.subclass = DoxygenTypeSub
# end class DoxygenTypeSub
class CompoundTypeSub(supermod.CompoundType):
def __init__(self, kind=None, refid=None, name='', member=None):
supermod.CompoundType.__init__(self, kind, refid, name, member)
def find_members(self, details):
"""
Returns a list of all members which match details
"""
results = []
for member in self.member:
if details.match(member):
results.append(member)
return results
supermod.CompoundType.subclass = CompoundTypeSub
# end class CompoundTypeSub
class MemberTypeSub(supermod.MemberType):
def __init__(self, kind=None, refid=None, name=''):
supermod.MemberType.__init__(self, kind, refid, name)
supermod.MemberType.subclass = MemberTypeSub
# end class MemberTypeSub
def parse(inFilename):
doc = minidom.parse(inFilename)
rootNode = doc.documentElement
rootObj = supermod.DoxygenType.factory()
rootObj.build(rootNode)
return rootObj
| gpl-3.0 |
ishikawa/modipyd | tests/test_bytecode.py | 1 | 6690 | #!/usr/bin/env python
import unittest
from modipyd import bytecode as bc
from modipyd import HAS_RELATIVE_IMPORTS
from tests import TestCase
class DisassemblerTestCase(TestCase):
def compile(self, src, filename='<string>'):
return compile(src, filename, 'exec')
def compile_scan_imports(self, src, filename='<string>'):
co = self.compile(src, filename)
context = {}
processor = bc.ImportProcessor()
self.assertNotNone(processor)
bc.scan_code(co, processor, context)
return context['imports']
class TestDisassembler25(DisassemblerTestCase):
def test_relative_import_with_modulename(self):
imports = self.compile_scan_imports("from . A import B")
self.assertEqual(1, len(imports))
self.assertEqual('B', imports[0][0])
self.assertEqual('A.B', imports[0][1])
self.assertEqual(1, imports[0][2])
imports = self.compile_scan_imports("from .. A import B")
self.assertEqual(1, len(imports))
self.assertEqual('B', imports[0][0])
self.assertEqual('A.B', imports[0][1])
self.assertEqual(2, imports[0][2])
def test_relative_import_without_modulename(self):
imports = self.compile_scan_imports("from . import A")
self.assertEqual(1, len(imports))
self.assertEqual('A', imports[0][0])
self.assertEqual('A', imports[0][1])
self.assertEqual(1, imports[0][2])
imports = self.compile_scan_imports("from .. import A")
self.assertEqual(1, len(imports))
self.assertEqual('A', imports[0][0])
self.assertEqual('A', imports[0][1])
self.assertEqual(2, imports[0][2])
def test_relative_import_without_modulename_as(self):
imports = self.compile_scan_imports("from .. import A as b")
self.assertEqual(1, len(imports))
self.assertEqual('b', imports[0][0])
self.assertEqual('A', imports[0][1])
self.assertEqual(2, imports[0][2])
def test_future(self):
imports = self.compile_scan_imports(
"from __future__ import absolute_import")
self.assertEqual(1, len(imports))
self.assertEqual('absolute_import', imports[0][0])
self.assertEqual('__future__.absolute_import', imports[0][1])
self.assertEqual(0, imports[0][2])
class TestDisassembler(DisassemblerTestCase):
def test_simple(self):
imports = self.compile_scan_imports("import os")
self.assertEqual(1, len(imports))
self.assertEqual(3, len(imports[0]))
self.assertEqual('os', imports[0][0])
self.assertEqual('os', imports[0][1])
self.assertEqual(-1, imports[0][2])
def test_submodule(self):
imports = self.compile_scan_imports("import os.path")
self.assertEqual(1, len(imports))
self.assertEqual('os.path', imports[0][0])
self.assertEqual('os.path', imports[0][1])
self.assertEqual(-1, imports[0][2])
imports = self.compile_scan_imports("import os.path as os_path")
self.assertEqual(1, len(imports))
self.assertEqual('os_path', imports[0][0])
self.assertEqual('os.path', imports[0][1])
self.assertEqual(-1, imports[0][2])
def test_local_scope(self):
imports = self.compile_scan_imports("""
def import_module():
import os.path""", "<test_local_scope>")
self.assertEqual(1, len(imports))
self.assertEqual('os.path', imports[0][0])
self.assertEqual('os.path', imports[0][1])
self.assertEqual(-1, imports[0][2])
def test_bind_scope(self):
imports = self.compile_scan_imports("""
def fn():
import fnmatch
def ignore(filename):
if fnmatch.fnmatch(filename, '*'):
pass
""", "<test_local_scope>")
self.assertEqual(1, len(imports))
self.assertEqual('fnmatch', imports[0][0])
self.assertEqual('fnmatch', imports[0][1])
self.assertEqual(-1, imports[0][2])
def test_multiple(self):
imports = self.compile_scan_imports("import os, sys as sys_mod")
self.assertEqual(2, len(imports))
self.assertEqual('os', imports[0][0])
self.assertEqual('os', imports[0][1])
self.assertEqual(-1, imports[0][2])
self.assertEqual('sys_mod', imports[1][0])
self.assertEqual('sys', imports[1][1])
self.assertEqual(-1, imports[1][2])
def test_fromlist(self):
imports = self.compile_scan_imports("from os import path")
self.assertEqual(1, len(imports))
self.assertEqual('path', imports[0][0])
self.assertEqual('os.path', imports[0][1])
self.assertEqual(-1, imports[0][2])
imports = self.compile_scan_imports("from os.path import join")
self.assertEqual(1, len(imports))
self.assertEqual('join', imports[0][0])
self.assertEqual('os.path.join', imports[0][1])
self.assertEqual(-1, imports[0][2])
imports = self.compile_scan_imports("from os.path import dirname, join")
self.assertEqual(2, len(imports))
self.assertEqual('dirname', imports[0][0])
self.assertEqual('os.path.dirname', imports[0][1])
self.assertEqual('join', imports[1][0])
self.assertEqual('os.path.join', imports[1][1])
self.assertEqual(-1, imports[0][2])
imports = self.compile_scan_imports(
"from os.path import dirname as d, join")
self.assertEqual(2, len(imports))
self.assertEqual('d', imports[0][0])
self.assertEqual('os.path.dirname', imports[0][1])
self.assertEqual(-1, imports[0][2])
self.assertEqual('join', imports[1][0])
self.assertEqual('os.path.join', imports[1][1])
self.assertEqual(-1, imports[1][2])
def test_star(self):
imports = self.compile_scan_imports("from os.path import *")
# from ... import * is currently not fully supported
self.assertEqual(1, len(imports))
self.assertEqual('*', imports[0][0])
self.assertEqual('os.path.*', imports[0][1])
self.assertEqual(-1, imports[0][2])
def test_django_contrib_gis_tests_test_gdal_geom(self):
imports = self.compile_scan_imports("""
from django.contrib.gis.tests.geometries import *
class OGRGeomTest(unittest.TestCase):
pass
""")
# from ... import * is currently not fully supported
self.assertEqual(1, len(imports))
self.assertEqual('*', imports[0][0])
self.assertEqual('django.contrib.gis.tests.geometries.*', imports[0][1])
self.assertEqual(-1, imports[0][2])
if not HAS_RELATIVE_IMPORTS:
del TestDisassembler25
if __name__ == '__main__':
unittest.main()
| mit |
TuSimple/mxnet | tools/bandwidth/test_measure.py | 46 | 1863 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
test measure.py
"""
from measure import run
import subprocess
import logging
def get_gpus():
try:
re = subprocess.check_output(["nvidia-smi", "-L"], universal_newlines=True)
except OSError:
return ''
gpus = [i for i in re.split('\n') if 'GPU' in i]
return ','.join([str(i) for i in range(len(gpus))])
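# Illustrative behaviour (nvidia-smi output format is an assumption): two
# devices listed as "GPU 0: ..." and "GPU 1: ..." yield '0,1'; if nvidia-smi
# is missing, get_gpus() returns ''.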
def test_measure(**kwargs):
logging.info(kwargs)
res = run(image_shape='3,224,224', num_classes=1000,
num_layers=50, disp_batches=2, num_batches=2, test_results=1, **kwargs)
assert len(res) == 1
assert res[0].error < 1e-4
if __name__ == '__main__':
gpus = get_gpus()
assert gpus != ''
test_measure(gpus=gpus, network='alexnet', optimizer=None, kv_store='device')
test_measure(gpus=gpus, network='resnet', optimizer='sgd', kv_store='device')
test_measure(gpus=gpus, network='inception-bn', optimizer=None, kv_store='local')
test_measure(gpus=gpus, network='resnet', optimizer=None, kv_store='local')
test_measure(gpus=gpus, network='resnet', optimizer='sgd', kv_store='local')
| apache-2.0 |
katstalk/android_external_chromium_org | tools/metrics/histograms/diffutil.py | 57 | 1626 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions for prompting user if changes automatically applied to some
user-managed files are correct.
"""
import logging
import os
import webbrowser
from difflib import HtmlDiff
from tempfile import NamedTemporaryFile
def PromptUserToAcceptDiff(old_text, new_text, prompt):
"""Displays a difference in two strings (old and new file contents) to the
user and asks whether the new version is acceptable.
Args:
old_text: A string containing old file contents.
new_text: A string containing new file contents.
prompt: Text that should be displayed to the user, asking whether the new
file contents should be accepted.
Returns:
True if the user accepted the changes or there were no changes, False otherwise.
"""
logging.info('Computing diff...')
if old_text == new_text:
logging.info('No changes detected')
return True
html_diff = HtmlDiff(wrapcolumn=80).make_file(
old_text.splitlines(), new_text.splitlines(), fromdesc='Original',
todesc='Updated', context=True, numlines=5)
temp = NamedTemporaryFile(suffix='.html', delete=False)
try:
temp.write(html_diff)
temp.close() # Close the file so the browser process can access it.
webbrowser.open('file://' + temp.name)
print prompt
response = raw_input('(Y/n): ').strip().lower()
finally:
temp.close() # May be called on already closed file.
os.remove(temp.name)
return response == 'y' or response == ''
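# Illustrative usage (the surrounding calls are assumptions, not part of this
# module):
#   if PromptUserToAcceptDiff(old_xml, new_xml, 'Is the updated XML correct?'):
#     with open('histograms.xml', 'w') as f:
#       f.write(new_xml)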
| bsd-3-clause |
skyline75489/shadowsocks | shadowsocks/server.py | 652 | 4836 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, \
asyncdns, manager
def main():
shell.check_python()
config = shell.get_config(False)
daemon.daemon_exec(config)
if config['port_password']:
if config['password']:
logging.warn('warning: port_password should not be used with '
'server_port and password. server_port and password '
'will be ignored')
else:
config['port_password'] = {}
server_port = config.get('server_port', None)
if server_port:
if type(server_port) == list:
for a_server_port in server_port:
config['port_password'][a_server_port] = config['password']
else:
config['port_password'][str(server_port)] = config['password']
if config.get('manager_address', 0):
logging.info('entering manager mode')
manager.run(config)
return
tcp_servers = []
udp_servers = []
if 'dns_server' in config: # allow override settings in resolv.conf
dns_resolver = asyncdns.DNSResolver(config['dns_server'])
else:
dns_resolver = asyncdns.DNSResolver()
port_password = config['port_password']
del config['port_password']
for port, password in port_password.items():
a_config = config.copy()
a_config['server_port'] = int(port)
a_config['password'] = password
logging.info("starting server at %s:%d" %
(a_config['server'], int(port)))
tcp_servers.append(tcprelay.TCPRelay(a_config, dns_resolver, False))
udp_servers.append(udprelay.UDPRelay(a_config, dns_resolver, False))
def run_server():
def child_handler(signum, _):
logging.warn('received SIGQUIT, doing graceful shutting down..')
list(map(lambda s: s.close(next_tick=True),
tcp_servers + udp_servers))
signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM),
child_handler)
def int_handler(signum, _):
sys.exit(1)
signal.signal(signal.SIGINT, int_handler)
try:
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers))
daemon.set_user(config.get('user', None))
loop.run()
except Exception as e:
shell.print_exception(e)
sys.exit(1)
if int(config['workers']) > 1:
if os.name == 'posix':
children = []
is_child = False
for i in range(0, int(config['workers'])):
r = os.fork()
if r == 0:
logging.info('worker started')
is_child = True
run_server()
break
else:
children.append(r)
if not is_child:
def handler(signum, _):
for pid in children:
try:
os.kill(pid, signum)
os.waitpid(pid, 0)
except OSError: # child may have already exited
pass
sys.exit()
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGINT, handler)
# master
for a_tcp_server in tcp_servers:
a_tcp_server.close()
for a_udp_server in udp_servers:
a_udp_server.close()
dns_resolver.close()
for child in children:
os.waitpid(child, 0)
else:
logging.warn('worker is only available on Unix/Linux')
run_server()
else:
run_server()
if __name__ == '__main__':
main()
| apache-2.0 |
Hellowlol/HTPC-Manager | libs/formencode/variabledecode.py | 11 | 5386 | """
Takes GET/POST variable dictionary, as might be returned by ``cgi``,
and turns them into lists and dictionaries.
Keys (variable names) can have subkeys, with a ``.`` and
can be numbered with ``-``, like ``a.b-3=something`` means that
the value ``a`` is a dictionary with a key ``b``, and ``b``
is a list, the third(-ish) element with the value ``something``.
Numbers are used to sort, missing numbers are ignored.
This doesn't deal with multiple keys, like in a query string of
``id=10&id=20``, which returns something like ``{'id': ['10',
'20']}``. That's left to someplace else to interpret. If you want to
represent lists in this model, you use indexes, and the lists are
explicitly ordered.
If you want to change the character that determines when to split for
a dict or list, both variable_decode and variable_encode take dict_char
and list_char keyword args. For example, to have the GET/POST variables,
``a_1=something`` as a list, you would use a list_char='_'.
"""
import api
__all__ = ['variable_decode', 'variable_encode', 'NestedVariables']
def variable_decode(d, dict_char='.', list_char='-'):
"""
Decode the flat dictionary d into a nested structure.
"""
result = {}
dicts_to_sort = {}
known_lengths = {}
for key, value in d.items():
keys = key.split(dict_char)
new_keys = []
was_repetition_count = False
for key in keys:
if key.endswith('--repetitions'):
key = key[:-len('--repetitions')]
new_keys.append(key)
known_lengths[tuple(new_keys)] = int(value)
was_repetition_count = True
break
elif list_char in key:
key, index = key.split(list_char)
new_keys.append(key)
dicts_to_sort[tuple(new_keys)] = 1
new_keys.append(int(index))
else:
new_keys.append(key)
if was_repetition_count:
continue
place = result
for i in range(len(new_keys)-1):
try:
if not isinstance(place[new_keys[i]], dict):
place[new_keys[i]] = {None: place[new_keys[i]]}
place = place[new_keys[i]]
except KeyError:
place[new_keys[i]] = {}
place = place[new_keys[i]]
if new_keys[-1] in place:
if isinstance(place[new_keys[-1]], dict):
place[new_keys[-1]][None] = value
elif isinstance(place[new_keys[-1]], list):
if isinstance(value, list):
place[new_keys[-1]].extend(value)
else:
place[new_keys[-1]].append(value)
else:
if isinstance(value, list):
place[new_keys[-1]] = [place[new_keys[-1]]]
place[new_keys[-1]].extend(value)
else:
place[new_keys[-1]] = [place[new_keys[-1]], value]
else:
place[new_keys[-1]] = value
try:
to_sort_keys = sorted(dicts_to_sort, key=len, reverse=True)
except NameError: # Python < 2.4
to_sort_keys = dicts_to_sort.keys()
to_sort_keys.sort(lambda a, b: -cmp(len(a), len(b)))
for key in to_sort_keys:
to_sort = result
source = None
last_key = None
for sub_key in key:
source = to_sort
last_key = sub_key
to_sort = to_sort[sub_key]
if None in to_sort:
noneVals = [(0, x) for x in to_sort.pop(None)]
noneVals.extend(to_sort.items())
to_sort = noneVals
else:
to_sort = to_sort.items()
to_sort.sort()
to_sort = [v for k, v in to_sort]
if key in known_lengths:
if len(to_sort) < known_lengths[key]:
to_sort.extend(['']*(known_lengths[key] - len(to_sort)))
source[last_key] = to_sort
return result
def variable_encode(d, prepend='', result=None, add_repetitions=True,
dict_char='.', list_char='-'):
"""
Encode a nested structure into a flat dictionary.
"""
if result is None:
result = {}
if isinstance(d, dict):
for key, value in d.items():
if key is None:
name = prepend
elif not prepend:
name = key
else:
name = "%s%s%s" % (prepend, dict_char, key)
variable_encode(value, name, result, add_repetitions,
dict_char=dict_char, list_char=list_char)
elif isinstance(d, list):
for i in range(len(d)):
variable_encode(d[i], "%s%s%i" % (prepend, list_char, i), result,
add_repetitions, dict_char=dict_char, list_char=list_char)
if add_repetitions:
if prepend:
repName = '%s--repetitions' % prepend
else:
repName = '__repetitions__'
result[repName] = str(len(d))
else:
result[prepend] = d
return result
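# Illustrative inverse mapping (repetition counts are emitted by default):
#   variable_encode({'a': {'b': ['x', 'y']}})
#   => {'a.b-0': 'x', 'a.b-1': 'y', 'a.b--repetitions': '2'}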
class NestedVariables(api.FancyValidator):
def _to_python(self, value, state):
return variable_decode(value)
def _from_python(self, value, state):
return variable_encode(value)
def empty_value(self, value):
return {}
| mit |
dirkcuys/save4life | ussd/transactions.py | 1 | 2954 | from django.utils import timezone
from .models import Transaction
from .tasks import issue_airtime
from datetime import datetime
class TransactionError(Exception):
pass
def award_joining_bonus(user):
transaction = Transaction.objects.create(
user=user,
action=Transaction.REGISTRATION_BONUS,
amount=5, # TODO store joining bonus somewhere
reference_code='joining-bonus'
)
return transaction
def award_streak(user, weeks, amount):
return Transaction.objects.create(
user=user,
action=Transaction.REWARD,
amount=amount,
reference_code='streak-{0}'.format(weeks)
)
def award_quiz_prize(user, quiz):
return Transaction.objects.create(
user=user,
action=Transaction.QUIZ_PRIZE,
amount=user.balance(), # TODO should this be limited to an upper amount?
reference_code='quiz-{0}'.format(quiz.pk)
)
def redeem_voucher(voucher, user, savings_amount):
# make sure voucher wasn't already redeemed or revoked!!
if voucher.redeemed_at or voucher.revoked_at:
raise TransactionError("Voucher not valid")
if savings_amount > voucher.amount or savings_amount < 0:
raise TransactionError('Invalid savings amount')
voucher.redeemed_at = timezone.now()
voucher.redeemed_by = user
voucher.save()
# Credit user balance with savings amount
Transaction.objects.create(
user=user,
action=Transaction.SAVING,
amount=savings_amount,
reference_code='savings',
voucher=voucher
)
# TODO - change this to create a transaction that will be processed later
# Credit airtime with remainder
airtime_amount = voucher.amount - savings_amount
airtime_transaction = Transaction.objects.create(
user=user,
action=Transaction.AIRTIME,
amount=airtime_amount,
reference_code='', # TODO
voucher=voucher
)
issue_airtime.delay(airtime_transaction)
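# Illustrative split (amounts are assumptions): redeeming a 50-unit voucher
# with savings_amount=30 records a 30-unit SAVING transaction and queues a
# 20-unit AIRTIME transaction for asynchronous delivery.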
def withdraw_savings(user, amount):
if not amount or amount < 5 or amount > user.balance():
raise TransactionError('incorrect amount')
# Stop user from withdrawing an amount that would result in
# positive balance less than 5
resulting_balance = user.balance() - amount
if 0 < resulting_balance < 5:
raise TransactionError('resulting balance less than minimum payable amount')
transaction = Transaction.objects.create(
user=user,
action=Transaction.WITHDRAWAL,
amount=-amount,
reference_code='' # TODO
)
# TODO should we fire off async airtime operation or should we run
# a task that matches WITHDRAWAL transactions agains AIRTIME transactions?
airtime_transaction = Transaction.objects.create(
user=user,
action=Transaction.AIRTIME,
amount=amount,
reference_code='' #TODO
)
issue_airtime.delay(airtime_transaction)
| bsd-3-clause |
guschmue/tensorflow | tensorflow/contrib/distributions/python/ops/mixture.py | 13 | 18780 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Mixture distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution_util as distribution_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import categorical
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class Mixture(distribution.Distribution):
"""Mixture distribution.
The `Mixture` object implements batched mixture distributions.
The mixture model is defined by a `Categorical` distribution (the mixture)
and a python list of `Distribution` objects.
Methods supported include `log_prob`, `prob`, `mean`, `sample`, and
`entropy_lower_bound`.
#### Examples
```python
# Create a mixture of two Gaussians:
ds = tf.contrib.distributions
mix = 0.3
bimix_gauss = ds.Mixture(
cat=ds.Categorical(probs=[mix, 1.-mix]),
components=[
ds.Normal(loc=-1., scale=0.1),
ds.Normal(loc=+1., scale=0.5),
])
# Plot the PDF.
import matplotlib.pyplot as plt
x = tf.linspace(-2., 3., int(1e4)).eval()
plt.plot(x, bimix_gauss.prob(x).eval());
```
"""
def __init__(self,
cat,
components,
validate_args=False,
allow_nan_stats=True,
name="Mixture"):
"""Initialize a Mixture distribution.
A `Mixture` is defined by a `Categorical` (`cat`, representing the
mixture probabilities) and a list of `Distribution` objects
all having matching dtype, batch shape, event shape, and continuity
properties (the components).
The `num_classes` of `cat` must be possible to infer at graph construction
time and match `len(components)`.
Args:
cat: A `Categorical` distribution instance, representing the probabilities
of `components`.
components: A list or tuple of `Distribution` instances.
Each instance must have the same type, be defined on the same domain,
and have matching `event_shape` and `batch_shape`.
validate_args: Python `bool`, default `False`. If `True`, raise a runtime
error if batch or event ranks are inconsistent between cat and any of
the distributions. This is only checked if the ranks cannot be
determined statically at graph construction time.
allow_nan_stats: Boolean, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member. If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: A name for this distribution (optional).
Raises:
TypeError: If cat is not a `Categorical`, or `components` is not
a list or tuple, or the elements of `components` are not
instances of `Distribution`, or do not have matching `dtype`.
ValueError: If `components` is an empty list or tuple, or its
elements do not have a statically known event rank.
If `cat.num_classes` cannot be inferred at graph creation time,
or the constant value of `cat.num_classes` is not equal to
`len(components)`, or all `components` and `cat` do not have
matching static batch shapes, or all components do not
have matching static event shapes.
"""
parameters = locals()
if not isinstance(cat, categorical.Categorical):
raise TypeError("cat must be a Categorical distribution, but saw: %s" %
cat)
if not components:
raise ValueError("components must be a non-empty list or tuple")
if not isinstance(components, (list, tuple)):
raise TypeError("components must be a list or tuple, but saw: %s" %
components)
if not all(isinstance(c, distribution.Distribution) for c in components):
raise TypeError(
"all entries in components must be Distribution instances"
" but saw: %s" % components)
dtype = components[0].dtype
if not all(d.dtype == dtype for d in components):
raise TypeError("All components must have the same dtype, but saw "
"dtypes: %s" % [(d.name, d.dtype) for d in components])
static_event_shape = components[0].event_shape
static_batch_shape = cat.batch_shape
for d in components:
static_event_shape = static_event_shape.merge_with(d.event_shape)
static_batch_shape = static_batch_shape.merge_with(d.batch_shape)
if static_event_shape.ndims is None:
raise ValueError(
"Expected to know rank(event_shape) from components, but "
"none of the components provide a static number of ndims")
# Ensure that all batch and event ndims are consistent.
with ops.name_scope(name, values=[cat.logits]):
num_components = cat.event_size
static_num_components = tensor_util.constant_value(num_components)
if static_num_components is None:
raise ValueError(
"Could not infer number of classes from cat and unable "
"to compare this value to the number of components passed in.")
# Possibly convert from numpy 0-D array.
static_num_components = int(static_num_components)
if static_num_components != len(components):
raise ValueError("cat.num_classes != len(components): %d vs. %d" %
(static_num_components, len(components)))
cat_batch_shape = cat.batch_shape_tensor()
cat_batch_rank = array_ops.size(cat_batch_shape)
if validate_args:
batch_shapes = [d.batch_shape_tensor() for d in components]
batch_ranks = [array_ops.size(bs) for bs in batch_shapes]
check_message = ("components[%d] batch shape must match cat "
"batch shape")
self._assertions = [
check_ops.assert_equal(
cat_batch_rank, batch_ranks[di], message=check_message % di)
for di in range(len(components))
]
self._assertions += [
check_ops.assert_equal(
cat_batch_shape, batch_shapes[di], message=check_message % di)
for di in range(len(components))
]
else:
self._assertions = []
self._cat = cat
self._components = list(components)
self._num_components = static_num_components
self._static_event_shape = static_event_shape
self._static_batch_shape = static_batch_shape
    # We let the Mixture distribution access _graph_parents since it's
    # arguably more like a base class.
graph_parents = self._cat._graph_parents # pylint: disable=protected-access
for c in self._components:
graph_parents += c._graph_parents # pylint: disable=protected-access
super(Mixture, self).__init__(
dtype=dtype,
reparameterization_type=distribution.NOT_REPARAMETERIZED,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
parameters=parameters,
graph_parents=graph_parents,
name=name)
@property
def cat(self):
return self._cat
@property
def components(self):
return self._components
@property
def num_components(self):
return self._num_components
def _batch_shape_tensor(self):
return self._cat.batch_shape_tensor()
def _batch_shape(self):
return self._static_batch_shape
def _event_shape_tensor(self):
return self._components[0].event_shape_tensor()
def _event_shape(self):
return self._static_event_shape
def _expand_to_event_rank(self, x):
"""Expand the rank of x up to static_event_rank times for broadcasting.
The static event rank was checked to not be None at construction time.
Args:
x: A tensor to expand.
Returns:
The expanded tensor.
"""
expanded_x = x
for _ in range(self.event_shape.ndims):
expanded_x = array_ops.expand_dims(expanded_x, -1)
return expanded_x
def _mean(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
partial_means = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_means)
]
# These should all be the same shape by virtue of matching
# batch_shape and event_shape.
return math_ops.add_n(partial_means)
def _stddev(self):
with ops.control_dependencies(self._assertions):
distribution_means = [d.mean() for d in self.components]
distribution_devs = [d.stddev() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
stacked_means = array_ops.stack(distribution_means, axis=-1)
stacked_devs = array_ops.stack(distribution_devs, axis=-1)
cat_probs = [self._expand_to_event_rank(c_p) for c_p in cat_probs]
broadcasted_cat_probs = (array_ops.stack(cat_probs, axis=-1) *
array_ops.ones_like(stacked_means))
batched_dev = distribution_utils.mixture_stddev(
array_ops.reshape(broadcasted_cat_probs, [-1, len(self.components)]),
array_ops.reshape(stacked_means, [-1, len(self.components)]),
array_ops.reshape(stacked_devs, [-1, len(self.components)]))
# I.e. re-shape to list(batch_shape) + list(event_shape).
return array_ops.reshape(batched_dev,
array_ops.shape(broadcasted_cat_probs)[:-1])
def _log_prob(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_probs = [d.log_prob(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
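      # Mixture density: log p(x) = logsumexp_i(log c_i + log p_i(x)),
      # computed below by pairing each component's log-prob with its mixture
      # weight and reducing with logsumexp for numerical stability.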
final_log_probs = [
cat_lp + d_lp
for (cat_lp, d_lp) in zip(cat_log_probs, distribution_log_probs)
]
concat_log_probs = array_ops.stack(final_log_probs, 0)
log_sum_exp = math_ops.reduce_logsumexp(concat_log_probs, [0])
return log_sum_exp
def _log_cdf(self, x):
with ops.control_dependencies(self._assertions):
x = ops.convert_to_tensor(x, name="x")
distribution_log_cdfs = [d.log_cdf(x) for d in self.components]
cat_log_probs = self._cat_probs(log_probs=True)
final_log_cdfs = [
cat_lp + d_lcdf
for (cat_lp, d_lcdf) in zip(cat_log_probs, distribution_log_cdfs)
]
concatted_log_cdfs = array_ops.stack(final_log_cdfs, axis=0)
mixture_log_cdf = math_ops.reduce_logsumexp(concatted_log_cdfs, [0])
return mixture_log_cdf
def _sample_n(self, n, seed=None):
with ops.control_dependencies(self._assertions):
n = ops.convert_to_tensor(n, name="n")
static_n = tensor_util.constant_value(n)
n = int(static_n) if static_n is not None else n
cat_samples = self.cat.sample(n, seed=seed)
static_samples_shape = cat_samples.get_shape()
if static_samples_shape.is_fully_defined():
samples_shape = static_samples_shape.as_list()
samples_size = static_samples_shape.num_elements()
else:
samples_shape = array_ops.shape(cat_samples)
samples_size = array_ops.size(cat_samples)
static_batch_shape = self.batch_shape
if static_batch_shape.is_fully_defined():
batch_shape = static_batch_shape.as_list()
batch_size = static_batch_shape.num_elements()
else:
batch_shape = self.batch_shape_tensor()
batch_size = math_ops.reduce_prod(batch_shape)
static_event_shape = self.event_shape
if static_event_shape.is_fully_defined():
event_shape = np.array(static_event_shape.as_list(), dtype=np.int32)
else:
event_shape = self.event_shape_tensor()
# Get indices into the raw cat sampling tensor. We will
# need these to stitch sample values back out after sampling
# within the component partitions.
samples_raw_indices = array_ops.reshape(
math_ops.range(0, samples_size), samples_shape)
# Partition the raw indices so that we can use
# dynamic_stitch later to reconstruct the samples from the
# known partitions.
partitioned_samples_indices = data_flow_ops.dynamic_partition(
data=samples_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
# Copy the batch indices n times, as we will need to know
# these to pull out the appropriate rows within the
# component partitions.
batch_raw_indices = array_ops.reshape(
array_ops.tile(math_ops.range(0, batch_size), [n]), samples_shape)
# Explanation of the dynamic partitioning below:
      # batch indices are, e.g., [0, 1, 0, 1, 0, 1]
# Suppose partitions are:
# [1 1 0 0 1 1]
# After partitioning, batch indices are cut as:
# [batch_indices[x] for x in 2, 3]
# [batch_indices[x] for x in 0, 1, 4, 5]
# i.e.
# [1 1] and [0 0 0 0]
# Now we sample n=2 from part 0 and n=4 from part 1.
# For part 0 we want samples from batch entries 1, 1 (samples 0, 1),
# and for part 1 we want samples from batch entries 0, 0, 0, 0
# (samples 0, 1, 2, 3).
partitioned_batch_indices = data_flow_ops.dynamic_partition(
data=batch_raw_indices,
partitions=cat_samples,
num_partitions=self.num_components)
samples_class = [None for _ in range(self.num_components)]
for c in range(self.num_components):
n_class = array_ops.size(partitioned_samples_indices[c])
seed = distribution_util.gen_new_seed(seed, "mixture")
samples_class_c = self.components[c].sample(n_class, seed=seed)
# Pull out the correct batch entries from each index.
# To do this, we may have to flatten the batch shape.
# For sample s, batch element b of component c, we get the
# partitioned batch indices from
# partitioned_batch_indices[c]; and shift each element by
# the sample index. The final lookup can be thought of as
# a matrix gather along locations (s, b) in
# samples_class_c where the n_class rows correspond to
# samples within this component and the batch_size columns
# correspond to batch elements within the component.
#
# Thus the lookup index is
# lookup[c, i] = batch_size * s[i] + b[c, i]
# for i = 0 ... n_class[c] - 1.
lookup_partitioned_batch_indices = (
batch_size * math_ops.range(n_class) +
partitioned_batch_indices[c])
samples_class_c = array_ops.reshape(
samples_class_c,
array_ops.concat([[n_class * batch_size], event_shape], 0))
samples_class_c = array_ops.gather(
samples_class_c, lookup_partitioned_batch_indices,
name="samples_class_c_gather")
samples_class[c] = samples_class_c
# Stitch back together the samples across the components.
lhs_flat_ret = data_flow_ops.dynamic_stitch(
indices=partitioned_samples_indices, data=samples_class)
# Reshape back to proper sample, batch, and event shape.
ret = array_ops.reshape(lhs_flat_ret,
array_ops.concat([samples_shape,
self.event_shape_tensor()], 0))
ret.set_shape(
tensor_shape.TensorShape(static_samples_shape).concatenate(
self.event_shape))
return ret
def entropy_lower_bound(self, name="entropy_lower_bound"):
r"""A lower bound on the entropy of this mixture model.
The bound below is not always very tight, and its usefulness depends
on the mixture probabilities and the components in use.
A lower bound is useful for ELBO when the `Mixture` is the variational
distribution:
\\(
    \log p(x) \geq ELBO = \int q(z) \log p(x, z) dz + H[q]
\\)
    where \\( p \\) is the prior distribution, \\( q \\) is the variational
    distribution, and \\( H[q] \\) is the entropy of \\( q \\). If there is
    a lower bound
\\( G[q] \\) such that \\( H[q] \geq G[q] \\) then it can be used in
place of \\( H[q] \\).
For a mixture of distributions \\( q(Z) = \sum_i c_i q_i(Z) \\) with
\\( \sum_i c_i = 1 \\), by the concavity of \\( f(x) = -x \log x \\), a
simple lower bound is:
\\(
\begin{align}
    H[q] & = - \int q(z) \log q(z) dz \\\\
       & = - \int (\sum_i c_i q_i(z)) \log(\sum_i c_i q_i(z)) dz \\\\
       & \geq - \sum_i c_i \int q_i(z) \log q_i(z) dz \\\\
& = \sum_i c_i H[q_i]
\end{align}
\\)
This is the term we calculate below for \\( G[q] \\).
Args:
name: A name for this operation (optional).
Returns:
A lower bound on the Mixture's entropy.
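    A sketch, not from the original docs, reusing the `bimix_gauss` mixture
    defined in the class docstring:
    ```python
    bound = bimix_gauss.entropy_lower_bound()
    # Here G[q] = 0.3 * H[Normal(-1., 0.1)] + 0.7 * H[Normal(1., 0.5)].
    ```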
"""
with self._name_scope(name, values=[self.cat.logits]):
with ops.control_dependencies(self._assertions):
distribution_entropies = [d.entropy() for d in self.components]
cat_probs = self._cat_probs(log_probs=False)
partial_entropies = [
c_p * m for (c_p, m) in zip(cat_probs, distribution_entropies)
]
# These are all the same shape by virtue of matching batch_shape
return math_ops.add_n(partial_entropies)
def _cat_probs(self, log_probs):
"""Get a list of num_components batchwise probabilities."""
which_softmax = nn_ops.log_softmax if log_probs else nn_ops.softmax
cat_probs = which_softmax(self.cat.logits)
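    # E.g. logits of shape [batch, K] yield a length-K list of [batch]-shaped
    # (log-)probability tensors, one per mixture component.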
cat_probs = array_ops.unstack(cat_probs, num=self.num_components, axis=-1)
return cat_probs
| apache-2.0 |
dstufft/html5lib-python | html5lib/html5parser.py | 60 | 117335 | from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
from .constants import E
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
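    """Parse a string or file-like object into a tree fragment"""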
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
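# Usage sketch (an illustrative addition, not part of the original module;
# assumes this package is importable as `html5lib`):
#
#     import html5lib
#     document = html5lib.parse("<p>Hello<table>")      # full document tree
#     fragment = html5lib.parseFragment("<b>hi</b>")    # contents of a <div>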
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
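# Usage sketch (illustrative; `trace` is a hypothetical decorator). Using the
# factory above as a metaclass wraps every plain function attribute of the
# class at creation time, which is exactly how getPhases() below applies its
# `log` decorator in debug mode:
#
#     class TracedPhase(with_metaclass(method_decorator_metaclass(trace))):
#         def processStartTag(self, token):  # automatically wrapped by trace
#             ...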
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
        This may be replaced with, e.g., a sanitizer that converts some tags
        to text
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
@property
def documentEncoding(self):
"""The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.
"""
if not hasattr(self, 'tokenizer'):
return None
return self.tokenizer.stream.charEncoding[0]
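    # Per the spec, <annotation-xml> in the MathML namespace is an HTML
    # integration point only when its encoding attribute is (case-
    # insensitively) "text/html" or "application/xhtml+xml"; every other
    # integration point comes from the static htmlIntegrationPointElements
    # table.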
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError(E[errorcode] % datavars)
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl": "definitionURL"}
for k, v in replacements.items():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
for originalName in list(token["data"].keys()):
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = adjustForeignAttributesMap
for originalName in token["data"].keys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
                info = {"type": type_names[token['type']]}
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
            self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
                        # to be encoded and, as an ASCII superset, it works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
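            # "Noah's Ark" clause: at most three matching elements may sit
            # between the last marker and the end of the list; appending a
            # fourth below evicts the earliest match.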
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
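            # Canonical trigger: misnested markup such as "<b>1<p>2</b>3",
            # where the </b> end tag must split formatting across the block;
            # the <b> element is cloned and its content reparented ("adopted").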
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster
# parent the lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
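        # Editor's note (illustration, not original code): the spec's classic
        # input for this algorithm is misnested markup such as
        #   <b>1<p>2</b>3</p>
        # where </b> finds <p> as the furthest block, so <b> is cloned and the
        # clone re-wraps the <p>'s content, yielding
        #   <b>1</b><p><b>2</b>3</p>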
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
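        # Editor's note (illustration, not original code): insertFromTable
        # enables "foster parenting" -- character data that would illegally
        # land inside <table> is re-parented to just before the table, so
        #   <table>oops<tr><td>x</td></tr></table>
        # produces "oops" as a sibling immediately preceding the <table>.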
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
# It also closes </optgroup>
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def endTagHtml(self, name):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
                # If we're not in innerHTML mode and the current node is not a
                # "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
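# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original html5lib module): a minimal
# self-check of the impliedTagToken() helper defined above, which the tree
# construction phases use to fabricate tokens (e.g. an implied </caption> or
# <tbody>) and feed them back through their own dispatch tables. Guarded so
# importing the parser is unaffected.
if __name__ == "__main__":
    _tok = impliedTagToken("tbody", "StartTag")
    assert _tok["name"] == "tbody"
    assert _tok["data"] == {} and _tok["selfClosing"] is False
    print("impliedTagToken sketch: %r" % (_tok,))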
| mit |
brianzelip/militarization | css/basscss/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/qbasic.py | 48 | 6456 | # -*- coding: utf-8 -*-
"""
pygments.lexers.qbasic
~~~~~~~~~~~~~~~~~~~~~~
Simple lexer for Microsoft QBasic source code.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Text, Name, Comment, String, Keyword, Punctuation, \
Number, Operator
__all__ = ['QBasicLexer']
class QBasicLexer(RegexLexer):
"""
For
`QBasic <http://en.wikipedia.org/wiki/QBasic>`_
source code.
"""
name = 'QBasic'
aliases = ['qbasic', 'basic']
filenames = ['*.BAS', '*.bas']
mimetypes = ['text/basic']
declarations = ['DATA', 'LET']
functions = [
'ABS', 'ASC', 'ATN', 'CDBL', 'CHR$', 'CINT', 'CLNG',
'COMMAND$', 'COS', 'CSNG', 'CSRLIN', 'CVD', 'CVDMBF', 'CVI',
'CVL', 'CVS', 'CVSMBF', 'DATE$', 'ENVIRON$', 'EOF', 'ERDEV',
'ERDEV$', 'ERL', 'ERR', 'EXP', 'FILEATTR', 'FIX', 'FRE',
'FREEFILE', 'HEX$', 'INKEY$', 'INP', 'INPUT$', 'INSTR', 'INT',
'IOCTL$', 'LBOUND', 'LCASE$', 'LEFT$', 'LEN', 'LOC', 'LOF',
'LOG', 'LPOS', 'LTRIM$', 'MID$', 'MKD$', 'MKDMBF$', 'MKI$',
'MKL$', 'MKS$', 'MKSMBF$', 'OCT$', 'PEEK', 'PEN', 'PLAY',
'PMAP', 'POINT', 'POS', 'RIGHT$', 'RND', 'RTRIM$', 'SADD',
'SCREEN', 'SEEK', 'SETMEM', 'SGN', 'SIN', 'SPACE$', 'SPC',
'SQR', 'STICK', 'STR$', 'STRIG', 'STRING$', 'TAB', 'TAN',
'TIME$', 'TIMER', 'UBOUND', 'UCASE$', 'VAL', 'VARPTR',
'VARPTR$', 'VARSEG'
]
metacommands = ['$DYNAMIC', '$INCLUDE', '$STATIC']
operators = ['AND', 'EQV', 'IMP', 'NOT', 'OR', 'XOR']
statements = [
'BEEP', 'BLOAD', 'BSAVE', 'CALL', 'CALL ABSOLUTE',
'CALL INTERRUPT', 'CALLS', 'CHAIN', 'CHDIR', 'CIRCLE', 'CLEAR',
'CLOSE', 'CLS', 'COLOR', 'COM', 'COMMON', 'CONST', 'DATA',
'DATE$', 'DECLARE', 'DEF FN', 'DEF SEG', 'DEFDBL', 'DEFINT',
'DEFLNG', 'DEFSNG', 'DEFSTR', 'DEF', 'DIM', 'DO', 'LOOP',
'DRAW', 'END', 'ENVIRON', 'ERASE', 'ERROR', 'EXIT', 'FIELD',
'FILES', 'FOR', 'NEXT', 'FUNCTION', 'GET', 'GOSUB', 'GOTO',
'IF', 'THEN', 'INPUT', 'INPUT #', 'IOCTL', 'KEY', 'KEY',
'KILL', 'LET', 'LINE', 'LINE INPUT', 'LINE INPUT #', 'LOCATE',
'LOCK', 'UNLOCK', 'LPRINT', 'LSET', 'MID$', 'MKDIR', 'NAME',
'ON COM', 'ON ERROR', 'ON KEY', 'ON PEN', 'ON PLAY',
'ON STRIG', 'ON TIMER', 'ON UEVENT', 'ON', 'OPEN', 'OPEN COM',
'OPTION BASE', 'OUT', 'PAINT', 'PALETTE', 'PCOPY', 'PEN',
'PLAY', 'POKE', 'PRESET', 'PRINT', 'PRINT #', 'PRINT USING',
'PSET', 'PUT', 'PUT', 'RANDOMIZE', 'READ', 'REDIM', 'REM',
'RESET', 'RESTORE', 'RESUME', 'RETURN', 'RMDIR', 'RSET', 'RUN',
'SCREEN', 'SEEK', 'SELECT CASE', 'SHARED', 'SHELL', 'SLEEP',
'SOUND', 'STATIC', 'STOP', 'STRIG', 'SUB', 'SWAP', 'SYSTEM',
'TIME$', 'TIMER', 'TROFF', 'TRON', 'TYPE', 'UEVENT', 'UNLOCK',
'VIEW', 'WAIT', 'WHILE', 'WEND', 'WIDTH', 'WINDOW', 'WRITE'
]
keywords = [
'ACCESS', 'ALIAS', 'ANY', 'APPEND', 'AS', 'BASE', 'BINARY',
'BYVAL', 'CASE', 'CDECL', 'DOUBLE', 'ELSE', 'ELSEIF', 'ENDIF',
'INTEGER', 'IS', 'LIST', 'LOCAL', 'LONG', 'LOOP', 'MOD',
'NEXT', 'OFF', 'ON', 'OUTPUT', 'RANDOM', 'SIGNAL', 'SINGLE',
'STEP', 'STRING', 'THEN', 'TO', 'UNTIL', 'USING', 'WEND'
]
tokens = {
'root': [
(r'\n+', Text),
(r'\s+', Text.Whitespace),
(r'^(\s*)(\d*)(\s*)(REM .*)$',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace,
Comment.Single)),
(r'^(\s*)(\d+)(\s*)',
bygroups(Text.Whitespace, Name.Label, Text.Whitespace)),
(r'(?=[\s]*)(\w+)(?=[\s]*=)', Name.Variable.Global),
(r'(?=[^"]*)\'.*$', Comment.Single),
(r'"[^\n\"]*"', String.Double),
(r'(END)(\s+)(FUNCTION|IF|SELECT|SUB)',
bygroups(Keyword.Reserved, Text.Whitespace, Keyword.Reserved)),
(r'(DECLARE)(\s+)([A-Z]+)(\s+)(\S+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name)),
(r'(DIM)(\s+)(SHARED)(\s+)([^\s\(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable,
Text.Whitespace, Name.Variable.Global)),
(r'(DIM)(\s+)([^\s\(]+)',
bygroups(Keyword.Declaration, Text.Whitespace, Name.Variable.Global)),
(r'^(\s*)([a-zA-Z_]+)(\s*)(\=)',
bygroups(Text.Whitespace, Name.Variable.Global, Text.Whitespace,
Operator)),
(r'(GOTO|GOSUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
(r'(SUB)(\s+)(\w+\:?)',
bygroups(Keyword.Reserved, Text.Whitespace, Name.Label)),
include('declarations'),
include('functions'),
include('metacommands'),
include('operators'),
include('statements'),
include('keywords'),
(r'[a-zA-Z_]\w*[\$@#&!]', Name.Variable.Global),
(r'[a-zA-Z_]\w*\:', Name.Label),
(r'\-?\d*\.\d+[@|#]?', Number.Float),
(r'\-?\d+[@|#]', Number.Float),
            (r'\-?\d+#?', Number.Integer.Long),
            # NOTE: identical pattern to the rule above, so this rule can
            # never match; kept to mirror upstream Pygments.
            (r'\-?\d+#?', Number.Integer),
(r'!=|==|:=|\.=|<<|>>|[-~+/\\*%=<>&^|?:!.]', Operator),
(r'[\[\]{}(),;]', Punctuation),
(r'[\w]+', Name.Variable.Global),
],
# can't use regular \b because of X$()
'declarations': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, declarations)),
Keyword.Declaration),
],
'functions': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, functions)),
Keyword.Reserved),
],
'metacommands': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, metacommands)),
Keyword.Constant),
],
'operators': [
(r'\b(%s)(?=\(|\b)' % '|'.join(map(re.escape, operators)), Operator.Word),
],
'statements': [
(r'\b(%s)\b' % '|'.join(map(re.escape, statements)),
Keyword.Reserved),
],
'keywords': [
(r'\b(%s)\b' % '|'.join(keywords), Keyword),
],
}
def analyse_text(text):
return 0.2
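# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original lexer): exercising QBasicLexer
# through the standard Pygments API. highlight() and TerminalFormatter are
# real Pygments entry points; the sample program is made up.
if __name__ == "__main__":
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    sample = '10 PRINT "HELLO, WORLD"\n20 GOTO 10\n'
    print(highlight(sample, QBasicLexer(), TerminalFormatter()))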
| gpl-2.0 |
ArcEye/machinekit-testing | lib/python/gladevcp/hal_pyngcgui.py | 26 | 8131 | #!/usr/bin/env python
#------------------------------------------------------------------------------
# Copyright: 2013
# Author: Dewey Garrett <dgarrett@panix.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#------------------------------------------------------------------------------
import os
import gtk
import gobject
import pango
import hal_actions
import pyngcgui
g_module = os.path.basename(__file__)
#-----------------------------------------------------------------------------
# class to make a gladevcp widget:
class PyNgcGui(gtk.Frame,hal_actions._EMC_ActionBase):
"""PyNgcGui -- gladevcp widget"""
__gtype_name__ = 'PyNgcGui'
__gproperties__ = {
'use_keyboard' : (gobject.TYPE_BOOLEAN
,'Use Popup Keyboard'
,'Yes or No'
,False
,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
),
'debug' : (gobject.TYPE_BOOLEAN
,'Debug'
,'Yes or No'
,False
,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
),
'verbose' : (gobject.TYPE_BOOLEAN
,'Verbose'
,'Yes or No'
,False
,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
),
'send_function_name': (gobject.TYPE_STRING
,'Send Function'
,'default_send | send_to_axis | dummy_send'
,'default_send'
,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
),
'send_to_dir': (gobject.TYPE_STRING
,'Send to dir'
                       ,'None | touchy | dirname (None => default: [DISPLAY]PROGRAM_PREFIX)'
,''
,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
),
'gtk_theme_name': (gobject.TYPE_STRING
,'GTK+ Theme Name'
,'default | name_of_gtk+_theme'
,'Follow System Theme'
,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
),
'control_font_name': (gobject.TYPE_STRING
,'Control Font'
,'example: Sans 10'
,'Sans 10'
,gobject.PARAM_READWRITE | gobject.PARAM_CONSTRUCT
),
}
__gproperties = __gproperties__ # self.__gproperties
def __init__(self):
super(PyNgcGui,self).__init__(label=None) # glade creates label anyway
        self.set_label(None) # this has no effect here either
        # Neither attempt above stops glade from creating a Frame label;
        # remove_unwanted_label() clears it again after construction.
# put default property values in self.property_dict[]
self.property_dict = {}
for name in self.__gproperties.keys():
gtype = self.__gproperties[name][0]
if ( gtype == gobject.TYPE_BOOLEAN
or gtype == gobject.TYPE_STRING):
ty,lbl,tip,dflt,other = self.__gproperties[name]
if ( gtype == gobject.TYPE_INT
or gtype == gobject.TYPE_FLOAT):
ty,lbl,tip,minv,maxv,dflt,other = self.__gproperties[name]
self.property_dict[name] = dflt
gobject.timeout_add(1,self.go_ngcgui) # deferred
def do_get_property(self,property):
name = property.name.replace('-', '_')
if name in self.property_dict.keys():
return self.property_dict[name]
else:
raise AttributeError(_('%s:unknown property %s')
% (g_module,property.name))
def do_set_property(self,property,value):
name = property.name.replace('-','_')
if name not in self.__gproperties.keys():
            raise AttributeError(
                  _('%s:pyngcgui:do_set_property: unknown <%s>')
                  % (g_module, name))
else:
pyngcgui.vprint('SET P[%s]=%s' % (name,value))
self.property_dict[name] = value
def go_ngcgui(self):
self.start_NgcGui(debug = self.property_dict['debug']
,verbose = self.property_dict['verbose']
,use_keyboard = self.property_dict['use_keyboard']
,send_function_name = self.property_dict['send_function_name']
,send_to_dir = self.property_dict['send_to_dir']
,control_font_name = self.property_dict['control_font_name']
,gtk_theme_name = self.property_dict['gtk_theme_name']
)
gobject.timeout_add(1,self.remove_unwanted_label)
def remove_unwanted_label(self):
# coerce removal of unwanted label
self.set_label(None)
return False # one-time-only
def start_NgcGui(self
,debug=False
,verbose=False
,use_keyboard=False
,send_function_name=''
,send_to_dir=''
,control_font_name=None
,gtk_theme_name="Follow System Theme"
):
thenotebook = gtk.Notebook()
self.add(thenotebook) # tried with self=VBox,HBox,Frame
# Frame shows up best in glade designer
keyboardfile = None
if use_keyboard: keyboardfile = 'default'
send_function = None # None: let NgcGui handle it
if send_function_name == '': send_function = pyngcgui.default_send
elif send_function_name == 'dummy_send': send_function = pyngcgui.dummy_send
elif send_function_name == 'send_to_axis': send_function = pyngcgui.send_to_axis
elif send_function_name == 'default_send': send_function = pyngcgui.default_send
else:
print(_('%s:unknown send_function<%s>')
% (g_module,send_function_name))
        # ensure control_font is always bound (avoids a NameError when
        # control_font_name is None)
        control_font = None
        if control_font_name is not None:
            control_font = pango.FontDescription(control_font_name)
auto_file = None # use default behavior
if send_to_dir.strip() == "": send_to_dir = None
if send_to_dir is not None:
if send_to_dir == 'touchy':
# allow sent file to show up in touchy auto tab page
send_to_dir = '~/linuxcnc/nc_files'
if not os.path.isdir(os.path.expanduser(send_to_dir)):
raise ValueError(_('%s:Not a directory:\n %s\n'
% (g_module,send_to_dir)))
auto_file = os.path.expanduser(
os.path.join(send_to_dir,'ngcgui_generated.ngc'))
self.ngcgui = pyngcgui.NgcGui(w=thenotebook
,debug=debug
,verbose=verbose
,keyboardfile=keyboardfile
,send_function=send_function # prototype: (fname)
,auto_file=auto_file # None for default behavior
,control_font=control_font
,gtk_theme_name=gtk_theme_name
)
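# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): probing the GObject
# properties registered above from plain Python, outside of glade. This needs
# the same gtk/gladevcp environment as the module itself; the property list
# below simply mirrors __gproperties__.
if __name__ == "__main__":
    widget = PyNgcGui()
    for prop in ('debug', 'verbose', 'use_keyboard',
                 'send_function_name', 'send_to_dir'):
        print('%s = %s' % (prop, widget.get_property(prop)))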
| lgpl-2.1 |
haxsie/kobato | main.py | 1 | 20973 | #!/usr/bin/env python
# coding: utf-8
import pygame
from pygame.locals import*
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
import os
import random
import math
import datetime
from character import Character
# About the Character class:
# Glyphs are designed on a 4*4 grid layout;
# the rightmost column is left blank for letter spacing.
from replay import Replay
from crosstest import cross_test
# Initialize modules
SCR_RECT = pygame.Rect(0, 0, 640, 480)
pygame.init()
screen = pygame.display.set_mode(SCR_RECT.size)
pygame.display.set_caption("KOBATO")
surface = pygame.Surface(SCR_RECT.size)
character = Character(screen)
replay = Replay()
# Constant
GRID = 50
CELL = 0
if SCR_RECT[3] <= SCR_RECT[2]:
CELL = SCR_RECT[3] // 54
else:
CELL = SCR_RECT[2] // 54
MARGIN_X = (SCR_RECT[2]-GRID*CELL) // 2
MARGIN_Y = (SCR_RECT[3]-GRID*CELL) // 2
GRID_FOR_CHAR = CELL // 2
LIGHT_GRAY = (110, 110, 110)
DARK_GRAY = (50, 50, 50)
GRID_COLOR = (30, 30, 30)
BG_COLOR = (0, 0, 0)
PLAYER_COLOR = (255, 0, 100)
GEM_COLOR = (0, 255, 255)
GEM_COLOR_DARK = (0, 80, 80)
ENEMY_COLOR_0 = (255, 255, 0)
DARK_EC0 = (128, 128, 50)
ENEMY_COLOR_1 = (GEM_COLOR)
# Game settings
ENEMIES_LEN = 10
V_LIM = 4
INITIAL_SPAWN_SPAN = 31
MINIMUM_SPAWN_SPAN = 5
INITIAL_LEVEL = 3
CLEAR_LEVEL = 10
GEM_TIME_GAIN = 5
# Sound files (path components passed separately to os.path.join so the
# paths also resolve on non-Windows platforms)
SE_CLASH = pygame.mixer.Sound(os.path.join("data", "se_clash.wav"))
SE_GEM = pygame.mixer.Sound(os.path.join("data", "se_gem.wav"))
SE_MOVE = pygame.mixer.Sound(os.path.join("data", "se_move.wav"))
SE_INCUBATION = pygame.mixer.Sound(os.path.join("data", "se_incubation.wav"))
SE_LEVEL_UP = pygame.mixer.Sound(os.path.join("data", "se_level_up.wav"))
BGM = pygame.mixer.music.load(os.path.join("data", "kobato.ogg"))
# Convert grid coordinates to display (pixel) coordinates
def dX(x):
return MARGIN_X + x*CELL
def dY(y):
return MARGIN_Y + y*CELL
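# Editor's sketch (not in the original game): the inverse mapping from
# display pixels back to grid coordinates, e.g. for hit-testing mouse
# clicks. gX/gY are hypothetical helpers; nothing below calls them.
def gX(px):
    return (px - MARGIN_X) // CELL
def gY(py):
    return (py - MARGIN_Y) // CELL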
# Generate the game
def main():
game = Game()
replay.__init__(game)
game.set_up()
class Game:
def __init__(self):
pass
def set_up(self, seed=None):
'''Initialize game'''
# Random seed
if seed == None:
self.SEED = random.randint(0, 9999)
else:
self.SEED = seed
random.seed(self.SEED)
# Initialize game settings
self.waitForInitialInput = True
self.isRecording = False
self.enemies = []
self.gem = Gem()
self.spawnSpan = INITIAL_SPAWN_SPAN
self.timeToSpawn = self.spawnSpan
self.level = INITIAL_LEVEL
self.player = Player()
Player.vector = [0, 0]
Player.pos[0] = GRID // 2
Player.pos[1] = GRID // 2
for i in range(ENEMIES_LEN):
self.enemies.append(None)
# Initial display
screen.fill(BG_COLOR)
self.draw_grid()
self.update_clock()
self.player.draw_spawn_point()
self.update_info_display()
self.update_gem()
msg = character.write(
"KOBATO",PLAYER_COLOR,
(MARGIN_X + 40, SCR_RECT[3]//2 - CELL),
GRID_FOR_CHAR, 2)
attrMsg = (
"INPUT",
"R/P RECORD/PLAYBACK REPLAY",
"SPACE CAPTURE SCREEN",
"ENTER RESET GAME",
"ESC EXIT GAME")
msg = self.attr_msg(attrMsg)
msg = self.common_msg("PRESS ARROW KEY")
pygame.display.update()
## BGM start
pygame.mixer.music.play(-1, 0.0)
# Into the loop
if seed == None:
self.phase = "Live"
self.loop()
def loop(self):
'''Wait for input.'''
while True:
if self.phase == "Dead":
pygame.mixer.music.fadeout(80)
self.darken()
msg = self.common_msg("PRESS RETURN TO CONTINUE")
pygame.display.update()
pygame.time.wait(80)
if self.phase == "GameClear":
for i in range(100):
pygame.mixer.music.fadeout(80)
self.draw_dia(DARK_GRAY, (dX(Player.pos[0]), dY(Player.pos[1])), i * 30, 1)
self.lighten()
msg = self.special_msg("CLEAR")
pygame.display.update()
pygame.time.wait(40)
replay.close()
sys.exit()
# input check
key = pygame.key.get_pressed()
if self.phase == "Live":
pygame.time.wait(80)
if key[K_RIGHT] or key[K_LEFT] or key[K_DOWN] or key[K_UP]:
h = key[K_RIGHT] - key[K_LEFT]
v = key[K_DOWN] - key[K_UP]
# Record replay
replay.write(h, v)
# Game progress
self.update(h, v)
for e in pygame.event.get():
keyPressed = (e.type == KEYDOWN)
if e.type == QUIT or (keyPressed and e.key == K_ESCAPE):
replay.close()
sys.exit()
if keyPressed and self.waitForInitialInput:
# Replay
if e.key == K_r:
if replay.start():
pygame.draw.circle(screen, (255, 0, 0), (10, 10), 5, 0)
pygame.display.update()
elif e.key == K_p:
replay.play()
self.waitForInitialInput = False
if keyPressed and e.key == K_SPACE:
name = datetime.datetime.today()
name = name.strftime("kobato_%Y_%m_%d_%H_%M_%S")
name = str(name)+".png"
print(name)
img = pygame.image.save(screen, name)
if keyPressed and e.key == K_RETURN:
replay.close()
self.set_up()
def update(self, x, y):
'''Turn-based process'''
# SE
SE_MOVE.play()
# Enemy spawn
self.enemy_spawn()
# Preprocess
self.player_stat_check()
self.player_extend_check()
# Move
self.update_enemy_vector()
self.player.move((x, y))
# Cross check
self.check_cross()
self.player.bound()
self.check_cross()
self.update_player_stats()
# Display
self.update_clock()
self.darken()
self.player.update_line()
self.update_enemy_line()
self.update_info_display()
self.update_gem()
pygame.display.update()
# Clean up
self.update_position()
self.reset_player_flags()
self.clean_enemies()
self.reset_gems()
def player_stat_check(self):
if Player.life <= 0:
self.phase = "Dead"
if self.level == CLEAR_LEVEL:
self.phase = "GameClear"
def enemy_spawn(self):
if self.timeToSpawn < 0:
# Generate Tobi
self.spawn_tobi()
if self.spawnSpan < 0:
self.spawnSpan = MINIMUM_SPAWN_SPAN
else:
self.spawnSpan -= 5
self.timeToSpawn = self.spawnSpan
# SE
SE_INCUBATION.play()
for enemy in self.enemies:
if enemy != None and enemy.spawnCount:
enemy.update_spawn()
self.timeToSpawn -= 1
def player_extend_check(self):
if Player.objective <= Player.point:
Player.life += 1
Player.objective += 1
Player.extended = True
Player.point = 0
self.level += 1
# Generate Taka
self.spawn_taka()
# SE
SE_LEVEL_UP.play()
def update_enemy_vector(self):
for enemy in self.enemies:
if enemy != None and not enemy.spawnCount:
enemy.move()
def update_enemy_line(self):
for enemy in self.enemies:
if enemy != None and not enemy.spawnCount:
enemy.update_line()
def update_player_stats(self):
if Player.crossed:
Player.life -= 1
if self.gem.got:
Player.point += 1
def update_position(self):
Player.pos[0] += Player.vector[0]
Player.pos[1] += Player.vector[1]
for e, enemy in enumerate(self.enemies):
if enemy != None:
self.enemies[e].pos[0] += enemy.vector[0]
self.enemies[e].pos[1] += enemy.vector[1]
def reset_player_flags(self):
Player.crossed = False
Player.extended = False
def clean_enemies(self):
for e, enemy in enumerate(self.enemies):
if enemy != None and not enemy.alive:
self.enemies[e] = None
def reset_gems(self):
if self.gem.got:
self.timeToSpawn += GEM_TIME_GAIN
self.gem.__init__()
# Cross test functions
def check_cross(self):
for e, enemy in enumerate(self.enemies):
if enemy != None and enemy.vector != [0,0]:
# Player and enemy
if self.cross_test_vec(Player.pos, Player.vector, enemy.pos, enemy.vector):
Player.crossed = True
# SE
SE_CLASH.play()
for ee, exEnemy in enumerate(self.enemies):
# Enemy and enemy
if ee > e and exEnemy != None and exEnemy.vector != [0, 0]:
if self.cross_test_vec(enemy.pos, enemy.vector, exEnemy.pos, exEnemy.vector):
if not enemy.shield:
enemy.crossed = True
self.enemies[e].alive = False
# SE
SE_CLASH.play()
if not exEnemy.shield:
exEnemy.crossed = True
self.enemies[ee].alive = False
# SE
SE_CLASH.play()
# Player and gem
a = (Player.pos[0]+Player.vector[0], Player.pos[1]+Player.vector[1])
for p, point in enumerate(self.gem.points):
# Each edge in gem
b = (self.gem.points[p - 1][0], self.gem.points[p - 1][1])
c = (point[0], point[1])
if cross_test(Player.pos, a, b, c):
self.gem.got = True
# SE
SE_GEM.play()
def cross_test_vec(self, p1, v1, p2, v2):
        '''Test whether the segment from p1 along v1 crosses the segment from p2 along v2.'''
return cross_test(p1, (p1[0] + v1[0], p1[1] + v1[1]), p2, (p2[0] + v2[0], p2[1] + v2[1]))
# Spawn functions
def spawn_tobi(self):
if None in self.enemies:
self.enemies[self.enemies.index(None)] = Tobi(self, (GRID // 2, GRID // 2))
def spawn_taka(self):
if None in self.enemies:
self.enemies[self.enemies.index(None)] = Taka(self, (GRID // 2, GRID // 2))
# Draw functions
def draw_grid(self):
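        '''Draw the playfield grid lines.'''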
for x in range(GRID):
pygame.draw.line(
screen,GRID_COLOR,
(dX(x), MARGIN_Y),
(dX(x), SCR_RECT[3] - MARGIN_Y), 1)
for y in range(GRID):
pygame.draw.line(
screen,GRID_COLOR,
(MARGIN_X, dY(y)),
(SCR_RECT[2] - MARGIN_X, dY(y)), 1)
def update_info_display(self):
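        '''Draw life markers, point diamonds, the playfield frame, and the level readout.'''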
# Player life
r = CELL // 2
for i in range(Player.life):
center = (MARGIN_X + r + r*3*i, MARGIN_Y // 2)
            if Player.crossed:
pygame.draw.circle(screen, (255, 255, 255), center, r, 0)
else:
pygame.draw.circle(screen, Player.color, center, r, 0)
            if Player.extended:
pygame.draw.circle(screen, (255, 255, 255), center, r*2, 1)
# Player point
center = (SCR_RECT[2] - MARGIN_X - r, MARGIN_Y // 2)
for i in range(Player.objective):
self.draw_dia(Gem.colorDark, (center[0] - r*3*i, center[1]), CELL // 2, 0)
for i in range(Player.point):
self.draw_dia(Gem.color, (center[0] - r*3*i, center[1]), CELL // 2, 0)
if Player.extended:
for i in range(Player.objective):
self.draw_dia((255, 255, 255), (center[0] - r*3*i, center[1]), CELL, 1)
# frame
pygame.draw.rect(screen, GRID_COLOR, ((MARGIN_X, MARGIN_Y), (CELL * GRID, CELL * GRID)), 3)
# Level information
gridR = SCR_RECT[2] - MARGIN_X
gridT = MARGIN_Y
color = LIGHT_GRAY
# Line header height
y = 0
# Line spacing
spacing = CELL*2
# level
msg = " LEVEL"
s = GRID_FOR_CHAR*3 // 4
x = gridR
y = gridT-s * 4
w = 1
        character.write(msg, color, (x, y), s, w)
y += s*4 + spacing
# level value
msg = str(self.level)
s = GRID_FOR_CHAR * 2
x = gridR + (MARGIN_X - len(msg)*s*4)//2
w = 3
        character.write(msg, color, (x, y), s, w)
y += s*4 + spacing
# level objective
msg = "/ " + str(CLEAR_LEVEL) + " "
s = GRID_FOR_CHAR*3 // 4
x = SCR_RECT[2] - len(msg)*s*4
w = 1
        character.write(msg, color, (x, y), s, w)
def draw_dia(self, col, center, size, width):
pygame.draw.polygon(
screen, col,
((center[0], center[1] - size),
(center[0] + size, center[1]),
(center[0], center[1] + size),
(center[0] - size, center[1])),
width)
def update_gem(self):
self.gem.draw()
def update_clock(self):
self.draw_clock(120, DARK_GRAY)
self.draw_clock(self.timeToSpawn, DARK_EC0)
def draw_clock(self, deg, col):
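        '''Draw deg radial ticks, 3 degrees apart, around the screen centre.'''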
for i in range(deg):
pygame.draw.line(
screen, col,
(math.sin(math.radians(180 + i*-3))*CELL*10 + SCR_RECT[2]//2,
math.cos(math.radians(180 + i*-3))*CELL*10 + SCR_RECT[3]//2),
(math.sin(math.radians(180 + i*-3))*CELL*12 + SCR_RECT[2]//2,
math.cos(math.radians(180 + i*-3))*CELL*12 + SCR_RECT[3]//2),
1)
def darken(self):
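        '''Fade the frame toward the background by blitting a translucent fill.'''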
surface.fill(BG_COLOR)
surface.set_alpha(80)
screen.blit(surface,(0, 0))
def lighten(self):
surface.fill((255, 255, 255))
surface.set_alpha(80)
screen.blit(surface,(0, 0))
def attr_msg(self, msgArray):
'''Attributes message'''
size = GRID_FOR_CHAR // 2
for l, msg in enumerate(msgArray):
character.write(
msg, (255, 255, 255),
(MARGIN_X + 10, MARGIN_Y + 10 + (size+20)*l),
size, 1)
def common_msg(self, msg):
'''Common message'''
size = GRID_FOR_CHAR//4 * 3
character.write(
msg, (255, 255, 255),
((SCR_RECT[2]-len(msg)*size*4) // 2, SCR_RECT[3]//4 * 3),
size, 1)
def special_msg(self, msg):
'''Special message'''
size = GRID_FOR_CHAR
character.write(
msg, DARK_GRAY,
((SCR_RECT[2]-len(msg)*size*4) // 2, SCR_RECT[3]//4 * 1),
size, 2)
class Anima(object):
    '''Base class for things that move: the player and enemies.'''
def __init__(self):
self.crossed = False
self.pos = [0, 0]
self.vector = [0, 0]
self.color = (0, 0, 0)
def update_line(self):
color = self.__class__.color
        if self.crossed:
color = (255, 255, 255)
self.drawLine(color)
def drawLine(self, color):
pygame.draw.line(screen, color,
(dX(self.pos[0]), dY(self.pos[1])),
(dX(self.pos[0] + self.vector[0]), dY(self.pos[1] + self.vector[1])),
2)
class Player(Anima):
'''Player class'''
def __init__(self):
        # Do not inherit Anima's instance attributes; player state lives in class attributes.
Player.color = PLAYER_COLOR
Player.pos = [0, 0]
Player.vector = [0, 0]
Player.point = 0
# Initial objective
Player.objective = INITIAL_LEVEL
        # Initial life equals the initial level
Player.life = INITIAL_LEVEL
Player.crossed = False
Player.extended = False
def update_line(self):
color = self.__class__.color
        if Player.crossed:
color = (255, 255, 255)
self.drawLine(color)
def draw_spawn_point(self):
size = CELL // 4
pygame.draw.rect(
screen, Player.color,
(dX(Player.pos[0]) - size, dY(Player.pos[1]) - size,
size * 2, size * 2), 0)
pygame.draw.rect(
screen, Player.color,
(dX(Player.pos[0]) - size*2, dY(Player.pos[1]) - size*2,
size * 4, size * 4), 1)
def move(self, vec):
for i in range(2):
if V_LIM >= Player.vector[i] + vec[i] >= V_LIM * -1:
Player.vector[i] += vec[i]
def bound(self):
self.update_line()
for i in range(2):
if not(GRID >= Player.pos[i] + Player.vector[i] >= 1):
Player.pos[0] += Player.vector[0]
Player.pos[1] += Player.vector[1]
if Player.pos[i] + Player.vector[i] < 1:
Player.vector[i] *= -1
if Player.pos[i] + Player.vector[i] > GRID:
Player.vector[i] *= -1
self.update_line()
return True
return False
class Enemy(Anima):
'''Enemy class'''
color = (0, 0, 0)
shield = False
speedOffset = 0
def __init__(self, game, spawnPos):
super(Enemy, self).__init__()
self.game = game
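        # Stagger speeds across spawns: speedOffset cycles 0 -> 1 -> 2,
        # giving speeds of V_LIM-1, V_LIM+1 and V_LIM+3 in turn.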
self.speed = V_LIM - 1 + self.__class__.speedOffset*2
self.__class__.speedOffset += 1
if self.__class__.speedOffset > 2:
self.__class__.speedOffset = 0
self.pos = [spawnPos[0], spawnPos[1]]
self.vector = [0, 0]
self.alive = True
# Spawn duration
self.spawnCountMax = 5
self.spawnCount = self.spawnCountMax
        # Draw the first spawn ring immediately
        self.update_spawn()
def update_spawn(self):
SE_INCUBATION.play()
        r = CELL*self.spawnCount + 2  # (CELL*n)*2//2 simplifies to CELL*n
pygame.draw.circle(screen, self.__class__.color, (dX(self.pos[0]), dY(self.pos[1])), r, 1)
self.spawnCount -= 1
def move_to_player(self):
        # Element-wise sum: list '+' would concatenate pos and vector,
        # making the enemy ignore the player's velocity.
        target = [Player.pos[i] + Player.vector[i] for i in range(2)]
self.update_pos(target)
def move_to_gem(self):
target = self.game.gem.pos
self.update_pos(target)
def update_pos(self, target):
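        '''Step each velocity axis toward target and clamp its magnitude below self.speed.'''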
for i in range(2):
if self.pos[i] < target[i]:
self.vector[i] += 1
if self.pos[i] > target[i]:
self.vector[i] -= 1
if self.vector[i] >= self.speed:
self.vector[i] -= 1
if self.vector[i] <= -1 * (self.speed):
self.vector[i] += 1
class Tobi(Enemy):
'''Tobi class'''
color = ENEMY_COLOR_0
shield = False
def __init__(self, game, spawnPos):
super(Tobi, self).__init__(game, spawnPos)
def move(self):
self.move_to_player()
class Taka(Enemy):
'''Taka class'''
color = ENEMY_COLOR_1
shield = True
speedOffset = 0
def __init__(self, game, spawnPos):
super(Taka, self).__init__(game, spawnPos)
def move(self):
self.move_to_gem()
class Gem:
'''Gem class'''
color = GEM_COLOR
colorDark = GEM_COLOR_DARK
def __init__(self):
self.pos = [0, 0]
self.got = False
# shape
self.vers = ((-2, 0), (0, -2), (2, 0), (0, 2))
# Shape with grid position
self.points = []
self.spawn()
def spawn(self):
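        '''Place the gem at a random cell, rebuild its outline points, and draw it.'''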
self.pos[0] = random.randint(1, GRID - 1)
self.pos[1] = random.randint(1, GRID - 1)
for ver in self.vers:
self.points.append([self.pos[0] + ver[0], self.pos[1] + ver[1]])
self.got = False
self.draw()
def draw(self):
c = self.__class__.color
        if self.got:
c = (255, 255, 255)
pointsDisp = []
for p, point in enumerate(self.points):
pointsDisp.append((dX(point[0]), dY(point[1])))
pygame.draw.circle(screen, c, (dX(self.pos[0]), dY(self.pos[1])), CELL // 3, 1)
pygame.draw.polygon(screen, c, pointsDisp, 1)
if __name__ == "__main__":
main()
| mit |
zionist/mon | mon/apps/cmp/migrations/0012_auto__chg_field_result_wc__chg_field_result_room__chg_field_result_hal.py | 1 | 46568 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Result.wc'
db.alter_column(u'cmp_result', 'wc_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.ResultWC'], null=True))
# Changing field 'Result.room'
db.alter_column(u'cmp_result', 'room_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.ResultRoom'], null=True))
# Changing field 'Result.hallway'
db.alter_column(u'cmp_result', 'hallway_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.ResultHallway'], null=True))
# Changing field 'Result.kitchen'
db.alter_column(u'cmp_result', 'kitchen_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.ResultKitchen'], null=True))
def backwards(self, orm):
# Changing field 'Result.wc'
db.alter_column(u'cmp_result', 'wc_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.WC'], null=True))
# Changing field 'Result.room'
db.alter_column(u'cmp_result', 'room_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Room'], null=True))
# Changing field 'Result.hallway'
db.alter_column(u'cmp_result', 'hallway_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Hallway'], null=True))
# Changing field 'Result.kitchen'
db.alter_column(u'cmp_result', 'kitchen_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['core.Kitchen'], null=True))
models = {
'build.building': {
'Meta': {'object_name': 'Building'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'build_state': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'build_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cad_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flat_num': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'mo_fond_doc_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mo_fond_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planing_floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'build.contract': {
'Meta': {'object_name': 'Contract'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'creation_form': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'blank': 'True'}),
'docs': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.ContractDocuments']", 'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'has_trouble_docs': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'period_of_payment': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'summ_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summ_without_mo_money': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa_fed': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'summa_reg': ('django.db.models.fields.FloatField', [], {'null': 'True'})
},
'build.contractdocuments': {
'Meta': {'object_name': 'ContractDocuments'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mun_contracts': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'build.ground': {
'Meta': {'object_name': 'Ground'},
'address': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'approve_status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'build_state': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'build_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cad_num': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048', 'db_index': 'True'}),
'cad_passport': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cad_sum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'developer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Developer']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'finish_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Hallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Kitchen']", 'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'mo_fond_doc_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mo_fond_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_doc_num': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ownership_year': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'payment_perspective': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planing_floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Room']", 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.WC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'cmp.auction': {
'Meta': {'object_name': 'Auction'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'electric_supply': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionHallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionKitchen']", 'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'open_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'proposal_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionRoom']", 'null': 'True', 'blank': 'True'}),
'stage': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_price': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'water_removal': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionWC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
'cmp.comparedata': {
'Meta': {'object_name': 'CompareData'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'clinic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'cmp_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 4, 25, 0, 0)', 'blank': 'True'}),
'driveways': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'electric_supply': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'floors': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_clother_drying': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_dustbin_area': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_intercom': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_parking': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_playground': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_routes': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kindergarden': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'market': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'planing_floor': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'public_transport': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_removal': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'cmp.copyauction': {
'Meta': {'object_name': 'CopyAuction'},
'area': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'area_cmp': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'electric_supply': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'entrance_door': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'gas_supply': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionHallway']", 'null': 'True', 'blank': 'True'}),
'heating': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'hot_water_supply': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_doors': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'is_balcony': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_heat_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_loggia': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_water_boiler': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionKitchen']", 'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'num': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'open_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'proposal_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionRoom']", 'null': 'True', 'blank': 'True'}),
'stage': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'start_price': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'water_removal': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'water_settlement': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.AuctionWC']", 'null': 'True', 'blank': 'True'}),
'window_constructions': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
'cmp.person': {
'Meta': {'object_name': 'Person'},
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'contact_phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'})
},
'cmp.result': {
'Meta': {'object_name': 'Result'},
'building': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Building']", 'null': 'True', 'blank': 'True'}),
'check_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'cmp_data': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmp.CompareData']", 'null': 'True', 'blank': 'True'}),
'contract': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Contract']", 'null': 'True', 'blank': 'True'}),
'doc_files': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'doc_list': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'establish_pers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'establish_pers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['cmp.Person']"}),
'finish_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2018, 12, 31, 0, 0)'}),
'ground': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['build.Ground']", 'null': 'True', 'blank': 'True'}),
'hallway': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.ResultHallway']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kitchen': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.ResultKitchen']", 'null': 'True', 'blank': 'True'}),
'mo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['mo.MO']", 'null': 'True'}),
'mo_pers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mo_pers'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['cmp.Person']"}),
'readiness': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'recommend': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'room': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.ResultRoom']", 'null': 'True', 'blank': 'True'}),
'start_year': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 1, 1, 0, 0)'}),
'wc': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.ResultWC']", 'null': 'True', 'blank': 'True'})
},
u'core.auctionhallway': {
'Meta': {'object_name': 'AuctionHallway', '_ormbases': ['core.BaseHallway']},
u'basehallway_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseHallway']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
u'core.auctionkitchen': {
'Meta': {'object_name': 'AuctionKitchen', '_ormbases': ['core.BaseKitchen']},
u'basekitchen_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseKitchen']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'stove': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
u'core.auctionroom': {
'Meta': {'object_name': 'AuctionRoom', '_ormbases': ['core.BaseRoom']},
u'baseroom_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseRoom']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'})
},
u'core.auctionwc': {
'Meta': {'object_name': 'AuctionWC', '_ormbases': ['core.BaseWC']},
u'basewc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseWC']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'separate': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': '0', 'max_length': '256', 'blank': 'True'}),
'wc_ceiling': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'wc_floor': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'wc_wall': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'core.basehallway': {
'Meta': {'object_name': 'BaseHallway'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.basekitchen': {
'Meta': {'object_name': 'BaseKitchen'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sink_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
},
'core.baseroom': {
'Meta': {'object_name': 'BaseRoom'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'core.basewc': {
'Meta': {'object_name': 'BaseWC'},
'bath_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_toilet': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_tower_dryer': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sink_with_mixer': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'wc_switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
'core.developer': {
'Meta': {'object_name': 'Developer'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'boss_position': ('django.db.models.fields.CharField', [], {'max_length': '2048'}),
'face_list': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'})
},
u'core.hallway': {
'Meta': {'object_name': 'Hallway', '_ormbases': ['core.BaseHallway']},
u'basehallway_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseHallway']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.kitchen': {
'Meta': {'object_name': 'Kitchen', '_ormbases': ['core.BaseKitchen']},
u'basekitchen_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseKitchen']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'stove': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.resulthallway': {
'Meta': {'object_name': 'ResultHallway', '_ormbases': [u'core.Hallway']},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'hallway_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Hallway']", 'unique': 'True', 'primary_key': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'core.resultkitchen': {
'Meta': {'object_name': 'ResultKitchen', '_ormbases': [u'core.Kitchen']},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'kitchen_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Kitchen']", 'unique': 'True', 'primary_key': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'core.resultroom': {
'Meta': {'object_name': 'ResultRoom', '_ormbases': [u'core.Room']},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'room_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.Room']", 'unique': 'True', 'primary_key': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'})
},
u'core.resultwc': {
'Meta': {'object_name': 'ResultWC', '_ormbases': [u'core.WC']},
'ceiling_hook': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'heaters': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'lamp': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'smoke_filter': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'sockets': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'switches': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
u'wc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['core.WC']", 'unique': 'True', 'primary_key': 'True'})
},
u'core.room': {
'Meta': {'object_name': 'Room', '_ormbases': ['core.BaseRoom']},
u'baseroom_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseRoom']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
u'core.wc': {
'Meta': {'object_name': 'WC', '_ormbases': ['core.BaseWC']},
u'basewc_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseWC']", 'unique': 'True', 'primary_key': 'True'}),
'ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'separate': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc_ceiling': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc_floor': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'wc_wall': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'mo.mo': {
'Meta': {'object_name': 'MO'},
'common_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_economy': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_fed_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_percentage': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_reg_amount': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'common_spent': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'creation_form': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'flats_amount': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
'has_trouble': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'home_fed_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'home_reg_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2048'}),
'planing_home_orphans': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmp'] | bsd-3-clause |
birsoyo/conan | conans/server/rest/controllers/users_controller.py | 2 | 1305 | from bottle import response
from conans.errors import AuthenticationException
from conans.server.rest.controllers.controller import Controller
from conans.server.service.user_service import UserService
class UsersController(Controller):
"""
Serve requests related with users
"""
def attach_to(self, app):
@app.route(self.route + '/authenticate', method=["GET"])
def authenticate(http_basic_credentials):
if not http_basic_credentials:
raise AuthenticationException("Wrong user or password")
user_service = UserService(app.authenticator,
app.credentials_manager)
token = user_service.authenticate(http_basic_credentials.user,
http_basic_credentials.password)
response.content_type = 'text/plain'
return token
@app.route(self.route + '/check_credentials', method=["GET"])
def check_credentials(auth_user):
"""Just check if valid token. It not exception
is raised from Bottle plugin"""
if not auth_user:
raise AuthenticationException("Logged user needed!")
response.content_type = 'text/plain'
return auth_user
| mit |
tkzeng/molecular-design-toolkit | moldesign/viewer/common.py | 1 | 3411 | # Copyright 2016 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from moldesign import utils
from moldesign.helpers import colormap
class ColorMixin(object):
def color_by(self, atom_callback, atoms=None, mplmap='auto', force_cmap=False):
"""
Color atoms according to either:
* an atomic attribute (e.g., 'chain', 'residue', 'mass')
* a callback function that accepts an atom and returns a color or a category
Args:
atom_callback (callable OR str): callable f(atom) returns color OR
category OR an atom attribute (e.g., ``atnum, mass, residue.type``)
atoms (moldesign.molecules.AtomContainer): atoms to color (default: self.atoms)
mplmap (str): name of the matplotlib colormap to use if colors aren't explicitly
specified)
force_cmap (bool): force the use of a colormap
Notes:
If you'd like to explicitly specify colors, the callback can return color
specifications as an HTML string (``'#1234AB'``), a hexadecimal integer (
``0x12345AB``), or a CSS3 color keyword (``'green'``, ``'purple'``, etc., see
https://developer.mozilla.org/en-US/docs/Web/CSS/color_value)
If the callback returns an integer, it may be interpreted as a color spec (since RGB
colors are just hexadecimal integers). Use ``force_cmap=True`` to force the creation
of a colormap.
Returns:
dict: mapping of categories to colors
"""
atoms = utils.if_not_none(atoms, self.mol.atoms)
if isinstance(atom_callback, basestring):
# shortcut to use strings to access atom attributes, i.e. "ff.partial_charge"
attrs = atom_callback.split('.')
# make sure that whatever value is returned doesn't get interpreted as a color
force_cmap = True
def atom_callback(atom):
obj = atom
for attr in attrs:
obj = getattr(obj, attr)
return obj
colors = utils.Categorizer(atom_callback, atoms)
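        # Group atoms by the callback's return value; the keys may be explicit
        # colors or categories that still need a colormap.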
if force_cmap:
name_is_color = [False]
else:
name_is_color = map(utils.is_color, colors.keys())
if len(colors) <= 1:
colors = {'gray': atoms}
elif not all(name_is_color):
assert not any(name_is_color), \
"callback function returned a mix of colors and categories"
categories = colors
cats = categories.keys()
# If there are >256 categories, this is a many-to-one mapping
colornames = colormap(cats, mplmap=mplmap)
colors = {c: [] for c in colornames}
for cat, color in zip(cats, colornames):
colors[color].extend(categories[cat])
self.set_colors(colors)
| apache-2.0 |
devops2014/djangosite | django/core/files/uploadedfile.py | 471 | 4334 | """
Classes representing uploaded files.
"""
import errno
import os
from io import BytesIO
from django.conf import settings
from django.core.files import temp as tempfile
from django.core.files.base import File
from django.utils.encoding import force_str
__all__ = ('UploadedFile', 'TemporaryUploadedFile', 'InMemoryUploadedFile',
'SimpleUploadedFile')
class UploadedFile(File):
"""
    An abstract uploaded file (``TemporaryUploadedFile`` and
``InMemoryUploadedFile`` are the built-in concrete subclasses).
An ``UploadedFile`` object behaves somewhat like a file object and
represents some file data that the user submitted with a form.
"""
DEFAULT_CHUNK_SIZE = 64 * 2 ** 10
def __init__(self, file=None, name=None, content_type=None, size=None, charset=None, content_type_extra=None):
super(UploadedFile, self).__init__(file, name)
self.size = size
self.content_type = content_type
self.charset = charset
self.content_type_extra = content_type_extra
def __repr__(self):
return force_str("<%s: %s (%s)>" % (
self.__class__.__name__, self.name, self.content_type))
def _get_name(self):
return self._name
def _set_name(self, name):
# Sanitize the file name so that it can't be dangerous.
if name is not None:
# Just use the basename of the file -- anything else is dangerous.
name = os.path.basename(name)
# File names longer than 255 characters can cause problems on older OSes.
if len(name) > 255:
name, ext = os.path.splitext(name)
ext = ext[:255]
name = name[:255 - len(ext)] + ext
self._name = name
name = property(_get_name, _set_name)
class TemporaryUploadedFile(UploadedFile):
"""
A file uploaded to a temporary location (i.e. stream-to-disk).
"""
def __init__(self, name, content_type, size, charset, content_type_extra=None):
if settings.FILE_UPLOAD_TEMP_DIR:
file = tempfile.NamedTemporaryFile(suffix='.upload',
dir=settings.FILE_UPLOAD_TEMP_DIR)
else:
file = tempfile.NamedTemporaryFile(suffix='.upload')
super(TemporaryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
def temporary_file_path(self):
"""
Returns the full path of this file.
"""
return self.file.name
def close(self):
try:
return self.file.close()
except OSError as e:
if e.errno != errno.ENOENT:
# Means the file was moved or deleted before the tempfile
# could unlink it. Still sets self.file.close_called and
# calls self.file.file.close() before the exception
raise
class InMemoryUploadedFile(UploadedFile):
"""
A file uploaded into memory (i.e. stream-to-memory).
"""
def __init__(self, file, field_name, name, content_type, size, charset, content_type_extra=None):
super(InMemoryUploadedFile, self).__init__(file, name, content_type, size, charset, content_type_extra)
self.field_name = field_name
def open(self, mode=None):
self.file.seek(0)
def chunks(self, chunk_size=None):
self.file.seek(0)
yield self.read()
def multiple_chunks(self, chunk_size=None):
# Since it's in memory, we'll never have multiple chunks.
return False
class SimpleUploadedFile(InMemoryUploadedFile):
"""
A simple representation of a file, which just has content, size, and a name.
"""
def __init__(self, name, content, content_type='text/plain'):
content = content or b''
super(SimpleUploadedFile, self).__init__(BytesIO(content), None, name,
content_type, len(content), None, None)
@classmethod
def from_dict(cls, file_dict):
"""
Creates a SimpleUploadedFile object from
a dictionary object with the following keys:
- filename
- content-type
- content
"""
return cls(file_dict['filename'],
file_dict['content'],
file_dict.get('content-type', 'text/plain'))
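# --- Illustrative usage sketch (not part of Django; assumes a test context) ---
# SimpleUploadedFile is the usual way to feed fake uploads to forms and views:
#
#   upload = SimpleUploadedFile('hello.txt', b'hello world')
#   upload.size                # -> 11
#   upload.read()              # -> b'hello world'
#   same = SimpleUploadedFile.from_dict(
#       {'filename': 'hello.txt', 'content': b'hello world'})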
| bsd-3-clause |
palerdot/calibre | src/calibre/ebooks/pdf/reflow.py | 10 | 24834 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, os
from lxml import etree
class Font(object):
def __init__(self, spec):
self.id = spec.get('id')
self.size = float(spec.get('size'))
self.color = spec.get('color')
self.family = spec.get('family')
class Element(object):
def __init__(self):
self.starts_block = None
self.block_style = None
def __eq__(self, other):
return self.id == other.id
def __hash__(self):
return hash(self.id)
class Image(Element):
def __init__(self, img, opts, log, idc):
Element.__init__(self)
self.opts, self.log = opts, log
self.id = idc.next()
self.top, self.left, self.width, self.height, self.iwidth, self.iheight = \
map(float, map(img.get, ('top', 'left', 'rwidth', 'rheight', 'iwidth',
'iheight')))
self.src = img.get('src')
self.bottom = self.top + self.height
self.right = self.left + self.width
def to_html(self):
return '<img src="%s" width="%dpx" height="%dpx"/>' % \
(self.src, int(self.width), int(self.height))
def dump(self, f):
f.write(self.to_html())
f.write('\n')
class Text(Element):
def __init__(self, text, font_map, opts, log, idc):
Element.__init__(self)
self.id = idc.next()
self.opts, self.log = opts, log
self.font_map = font_map
self.top, self.left, self.width, self.height = map(float, map(text.get,
('top', 'left', 'width', 'height')))
self.bottom = self.top + self.height
self.right = self.left + self.width
self.font = self.font_map[text.get('font')]
self.font_size = self.font.size
self.color = self.font.color
self.font_family = self.font.family
text.tail = ''
self.text_as_string = etree.tostring(text, method='text',
encoding=unicode)
self.raw = text.text if text.text else u''
for x in text.iterchildren():
self.raw += etree.tostring(x, method='xml', encoding=unicode)
self.average_character_width = self.width/len(self.text_as_string)
def coalesce(self, other, page_number):
if self.opts.verbose > 2:
self.log.debug('Coalescing %r with %r on page %d'%(self.text_as_string,
other.text_as_string, page_number))
self.top = min(self.top, other.top)
self.right = other.right
self.width = self.right - self.left
self.bottom = max(self.bottom, other.bottom)
self.height = self.bottom - self.top
self.font_size = max(self.font_size, other.font_size)
# keep the font belonging to whichever fragment has the larger size
self.font = other.font if self.font_size == other.font_size else self.font
self.text_as_string += other.text_as_string
self.raw += other.raw
self.average_character_width = (self.average_character_width +
other.average_character_width)/2.0
def to_html(self):
return self.raw
def dump(self, f):
f.write(self.to_html().encode('utf-8'))
f.write('\n')
class FontSizeStats(dict):
def __init__(self, stats):
total = float(sum(stats.values()))
self.most_common_size, self.chars_at_most_common_size = -1, 0
for sz, chars in stats.items():
if chars >= self.chars_at_most_common_size:
self.most_common_size, self.chars_at_most_common_size = sz, chars
self[sz] = chars/total
class Interval(object):
def __init__(self, left, right):
self.left, self.right = left, right
self.width = right - left
def intersection(self, other):
left = max(self.left, other.left)
right = min(self.right, other.right)
return Interval(left, right)
def centered_in(self, parent):
left = abs(self.left - parent.left)
right = abs(self.right - parent.right)
return abs(left-right) < 3
def __nonzero__(self):
return self.width > 0
def __eq__(self, other):
return self.left == other.left and self.right == other.right
def __hash__(self):
return hash('(%f,%f)' % (self.left, self.right))
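# Illustrative sketch (not calibre code): Interval overlap drives both column
# detection and region matching below. Assuming the class as defined above:
#
#   a = Interval(0, 100)
#   b = Interval(60, 160)
#   a.intersection(b).width          # -> 40 (truthy, so the intervals overlap)
#   Interval(10, 90).centered_in(a)  # -> True (margins differ by less than 3)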
class Column(object):
# A column contains an element if the element bulges out to
# the left or the right by at most HFUZZ*col width.
HFUZZ = 0.2
def __init__(self):
self.left = self.right = self.top = self.bottom = 0
self.width = self.height = 0
self.elements = []
self.average_line_separation = 0
def add(self, elem):
if elem in self.elements: return
self.elements.append(elem)
self._post_add()
def prepend(self, elem):
if elem in self.elements: return
self.elements.insert(0, elem)
self._post_add()
def _post_add(self):
self.elements.sort(cmp=lambda x,y:cmp(x.bottom,y.bottom))
self.top = self.elements[0].top
self.bottom = self.elements[-1].bottom
self.left, self.right = sys.maxint, 0
for x in self:
self.left = min(self.left, x.left)
self.right = max(self.right, x.right)
self.width, self.height = self.right-self.left, self.bottom-self.top
def __iter__(self):
for x in self.elements:
yield x
def __len__(self):
return len(self.elements)
def contains(self, elem):
return elem.left > self.left - self.HFUZZ*self.width and \
elem.right < self.right + self.HFUZZ*self.width
def collect_stats(self):
if len(self.elements) > 1:
gaps = [self.elements[i+1].top - self.elements[i].bottom for i in
range(0, len(self.elements)-1)]
self.average_line_separation = sum(gaps)/len(gaps)
for i, elem in enumerate(self.elements):
left_margin = elem.left - self.left
elem.indent_fraction = left_margin/self.width
elem.width_fraction = elem.width/self.width
if i == 0:
elem.top_gap_ratio = None
else:
elem.top_gap_ratio = (self.elements[i-1].bottom -
elem.top)/self.average_line_separation
def previous_element(self, idx):
if idx == 0:
return None
return self.elements[idx-1]
def dump(self, f, num):
f.write('******** Column %d\n\n'%num)
for elem in self.elements:
elem.dump(f)
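# Illustrative sketch (not calibre code): with HFUZZ = 0.2, a column spanning
# left=100, right=200 (width 100) still contains an element whose edges lie
# within 100 - 20 = 80 and 200 + 20 = 220, i.e. a bulge of at most 20% of the
# column width on either side.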
class Box(list):
def __init__(self, type='p'):
self.tag = type
def to_html(self):
ans = ['<%s>'%self.tag]
for elem in self:
if isinstance(elem, int):
ans.append('<a name="page_%d"/>'%elem)
else:
ans.append(elem.to_html()+' ')
ans.append('</%s>'%self.tag)
return ans
class ImageBox(Box):
def __init__(self, img):
Box.__init__(self)
self.img = img
def to_html(self):
ans = ['<div style="text-align:center">']
ans.append(self.img.to_html())
if len(self) > 0:
ans.append('<br/>')
for elem in self:
if isinstance(elem, int):
ans.append('<a name="page_%d"/>'%elem)
else:
ans.append(elem.to_html()+' ')
ans.append('</div>')
return ans
class Region(object):
def __init__(self, opts, log):
self.opts, self.log = opts, log
self.columns = []
self.top = self.bottom = self.left = self.right = self.width = self.height = 0
def add(self, columns):
if not self.columns:
for x in sorted(columns, cmp=lambda x,y: cmp(x.left, y.left)):
self.columns.append(x)
else:
for i in range(len(columns)):
for elem in columns[i]:
self.columns[i].add(elem)
def contains(self, columns):
# TODO: handle unbalanced columns
if not self.columns:
return True
if len(columns) != len(self.columns):
return False
for i in range(len(columns)):
c1, c2 = self.columns[i], columns[i]
x1 = Interval(c1.left, c1.right)
x2 = Interval(c2.left, c2.right)
intersection = x1.intersection(x2)
base = min(x1.width, x2.width)
if intersection.width/base < 0.6:
return False
return True
@property
def is_empty(self):
return len(self.columns) == 0
@property
def line_count(self):
max_lines = 0
for c in self.columns:
max_lines = max(max_lines, len(c))
return max_lines
@property
def is_small(self):
return self.line_count < 3
def absorb(self, singleton):
def most_suitable_column(elem):
mc, mw = None, 0
for c in self.columns:
i = Interval(c.left, c.right)
e = Interval(elem.left, elem.right)
w = i.intersection(e).width
if w > mw:
mc, mw = c, w
if mc is None:
self.log.warn('No suitable column for singleton',
elem.to_html())
mc = self.columns[0]
return mc
for c in singleton.columns:
for elem in c:
col = most_suitable_column(elem)
if self.opts.verbose > 3:
idx = self.columns.index(col)
self.log.debug(u'Absorbing singleton %s into column'%elem.to_html(),
idx)
col.add(elem)
def collect_stats(self):
for column in self.columns:
column.collect_stats()
self.average_line_separation = sum([x.average_line_separation for x in
self.columns])/float(len(self.columns))
def __iter__(self):
for x in self.columns:
yield x
def absorb_regions(self, regions, at):
for region in regions:
self.absorb_region(region, at)
def absorb_region(self, region, at):
if len(region.columns) <= len(self.columns):
for i in range(len(region.columns)):
src, dest = region.columns[i], self.columns[i]
if at != 'bottom':
src = reversed(list(iter(src)))
for elem in src:
func = dest.add if at == 'bottom' else dest.prepend
func(elem)
else:
col_map = {}
for i, col in enumerate(region.columns):
max_overlap, max_overlap_index = 0, 0
for j, dcol in enumerate(self.columns):
sint = Interval(col.left, col.right)
dint = Interval(dcol.left, dcol.right)
width = sint.intersection(dint).width
if width > max_overlap:
max_overlap = width
max_overlap_index = j
col_map[i] = max_overlap_index
lines = max(map(len, region.columns))
if at == 'bottom':
lines = range(lines)
else:
lines = range(lines-1, -1, -1)
for i in lines:
for j, src in enumerate(region.columns):
dest = self.columns[col_map[j]]
if i < len(src):
func = dest.add if at == 'bottom' else dest.prepend
func(src.elements[i])
def dump(self, f):
f.write('############################################################\n')
f.write('########## Region (%d columns) ###############\n'%len(self.columns))
f.write('############################################################\n\n')
for i, col in enumerate(self.columns):
col.dump(f, i)
def linearize(self):
self.elements = []
for x in self.columns:
self.elements.extend(x)
self.boxes = [Box()]
for i, elem in enumerate(self.elements):
if isinstance(elem, Image):
self.boxes.append(ImageBox(elem))
img = Interval(elem.left, elem.right)
for j in range(i+1, len(self.elements)):
t = self.elements[j]
if not isinstance(t, Text):
break
ti = Interval(t.left, t.right)
if not ti.centered_in(img):
break
self.boxes[-1].append(t)
self.boxes.append(Box())
else:
is_indented = False
if i+1 < len(self.elements):
indent_diff = elem.indent_fraction - \
self.elements[i+1].indent_fraction
if indent_diff > 0.05:
is_indented = True
if elem.top_gap_ratio > 1.2 or is_indented:
self.boxes.append(Box())
self.boxes[-1].append(elem)
class Page(object):
# Fraction of a character width that two strings have to be apart,
# for them to be considered part of the same text fragment
COALESCE_FACTOR = 0.5
# Fraction of text height that two strings' bottoms can differ by
# for them to be considered to be part of the same text fragment
LINE_FACTOR = 0.4
# Multiplies the average line height when determining row height
# of a particular element to detect columns.
YFUZZ = 1.5
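# Worked example (illustrative, not in the original source): for a fragment
# with average_character_width of 6 px and height of 10 px, a neighbour is
# coalesced when the horizontal gap is within 0.5 * 6 = 3 px and the bottoms
# differ by less than 0.4 * 10 = 4 px.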
def __init__(self, page, font_map, opts, log, idc):
self.opts, self.log = opts, log
self.font_map = font_map
self.number = int(page.get('number'))
self.width, self.height = map(float, map(page.get,
('width', 'height')))
self.id = 'page%d'%self.number
self.texts = []
self.left_margin, self.right_margin = self.width, 0
for text in page.xpath('descendant::text'):
self.texts.append(Text(text, self.font_map, self.opts, self.log, idc))
text = self.texts[-1]
self.left_margin = min(text.left, self.left_margin)
self.right_margin = max(text.right, self.right_margin)
self.textwidth = self.right_margin - self.left_margin
self.font_size_stats = {}
self.average_text_height = 0
for t in self.texts:
if t.font_size not in self.font_size_stats:
self.font_size_stats[t.font_size] = 0
self.font_size_stats[t.font_size] += len(t.text_as_string)
self.average_text_height += t.height
if len(self.texts):
self.average_text_height /= len(self.texts)
self.font_size_stats = FontSizeStats(self.font_size_stats)
self.coalesce_fragments()
self.elements = list(self.texts)
for img in page.xpath('descendant::img'):
self.elements.append(Image(img, self.opts, self.log, idc))
self.elements.sort(cmp=lambda x,y:cmp(x.top, y.top))
def coalesce_fragments(self):
def find_match(frag):
for t in self.texts:
hdelta = t.left - frag.right
hoverlap = self.COALESCE_FACTOR * frag.average_character_width
if t is not frag and hdelta > -hoverlap and \
hdelta < hoverlap and \
abs(t.bottom - frag.bottom) < self.LINE_FACTOR*frag.height:
return t
match_found = True
while match_found:
match_found, match = False, None
for frag in self.texts:
match = find_match(frag)
if match is not None:
match_found = True
frag.coalesce(match, self.number)
break
if match is not None:
self.texts.remove(match)
def first_pass(self):
'Sort page into regions and columns'
self.regions = []
if not self.elements:
return
for i, x in enumerate(self.elements):
x.idx = i
current_region = Region(self.opts, self.log)
processed = set([])
for x in self.elements:
if x in processed: continue
elems = set(self.find_elements_in_row_of(x))
columns = self.sort_into_columns(x, elems)
processed.update(elems)
if not current_region.contains(columns):
self.regions.append(current_region)
current_region = Region(self.opts, self.log)
current_region.add(columns)
if not current_region.is_empty:
self.regions.append(current_region)
if self.opts.verbose > 2:
self.debug_dir = 'page-%d'%self.number
os.mkdir(self.debug_dir)
self.dump_regions('pre-coalesce')
self.coalesce_regions()
self.dump_regions('post-coalesce')
def dump_regions(self, fname):
fname = 'regions-'+fname+'.txt'
with open(os.path.join(self.debug_dir, fname), 'wb') as f:
f.write('Page #%d\n\n'%self.number)
for region in self.regions:
region.dump(f)
def coalesce_regions(self):
# find contiguous sets of small regions
# absorb into a neighboring region (prefer the one with number of cols
# closer to the avg number of cols in the set, if equal use larger
# region)
found = True
absorbed = set([])
processed = set([])
while found:
found = False
for i, region in enumerate(self.regions):
if region in absorbed:
continue
if region.is_small and region not in processed:
found = True
processed.add(region)
regions = [region]
end = i+1
for j in range(i+1, len(self.regions)):
end = j
if self.regions[j].is_small:
regions.append(self.regions[j])
else:
break
prev_region = None if i == 0 else i-1
next_region = end if end < len(self.regions) and self.regions[end] not in regions else None
absorb_at = 'bottom'
if prev_region is None and next_region is not None:
absorb_into = next_region
absorb_at = 'top'
elif next_region is None and prev_region is not None:
absorb_into = prev_region
elif prev_region is None and next_region is None:
if len(regions) > 1:
absorb_into = i
regions = regions[1:]
else:
absorb_into = None
else:
absorb_into = prev_region
if self.regions[next_region].line_count >= \
self.regions[prev_region].line_count:
avg_column_count = sum([len(r.columns) for r in
regions])/float(len(regions))
if self.regions[next_region].line_count > \
self.regions[prev_region].line_count \
or abs(avg_column_count -
len(self.regions[prev_region].columns)) \
> abs(avg_column_count -
len(self.regions[next_region].columns)):
absorb_into = next_region
absorb_at = 'top'
if absorb_into is not None:
self.regions[absorb_into].absorb_regions(regions, absorb_at)
absorbed.update(regions)
for region in absorbed:
self.regions.remove(region)
def sort_into_columns(self, elem, neighbors):
neighbors.add(elem)
neighbors = sorted(neighbors, cmp=lambda x,y:cmp(x.left, y.left))
if self.opts.verbose > 3:
self.log.debug('Neighbors:', [x.to_html() for x in neighbors])
columns = [Column()]
columns[0].add(elem)
for x in neighbors:
added = False
for c in columns:
if c.contains(x):
c.add(x)
added = True
break
if not added:
columns.append(Column())
columns[-1].add(x)
columns.sort(cmp=lambda x,y:cmp(x.left, y.left))
return columns
def find_elements_in_row_of(self, x):
interval = Interval(x.top,
x.top + self.YFUZZ*(self.average_text_height))
h_interval = Interval(x.left, x.right)
for y in self.elements[x.idx:x.idx+15]:
if y is not x:
y_interval = Interval(y.top, y.bottom)
x_interval = Interval(y.left, y.right)
if interval.intersection(y_interval).width > \
0.5*self.average_text_height and \
x_interval.intersection(h_interval).width <= 0:
yield y
def second_pass(self):
'Locate paragraph boundaries in each column'
for region in self.regions:
region.collect_stats()
region.linearize()
class PDFDocument(object):
def __init__(self, xml, opts, log):
self.opts, self.log = opts, log
parser = etree.XMLParser(recover=True)
self.root = etree.fromstring(xml, parser=parser)
idc = iter(xrange(sys.maxint))
self.fonts = []
self.font_map = {}
for spec in self.root.xpath('//font'):
self.fonts.append(Font(spec))
self.font_map[self.fonts[-1].id] = self.fonts[-1]
self.pages = []
self.page_map = {}
for page in self.root.xpath('//page'):
page = Page(page, self.font_map, opts, log, idc)
self.page_map[page.id] = page
self.pages.append(page)
self.collect_font_statistics()
for page in self.pages:
page.document_font_stats = self.font_size_stats
page.first_pass()
page.second_pass()
self.linearize()
self.render()
def collect_font_statistics(self):
self.font_size_stats = {}
for p in self.pages:
for sz in p.font_size_stats:
chars = p.font_size_stats[sz]
if sz not in self.font_size_stats:
self.font_size_stats[sz] = 0
self.font_size_stats[sz] += chars
self.font_size_stats = FontSizeStats(self.font_size_stats)
def linearize(self):
self.elements = []
last_region = last_block = None
for page in self.pages:
page_number_inserted = False
for region in page.regions:
merge_first_block = last_region is not None and \
len(last_region.columns) == len(region.columns) and \
not hasattr(last_block, 'img')
for i, block in enumerate(region.boxes):
if merge_first_block:
merge_first_block = False
if not page_number_inserted:
last_block.append(page.number)
page_number_inserted = True
for elem in block:
last_block.append(elem)
else:
if not page_number_inserted:
block.insert(0, page.number)
page_number_inserted = True
self.elements.append(block)
last_block = block
last_region = region
def render(self):
html = ['<?xml version="1.0" encoding="UTF-8"?>',
'<html xmlns="http://www.w3.org/1999/xhtml">', '<head>',
'<title>PDF Reflow conversion</title>', '</head>', '<body>',
'<div>']
for elem in self.elements:
html.extend(elem.to_html())
html += ['</body>', '</html>']
raw = (u'\n'.join(html)).replace('</strong><strong>', '')
with open('index.html', 'wb') as f:
f.write(raw.encode('utf-8'))
| gpl-3.0 |
vallemrv/tpvB3 | tpv/models/pedido.py | 1 | 5745 | # -*- coding: utf-8 -*-
# @Author: Manuel Rodriguez <valle>
# @Date: 04-Sep-2017
# @Email: valle.mrv@gmail.com
# @Last modified by: valle
# @Last modified time: 14-Feb-2018
# @License: Apache license version 2.0
from kivy.event import EventDispatcher
from kivy.properties import NumericProperty, StringProperty
from models.lineapedido import Constructor
from models.lineapedido import Linea
from models.db.pedidos import *
from kivy.storage.jsonstore import JsonStore
from copy import deepcopy
from datetime import datetime
import os
class Pedido(EventDispatcher):
total = NumericProperty(0.0)
modo_pago = StringProperty('Efectivo')
fecha = StringProperty('')
num_avisador = StringProperty('')
para_llevar = StringProperty('')
def __init__(self, **kargs):
super(Pedido, self).__init__(**kargs)
self.lineas_pedido = []
self.constructor = None
self.linea = None
self.dbCliente = None
self.efectivo = 0.0
self.cambio = 0.0
def add_modificador(self, obj):
db = None
if not self.linea:
self.linea = Linea(deepcopy(obj))
self.linea.obj['cant'] = 1
self.constructor = Constructor(producto=self.linea)
self.lineas_pedido.append(self.linea)
nom_ing = obj.get("ingredientes") if "ingredientes" in obj else None
if nom_ing != None:
mod = Linea(obj)
db = "../db/ingredientes/%s.json" % nom_ing
if self.linea.hay_preguntas():
db = self.linea.get_pregunta()
else:
db = self.constructor.add_modificador(obj)
return db
def rm_estado(self):
if self.linea:
if self.linea.getNumMod() > 0:
self.linea.remove_modificador()
else:
self.lineas_pedido.pop()
self.linea = None
def actualizar_total(self):
self.total = 0.0
for item in self.lineas_pedido:
self.total = self.total + item.getTotal()
def finaliza_linea(self):
self.actualizar_total()
self.linea = None
self.constructor = None
def borrar(self, linea):
borrar = False
obj = linea.obj
if obj['cant'] > 1:
obj['cant'] = obj['cant'] - 1
else:
self.linea = None
self.constructor = None
if linea in self.lineas_pedido:
self.lineas_pedido.remove(linea)
borrar = True
self.total = 0.0
for item in self.lineas_pedido:
self.total = self.total + item.getTotal()
return borrar
def getNumArt(self):
num = 0
for item in self.lineas_pedido:
num = num + item.getNumArt()
return num
def sumar(self, linea):
linea.obj['cant'] = linea.obj['cant'] + 1
self.total = 0.0
for item in self.lineas_pedido:
self.total = self.total + item.getTotal()
def set_list_pedidos(self, db):
for obj in self.lineas_pedido:
linea = LineasPedido(**{'text': obj.obj.get('text'),
'des': obj.getDescripcion(),
'cant': obj.obj.get('cant'),
'precio': obj.getPrecio(),
'total': obj.getTotal(),
'tipo': obj.obj.get("tipo")})
linea.pedidos_id = db.id
linea.save()
def guardar_pedido(self):
db = Pedidos()
db.save(total=self.total,
modo_pago=self.modo_pago,
para_llevar=self.para_llevar,
num_avisador=self.num_avisador,
entrega=self.efectivo,
cambio=self.cambio)
self.set_list_pedidos(db)
if self.num_avisador == "Para recoger":
db.save(estado = "NPG_NO")
if self.dbCliente:
self.dbCliente.pedidos.add(db)
db.save(estado = "NPG_NO")
return db
def aparcar_pedido(self):
fecha = str(datetime.now())
db = JsonStore("../db/parking/%s.json" % fecha)
lineas = []
for obj in self.lineas_pedido:
sug = [] if not 'sug' in obj.obj else obj.obj["sug"]
linea = {'text': obj.obj.get('text'),
'modificadores': obj.obj.get("modificadores"),
'cant': obj.obj.get('cant'),
'precio': obj.obj.get('precio'),
'sug': sug,
'tipo': obj.obj.get("tipo")}
lineas.append(linea)
db.put("db", total=self.total,
modo_pago=self.modo_pago,
para_llevar=self.para_llevar,
num_avisador=self.num_avisador,
efectivo=self.efectivo,
cambio=self.cambio,
lineas=lineas)
def cargar_pedido(self, db):
self.total=db.get("db")["total"]
self.modo_pago=db.get("db")['modo_pago']
self.para_llevar=db.get("db")['para_llevar']
self.num_avisador=db.get("db")['num_avisador']
self.efectivo=db.get("db")['efectivo']
self.cambio=db.get("db")['cambio']
def add_linea(self, obj):
self.linea = Linea(deepcopy(obj))
self.linea.obj['text'] = obj.get('text')
self.linea.obj['modificadores'] = obj.get("modificadores")
self.linea.obj['precio'] = obj.get('precio')
self.linea.obj['sug'] = obj.get('sug')
self.linea.obj['tipo'] = obj.get('tipo')
self.linea.obj['cant'] = obj.get('cant')
self.lineas_pedido.append(self.linea)
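# Illustrative usage sketch (assumed line dict; not part of this module):
#
#   pedido = Pedido()
#   pedido.add_linea({'text': 'Burger', 'precio': 5.0, 'cant': 2,
#                     'modificadores': [], 'sug': [], 'tipo': 'comida'})
#   pedido.finaliza_linea()    # recomputes pedido.total from all lines
#   pedido.aparcar_pedido()    # parks it under ../db/parking/<timestamp>.json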
| apache-2.0 |
timpalpant/calibre | src/calibre/gui2/viewer/gestures.py | 14 | 14599 | #!/usr/bin/env python2
# vim:fileencoding=utf-8
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid at kovidgoyal.net>'
import time, sys
from functools import partial
from PyQt5.Qt import (
QObject, QPointF, pyqtSignal, QEvent, QApplication, QMouseEvent, Qt,
QContextMenuEvent, QDialog, QDialogButtonBox, QLabel, QVBoxLayout)
from calibre.constants import iswindows
touch_supported = False
if iswindows and sys.getwindowsversion()[:2] >= (6, 2):  # At least Windows 8 (NT 6.2)
touch_supported = True
SWIPE_HOLD_INTERVAL = 0.5 # seconds
HOLD_THRESHOLD = 1.0 # seconds
TAP_THRESHOLD = 50 # manhattan pixels
SWIPE_DISTANCE = 100 # manhattan pixels
PINCH_CENTER_THRESHOLD = 150 # manhattan pixels
PINCH_SQUEEZE_FACTOR = 2.5  # smaller length must be less than larger length / squeeze factor
Tap, TapAndHold, Pinch, Swipe, SwipeAndHold = 'Tap', 'TapAndHold', 'Pinch', 'Swipe', 'SwipeAndHold'
Left, Right, Up, Down = 'Left', 'Right', 'Up', 'Down'
In, Out = 'In', 'Out'
class Help(QDialog): # {{{
def __init__(self, parent=None):
QDialog.__init__(self, parent=parent)
self.l = l = QVBoxLayout(self)
self.setLayout(l)
self.la = la = QLabel(
'''
<style>
h2 { text-align: center }
dt { font-weight: bold }
dd { margin-bottom: 1.5em }
</style>
''' + _(
'''
<h2>The list of available gestures</h2>
<dl>
<dt>Single tap</dt>
<dd>A single tap on the right two-thirds of the page will turn to the next page
and on the left one-third of the page will turn to the previous page. Single tapping
on a link will activate the link.</dd>
<dt>Swipe</dt>
<dd>Swipe to the left to go to the next page and to the right to go to the previous page.
This mimics turning pages in a paper book. When the viewer is not in paged mode, swiping
scrolls the text line by line instead of page by page.</dd>
<dt>Pinch</dt>
<dd>Pinch in or out to decrease or increase the font size</dd>
<dt>Swipe and hold</dt>
<dd>If you swipe and then hold your finger down instead of lifting it, pages will be turned
rapidly, allowing you to scan quickly through large numbers of pages.</dd>
<dt>Tap and hold</dt>
<dd>Bring up the context (right-click) menu</dd>
</dl>
'''
))
la.setAlignment(Qt.AlignTop | Qt.AlignLeft)
la.setWordWrap(True)
l.addWidget(la, Qt.AlignTop|Qt.AlignLeft)
self.bb = bb = QDialogButtonBox(QDialogButtonBox.Close)
bb.accepted.connect(self.accept)
bb.rejected.connect(self.reject)
l.addWidget(bb)
self.resize(600, 500)
# }}}
class TouchPoint(object):
def __init__(self, tp):
self.creation_time = self.last_update_time = self.time_of_last_move = time.time()
self.start_screen_position = self.current_screen_position = self.previous_screen_position = QPointF(tp.screenPos())
self.time_since_last_update = -1
self.total_movement = 0
def update(self, tp):
now = time.time()
self.time_since_last_update = now - self.last_update_time
self.last_update_time = now
self.previous_screen_position, self.current_screen_position = self.current_screen_position, QPointF(tp.screenPos())
movement = (self.current_screen_position - self.previous_screen_position).manhattanLength()
self.total_movement += movement
if movement > 5:
self.time_of_last_move = now
@property
def swipe_type(self):
x_movement = self.current_screen_position.x() - self.start_screen_position.x()
y_movement = self.current_screen_position.y() - self.start_screen_position.y()
xabs, yabs = map(abs, (x_movement, y_movement))
if max(xabs, yabs) < SWIPE_DISTANCE or min(xabs/max(yabs, 0.01), yabs/max(xabs, 0.01)) > 0.3:
return
d = x_movement if xabs > yabs else y_movement
axis = (Left, Right) if xabs > yabs else (Up, Down)
return axis[0 if d < 0 else 1]
@property
def swipe_live(self):
x_movement = self.current_screen_position.x() - self.previous_screen_position.x()
y_movement = self.current_screen_position.y() - self.previous_screen_position.y()
return (x_movement, y_movement)
def get_pinch(p1, p2):
starts = [p1.start_screen_position, p2.start_screen_position]
ends = [p1.current_screen_position, p2.current_screen_position]
start_center = (starts[0] + starts[1]) / 2.0
end_center = (ends[0] + ends[1]) / 2.0
if (start_center - end_center).manhattanLength() > PINCH_CENTER_THRESHOLD:
return None
start_length = (starts[0] - starts[1]).manhattanLength()
end_length = (ends[0] - ends[1]).manhattanLength()
if min(start_length, end_length) > max(start_length, end_length) / PINCH_SQUEEZE_FACTOR:
return None
return In if start_length > end_length else Out
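# Illustrative sketch (not calibre code): two fingers that start ~400 px apart
# and end ~100 px apart, with their midpoint moving less than
# PINCH_CENTER_THRESHOLD, classify as In (which shrinks the fonts below);
# the reverse motion classifies as Out (which magnifies them).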
class State(QObject):
tapped = pyqtSignal(object)
swiped = pyqtSignal(object)
swiping = pyqtSignal(object, object)
pinched = pyqtSignal(object)
tap_hold_started = pyqtSignal(object)
tap_hold_updated = pyqtSignal(object)
swipe_hold_started = pyqtSignal(object)
swipe_hold_updated = pyqtSignal(object)
tap_hold_finished = pyqtSignal(object)
swipe_hold_finished = pyqtSignal(object)
def __init__(self):
QObject.__init__(self)
self.clear()
def clear(self):
self.possible_gestures = set()
self.touch_points = {}
self.hold_started = False
self.hold_data = None
def start(self):
self.clear()
self.possible_gestures = {Tap, TapAndHold, Swipe, Pinch, SwipeAndHold}
def update(self, ev, boundary='update'):
if boundary == 'start':
self.start()
for tp in ev.touchPoints():
tpid = tp.id()
if tpid not in self.touch_points:
self.touch_points[tpid] = TouchPoint(tp)
else:
self.touch_points[tpid].update(tp)
if len(self.touch_points) > 2:
self.possible_gestures.clear()
elif len(self.touch_points) > 1:
self.possible_gestures &= {Pinch}
if boundary == 'end':
self.check_for_holds()
self.finalize()
self.clear()
else:
self.check_for_holds()
if {Swipe, SwipeAndHold} & self.possible_gestures:
tp = next(self.touch_points.itervalues())
self.swiping.emit(*tp.swipe_live)
def check_for_holds(self):
if not {SwipeAndHold, TapAndHold} & self.possible_gestures:
return
now = time.time()
tp = next(self.touch_points.itervalues())
if now - tp.time_of_last_move < HOLD_THRESHOLD:
return
if self.hold_started:
if TapAndHold in self.possible_gestures:
self.tap_hold_updated.emit(tp)
if SwipeAndHold in self.possible_gestures:
self.swipe_hold_updated.emit(self.hold_data[1])
else:
self.possible_gestures &= {TapAndHold, SwipeAndHold}
if tp.total_movement > TAP_THRESHOLD:
st = tp.swipe_type
if st is None:
self.possible_gestures.clear()
else:
self.hold_started = True
self.possible_gestures = {SwipeAndHold}
self.hold_data = (now, st)
self.swipe_hold_started.emit(st)
else:
self.possible_gestures = {TapAndHold}
self.hold_started = True
self.hold_data = now
self.tap_hold_started.emit(tp)
def finalize(self):
if Tap in self.possible_gestures:
tp = next(self.touch_points.itervalues())
if tp.total_movement <= TAP_THRESHOLD:
self.tapped.emit(tp)
return
if Swipe in self.possible_gestures:
tp = next(self.touch_points.itervalues())
st = tp.swipe_type
if st is not None:
self.swiped.emit(st)
return
if Pinch in self.possible_gestures:
points = tuple(self.touch_points.itervalues())
if len(points) == 2:
pinch_dir = get_pinch(*points)
if pinch_dir is not None:
self.pinched.emit(pinch_dir)
if not self.hold_started:
return
if TapAndHold in self.possible_gestures:
tp = next(self.touch_points.itervalues())
self.tap_hold_finished.emit(tp)
return
if SwipeAndHold in self.possible_gestures:
self.swipe_hold_finished.emit(self.hold_data[1])
return
class GestureHandler(QObject):
def __init__(self, view):
QObject.__init__(self, view)
self.state = State()
self.last_swipe_hold_update = None
self.state.swiped.connect(self.handle_swipe)
self.state.tapped.connect(self.handle_tap)
self.state.swiping.connect(self.handle_swiping)
self.state.tap_hold_started.connect(partial(self.handle_tap_hold, 'start'))
self.state.tap_hold_updated.connect(partial(self.handle_tap_hold, 'update'))
self.state.tap_hold_finished.connect(partial(self.handle_tap_hold, 'end'))
self.state.swipe_hold_started.connect(partial(self.handle_swipe_hold, 'start'))
self.state.swipe_hold_updated.connect(partial(self.handle_swipe_hold, 'update'))
self.state.swipe_hold_finished.connect(partial(self.handle_swipe_hold, 'end'))
self.state.pinched.connect(self.handle_pinch)
self.evmap = {QEvent.TouchBegin: 'start', QEvent.TouchUpdate: 'update', QEvent.TouchEnd: 'end'}
def __call__(self, ev):
if not touch_supported:
return False
etype = ev.type()
if etype in (
QEvent.MouseMove, QEvent.MouseButtonPress,
QEvent.MouseButtonRelease, QEvent.MouseButtonDblClick) and ev.source() != Qt.MouseEventNotSynthesized:
# swallow fake mouse events generated from touch events
ev.accept()
return True
boundary = self.evmap.get(etype, None)
if boundary is None:
return False
self.state.update(ev, boundary=boundary)
ev.accept()
return True
def close_open_menu(self):
m = getattr(self.parent(), 'context_menu', None)
if m is not None and m.isVisible():
m.close()
return True
def handle_swipe(self, direction):
if self.close_open_menu():
return
view = self.parent()
if not view.document.in_paged_mode:
return
func = {Left:'next_page', Right: 'previous_page', Up:'goto_previous_section', Down:'goto_next_section'}[direction]
getattr(view, func)()
def handle_swiping(self, x, y):
if max(abs(x), abs(y)) < 1:
return
view = self.parent()
if view.document.in_paged_mode:
return
ydirection = Up if y < 0 else Down
if view.manager is not None and abs(y) > 0:
if ydirection is Up and view.document.at_bottom:
view.manager.next_document()
return
elif ydirection is Down and view.document.at_top:
view.manager.previous_document()
return
view.scroll_by(x=-x, y=-y)
if view.manager is not None:
view.manager.scrolled(view.scroll_fraction)
def current_position(self, tp):
return self.parent().mapFromGlobal(tp.current_screen_position.toPoint())
def handle_tap(self, tp):
if self.close_open_menu():
return
view = self.parent()
mf = view.document.mainFrame()
r = mf.hitTestContent(self.current_position(tp))
if r.linkElement().isNull():
if view.document.tap_flips_pages:
threshold = view.width() / 3.0
attr = 'previous' if self.current_position(tp).x() <= threshold else 'next'
getattr(view, '%s_page'%attr)()
else:
for etype in (QEvent.MouseButtonPress, QEvent.MouseButtonRelease):
ev = QMouseEvent(etype, self.current_position(tp), tp.current_screen_position.toPoint(), Qt.LeftButton, Qt.LeftButton, Qt.NoModifier)
QApplication.sendEvent(view, ev)
def handle_tap_hold(self, action, tp):
etype = {'start':QEvent.MouseButtonPress, 'update':QEvent.MouseMove, 'end':QEvent.MouseButtonRelease}[action]
ev = QMouseEvent(etype, self.current_position(tp), tp.current_screen_position.toPoint(), Qt.LeftButton, Qt.LeftButton, Qt.NoModifier)
QApplication.sendEvent(self.parent(), ev)
if action == 'end':
ev = QContextMenuEvent(QContextMenuEvent.Other, self.current_position(tp), tp.current_screen_position.toPoint())
# We have to use post event otherwise the popup remains an alien widget and does not receive events
QApplication.postEvent(self.parent(), ev)
def handle_swipe_hold(self, action, direction):
view = self.parent()
if not view.document.in_paged_mode:
return
if action == 'start':
self.last_swipe_hold_update = time.time()
try:
self.handle_swipe(direction)
finally:
view.is_auto_repeat_event = False
elif action == 'update' and self.last_swipe_hold_update is not None and time.time() - self.last_swipe_hold_update > SWIPE_HOLD_INTERVAL:
view.is_auto_repeat_event = True
self.last_swipe_hold_update = time.time()
try:
self.handle_swipe(direction)
finally:
view.is_auto_repeat_event = False
elif action == 'end':
self.last_swipe_hold_update = None
def handle_pinch(self, direction):
attr = 'magnify' if direction is Out else 'shrink'
getattr(self.parent(), '%s_fonts' % attr)()
def show_help(self):
Help(self.parent()).exec_()
if __name__ == '__main__':
app = QApplication([])
Help().exec_()
| gpl-3.0 |
tchernomax/ansible | test/units/playbook/test_block.py | 119 | 2795 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.playbook.block import Block
from ansible.playbook.task import Task
class TestBlock(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_construct_empty_block(self):
b = Block()
def test_construct_block_with_role(self):
pass
def test_load_block_simple(self):
ds = dict(
block=[],
rescue=[],
always=[],
# otherwise=[],
)
b = Block.load(ds)
self.assertEqual(b.block, [])
self.assertEqual(b.rescue, [])
self.assertEqual(b.always, [])
# not currently used
# self.assertEqual(b.otherwise, [])
def test_load_block_with_tasks(self):
ds = dict(
block=[dict(action='block')],
rescue=[dict(action='rescue')],
always=[dict(action='always')],
# otherwise=[dict(action='otherwise')],
)
b = Block.load(ds)
self.assertEqual(len(b.block), 1)
self.assertIsInstance(b.block[0], Task)
self.assertEqual(len(b.rescue), 1)
self.assertIsInstance(b.rescue[0], Task)
self.assertEqual(len(b.always), 1)
self.assertIsInstance(b.always[0], Task)
# not currently used
# self.assertEqual(len(b.otherwise), 1)
# self.assertIsInstance(b.otherwise[0], Task)
def test_load_implicit_block(self):
ds = [dict(action='foo')]
b = Block.load(ds)
self.assertEqual(len(b.block), 1)
self.assertIsInstance(b.block[0], Task)
def test_deserialize(self):
ds = dict(
block=[dict(action='block')],
rescue=[dict(action='rescue')],
always=[dict(action='always')],
)
b = Block.load(ds)
data = dict(parent=ds, parent_type='Block')
b.deserialize(data)
self.assertIsInstance(b._parent, Block)
| gpl-3.0 |
developerQuinnZ/this_will_work | student-work/quinn_zepeda/exercism/python/bob/bob_test.py | 6 | 3337 | import unittest
import bob
# test cases adapted from `x-common//canonical-data.json` @ version: 1.0.0
class BobTests(unittest.TestCase):
def test_stating_something(self):
self.assertEqual(bob.hey("Tom-ay-to, tom-aaaah-to."), "Whatever.")
def test_shouting(self):
self.assertEqual(bob.hey("WATCH OUT!"), "Whoa, chill out!")
def test_shouting_gibberish(self):
self.assertEqual(bob.hey("FCECDFCAAB"), "Whoa, chill out!")
def test_asking_a_question(self):
self.assertEqual(
bob.hey("Does this cryogenic chamber make me look fat?"), "Sure.")
def test_asking_a_numeric_question(self):
self.assertEqual(bob.hey("You are, what, like 15?"), "Sure.")
def test_asking_gibberish(self):
self.assertEqual(bob.hey("fffbbcbeab?"), "Sure.")
def test_talking_forcefully(self):
self.assertEqual(
bob.hey("Let's go make out behind the gym!"), "Whatever.")
def test_using_acronyms_in_regular_speech(self):
self.assertEqual(
bob.hey("It's OK if you don't want to go to the DMV."),
"Whatever.")
def test_forceful_question(self):
self.assertEqual(
bob.hey("WHAT THE HELL WERE YOU THINKING?"), "Whoa, chill out!")
def test_shouting_numbers(self):
self.assertEqual(bob.hey("1, 2, 3 GO!"), "Whoa, chill out!")
def test_only_numbers(self):
self.assertEqual(bob.hey("1, 2, 3"), "Whatever.")
def test_question_with_only_numbers(self):
self.assertEqual(bob.hey("4?"), "Sure.")
def test_shouting_with_special_characters(self):
self.assertEqual(
bob.hey("ZOMG THE %^*@#$(*^ ZOMBIES ARE COMING!!11!!1!"),
"Whoa, chill out!")
def test_shouting_with_no_exclamation_mark(self):
self.assertEqual(bob.hey("I HATE YOU"), "Whoa, chill out!")
def test_statement_containing_question_mark(self):
self.assertEqual(
bob.hey("Ending with ? means a question."), "Whatever.")
def test_non_letters_with_question(self):
self.assertEqual(bob.hey(":) ?"), "Sure.")
def test_prattling_on(self):
self.assertEqual(
bob.hey("Wait! Hang on. Are you going to be OK?"), "Sure.")
def test_silence(self):
self.assertEqual(bob.hey(""), "Fine. Be that way!")
def test_prolonged_silence(self):
self.assertEqual(bob.hey(" "), "Fine. Be that way!")
def test_alternate_silence(self):
self.assertEqual(bob.hey("\t\t\t\t\t\t\t\t\t\t"), "Fine. Be that way!")
def test_multiple_line_question(self):
self.assertEqual(
bob.hey("\nDoes this cryogenic chamber make me look fat?\nno"),
"Whatever.")
def test_starting_with_whitespace(self):
self.assertEqual(bob.hey(" hmmmmmmm..."), "Whatever.")
def test_ending_with_whitespace(self):
self.assertEqual(
bob.hey("Okay if like my spacebar quite a bit? "), "Sure.")
def test_other_whitespace(self):
self.assertEqual(bob.hey("\n\r \t"), "Fine. Be that way!")
def test_non_question_ending_with_whitespace(self):
self.assertEqual(
bob.hey("This is a statement ending with whitespace "),
"Whatever.")
if __name__ == '__main__':
unittest.main()
| mit |
belevtsoff/luigi | test/clone_test.py | 53 | 1859 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from helpers import unittest
import luigi
import luigi.notifications
luigi.notifications.DEBUG = True
class LinearSum(luigi.Task):
lo = luigi.IntParameter()
hi = luigi.IntParameter()
def requires(self):
if self.hi > self.lo:
return self.clone(hi=self.hi - 1)
def run(self):
if self.hi > self.lo:
self.s = self.requires().s + self.f(self.hi - 1)
else:
self.s = 0
self.complete = lambda: True # workaround since we don't write any output
complete = lambda self: False
def f(self, x):
return x
class PowerSum(LinearSum):
p = luigi.IntParameter()
def f(self, x):
return x ** self.p
class CloneTest(unittest.TestCase):
def test_args(self):
t = LinearSum(lo=42, hi=45)
self.assertEqual(t.param_args, (42, 45))
self.assertEqual(t.param_kwargs, {'lo': 42, 'hi': 45})
def test_recursion(self):
t = LinearSum(lo=42, hi=45)
luigi.build([t], local_scheduler=True)
self.assertEqual(t.s, 42 + 43 + 44)
def test_inheritance(self):
t = PowerSum(lo=42, hi=45, p=2)
luigi.build([t], local_scheduler=True)
self.assertEqual(t.s, 42 ** 2 + 43 ** 2 + 44 ** 2)
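# Illustrative note (not part of the test suite): Task.clone copies every
# matching parameter and overrides only those given, so the recursion in
# LinearSum.requires() is equivalent to:
#
#   LinearSum(lo=self.lo, hi=self.hi - 1)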
| apache-2.0 |
Kazade/NeHe-Website | google_appengine/lib/django-1.4/django/contrib/gis/maps/google/gmap.py | 90 | 8964 | from django.conf import settings
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from django.contrib.gis.maps.google.overlays import GPolygon, GPolyline, GMarker
class GoogleMapException(Exception):
pass
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
GOOGLE_MAPS_URL='http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
"A class for generating Google Maps JavaScript."
# String constants
onunload = mark_safe('onunload="GUnload()"') # Cleans up after Google Maps
vml_css = mark_safe('v\:* {behavior:url(#default#VML);}') # CSS for IE VML
xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"') # XML Namespace (for IE VML).
def __init__(self, key=None, api_url=None, version=None,
center=None, zoom=None, dom_id='map',
kml_urls=[], polylines=None, polygons=None, markers=None,
template='gis/google/google-map.js',
js_module='geodjango',
extra_context={}):
# The Google Maps API Key defined in the settings will be used
# if not passed in as a parameter. The use of an API key is
# _required_.
if not key:
try:
self.key = settings.GOOGLE_MAPS_API_KEY
except AttributeError:
raise GoogleMapException('Google Maps API Key not found (try adding GOOGLE_MAPS_API_KEY to your settings).')
else:
self.key = key
# Getting the Google Maps API version, defaults to using the latest ("2.x"),
# this is not necessarily the most stable.
if not version:
self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
else:
self.version = version
# Can specify the API URL in the `api_url` keyword.
if not api_url:
self.api_url = mark_safe(getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version)
else:
self.api_url = api_url
# Setting the DOM id of the map, the load function, the JavaScript
# template, and the KML URLs array.
self.dom_id = dom_id
self.extra_context = extra_context
self.js_module = js_module
self.template = template
self.kml_urls = kml_urls
# Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
overlay_info = [[GMarker, markers, 'markers'],
[GPolygon, polygons, 'polygons'],
[GPolyline, polylines, 'polylines']]
for overlay_class, overlay_list, varname in overlay_info:
setattr(self, varname, [])
if overlay_list:
for overlay in overlay_list:
if isinstance(overlay, overlay_class):
getattr(self, varname).append(overlay)
else:
getattr(self, varname).append(overlay_class(overlay))
# If GMarker, GPolygons, and/or GPolylines are used the zoom will be
# automatically calculated via the Google Maps API. If both a zoom
# level and a center coordinate are provided with polygons/polylines,
# no automatic determination will occur.
self.calc_zoom = False
if self.polygons or self.polylines or self.markers:
if center is None or zoom is None:
self.calc_zoom = True
# Defaults for the zoom level and center coordinates if the zoom
# is not automatically calculated.
if zoom is None: zoom = 4
self.zoom = zoom
if center is None: center = (0, 0)
self.center = center
def render(self):
"""
Generates the JavaScript necessary for displaying this Google Map.
"""
params = {'calc_zoom' : self.calc_zoom,
'center' : self.center,
'dom_id' : self.dom_id,
'js_module' : self.js_module,
'kml_urls' : self.kml_urls,
'zoom' : self.zoom,
'polygons' : self.polygons,
'polylines' : self.polylines,
'icons': self.icons,
'markers' : self.markers,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def body(self):
"Returns HTML body tag for loading and unloading Google Maps javascript."
return mark_safe('<body %s %s>' % (self.onload, self.onunload))
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
return mark_safe('onload="%s.%s_load()"' % (self.js_module, self.dom_id))
@property
def api_script(self):
"Returns the <script> tag for the Google Maps API javascript."
return mark_safe('<script src="%s%s" type="text/javascript"></script>' % (self.api_url, self.key))
@property
def js(self):
"Returns only the generated Google Maps JavaScript (no <script> tags)."
return self.render()
@property
def scripts(self):
"Returns all <script></script> tags required with Google Maps JavaScript."
return mark_safe('%s\n <script type="text/javascript">\n//<![CDATA[\n%s//]]>\n </script>' % (self.api_script, self.js))
@property
def style(self):
"Returns additional CSS styling needed for Google Maps on IE."
return mark_safe('<style type="text/css">%s</style>' % self.vml_css)
@property
def xhtml(self):
"Returns XHTML information needed for IE VML overlays."
return mark_safe('<html xmlns="http://www.w3.org/1999/xhtml" %s>' % self.xmlns)
@property
def icons(self):
"Returns a sequence of GIcon objects in this map."
return set([marker.icon for marker in self.markers if marker.icon])
class GoogleMapSet(GoogleMap):
def __init__(self, *args, **kwargs):
"""
A class for generating sets of Google Maps that will be shown on the
same page together.
Example:
gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
gmapset = GoogleMapSet( [ gmap1, gmap2] )
"""
# The `google-multi.js` template is used instead of `google-single.js`
# by default.
template = kwargs.pop('template', 'gis/google/google-multi.js')
# This is the template used to generate the GMap load JavaScript for
# each map in the set.
self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')
# Running GoogleMap.__init__(), and resetting the template
# value with default obtained above.
super(GoogleMapSet, self).__init__(**kwargs)
self.template = template
# If a tuple/list passed in as first element of args, then assume
if isinstance(args[0], (tuple, list)):
self.maps = args[0]
else:
self.maps = args
# Generating DOM ids for each of the maps in the set.
self.dom_ids = ['map%d' % i for i in xrange(len(self.maps))]
def load_map_js(self):
"""
Returns JavaScript containing all of the loading routines for each
map in this set.
"""
result = []
for dom_id, gmap in zip(self.dom_ids, self.maps):
# Backup copies the GoogleMap DOM id and template attributes.
# They are overridden on each GoogleMap instance in the set so
# that only the loading JavaScript (and not the header variables)
# is used with the generated DOM ids.
tmp = (gmap.template, gmap.dom_id)
gmap.template = self.map_template
gmap.dom_id = dom_id
result.append(gmap.js)
# Restoring the backup values.
gmap.template, gmap.dom_id = tmp
return mark_safe(''.join(result))
def render(self):
"""
Generates the JavaScript for the collection of Google Maps in
this set.
"""
params = {'js_module' : self.js_module,
'dom_ids' : self.dom_ids,
'load_map_js' : self.load_map_js(),
'icons' : self.icons,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
# Overloaded to use the `load` function defined in the
# `google-multi.js`, which calls the load routines for
# each one of the individual maps in the set.
return mark_safe('onload="%s.load()"' % self.js_module)
@property
def icons(self):
"Returns a sequence of all icons in each map of the set."
icons = set()
for map in self.maps: icons |= map.icons
return icons
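# Illustrative usage sketch (assumed view/template names, not part of Django):
#
#   def map_view(request):
#       gmap = GoogleMap(center=(5, 23), zoom=8, key='ABCDEFG')
#       return render_to_response('map.html', {'google': gmap})
#
# and in map.html emit {{ google.xhtml }}, {{ google.style }},
# {{ google.scripts }} and {{ google.body }} in the appropriate places.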
| bsd-3-clause |
naslanidis/ansible | lib/ansible/modules/cloud/google/gce_pd.py | 24 | 10645 | #!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
This option is deprecated. Use 'credentials_file'.
required: false
default: null
aliases: []
credentials_file:
version_added: "2.1.0"
description:
- path to the JSON file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
delete_on_termination:
version_added: "2.3"
description:
- If yes, deletes the volume when instance is terminated
default: no
choices: ["yes", "no"]
requirements:
- "python >= 2.6"
- "apache-libcloud >= 0.13.3, >= 0.17.0 if using JSON credentials"
author: "Eric Johnson (@erjohnso) <erjohnso@google.com>"
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
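# A fuller example (hypothetical values) creating a 50 GB SSD disk from a
# snapshot and attaching it read-write to an existing instance
- local_action:
    module: gce_pd
    name: data-disk
    size_gb: 50
    disk_type: pd-ssd
    snapshot: nightly-backup
    instance_name: notlocalhost
    mode: READ_WRITE
    zone: us-central1-b
    state: present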
'''
try:
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceNotFoundError, ResourceInUseError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:
HAS_LIBCLOUD = False
def main():
module = AnsibleModule(
argument_spec = dict(
delete_on_termination = dict(type='bool'),
detach_only = dict(type='bool'),
instance_name = dict(),
mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
name = dict(required=True),
size_gb = dict(default=10),
disk_type = dict(default='pd-standard'),
image = dict(),
snapshot = dict(),
state = dict(default='present'),
zone = dict(default='us-central1-b'),
service_account_email = dict(),
pem_file = dict(),
credentials_file = dict(),
project_id = dict(),
)
)
if not HAS_LIBCLOUD:
module.fail_json(msg='libcloud with GCE support (0.17.0+) is required for this module')
gce = gce_connect(module)
delete_on_termination = module.params.get('delete_on_termination')
detach_only = module.params.get('detach_only')
instance_name = module.params.get('instance_name')
mode = module.params.get('mode')
name = module.params.get('name')
size_gb = module.params.get('size_gb')
disk_type = module.params.get('disk_type')
image = module.params.get('image')
snapshot = module.params.get('snapshot')
state = module.params.get('state')
zone = module.params.get('zone')
if delete_on_termination and not instance_name:
module.fail_json(
msg='Must specify an instance name when requesting delete on termination',
changed=False)
if detach_only and not instance_name:
module.fail_json(
msg='Must specify an instance name when detaching a disk',
changed=False)
disk = inst = None
changed = is_attached = False
json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
if detach_only:
json_output['detach_only'] = True
json_output['detached_from_instance'] = instance_name
if instance_name:
# user wants to attach/detach from an existing instance
try:
inst = gce.ex_get_node(instance_name, zone)
# is the disk attached?
for d in inst.extra['disks']:
if d['deviceName'] == name:
is_attached = True
json_output['attached_mode'] = d['mode']
json_output['attached_to_instance'] = inst.name
except:
pass
# find disk if it already exists
try:
disk = gce.ex_get_volume(name)
json_output['size_gb'] = int(disk.size)
except ResourceNotFoundError:
pass
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
# user wants a disk to exist. If "instance_name" is supplied the user
# also wants it attached
if state in ['active', 'present']:
if not size_gb:
module.fail_json(msg="Must supply a size_gb", changed=False)
try:
size_gb = int(round(float(size_gb)))
if size_gb < 1:
raise Exception
except:
module.fail_json(msg="Must supply a size_gb larger than 1 GB",
changed=False)
if instance_name and inst is None:
module.fail_json(msg='Instance %s does not exist in zone %s' % (
instance_name, zone), changed=False)
if not disk:
if image is not None and snapshot is not None:
module.fail_json(
msg='Cannot give both image (%s) and snapshot (%s)' % (
image, snapshot), changed=False)
lc_image = None
lc_snapshot = None
if image is not None:
lc_image = gce.ex_get_image(image)
elif snapshot is not None:
lc_snapshot = gce.ex_get_snapshot(snapshot)
try:
disk = gce.create_volume(
size_gb, name, location=zone, image=lc_image,
snapshot=lc_snapshot, ex_disk_type=disk_type)
except ResourceExistsError:
pass
except QuotaExceededError:
module.fail_json(msg='Requested disk size exceeds quota',
changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['size_gb'] = size_gb
if image is not None:
json_output['image'] = image
if snapshot is not None:
json_output['snapshot'] = snapshot
changed = True
if inst and not is_attached:
try:
gce.attach_volume(inst, disk, device=name, ex_mode=mode,
ex_auto_delete=delete_on_termination)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
json_output['attached_to_instance'] = inst.name
json_output['attached_mode'] = mode
if delete_on_termination:
json_output['delete_on_termination'] = True
changed = True
# user wants to delete a disk (or perhaps just detach it).
if state in ['absent', 'deleted'] and disk:
if inst and is_attached:
try:
gce.detach_volume(disk, ex_node=inst)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
if not detach_only:
try:
gce.destroy_volume(disk)
except ResourceInUseError as e:
module.fail_json(msg=str(e.value), changed=False)
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
changed = True
json_output['changed'] = changed
module.exit_json(**json_output)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
if __name__ == '__main__':
main()
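# Illustrative playbook usage, assuming this file is Ansible's gce_pd module
# (the module name and all values below are hypothetical):
#
#   - name: create a 10 GB disk and attach it to an instance
#     gce_pd:
#       name: my-disk
#       size_gb: 10
#       zone: us-central1-a
#       instance_name: my-instance
#       mode: READ_WRITE
#       state: present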
| gpl-3.0 |
Tejal011089/digitales_erpnext | erpnext/accounts/report/supplier_account_head/supplier_account_head.py | 40 | 1123 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute(filters=None):
account_map = get_account_map()
columns = get_columns(account_map)
data = []
suppliers = frappe.db.sql("select name from tabSupplier where docstatus < 2")
for supplier in suppliers:
row = [supplier[0]]
for company in sorted(account_map):
row.append(account_map[company].get(supplier[0], ''))
data.append(row)
return columns, data
def get_account_map():
accounts = frappe.db.sql("""select name, company, master_name
from `tabAccount` where master_type = 'Supplier'
and ifnull(master_name, '') != '' and docstatus < 2""", as_dict=1)
account_map = {}
for acc in accounts:
account_map.setdefault(acc.company, {}).setdefault(acc.master_name, {})
account_map[acc.company][acc.master_name] = acc.name
return account_map
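# Illustrative shape of the mapping returned above (data is hypothetical):
#   {'Acme Corp': {'SUP-0001': 'SUP-0001 - Creditors - AC'},
#    'Beta Ltd': {'SUP-0002': 'SUP-0002 - Creditors - BL'}}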
def get_columns(account_map):
columns = ["Supplier:Link/Supplier:120"] + \
[(company + ":Link/Account:120") for company in sorted(account_map)]
return columns | agpl-3.0 |
bcheung92/Paperproject | gem5/src/dev/Uart.py | 66 | 1976 | # Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.params import *
from m5.proxy import *
from Device import BasicPioDevice
class Uart(BasicPioDevice):
type = 'Uart'
abstract = True
cxx_header = "dev/uart.hh"
platform = Param.Platform(Parent.any, "Platform this device is part of.")
terminal = Param.Terminal(Parent.any, "The terminal")
class Uart8250(Uart):
type = 'Uart8250'
cxx_header = "dev/uart8250.hh"
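# A minimal config-script sketch (names and the pio_addr value are
# illustrative; a real platform config normally wires these up itself):
#
#   uart = Uart8250(pio_addr=0x3f8)
#   uart.terminal = Terminal()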
| mit |
fotinakis/sentry | src/sentry/south_migrations/0179_auto__add_field_release_date_released.py | 34 | 36688 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Release.date_released'
db.add_column('sentry_release', 'date_released',
self.gf('django.db.models.fields.DateTimeField')(null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Release.date_released'
db.delete_column('sentry_release', 'date_released')
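# To apply or revert this migration with South (commands assumed from
# standard South usage):
#   python manage.py migrate sentry 0179
#   python manage.py migrate sentry 0178   # roll back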
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'audit_actors'", 'to': "orm['sentry.User']"}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'storage': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'storage_options': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.helppage': {
'Meta': {'object_name': 'HelpPage'},
'content': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True'}),
'priority': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'counter': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry'] | bsd-3-clause |
manojhirway/ExistingImagesOnNFS | cinder/tests/unit/api/contrib/test_availability_zones.py | 32 | 2932 | # Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from lxml import etree
from oslo_utils import timeutils
import cinder.api.contrib.availability_zones
import cinder.context
import cinder.test
import cinder.volume.api
created_time = datetime.datetime(2012, 11, 14, 1, 20, 41, 95099)
current_time = timeutils.utcnow()
def list_availability_zones(self):
return (
{'name': 'ping', 'available': True},
{'name': 'pong', 'available': False},
)
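# The stub above matches the signature of
# cinder.volume.api.API.list_availability_zones; setUp() below swaps it in
# so the controller under test sees two deterministic fake zones.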
class FakeRequest(object):
environ = {'cinder.context': cinder.context.get_admin_context()}
GET = {}
class ControllerTestCase(cinder.test.TestCase):
def setUp(self):
super(ControllerTestCase, self).setUp()
self.controller = cinder.api.contrib.availability_zones.Controller()
self.req = FakeRequest()
self.stubs.Set(cinder.volume.api.API,
'list_availability_zones',
list_availability_zones)
def test_list_hosts(self):
"""Verify that the volume hosts are returned."""
actual = self.controller.index(self.req)
expected = {
'availabilityZoneInfo': [
{'zoneName': 'ping', 'zoneState': {'available': True}},
{'zoneName': 'pong', 'zoneState': {'available': False}},
],
}
self.assertEqual(expected, actual)
class XMLSerializerTest(cinder.test.TestCase):
def test_index_xml(self):
fixture = {
'availabilityZoneInfo': [
{'zoneName': 'ping', 'zoneState': {'available': True}},
{'zoneName': 'pong', 'zoneState': {'available': False}},
],
}
serializer = cinder.api.contrib.availability_zones.ListTemplate()
text = serializer.serialize(fixture)
tree = etree.fromstring(text)
self.assertEqual('availabilityZones', tree.tag)
self.assertEqual(2, len(tree))
self.assertEqual('availabilityZone', tree[0].tag)
self.assertEqual('ping', tree[0].get('name'))
self.assertEqual('zoneState', tree[0][0].tag)
self.assertEqual('True', tree[0][0].get('available'))
self.assertEqual('pong', tree[1].get('name'))
self.assertEqual('zoneState', tree[1][0].tag)
self.assertEqual('False', tree[1][0].get('available'))
| apache-2.0 |
Changaco/oh-mainline | vendor/packages/celery/celery/tests/test_concurrency/test_concurrency_eventlet.py | 18 | 1179 | from __future__ import absolute_import
import os
import sys
from nose import SkipTest
from celery.tests.utils import unittest
class EventletCase(unittest.TestCase):
def setUp(self):
if getattr(sys, "pypy_version_info", None):
raise SkipTest("Does not work on PyPy")
try:
self.eventlet = __import__("eventlet")
except ImportError:
raise SkipTest(
"eventlet not installed, skipping related tests.")
class test_eventlet_patch(EventletCase):
def test_is_patched(self):
monkey_patched = []
prev_monkey_patch = self.eventlet.monkey_patch
self.eventlet.monkey_patch = lambda: monkey_patched.append(True)
prev_eventlet = sys.modules.pop("celery.concurrency.eventlet", None)
os.environ.pop("EVENTLET_NOPATCH")
try:
from celery.concurrency import eventlet
self.assertTrue(eventlet)
self.assertTrue(monkey_patched)
finally:
sys.modules["celery.concurrency.eventlet"] = prev_eventlet
os.environ["EVENTLET_NOPATCH"] = "yes"
self.eventlet.monkey_patch = prev_monkey_patch
| agpl-3.0 |
shepdelacreme/ansible | lib/ansible/modules/network/radware/vdirect_commit.py | 27 | 12999 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Radware LTD.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
module: vdirect_commit
author: Evgeny Fedoruk @ Radware LTD (@evgenyfedoruk)
short_description: Commits pending configuration changes on Radware devices
description:
- Commits pending configuration changes on one or more Radware devices via vDirect server.
- For Alteon ADC device, apply, sync and save actions will be performed by default.
Skipping of an action is possible by explicit parameter specifying.
- For Alteon VX Container device, no sync operation will be performed
since sync action is only relevant for Alteon ADC devices.
- For DefensePro and AppWall devices, a bulk commit action will be performed.
Explicit apply, sync and save actions specifying is not relevant.
notes:
- Requires the Radware vdirect-client Python package on the host. This is as easy as
C(pip install vdirect-client)
version_added: "2.5"
options:
vdirect_ip:
description:
- Primary vDirect server IP address, may be set as C(VDIRECT_IP) environment variable.
required: true
vdirect_user:
description:
- vDirect server username, may be set as C(VDIRECT_USER) environment variable.
required: true
vdirect_password:
description:
- vDirect server password, may be set as C(VDIRECT_PASSWORD) environment variable.
required: true
vdirect_secondary_ip:
description:
- Secondary vDirect server IP address, may be set as C(VDIRECT_SECONDARY_IP) environment variable.
vdirect_wait:
description:
- Wait for async operation to complete, may be set as C(VDIRECT_WAIT) environment variable.
type: bool
default: 'yes'
vdirect_https_port:
description:
- vDirect server HTTPS port number, may be set as C(VDIRECT_HTTPS_PORT) environment variable.
default: 2189
vdirect_http_port:
description:
- vDirect server HTTP port number, may be set as C(VDIRECT_HTTP_PORT) environment variable.
default: 2188
vdirect_timeout:
description:
- Amount of time to wait for async operation completion [seconds]; may be set as C(VDIRECT_TIMEOUT) environment variable.
default: 60
vdirect_use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection; may be set as C(VDIRECT_HTTPS) or C(VDIRECT_USE_SSL) environment variable.
type: bool
default: 'yes'
vdirect_validate_certs:
description:
- If C(no), SSL certificates will not be validated; may be set as C(VDIRECT_VALIDATE_CERTS) or C(VDIRECT_VERIFY) environment variable.
- This should only be set to C(no) on personally controlled sites using self-signed certificates.
type: bool
default: 'yes'
devices:
description:
- List of Radware Alteon device names for commit operations.
required: true
apply:
description:
- If C(no), apply action will not be performed. Relevant for ADC devices only.
type: bool
default: 'yes'
save:
description:
- If C(no), save action will not be performed. Relevant for ADC devices only.
type: bool
default: 'yes'
sync:
description:
- If C(no), sync action will not be performed. Relevant for ADC devices only.
type: bool
default: 'yes'
requirements:
- "vdirect-client >= 4.1.1"
'''
EXAMPLES = '''
- name: vdirect_commit
vdirect_commit:
vdirect_ip: 10.10.10.10
vdirect_user: vDirect
vdirect_password: radware
devices: ['dev1', 'dev2']
sync: no
'''
RETURN = '''
result:
description: Message detailing actions result
returned: success
type: string
sample: "Requested actions were successfully performed on all devices."
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from vdirect_client import rest_client
HAS_REST_CLIENT = True
except ImportError:
HAS_REST_CLIENT = False
SUCCESS = 'Requested actions were successfully performed on all devices.'
FAILURE = 'Failure occurred while performing requested actions on devices. See details'
ADC_DEVICE_TYPE = 'Adc'
CONTAINER_DEVICE_TYPE = 'Container'
PARTITIONED_CONTAINER_DEVICE_TYPE = 'AlteonPartitioned'
APPWALL_DEVICE_TYPE = 'AppWall'
DP_DEVICE_TYPE = 'DefensePro'
SUCCEEDED = 'succeeded'
FAILED = 'failed'
NOT_PERFORMED = 'not performed'
meta_args = dict(
vdirect_ip=dict(required=True, fallback=(env_fallback, ['VDIRECT_IP'])),
vdirect_user=dict(required=True, fallback=(env_fallback, ['VDIRECT_USER'])),
vdirect_password=dict(
required=True, fallback=(env_fallback, ['VDIRECT_PASSWORD']),
no_log=True, type='str'),
vdirect_secondary_ip=dict(
required=False, fallback=(env_fallback, ['VDIRECT_SECONDARY_IP']),
default=None),
vdirect_use_ssl=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS', 'VDIRECT_USE_SSL']),
default=True, type='bool'),
vdirect_wait=dict(
required=False, fallback=(env_fallback, ['VDIRECT_WAIT']),
default=True, type='bool'),
vdirect_timeout=dict(
required=False, fallback=(env_fallback, ['VDIRECT_TIMEOUT']),
default=60, type='int'),
vdirect_validate_certs=dict(
required=False, fallback=(env_fallback, ['VDIRECT_VERIFY', 'VDIRECT_VALIDATE_CERTS']),
default=True, type='bool'),
vdirect_https_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS_PORT']),
default=2189, type='int'),
vdirect_http_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTP_PORT']),
default=2188, type='int'),
devices=dict(
required=True, type='list'),
apply=dict(
required=False, default=True, type='bool'),
save=dict(
required=False, default=True, type='bool'),
sync=dict(
required=False, default=True, type='bool'),
)
class CommitException(Exception):
def __init__(self, reason, details):
self.reason = reason
self.details = details
def __str__(self):
return 'Reason: {0}. Details:{1}.'.format(self.reason, self.details)
class MissingDeviceException(CommitException):
def __init__(self, device_name):
super(MissingDeviceException, self).__init__(
'Device missing',
'Device ' + repr(device_name) + ' does not exist')
class VdirectCommit(object):
def __init__(self, params):
self.client = rest_client.RestClient(params['vdirect_ip'],
params['vdirect_user'],
params['vdirect_password'],
wait=params['vdirect_wait'],
secondary_vdirect_ip=params['vdirect_secondary_ip'],
https_port=params['vdirect_https_port'],
http_port=params['vdirect_http_port'],
timeout=params['vdirect_timeout'],
https=params['vdirect_use_ssl'],
verify=params['vdirect_validate_certs'])
self.devices = params['devices']
self.apply = params['apply']
self.save = params['save']
self.sync = params['sync']
self.devicesMap = {}
def _validate_devices(self):
for device in self.devices:
try:
res = self.client.adc.get(device)
if res[rest_client.RESP_STATUS] == 200:
self.devicesMap.update({device: ADC_DEVICE_TYPE})
continue
res = self.client.container.get(device)
if res[rest_client.RESP_STATUS] == 200:
if res[rest_client.RESP_DATA]['type'] == PARTITIONED_CONTAINER_DEVICE_TYPE:
self.devicesMap.update({device: CONTAINER_DEVICE_TYPE})
continue
res = self.client.appWall.get(device)
if res[rest_client.RESP_STATUS] == 200:
self.devicesMap.update({device: APPWALL_DEVICE_TYPE})
continue
res = self.client.defensePro.get(device)
if res[rest_client.RESP_STATUS] == 200:
self.devicesMap.update({device: DP_DEVICE_TYPE})
continue
except Exception as e:
raise CommitException('Failed to communicate with device ' + device, str(e))
raise MissingDeviceException(device)
def _perform_action_and_update_result(self, device, action, perform, failure_occurred, actions_result):
if not perform or failure_occurred:
actions_result[action] = NOT_PERFORMED
return True
try:
if self.devicesMap[device] == ADC_DEVICE_TYPE:
res = self.client.adc.control_device(device, action)
elif self.devicesMap[device] == CONTAINER_DEVICE_TYPE:
res = self.client.container.control(device, action)
elif self.devicesMap[device] == APPWALL_DEVICE_TYPE:
res = self.client.appWall.control_device(device, action)
elif self.devicesMap[device] == DP_DEVICE_TYPE:
res = self.client.defensePro.control_device(device, action)
if res[rest_client.RESP_STATUS] in [200, 204]:
actions_result[action] = SUCCEEDED
else:
actions_result[action] = FAILED
actions_result['failure_description'] = res[rest_client.RESP_STR]
return False
except Exception as e:
actions_result[action] = FAILED
actions_result['failure_description'] = 'Exception occurred while performing '\
+ action + ' action. Exception: ' + str(e)
return False
return True
def commit(self):
self._validate_devices()
result_to_return = dict()
result_to_return['details'] = list()
for device in self.devices:
failure_occurred = False
device_type = self.devicesMap[device]
actions_result = dict()
actions_result['device_name'] = device
actions_result['device_type'] = device_type
if device_type in [DP_DEVICE_TYPE, APPWALL_DEVICE_TYPE]:
failure_occurred = not self._perform_action_and_update_result(
device, 'commit', True, failure_occurred, actions_result)\
or failure_occurred
else:
failure_occurred = not self._perform_action_and_update_result(
device, 'apply', self.apply, failure_occurred, actions_result)\
or failure_occurred
if device_type != CONTAINER_DEVICE_TYPE:
failure_occurred = not self._perform_action_and_update_result(
device, 'sync', self.sync, failure_occurred, actions_result)\
or failure_occurred
failure_occurred = not self._perform_action_and_update_result(
device, 'save', self.save, failure_occurred, actions_result)\
or failure_occurred
result_to_return['details'].extend([actions_result])
if failure_occurred:
result_to_return['msg'] = FAILURE
if 'msg' not in result_to_return:
result_to_return['msg'] = SUCCESS
return result_to_return
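# Illustrative shape of the dict returned by commit() (device names and
# statuses are hypothetical):
#   {'msg': 'Requested actions were successfully performed on all devices.',
#    'details': [{'device_name': 'dev1', 'device_type': 'Adc',
#                 'apply': 'succeeded', 'sync': 'succeeded',
#                 'save': 'succeeded'}]}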
def main():
module = AnsibleModule(argument_spec=meta_args)
if not HAS_REST_CLIENT:
module.fail_json(msg="The python vdirect-client module is required")
try:
vdirect_commit = VdirectCommit(module.params)
result = vdirect_commit.commit()
result = dict(result=result)
module.exit_json(**result)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
testalt/electrum-dgc | gui/gtk.py | 1 | 49996 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import datetime
import thread, time, ast, sys, re
import socket, traceback
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GObject, cairo
from decimal import Decimal
from electrum_dgc.util import print_error
from electrum_dgc.bitcoin import is_valid
from electrum_dgc import WalletStorage, Wallet
Gdk.threads_init()
APP_NAME = "Electrum-dgc"
import platform
MONOSPACE_FONT = 'Lucida Console' if platform.system() == 'Windows' else 'monospace'
from electrum_dgc.util import format_satoshis, parse_URI
from electrum_dgc.network import DEFAULT_SERVERS
from electrum_dgc.bitcoin import MIN_RELAY_TX_FEE
def numbify(entry, is_int = False):
text = entry.get_text().strip()
chars = '0123456789'
if not is_int: chars +='.'
s = ''.join([i for i in text if i in chars])
if not is_int:
if '.' in s:
p = s.find('.')
s = s.replace('.','')
s = s[:p] + '.' + s[p:p+8]
try:
amount = int( Decimal(s) * 100000000 )
except Exception:
amount = None
else:
try:
amount = int( s )
except Exception:
amount = None
entry.set_text(s)
return amount
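# Typical wiring for numbify, mirroring its use further down in this file
# (widget names are illustrative):
#   fee_entry.connect('changed', numbify, False)  # decimal amounts
#   nz_entry.connect('changed', numbify, True)    # integer-only fields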
def show_seed_dialog(seed, parent):
if not seed:
show_message("No seed")
return
dialog = Gtk.MessageDialog(
parent = parent,
flags = Gtk.DialogFlags.MODAL,
buttons = Gtk.ButtonsType.OK,
message_format = "Your wallet generation seed is:\n\n" + '"' + seed + '"'\
+ "\n\nPlease keep it in a safe place; if you lose it, you will not be able to restore your wallet.\n\n" )
dialog.set_title("Seed")
dialog.show()
dialog.run()
dialog.destroy()
def restore_create_dialog():
# ask if the user wants to create a new wallet, or recover from a seed.
# if he wants to recover, and nothing is found, do not create wallet
dialog = Gtk.Dialog("electrum-dgc", parent=None,
flags=Gtk.DialogFlags.MODAL,
buttons= ("create", 0, "restore",1, "cancel",2) )
label = Gtk.Label("Wallet file not found.\nDo you want to create a new wallet,\n or to restore an existing one?" )
label.show()
dialog.vbox.pack_start(label, True, True, 0)
dialog.show()
r = dialog.run()
dialog.destroy()
if r==2: return False
return 'restore' if r==1 else 'create'
def run_recovery_dialog():
message = "Please enter your wallet seed or the corresponding mnemonic list of words, and the gap limit of your wallet."
dialog = Gtk.MessageDialog(
parent = None,
flags = Gtk.DialogFlags.MODAL,
buttons = Gtk.ButtonsType.OK_CANCEL,
message_format = message)
vbox = dialog.vbox
dialog.set_default_response(Gtk.ResponseType.OK)
# ask for the seed in this dialog
seed_box = Gtk.HBox()
seed_label = Gtk.Label(label='Seed or mnemonic:')
seed_label.set_size_request(150,-1)
seed_box.pack_start(seed_label, False, False, 10)
seed_label.show()
seed_entry = Gtk.Entry()
seed_entry.show()
seed_entry.set_size_request(450,-1)
seed_box.pack_start(seed_entry, False, False, 10)
add_help_button(seed_box, '.')
seed_box.show()
vbox.pack_start(seed_box, False, False, 5)
dialog.show()
r = dialog.run()
seed = seed_entry.get_text()
dialog.destroy()
if r==Gtk.ResponseType.CANCEL:
return False
if Wallet.is_seed(seed):
return seed
show_message("no seed")
return False
def run_settings_dialog(self):
message = "Here are the settings of your wallet. For more explanations, click on the question mark buttons next to each input field."
dialog = Gtk.MessageDialog(
parent = self.window,
flags = Gtk.DialogFlags.MODAL,
buttons = Gtk.ButtonsType.OK_CANCEL,
message_format = message)
image = Gtk.Image()
image.set_from_stock(Gtk.STOCK_PREFERENCES, Gtk.IconSize.DIALOG)
image.show()
dialog.set_image(image)
dialog.set_title("Settings")
vbox = dialog.vbox
dialog.set_default_response(Gtk.ResponseType.OK)
fee = Gtk.HBox()
fee_entry = Gtk.Entry()
fee_label = Gtk.Label(label='Transaction fee:')
fee_label.set_size_request(150,10)
fee_label.show()
fee.pack_start(fee_label,False, False, 10)
fee_entry.set_text( str( Decimal(self.wallet.fee_per_kb) /100000000 ) )
fee_entry.connect('changed', numbify, False)
fee_entry.show()
fee.pack_start(fee_entry,False,False, 10)
add_help_button(fee, 'Fee per kilobyte of transaction. Recommended value: 0.0001')
fee.show()
vbox.pack_start(fee, False,False, 5)
nz = Gtk.HBox()
nz_entry = Gtk.Entry()
nz_label = Gtk.Label(label='Display zeros:')
nz_label.set_size_request(150,10)
nz_label.show()
nz.pack_start(nz_label,False, False, 10)
nz_entry.set_text( str( self.num_zeros ))
nz_entry.connect('changed', numbify, True)
nz_entry.show()
nz.pack_start(nz_entry,False,False, 10)
add_help_button(nz, "Number of zeros displayed after the decimal point.\nFor example, if this number is 2, then '5.' is displayed as '5.00'")
nz.show()
vbox.pack_start(nz, False,False, 5)
dialog.show()
r = dialog.run()
fee = fee_entry.get_text()
nz = nz_entry.get_text()
dialog.destroy()
if r==Gtk.ResponseType.CANCEL:
return
try:
fee = int( 100000000 * Decimal(fee) )
except Exception:
show_message("error")
return
self.wallet.set_fee(fee)
try:
nz = int( nz )
if nz>8: nz = 8
except Exception:
show_message("error")
return
if self.num_zeros != nz:
self.num_zeros = nz
self.config.set_key('num_zeros',nz,True)
self.update_history_tab()
def run_network_dialog( network, parent ):
image = Gtk.Image()
image.set_from_stock(Gtk.STOCK_NETWORK, Gtk.IconSize.DIALOG)
host, port, protocol, proxy_config, auto_connect = network.get_parameters()
server = "%s:%s:%s"%(host, port, protocol)
if parent:
if network.is_connected():
status = "Connected to %s\n%d blocks"%(host, network.get_local_height())
else:
status = "Not connected"
else:
import random
status = "Please choose a server.\nSelect cancel if you are offline."
servers = network.get_servers()
dialog = Gtk.MessageDialog( parent, Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION, Gtk.ButtonsType.OK_CANCEL, status)
dialog.set_title("Server")
dialog.set_image(image)
image.show()
vbox = dialog.vbox
host_box = Gtk.HBox()
host_label = Gtk.Label(label='Connect to:')
host_label.set_size_request(100,-1)
host_label.show()
host_box.pack_start(host_label, False, False, 10)
host_entry = Gtk.Entry()
host_entry.set_size_request(200,-1)
if network.is_connected():
host_entry.set_text(server)
else:
host_entry.set_text("Not Connected")
host_entry.show()
host_box.pack_start(host_entry, False, False, 10)
add_help_button(host_box, 'The name, port number and protocol of your Electrum-dgc server, separated by a colon. Example: "ecdsa.org:50002:s". Some servers allow you to connect through http (port 80) or https (port 443)')
host_box.show()
p_box = Gtk.HBox(False, 10)
p_box.show()
p_label = Gtk.Label(label='Protocol:')
p_label.set_size_request(100,-1)
p_label.show()
p_box.pack_start(p_label, False, False, 10)
combobox = Gtk.ComboBoxText()
combobox.show()
combobox.append_text("TCP")
combobox.append_text("SSL")
combobox.append_text("HTTP")
combobox.append_text("HTTPS")
p_box.pack_start(combobox, True, True, 0)
def current_line():
        return host_entry.get_text().split(':')
def set_combobox(protocol):
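        # 'tshg' encodes one letter per protocol row appended above
        # (t=TCP, s=SSL, h=HTTP, g=HTTPS), so index() selects that row.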
combobox.set_active('tshg'.index(protocol))
def set_protocol(protocol):
host = current_line()[0]
pp = servers[host]
        if protocol not in pp:
            protocol = list(pp.keys())[0]
set_combobox(protocol)
port = pp[protocol]
host_entry.set_text( host + ':' + port + ':' + protocol)
combobox.connect("changed", lambda x:set_protocol('tshg'[combobox.get_active()]))
if network.is_connected():
set_combobox(protocol)
server_list = Gtk.ListStore(str)
for host in servers.keys():
server_list.append([host])
treeview = Gtk.TreeView(model=server_list)
treeview.show()
label = 'Active Servers' if network.is_connected() else 'Default Servers'
tvcolumn = Gtk.TreeViewColumn(label)
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, False)
tvcolumn.add_attribute(cell, 'text', 0)
vbox.pack_start(host_box, False,False, 5)
vbox.pack_start(p_box, True, True, 0)
#scroll = Gtk.ScrolledWindow()
#scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.ALWAYS)
#scroll.add_with_viewport(treeview)
#scroll.show()
#vbox.pack_start(scroll, True)
vbox.pack_start(treeview, True, True, 0)
def my_treeview_cb(treeview):
path, view_column = treeview.get_cursor()
host = server_list.get_value( server_list.get_iter(path), 0)
pp = servers[host]
        if 't' in pp:
            protocol = 't'
        else:
            protocol = list(pp.keys())[0]
port = pp[protocol]
host_entry.set_text( host + ':' + port + ':' + protocol)
set_combobox(protocol)
treeview.connect('cursor-changed', my_treeview_cb)
dialog.show_all()
r = dialog.run()
server = host_entry.get_text()
dialog.destroy()
if r==Gtk.ResponseType.CANCEL:
return False
try:
host, port, protocol = server.split(':')
except Exception:
show_message("error:" + server)
return False
network.set_parameters(host, port, protocol, proxy_config, auto_connect)
def show_message(message, parent=None):
dialog = Gtk.MessageDialog(
parent = parent,
flags = Gtk.DialogFlags.MODAL,
buttons = Gtk.ButtonsType.CLOSE,
message_format = message )
dialog.show()
dialog.run()
dialog.destroy()
def password_line(label):
password = Gtk.HBox()
password_label = Gtk.Label(label=label)
password_label.set_size_request(120,10)
password_label.show()
password.pack_start(password_label,False, False, 10)
password_entry = Gtk.Entry()
password_entry.set_size_request(300,-1)
password_entry.set_visibility(False)
password_entry.show()
password.pack_start(password_entry,False,False, 10)
password.show()
return password, password_entry
def password_dialog(parent):
dialog = Gtk.MessageDialog( parent, Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.QUESTION, Gtk.ButtonsType.OK_CANCEL, "Please enter your password.")
dialog.get_image().set_visible(False)
current_pw, current_pw_entry = password_line('Password:')
current_pw_entry.connect("activate", lambda entry, dialog, response: dialog.response(response), dialog, Gtk.ResponseType.OK)
dialog.vbox.pack_start(current_pw, False, True, 0)
dialog.show()
result = dialog.run()
pw = current_pw_entry.get_text()
dialog.destroy()
if result != Gtk.ResponseType.CANCEL: return pw
def change_password_dialog(is_encrypted, parent):
if parent:
msg = 'Your wallet is encrypted. Use this dialog to change the password. To disable wallet encryption, enter an empty new password.' if is_encrypted else 'Your wallet keys are not encrypted'
else:
msg = "Please choose a password to encrypt your wallet keys"
dialog = Gtk.MessageDialog( parent, Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.QUESTION, Gtk.ButtonsType.OK_CANCEL, msg)
dialog.set_title("Change password")
image = Gtk.Image()
image.set_from_stock(Gtk.STOCK_DIALOG_AUTHENTICATION, Gtk.IconSize.DIALOG)
image.show()
dialog.set_image(image)
if is_encrypted:
current_pw, current_pw_entry = password_line('Current password:')
dialog.vbox.pack_start(current_pw, False, True, 0)
password, password_entry = password_line('New password:')
dialog.vbox.pack_start(password, False, True, 5)
password2, password2_entry = password_line('Confirm password:')
dialog.vbox.pack_start(password2, False, True, 5)
dialog.show()
result = dialog.run()
password = current_pw_entry.get_text() if is_encrypted else None
new_password = password_entry.get_text()
new_password2 = password2_entry.get_text()
dialog.destroy()
if result == Gtk.ResponseType.CANCEL:
return
if new_password != new_password2:
show_message("passwords do not match")
return change_password_dialog(is_encrypted, parent)
if not new_password:
new_password = None
return True, password, new_password
def add_help_button(hbox, message):
button = Gtk.Button('?')
button.connect("clicked", lambda x: show_message(message))
button.show()
hbox.pack_start(button,False, False, 0)
class ElectrumWindow:
def show_message(self, msg):
show_message(msg, self.window)
def on_key(self, w, event):
if Gdk.ModifierType.CONTROL_MASK & event.state and event.keyval in [113,119]:
Gtk.main_quit()
return True
def __init__(self, wallet, config, network):
self.config = config
self.wallet = wallet
self.network = network
self.funds_error = False # True if not enough funds
self.num_zeros = int(self.config.get('num_zeros',0))
self.window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
self.window.connect('key-press-event', self.on_key)
title = 'Electrum-dgc ' + self.wallet.electrum_version + ' - ' + self.config.path
if not self.wallet.seed: title += ' [seedless]'
self.window.set_title(title)
self.window.connect("destroy", Gtk.main_quit)
self.window.set_border_width(0)
#self.window.connect('mykeypress', Gtk.main_quit)
self.window.set_default_size(720, 350)
self.wallet_updated = False
vbox = Gtk.VBox()
self.notebook = Gtk.Notebook()
self.create_history_tab()
if self.wallet.seed:
self.create_send_tab()
self.create_recv_tab()
self.create_book_tab()
self.create_about_tab()
self.notebook.show()
vbox.pack_start(self.notebook, True, True, 2)
self.status_bar = Gtk.Statusbar()
vbox.pack_start(self.status_bar, False, False, 0)
self.status_image = Gtk.Image()
self.status_image.set_from_stock(Gtk.STOCK_NO, Gtk.IconSize.MENU)
self.status_image.set_alignment(True, 0.5 )
self.status_image.show()
self.network_button = Gtk.Button()
self.network_button.connect("clicked", lambda x: run_network_dialog(self.network, self.window) )
self.network_button.add(self.status_image)
self.network_button.set_relief(Gtk.ReliefStyle.NONE)
self.network_button.show()
self.status_bar.pack_end(self.network_button, False, False, 0)
if self.wallet.seed:
def seedb(w, wallet):
if wallet.use_encryption:
password = password_dialog(self.window)
if not password: return
else: password = None
seed = wallet.get_mnemonic(password)
show_seed_dialog(seed, self.window)
button = Gtk.Button('S')
button.connect("clicked", seedb, self.wallet )
button.set_relief(Gtk.ReliefStyle.NONE)
button.show()
self.status_bar.pack_end(button,False, False, 0)
settings_icon = Gtk.Image()
settings_icon.set_from_stock(Gtk.STOCK_PREFERENCES, Gtk.IconSize.MENU)
settings_icon.set_alignment(0.5, 0.5)
settings_icon.set_size_request(16,16 )
settings_icon.show()
prefs_button = Gtk.Button()
prefs_button.connect("clicked", lambda x: run_settings_dialog(self) )
prefs_button.add(settings_icon)
prefs_button.set_tooltip_text("Settings")
prefs_button.set_relief(Gtk.ReliefStyle.NONE)
prefs_button.show()
self.status_bar.pack_end(prefs_button,False,False, 0)
self.pw_icon = Gtk.Image()
self.pw_icon.set_from_stock(Gtk.STOCK_DIALOG_AUTHENTICATION, Gtk.IconSize.MENU)
self.pw_icon.set_alignment(0.5, 0.5)
self.pw_icon.set_size_request(16,16 )
self.pw_icon.show()
if self.wallet.seed:
if self.wallet.use_encryption:
self.pw_icon.set_tooltip_text('Wallet is encrypted')
else:
self.pw_icon.set_tooltip_text('Wallet is unencrypted')
password_button = Gtk.Button()
password_button.connect("clicked", self.do_update_password, self.wallet)
password_button.add(self.pw_icon)
password_button.set_relief(Gtk.ReliefStyle.NONE)
password_button.show()
self.status_bar.pack_end(password_button,False,False, 0)
self.window.add(vbox)
self.window.show_all()
#self.fee_box.hide()
self.context_id = self.status_bar.get_context_id("statusbar")
self.update_status_bar()
self.network.register_callback('updated', self.update_callback)
def update_status_bar_thread():
while True:
GObject.idle_add( self.update_status_bar )
time.sleep(0.5)
def check_recipient_thread():
old_r = ''
while True:
time.sleep(0.5)
if self.payto_entry.is_focus():
continue
r = self.payto_entry.get_text()
if r != old_r:
old_r = r
r = r.strip()
                    if re.match(r'^(|([\w\-\.]+)@)((\w[\w\-]+\.)+[\w\-]+)$', r):
try:
to_address = self.wallet.get_alias(r, interactive=False)
except Exception:
continue
if to_address:
s = r + ' <' + to_address + '>'
GObject.idle_add( lambda: self.payto_entry.set_text(s) )
thread.start_new_thread(update_status_bar_thread, ())
if self.wallet.seed:
thread.start_new_thread(check_recipient_thread, ())
self.notebook.set_current_page(0)
def update_callback(self):
self.wallet_updated = True
def do_update_password(self, button, wallet):
if not wallet.seed:
show_message("No seed")
return
res = change_password_dialog(wallet.use_encryption, self.window)
if res:
_, password, new_password = res
try:
wallet.get_seed(password)
except Exception:
show_message("Incorrect password")
return
wallet.update_password(password, new_password)
if wallet.use_encryption:
self.pw_icon.set_tooltip_text('Wallet is encrypted')
else:
self.pw_icon.set_tooltip_text('Wallet is unencrypted')
def add_tab(self, page, name):
tab_label = Gtk.Label(label=name)
tab_label.show()
self.notebook.append_page(page, tab_label)
def create_send_tab(self):
page = vbox = Gtk.VBox()
page.show()
payto = Gtk.HBox()
payto_label = Gtk.Label(label='Pay to:')
payto_label.set_size_request(100,-1)
payto.pack_start(payto_label, False, False, 0)
payto_entry = Gtk.Entry()
payto_entry.set_size_request(450, 26)
payto.pack_start(payto_entry, False, False, 0)
vbox.pack_start(payto, False, False, 5)
message = Gtk.HBox()
message_label = Gtk.Label(label='Description:')
message_label.set_size_request(100,-1)
message.pack_start(message_label, False, False, 0)
message_entry = Gtk.Entry()
message_entry.set_size_request(450, 26)
message.pack_start(message_entry, False, False, 0)
vbox.pack_start(message, False, False, 5)
amount_box = Gtk.HBox()
amount_label = Gtk.Label(label='Amount:')
amount_label.set_size_request(100,-1)
amount_box.pack_start(amount_label, False, False, 0)
amount_entry = Gtk.Entry()
amount_entry.set_size_request(120, -1)
amount_box.pack_start(amount_entry, False, False, 0)
vbox.pack_start(amount_box, False, False, 5)
self.fee_box = fee_box = Gtk.HBox()
fee_label = Gtk.Label(label='Fee:')
fee_label.set_size_request(100,-1)
fee_box.pack_start(fee_label, False, False, 0)
fee_entry = Gtk.Entry()
fee_entry.set_size_request(60, 26)
fee_box.pack_start(fee_entry, False, False, 0)
vbox.pack_start(fee_box, False, False, 5)
end_box = Gtk.HBox()
empty_label = Gtk.Label(label='')
empty_label.set_size_request(100,-1)
end_box.pack_start(empty_label, False, False, 0)
send_button = Gtk.Button("Send")
send_button.show()
end_box.pack_start(send_button, False, False, 0)
clear_button = Gtk.Button("Clear")
clear_button.show()
end_box.pack_start(clear_button, False, False, 15)
send_button.connect("clicked", self.do_send, (payto_entry, message_entry, amount_entry, fee_entry))
clear_button.connect("clicked", self.do_clear, (payto_entry, message_entry, amount_entry, fee_entry))
vbox.pack_start(end_box, False, False, 5)
# display this line only if there is a signature
payto_sig = Gtk.HBox()
payto_sig_id = Gtk.Label(label='')
payto_sig.pack_start(payto_sig_id, False, False, 0)
vbox.pack_start(payto_sig, True, True, 5)
self.user_fee = False
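        # Recompute the suggested fee whenever the amount changes; if the user
        # edits the fee field itself (is_fee=True), their value is kept.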
def entry_changed( entry, is_fee ):
self.funds_error = False
amount = numbify(amount_entry)
fee = numbify(fee_entry)
if not is_fee: fee = None
if amount is None:
return
tx = self.wallet.make_unsigned_transaction([('op_return', 'dummy_tx', amount)], fee)
if not is_fee:
if tx:
fee = tx.get_fee()
fee_entry.set_text( str( Decimal( fee ) / 100000000 ) )
self.fee_box.show()
if tx:
amount_entry.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse("#000000"))
fee_entry.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse("#000000"))
send_button.set_sensitive(True)
else:
send_button.set_sensitive(False)
amount_entry.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse("#cc0000"))
fee_entry.modify_text(Gtk.StateType.NORMAL, Gdk.color_parse("#cc0000"))
self.funds_error = True
amount_entry.connect('changed', entry_changed, False)
fee_entry.connect('changed', entry_changed, True)
self.payto_entry = payto_entry
self.payto_fee_entry = fee_entry
self.payto_sig_id = payto_sig_id
self.payto_sig = payto_sig
self.amount_entry = amount_entry
self.message_entry = message_entry
self.add_tab(page, 'Send')
def set_frozen(self,entry,frozen):
if frozen:
entry.set_editable(False)
entry.set_has_frame(False)
entry.modify_base(Gtk.StateType.NORMAL, Gdk.color_parse("#eeeeee"))
else:
entry.set_editable(True)
entry.set_has_frame(True)
entry.modify_base(Gtk.StateType.NORMAL, Gdk.color_parse("#ffffff"))
def set_url(self, url):
payto, amount, label, message, payment_request = parse_URI(url)
self.notebook.set_current_page(1)
self.payto_entry.set_text(payto)
self.message_entry.set_text(message)
self.amount_entry.set_text(amount)
self.payto_sig.set_visible(False)
def create_about_tab(self):
from gi.repository import Pango
page = Gtk.VBox()
page.show()
tv = Gtk.TextView()
tv.set_editable(False)
tv.set_cursor_visible(False)
tv.modify_font(Pango.FontDescription(MONOSPACE_FONT))
scroll = Gtk.ScrolledWindow()
scroll.add(tv)
page.pack_start(scroll, True, True, 0)
self.info = tv.get_buffer()
self.add_tab(page, 'Wall')
def do_clear(self, w, data):
self.payto_sig.set_visible(False)
self.payto_fee_entry.set_text('')
for entry in [self.payto_entry,self.amount_entry,self.message_entry]:
self.set_frozen(entry,False)
entry.set_text('')
def question(self,msg):
dialog = Gtk.MessageDialog( self.window, Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT, Gtk.MessageType.QUESTION, Gtk.ButtonsType.OK_CANCEL, msg)
dialog.show()
result = dialog.run()
dialog.destroy()
return result == Gtk.ResponseType.OK
def do_send(self, w, data):
payto_entry, label_entry, amount_entry, fee_entry = data
label = label_entry.get_text()
r = payto_entry.get_text()
r = r.strip()
        m1 = re.match(r'^(|([\w\-\.]+)@)((\w[\w\-]+\.)+[\w\-]+)$', r)
        m2 = re.match(r'(|([\w\-\.]+)@)((\w[\w\-]+\.)+[\w\-]+) \<([1-9A-HJ-NP-Za-km-z]{26,})\>', r)
if m1:
to_address = self.wallet.get_alias(r, True, self.show_message, self.question)
if not to_address:
return
else:
self.update_sending_tab()
elif m2:
to_address = m2.group(5)
else:
to_address = r
if not is_valid(to_address):
self.show_message( "invalid dogecoin address:\n"+to_address)
return
try:
amount = int( Decimal(amount_entry.get_text()) * 100000000 )
except Exception:
self.show_message( "invalid amount")
return
try:
fee = int( Decimal(fee_entry.get_text()) * 100000000 )
except Exception:
self.show_message( "invalid fee")
return
if self.wallet.use_encryption:
password = password_dialog(self.window)
if not password:
return
else:
password = None
try:
tx = self.wallet.mktx( [(to_address, amount)], password, fee )
except Exception as e:
self.show_message(str(e))
return
#@todo dogecoin electrum-ltc modifies this...
if tx.requires_fee(self.wallet.verifier) and fee < MIN_RELAY_TX_FEE:
self.show_message( "This transaction requires a higher fee, or it will not be propagated by the network." )
return
if label:
self.wallet.labels[tx.hash()] = label
status, msg = self.wallet.sendtx( tx )
if status:
self.show_message( "payment sent.\n" + msg )
payto_entry.set_text("")
label_entry.set_text("")
amount_entry.set_text("")
fee_entry.set_text("")
#self.fee_box.hide()
self.update_sending_tab()
else:
self.show_message( msg )
def treeview_button_press(self, treeview, event):
if event.type == Gdk.EventType.DOUBLE_BUTTON_PRESS:
c = treeview.get_cursor()[0]
if treeview == self.history_treeview:
tx_details = self.history_list.get_value( self.history_list.get_iter(c), 8)
self.show_message(tx_details)
elif treeview == self.contacts_treeview:
m = self.addressbook_list.get_value( self.addressbook_list.get_iter(c), 0)
#a = self.wallet.aliases.get(m)
#if a:
# if a[0] in self.wallet.authorities.keys():
# s = self.wallet.authorities.get(a[0])
# else:
# s = "self-signed"
# msg = 'Alias: '+ m + '\nTarget address: '+ a[1] + '\n\nSigned by: ' + s + '\nSigning address:' + a[0]
# self.show_message(msg)
def treeview_key_press(self, treeview, event):
c = treeview.get_cursor()[0]
if event.keyval == Gdk.KEY_Up:
if c and c[0] == 0:
treeview.parent.grab_focus()
treeview.set_cursor((0,))
elif event.keyval == Gdk.KEY_Return:
if treeview == self.history_treeview:
tx_details = self.history_list.get_value( self.history_list.get_iter(c), 8)
self.show_message(tx_details)
elif treeview == self.contacts_treeview:
m = self.addressbook_list.get_value( self.addressbook_list.get_iter(c), 0)
#a = self.wallet.aliases.get(m)
#if a:
# if a[0] in self.wallet.authorities.keys():
# s = self.wallet.authorities.get(a[0])
# else:
# s = "self"
# msg = 'Alias:'+ m + '\n\nTarget: '+ a[1] + '\nSigned by: ' + s + '\nSigning address:' + a[0]
# self.show_message(msg)
return False
def create_history_tab(self):
self.history_list = Gtk.ListStore(str, str, str, str, 'gboolean', str, str, str, str)
treeview = Gtk.TreeView(model=self.history_list)
self.history_treeview = treeview
treeview.set_tooltip_column(7)
treeview.show()
treeview.connect('key-press-event', self.treeview_key_press)
treeview.connect('button-press-event', self.treeview_button_press)
tvcolumn = Gtk.TreeViewColumn('')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererPixbuf()
tvcolumn.pack_start(cell, False)
tvcolumn.set_attributes(cell, stock_id=1)
tvcolumn = Gtk.TreeViewColumn('Date')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, False)
tvcolumn.add_attribute(cell, 'text', 2)
tvcolumn = Gtk.TreeViewColumn('Description')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
cell.set_property('foreground', 'grey')
cell.set_property('family', MONOSPACE_FONT)
cell.set_property('editable', True)
def edited_cb(cell, path, new_text, h_list):
tx = h_list.get_value( h_list.get_iter(path), 0)
self.wallet.set_label(tx,new_text)
self.update_history_tab()
cell.connect('edited', edited_cb, self.history_list)
def editing_started(cell, entry, path, h_list):
tx = h_list.get_value( h_list.get_iter(path), 0)
if not self.wallet.labels.get(tx): entry.set_text('')
cell.connect('editing-started', editing_started, self.history_list)
tvcolumn.set_expand(True)
tvcolumn.pack_start(cell, True)
tvcolumn.set_attributes(cell, text=3, foreground_set = 4)
tvcolumn = Gtk.TreeViewColumn('Amount')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
cell.set_alignment(1, 0.5)
cell.set_property('family', MONOSPACE_FONT)
tvcolumn.pack_start(cell, False)
tvcolumn.add_attribute(cell, 'text', 5)
tvcolumn = Gtk.TreeViewColumn('Balance')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
cell.set_alignment(1, 0.5)
cell.set_property('family', MONOSPACE_FONT)
tvcolumn.pack_start(cell, False)
tvcolumn.add_attribute(cell, 'text', 6)
tvcolumn = Gtk.TreeViewColumn('Tooltip')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, False)
tvcolumn.add_attribute(cell, 'text', 7)
tvcolumn.set_visible(False)
scroll = Gtk.ScrolledWindow()
scroll.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scroll.add(treeview)
self.add_tab(scroll, 'History')
self.update_history_tab()
def create_recv_tab(self):
self.recv_list = Gtk.ListStore(str, str, str, str, str)
self.add_tab( self.make_address_list(True), 'Receive')
self.update_receiving_tab()
def create_book_tab(self):
self.addressbook_list = Gtk.ListStore(str, str, str)
self.add_tab( self.make_address_list(False), 'Contacts')
self.update_sending_tab()
def make_address_list(self, is_recv):
liststore = self.recv_list if is_recv else self.addressbook_list
treeview = Gtk.TreeView(model= liststore)
treeview.connect('key-press-event', self.treeview_key_press)
treeview.connect('button-press-event', self.treeview_button_press)
treeview.show()
if not is_recv:
self.contacts_treeview = treeview
tvcolumn = Gtk.TreeViewColumn('Address')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
cell.set_property('family', MONOSPACE_FONT)
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', 0)
tvcolumn = Gtk.TreeViewColumn('Label')
tvcolumn.set_expand(True)
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
cell.set_property('editable', True)
def edited_cb2(cell, path, new_text, liststore):
address = liststore.get_value( liststore.get_iter(path), 0)
self.wallet.set_label(address, new_text)
self.update_receiving_tab()
self.update_sending_tab()
self.update_history_tab()
cell.connect('edited', edited_cb2, liststore)
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', 1)
tvcolumn = Gtk.TreeViewColumn('Tx')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', 2)
if is_recv:
tvcolumn = Gtk.TreeViewColumn('Balance')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', 3)
tvcolumn = Gtk.TreeViewColumn('Type')
treeview.append_column(tvcolumn)
cell = Gtk.CellRendererText()
tvcolumn.pack_start(cell, True)
tvcolumn.add_attribute(cell, 'text', 4)
scroll = Gtk.ScrolledWindow()
scroll.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
scroll.add(treeview)
hbox = Gtk.HBox()
if not is_recv:
button = Gtk.Button("New")
button.connect("clicked", self.newaddress_dialog)
button.show()
hbox.pack_start(button,False, False, 0)
def showqrcode(w, treeview, liststore):
import qrcode
path, col = treeview.get_cursor()
if not path: return
address = liststore.get_value(liststore.get_iter(path), 0)
qr = qrcode.QRCode()
qr.add_data(address)
boxsize = 7
matrix = qr.get_matrix()
boxcount_row = len(matrix)
size = (boxcount_row + 4) * boxsize
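            # The +4 leaves a 2-module white margin on each side; the
            # (r + 2)/(c + 2) offsets below draw the code inside that margin.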
def area_expose_cb(area, cr):
style = area.get_style()
Gdk.cairo_set_source_color(cr, style.white)
cr.rectangle(0, 0, size, size)
cr.fill()
Gdk.cairo_set_source_color(cr, style.black)
for r in range(boxcount_row):
for c in range(boxcount_row):
if matrix[r][c]:
cr.rectangle((c + 2) * boxsize, (r + 2) * boxsize, boxsize, boxsize)
cr.fill()
area = Gtk.DrawingArea()
area.set_size_request(size, size)
area.connect("draw", area_expose_cb)
area.show()
dialog = Gtk.Dialog(address, parent=self.window, flags=Gtk.DialogFlags.MODAL, buttons = ("ok",1))
dialog.vbox.add(area)
dialog.run()
dialog.destroy()
button = Gtk.Button("QR")
button.connect("clicked", showqrcode, treeview, liststore)
button.show()
hbox.pack_start(button,False, False, 0)
button = Gtk.Button("Copy to clipboard")
def copy2clipboard(w, treeview, liststore):
import platform
path, col = treeview.get_cursor()
if path:
address = liststore.get_value( liststore.get_iter(path), 0)
if platform.system() == 'Windows':
                    try:
                        from tkinter import Tk  # Python 3
                    except ImportError:
                        from Tkinter import Tk  # Python 2
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append( address )
r.destroy()
else:
atom = Gdk.atom_intern('CLIPBOARD', True)
c = Gtk.Clipboard.get(atom)
c.set_text( address, len(address) )
button.connect("clicked", copy2clipboard, treeview, liststore)
button.show()
hbox.pack_start(button,False, False, 0)
if is_recv:
button = Gtk.Button("Freeze")
def freeze_address(w, treeview, liststore, wallet):
path, col = treeview.get_cursor()
if path:
address = liststore.get_value( liststore.get_iter(path), 0)
if address in wallet.frozen_addresses:
wallet.unfreeze(address)
else:
wallet.freeze(address)
self.update_receiving_tab()
button.connect("clicked", freeze_address, treeview, liststore, self.wallet)
button.show()
hbox.pack_start(button,False, False, 0)
if not is_recv:
button = Gtk.Button("Pay to")
def payto(w, treeview, liststore):
path, col = treeview.get_cursor()
if path:
address = liststore.get_value( liststore.get_iter(path), 0)
self.payto_entry.set_text( address )
self.notebook.set_current_page(1)
self.amount_entry.grab_focus()
button.connect("clicked", payto, treeview, liststore)
button.show()
hbox.pack_start(button,False, False, 0)
vbox = Gtk.VBox()
vbox.pack_start(scroll,True, True, 0)
vbox.pack_start(hbox, False, False, 0)
return vbox
def update_status_bar(self):
if self.funds_error:
text = "Not enough funds"
elif self.network.is_connected():
host, port, _,_,_ = self.network.get_parameters()
port = int(port)
height = self.network.get_local_height()
self.network_button.set_tooltip_text("Connected to %s:%d.\n%d blocks"%(host, port, height))
if not self.wallet.up_to_date:
self.status_image.set_from_stock(Gtk.STOCK_REFRESH, Gtk.IconSize.MENU)
text = "Synchronizing..."
else:
self.status_image.set_from_stock(Gtk.STOCK_YES, Gtk.IconSize.MENU)
c, u = self.wallet.get_balance()
text = "Balance: %s "%( format_satoshis(c,False,self.num_zeros) )
if u: text += "[%s unconfirmed]"%( format_satoshis(u,True,self.num_zeros).strip() )
else:
self.status_image.set_from_stock(Gtk.STOCK_NO, Gtk.IconSize.MENU)
self.network_button.set_tooltip_text("Not connected.")
text = "Not connected"
self.status_bar.pop(self.context_id)
self.status_bar.push(self.context_id, text)
if self.wallet.up_to_date and self.wallet_updated:
self.update_history_tab()
self.update_receiving_tab()
# addressbook too...
self.info.set_text( self.network.banner )
self.wallet_updated = False
def update_receiving_tab(self):
self.recv_list.clear()
for address in self.wallet.addresses(True):
Type = "R"
c = u = 0
if self.wallet.is_change(address): Type = "C"
            if address in self.wallet.imported_keys:
Type = "I"
c, u = self.wallet.get_addr_balance(address)
if address in self.wallet.frozen_addresses: Type = Type + "F"
label = self.wallet.labels.get(address)
h = self.wallet.history.get(address,[])
n = len(h)
tx = "0" if n==0 else "%d"%n
self.recv_list.append((address, label, tx, format_satoshis(c,False,self.num_zeros), Type ))
def update_sending_tab(self):
# detect addresses that are not mine in history, add them here...
self.addressbook_list.clear()
#for alias, v in self.wallet.aliases.items():
# s, target = v
# label = self.wallet.labels.get(alias)
# self.addressbook_list.append((alias, label, '-'))
for address in self.wallet.addressbook:
label = self.wallet.labels.get(address)
n = self.wallet.get_num_tx(address)
self.addressbook_list.append((address, label, "%d"%n))
def update_history_tab(self):
cursor = self.history_treeview.get_cursor()[0]
self.history_list.clear()
for item in self.wallet.get_tx_history():
tx_hash, conf, is_mine, value, fee, balance, timestamp = item
if conf > 0:
try:
time_str = datetime.datetime.fromtimestamp( timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "------"
conf_icon = Gtk.STOCK_APPLY
elif conf == -1:
time_str = 'unverified'
conf_icon = None
else:
time_str = 'pending'
conf_icon = Gtk.STOCK_EXECUTE
label, is_default_label = self.wallet.get_label(tx_hash)
tooltip = tx_hash + "\n%d confirmations"%conf if tx_hash else ''
details = self.get_tx_details(tx_hash)
self.history_list.prepend( [tx_hash, conf_icon, time_str, label, is_default_label,
format_satoshis(value,True,self.num_zeros, whitespaces=True),
format_satoshis(balance,False,self.num_zeros, whitespaces=True), tooltip, details] )
if cursor: self.history_treeview.set_cursor( cursor )
def get_tx_details(self, tx_hash):
import datetime
if not tx_hash: return ''
tx = self.wallet.transactions.get(tx_hash)
is_relevant, is_mine, v, fee = self.wallet.get_tx_value(tx)
conf, timestamp = self.wallet.verifier.get_confirmations(tx_hash)
if timestamp:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
else:
time_str = 'pending'
inputs = map(lambda x: x.get('address'), tx.inputs)
outputs = map(lambda x: x[0], tx.get_outputs())
tx_details = "Transaction Details" +"\n\n" \
+ "Transaction ID:\n" + tx_hash + "\n\n" \
+ "Status: %d confirmations\n"%conf
if is_mine:
if fee:
tx_details += "Amount sent: %s\n"% format_satoshis(v-fee, False) \
+ "Transaction fee: %s\n"% format_satoshis(fee, False)
else:
tx_details += "Amount sent: %s\n"% format_satoshis(v, False) \
+ "Transaction fee: unknown\n"
else:
tx_details += "Amount received: %s\n"% format_satoshis(v, False) \
tx_details += "Date: %s\n\n"%time_str \
+ "Inputs:\n-"+ '\n-'.join(inputs) + "\n\n" \
+ "Outputs:\n-"+ '\n-'.join(outputs)
return tx_details
def newaddress_dialog(self, w):
title = "New Contact"
dialog = Gtk.Dialog(title, parent=self.window,
flags=Gtk.DialogFlags.MODAL,
buttons= ("cancel", 0, "ok",1) )
dialog.show()
label = Gtk.HBox()
label_label = Gtk.Label(label='Label:')
label_label.set_size_request(120,10)
label_label.show()
label.pack_start(label_label, True, True, 0)
label_entry = Gtk.Entry()
label_entry.show()
label.pack_start(label_entry, True, True, 0)
label.show()
dialog.vbox.pack_start(label, False, True, 5)
address = Gtk.HBox()
address_label = Gtk.Label(label='Address:')
address_label.set_size_request(120,10)
address_label.show()
address.pack_start(address_label, True, True, 0)
address_entry = Gtk.Entry()
address_entry.show()
address.pack_start(address_entry, True, True, 0)
address.show()
dialog.vbox.pack_start(address, False, True, 5)
result = dialog.run()
address = address_entry.get_text()
label = label_entry.get_text()
dialog.destroy()
if result == 1:
if is_valid(address):
self.wallet.add_contact(address,label)
self.update_sending_tab()
else:
errorDialog = Gtk.MessageDialog(
parent=self.window,
flags=Gtk.DialogFlags.MODAL,
buttons= Gtk.ButtonsType.CLOSE,
message_format = "Invalid address")
errorDialog.show()
errorDialog.run()
errorDialog.destroy()
class ElectrumGui():
def __init__(self, config, network):
self.network = network
self.config = config
def main(self, url=None):
storage = WalletStorage(self.config)
if not storage.file_exists:
action = self.restore_or_create()
if not action:
exit()
self.wallet = wallet = Wallet(storage)
gap = self.config.get('gap_limit', 5)
if gap != 5:
wallet.gap_limit = gap
wallet.storage.put('gap_limit', gap, True)
if action == 'create':
seed = wallet.make_seed()
show_seed_dialog(seed, None)
r = change_password_dialog(False, None)
password = r[2] if r else None
wallet.add_seed(seed, password)
wallet.create_master_keys(password)
wallet.create_main_account(password)
wallet.synchronize() # generate first addresses offline
elif action == 'restore':
seed = self.seed_dialog()
if not seed:
exit()
r = change_password_dialog(False, None)
password = r[2] if r else None
wallet.add_seed(seed, password)
wallet.create_master_keys(password)
wallet.create_main_account(password)
else:
exit()
else:
self.wallet = Wallet(storage)
action = None
self.wallet.start_threads(self.network)
if action == 'restore':
self.restore_wallet(wallet)
w = ElectrumWindow(self.wallet, self.config, self.network)
if url: w.set_url(url)
Gtk.main()
def restore_or_create(self):
return restore_create_dialog()
def seed_dialog(self):
return run_recovery_dialog()
def network_dialog(self):
return run_network_dialog( self.network, parent=None )
def restore_wallet(self, wallet):
dialog = Gtk.MessageDialog(
parent = None,
flags = Gtk.DialogFlags.MODAL,
buttons = Gtk.ButtonsType.CANCEL,
message_format = "Please wait..." )
dialog.show()
def recover_thread( wallet, dialog ):
wallet.restore(lambda x:x)
GObject.idle_add( dialog.destroy )
thread.start_new_thread( recover_thread, ( wallet, dialog ) )
r = dialog.run()
dialog.destroy()
if r==Gtk.ResponseType.CANCEL: return False
if not wallet.is_found():
show_message("No transactions found for this seed")
return True
| gpl-3.0 |
NEricN/RobotCSimulator | Python/App/Lib/site-packages/pip/_vendor/html5lib/tokenizer.py | 1710 | 76929 | from __future__ import absolute_import, division, unicode_literals
try:
chr = unichr # flake8: noqa
except NameError:
pass
from collections import deque
from .constants import spaceCharacters
from .constants import entities
from .constants import asciiLetters, asciiUpper2Lower
from .constants import digits, hexDigits, EOF
from .constants import tokenTypes, tagTokenTypes
from .constants import replacementCharacters
from .inputstream import HTMLInputStream
from .trie import Trie
entitiesTrie = Trie(entities)
class HTMLTokenizer(object):
""" This class takes care of tokenizing HTML.
* self.currentToken
Holds the token that is currently being processed.
* self.state
      Holds a reference to the state method that will process the next character.
* self.stream
Points to HTMLInputStream object.
"""
def __init__(self, stream, encoding=None, parseMeta=True, useChardet=True,
lowercaseElementName=True, lowercaseAttrName=True, parser=None):
self.stream = HTMLInputStream(stream, encoding, parseMeta, useChardet)
self.parser = parser
# Perform case conversions?
self.lowercaseElementName = lowercaseElementName
self.lowercaseAttrName = lowercaseAttrName
# Setup the initial tokenizer state
self.escapeFlag = False
self.lastFourChars = []
self.state = self.dataState
self.escape = False
# The current token being created
self.currentToken = None
super(HTMLTokenizer, self).__init__()
def __iter__(self):
""" This is where the magic happens.
        We do our usual processing through the states and when we have a token
to return we yield the token which pauses processing until the next token
is requested.
"""
self.tokenQueue = deque([])
# Start processing. When EOF is reached self.state will return False
# instead of True and the loop will terminate.
while self.state():
while self.stream.errors:
yield {"type": tokenTypes["ParseError"], "data": self.stream.errors.pop(0)}
while self.tokenQueue:
yield self.tokenQueue.popleft()
def consumeNumberEntity(self, isHex):
"""This function returns either U+FFFD or the character based on the
decimal or hexadecimal representation. It also discards ";" if present.
        If it is not present, a ParseError token is appended to self.tokenQueue.
"""
allowed = digits
radix = 10
if isHex:
allowed = hexDigits
radix = 16
charStack = []
# Consume all the characters that are in range while making sure we
# don't hit an EOF.
c = self.stream.char()
while c in allowed and c is not EOF:
charStack.append(c)
c = self.stream.char()
# Convert the set of characters consumed to an int.
charAsInt = int("".join(charStack), radix)
# Certain characters get replaced with others
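        # (e.g. 0x00 maps to U+FFFD and 0x80 to U+20AC, per the numeric-reference
        # replacement table in constants.replacementCharacters)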
if charAsInt in replacementCharacters:
char = replacementCharacters[charAsInt]
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
elif ((0xD800 <= charAsInt <= 0xDFFF) or
(charAsInt > 0x10FFFF)):
char = "\uFFFD"
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
else:
# Should speed up this check somehow (e.g. move the set to a constant)
if ((0x0001 <= charAsInt <= 0x0008) or
(0x000E <= charAsInt <= 0x001F) or
(0x007F <= charAsInt <= 0x009F) or
(0xFDD0 <= charAsInt <= 0xFDEF) or
charAsInt in frozenset([0x000B, 0xFFFE, 0xFFFF, 0x1FFFE,
0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE,
0x5FFFF, 0x6FFFE, 0x6FFFF, 0x7FFFE,
0x7FFFF, 0x8FFFE, 0x8FFFF, 0x9FFFE,
0x9FFFF, 0xAFFFE, 0xAFFFF, 0xBFFFE,
0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE,
0xFFFFF, 0x10FFFE, 0x10FFFF])):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"illegal-codepoint-for-numeric-entity",
"datavars": {"charAsInt": charAsInt}})
try:
                # Try/except needed as UCS-2 Python builds' unichr only works
                # within the BMP.
char = chr(charAsInt)
except ValueError:
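                    # e.g. charAsInt == 0x1F600: v == 0xF600, giving the UTF-16
                    # surrogate pair chr(0xD83D) + chr(0xDE00) for U+1F600.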
v = charAsInt - 0x10000
char = chr(0xD800 | (v >> 10)) + chr(0xDC00 | (v & 0x3FF))
        # Discard the ";" if present. Otherwise, unget the character and
        # queue a ParseError token.
if c != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"numeric-entity-without-semicolon"})
self.stream.unget(c)
return char
def consumeEntity(self, allowedChar=None, fromAttribute=False):
# Initialise to the default output for when no entity is matched
output = "&"
charStack = [self.stream.char()]
if (charStack[0] in spaceCharacters or charStack[0] in (EOF, "<", "&")
or (allowedChar is not None and allowedChar == charStack[0])):
self.stream.unget(charStack[0])
elif charStack[0] == "#":
# Read the next character to see if it's hex or decimal
hex = False
charStack.append(self.stream.char())
if charStack[-1] in ("x", "X"):
hex = True
charStack.append(self.stream.char())
# charStack[-1] should be the first digit
if (hex and charStack[-1] in hexDigits) \
or (not hex and charStack[-1] in digits):
# At least one digit found, so consume the whole number
self.stream.unget(charStack[-1])
output = self.consumeNumberEntity(hex)
else:
# No digits found
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "expected-numeric-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
            # At this point in the process we might have a named entity.
            # Entities are stored in the global variable "entities".
            #
            # Consume characters and compare them to a substring of the
# entity names in the list until the substring no longer matches.
while (charStack[-1] is not EOF):
if not entitiesTrie.has_keys_with_prefix("".join(charStack)):
break
charStack.append(self.stream.char())
# At this point we have a string that starts with some characters
# that may match an entity
            # Try to find the longest entity the string will match to take care
            # of &noti for instance.
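            # Worked example: for "&notit" the stack grows to "notit" before the
            # prefix check fails; longest_prefix("noti") returns "not", so the
            # output is "¬" + "i" and the trailing "t" is pushed back onto the stream.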
try:
entityName = entitiesTrie.longest_prefix("".join(charStack[:-1]))
entityLength = len(entityName)
except KeyError:
entityName = None
if entityName is not None:
if entityName[-1] != ";":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"named-entity-without-semicolon"})
if (entityName[-1] != ";" and fromAttribute and
(charStack[entityLength] in asciiLetters or
charStack[entityLength] in digits or
charStack[entityLength] == "=")):
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
else:
output = entities[entityName]
self.stream.unget(charStack.pop())
output += "".join(charStack[entityLength:])
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-named-entity"})
self.stream.unget(charStack.pop())
output = "&" + "".join(charStack)
if fromAttribute:
self.currentToken["data"][-1][1] += output
else:
if output in spaceCharacters:
tokenType = "SpaceCharacters"
else:
tokenType = "Characters"
self.tokenQueue.append({"type": tokenTypes[tokenType], "data": output})
def processEntityInAttribute(self, allowedChar):
"""This method replaces the need for "entityInAttributeValueState".
"""
self.consumeEntity(allowedChar=allowedChar, fromAttribute=True)
def emitCurrentToken(self):
"""This method is a generic handler for emitting the tags. It also sets
the state to "data" because that's what's needed after a token has been
emitted.
"""
token = self.currentToken
# Add token to the queue to be yielded
if (token["type"] in tagTokenTypes):
if self.lowercaseElementName:
token["name"] = token["name"].translate(asciiUpper2Lower)
if token["type"] == tokenTypes["EndTag"]:
if token["data"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "attributes-in-end-tag"})
if token["selfClosing"]:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "self-closing-flag-on-end-tag"})
self.tokenQueue.append(token)
self.state = self.dataState
# Below are the various tokenizer states worked out.
def dataState(self):
data = self.stream.char()
if data == "&":
self.state = self.entityDataState
elif data == "<":
self.state = self.tagOpenState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\u0000"})
elif data is EOF:
# Tokenization ends.
return False
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def entityDataState(self):
self.consumeEntity()
self.state = self.dataState
return True
def rcdataState(self):
data = self.stream.char()
if data == "&":
self.state = self.characterReferenceInRcdata
elif data == "<":
self.state = self.rcdataLessThanSignState
elif data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data in spaceCharacters:
# Directly after emitting a token you switch back to the "data
# state". At that point spaceCharacters are important so they are
# emitted separately.
self.tokenQueue.append({"type": tokenTypes["SpaceCharacters"], "data":
data + self.stream.charsUntil(spaceCharacters, True)})
# No need to update lastFourChars here, since the first space will
# have already been appended to lastFourChars and will have broken
# any <!-- or --> sequences
else:
chars = self.stream.charsUntil(("&", "<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def characterReferenceInRcdata(self):
self.consumeEntity()
self.state = self.rcdataState
return True
def rawtextState(self):
data = self.stream.char()
if data == "<":
self.state = self.rawtextLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataState(self):
data = self.stream.char()
if data == "<":
self.state = self.scriptDataLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
# Tokenization ends.
return False
else:
chars = self.stream.charsUntil(("<", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def plaintextState(self):
data = self.stream.char()
if data == EOF:
# Tokenization ends.
return False
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + self.stream.charsUntil("\u0000")})
return True
def tagOpenState(self):
data = self.stream.char()
if data == "!":
self.state = self.markupDeclarationOpenState
elif data == "/":
self.state = self.closeTagOpenState
elif data in asciiLetters:
self.currentToken = {"type": tokenTypes["StartTag"],
"name": data, "data": [],
"selfClosing": False,
"selfClosingAcknowledged": False}
self.state = self.tagNameState
elif data == ">":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-right-bracket"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<>"})
self.state = self.dataState
elif data == "?":
# XXX In theory it could be something besides a tag name. But
# do we really care?
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name-but-got-question-mark"})
self.stream.unget(data)
self.state = self.bogusCommentState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-tag-name"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.dataState
return True
def closeTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.currentToken = {"type": tokenTypes["EndTag"], "name": data,
"data": [], "selfClosing": False}
self.state = self.tagNameState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-right-bracket"})
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-eof"})
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.state = self.dataState
else:
# XXX data can be _'_...
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-closing-tag-but-got-char",
"datavars": {"data": data}})
self.stream.unget(data)
self.state = self.bogusCommentState
return True
def tagNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-tag-name"})
self.state = self.dataState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
else:
self.currentToken["name"] += data
# (Don't use charsUntil here, because tag names are
# very short and it's faster to not do anything fancy)
return True
def rcdataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rcdataEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rcdataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rcdataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rcdataState
return True
def rawtextLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.rawtextEndTagOpenState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.rawtextEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.rawtextState
return True
def rawtextEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.rawtextState
return True
def scriptDataLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEndTagOpenState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<!"})
self.state = self.scriptDataEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer += data
self.state = self.scriptDataEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapeStartDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapeStartDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
else:
self.stream.unget(data)
self.state = self.scriptDataState
return True
def scriptDataEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.state = self.dataState
else:
chars = self.stream.charsUntil(("<", "-", "\u0000"))
self.tokenQueue.append({"type": tokenTypes["Characters"], "data":
data + chars})
return True
def scriptDataEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataEscapedDashDashState
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.state = self.scriptDataEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataEscapedState
elif data == EOF:
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.temporaryBuffer = ""
self.state = self.scriptDataEscapedEndTagOpenState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<" + data})
self.temporaryBuffer = data
self.state = self.scriptDataDoubleEscapeStartState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagOpenState(self):
data = self.stream.char()
if data in asciiLetters:
self.temporaryBuffer = data
self.state = self.scriptDataEscapedEndTagNameState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "</"})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataEscapedEndTagNameState(self):
appropriate = self.currentToken and self.currentToken["name"].lower() == self.temporaryBuffer.lower()
data = self.stream.char()
if data in spaceCharacters and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.beforeAttributeNameState
elif data == "/" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.state = self.selfClosingStartTagState
elif data == ">" and appropriate:
self.currentToken = {"type": tokenTypes["EndTag"],
"name": self.temporaryBuffer,
"data": [], "selfClosing": False}
self.emitCurrentToken()
self.state = self.dataState
elif data in asciiLetters:
self.temporaryBuffer += data
else:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "</" + self.temporaryBuffer})
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapeStartState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataDoubleEscapedState
else:
self.state = self.scriptDataEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataEscapedState
return True
def scriptDataDoubleEscapedState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
return True
def scriptDataDoubleEscapedDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
self.state = self.scriptDataDoubleEscapedDashDashState
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedDashDashState(self):
data = self.stream.char()
if data == "-":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "-"})
elif data == "<":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "<"})
self.state = self.scriptDataDoubleEscapedLessThanSignState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": ">"})
self.state = self.scriptDataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": "\uFFFD"})
self.state = self.scriptDataDoubleEscapedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-script-in-script"})
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapedLessThanSignState(self):
data = self.stream.char()
if data == "/":
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": "/"})
self.temporaryBuffer = ""
self.state = self.scriptDataDoubleEscapeEndState
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def scriptDataDoubleEscapeEndState(self):
data = self.stream.char()
if data in (spaceCharacters | frozenset(("/", ">"))):
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
if self.temporaryBuffer.lower() == "script":
self.state = self.scriptDataEscapedState
else:
self.state = self.scriptDataDoubleEscapedState
elif data in asciiLetters:
self.tokenQueue.append({"type": tokenTypes["Characters"], "data": data})
self.temporaryBuffer += data
else:
self.stream.unget(data)
self.state = self.scriptDataDoubleEscapedState
return True
def beforeAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data in ("'", '"', "=", "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-in-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-name-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def attributeNameState(self):
data = self.stream.char()
leavingThisState = True
emitToken = False
if data == "=":
self.state = self.beforeAttributeValueState
elif data in asciiLetters:
self.currentToken["data"][-1][0] += data +\
self.stream.charsUntil(asciiLetters, True)
leavingThisState = False
elif data == ">":
            # XXX If we emit here, the attributes are converted to a dict
            # without being checked, and when the code below runs we error
            # because data is a dict, not a list.
emitToken = True
elif data in spaceCharacters:
self.state = self.afterAttributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][0] += "\uFFFD"
leavingThisState = False
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"invalid-character-in-attribute-name"})
self.currentToken["data"][-1][0] += data
leavingThisState = False
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-attribute-name"})
self.state = self.dataState
else:
self.currentToken["data"][-1][0] += data
leavingThisState = False
if leavingThisState:
# Attributes are not dropped at this stage. That happens when the
# start tag token is emitted so values can still be safely appended
# to attributes, but we do want to report the parse error in time.
if self.lowercaseAttrName:
self.currentToken["data"][-1][0] = (
self.currentToken["data"][-1][0].translate(asciiUpper2Lower))
for name, value in self.currentToken["data"][:-1]:
if self.currentToken["data"][-1][0] == name:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"duplicate-attribute"})
break
# XXX Fix for above XXX
if emitToken:
self.emitCurrentToken()
return True
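    # Illustrative sketch (the tokenizer class name and input are
    # assumptions): the duplicate check above only reports the error; the
    # first value wins once the attribute list later collapses into a dict.
    # Feeding a tokenizer built from these states something like
    #
    #   list(HTMLTokenizer('<p id="a" id="b">'))
    #
    # would yield a ParseError token ("duplicate-attribute") plus a
    # StartTag whose "data" keeps "id" mapped to "a".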
def afterAttributeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "=":
self.state = self.beforeAttributeValueState
elif data == ">":
self.emitCurrentToken()
elif data in asciiLetters:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data == "/":
self.state = self.selfClosingStartTagState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"].append(["\uFFFD", ""])
self.state = self.attributeNameState
elif data in ("'", '"', "<"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"invalid-character-after-attribute-name"})
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-end-of-tag-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"].append([data, ""])
self.state = self.attributeNameState
return True
def beforeAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.stream.charsUntil(spaceCharacters, True)
elif data == "\"":
self.state = self.attributeValueDoubleQuotedState
elif data == "&":
self.state = self.attributeValueUnQuotedState
self.stream.unget(data)
elif data == "'":
self.state = self.attributeValueSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-right-bracket"})
self.emitCurrentToken()
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
self.state = self.attributeValueUnQuotedState
elif data in ("=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"equals-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-attribute-value-but-got-eof"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data
self.state = self.attributeValueUnQuotedState
return True
def attributeValueDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute('"')
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-double-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("\"", "&", "\u0000"))
return True
def attributeValueSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterAttributeValueState
elif data == "&":
self.processEntityInAttribute("'")
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-single-quote"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data +\
self.stream.charsUntil(("'", "&", "\u0000"))
return True
def attributeValueUnQuotedState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == "&":
self.processEntityInAttribute(">")
elif data == ">":
self.emitCurrentToken()
elif data in ('"', "'", "=", "<", "`"):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-in-unquoted-attribute-value"})
self.currentToken["data"][-1][1] += data
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"][-1][1] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-attribute-value-no-quotes"})
self.state = self.dataState
else:
self.currentToken["data"][-1][1] += data + self.stream.charsUntil(
frozenset(("&", ">", '"', "'", "=", "<", "`", "\u0000")) | spaceCharacters)
return True
def afterAttributeValueState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeAttributeNameState
elif data == ">":
self.emitCurrentToken()
elif data == "/":
self.state = self.selfClosingStartTagState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-EOF-after-attribute-value"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-attribute-value"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def selfClosingStartTagState(self):
data = self.stream.char()
if data == ">":
self.currentToken["selfClosing"] = True
self.emitCurrentToken()
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data":
"unexpected-EOF-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-character-after-solidus-in-tag"})
self.stream.unget(data)
self.state = self.beforeAttributeNameState
return True
def bogusCommentState(self):
# Make a new comment token and give it as value all the characters
# until the first > or EOF (charsUntil checks for EOF automatically)
# and emit it.
data = self.stream.charsUntil(">")
data = data.replace("\u0000", "\uFFFD")
self.tokenQueue.append(
{"type": tokenTypes["Comment"], "data": data})
        # Eat the character directly after the bogus comment, which is
        # either a ">" or an EOF.
self.stream.char()
self.state = self.dataState
return True
def markupDeclarationOpenState(self):
charStack = [self.stream.char()]
if charStack[-1] == "-":
charStack.append(self.stream.char())
if charStack[-1] == "-":
self.currentToken = {"type": tokenTypes["Comment"], "data": ""}
self.state = self.commentStartState
return True
elif charStack[-1] in ('d', 'D'):
matched = True
for expected in (('o', 'O'), ('c', 'C'), ('t', 'T'),
('y', 'Y'), ('p', 'P'), ('e', 'E')):
charStack.append(self.stream.char())
if charStack[-1] not in expected:
matched = False
break
if matched:
self.currentToken = {"type": tokenTypes["Doctype"],
"name": "",
"publicId": None, "systemId": None,
"correct": True}
self.state = self.doctypeState
return True
elif (charStack[-1] == "[" and
self.parser is not None and
self.parser.tree.openElements and
self.parser.tree.openElements[-1].namespace != self.parser.tree.defaultNamespace):
matched = True
for expected in ["C", "D", "A", "T", "A", "["]:
charStack.append(self.stream.char())
if charStack[-1] != expected:
matched = False
break
if matched:
self.state = self.cdataSectionState
return True
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-dashes-or-doctype"})
while charStack:
self.stream.unget(charStack.pop())
self.state = self.bogusCommentState
return True
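    # Routing examples for the state above (the inputs are illustrative):
    # after "<!", a stream beginning "--x-->" goes to commentStartState,
    # "DOCTYPE html>" goes to doctypeState, and "[CDATA[" is honoured only
    # when the current node sits in a foreign (non-HTML) namespace per the
    # check above; anything else is ungot and treated as a bogus comment.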
def commentStartState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentStartDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data
self.state = self.commentState
return True
def commentStartDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"incorrect-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "\uFFFD"
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "eof-in-comment"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += data + \
self.stream.charsUntil(("-", "\u0000"))
return True
def commentEndDashState(self):
data = self.stream.char()
if data == "-":
self.state = self.commentEndState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "-\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "-" + data
self.state = self.commentState
return True
def commentEndState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--\uFFFD"
self.state = self.commentState
elif data == "!":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-bang-after-double-dash-in-comment"})
self.state = self.commentEndBangState
elif data == "-":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-dash-after-double-dash-in-comment"})
self.currentToken["data"] += data
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-double-dash"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
# XXX
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-comment"})
self.currentToken["data"] += "--" + data
self.state = self.commentState
return True
def commentEndBangState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "-":
self.currentToken["data"] += "--!"
self.state = self.commentEndDashState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["data"] += "--!\uFFFD"
self.state = self.commentState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-comment-end-bang-state"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["data"] += "--!" + data
self.state = self.commentState
return True
def doctypeState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"need-space-after-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeNameState
return True
def beforeDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-right-bracket"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] = "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-doctype-name-but-got-eof"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] = data
self.state = self.doctypeNameState
return True
def doctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.state = self.afterDoctypeNameState
elif data == ">":
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["name"] += "\uFFFD"
self.state = self.doctypeNameState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype-name"})
self.currentToken["correct"] = False
self.currentToken["name"] = self.currentToken["name"].translate(asciiUpper2Lower)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["name"] += data
return True
def afterDoctypeNameState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.currentToken["correct"] = False
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
if data in ("p", "P"):
matched = True
for expected in (("u", "U"), ("b", "B"), ("l", "L"),
("i", "I"), ("c", "C")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypePublicKeywordState
return True
elif data in ("s", "S"):
matched = True
for expected in (("y", "Y"), ("s", "S"), ("t", "T"),
("e", "E"), ("m", "M")):
data = self.stream.char()
if data not in expected:
matched = False
break
if matched:
self.state = self.afterDoctypeSystemKeywordState
return True
# All the characters read before the current 'data' will be
# [a-zA-Z], so they're garbage in the bogus doctype and can be
# discarded; only the latest character might be '>' or EOF
# and needs to be ungetted
self.stream.unget(data)
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"expected-space-or-right-bracket-in-doctype", "datavars":
{"data": data}})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypePublicKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypePublicIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypePublicIdentifierState
return True
def beforeDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["publicId"] = ""
self.state = self.doctypePublicIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypePublicIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def doctypePublicIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypePublicIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["publicId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["publicId"] += data
return True
def afterDoctypePublicIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.betweenDoctypePublicAndSystemIdentifiersState
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def betweenDoctypePublicAndSystemIdentifiersState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data == '"':
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def afterDoctypeSystemKeywordState(self):
data = self.stream.char()
if data in spaceCharacters:
self.state = self.beforeDoctypeSystemIdentifierState
elif data in ("'", '"'):
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.stream.unget(data)
self.state = self.beforeDoctypeSystemIdentifierState
return True
def beforeDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == "\"":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierDoubleQuotedState
elif data == "'":
self.currentToken["systemId"] = ""
self.state = self.doctypeSystemIdentifierSingleQuotedState
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.currentToken["correct"] = False
self.state = self.bogusDoctypeState
return True
def doctypeSystemIdentifierDoubleQuotedState(self):
data = self.stream.char()
if data == "\"":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def doctypeSystemIdentifierSingleQuotedState(self):
data = self.stream.char()
if data == "'":
self.state = self.afterDoctypeSystemIdentifierState
elif data == "\u0000":
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
self.currentToken["systemId"] += "\uFFFD"
elif data == ">":
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-end-of-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.currentToken["systemId"] += data
return True
def afterDoctypeSystemIdentifierState(self):
data = self.stream.char()
if data in spaceCharacters:
pass
elif data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"eof-in-doctype"})
self.currentToken["correct"] = False
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
self.tokenQueue.append({"type": tokenTypes["ParseError"], "data":
"unexpected-char-in-doctype"})
self.state = self.bogusDoctypeState
return True
def bogusDoctypeState(self):
data = self.stream.char()
if data == ">":
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
elif data is EOF:
# XXX EMIT
self.stream.unget(data)
self.tokenQueue.append(self.currentToken)
self.state = self.dataState
else:
pass
return True
def cdataSectionState(self):
data = []
while True:
data.append(self.stream.charsUntil("]"))
data.append(self.stream.charsUntil(">"))
char = self.stream.char()
if char == EOF:
break
else:
assert char == ">"
if data[-1][-2:] == "]]":
data[-1] = data[-1][:-2]
break
else:
data.append(char)
data = "".join(data)
# Deal with null here rather than in the parser
nullCount = data.count("\u0000")
if nullCount > 0:
for i in range(nullCount):
self.tokenQueue.append({"type": tokenTypes["ParseError"],
"data": "invalid-codepoint"})
data = data.replace("\u0000", "\uFFFD")
if data:
self.tokenQueue.append({"type": tokenTypes["Characters"],
"data": data})
self.state = self.dataState
return True
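# A minimal driver sketch (an assumption about the host class, not code from
# this module): each state method reads from self.stream, may queue tokens,
# reassigns self.state, and returns True while tokenizing should continue,
# so a host tokenizer could be driven roughly like this:
#
#   def __iter__(self):
#       self.state = self.dataState
#       while self.state():            # run the current state method
#           while self.tokenQueue:     # drain whatever it queued
#               yield self.tokenQueue.pop(0)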
| apache-2.0 |
mhaessig/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/test/run_all.py | 465 | 3259 | #!/usr/bin/env python
#
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Run all tests in the same directory.
This suite is expected to be run under pywebsocket's src directory, i.e. the
directory containing mod_pywebsocket, test, etc.
To change the logging level, specify the --log-level option.
    python test/run_all.py --log-level debug
To pass any option to the unittest module, specify options after '--'. For
example, run this to make the test runner verbose.
    python test/run_all.py --log-level debug -- -v
"""
import logging
import optparse
import os
import re
import sys
import unittest
_TEST_MODULE_PATTERN = re.compile(r'^(test_.+)\.py$')
def _list_test_modules(directory):
module_names = []
for filename in os.listdir(directory):
match = _TEST_MODULE_PATTERN.search(filename)
if match:
module_names.append(match.group(1))
return module_names
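# For example (the file names are hypothetical): a directory holding
# test_handshake.py, test_msgutil.py and util.py yields
# ['test_handshake', 'test_msgutil']; util.py does not match
# _TEST_MODULE_PATTERN above.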
def _suite():
loader = unittest.TestLoader()
return loader.loadTestsFromNames(
_list_test_modules(os.path.join(os.path.split(__file__)[0], '.')))
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('--log-level', '--log_level', type='choice',
dest='log_level', default='warning',
choices=['debug', 'info', 'warning', 'warn', 'error',
'critical'])
options, args = parser.parse_args()
logging.basicConfig(
level=logging.getLevelName(options.log_level.upper()),
format='%(levelname)s %(asctime)s '
'%(filename)s:%(lineno)d] '
'%(message)s',
datefmt='%H:%M:%S')
unittest.main(defaultTest='_suite', argv=[sys.argv[0]] + args)
# vi:sts=4 sw=4 et
| mpl-2.0 |
dikshyam/icsisumm | icsisumm-primary-sys34_v1/nltk/nltk-0.9.2/nltk_contrib/classifier_tests/decisiontreetests.py | 9 | 3668 | # Natural Language Toolkit
#
# Author: Sumukh Ghodke <sumukh dot ghodke at gmail dot com>
#
# URL: <http://nltk.sf.net>
# This software is distributed under GPL, for license information see LICENSE.TXT
from nltk_contrib.classifier_tests import *
from nltk_contrib.classifier import decisiontree, decisionstump as ds, instances as ins, attribute as attr
from nltk_contrib.classifier.exceptions import invaliddataerror as inv
class DecisionTreeTestCase(unittest.TestCase):
def test_tree_creation(self):
path = datasetsDir(self) + 'test_phones' + SEP + 'phoney'
a, c = metadata(path)
tree = decisiontree.DecisionTree(training(path), a, c)
tree.train()
self.assertNotEqual(None, tree)
self.assertNotEqual(None, tree.root)
self.assertEqual('band', tree.root.attribute.name)
self.assertEqual(1, len(tree.root.children))
self.assertEqual('size', tree.root.children['tri'].attribute.name)
def test_filter_does_not_affect_the_original_training(self):
path = datasetsDir(self) + 'minigolf' + SEP + 'weather'
a, c = metadata(path)
tree = decisiontree.DecisionTree(training(path), a, c)
tree.train()
outlook = tree.attributes[0]
self.assertEqual(9, len(tree.training))
filtered = tree.training.filter(outlook, 'sunny')
self.assertEqual(9, len(tree.training))
self.assertEqual(4, len(filtered))
def test_maximum_information_gain_stump_is_selected(self):
path = datasetsDir(self) + 'test_phones' + SEP + 'phoney'
_training = training(path)
a, c = metadata(path)
tree = decisiontree.DecisionTree(_training, a, c)
decision_stumps = tree.possible_decision_stumps([], _training)
max_ig_stump = tree.maximum_information_gain(decision_stumps)
self.assertEqual('band', max_ig_stump.attribute.name)
def test_maximum_gain_raito_stump_is_selected(self):
path = datasetsDir(self) + 'test_phones' + SEP + 'phoney'
_training = training(path)
a, c = metadata(path)
tree = decisiontree.DecisionTree(_training, a, c)
decision_stumps = tree.possible_decision_stumps([], _training)
max_gr_stump = tree.maximum_gain_ratio(decision_stumps)
self.assertEqual('pda', max_gr_stump.attribute.name)
    #              outlook
    #      sunny /    |    \ rainy
    #           /     |     \
    #   temperature         windy
    #
def test_ignores_selected_attributes_in_next_recursive_iteration(self):
path = datasetsDir(self) + 'minigolf' + SEP + 'weather'
a, c = metadata(path)
tree = decisiontree.DecisionTree(training(path), a, c)
tree.train()
self.assertEqual('outlook', tree.root.attribute.name)
children = tree.root.children
self.assertEqual(2, len(children))
sunny = children['sunny']
self.assertEqual('temperature', sunny.attribute.name)
self.assertEqual(0, len(sunny.children))
rainy = children['rainy']
self.assertEqual('windy', rainy.attribute.name)
self.assertEqual(0, len(rainy.children))
def test_throws_error_if_conitinuous_atributes_are_present(self):
try:
path = datasetsDir(self) + 'numerical' + SEP + 'weather'
a,c = metadata(path)
dt = decisiontree.DecisionTree(training(path), a, c)
dt.train()
self.fail('should have thrown an error')
except inv.InvalidDataError:
pass
| gpl-3.0 |
LUTAN/tensorflow | tensorflow/contrib/labeled_tensor/python/ops/io_ops.py | 172 | 6373 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input parsing code for LabeledTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six import string_types
from tensorflow.contrib.labeled_tensor.python.ops import _typecheck as tc
from tensorflow.contrib.labeled_tensor.python.ops import core
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
class FixedLenFeature(object):
"""Configuration for parsing a fixed-length input feature.
Fields:
axes: A list of Axis objects or tuples (axis_name, axis_value),
where `axis_name` is a string and `axis_value` is None (unknown size), an
integer or a list of tick labels.
dtype: Data type of input.
default_value: Value to be used if an example is missing this feature. It
must be compatible with `dtype`.
"""
def __init__(self, axes, dtype, default_value=None):
self._axes = [core.as_axis(a) for a in axes]
self._dtype = dtype
self._default_value = default_value
@property
def axes(self):
return self._axes
@property
def dtype(self):
return self._dtype
@property
def default_value(self):
return self._default_value
@tc.returns(tc.Dict(string_types, parsing_ops.FixedLenFeature))
@tc.accepts(tc.Mapping(string_types, FixedLenFeature))
def _labeled_to_unlabeled_features(features):
"""Convert a dict of lt.FixedLenFeature into a dict of tf.FixedLenFeature."""
unlabeled_features = {}
for name, labeled_feature in features.items():
shape = [ax.size for ax in labeled_feature.axes]
if any(size is None for size in shape):
# This should be caught on the TensorFlow side, but it isn't yet:
# https://github.com/tensorflow/tensorflow/issues/2874
raise ValueError('axes with unknown size are not supported')
dtype = labeled_feature.dtype
default_value = labeled_feature.default_value
unlabeled_features[name] = parsing_ops.FixedLenFeature(
shape, dtype, default_value)
return unlabeled_features
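# Illustrative conversion (the feature name is an assumption): the labeled
#   {'rgb': FixedLenFeature([('channel', ['r', 'g', 'b'])], dtypes.float32)}
# becomes the unlabeled
#   {'rgb': parsing_ops.FixedLenFeature([3], dtypes.float32, None)}
# since each Axis contributes only its size to the plain shape list.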
@tc.returns(tc.Dict(string_types, core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, FixedLenFeature),
tc.Optional(string_types), object)
def parse_example(serialized, features, name=None, example_names=None):
"""Parse `Example` protos into a `dict` of labeled tensors.
See tf.parse_example.
Args:
serialized: A 1-D LabeledTensor of strings, a batch of binary serialized
`Example` protos.
features: A `dict` mapping feature keys to `labeled_tensor.FixedLenFeature`
values.
name: A name for this operation (optional).
example_names: A vector (1-D Tensor) of strings (optional), the names of
the serialized protos in the batch.
Returns:
A `dict` mapping feature keys to `LabeledTensor` values. The single axis
from `serialized` will be prepended to the axes provided by each feature.
Raises:
ValueError: if any feature is invalid.
"""
serialized = core.convert_to_labeled_tensor(serialized)
unlabeled_features = _labeled_to_unlabeled_features(features)
unlabeled_parsed = parsing_ops.parse_example(
serialized.tensor, unlabeled_features, name, example_names)
parsed = {}
for name, parsed_feature in unlabeled_parsed.items():
axes = list(serialized.axes.values()) + features[name].axes
parsed[name] = core.LabeledTensor(parsed_feature, axes)
return parsed
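# Usage sketch (not part of the public API; the feature name and axis sizes
# are assumptions). It is wrapped in a function so that importing this
# module stays free of side effects.
def _parse_example_usage_sketch(serialized_tensor):
  """Shows how the batch axis is prepended to each feature's own axes."""
  features = {'price': FixedLenFeature([('channel', 3)], dtypes.float32)}
  serialized = core.LabeledTensor(serialized_tensor, ['batch'])
  parsed = parse_example(serialized, features)
  # Per the docstring above, parsed['price'] carries axes
  # ['batch', 'channel'].
  return parsed['price']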
@tc.returns(tc.Dict(string_types, core.LabeledTensor))
@tc.accepts(core.LabeledTensorLike, tc.Mapping(string_types, FixedLenFeature),
tc.Optional(string_types), object)
def parse_single_example(serialized, features, name=None, example_names=None):
"""Parses a single `Example` proto.
See tf.parse_single_example.
Args:
serialized: A scalar string Tensor or LabeledTensor, a single serialized
Example.
features: A `dict` mapping feature keys to `labeled_tensor.FixedLenFeature`
values.
name: A name for this operation (optional).
example_names: (Optional) A scalar string Tensor, the associated name.
Returns:
A `dict` mapping feature keys to `LabeledTensor` values.
Raises:
ValueError: if any feature is invalid.
"""
serialized = core.convert_to_labeled_tensor(serialized)
unlabeled_features = _labeled_to_unlabeled_features(features)
unlabeled_parsed = parsing_ops.parse_single_example(
serialized.tensor, unlabeled_features, name, example_names)
parsed = {}
for name, parsed_feature in unlabeled_parsed.items():
parsed[name] = core.LabeledTensor(parsed_feature, features[name].axes)
return parsed
@tc.returns(core.LabeledTensor)
@tc.accepts(dtypes.DType, tc.Collection(tc.Union(string_types, core.AxisLike)),
tc.Optional(string_types))
def placeholder(dtype, axes, name=None):
"""Create a placeholder for a labeled tensor.
For example:
lt.placeholder(tf.float32, ['batch', ('channel', ['r', 'g', 'b'])])
See tf.placeholder for more details.
Args:
dtype: The type of elements in the tensor to be fed.
axes: sequence of strings (denoting axes of unknown size) and/or objects
      convertible to lt.Axis to label the result.
name: Optional op name.
Returns:
Placeholder labeled tensor.
"""
with ops.name_scope(name, 'lt_placeholder', []) as scope:
axes = core.Axes([(axis, None) if isinstance(axis, string_types) else axis
for axis in axes])
shape = [axis.size for axis in axes.values()]
tensor = array_ops.placeholder(dtype, shape, name=scope)
return core.LabeledTensor(tensor, axes)
| apache-2.0 |
xrmx/django | tests/template_tests/filter_tests/test_linenumbers.py | 331 | 1992 | from django.template.defaultfilters import linenumbers
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LinenumbersTests(SimpleTestCase):
"""
    The contents of "linenumbers" are escaped according to the current
autoescape setting.
"""
@setup({'linenumbers01': '{{ a|linenumbers }} {{ b|linenumbers }}'})
def test_linenumbers01(self):
output = self.engine.render_to_string(
'linenumbers01',
{'a': 'one\n<two>\nthree', 'b': mark_safe('one\n<two>\nthree')},
)
self.assertEqual(output, '1. one\n2. <two>\n3. three '
'1. one\n2. <two>\n3. three')
@setup({'linenumbers02':
'{% autoescape off %}{{ a|linenumbers }} {{ b|linenumbers }}{% endautoescape %}'})
def test_linenumbers02(self):
output = self.engine.render_to_string(
'linenumbers02',
{'a': 'one\n<two>\nthree', 'b': mark_safe('one\n<two>\nthree')},
)
self.assertEqual(output, '1. one\n2. <two>\n3. three '
'1. one\n2. <two>\n3. three')
class FunctionTests(SimpleTestCase):
def test_linenumbers(self):
self.assertEqual(linenumbers('line 1\nline 2'), '1. line 1\n2. line 2')
def test_linenumbers2(self):
self.assertEqual(
linenumbers('\n'.join(['x'] * 10)),
'01. x\n02. x\n03. x\n04. x\n05. x\n06. x\n07. x\n08. x\n09. x\n10. x',
)
def test_non_string_input(self):
self.assertEqual(linenumbers(123), '1. 123')
def test_autoescape(self):
self.assertEqual(
linenumbers('foo\n<a>bar</a>\nbuz'),
'1. foo\n2. <a>bar</a>\n3. buz',
)
def test_autoescape_off(self):
self.assertEqual(
linenumbers('foo\n<a>bar</a>\nbuz', autoescape=False),
'1. foo\n2. <a>bar</a>\n3. buz'
)
| bsd-3-clause |
Balachan27/django | django/conf/locale/mk/formats.py | 504 | 1742 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'd F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y'
SHORT_DATETIME_FORMAT = 'j.m.Y H:i'
FIRST_DAY_OF_WEEK = 1
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%d. %m. %Y', '%d. %m. %y', # '25. 10. 2006', '25. 10. 06'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
'%d. %m. %Y %H:%M:%S', # '25. 10. 2006 14:30:59'
'%d. %m. %Y %H:%M:%S.%f', # '25. 10. 2006 14:30:59.000200'
'%d. %m. %Y %H:%M', # '25. 10. 2006 14:30'
'%d. %m. %Y', # '25. 10. 2006'
'%d. %m. %y %H:%M:%S', # '25. 10. 06 14:30:59'
'%d. %m. %y %H:%M:%S.%f', # '25. 10. 06 14:30:59.000200'
'%d. %m. %y %H:%M', # '25. 10. 06 14:30'
'%d. %m. %y', # '25. 10. 06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
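# Illustrative check (assumption: plain strptime mirrors these patterns):
#
#   >>> import datetime
#   >>> datetime.datetime.strptime('25. 10. 06', '%d. %m. %y').date()
#   datetime.date(2006, 10, 25)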
| bsd-3-clause |
numpy/datetime | numpy/f2py/tests/test_return_logical.py | 59 | 4633 | from numpy.testing import *
from numpy import array
import util
class TestReturnLogical(util.F2PyTest):
def check_function(self, t):
assert t(True)==1,`t(True)`
assert t(False)==0,`t(False)`
assert t(0)==0
assert t(None)==0
assert t(0.0)==0
assert t(0j)==0
assert t(1j)==1
assert t(234)==1
assert t(234.6)==1
assert t(234l)==1
assert t(234.6+3j)==1
assert t('234')==1
assert t('aaa')==1
assert t('')==0
assert t([])==0
assert t(())==0
assert t({})==0
assert t(t)==1
assert t(-234)==1
assert t(10l**100)==1
assert t([234])==1
assert t((234,))==1
assert t(array(234))==1
assert t(array([234]))==1
assert t(array([[234]]))==1
assert t(array([234],'b'))==1
assert t(array([234],'h'))==1
assert t(array([234],'i'))==1
assert t(array([234],'l'))==1
assert t(array([234],'f'))==1
assert t(array([234],'d'))==1
assert t(array([234+3j],'F'))==1
assert t(array([234],'D'))==1
assert t(array(0))==0
assert t(array([0]))==0
assert t(array([[0]]))==0
assert t(array([0j]))==0
assert t(array([1]))==1
assert_raises(ValueError, t, array([0,0]))
class TestF77ReturnLogical(TestReturnLogical):
code = """
function t0(value)
logical value
logical t0
t0 = value
end
function t1(value)
logical*1 value
logical*1 t1
t1 = value
end
function t2(value)
logical*2 value
logical*2 t2
t2 = value
end
function t4(value)
logical*4 value
logical*4 t4
t4 = value
end
c function t8(value)
c logical*8 value
c logical*8 t8
c t8 = value
c end
subroutine s0(t0,value)
logical value
logical t0
cf2py intent(out) t0
t0 = value
end
subroutine s1(t1,value)
logical*1 value
logical*1 t1
cf2py intent(out) t1
t1 = value
end
subroutine s2(t2,value)
logical*2 value
logical*2 t2
cf2py intent(out) t2
t2 = value
end
subroutine s4(t4,value)
logical*4 value
logical*4 t4
cf2py intent(out) t4
t4 = value
end
c subroutine s8(t8,value)
c logical*8 value
c logical*8 t8
cf2py intent(out) t8
c t8 = value
c end
"""
@dec.slow
def test_all(self):
for name in "t0,t1,t2,t4,s0,s1,s2,s4".split(","):
self.check_function(getattr(self.module, name))
class TestF90ReturnLogical(TestReturnLogical):
suffix = ".f90"
code = """
module f90_return_logical
contains
function t0(value)
logical :: value
logical :: t0
t0 = value
end function t0
function t1(value)
logical(kind=1) :: value
logical(kind=1) :: t1
t1 = value
end function t1
function t2(value)
logical(kind=2) :: value
logical(kind=2) :: t2
t2 = value
end function t2
function t4(value)
logical(kind=4) :: value
logical(kind=4) :: t4
t4 = value
end function t4
function t8(value)
logical(kind=8) :: value
logical(kind=8) :: t8
t8 = value
end function t8
subroutine s0(t0,value)
logical :: value
logical :: t0
!f2py intent(out) t0
t0 = value
end subroutine s0
subroutine s1(t1,value)
logical(kind=1) :: value
logical(kind=1) :: t1
!f2py intent(out) t1
t1 = value
end subroutine s1
subroutine s2(t2,value)
logical(kind=2) :: value
logical(kind=2) :: t2
!f2py intent(out) t2
t2 = value
end subroutine s2
subroutine s4(t4,value)
logical(kind=4) :: value
logical(kind=4) :: t4
!f2py intent(out) t4
t4 = value
end subroutine s4
subroutine s8(t8,value)
logical(kind=8) :: value
logical(kind=8) :: t8
!f2py intent(out) t8
t8 = value
end subroutine s8
end module f90_return_logical
"""
@dec.slow
def test_all(self):
for name in "t0,t1,t2,t4,t8,s0,s1,s2,s4,s8".split(","):
self.check_function(getattr(self.module.f90_return_logical, name))
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
yawnosnorous/python-for-android | python3-alpha/python3-src/Lib/lib2to3/fixes/fix_except.py | 203 | 3344 | """Fixer for except statements with named exceptions.
The following cases will be converted:
- "except E, T:" where T is a name:
except E as T:
- "except E, T:" where T is not a name, tuple or list:
except E as t:
T = t
This is done because the target of an "except" clause must be a
name.
- "except E, T:" where T is a tuple or list literal:
except E as t:
T = t.args
"""
# Author: Collin Winter
# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms
def find_excepts(nodes):
for i, n in enumerate(nodes):
if n.type == syms.except_clause:
if n.children[0].value == 'except':
yield (n, nodes[i+2])
class FixExcept(fixer_base.BaseFix):
BM_compatible = True
PATTERN = """
try_stmt< 'try' ':' (simple_stmt | suite)
cleanup=(except_clause ':' (simple_stmt | suite))+
tail=(['except' ':' (simple_stmt | suite)]
['else' ':' (simple_stmt | suite)]
['finally' ':' (simple_stmt | suite)]) >
"""
def transform(self, node, results):
syms = self.syms
tail = [n.clone() for n in results["tail"]]
try_cleanup = [ch.clone() for ch in results["cleanup"]]
for except_clause, e_suite in find_excepts(try_cleanup):
if len(except_clause.children) == 4:
(E, comma, N) = except_clause.children[1:4]
comma.replace(Name("as", prefix=" "))
if N.type != token.NAME:
# Generate a new N for the except clause
new_N = Name(self.new_name(), prefix=" ")
target = N.clone()
target.prefix = ""
N.replace(new_N)
new_N = new_N.clone()
# Insert "old_N = new_N" as the first statement in
# the except body. This loop skips leading whitespace
# and indents
#TODO(cwinter) suite-cleanup
suite_stmts = e_suite.children
for i, stmt in enumerate(suite_stmts):
if isinstance(stmt, pytree.Node):
break
# The assignment is different if old_N is a tuple or list
# In that case, the assignment is old_N = new_N.args
if is_tuple(N) or is_list(N):
assign = Assign(target, Attr(new_N, Name('args')))
else:
assign = Assign(target, new_N)
#TODO(cwinter) stopgap until children becomes a smart list
for child in reversed(suite_stmts[:i]):
e_suite.insert_child(0, child)
e_suite.insert_child(i, assign)
elif N.prefix == "":
# No space after a comma is legal; no space after "as",
# not so much.
N.prefix = " "
#TODO(cwinter) fix this when children becomes a smart list
children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
return pytree.Node(node.type, children)
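# Usage sketch with the standard lib2to3 driver (the input snippet is an
# assumption):
#
#   from lib2to3 import refactor
#   tool = refactor.RefactoringTool(['lib2to3.fixes.fix_except'])
#   src = "try:\n    pass\nexcept Exception, e:\n    pass\n"
#   print(tool.refactor_string(src, '<example>'))
#   # -> the clause comes back rewritten as "except Exception as e:"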
| apache-2.0 |
callowayproject/Transmogrify | transmogrify/create_doc_imgs.py | 1 | 3036 | import os
from transmogrify import Transmogrify
square_img = os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata', 'square_img.jpg'))
vert_img = os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata', 'vert_img.jpg'))
horiz_img = os.path.abspath(os.path.join(os.path.dirname(__file__), 'testdata', 'horiz_img.jpg'))
####
#### AutoCrop
####
Transmogrify(square_img, [('a', '100x100'),]).save()
Transmogrify(vert_img, [('a', '100x100'),]).save()
Transmogrify(horiz_img, [('a', '100x100'),]).save()
####
#### Thumbnail
####
Transmogrify(square_img, [('t', '200'),]).save()
Transmogrify(vert_img, [('t', '200'),]).save()
Transmogrify(horiz_img, [('t', '200'),]).save()
Transmogrify(square_img, [('t', 'x200'),]).save()
Transmogrify(vert_img, [('t', 'x200'),]).save()
Transmogrify(horiz_img, [('t', 'x200'),]).save()
Transmogrify(square_img, [('t', '200x200'),]).save()
Transmogrify(vert_img, [('t', '200x200'),]).save()
Transmogrify(horiz_img, [('t', '200x200'),]).save()
####
#### Resize
####
Transmogrify(square_img, [('r', '500'),]).save()
Transmogrify(vert_img, [('r', '500'),]).save()
Transmogrify(horiz_img, [('r', '500'),]).save()
Transmogrify(square_img, [('r', 'x500'),]).save()
Transmogrify(vert_img, [('r', 'x500'),]).save()
Transmogrify(horiz_img, [('r', 'x500'),]).save()
Transmogrify(square_img, [('r', '500x500'),]).save()
Transmogrify(vert_img, [('r', '500x500'),]).save()
Transmogrify(horiz_img, [('r', '500x500'),]).save()
####
#### Letterbox
####
Transmogrify(square_img, [('l', '500x500-f00'),]).save()
Transmogrify(vert_img, [('l', '500x500-f00'),]).save()
Transmogrify(horiz_img, [('l', '500x500-f00'),]).save()
Transmogrify(square_img, [('l', '500x500-fffee1'),]).save()
Transmogrify(vert_img, [('l', '500x500-fffee1'),]).save()
Transmogrify(horiz_img, [('l', '500x500-fffee1'),]).save()
####
#### ForceFit
####
Transmogrify(square_img, [('s', '300x300'),]).save()
Transmogrify(vert_img, [('s', '300x300'),]).save()
Transmogrify(horiz_img, [('s', '300x300'),]).save()
####
#### Crop
####
Transmogrify(square_img, [('c', '100x100'),]).save()
Transmogrify(vert_img, [('c', '100x100'),]).save()
Transmogrify(horiz_img, [('c', '100x100'),]).save()
####
#### Filter
####
Transmogrify(square_img, [('r', '300x300'), ('f', 'blur')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'contour')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'detail')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'edge_enhance')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'edge_enhance_more')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'emboss')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'find_edges')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'smooth')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'smooth_more')]).save()
Transmogrify(square_img, [('r', '300x300'), ('f', 'sharpen')]).save()
####
#### Border
####
Transmogrify(square_img, [('r', '300x300'), ('b', '3-fffee1')]).save()
| apache-2.0 |
nwjs/chromium.src | third_party/blink/web_tests/external/wpt/webdriver/tests/set_window_rect/user_prompts.py | 42 | 4063 | # META: timeout=long
import pytest
from tests.support.asserts import assert_dialog_handled, assert_error, assert_success
def set_window_rect(session, rect):
return session.transport.send(
"POST", "session/{session_id}/window/rect".format(**vars(session)),
rect)
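# For example, set_window_rect(session, {"x": 10, "y": 20}) issues
# POST /session/<session id>/window/rect with body {"x": 10, "y": 20};
# "width" and "height" keys may be sent the same way per the WebDriver
# specification.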
@pytest.fixture
def check_user_prompt_closed_without_exception(session, create_dialog):
def check_user_prompt_closed_without_exception(dialog_type, retval):
original_rect = session.window.rect
create_dialog(dialog_type, text=dialog_type)
response = set_window_rect(session, {
"x": original_rect["x"] + 10, "y": original_rect["y"] + 10})
assert_success(response)
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert session.window.rect != original_rect
return check_user_prompt_closed_without_exception
@pytest.fixture
def check_user_prompt_closed_with_exception(session, create_dialog):
def check_user_prompt_closed_with_exception(dialog_type, retval):
original_rect = session.window.rect
create_dialog(dialog_type, text=dialog_type)
response = set_window_rect(session, {
"x": original_rect["x"] + 10, "y": original_rect["y"] + 10})
assert_error(response, "unexpected alert open")
assert_dialog_handled(session, expected_text=dialog_type, expected_retval=retval)
assert session.window.rect == original_rect
return check_user_prompt_closed_with_exception
@pytest.fixture
def check_user_prompt_not_closed_but_exception(session, create_dialog):
def check_user_prompt_not_closed_but_exception(dialog_type):
original_rect = session.window.rect
create_dialog(dialog_type, text=dialog_type)
response = set_window_rect(session, {
"x": original_rect["x"] + 10, "y": original_rect["y"] + 10})
assert_error(response, "unexpected alert open")
assert session.alert.text == dialog_type
session.alert.dismiss()
assert session.window.rect == original_rect
return check_user_prompt_not_closed_but_exception
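# (added note) Summary of the capability matrix exercised by the tests below:
# "accept" and "dismiss" close the prompt and the command succeeds;
# "accept and notify" / "dismiss and notify" close the prompt but the command
# fails with "unexpected alert open" and the window rect stays unchanged;
# "ignore" leaves the prompt open and the command fails; with no capability
# set, the default behaves like "dismiss and notify".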
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "accept and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", True),
("prompt", ""),
])
def test_accept_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss(check_user_prompt_closed_without_exception, dialog_type, retval):
check_user_prompt_closed_without_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "dismiss and notify"})
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_dismiss_and_notify(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
@pytest.mark.capabilities({"unhandledPromptBehavior": "ignore"})
@pytest.mark.parametrize("dialog_type", ["alert", "confirm", "prompt"])
def test_ignore(check_user_prompt_not_closed_but_exception, dialog_type):
check_user_prompt_not_closed_but_exception(dialog_type)
@pytest.mark.parametrize("dialog_type, retval", [
("alert", None),
("confirm", False),
("prompt", None),
])
def test_default(check_user_prompt_closed_with_exception, dialog_type, retval):
check_user_prompt_closed_with_exception(dialog_type, retval)
| bsd-3-clause |
mistercrunch/airflow | tests/providers/apache/kylin/hooks/test_kylin.py | 7 | 2924 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from unittest.mock import MagicMock, patch
from kylinpy.exceptions import KylinCubeError
from airflow.exceptions import AirflowException
from airflow.providers.apache.kylin.hooks.kylin import KylinHook
class TestKylinHook(unittest.TestCase):
def setUp(self) -> None:
self.hook = KylinHook(kylin_conn_id='kylin_default', project='learn_kylin')
@patch("kylinpy.Kylin.get_job")
def test_get_job_status(self, mock_job):
job = MagicMock()
job.status = "ERROR"
mock_job.return_value = job
self.assertEqual(self.hook.get_job_status('123'), "ERROR")
@patch("kylinpy.Kylin.get_datasource")
def test_cube_run(self, cube_source):
class MockCubeSource:
def invoke_command(self, command, **kwargs):
invoke_command_list = [
'fullbuild',
'build',
'merge',
'refresh',
'delete',
'build_streaming',
'merge_streaming',
'refresh_streaming',
'disable',
'enable',
'purge',
'clone',
'drop',
]
if command in invoke_command_list:
return {"code": "000", "data": {}}
else:
raise KylinCubeError(f'Unsupported invoke command for datasource: {command}')
cube_source.return_value = MockCubeSource()
response_data = {"code": "000", "data": {}}
self.assertDictEqual(self.hook.cube_run('kylin_sales_cube', 'build'), response_data)
self.assertDictEqual(self.hook.cube_run('kylin_sales_cube', 'refresh'), response_data)
self.assertDictEqual(self.hook.cube_run('kylin_sales_cube', 'merge'), response_data)
self.assertDictEqual(self.hook.cube_run('kylin_sales_cube', 'build_streaming'), response_data)
self.assertRaises(
AirflowException,
self.hook.cube_run,
'kylin_sales_cube',
'build123',
)
| apache-2.0 |
wisonwang/django-lfs | lfs/core/models.py | 4 | 9087 | # django imports
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.utils.translation import ugettext_lazy as _
# lfs imports
from lfs.checkout.settings import CHECKOUT_TYPES
from lfs.checkout.settings import CHECKOUT_TYPE_SELECT
from lfs.core.fields.thumbs import ImageWithThumbsField
from lfs.catalog.models import DeliveryTime
from lfs.catalog.models import StaticBlock
class Country(models.Model):
"""Holds country relevant data for the shop.
"""
code = models.CharField(_(u"Country code"), max_length=2)
name = models.CharField(_(u"Name"), max_length=100)
def __unicode__(self):
return self.name
class Meta:
verbose_name_plural = 'Countries'
ordering = ("name", )
app_label = 'core'
class ActionGroup(models.Model):
"""Actions of a action group can be displayed on several parts of the web
page.
**Attributes**:
name
The name of the group.
"""
name = models.CharField(_(u"Name"), blank=True, max_length=100, unique=True)
class Meta:
ordering = ("name", )
app_label = 'core'
def __unicode__(self):
return self.name
def get_actions(self):
"""Returns the actions of this group.
"""
return self.actions.filter(active=True)
class Action(models.Model):
"""A action is a link which belongs to a action groups.
**Attributes**:
group
The belonging group.
title
The title of the menu tab.
link
The link to the object.
active
If true the tab is displayed.
position
the position of the tab within the menu.
parent
Parent tab to create a tree.
"""
active = models.BooleanField(_(u"Active"), default=False)
title = models.CharField(_(u"Title"), max_length=40)
link = models.CharField(_(u"Link"), blank=True, max_length=100)
group = models.ForeignKey(ActionGroup, verbose_name=_(u"Group"), related_name="actions")
position = models.IntegerField(_(u"Position"), default=999)
parent = models.ForeignKey("self", verbose_name=_(u"Parent"), blank=True, null=True)
def __unicode__(self):
return self.title
class Meta:
ordering = ("position", )
app_label = 'core'
class Shop(models.Model):
"""
Holds all shop related information.
name
The name of the shop. This is used for the title of the HTML pages
shop_owner
The shop owner. This is displayed in several places, for instance on the
checkout page
from_email
This e-mail address is used for the from header of all outgoing e-mails
notification_emails
These e-mail addresses are used for incoming notification e-mails, e.g.
when an order is received. One e-mail address per line.
description
A description of the shop
image
An image which can be used as default image if a category doesn't have one
product_cols, product_rows, category_cols
Top-level format information, which defines how products and categories are
displayed within several views. These may be inherited by categories and
subcategories.
delivery_time
The default delivery time, which is used if no delivery time can be
calculated for a product.
google_analytics_id
Used to generate Google Analytics tracker code and e-commerce code. The
ID has the format UA-xxxxxxx-xx and is provided by Google.
ga_site_tracking
If selected and the google_analytics_id is given google analytics site
tracking code is inserted into the HTML source code.
ga_ecommerce_tracking
If selected and the google_analytics_id is given google analytics
e-commerce tracking code is inserted into the HTML source code.
countries
Selected countries will be offered to the shop customer to choose from for
shipping and invoice addresses.
default_country
This country will be used to calculate shipping price if the shop
customer hasn't selected a country yet.
use_international_currency_code
If this is True the international currency code from the current locale
is used.
price_calculator
Class that implements lfs.price.PriceCalculator for calculating product
price. This is the default price calculator for all products.
checkout_type
Decides whether the customer has to log in, doesn't have to log in, or can
choose whether to log in, in order to check out.
confirm_toc
If this is activated the shop customer has to confirm terms and
conditions in order to check out.
meta*
This information is used within HTML meta tags of the shop view.
"""
name = models.CharField(_(u"Name"), max_length=30)
shop_owner = models.CharField(_(u"Shop owner"), max_length=100, blank=True)
from_email = models.EmailField(_(u"From e-mail address"))
notification_emails = models.TextField(_(u"Notification email addresses"))
description = models.TextField(_(u"Description"), blank=True)
image = ImageWithThumbsField(_(u"Image"), upload_to="images", blank=True, null=True, sizes=((60, 60), (100, 100), (200, 200), (400, 400)))
static_block = models.ForeignKey(StaticBlock, verbose_name=_(u"Static block"), blank=True, null=True, related_name="shops")
product_cols = models.IntegerField(_(u"Product cols"), default=1)
product_rows = models.IntegerField(_(u"Product rows"), default=10)
category_cols = models.IntegerField(_(u"Category cols"), default=1)
delivery_time = models.ForeignKey(DeliveryTime, verbose_name=_(u"Delivery time"), blank=True, null=True)
google_analytics_id = models.CharField(_(u"Google Analytics ID"), blank=True, max_length=20)
ga_site_tracking = models.BooleanField(_(u"Google Analytics Site Tracking"), default=False)
ga_ecommerce_tracking = models.BooleanField(_(u"Google Analytics E-Commerce Tracking"), default=False)
invoice_countries = models.ManyToManyField(Country, verbose_name=_(u"Invoice countries"), related_name="invoice")
shipping_countries = models.ManyToManyField(Country, verbose_name=_(u"Shipping countries"), related_name="shipping")
default_country = models.ForeignKey(Country, verbose_name=_(u"Default shipping country"))
use_international_currency_code = models.BooleanField(_(u"Use international currency codes"), default=False)
price_calculator = models.CharField(choices=settings.LFS_PRICE_CALCULATORS, max_length=255,
default=settings.LFS_PRICE_CALCULATORS[0][0],
verbose_name=_(u"Price calculator"))
checkout_type = models.PositiveSmallIntegerField(_(u"Checkout type"), choices=CHECKOUT_TYPES, default=CHECKOUT_TYPE_SELECT)
confirm_toc = models.BooleanField(_(u"Confirm TOC"), default=False)
meta_title = models.CharField(_(u"Meta title"), blank=True, default="<name>", max_length=80)
meta_keywords = models.TextField(_(u"Meta keywords"), blank=True)
meta_description = models.TextField(_(u"Meta description"), blank=True)
class Meta:
permissions = (("manage_shop", "Manage shop"),)
app_label = 'core'
def __unicode__(self):
return self.name
def get_format_info(self):
"""Returns the global format info.
"""
return {
"product_cols": self.product_cols,
"product_rows": self.product_rows,
"category_cols": self.category_cols,
}
def get_default_country(self):
"""Returns the default country of the shop.
"""
cache_key = "%s-default-country-%s" % (settings.CACHE_MIDDLEWARE_KEY_PREFIX, self.id)
default_country = cache.get(cache_key)
if default_country:
return default_country
default_country = self.default_country
cache.set(cache_key, default_country)
return default_country
def get_notification_emails(self):
"""Returns the notification e-mail addresses as list
"""
import re
addresses = re.split(r"[\s,]+", self.notification_emails)
return addresses
def get_parent_for_portlets(self):
"""Implements contract for django-portlets. Returns always None as there
is no parent for a shop.
"""
return None
def get_meta_title(self):
"""Returns the meta title of the shop.
"""
return self.meta_title.replace("<name>", self.name)
def get_meta_keywords(self):
"""Returns the meta keywords of the shop.
"""
return self.meta_keywords.replace("<name>", self.name)
def get_meta_description(self):
"""Returns the meta description of the shop.
"""
return self.meta_description.replace("<name>", self.name)
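# (added sketch, not part of the original module) How the "<name>"
# placeholder in the meta fields resolves; the values are hypothetical:
#
#   shop = Shop(name="ACME", meta_title="<name> - Online Shop")
#   shop.get_meta_title()     # -> "ACME - Online Shop"
#   shop.get_meta_keywords()  # "<name>" is substituted the same way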
class Application(models.Model):
version = models.CharField(_("Version"), blank=True, max_length=10)
class Meta:
app_label = 'core'
from monkeys import *
| bsd-3-clause |
nexdatas/writer | test/Converters_test.py | 1 | 5954 | #!/usr/bin/env python
# This file is part of nexdatas - Tango Server for NeXus data writer
#
# Copyright (C) 2012-2017 DESY, Jan Kotanski <jkotan@mail.desy.de>
#
# nexdatas is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nexdatas is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nexdatas. If not, see <http://www.gnu.org/licenses/>.
# \package test nexdatas
# \file ConvertersTest.py
# unittests for field Tags running Tango Server
#
import unittest
import sys
import struct
from nxswriter.Types import Converters
# if 64-bit machine
IS64BIT = (struct.calcsize("P") == 8)
# test fixture
class ConvertersTest(unittest.TestCase):
# constructor
# \param methodName name of the test method
def __init__(self, methodName):
unittest.TestCase.__init__(self, methodName)
self._tfname = "doc"
self._fname = "test.h5"
self._nxDoc = None
self._eDoc = None
self._fattrs = {"short_name": "test", "units": "m"}
self._gname = "testDoc"
self._gtype = "NXentry"
self._bint = "int64" if IS64BIT else "int32"
self._buint = "uint64" if IS64BIT else "uint32"
self._bfloat = "float64" if IS64BIT else "float32"
# test starter
# \brief Common set up
def setUp(self):
# file handle
print("\nsetting up...")
# test closer
# \brief Common tear down
def tearDown(self):
print("tearing down ...")
# Exception tester
# \param exception expected exception
# \param method called method
# \param args list with method arguments
# \param kwargs dictionary with method arguments
def myAssertRaise(self, exception, method, *args, **kwargs):
try:
error = False
method(*args, **kwargs)
except Exception:
error = True
self.assertEqual(error, True)
# toBool instance test
# \brief It tests default settings
def test_toBool_instance(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
bools = {
True: True, False: False,
"True": True, "False": False,
"true": True, "false": False,
"TRUE": True, "FALSE": False,
"tRUE": True, "fALSE": False,
"TrUE": True, "FaLSE": False,
"TrUE": True, "FAlSE": False,
"TRuE": True, "FALsE": False,
"TRUe": True, "FALSe": False,
"trUE": True, "faLSE": False,
"tRuE": True, "fAlSE": False,
"tRUe": True, "fALsE": False,
"tRUe": True, "fALSe": False,
"TruE": True, "FalSE": False,
"TrUe": True, "FaLsE": False,
"TrUe": True, "FaLSe": False,
"TRue": True, "FAlsE": False,
"TRue": True, "FAlSe": False,
"TRue": True, "FAlse": False,
"truE": True, "falSE": False,
"trUe": True, "faLsE": False,
"tRue": True, "fAlsE": False,
"True": True, "FalsE": False,
"BleBle": True, "FaLSe": False,
"bleble": True, "FAlSe": False,
"xxxxxx": True, "FalSe": False,
"bldsff": True, "fALse": False,
"blerew": True, "FaLse": False,
"bwerle": True, "FAlse": False,
"alebwe": True, "fAlse": False,
"glewer": True, "faLse": False,
"fgeble": True, "falSe": False,
"fall": True, "falsE": False,
}
el = Converters()
self.assertTrue(isinstance(el, object))
self.assertTrue(hasattr(el, "toBool"))
for b in bools:
self.assertEqual(el.toBool(b), bools[b])
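# (added note) The fixture above implies Converters.toBool(value) is False
# only for boolean False or a case variant of the string "false"; every
# other string -- even arbitrary words like "bleble" -- converts to True.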
# toBool class test
# \brief It tests default settings
def test_toBool_class(self):
fun = sys._getframe().f_code.co_name
print("Run: %s.%s() " % (self.__class__.__name__, fun))
bools = {
True: True, False: False,
"True": True, "False": False,
"true": True, "false": False,
"TRUE": True, "FALSE": False,
"tRUE": True, "fALSE": False,
"TrUE": True, "FaLSE": False,
"TrUE": True, "FAlSE": False,
"TRuE": True, "FALsE": False,
"TRUe": True, "FALSe": False,
"trUE": True, "faLSE": False,
"tRuE": True, "fAlSE": False,
"tRUe": True, "fALsE": False,
"tRUe": True, "fALSe": False,
"TruE": True, "FalSE": False,
"TrUe": True, "FaLsE": False,
"TrUe": True, "FaLSe": False,
"TRue": True, "FAlsE": False,
"TRue": True, "FAlSe": False,
"TRue": True, "FAlse": False,
"truE": True, "falSE": False,
"trUe": True, "faLsE": False,
"tRue": True, "fAlsE": False,
"True": True, "FalsE": False,
"BleBle": True, "FaLSe": False,
"bleble": True, "FAlSe": False,
"xxxxxx": True, "FalSe": False,
"bldsff": True, "fALse": False,
"blerew": True, "FaLse": False,
"bwerle": True, "FAlse": False,
"alebwe": True, "fAlse": False,
"glewer": True, "faLse": False,
"fgeble": True, "falSe": False,
"fall": True, "falsE": False,
}
for b in bools:
self.assertEqual(Converters.toBool(b), bools[b])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
40223112/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/importlib/__init__.py | 610 | 3472 | """A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery #fix me brython
try:
import _frozen_importlib as _bootstrap
except ImportError:
from . import _bootstrap
_bootstrap._setup(sys, _imp)
else:
# importlib._bootstrap is the built-in import, ensure we don't create
# a second copy of the module.
_bootstrap.__name__ = 'importlib._bootstrap'
_bootstrap.__package__ = 'importlib'
_bootstrap.__file__ = __file__.replace('__init__.py', '_bootstrap.py')
sys.modules['importlib._bootstrap'] = _bootstrap
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
"""Call the invalidate_caches() method on all meta path finders stored in
sys.meta_path (where implemented)."""
for finder in sys.meta_path:
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
def find_loader(name, path=None):
"""Find the loader for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__loader__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable loader with the
value of 'path' given to the finders. None is returned if no loader could
be found.
Dotted names do not have their parent packages implicitly imported. You will
most likely need to explicitly import all parent packages in the proper
order for a submodule to get the correct loader.
"""
try:
loader = sys.modules[name].__loader__
if loader is None:
raise ValueError('{}.__loader__ is None'.format(name))
else:
return loader
except KeyError:
pass
return _bootstrap._find_module(name, path)
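# (added note) Illustrative use of the helpers above:
#
#   import importlib
#   importlib.invalidate_caches()      # e.g. after creating modules at runtime
#   importlib.find_loader('json')      # loader used to import the json module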
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
for character in name:
if character != '.':
break
level += 1
return _bootstrap._gcd_import(name[level:], package, level)
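# (added note) Example of the 'package' anchor for relative imports:
#
#   import_module('collections')                   # absolute import
#   import_module('.abc', package='collections')   # resolves to collections.abc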
#need at least one import hook for importlib stuff to work.
import basehook
sys.meta_path.append(basehook.BaseHook())
| gpl-3.0 |
RyanYoung25/tensorflow | tensorflow/python/kernel_tests/random_crop_test.py | 15 | 2626 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for random_crop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class RandomCropTest(tf.test.TestCase):
def testNoOp(self):
# No random cropping is performed since the size is value.shape.
for shape in (2, 1, 1), (2, 1, 3), (4, 5, 3):
value = np.arange(0, np.prod(shape), dtype=np.int32).reshape(shape)
with self.test_session():
crop = tf.random_crop(value, shape).eval()
self.assertAllEqual(crop, value)
def testContains(self):
with self.test_session():
shape = (3, 5, 7)
target = (2, 3, 4)
value = np.random.randint(1000000, size=shape)
value_set = set(tuple(value[i:i + 2, j:j + 3, k:k + 4].ravel())
for i in range(2) for j in range(3) for k in range(4))
crop = tf.random_crop(value, size=target)
for _ in range(20):
y = crop.eval()
self.assertAllEqual(y.shape, target)
self.assertTrue(tuple(y.ravel()) in value_set)
def testRandomization(self):
# Run 1x1 crop num_samples times in an image and ensure that one finds each
# pixel 1/size of the time.
num_samples = 1000
shape = [5, 4, 1]
size = np.prod(shape)
single = [1, 1, 1]
value = np.arange(size).reshape(shape)
with self.test_session():
crop = tf.random_crop(value, single, seed=7)
counts = np.zeros(size, dtype=np.int32)
for _ in range(num_samples):
y = crop.eval()
self.assertAllEqual(y.shape, single)
counts[y] += 1
# Calculate the mean and 4 * standard deviation.
mean = np.repeat(num_samples / size, size)
four_stddev = 4.0 * np.sqrt(mean)
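# (added note) counts[i] ~ Binomial(num_samples, 1/size), so its standard
# deviation is sqrt(n*p*(1-p)) ~= sqrt(mean) for small p; the 4-sigma band
# keeps the chance of a spurious failure negligible.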
# Ensure that each entry is observed in 1/size of the samples
# within 4 standard deviations.
self.assertAllClose(counts, mean, atol=four_stddev)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
jelugbo/hebs_repo | lms/djangoapps/courseware/tests/test_lti_integration.py | 6 | 9266 | """LTI integration tests"""
import oauthlib
from collections import OrderedDict
import mock
import urllib
import json
from django.test.utils import override_settings
from django.core.urlresolvers import reverse
from django.conf import settings
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.x_module import STUDENT_VIEW
from courseware.tests import BaseTestXmodule
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from courseware.views import get_course_lti_endpoints
from lms.lib.xblock.runtime import quote_slashes
class TestLTI(BaseTestXmodule):
"""
Integration test for lti xmodule.
It checks overall code, by assuring that context that goes to template is correct.
As part of that, checks oauth signature generation by mocking signing function
of `oauthlib` library.
"""
CATEGORY = "lti"
def setUp(self):
"""
Mock oauth1 signing of requests library for testing.
"""
super(TestLTI, self).setUp()
mocked_nonce = u'135685044251684026041377608307'
mocked_timestamp = u'1234567890'
mocked_signature_after_sign = u'my_signature%3D'
mocked_decoded_signature = u'my_signature='
# Note: this course_id is actually a course_key
context_id = self.item_descriptor.course_id.to_deprecated_string()
user_id = unicode(self.item_descriptor.xmodule_runtime.anonymous_student_id)
hostname = self.item_descriptor.xmodule_runtime.hostname
resource_link_id = unicode(urllib.quote('{}-{}'.format(hostname, self.item_descriptor.location.html_id())))
sourcedId = "{context}:{resource_link}:{user_id}".format(
context=urllib.quote(context_id),
resource_link=resource_link_id,
user_id=user_id
)
self.correct_headers = {
u'user_id': user_id,
u'oauth_callback': u'about:blank',
u'launch_presentation_return_url': '',
u'lti_message_type': u'basic-lti-launch-request',
u'lti_version': 'LTI-1p0',
u'roles': u'Student',
u'context_id': context_id,
u'resource_link_id': resource_link_id,
u'lis_result_sourcedid': sourcedId,
u'oauth_nonce': mocked_nonce,
u'oauth_timestamp': mocked_timestamp,
u'oauth_consumer_key': u'',
u'oauth_signature_method': u'HMAC-SHA1',
u'oauth_version': u'1.0',
u'oauth_signature': mocked_decoded_signature
}
saved_sign = oauthlib.oauth1.Client.sign
self.expected_context = {
'display_name': self.item_descriptor.display_name,
'input_fields': self.correct_headers,
'element_class': self.item_descriptor.category,
'element_id': self.item_descriptor.location.html_id(),
'launch_url': 'http://www.example.com', # default value
'open_in_a_new_page': True,
'form_url': self.item_descriptor.xmodule_runtime.handler_url(self.item_descriptor,
'preview_handler').rstrip('/?'),
'hide_launch': False,
'has_score': False,
'module_score': None,
'comment': u'',
'weight': 1.0,
'ask_to_send_username': self.item_descriptor.ask_to_send_username,
'ask_to_send_email': self.item_descriptor.ask_to_send_email,
'description': self.item_descriptor.description,
'button_text': self.item_descriptor.button_text,
}
def mocked_sign(self, *args, **kwargs):
"""
Mocked oauth1 sign function.
"""
# self is <oauthlib.oauth1.rfc5849.Client object> here:
__, headers, __ = saved_sign(self, *args, **kwargs)
# we should replace nonce, timestamp and signed_signature in headers:
old = headers[u'Authorization']
old_parsed = OrderedDict([param.strip().replace('"', '').split('=') for param in old.split(',')])
old_parsed[u'OAuth oauth_nonce'] = mocked_nonce
old_parsed[u'oauth_timestamp'] = mocked_timestamp
old_parsed[u'oauth_signature'] = mocked_signature_after_sign
headers[u'Authorization'] = ', '.join([k+'="'+v+'"' for k, v in old_parsed.items()])
return None, headers, None
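# (added note) The patch below keeps oauthlib's real signing pipeline but
# pins nonce, timestamp and signature to known values, so the rendered LTI
# form fields are deterministic and comparable against expected_context.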
patcher = mock.patch.object(oauthlib.oauth1.Client, "sign", mocked_sign)
patcher.start()
self.addCleanup(patcher.stop)
def test_lti_constructor(self):
generated_content = self.item_descriptor.render(STUDENT_VIEW).content
expected_content = self.runtime.render_template('lti.html', self.expected_context)
self.assertEqual(generated_content, expected_content)
def test_lti_preview_handler(self):
generated_content = self.item_descriptor.preview_handler(None, None).body
expected_content = self.runtime.render_template('lti_form.html', self.expected_context)
self.assertEqual(generated_content, expected_content)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestLTIModuleListing(ModuleStoreTestCase):
"""
a test for the rest endpoint that lists LTI modules in a course
"""
# arbitrary constant
COURSE_SLUG = "100"
COURSE_NAME = "test_course"
def setUp(self):
"""Create course, 2 chapters, 2 sections"""
self.course = CourseFactory.create(display_name=self.COURSE_NAME, number=self.COURSE_SLUG)
self.chapter1 = ItemFactory.create(
parent_location=self.course.location,
display_name="chapter1",
category='chapter')
self.section1 = ItemFactory.create(
parent_location=self.chapter1.location,
display_name="section1",
category='sequential')
self.chapter2 = ItemFactory.create(
parent_location=self.course.location,
display_name="chapter2",
category='chapter')
self.section2 = ItemFactory.create(
parent_location=self.chapter2.location,
display_name="section2",
category='sequential')
# creates one draft and one published lti module, in different sections
self.lti_published = ItemFactory.create(
parent_location=self.section1.location,
display_name="lti published",
category="lti",
location=self.course.id.make_usage_key('lti', 'lti_published'),
)
self.lti_draft = ItemFactory.create(
parent_location=self.section2.location,
display_name="lti draft",
category="lti",
location=self.course.id.make_usage_key('lti', 'lti_published'),
publish_item=False,
)
def expected_handler_url(self, handler):
"""convenience method to get the reversed handler urls"""
return "https://{}{}".format(settings.SITE_NAME, reverse(
'courseware.module_render.handle_xblock_callback_noauth',
args=[
self.course.id.to_deprecated_string(),
quote_slashes(unicode(self.lti_published.scope_ids.usage_id.to_deprecated_string()).encode('utf-8')),
handler
]
))
def test_lti_rest_bad_course(self):
"""Tests what happens when the lti listing rest endpoint gets a bad course_id"""
bad_ids = [u"sf", u"dne/dne/dne", u"fo/ey/\\u5305"]
for bad_course_id in bad_ids:
lti_rest_endpoints_url = 'courses/{}/lti_rest_endpoints/'.format(bad_course_id)
response = self.client.get(lti_rest_endpoints_url)
self.assertEqual(404, response.status_code)
def test_lti_rest_listing(self):
"""tests that the draft lti module is part of the endpoint response"""
request = mock.Mock()
request.method = 'GET'
response = get_course_lti_endpoints(request, course_id=self.course.id.to_deprecated_string())
self.assertEqual(200, response.status_code)
self.assertEqual('application/json', response['Content-Type'])
expected = {
"lti_1_1_result_service_xml_endpoint": self.expected_handler_url('grade_handler'),
"lti_2_0_result_service_json_endpoint":
self.expected_handler_url('lti_2_0_result_rest_handler') + "/user/{anon_user_id}",
"display_name": self.lti_draft.display_name
}
self.assertEqual([expected], json.loads(response.content))
def test_lti_rest_non_get(self):
"""tests that the endpoint returns 404 when hit with NON-get"""
DISALLOWED_METHODS = ("POST", "PUT", "DELETE", "HEAD", "OPTIONS") # pylint: disable=invalid-name
for method in DISALLOWED_METHODS:
request = mock.Mock()
request.method = method
response = get_course_lti_endpoints(request, self.course.id.to_deprecated_string())
self.assertEqual(405, response.status_code)
| agpl-3.0 |
vyscond/pyfpdf | attic/font/times.py | 26 | 2580 | fpdf_charwidths['times']={
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250,
'\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':408,'#':500,'$':500,'%':833,'&':778,'\'':180,'(':333,')':333,'*':500,'+':564,
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':564,'=':564,'>':564,'?':444,'@':921,'A':722,
'B':667,'C':667,'D':722,'E':611,'F':556,'G':722,'H':722,'I':333,'J':389,'K':722,'L':611,'M':889,'N':722,'O':722,'P':556,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':722,'W':944,
'X':722,'Y':722,'Z':611,'[':333,'\\':278,']':333,'^':469,'_':500,'`':333,'a':444,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':500,'i':278,'j':278,'k':500,'l':278,'m':778,
'n':500,'o':500,'p':500,'q':500,'r':333,'s':389,'t':278,'u':500,'v':500,'w':722,'x':500,'y':500,'z':444,'{':480,'|':200,'}':480,'~':541,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500,
'\x84':444,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':889,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':444,'\x94':444,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':980,
'\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':444,'\x9f':722,'\xa0':250,'\xa1':333,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':200,'\xa7':500,'\xa8':333,'\xa9':760,'\xaa':276,'\xab':500,'\xac':564,'\xad':333,'\xae':760,'\xaf':333,
'\xb0':400,'\xb1':564,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':500,'\xb6':453,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':310,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':444,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722,
'\xc6':889,'\xc7':667,'\xc8':611,'\xc9':611,'\xca':611,'\xcb':611,'\xcc':333,'\xcd':333,'\xce':333,'\xcf':333,'\xd0':722,'\xd1':722,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':564,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722,
'\xdc':722,'\xdd':722,'\xde':556,'\xdf':500,'\xe0':444,'\xe1':444,'\xe2':444,'\xe3':444,'\xe4':444,'\xe5':444,'\xe6':667,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':500,
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':564,'\xf8':500,'\xf9':500,'\xfa':500,'\xfb':500,'\xfc':500,'\xfd':500,'\xfe':500,'\xff':500} | lgpl-3.0 |
mKeRix/home-assistant | homeassistant/components/upb/__init__.py | 15 | 4196 | """Support the UPB PIM."""
import asyncio
import upb_lib
from homeassistant.const import CONF_FILE_PATH, CONF_HOST
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType
from .const import (
ATTR_ADDRESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COMMAND,
ATTR_RATE,
DOMAIN,
EVENT_UPB_SCENE_CHANGED,
)
UPB_PLATFORMS = ["light", "scene"]
async def async_setup(hass: HomeAssistant, hass_config: ConfigType) -> bool:
"""Set up the UPB platform."""
return True
async def async_setup_entry(hass, config_entry):
"""Set up a new config_entry for UPB PIM."""
url = config_entry.data[CONF_HOST]
file = config_entry.data[CONF_FILE_PATH]
upb = upb_lib.UpbPim({"url": url, "UPStartExportFile": file})
upb.connect()
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][config_entry.entry_id] = {"upb": upb}
for component in UPB_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
def _element_changed(element, changeset):
change = changeset.get("last_change")
if change is None:
return
if change.get("command") is None:
return
hass.bus.async_fire(
EVENT_UPB_SCENE_CHANGED,
{
ATTR_COMMAND: change["command"],
ATTR_ADDRESS: element.addr.index,
ATTR_BRIGHTNESS_PCT: change.get("level", -1),
ATTR_RATE: change.get("rate", -1),
},
)
for link in upb.links:
element = upb.links[link]
element.add_callback(_element_changed)
return True
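# (added sketch) A possible consumer of the scene-changed event fired above;
# hass.bus.async_listen is the standard Home Assistant listener API, and the
# data keys mirror those built in _element_changed:
#
#   @callback
#   def handle_scene(event):
#       print(event.data[ATTR_COMMAND], event.data[ATTR_ADDRESS])
#
#   hass.bus.async_listen(EVENT_UPB_SCENE_CHANGED, handle_scene)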
async def async_unload_entry(hass, config_entry):
"""Unload the config_entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in UPB_PLATFORMS
]
)
)
if unload_ok:
upb = hass.data[DOMAIN][config_entry.entry_id]["upb"]
upb.disconnect()
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
class UpbEntity(Entity):
"""Base class for all UPB entities."""
def __init__(self, element, unique_id, upb):
"""Initialize the base of all UPB devices."""
self._upb = upb
self._element = element
element_type = "link" if element.addr.is_link else "device"
self._unique_id = f"{unique_id}_{element_type}_{element.addr}"
@property
def name(self):
"""Name of the element."""
return self._element.name
@property
def unique_id(self):
"""Return unique id of the element."""
return self._unique_id
@property
def should_poll(self) -> bool:
"""Don't poll this device."""
return False
@property
def device_state_attributes(self):
"""Return the default attributes of the element."""
return self._element.as_dict()
@property
def available(self):
"""Is the entity available to be updated."""
return self._upb.is_connected()
def _element_changed(self, element, changeset):
pass
@callback
def _element_callback(self, element, changeset):
"""Handle callback from an UPB element that has changed."""
self._element_changed(element, changeset)
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callback for UPB changes and update entity state."""
self._element.add_callback(self._element_callback)
self._element_callback(self._element, {})
class UpbAttachedEntity(UpbEntity):
"""Base class for UPB attached entities."""
@property
def device_info(self):
"""Device info for the entity."""
return {
"name": self._element.name,
"identifiers": {(DOMAIN, self._element.index)},
"sw_version": self._element.version,
"manufacturer": self._element.manufacturer,
"model": self._element.product,
}
| mit |
ehabkost/busmap | python/busmap/urbsweb/negen_fetch.py | 1 | 1885 | import sys
import urllib2
import string
import re
from busmap.urbsweb.mapa import DataDir
import logging
logger = logging.getLogger(__name__)
dbg = logger.debug
warn = logger.warning
info = logger.info
def main(argv):
loglevel = logging.INFO
args = []
i = 0
while i < len(sys.argv[1:]):
arg = sys.argv[1+i]
if arg == '-d':
loglevel = logging.DEBUG
else:
args.append(arg)
i += 1
logging.basicConfig(stream=sys.stderr, level=loglevel)
dpath = args[0]
dd = DataDir(dpath)
cookie = dd.get('negen_cookie', unpickle=False)
opener = urllib2.build_opener()
opener.addheaders.append(('Cookie', cookie))
tosearch = string.lowercase
for q in tosearch:
reply = dd.get('negen/getLinhas/%s/response' % (q), False)
if reply is None:
reply = opener.open('http://www.urbs.curitiba.pr.gov.br/PORTAL/tabelahorario/negen/getLinhas.php?q=%s' % (q)).read()
dd.put('negen/getLinhas/%s/response' % (q), reply, False)
dbg('reply: %r', reply)
matches = re.findall(r"option value='(.*?)'.*?>(.*?)<", reply)
dbg('matches: %r', matches)
for l,name in matches:
dd.put('negen/getLinha/%s/nome.txt' % (l), name, False)
l,v,cor = l.split(',')
dbg('querying for getLinha: %r, %r, %r' % (l, v, cor))
reply = dd.get('negen/getLinha/%s,%s,%s/response' % (l, v, cor), False)
if reply is None:
url = 'http://www.urbs.curitiba.pr.gov.br/PORTAL/tabelahorario/negen/getLinha.php?l=%s&v=%s&cor=%s' % (l, v, cor)
dbg('url: %r', url)
reply = opener.open(url).read()
dd.put('negen/getLinha/%s,%s,%s/response' % (l, v, cor), reply, False)
dbg('reply: %r', reply)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
lodemo/CATANA | src/face_recognition/youtube_dl/extractor/izlesene.py | 40 | 4263 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_urllib_parse_unquote
from ..utils import (
determine_ext,
float_or_none,
get_element_by_id,
int_or_none,
parse_iso8601,
str_to_int,
)
class IzleseneIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:(?:www|m)\.)?izlesene\.com/
(?:video|embedplayer)/(?:[^/]+/)?(?P<id>[0-9]+)
'''
_TESTS = [
{
'url': 'http://www.izlesene.com/video/sevincten-cildirtan-dogum-gunu-hediyesi/7599694',
'md5': '4384f9f0ea65086734b881085ee05ac2',
'info_dict': {
'id': '7599694',
'ext': 'mp4',
'title': 'Sevinçten Çıldırtan Doğum Günü Hediyesi',
'description': 'md5:253753e2655dde93f59f74b572454f6d',
'thumbnail': r're:^https?://.*\.jpg',
'uploader_id': 'pelikzzle',
'timestamp': int,
'upload_date': '20140702',
'duration': 95.395,
'age_limit': 0,
}
},
{
'url': 'http://www.izlesene.com/video/tarkan-dortmund-2006-konseri/17997',
'md5': '97f09b6872bffa284cb7fa4f6910cb72',
'info_dict': {
'id': '17997',
'ext': 'mp4',
'title': 'Tarkan Dortmund 2006 Konseri',
'thumbnail': r're:^https://.*\.jpg',
'uploader_id': 'parlayankiz',
'timestamp': int,
'upload_date': '20061112',
'duration': 253.666,
'age_limit': 0,
}
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'http://www.izlesene.com/video/%s' % video_id
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
description = self._og_search_description(webpage, default=None)
thumbnail = self._proto_relative_url(
self._og_search_thumbnail(webpage), scheme='http:')
uploader = self._html_search_regex(
r"adduserUsername\s*=\s*'([^']+)';",
webpage, 'uploader', fatal=False)
timestamp = parse_iso8601(self._html_search_meta(
'uploadDate', webpage, 'upload date'))
duration = float_or_none(self._html_search_regex(
r'"videoduration"\s*:\s*"([^"]+)"',
webpage, 'duration', fatal=False), scale=1000)
view_count = str_to_int(get_element_by_id('videoViewCount', webpage))
comment_count = self._html_search_regex(
r'comment_count\s*=\s*\'([^\']+)\';',
webpage, 'comment_count', fatal=False)
content_url = self._html_search_meta(
'contentURL', webpage, 'content URL', fatal=False)
ext = determine_ext(content_url, 'mp4')
# Might be empty for some videos.
streams = self._html_search_regex(
r'"qualitylevel"\s*:\s*"([^"]+)"', webpage, 'streams', default='')
formats = []
if streams:
for stream in streams.split('|'):
quality, url = re.search(r'\[(\w+)\](.+)', stream).groups()
formats.append({
'format_id': '%sp' % quality if quality else 'sd',
'url': compat_urllib_parse_unquote(url),
'ext': ext,
})
else:
stream_url = self._search_regex(
r'"streamurl"\s*:\s*"([^"]+)"', webpage, 'stream URL')
formats.append({
'format_id': 'sd',
'url': compat_urllib_parse_unquote(stream_url),
'ext': ext,
})
return {
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader_id': uploader,
'timestamp': timestamp,
'duration': duration,
'view_count': int_or_none(view_count),
'comment_count': int_or_none(comment_count),
'age_limit': self._family_friendly_search(webpage),
'formats': formats,
}
| mit |
telamonian/saga-python | docs/concepts/decorators.py | 5 | 2369 |
__author__ = "Andre Merzky"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
#!/usr/bin/python
SYNC = 'Sync'
ASYNC = 'Async'
TASK = 'Task'
# ------------------------------------
# exception class
#
class NotImplemented :
pass
def __str__ (self) :
return "NotImplemented"
# ------------------------------------
# decorator, which switches method to
# _async version if ttype is set and !None
def async (sync_function) :
def wrap_function (self, *args, **kwargs) :
if 'ttype' in kwargs :
if kwargs['ttype'] :
# TODO: check if ttype is valid enum
# call async function flavor
try :
async_function_name = "%s_async" % sync_function.__name__
async_function = getattr (self, async_function_name)
except AttributeError :
print " %s: async %s() not implemented" % (self.__class__.__name__, sync_function.__name__)
return None
# raise NotImplemented
else :
# 'self' not needed, getattr() returns a bound method
return async_function (*args, **kwargs)
# no ttype, or ttype==None: call default sync function
return sync_function (self, *args, **kwargs)
return wrap_function
# ------------------------------------
# same decorator, different name
def sync (sync_function) :
return async (sync_function)
# ------------------------------------
# a cpi class which only has sync methods
class sync_printer (object) :
@sync
def print_message (self, msg) :
print " sync printer: %s" % msg
# ------------------------------------
# a cpi class which has async methods
class async_printer (object) :
@async
def print_message (self, msg) :
print "async printer: %s" % msg
def print_message_async (self, msg, ttype) :
print "async printer: %s (%s)" % (msg, ttype)
# ------------------------------------
# test the sync class (fails on ttype versions)
sp = sync_printer ()
sp.print_message ('test')
sp.print_message ('test', ttype=SYNC)
sp.print_message ('test', ttype=ASYNC)
sp.print_message ('test', ttype=TASK)
# ------------------------------------
# test the async class
ap = async_printer ()
ap.print_message ('test')
ap.print_message ('test', ttype=SYNC)
ap.print_message ('test', ttype=ASYNC)
ap.print_message ('test', ttype=TASK)
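# (added note) Expected output under Python 2, derived by tracing the
# decorator: any truthy ttype -- including SYNC -- dispatches to the _async
# flavor, which sync_printer does not implement:
#
#    sync printer: test
#    sync_printer: async print_message() not implemented
#    sync_printer: async print_message() not implemented
#    sync_printer: async print_message() not implemented
#   async printer: test
#   async printer: test (Sync)
#   async printer: test (Async)
#   async printer: test (Task)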
| mit |