repo_name | ref | path | copies | content
---|---|---|---|---
jamiefolsom/edx-platform | refs/heads/master | openedx/core/djangoapps/course_groups/management/commands/tests/test_remove_users_from_multiple_cohorts.py | 91 | """
Tests for cleanup of users who are added to multiple cohorts of a course
"""
from django.core.exceptions import MultipleObjectsReturned
from django.core.management import call_command
from django.test.client import RequestFactory
from openedx.core.djangoapps.course_groups.views import cohort_handler
from openedx.core.djangoapps.course_groups.cohorts import get_cohort, get_cohort_by_name
from openedx.core.djangoapps.course_groups.tests.helpers import config_course_cohorts
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class TestMultipleCohortUsers(ModuleStoreTestCase):
"""
Base class for testing users with multiple cohorts
"""
def setUp(self):
"""
        Set up courses, users and a request for tests
"""
super(TestMultipleCohortUsers, self).setUp()
self.course1 = CourseFactory.create()
self.course2 = CourseFactory.create()
self.user1 = UserFactory(is_staff=True)
self.user2 = UserFactory(is_staff=True)
self.request = RequestFactory().get("dummy_url")
self.request.user = self.user1
def test_users_with_multiple_cohorts_cleanup(self):
"""
        Test that users who have been added to multiple cohorts of a course
        can get cohorts without error after running the cohorts cleanup command
"""
# set two auto_cohort_groups for both courses
config_course_cohorts(
self.course1, is_cohorted=True, auto_cohorts=["Course1AutoGroup1", "Course1AutoGroup2"]
)
config_course_cohorts(
self.course2, is_cohorted=True, auto_cohorts=["Course2AutoGroup1", "Course2AutoGroup2"]
)
# get the cohorts from the courses, which will cause auto cohorts to be created
cohort_handler(self.request, unicode(self.course1.id))
cohort_handler(self.request, unicode(self.course2.id))
course_1_auto_cohort_1 = get_cohort_by_name(self.course1.id, "Course1AutoGroup1")
course_1_auto_cohort_2 = get_cohort_by_name(self.course1.id, "Course1AutoGroup2")
course_2_auto_cohort_1 = get_cohort_by_name(self.course2.id, "Course2AutoGroup1")
        # forcefully add user1 to two auto cohorts of the same course
        course_1_auto_cohort_1.users.add(self.user1)
        course_1_auto_cohort_2.users.add(self.user1)
        # forcefully add user2 to one auto cohort in each course
        course_1_auto_cohort_1.users.add(self.user2)
        course_2_auto_cohort_1.users.add(self.user2)
        # now check that when user1 goes to the discussion page and tries to
        # get cohorts, a 'MultipleObjectsReturned' exception is raised
        with self.assertRaises(MultipleObjectsReturned):
            get_cohort(self.user1, self.course1.id)
        # also check that user2 can visit the discussion page of both courses
        # without any exception
        get_cohort(self.user2, self.course1.id)
        get_cohort(self.user2, self.course2.id)
        # call the command so that users added to multiple cohorts of a course
        # are removed from all of that course's cohort groups
        call_command('remove_users_from_multiple_cohorts')
        # check that only user1 (with multiple cohorts) is removed from cohorts
        # and user2 is still in the auto cohorts of both courses after running
        # the 'remove_users_from_multiple_cohorts' management command
self.assertEqual(self.user1.course_groups.count(), 0)
self.assertEqual(self.user2.course_groups.count(), 2)
user2_cohorts = list(self.user2.course_groups.values_list('name', flat=True))
self.assertEqual(user2_cohorts, ['Course1AutoGroup1', 'Course2AutoGroup1'])
        # now check that user1 can fetch the course's cohorts again without error
response = cohort_handler(self.request, unicode(self.course1.id))
self.assertEqual(response.status_code, 200)
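# Outside of tests, the cleanup exercised above is an ordinary Django
# management command; a hedged sketch of a direct invocation (the exact
# form depends on the deployment's manage.py wrapper):
#
#   python manage.py remove_users_from_multiple_cohorts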
|
fata1ex/django-statsy | refs/heads/master | tests/urls.py | 2 | # coding: utf-8
from django.conf.urls import url
from tests.views import ViewFabric
def get_test_urlpatterns():
url_list, urlpatterns = [], []
for idx, (view, _) in enumerate(ViewFabric()):
url_part = r'^test_view_{0}'.format(idx)
url_name = url_part.strip('^')
urlpatterns.append(url(url_part, view, name=url_name))
url_list.append(url_name)
return urlpatterns, url_list
urlpatterns, test_url_list = get_test_urlpatterns()
|
o5k/openerp-oemedical-v0.1 | refs/heads/master | openerp/addons/hr_recruitment/wizard/hr_recruitment_employee_hired.py | 53 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hired_employee(osv.osv_memory):
_name = 'hired.employee'
_description = 'Create Employee'
def case_close(self, cr, uid, ids, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of case's Ids
        @param context: A standard dictionary for contextual values
"""
if context is None:
context = {}
        self.pool.get('hr.applicant').case_close(cr, uid, context.get('active_ids', []))
return {}
def case_close_with_emp(self, cr, uid, ids, context=None):
"""
@param self: The object pointer
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of case's Ids
"""
if context is None:
context = {}
        return self.pool.get('hr.applicant').case_close_with_emp(cr, uid, context.get('active_ids', []))
hired_employee()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
k3nnyfr/s2a_fr-nsis | refs/heads/master | s2a/Python/Lib/distutils/extension.py | 250 | """distutils.extension
Provides the Extension class, used to describe C/C++ extension
modules in setup scripts."""
__revision__ = "$Id$"
import os, string, sys
from types import *
try:
import warnings
except ImportError:
warnings = None
# This class is really only used by the "build_ext" command, so it might
# make sense to put it in distutils.command.build_ext. However, that
# module is already big enough, and I want to make this class a bit more
# complex to simplify some common cases ("foo" module in "foo.c") and do
# better error-checking ("foo.c" actually exists).
#
# Also, putting this in build_ext.py means every setup script would have to
# import that large-ish module (indirectly, through distutils.core) in
# order to do anything.
class Extension:
"""Just a collection of attributes that describes an extension
module and everything needed to build it (hopefully in a portable
way, but there are hooks that let you be as unportable as you need).
Instance attributes:
name : string
        the full name of the extension, including any packages -- i.e.
        *not* a filename or pathname, but a Python dotted name
sources : [string]
list of source filenames, relative to the distribution root
(where the setup script lives), in Unix form (slash-separated)
for portability. Source files may be C, C++, SWIG (.i),
platform-specific resource files, or whatever else is recognized
by the "build_ext" command as source for a Python extension.
include_dirs : [string]
list of directories to search for C/C++ header files (in Unix
form for portability)
define_macros : [(name : string, value : string|None)]
list of macros to define; each macro is defined using a 2-tuple,
where 'value' is either the string to define it to or None to
define it without a particular value (equivalent of "#define
FOO" in source or -DFOO on Unix C compiler command line)
undef_macros : [string]
list of macros to undefine explicitly
library_dirs : [string]
list of directories to search for C/C++ libraries at link time
libraries : [string]
list of library names (not filenames or paths) to link against
runtime_library_dirs : [string]
list of directories to search for C/C++ libraries at run time
(for shared extensions, this is when the extension is loaded)
extra_objects : [string]
list of extra files to link with (eg. object files not implied
by 'sources', static library that must be explicitly specified,
binary resource files, etc.)
extra_compile_args : [string]
any extra platform- and compiler-specific information to use
when compiling the source files in 'sources'. For platforms and
compilers where "command line" makes sense, this is typically a
list of command-line arguments, but for other platforms it could
be anything.
extra_link_args : [string]
any extra platform- and compiler-specific information to use
when linking object files together to create the extension (or
to create a new static Python interpreter). Similar
interpretation as for 'extra_compile_args'.
export_symbols : [string]
list of symbols to be exported from a shared extension. Not
used on all platforms, and not generally necessary for Python
extensions, which typically export exactly one symbol: "init" +
extension_name.
swig_opts : [string]
any extra options to pass to SWIG if a source file has the .i
extension.
depends : [string]
list of files that the extension depends on
language : string
extension language (i.e. "c", "c++", "objc"). Will be detected
from the source extensions if not provided.
"""
# When adding arguments to this constructor, be sure to update
# setup_keywords in core.py.
def __init__ (self, name, sources,
include_dirs=None,
define_macros=None,
undef_macros=None,
library_dirs=None,
libraries=None,
runtime_library_dirs=None,
extra_objects=None,
extra_compile_args=None,
extra_link_args=None,
export_symbols=None,
swig_opts = None,
depends=None,
language=None,
**kw # To catch unknown keywords
):
assert type(name) is StringType, "'name' must be a string"
assert (type(sources) is ListType and
map(type, sources) == [StringType]*len(sources)), \
"'sources' must be a list of strings"
self.name = name
self.sources = sources
self.include_dirs = include_dirs or []
self.define_macros = define_macros or []
self.undef_macros = undef_macros or []
self.library_dirs = library_dirs or []
self.libraries = libraries or []
self.runtime_library_dirs = runtime_library_dirs or []
self.extra_objects = extra_objects or []
self.extra_compile_args = extra_compile_args or []
self.extra_link_args = extra_link_args or []
self.export_symbols = export_symbols or []
self.swig_opts = swig_opts or []
self.depends = depends or []
self.language = language
# If there are unknown keyword options, warn about them
if len(kw):
L = kw.keys() ; L.sort()
L = map(repr, L)
msg = "Unknown Extension options: " + string.join(L, ', ')
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + '\n')
# class Extension
def read_setup_file (filename):
from distutils.sysconfig import \
parse_makefile, expand_makefile_vars, _variable_rx
from distutils.text_file import TextFile
from distutils.util import split_quoted
# First pass over the file to gather "VAR = VALUE" assignments.
vars = parse_makefile(filename)
# Second pass to gobble up the real content: lines of the form
# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...]
file = TextFile(filename,
strip_comments=1, skip_blanks=1, join_lines=1,
lstrip_ws=1, rstrip_ws=1)
try:
extensions = []
while 1:
line = file.readline()
if line is None: # eof
break
if _variable_rx.match(line): # VAR=VALUE, handled in first pass
continue
if line[0] == line[-1] == "*":
file.warn("'%s' lines not handled yet" % line)
continue
#print "original line: " + line
line = expand_makefile_vars(line, vars)
words = split_quoted(line)
#print "expanded line: " + line
# NB. this parses a slightly different syntax than the old
# makesetup script: here, there must be exactly one extension per
# line, and it must be the first word of the line. I have no idea
# why the old syntax supported multiple extensions per line, as
# they all wind up being the same.
module = words[0]
ext = Extension(module, [])
append_next_word = None
for word in words[1:]:
if append_next_word is not None:
append_next_word.append(word)
append_next_word = None
continue
suffix = os.path.splitext(word)[1]
switch = word[0:2] ; value = word[2:]
if suffix in (".c", ".cc", ".cpp", ".cxx", ".c++", ".m", ".mm"):
# hmm, should we do something about C vs. C++ sources?
# or leave it up to the CCompiler implementation to
# worry about?
ext.sources.append(word)
elif switch == "-I":
ext.include_dirs.append(value)
elif switch == "-D":
equals = string.find(value, "=")
if equals == -1: # bare "-DFOO" -- no value
ext.define_macros.append((value, None))
else: # "-DFOO=blah"
                        # keep everything after the '=' as the macro value
                        ext.define_macros.append((value[0:equals],
                                                  value[equals+1:]))
elif switch == "-U":
ext.undef_macros.append(value)
elif switch == "-C": # only here 'cause makesetup has it!
ext.extra_compile_args.append(word)
elif switch == "-l":
ext.libraries.append(value)
elif switch == "-L":
ext.library_dirs.append(value)
elif switch == "-R":
ext.runtime_library_dirs.append(value)
elif word == "-rpath":
append_next_word = ext.runtime_library_dirs
elif word == "-Xlinker":
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif word == "-Xcompiler":
append_next_word = ext.extra_compile_args
elif switch == "-u":
ext.extra_link_args.append(word)
if not value:
append_next_word = ext.extra_link_args
elif suffix in (".a", ".so", ".sl", ".o", ".dylib"):
# NB. a really faithful emulation of makesetup would
# append a .o file to extra_objects only if it
# had a slash in it; otherwise, it would s/.o/.c/
# and append it to sources. Hmmmm.
ext.extra_objects.append(word)
else:
file.warn("unrecognized argument '%s'" % word)
extensions.append(ext)
finally:
file.close()
#print "module:", module
#print "source files:", source_files
#print "cpp args:", cpp_args
#print "lib args:", library_args
#extensions[module] = { 'sources': source_files,
# 'cpp_args': cpp_args,
# 'lib_args': library_args }
return extensions
# read_setup_file ()
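# A minimal usage sketch (illustrative only, not part of this module): the
# Extension class described above is normally consumed by a setup script.
# The "demo" name and demo.c file below are hypothetical.
#
#   from distutils.core import setup
#   from distutils.extension import Extension
#
#   setup(name="demo",
#         ext_modules=[Extension("demo", ["demo.c"],
#                                define_macros=[("NDEBUG", None)],
#                                include_dirs=["include"])])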
|
abhijeet9920/python_project | refs/heads/master | develop/lib/python3.4/site-packages/pip/_vendor/requests/certs.py | 516 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
requests.certs
~~~~~~~~~~~~~~
This module returns the preferred default CA certificate bundle.
If you are packaging Requests, e.g., for a Linux distribution or a managed
environment, you can change the definition of where() to return a separately
packaged CA bundle.
"""
import os.path
try:
from certifi import where
except ImportError:
def where():
"""Return the preferred certificate bundle."""
# vendored bundle inside Requests
return os.path.join(os.path.dirname(__file__), 'cacert.pem')
if __name__ == '__main__':
print(where())
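# Illustrative note for packagers (per the module docstring): a downstream
# distribution could replace where() so it returns a system bundle instead,
# e.g. (the path is an assumption, shown for a Debian-style layout):
#
#   def where():
#       return '/etc/ssl/certs/ca-certificates.crt'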
|
hernad/frappe | refs/heads/develop | frappe/custom/doctype/property_setter/__init__.py | 1829 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
|
godiard/sugar | refs/heads/master | tests/extensions/cpsection/webaccount/services/mock/service.py | 12 | # Copyright (C) 2013, Walter Bender - Raul Gutierrez Segales
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from gi.repository import Gtk
from jarabe.webservice import accountsmanager
from cpsection.webaccount.web_service import WebService
_SERVICE_NAME = 'mock'
class WebService(WebService):
def __init__(self):
self._account = accountsmanager.get_account(_SERVICE_NAME)
def get_icon_name(self):
return _SERVICE_NAME
def config_service_cb(self, widget, event, container):
label = Gtk.Label(_SERVICE_NAME)
for c in container.get_children():
container.remove(c)
container.add(label)
container.show_all()
def get_service():
return WebService()
|
iglpdc/nipype | refs/heads/master | nipype/pipeline/plugins/oar.py | 3 | """Parallel workflow execution via OAR http://oar.imag.fr
"""
import os
import stat
from time import sleep
import subprocess
import json
from .base import (SGELikeBatchManagerBase, logger, iflogger, logging)
from nipype.interfaces.base import CommandLine
class OARPlugin(SGELikeBatchManagerBase):
"""Execute using OAR
The plugin_args input to run can be used to control the OAR execution.
Currently supported options are:
- template : template to use for batch job submission
- oarsub_args : arguments to be prepended to the job execution
script in the oarsub call
- max_jobname_len: maximum length of the job name. Default 15.
"""
    # Additional class variables
_max_jobname_len = 15
_oarsub_args = ''
def __init__(self, **kwargs):
template = """
# oarsub -J
"""
self._retry_timeout = 2
self._max_tries = 2
        self._max_jobname_len = 15
if 'plugin_args' in kwargs and kwargs['plugin_args']:
if 'retry_timeout' in kwargs['plugin_args']:
self._retry_timeout = kwargs['plugin_args']['retry_timeout']
if 'max_tries' in kwargs['plugin_args']:
self._max_tries = kwargs['plugin_args']['max_tries']
if 'max_jobname_len' in kwargs['plugin_args']:
self._max_jobname_len = \
kwargs['plugin_args']['max_jobname_len']
super(OARPlugin, self).__init__(template, **kwargs)
def _is_pending(self, taskid):
# subprocess.Popen requires taskid to be a string
proc = subprocess.Popen(
['oarstat', '-J', '-s',
'-j', taskid],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
o, e = proc.communicate()
parsed_result = json.loads(o)[taskid].lower()
is_pending = (
('error' not in parsed_result) and
('terminated' not in parsed_result)
)
return is_pending
def _submit_batchtask(self, scriptfile, node):
cmd = CommandLine('oarsub', environ=os.environ.data,
terminal_output='allatonce')
path = os.path.dirname(scriptfile)
oarsubargs = ''
if self._oarsub_args:
oarsubargs = self._oarsub_args
if 'oarsub_args' in node.plugin_args:
if (
'overwrite' in node.plugin_args and
node.plugin_args['overwrite']
):
oarsubargs = node.plugin_args['oarsub_args']
else:
oarsubargs += (" " + node.plugin_args['oarsub_args'])
if node._hierarchy:
jobname = '.'.join((os.environ.data['LOGNAME'],
node._hierarchy,
node._id))
else:
jobname = '.'.join((os.environ.data['LOGNAME'],
node._id))
jobnameitems = jobname.split('.')
jobnameitems.reverse()
jobname = '.'.join(jobnameitems)
jobname = jobname[0:self._max_jobname_len]
if '-O' not in oarsubargs:
oarsubargs = '%s -O %s' % (
oarsubargs,
os.path.join(path, jobname + '.stdout')
)
if '-E' not in oarsubargs:
oarsubargs = '%s -E %s' % (
oarsubargs,
os.path.join(path, jobname + '.stderr')
)
if '-J' not in oarsubargs:
oarsubargs = '%s -J' % (oarsubargs)
os.chmod(scriptfile, stat.S_IEXEC | stat.S_IREAD | stat.S_IWRITE)
cmd.inputs.args = '%s -n %s -S %s' % (
oarsubargs,
jobname,
scriptfile
)
oldlevel = iflogger.level
iflogger.setLevel(logging.getLevelName('CRITICAL'))
tries = 0
while True:
try:
result = cmd.run()
except Exception as e:
if tries < self._max_tries:
tries += 1
sleep(self._retry_timeout)
                    # wait retry_timeout seconds and try again.
else:
iflogger.setLevel(oldlevel)
raise RuntimeError('\n'.join((('Could not submit OAR task'
' for node %s') % node._id,
str(e))))
else:
break
iflogger.setLevel(oldlevel)
# retrieve OAR taskid
o = ''
add = False
for line in result.runtime.stdout.splitlines():
if line.strip().startswith('{'):
add = True
if add:
o += line + '\n'
if line.strip().startswith('}'):
break
taskid = json.loads(o)['job_id']
self._pending[taskid] = node.output_dir()
logger.debug('submitted OAR task: %s for node %s' % (taskid, node._id))
return taskid
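# A hedged usage sketch (not part of this module): selecting this plugin when
# running a nipype workflow; 'wf' is a hypothetical Workflow instance.
#
#   wf.run(plugin='OAR',
#          plugin_args={'oarsub_args': '-q default',
#                       'max_jobname_len': 15,
#                       'retry_timeout': 2,
#                       'max_tries': 2})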
|
ZobairAlijan/osf.io | refs/heads/develop | website/addons/forward/tests/test_models.py | 44 | # -*- coding: utf-8 -*-
from nose.tools import * # PEP8 asserts
from modularodm.exceptions import ValidationError
from tests.base import OsfTestCase
from website.addons.forward.tests.factories import ForwardSettingsFactory
class TestSettingsValidation(OsfTestCase):
def setUp(self):
super(TestSettingsValidation, self).setUp()
self.settings = ForwardSettingsFactory()
def test_validate_url_bad(self):
self.settings.url = 'badurl'
with assert_raises(ValidationError):
self.settings.save()
def test_validate_url_good(self):
self.settings.url = 'http://frozen.pizza.reviews/'
try:
self.settings.save()
except ValidationError:
assert 0
def test_validate_redirect_bool_bad(self):
self.settings.redirect_bool = 'notabool'
with assert_raises(ValidationError):
self.settings.save()
def test_validate_redirect_bool_good(self):
self.settings.redirect_bool = False
try:
self.settings.save()
except ValidationError:
assert 0
def test_validate_redirect_secs_bad(self):
self.settings.redirect_secs = -2
with assert_raises(ValidationError):
self.settings.save()
def test_validate_redirect_secs_good(self):
self.settings.redirect_secs = 20
try:
self.settings.save()
except ValidationError:
assert 0
def test_label_sanitary(self):
self.settings.label = 'safe'
try:
self.settings.save()
except ValidationError:
assert False
def test_label_unsanitary(self):
self.settings.label = 'un<br />safe'
with assert_raises(ValidationError):
self.settings.save()
|
JizhouZhang/SDR | refs/heads/master | gr-blocks/python/blocks/qa_hier_block2.py | 16 | #!/usr/bin/env python
from gnuradio import gr, gr_unittest, blocks
import numpy
class add_ff(gr.sync_block):
def __init__(self):
gr.sync_block.__init__(
self,
name = "add_ff",
in_sig = [numpy.float32, numpy.float32],
out_sig = [numpy.float32],
)
def work(self, input_items, output_items):
output_items[0][:] = input_items[0] + input_items[1]
return len(output_items[0])
class multiply_const_ff(gr.sync_block):
def __init__(self, k):
gr.sync_block.__init__(
self,
name = "multiply_ff",
in_sig = [numpy.float32],
out_sig = [numpy.float32],
)
self.k = k
def work(self, input_items, output_items):
output_items[0][:] = map(lambda x: self.k*x, input_items[0])
return len(output_items[0])
class test_hier_block2(gr_unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_001_make(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
self.assertEqual("test_block", hblock.name())
self.assertEqual(1, hblock.input_signature().max_streams())
self.assertEqual(1, hblock.output_signature().min_streams())
self.assertEqual(1, hblock.output_signature().max_streams())
self.assertEqual(gr.sizeof_int, hblock.output_signature().sizeof_stream_item(0))
def test_002_connect_input(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
hblock.connect(hblock, nop1)
def test_004_connect_output(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
hblock.connect(nop1, hblock)
def test_005_connect_output_in_use(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
nop2 = blocks.nop(gr.sizeof_int)
hblock.connect(nop1, hblock)
self.assertRaises(ValueError,
lambda: hblock.connect(nop2, hblock))
def test_006_connect_invalid_src_port_neg(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
self.assertRaises(ValueError,
lambda: hblock.connect((hblock, -1), nop1))
def test_005_connect_invalid_src_port_exceeds(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
self.assertRaises(ValueError,
lambda: hblock.connect((hblock, 1), nop1))
def test_007_connect_invalid_dst_port_neg(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
nop2 = blocks.nop(gr.sizeof_int)
self.assertRaises(ValueError,
lambda: hblock.connect(nop1, (nop2, -1)))
def test_008_connect_invalid_dst_port_exceeds(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.null_sink(gr.sizeof_int)
nop2 = blocks.null_sink(gr.sizeof_int)
self.assertRaises(ValueError,
lambda: hblock.connect(nop1, (nop2, 1)))
def test_009_check_topology(self):
hblock = gr.top_block("test_block")
hblock.check_topology(0, 0)
def test_010_run(self):
expected = (1.0, 2.0, 3.0, 4.0)
hblock = gr.top_block("test_block")
src = blocks.vector_source_f(expected, False)
sink1 = blocks.vector_sink_f()
sink2 = blocks.vector_sink_f()
hblock.connect(src, sink1)
hblock.connect(src, sink2)
hblock.run()
actual1 = sink1.data()
actual2 = sink2.data()
self.assertEquals(expected, actual1)
self.assertEquals(expected, actual2)
def test_012_disconnect_input(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
hblock.connect(hblock, nop1)
hblock.disconnect(hblock, nop1)
def test_013_disconnect_input_not_connected(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
nop2 = blocks.nop(gr.sizeof_int)
hblock.connect(hblock, nop1)
self.assertRaises(ValueError,
lambda: hblock.disconnect(hblock, nop2))
def test_014_disconnect_input_neg(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
hblock.connect(hblock, nop1)
self.assertRaises(ValueError,
lambda: hblock.disconnect((hblock, -1), nop1))
def test_015_disconnect_input_exceeds(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
hblock.connect(hblock, nop1)
self.assertRaises(ValueError,
lambda: hblock.disconnect((hblock, 1), nop1))
def test_016_disconnect_output(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
hblock.connect(nop1, hblock)
hblock.disconnect(nop1, hblock)
def test_017_disconnect_output_not_connected(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
nop2 = blocks.nop(gr.sizeof_int)
hblock.connect(nop1, hblock)
self.assertRaises(ValueError,
lambda: hblock.disconnect(nop2, hblock))
def test_018_disconnect_output_neg(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
hblock.connect(hblock, nop1)
self.assertRaises(ValueError,
lambda: hblock.disconnect(nop1, (hblock, -1)))
def test_019_disconnect_output_exceeds(self):
hblock = gr.hier_block2("test_block",
gr.io_signature(1,1,gr.sizeof_int),
gr.io_signature(1,1,gr.sizeof_int))
nop1 = blocks.nop(gr.sizeof_int)
hblock.connect(nop1, hblock)
self.assertRaises(ValueError,
lambda: hblock.disconnect(nop1, (hblock, 1)))
def test_020_run(self):
hblock = gr.top_block("test_block")
data = (1.0, 2.0, 3.0, 4.0)
src = blocks.vector_source_f(data, False)
dst = blocks.vector_sink_f()
hblock.connect(src, dst)
hblock.run()
self.assertEquals(data, dst.data())
def test_021_connect_single(self):
hblock = gr.top_block("test_block")
blk = gr.hier_block2("block",
gr.io_signature(0, 0, 0),
gr.io_signature(0, 0, 0))
hblock.connect(blk)
def test_022_connect_single_with_ports(self):
hblock = gr.top_block("test_block")
blk = gr.hier_block2("block",
gr.io_signature(1, 1, 1),
gr.io_signature(1, 1, 1))
self.assertRaises(ValueError,
lambda: hblock.connect(blk))
def test_023_connect_single_twice(self):
hblock = gr.top_block("test_block")
blk = gr.hier_block2("block",
gr.io_signature(0, 0, 0),
gr.io_signature(0, 0, 0))
hblock.connect(blk)
self.assertRaises(ValueError,
lambda: hblock.connect(blk))
def test_024_disconnect_single(self):
hblock = gr.top_block("test_block")
blk = gr.hier_block2("block",
gr.io_signature(0, 0, 0),
gr.io_signature(0, 0, 0))
hblock.connect(blk)
hblock.disconnect(blk)
def test_025_disconnect_single_not_connected(self):
hblock = gr.top_block("test_block")
blk = gr.hier_block2("block",
gr.io_signature(0, 0, 0),
gr.io_signature(0, 0, 0))
self.assertRaises(ValueError,
lambda: hblock.disconnect(blk))
def test_026_run_single(self):
expected_data = (1.0,)
tb = gr.top_block("top_block")
hb = gr.hier_block2("block",
gr.io_signature(0, 0, 0),
gr.io_signature(0, 0, 0))
src = blocks.vector_source_f(expected_data)
dst = blocks.vector_sink_f()
hb.connect(src, dst)
tb.connect(hb)
tb.run()
self.assertEquals(expected_data, dst.data())
def test_027a_internally_unconnected_input(self):
tb = gr.top_block()
hb = gr.hier_block2("block",
gr.io_signature(1, 1, 1),
gr.io_signature(1, 1, 1))
hsrc = blocks.vector_source_b([1,])
hb.connect(hsrc, hb) # wire output internally
src = blocks.vector_source_b([1, ])
dst = blocks.vector_sink_b()
tb.connect(src, hb, dst) # hb's input is not connected internally
self.assertRaises(RuntimeError,
lambda: tb.run())
def test_027b_internally_unconnected_output(self):
tb = gr.top_block()
hb = gr.hier_block2("block",
gr.io_signature(1, 1, 1),
gr.io_signature(1, 1, 1))
hdst = blocks.vector_sink_b()
hb.connect(hb, hdst) # wire input internally
src = blocks.vector_source_b([1, ])
dst = blocks.vector_sink_b()
tb.connect(src, hb, dst) # hb's output is not connected internally
self.assertRaises(RuntimeError,
lambda: tb.run())
def test_027c_fully_unconnected_output(self):
tb = gr.top_block()
hb = gr.hier_block2("block",
gr.io_signature(1, 1, 1),
gr.io_signature(1, 1, 1))
hsrc = blocks.vector_sink_b()
hb.connect(hb, hsrc) # wire input internally
src = blocks.vector_source_b([1, ])
dst = blocks.vector_sink_b()
tb.connect(src, hb) # hb's output is not connected internally or externally
self.assertRaises(RuntimeError,
lambda: tb.run())
def test_027d_fully_unconnected_input(self):
tb = gr.top_block()
hb = gr.hier_block2("block",
gr.io_signature(1, 1, 1),
gr.io_signature(1, 1, 1))
hdst = blocks.vector_source_b([1,])
hb.connect(hdst, hb) # wire output internally
dst = blocks.vector_sink_b()
tb.connect(hb, dst) # hb's input is not connected internally or externally
self.assertRaises(RuntimeError,
lambda: tb.run())
def test_028_singleton_reconfigure(self):
tb = gr.top_block()
hb = gr.hier_block2("block",
gr.io_signature(0, 0, 0), gr.io_signature(0, 0, 0))
src = blocks.vector_source_b([1, ])
dst = blocks.vector_sink_b()
hb.connect(src, dst)
tb.connect(hb) # Singleton connect
tb.lock()
tb.disconnect_all()
tb.connect(src, dst)
tb.unlock()
def test_029_singleton_disconnect(self):
tb = gr.top_block()
src = blocks.vector_source_b([1, ])
dst = blocks.vector_sink_b()
tb.connect(src, dst)
tb.disconnect(src) # Singleton disconnect
tb.connect(src, dst)
tb.run()
self.assertEquals(dst.data(), (1,))
def test_030_nested_input(self):
tb = gr.top_block()
src = blocks.vector_source_b([1,])
hb1 = gr.hier_block2("hb1",
gr.io_signature(1, 1, gr.sizeof_char),
gr.io_signature(0, 0, 0))
hb2 = gr.hier_block2("hb2",
gr.io_signature(1, 1, gr.sizeof_char),
gr.io_signature(0, 0, 0))
dst = blocks.vector_sink_b()
tb.connect(src, hb1)
hb1.connect(hb1, hb2)
hb2.connect(hb2, blocks.copy(gr.sizeof_char), dst)
tb.run()
self.assertEquals(dst.data(), (1,))
def test_031_multiple_internal_inputs(self):
tb = gr.top_block()
src = blocks.vector_source_f([1.0,])
hb = gr.hier_block2("hb",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_float))
m1 = multiply_const_ff(1.0)
m2 = multiply_const_ff(2.0)
add = add_ff()
hb.connect(hb, m1) # m1 is connected to hb external input #0
hb.connect(hb, m2) # m2 is also connected to hb external input #0
hb.connect(m1, (add, 0))
hb.connect(m2, (add, 1))
hb.connect(add, hb) # add is connected to hb external output #0
dst = blocks.vector_sink_f()
tb.connect(src, hb, dst)
tb.run()
self.assertEquals(dst.data(), (3.0,))
def test_032_nested_multiple_internal_inputs(self):
tb = gr.top_block()
src = blocks.vector_source_f([1.0,])
hb = gr.hier_block2("hb",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_float))
hb2 = gr.hier_block2("hb",
gr.io_signature(1, 1, gr.sizeof_float),
gr.io_signature(1, 1, gr.sizeof_float))
m1 = multiply_const_ff(1.0)
m2 = multiply_const_ff(2.0)
add = add_ff()
hb2.connect(hb2, m1) # m1 is connected to hb2 external input #0
hb2.connect(hb2, m2) # m2 is also connected to hb2 external input #0
hb2.connect(m1, (add, 0))
hb2.connect(m2, (add, 1))
hb2.connect(add, hb2) # add is connected to hb2 external output #0
        hb.connect(hb, hb2, hb)  # hb wraps hb2 as a nested internal block
dst = blocks.vector_sink_f()
tb.connect(src, hb, dst)
tb.run()
self.assertEquals(dst.data(), (3.0,))
def test_033a_set_affinity(self):
expected = (1.0, 2.0, 3.0, 4.0)
hblock = gr.top_block("test_block")
src = blocks.vector_source_f(expected, False)
snk = blocks.vector_sink_f()
hblock.connect(src, snk)
hblock.set_processor_affinity([0,])
hblock.run()
actual = snk.data()
self.assertEquals(expected, actual)
def test_033b_unset_affinity(self):
expected = (1.0, 2.0, 3.0, 4.0)
hblock = gr.top_block("test_block")
src = blocks.vector_source_f(expected, False)
snk = blocks.vector_sink_f()
hblock.connect(src, snk)
hblock.set_processor_affinity([0,])
hblock.unset_processor_affinity()
hblock.run()
actual = snk.data()
self.assertEquals(expected, actual)
def test_033c_get_affinity(self):
expected = (1.0, 2.0, 3.0, 4.0)
hblock = gr.top_block("test_block")
src = blocks.vector_source_f(expected, False)
snk = blocks.vector_sink_f()
hblock.connect(src, snk)
hblock.set_processor_affinity([0,])
procs = hblock.processor_affinity()
self.assertEquals((0,), procs)
if __name__ == "__main__":
gr_unittest.run(test_hier_block2, "test_hier_block2.xml")
|
SunnyRao/python3-webapp | refs/heads/master | www/markdown2.py | 59 | #!/usr/bin/env python
# Copyright (c) 2012 Trent Mick.
# Copyright (c) 2007-2008 ActiveState Corp.
# License: MIT (http://www.opensource.org/licenses/mit-license.php)
from __future__ import generators
r"""A fast and complete Python implementation of Markdown.
[from http://daringfireball.net/projects/markdown/]
> Markdown is a text-to-HTML filter; it translates an easy-to-read /
> easy-to-write structured text format into HTML. Markdown's text
> format is most similar to that of plain text email, and supports
> features such as headers, *emphasis*, code blocks, blockquotes, and
> links.
>
> Markdown's syntax is designed not as a generic markup language, but
> specifically to serve as a front-end to (X)HTML. You can use span-level
> HTML tags anywhere in a Markdown document, and you can use block level
> HTML tags (like <div> and <table> as well).
Module usage:
>>> import markdown2
>>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)`
u'<p><em>boo!</em></p>\n'
>>> markdowner = Markdown()
>>> markdowner.convert("*boo!*")
u'<p><em>boo!</em></p>\n'
>>> markdowner.convert("**boom!**")
u'<p><strong>boom!</strong></p>\n'
This implementation of Markdown implements the full "core" syntax plus a
number of extras (e.g., code syntax coloring, footnotes) as described on
<https://github.com/trentm/python-markdown2/wiki/Extras>.
"""
cmdln_desc = """A fast and complete Python implementation of Markdown, a
text-to-HTML conversion tool for web writers.
Supported extra syntax options (see -x|--extras option below and
see <https://github.com/trentm/python-markdown2/wiki/Extras> for details):
* code-friendly: Disable _ and __ for em and strong.
* cuddled-lists: Allow lists to be cuddled to the preceding paragraph.
* fenced-code-blocks: Allows a code block to not have to be indented
by fencing it with '```' on a line before and after. Based on
<http://github.github.com/github-flavored-markdown/> with support for
syntax highlighting.
* footnotes: Support footnotes as in use on daringfireball.net and
implemented in other Markdown processors (tho not in Markdown.pl v1.0.1).
* header-ids: Adds "id" attributes to headers. The id value is a slug of
the header text.
* html-classes: Takes a dict mapping html tag names (lowercase) to a
string to use for a "class" tag attribute. Currently only supports
"pre" and "code" tags. Add an issue if you require this for other tags.
* markdown-in-html: Allow the use of `markdown="1"` in a block HTML tag to
have markdown processing be done on its contents. Similar to
<http://michelf.com/projects/php-markdown/extra/#markdown-attr> but with
some limitations.
* metadata: Extract metadata from a leading '---'-fenced block.
See <https://github.com/trentm/python-markdown2/issues/77> for details.
* nofollow: Add `rel="nofollow"` to add `<a>` tags with an href. See
<http://en.wikipedia.org/wiki/Nofollow>.
* pyshell: Treats unindented Python interactive shell sessions as <code>
blocks.
* link-patterns: Auto-link given regex patterns in text (e.g. bug number
references, revision number references).
* smarty-pants: Replaces ' and " with curly quotation marks or curly
apostrophes. Replaces --, ---, ..., and . . . with en dashes, em dashes,
and ellipses.
* toc: The returned HTML string gets a new "toc_html" attribute which is
a Table of Contents for the document. (experimental)
* xml: Passes one-liner processing instructions and namespaced XML tags.
* tables: Tables using the same format as GFM
<https://help.github.com/articles/github-flavored-markdown#tables> and
PHP-Markdown Extra <https://michelf.ca/projects/php-markdown/extra/#table>.
* wiki-tables: Google Code Wiki-style tables. See
<http://code.google.com/p/support/wiki/WikiSyntax#Tables>.
"""
# Dev Notes:
# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm
#   not yet sure if there are implications with this. Compare 'pydoc sre'
# and 'perldoc perlre'.
__version_info__ = (2, 3, 0)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Trent Mick"
import os
import sys
from pprint import pprint, pformat
import re
import logging
try:
from hashlib import md5
except ImportError:
from md5 import md5
import optparse
from random import random, randint
import codecs
#---- Python version compat
try:
from urllib.parse import quote # python3
except ImportError:
from urllib import quote # python2
if sys.version_info[:2] < (2,4):
from sets import Set as set
def reversed(sequence):
for i in sequence[::-1]:
yield i
# Use `bytes` for byte strings and `unicode` for unicode strings (str in Py3).
if sys.version_info[0] <= 2:
py3 = False
try:
bytes
except NameError:
bytes = str
base_string_type = basestring
elif sys.version_info[0] >= 3:
py3 = True
unicode = str
base_string_type = str
#---- globals
DEBUG = False
log = logging.getLogger("markdown")
DEFAULT_TAB_WIDTH = 4
SECRET_SALT = bytes(randint(0, 1000000))
def _hash_text(s):
return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest()
# Table of hash values for escaped characters:
g_escape_table = dict([(ch, _hash_text(ch))
for ch in '\\`*_{}[]()>#+-.!'])
#---- exceptions
class MarkdownError(Exception):
pass
#---- public api
def markdown_path(path, encoding="utf-8",
html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
fp = codecs.open(path, 'r', encoding)
text = fp.read()
fp.close()
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH,
safe_mode=None, extras=None, link_patterns=None,
use_file_vars=False):
return Markdown(html4tags=html4tags, tab_width=tab_width,
safe_mode=safe_mode, extras=extras,
link_patterns=link_patterns,
use_file_vars=use_file_vars).convert(text)
class Markdown(object):
# The dict of "extras" to enable in processing -- a mapping of
# extra name to argument for the extra. Most extras do not have an
# argument, in which case the value is None.
#
# This can be set via (a) subclassing and (b) the constructor
# "extras" argument.
extras = None
urls = None
titles = None
html_blocks = None
html_spans = None
html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py
# Used to track when we're inside an ordered or unordered list
# (see _ProcessListItems() for details):
list_level = 0
_ws_only_line_re = re.compile(r"^[ \t]+$", re.M)
def __init__(self, html4tags=False, tab_width=4, safe_mode=None,
extras=None, link_patterns=None, use_file_vars=False):
if html4tags:
self.empty_element_suffix = ">"
else:
self.empty_element_suffix = " />"
self.tab_width = tab_width
# For compatibility with earlier markdown2.py and with
# markdown.py's safe_mode being a boolean,
# safe_mode == True -> "replace"
if safe_mode is True:
self.safe_mode = "replace"
else:
self.safe_mode = safe_mode
# Massaging and building the "extras" info.
if self.extras is None:
self.extras = {}
elif not isinstance(self.extras, dict):
self.extras = dict([(e, None) for e in self.extras])
if extras:
if not isinstance(extras, dict):
extras = dict([(e, None) for e in extras])
self.extras.update(extras)
assert isinstance(self.extras, dict)
if "toc" in self.extras and not "header-ids" in self.extras:
self.extras["header-ids"] = None # "toc" implies "header-ids"
self._instance_extras = self.extras.copy()
self.link_patterns = link_patterns
self.use_file_vars = use_file_vars
self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M)
self._escape_table = g_escape_table.copy()
if "smarty-pants" in self.extras:
self._escape_table['"'] = _hash_text('"')
self._escape_table["'"] = _hash_text("'")
def reset(self):
self.urls = {}
self.titles = {}
self.html_blocks = {}
self.html_spans = {}
self.list_level = 0
self.extras = self._instance_extras.copy()
if "footnotes" in self.extras:
self.footnotes = {}
self.footnote_ids = []
if "header-ids" in self.extras:
self._count_from_header_id = {} # no `defaultdict` in Python 2.4
if "metadata" in self.extras:
self.metadata = {}
# Per <https://developer.mozilla.org/en-US/docs/HTML/Element/a> "rel"
# should only be used in <a> tags with an "href" attribute.
_a_nofollow = re.compile(r"<(a)([^>]*href=)", re.IGNORECASE)
def convert(self, text):
"""Convert the given text."""
# Main function. The order in which other subs are called here is
# essential. Link and image substitutions need to happen before
# _EscapeSpecialChars(), so that any *'s or _'s in the <a>
# and <img> tags get encoded.
# Clear the global hashes. If we don't clear these, you get conflicts
# from other articles when generating a page which contains more than
# one article (e.g. an index page that shows the N most recent
# articles):
self.reset()
if not isinstance(text, unicode):
#TODO: perhaps shouldn't presume UTF-8 for string input?
text = unicode(text, 'utf-8')
if self.use_file_vars:
# Look for emacs-style file variable hints.
emacs_vars = self._get_emacs_vars(text)
if "markdown-extras" in emacs_vars:
splitter = re.compile("[ ,]+")
for e in splitter.split(emacs_vars["markdown-extras"]):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
self.extras[ename] = earg
# Standardize line endings:
text = re.sub("\r\n|\r", "\n", text)
# Make sure $text ends with a couple of newlines:
text += "\n\n"
# Convert all tabs to spaces.
text = self._detab(text)
# Strip any lines consisting only of spaces and tabs.
# This makes subsequent regexen easier to write, because we can
# match consecutive blank lines with /\n+/ instead of something
# contorted like /[ \t]*\n+/ .
text = self._ws_only_line_re.sub("", text)
# strip metadata from head and extract
if "metadata" in self.extras:
text = self._extract_metadata(text)
text = self.preprocess(text)
if "fenced-code-blocks" in self.extras and not self.safe_mode:
text = self._do_fenced_code_blocks(text)
if self.safe_mode:
text = self._hash_html_spans(text)
# Turn block-level HTML blocks into hash entries
text = self._hash_html_blocks(text, raw=True)
if "fenced-code-blocks" in self.extras and self.safe_mode:
text = self._do_fenced_code_blocks(text)
# Strip link definitions, store in hashes.
if "footnotes" in self.extras:
# Must do footnotes first because an unlucky footnote defn
# looks like a link defn:
# [^4]: this "looks like a link defn"
text = self._strip_footnote_definitions(text)
text = self._strip_link_definitions(text)
text = self._run_block_gamut(text)
if "footnotes" in self.extras:
text = self._add_footnotes(text)
text = self.postprocess(text)
text = self._unescape_special_chars(text)
if self.safe_mode:
text = self._unhash_html_spans(text)
if "nofollow" in self.extras:
text = self._a_nofollow.sub(r'<\1 rel="nofollow"\2', text)
text += "\n"
rv = UnicodeWithAttrs(text)
if "toc" in self.extras:
rv._toc = self._toc
if "metadata" in self.extras:
rv.metadata = self.metadata
return rv
def postprocess(self, text):
"""A hook for subclasses to do some postprocessing of the html, if
desired. This is called before unescaping of special chars and
unhashing of raw HTML spans.
"""
return text
def preprocess(self, text):
"""A hook for subclasses to do some preprocessing of the Markdown, if
desired. This is called after basic formatting of the text, but prior
to any extras, safe mode, etc. processing.
"""
return text
# Is metadata if the content starts with '---'-fenced `key: value`
# pairs. E.g. (indented for presentation):
# ---
# foo: bar
# another-var: blah blah
# ---
_metadata_pat = re.compile("""^---[ \t]*\n((?:[ \t]*[^ \t:]+[ \t]*:[^\n]*\n)+)---[ \t]*\n""")
def _extract_metadata(self, text):
# fast test
if not text.startswith("---"):
return text
match = self._metadata_pat.match(text)
if not match:
return text
tail = text[len(match.group(0)):]
metadata_str = match.group(1).strip()
for line in metadata_str.split('\n'):
key, value = line.split(':', 1)
self.metadata[key.strip()] = value.strip()
return tail
_emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE)
# This regular expression is intended to match blocks like this:
# PREFIX Local Variables: SUFFIX
# PREFIX mode: Tcl SUFFIX
# PREFIX End: SUFFIX
# Some notes:
# - "[ \t]" is used instead of "\s" to specifically exclude newlines
# - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does
# not like anything other than Unix-style line terminators.
_emacs_local_vars_pat = re.compile(r"""^
(?P<prefix>(?:[^\r\n|\n|\r])*?)
[\ \t]*Local\ Variables:[\ \t]*
(?P<suffix>.*?)(?:\r\n|\n|\r)
(?P<content>.*?\1End:)
""", re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE)
def _get_emacs_vars(self, text):
"""Return a dictionary of emacs-style local variables.
Parsing is done loosely according to this spec (and according to
some in-practice deviations from this):
http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables
"""
emacs_vars = {}
SIZE = pow(2, 13) # 8kB
# Search near the start for a '-*-'-style one-liner of variables.
head = text[:SIZE]
if "-*-" in head:
match = self._emacs_oneliner_vars_pat.search(head)
if match:
emacs_vars_str = match.group(1)
assert '\n' not in emacs_vars_str
emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';')
if s.strip()]
if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]:
# While not in the spec, this form is allowed by emacs:
# -*- Tcl -*-
# where the implied "variable" is "mode". This form
# is only allowed if there are no other variables.
emacs_vars["mode"] = emacs_var_strs[0].strip()
else:
for emacs_var_str in emacs_var_strs:
try:
variable, value = emacs_var_str.strip().split(':', 1)
except ValueError:
log.debug("emacs variables error: malformed -*- "
"line: %r", emacs_var_str)
continue
# Lowercase the variable name because Emacs allows "Mode"
# or "mode" or "MoDe", etc.
emacs_vars[variable.lower()] = value.strip()
tail = text[-SIZE:]
if "Local Variables" in tail:
match = self._emacs_local_vars_pat.search(tail)
if match:
prefix = match.group("prefix")
suffix = match.group("suffix")
lines = match.group("content").splitlines(0)
#print "prefix=%r, suffix=%r, content=%r, lines: %s"\
# % (prefix, suffix, match.group("content"), lines)
# Validate the Local Variables block: proper prefix and suffix
# usage.
for i, line in enumerate(lines):
if not line.startswith(prefix):
log.debug("emacs variables error: line '%s' "
"does not use proper prefix '%s'"
% (line, prefix))
return {}
# Don't validate suffix on last line. Emacs doesn't care,
# neither should we.
if i != len(lines)-1 and not line.endswith(suffix):
log.debug("emacs variables error: line '%s' "
"does not use proper suffix '%s'"
% (line, suffix))
return {}
# Parse out one emacs var per line.
continued_for = None
for line in lines[:-1]: # no var on the last line ("PREFIX End:")
if prefix: line = line[len(prefix):] # strip prefix
if suffix: line = line[:-len(suffix)] # strip suffix
line = line.strip()
if continued_for:
variable = continued_for
if line.endswith('\\'):
line = line[:-1].rstrip()
else:
continued_for = None
emacs_vars[variable] += ' ' + line
else:
try:
variable, value = line.split(':', 1)
except ValueError:
log.debug("local variables error: missing colon "
"in local variables entry: '%s'" % line)
continue
# Do NOT lowercase the variable name, because Emacs only
# allows "mode" (and not "Mode", "MoDe", etc.) in this block.
value = value.strip()
if value.endswith('\\'):
value = value[:-1].rstrip()
continued_for = variable
else:
continued_for = None
emacs_vars[variable] = value
# Unquote values.
for var, val in list(emacs_vars.items()):
            if len(val) > 1 and (val.startswith('"') and val.endswith('"')
                    or val.startswith("'") and val.endswith("'")):
emacs_vars[var] = val[1:-1]
return emacs_vars
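    # Example (illustrative): with use_file_vars=True, a document starting
    # with the one-liner below would enable the "footnotes" and "toc" extras
    # via the "markdown-extras" variable consumed in convert():
    #
    #   -*- markdown-extras: footnotes, toc -*-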
# Cribbed from a post by Bart Lateur:
# <http://www.nntp.perl.org/group/perl.macperl.anyperl/154>
_detab_re = re.compile(r'(.*?)\t', re.M)
def _detab_sub(self, match):
g1 = match.group(1)
return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width))
def _detab(self, text):
r"""Remove (leading?) tabs from a file.
>>> m = Markdown()
>>> m._detab("\tfoo")
' foo'
>>> m._detab(" \tfoo")
' foo'
>>> m._detab("\t foo")
' foo'
>>> m._detab(" foo")
' foo'
>>> m._detab(" foo\n\tbar\tblam")
' foo\n bar blam'
"""
if '\t' not in text:
return text
return self._detab_re.subn(self._detab_sub, text)[0]
# I broke out the html5 tags here and add them to _block_tags_a and
# _block_tags_b. This way html5 tags are easy to keep track of.
_html5tags = '|article|aside|header|hgroup|footer|nav|section|figure|figcaption'
_block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del'
_block_tags_a += _html5tags
_strict_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_a,
re.X | re.M)
_block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math'
_block_tags_b += _html5tags
_liberal_tag_block_re = re.compile(r"""
( # save in \1
^ # start of line (with re.M)
<(%s) # start tag = \2
\b # word break
(.*\n)*? # any number of lines, minimally matching
.*</\2> # the matching end tag
[ \t]* # trailing spaces/tabs
(?=\n+|\Z) # followed by a newline or end of document
)
""" % _block_tags_b,
re.X | re.M)
_html_markdown_attr_re = re.compile(
r'''\s+markdown=("1"|'1')''')
def _hash_html_block_sub(self, match, raw=False):
html = match.group(1)
if raw and self.safe_mode:
html = self._sanitize_html(html)
elif 'markdown-in-html' in self.extras and 'markdown=' in html:
first_line = html.split('\n', 1)[0]
m = self._html_markdown_attr_re.search(first_line)
if m:
lines = html.split('\n')
middle = '\n'.join(lines[1:-1])
last_line = lines[-1]
first_line = first_line[:m.start()] + first_line[m.end():]
f_key = _hash_text(first_line)
self.html_blocks[f_key] = first_line
l_key = _hash_text(last_line)
self.html_blocks[l_key] = last_line
return ''.join(["\n\n", f_key,
"\n\n", middle, "\n\n",
l_key, "\n\n"])
key = _hash_text(html)
self.html_blocks[key] = html
return "\n\n" + key + "\n\n"
def _hash_html_blocks(self, text, raw=False):
"""Hashify HTML blocks
We only want to do this for block-level HTML tags, such as headers,
lists, and tables. That's because we still want to wrap <p>s around
"paragraphs" that are wrapped in non-block-level tags, such as anchors,
phrase emphasis, and spans. The list of tags we're looking for is
hard-coded.
@param raw {boolean} indicates if these are raw HTML blocks in
the original source. It makes a difference in "safe" mode.
"""
if '<' not in text:
return text
# Pass `raw` value into our calls to self._hash_html_block_sub.
hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw)
# First, look for nested blocks, e.g.:
# <div>
# <div>
# tags for inner block must be indented.
# </div>
# </div>
#
# The outermost tags must start at the left margin for this to match, and
# the inner nested divs must be indented.
# We need to do this before the next, more liberal match, because the next
# match will start at the first `<div>` and stop at the first `</div>`.
text = self._strict_tag_block_re.sub(hash_html_block_sub, text)
# Now match more liberally, simply from `\n<tag>` to `</tag>\n`
text = self._liberal_tag_block_re.sub(hash_html_block_sub, text)
# Special case just for <hr />. It was easier to make a special
# case than to make the other regex more complicated.
if "<hr" in text:
_hr_tag_re = _hr_tag_re_from_tab_width(self.tab_width)
text = _hr_tag_re.sub(hash_html_block_sub, text)
# Special case for standalone HTML comments:
if "<!--" in text:
start = 0
while True:
# Delimiters for next comment block.
try:
start_idx = text.index("<!--", start)
except ValueError:
break
try:
end_idx = text.index("-->", start_idx) + 3
except ValueError:
break
# Start position for next comment block search.
start = end_idx
# Validate whitespace before comment.
if start_idx:
# - Up to `tab_width - 1` spaces before start_idx.
for i in range(self.tab_width - 1):
if text[start_idx - 1] != ' ':
break
start_idx -= 1
if start_idx == 0:
break
# - Must be preceded by 2 newlines or hit the start of
# the document.
if start_idx == 0:
pass
elif start_idx == 1 and text[0] == '\n':
start_idx = 0 # to match minute detail of Markdown.pl regex
elif text[start_idx-2:start_idx] == '\n\n':
pass
else:
break
# Validate whitespace after comment.
# - Any number of spaces and tabs.
while end_idx < len(text):
if text[end_idx] not in ' \t':
break
end_idx += 1
# - Must be following by 2 newlines or hit end of text.
if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'):
continue
# Escape and hash (must match `_hash_html_block_sub`).
html = text[start_idx:end_idx]
if raw and self.safe_mode:
html = self._sanitize_html(html)
key = _hash_text(html)
self.html_blocks[key] = html
text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:]
if "xml" in self.extras:
# Treat XML processing instructions and namespaced one-liner
# tags as if they were block HTML tags. E.g., if standalone
# (i.e. are their own paragraph), the following do not get
# wrapped in a <p> tag:
# <?foo bar?>
#
# <xi:include xmlns:xi="http://www.w3.org/2001/XInclude" href="chapter_1.md"/>
_xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width)
text = _xml_oneliner_re.sub(hash_html_block_sub, text)
return text
def _strip_link_definitions(self, text):
# Strips link definitions from text, stores the URLs and titles in
# hash references.
less_than_tab = self.tab_width - 1
# Link defs are in the form:
# [id]: url "optional title"
_link_def_re = re.compile(r"""
^[ ]{0,%d}\[(.+)\]: # id = \1
[ \t]*
\n? # maybe *one* newline
[ \t]*
<?(.+?)>? # url = \2
[ \t]*
(?:
\n? # maybe one newline
[ \t]*
(?<=\s) # lookbehind for whitespace
['"(]
([^\n]*) # title = \3
['")]
[ \t]*
)? # title is optional
(?:\n+|\Z)
""" % less_than_tab, re.X | re.M | re.U)
return _link_def_re.sub(self._extract_link_def_sub, text)
def _extract_link_def_sub(self, match):
id, url, title = match.groups()
key = id.lower() # Link IDs are case-insensitive
self.urls[key] = self._encode_amps_and_angles(url)
if title:
self.titles[key] = title
return ""
def _extract_footnote_def_sub(self, match):
id, text = match.groups()
text = _dedent(text, skip_first_line=not text.startswith('\n')).strip()
normed_id = re.sub(r'\W', '-', id)
# Ensure footnote text ends with a couple newlines (for some
# block gamut matches).
self.footnotes[normed_id] = text + "\n\n"
return ""
def _strip_footnote_definitions(self, text):
"""A footnote definition looks like this:
[^note-id]: Text of the note.
May include one or more indented paragraphs.
Where,
- The 'note-id' can be pretty much anything, though typically it
is the number of the footnote.
- The first paragraph may start on the next line, like so:
[^note-id]:
Text of the note.
"""
less_than_tab = self.tab_width - 1
footnote_def_re = re.compile(r'''
^[ ]{0,%d}\[\^(.+)\]: # id = \1
[ \t]*
( # footnote text = \2
# First line need not start with the spaces.
(?:\s*.*\n+)
(?:
(?:[ ]{%d} | \t) # Subsequent lines must be indented.
.*\n+
)*
)
# Lookahead for non-space at line-start, or end of doc.
(?:(?=^[ ]{0,%d}\S)|\Z)
''' % (less_than_tab, self.tab_width, self.tab_width),
re.X | re.M)
return footnote_def_re.sub(self._extract_footnote_def_sub, text)
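# Illustrative example (added comment; requires the 'footnotes' extra):
#
#   >>> text = "A claim.[^1]\n\n[^1]: A supporting note."
#   >>> markdown2.markdown(text, extras=["footnotes"])
#   # -> a <sup class="footnote-ref"> reference in the body plus a
#   # <div class="footnotes"> section appended by _add_footnotes().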
_hr_re = re.compile(r'^[ ]{0,3}([-_*][ ]{0,2}){3,}$', re.M)
def _run_block_gamut(self, text):
# These are all the transformations that form block-level
# tags like paragraphs, headers, and list items.
if "fenced-code-blocks" in self.extras:
text = self._do_fenced_code_blocks(text)
text = self._do_headers(text)
# Do Horizontal Rules:
# On the number of spaces in horizontal rules: The spec is fuzzy: "If
# you wish, you may use spaces between the hyphens or asterisks."
# Markdown.pl 1.0.1's hr regexes limit the number of spaces between the
# hr chars to one or two. We'll reproduce that limit here.
hr = "\n<hr"+self.empty_element_suffix+"\n"
text = re.sub(self._hr_re, hr, text)
text = self._do_lists(text)
if "pyshell" in self.extras:
text = self._prepare_pyshell_blocks(text)
if "wiki-tables" in self.extras:
text = self._do_wiki_tables(text)
if "tables" in self.extras:
text = self._do_tables(text)
text = self._do_code_blocks(text)
text = self._do_block_quotes(text)
# We already ran _HashHTMLBlocks() before, in Markdown(), but that
# was to escape raw HTML in the original Markdown source. This time,
# we're escaping the markup we've just created, so that we don't wrap
# <p> tags around block-level tags.
text = self._hash_html_blocks(text)
text = self._form_paragraphs(text)
return text
def _pyshell_block_sub(self, match):
lines = match.group(0).splitlines(0)
_dedentlines(lines)
indent = ' ' * self.tab_width
s = ('\n' # separate from possible cuddled paragraph
+ indent + ('\n'+indent).join(lines)
+ '\n\n')
return s
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text)
def _table_sub(self, match):
head, underline, body = match.groups()
# Determine aligns for columns.
cols = [cell.strip() for cell in underline.strip('| \t\n').split('|')]
align_from_col_idx = {}
for col_idx, col in enumerate(cols):
if col[0] == ':' and col[-1] == ':':
align_from_col_idx[col_idx] = ' align="center"'
elif col[0] == ':':
align_from_col_idx[col_idx] = ' align="left"'
elif col[-1] == ':':
align_from_col_idx[col_idx] = ' align="right"'
# thead
hlines = ['<table>', '<thead>', '<tr>']
cols = [cell.strip() for cell in head.strip('| \t\n').split('|')]
for col_idx, col in enumerate(cols):
hlines.append(' <th%s>%s</th>' % (
align_from_col_idx.get(col_idx, ''),
self._run_span_gamut(col)
))
hlines.append('</tr>')
hlines.append('</thead>')
# tbody
hlines.append('<tbody>')
for line in body.strip('\n').split('\n'):
hlines.append('<tr>')
cols = [cell.strip() for cell in line.strip('| \t\n').split('|')]
for col_idx, col in enumerate(cols):
hlines.append(' <td%s>%s</td>' % (
align_from_col_idx.get(col_idx, ''),
self._run_span_gamut(col)
))
hlines.append('</tr>')
hlines.append('</tbody>')
hlines.append('</table>')
return '\n'.join(hlines) + '\n'
def _do_tables(self, text):
"""Copying PHP-Markdown and GFM table syntax. Some regex borrowed from
https://github.com/michelf/php-markdown/blob/lib/Michelf/Markdown.php#L2538
"""
less_than_tab = self.tab_width - 1
table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^[ ]{0,%d} # allowed whitespace
(.*[|].*) \n # $1: header row (at least one pipe)
^[ ]{0,%d} # allowed whitespace
( # $2: underline row
# underline row with leading bar
(?: \|\ *:?-+:?\ * )+ \|? \n
|
# or, underline row without leading bar
(?: \ *:?-+:?\ *\| )+ (?: \ *:?-+:?\ * )? \n
)
( # $3: data rows
(?:
^[ ]{0,%d}(?!\ ) # ensure line begins with 0 to less_than_tab spaces
.*\|.* \n
)+
)
''' % (less_than_tab, less_than_tab, less_than_tab), re.M | re.X)
return table_re.sub(self._table_sub, text)
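# Illustrative example (added comment; requires the 'tables' extra):
#
#   >>> text = "| a | b |\n|:--|--:|\n| 1 | 2 |\n"
#   >>> markdown2.markdown(text, extras=["tables"])
#   # -> a <table> whose header cells carry align="left" / align="right"
#   # per the ':' markers in the underline row.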
def _wiki_table_sub(self, match):
ttext = match.group(0).strip()
#print 'wiki table: %r' % match.group(0)
rows = []
for line in ttext.splitlines(0):
line = line.strip()[2:-2].strip()
row = [c.strip() for c in re.split(r'(?<!\\)\|\|', line)]
rows.append(row)
#pprint(rows)
hlines = ['<table>', '<tbody>']
for row in rows:
hrow = ['<tr>']
for cell in row:
hrow.append('<td>')
hrow.append(self._run_span_gamut(cell))
hrow.append('</td>')
hrow.append('</tr>')
hlines.append(''.join(hrow))
hlines += ['</tbody>', '</table>']
return '\n'.join(hlines) + '\n'
def _do_wiki_tables(self, text):
# Optimization.
if "||" not in text:
return text
less_than_tab = self.tab_width - 1
wiki_table_re = re.compile(r'''
(?:(?<=\n\n)|\A\n?) # leading blank line
^([ ]{0,%d})\|\|.+?\|\|[ ]*\n # first line
(^\1\|\|.+?\|\|\n)* # any number of subsequent lines
''' % less_than_tab, re.M | re.X)
return wiki_table_re.sub(self._wiki_table_sub, text)
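# Illustrative example (added comment; requires the 'wiki-tables' extra):
#
#   >>> markdown2.markdown("|| a || b ||\n|| c || d ||\n",
#   ...                    extras=["wiki-tables"])
#   # -> a <table> with one <td> per ||-delimited cell; a '\' before
#   # '||' prevents a cell split (see the (?<!\\) in _wiki_table_sub).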
def _run_span_gamut(self, text):
# These are all the transformations that occur *within* block-level
# tags like paragraphs, headers, and list items.
text = self._do_code_spans(text)
text = self._escape_special_chars(text)
# Process anchor and image tags.
text = self._do_links(text)
# Make links out of things like `<http://example.com/>`
# Must come after _do_links(), because you can use < and >
# delimiters in inline links like [this](<url>).
text = self._do_auto_links(text)
if "link-patterns" in self.extras:
text = self._do_link_patterns(text)
text = self._encode_amps_and_angles(text)
text = self._do_italics_and_bold(text)
if "smarty-pants" in self.extras:
text = self._do_smart_punctuation(text)
# Do hard breaks:
if "break-on-newline" in self.extras:
text = re.sub(r" *\n", "<br%s\n" % self.empty_element_suffix, text)
else:
text = re.sub(r" {2,}\n", " <br%s\n" % self.empty_element_suffix, text)
return text
# "Sorta" because auto-links are identified as "tag" tokens.
_sorta_html_tokenize_re = re.compile(r"""
(
# tag
</?
(?:\w+) # tag name
(?:\s+(?:[\w-]+:)?[\w-]+=(?:".*?"|'.*?'))* # attributes
\s*/?>
|
# auto-link (e.g., <http://www.activestate.com/>)
<\w+[^>]*>
|
<!--.*?--> # comment
|
<\?.*?\?> # processing instruction
)
""", re.X)
def _escape_special_chars(self, text):
# Python markdown note: the HTML tokenization here differs from
# that in Markdown.pl, hence the behaviour for subtle cases can
# differ (I believe the tokenizer here does a better job because
# it isn't susceptible to unmatched '<' and '>' in HTML tags).
# Note, however, that '>' is not allowed in an auto-link URL
# here.
escaped = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup:
# Within tags/HTML-comments/auto-links, encode * and _
# so they don't conflict with their use in Markdown for
# italics and strong. We're replacing each such
# character with its corresponding MD5 checksum value;
# this is likely overkill, but it should prevent us from
# colliding with the escape values by accident.
escaped.append(token.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
escaped.append(self._encode_backslash_escapes(token))
is_html_markup = not is_html_markup
return ''.join(escaped)
def _hash_html_spans(self, text):
# Used for safe_mode.
def _is_auto_link(s):
if ':' in s and self._auto_link_re.match(s):
return True
elif '@' in s and self._auto_email_link_re.match(s):
return True
return False
tokens = []
is_html_markup = False
for token in self._sorta_html_tokenize_re.split(text):
if is_html_markup and not _is_auto_link(token):
sanitized = self._sanitize_html(token)
key = _hash_text(sanitized)
self.html_spans[key] = sanitized
tokens.append(key)
else:
tokens.append(token)
is_html_markup = not is_html_markup
return ''.join(tokens)
def _unhash_html_spans(self, text):
for key, sanitized in list(self.html_spans.items()):
text = text.replace(key, sanitized)
return text
def _sanitize_html(self, s):
if self.safe_mode == "replace":
return self.html_removed_text
elif self.safe_mode == "escape":
replacements = [
('&', '&amp;'),
('<', '&lt;'),
('>', '&gt;'),
]
for before, after in replacements:
s = s.replace(before, after)
return s
else:
raise MarkdownError("invalid value for 'safe_mode': %r (must be "
"'escape' or 'replace')" % self.safe_mode)
_inline_link_title = re.compile(r'''
( # \1
[ \t]+
(['"]) # quote char = \2
(?P<title>.*?)
\2
)? # title is optional
\)$
''', re.X | re.S)
_tail_of_reference_link_re = re.compile(r'''
# Match tail of: [text][id]
[ ]? # one optional space
(?:\n[ ]*)? # one optional newline followed by spaces
\[
(?P<id>.*?)
\]
''', re.X | re.S)
_whitespace = re.compile(r'\s*')
_strip_anglebrackets = re.compile(r'<(.*)>.*')
def _find_non_whitespace(self, text, start):
"""Returns the index of the first non-whitespace character in text
after (and including) start
"""
match = self._whitespace.match(text, start)
return match.end()
def _find_balanced(self, text, start, open_c, close_c):
"""Returns the index where the open_c and close_c characters balance
out - the same number of open_c and close_c are encountered - or the
end of string if it's reached before the balance point is found.
"""
i = start
l = len(text)
count = 1
while count > 0 and i < l:
if text[i] == open_c:
count += 1
elif text[i] == close_c:
count -= 1
i += 1
return i
def _extract_url_and_title(self, text, start):
"""Extracts the url and (optional) title from the tail of a link"""
# text[start] equals the opening parenthesis
idx = self._find_non_whitespace(text, start+1)
if idx == len(text):
return None, None, None
end_idx = idx
has_anglebrackets = text[idx] == "<"
if has_anglebrackets:
end_idx = self._find_balanced(text, end_idx+1, "<", ">")
end_idx = self._find_balanced(text, end_idx, "(", ")")
match = self._inline_link_title.search(text, idx, end_idx)
if not match:
return None, None, None
url, title = text[idx:match.start()], match.group("title")
if has_anglebrackets:
url = self._strip_anglebrackets.sub(r'\1', url)
return url, title, end_idx
def _do_links(self, text):
"""Turn Markdown link shortcuts into XHTML <a> and <img> tags.
This is a combination of Markdown.pl's _DoAnchors() and
_DoImages(). They are done together because that simplified the
approach. It was necessary to use a different approach than
Markdown.pl because of the lack of atomic matching support in
Python's regex engine used in $g_nested_brackets.
"""
MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24
# `anchor_allowed_pos` is used to support img links inside
# anchors, but not anchors inside anchors. An anchor's start
# pos must be `>= anchor_allowed_pos`.
anchor_allowed_pos = 0
curr_pos = 0
while True: # Handle the next link.
# The next '[' is the start of:
# - an inline anchor: [text](url "title")
# - a reference anchor: [text][id]
# - an inline img: 
# - a reference img: ![text][id]
# - a footnote ref: [^id]
# (Only if 'footnotes' extra enabled)
# - a footnote defn: [^id]: ...
# (Only if 'footnotes' extra enabled) These have already
# been stripped in _strip_footnote_definitions() so no
# need to watch for them.
# - a link definition: [id]: url "title"
# These have already been stripped in
# _strip_link_definitions() so no need to watch for them.
# - not markup: [...anything else...
try:
start_idx = text.index('[', curr_pos)
except ValueError:
break
text_length = len(text)
# Find the matching closing ']'.
# Markdown.pl allows *matching* brackets in link text so we
# will here too. Markdown.pl *doesn't* currently allow
# matching brackets in img alt text -- we'll differ in that
# regard.
bracket_depth = 0
for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL,
text_length)):
ch = text[p]
if ch == ']':
bracket_depth -= 1
if bracket_depth < 0:
break
elif ch == '[':
bracket_depth += 1
else:
# Closing bracket not found within sentinel length.
# This isn't markup.
curr_pos = start_idx + 1
continue
link_text = text[start_idx+1:p]
# Possibly a footnote ref?
if "footnotes" in self.extras and link_text.startswith("^"):
normed_id = re.sub(r'\W', '-', link_text[1:])
if normed_id in self.footnotes:
self.footnote_ids.append(normed_id)
result = '<sup class="footnote-ref" id="fnref-%s">' \
'<a href="#fn-%s">%s</a></sup>' \
% (normed_id, normed_id, len(self.footnote_ids))
text = text[:start_idx] + result + text[p+1:]
else:
# This id isn't defined, leave the markup alone.
curr_pos = p+1
continue
# Now determine what this is by the remainder.
p += 1
if p == text_length:
return text
# Inline anchor or img?
if text[p] == '(': # attempt at perf improvement
url, title, url_end_idx = self._extract_url_and_title(text, p)
if url is not None:
# Handle an inline anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
if title:
title_str = ' title="%s"' % (
_xml_escape_attr(title)
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (url.replace('"', '&quot;'),
_xml_escape_attr(link_text),
title_str, img_class_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[url_end_idx:]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
continue
# Reference anchor or img?
else:
match = self._tail_of_reference_link_re.match(text, p)
if match:
# Handle a reference-style anchor or img.
is_img = start_idx > 0 and text[start_idx-1] == "!"
if is_img:
start_idx -= 1
link_id = match.group("id").lower()
if not link_id:
link_id = link_text.lower() # for links like [this][]
if link_id in self.urls:
url = self.urls[link_id]
# We've got to encode these to avoid conflicting
# with italics/bold.
url = url.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title = self.titles.get(link_id)
if title:
title = _xml_escape_attr(title) \
.replace('*', self._escape_table['*']) \
.replace('_', self._escape_table['_'])
title_str = ' title="%s"' % title
else:
title_str = ''
if is_img:
img_class_str = self._html_class_str_from_tag("img")
result = '<img src="%s" alt="%s"%s%s%s' \
% (url.replace('"', '&quot;'),
link_text.replace('"', '&quot;'),
title_str, img_class_str, self.empty_element_suffix)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
curr_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
elif start_idx >= anchor_allowed_pos:
result_head = '<a href="%s"%s>' % (url, title_str)
result = '%s%s</a>' % (result_head, link_text)
if "smarty-pants" in self.extras:
result = result.replace('"', self._escape_table['"'])
# <img> allowed from curr_pos on, <a> from
# anchor_allowed_pos on.
curr_pos = start_idx + len(result_head)
anchor_allowed_pos = start_idx + len(result)
text = text[:start_idx] + result + text[match.end():]
else:
# Anchor not allowed here.
curr_pos = start_idx + 1
else:
# This id isn't defined, leave the markup alone.
curr_pos = match.end()
continue
# Otherwise, it isn't markup.
curr_pos = start_idx + 1
return text
def header_id_from_text(self, text, prefix, n):
"""Generate a header id attribute value from the given header
HTML content.
This is only called if the "header-ids" extra is enabled.
Subclasses may override this for different header ids.
@param text {str} The text of the header tag
@param prefix {str} The requested prefix for header ids. This is the
value of the "header-ids" extra key, if any. Otherwise, None.
@param n {int} The <hN> tag number, i.e. `1` for an <h1> tag.
@returns {str} The value for the header tag's "id" attribute. Return
None to not have an id attribute and to exclude this header from
the TOC (if the "toc" extra is specified).
"""
header_id = _slugify(text)
if prefix and isinstance(prefix, base_string_type):
header_id = prefix + '-' + header_id
if header_id in self._count_from_header_id:
self._count_from_header_id[header_id] += 1
header_id += '-%s' % self._count_from_header_id[header_id]
else:
self._count_from_header_id[header_id] = 1
return header_id
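# Illustrative example (added comment; requires the 'header-ids' extra):
#
#   >>> markdown2.markdown("# My Header\n", extras=["header-ids"])
#   # -> '<h1 id="my-header">My Header</h1>' (id built via _slugify);
#   # a repeated header text gets an '-2', '-3', ... suffix.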
_toc = None
def _toc_add_entry(self, level, id, name):
if self._toc is None:
self._toc = []
self._toc.append((level, id, self._unescape_special_chars(name)))
_h_re_base = r'''
(^(.+)[ \t]*\n(=+|-+)[ \t]*\n+)
|
(^(\#{1,6}) # \5 = string of #'s
[ \t]%s
(.+?) # \6 = Header text
[ \t]*
(?<!\\) # ensure not an escaped trailing '#'
\#* # optional closing #'s (not counted)
\n+
)
'''
_h_re = re.compile(_h_re_base % '*', re.X | re.M)
_h_re_tag_friendly = re.compile(_h_re_base % '+', re.X | re.M)
def _h_sub(self, match):
if match.group(1) is not None:
# Setext header
n = {"=": 1, "-": 2}[match.group(3)[0]]
header_group = match.group(2)
else:
# atx header
n = len(match.group(5))
header_group = match.group(6)
demote_headers = self.extras.get("demote-headers")
if demote_headers:
n = min(n + demote_headers, 6)
header_id_attr = ""
if "header-ids" in self.extras:
header_id = self.header_id_from_text(header_group,
self.extras["header-ids"], n)
if header_id:
header_id_attr = ' id="%s"' % header_id
html = self._run_span_gamut(header_group)
if "toc" in self.extras and header_id:
self._toc_add_entry(n, header_id, html)
return "<h%d%s>%s</h%d>\n\n" % (n, header_id_attr, html, n)
def _do_headers(self, text):
# Setext-style headers:
# Header 1
# ========
#
# Header 2
# --------
# atx-style headers:
# # Header 1
# ## Header 2
# ## Header 2 with closing hashes ##
# ...
# ###### Header 6
if 'tag-friendly' in self.extras:
return self._h_re_tag_friendly.sub(self._h_sub, text)
return self._h_re.sub(self._h_sub, text)
_marker_ul_chars = '*+-'
_marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars
_marker_ul = '(?:[%s])' % _marker_ul_chars
_marker_ol = r'(?:\d+\.)'
def _list_sub(self, match):
lst = match.group(1)
lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol"
result = self._process_list_items(lst)
if self.list_level:
return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type)
else:
return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type)
def _do_lists(self, text):
# Form HTML ordered (numbered) and unordered (bulleted) lists.
# Iterate over each *non-overlapping* list match.
pos = 0
while True:
# Find the *first* hit for either list style (ul or ol). We
# match ul and ol separately to avoid adjacent lists of different
# types running into each other (see issue #16).
hits = []
for marker_pat in (self._marker_ul, self._marker_ol):
less_than_tab = self.tab_width - 1
whole_list = r'''
( # \1 = whole list
( # \2
[ ]{0,%d}
(%s) # \3 = first list item marker
[ \t]+
(?!\ *\3\ ) # '- - - ...' isn't a list. See 'not_quite_a_list' test case.
)
(?:.+?)
( # \4
\Z
|
\n{2,}
(?=\S)
(?! # Negative lookahead for another list item marker
[ \t]*
%s[ \t]+
)
)
)
''' % (less_than_tab, marker_pat, marker_pat)
if self.list_level: # sub-list
list_re = re.compile("^"+whole_list, re.X | re.M | re.S)
else:
list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list,
re.X | re.M | re.S)
match = list_re.search(text, pos)
if match:
hits.append((match.start(), match))
if not hits:
break
hits.sort()
match = hits[0][1]
start, end = match.span()
middle = self._list_sub(match)
text = text[:start] + middle + text[end:]
pos = start + len(middle) # start pos for next attempted match
return text
_list_item_re = re.compile(r'''
(\n)? # leading line = \1
(^[ \t]*) # leading whitespace = \2
(?P<marker>%s) [ \t]+ # list marker = \3
((?:.+?) # list item text = \4
(\n{1,2})) # eols = \5
(?= \n* (\Z | \2 (?P<next_marker>%s) [ \t]+))
''' % (_marker_any, _marker_any),
re.M | re.X | re.S)
_last_li_endswith_two_eols = False
def _list_item_sub(self, match):
item = match.group(4)
leading_line = match.group(1)
leading_space = match.group(2)
if leading_line or "\n\n" in item or self._last_li_endswith_two_eols:
item = self._run_block_gamut(self._outdent(item))
else:
# Recursion for sub-lists:
item = self._do_lists(self._outdent(item))
if item.endswith('\n'):
item = item[:-1]
item = self._run_span_gamut(item)
self._last_li_endswith_two_eols = (len(match.group(5)) == 2)
return "<li>%s</li>\n" % item
def _process_list_items(self, list_str):
# Process the contents of a single ordered or unordered list,
# splitting it into individual list items.
# The $g_list_level global keeps track of when we're inside a list.
# Each time we enter a list, we increment it; when we leave a list,
# we decrement. If it's zero, we're not in a list anymore.
#
# We do this because when we're not inside a list, we want to treat
# something like this:
#
# I recommend upgrading to version
# 8. Oops, now this line is treated
# as a sub-list.
#
# As a single paragraph, despite the fact that the second line starts
# with a digit-period-space sequence.
#
# Whereas when we're inside a list (or sub-list), that line will be
# treated as the start of a sub-list. What a kludge, huh? This is
# an aspect of Markdown's syntax that's hard to parse perfectly
# without resorting to mind-reading. Perhaps the solution is to
# change the syntax rules such that sub-lists must start with a
# starting cardinal number; e.g. "1." or "a.".
self.list_level += 1
self._last_li_endswith_two_eols = False
list_str = list_str.rstrip('\n') + '\n'
list_str = self._list_item_re.sub(self._list_item_sub, list_str)
self.list_level -= 1
return list_str
def _get_pygments_lexer(self, lexer_name):
try:
from pygments import lexers, util
except ImportError:
return None
try:
return lexers.get_lexer_by_name(lexer_name)
except util.ClassNotFound:
return None
def _color_with_pygments(self, codeblock, lexer, **formatter_opts):
import pygments
import pygments.formatters
class HtmlCodeFormatter(pygments.formatters.HtmlFormatter):
def _wrap_code(self, inner):
"""A function for use in a Pygments Formatter which
wraps in <code> tags.
"""
yield 0, "<code>"
for tup in inner:
yield tup
yield 0, "</code>"
def wrap(self, source, outfile):
"""Return the source with a code, pre, and div."""
return self._wrap_div(self._wrap_pre(self._wrap_code(source)))
formatter_opts.setdefault("cssclass", "codehilite")
formatter = HtmlCodeFormatter(**formatter_opts)
return pygments.highlight(codeblock, lexer, formatter)
def _code_block_sub(self, match, is_fenced_code_block=False):
lexer_name = None
if is_fenced_code_block:
lexer_name = match.group(1)
if lexer_name:
formatter_opts = self.extras['fenced-code-blocks'] or {}
codeblock = match.group(2)
codeblock = codeblock[:-1] # drop one trailing newline
else:
codeblock = match.group(1)
codeblock = self._outdent(codeblock)
codeblock = self._detab(codeblock)
codeblock = codeblock.lstrip('\n') # trim leading newlines
codeblock = codeblock.rstrip() # trim trailing whitespace
# Note: "code-color" extra is DEPRECATED.
if "code-color" in self.extras and codeblock.startswith(":::"):
lexer_name, rest = codeblock.split('\n', 1)
lexer_name = lexer_name[3:].strip()
codeblock = rest.lstrip("\n") # Remove lexer declaration line.
formatter_opts = self.extras['code-color'] or {}
if lexer_name:
def unhash_code(codeblock):
for key, sanitized in list(self.html_spans.items()):
codeblock = codeblock.replace(key, sanitized)
replacements = [
("&", "&"),
("<", "<"),
(">", ">")
]
for old, new in replacements:
codeblock = codeblock.replace(old, new)
return codeblock
lexer = self._get_pygments_lexer(lexer_name)
if lexer:
codeblock = unhash_code(codeblock)
colored = self._color_with_pygments(codeblock, lexer,
**formatter_opts)
return "\n\n%s\n\n" % colored
codeblock = self._encode_code(codeblock)
pre_class_str = self._html_class_str_from_tag("pre")
code_class_str = self._html_class_str_from_tag("code")
return "\n\n<pre%s><code%s>%s\n</code></pre>\n\n" % (
pre_class_str, code_class_str, codeblock)
def _html_class_str_from_tag(self, tag):
"""Get the appropriate ' class="..."' string (note the leading
space), if any, for the given tag.
"""
if "html-classes" not in self.extras:
return ""
try:
html_classes_from_tag = self.extras["html-classes"]
except TypeError:
return ""
else:
if tag in html_classes_from_tag:
return ' class="%s"' % html_classes_from_tag[tag]
return ""
def _do_code_blocks(self, text):
"""Process Markdown `<pre><code>` blocks."""
code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
( # $1 = the code block -- one or more lines, starting with a space/tab
(?:
(?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces
.*\n+
)+
)
((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc
# Lookahead to make sure this block isn't already in a code block.
# Needed when syntax highlighting is being used.
(?![^<]*\</code\>)
''' % (self.tab_width, self.tab_width),
re.M | re.X)
return code_block_re.sub(self._code_block_sub, text)
_fenced_code_block_re = re.compile(r'''
(?:\n\n|\A\n?)
^```([\w+-]+)?[ \t]*\n # opening fence, $1 = optional lang
(.*?) # $2 = code block content
^```[ \t]*\n # closing fence
''', re.M | re.X | re.S)
def _fenced_code_block_sub(self, match):
return self._code_block_sub(match, is_fenced_code_block=True)
def _do_fenced_code_blocks(self, text):
"""Process ```-fenced unindented code blocks ('fenced-code-blocks' extra)."""
return self._fenced_code_block_re.sub(self._fenced_code_block_sub, text)
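# Illustrative example (added comment; requires the 'fenced-code-blocks'
# extra):
#
#   >>> text = "```python\nprint('hi')\n```\n"
#   >>> markdown2.markdown(text, extras=["fenced-code-blocks"])
#   # -> <pre><code> output; the 'python' info string is passed to
#   # _get_pygments_lexer() for optional syntax coloring.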
# Rules for a code span:
# - backslash escapes are not interpreted in a code span
# - to include one backtick, or a run of them, the delimiters must
# be a longer run of backticks
# - cannot start or end a code span with a backtick; pad with a
# space and that space will be removed in the emitted HTML
# See `test/tm-cases/escapes.text` for a number of edge-case
# examples.
_code_span_re = re.compile(r'''
(?<!\\)
(`+) # \1 = Opening run of `
(?!`) # See Note A test/tm-cases/escapes.text
(.+?) # \2 = The code block
(?<!`)
\1 # Matching closer
(?!`)
''', re.X | re.S)
def _code_span_sub(self, match):
c = match.group(2).strip(" \t")
c = self._encode_code(c)
return "<code>%s</code>" % c
def _do_code_spans(self, text):
# * Backtick quotes are used for <code></code> spans.
#
# * You can use multiple backticks as the delimiters if you want to
# include literal backticks in the code span. So, this input:
#
# Just type ``foo `bar` baz`` at the prompt.
#
# Will translate to:
#
# <p>Just type <code>foo `bar` baz</code> at the prompt.</p>
#
# There's no arbitrary limit to the number of backticks you
# can use as delimiters. If you need three consecutive backticks
# in your code, use four for delimiters, etc.
#
# * You can use spaces to get literal backticks at the edges:
#
# ... type `` `bar` `` ...
#
# Turns to:
#
# ... type <code>`bar`</code> ...
return self._code_span_re.sub(self._code_span_sub, text)
def _encode_code(self, text):
"""Encode/escape certain characters inside Markdown code runs.
The point is that in code, these characters are literals,
and lose their special Markdown meanings.
"""
replacements = [
# Encode all ampersands; HTML entities are not
# entities within a Markdown code span.
('&', '&'),
# Do the angle bracket song and dance:
('<', '<'),
('>', '>'),
]
for before, after in replacements:
text = text.replace(before, after)
hashed = _hash_text(text)
self._escape_table[text] = hashed
return hashed
_strong_re = re.compile(r"(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1", re.S)
_em_re = re.compile(r"(\*|_)(?=\S)(.+?)(?<=\S)\1", re.S)
_code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S)
_code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S)
def _do_italics_and_bold(self, text):
# <strong> must go first:
if "code-friendly" in self.extras:
text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text)
text = self._code_friendly_em_re.sub(r"<em>\1</em>", text)
else:
text = self._strong_re.sub(r"<strong>\2</strong>", text)
text = self._em_re.sub(r"<em>\2</em>", text)
return text
# "smarty-pants" extra: Very liberal in interpreting a single prime as an
# apostrophe; e.g. ignores the fact that "round", "bout", "twer", and
# "twixt" can be written without an initial apostrophe. This is fine because
# using scare quotes (single quotation marks) is rare.
_apostrophe_year_re = re.compile(r"'(\d\d)(?=(\s|,|;|\.|\?|!|$))")
_contractions = ["tis", "twas", "twer", "neath", "o", "n",
"round", "bout", "twixt", "nuff", "fraid", "sup"]
def _do_smart_contractions(self, text):
text = self._apostrophe_year_re.sub(r"&#8217;\1", text)
for c in self._contractions:
text = text.replace("'%s" % c, "&#8217;%s" % c)
text = text.replace("'%s" % c.capitalize(),
"&#8217;%s" % c.capitalize())
return text
# Substitute double-quotes before single-quotes.
_opening_single_quote_re = re.compile(r"(?<!\S)'(?=\S)")
_opening_double_quote_re = re.compile(r'(?<!\S)"(?=\S)')
_closing_single_quote_re = re.compile(r"(?<=\S)'")
_closing_double_quote_re = re.compile(r'(?<=\S)"(?=(\s|,|;|\.|\?|!|$))')
def _do_smart_punctuation(self, text):
"""Fancifies 'single quotes', "double quotes", and apostrophes.
Converts --, ---, and ... into en dashes, em dashes, and ellipses.
Inspiration is: <http://daringfireball.net/projects/smartypants/>
See "test/tm-cases/smarty_pants.text" for a full discussion of the
support here and
<http://code.google.com/p/python-markdown2/issues/detail?id=42> for a
discussion of some diversion from the original SmartyPants.
"""
if "'" in text: # guard for perf
text = self._do_smart_contractions(text)
text = self._opening_single_quote_re.sub("&#8216;", text)
text = self._closing_single_quote_re.sub("&#8217;", text)
if '"' in text: # guard for perf
text = self._opening_double_quote_re.sub("&#8220;", text)
text = self._closing_double_quote_re.sub("&#8221;", text)
text = text.replace("---", "&#8212;")
text = text.replace("--", "&#8211;")
text = text.replace("...", "&#8230;")
text = text.replace(" . . . ", "&#8230;")
text = text.replace(". . .", "&#8230;")
return text
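# Illustrative example (added comment; requires the 'smarty-pants' extra):
#
#   >>> markdown2.markdown('"scare quotes" -- and so on...',
#   ...                    extras=["smarty-pants"])
#   # -> curly-quote entities, an en dash for '--' and an ellipsis
#   # for '...'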
_block_quote_re = re.compile(r'''
( # Wrap whole match in \1
(
^[ \t]*>[ \t]? # '>' at the start of a line
.+\n # rest of the first line
(.+\n)* # subsequent consecutive lines
\n* # blanks
)+
)
''', re.M | re.X)
_bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M)
_html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S)
def _dedent_two_spaces_sub(self, match):
return re.sub(r'(?m)^ ', '', match.group(1))
def _block_quote_sub(self, match):
bq = match.group(1)
bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting
bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines
bq = self._run_block_gamut(bq) # recurse
bq = re.sub('(?m)^', ' ', bq)
# These leading spaces screw with <pre> content, so we need to fix that:
bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq)
return "<blockquote>\n%s\n</blockquote>\n\n" % bq
def _do_block_quotes(self, text):
if '>' not in text:
return text
return self._block_quote_re.sub(self._block_quote_sub, text)
def _form_paragraphs(self, text):
# Strip leading and trailing lines:
text = text.strip('\n')
# Wrap <p> tags.
grafs = []
for i, graf in enumerate(re.split(r"\n{2,}", text)):
if graf in self.html_blocks:
# Unhashify HTML blocks
grafs.append(self.html_blocks[graf])
else:
cuddled_list = None
if "cuddled-lists" in self.extras:
# Need to put back trailing '\n' for `_list_item_re`
# match at the end of the paragraph.
li = self._list_item_re.search(graf + '\n')
# Two of the same list marker in this paragraph: a likely
# candidate for a list cuddled to preceding paragraph
# text (issue 33). Note the `[-1]` is a quick way to
# consider numeric bullets (e.g. "1." and "2.") to be
# equal.
if (li and len(li.group(2)) <= 3 and li.group("next_marker")
and li.group("marker")[-1] == li.group("next_marker")[-1]):
start = li.start()
cuddled_list = self._do_lists(graf[start:]).rstrip("\n")
assert cuddled_list.startswith("<ul>") or cuddled_list.startswith("<ol>")
graf = graf[:start]
# Wrap <p> tags.
graf = self._run_span_gamut(graf)
grafs.append("<p>" + graf.lstrip(" \t") + "</p>")
if cuddled_list:
grafs.append(cuddled_list)
return "\n\n".join(grafs)
def _add_footnotes(self, text):
if self.footnotes:
footer = [
'<div class="footnotes">',
'<hr' + self.empty_element_suffix,
'<ol>',
]
for i, id in enumerate(self.footnote_ids):
if i != 0:
footer.append('')
footer.append('<li id="fn-%s">' % id)
footer.append(self._run_block_gamut(self.footnotes[id]))
backlink = ('<a href="#fnref-%s" '
'class="footnoteBackLink" '
'title="Jump back to footnote %d in the text.">'
'&#8617;</a>' % (id, i+1))
if footer[-1].endswith("</p>"):
footer[-1] = footer[-1][:-len("</p>")] \
+ ' ' + backlink + "</p>"
else:
footer.append("\n<p>%s</p>" % backlink)
footer.append('</li>')
footer.append('</ol>')
footer.append('</div>')
return text + '\n\n' + '\n'.join(footer)
else:
return text
# Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin:
# http://bumppo.net/projects/amputator/
_ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)')
_naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I)
_naked_gt_re = re.compile(r'''(?<![a-z0-9?!/'"-])>''', re.I)
def _encode_amps_and_angles(self, text):
# Smart processing for ampersands and angle brackets that need
# to be encoded.
text = self._ampersand_re.sub('&', text)
# Encode naked <'s
text = self._naked_lt_re.sub('<', text)
# Encode naked >'s
# Note: Other markdown implementations (e.g. Markdown.pl, PHP
# Markdown) don't do this.
text = self._naked_gt_re.sub('>', text)
return text
def _encode_backslash_escapes(self, text):
for ch, escape in list(self._escape_table.items()):
text = text.replace("\\"+ch, escape)
return text
_auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I)
def _auto_link_sub(self, match):
g1 = match.group(1)
return '<a href="%s">%s</a>' % (g1, g1)
_auto_email_link_re = re.compile(r"""
<
(?:mailto:)?
(
[-.\w]+
\@
[-\w]+(\.[-\w]+)*\.[a-z]+
)
>
""", re.I | re.X | re.U)
def _auto_email_link_sub(self, match):
return self._encode_email_address(
self._unescape_special_chars(match.group(1)))
def _do_auto_links(self, text):
text = self._auto_link_re.sub(self._auto_link_sub, text)
text = self._auto_email_link_re.sub(self._auto_email_link_sub, text)
return text
def _encode_email_address(self, addr):
# Input: an email address, e.g. "foo@example.com"
#
# Output: the email address as a mailto link, with each character
# of the address encoded as either a decimal or hex entity, in
# the hopes of foiling most address harvesting spam bots. E.g.:
#
# <a href="mailto:foo@e
# xample.com">foo
# @example.com</a>
#
# Based on a filter by Matthew Wickline, posted to the BBEdit-Talk
# mailing list: <http://tinyurl.com/yu7ue>
chars = [_xml_encode_email_char_at_random(ch)
for ch in "mailto:" + addr]
# Strip the mailto: from the visible part.
addr = '<a href="%s">%s</a>' \
% (''.join(chars), ''.join(chars[7:]))
return addr
def _do_link_patterns(self, text):
"""Caveat emptor: there isn't much guarding against link
patterns being formed inside other standard Markdown links, e.g.
inside a [link def][like this].
Dev Notes: *Could* consider prefixing regexes with a negative
lookbehind assertion to attempt to guard against this.
"""
link_from_hash = {}
for regex, repl in self.link_patterns:
replacements = []
for match in regex.finditer(text):
if hasattr(repl, "__call__"):
href = repl(match)
else:
href = match.expand(repl)
replacements.append((match.span(), href))
for (start, end), href in reversed(replacements):
escaped_href = (
href.replace('"', '"') # b/c of attr quote
# To avoid markdown <em> and <strong>:
.replace('*', self._escape_table['*'])
.replace('_', self._escape_table['_']))
link = '<a href="%s">%s</a>' % (escaped_href, text[start:end])
hash = _hash_text(link)
link_from_hash[hash] = link
text = text[:start] + hash + text[end:]
for hash, link in list(link_from_hash.items()):
text = text.replace(hash, link)
return text
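# Illustrative example (added comment): `link_patterns` is caller-supplied
# as (compiled regex, replacement) pairs. The tracker URL below is made up
# for illustration:
#
#   >>> import re
#   >>> patterns = [(re.compile(r"issue #(\d+)", re.I),
#   ...              r"http://tracker.example.com/issue/\1")]
#   >>> markdown2.markdown("See issue #42.", extras=["link-patterns"],
#   ...                    link_patterns=patterns)
#   # -> 'See <a href="http://tracker.example.com/issue/42">issue #42</a>.'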
def _unescape_special_chars(self, text):
# Swap back in all the special characters we've hidden.
for ch, hash in list(self._escape_table.items()):
text = text.replace(hash, ch)
return text
def _outdent(self, text):
# Remove one level of line-leading tabs or spaces
return self._outdent_re.sub('', text)
class MarkdownWithExtras(Markdown):
"""A markdowner class that enables most extras:
- footnotes
- code-color (only has effect if 'pygments' Python module on path)
These are not included:
- pyshell (specific to Python-related documenting)
- code-friendly (because it *disables* part of the syntax)
- link-patterns (because you need to specify some actual
link-patterns anyway)
"""
extras = ["footnotes", "code-color"]
#---- internal support functions
class UnicodeWithAttrs(unicode):
"""A subclass of unicode used for the return value of conversion to
possibly attach some attributes. E.g. the "toc_html" attribute when
the "toc" extra is used.
"""
metadata = None
_toc = None
def toc_html(self):
"""Return the HTML for the current TOC.
This expects the `_toc` attribute to have been set on this instance.
"""
if self._toc is None:
return None
def indent():
return ' ' * (len(h_stack) - 1)
lines = []
h_stack = [0] # stack of header-level numbers
for level, id, name in self._toc:
if level > h_stack[-1]:
lines.append("%s<ul>" % indent())
h_stack.append(level)
elif level == h_stack[-1]:
lines[-1] += "</li>"
else:
while level < h_stack[-1]:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul></li>" % indent())
lines.append('%s<li><a href="#%s">%s</a>' % (
indent(), id, name))
while len(h_stack) > 1:
h_stack.pop()
if not lines[-1].endswith("</li>"):
lines[-1] += "</li>"
lines.append("%s</ul>" % indent())
return '\n'.join(lines) + '\n'
toc_html = property(toc_html)
## {{{ http://code.activestate.com/recipes/577257/ (r1)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
"""
Normalizes string, converts to lowercase, removes non-alpha characters,
and converts spaces to hyphens.
From Django's "django/template/defaultfilters.py".
"""
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode()
value = _slugify_strip_re.sub('', value).strip().lower()
return _slugify_hyphenate_re.sub('-', value)
## end of http://code.activestate.com/recipes/577257/ }}}
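# Illustrative example (added comment):
#
#   >>> _slugify(u"Hello, World!")
#   'hello-world'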
# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549
def _curry(*args, **kwargs):
function, args = args[0], args[1:]
def result(*rest, **kwrest):
combined = kwargs.copy()
combined.update(kwrest)
return function(*args + rest, **combined)
return result
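# Illustrative example (added comment): _curry partially applies positional
# and keyword arguments, as used above for _hash_html_block_sub:
#
#   >>> add = lambda a, b: a + b
#   >>> add_one = _curry(add, 1)
#   >>> add_one(2)
#   3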
# Recipe: regex_from_encoded_pattern (1.0)
def _regex_from_encoded_pattern(s):
"""'foo' -> re.compile(re.escape('foo'))
'/foo/' -> re.compile('foo')
'/foo/i' -> re.compile('foo', re.I)
"""
if s.startswith('/') and s.rfind('/') != 0:
# Parse it: /PATTERN/FLAGS
idx = s.rfind('/')
pattern, flags_str = s[1:idx], s[idx+1:]
flag_from_char = {
"i": re.IGNORECASE,
"l": re.LOCALE,
"s": re.DOTALL,
"m": re.MULTILINE,
"u": re.UNICODE,
}
flags = 0
for char in flags_str:
try:
flags |= flag_from_char[char]
except KeyError:
raise ValueError("unsupported regex flag: '%s' in '%s' "
"(must be one of '%s')"
% (char, s, ''.join(list(flag_from_char.keys()))))
return re.compile(s[1:idx], flags)
else: # not an encoded regex
return re.compile(re.escape(s))
# Recipe: dedent (0.1.2)
def _dedentlines(lines, tabsize=8, skip_first_line=False):
"""_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines
"lines" is a list of lines to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
Same as dedent() except operates on a sequence of lines. Note: the
lines list is modified **in-place**.
"""
DEBUG = False
if DEBUG:
print("dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\
% (tabsize, skip_first_line))
indents = []
margin = None
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
indent = 0
for ch in line:
if ch == ' ':
indent += 1
elif ch == '\t':
indent += tabsize - (indent % tabsize)
elif ch in '\r\n':
continue # skip all-whitespace lines
else:
break
else:
continue # skip all-whitespace lines
if DEBUG: print("dedent: indent=%d: %r" % (indent, line))
if margin is None:
margin = indent
else:
margin = min(margin, indent)
if DEBUG: print("dedent: margin=%r" % margin)
if margin is not None and margin > 0:
for i, line in enumerate(lines):
if i == 0 and skip_first_line: continue
removed = 0
for j, ch in enumerate(line):
if ch == ' ':
removed += 1
elif ch == '\t':
removed += tabsize - (removed % tabsize)
elif ch in '\r\n':
if DEBUG: print("dedent: %r: EOL -> strip up to EOL" % line)
lines[i] = lines[i][j:]
break
else:
raise ValueError("unexpected non-whitespace char %r in "
"line %r while removing %d-space margin"
% (ch, line, margin))
if DEBUG:
print("dedent: %r: %r -> removed %d/%d"\
% (line, ch, removed, margin))
if removed == margin:
lines[i] = lines[i][j+1:]
break
elif removed > margin:
lines[i] = ' '*(removed-margin) + lines[i][j+1:]
break
else:
if removed:
lines[i] = lines[i][removed:]
return lines
def _dedent(text, tabsize=8, skip_first_line=False):
"""_dedent(text, tabsize=8, skip_first_line=False) -> dedented text
"text" is the text to dedent.
"tabsize" is the tab width to use for indent width calculations.
"skip_first_line" is a boolean indicating if the first line should
be skipped for calculating the indent width and for dedenting.
This is sometimes useful for docstrings and similar.
textwrap.dedent(s), but don't expand tabs to spaces
"""
lines = text.splitlines(1)
_dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line)
return ''.join(lines)
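# Illustrative example (added comment): unlike textwrap.dedent(), tabs are
# not expanded; they just count toward the margin calculation.
#
#   >>> _dedent("    a\n      b\n")
#   'a\n  b\n'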
class _memoized(object):
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
http://wiki.python.org/moin/PythonDecoratorLibrary
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
self.cache[args] = value = self.func(*args)
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def _xml_oneliner_re_from_tab_width(tab_width):
"""Standalone XML processing instruction regex."""
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in $1
[ ]{0,%d}
(?:
<\?\w+\b\s+.*?\?> # XML processing instruction
|
<\w+:\w+\b\s+.*?/> # namespaced single tag
)
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width)
def _hr_tag_re_from_tab_width(tab_width):
return re.compile(r"""
(?:
(?<=\n\n) # Starting after a blank line
| # or
\A\n? # the beginning of the doc
)
( # save in \1
[ ]{0,%d}
<(hr) # start tag = \2
\b # word break
([^<>])*? #
/?> # the matching end tag
[ \t]*
(?=\n{2,}|\Z) # followed by a blank line or end of document
)
""" % (tab_width - 1), re.X)
_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width)
def _xml_escape_attr(attr, skip_single_quote=True):
"""Escape the given string for use in an HTML/XML tag attribute.
By default this doesn't bother with escaping `'` to `&#39;`, presuming that
the tag attribute is surrounded by double quotes.
"""
escaped = (attr
.replace('&', '&amp;')
.replace('"', '&quot;')
.replace('<', '&lt;')
.replace('>', '&gt;'))
if not skip_single_quote:
escaped = escaped.replace("'", "&#39;")
return escaped
def _xml_encode_email_char_at_random(ch):
r = random()
# Roughly 10% raw, 45% hex, 45% dec.
# '@' *must* be encoded. I [John Gruber] insist.
# Issue 26: '_' must be encoded.
if r > 0.9 and ch not in "@_":
return ch
elif r < 0.45:
# The [1:] is to drop leading '0': 0x63 -> x63
return '&#%s;' % hex(ord(ch))[1:]
else:
return '&#%s;' % ord(ch)
#---- mainline
class _NoReflowFormatter(optparse.IndentedHelpFormatter):
"""An optparse formatter that does NOT reflow the description."""
def format_description(self, description):
return description or ""
def _test():
import doctest
doctest.testmod()
def main(argv=None):
if argv is None:
argv = sys.argv
if not logging.root.handlers:
logging.basicConfig()
usage = "usage: %prog [PATHS...]"
version = "%prog "+__version__
parser = optparse.OptionParser(prog="markdown2", usage=usage,
version=version, description=cmdln_desc,
formatter=_NoReflowFormatter())
parser.add_option("-v", "--verbose", dest="log_level",
action="store_const", const=logging.DEBUG,
help="more verbose output")
parser.add_option("--encoding",
help="specify encoding of text content")
parser.add_option("--html4tags", action="store_true", default=False,
help="use HTML 4 style for empty element tags")
parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode",
help="sanitize literal HTML: 'escape' escapes "
"HTML meta chars, 'replace' replaces with an "
"[HTML_REMOVED] note")
parser.add_option("-x", "--extras", action="append",
help="Turn on specific extra features (not part of "
"the core Markdown spec). See above.")
parser.add_option("--use-file-vars",
help="Look for and use Emacs-style 'markdown-extras' "
"file var to turn on extras. See "
"<https://github.com/trentm/python-markdown2/wiki/Extras>")
parser.add_option("--link-patterns-file",
help="path to a link pattern file")
parser.add_option("--self-test", action="store_true",
help="run internal self-tests (some doctests)")
parser.add_option("--compare", action="store_true",
help="run against Markdown.pl as well (for testing)")
parser.set_defaults(log_level=logging.INFO, compare=False,
encoding="utf-8", safe_mode=None, use_file_vars=False)
opts, paths = parser.parse_args()
log.setLevel(opts.log_level)
if opts.self_test:
return _test()
if opts.extras:
extras = {}
for s in opts.extras:
splitter = re.compile("[,;: ]+")
for e in splitter.split(s):
if '=' in e:
ename, earg = e.split('=', 1)
try:
earg = int(earg)
except ValueError:
pass
else:
ename, earg = e, None
extras[ename] = earg
else:
extras = None
if opts.link_patterns_file:
link_patterns = []
f = open(opts.link_patterns_file)
try:
for i, line in enumerate(f.readlines()):
if not line.strip(): continue
if line.lstrip().startswith("#"): continue
try:
pat, href = line.rstrip().rsplit(None, 1)
except ValueError:
raise MarkdownError("%s:%d: invalid link pattern line: %r"
% (opts.link_patterns_file, i+1, line))
link_patterns.append(
(_regex_from_encoded_pattern(pat), href))
finally:
f.close()
else:
link_patterns = None
from os.path import join, dirname, abspath, exists
markdown_pl = join(dirname(dirname(abspath(__file__))), "test",
"Markdown.pl")
if not paths:
paths = ['-']
for path in paths:
if path == '-':
text = sys.stdin.read()
else:
fp = codecs.open(path, 'r', opts.encoding)
text = fp.read()
fp.close()
if opts.compare:
from subprocess import Popen, PIPE
print("==== Markdown.pl ====")
p = Popen('perl %s' % markdown_pl, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
p.stdin.write(text.encode('utf-8'))
p.stdin.close()
perl_html = p.stdout.read().decode('utf-8')
if py3:
sys.stdout.write(perl_html)
else:
sys.stdout.write(perl_html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
print("==== markdown2.py ====")
html = markdown(text,
html4tags=opts.html4tags,
safe_mode=opts.safe_mode,
extras=extras, link_patterns=link_patterns,
use_file_vars=opts.use_file_vars)
if py3:
sys.stdout.write(html)
else:
sys.stdout.write(html.encode(
sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if extras and "toc" in extras:
log.debug("toc_html: " +
html.toc_html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace'))
if opts.compare:
test_dir = join(dirname(dirname(abspath(__file__))), "test")
if exists(join(test_dir, "test_markdown2.py")):
sys.path.insert(0, test_dir)
from test_markdown2 import norm_html_from_html
norm_html = norm_html_from_html(html)
norm_perl_html = norm_html_from_html(perl_html)
else:
norm_html = html
norm_perl_html = perl_html
print("==== match? %r ====" % (norm_perl_html == norm_html))
if __name__ == "__main__":
sys.exit( main(sys.argv) )
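# Illustrative CLI usage (added comment):
#
#   $ python markdown2.py --extras fenced-code-blocks,tables README.md
#   $ echo '**hi**' | python markdown2.py -s escape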
|
aerickson/ansible | refs/heads/devel | lib/ansible/plugins/lookup/url.py | 26 | # (c) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils._text import to_text
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.plugins.lookup import LookupBase
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
validate_certs = kwargs.get('validate_certs', True)
ret = []
for term in terms:
display.vvvv("url lookup connecting to %s" % term)
try:
response = open_url(term, validate_certs=validate_certs)
except HTTPError as e:
raise AnsibleError("Received HTTP error for %s : %s" % (term, str(e)))
except URLError as e:
raise AnsibleError("Failed lookup url for %s : %s" % (term, str(e)))
except SSLValidationError as e:
raise AnsibleError("Error validating the server's certificate for %s: %s" % (term, str(e)))
except ConnectionError as e:
raise AnsibleError("Error connecting to %s: %s" % (term, str(e)))
for line in response.read().splitlines():
ret.append(to_text(line))
return ret
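# Illustrative usage (added comment; not part of the upstream plugin).
# In a playbook, this lookup fetches a URL and returns its lines:
#
#   - debug:
#       msg: "{{ lookup('url', 'https://example.com/hosts.txt', validate_certs=False) }}"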
|
fitermay/intellij-community | refs/heads/master | python/lib/Lib/site-packages/django/contrib/localflavor/au/forms.py | 309 | """
Australian-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
PHONE_DIGITS_RE = re.compile(r'^(\d{10})$')
class AUPostCodeField(RegexField):
"""Australian post code field."""
default_error_messages = {
'invalid': _('Enter a 4 digit post code.'),
}
def __init__(self, *args, **kwargs):
super(AUPostCodeField, self).__init__(r'^\d{4}$',
max_length=None, min_length=None, *args, **kwargs)
class AUPhoneNumberField(Field):
"""Australian phone number field."""
default_error_messages = {
'invalid': u'Phone numbers must contain 10 digits.',
}
def clean(self, value):
"""
Validate a phone number. Strips parentheses, whitespace and hyphens.
"""
super(AUPhoneNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
value = re.sub(r'(\(|\)|\s+|-)', '', smart_unicode(value))
phone_match = PHONE_DIGITS_RE.search(value)
if phone_match:
return u'%s' % phone_match.group(1)
raise ValidationError(self.error_messages['invalid'])
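# Illustrative example (added comment; not part of upstream Django):
#
#   >>> f = AUPhoneNumberField()
#   >>> f.clean('(02) 9876 5432') # parens, spaces and hyphens stripped
#   u'0298765432'
#   >>> f.clean('1234') # fewer than 10 digits -> ValidationError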
class AUStateSelect(Select):
"""
A Select widget that uses a list of Australian states/territories as its
choices.
"""
def __init__(self, attrs=None):
from au_states import STATE_CHOICES
super(AUStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
|
saguas/frappe | refs/heads/develop | frappe/email/doctype/bulk_email/__init__.py | 1829 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
|
vallsv/pyqtgraph | refs/heads/develop | pyqtgraph/graphicsItems/ButtonItem.py | 52 | from ..Qt import QtGui, QtCore
from .GraphicsObject import GraphicsObject
__all__ = ['ButtonItem']
class ButtonItem(GraphicsObject):
"""Button graphicsItem displaying an image."""
clicked = QtCore.Signal(object)
def __init__(self, imageFile=None, width=None, parentItem=None, pixmap=None):
self.enabled = True
GraphicsObject.__init__(self)
if imageFile is not None:
self.setImageFile(imageFile)
elif pixmap is not None:
self.setPixmap(pixmap)
if width is not None:
s = float(width) / self.pixmap.width()
self.scale(s, s)
if parentItem is not None:
self.setParentItem(parentItem)
self.setOpacity(0.7)
def setImageFile(self, imageFile):
self.setPixmap(QtGui.QPixmap(imageFile))
def setPixmap(self, pixmap):
self.pixmap = pixmap
self.update()
def mouseClickEvent(self, ev):
if self.enabled:
self.clicked.emit(self)
def mouseHoverEvent(self, ev):
if not self.enabled:
return
if ev.isEnter():
self.setOpacity(1.0)
else:
self.setOpacity(0.7)
def disable(self):
self.enabled = False
self.setOpacity(0.4)
def enable(self):
self.enabled = True
self.setOpacity(0.7)
def paint(self, p, *args):
p.setRenderHint(p.Antialiasing)
p.drawPixmap(0, 0, self.pixmap)
def boundingRect(self):
return QtCore.QRectF(self.pixmap.rect())
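# Illustrative usage (added comment; not part of upstream pyqtgraph; the
# image file name is made up):
#
#   btn = ButtonItem(imageFile='icon.png', width=16, parentItem=plotItem)
#   btn.clicked.connect(lambda button: print("clicked"))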
|
jougs/nest-simulator | refs/heads/master | pynest/nest/lib/hl_api_types.py | 1 | # -*- coding: utf-8 -*-
#
# hl_api_types.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Classes defining the different PyNEST types
"""
from ..ll_api import *
from .. import pynestkernel as kernel
from .hl_api_helper import *
from .hl_api_simulation import GetKernelStatus
import numpy
import json
try:
import pandas
HAVE_PANDAS = True
except ImportError:
HAVE_PANDAS = False
__all__ = [
'CreateParameter',
'Mask',
'NodeCollection',
'Parameter',
'serializable',
'SynapseCollection',
'to_json',
]
def CreateParameter(parametertype, specs):
"""
Create a parameter.
Parameters
----------
parametertype : string
Parameter type with or without distance dependency.
Can be one of the following: 'constant', 'linear', 'exponential', 'gaussian', 'gaussian2D',
'uniform', 'normal', 'lognormal', 'distance', 'position'
specs : dict
Dictionary specifying the parameters of the provided
`parametertype`, see **Parameter types**.
Returns
-------
``Parameter``:
Object representing the parameter
Notes
-----
- Instead of using `CreateParameter` you can also use the various parametrizations embedded in NEST. See for
instance :py:func:`.uniform`.
**Parameter types**
Some available parameter types (`parametertype` parameter), their function and
acceptable keys for their corresponding specification dictionaries
* Constant
::
'constant' :
{'value' : float} # constant value
* Randomization
::
# random parameter with uniform distribution in [min,max)
'uniform' :
{'min' : float, # minimum value, default: 0.0
'max' : float} # maximum value, default: 1.0
# random parameter with normal distribution, optionally truncated
# to [min,max)
'normal':
{'mean' : float, # mean value, default: 0.0
'sigma': float, # standard deviation, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
# random parameter with lognormal distribution,
# optionally truncated to [min,max)
'lognormal' :
{'mu' : float, # mean value of logarithm, default: 0.0
'sigma': float, # standard deviation of log, default: 1.0
'min' : float, # minimum value, default: -inf
'max' : float} # maximum value, default: +inf
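    Example
    -------
    A minimal usage sketch (specs follow the tables above)::
        import nest
        p_const = nest.CreateParameter('constant', {'value': 2.5})
        p_uni = nest.CreateParameter('uniform', {'min': 0.0, 'max': 1.0})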
"""
return sli_func('CreateParameter', {parametertype: specs})
class NodeCollectionIterator(object):
"""
Iterator class for `NodeCollection`.
Returns
-------
`NodeCollection`:
Single node ID `NodeCollection` of respective iteration.
"""
def __init__(self, nc):
self._nc = nc
self._increment = 0
def __iter__(self):
return self
def __next__(self):
if self._increment > len(self._nc) - 1:
raise StopIteration
val = sli_func('Take', self._nc._datum, [self._increment + (self._increment >= 0)])
self._increment += 1
return val
class NodeCollection(object):
"""
Class for `NodeCollection`.
`NodeCollection` represents the nodes of a network. The class supports
    iteration, concatenation, indexing, slicing, length, conversion to and
    from lists, and tests for membership and equality. By using the
    member functions :py:func:`get()` and :py:func:`set()`, you can get and set desired
parameters.
A `NodeCollection` is created by the :py:func:`.Create` function, or by converting a
list of nodes to a `NodeCollection` with ``nest.NodeCollection(list)``.
If your nodes have spatial extent, use the member parameter ``spatial`` to get the spatial information.
Example
-------
::
import nest
nest.ResetKernel()
# Create NodeCollection representing nodes
nc = nest.Create('iaf_psc_alpha', 10)
# Convert from list
node_ids_in = [2, 4, 6, 8]
new_nc = nest.NodeCollection(node_ids_in)
# Convert to list
nc_list = nc.tolist()
# Concatenation
Enrns = nest.Create('aeif_cond_alpha', 600)
Inrns = nest.Create('iaf_psc_alpha', 400)
nrns = Enrns + Inrns
# Slicing and membership
print(new_nc[2])
print(new_nc[1:2])
6 in new_nc
"""
_datum = None
def __init__(self, data=None):
if data is None:
data = []
if isinstance(data, kernel.SLIDatum):
if data.dtype != "nodecollectiontype":
raise TypeError("Need NodeCollection Datum.")
self._datum = data
else:
# Data from user, must be converted to datum
# Data can be anything that can be converted to a NodeCollection,
# such as list, tuple, etc.
nc = sli_func('cvnodecollection', data)
self._datum = nc._datum
def __iter__(self):
return NodeCollectionIterator(self)
def __add__(self, other):
if not isinstance(other, NodeCollection):
raise NotImplementedError()
return sli_func('join', self._datum, other._datum)
def __getitem__(self, key):
if isinstance(key, slice):
if key.start is None:
start = 1
else:
start = key.start + 1 if key.start >= 0 else max(key.start, -1 * self.__len__())
if start > self.__len__():
raise IndexError('slice start value outside of the NodeCollection')
if key.stop is None:
stop = self.__len__()
else:
stop = min(key.stop, self.__len__()) if key.stop >= 0 else key.stop - 1
if abs(stop) > self.__len__():
raise IndexError('slice stop value outside of the NodeCollection')
step = 1 if key.step is None else key.step
if step < 1:
raise IndexError('slicing step for NodeCollection must be strictly positive')
return sli_func('Take', self._datum, [start, stop, step])
elif isinstance(key, (int, numpy.integer)):
if abs(key + (key >= 0)) > self.__len__():
raise IndexError('index value outside of the NodeCollection')
return sli_func('Take', self._datum, [key + (key >= 0)])
elif isinstance(key, (list, tuple)):
if len(key) == 0:
return NodeCollection([])
# Must check if elements are bool first, because bool inherits from int
if all(isinstance(x, bool) for x in key):
if len(key) != len(self):
raise IndexError('Bool index array must be the same length as NodeCollection')
                np_key = numpy.array(key, dtype=bool)  # builtin bool; numpy.bool is deprecated
# Checking that elements are not instances of bool too, because bool inherits from int
elif all(isinstance(x, int) and not isinstance(x, bool) for x in key):
np_key = numpy.array(key, dtype=numpy.uint64)
if len(numpy.unique(np_key)) != len(np_key):
raise ValueError('All node IDs in a NodeCollection have to be unique')
else:
raise TypeError('Indices must be integers or bools')
return take_array_index(self._datum, np_key)
elif isinstance(key, numpy.ndarray):
if len(key) == 0:
return NodeCollection([])
if len(key.shape) != 1:
                raise TypeError('NumPy indices must be one-dimensional')
is_booltype = numpy.issubdtype(key.dtype, numpy.dtype(bool).type)
if not (is_booltype or numpy.issubdtype(key.dtype, numpy.integer)):
raise TypeError('NumPy indices must be an array of integers or bools')
if is_booltype and len(key) != len(self):
raise IndexError('Bool index array must be the same length as NodeCollection')
if not is_booltype and len(numpy.unique(key)) != len(key):
raise ValueError('All node IDs in a NodeCollection have to be unique')
return take_array_index(self._datum, key)
else:
raise IndexError('only integers, slices, lists, tuples, and numpy arrays are valid indices')
def __contains__(self, node_id):
return sli_func('MemberQ', self._datum, node_id)
def __eq__(self, other):
if not isinstance(other, NodeCollection):
raise NotImplementedError('Cannot compare NodeCollection to {}'.format(type(other).__name__))
if self.__len__() != other.__len__():
return False
return sli_func('eq', self, other)
def __neq__(self, other):
if not isinstance(other, NodeCollection):
raise NotImplementedError()
return not self == other
def __len__(self):
return sli_func('size', self._datum)
def __str__(self):
return sli_func('pcvs', self._datum)
def __repr__(self):
return sli_func('pcvs', self._datum)
def get(self, *params, **kwargs):
"""
Get parameters from nodes.
Parameters
----------
params : str or list, optional
Parameters to get from the nodes. It must be one of the following:
- A single string.
- A list of strings.
- One or more strings, followed by a string or list of strings.
This is for hierarchical addressing.
output : str, ['pandas','json'], optional
If the returned data should be in a Pandas DataFrame or in a
JSON serializable format.
Returns
-------
int or float:
If there is a single node in the `NodeCollection`, and a single
parameter in params.
array_like:
If there are multiple nodes in the `NodeCollection`, and a single
parameter in params.
dict:
If there are multiple parameters in params. Or, if no parameters
are specified, a dictionary containing aggregated parameter-values
for all nodes is returned.
DataFrame:
Pandas Data frame if output should be in pandas format.
Raises
------
TypeError
If the input params are of the wrong form.
KeyError
If the specified parameter does not exist for the nodes.
See Also
--------
set
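        Example
        -------
        A short sketch (values are illustrative)::
            nodes = nest.Create('iaf_psc_alpha', 3)
            nodes.get('V_m')            # single parameter, all nodes
            nodes.get(['V_m', 'C_m'])   # dict with one entry per parameter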
"""
if not self:
raise ValueError('Cannot get parameter of empty NodeCollection')
# ------------------------- #
# Checks of input #
# ------------------------- #
if not kwargs:
output = ''
elif 'output' in kwargs:
output = kwargs['output']
if output == 'pandas' and not HAVE_PANDAS:
raise ImportError('Pandas could not be imported')
else:
raise TypeError('Got unexpected keyword argument')
pandas_output = output == 'pandas'
if len(params) == 0:
# get() is called without arguments
result = sli_func('get', self._datum)
elif len(params) == 1:
# params is a tuple with a string or list of strings
result = get_parameters(self, params[0])
else:
# Hierarchical addressing
result = get_parameters_hierarchical_addressing(self, params)
if pandas_output:
index = self.get('global_id')
if len(params) == 1 and is_literal(params[0]):
# params is a string
result = {params[0]: result}
elif len(params) > 1 and is_literal(params[1]):
# hierarchical, single string
result = {params[1]: result}
if len(self) == 1:
index = [index]
result = {key: [val] for key, val in result.items()}
result = pandas.DataFrame(result, index=index)
elif output == 'json':
result = to_json(result)
return result
def set(self, params=None, **kwargs):
"""
Set the parameters of nodes to params.
NB! This is almost the same implementation as `SetStatus`.
If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values
can be single values or list of the same size as the `NodeCollection`.
Parameters
----------
params : str or dict or list
Dictionary of parameters or list of dictionaries of parameters of
same length as the `NodeCollection`.
kwargs : keyword argument pairs
Named arguments of parameters of the elements in the `NodeCollection`.
Raises
------
TypeError
If the input params are of the wrong form.
KeyError
If the specified parameter does not exist for the nodes.
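        Example
        -------
        A short sketch (keyword and dict forms are equivalent)::
            nodes = nest.Create('iaf_psc_alpha', 3)
            nodes.set(V_m=-65.0)                    # same value for all nodes
            nodes.set({'V_m': [-65., -60., -55.]})  # one value per node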
"""
if not self:
return
if kwargs and params is None:
params = kwargs
elif kwargs and params:
raise TypeError("must either provide params or kwargs, but not both.")
local_nodes = [self.local] if len(self) == 1 else self.local
if isinstance(params, dict) and all(local_nodes):
node_params = self[0].get()
contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for
key, vals in params.items()]
if any(contains_list):
temp_param = [{} for _ in range(self.__len__())]
for key, vals in params.items():
if not is_iterable(vals):
for temp_dict in temp_param:
temp_dict[key] = vals
else:
for i, temp_dict in enumerate(temp_param):
temp_dict[key] = vals[i]
params = temp_param
if (isinstance(params, (list, tuple)) and self.__len__() != len(params)):
raise TypeError("status dict must be a dict, or a list of dicts of length {} ".format(self.__len__()))
sli_func('SetStatus', self._datum, params)
def tolist(self):
"""
Convert `NodeCollection` to list.
"""
if self.__len__() == 0:
return []
return (list(self.get('global_id')) if len(self) > 1
else [self.get('global_id')])
def index(self, node_id):
"""
Find the index of a node ID in the `NodeCollection`.
Parameters
----------
node_id : int
Global ID to be found.
Raises
------
ValueError
If the node ID is not in the `NodeCollection`.
"""
index = sli_func('Find', self._datum, node_id)
if index == -1:
raise ValueError('{} is not in NodeCollection'.format(node_id))
return index
def __bool__(self):
"""Converts the NodeCollection to a bool. False if it is empty, True otherwise."""
return len(self) > 0
def __array__(self, dtype=None):
"""Convert the NodeCollection to a NumPy array."""
return numpy.array(self.tolist(), dtype=dtype)
def __getattr__(self, attr):
if not self:
raise AttributeError('Cannot get attribute of empty NodeCollection')
if attr == 'spatial':
metadata = sli_func('GetMetadata', self._datum)
val = metadata if metadata else None
super().__setattr__(attr, val)
return self.spatial
# NumPy compatibility check:
# raises AttributeError to tell NumPy that interfaces other than
# __array__ are not available (otherwise get_parameters would be
# queried, KeyError would be raised, and all would crash)
if attr.startswith('__array_'):
raise AttributeError
return self.get(attr)
def __setattr__(self, attr, value):
# `_datum` is the only property of NodeCollection that should not be
# interpreted as a property of the model
if attr == '_datum':
super().__setattr__(attr, value)
else:
self.set({attr: value})
class SynapseCollectionIterator(object):
"""
Iterator class for SynapseCollection.
"""
def __init__(self, synapse_collection):
self._iter = iter(synapse_collection._datum)
def __iter__(self):
return self
def __next__(self):
return SynapseCollection(next(self._iter))
class SynapseCollection(object):
"""
Class for Connections.
`SynapseCollection` represents the connections of a network. The class supports indexing, iteration, length and
    equality. You can get and set connection parameters by using the member functions :py:func:`get()` and
    :py:func:`set()`. The member function :py:func:`sources()` returns an iterator over the
    source nodes, while :py:func:`targets()` returns an iterator over the target nodes of the connections.
A SynapseCollection is created by the :py:func:`.GetConnections` function.
"""
_datum = None
def __init__(self, data):
if isinstance(data, list):
for datum in data:
if (not isinstance(datum, kernel.SLIDatum) or
datum.dtype != "connectiontype"):
raise TypeError("Expected Connection Datum.")
self._datum = data
elif data is None:
# We can have an empty SynapseCollection if there are no connections.
self._datum = data
else:
if (not isinstance(data, kernel.SLIDatum) or
data.dtype != "connectiontype"):
raise TypeError("Expected Connection Datum.")
# self._datum needs to be a list of Connection datums.
self._datum = [data]
def __iter__(self):
return SynapseCollectionIterator(self)
def __len__(self):
if self._datum is None:
return 0
return len(self._datum)
def __eq__(self, other):
if not isinstance(other, SynapseCollection):
raise NotImplementedError()
if self.__len__() != other.__len__():
return False
self_get = self.get(['source', 'target', 'target_thread',
'synapse_id', 'port'])
other_get = other.get(['source', 'target', 'target_thread',
'synapse_id', 'port'])
if self_get != other_get:
return False
return True
def __neq__(self, other):
if not isinstance(other, SynapseCollection):
raise NotImplementedError()
return not self == other
def __getitem__(self, key):
if isinstance(key, slice):
return SynapseCollection(self._datum[key])
else:
return SynapseCollection([self._datum[key]])
def __str__(self):
"""
Printing a `SynapseCollection` returns something of the form:
*--------*-------------*
| source | 1, 1, 2, 2, |
*--------*-------------*
| target | 1, 2, 1, 2, |
*--------*-------------*
"""
srcs = self.get('source')
trgt = self.get('target')
if isinstance(srcs, int):
srcs = [srcs]
if isinstance(trgt, int):
trgt = [trgt]
# 35 is arbitrarily chosen.
if len(srcs) < 35:
source = '| source | ' + ''.join(str(e)+', ' for e in srcs) + '|'
target = '| target | ' + ''.join(str(e)+', ' for e in trgt) + '|'
else:
source = ('| source | ' + ''.join(str(e)+', ' for e in srcs[:15]) +
'... ' + ''.join(str(e)+', ' for e in srcs[-15:]) + '|')
target = ('| target | ' + ''.join(str(e)+', ' for e in trgt[:15]) +
'... ' + ''.join(str(e)+', ' for e in trgt[-15:]) + '|')
borderline_s = '*--------*' + '-'*(len(source) - 12) + '-*'
borderline_t = '*--------*' + '-'*(len(target) - 12) + '-*'
borderline_m = max(borderline_s, borderline_t)
result = (borderline_s + '\n' + source + '\n' + borderline_m + '\n' +
target + '\n' + borderline_t)
return result
def __getattr__(self, attr):
if attr == 'distance':
dist = sli_func('Distance', self._datum)
super().__setattr__(attr, dist)
return self.distance
return self.get(attr)
def __setattr__(self, attr, value):
# `_datum` is the only property of SynapseCollection that should not be
# interpreted as a property of the model
if attr == '_datum':
super().__setattr__(attr, value)
else:
self.set({attr: value})
def sources(self):
"""Returns iterator containing the source node IDs of the `SynapseCollection`."""
sources = self.get('source')
if not isinstance(sources, (list, tuple)):
sources = (sources,)
return iter(sources)
def targets(self):
"""Returns iterator containing the target node IDs of the `SynapseCollection`."""
targets = self.get('target')
if not isinstance(targets, (list, tuple)):
targets = (targets,)
return iter(targets)
def get(self, keys=None, output=''):
"""
Return a parameter dictionary of the connections.
If `keys` is a string, a list of values is returned, unless we have a
single connection, in which case the single value is returned.
`keys` may also be a list, in which case a dictionary with a list of
values is returned.
Parameters
----------
keys : str or list, optional
String or a list of strings naming model properties. get
then returns a single value or a dictionary with lists of values
belonging to the given `keys`.
output : str, ['pandas','json'], optional
If the returned data should be in a Pandas DataFrame or in a
JSON serializable format.
Returns
-------
dict:
All parameters, or, if keys is a list of strings, a dictionary with
lists of corresponding parameters
type:
            If keys is a string, the corresponding parameter(s) is returned
Raises
------
TypeError
If input params are of the wrong form.
KeyError
If the specified parameter does not exist for the connections.
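        Example
        -------
        A short sketch (assuming connections exist)::
            conns = nest.GetConnections()
            conns.get('weight')             # list of weights (or single value)
            conns.get(['weight', 'delay'])  # dict with one list per key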
"""
pandas_output = output == 'pandas'
if pandas_output and not HAVE_PANDAS:
raise ImportError('Pandas could not be imported')
# Return empty tuple if we have no connections or if we have done a
# nest.ResetKernel()
num_conn = GetKernelStatus('num_connections')
if self.__len__() == 0 or num_conn == 0:
return ()
if keys is None:
cmd = 'GetStatus'
elif is_literal(keys):
cmd = 'GetStatus {{ /{0} get }} Map'.format(keys)
elif is_iterable(keys):
keys_str = " ".join("/{0}".format(x) for x in keys)
cmd = 'GetStatus {{ [ [ {0} ] ] get }} Map'.format(keys_str)
else:
raise TypeError("keys should be either a string or an iterable")
sps(self._datum)
sr(cmd)
result = spp()
# Need to restructure the data.
final_result = restructure_data(result, keys)
if pandas_output:
index = (self.get('source') if self.__len__() > 1 else
(self.get('source'),))
if is_literal(keys):
final_result = {keys: final_result}
final_result = pandas.DataFrame(final_result, index=index)
elif output == 'json':
final_result = to_json(final_result)
return final_result
def set(self, params=None, **kwargs):
"""
Set the parameters of the connections to `params`.
NB! This is almost the same implementation as SetStatus
If `kwargs` is given, it has to be names and values of an attribute as keyword argument pairs. The values
can be single values or list of the same size as the `SynapseCollection`.
Parameters
----------
params : str or dict or list
Dictionary of parameters or list of dictionaries of parameters of
same length as the `SynapseCollection`.
kwargs : keyword argument pairs
Named arguments of parameters of the elements in the `SynapseCollection`.
Raises
------
TypeError
If input params are of the wrong form.
KeyError
If the specified parameter does not exist for the connections.
"""
# This was added to ensure that the function is a nop (instead of,
# for instance, raising an exception) when applied to an empty
# SynapseCollection, or after having done a nest.ResetKernel().
if self.__len__() == 0 or GetKernelStatus()['network_size'] == 0:
return
if (isinstance(params, (list, tuple)) and
self.__len__() != len(params)):
raise TypeError("status dict must be a dict, or a list of dicts of length {}".format(self.__len__()))
if kwargs and params is None:
params = kwargs
elif kwargs and params:
raise TypeError("must either provide params or kwargs, but not both.")
if isinstance(params, dict):
node_params = self[0].get()
contains_list = [is_iterable(vals) and key in node_params and not is_iterable(node_params[key]) for
key, vals in params.items()]
if any(contains_list):
temp_param = [{} for _ in range(self.__len__())]
for key, vals in params.items():
if not is_iterable(vals):
for temp_dict in temp_param:
temp_dict[key] = vals
else:
for i, temp_dict in enumerate(temp_param):
temp_dict[key] = vals[i]
params = temp_param
params = broadcast(params, self.__len__(), (dict,), "params")
sps(self._datum)
sps(params)
sr('2 arraystore')
sr('Transpose { arrayload pop SetStatus } forall')
class Mask(object):
"""
Class for spatial masks.
Masks are used when creating connections when nodes have spatial extent. A mask
describes the area of the pool population that shall be searched to find nodes to
connect to for any given node in the driver population. Masks are created using
the :py:func:`.CreateMask` command.
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Masks must be created using the CreateMask command."""
if not isinstance(datum, kernel.SLIDatum) or datum.dtype != "masktype":
raise TypeError("expected mask Datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other):
if not isinstance(other, Mask):
raise NotImplementedError()
return sli_func(op, self._datum, other._datum)
def __or__(self, other):
return self._binop("or", other)
def __and__(self, other):
return self._binop("and", other)
def __sub__(self, other):
return self._binop("sub", other)
def Inside(self, point):
"""
Test if a point is inside a mask.
Parameters
----------
point : tuple/list of float values
Coordinate of point
Returns
-------
out : bool
True if the point is inside the mask, False otherwise
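        Example
        -------
        A short sketch (mask created beforehand with :py:func:`.CreateMask`)::
            mask = nest.CreateMask('rectangular',
                                   {'lower_left': [-1., -1.],
                                    'upper_right': [1., 1.]})
            mask.Inside([0.5, 0.5])  # -> True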
"""
return sli_func("Inside", point, self._datum)
class Parameter(object):
"""
Class for parameters
A parameter may be used as a probability kernel when creating
connections and nodes or as synaptic parameters (such as weight and delay).
Parameters are created using the :py:func:`.CreateParameter` command.
"""
_datum = None
# The constructor should not be called by the user
def __init__(self, datum):
"""Parameters must be created using the CreateParameter command."""
if not isinstance(datum,
kernel.SLIDatum) or datum.dtype != "parametertype":
raise TypeError("expected parameter datum")
self._datum = datum
# Generic binary operation
def _binop(self, op, other, params=None):
if isinstance(other, (int, float)):
other = CreateParameter('constant', {'value': float(other)})
if not isinstance(other, Parameter):
raise NotImplementedError()
if params is None:
return sli_func(op, self._datum, other._datum)
else:
return sli_func(op, self._datum, other._datum, params)
def __add__(self, other):
return self._binop("add", other)
def __radd__(self, other):
return self + other
def __sub__(self, other):
return self._binop("sub", other)
def __rsub__(self, other):
return self * (-1) + other
def __neg__(self):
return self * (-1)
def __mul__(self, other):
return self._binop("mul", other)
def __rmul__(self, other):
return self * other
def __div__(self, other):
return self._binop("div", other)
def __truediv__(self, other):
return self._binop("div", other)
def __pow__(self, exponent):
return sli_func("pow", self._datum, float(exponent))
def __lt__(self, other):
return self._binop("compare", other, {'comparator': 0})
def __le__(self, other):
return self._binop("compare", other, {'comparator': 1})
def __eq__(self, other):
return self._binop("compare", other, {'comparator': 2})
def __ne__(self, other):
return self._binop("compare", other, {'comparator': 3})
def __ge__(self, other):
return self._binop("compare", other, {'comparator': 4})
def __gt__(self, other):
return self._binop("compare", other, {'comparator': 5})
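    # Usage sketch (names are illustrative): the overloaded operators above
    # combine parameters lazily into new Parameter objects, e.g.
    #   p = nest.CreateParameter('uniform', {'min': 0.0, 'max': 1.0})
    #   q = 2.0 * p + 1.0   # uniform on [1.0, 3.0)
    #   b = p < 0.5         # comparison yields a boolean-valued Parameter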
def GetValue(self):
"""
Compute value of parameter.
Returns
-------
out : value
The value of the parameter
See also
--------
CreateParameter
Example
-------
::
import nest
# normal distribution parameter
P = nest.CreateParameter('normal', {'mean': 0.0, 'sigma': 1.0})
# get out value
P.GetValue()
"""
return sli_func("GetValue", self._datum)
def is_spatial(self):
return sli_func('ParameterIsSpatial', self._datum)
def apply(self, spatial_nc, positions=None):
if positions is None:
return sli_func('Apply', self._datum, spatial_nc)
else:
if len(spatial_nc) != 1:
raise ValueError('The NodeCollection must contain a single node ID only')
if not isinstance(positions, (list, tuple)):
raise TypeError('Positions must be a list or tuple of positions')
for pos in positions:
if not isinstance(pos, (list, tuple, numpy.ndarray)):
raise TypeError('Each position must be a list or tuple')
if len(pos) != len(positions[0]):
raise ValueError('All positions must have the same number of dimensions')
return sli_func('Apply', self._datum, {'source': spatial_nc, 'targets': positions})
def serializable(data):
"""Make data serializable for JSON.
Parameters
----------
data : any
Returns
-------
data_serialized : str, int, float, list, dict
Data can be encoded to JSON
"""
if isinstance(data, (numpy.ndarray, NodeCollection)):
return data.tolist()
if isinstance(data, SynapseCollection):
# Get full information from SynapseCollection
return serializable(data.get())
if isinstance(data, kernel.SLILiteral):
# Get name of SLILiteral.
return data.name
if isinstance(data, (list, tuple)):
return [serializable(d) for d in data]
if isinstance(data, dict):
return dict([(key, serializable(value)) for key, value in data.items()])
return data
def to_json(data, **kwargs):
"""Serialize data to JSON.
Parameters
----------
data : any
kwargs : keyword argument pairs
Named arguments of parameters for `json.dumps` function.
Returns
-------
data_json : str
JSON format of the data
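    Example
    -------
    A small sketch (plain Python and NumPy types)::
        >>> to_json({'a': numpy.array([1, 2])})
        '{"a": [1, 2]}'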
"""
data_serialized = serializable(data)
data_json = json.dumps(data_serialized, **kwargs)
return data_json
|
rschnapka/odoo | refs/heads/7.0 | addons/sale_mrp/__init__.py | 445 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_mrp
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
saurabh6790/test_final_med_app | refs/heads/master | patches/june_2013/p02_update_project_completed.py | 30 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
import webnotes
def execute():
webnotes.reload_doc("projects", "doctype", "project")
for p in webnotes.conn.sql_list("""select name from tabProject"""):
webnotes.bean("Project", p).make_controller().update_percent_complete() |
CompassionCH/bank-payment | refs/heads/10.0-payment-cancel | account_payment_partner/models/__init__.py | 1 | # -*- coding: utf-8 -*-
from . import res_partner
from . import account_invoice
from . import account_move_line
from . import account_payment_mode
|
AtsushiSakai/PythonRobotics | refs/heads/master | AerialNavigation/rocket_powered_landing/rocket_powered_landing.py | 1 | """
A rocket powered landing with successive convexification
author: Sven Niederberger
Atsushi Sakai
Ref:
- Python implementation of 'Successive Convexification for 6-DoF Mars Rocket Powered Landing with Free-Final-Time' paper
by Michael Szmuk and Behcet Acıkmese.
- EmbersArc/SuccessiveConvexificationFreeFinalTime: Implementation of "Successive Convexification for 6-DoF Mars Rocket Powered Landing with Free-Final-Time" https://github.com/EmbersArc/SuccessiveConvexificationFreeFinalTime
"""
from time import time
import numpy as np
from scipy.integrate import odeint
import cvxpy
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# Trajectory points
K = 50
# Max solver iterations
iterations = 30
# Weight constants
W_SIGMA = 1 # flight time
W_DELTA = 1e-3 # difference in state/input
W_DELTA_SIGMA = 1e-1 # difference in flight time
W_NU = 1e5 # virtual control
solver = 'ECOS'
verbose_solver = False
show_animation = True
class Rocket_Model_6DoF:
"""
A 6 degree of freedom rocket landing problem.
"""
def __init__(self):
"""
        A large r_scale for a small-scale problem will
        lead to numerical problems as parameters become excessively small
and (it seems) precision is lost in the dynamics.
"""
self.n_x = 14
self.n_u = 3
# Mass
self.m_wet = 3.0 # 30000 kg
self.m_dry = 2.2 # 22000 kg
# Flight time guess
self.t_f_guess = 10.0 # 10 s
# State constraints
self.r_I_final = np.array((0., 0., 0.))
self.v_I_final = np.array((-1e-1, 0., 0.))
self.q_B_I_final = self.euler_to_quat((0, 0, 0))
self.w_B_final = np.deg2rad(np.array((0., 0., 0.)))
self.w_B_max = np.deg2rad(60)
# Angles
max_gimbal = 20
max_angle = 90
        glideslope_angle = 20
self.tan_delta_max = np.tan(np.deg2rad(max_gimbal))
self.cos_theta_max = np.cos(np.deg2rad(max_angle))
        self.tan_gamma_gs = np.tan(np.deg2rad(glideslope_angle))
# Thrust limits
self.T_max = 5.0
self.T_min = 0.3
# Angular moment of inertia
self.J_B = 1e-2 * np.diag([1., 1., 1.])
# Gravity
self.g_I = np.array((-1, 0., 0.))
# Fuel consumption
self.alpha_m = 0.01
# Vector from thrust point to CoM
self.r_T_B = np.array([-1e-2, 0., 0.])
self.set_random_initial_state()
self.x_init = np.concatenate(
((self.m_wet,), self.r_I_init, self.v_I_init, self.q_B_I_init, self.w_B_init))
self.x_final = np.concatenate(
((self.m_dry,), self.r_I_final, self.v_I_final, self.q_B_I_final, self.w_B_final))
self.r_scale = np.linalg.norm(self.r_I_init)
self.m_scale = self.m_wet
def set_random_initial_state(self):
self.r_I_init = np.array((0., 0., 0.))
self.r_I_init[0] = np.random.uniform(3, 4)
self.r_I_init[1:3] = np.random.uniform(-2, 2, size=2)
self.v_I_init = np.array((0., 0., 0.))
self.v_I_init[0] = np.random.uniform(-1, -0.5)
self.v_I_init[1:3] = np.random.uniform(
-0.5, -0.2, size=2) * self.r_I_init[1:3]
self.q_B_I_init = self.euler_to_quat((0,
np.random.uniform(-30, 30),
np.random.uniform(-30, 30)))
self.w_B_init = np.deg2rad((0,
np.random.uniform(-20, 20),
np.random.uniform(-20, 20)))
def f_func(self, x, u):
m, rx, ry, rz, vx, vy, vz, q0, q1, q2, q3, wx, wy, wz = x[0], x[1], x[
2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13]
ux, uy, uz = u[0], u[1], u[2]
return np.matrix([
[-0.01 * np.sqrt(ux**2 + uy**2 + uz**2)],
[vx],
[vy],
[vz],
[(-1.0 * m - ux * (2 * q2**2 + 2 * q3**2 - 1) - 2 * uy
* (q0 * q3 - q1 * q2) + 2 * uz * (q0 * q2 + q1 * q3)) / m],
[(2 * ux * (q0 * q3 + q1 * q2) - uy * (2 * q1**2
+ 2 * q3**2 - 1) - 2 * uz * (q0 * q1 - q2 * q3)) / m],
[(-2 * ux * (q0 * q2 - q1 * q3) + 2 * uy
* (q0 * q1 + q2 * q3) - uz * (2 * q1**2 + 2 * q2**2 - 1)) / m],
[-0.5 * q1 * wx - 0.5 * q2 * wy - 0.5 * q3 * wz],
[0.5 * q0 * wx + 0.5 * q2 * wz - 0.5 * q3 * wy],
[0.5 * q0 * wy - 0.5 * q1 * wz + 0.5 * q3 * wx],
[0.5 * q0 * wz + 0.5 * q1 * wy - 0.5 * q2 * wx],
[0],
[1.0 * uz],
[-1.0 * uy]
])
def A_func(self, x, u):
m, rx, ry, rz, vx, vy, vz, q0, q1, q2, q3, wx, wy, wz = x[0], x[1], x[
2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13]
ux, uy, uz = u[0], u[1], u[2]
return np.matrix([
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[(ux * (2 * q2**2 + 2 * q3**2 - 1) + 2 * uy * (q0 * q3 - q1 * q2) - 2 * uz * (q0 * q2 + q1 * q3)) / m**2, 0, 0, 0, 0, 0, 0, 2 * (q2 * uz
- q3 * uy) / m, 2 * (q2 * uy + q3 * uz) / m, 2 * (q0 * uz + q1 * uy - 2 * q2 * ux) / m, 2 * (-q0 * uy + q1 * uz - 2 * q3 * ux) / m, 0, 0, 0],
[(-2 * ux * (q0 * q3 + q1 * q2) + uy * (2 * q1**2 + 2 * q3**2 - 1) + 2 * uz * (q0 * q1 - q2 * q3)) / m**2, 0, 0, 0, 0, 0, 0, 2 * (-q1 * uz
+ q3 * ux) / m, 2 * (-q0 * uz - 2 * q1 * uy + q2 * ux) / m, 2 * (q1 * ux + q3 * uz) / m, 2 * (q0 * ux + q2 * uz - 2 * q3 * uy) / m, 0, 0, 0],
[(2 * ux * (q0 * q2 - q1 * q3) - 2 * uy * (q0 * q1 + q2 * q3) + uz * (2 * q1**2 + 2 * q2**2 - 1)) / m**2, 0, 0, 0, 0, 0, 0, 2 * (q1 * uy
- q2 * ux) / m, 2 * (q0 * uy - 2 * q1 * uz + q3 * ux) / m, 2 * (-q0 * ux - 2 * q2 * uz + q3 * uy) / m, 2 * (q1 * ux + q2 * uy) / m, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, -0.5 * wx, -0.5 * wy,
- 0.5 * wz, -0.5 * q1, -0.5 * q2, -0.5 * q3],
[0, 0, 0, 0, 0, 0, 0, 0.5 * wx, 0, 0.5 * wz,
- 0.5 * wy, 0.5 * q0, -0.5 * q3, 0.5 * q2],
[0, 0, 0, 0, 0, 0, 0, 0.5 * wy, -0.5 * wz, 0,
0.5 * wx, 0.5 * q3, 0.5 * q0, -0.5 * q1],
[0, 0, 0, 0, 0, 0, 0, 0.5 * wz, 0.5 * wy,
- 0.5 * wx, 0, -0.5 * q2, 0.5 * q1, 0.5 * q0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
def B_func(self, x, u):
m, rx, ry, rz, vx, vy, vz, q0, q1, q2, q3, wx, wy, wz = x[0], x[1], x[
2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11], x[12], x[13]
ux, uy, uz = u[0], u[1], u[2]
return np.matrix([
[-0.01 * ux / np.sqrt(ux**2 + uy**2 + uz**2),
-0.01 * uy / np.sqrt(ux ** 2 + uy**2 + uz**2),
-0.01 * uz / np.sqrt(ux**2 + uy**2 + uz**2)],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[(-2 * q2**2 - 2 * q3**2 + 1) / m, 2
* (-q0 * q3 + q1 * q2) / m, 2 * (q0 * q2 + q1 * q3) / m],
[2 * (q0 * q3 + q1 * q2) / m, (-2 * q1**2 - 2
* q3**2 + 1) / m, 2 * (-q0 * q1 + q2 * q3) / m],
[2 * (-q0 * q2 + q1 * q3) / m, 2 * (q0 * q1 + q2 * q3)
/ m, (-2 * q1**2 - 2 * q2**2 + 1) / m],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 1.0],
[0, -1.0, 0]
])
def euler_to_quat(self, a):
a = np.deg2rad(a)
cy = np.cos(a[1] * 0.5)
sy = np.sin(a[1] * 0.5)
cr = np.cos(a[0] * 0.5)
sr = np.sin(a[0] * 0.5)
cp = np.cos(a[2] * 0.5)
sp = np.sin(a[2] * 0.5)
q = np.zeros(4)
q[0] = cy * cr * cp + sy * sr * sp
q[1] = cy * sr * cp - sy * cr * sp
q[3] = cy * cr * sp + sy * sr * cp
q[2] = sy * cr * cp - cy * sr * sp
return q
def skew(self, v):
return np.matrix([
[0, -v[2], v[1]],
[v[2], 0, -v[0]],
[-v[1], v[0], 0]
])
def dir_cosine(self, q):
return np.matrix([
[1 - 2 * (q[2] ** 2 + q[3] ** 2), 2 * (q[1] * q[2]
+ q[0] * q[3]), 2 * (q[1] * q[3] - q[0] * q[2])],
[2 * (q[1] * q[2] - q[0] * q[3]), 1 - 2
* (q[1] ** 2 + q[3] ** 2), 2 * (q[2] * q[3] + q[0] * q[1])],
[2 * (q[1] * q[3] + q[0] * q[2]), 2 * (q[2] * q[3]
- q[0] * q[1]), 1 - 2 * (q[1] ** 2 + q[2] ** 2)]
])
def omega(self, w):
return np.matrix([
[0, -w[0], -w[1], -w[2]],
[w[0], 0, w[2], -w[1]],
[w[1], -w[2], 0, w[0]],
[w[2], w[1], -w[0], 0],
])
def initialize_trajectory(self, X, U):
"""
Initialize the trajectory with linear approximation.
"""
K = X.shape[1]
for k in range(K):
alpha1 = (K - k) / K
alpha2 = k / K
m_k = (alpha1 * self.x_init[0] + alpha2 * self.x_final[0],)
r_I_k = alpha1 * self.x_init[1:4] + alpha2 * self.x_final[1:4]
v_I_k = alpha1 * self.x_init[4:7] + alpha2 * self.x_final[4:7]
q_B_I_k = np.array([1, 0, 0, 0])
w_B_k = alpha1 * self.x_init[11:14] + alpha2 * self.x_final[11:14]
X[:, k] = np.concatenate((m_k, r_I_k, v_I_k, q_B_I_k, w_B_k))
U[:, k] = m_k * -self.g_I
return X, U
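    # Sketch of the scheme above: every state column is a convex combination
    # of x_init and x_final with weights alpha1 = (K - k) / K and
    # alpha2 = k / K, the attitude is held at the identity quaternion, and
    # the thrust guess simply cancels gravity (U = -m * g_I).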
def get_constraints(self, X_v, U_v, X_last_p, U_last_p):
"""
Get model specific constraints.
:param X_v: cvx variable for current states
:param U_v: cvx variable for current inputs
:param X_last_p: cvx parameter for last states
:param U_last_p: cvx parameter for last inputs
:return: A list of cvx constraints
"""
# Boundary conditions:
constraints = [
X_v[0, 0] == self.x_init[0],
X_v[1:4, 0] == self.x_init[1:4],
X_v[4:7, 0] == self.x_init[4:7],
# X_v[7:11, 0] == self.x_init[7:11], # initial orientation is free
X_v[11:14, 0] == self.x_init[11:14],
# X_[0, -1] final mass is free
X_v[1:, -1] == self.x_final[1:],
U_v[1:3, -1] == 0,
]
constraints += [
# State constraints:
X_v[0, :] >= self.m_dry, # minimum mass
cvxpy.norm(X_v[2: 4, :], axis=0) <= X_v[1, :] / \
self.tan_gamma_gs, # glideslope
cvxpy.norm(X_v[9:11, :], axis=0) <= np.sqrt(
(1 - self.cos_theta_max) / 2), # maximum angle
# maximum angular velocity
cvxpy.norm(X_v[11: 14, :], axis=0) <= self.w_B_max,
# Control constraints:
cvxpy.norm(U_v[1:3, :], axis=0) <= self.tan_delta_max * \
U_v[0, :], # gimbal angle constraint
cvxpy.norm(U_v, axis=0) <= self.T_max, # upper thrust constraint
]
# linearized lower thrust constraint
rhs = [U_last_p[:, k] / cvxpy.norm(U_last_p[:, k]) * U_v[:, k]
for k in range(X_v.shape[1])]
constraints += [
self.T_min <= cvxpy.vstack(rhs)
]
return constraints
class Integrator:
def __init__(self, m, K):
self.K = K
self.m = m
self.n_x = m.n_x
self.n_u = m.n_u
self.A_bar = np.zeros([m.n_x * m.n_x, K - 1])
self.B_bar = np.zeros([m.n_x * m.n_u, K - 1])
self.C_bar = np.zeros([m.n_x * m.n_u, K - 1])
self.S_bar = np.zeros([m.n_x, K - 1])
self.z_bar = np.zeros([m.n_x, K - 1])
# vector indices for flat matrices
x_end = m.n_x
A_bar_end = m.n_x * (1 + m.n_x)
B_bar_end = m.n_x * (1 + m.n_x + m.n_u)
C_bar_end = m.n_x * (1 + m.n_x + m.n_u + m.n_u)
S_bar_end = m.n_x * (1 + m.n_x + m.n_u + m.n_u + 1)
z_bar_end = m.n_x * (1 + m.n_x + m.n_u + m.n_u + 2)
self.x_ind = slice(0, x_end)
self.A_bar_ind = slice(x_end, A_bar_end)
self.B_bar_ind = slice(A_bar_end, B_bar_end)
self.C_bar_ind = slice(B_bar_end, C_bar_end)
self.S_bar_ind = slice(C_bar_end, S_bar_end)
self.z_bar_ind = slice(S_bar_end, z_bar_end)
self.f, self.A, self.B = m.f_func, m.A_func, m.B_func
# integration initial condition
self.V0 = np.zeros((m.n_x * (1 + m.n_x + m.n_u + m.n_u + 2),))
self.V0[self.A_bar_ind] = np.eye(m.n_x).reshape(-1)
self.dt = 1. / (K - 1)
def calculate_discretization(self, X, U, sigma):
"""
Calculate discretization for given states, inputs and total time.
:param X: Matrix of states for all time points
:param U: Matrix of inputs for all time points
:param sigma: Total time
:return: The discretization matrices
"""
for k in range(self.K - 1):
self.V0[self.x_ind] = X[:, k]
V = np.array(odeint(self._ode_dVdt, self.V0, (0, self.dt),
args=(U[:, k], U[:, k + 1], sigma))[1, :])
# using \Phi_A(\tau_{k+1},\xi) = \Phi_A(\tau_{k+1},\tau_k)\Phi_A(\xi,\tau_k)^{-1}
# flatten matrices in column-major (Fortran) order for CVXPY
Phi = V[self.A_bar_ind].reshape((self.n_x, self.n_x))
self.A_bar[:, k] = Phi.flatten(order='F')
self.B_bar[:, k] = np.matmul(Phi, V[self.B_bar_ind].reshape(
(self.n_x, self.n_u))).flatten(order='F')
self.C_bar[:, k] = np.matmul(Phi, V[self.C_bar_ind].reshape(
(self.n_x, self.n_u))).flatten(order='F')
self.S_bar[:, k] = np.matmul(Phi, V[self.S_bar_ind])
self.z_bar[:, k] = np.matmul(Phi, V[self.z_bar_ind])
return self.A_bar, self.B_bar, self.C_bar, self.S_bar, self.z_bar
def _ode_dVdt(self, V, t, u_t0, u_t1, sigma):
"""
ODE function to compute dVdt.
:param V: Evaluation state V = [x, Phi_A, B_bar, C_bar, S_bar, z_bar]
:param t: Evaluation time
:param u_t0: Input at start of interval
:param u_t1: Input at end of interval
:param sigma: Total time
:return: Derivative at current time and state dVdt
"""
alpha = (self.dt - t) / self.dt
beta = t / self.dt
x = V[self.x_ind]
u = u_t0 + beta * (u_t1 - u_t0)
# using \Phi_A(\tau_{k+1},\xi) = \Phi_A(\tau_{k+1},\tau_k)\Phi_A(\xi,\tau_k)^{-1}
# and pre-multiplying with \Phi_A(\tau_{k+1},\tau_k) after integration
Phi_A_xi = np.linalg.inv(
V[self.A_bar_ind].reshape((self.n_x, self.n_x)))
A_subs = sigma * self.A(x, u)
B_subs = sigma * self.B(x, u)
f_subs = self.f(x, u)
dVdt = np.zeros_like(V)
dVdt[self.x_ind] = sigma * f_subs.transpose()
dVdt[self.A_bar_ind] = np.matmul(
A_subs, V[self.A_bar_ind].reshape((self.n_x, self.n_x))).reshape(-1)
dVdt[self.B_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * alpha
dVdt[self.C_bar_ind] = np.matmul(Phi_A_xi, B_subs).reshape(-1) * beta
dVdt[self.S_bar_ind] = np.matmul(Phi_A_xi, f_subs).transpose()
z_t = -np.matmul(A_subs, x) - np.matmul(B_subs, u)
dVdt[self.z_bar_ind] = np.dot(Phi_A_xi, z_t.T).flatten()
return dVdt
class SCProblem:
"""
Defines a standard Successive Convexification problem and
adds the model specific constraints and objectives.
:param m: The model object
:param K: Number of discretization points
"""
def __init__(self, m, K):
# Variables:
self.var = dict()
self.var['X'] = cvxpy.Variable((m.n_x, K))
self.var['U'] = cvxpy.Variable((m.n_u, K))
self.var['sigma'] = cvxpy.Variable(nonneg=True)
self.var['nu'] = cvxpy.Variable((m.n_x, K - 1))
self.var['delta_norm'] = cvxpy.Variable(nonneg=True)
self.var['sigma_norm'] = cvxpy.Variable(nonneg=True)
# Parameters:
self.par = dict()
self.par['A_bar'] = cvxpy.Parameter((m.n_x * m.n_x, K - 1))
self.par['B_bar'] = cvxpy.Parameter((m.n_x * m.n_u, K - 1))
self.par['C_bar'] = cvxpy.Parameter((m.n_x * m.n_u, K - 1))
self.par['S_bar'] = cvxpy.Parameter((m.n_x, K - 1))
self.par['z_bar'] = cvxpy.Parameter((m.n_x, K - 1))
self.par['X_last'] = cvxpy.Parameter((m.n_x, K))
self.par['U_last'] = cvxpy.Parameter((m.n_u, K))
self.par['sigma_last'] = cvxpy.Parameter(nonneg=True)
self.par['weight_sigma'] = cvxpy.Parameter(nonneg=True)
self.par['weight_delta'] = cvxpy.Parameter(nonneg=True)
self.par['weight_delta_sigma'] = cvxpy.Parameter(nonneg=True)
self.par['weight_nu'] = cvxpy.Parameter(nonneg=True)
# Constraints:
constraints = []
# Model:
constraints += m.get_constraints(
self.var['X'], self.var['U'], self.par['X_last'], self.par['U_last'])
        # Dynamics:
        # x_{k+1} == A_bar @ x_k + B_bar @ u_k + C_bar @ u_{k+1}
        #            + S_bar * sigma + z_bar + nu
constraints += [
self.var['X'][:, k + 1] ==
cvxpy.reshape(self.par['A_bar'][:, k], (m.n_x, m.n_x)) *
self.var['X'][:, k] +
cvxpy.reshape(self.par['B_bar'][:, k], (m.n_x, m.n_u)) *
self.var['U'][:, k] +
cvxpy.reshape(self.par['C_bar'][:, k], (m.n_x, m.n_u)) *
self.var['U'][:, k + 1] +
self.par['S_bar'][:, k] * self.var['sigma'] +
self.par['z_bar'][:, k] +
self.var['nu'][:, k]
for k in range(K - 1)
]
# Trust regions:
dx = cvxpy.sum(cvxpy.square(
self.var['X'] - self.par['X_last']), axis=0)
du = cvxpy.sum(cvxpy.square(
self.var['U'] - self.par['U_last']), axis=0)
ds = self.var['sigma'] - self.par['sigma_last']
constraints += [cvxpy.norm(dx + du, 1) <= self.var['delta_norm']]
constraints += [cvxpy.norm(ds, 'inf') <= self.var['sigma_norm']]
# Flight time positive:
constraints += [self.var['sigma'] >= 0.1]
# Objective:
sc_objective = cvxpy.Minimize(
self.par['weight_sigma'] * self.var['sigma'] +
self.par['weight_nu'] * cvxpy.norm(self.var['nu'], 'inf') +
self.par['weight_delta'] * self.var['delta_norm'] +
self.par['weight_delta_sigma'] * self.var['sigma_norm']
)
objective = sc_objective
self.prob = cvxpy.Problem(objective, constraints)
def set_parameters(self, **kwargs):
"""
All parameters have to be filled before calling solve().
Takes the following arguments as keywords:
A_bar
B_bar
C_bar
S_bar
z_bar
X_last
U_last
sigma_last
E
weight_sigma
weight_nu
radius_trust_region
"""
for key in kwargs:
if key in self.par:
self.par[key].value = kwargs[key]
else:
print(f'Parameter \'{key}\' does not exist.')
def get_variable(self, name):
if name in self.var:
return self.var[name].value
else:
print(f'Variable \'{name}\' does not exist.')
return None
def solve(self, **kwargs):
error = False
try:
self.prob.solve(verbose=verbose_solver,
solver=solver)
except cvxpy.SolverError:
error = True
stats = self.prob.solver_stats
info = {
'setup_time': stats.setup_time,
'solver_time': stats.solve_time,
'iterations': stats.num_iters,
'solver_error': error
}
return info
def axis3d_equal(X, Y, Z, ax):
max_range = np.array([X.max() - X.min(), Y.max()
- Y.min(), Z.max() - Z.min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2,
- 1:2:2][0].flatten() + 0.5 * (X.max() + X.min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2,
- 1:2:2][1].flatten() + 0.5 * (Y.max() + Y.min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2,
- 1:2:2][2].flatten() + 0.5 * (Z.max() + Z.min())
# Comment or uncomment following both lines to test the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
def plot_animation(X, U): # pragma: no cover
fig = plt.figure()
ax = fig.gca(projection='3d')
# for stopping simulation with the esc key.
fig.canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
for k in range(K):
plt.cla()
ax.plot(X[2, :], X[3, :], X[1, :]) # trajectory
ax.scatter3D([0.0], [0.0], [0.0], c="r",
marker="x") # target landing point
axis3d_equal(X[2, :], X[3, :], X[1, :], ax)
rx, ry, rz = X[1:4, k]
# vx, vy, vz = X[4:7, k]
qw, qx, qy, qz = X[7:11, k]
CBI = np.array([
[1 - 2 * (qy ** 2 + qz ** 2), 2 * (qx * qy + qw * qz),
2 * (qx * qz - qw * qy)],
[2 * (qx * qy - qw * qz), 1 - 2
* (qx ** 2 + qz ** 2), 2 * (qy * qz + qw * qx)],
[2 * (qx * qz + qw * qy), 2 * (qy * qz - qw * qx),
1 - 2 * (qx ** 2 + qy ** 2)]
])
Fx, Fy, Fz = np.dot(np.transpose(CBI), U[:, k])
dx, dy, dz = np.dot(np.transpose(CBI), np.array([1., 0., 0.]))
# attitude vector
ax.quiver(ry, rz, rx, dy, dz, dx, length=0.5, linewidth=3.0,
arrow_length_ratio=0.0, color='black')
# thrust vector
ax.quiver(ry, rz, rx, -Fy, -Fz, -Fx, length=0.1,
arrow_length_ratio=0.0, color='red')
ax.set_title("Rocket powered landing")
plt.pause(0.5)
def main():
print("start!!")
m = Rocket_Model_6DoF()
# state and input list
X = np.empty(shape=[m.n_x, K])
U = np.empty(shape=[m.n_u, K])
# INITIALIZATION
sigma = m.t_f_guess
X, U = m.initialize_trajectory(X, U)
integrator = Integrator(m, K)
problem = SCProblem(m, K)
converged = False
w_delta = W_DELTA
for it in range(iterations):
t0_it = time()
print('-' * 18 + f' Iteration {str(it + 1).zfill(2)} ' + '-' * 18)
A_bar, B_bar, C_bar, S_bar, z_bar = integrator.calculate_discretization(
X, U, sigma)
problem.set_parameters(A_bar=A_bar, B_bar=B_bar, C_bar=C_bar, S_bar=S_bar, z_bar=z_bar,
X_last=X, U_last=U, sigma_last=sigma,
weight_sigma=W_SIGMA, weight_nu=W_NU,
weight_delta=w_delta, weight_delta_sigma=W_DELTA_SIGMA)
problem.solve()
X = problem.get_variable('X')
U = problem.get_variable('U')
sigma = problem.get_variable('sigma')
delta_norm = problem.get_variable('delta_norm')
sigma_norm = problem.get_variable('sigma_norm')
nu_norm = np.linalg.norm(problem.get_variable('nu'), np.inf)
print('delta_norm', delta_norm)
print('sigma_norm', sigma_norm)
print('nu_norm', nu_norm)
if delta_norm < 1e-3 and sigma_norm < 1e-3 and nu_norm < 1e-7:
converged = True
w_delta *= 1.5
print('Time for iteration', time() - t0_it, 's')
if converged:
print(f'Converged after {it + 1} iterations.')
break
if show_animation: # pragma: no cover
plot_animation(X, U)
print("done!!")
if __name__ == '__main__':
main()
|
semonte/intellij-community | refs/heads/master | python/testData/intentions/PyConvertMethodToPropertyIntentionTest/simple_after.py | 167 | class MyClass(object):
"""
My class to show intention.
"""
def __init__(self):
self._x = None
@property
def x(self):
return self._x
x = MyClass().x |
getlantern/lantern-java | refs/heads/master | install/linux_x86_64/pt/fteproxy/Crypto/Hash/RIPEMD.py | 124 | # -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""RIPEMD-160 cryptographic hash algorithm.
RIPEMD-160_ produces the 160-bit digest of a message.
>>> from Crypto.Hash import RIPEMD
>>>
>>> h = RIPEMD.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
RIPEMD-160 stands for RACE Integrity Primitives Evaluation Message Digest
with a 160-bit digest. It was invented by Dobbertin, Bosselaers, and Preneel.
This algorithm is considered secure, although it has not been scrutinized as
extensively as SHA-1. Moreover, it provides an informal security level of just
80 bits.
.. _RIPEMD-160: http://homes.esat.kuleuven.be/~bosselae/ripemd160.html
"""
__revision__ = "$Id$"
__all__ = ['new', 'digest_size', 'RIPEMD160Hash' ]
from Crypto.Util.py3compat import *
from Crypto.Hash.hashalgo import HashAlgo
import Crypto.Hash._RIPEMD160 as _RIPEMD160
hashFactory = _RIPEMD160
class RIPEMD160Hash(HashAlgo):
"""Class that implements a RIPMD-160 hash
:undocumented: block_size
"""
#: ASN.1 Object identifier (OID)::
#:
#: id-ripemd160 OBJECT IDENTIFIER ::= {
#: iso(1) identified-organization(3) teletrust(36)
#: algorithm(3) hashAlgorithm(2) ripemd160(1)
#: }
#:
    #: This value uniquely identifies the RIPEMD-160 algorithm.
oid = b("\x06\x05\x2b\x24\x03\x02\x01")
digest_size = 20
block_size = 64
def __init__(self, data=None):
HashAlgo.__init__(self, hashFactory, data)
def new(self, data=None):
return RIPEMD160Hash(data)
def new(data=None):
"""Return a fresh instance of the hash object.
:Parameters:
data : byte string
The very first chunk of the message to hash.
It is equivalent to an early call to `RIPEMD160Hash.update()`.
Optional.
:Return: A `RIPEMD160Hash` object
"""
return RIPEMD160Hash().new(data)
#: The size of the resulting hash in bytes.
digest_size = RIPEMD160Hash.digest_size
#: The internal block size of the hash algorithm in bytes.
block_size = RIPEMD160Hash.block_size
|
patrickcurl/ztruck | refs/heads/master | dj/lib/python2.7/site-packages/localflavor/ch/ch_states.py | 17 | # -*- coding: utf-8 -*
from django.utils.translation import ugettext_lazy as _
#: An alphabetical list of states
STATE_CHOICES = (
('AG', _('Aargau')),
('AI', _('Appenzell Innerrhoden')),
('AR', _('Appenzell Ausserrhoden')),
('BS', _('Basel-Stadt')),
('BL', _('Basel-Land')),
('BE', _('Berne')),
('FR', _('Fribourg')),
('GE', _('Geneva')),
('GL', _('Glarus')),
('GR', _('Graubuenden')),
('JU', _('Jura')),
('LU', _('Lucerne')),
('NE', _('Neuchatel')),
('NW', _('Nidwalden')),
('OW', _('Obwalden')),
('SH', _('Schaffhausen')),
('SZ', _('Schwyz')),
('SO', _('Solothurn')),
('SG', _('St. Gallen')),
('TG', _('Thurgau')),
('TI', _('Ticino')),
('UR', _('Uri')),
('VS', _('Valais')),
('VD', _('Vaud')),
('ZG', _('Zug')),
('ZH', _('Zurich'))
)
|
mwpackage/mvp-loader | refs/heads/master | node_modules/jsdoc/node_modules/esprima/tools/generate-unicode-regex.py | 260 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# By Yusuke Suzuki <utatane.tea@gmail.com>
# Modified by Mathias Bynens <http://mathiasbynens.be/>
# http://code.google.com/p/esprima/issues/detail?id=110
import sys
import string
import re
class RegExpGenerator(object):
def __init__(self, detector):
self.detector = detector
def generate_identifier_start(self):
r = [ ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_start(ch)]
return self._generate_range(r)
def generate_identifier_part(self):
r = [ ch for ch in range(0xFFFF + 1) if self.detector.is_identifier_part(ch)]
return self._generate_range(r)
def generate_non_ascii_identifier_start(self):
r = [ ch for ch in xrange(0x0080, 0xFFFF + 1) if self.detector.is_identifier_start(ch)]
return self._generate_range(r)
def generate_non_ascii_identifier_part(self):
r = [ ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_identifier_part(ch)]
return self._generate_range(r)
def generate_non_ascii_separator_space(self):
r = [ ch for ch in range(0x0080, 0xFFFF + 1) if self.detector.is_separator_space(ch)]
return self._generate_range(r)
def _generate_range(self, r):
if len(r) == 0:
return '[]'
buf = []
start = r[0]
end = r[0]
predict = start + 1
r = r[1:]
for code in r:
if predict == code:
end = code
predict = code + 1
continue
else:
if start == end:
buf.append("\\u%04X" % start)
elif end == start + 1:
buf.append("\\u%04X\\u%04X" % (start, end))
else:
buf.append("\\u%04X-\\u%04X" % (start, end))
start = code
end = code
predict = code + 1
if start == end:
buf.append("\\u%04X" % start)
else:
buf.append("\\u%04X-\\u%04X" % (start, end))
return '[' + ''.join(buf) + ']'
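    # Worked example (sketch): _generate_range([0x41, 0x42, 0x43, 0x5F])
    # yields '[\u0041-\u0043\u005F]': consecutive codepoints collapse into
    # ranges, isolated codepoints are emitted individually.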
class Detector(object):
def __init__(self, data):
self.data = data
def is_ascii(self, ch):
return ch < 0x80
def is_ascii_alpha(self, ch):
v = ch | 0x20
return v >= ord('a') and v <= ord('z')
def is_decimal_digit(self, ch):
return ch >= ord('0') and ch <= ord('9')
def is_octal_digit(self, ch):
return ch >= ord('0') and ch <= ord('7')
def is_hex_digit(self, ch):
v = ch | 0x20
        return self.is_decimal_digit(ch) or (v >= ord('a') and v <= ord('f'))
def is_digit(self, ch):
return self.is_decimal_digit(ch) or self.data[ch] == 'Nd'
def is_ascii_alphanumeric(self, ch):
return self.is_decimal_digit(ch) or self.is_ascii_alpha(ch)
def _is_non_ascii_identifier_start(self, ch):
c = self.data[ch]
return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl'
def _is_non_ascii_identifier_part(self, ch):
c = self.data[ch]
return c == 'Lu' or c == 'Ll' or c == 'Lt' or c == 'Lm' or c == 'Lo' or c == 'Nl' or c == 'Mn' or c == 'Mc' or c == 'Nd' or c == 'Pc' or ch == 0x200C or ch == 0x200D
def is_separator_space(self, ch):
return self.data[ch] == 'Zs'
def is_white_space(self, ch):
return ch == ord(' ') or ch == ord("\t") or ch == 0xB or ch == 0xC or ch == 0x00A0 or ch == 0xFEFF or self.is_separator_space(ch)
def is_line_terminator(self, ch):
return ch == 0x000D or ch == 0x000A or self.is_line_or_paragraph_terminator(ch)
def is_line_or_paragraph_terminator(self, ch):
return ch == 0x2028 or ch == 0x2029
def is_identifier_start(self, ch):
if self.is_ascii(ch):
return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alpha(ch)
return self._is_non_ascii_identifier_start(ch)
def is_identifier_part(self, ch):
if self.is_ascii(ch):
return ch == ord('$') or ch == ord('_') or ch == ord('\\') or self.is_ascii_alphanumeric(ch)
return self._is_non_ascii_identifier_part(ch)
def analyze(source):
data = []
dictionary = {}
with open(source) as uni:
flag = False
first = 0
for line in uni:
d = string.split(line.strip(), ";")
val = int(d[0], 16)
if flag:
if re.compile("<.+, Last>").match(d[1]):
# print "%s : u%X" % (d[1], val)
flag = False
for t in range(first, val+1):
dictionary[t] = str(d[2])
else:
raise "Database Exception"
else:
if re.compile("<.+, First>").match(d[1]):
# print "%s : u%X" % (d[1], val)
flag = True
first = val
else:
dictionary[val] = str(d[2])
for i in range(0xFFFF + 1):
if dictionary.get(i) == None:
data.append("Un")
else:
data.append(dictionary[i])
return RegExpGenerator(Detector(data))
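# Input format sketch: UnicodeData.txt rows are semicolon-separated, e.g.
#   0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;
# and large blocks are encoded as "<Name, First>" / "<Name, Last>" pairs,
# which analyze() expands into per-codepoint categories.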
def main(source):
generator = analyze(source)
print generator.generate_non_ascii_identifier_start()
print generator.generate_non_ascii_identifier_part()
print generator.generate_non_ascii_separator_space()
if __name__ == '__main__':
main(sys.argv[1])
|
newville/scikit-image | refs/heads/master | skimage/transform/radon_transform.py | 1 | # -*- coding: utf-8 -*-
"""
radon.py - Radon and inverse radon transforms
Based on code of Justin K. Romberg
(http://www.clear.rice.edu/elec431/projects96/DSP/bpanalysis.html)
J. Gillam and Chris Griffin.
References:
-B.R. Ramesh, N. Srinivasa, K. Rajgopal, "An Algorithm for Computing
the Discrete Radon Transform With Some Applications", Proceedings of
the Fourth IEEE Region 10 International Conference, TENCON '89, 1989.
-A. C. Kak, Malcolm Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
"""
from __future__ import division
from collections import namedtuple
import numpy as np
from scipy.fftpack import fft, ifft, fftfreq
from scipy.interpolate import interp1d
from ._warps_cy import _warp_fast
from ._radon_transform import sart_projection_update
from .. import util
__all__ = ["radon", "iradon", "iradon_sart", "iradon_workspace"]
def radon(image, theta=None, circle=False):
"""
Calculates the radon transform of an image given specified
projection angles.
Parameters
----------
image : array_like, dtype=float
Input image. The rotation axis will be located in the pixel with
indices ``(image.shape[0] // 2, image.shape[1] // 2)``.
theta : array_like, dtype=float, optional (default np.arange(180))
Projection angles (in degrees).
circle : boolean, optional
Assume image is zero outside the inscribed circle, making the
width of each projection (the first dimension of the sinogram)
equal to ``min(image.shape)``.
Returns
-------
radon_image : ndarray
Radon transform (sinogram). The tomography rotation axis will lie
at the pixel index ``radon_image.shape[0] // 2`` along the 0th
dimension of ``radon_image``.
Raises
------
ValueError
If called with ``circle=True`` and ``image != 0`` outside the inscribed
circle
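    Examples
    --------
    A small sketch (with the default ``circle=False`` the projection axis is
    padded to the image diagonal, hence the shape below)::
        >>> import numpy as np
        >>> image = np.zeros((100, 100))
        >>> image[25:75, 25:75] = 1
        >>> sinogram = radon(image, theta=np.arange(180))
        >>> sinogram.shape
        (142, 180)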
"""
if image.ndim != 2:
raise ValueError('The input image must be 2-D')
if theta is None:
theta = np.arange(180)
if circle:
radius = min(image.shape) // 2
c0, c1 = np.ogrid[0:image.shape[0], 0:image.shape[1]]
reconstruction_circle = ((c0 - image.shape[0] // 2) ** 2
+ (c1 - image.shape[1] // 2) ** 2)
reconstruction_circle = reconstruction_circle <= radius ** 2
if not np.all(reconstruction_circle | (image == 0)):
raise ValueError('Image must be zero outside the reconstruction'
' circle')
# Crop image to make it square
slices = []
for d in (0, 1):
if image.shape[d] > min(image.shape):
excess = image.shape[d] - min(image.shape)
slices.append(slice(int(np.ceil(excess / 2)),
int(np.ceil(excess / 2)
+ min(image.shape))))
else:
slices.append(slice(None))
slices = tuple(slices)
padded_image = image[slices]
else:
diagonal = np.sqrt(2) * max(image.shape)
pad = [int(np.ceil(diagonal - s)) for s in image.shape]
new_center = [(s + p) // 2 for s, p in zip(image.shape, pad)]
old_center = [s // 2 for s in image.shape]
pad_before = [nc - oc for oc, nc in zip(old_center, new_center)]
pad_width = [(pb, p - pb) for pb, p in zip(pad_before, pad)]
padded_image = util.pad(image, pad_width, mode='constant',
constant_values=0)
# padded_image is always square
assert padded_image.shape[0] == padded_image.shape[1]
radon_image = np.zeros((padded_image.shape[0], len(theta)))
center = padded_image.shape[0] // 2
shift0 = np.array([[1, 0, -center],
[0, 1, -center],
[0, 0, 1]])
shift1 = np.array([[1, 0, center],
[0, 1, center],
[0, 0, 1]])
def build_rotation(theta):
T = np.deg2rad(theta)
R = np.array([[np.cos(T), np.sin(T), 0],
[-np.sin(T), np.cos(T), 0],
[0, 0, 1]])
return shift1.dot(R).dot(shift0)
for i in range(len(theta)):
rotated = _warp_fast(padded_image, build_rotation(theta[i]))
radon_image[:, i] = rotated.sum(0)
return radon_image
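# Usage sketch (an arbitrary square phantom; not part of the public API):
def _demo_radon_padding():
    image = np.zeros((100, 100))
    image[25:75, 25:75] = 1.0
    # With circle=False (the default) the image is padded so that the full
    # diagonal is covered, making the sinogram taller than the input.
    sinogram = radon(image, theta=np.arange(180))
    return sinogram.shape   # first dimension exceeds 100 due to padding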
def _sinogram_circle_to_square(sinogram):
diagonal = int(np.ceil(np.sqrt(2) * sinogram.shape[0]))
pad = diagonal - sinogram.shape[0]
old_center = sinogram.shape[0] // 2
new_center = diagonal // 2
pad_before = new_center - old_center
pad_width = ((pad_before, pad - pad_before), (0, 0))
return util.pad(sinogram, pad_width, mode='constant', constant_values=0)
def iradon_workspace(theta, output_size, circle=False, full=True):
"""
Generate workspace needed for iradon().
This allows work arrays to be calculated once for an output size and set
of angles, useful for multiple calls to iradon() for the same geometry.
Parameters
----------
theta : array_like, dtype=float
Reconstruction angles (in degrees), one per projection.
output_size : int
Number of rows and columns in the reconstruction.
circle : boolean, optional
Assume the reconstructed image is zero outside the inscribed circle.
Also changes the default output_size to match the behaviour of
``radon`` called with ``circle=True``.
full : boolean, optional
Whether to generate full workspace (default) or only the
grid data (for older behavior, smaller memory usage).
Returns
-------
workspace:
The workspace
"""
if not circle:
output_size = int(np.floor(np.sqrt((output_size)**2 / 2.0)))
[_x, _y] = np.mgrid[0:output_size, 0:output_size]
xpr = _x - int(output_size) // 2
ypr = _y - int(output_size) // 2
thw = [None]*len(theta)
if full:
th = np.deg2rad(theta)
thw = np.zeros((len(th), output_size, output_size),
dtype=np.float64)
for i in range(len(th)):
thw[i] = ypr * np.cos(th[i]) - xpr * np.sin(th[i])
Workspace = namedtuple("Workspace", ["xpr", "ypr", "thw"])
return Workspace(xpr=xpr, ypr=ypr, thw=thw)
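# Reuse sketch: ``sinograms`` is assumed to be a sequence of equally shaped
# sinograms of one geometry; iradon() is defined later in this module.
def _demo_workspace_reuse(sinograms, theta):
    ws = iradon_workspace(theta, output_size=sinograms[0].shape[0],
                          circle=True, full=True)
    return [iradon(s, theta=theta, workspace=ws, circle=True)
            for s in sinograms]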
def iradon(radon_image, theta=None, output_size=None, workspace=None,
filter="ramp", interpolation="linear", circle=False):
"""
Inverse radon transform.
Reconstruct an image from the radon transform, using the filtered
back projection algorithm.
Parameters
----------
radon_image : array_like, dtype=float
Image containing radon transform (sinogram). Each column of
the image corresponds to a projection along a different angle. The
tomography rotation axis should lie at the pixel index
``radon_image.shape[0] // 2`` along the 0th dimension of
``radon_image``.
theta : array_like, dtype=float, optional
Reconstruction angles (in degrees). Default: m angles evenly spaced
between 0 and 180 (if the shape of `radon_image` is (N, M)).
output_size : int
Number of rows and columns in the reconstruction.
workspace : ``None`` or result of ``iradon_workspace()``
Workspace is a tuple of arrays with values needed for iradon
transform, as calculated by ``iradon_workspace(theta, output_size)``.
If ``None`` (default), the work arrays are generated as needed.
filter : str, optional (default ramp)
Filter used in frequency domain filtering. Ramp filter used by default.
Filters available: ramp, shepp-logan, cosine, hamming, hann.
Assign None to use no filter.
interpolation : str, optional (default 'linear')
Interpolation method used in reconstruction. Methods available:
'linear', 'nearest', and 'cubic' ('cubic' is slow).
circle : boolean, optional
Assume the reconstructed image is zero outside the inscribed circle.
Also changes the default output_size to match the behaviour of
``radon`` called with ``circle=True``.
Returns
-------
reconstructed : ndarray
Reconstructed image. The rotation axis will be located in the pixel
with indices
``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.
Notes
-----
This function applies the Fourier slice theorem to reconstruct an image:
each projection is filtered in the frequency domain (its FFT is multiplied
with the chosen filter) and then back projected. This algorithm is known
as filtered back projection.
"""
if radon_image.ndim != 2:
raise ValueError('The input image must be 2-D')
if theta is None:
m, n = radon_image.shape
theta = np.linspace(0, 180, n, endpoint=False)
else:
theta = np.asarray(theta)
if len(theta) != radon_image.shape[1]:
raise ValueError("The given ``theta`` does not match the number of "
"projections in ``radon_image``.")
interpolation_types = ('linear', 'nearest', 'cubic')
if interpolation not in interpolation_types:
raise ValueError("Unknown interpolation: %s" % interpolation)
if not output_size:
# If output size not specified, estimate from input radon image
if circle:
output_size = radon_image.shape[0]
else:
output_size = int(np.floor(np.sqrt((radon_image.shape[0]) ** 2
/ 2.0)))
if circle:
radon_image = _sinogram_circle_to_square(radon_image)
th = np.deg2rad(theta)
# resize image to next power of two (but no less than 64) for
# Fourier analysis; speeds up Fourier and lessens artifacts
projection_size_padded = \
max(64, int(2 ** np.ceil(np.log2(2 * radon_image.shape[0]))))
pad_width = ((0, projection_size_padded - radon_image.shape[0]), (0, 0))
img = util.pad(radon_image, pad_width, mode='constant', constant_values=0)
# Construct the Fourier filter
f = fftfreq(projection_size_padded).reshape(-1, 1) # digital frequency
omega = 2 * np.pi * f # angular frequency
fourier_filter = 2 * np.abs(f) # ramp filter
if filter == "ramp":
pass
elif filter == "shepp-logan":
# Start from first element to avoid divide by zero
fourier_filter[1:] = fourier_filter[1:] * np.sin(omega[1:]) / omega[1:]
elif filter == "cosine":
fourier_filter *= np.cos(omega)
elif filter == "hamming":
fourier_filter *= (0.54 + 0.46 * np.cos(omega / 2))
elif filter == "hann":
fourier_filter *= (1 + np.cos(omega / 2)) / 2
elif filter is None:
fourier_filter[:] = 1
else:
raise ValueError("Unknown filter: %s" % filter)
# Apply filter in Fourier domain
projection = fft(img, axis=0) * fourier_filter
radon_filtered = np.real(ifft(projection, axis=0))
# Resize filtered image back to original size
radon_filtered = radon_filtered[:radon_image.shape[0], :]
reconstructed = np.zeros((output_size, output_size))
# Determine the center of the projections (= center of sinogram)
mid_index = radon_image.shape[0] // 2
# notes on workspace:
# 1. if thwork[i] is None, the calculation will be done per row.
# 2. since output_size may have already been rescaled above, use
# circle=True when calling iradon_workspace() here.
if workspace is None:
workspace = iradon_workspace(theta, output_size,
circle=True, full=False)
xpr, ypr, thwork = workspace
# Reconstruct image by interpolation
for i in range(len(theta)):
t = thwork[i]
if t is None:
t = ypr * np.cos(th[i]) - xpr * np.sin(th[i])
x = np.arange(radon_filtered.shape[0]) - mid_index
if interpolation == 'linear':
backprojected = np.interp(t, x, radon_filtered[:, i],
left=0, right=0)
else:
interpolant = interp1d(x, radon_filtered[:, i], kind=interpolation,
bounds_error=False, fill_value=0)
backprojected = interpolant(t)
reconstructed += backprojected
if circle:
radius = output_size // 2
reconstruction_circle = (xpr ** 2 + ypr ** 2) <= radius ** 2
reconstructed[~reconstruction_circle] = 0.
return reconstructed * np.pi / (2 * len(th))
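# Round-trip sketch (a minimal example; exact pixel values depend on the
# filter and interpolation chosen):
def _demo_fbp_roundtrip():
    image = np.zeros((100, 100))
    image[25:75, 25:75] = 1.0   # square inside the inscribed circle
    theta = np.arange(180)
    sinogram = radon(image, theta=theta, circle=True)   # shape (100, 180)
    # Filtered back projection recovers an approximation of ``image``.
    return iradon(sinogram, theta=theta, circle=True)   # shape (100, 100)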
def order_angles_golden_ratio(theta):
"""
Order angles to reduce the amount of correlated information
in subsequent projections.
Parameters
----------
theta : 1D array of floats
Projection angles in degrees. Duplicate angles are not allowed.
Returns
-------
indices_generator : generator yielding unsigned integers
The returned generator yields indices into ``theta`` such that
``theta[indices]`` gives the approximate golden ratio ordering
of the projections. In total, ``len(theta)`` indices are yielded.
All non-negative integers < ``len(theta)`` are yielded exactly once.
Notes
-----
The method used here is that of the golden ratio introduced
by T. Kohler.
References
----------
.. [1] Kohler, T. "A projection access scheme for iterative
reconstruction based on the golden section." Nuclear Science
Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.
.. [2] Winkelmann, Stefanie, et al. "An optimal radial profile order
based on the Golden Ratio for time-resolved MRI."
Medical Imaging, IEEE Transactions on 26.1 (2007): 68-76.
"""
interval = 180
def angle_distance(a, b):
difference = a - b
return min(abs(difference % interval), abs(difference % -interval))
remaining = list(np.argsort(theta)) # indices into theta
# yield an arbitrary angle to start things off
index = remaining.pop(0)
angle = theta[index]
yield index
# determine subsequent angles using the golden ratio method
angle_increment = interval * (1 - (np.sqrt(5) - 1) / 2)
while remaining:
angle = (angle + angle_increment) % interval
insert_point = np.searchsorted(theta[remaining], angle)
index_below = insert_point - 1
index_above = 0 if insert_point == len(remaining) else insert_point
distance_below = angle_distance(angle, theta[remaining[index_below]])
distance_above = angle_distance(angle, theta[remaining[index_above]])
if distance_below < distance_above:
yield remaining.pop(index_below)
else:
yield remaining.pop(index_above)
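# Ordering sketch: successive yielded angles differ by the golden-section
# increment of roughly 68.75 degrees, decorrelating neighbouring projections.
def _demo_golden_ratio_order():
    theta = np.linspace(0., 180., 10, endpoint=False)
    return list(order_angles_golden_ratio(theta))   # a permutation of 0..9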
def iradon_sart(radon_image, theta=None, image=None, projection_shifts=None,
clip=None, relaxation=0.15):
"""
Inverse radon transform
Reconstruct an image from the radon transform, using a single iteration of
the Simultaneous Algebraic Reconstruction Technique (SART) algorithm.
Parameters
----------
radon_image : 2D array, dtype=float
Image containing radon transform (sinogram). Each column of
the image corresponds to a projection along a different angle. The
tomography rotation axis should lie at the pixel index
``radon_image.shape[0] // 2`` along the 0th dimension of
``radon_image``.
theta : 1D array, dtype=float, optional
Reconstruction angles (in degrees). Default: m angles evenly spaced
between 0 and 180 (if the shape of `radon_image` is (N, M)).
image : 2D array, dtype=float, optional
Image containing an initial reconstruction estimate. Shape of this
array should be ``(radon_image.shape[0], radon_image.shape[0])``. The
default is an array of zeros.
projection_shifts : 1D array, dtype=float
Shift the projections contained in ``radon_image`` (the sinogram) by
this many pixels before reconstructing the image. The i'th value
defines the shift of the i'th column of ``radon_image``.
clip : length-2 sequence of floats
Force all values in the reconstructed tomogram to lie in the range
``[clip[0], clip[1]]``
relaxation : float
Relaxation parameter for the update step. A higher value can
improve the convergence rate, but one runs the risk of instabilities.
Values close to or higher than 1 are not recommended.
Returns
-------
reconstructed : ndarray
Reconstructed image. The rotation axis will be located in the pixel
with indices
``(reconstructed.shape[0] // 2, reconstructed.shape[1] // 2)``.
Notes
-----
Algebraic Reconstruction Techniques are based on formulating the tomography
reconstruction problem as a set of linear equations. Along each ray,
the projected value is the sum of all the values of the cross section along
the ray. A typical feature of SART (and a few other variants of algebraic
techniques) is that it samples the cross section at equidistant points
along the ray, using linear interpolation between the pixel values of the
cross section. The resulting set of linear equations are then solved using
a slightly modified Kaczmarz method.
When using SART, a single iteration is usually sufficient to obtain a good
reconstruction. Further iterations will tend to enhance high-frequency
information, but will also often increase the noise.
References
----------
.. [1] AC Kak, M Slaney, "Principles of Computerized Tomographic
Imaging", IEEE Press 1988.
.. [2] AH Andersen, AC Kak, "Simultaneous algebraic reconstruction
technique (SART): a superior implementation of the ART algorithm",
Ultrasonic Imaging 6 pp 81--94 (1984)
.. [3] S Kaczmarz, "Angenäherte auflösung von systemen linearer
gleichungen", Bulletin International de l’Academie Polonaise des
Sciences et des Lettres 35 pp 355--357 (1937)
.. [4] Kohler, T. "A projection access scheme for iterative
reconstruction based on the golden section." Nuclear Science
Symposium Conference Record, 2004 IEEE. Vol. 6. IEEE, 2004.
.. [5] Kaczmarz' method, Wikipedia,
http://en.wikipedia.org/wiki/Kaczmarz_method
"""
if radon_image.ndim != 2:
raise ValueError('radon_image must be two dimensional')
reconstructed_shape = (radon_image.shape[0], radon_image.shape[0])
if theta is None:
theta = np.linspace(0, 180, radon_image.shape[1], endpoint=False)
elif theta.shape != (radon_image.shape[1],):
raise ValueError('Shape of theta (%s) does not match the '
'number of projections (%d)'
% (theta.shape, radon_image.shape[1]))
if image is None:
image = np.zeros(reconstructed_shape, dtype=np.float)
elif image.shape != reconstructed_shape:
raise ValueError('Shape of image (%s) does not match first dimension '
'of radon_image (%s)'
% (image.shape, reconstructed_shape))
if projection_shifts is None:
projection_shifts = np.zeros((radon_image.shape[1],), dtype=np.float)
elif projection_shifts.shape != (radon_image.shape[1],):
raise ValueError('Shape of projection_shifts (%s) does not match the '
'number of projections (%d)'
% (projection_shifts.shape, radon_image.shape[1]))
if clip is not None:
if len(clip) != 2:
raise ValueError('clip must be a length-2 sequence')
clip = (float(clip[0]), float(clip[1]))
relaxation = float(relaxation)
for angle_index in order_angles_golden_ratio(theta):
image_update = sart_projection_update(image, theta[angle_index],
radon_image[:, angle_index],
projection_shifts[angle_index])
image += relaxation * image_update
if clip is not None:
image = np.clip(image, clip[0], clip[1])
return image
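# Iteration sketch: ``sinogram`` and the 1D ndarray ``theta`` are assumed
# inputs. One call is usually enough; extra passes feed the previous
# estimate back in through ``image=``.
def _demo_sart(sinogram, theta, extra_iterations=1):
    estimate = iradon_sart(sinogram, theta=theta)
    for _ in range(extra_iterations):
        estimate = iradon_sart(sinogram, theta=theta, image=estimate)
    return estimate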
|
dpaleino/new-osm-stats | refs/heads/master | plot.py | 1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright © 2010, David Paleino <d.paleino@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from glob import glob
import cjson
import Gnuplot
import tempfile
import os
from collections import defaultdict
import hashlib
from operator import itemgetter
from config import *
class Graph():
def __init__(self, filename, title=None):
self.file = filename
self.title = title if title else self.file
self.user = False
self.tmp = []
self.g = Gnuplot.Gnuplot(debug=1)
self.g.title(self.title)
self.g("set style data lines")
self.g("set output '%s'" % self.file)
self.g("set terminal svg font 'sans-serif'")
self.g("set xdata time")
self.g("set timefmt '%Y%m%d'")
self.g("set format x '%m/%y'")
self.g("set key left top")
def add_line(self, user, xcoords, ycoords):
tmpfile = tempfile.mkstemp()[1]
self.tmp.append((user, tmpfile))
xcoords = map(lambda x: x.split('T')[0], xcoords)
f = open(tmpfile, 'w')
couples = zip(xcoords, ycoords)
# i = 0 # number of parsings
# every = len(couples) / 5 # (we only want 5 labels)
for x, y in sorted(zip(xcoords, ycoords), key=itemgetter(0)):
f.write('%(x)s %(y)s\n' % locals())
# i += 1
# if int(every) == 0 or (i % every) == 0:
# self.g("set label '%(y)s' at '%(x)s',%(y)s" % locals())
f.close()
def plot(self):
l = []
for user, tmpfile in self.tmp:
# support "categories"
if '|' in user and ';' in user.split('|')[1]:
label = user.split('|')[0]
else:
label = user
l.append("'%s' using 1:2 title '%s'" % (tmpfile, label))
self.g("plot " + ', '.join(l))
self.g.close()
for user, tmpfile in self.tmp:
os.unlink(tmpfile)
if __name__ == "__main__":
counts, xcoords, ycoords = parse_json()
titles = ['Nodes', 'Ways', 'Relations']
for t in titles:
graph = Graph(os.path.join(graphs_path, "%s.svg" % t), t)
graph.add_line(t, xcoords, zip(*counts)[titles.index(t)])
graph.plot()
|
henry-gobiernoabierto/geomoose | refs/heads/master | sphinx-docs/doc_config_js.py | 1 | #!/usr/bin/env python
#
# Convert geomoose/config.js into an RST document.
#
#
# Copyright (c) 2009-2014, Dan "Ducky" Little & GeoMOOSE.org
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import re
if(__name__ == "__main__"):
title = 'config.js Configuration Options'
print title
print '=' * len(title)
print ''
for line in open('../js/geomoose/config.js', 'r'):
# strip the leading white space
line = line.strip()
if(line[0:3] == '/**'):
var_name, desc = line[3:-2].strip().split(' - ', 1)
print ' * ``%s``' % var_name
print ''
print ' '+desc
print ''
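# Input/output sketch (a hypothetical config.js line):
#   /** zoomToScales - Scales offered in the zoom-to drop-down. */
# is rendered as the RST bullet:
#   * ``zoomToScales``
#
#     Scales offered in the zoom-to drop-down.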
|
raccoongang/edx-platform | refs/heads/ginkgo-rg | lms/djangoapps/teams/models.py | 5 | """Django models related to teams functionality."""
from datetime import datetime
from uuid import uuid4
import pytz
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.dispatch import receiver
from django.utils.translation import ugettext_lazy
from django_countries.fields import CountryField
from model_utils import FieldTracker
from django_comment_common.signals import (
comment_created,
comment_deleted,
comment_edited,
comment_endorsed,
comment_voted,
thread_created,
thread_deleted,
thread_edited,
thread_voted
)
from lms.djangoapps.teams import TEAM_DISCUSSION_CONTEXT
from lms.djangoapps.teams.utils import emit_team_event
from openedx.core.djangoapps.xmodule_django.models import CourseKeyField
from student.models import CourseEnrollment, LanguageField
from util.model_utils import slugify
from .errors import AlreadyOnTeamInCourse, ImmutableMembershipFieldException, NotEnrolledInCourseForTeam
@receiver(thread_voted)
@receiver(thread_created)
@receiver(comment_voted)
@receiver(comment_created)
def post_create_vote_handler(sender, **kwargs): # pylint: disable=unused-argument
"""Update the user's last activity date upon creating or voting for a
post."""
handle_activity(kwargs['user'], kwargs['post'])
@receiver(thread_edited)
@receiver(thread_deleted)
@receiver(comment_edited)
@receiver(comment_deleted)
def post_edit_delete_handler(sender, **kwargs): # pylint: disable=unused-argument
"""Update the user's last activity date upon editing or deleting a
post."""
post = kwargs['post']
handle_activity(kwargs['user'], post, long(post.user_id))
@receiver(comment_endorsed)
def comment_endorsed_handler(sender, **kwargs): # pylint: disable=unused-argument
"""Update the user's last activity date upon endorsing a comment."""
comment = kwargs['post']
handle_activity(kwargs['user'], comment, long(comment.thread.user_id))
def handle_activity(user, post, original_author_id=None):
"""Handle user activity from django_comment_client and discussion_api
and update the user's last activity date. Checks if the user who
performed the action is the original author, and that the
discussion has the team context.
"""
if original_author_id is not None and user.id != original_author_id:
return
if getattr(post, "context", "course") == TEAM_DISCUSSION_CONTEXT:
CourseTeamMembership.update_last_activity(user, post.commentable_id)
class CourseTeam(models.Model):
"""This model represents team related info."""
class Meta(object):
app_label = "teams"
team_id = models.CharField(max_length=255, unique=True)
discussion_topic_id = models.CharField(max_length=255, unique=True)
name = models.CharField(max_length=255, db_index=True)
course_id = CourseKeyField(max_length=255, db_index=True)
topic_id = models.CharField(max_length=255, db_index=True, blank=True)
date_created = models.DateTimeField(auto_now_add=True)
description = models.CharField(max_length=300)
country = CountryField(blank=True)
language = LanguageField(
blank=True,
help_text=ugettext_lazy("Optional language the team uses as ISO 639-1 code."),
)
last_activity_at = models.DateTimeField(db_index=True) # indexed for ordering
users = models.ManyToManyField(User, db_index=True, related_name='teams', through='CourseTeamMembership')
team_size = models.IntegerField(default=0, db_index=True) # indexed for ordering
field_tracker = FieldTracker()
# Don't emit changed events when these fields change.
FIELD_BLACKLIST = ['last_activity_at', 'team_size']
@classmethod
def create(cls, name, course_id, description, topic_id=None, country=None, language=None):
"""Create a complete CourseTeam object.
Args:
name (str): The name of the team to be created.
course_id (str): The ID string of the course associated
with this team.
description (str): A description of the team.
topic_id (str): An optional identifier for the topic the
team formed around.
country (str, optional): An optional country where the team
is based, as ISO 3166-1 code.
language (str, optional): An optional language which the
team uses, as ISO 639-1 code.
"""
unique_id = uuid4().hex
team_id = slugify(name)[0:20] + '-' + unique_id
discussion_topic_id = unique_id
course_team = cls(
team_id=team_id,
discussion_topic_id=discussion_topic_id,
name=name,
course_id=course_id,
topic_id=topic_id if topic_id else '',
description=description,
country=country if country else '',
language=language if language else '',
last_activity_at=datetime.utcnow().replace(tzinfo=pytz.utc)
)
return course_team
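# Usage sketch (hypothetical values; create() builds but does not save,
# so persist and populate explicitly):
#   team = CourseTeam.create(name="Example Team", course_id=course_key,
#                            description="An illustrative team")
#   team.save()
#   team.add_user(enrolled_user)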
def __repr__(self):
return "<CourseTeam team_id={0.team_id}>".format(self)
def add_user(self, user):
"""Adds the given user to the CourseTeam."""
if not CourseEnrollment.is_enrolled(user, self.course_id):
raise NotEnrolledInCourseForTeam
if CourseTeamMembership.user_in_team_for_course(user, self.course_id):
raise AlreadyOnTeamInCourse
return CourseTeamMembership.objects.create(
user=user,
team=self
)
def reset_team_size(self):
"""Reset team_size to reflect the current membership count."""
self.team_size = CourseTeamMembership.objects.filter(team=self).count()
self.save()
class CourseTeamMembership(models.Model):
"""This model represents the membership of a single user in a single team."""
class Meta(object):
app_label = "teams"
unique_together = (('user', 'team'),)
user = models.ForeignKey(User)
team = models.ForeignKey(CourseTeam, related_name='membership')
date_joined = models.DateTimeField(auto_now_add=True)
last_activity_at = models.DateTimeField()
immutable_fields = ('user', 'team', 'date_joined')
def __setattr__(self, name, value):
"""Memberships are immutable, with the exception of last activity
date.
"""
if name in self.immutable_fields:
# Check the current value -- if it is None, then this
# model is being created from the database and it's fine
# to set the value. Otherwise, we're trying to overwrite
# an immutable field.
current_value = getattr(self, name, None)
if value == current_value:
# This is an attempt to set an immutable value to the same value
# to which it's already set. Don't complain - just ignore the attempt.
return
else:
# This is an attempt to set an immutable value to a different value.
# Allow it *only* if the current value is None.
if current_value is not None:
raise ImmutableMembershipFieldException(
"Field %r shouldn't change from %r to %r" % (name, current_value, value)
)
super(CourseTeamMembership, self).__setattr__(name, value)
def save(self, *args, **kwargs):
"""Customize save method to set the last_activity_at if it does not
currently exist. Also resets the team's size if this model is
being created.
"""
should_reset_team_size = False
if self.pk is None:
should_reset_team_size = True
if not self.last_activity_at:
self.last_activity_at = datetime.utcnow().replace(tzinfo=pytz.utc)
super(CourseTeamMembership, self).save(*args, **kwargs)
if should_reset_team_size:
self.team.reset_team_size()
def delete(self, *args, **kwargs):
"""Recompute the related team's team_size after deleting a membership"""
super(CourseTeamMembership, self).delete(*args, **kwargs)
self.team.reset_team_size()
@classmethod
def get_memberships(cls, username=None, course_ids=None, team_id=None):
"""
Get a queryset of memberships.
Args:
username (unicode, optional): The username to filter on.
course_ids (list of unicode, optional) Course IDs to filter on.
team_id (unicode, optional): The team_id to filter on.
"""
queryset = cls.objects.all()
if username is not None:
queryset = queryset.filter(user__username=username)
if course_ids is not None:
queryset = queryset.filter(team__course_id__in=course_ids)
if team_id is not None:
queryset = queryset.filter(team__team_id=team_id)
return queryset
@classmethod
def user_in_team_for_course(cls, user, course_id):
"""
Checks whether or not a user is already in a team in the given course.
Args:
user: the user that we want to query on
course_id: the course_id of the course we're interested in
Returns:
True if the user is on a team in the course already
False if not
"""
return cls.objects.filter(user=user, team__course_id=course_id).exists()
@classmethod
def update_last_activity(cls, user, discussion_topic_id):
"""Set the `last_activity_at` for both this user and their team in the
given discussion topic. No-op if the user is not a member of
the team for this discussion.
"""
try:
membership = cls.objects.get(user=user, team__discussion_topic_id=discussion_topic_id)
# If a privileged user is active in the discussion of a team
# they do not belong to, do not update their last activity
# information.
except ObjectDoesNotExist:
return
now = datetime.utcnow().replace(tzinfo=pytz.utc)
membership.last_activity_at = now
membership.team.last_activity_at = now
membership.team.save()
membership.save()
emit_team_event('edx.team.activity_updated', membership.team.course_id, {
'team_id': membership.team.team_id,
})
|
tjsavage/djangononrel-starter | refs/heads/master | django/contrib/sessions/tests.py | 16 | import base64
from datetime import datetime, timedelta
import pickle
import shutil
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.base import SessionBase
from django.contrib.sessions.models import Session
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import unittest
from django.utils.hashcompat import md5_constructor
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
self.session.accessed = False
self.session.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertTrue(self.session.has_key('some key'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(self.session.values(), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(self.session.values(), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iterkeys()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.itervalues()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = self.session.iteritems()
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x',1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(self.session.items(), [('x',1)])
self.session.clear()
self.assertEqual(self.session.items(), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = self.session.items()
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(self.session.items(), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
session = self.backend('1')
session.save()
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
# Custom session expiry
def test_default_expiry(self):
# A normal session has a max age equal to settings
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
# Using seconds
self.session.set_expiry(10)
delta = self.session.get_expiry_date() - datetime.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_timedelta(self):
# Using timedelta
self.session.set_expiry(timedelta(seconds=10))
delta = self.session.get_expiry_date() - datetime.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_datetime(self):
# Using a fixed datetime
self.session.set_expiry(datetime.now() + timedelta(seconds=10))
delta = self.session.get_expiry_date() - datetime.now()
self.assertTrue(delta.seconds in (9, 10))
age = self.session.get_expiry_age()
self.assertTrue(age in (9, 10))
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
original_expire_at_browser_close = settings.SESSION_EXPIRE_AT_BROWSER_CLOSE
try:
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = False
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = True
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
finally:
settings.SESSION_EXPIRE_AT_BROWSER_CLOSE = original_expire_at_browser_close
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_decode_django12(self):
# Ensure we can decode values encoded using Django 1.2
# Hard code the Django 1.2 method here:
def encode(session_dict):
pickled = pickle.dumps(session_dict, pickle.HIGHEST_PROTOCOL)
pickled_md5 = md5_constructor(pickled + settings.SECRET_KEY).hexdigest()
return base64.encodestring(pickled + pickled_md5)
data = {'a test key': 'a test value'}
encoded = encode(data)
self.assertEqual(self.session.decode(encoded), data)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
super(FileSessionTests, self).setUp()
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
def tearDown(self):
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
super(FileSessionTests, self).tearDown()
def test_configuration_check(self):
# Make sure the file backend checks for a good storage dir
settings.SESSION_FILE_PATH = "/if/this/directory/exists/you/have/a/weird/computer"
self.assertRaises(ImproperlyConfigured, self.backend)
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
|
sayoun/workalendar | refs/heads/master | workalendar/africa/south_africa.py | 1 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import timedelta, date
from workalendar.core import WesternCalendar
from workalendar.core import SUN, MON
from workalendar.core import ChristianMixin
class SouthAfrica(WesternCalendar, ChristianMixin):
"South Africa"
include_good_friday = True
include_easter_monday = True
include_christmas = True
include_boxing_day = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 1, "Workers Day"),
(12, 16, "Day of reconcilation"),
)
def get_family_day(self, year):
return (self.get_good_friday(year), "Family Day")
def get_fixed_holidays(self, year):
days = super(SouthAfrica, self).get_fixed_holidays(year)
if year < 1952:
days.append((date(year, 5, 24), "Empire Day"))
if year >= 1952 and year <= 1974:
days.append((date(year, 4, 6), "Van Riebeeck's Day"))
if year >= 1952 and year <= 1979:
days.append((self.get_nth_weekday_in_month(year, 9, MON, 1),
"Settlers' Day"))
if year >= 1952 and year <= 1993:
days.append((date(year, 10, 10), "Kruger Day"))
if year <= 1960:
days.append((date(year, 5, 31), "Union Day"))
if year > 1960 and year <= 1993:
days.append((date(year, 5, 31), "Republic Day"))
if year > 1960 and year <= 1974:
days.append((date(year, 7, 10), "Family Day"))
if year >= 1980 and year <= 1994:
days.append((date(year, 4, 6), "Founder's Day"))
if year >= 1990:
days.append((date(year, 3, 21), 'Human Rights Day'))
if year <= 1993:
days.append((self.get_ascension_thursday(year), "Ascension Day"))
if year >= 1994:
days.append((date(year, 4, 27), "Freedom Day"))
days.append((date(year, 12, 26), "Day of Goodwill"))
if year >= 1995:
days.append((date(year, 6, 16), "Youth Day"))
days.append((date(year, 8, 9), "National Women Day"))
days.append((date(year, 9, 24), "Heritage Day"))
return days
def get_variable_days(self, year):
days = super(SouthAfrica, self).get_variable_days(year)
days.append(self.get_family_day(year))
days += self.get_fixed_holidays(year)
# compute shifting days
for holiday, label in days:
if holiday.weekday() == SUN:
days.append((
holiday + timedelta(days=1),
"%s substitute" % label
))
# Other one-offs. Don't shift these
if year == 1999:
days.append((date(year, 6, 2), "National Elections"))
days.append((date(year, 12, 31), "Y2K"))
if year == 2000:
# 2 January 2000 public holidays to accommodate the Y2K changeover,
# 3 January 2000 because the previous holiday was a Sunday
days.append((date(year, 1, 2), "Y2K"))
days.append((date(year, 1, 3), "Y2K"))
if year == 2001:
days.append((date(year, 1, 2), "Y2K"))
if year == 2004:
days.append((date(year, 4, 14), "National Elections"))
if year == 2006:
days.append((date(year, 3, 1), "Local Elections"))
if year == 2008:
# 2 May 2008 was declared a public holiday when Human Rights Day
# and Good Friday coincided on 21 March 2008
days.append((date(year, 5, 2), "Special Human Rights"))
if year == 2009:
days.append((date(year, 4, 22), "National Elections"))
if year == 2011:
days.append((date(year, 5, 18), "Local Elections"))
days.append((date(year, 12, 27), "Special Day of Goodwill"))
if year == 2014:
days.append((date(year, 5, 7), "National Elections"))
if year == 2016:
days.append((date(year, 8, 3), "Local Elections"))
return days
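# Usage sketch, relying on workalendar's standard Calendar API:
def _demo_holidays():
    cal = SouthAfrica()
    # Sunday holidays gain a Monday "substitute" entry via the shifting
    # loop in get_variable_days() above.
    return sorted(cal.holidays(2014))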
|
rfloca/MITK | refs/heads/master | Build/Tools/Python/renameClass.py | 4 | #!/usr/bin/python
# mitk c++ class rename script by Marco Nolden and Michael Mueller
#
# There are two ways to use this:
#
# 1. renameClass <dir> <oldClassName> <newClassName>
#
# 2. renameClass <dir> <csvFileOfClassNameReplaces>
#
#
# Always backup your code before using this! It has only been tested on a few cases for a special purpose!
# It does not parse the C++, but just does a text replace on ClassName, mitkClassName, m_ClassName, GetClassName
# and SetClassName and renames files accordingly. There is some basic mechanism to avoid name clashes but better
# double check the results.
#
# using the commitCommandString and the renameFileCommand you can commit your rename results directly to your
# favourite version control.
#
# some source code was taken from regexplace by Stefano Spinucci
#
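#
# Example rename list for the CSV mode (hypothetical class names), one
# rename per line:
#
#   OldImageFilter,NewImageFilter
#   OldDataStorage,NewDataStorage
#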
import os
import fileinput
import re
import sys
import stat
import os.path
import csv
commitCommandString = None
renameFileCommand = None
#
# uncomment and adapt this to commit after each rename. Parameters will be oldname, newname
# commitCommandString = "git commit -a -m \"CHG (#3669): renamed %s to %s\""
# uncomment and adapt this for renaming files. If undefined, a normal file rename will we performed
# using python commands
# renameFileCommand = "git mv %s %s "
class FileList:
def __init__(self,dir):
self.filelist = [];
for root,dirs,files in os.walk(dir):
if ".svn" in dirs:
dirs.remove(".svn")
if ".git" in dirs:
dirs.remove(".git")
for name in files:
self.filelist.append((root,name))
def contains(self,filename):
for root,xfilename in self.filelist:
if (xfilename == filename):
return (root,filename)
return None
def rename_file(self,source,dest):
self.filelist.remove(source)
xroot,xfile = source
self.filelist.append((xroot,dest))
if renameFileCommand:
os.system(renameFileCommand % (os.path.join(xroot,xfile),os.path.join(xroot,dest) ) )
else:
os.rename(os.path.join(xroot,xfile),os.path.join(xroot,dest))
def exists_somewhere(self,stringlist):
exists = False
regexString = str(stringlist.pop())
for string in stringlist:
regexString = regexString + "|" + string
regexString = "\\b(" + regexString + ")\\b"
regex = re.compile(regexString)
for root,filename in self.filelist:
xfile = os.path.join(root,filename)
# open file for read
readlines=open(xfile,'r').readlines()
# search and replace in current file printing to the user changed lines
for currentline in readlines:
match = regex.search(currentline)
if match:
print "warning: %s found in %s" % (match.group(0), xfile)
exists = True
return exists
def find_all(dir):
filelist = [];
for root,dirs,files in os.walk(dir):
if ".svn" in dirs:
dirs.remove(".svn")
if ".git" in dirs:
dirs.remove(".git")
for name in files:
filelist.append((root,name))
return filelist
# in all files in 'fileslist' search the regexp 'searchregx' and replace
# with 'replacestring'; real substitution in files only if 'simulation' = 0;
# real substitution may also be step by step (if 'stepbystep' = 1)
def replace_in_files(fileslist, searchregx, replacestring, simulation, stepbystep):
# compile regexp
cregex=re.compile(searchregx)
# print message to the user
if simulation == 1:
print '\nReplaced (simulation):\n'
else:
print '\nReplaced:\n'
# loop on all files
for root,filename in fileslist:
xfile = os.path.join(root,filename)
# initialize the replace flag
replaceflag=0
fileAtt = os.stat(xfile)[0]
if (not fileAtt & stat.S_IWRITE):
continue
# open file for read
readlines=open(xfile,'r').readlines()
# intialize the list counter
listindex = -1
# search and replace in current file printing to the user changed lines
for currentline in readlines:
# increment the list counter
listindex = listindex + 1
# if the regexp is found
if cregex.search(currentline):
# make the substitution
f=re.sub(searchregx,replacestring,currentline)
# print the current filename, the old string and the new string
print '\n' + xfile
print '- ' + currentline ,
if currentline[-1:]!='\n': print '\n' ,
print '+ ' + f ,
if f[-1:]!='\n': print '\n' ,
# if substitution is real
if simulation == 0:
# if substitution is step by step
if stepbystep == 1:
# ask user if the current line must be replaced
question = raw_input('write(Y), skip (n), quit (q) ? ')
question = question.lower()
# if quit
if question=='q':
sys.exit('\ninterrupted by the user !!!')
# if skip
elif question=='n':
pass
# if write
else:
# update the whole file variable ('readlines')
readlines[listindex] = f
replaceflag=1
# if substitution is not step by step
else:
# update the whole file variable ('readlines')
readlines[listindex] = f
replaceflag=1
# if some text was replaced
# overwrite the original file
if replaceflag==1:
# open the file for writting
write_file=open(xfile,'w')
# overwrite the file
for line in readlines:
write_file.write(line)
# close the file
write_file.close()
def replace_word_in_files(fileslist, searchword, replaceword, simulation = False, stepbystep = False):
replace_in_files(fileslist,"\\b" + searchword + "\\b",replaceword,simulation,stepbystep)
def rename_class(filelist, oldname, newname, classPrefix="mitk"):
suffixes = [ "h","cpp","txx" ]
for suffix in suffixes:
origName = classPrefix + oldname + "." + suffix
newName = classPrefix + newname + "." + suffix
fileName = filelist.contains(origName)
if fileName:
replace_word_in_files(filelist.filelist,origName,newName)
filelist.rename_file(fileName,newName)
replace_word_in_files(filelist.filelist,oldname,newname)
prefixes = [ "Get" , "Set" , "m_" ]
newnames = map(lambda x: x + newname, prefixes)
if filelist.exists_somewhere(newnames):
print "Skipping member variable and getter/setter renaming due to name conflict"
return
for prefix in prefixes:
replace_word_in_files(filelist.filelist,prefix + oldname, prefix + newname)
x = FileList(sys.argv[1])
if len(sys.argv) == 4:
rename_class(x,sys.argv[2],sys.argv[3])
if len(sys.argv) == 3:
csvReader = csv.reader(open(sys.argv[2],'r'))
for row in csvReader:
print row
rename_class(x,row[0],row[1])
if commitCommandString:
os.system(commitCommandString % ( row[0],row[1] ) )
|
Jgarcia-IAS/Fidelizacion_odoo | refs/heads/master | openerp/addons/l10n_pl/__openerp__.py | 277 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009 - now Grzegorz Grzelak grzegorz.grzelak@openglobe.pl
# All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Poland - Accounting',
'version' : '1.02',
'author' : 'Grzegorz Grzelak (OpenGLOBE)',
'website': 'http://www.openglobe.pl',
'category' : 'Localization/Account Charts',
'description': """
This is the module to manage the accounting chart and taxes for Poland in OpenERP.
==================================================================================
This is the module for creating the model chart of accounts, taxes, tax areas
and tax registers. The module also sets up the accounts for buying and selling
goods, assuming that all goods are traded wholesale.
This module is intended for Odoo 8.0.
Internal OpenGLOBE version number: 1.02
""",
'depends' : ['account', 'base_iban', 'base_vat', 'account_chart'],
'demo' : [],
'data' : ['account_tax_code.xml',
'account_chart.xml',
'account_tax.xml',
'fiscal_position.xml',
'country_pl.xml',
'l10n_chart_pl_wizard.xml'
],
'auto_install': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
sudosurootdev/external_chromium_org | refs/heads/L5 | tools/auto_bisect/builder.py | 25 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Classes and functions for building Chrome.
This includes functions for running commands to build, as well as
specific rules about which targets to build.
"""
import os
import subprocess
import sys
import bisect_utils
ORIGINAL_ENV = {}
class Builder(object):
"""Subclasses of the Builder class are used by the bisect script to build
relevant targets.
"""
def __init__(self, opts):
"""Performs setup for building with target build system.
Args:
opts: Options parsed from command line.
Raises:
RuntimeError: Some condition necessary for building was not met.
"""
if bisect_utils.IsWindowsHost():
if not opts.build_preference:
opts.build_preference = 'msvs'
if opts.build_preference == 'msvs':
if not os.getenv('VS100COMNTOOLS'):
raise RuntimeError(
'Path to Visual Studio could not be determined.')
else:
# Need to re-escape goma dir, see crbug.com/394990.
if opts.goma_dir:
opts.goma_dir = opts.goma_dir.encode('string_escape')
SetBuildSystemDefault(opts.build_preference, opts.use_goma,
opts.goma_dir)
else:
if not opts.build_preference:
if 'ninja' in os.getenv('GYP_GENERATORS', default=''):
opts.build_preference = 'ninja'
else:
opts.build_preference = 'make'
SetBuildSystemDefault(opts.build_preference, opts.use_goma, opts.goma_dir)
if not SetupPlatformBuildEnvironment(opts):
raise RuntimeError('Failed to set platform environment.')
@staticmethod
def FromOpts(opts):
"""Constructs and returns a Builder object.
Args:
opts: Options parsed from the command-line.
"""
builder = None
if opts.target_platform == 'cros':
builder = CrosBuilder(opts)
elif opts.target_platform == 'android':
builder = AndroidBuilder(opts)
elif opts.target_platform == 'android-chrome':
builder = AndroidChromeBuilder(opts)
else:
builder = DesktopBuilder(opts)
return builder
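# Usage sketch (assuming ``opts`` comes from the bisect script's option
# parser, with target_platform and build_preference already set):
#   builder = Builder.FromOpts(opts)
#   builder.Build(depot='chromium', opts=opts)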
def Build(self, depot, opts):
"""Runs a command to build Chrome."""
raise NotImplementedError()
def GetBuildOutputDirectory(opts, src_dir=None):
"""Returns the path to the build directory, relative to the checkout root.
Assumes that the current working directory is the checkout root.
Args:
opts: Command-line options.
src_dir: Path to chromium/src directory.
Returns:
A path to the directory to use as build output directory.
Raises:
NotImplementedError: The platform according to sys.platform is unexpected.
"""
src_dir = src_dir or 'src'
if opts.build_preference == 'ninja' or bisect_utils.IsLinuxHost():
return os.path.join(src_dir, 'out')
if bisect_utils.IsMacHost():
return os.path.join(src_dir, 'xcodebuild')
if bisect_utils.IsWindowsHost():
return os.path.join(src_dir, 'build')
raise NotImplementedError('Unexpected platform %s' % sys.platform)
class DesktopBuilder(Builder):
"""DesktopBuilder is used to build Chromium on Linux, Mac, or Windows."""
def __init__(self, opts):
super(DesktopBuilder, self).__init__(opts)
def Build(self, depot, opts):
"""Builds chromium_builder_perf target using options passed into the script.
Args:
depot: Name of current depot being bisected.
opts: The options parsed from the command line.
Returns:
True if build was successful.
"""
targets = ['chromium_builder_perf']
threads = None
if opts.use_goma:
threads = 64
build_success = False
if opts.build_preference == 'make':
build_success = BuildWithMake(threads, targets, opts.target_build_type)
elif opts.build_preference == 'ninja':
build_success = BuildWithNinja(threads, targets, opts.target_build_type)
elif opts.build_preference == 'msvs':
assert bisect_utils.IsWindowsHost(), 'msvs is only supported on Windows.'
build_success = BuildWithVisualStudio(targets, opts.target_build_type)
else:
assert False, 'No build system defined.'
return build_success
class AndroidBuilder(Builder):
"""AndroidBuilder is used to build on android."""
def __init__(self, opts):
super(AndroidBuilder, self).__init__(opts)
# TODO(qyearsley): Make this a class method and verify that it works with
# a unit test.
# pylint: disable=R0201
def _GetTargets(self):
"""Returns a list of build targets."""
return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']
def Build(self, depot, opts):
"""Builds the android content shell and other necessary tools.
Args:
depot: Current depot being bisected.
opts: The options parsed from the command line.
Returns:
True if build was successful.
"""
threads = None
if opts.use_goma:
threads = 64
build_success = False
if opts.build_preference == 'ninja':
build_success = BuildWithNinja(
threads, self._GetTargets(), opts.target_build_type)
else:
assert False, 'No build system defined.'
return build_success
class AndroidChromeBuilder(AndroidBuilder):
"""AndroidChromeBuilder is used to build "android-chrome".
This is slightly different from AndroidBuilder.
"""
def __init__(self, opts):
super(AndroidChromeBuilder, self).__init__(opts)
# TODO(qyearsley): Make this a class method and verify that it works with
# a unit test.
# pylint: disable=R0201
def _GetTargets(self):
"""Returns a list of build targets."""
return AndroidBuilder._GetTargets(self) + ['chrome_apk']
class CrosBuilder(Builder):
"""CrosBuilder is used to build and image ChromeOS/Chromium.
WARNING(qyearsley, 2014-08-15): This hasn't been tested recently.
"""
def __init__(self, opts):
super(CrosBuilder, self).__init__(opts)
@staticmethod
def ImageToTarget(opts):
"""Installs latest image to target specified by opts.cros_remote_ip.
Args:
opts: Program options containing cros_board and cros_remote_ip.
Returns:
True if successful.
"""
try:
# Keys will most likely be set to 0640 after wiping the chroot.
os.chmod(bisect_utils.CROS_SCRIPT_KEY_PATH, 0600)
os.chmod(bisect_utils.CROS_TEST_KEY_PATH, 0600)
cmd = [bisect_utils.CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
'--remote=%s' % opts.cros_remote_ip,
'--board=%s' % opts.cros_board, '--test', '--verbose']
return_code = bisect_utils.RunProcess(cmd)
return not return_code
except OSError:
return False
@staticmethod
def BuildPackages(opts, depot):
"""Builds packages for cros.
Args:
opts: Program options containing cros_board.
depot: The depot being bisected.
Returns:
True if successful.
"""
cmd = [bisect_utils.CROS_SDK_PATH]
if depot != 'cros':
path_to_chrome = os.path.join(os.getcwd(), '..')
cmd += ['--chrome_root=%s' % path_to_chrome]
cmd += ['--']
if depot != 'cros':
cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
'--board=%s' % opts.cros_board]
return_code = bisect_utils.RunProcess(cmd)
return not return_code
@staticmethod
def BuildImage(opts, depot):
"""Builds test image for cros.
Args:
opts: Program options containing cros_board.
depot: The depot being bisected.
Returns:
True if successful.
"""
cmd = [bisect_utils.CROS_SDK_PATH]
if depot != 'cros':
path_to_chrome = os.path.join(os.getcwd(), '..')
cmd += ['--chrome_root=%s' % path_to_chrome]
cmd += ['--']
if depot != 'cros':
cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
'--board=%s' % opts.cros_board, 'test']
return_code = bisect_utils.RunProcess(cmd)
return not return_code
def Build(self, depot, opts):
"""Builds targets using options passed into the script.
Args:
depot: Current depot being bisected.
opts: The options parsed from the command line.
Returns:
True if build was successful.
"""
if self.BuildPackages(opts, depot):
if self.BuildImage(opts, depot):
return self.ImageToTarget(opts)
return False
def SetBuildSystemDefault(build_system, use_goma, goma_dir):
"""Sets up any environment variables needed to build with the specified build
system.
Args:
build_system: A string specifying the build system; only 'ninja' and
'make' are supported.
use_goma: Whether to compile with goma.
goma_dir: Path to the goma directory, if set.
"""
if build_system == 'ninja':
gyp_var = os.getenv('GYP_GENERATORS', default='')
if not gyp_var or 'ninja' not in gyp_var:
if gyp_var:
os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
else:
os.environ['GYP_GENERATORS'] = 'ninja'
if bisect_utils.IsWindowsHost():
os.environ['GYP_DEFINES'] = 'component=shared_library '\
'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
'chromium_win_pch=0'
elif build_system == 'make':
os.environ['GYP_GENERATORS'] = 'make'
else:
raise RuntimeError('%s build not supported.' % build_system)
if use_goma:
os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', default=''),
'use_goma=1')
if goma_dir:
os.environ['GYP_DEFINES'] += ' gomadir=%s' % goma_dir
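# Illustrative sketch of the resulting environment (assumes GYP_GENERATORS and
# GYP_DEFINES start out unset, on a non-Windows host):
#   SetBuildSystemDefault('ninja', use_goma=True, goma_dir='/opt/goma')
#   os.environ['GYP_GENERATORS']  # -> 'ninja'
#   os.environ['GYP_DEFINES']     # -> ' use_goma=1 gomadir=/opt/goma'
#   SetBuildSystemDefault('scons', False, None)  # -> RuntimeError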
def SetupPlatformBuildEnvironment(opts):
"""Performs any platform-specific setup.
Args:
opts: The options parsed from the command line through parse_args().
Returns:
True if successful.
"""
if 'android' in opts.target_platform:
CopyAndSaveOriginalEnvironmentVars()
return SetupAndroidBuildEnvironment(opts)
elif opts.target_platform == 'cros':
return bisect_utils.SetupCrosRepo()
return True
def BuildWithMake(threads, targets, build_type='Release'):
"""Runs a make command with the given targets.
Args:
threads: The number of threads to use. None means unspecified/unlimited.
targets: List of make targets.
build_type: Release or Debug.
Returns:
True if the command had a 0 exit code, False otherwise.
"""
cmd = ['make', 'BUILDTYPE=%s' % build_type]
if threads:
cmd.append('-j%d' % threads)
cmd += targets
return_code = bisect_utils.RunProcess(cmd)
return not return_code
def BuildWithNinja(threads, targets, build_type='Release'):
"""Runs a ninja command with the given targets."""
cmd = ['ninja', '-C', os.path.join('out', build_type)]
if threads:
cmd.append('-j%d' % threads)
cmd += targets
return_code = bisect_utils.RunProcess(cmd)
return not return_code
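# Illustrative: BuildWithNinja(64, ['chrome'], 'Release') invokes
#   ninja -C out/Release -j64 chrome
# and, like BuildWithMake, returns True iff the process exit code is 0.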
def BuildWithVisualStudio(targets, build_type='Release'):
"""Runs a command to build the given targets with Visual Studio."""
path_to_devenv = os.path.abspath(
os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
cmd = [path_to_devenv, '/build', build_type, path_to_sln]
for t in targets:
cmd.extend(['/Project', t])
return_code = bisect_utils.RunProcess(cmd)
return not return_code
def CopyAndSaveOriginalEnvironmentVars():
"""Makes a copy of the current environment variables.
Before making a copy of the environment variables and setting a global
variable, this function unsets a certain set of environment variables.
"""
# TODO: Waiting on crbug.com/255689, will remove this after.
vars_to_remove = [
'CHROME_SRC',
'CHROMIUM_GYP_FILE',
'GYP_CROSSCOMPILE',
'GYP_DEFINES',
'GYP_GENERATORS',
'GYP_GENERATOR_FLAGS',
'OBJCOPY',
]
for key in os.environ:
if 'ANDROID' in key:
vars_to_remove.append(key)
for key in vars_to_remove:
if key in os.environ:
del os.environ[key]
global ORIGINAL_ENV
ORIGINAL_ENV = os.environ.copy()
def SetupAndroidBuildEnvironment(opts, path_to_src=None):
"""Sets up the android build environment.
Args:
opts: The options parsed from the command line through parse_args().
path_to_src: Path to the src checkout.
Returns:
True if successful.
"""
# Revert the environment variables back to default before setting them up
# with envsetup.sh.
env_vars = os.environ.copy()
for k, _ in env_vars.iteritems():
del os.environ[k]
for k, v in ORIGINAL_ENV.iteritems():
os.environ[k] = v
envsetup_path = os.path.join('build', 'android', 'envsetup.sh')
proc = subprocess.Popen(['bash', '-c', 'source %s && env' % envsetup_path],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=path_to_src)
out, _ = proc.communicate()
for line in out.splitlines():
k, _, v = line.partition('=')
os.environ[k] = v
# envsetup.sh no longer sets OS=android in GYP_DEFINES environment variable.
# (See http://crrev.com/170273005). So, we set this variable explicitly here
# in order to build Chrome on Android.
if 'GYP_DEFINES' not in os.environ:
os.environ['GYP_DEFINES'] = 'OS=android'
else:
os.environ['GYP_DEFINES'] += ' OS=android'
if opts.use_goma:
os.environ['GYP_DEFINES'] += ' use_goma=1'
return not proc.returncode
|
QLGu/django-oscar | refs/heads/master | src/oscar/management/commands/oscar_fork_statics.py | 32 | import logging
import os
import shutil
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Copy Oscar's statics into local project so they can be used as a base for
styling a new site.
"""
args = '<destination folder>'
help = "Copy Oscar's static files"
def handle(self, *args, **options):
# Determine where to copy to
folder = args[0] if args else 'static'
if not folder.startswith('/'):
destination = os.path.join(os.getcwd(), folder)
else:
destination = folder
if os.path.exists(destination):
raise CommandError(
"The folder %s already exists - aborting!" % destination)
source = os.path.realpath(
os.path.join(os.path.dirname(__file__), '../../static'))
print("Copying Oscar's static files to %s" % (destination,))
shutil.copytree(source, destination)
# Check if this new folder is in STATICFILES_DIRS
if destination not in settings.STATICFILES_DIRS:
print(("You need to add %s to STATICFILES_DIRS in order for your "
"local overrides to be picked up") % destination)
|
ataylor32/django | refs/heads/master | django/contrib/postgres/forms/ranges.py | 393 | from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.core import exceptions
from django.forms.widgets import MultiWidget
from django.utils.translation import ugettext_lazy as _
__all__ = ['IntegerRangeField', 'FloatRangeField', 'DateTimeRangeField', 'DateRangeField']
class BaseRangeField(forms.MultiValueField):
default_error_messages = {
'invalid': _('Enter two valid values.'),
'bound_ordering': _('The start of the range must not exceed the end of the range.'),
}
def __init__(self, **kwargs):
kwargs.setdefault('widget', RangeWidget(self.base_field.widget))
kwargs.setdefault('fields', [self.base_field(required=False), self.base_field(required=False)])
kwargs.setdefault('required', False)
kwargs.setdefault('require_all_fields', False)
super(BaseRangeField, self).__init__(**kwargs)
def prepare_value(self, value):
lower_base, upper_base = self.fields
if isinstance(value, self.range_type):
return [
lower_base.prepare_value(value.lower),
upper_base.prepare_value(value.upper),
]
if value is None:
return [
lower_base.prepare_value(None),
upper_base.prepare_value(None),
]
return value
def compress(self, values):
if not values:
return None
lower, upper = values
if lower is not None and upper is not None and lower > upper:
raise exceptions.ValidationError(
self.error_messages['bound_ordering'],
code='bound_ordering',
)
try:
range_value = self.range_type(lower, upper)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
)
else:
return range_value
class IntegerRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two whole numbers.')}
base_field = forms.IntegerField
range_type = NumericRange
class FloatRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two numbers.')}
base_field = forms.FloatField
range_type = NumericRange
class DateTimeRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid date/times.')}
base_field = forms.DateTimeField
range_type = DateTimeTZRange
class DateRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid dates.')}
base_field = forms.DateField
range_type = DateRange
class RangeWidget(MultiWidget):
def __init__(self, base_widget, attrs=None):
widgets = (base_widget, base_widget)
super(RangeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return (value.lower, value.upper)
return (None, None)
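# Illustrative behaviour of the range fields defined above (a sketch; values
# are hypothetical):
#   IntegerRangeField().clean(['1', '5'])   # -> NumericRange(1, 5)
#   IntegerRangeField().clean(['5', '1'])   # -> ValidationError (bound_ordering)
#   IntegerRangeField().clean([])           # -> None (fields are not required)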
|
googleapis/python-bigquery-storage | refs/heads/master | google/cloud/bigquery_storage_v1beta2/types/avro.py | 1 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.bigquery.storage.v1beta2",
manifest={"AvroSchema", "AvroRows",},
)
class AvroSchema(proto.Message):
r"""Avro schema.
Attributes:
schema (str):
Json serialized schema, as described at
https://avro.apache.org/docs/1.8.1/spec.html.
"""
schema = proto.Field(proto.STRING, number=1,)
class AvroRows(proto.Message):
r"""Avro rows.
Attributes:
serialized_binary_rows (bytes):
Binary serialized rows in a block.
"""
serialized_binary_rows = proto.Field(proto.BYTES, number=1,)
__all__ = tuple(sorted(__protobuf__.manifest))
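# Illustrative construction of these proto-plus messages (field values are
# hypothetical):
#   schema = AvroSchema(schema='{"type": "string"}')
#   rows = AvroRows(serialized_binary_rows=b'\x00\x01')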
|
350dotorg/Django | refs/heads/master | django/core/files/temp.py | 536 | """
The temp module provides a NamedTemporaryFile that can be re-opened on any
platform. Most platforms use the standard Python tempfile.TemporaryFile class,
but MS Windows users are given a custom class.
This is needed because in Windows NT, the default implementation of
NamedTemporaryFile uses the O_TEMPORARY flag, and thus cannot be reopened [1].
1: http://mail.python.org/pipermail/python-list/2005-December/359474.html
"""
import os
import tempfile
from django.core.files.utils import FileProxyMixin
__all__ = ('NamedTemporaryFile', 'gettempdir',)
if os.name == 'nt':
class TemporaryFile(FileProxyMixin):
"""
Temporary file object constructor that works in Windows and supports
reopening of the temporary file on Windows.
"""
def __init__(self, mode='w+b', bufsize=-1, suffix='', prefix='',
dir=None):
fd, name = tempfile.mkstemp(suffix=suffix, prefix=prefix,
dir=dir)
self.name = name
self.file = os.fdopen(fd, mode, bufsize)
self.close_called = False
# Because close can be called during shutdown
# we need to cache os.unlink and access it
# as self.unlink only
unlink = os.unlink
def close(self):
if not self.close_called:
self.close_called = True
try:
self.file.close()
except (OSError, IOError):
pass
try:
self.unlink(self.name)
except OSError:
pass
def __del__(self):
self.close()
NamedTemporaryFile = TemporaryFile
else:
NamedTemporaryFile = tempfile.NamedTemporaryFile
gettempdir = tempfile.gettempdir
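# Illustrative round trip showing why the custom class exists -- the file can
# be reopened by name while still open, which the stdlib class does not allow
# on Windows (a sketch, not part of the public API):
#   f = NamedTemporaryFile(suffix='.txt')
#   f.write('payload')
#   f.flush()
#   open(f.name).read()  # -> 'payload'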
|
kkauffman/CourseWatcher | refs/heads/master | app/__init__.py | 1 | from threading import Thread
from celery import Celery
from flask import Flask, current_app
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.mail import Mail, Message
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
mail = Mail(app)
default_error = '<b>Oops, it appears something went wrong!</b>'
celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
@celery.task
def send_email_updates(poller_name):
""" Takes a school name and updates its course data and then sends all open course notifications. """
requests = UpdateDB(poller_name)
for request in requests:
emails = []
for entry in request[0]:
emails.append(entry.email)
db.session.delete(entry)
send_email(emails, request[1], request[1])
def send_email(recipients, subject, body):
msg = Message(subject, sender=app.config['EMAIL_ADDRESS'], recipients=recipients)
msg.body = body
thr = Thread(target=send_async_email, args=[msg])
thr.start()
def send_async_email(msg):
with app.app_context():
mail.send(msg)
from app.update import UpdateDB
from app import views, models
|
tboyce021/home-assistant | refs/heads/dev | homeassistant/components/homematic/notify.py | 14 | """Notification support for Homematic."""
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
PLATFORM_SCHEMA,
BaseNotificationService,
)
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.template as template_helper
from .const import (
ATTR_ADDRESS,
ATTR_CHANNEL,
ATTR_INTERFACE,
ATTR_PARAM,
ATTR_VALUE,
DOMAIN,
SERVICE_SET_DEVICE_VALUE,
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_INTERFACE): cv.string,
}
)
def get_service(hass, config, discovery_info=None):
"""Get the Homematic notification service."""
data = {
ATTR_ADDRESS: config[ATTR_ADDRESS],
ATTR_CHANNEL: config[ATTR_CHANNEL],
ATTR_PARAM: config[ATTR_PARAM],
ATTR_VALUE: config[ATTR_VALUE],
}
if ATTR_INTERFACE in config:
data[ATTR_INTERFACE] = config[ATTR_INTERFACE]
return HomematicNotificationService(hass, data)
class HomematicNotificationService(BaseNotificationService):
"""Implement the notification service for Homematic."""
def __init__(self, hass, data):
"""Initialize the service."""
self.hass = hass
self.data = data
def send_message(self, message="", **kwargs):
"""Send a notification to the device."""
data = {**self.data, **kwargs.get(ATTR_DATA, {})}
if data.get(ATTR_VALUE) is not None:
templ = template_helper.Template(self.data[ATTR_VALUE], self.hass)
data[ATTR_VALUE] = template_helper.render_complex(templ, None)
self.hass.services.call(DOMAIN, SERVICE_SET_DEVICE_VALUE, data)
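# Illustrative configuration.yaml entry for this platform (address/param
# values are hypothetical):
#   notify:
#     - platform: homematic
#       address: NEQ1234567
#       channel: 2
#       param: SUBMIT
#       value: "1,1,108000,2"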
|
andreif/django | refs/heads/master | tests/forms_tests/models.py | 261 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import itertools
import tempfile
from django.core.files.storage import FileSystemStorage
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
callable_default_counter = itertools.count()
def callable_default():
return next(callable_default_counter)
temp_storage = FileSystemStorage(location=tempfile.mkdtemp())
class BoundaryModel(models.Model):
positive_integer = models.PositiveIntegerField(null=True, blank=True)
class Defaults(models.Model):
name = models.CharField(max_length=255, default='class default value')
def_date = models.DateField(default=datetime.date(1980, 1, 1))
value = models.IntegerField(default=42)
callable_default = models.IntegerField(default=callable_default)
class ChoiceModel(models.Model):
"""For ModelChoiceField and ModelMultipleChoiceField tests."""
CHOICES = [
('', 'No Preference'),
('f', 'Foo'),
('b', 'Bar'),
]
INTEGER_CHOICES = [
(None, 'No Preference'),
(1, 'Foo'),
(2, 'Bar'),
]
STRING_CHOICES_WITH_NONE = [
(None, 'No Preference'),
('f', 'Foo'),
('b', 'Bar'),
]
name = models.CharField(max_length=10)
choice = models.CharField(max_length=2, blank=True, choices=CHOICES)
choice_string_w_none = models.CharField(
max_length=2, blank=True, null=True, choices=STRING_CHOICES_WITH_NONE)
choice_integer = models.IntegerField(choices=INTEGER_CHOICES, blank=True,
null=True)
@python_2_unicode_compatible
class ChoiceOptionModel(models.Model):
"""Destination for ChoiceFieldModel's ForeignKey.
Can't reuse ChoiceModel because error_message tests require that it have no instances."""
name = models.CharField(max_length=10)
class Meta:
ordering = ('name',)
def __str__(self):
return 'ChoiceOption %d' % self.pk
def choice_default():
return ChoiceOptionModel.objects.get_or_create(name='default')[0].pk
def choice_default_list():
return [choice_default()]
def int_default():
return 1
def int_list_default():
return [1]
class ChoiceFieldModel(models.Model):
"""Model with ForeignKey to another model, for testing ModelForm
generation with ModelChoiceField."""
choice = models.ForeignKey(
ChoiceOptionModel,
models.CASCADE,
blank=False,
default=choice_default,
)
choice_int = models.ForeignKey(
ChoiceOptionModel,
models.CASCADE,
blank=False,
related_name='choice_int',
default=int_default,
)
multi_choice = models.ManyToManyField(
ChoiceOptionModel,
blank=False,
related_name='multi_choice',
default=choice_default_list,
)
multi_choice_int = models.ManyToManyField(
ChoiceOptionModel,
blank=False,
related_name='multi_choice_int',
default=int_list_default,
)
class OptionalMultiChoiceModel(models.Model):
multi_choice = models.ManyToManyField(
ChoiceOptionModel,
blank=False,
related_name='not_relevant',
default=choice_default,
)
multi_choice_optional = models.ManyToManyField(
ChoiceOptionModel,
blank=True,
related_name='not_relevant2',
)
class FileModel(models.Model):
file = models.FileField(storage=temp_storage, upload_to='tests')
@python_2_unicode_compatible
class Group(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return '%s' % self.name
class Cheese(models.Model):
name = models.CharField(max_length=100)
class Article(models.Model):
content = models.TextField()
|
kaiix/depot_tools | refs/heads/master | third_party/pylint/checkers/format.py | 59 | # Copyright (c) 2003-2013 LOGILAB S.A. (Paris, FRANCE).
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Python code format's checker.
By default try to follow Guido's style guide :
http://www.python.org/doc/essays/styleguide.html
Some parts of the process_token method are based on the Tab Nanny std module.
"""
import keyword
import sys
import tokenize
from functools import reduce # pylint: disable=redefined-builtin
import six
from six.moves import zip, map, filter # pylint: disable=redefined-builtin
from astroid import nodes
from pylint.interfaces import ITokenChecker, IAstroidChecker, IRawChecker
from pylint.checkers import BaseTokenChecker
from pylint.checkers.utils import check_messages
from pylint.utils import WarningScope, OPTION_RGX
_CONTINUATION_BLOCK_OPENERS = ['elif', 'except', 'for', 'if', 'while', 'def', 'class']
_KEYWORD_TOKENS = ['assert', 'del', 'elif', 'except', 'for', 'if', 'in', 'not',
'raise', 'return', 'while', 'yield']
if sys.version_info < (3, 0):
_KEYWORD_TOKENS.append('print')
_SPACED_OPERATORS = ['==', '<', '>', '!=', '<>', '<=', '>=',
'+=', '-=', '*=', '**=', '/=', '//=', '&=', '|=', '^=',
'%=', '>>=', '<<=']
_OPENING_BRACKETS = ['(', '[', '{']
_CLOSING_BRACKETS = [')', ']', '}']
_TAB_LENGTH = 8
_EOL = frozenset([tokenize.NEWLINE, tokenize.NL, tokenize.COMMENT])
_JUNK_TOKENS = (tokenize.COMMENT, tokenize.NL)
# Whitespace checking policy constants
_MUST = 0
_MUST_NOT = 1
_IGNORE = 2
# Whitespace checking config constants
_DICT_SEPARATOR = 'dict-separator'
_TRAILING_COMMA = 'trailing-comma'
_NO_SPACE_CHECK_CHOICES = [_TRAILING_COMMA, _DICT_SEPARATOR]
MSGS = {
'C0301': ('Line too long (%s/%s)',
'line-too-long',
'Used when a line is longer than a given number of characters.'),
'C0302': ('Too many lines in module (%s/%s)', # was W0302
'too-many-lines',
'Used when a module has too many lines, reducing its readability.'
),
'C0303': ('Trailing whitespace',
'trailing-whitespace',
'Used when there is whitespace between the end of a line and the '
'newline.'),
'C0304': ('Final newline missing',
'missing-final-newline',
'Used when the last line in a file is missing a newline.'),
'W0311': ('Bad indentation. Found %s %s, expected %s',
'bad-indentation',
'Used when an unexpected number of indentation tabs or '
'spaces has been found.'),
'C0330': ('Wrong %s indentation%s.\n%s%s',
'bad-continuation',
'TODO'),
'W0312': ('Found indentation with %ss instead of %ss',
'mixed-indentation',
'Used when there are some mixed tabs and spaces in a module.'),
'W0301': ('Unnecessary semicolon', # was W0106
'unnecessary-semicolon',
'Used when a statement is ended by a semi-colon (";"), which \
isn\'t necessary (that\'s python, not C ;).'),
'C0321': ('More than one statement on a single line',
'multiple-statements',
'Used when more than one statement is found on the same line.',
{'scope': WarningScope.NODE}),
'C0325' : ('Unnecessary parens after %r keyword',
'superfluous-parens',
'Used when a single item in parentheses follows an if, for, or '
'other keyword.'),
'C0326': ('%s space %s %s %s\n%s',
'bad-whitespace',
('Used when a wrong number of spaces is used around an operator, '
'bracket or block opener.'),
{'old_names': [('C0323', 'no-space-after-operator'),
('C0324', 'no-space-after-comma'),
('C0322', 'no-space-before-operator')]}),
'W0332': ('Use of "l" as long integer identifier',
'lowercase-l-suffix',
'Used when a lower case "l" is used to mark a long integer. You '
'should use an upper case "L" since the letter "l" looks too much '
'like the digit "1"',
{'maxversion': (3, 0)}),
'C0327': ('Mixed line endings LF and CRLF',
'mixed-line-endings',
'Used when there are mixed (LF and CRLF) newline signs in a file.'),
'C0328': ('Unexpected line ending format. There is \'%s\' while it should be \'%s\'.',
'unexpected-line-ending-format',
'Used when there is different newline than expected.'),
}
def _underline_token(token):
length = token[3][1] - token[2][1]
offset = token[2][1]
return token[4] + (' ' * offset) + ('^' * length)
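# Illustrative: for a token spanning columns 4-7 of the line 'x = foo()\n',
# _underline_token returns the line followed by '    ^^^'.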
def _column_distance(token1, token2):
if token1 == token2:
return 0
if token2[3] < token1[3]:
token1, token2 = token2, token1
if token1[3][0] != token2[2][0]:
return None
return token2[2][1] - token1[3][1]
def _last_token_on_line_is(tokens, line_end, token):
return (line_end > 0 and tokens.token(line_end-1) == token or
line_end > 1 and tokens.token(line_end-2) == token
and tokens.type(line_end-1) == tokenize.COMMENT)
def _token_followed_by_eol(tokens, position):
return (tokens.type(position+1) == tokenize.NL or
tokens.type(position+1) == tokenize.COMMENT and
tokens.type(position+2) == tokenize.NL)
def _get_indent_length(line):
"""Return the length of the indentation on the given token's line."""
result = 0
for char in line:
if char == ' ':
result += 1
elif char == '\t':
result += _TAB_LENGTH
else:
break
return result
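# Illustrative: _get_indent_length('\t  x = 1') == 10, since a tab counts as
# _TAB_LENGTH (8) columns and the two spaces add one each.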
def _get_indent_hint_line(bar_positions, bad_position):
"""Return a line with |s for each of the positions in the given lists."""
if not bar_positions:
return ''
markers = [(pos, '|') for pos in bar_positions]
markers.append((bad_position, '^'))
markers.sort()
line = [' '] * (markers[-1][0] + 1)
for position, marker in markers:
line[position] = marker
return ''.join(line)
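# Illustrative: _get_indent_hint_line([2, 4], 8) == '  | |   ^' -- '|' marks
# each valid offset and '^' marks the offending column.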
class _ContinuedIndent(object):
__slots__ = ('valid_outdent_offsets',
'valid_continuation_offsets',
'context_type',
'token',
'position')
def __init__(self,
context_type,
token,
position,
valid_outdent_offsets,
valid_continuation_offsets):
self.valid_outdent_offsets = valid_outdent_offsets
self.valid_continuation_offsets = valid_continuation_offsets
self.context_type = context_type
self.position = position
self.token = token
# The contexts for hanging indents.
# A hanging indented dictionary value after :
HANGING_DICT_VALUE = 'dict-value'
# Hanging indentation in an expression.
HANGING = 'hanging'
# Hanging indentation in a block header.
HANGING_BLOCK = 'hanging-block'
# Continued indentation inside an expression.
CONTINUED = 'continued'
# Continued indentation in a block header.
CONTINUED_BLOCK = 'continued-block'
SINGLE_LINE = 'single'
WITH_BODY = 'multi'
_CONTINUATION_MSG_PARTS = {
HANGING_DICT_VALUE: ('hanging', ' in dict value'),
HANGING: ('hanging', ''),
HANGING_BLOCK: ('hanging', ' before block'),
CONTINUED: ('continued', ''),
CONTINUED_BLOCK: ('continued', ' before block'),
}
def _Offsets(*args):
"""Valid indentation offsets for a continued line."""
return dict((a, None) for a in args)
def _BeforeBlockOffsets(single, with_body):
"""Valid alternative indent offsets for continued lines before blocks.
:param single: Valid offset for statements on a single logical line.
:param with_body: Valid offset for statements on several lines.
"""
return {single: SINGLE_LINE, with_body: WITH_BODY}
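# Illustrative: _Offsets(4, 8) == {4: None, 8: None}, while
# _BeforeBlockOffsets(4, 8) == {4: SINGLE_LINE, 8: WITH_BODY}, letting the
# checker distinguish the two block layouts when validating a column.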
class TokenWrapper(object):
"""A wrapper for readable access to token information."""
def __init__(self, tokens):
self._tokens = tokens
def token(self, idx):
return self._tokens[idx][1]
def type(self, idx):
return self._tokens[idx][0]
def start_line(self, idx):
return self._tokens[idx][2][0]
def start_col(self, idx):
return self._tokens[idx][2][1]
def line(self, idx):
return self._tokens[idx][4]
class ContinuedLineState(object):
"""Tracker for continued indentation inside a logical line."""
def __init__(self, tokens, config):
self._line_start = -1
self._cont_stack = []
self._is_block_opener = False
self.retained_warnings = []
self._config = config
self._tokens = TokenWrapper(tokens)
@property
def has_content(self):
return bool(self._cont_stack)
@property
def _block_indent_size(self):
return len(self._config.indent_string.replace('\t', ' ' * _TAB_LENGTH))
@property
def _continuation_size(self):
return self._config.indent_after_paren
def handle_line_start(self, pos):
"""Record the first non-junk token at the start of a line."""
if self._line_start > -1:
return
self._is_block_opener = self._tokens.token(pos) in _CONTINUATION_BLOCK_OPENERS
self._line_start = pos
def next_physical_line(self):
"""Prepares the tracker for a new physical line (NL)."""
self._line_start = -1
self._is_block_opener = False
def next_logical_line(self):
"""Prepares the tracker for a new logical line (NEWLINE).
A new logical line only starts with block indentation.
"""
self.next_physical_line()
self.retained_warnings = []
self._cont_stack = []
def add_block_warning(self, token_position, state, valid_offsets):
self.retained_warnings.append((token_position, state, valid_offsets))
def get_valid_offsets(self, idx):
""""Returns the valid offsets for the token at the given position."""
# The closing brace on a dict or the 'for' in a dict comprehension may
# reset two indent levels because the dict value is ended implicitly
stack_top = -1
if self._tokens.token(idx) in ('}', 'for') and self._cont_stack[-1].token == ':':
stack_top = -2
indent = self._cont_stack[stack_top]
if self._tokens.token(idx) in _CLOSING_BRACKETS:
valid_offsets = indent.valid_outdent_offsets
else:
valid_offsets = indent.valid_continuation_offsets
return indent, valid_offsets.copy()
def _hanging_indent_after_bracket(self, bracket, position):
"""Extracts indentation information for a hanging indent."""
indentation = _get_indent_length(self._tokens.line(position))
if self._is_block_opener and self._continuation_size == self._block_indent_size:
return _ContinuedIndent(
HANGING_BLOCK,
bracket,
position,
_Offsets(indentation + self._continuation_size, indentation),
_BeforeBlockOffsets(indentation + self._continuation_size,
indentation + self._continuation_size * 2))
elif bracket == ':':
# If the dict key was on the same line as the open brace, the new
# correct indent should be relative to the key instead of the
# current indent level
paren_align = self._cont_stack[-1].valid_outdent_offsets
next_align = self._cont_stack[-1].valid_continuation_offsets.copy()
next_align_keys = list(next_align.keys())
next_align[next_align_keys[0] + self._continuation_size] = True
# Note that the continuation of
# d = {
# 'a': 'b'
# 'c'
# }
# is handled by the special-casing for hanging continued string indents.
return _ContinuedIndent(HANGING_DICT_VALUE, bracket, position, paren_align, next_align)
else:
return _ContinuedIndent(
HANGING,
bracket,
position,
_Offsets(indentation, indentation + self._continuation_size),
_Offsets(indentation + self._continuation_size))
def _continuation_inside_bracket(self, bracket, pos):
"""Extracts indentation information for a continued indent."""
indentation = _get_indent_length(self._tokens.line(pos))
if self._is_block_opener and self._tokens.start_col(pos+1) - indentation == self._block_indent_size:
return _ContinuedIndent(
CONTINUED_BLOCK,
bracket,
pos,
_Offsets(self._tokens.start_col(pos)),
_BeforeBlockOffsets(self._tokens.start_col(pos+1),
self._tokens.start_col(pos+1) + self._continuation_size))
else:
return _ContinuedIndent(
CONTINUED,
bracket,
pos,
_Offsets(self._tokens.start_col(pos)),
_Offsets(self._tokens.start_col(pos+1)))
def pop_token(self):
self._cont_stack.pop()
def push_token(self, token, position):
"""Pushes a new token for continued indentation on the stack.
Tokens that can modify continued indentation offsets are:
* opening brackets
* 'lambda'
* : inside dictionaries
push_token relies on the caller to filter out those
interesting tokens.
:param token: The concrete token
:param position: The position of the token in the stream.
"""
if _token_followed_by_eol(self._tokens, position):
self._cont_stack.append(
self._hanging_indent_after_bracket(token, position))
else:
self._cont_stack.append(
self._continuation_inside_bracket(token, position))
class FormatChecker(BaseTokenChecker):
"""checks for :
* unauthorized constructions
* strict indentation
* line length
"""
__implements__ = (ITokenChecker, IAstroidChecker, IRawChecker)
# configuration section name
name = 'format'
# messages
msgs = MSGS
# configuration options
# for available dict keys/values see the optik parser 'add_option' method
options = (('max-line-length',
{'default' : 100, 'type' : "int", 'metavar' : '<int>',
'help' : 'Maximum number of characters on a single line.'}),
('ignore-long-lines',
{'type': 'regexp', 'metavar': '<regexp>',
'default': r'^\s*(# )?<?https?://\S+>?$',
'help': ('Regexp for a line that is allowed to be longer than '
'the limit.')}),
('single-line-if-stmt',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'help' : ('Allow the body of an if to be on the same '
'line as the test if there is no else.')}),
('no-space-check',
{'default': ','.join(_NO_SPACE_CHECK_CHOICES),
'type': 'multiple_choice',
'choices': _NO_SPACE_CHECK_CHOICES,
'help': ('List of optional constructs for which whitespace '
'checking is disabled')}),
('max-module-lines',
{'default' : 1000, 'type' : 'int', 'metavar' : '<int>',
'help': 'Maximum number of lines in a module'}
),
('indent-string',
{'default' : ' ', 'type' : "string", 'metavar' : '<string>',
'help' : 'String used as indentation unit. This is usually '
'" " (4 spaces) or "\\t" (1 tab).'}),
('indent-after-paren',
{'type': 'int', 'metavar': '<int>', 'default': 4,
'help': 'Number of spaces of indent required inside a hanging '
' or continued line.'}),
('expected-line-ending-format',
{'type': 'choice', 'metavar': '<empty or LF or CRLF>', 'default': '',
'choices': ['', 'LF', 'CRLF'],
'help': 'Expected format of line ending, e.g. empty (any line ending), LF or CRLF.'}),
)
def __init__(self, linter=None):
BaseTokenChecker.__init__(self, linter)
self._lines = None
self._visited_lines = None
self._bracket_stack = [None]
def _pop_token(self):
self._bracket_stack.pop()
self._current_line.pop_token()
def _push_token(self, token, idx):
self._bracket_stack.append(token)
self._current_line.push_token(token, idx)
def new_line(self, tokens, line_end, line_start):
"""a new line has been encountered, process it if necessary"""
if _last_token_on_line_is(tokens, line_end, ';'):
self.add_message('unnecessary-semicolon', line=tokens.start_line(line_end))
line_num = tokens.start_line(line_start)
line = tokens.line(line_start)
if tokens.type(line_start) not in _JUNK_TOKENS:
self._lines[line_num] = line.split('\n')[0]
self.check_lines(line, line_num)
def process_module(self, module):
self._keywords_with_parens = set()
if 'print_function' in module.future_imports:
self._keywords_with_parens.add('print')
def _check_keyword_parentheses(self, tokens, start):
"""Check that there are not unnecessary parens after a keyword.
Parens are unnecessary if there is exactly one balanced outer pair on a
line, and it is followed by a colon, and contains no commas (i.e. is not a
tuple).
Args:
tokens: list of Tokens; the entire list of Tokens.
start: int; the position of the keyword in the token list.
"""
# If the next token is not a paren, we're fine.
if self._inside_brackets(':') and tokens[start][1] == 'for':
self._pop_token()
if tokens[start+1][1] != '(':
return
found_and_or = False
depth = 0
keyword_token = tokens[start][1]
line_num = tokens[start][2][0]
for i in range(start, len(tokens) - 1):
token = tokens[i]
# If we hit a newline, then assume any parens were for continuation.
if token[0] == tokenize.NL:
return
if token[1] == '(':
depth += 1
elif token[1] == ')':
depth -= 1
if not depth:
# ')' can't happen after if (foo), since it would be a syntax error.
if (tokens[i+1][1] in (':', ')', ']', '}', 'in') or
tokens[i+1][0] in (tokenize.NEWLINE,
tokenize.ENDMARKER,
tokenize.COMMENT)):
# The empty tuple () is always accepted.
if i == start + 2:
return
if keyword_token == 'not':
if not found_and_or:
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
elif keyword_token in ('return', 'yield'):
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
elif keyword_token not in self._keywords_with_parens:
if not (tokens[i+1][1] == 'in' and found_and_or):
self.add_message('superfluous-parens', line=line_num,
args=keyword_token)
return
elif depth == 1:
# This is a tuple, which is always acceptable.
if token[1] == ',':
return
# 'and' and 'or' are the only boolean operators with lower precedence
# than 'not', so parens are only required when they are found.
elif token[1] in ('and', 'or'):
found_and_or = True
# A yield inside an expression must always be in parentheses,
# quit early without error.
elif token[1] == 'yield':
return
# A generator expression always has a 'for' token in it, and
# the 'for' token is only legal inside parens when it is in a
# generator expression. The parens are necessary here, so bail
# without an error.
elif token[1] == 'for':
return
def _opening_bracket(self, tokens, i):
self._push_token(tokens[i][1], i)
# Special case: ignore slices
if tokens[i][1] == '[' and tokens[i+1][1] == ':':
return
if (i > 0 and (tokens[i-1][0] == tokenize.NAME and
not (keyword.iskeyword(tokens[i-1][1]))
or tokens[i-1][1] in _CLOSING_BRACKETS)):
self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT))
else:
self._check_space(tokens, i, (_IGNORE, _MUST_NOT))
def _closing_bracket(self, tokens, i):
if self._inside_brackets(':'):
self._pop_token()
self._pop_token()
# Special case: ignore slices
if tokens[i-1][1] == ':' and tokens[i][1] == ']':
return
policy_before = _MUST_NOT
if tokens[i][1] in _CLOSING_BRACKETS and tokens[i-1][1] == ',':
if _TRAILING_COMMA in self.config.no_space_check:
policy_before = _IGNORE
self._check_space(tokens, i, (policy_before, _IGNORE))
def _check_equals_spacing(self, tokens, i):
"""Check the spacing of a single equals sign."""
if self._inside_brackets('(') or self._inside_brackets('lambda'):
self._check_space(tokens, i, (_MUST_NOT, _MUST_NOT))
else:
self._check_space(tokens, i, (_MUST, _MUST))
def _open_lambda(self, tokens, i): # pylint:disable=unused-argument
self._push_token('lambda', i)
def _handle_colon(self, tokens, i):
# Special case: ignore slices
if self._inside_brackets('['):
return
if (self._inside_brackets('{') and
_DICT_SEPARATOR in self.config.no_space_check):
policy = (_IGNORE, _IGNORE)
else:
policy = (_MUST_NOT, _MUST)
self._check_space(tokens, i, policy)
if self._inside_brackets('lambda'):
self._pop_token()
elif self._inside_brackets('{'):
self._push_token(':', i)
def _handle_comma(self, tokens, i):
# Only require a following whitespace if this is
# not a hanging comma before a closing bracket.
if tokens[i+1][1] in _CLOSING_BRACKETS:
self._check_space(tokens, i, (_MUST_NOT, _IGNORE))
else:
self._check_space(tokens, i, (_MUST_NOT, _MUST))
if self._inside_brackets(':'):
self._pop_token()
def _check_surrounded_by_space(self, tokens, i):
"""Check that a binary operator is surrounded by exactly one space."""
self._check_space(tokens, i, (_MUST, _MUST))
def _check_space(self, tokens, i, policies):
def _policy_string(policy):
if policy == _MUST:
return 'Exactly one', 'required'
else:
return 'No', 'allowed'
def _name_construct(token):
if token[1] == ',':
return 'comma'
elif token[1] == ':':
return ':'
elif token[1] in '()[]{}':
return 'bracket'
elif token[1] in ('<', '>', '<=', '>=', '!=', '=='):
return 'comparison'
else:
if self._inside_brackets('('):
return 'keyword argument assignment'
else:
return 'assignment'
good_space = [True, True]
token = tokens[i]
pairs = [(tokens[i-1], token), (token, tokens[i+1])]
for other_idx, (policy, token_pair) in enumerate(zip(policies, pairs)):
if token_pair[other_idx][0] in _EOL or policy == _IGNORE:
continue
distance = _column_distance(*token_pair)
if distance is None:
continue
good_space[other_idx] = (
(policy == _MUST and distance == 1) or
(policy == _MUST_NOT and distance == 0))
warnings = []
if not any(good_space) and policies[0] == policies[1]:
warnings.append((policies[0], 'around'))
else:
for ok, policy, position in zip(good_space, policies, ('before', 'after')):
if not ok:
warnings.append((policy, position))
for policy, position in warnings:
construct = _name_construct(token)
count, state = _policy_string(policy)
self.add_message('bad-whitespace', line=token[2][0],
args=(count, state, position, construct,
_underline_token(token)))
def _inside_brackets(self, left):
return self._bracket_stack[-1] == left
def _prepare_token_dispatcher(self):
raw = [
(_KEYWORD_TOKENS,
self._check_keyword_parentheses),
(_OPENING_BRACKETS, self._opening_bracket),
(_CLOSING_BRACKETS, self._closing_bracket),
(['='], self._check_equals_spacing),
(_SPACED_OPERATORS, self._check_surrounded_by_space),
([','], self._handle_comma),
([':'], self._handle_colon),
(['lambda'], self._open_lambda),
]
dispatch = {}
for tokens, handler in raw:
for token in tokens:
dispatch[token] = handler
return dispatch
def process_tokens(self, tokens):
"""process tokens and search for :
_ non strict indentation (i.e. not always using the <indent> parameter as
indent unit)
_ too long lines (i.e. longer than <max_chars>)
_ optionally bad construct (if given, bad_construct must be a compiled
regular expression).
"""
self._bracket_stack = [None]
indents = [0]
check_equal = False
line_num = 0
self._lines = {}
self._visited_lines = {}
token_handlers = self._prepare_token_dispatcher()
self._last_line_ending = None
self._current_line = ContinuedLineState(tokens, self.config)
for idx, (tok_type, token, start, _, line) in enumerate(tokens):
if start[0] != line_num:
line_num = start[0]
# A tokenizer oddity: if an indented line contains a multi-line
# docstring, the line member of the INDENT token does not contain
# the full line; therefore we check the next token on the line.
if tok_type == tokenize.INDENT:
self.new_line(TokenWrapper(tokens), idx-1, idx+1)
else:
self.new_line(TokenWrapper(tokens), idx-1, idx)
if tok_type == tokenize.NEWLINE:
# a program statement, or ENDMARKER, will eventually follow,
# after some (possibly empty) run of tokens of the form
# (NL | COMMENT)* (INDENT | DEDENT+)?
# If an INDENT appears, setting check_equal is wrong, and will
# be undone when we see the INDENT.
check_equal = True
self._process_retained_warnings(TokenWrapper(tokens), idx)
self._current_line.next_logical_line()
self._check_line_ending(token, line_num)
elif tok_type == tokenize.INDENT:
check_equal = False
self.check_indent_level(token, indents[-1]+1, line_num)
indents.append(indents[-1]+1)
elif tok_type == tokenize.DEDENT:
# there's nothing we need to check here! what's important is
# that when the run of DEDENTs ends, the indentation of the
# program statement (or ENDMARKER) that triggered the run is
# equal to what's left at the top of the indents stack
check_equal = True
if len(indents) > 1:
del indents[-1]
elif tok_type == tokenize.NL:
self._check_continued_indentation(TokenWrapper(tokens), idx+1)
self._current_line.next_physical_line()
elif tok_type != tokenize.COMMENT:
self._current_line.handle_line_start(idx)
# This is the first concrete token following a NEWLINE, so it
# must be the first token of the next program statement, or an
# ENDMARKER; the "line" argument exposes the leading whitespace
# for this statement; in the case of ENDMARKER, line is an empty
# string, so will properly match the empty string with which the
# "indents" stack was seeded
if check_equal:
check_equal = False
self.check_indent_level(line, indents[-1], line_num)
if tok_type == tokenize.NUMBER and token.endswith('l'):
self.add_message('lowercase-l-suffix', line=line_num)
try:
handler = token_handlers[token]
except KeyError:
pass
else:
handler(tokens, idx)
line_num -= 1 # to be ok with "wc -l"
if line_num > self.config.max_module_lines:
# Get the line where the too-many-lines (or its message id)
# was disabled or default to 1.
symbol = self.linter.msgs_store.check_message_id('too-many-lines')
names = (symbol.msgid, 'too-many-lines')
line = next(filter(None,
map(self.linter._pragma_lineno.get, names)), 1)
self.add_message('too-many-lines',
args=(line_num, self.config.max_module_lines),
line=line)
def _check_line_ending(self, line_ending, line_num):
# check if line endings are mixed
if self._last_line_ending is not None:
if line_ending != self._last_line_ending:
self.add_message('mixed-line-endings', line=line_num)
self._last_line_ending = line_ending
# check if line ending is as expected
expected = self.config.expected_line_ending_format
if expected:
line_ending = reduce(lambda x, y: x + y if x != y else x, line_ending, "") # reduce multiple \n\n\n\n to one \n
line_ending = 'LF' if line_ending == '\n' else 'CRLF'
if line_ending != expected:
self.add_message('unexpected-line-ending-format', args=(line_ending, expected), line=line_num)
def _process_retained_warnings(self, tokens, current_pos):
single_line_block_stmt = not _last_token_on_line_is(tokens, current_pos, ':')
for indent_pos, state, offsets in self._current_line.retained_warnings:
block_type = offsets[tokens.start_col(indent_pos)]
hints = dict((k, v) for k, v in six.iteritems(offsets)
if v != block_type)
if single_line_block_stmt and block_type == WITH_BODY:
self._add_continuation_message(state, hints, tokens, indent_pos)
elif not single_line_block_stmt and block_type == SINGLE_LINE:
self._add_continuation_message(state, hints, tokens, indent_pos)
def _check_continued_indentation(self, tokens, next_idx):
def same_token_around_nl(token_type):
return (tokens.type(next_idx) == token_type and
tokens.type(next_idx-2) == token_type)
# Do not issue any warnings if the next line is empty.
if not self._current_line.has_content or tokens.type(next_idx) == tokenize.NL:
return
state, valid_offsets = self._current_line.get_valid_offsets(next_idx)
# Special handling for hanging comments and strings. If the last line ended
# with a comment (string) and the new line contains only a comment, the line
# may also be indented to the start of the previous token.
if same_token_around_nl(tokenize.COMMENT) or same_token_around_nl(tokenize.STRING):
valid_offsets[tokens.start_col(next_idx-2)] = True
# We can only decide if the indentation of a continued line before opening
# a new block is valid once we know whether the body of the block is on the
# same line as the block opener. Since the token processing is single-pass,
# emitting those warnings is delayed until the block opener is processed.
if (state.context_type in (HANGING_BLOCK, CONTINUED_BLOCK)
and tokens.start_col(next_idx) in valid_offsets):
self._current_line.add_block_warning(next_idx, state, valid_offsets)
elif tokens.start_col(next_idx) not in valid_offsets:
self._add_continuation_message(state, valid_offsets, tokens, next_idx)
def _add_continuation_message(self, state, offsets, tokens, position):
readable_type, readable_position = _CONTINUATION_MSG_PARTS[state.context_type]
hint_line = _get_indent_hint_line(offsets, tokens.start_col(position))
self.add_message(
'bad-continuation',
line=tokens.start_line(position),
args=(readable_type, readable_position, tokens.line(position), hint_line))
@check_messages('multiple-statements')
def visit_default(self, node):
"""check the node line number and check it if not yet done"""
if not node.is_statement:
return
if not node.root().pure_python:
return # XXX block visit of child nodes
prev_sibl = node.previous_sibling()
if prev_sibl is not None:
prev_line = prev_sibl.fromlineno
else:
# The line on which a finally: occurs in a try/finally
# is not directly represented in the AST. We infer it
# by taking the last line of the body and adding 1, which
# should be the line of finally:
if (isinstance(node.parent, nodes.TryFinally)
and node in node.parent.finalbody):
prev_line = node.parent.body[0].tolineno + 1
else:
prev_line = node.parent.statement().fromlineno
line = node.fromlineno
assert line, node
if prev_line == line and self._visited_lines.get(line) != 2:
self._check_multi_statement_line(node, line)
return
if line in self._visited_lines:
return
try:
tolineno = node.blockstart_tolineno
except AttributeError:
tolineno = node.tolineno
assert tolineno, node
lines = []
for line in range(line, tolineno + 1):
self._visited_lines[line] = 1
try:
lines.append(self._lines[line].rstrip())
except KeyError:
lines.append('')
def _check_multi_statement_line(self, node, line):
"""Check for lines containing multiple statements."""
# Do not warn about multiple nested context managers
# in with statements.
if isinstance(node, nodes.With):
return
# For try... except... finally..., the two nodes
# appear to be on the same line due to how the AST is built.
if (isinstance(node, nodes.TryExcept) and
isinstance(node.parent, nodes.TryFinally)):
return
if (isinstance(node.parent, nodes.If) and not node.parent.orelse
and self.config.single_line_if_stmt):
return
self.add_message('multiple-statements', node=node)
self._visited_lines[line] = 2
def check_lines(self, lines, i):
"""check lines have less than a maximum number of characters
"""
max_chars = self.config.max_line_length
ignore_long_line = self.config.ignore_long_lines
for line in lines.splitlines(True):
if not line.endswith('\n'):
self.add_message('missing-final-newline', line=i)
else:
stripped_line = line.rstrip()
if line[len(stripped_line):] not in ('\n', '\r\n'):
self.add_message('trailing-whitespace', line=i)
# Don't count excess whitespace in the line length.
line = stripped_line
mobj = OPTION_RGX.search(line)
if mobj and mobj.group(1).split('=', 1)[0].strip() == 'disable':
line = line.split('#')[0].rstrip()
if len(line) > max_chars and not ignore_long_line.search(line):
self.add_message('line-too-long', line=i, args=(len(line), max_chars))
i += 1
def check_indent_level(self, string, expected, line_num):
"""return the indent level of the string
"""
indent = self.config.indent_string
if indent == '\\t': # \t is not interpreted in the configuration file
indent = '\t'
level = 0
unit_size = len(indent)
while string[:unit_size] == indent:
string = string[unit_size:]
level += 1
suppl = ''
while string and string[0] in ' \t':
if string[0] != indent[0]:
if string[0] == '\t':
args = ('tab', 'space')
else:
args = ('space', 'tab')
self.add_message('mixed-indentation', args=args, line=line_num)
return level
suppl += string[0]
string = string[1:]
if level != expected or suppl:
i_type = 'spaces'
if indent[0] == '\t':
i_type = 'tabs'
self.add_message('bad-indentation', line=line_num,
args=(level * unit_size + len(suppl), i_type,
expected * unit_size))
def register(linter):
"""required method to auto register this checker """
linter.register_checker(FormatChecker(linter))
|
Thraxis/pymedusa | refs/heads/master | lib/sqlalchemy/testing/__init__.py | 34 | # testing/__init__.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from .warnings import assert_warnings
from . import config
from .exclusions import db_spec, _is_excluded, fails_if, skip_if, future,\
fails_on, fails_on_everything_except, skip, only_on, exclude, \
against as _against, _server_version, only_if, fails
def against(*queries):
return _against(config._current, *queries)
from .assertions import emits_warning, emits_warning_on, uses_deprecated, \
eq_, ne_, le_, is_, is_not_, startswith_, assert_raises, \
assert_raises_message, AssertsCompiledSQL, ComparesTables, \
AssertsExecutionResults, expect_deprecated, expect_warnings, \
in_, not_in_
from .util import run_as_contextmanager, rowset, fail, \
provide_metadata, adict, force_drop_names, \
teardown_events
crashes = skip
from .config import db
from .config import requirements as requires
from . import mock
|
axinging/chromium-crosswalk | refs/heads/master | tools/android/loading/prefetch_view.py | 10 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Models the effect of prefetching resources from a loading trace.
For example, this can be used to evaluate NoState Prefetch
(https://goo.gl/B3nRUR).
When executed as a script, takes a trace as a command-line argument and shows
statistics about it.
"""
import itertools
import operator
import common_util
import dependency_graph
import graph
import loading_trace
import user_satisfied_lens
import request_dependencies_lens
import request_track
class RequestNode(dependency_graph.RequestNode):
"""Simulates the effect of prefetching resources discoverable by the preload
scanner.
"""
_ATTRS = ['preloaded', 'before']
def __init__(self, request=None):
super(RequestNode, self).__init__(request)
self.preloaded = False
self.before = False
def ToJsonDict(self):
result = super(RequestNode, self).ToJsonDict()
return common_util.SerializeAttributesToJsonDict(result, self, self._ATTRS)
@classmethod
def FromJsonDict(cls, json_dict):
result = super(RequestNode, cls).FromJsonDict(json_dict)
return common_util.DeserializeAttributesFromJsonDict(
json_dict, result, cls._ATTRS)
class PrefetchSimulationView(object):
"""Simulates the effect of prefetch."""
def __init__(self, trace, dependencies_lens, user_lens):
self.postload_msec = None
self.graph = None
if trace is None:
return
requests = trace.request_track.GetEvents()
critical_requests_ids = user_lens.CriticalRequestIds()
self.postload_msec = user_lens.PostloadTimeMsec()
self.graph = dependency_graph.RequestDependencyGraph(
requests, dependencies_lens, node_class=RequestNode)
preloaded_requests = [r.request_id for r in self.PreloadedRequests(
requests[0], dependencies_lens, trace)]
self._AnnotateNodes(self.graph.graph.Nodes(), preloaded_requests,
critical_requests_ids)
def Cost(self):
"""Returns the cost of the graph, restricted to the critical requests."""
pruned_graph = self._PrunedGraph()
return pruned_graph.Cost() + self.postload_msec
def UpdateNodeCosts(self, node_to_cost):
"""Updates the cost of nodes, according to |node_to_cost|.
Args:
node_to_cost: (Callable) RequestNode -> float. Callable returning the cost
of a node.
"""
pruned_graph = self._PrunedGraph()
for node in pruned_graph.Nodes():
node.cost = node_to_cost(node)
def ToJsonDict(self):
"""Returns a dict representing this instance."""
result = {'graph': self.graph.ToJsonDict()}
return common_util.SerializeAttributesToJsonDict(
result, self, ['postload_msec'])
@classmethod
def FromJsonDict(cls, json_dict):
"""Returns an instance of PrefetchSimulationView from a dict dumped by
ToJsonDict().
"""
result = cls(None, None, None)
result.graph = dependency_graph.RequestDependencyGraph.FromJsonDict(
json_dict['graph'], RequestNode, dependency_graph.Edge)
return common_util.DeserializeAttributesFromJsonDict(
json_dict, result, ['postload_msec'])
@classmethod
def _AnnotateNodes(cls, nodes, preloaded_requests_ids,
critical_requests_ids,):
for node in nodes:
node.preloaded = node.request.request_id in preloaded_requests_ids
node.before = node.request.request_id in critical_requests_ids
@classmethod
def ParserDiscoverableRequests(
cls, request, dependencies_lens, recurse=False):
"""Returns a list of requests IDs dicovered by the parser.
Args:
request: (Request) Root request.
Returns:
[Request]
"""
# TODO(lizeb): handle the recursive case.
assert not recurse
discoverable_requests = [request]
first_request = dependencies_lens.GetRedirectChain(request)[-1]
deps = dependencies_lens.GetRequestDependencies()
for (first, second, reason) in deps:
if first.request_id == first_request.request_id and reason == 'parser':
discoverable_requests.append(second)
return discoverable_requests
@classmethod
def _ExpandRedirectChains(cls, requests, dependencies_lens):
return list(itertools.chain.from_iterable(
[dependencies_lens.GetRedirectChain(r) for r in requests]))
@classmethod
def PreloadedRequests(cls, request, dependencies_lens, trace):
"""Returns the requests that have been preloaded from a given request.
This list is the set of request that are:
- Discoverable by the parser
- Found in the trace log.
Before looking for dependencies, this follows the redirect chain.
Args:
request: (Request) Root request.
Returns:
A list of Request. Does not include the root request. This list is a
subset of the one returned by ParserDiscoverableRequests().
"""
# Preload step events are emitted in ResourceFetcher::preloadStarted().
resource_events = trace.tracing_track.Filter(
categories=set([u'blink.net']))
preload_step_events = filter(
lambda e: e.args.get('step') == 'Preload',
resource_events.GetEvents())
preloaded_urls = set()
for preload_step_event in preload_step_events:
preload_event = resource_events.EventFromStep(preload_step_event)
if preload_event:
preloaded_urls.add(preload_event.args['data']['url'])
parser_requests = cls.ParserDiscoverableRequests(
request, dependencies_lens)
preloaded_root_requests = filter(
lambda r: r.url in preloaded_urls, parser_requests)
# We can actually fetch the whole redirect chain.
return [request] + list(itertools.chain.from_iterable(
[dependencies_lens.GetRedirectChain(r)
for r in preloaded_root_requests]))
def _PrunedGraph(self):
roots = self.graph.graph.RootNodes()
nodes = self.graph.graph.ReachableNodes(
roots, should_stop=lambda n: not n.before)
return graph.DirectedGraph(nodes, self.graph.graph.Edges())
def _PrintSummary(trace, dependencies_lens, user_lens):
prefetch_view = PrefetchSimulationView(trace, dependencies_lens, user_lens)
print 'Time to First Contentful Paint = %.02fms' % prefetch_view.Cost()
print 'Set costs of prefetched requests to 0.'
prefetch_view.UpdateNodeCosts(lambda n: 0 if n.preloaded else n.cost)
print 'Time to First Contentful Paint = %.02fms' % prefetch_view.Cost()
def main(filename):
trace = loading_trace.LoadingTrace.FromJsonFile(filename)
dependencies_lens = request_dependencies_lens.RequestDependencyLens(trace)
user_lens = user_satisfied_lens.FirstContentfulPaintLens(trace)
_PrintSummary(trace, dependencies_lens, user_lens)
if __name__ == '__main__':
import sys
main(sys.argv[1])
|
thaim/ansible | refs/heads/fix-broken-link | lib/ansible/plugins/lookup/grafana_dashboard.py | 12 | # (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
lookup: grafana_dashboard
author: Thierry Salle (@seuf)
version_added: "2.7"
short_description: list or search grafana dashboards
description:
- This lookup returns a list of grafana dashboards with the possibility of filtering them by query.
options:
grafana_url:
description: url of grafana.
env:
- name: GRAFANA_URL
default: http://127.0.0.1:3000
grafana_api_key:
description:
- api key of grafana.
- when C(grafana_api_key) is set, the options C(grafana_user), C(grafana_password) and C(grafana_org_id) are ignored.
- Attention, please remove the two == at the end of the grafana_api_key
- because ansible lookup plugins options are split on = (see example).
env:
- name: GRAFANA_API_KEY
grafana_user:
description: grafana authentication user.
env:
- name: GRAFANA_USER
default: admin
grafana_password:
description: grafana authentication password.
env:
- name: GRAFANA_PASSWORD
default: admin
grafana_org_id:
description: grafana organisation id.
env:
- name: GRAFANA_ORG_ID
default: 1
search:
description: optional filter for dashboard search.
env:
- name: GRAFANA_DASHBOARD_SEARCH
"""
EXAMPLES = """
- name: get project foo grafana dashboards
set_fact:
grafana_dashboards: "{{ lookup('grafana_dashboard', 'grafana_url=http://grafana.company.com grafana_user=admin grafana_password=admin search=foo') }}"
- name: get all grafana dashboards
set_fact:
grafana_dashboards: "{{ lookup('grafana_dashboard', 'grafana_url=http://grafana.company.com grafana_api_key=' ~ grafana_api_key|replace('==', '')) }}"
"""
import base64
import json
import os
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url
from ansible.module_utils._text import to_bytes, to_native
from ansible.module_utils.six.moves.urllib.error import HTTPError
from ansible.utils.display import Display
display = Display()
ANSIBLE_GRAFANA_URL = 'http://127.0.0.1:3000'
ANSIBLE_GRAFANA_API_KEY = None
ANSIBLE_GRAFANA_USER = 'admin'
ANSIBLE_GRAFANA_PASSWORD = 'admin'
ANSIBLE_GRAFANA_ORG_ID = 1
ANSIBLE_GRAFANA_DASHBOARD_SEARCH = None
if os.getenv('GRAFANA_URL') is not None:
ANSIBLE_GRAFANA_URL = os.environ['GRAFANA_URL']
if os.getenv('GRAFANA_API_KEY') is not None:
ANSIBLE_GRAFANA_API_KEY = os.environ['GRAFANA_API_KEY']
if os.getenv('GRAFANA_USER') is not None:
ANSIBLE_GRAFANA_USER = os.environ['GRAFANA_USER']
if os.getenv('GRAFANA_PASSWORD') is not None:
ANSIBLE_GRAFANA_PASSWORD = os.environ['GRAFANA_PASSWORD']
if os.getenv('GRAFANA_ORG_ID') is not None:
ANSIBLE_GRAFANA_ORG_ID = os.environ['GRAFANA_ORG_ID']
if os.getenv('GRAFANA_DASHBOARD_SEARCH') is not None:
ANSIBLE_GRAFANA_DASHBOARD_SEARCH = os.environ['GRAFANA_DASHBOARD_SEARCH']
class GrafanaAPIException(Exception):
pass
class GrafanaAPI:
def __init__(self, **kwargs):
self.grafana_url = kwargs.get('grafana_url', ANSIBLE_GRAFANA_URL)
self.grafana_api_key = kwargs.get('grafana_api_key', ANSIBLE_GRAFANA_API_KEY)
self.grafana_user = kwargs.get('grafana_user', ANSIBLE_GRAFANA_USER)
self.grafana_password = kwargs.get('grafana_password', ANSIBLE_GRAFANA_PASSWORD)
self.grafana_org_id = kwargs.get('grafana_org_id', ANSIBLE_GRAFANA_ORG_ID)
self.search = kwargs.get('search', ANSIBLE_GRAFANA_DASHBOARD_SEARCH)
def grafana_switch_organisation(self, headers):
try:
r = open_url('%s/api/user/using/%s' % (self.grafana_url, self.grafana_org_id), headers=headers, method='POST')
except HTTPError as e:
raise GrafanaAPIException('Unable to switch to organization %s : %s' % (self.grafana_org_id, to_native(e)))
if r.getcode() != 200:
raise GrafanaAPIException('Unable to switch to organization %s : %s' % (self.grafana_org_id, str(r.getcode())))
def grafana_headers(self):
headers = {'content-type': 'application/json; charset=utf8'}
if self.grafana_api_key:
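            # re-append the two trailing '==' that the user stripped from the
            # api key so the lookup option parsing would not break on '='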
headers['Authorization'] = "Bearer %s==" % self.grafana_api_key
else:
auth = base64.b64encode(to_bytes('%s:%s' % (self.grafana_user, self.grafana_password)).replace('\n', ''))
headers['Authorization'] = 'Basic %s' % auth
self.grafana_switch_organisation(headers)
return headers
def grafana_list_dashboards(self):
# define http headers
headers = self.grafana_headers()
dashboard_list = []
try:
if self.search:
r = open_url('%s/api/search?query=%s' % (self.grafana_url, self.search), headers=headers, method='GET')
else:
r = open_url('%s/api/search/' % self.grafana_url, headers=headers, method='GET')
except HTTPError as e:
raise GrafanaAPIException('Unable to search dashboards : %s' % to_native(e))
if r.getcode() == 200:
try:
dashboard_list = json.loads(r.read())
except Exception as e:
raise GrafanaAPIException('Unable to parse json list %s' % to_native(e))
else:
raise GrafanaAPIException('Unable to list grafana dashboards : %s' % str(r.getcode()))
return dashboard_list
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
grafana_args = terms[0].split(' ')
grafana_dict = {}
ret = []
for param in grafana_args:
try:
key, value = param.split('=')
except ValueError:
raise AnsibleError("grafana_dashboard lookup plugin needs key=value pairs, but received %s" % terms)
grafana_dict[key] = value
grafana = GrafanaAPI(**grafana_dict)
ret = grafana.grafana_list_dashboards()
return ret
|
martinrotter/textilosaurus | refs/heads/master | src/libtextosaurus/3rd-party/scintilla-lt/scripts/Face.py | 4 | # Face.py - module for reading and parsing Scintilla.iface file
# Implemented 2000 by Neil Hodgson neilh@scintilla.org
# Released to the public domain.
# Requires Python 2.5 or later
def sanitiseLine(line):
if line[-1:] == '\n': line = line[:-1]
if line.find("##") != -1:
line = line[:line.find("##")]
line = line.strip()
return line
def decodeFunction(featureVal):
retType, rest = featureVal.split(" ", 1)
nameIdent, params = rest.split("(")
name, value = nameIdent.split("=")
params, rest = params.split(")")
param1, param2 = params.split(",")
return retType, name, value, param1, param2
def decodeEvent(featureVal):
retType, rest = featureVal.split(" ", 1)
nameIdent, params = rest.split("(")
name, value = nameIdent.split("=")
return retType, name, value
def decodeParam(p):
param = p.strip()
type = ""
name = ""
value = ""
if " " in param:
type, nv = param.split(" ")
if "=" in nv:
name, value = nv.split("=")
else:
name = nv
return type, name, value
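# Illustrative sketch (not part of the original decode helpers): for a
# hypothetical iface line 'fun int AddText=2001(int length, string text)',
# decodeFunction("int AddText=2001(int length, string text)") returns
# ('int', 'AddText', '2001', 'int length', ' string text'), and
# decodeParam(' string text') returns ('string', 'text', '').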
def IsEnumeration(t):
return t[:1].isupper()
class Face:
def __init__(self):
self.order = []
self.features = {}
self.values = {}
self.events = {}
self.aliases = {}
def ReadFromFile(self, name):
currentCategory = ""
currentComment = []
currentCommentFinished = 0
file = open(name)
for line in file.readlines():
line = sanitiseLine(line)
if line:
if line[0] == "#":
if line[1] == " ":
if currentCommentFinished:
currentComment = []
currentCommentFinished = 0
currentComment.append(line[2:])
else:
currentCommentFinished = 1
featureType, featureVal = line.split(" ", 1)
if featureType in ["fun", "get", "set"]:
try:
retType, name, value, param1, param2 = decodeFunction(featureVal)
except ValueError:
print("Failed to decode %s" % line)
raise
p1 = decodeParam(param1)
p2 = decodeParam(param2)
self.features[name] = {
"FeatureType": featureType,
"ReturnType": retType,
"Value": value,
"Param1Type": p1[0], "Param1Name": p1[1], "Param1Value": p1[2],
"Param2Type": p2[0], "Param2Name": p2[1], "Param2Value": p2[2],
"Category": currentCategory, "Comment": currentComment
}
if value in self.values:
raise Exception("Duplicate value " + value + " " + name)
self.values[value] = 1
self.order.append(name)
currentComment = []
elif featureType == "evt":
retType, name, value = decodeEvent(featureVal)
self.features[name] = {
"FeatureType": featureType,
"ReturnType": retType,
"Value": value,
"Category": currentCategory, "Comment": currentComment
}
if value in self.events:
raise Exception("Duplicate event " + value + " " + name)
self.events[value] = 1
self.order.append(name)
elif featureType == "cat":
currentCategory = featureVal
elif featureType == "val":
try:
name, value = featureVal.split("=", 1)
except ValueError:
print("Failure %s" % featureVal)
raise Exception()
self.features[name] = {
"FeatureType": featureType,
"Category": currentCategory,
"Value": value }
self.order.append(name)
elif featureType == "enu" or featureType == "lex":
name, value = featureVal.split("=", 1)
self.features[name] = {
"FeatureType": featureType,
"Category": currentCategory,
"Value": value,
"Comment": currentComment }
self.order.append(name)
currentComment = []
elif featureType == "ali":
# Enumeration alias
name, value = featureVal.split("=", 1)
self.aliases[name] = value
currentComment = []
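# Minimal usage sketch (illustrative; the iface path below is an assumption):
#
#   face = Face()
#   face.ReadFromFile("Scintilla.iface")
#   for name in face.order:
#       feature = face.features[name]
#       print("%s (%s) = %s" % (name, feature["FeatureType"], feature.get("Value")))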
|
I2Cvb/hyper_learn | refs/heads/master | src/sampling_strategy/data_base.py | 1 | """random thoughts - sampling_strategy, support code"""
import collections
import numpy as np
# from data_class_instance import *
# from i_data_model import *
DataClassInstance = collections.namedtuple('DataClassInstance', 'name color')
"""DataClassInstance defininition as a namedtuple to store data identifying
info.
This information describing the data in hand is further used in plots or to
retrieve the data based on the data class attributes.
Attributes:
name (str): Human readable string to identify the data in hand.
color (str): color code to identify the data when plotting. The color
should be specified in hexacode as '#00ff00'.
Note:
    In order to see the expansion of the class code, execute it with the
`verbose=True` parameter.
:version: 0.0.1
:author: sik
"""
DBElement = collections.namedtuple('DBElement', 'dbeClass dbeModel dbeSamples')
"""DBElement defininition as a namedtuple to store all the information
regarding a particular data class
Attributes:
dbeClass (DataClassInstance): data base element class indentification \
information.
dbeModel (IDataModel): data base element model
dbeSamples (nparray): data base element samples
Note:
    In order to see the expansion of the class code, execute it with the
`verbose=True` parameter.
:version: 0.0.1
:author: sik
"""
class DataBase(dict):
"""DataBase is a (class, model, samples) triplet diccionary that can be
accessed using the class name of the data.
Note:
        * the names of the data class **must** be unique, since they are used as \
the dictionary key.
Attributes:
self (dictionary of DBElement): each element corresponds to a different
group of data within the data base.
:version: 0.0.1
:author: sik
"""
def __init__(self, dbelementList):
"""__init__ constructs a DataBase from the given parameters.(not fully
implemnted, yet. Rightnow only accepted parameter is a DBElement list
to be passed to the dict default __init__)
.. todo:: [code] this method should be overloaded in order to take \
diferent instanciations
:version: 0.0.1
:author: sik
"""
try:
keyList = [e.dbeClass.name for e in dbelementList]
except TypeError:
            eString = '{0}.__init__ TypeError, valid DBElement with \
                DataClassInstance as dbeClass element was expected. \
                Instead {1}, {2} types were given.'
print eString.format(self.__class__,
dbelementList[0].__class__,
dbelementList[0].dbeClass.__class__)
super(self.__class__, self).__init__(zip(keyList, dbelementList))
def __str__(self):
"""
        formatted console print of the DataBase object
        .. todo:: [code] maybe the element formatting should be handled by \
DBElement. \
see `stackOverflow <http://stackoverflow.com/questions/7914152/can-i-overwrite-the-string-form-of-a-namedtuple>`_ \
and `the docs <http://docs.python.org/library/collections.html#collections.namedtuple>`_
:version: 0.0.1
:author: sik
"""
# define the format strings for each DBElement attribute
classFStr = "class: '{0.dbeClass.name:s}', '{0.dbeClass.color:s}'\n"
modelFStr = "model: {0.dbeModel:s}\n"
shapeFStr = "shape: {0.dbeSamples:s}\n"
dbElementsReport = ""
for dKey, dElement in self.items():
dbElementsReport += "\n"
for fStr, dType in zip([classFStr, modelFStr, shapeFStr],
['DataClassInstance', 'IDataModel',
'np.array']):
try:
dbElementsReport += "\t<{0:s}> ".format(dKey)
dbElementsReport += fStr.format(dElement)
except TypeError:
print "un-expected dataType. ({} expected)".format(dType)
return "{0:d}-class DataBase{1:s}".format(len(self), dbElementsReport)
def get_range(self):
"""
get_range returns the [xmin xmax ymin ymax] needed to display the data
Return:
Array of (min, max) tuples of all the data dimensions
:rtype: array of tuples
:version: 0.0.1
:author: sik
    .. # I don't understand how to do this without numpy.
.. todo:: [code][to Impl.] This is a copy-paste of the previous code structure
"""
# cBoundaries=np.asarray([c.get_range()for c in self._data])
# return[min(cBoundaries[:,0]),max(cBoundaries[:,1]),
# min(cBoundaries[:,2]),max(cBoundaries[:,3])]
def draw_models(self, axisId):
"""
@param mplAxis axisId :
@return :
@author sik
"""
pass
def draw_samples(self, axisId):
"""
@param mplAxis axisId :
@return :
@author sik
"""
pass
class DataSet (DataBase):
"""
.. todo:: [code] whole DataSet class
:version:
:author: sik
"""
def draw_samples(self, axisId, fade=0.5):
"""
draw_samples draws a scatter plot of the dataset data fading those \
DataBase samples not belonging to the DataSet
@param mplAxis axisId :
@param float fade : fading ratio
@return :
@author sik
"""
pass
def _test():
""" test function to call when executing this file directly """
nTestClasses = 3
myClasses = [DataClassInstance('c{}'.format(i), '#fffff{}'.format(i))
for i in range(1, nTestClasses)]
myDBEList = [DBElement(c, 'model_{}'.format(idx+1), np.array([0, idx]))
for idx, c in enumerate(myClasses)]
dd = DataBase(myDBEList)
print dd
# myDBElementDictionary = dict(zip([x.name for x in myClasses], myDBEList))
# for k in myDBElementDictionary.iterkeys():
# assert dd[k] == myDBElementDictionary[k]
if __name__ == '__main__':
_test()
|
Permutatrix/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/html5lib/html5lib/treewalkers/pulldom.py | 1729 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
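# Minimal usage sketch (illustrative only; html5lib normally selects this
# walker via treewalkers.getTreeWalker("pulldom")):
#
#   from xml.dom import pulldom
#   stream = pulldom.parseString("<div><br/>some text</div>")
#   for token in TreeWalker(stream):
#       print(token["type"])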
|
sheep7/leopard-lavatory | refs/heads/master | leopard_lavatory/celery/celery_factory.py | 1 | from celery import Celery
def make_celery(app):
celery = Celery(
app.import_name,
backend=app.config['CELERY_RESULT_BACKEND'],
broker=app.config['CELERY_BROKER_URL']
)
celery.conf.update(app.config)
class ContextTask(celery.Task):
def __call__(self, *args, **kwargs):
with app.app_context():
return self.run(*args, **kwargs)
# noinspection PyPropertyAccess
celery.Task = ContextTask
return celery
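# Usage sketch (illustrative; the Flask app, broker URL and task below are
# assumptions, not part of this module):
#
#   from flask import Flask
#
#   flask_app = Flask(__name__)
#   flask_app.config.update(
#       CELERY_BROKER_URL='redis://localhost:6379',
#       CELERY_RESULT_BACKEND='redis://localhost:6379',
#   )
#   celery = make_celery(flask_app)
#
#   @celery.task()
#   def add_together(a, b):
#       return a + b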
|
andreadean5/python-hpOneView | refs/heads/master | tests/unit/test_connection.py | 1 | # -*- coding: utf-8 -*-
###
# (C) Copyright (2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import json
import mock
import unittest
from http.client import HTTPSConnection
from hpOneView.connection import connection
from hpOneView.exceptions import HPOneViewException
from mock import call
class ConnectionTest(unittest.TestCase):
def setUp(self):
self.host = '127.0.0.1'
self.connection = connection(self.host)
self.accept_language_header = {
'Accept-Language': 'en_US'
}
self.default_headers = {
'X-API-Version': 200,
'Accept': 'application/json',
'Content-Type': 'application/json'
}
self.merged_headers = {
'X-API-Version': 200,
'Accept': 'application/json',
'Content-Type': 'application/json',
'Accept-Language': 'en_US'
}
self.request_body = {"request body": "content"}
self.response_body = {"response body": "content"}
self.dumped_request_body = json.dumps(self.request_body.copy())
self.expected_response_body = self.response_body.copy()
def __make_http_response(self, status):
mock_response = mock.Mock(status=status)
mock_response.read.return_value = json.dumps(self.response_body).encode('utf-8')
if status == 200 or status == 202:
mock_response.getheader.return_value = '/task/uri'
return mock_response
def test_default_headers(self):
self.assertEqual(self.default_headers, self.connection._headers)
def test_headers_with_api_version_300(self):
self.connection = connection(self.host, 300)
expected_headers = self.default_headers.copy()
expected_headers['X-API-Version'] = 300
self.assertEqual(expected_headers, self.connection._headers)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_post_should_do_rest_call_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
self.connection.post('/path', self.request_body)
mock_request.assert_called_once_with('POST', '/path', self.dumped_request_body, self.default_headers)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_post_should_do_rest_calls_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.post('/path', self.request_body)
expected_calls = [call('POST', '/path', self.dumped_request_body, self.default_headers),
call('GET', '/task/uri', '', self.default_headers)]
self.assertEqual(expected_calls, mock_request.call_args_list)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_post_should_send_merged_headers_when_headers_provided(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.post('/path', self.request_body, custom_headers=self.accept_language_header)
expected_calls = [call('POST', mock.ANY, mock.ANY, self.merged_headers), mock.ANY]
self.assertEqual(expected_calls, mock_request.call_args_list)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_post_should_return_body_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
result = self.connection.post('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (None, self.expected_response_body)
self.assertEqual(expected_result, result)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_post_should_return_tuple_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
result = self.connection.post('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (self.expected_response_body, self.expected_response_body)
self.assertEqual(result, expected_result)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_post_should_raise_exception_when_status_internal_error(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=400)
try:
self.connection.post('/path', self.request_body)
except HPOneViewException as e:
self.assertEqual(e.msg, self.expected_response_body)
else:
self.fail()
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_post_should_raise_exception_when_status_not_found(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=404)
try:
self.connection.post('/path', self.request_body)
except HPOneViewException as e:
self.assertEqual(e.msg, self.expected_response_body)
else:
self.fail()
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_put_should_do_rest_call_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
self.connection.put('/path', self.request_body)
mock_request.assert_called_once_with('PUT', '/path', self.dumped_request_body, self.default_headers)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_put_should_do_rest_calls_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.put('/path', self.request_body)
expected_calls = [call('PUT', '/path', self.dumped_request_body, self.default_headers),
call('GET', '/task/uri', '', self.default_headers)]
self.assertEqual(expected_calls, mock_request.call_args_list)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_put_should_send_merged_headers_when_headers_provided(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.put('/path', self.request_body, custom_headers=self.accept_language_header)
expected_calls = [call('PUT', mock.ANY, mock.ANY, self.merged_headers), mock.ANY]
self.assertEqual(expected_calls, mock_request.call_args_list)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_put_should_return_body_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
result = self.connection.put('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (None, self.expected_response_body)
self.assertEqual(result, expected_result)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_put_should_return_tuple_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
result = self.connection.put('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (self.expected_response_body, self.expected_response_body)
self.assertEqual(result, expected_result)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_put_should_raise_exception_when_status_internal_error(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=400)
try:
self.connection.put('/path', self.request_body)
except HPOneViewException as e:
self.assertEqual(e.msg, self.expected_response_body)
else:
self.fail()
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_put_should_raise_exception_when_status_not_found(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=404)
try:
self.connection.put('/path', self.request_body)
except HPOneViewException as e:
self.assertEqual(e.msg, self.expected_response_body)
else:
self.fail()
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_do_rest_call_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
self.connection.patch('/path', self.request_body)
mock_request.assert_called_once_with('PATCH', '/path', self.dumped_request_body, self.default_headers)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_do_rest_calls_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.patch('/path', self.request_body)
expected_calls = [call('PATCH', '/path', self.dumped_request_body, self.default_headers),
call('GET', '/task/uri', '', self.default_headers)]
self.assertEqual(expected_calls, mock_request.call_args_list)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_send_merged_headers_when_headers_provided(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.patch('/path', self.request_body, custom_headers=self.accept_language_header)
expected_calls = [call('PATCH', mock.ANY, mock.ANY, self.merged_headers), mock.ANY]
self.assertEqual(expected_calls, mock_request.call_args_list)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_return_body_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
result = self.connection.patch('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (None, self.expected_response_body)
self.assertEqual(result, expected_result)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_return_tuple_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
result = self.connection.patch('/path', self.response_body, custom_headers=self.accept_language_header)
expected_result = (self.expected_response_body, self.expected_response_body)
self.assertEqual(result, expected_result)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_raise_exception_when_status_internal_error(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=400)
try:
self.connection.patch('/path', self.request_body)
except HPOneViewException as e:
self.assertEqual(e.msg, self.expected_response_body)
else:
self.fail()
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_patch_should_raise_exception_when_status_not_found(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=404)
try:
self.connection.patch('/path', self.request_body)
except HPOneViewException as e:
self.assertEqual(e.msg, self.expected_response_body)
else:
self.fail()
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_do_rest_calls_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
self.connection.delete('/path')
mock_request.assert_called_once_with('DELETE', '/path', json.dumps(''), self.default_headers)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_do_rest_calls_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.delete('/path')
expected_calls = [call('DELETE', '/path', json.dumps(''), self.default_headers),
call('GET', '/task/uri', '', self.default_headers)]
self.assertEqual(expected_calls, mock_request.call_args_list)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_send_merged_headers_when_headers_provided(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
self.connection.delete('/path', custom_headers=self.accept_language_header)
expected_calls = [call('DELETE', mock.ANY, mock.ANY, self.merged_headers), mock.ANY]
self.assertEqual(expected_calls, mock_request.call_args_list)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_return_body_when_status_ok(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=200)
result = self.connection.delete('/path', custom_headers=self.accept_language_header)
expected_result = (None, self.expected_response_body)
self.assertEqual(result, expected_result)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_return_tuple_when_status_accepted(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=202)
result = self.connection.delete('/path', custom_headers=self.accept_language_header)
expected_result = (self.expected_response_body, self.expected_response_body)
self.assertEqual(result, expected_result)
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_raise_exception_when_status_internal_error(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=400)
try:
self.connection.delete('/path')
except HPOneViewException as e:
self.assertEqual(e.msg, self.expected_response_body)
else:
self.fail()
@mock.patch.object(HTTPSConnection, 'request')
@mock.patch.object(HTTPSConnection, 'getresponse')
def test_delete_should_raise_exception_when_status_not_found(self, mock_response, mock_request):
mock_request.return_value = {}
mock_response.return_value = self.__make_http_response(status=404)
try:
self.connection.delete('/path', self.request_body)
except HPOneViewException as e:
self.assertEqual(e.msg, self.expected_response_body)
else:
self.fail()
|
levigross/pyscanner | refs/heads/master | mytests/django/contrib/gis/tests/geoapp/feeds.py | 308 | from __future__ import absolute_import
from django.contrib.gis import feeds
from .models import City
class TestGeoRSS1(feeds.Feed):
link = '/city/'
title = 'Test GeoDjango Cities'
def items(self):
return City.objects.all()
def item_link(self, item):
return '/city/%s/' % item.pk
def item_geometry(self, item):
return item.point
class TestGeoRSS2(TestGeoRSS1):
def geometry(self, obj):
        # This should attach a <georss:box> element for the extent
        # of the cities in the database. This tuple came from
# calling `City.objects.extent()` -- we can't do that call here
# because `extent` is not implemented for MySQL/Oracle.
return (-123.30, -41.32, 174.78, 48.46)
def item_geometry(self, item):
# Returning a simple tuple for the geometry.
return item.point.x, item.point.y
class TestGeoAtom1(TestGeoRSS1):
feed_type = feeds.GeoAtom1Feed
class TestGeoAtom2(TestGeoRSS2):
feed_type = feeds.GeoAtom1Feed
def geometry(self, obj):
# This time we'll use a 2-tuple of coordinates for the box.
return ((-123.30, -41.32), (174.78, 48.46))
class TestW3CGeo1(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
# The following feeds are invalid, and will raise exceptions.
class TestW3CGeo2(TestGeoRSS2):
feed_type = feeds.W3CGeoFeed
class TestW3CGeo3(TestGeoRSS1):
feed_type = feeds.W3CGeoFeed
def item_geometry(self, item):
from django.contrib.gis.geos import Polygon
return Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
# The feed dictionary to use for URLs.
feed_dict = {
'rss1' : TestGeoRSS1,
'rss2' : TestGeoRSS2,
'atom1' : TestGeoAtom1,
'atom2' : TestGeoAtom2,
'w3cgeo1' : TestW3CGeo1,
'w3cgeo2' : TestW3CGeo2,
'w3cgeo3' : TestW3CGeo3,
}
|
Yong-Lee/decode-Django | refs/heads/master | Django-1.5.1/tests/regressiontests/m2m_through_regress/__init__.py | 45382 | |
Mattze96/youtube-dl | refs/heads/master | youtube_dl/downloader/rtsp.py | 119 | from __future__ import unicode_literals
import os
import subprocess
from .common import FileDownloader
from ..utils import (
check_executable,
encodeFilename,
)
class RtspFD(FileDownloader):
def real_download(self, filename, info_dict):
url = info_dict['url']
self.report_destination(filename)
tmpfilename = self.temp_name(filename)
if check_executable('mplayer', ['-h']):
args = [
'mplayer', '-really-quiet', '-vo', 'null', '-vc', 'dummy',
'-dumpstream', '-dumpfile', tmpfilename, url]
elif check_executable('mpv', ['-h']):
args = [
'mpv', '-really-quiet', '--vo=null', '--stream-dump=' + tmpfilename, url]
else:
self.report_error('MMS or RTSP download detected but neither "mplayer" nor "mpv" could be run. Please install any.')
return False
retval = subprocess.call(args)
if retval == 0:
fsize = os.path.getsize(encodeFilename(tmpfilename))
self.to_screen('\r[%s] %s bytes' % (args[0], fsize))
self.try_rename(tmpfilename, filename)
self._hook_progress({
'downloaded_bytes': fsize,
'total_bytes': fsize,
'filename': filename,
'status': 'finished',
})
return True
else:
self.to_stderr('\n')
self.report_error('%s exited with code %d' % (args[0], retval))
return False
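# Rough usage sketch (illustrative; in practice a YoutubeDL instance selects
# this downloader itself, and 'ydl' below is an assumed YoutubeDL object):
#
#   fd = RtspFD(ydl, {})
#   fd.download('out.mp4', {'url': 'rtsp://example.com/stream'})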
|
yaii/yai | refs/heads/alpha | share/extensions/grid_polar.py | 3 | #!/usr/bin/env python
'''
Copyright (C) 2007 John Beard john.j.beard@gmail.com
##This extension allows you to draw a polar grid in Inkscape.
##There is a wide range of options including subdivision and labels.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import inkex
import simplestyle, sys
from math import *
def draw_SVG_circle(r, cx, cy, width, fill, name, parent):
style = { 'stroke': '#000000', 'stroke-width':str(width), 'fill': fill }
circ_attribs = {'style':simplestyle.formatStyle(style),
'cx':str(cx), 'cy':str(cy),
'r':str(r),
inkex.addNS('label','inkscape'):name}
circle = inkex.etree.SubElement(parent, inkex.addNS('circle','svg'), circ_attribs )
def draw_SVG_line(x1, y1, x2, y2, width, name, parent):
style = { 'stroke': '#000000', 'stroke-width':str(width), 'fill': 'none' }
line_attribs = {'style':simplestyle.formatStyle(style),
inkex.addNS('label','inkscape'):name,
'd':'M '+str(x1)+','+str(y1)+' L '+str(x2)+','+str(y2)}
inkex.etree.SubElement(parent, inkex.addNS('path','svg'), line_attribs )
def draw_SVG_label_centred(x, y, string, font_size, name, parent):
style = {'text-align': 'center', 'vertical-align': 'top',
'text-anchor': 'middle', 'font-size': str(font_size)+'px',
'fill-opacity': '1.0', 'stroke': 'none',
'font-weight': 'normal', 'font-style': 'normal', 'fill': '#000000'}
label_attribs = {'style':simplestyle.formatStyle(style),
inkex.addNS('label','inkscape'):name,
'x':str(x), 'y':str(y)}
label = inkex.etree.SubElement(parent, inkex.addNS('text','svg'), label_attribs)
label.text = string
class Grid_Polar(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option("--r_divs",
action="store", type="int",
dest="r_divs", default=5,
help="Circular Divisions")
self.OptionParser.add_option("--dr",
action="store", type="float",
dest="dr", default=50,
help="Circular Division Spacing")
self.OptionParser.add_option("--r_subdivs",
action="store", type="int",
dest="r_subdivs", default=3,
help="Circular Subdivisions per Major division")
self.OptionParser.add_option("--r_log",
action="store", type="inkbool",
dest="r_log", default=False,
help="Logarithmic subdivisions if true")
self.OptionParser.add_option("--r_divs_th",
action="store", type="float",
dest="r_divs_th", default=2,
help="Major Circular Division Line thickness")
self.OptionParser.add_option("--r_subdivs_th",
action="store", type="float",
dest="r_subdivs_th", default=1,
help="Minor Circular Division Line thickness")
self.OptionParser.add_option("--a_divs",
action="store", type="int",
dest="a_divs", default=24,
help="Angle Divisions")
self.OptionParser.add_option("--a_divs_cent",
action="store", type="int",
dest="a_divs_cent", default=4,
help="Angle Divisions at Centre")
self.OptionParser.add_option("--a_subdivs",
action="store", type="int",
dest="a_subdivs", default=1,
help="Angcular Subdivisions per Major division")
self.OptionParser.add_option("--a_subdivs_cent",
action="store", type="int",
dest="a_subdivs_cent", default=1,
help="Angular Subdivisions end 'n' major circular divisions before the centre")
self.OptionParser.add_option("--a_divs_th",
action="store", type="float",
dest="a_divs_th", default=2,
help="Major Angular Division Line thickness")
self.OptionParser.add_option("--a_subdivs_th",
action="store", type="float",
dest="a_subdivs_th", default=1,
help="Minor Angular Division Line thickness")
self.OptionParser.add_option("--c_dot_dia",
action="store", type="float",
dest="c_dot_dia", default=5.0,
help="Diameter of Centre Dot")
self.OptionParser.add_option("--a_labels",
action="store", type="string",
dest="a_labels", default='deg',
help="The kind of labels to apply")
self.OptionParser.add_option("--a_label_size",
action="store", type="int",
dest="a_label_size", default=18,
help="The nominal pixel size of the circumferential labels")
self.OptionParser.add_option("--a_label_outset",
action="store", type="float",
dest="a_label_outset", default=24,
help="The radial outset of the circumferential labels")
def effect(self):
self.options.dr = self.unittouu(str(self.options.dr) + 'px')
self.options.r_divs_th = self.unittouu(str(self.options.r_divs_th) + 'px')
self.options.r_subdivs_th = self.unittouu(str(self.options.r_subdivs_th) + 'px')
self.options.a_divs_th = self.unittouu(str(self.options.a_divs_th) + 'px')
self.options.a_subdivs_th = self.unittouu(str(self.options.a_subdivs_th) + 'px')
self.options.c_dot_dia = self.unittouu(str(self.options.c_dot_dia) + 'px')
self.options.a_label_size = self.unittouu(str(self.options.a_label_size) + 'px')
self.options.a_label_outset = self.unittouu(str(self.options.a_label_outset) + 'px')
# Embed grid in group
        #Put it in the centre of the current view
t = 'translate(' + str( self.view_center[0] ) + ',' + str( self.view_center[1] ) + ')'
g_attribs = {inkex.addNS('label','inkscape'):'Grid_Polar:R' +
str( self.options.r_divs )+':A'+str( self.options.a_divs ),
'transform':t }
grid = inkex.etree.SubElement(self.current_layer, 'g', g_attribs)
dr = self.options.dr #Distance between neighbouring circles
dtheta = 2 * pi / self.options.a_divs_cent #Angular change between adjacent radial lines at centre
rmax = self.options.r_divs * dr
#Create SVG circles
for i in range(1, self.options.r_divs+1):
draw_SVG_circle(i*dr, 0, 0, #major div circles
self.options.r_divs_th, 'none',
'MajorDivCircle'+str(i)+':R'+str(i*dr), grid)
if self.options.r_log: #logarithmic subdivisions
for j in range (2, self.options.r_subdivs):
draw_SVG_circle(i*dr-(1-log(j, self.options.r_subdivs))*dr, #minor div circles
0, 0, self.options.r_subdivs_th, 'none',
'MinorDivCircle'+str(i)+':Log'+str(j), grid)
else: #linear subdivs
for j in range (1, self.options.r_subdivs):
draw_SVG_circle(i*dr-j*dr/self.options.r_subdivs, #minor div circles
0, 0, self.options.r_subdivs_th, 'none',
'MinorDivCircle'+str(i)+':R'+str(i*dr), grid)
if self.options.a_divs == self.options.a_divs_cent: #the lines can go from the centre to the edge
for i in range(0, self.options.a_divs):
draw_SVG_line(0, 0, rmax*sin(i*dtheta), rmax*cos(i*dtheta),
self.options.a_divs_th, 'RadialGridline'+str(i), grid)
else: #we need separate lines
for i in range(0, self.options.a_divs_cent): #lines that go to the first circle
draw_SVG_line(0, 0, dr*sin(i*dtheta), dr*cos(i*dtheta),
self.options.a_divs_th, 'RadialGridline'+str(i), grid)
dtheta = 2 * pi / self.options.a_divs #work out the angle change for outer lines
for i in range(0, self.options.a_divs): #lines that go from there to the edge
draw_SVG_line( dr*sin(i*dtheta+pi/2.0), dr*cos(i*dtheta+pi/2.0),
rmax*sin(i*dtheta+pi/2.0), rmax*cos(i*dtheta+pi/2.0),
self.options.a_divs_th, 'RadialGridline'+str(i), grid)
if self.options.a_subdivs > 1: #draw angular subdivs
            for i in range(0, self.options.a_divs): #for each major division
for j in range(1, self.options.a_subdivs): #draw the subdivisions
                    angle = i*dtheta-j*dtheta/self.options.a_subdivs+pi/2.0 # the angle of the subdivision line
draw_SVG_line(dr*self.options.a_subdivs_cent*sin(angle),
dr*self.options.a_subdivs_cent*cos(angle),
rmax*sin(angle), rmax*cos(angle),
self.options.a_subdivs_th, 'RadialMinorGridline'+str(i), grid)
        if self.options.c_dot_dia != 0: #if a non-zero diameter, draw the centre dot
draw_SVG_circle(self.options.c_dot_dia /2.0,
0, 0, 0, '#000000', 'CentreDot', grid)
if self.options.a_labels == 'deg':
label_radius = rmax+self.options.a_label_outset #radius of label centres
label_size = self.options.a_label_size
            numeral_size = 0.73*label_size #numerals appear to be 0.73 times the height of the nominal pixel size of the font in "Sans"
            for i in range(0, self.options.a_divs): #radial line labels
draw_SVG_label_centred(sin(i*dtheta+pi/2.0)*label_radius, #0 at the RHS, mathematical style
cos(i*dtheta+pi/2.0)*label_radius+ numeral_size/2.0, #centre the text vertically
str(i*360/self.options.a_divs),
label_size, 'Label'+str(i), grid)
if __name__ == '__main__':
e = Grid_Polar()
e.affect()
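# Rough command-line sketch (Inkscape normally invokes this extension itself;
# the option values and SVG path below are illustrative):
#   python grid_polar.py --r_divs=5 --dr=50 --a_divs=24 drawing.svg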
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
|
dstockwell/catapult | refs/heads/master | tracing/third_party/closure_linter/closure_linter/requireprovidesorter_test.py | 135 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for RequireProvideSorter."""
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import requireprovidesorter
# pylint: disable-msg=C6409
TokenType = javascripttokens.JavaScriptTokenType
class RequireProvideSorterTest(googletest.TestCase):
"""Tests for RequireProvideSorter."""
_tokenizer = javascripttokenizer.JavaScriptTokenizer()
_metadata_pass = ecmametadatapass.EcmaMetaDataPass()
def testFixRequires_removeBlankLines(self):
"""Tests that blank lines are omitted in sorted goog.require statements."""
input_lines = [
'goog.provide(\'package.subpackage.Whatever\');',
'',
'goog.require(\'package.subpackage.ClassB\');',
'',
'goog.require(\'package.subpackage.ClassA\');'
]
expected_lines = [
'goog.provide(\'package.subpackage.Whatever\');',
'',
'goog.require(\'package.subpackage.ClassA\');',
'goog.require(\'package.subpackage.ClassB\');'
]
token = self._tokenizer.TokenizeFile(input_lines)
self._metadata_pass.Reset()
self._metadata_pass.Process(token)
sorter = requireprovidesorter.RequireProvideSorter()
sorter.FixRequires(token)
self.assertEquals(expected_lines, self._GetLines(token))
def _GetLines(self, token):
"""Returns an array of lines based on the specified token stream."""
lines = []
line = ''
while token:
line += token.string
if token.IsLastInLine():
lines.append(line)
line = ''
token = token.next
return lines
if __name__ == '__main__':
googletest.main()
|
irwinlove/django | refs/heads/master | django/core/management/commands/test.py | 267 | import logging
import os
import sys
from django.conf import settings
from django.core.management.base import BaseCommand
from django.test.utils import get_runner
class Command(BaseCommand):
help = 'Discover and run tests in the specified modules or the current directory.'
requires_system_checks = False
def __init__(self):
self.test_runner = None
super(Command, self).__init__()
def run_from_argv(self, argv):
"""
Pre-parse the command line to extract the value of the --testrunner
option. This allows a test runner to define additional command line
arguments.
"""
option = '--testrunner='
for arg in argv[2:]:
if arg.startswith(option):
self.test_runner = arg[len(option):]
break
super(Command, self).run_from_argv(argv)
def add_arguments(self, parser):
parser.add_argument('args', metavar='test_label', nargs='*',
help='Module paths to test; can be modulename, modulename.TestCase or modulename.TestCase.test_method')
parser.add_argument('--noinput',
action='store_false', dest='interactive', default=True,
            help='Tells Django to NOT prompt the user for input of any kind.')
parser.add_argument('--failfast',
action='store_true', dest='failfast', default=False,
help='Tells Django to stop running the test suite after first '
                 'failed test.')
parser.add_argument('--testrunner',
action='store', dest='testrunner',
help='Tells Django to use specified test runner class instead of '
                 'the one specified by the TEST_RUNNER setting.')
parser.add_argument('--liveserver',
action='store', dest='liveserver', default=None,
help='Overrides the default address where the live server (used '
'with LiveServerTestCase) is expected to run from. The '
                 'default value is localhost:8081.')
test_runner_class = get_runner(settings, self.test_runner)
if hasattr(test_runner_class, 'option_list'):
# Keeping compatibility with both optparse and argparse at this level
# would be too heavy for a non-critical item
raise RuntimeError(
"The method to extend accepted command-line arguments by the "
"test management command has changed in Django 1.8. Please "
"create an add_arguments class method to achieve this.")
if hasattr(test_runner_class, 'add_arguments'):
test_runner_class.add_arguments(parser)
def execute(self, *args, **options):
if options['verbosity'] > 0:
# ensure that deprecation warnings are displayed during testing
# the following state is assumed:
# logging.capturewarnings is true
# a "default" level warnings filter has been added for
# DeprecationWarning. See django.conf.LazySettings._configure_logging
logger = logging.getLogger('py.warnings')
handler = logging.StreamHandler()
logger.addHandler(handler)
super(Command, self).execute(*args, **options)
if options['verbosity'] > 0:
# remove the testing-specific handler
logger.removeHandler(handler)
def handle(self, *test_labels, **options):
from django.conf import settings
from django.test.utils import get_runner
TestRunner = get_runner(settings, options.get('testrunner'))
if options.get('liveserver') is not None:
os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options['liveserver']
del options['liveserver']
test_runner = TestRunner(**options)
failures = test_runner.run_tests(test_labels)
if failures:
sys.exit(bool(failures))
|
jackkiej/SickRage | refs/heads/master | lib/tornado/platform/twisted.py | 16 | # Author: Ovidiu Predescu
# Date: July 2011
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Bridges between the Twisted reactor and Tornado IOLoop.
This module lets you run applications and libraries written for
Twisted in a Tornado application. It can be used in two modes,
depending on which library's underlying event loop you want to use.
This module has been tested with Twisted versions 11.0.0 and newer.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import functools
import numbers
import socket
import sys
import twisted.internet.abstract # type: ignore
from twisted.internet.defer import Deferred # type: ignore
from twisted.internet.posixbase import PosixReactorBase # type: ignore
from twisted.internet.interfaces import IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor # type: ignore
from twisted.python import failure, log # type: ignore
from twisted.internet import error # type: ignore
import twisted.names.cache # type: ignore
import twisted.names.client # type: ignore
import twisted.names.hosts # type: ignore
import twisted.names.resolve # type: ignore
from zope.interface import implementer # type: ignore
from tornado.concurrent import Future
from tornado.escape import utf8
from tornado import gen
import tornado.ioloop
from tornado.log import app_log
from tornado.netutil import Resolver
from tornado.stack_context import NullContext, wrap
from tornado.ioloop import IOLoop
from tornado.util import timedelta_to_seconds
@implementer(IDelayedCall)
class TornadoDelayedCall(object):
"""DelayedCall object for Tornado."""
def __init__(self, reactor, seconds, f, *args, **kw):
self._reactor = reactor
self._func = functools.partial(f, *args, **kw)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
self._active = True
def _called(self):
self._active = False
self._reactor._removeDelayedCall(self)
try:
self._func()
except:
app_log.error("_called caught exception", exc_info=True)
def getTime(self):
return self._time
def cancel(self):
self._active = False
self._reactor._io_loop.remove_timeout(self._timeout)
self._reactor._removeDelayedCall(self)
def delay(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time += seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def reset(self, seconds):
self._reactor._io_loop.remove_timeout(self._timeout)
self._time = self._reactor.seconds() + seconds
self._timeout = self._reactor._io_loop.add_timeout(self._time,
self._called)
def active(self):
return self._active
@implementer(IReactorTime, IReactorFDSet)
class TornadoReactor(PosixReactorBase):
"""Twisted reactor built on the Tornado IOLoop.
`TornadoReactor` implements the Twisted reactor interface on top of
the Tornado IOLoop. To use it, simply call `install` at the beginning
of the application::
import tornado.platform.twisted
tornado.platform.twisted.install()
from twisted.internet import reactor
When the app is ready to start, call ``IOLoop.current().start()``
instead of ``reactor.run()``.
It is also possible to create a non-global reactor by calling
``tornado.platform.twisted.TornadoReactor(io_loop)``. However, if
the `.IOLoop` and reactor are to be short-lived (such as those used in
unit tests), additional cleanup may be required. Specifically, it is
recommended to call::
reactor.fireSystemEvent('shutdown')
reactor.disconnectAll()
before closing the `.IOLoop`.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def __init__(self, io_loop=None):
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
self._io_loop = io_loop
self._readers = {} # map of reader objects to fd
self._writers = {} # map of writer objects to fd
self._fds = {} # a map of fd to a (reader, writer) tuple
self._delayedCalls = {}
PosixReactorBase.__init__(self)
self.addSystemEventTrigger('during', 'shutdown', self.crash)
# IOLoop.start() bypasses some of the reactor initialization.
# Fire off the necessary events if they weren't already triggered
# by reactor.run().
def start_if_necessary():
if not self._started:
self.fireSystemEvent('startup')
self._io_loop.add_callback(start_if_necessary)
# IReactorTime
def seconds(self):
return self._io_loop.time()
def callLater(self, seconds, f, *args, **kw):
dc = TornadoDelayedCall(self, seconds, f, *args, **kw)
self._delayedCalls[dc] = True
return dc
def getDelayedCalls(self):
return [x for x in self._delayedCalls if x._active]
def _removeDelayedCall(self, dc):
if dc in self._delayedCalls:
del self._delayedCalls[dc]
# IReactorThreads
def callFromThread(self, f, *args, **kw):
assert callable(f), "%s is not callable" % f
with NullContext():
# This NullContext is mainly for an edge case when running
# TwistedIOLoop on top of a TornadoReactor.
# TwistedIOLoop.add_callback uses reactor.callFromThread and
# should not pick up additional StackContexts along the way.
self._io_loop.add_callback(f, *args, **kw)
# We don't need the waker code from the super class, Tornado uses
# its own waker.
def installWaker(self):
pass
def wakeUp(self):
pass
# IReactorFDSet
def _invoke_callback(self, fd, events):
if fd not in self._fds:
return
(reader, writer) = self._fds[fd]
if reader:
err = None
if reader.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.READ:
err = log.callWithLogger(reader, reader.doRead)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeReader(reader)
reader.readConnectionLost(failure.Failure(err))
if writer:
err = None
if writer.fileno() == -1:
err = error.ConnectionLost()
elif events & IOLoop.WRITE:
err = log.callWithLogger(writer, writer.doWrite)
if err is None and events & IOLoop.ERROR:
err = error.ConnectionLost()
if err is not None:
self.removeWriter(writer)
writer.writeConnectionLost(failure.Failure(err))
def addReader(self, reader):
if reader in self._readers:
# Don't add the reader if it's already there
return
fd = reader.fileno()
self._readers[reader] = fd
if fd in self._fds:
(_, writer) = self._fds[fd]
self._fds[fd] = (reader, writer)
if writer:
# We already registered this fd for write events,
# update it for read events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (reader, None)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.READ)
def addWriter(self, writer):
if writer in self._writers:
return
fd = writer.fileno()
self._writers[writer] = fd
if fd in self._fds:
(reader, _) = self._fds[fd]
self._fds[fd] = (reader, writer)
if reader:
# We already registered this fd for read events,
# update it for write events as well.
self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)
else:
with NullContext():
self._fds[fd] = (None, writer)
self._io_loop.add_handler(fd, self._invoke_callback,
IOLoop.WRITE)
def removeReader(self, reader):
if reader in self._readers:
fd = self._readers.pop(reader)
(_, writer) = self._fds[fd]
if writer:
# We have a writer so we need to update the IOLoop for
# write events only.
self._fds[fd] = (None, writer)
self._io_loop.update_handler(fd, IOLoop.WRITE)
else:
# Since we have no writer registered, we remove the
# entry from _fds and unregister the handler from the
# IOLoop
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeWriter(self, writer):
if writer in self._writers:
fd = self._writers.pop(writer)
(reader, _) = self._fds[fd]
if reader:
# We have a reader so we need to update the IOLoop for
# read events only.
self._fds[fd] = (reader, None)
self._io_loop.update_handler(fd, IOLoop.READ)
else:
# Since we have no reader registered, we remove the
# entry from the _fds and unregister the handler from
# the IOLoop.
del self._fds[fd]
self._io_loop.remove_handler(fd)
def removeAll(self):
return self._removeAll(self._readers, self._writers)
def getReaders(self):
return self._readers.keys()
def getWriters(self):
return self._writers.keys()
# The following functions are mainly used in twisted-style test cases;
# it is expected that most users of the TornadoReactor will call
# IOLoop.start() instead of Reactor.run().
def stop(self):
PosixReactorBase.stop(self)
fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown")
self._io_loop.add_callback(fire_shutdown)
def crash(self):
PosixReactorBase.crash(self)
self._io_loop.stop()
def doIteration(self, delay):
raise NotImplementedError("doIteration")
def mainLoop(self):
# Since this class is intended to be used in applications
# where the top-level event loop is ``io_loop.start()`` rather
# than ``reactor.run()``, it is implemented a little
# differently than other Twisted reactors. We override
# ``mainLoop`` instead of ``doIteration`` and must implement
# timed call functionality on top of `.IOLoop.add_timeout`
# rather than using the implementation in
# ``PosixReactorBase``.
self._io_loop.start()
class _TestReactor(TornadoReactor):
"""Subclass of TornadoReactor for use in unittests.
This can't go in the test.py file because of import-order dependencies
with the Twisted reactor test builder.
"""
def __init__(self):
# always use a new ioloop
super(_TestReactor, self).__init__(IOLoop())
def listenTCP(self, port, factory, backlog=50, interface=''):
# default to localhost to avoid firewall prompts on the mac
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenTCP(
port, factory, backlog=backlog, interface=interface)
def listenUDP(self, port, protocol, interface='', maxPacketSize=8192):
if not interface:
interface = '127.0.0.1'
return super(_TestReactor, self).listenUDP(
port, protocol, interface=interface, maxPacketSize=maxPacketSize)
def install(io_loop=None):
"""Install this package as the default Twisted reactor.
``install()`` must be called very early in the startup process,
before most other twisted-related imports. Conversely, because it
initializes the `.IOLoop`, it cannot be called before
`.fork_processes` or multi-process `~.TCPServer.start`. These
conflicting requirements make it difficult to use `.TornadoReactor`
in multi-process mode, and an external process manager such as
``supervisord`` is recommended instead.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
if not io_loop:
io_loop = tornado.ioloop.IOLoop.current()
reactor = TornadoReactor(io_loop)
from twisted.internet.main import installReactor # type: ignore
installReactor(reactor)
return reactor
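# Minimal usage sketch (illustrative only, assuming this module is importable
# as tornado.platform.twisted): install() must run before other twisted
# imports, after which IOLoop.start(), not reactor.run(), drives the program.
def _example_install_usage():
    import tornado.ioloop
    import tornado.platform.twisted
    tornado.platform.twisted.install()
    from twisted.internet import reactor  # now backed by TornadoReactor
    reactor.callWhenRunning(lambda: None)  # twisted-style scheduling works
    tornado.ioloop.IOLoop.current().start()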
@implementer(IReadDescriptor, IWriteDescriptor)
class _FD(object):
def __init__(self, fd, fileobj, handler):
self.fd = fd
self.fileobj = fileobj
self.handler = handler
self.reading = False
self.writing = False
self.lost = False
def fileno(self):
return self.fd
def doRead(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.READ)
def doWrite(self):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE)
def connectionLost(self, reason):
if not self.lost:
self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR)
self.lost = True
def logPrefix(self):
return ''
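# Note: _FD adapts a Tornado-style (fd, handler) registration into Twisted's
# IReadDescriptor/IWriteDescriptor so TwistedIOLoop below can hand it to
# reactor.addReader/addWriter; the ``lost`` flag keeps callbacks from firing
# after remove_handler has run.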
class TwistedIOLoop(tornado.ioloop.IOLoop):
"""IOLoop implementation that runs on Twisted.
`TwistedIOLoop` implements the Tornado IOLoop interface on top of
the Twisted reactor. Recommended usage::
from tornado.platform.twisted import TwistedIOLoop
from twisted.internet import reactor
TwistedIOLoop().install()
# Set up your tornado application as usual using `IOLoop.instance`
reactor.run()
Uses the global Twisted reactor by default. To create multiple
``TwistedIOLoops`` in the same process, you must pass a unique reactor
when constructing each one.
Not compatible with `tornado.process.Subprocess.set_exit_callback`
because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict
with each other.
See also :meth:`tornado.ioloop.IOLoop.install` for general notes on
installing alternative IOLoops.
"""
def initialize(self, reactor=None, **kwargs):
super(TwistedIOLoop, self).initialize(**kwargs)
if reactor is None:
import twisted.internet.reactor # type: ignore
reactor = twisted.internet.reactor
self.reactor = reactor
self.fds = {}
def close(self, all_fds=False):
fds = self.fds
self.reactor.removeAll()
for c in self.reactor.getDelayedCalls():
c.cancel()
if all_fds:
for fd in fds.values():
self.close_fd(fd.fileobj)
def add_handler(self, fd, handler, events):
if fd in self.fds:
raise ValueError('fd %s added twice' % fd)
fd, fileobj = self.split_fd(fd)
self.fds[fd] = _FD(fd, fileobj, wrap(handler))
if events & tornado.ioloop.IOLoop.READ:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
def update_handler(self, fd, events):
fd, fileobj = self.split_fd(fd)
if events & tornado.ioloop.IOLoop.READ:
if not self.fds[fd].reading:
self.fds[fd].reading = True
self.reactor.addReader(self.fds[fd])
else:
if self.fds[fd].reading:
self.fds[fd].reading = False
self.reactor.removeReader(self.fds[fd])
if events & tornado.ioloop.IOLoop.WRITE:
if not self.fds[fd].writing:
self.fds[fd].writing = True
self.reactor.addWriter(self.fds[fd])
else:
if self.fds[fd].writing:
self.fds[fd].writing = False
self.reactor.removeWriter(self.fds[fd])
def remove_handler(self, fd):
fd, fileobj = self.split_fd(fd)
if fd not in self.fds:
return
self.fds[fd].lost = True
if self.fds[fd].reading:
self.reactor.removeReader(self.fds[fd])
if self.fds[fd].writing:
self.reactor.removeWriter(self.fds[fd])
del self.fds[fd]
def start(self):
old_current = IOLoop.current(instance=False)
try:
self._setup_logging()
self.make_current()
self.reactor.run()
finally:
if old_current is None:
IOLoop.clear_current()
else:
old_current.make_current()
def stop(self):
self.reactor.crash()
def add_timeout(self, deadline, callback, *args, **kwargs):
# This method could be simplified (since tornado 4.0) by
# overriding call_at instead of add_timeout, but we leave it
# for now as a test of backwards-compatibility.
if isinstance(deadline, numbers.Real):
delay = max(deadline - self.time(), 0)
elif isinstance(deadline, datetime.timedelta):
delay = timedelta_to_seconds(deadline)
else:
            raise TypeError("Unsupported deadline %r" % (deadline,))
return self.reactor.callLater(
delay, self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def remove_timeout(self, timeout):
if timeout.active():
timeout.cancel()
def add_callback(self, callback, *args, **kwargs):
self.reactor.callFromThread(
self._run_callback,
functools.partial(wrap(callback), *args, **kwargs))
def add_callback_from_signal(self, callback, *args, **kwargs):
self.add_callback(callback, *args, **kwargs)
class TwistedResolver(Resolver):
"""Twisted-based asynchronous resolver.
This is a non-blocking and non-threaded resolver. It is
recommended only when threads cannot be used, since it has
limitations compared to the standard ``getaddrinfo``-based
`~tornado.netutil.Resolver` and
`~tornado.netutil.ThreadedResolver`. Specifically, it returns at
most one result, and arguments other than ``host`` and ``family``
are ignored. It may fail to resolve when ``family`` is not
``socket.AF_UNSPEC``.
Requires Twisted 12.1 or newer.
.. versionchanged:: 4.1
The ``io_loop`` argument is deprecated.
"""
def initialize(self, io_loop=None):
self.io_loop = io_loop or IOLoop.current()
# partial copy of twisted.names.client.createResolver, which doesn't
# allow for a reactor to be passed in.
self.reactor = tornado.platform.twisted.TornadoReactor(io_loop)
host_resolver = twisted.names.hosts.Resolver('/etc/hosts')
cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor)
real_resolver = twisted.names.client.Resolver('/etc/resolv.conf',
reactor=self.reactor)
self.resolver = twisted.names.resolve.ResolverChain(
[host_resolver, cache_resolver, real_resolver])
@gen.coroutine
def resolve(self, host, port, family=0):
# getHostByName doesn't accept IP addresses, so if the input
# looks like an IP address just return it immediately.
if twisted.internet.abstract.isIPAddress(host):
resolved = host
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(host):
resolved = host
resolved_family = socket.AF_INET6
else:
deferred = self.resolver.getHostByName(utf8(host))
resolved = yield gen.Task(deferred.addBoth)
if isinstance(resolved, failure.Failure):
try:
resolved.raiseException()
except twisted.names.error.DomainError as e:
raise IOError(e)
elif twisted.internet.abstract.isIPAddress(resolved):
resolved_family = socket.AF_INET
elif twisted.internet.abstract.isIPv6Address(resolved):
resolved_family = socket.AF_INET6
else:
resolved_family = socket.AF_UNSPEC
if family != socket.AF_UNSPEC and family != resolved_family:
raise Exception('Requested socket family %d but got %d' %
(family, resolved_family))
result = [
(resolved_family, (resolved, port)),
]
raise gen.Return(result)
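# Minimal usage sketch (illustrative only; 'example.com' is a placeholder):
# TwistedResolver is used like any other tornado.netutil.Resolver, from a
# coroutine running on the IOLoop.
@gen.coroutine
def _example_twisted_resolve():
    resolver = TwistedResolver()
    addrinfo = yield resolver.resolve('example.com', 80)
    # A single-entry list, e.g. [(socket.AF_INET, (ip_address, 80))].
    raise gen.Return(addrinfo)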
if hasattr(gen.convert_yielded, 'register'):
@gen.convert_yielded.register(Deferred) # type: ignore
def _(d):
f = Future()
def errback(failure):
try:
failure.raiseException()
# Should never happen, but just in case
raise Exception("errback called without error")
except:
f.set_exc_info(sys.exc_info())
d.addCallbacks(f.set_result, errback)
return f
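# Minimal usage sketch (illustrative only; assumes the IOLoop and reactor
# share a thread, e.g. after install() or under TwistedIOLoop): with the
# converter registered above, coroutines may yield Deferreds directly.
@gen.coroutine
def _example_yield_deferred():
    from twisted.internet import task, reactor  # type: ignore
    yield task.deferLater(reactor, 0.1, lambda: None)  # Deferred -> Future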
|
rombr/dev-hub-test-task | refs/heads/master | blog/admin.py | 1 | # -*- coding: utf-8 -*-
from django.contrib import admin
from models import Post, Subscribe
class PostAdmin(admin.ModelAdmin):
list_display = ('author', 'created', 'title',)
search_fields = ('title', 'body', )
class SubscribeAdmin(admin.ModelAdmin):
list_display = ('author', 'reader', 'date_joined',)
admin.site.register(Post, PostAdmin)
admin.site.register(Subscribe, SubscribeAdmin)
|
lconceicao/son-cli | refs/heads/master | src/son/package/tests/test_unit_Packager.py | 5 | # Copyright (c) 2015 SONATA-NFV, UBIWHERE
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UBIWHERE
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
import unittest
from unittest.mock import patch
from unittest.mock import Mock
from unittest import mock
from son.package.package import Packager
from son.workspace.workspace import Workspace
from son.workspace.workspace import Project
class UnitCreatePackageTests(unittest.TestCase):
@patch('son.package.package.generate_hash')
@patch('son.package.package.Validator')
@patch('son.package.package.os.path.abspath')
@patch('son.package.package.os.path.join')
@patch('son.package.package.zipfile')
def test_generate_package(self, m_zipfile, m_join, m_abspath, m_validator,
m_hash):
"""
Ensures that a package file is created with correct name and location
"""
# First, create a workspace to give to Packager
workspace = Workspace("ws/root", ws_name="ws_test", log_level='debug')
# Create project
project = Project(workspace, 'prj/path')
# Instantiate a Packager instance
packager = Packager(workspace=workspace,
project=project,
generate_pd=False,
dst_path="dst/path")
packager._package_descriptor = True
# Prepare mocks
context_manager_mock = Mock()
m_zipfile.ZipFile.return_value = context_manager_mock
enter_mock = Mock()
exit_mock = Mock()
setattr(context_manager_mock, '__enter__', enter_mock)
setattr(context_manager_mock, '__exit__', exit_mock)
m_validator.validate_package.return_value = True
m_hash.return_value = ''
m_abspath.return_value = ''
# execute
packager.generate_package("package_name")
# make assertions
self.assertEqual(m_join.call_args_list[-1],
mock.call('dst/path', 'package_name.son'))
def test_package_gds(self):
"""
Test the validation of the project general description section
"""
# First, create a workspace to give to Packager
workspace = Workspace("ws/root", ws_name="ws_test", log_level='debug')
# Create project
project = Project(workspace, 'prj/path')
# Instantiate a Packager instance
packager = Packager(workspace=workspace,
project=project,
generate_pd=False,
dst_path="dst/path")
packager._package_descriptor = True
# Create fake project configuration
prj_config = {
'version': '0.5',
'package': {
'version': '0.1',
'name': 'sonata-project-sample',
'vendor': 'com.sonata.project',
'maintainer': 'Name, Company, Contact',
'description': 'Project description',
},
'descriptor_extension': 'yml'
}
# Remove package keys, one by one...
        # Iterate over a snapshot of the keys; the dict is mutated in the loop.
        for key in list(prj_config['package']):
value = prj_config['package'].pop(key)
self.assertIsNone(packager.package_gds(prj_config))
prj_config['package'][key] = value
# Make prj_config complete...
prj_config['name'] = 'sonata - project - sample'
self.assertTrue(packager.package_gds(prj_config))
|
SteveDiamond/cvxpy | refs/heads/master | examples/expr_trees/inpainting.py | 2 | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from scipy import misc
import matplotlib.pyplot as plt
import numpy as np
l = misc.ascent()
l = l.astype(np.float64, copy=False)
l = l/np.max(l) #rescale pixels into [0,1]
plt.imshow(l, cmap=plt.cm.gray)
#plt.show()
from PIL import Image, ImageDraw
num_lines = 5
width = 5
imshape = l.shape
def drawRandLine(draw,width):
x = [np.random.randint(0,im.size[0]) for i in range(2)]
y = [np.random.randint(0,im.size[1]) for i in range(2)]
xy = zip(x,y)
#fill gives the color
draw.line(xy,fill=255,width=width)
im = Image.new("L",imshape)
draw = ImageDraw.Draw(im)
for i in range(num_lines):
drawRandLine(draw,width)
del draw
# im.show()
err = np.asarray(im,dtype=np.bool)
r = l.copy()
r[err] = 1.0
plt.imshow(r, cmap=plt.cm.gray)
idx2pair = np.nonzero(err)
idx2pair = zip(idx2pair[0].tolist(), idx2pair[1].tolist())
pair2idx = dict(zip(idx2pair, range(len(idx2pair))))
idx2pair = np.array(idx2pair) #convert back to numpy array
import scipy.sparse as sp
from cvxopt import spmatrix
def involvedpairs(pairs):
''' Get all the pixel pairs whose gradient involves an unknown pixel.
Input should be a set or dictionary of pixel pair tuples
'''
for pair in pairs: #loop through unknown pixels
yield pair
left = (pair[0],pair[1]-1)
if left[1] >= 0 and left not in pairs: #if 'left' in picture, and not already unknown
yield left
top = (pair[0]-1,pair[1])
topright = (pair[0]-1,pair[1]+1)
#if not on top boundary, top is fixed, and top not already touched by upper right pixel
if pair[0] > 0 and top not in pairs and topright not in pairs:
yield top
def formCOO(pair2idx, img):
m, n = img.shape
Is, Js, Vs, bs = [[],[]], [[],[]], [[],[]], [[],[]]
row = 0
for pixel1 in involvedpairs(pair2idx):
bottom = (pixel1[0]+1,pixel1[1])
right= (pixel1[0],pixel1[1]+1)
for i, pixel2 in enumerate([bottom, right]):
if pixel2[0] >= m or pixel2[1] >= n:
bs[i].append(0)
continue
b = 0
for j, pix in enumerate([pixel2, pixel1]):
if pix in pair2idx: #unknown pixel
Is[i].append(row)
Js[i].append(pair2idx[pix])
Vs[i].append(pow(-1,j))
else: #known pixel
b += pow(-1,j)*img[pix]
bs[i].append(b)
row += 1
'''
Form Gx and Gy such that the x-component of the gradient is Gx*x + bx,
where x is an array representing the unknown pixel values.
'''
m = len(bs[0])
n = len(pair2idx)
Gx = spmatrix(Vs[1], Is[1], Js[1],(m,n))
Gy = spmatrix(Vs[0], Is[0], Js[0],(m,n))
bx = np.array(bs[1])
by = np.array(bs[0])
return Gx, Gy, bx, by
Gx, Gy, bx, by = formCOO(pair2idx, r)
import cvxpy as cp
m, n = Gx.size
x = cp.Variable(n)
#z = cp.vstack((x.__rmul__(Gx) + bx).T, (x.__rmul__(Gy) + by).T)
#z = cp.hstack(x.__rmul__(Gx) + bx, x.__rmul__(Gy) + by)
z = cp.Variable(m, 2)
constraints = [z[:, 0] == x.__rmul__(Gx) + bx,
z[:, 1] == x.__rmul__(Gy) + by]
objective = cp.Minimize(sum([cp.norm(z[i,:]) for i in range(m)]))
p = cp.Problem(objective, constraints)
import cProfile
cProfile.run("""
result = p.solve(solver=cp.ECOS, verbose=True)
""")
|
AndreyPopovNew/asuswrt-merlin-rt-n | refs/heads/master | release/src/router/samba36/lib/testtools/testtools/tests/test_spinner.py | 20 | # Copyright (c) 2010 Jonathan M. Lange. See LICENSE for details.
"""Tests for the evil Twisted reactor-spinning we do."""
import os
import signal
from testtools import (
skipIf,
TestCase,
)
from testtools.helpers import try_import
from testtools.matchers import (
Equals,
Is,
MatchesException,
Raises,
)
_spinner = try_import('testtools._spinner')
defer = try_import('twisted.internet.defer')
Failure = try_import('twisted.python.failure.Failure')
class NeedsTwistedTestCase(TestCase):
def setUp(self):
super(NeedsTwistedTestCase, self).setUp()
if defer is None or Failure is None:
self.skipTest("Need Twisted to run")
class TestNotReentrant(NeedsTwistedTestCase):
def test_not_reentrant(self):
# A function decorated as not being re-entrant will raise a
# _spinner.ReentryError if it is called while it is running.
calls = []
@_spinner.not_reentrant
def log_something():
calls.append(None)
if len(calls) < 5:
log_something()
self.assertThat(
log_something, Raises(MatchesException(_spinner.ReentryError)))
self.assertEqual(1, len(calls))
def test_deeper_stack(self):
calls = []
@_spinner.not_reentrant
def g():
calls.append(None)
if len(calls) < 5:
f()
@_spinner.not_reentrant
def f():
calls.append(None)
if len(calls) < 5:
g()
self.assertThat(f, Raises(MatchesException(_spinner.ReentryError)))
self.assertEqual(2, len(calls))
class TestExtractResult(NeedsTwistedTestCase):
def test_not_fired(self):
# _spinner.extract_result raises _spinner.DeferredNotFired if it's
# given a Deferred that has not fired.
self.assertThat(lambda:_spinner.extract_result(defer.Deferred()),
Raises(MatchesException(_spinner.DeferredNotFired)))
def test_success(self):
# _spinner.extract_result returns the value of the Deferred if it has
# fired successfully.
marker = object()
d = defer.succeed(marker)
self.assertThat(_spinner.extract_result(d), Equals(marker))
def test_failure(self):
# _spinner.extract_result raises the failure's exception if it's given
# a Deferred that is failing.
try:
1/0
except ZeroDivisionError:
f = Failure()
d = defer.fail(f)
self.assertThat(lambda:_spinner.extract_result(d),
Raises(MatchesException(ZeroDivisionError)))
class TestTrapUnhandledErrors(NeedsTwistedTestCase):
def test_no_deferreds(self):
marker = object()
result, errors = _spinner.trap_unhandled_errors(lambda: marker)
self.assertEqual([], errors)
self.assertIs(marker, result)
def test_unhandled_error(self):
failures = []
def make_deferred_but_dont_handle():
try:
1/0
except ZeroDivisionError:
f = Failure()
failures.append(f)
defer.fail(f)
result, errors = _spinner.trap_unhandled_errors(
make_deferred_but_dont_handle)
self.assertIs(None, result)
self.assertEqual(failures, [error.failResult for error in errors])
class TestRunInReactor(NeedsTwistedTestCase):
def make_reactor(self):
from twisted.internet import reactor
return reactor
def make_spinner(self, reactor=None):
if reactor is None:
reactor = self.make_reactor()
return _spinner.Spinner(reactor)
def make_timeout(self):
return 0.01
def test_function_called(self):
# run_in_reactor actually calls the function given to it.
calls = []
marker = object()
self.make_spinner().run(self.make_timeout(), calls.append, marker)
self.assertThat(calls, Equals([marker]))
def test_return_value_returned(self):
# run_in_reactor returns the value returned by the function given to
# it.
marker = object()
result = self.make_spinner().run(self.make_timeout(), lambda: marker)
self.assertThat(result, Is(marker))
def test_exception_reraised(self):
# If the given function raises an error, run_in_reactor re-raises that
# error.
self.assertThat(
lambda:self.make_spinner().run(self.make_timeout(), lambda: 1/0),
Raises(MatchesException(ZeroDivisionError)))
def test_keyword_arguments(self):
# run_in_reactor passes keyword arguments on.
calls = []
function = lambda *a, **kw: calls.extend([a, kw])
self.make_spinner().run(self.make_timeout(), function, foo=42)
self.assertThat(calls, Equals([(), {'foo': 42}]))
def test_not_reentrant(self):
# run_in_reactor raises an error if it is called inside another call
# to run_in_reactor.
spinner = self.make_spinner()
self.assertThat(lambda: spinner.run(
self.make_timeout(), spinner.run, self.make_timeout(),
lambda: None), Raises(MatchesException(_spinner.ReentryError)))
def test_deferred_value_returned(self):
# If the given function returns a Deferred, run_in_reactor returns the
# value in the Deferred at the end of the callback chain.
marker = object()
result = self.make_spinner().run(
self.make_timeout(), lambda: defer.succeed(marker))
self.assertThat(result, Is(marker))
def test_preserve_signal_handler(self):
signals = ['SIGINT', 'SIGTERM', 'SIGCHLD']
signals = filter(
None, (getattr(signal, name, None) for name in signals))
for sig in signals:
self.addCleanup(signal.signal, sig, signal.getsignal(sig))
new_hdlrs = list(lambda *a: None for _ in signals)
for sig, hdlr in zip(signals, new_hdlrs):
signal.signal(sig, hdlr)
spinner = self.make_spinner()
spinner.run(self.make_timeout(), lambda: None)
self.assertEqual(new_hdlrs, map(signal.getsignal, signals))
def test_timeout(self):
# If the function takes too long to run, we raise a
# _spinner.TimeoutError.
timeout = self.make_timeout()
self.assertThat(
lambda:self.make_spinner().run(timeout, lambda: defer.Deferred()),
Raises(MatchesException(_spinner.TimeoutError)))
def test_no_junk_by_default(self):
# If the reactor hasn't spun yet, then there cannot be any junk.
spinner = self.make_spinner()
self.assertThat(spinner.get_junk(), Equals([]))
def test_clean_do_nothing(self):
# If there's nothing going on in the reactor, then clean does nothing
# and returns an empty list.
spinner = self.make_spinner()
result = spinner._clean()
self.assertThat(result, Equals([]))
def test_clean_delayed_call(self):
# If there's a delayed call in the reactor, then clean cancels it and
# returns an empty list.
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
call = reactor.callLater(10, lambda: None)
results = spinner._clean()
self.assertThat(results, Equals([call]))
self.assertThat(call.active(), Equals(False))
def test_clean_delayed_call_cancelled(self):
# If there's a delayed call that's just been cancelled, then it's no
# longer there.
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
call = reactor.callLater(10, lambda: None)
call.cancel()
results = spinner._clean()
self.assertThat(results, Equals([]))
def test_clean_selectables(self):
# If there's still a selectable (e.g. a listening socket), then
# clean() removes it from the reactor's registry.
#
# Note that the socket is left open. This emulates a bug in trial.
from twisted.internet.protocol import ServerFactory
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
port = reactor.listenTCP(0, ServerFactory())
spinner.run(self.make_timeout(), lambda: None)
results = spinner.get_junk()
self.assertThat(results, Equals([port]))
def test_clean_running_threads(self):
import threading
import time
current_threads = list(threading.enumerate())
reactor = self.make_reactor()
timeout = self.make_timeout()
spinner = self.make_spinner(reactor)
spinner.run(timeout, reactor.callInThread, time.sleep, timeout / 2.0)
# Python before 2.5 has a race condition with thread handling where
# join() does not remove threads from enumerate before returning - the
# thread being joined does the removal. This was fixed in Python 2.5
# but we still support 2.4, so we have to workaround the issue.
# http://bugs.python.org/issue1703448.
self.assertThat(
[thread for thread in threading.enumerate() if thread.isAlive()],
Equals(current_threads))
def test_leftover_junk_available(self):
# If 'run' is given a function that leaves the reactor dirty in some
# way, 'run' will clean up the reactor and then store information
# about the junk. This information can be got using get_junk.
from twisted.internet.protocol import ServerFactory
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
port = spinner.run(
self.make_timeout(), reactor.listenTCP, 0, ServerFactory())
self.assertThat(spinner.get_junk(), Equals([port]))
def test_will_not_run_with_previous_junk(self):
# If 'run' is called and there's still junk in the spinner's junk
# list, then the spinner will refuse to run.
from twisted.internet.protocol import ServerFactory
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
self.assertThat(lambda: spinner.run(timeout, lambda: None),
Raises(MatchesException(_spinner.StaleJunkError)))
def test_clear_junk_clears_previous_junk(self):
# If 'run' is called and there's still junk in the spinner's junk
# list, then the spinner will refuse to run.
from twisted.internet.protocol import ServerFactory
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
port = spinner.run(timeout, reactor.listenTCP, 0, ServerFactory())
junk = spinner.clear_junk()
self.assertThat(junk, Equals([port]))
self.assertThat(spinner.get_junk(), Equals([]))
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_sigint_raises_no_result_error(self):
# If we get a SIGINT during a run, we raise _spinner.NoResultError.
SIGINT = getattr(signal, 'SIGINT', None)
if not SIGINT:
self.skipTest("SIGINT not available")
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
reactor.callLater(timeout, os.kill, os.getpid(), SIGINT)
self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
Raises(MatchesException(_spinner.NoResultError)))
self.assertEqual([], spinner._clean())
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_sigint_raises_no_result_error_second_time(self):
# If we get a SIGINT during a run, we raise _spinner.NoResultError.
# This test is exactly the same as test_sigint_raises_no_result_error,
# and exists to make sure we haven't futzed with state.
self.test_sigint_raises_no_result_error()
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_fast_sigint_raises_no_result_error(self):
# If we get a SIGINT during a run, we raise _spinner.NoResultError.
SIGINT = getattr(signal, 'SIGINT', None)
if not SIGINT:
self.skipTest("SIGINT not available")
reactor = self.make_reactor()
spinner = self.make_spinner(reactor)
timeout = self.make_timeout()
reactor.callWhenRunning(os.kill, os.getpid(), SIGINT)
self.assertThat(lambda:spinner.run(timeout * 5, defer.Deferred),
Raises(MatchesException(_spinner.NoResultError)))
self.assertEqual([], spinner._clean())
@skipIf(os.name != "posix", "Sending SIGINT with os.kill is posix only")
def test_fast_sigint_raises_no_result_error_second_time(self):
self.test_fast_sigint_raises_no_result_error()
def test_suite():
from unittest import TestLoader
return TestLoader().loadTestsFromName(__name__)
|
lextoumbourou/plugin.video.thenewboston | refs/heads/master | resources/lib/tnb.py | 1 | import requests
from BeautifulSoup import BeautifulSoup
from urlparse import urlparse
BASE_URL = 'https://www.thenewboston.com/videos.php'
def get_lesson_id(url):
return int(url.split('=')[-1])
def get_categories():
"""Scrape categories from homepage."""
page = requests.get(BASE_URL, verify=False)
soup = BeautifulSoup(page.content)
output = [{'title': u'Most Popular Courses'}]
for c in (
soup.find(id='content-wrapper').findAll('div', 'video-category-panel')
):
output.append({'title': c.find('h2', 'panel-title').text})
return output
def get_topics(category):
"""Scrape topics from homepage."""
page = requests.get(BASE_URL, verify=False)
soup = BeautifulSoup(page.content)
content = soup.find(id='content-wrapper')
output = []
if category == u'Most Popular Courses':
courses = content.find('table', 'videos-top-courses')
for item in courses.findAll('tr'):
            link = item.a['href']  # use this row's own link, not the first row's
thumbnail = item.find('img').get('src')
title = item.h4.text.replace(' ', '').strip()
output.append({
'thumbnail': thumbnail,
'title': title,
'lesson_id': get_lesson_id(link)})
for panel in content.findAll('div', 'video-category-panel'):
found_category = panel.find('h2', 'panel-title').text
if found_category == category:
for item in panel.find('div', 'list-group').findAll('a'):
output.append({
'title': item.text,
'lesson_id': get_lesson_id(item['href'])})
break
return output
def get_lessons(lesson_id):
"""Retrieve lessons from the lesson pages."""
url = '{0}?cat={1}'.format(BASE_URL, lesson_id)
page = requests.get(url, verify=False)
soup = BeautifulSoup(page.content)
output = []
for item in soup.find(id='main-menu').find('ul', 'navigation').findAll('li'):
video_id = item.a['href'].split('=')[-1]
title = item.a.text
output.append({
'title': title, 'lesson_id': lesson_id,
'video_id': video_id})
return output
def get_video(lesson_id, video_id):
"""Retrieve a Youtube id from a video page."""
url = '{0}?cat={1}&video={2}'.format(BASE_URL, lesson_id, video_id)
page = requests.get(url, verify=False)
soup = BeautifulSoup(page.content)
return urlparse(soup.find('iframe')['src']).path.split('/')[-1]
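# Illustrative usage (assumes network access to thenewboston.com): chain the
# helpers above to fetch the YouTube id of the first lesson's first video.
if __name__ == '__main__':
    topics = get_topics(u'Most Popular Courses')
    if topics:
        lessons = get_lessons(topics[0]['lesson_id'])
        if lessons:
            print get_video(lessons[0]['lesson_id'], lessons[0]['video_id'])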
|
ucsd-ccbb/Oncolist | refs/heads/master | src/server/Pubmed/AuthorPrinter.py | 1 | __author__ = 'guorongxu'
import sys
import os
import json
# Write the author name table: one "<author>\tAuthor" line per author.
def output(output_file, author_list):
    # Open the output file in append mode.
    filewriter = open(output_file, "a")
for author in author_list:
filewriter.write(author + "\tAuthor\n")
filewriter.close()
## Walk the pubmed JSON files, collect the author list and write it out.
def print_author_list(workspace):
pubmed_files = workspace + "/pubmed_files"
output_file = workspace + "/json_files/authorNameTable.txt"
if not os.path.exists(os.path.dirname(output_file)):
os.makedirs(os.path.dirname(output_file))
author_list = {}
for file in os.listdir(pubmed_files):
if file.endswith(".json"):
myfile = pubmed_files + "/" + file
#Extract the gene list and publications for each author
with open(myfile) as json_file:
try:
json_data = json.load(json_file)
geneName = json_data.get("node_name")
node_list = json_data.get("node_list")
nodes = node_list.get("node")
for node in nodes:
author = node.get("name")
if author in author_list:
publications = node.get("publications")
geneList = author_list.get(author)
geneList.update({geneName: publications})
else:
publications = node.get("publications")
geneList = {}
geneList.update({geneName: publications})
author_list.update({author: geneList})
except ValueError, e:
                    print myfile + " is empty!"
output(output_file, author_list)
if __name__ == "__main__":
workspace = sys.argv[1]
print "Printing pubmed author json files..."
print_author_list(workspace) |
MoritzS/django | refs/heads/master | django/contrib/auth/migrations/0006_require_contenttypes_0002.py | 134 | from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('auth', '0005_alter_user_last_login_null'),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
# Ensure the contenttypes migration is applied before sending
# post_migrate signals (which create ContentTypes).
]
|
hfp/tensorflow-xsmm | refs/heads/master | tensorflow/contrib/solvers/python/__init__.py | 959 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ops module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
|
ric2b/Vivaldi-browser | refs/heads/master | chromium/third_party/blink/web_tests/external/wpt/mimesniff/mime-types/resources/generated-mime-types.py | 47 | import json
def isHTTPTokenCodePoint(cp):
    # HTTP token code points: !#$%&'*+-.^_`|~ plus ASCII digits and letters.
    return (cp in (0x21, 0x23, 0x24, 0x25, 0x26, 0x27, 0x2A, 0x2B, 0x2D,
                   0x2E, 0x5E, 0x5F, 0x60, 0x7C, 0x7E)
            or (0x30 <= cp <= 0x39)
            or (0x41 <= cp <= 0x5A)
            or (0x61 <= cp <= 0x7A))
def isHTTPQuotedStringTokenCodePoint(cp):
    # HTTP quoted-string token code points: TAB, 0x20-0x7E and 0x80-0xFF.
    return cp == 0x09 or (0x20 <= cp <= 0x7E) or (0x80 <= cp <= 0xFF)
tests = []
for cp in range(0x00, 0x100):
if isHTTPTokenCodePoint(cp):
continue
for scenario in ("type", "subtype", "name", "value"):
if scenario == "type" or scenario == "subtype":
if cp == 0x2F: # /
continue
if scenario == "type":
test = unichr(cp) + "/x"
else:
test = "x/" + unichr(cp)
tests.append({"input": test, "output": None})
elif scenario == "name":
if cp == 0x3B or cp == 0x3D: # ; =
continue
tests.append({"input": "x/x;" + unichr(cp) + "=x;bonus=x", "output": "x/x;bonus=x"})
elif scenario == "value":
if cp == 0x09 or cp == 0x20 or cp == 0x22 or cp == 0x3B or cp == 0x5C: # TAB SP " ; \
continue
if isHTTPQuotedStringTokenCodePoint(cp):
testOutput = "x/x;x=\"" + unichr(cp) + "\";bonus=x"
else:
testOutput = "x/x;bonus=x"
tests.append({"input": "x/x;x=" + unichr(cp) + ";bonus=x", "output": testOutput})
tests.append({"input": "x/x;x=\"" + unichr(cp) + "\";bonus=x", "output": testOutput})
handle = open("generated-mime-types.json", "w")
handle.write(json.dumps(tests, indent=2, separators=(',', ': ')))
handle.write("\n")
|
ozamiatin/glance | refs/heads/master | glance/api/v3/router.py | 9 | # Copyright (c) 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from glance.api.v3 import artifacts
from glance.common import wsgi
UUID_REGEX = (
R'[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}')
class API(wsgi.Router):
def _get_artifacts_resource(self):
if not self.artifacts_resource:
self.artifacts_resource = artifacts.create_resource()
return self.artifacts_resource
def __init__(self, mapper):
self.artifacts_resource = None
artifacts_resource = self._get_artifacts_resource()
reject_method_resource = wsgi.Resource(wsgi.RejectMethodController())
def _check_json_content_type(environ, result):
return "application/json" in environ["CONTENT_TYPE"]
def _check_octet_stream_content_type(environ, result):
return "application/octet-stream" in environ["CONTENT_TYPE"]
def connect_routes(m, read_only):
with m.submapper(resource_name="artifact_operations",
path_prefix="/{id}",
requirements={'id': UUID_REGEX}) as art:
art.show()
if not read_only:
art.delete()
art.action('update', method='PATCH')
art.link('publish', method='POST')
def connect_attr_action(attr):
if not read_only:
attr.action("upload", conditions={
'method': ["POST", "PUT"],
'function': _check_octet_stream_content_type})
attr.action("update_property",
conditions={
'method': ["POST", "PUT"],
'function': _check_json_content_type})
attr.link("download", method="GET")
attr_map = art.submapper(resource_name="attr_operations",
path_prefix="/{attr}", path_left=None)
attr_items = art.submapper(
resource_name="attr_item_ops",
path_prefix="/{attr}/{path_left:.*}")
connect_attr_action(attr_map)
connect_attr_action(attr_items)
m.connect("", action='list', conditions={'method': 'GET'},
state='active')
m.connect("/drafts", action='list', conditions={'method': 'GET'},
state='creating')
if not read_only:
m.connect("/drafts", action='create',
conditions={'method': 'POST'})
mapper.connect('/artifacts',
controller=artifacts_resource,
action='list_artifact_types',
conditions={'method': ['GET']})
versioned = mapper.submapper(path_prefix='/artifacts/{type_name}/'
'v{type_version}',
controller=artifacts_resource)
non_versioned = mapper.submapper(path_prefix='/artifacts/{type_name}',
type_version=None,
controller=artifacts_resource)
connect_routes(versioned, False)
connect_routes(non_versioned, True)
mapper.connect('/artifacts',
controller=reject_method_resource,
action='reject',
allowed_methods='GET',
conditions={'method': ['POST', 'PUT', 'DELETE',
'PATCH', 'HEAD']})
super(API, self).__init__(mapper)
|
karan1276/servo | refs/heads/master | tests/wpt/css-tests/tools/serve/__init__.py | 458 | import serve
|
praekelt/molo | refs/heads/develop | molo/core/apps.py | 1 | from django.apps import AppConfig
from django.db.utils import OperationalError, ProgrammingError
from django.conf import settings
from django.utils.timezone import activate
import logging
class MoloAppConfig(AppConfig):
name = 'molo.core'
def ready(self):
from molo.core.models import Site, CmsSettings
logging.basicConfig()
logger = logging.getLogger(__name__)
try:
site = Site.objects.first()
timezone = CmsSettings.for_site(site).timezone
if timezone is None:
timezone_name = settings.TIME_ZONE
logger.warning(
'Timezone unset, defaulting to {0}'.format(timezone_name))
else:
timezone_name = timezone.title
except (OperationalError, ProgrammingError) as e:
timezone_name = settings.TIME_ZONE
logger.warning('Database error: {0}'.format(e))
logger.warning('Defaulting to timezone: {0}'.format(timezone_name))
activate(timezone_name)
|
cypsun/FreeCAD | refs/heads/master | src/Mod/Start/InitGui.py | 19 | # Start gui init module
# (c) 2003 Juergen Riegel
#
# Gathering all the information to start FreeCAD
# This is the second one of three init scripts, the third one
# runs when the gui is up
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
class StartWorkbench ( Workbench ):
"Start workbench object"
Icon = """
/* XPM */
static char * start_xpm[] = {
"16 16 9 1",
" c None",
". c #000200",
"+ c #081938",
"@ c #113155",
"# c #1133A0",
"$ c #1D56DC",
"% c #2161A8",
"& c #2670EB",
"* c #2C86F2",
" ",
" . ",
" #+ ",
" .#$+ ",
" .#$&+ ",
" ####$$$&&&+ ",
" .#$$$$$&&&&*@ ",
" .#$$$$&&&&***@ ",
" .#$$$&&&&***% ",
" .#$$&&&&***% ",
" .++++++%**% ",
" .%*@ ",
" .%@ ",
" .. ",
" ",
" "};
"""
MenuText = "Start"
ToolTip = "Start workbench"
def Initialize(self):
# load the module
import StartGui
import Start
def GetClassName(self):
return "StartGui::Workbench"
Gui.addWorkbench(StartWorkbench())
|
geekboxzone/lollipop_external_chromium_org | refs/heads/geekbox | chrome/browser/resources/chromeos/braille_ime/PRESUBMIT.py | 100 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for the Braille IME."""
def CheckChangeOnUpload(input_api, output_api):
def FileFilter(path):
return path.endswith('.js') or path.endswith('check_braille_ime.py')
if not any((FileFilter(p) for p in input_api.LocalPaths())):
return []
import sys
if not sys.platform.startswith('linux'):
return []
sys.path.insert(0, input_api.PresubmitLocalPath())
try:
from check_braille_ime import CheckBrailleIme
finally:
sys.path.pop(0)
success, output = CheckBrailleIme()
if not success:
return [output_api.PresubmitError(
'Braille IME closure compilation failed',
long_text=output)]
return []
|
deepsrijit1105/edx-platform | refs/heads/master | common/test/acceptance/tests/lms/test_lms_matlab_problem.py | 4 | # -*- coding: utf-8 -*-
"""
Test for matlab problems
"""
import time
from common.test.acceptance.pages.lms.matlab_problem import MatlabProblemPage
from common.test.acceptance.fixtures.course import XBlockFixtureDesc
from common.test.acceptance.fixtures.xqueue import XQueueResponseFixture
from common.test.acceptance.tests.lms.test_lms_problems import ProblemsTest
from textwrap import dedent
class MatlabProblemTest(ProblemsTest):
"""
Tests that verify matlab problem "Run Code".
"""
def get_problem(self):
"""
Create a matlab problem for the test.
"""
problem_data = dedent("""
<problem markdown="null">
<text>
<p>
Write MATLAB code to create the following row vector and store it in a variable named <code>V</code>.
</p>
<table id="a0000000466" class="equation" width="100%" cellspacing="0" cellpadding="7" style="table-layout:auto">
<tr>
<td class="equation">[1 1 2 3 5 8 13]</td>
</tr>
</table>
<p>
<coderesponse queuename="matlab">
<matlabinput rows="10" cols="40" mode="" tabsize="4">
<plot_payload>
</plot_payload>
</matlabinput>
<codeparam>
<initial_display/>
<answer_display>
</answer_display>
<grader_payload>
</grader_payload>
</codeparam>
</coderesponse>
</p>
</text>
</problem>
""")
return XBlockFixtureDesc('problem', 'Test Matlab Problem', data=problem_data)
def _goto_matlab_problem_page(self):
"""
Open matlab problem page with assertion.
"""
self.courseware_page.visit()
matlab_problem_page = MatlabProblemPage(self.browser)
self.assertEqual(matlab_problem_page.problem_name, 'Test Matlab Problem')
return matlab_problem_page
def test_run_code(self):
"""
Test "Run Code" button functionality.
"""
# Enter a submission, which will trigger a pre-defined response from the XQueue stub.
self.submission = "a=1" + self.unique_id[0:5]
self.xqueue_grade_response = {'msg': self.submission}
matlab_problem_page = self._goto_matlab_problem_page()
# Configure the XQueue stub's response for the text we will submit
if self.xqueue_grade_response is not None:
XQueueResponseFixture(self.submission, self.xqueue_grade_response).install()
matlab_problem_page.set_response(self.submission)
matlab_problem_page.click_run_code()
self.assertEqual(
u'Submitted. As soon as a response is returned, this message will be replaced by that feedback.',
matlab_problem_page.get_grader_msg(".external-grader-message")[0]
)
        # Wait 5 seconds for the XQueue stub's grader response to reach the LMS.
time.sleep(5)
self.assertEqual(u'', matlab_problem_page.get_grader_msg(".external-grader-message")[0])
self.assertEqual(
self.xqueue_grade_response.get("msg"),
matlab_problem_page.get_grader_msg(".ungraded-matlab-result")[0]
)
|
a358003542/python-guide-book | refs/heads/master | codes/ch12/select_get_poetry.py | 1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime
import argparse
import socket
import selectors
sel = selectors.DefaultSelector()
def parse_args():
usage = """usage: %prog [options] [hostname]:port ...
python3 select_get_poetry3.py port1 port2 port3 ...
通过select I/O复用来建立一个异步诗歌下载客户端,可以同时面向多个诗歌服务器来进行下载。
"""
parser = argparse.ArgumentParser(usage)
parser.add_argument('port', nargs='+')
args = vars(parser.parse_args())
addresses = args['port']
if not addresses:
print(parser.format_help())
parser.exit()
def parse_address(addr):
if ':' not in addr:
host = '127.0.0.1'
port = addr
else:
host, port = addr.split(':', 1)
if not port.isdigit():
parser.error('Ports must be integers.')
return host, int(port)
return map(parse_address, addresses)
def download_poetry(sock, infile):
"""Download a piece of poetry from the given address."""
bstring = sock.recv(1024)
    if not bstring:  # end of reading
sel.unregister(sock)
infile.close()
print('end of reading')
return True
else:
print('writing to {}'.format(infile.name))
infile.write(bstring)
def connect(address):
"""Connect to the given server and return a non-blocking socket."""
sock = socket.socket()
sock.connect(address)
sock.setblocking(False)
return sock
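# Note: connect() blocks only while the TCP connection is being established;
# the socket is then switched to non-blocking so the selector loop in main()
# never stalls inside recv().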
def format_address(address):
host, port = address
return '%s:%s' % (host or '127.0.0.1', port)
def main():
addresses = parse_args()
elapsed = datetime.timedelta()
sockets = map(connect, addresses)
for sock in sockets:
filename = str(sock.getpeername()[1]) + '.txt'
infile = open(filename, 'wb')
sel.register(sock, selectors.EVENT_READ,
data={'callback': download_poetry,
'args': [infile]})
    while True:
        if not sel.get_map():  # exit once every download has finished
            break
        events = sel.select()
for key, mask in events:
callback = key.data['callback']
callback(key.fileobj, *key.data['args'])
if __name__ == '__main__':
main()
|
zerobatu/edx-platform | refs/heads/master | lms/djangoapps/open_ended_grading/open_ended_notifications.py | 66 | import datetime
import json
import logging
from django.conf import settings
from xmodule.open_ended_grading_classes import peer_grading_service
from xmodule.open_ended_grading_classes.controller_query_service import ControllerQueryService
from courseware.access import has_access
from edxmako.shortcuts import render_to_string
from student.models import unique_id_for_user
from util.cache import cache
from .staff_grading_service import StaffGradingService
log = logging.getLogger(__name__)
NOTIFICATION_CACHE_TIME = 300
KEY_PREFIX = "open_ended_"
NOTIFICATION_TYPES = (
('student_needs_to_peer_grade', 'peer_grading', 'Peer Grading'),
('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted'),
('flagged_submissions_exist', 'open_ended_flagged_problems', 'Flagged Submissions')
)
def staff_grading_notifications(course, user):
staff_gs = StaffGradingService(settings.OPEN_ENDED_GRADING_INTERFACE)
pending_grading = False
img_path = ""
course_id = course.id
student_id = unique_id_for_user(user)
notification_type = "staff"
success, notification_dict = get_value_from_cache(student_id, course_id, notification_type)
if success:
return notification_dict
try:
notifications = json.loads(staff_gs.get_notifications(course_id))
if notifications['success']:
if notifications['staff_needs_to_grade']:
pending_grading = True
except:
#Non catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.info(
"Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications}
set_value_in_cache(student_id, course_id, notification_type, notification_dict)
return notification_dict
def peer_grading_notifications(course, user):
peer_gs = peer_grading_service.PeerGradingService(settings.OPEN_ENDED_GRADING_INTERFACE, render_to_string)
pending_grading = False
img_path = ""
course_id = course.id
student_id = unique_id_for_user(user)
notification_type = "peer"
success, notification_dict = get_value_from_cache(student_id, course_id, notification_type)
if success:
return notification_dict
try:
notifications = json.loads(peer_gs.get_notifications(course_id, student_id))
if notifications['success']:
if notifications['student_needs_to_peer_grade']:
pending_grading = True
except:
#Non catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.info(
"Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications}
set_value_in_cache(student_id, course_id, notification_type, notification_dict)
return notification_dict
def combined_notifications(course, user):
"""
Show notifications to a given user for a given course. Get notifications from the cache if possible,
or from the grading controller server if not.
@param course: The course object for which we are getting notifications
@param user: The user object for which we are getting notifications
@return: A dictionary with boolean pending_grading (true if there is pending grading), img_path (for notification
image), and response (actual response from grading controller server).
"""
#Set up return values so that we can return them for error cases
pending_grading = False
img_path = ""
notifications = {}
notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications}
#We don't want to show anonymous users anything.
if not user.is_authenticated():
return notification_dict
#Initialize controller query service using our mock system
controller_qs = ControllerQueryService(settings.OPEN_ENDED_GRADING_INTERFACE, render_to_string)
student_id = unique_id_for_user(user)
user_is_staff = bool(has_access(user, 'staff', course))
course_id = course.id
notification_type = "combined"
#See if we have a stored value in the cache
success, notification_dict = get_value_from_cache(student_id, course_id, notification_type)
if success:
return notification_dict
#Get the time of the last login of the user
last_login = user.last_login
last_time_viewed = last_login - datetime.timedelta(seconds=(NOTIFICATION_CACHE_TIME + 60))
try:
#Get the notifications from the grading controller
notifications = controller_qs.check_combined_notifications(
course.id,
student_id,
user_is_staff,
last_time_viewed,
)
if notifications.get('success'):
if (notifications.get('staff_needs_to_grade') or
notifications.get('student_needs_to_peer_grade')):
pending_grading = True
except:
#Non catastrophic error, so no real action
#This is a dev_facing_error
log.exception(
u"Problem with getting notifications from controller query service for course {0} user {1}.".format(
course_id, student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications}
#Store the notifications in the cache
set_value_in_cache(student_id, course_id, notification_type, notification_dict)
return notification_dict
def get_value_from_cache(student_id, course_id, notification_type):
key_name = create_key_name(student_id, course_id, notification_type)
success, value = _get_value_from_cache(key_name)
return success, value
def set_value_in_cache(student_id, course_id, notification_type, value):
key_name = create_key_name(student_id, course_id, notification_type)
_set_value_in_cache(key_name, value)
def create_key_name(student_id, course_id, notification_type):
key_name = u"{prefix}{type}_{course}_{student}".format(
prefix=KEY_PREFIX,
type=notification_type,
course=course_id,
student=student_id,
)
return key_name
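# For example (hypothetical values): student 'deadbeef' in course
# 'org/course/run' with notification_type 'combined' produces the cache key
# u'open_ended_combined_org/course/run_deadbeef'.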
def _get_value_from_cache(key_name):
value = cache.get(key_name)
success = False
if value is None:
return success, value
try:
value = json.loads(value)
success = True
except:
pass
return success, value
def _set_value_in_cache(key_name, value):
cache.set(key_name, json.dumps(value), NOTIFICATION_CACHE_TIME)
|
gymnasium/edx-platform | refs/heads/open-release/hawthorn.master | lms/djangoapps/grades/models.py | 2 | """
Models used for robust grading.
Robust grading allows student scores to be saved per-subsection independent
of any changes that may occur to the course after the score is achieved.
We also persist students' course-level grades, and update them whenever
a student's score or the course grading policy changes. As they are
persisted, course grades are also immune to changes in course content.
"""
import json
import logging
from base64 import b64encode
from collections import namedtuple
from hashlib import sha1
from django.db import models
from django.utils.timezone import now
from lazy import lazy
from model_utils.models import TimeStampedModel
from opaque_keys.edx.django.models import CourseKeyField, UsageKeyField
from opaque_keys.edx.keys import CourseKey, UsageKey
from coursewarehistoryextended.fields import UnsignedBigIntAutoField, UnsignedBigIntOneToOneField
from openedx.core.djangoapps.request_cache import get_cache
import events
log = logging.getLogger(__name__)
BLOCK_RECORD_LIST_VERSION = 1
# Used to serialize information about a block at the time it was used in
# grade calculation.
BlockRecord = namedtuple('BlockRecord', ['locator', 'weight', 'raw_possible', 'graded'])
class BlockRecordList(tuple):
"""
An immutable ordered list of BlockRecord objects.
"""
def __new__(cls, blocks, course_key, version=None): # pylint: disable=unused-argument
return super(BlockRecordList, cls).__new__(cls, blocks)
def __init__(self, blocks, course_key, version=None):
super(BlockRecordList, self).__init__(blocks)
self.course_key = course_key
self.version = version or BLOCK_RECORD_LIST_VERSION
def __eq__(self, other):
assert isinstance(other, BlockRecordList)
return hash(self) == hash(other)
def __hash__(self):
"""
Returns an integer Type value of the hash of this
list of block records, as required by python.
"""
return hash(self.hash_value)
@lazy
def hash_value(self):
"""
Returns a hash value of the list of block records.
This currently hashes using sha1, and returns a base64 encoded version
of the binary digest. In the future, different algorithms could be
supported by adding a label indicated which algorithm was used, e.g.,
"sha256$j0NDRmSPa5bfid2pAcUXaxCm2Dlh3TwayItZstwyeqQ=".
"""
return b64encode(sha1(self.json_value).digest())
@lazy
def json_value(self):
"""
Return a JSON-serialized version of the list of block records, using a
stable ordering.
"""
list_of_block_dicts = [block._asdict() for block in self]
for block_dict in list_of_block_dicts:
block_dict['locator'] = unicode(block_dict['locator']) # BlockUsageLocator is not json-serializable
data = {
u'blocks': list_of_block_dicts,
u'course_key': unicode(self.course_key),
u'version': self.version,
}
return json.dumps(
data,
separators=(',', ':'), # Remove spaces from separators for more compact representation
sort_keys=True,
)
@classmethod
def from_json(cls, blockrecord_json):
"""
Return a BlockRecordList from previously serialized json.
"""
data = json.loads(blockrecord_json)
course_key = CourseKey.from_string(data['course_key'])
block_dicts = data['blocks']
record_generator = (
BlockRecord(
locator=UsageKey.from_string(block["locator"]).replace(course_key=course_key),
weight=block["weight"],
raw_possible=block["raw_possible"],
graded=block["graded"],
)
for block in block_dicts
)
return cls(record_generator, course_key, version=data['version'])
@classmethod
def from_list(cls, blocks, course_key):
"""
Return a BlockRecordList from the given list and course_key.
"""
return cls(blocks, course_key)
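# Illustrative round-trip sketch (hypothetical course/block keys): serialize
# a single-block list with json_value and recover an equal BlockRecordList.
def _example_block_record_roundtrip():
    course_key = CourseKey.from_string(u'course-v1:edX+Demo+2017')
    block = BlockRecord(
        locator=UsageKey.from_string(
            u'block-v1:edX+Demo+2017+type@problem+block@p1'),
        weight=1.0,
        raw_possible=10,
        graded=True,
    )
    brl = BlockRecordList.from_list([block], course_key)
    assert BlockRecordList.from_json(brl.json_value) == brl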
class VisibleBlocks(models.Model):
"""
A django model used to track the state of a set of visible blocks under a
given subsection at the time they are used for grade calculation.
This state is represented using an array of BlockRecord, stored
in the blocks_json field. A hash of this json array is used for lookup
purposes.
"""
blocks_json = models.TextField()
hashed = models.CharField(max_length=100, unique=True)
course_id = CourseKeyField(blank=False, max_length=255, db_index=True)
_CACHE_NAMESPACE = u"grades.models.VisibleBlocks"
class Meta(object):
app_label = "grades"
def __unicode__(self):
"""
String representation of this model.
"""
return u"VisibleBlocks object - hash:{}, raw json:'{}'".format(self.hashed, self.blocks_json)
@property
def blocks(self):
"""
Returns the blocks_json data stored on this model as a list of
BlockRecords in the order they were provided.
"""
return BlockRecordList.from_json(self.blocks_json)
@classmethod
def bulk_read(cls, user_id, course_key):
"""
Reads and returns all visible block records for the given user and course from
the cache. The cache is initialized with the visible blocks for this user and
course if no entry currently exists.
Arguments:
user_id: The user associated with the desired records
course_key: The course identifier for the desired records
"""
prefetched = get_cache(cls._CACHE_NAMESPACE).get(cls._cache_key(user_id, course_key), None)
if prefetched is None:
prefetched = cls._initialize_cache(user_id, course_key)
return prefetched
@classmethod
def cached_get_or_create(cls, user_id, blocks):
"""
Given a ``user_id`` and a ``BlockRecordList`` object, attempts to
fetch the related VisibleBlocks model from the request cache. This
will create and save a new ``VisibleBlocks`` record if no record
exists corresponding to the hash_value of ``blocks``.
"""
prefetched = get_cache(cls._CACHE_NAMESPACE).get(cls._cache_key(user_id, blocks.course_key))
if prefetched is not None:
model = prefetched.get(blocks.hash_value)
if not model:
# We still have to do a get_or_create, because
# another user may have had this block hash created,
# even if the user we checked the cache for hasn't yet.
model, _ = cls.objects.get_or_create(
hashed=blocks.hash_value, blocks_json=blocks.json_value, course_id=blocks.course_key,
)
cls._update_cache(user_id, blocks.course_key, [model])
else:
model, _ = cls.objects.get_or_create(
hashed=blocks.hash_value,
defaults={u'blocks_json': blocks.json_value, u'course_id': blocks.course_key},
)
return model
@classmethod
def bulk_create(cls, user_id, course_key, block_record_lists):
"""
Bulk creates VisibleBlocks for the given iterator of
BlockRecordList objects and updates the VisibleBlocks cache
for the block records' course with the new VisibleBlocks.
Returns the newly created visible blocks.
"""
created = cls.objects.bulk_create([
VisibleBlocks(
blocks_json=brl.json_value,
hashed=brl.hash_value,
course_id=course_key,
)
for brl in block_record_lists
])
cls._update_cache(user_id, course_key, created)
return created
@classmethod
def bulk_get_or_create(cls, user_id, course_key, block_record_lists):
"""
Bulk creates VisibleBlocks for the given iterator of
BlockRecordList objects for the given user and course_key, but
only for those that aren't already created.
"""
cached_records = cls.bulk_read(user_id, course_key)
non_existent_brls = {brl for brl in block_record_lists if brl.hash_value not in cached_records}
cls.bulk_create(user_id, course_key, non_existent_brls)
@classmethod
def _initialize_cache(cls, user_id, course_key):
"""
Prefetches visible blocks for the given user and course and stores them in the cache.
Returns a dictionary mapping hashes of these block records to the
block record objects.
"""
grades_with_blocks = PersistentSubsectionGrade.objects.select_related('visible_blocks').filter(
user_id=user_id,
course_id=course_key,
)
prefetched = {grade.visible_blocks.hashed: grade.visible_blocks for grade in grades_with_blocks}
get_cache(cls._CACHE_NAMESPACE)[cls._cache_key(user_id, course_key)] = prefetched
return prefetched
@classmethod
def _update_cache(cls, user_id, course_key, visible_blocks):
"""
Adds a specific set of visible blocks to the request cache.
This assumes that prefetch has already been called.
"""
get_cache(cls._CACHE_NAMESPACE)[cls._cache_key(user_id, course_key)].update(
{visible_block.hashed: visible_block for visible_block in visible_blocks}
)
@classmethod
def _cache_key(cls, user_id, course_key):
return u"visible_blocks_cache.{}.{}".format(course_key, user_id)
class PersistentSubsectionGrade(TimeStampedModel):
"""
A django model tracking persistent grades at the subsection level.
"""
class Meta(object):
app_label = "grades"
unique_together = [
# * Specific grades can be pulled using all three columns,
# * Progress page can pull all grades for a given (course_id, user_id)
# * Course staff can see all grades for a course using (course_id,)
('course_id', 'user_id', 'usage_key'),
]
# Allows querying in the following ways:
# (modified): find all the grades updated within a certain timespan
# (modified, course_id): find all the grades updated within a timespan for a certain course
# (modified, course_id, usage_key): find all the grades updated within a timespan for a subsection
# in a course
# (first_attempted, course_id, user_id): find all attempted subsections in a course for a user
# (first_attempted, course_id): find all attempted subsections in a course for all users
index_together = [
('modified', 'course_id', 'usage_key'),
('first_attempted', 'course_id', 'user_id')
]
# primary key will need to be large for this table
id = UnsignedBigIntAutoField(primary_key=True) # pylint: disable=invalid-name
user_id = models.IntegerField(blank=False)
course_id = CourseKeyField(blank=False, max_length=255)
# note: the usage_key may not have the run filled in for
# old mongo courses. Use the full_usage_key property
# instead when you want to use/compare the usage_key.
usage_key = UsageKeyField(blank=False, max_length=255)
# Information relating to the state of content when grade was calculated
subtree_edited_timestamp = models.DateTimeField(u'Last content edit timestamp', blank=True, null=True)
course_version = models.CharField(u'Guid of latest course version', blank=True, max_length=255)
# earned/possible refers to the number of points achieved and available to achieve.
# graded refers to the subset of all problems that are marked as being graded.
earned_all = models.FloatField(blank=False)
possible_all = models.FloatField(blank=False)
earned_graded = models.FloatField(blank=False)
possible_graded = models.FloatField(blank=False)
# timestamp for the learner's first attempt at content in
# this subsection. If null, indicates no attempt
# has yet been made.
first_attempted = models.DateTimeField(null=True, blank=True)
# track which blocks were visible at the time of grade calculation
visible_blocks = models.ForeignKey(VisibleBlocks, db_column='visible_blocks_hash', to_field='hashed',
on_delete=models.CASCADE)
@property
def full_usage_key(self):
"""
Returns the "correct" usage key value with the run filled in.
"""
if self.usage_key.run is None: # pylint: disable=no-member
return self.usage_key.replace(course_key=self.course_id)
else:
return self.usage_key
def __unicode__(self):
"""
Returns a string representation of this model.
"""
return (
u"{} user: {}, course version: {}, subsection: {} ({}). {}/{} graded, {}/{} all, first_attempted: {}"
).format(
type(self).__name__,
self.user_id,
self.course_version,
self.usage_key,
self.visible_blocks_id,
self.earned_graded,
self.possible_graded,
self.earned_all,
self.possible_all,
self.first_attempted,
)
@classmethod
def read_grade(cls, user_id, usage_key):
"""
Reads a grade from the database.
Arguments:
user_id: The user associated with the desired grade
usage_key: The location of the subsection associated with the desired grade
Raises PersistentSubsectionGrade.DoesNotExist if applicable
"""
return cls.objects.select_related('visible_blocks', 'override').get(
user_id=user_id,
course_id=usage_key.course_key, # course_id is included to take advantage of db indexes
usage_key=usage_key,
)
@classmethod
def bulk_read_grades(cls, user_id, course_key):
"""
Reads all grades for the given user and course.
Arguments:
user_id: The user associated with the desired grades
course_key: The course identifier for the desired grades
"""
return cls.objects.select_related('visible_blocks', 'override').filter(
user_id=user_id,
course_id=course_key,
)
@classmethod
def update_or_create_grade(cls, **params):
"""
Wrapper for objects.update_or_create.
"""
cls._prepare_params(params)
VisibleBlocks.cached_get_or_create(params['user_id'], params['visible_blocks'])
cls._prepare_params_visible_blocks_id(params)
cls._prepare_params_override(params)
# TODO: do we NEED to pop these?
first_attempted = params.pop('first_attempted')
user_id = params.pop('user_id')
usage_key = params.pop('usage_key')
grade, _ = cls.objects.update_or_create(
user_id=user_id,
course_id=usage_key.course_key,
usage_key=usage_key,
defaults=params,
)
if first_attempted is not None and grade.first_attempted is None:
grade.first_attempted = first_attempted
grade.save()
cls._emit_grade_calculated_event(grade)
return grade
@classmethod
def bulk_create_grades(cls, grade_params_iter, user_id, course_key):
"""
Bulk creation of grades.
"""
if not grade_params_iter:
return
PersistentSubsectionGradeOverride.prefetch(user_id, course_key)
map(cls._prepare_params, grade_params_iter)
VisibleBlocks.bulk_get_or_create(
user_id, course_key, [params['visible_blocks'] for params in grade_params_iter]
)
map(cls._prepare_params_visible_blocks_id, grade_params_iter)
map(cls._prepare_params_override, grade_params_iter)
grades = [PersistentSubsectionGrade(**params) for params in grade_params_iter]
grades = cls.objects.bulk_create(grades)
for grade in grades:
cls._emit_grade_calculated_event(grade)
return grades
@classmethod
def _prepare_params(cls, params):
"""
Prepares the fields for the grade record.
"""
if not params.get('course_id', None):
params['course_id'] = params['usage_key'].course_key
params['course_version'] = params.get('course_version', None) or ""
params['visible_blocks'] = BlockRecordList.from_list(params['visible_blocks'], params['course_id'])
@classmethod
def _prepare_params_visible_blocks_id(cls, params):
"""
Prepares the visible_blocks_id field for the grade record,
using the hash of the visible_blocks field. Specifying
the hashed field eliminates extra queries to get the
VisibleBlocks record. Use this variation of preparing
the params when you are sure the corresponding
VisibleBlocks record exists.
"""
params['visible_blocks_id'] = params['visible_blocks'].hash_value
del params['visible_blocks']
@classmethod
def _prepare_params_override(cls, params):
override = PersistentSubsectionGradeOverride.get_override(params['user_id'], params['usage_key'])
if override:
if override.earned_all_override is not None:
params['earned_all'] = override.earned_all_override
if override.possible_all_override is not None:
params['possible_all'] = override.possible_all_override
if override.earned_graded_override is not None:
params['earned_graded'] = override.earned_graded_override
if override.possible_graded_override is not None:
params['possible_graded'] = override.possible_graded_override
@staticmethod
def _emit_grade_calculated_event(grade):
events.subsection_grade_calculated(grade)
class PersistentCourseGrade(TimeStampedModel):
"""
A django model tracking persistent course grades.
"""
class Meta(object):
app_label = "grades"
# Indices:
# (course_id, user_id) for individual grades
# (course_id) for instructors to see all course grades, implicitly created via the unique_together constraint
# (user_id) for course dashboard; explicitly declared as an index below
# (passed_timestamp, course_id) for tracking when users first earned a passing grade.
# (modified): find all the grades updated within a certain timespan
# (modified, course_id): find all the grades updated within a certain timespan for a course
unique_together = [
('course_id', 'user_id'),
]
index_together = [
('passed_timestamp', 'course_id'),
('modified', 'course_id')
]
# primary key will need to be large for this table
id = UnsignedBigIntAutoField(primary_key=True) # pylint: disable=invalid-name
user_id = models.IntegerField(blank=False, db_index=True)
course_id = CourseKeyField(blank=False, max_length=255)
# Information relating to the state of content when grade was calculated
course_edited_timestamp = models.DateTimeField(u'Last content edit timestamp', blank=True, null=True)
course_version = models.CharField(u'Course content version identifier', blank=True, max_length=255)
grading_policy_hash = models.CharField(u'Hash of grading policy', blank=False, max_length=255)
# Information about the course grade itself
percent_grade = models.FloatField(blank=False)
letter_grade = models.CharField(u'Letter grade for course', blank=False, max_length=255)
# Information related to course completion
passed_timestamp = models.DateTimeField(u'Date learner earned a passing grade', blank=True, null=True)
_CACHE_NAMESPACE = u"grades.models.PersistentCourseGrade"
def __unicode__(self):
"""
Returns a string representation of this model.
"""
return u', '.join([
u"{} user: {}".format(type(self).__name__, self.user_id),
u"course version: {}".format(self.course_version),
u"grading policy: {}".format(self.grading_policy_hash),
u"percent grade: {}%".format(self.percent_grade),
u"letter grade: {}".format(self.letter_grade),
u"passed timestamp: {}".format(self.passed_timestamp),
])
@classmethod
def prefetch(cls, course_id, users):
"""
Prefetches grades for the given users for the given course.
"""
get_cache(cls._CACHE_NAMESPACE)[cls._cache_key(course_id)] = {
grade.user_id: grade
for grade in
cls.objects.filter(user_id__in=[user.id for user in users], course_id=course_id)
}
@classmethod
def read(cls, user_id, course_id):
"""
Reads a grade from the database.
Arguments:
user_id: The user associated with the desired grade
course_id: The id of the course associated with the desired grade
Raises PersistentCourseGrade.DoesNotExist if applicable
"""
try:
prefetched_grades = get_cache(cls._CACHE_NAMESPACE)[cls._cache_key(course_id)]
try:
return prefetched_grades[user_id]
except KeyError:
# user's grade is not in the prefetched list, so
# assume they have no grade
raise cls.DoesNotExist
except KeyError:
# grades were not prefetched for the course, so fetch it
return cls.objects.get(user_id=user_id, course_id=course_id)
@classmethod
def update_or_create(cls, user_id, course_id, **kwargs):
"""
Creates or updates a course grade in the database.
Returns a PersistentCourseGrade object.
"""
passed = kwargs.pop('passed')
if kwargs.get('course_version', None) is None:
kwargs['course_version'] = ""
grade, _ = cls.objects.update_or_create(
user_id=user_id,
course_id=course_id,
defaults=kwargs
)
if passed and not grade.passed_timestamp:
grade.passed_timestamp = now()
grade.save()
cls._emit_grade_calculated_event(grade)
cls._update_cache(course_id, user_id, grade)
return grade
@classmethod
def _update_cache(cls, course_id, user_id, grade):
course_cache = get_cache(cls._CACHE_NAMESPACE).get(cls._cache_key(course_id))
if course_cache is not None:
course_cache[user_id] = grade
@classmethod
def _cache_key(cls, course_id):
return u"grades_cache.{}".format(course_id)
@staticmethod
def _emit_grade_calculated_event(grade):
events.course_grade_calculated(grade)
class PersistentSubsectionGradeOverride(models.Model):
"""
A django model tracking persistent grades overrides at the subsection level.
"""
class Meta(object):
app_label = "grades"
grade = UnsignedBigIntOneToOneField(PersistentSubsectionGrade, related_name='override')
# Created/modified timestamps prevent race conditions when used with async rescoring tasks
created = models.DateTimeField(auto_now_add=True, db_index=True)
modified = models.DateTimeField(auto_now=True, db_index=True)
# earned/possible refers to the number of points achieved and available to achieve.
# graded refers to the subset of all problems that are marked as being graded.
earned_all_override = models.FloatField(null=True, blank=True)
possible_all_override = models.FloatField(null=True, blank=True)
earned_graded_override = models.FloatField(null=True, blank=True)
possible_graded_override = models.FloatField(null=True, blank=True)
_CACHE_NAMESPACE = u"grades.models.PersistentSubsectionGradeOverride"
@classmethod
def prefetch(cls, user_id, course_key):
get_cache(cls._CACHE_NAMESPACE)[(user_id, str(course_key))] = {
override.grade.usage_key: override
for override in
cls.objects.filter(grade__user_id=user_id, grade__course_id=course_key)
}
@classmethod
def get_override(cls, user_id, usage_key):
prefetch_values = get_cache(cls._CACHE_NAMESPACE).get((user_id, str(usage_key.course_key)), None)
if prefetch_values is not None:
return prefetch_values.get(usage_key)
try:
return cls.objects.get(
grade__user_id=user_id,
grade__course_id=usage_key.course_key,
grade__usage_key=usage_key,
)
except PersistentSubsectionGradeOverride.DoesNotExist:
pass
def prefetch(user, course_key):
PersistentSubsectionGradeOverride.prefetch(user.id, course_key)
VisibleBlocks.bulk_read(user.id, course_key)
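# Illustrative sketch (not part of the original module): how a caller might
# persist one subsection grade with the models above. All score values are
# hypothetical placeholders; `block_records` is assumed to be a list of
# BlockRecord namedtuples for the subsection's graded blocks.
def _example_save_subsection_grade(user_id, usage_key, block_records):
    return PersistentSubsectionGrade.update_or_create_grade(
        user_id=user_id,
        usage_key=usage_key,
        course_version="",
        subtree_edited_timestamp=None,
        earned_all=3.0,
        possible_all=5.0,
        earned_graded=2.0,
        possible_graded=4.0,
        visible_blocks=block_records,  # converted to a BlockRecordList internally
        first_attempted=None,
    )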
|
rolandovillca/python_basic_concepts | refs/heads/master | modules/database_module/__init__.py | 4 | # The __init__.py files are required to make Python treat the directories as containing packages;
# this is done to prevent directories with a common name, such as string,
# from unintentionally hiding valid modules that occur later on the module search path.
# In the simplest case, __init__.py can just be an empty file,
# but it can also execute initialization code for the package or set the __all__ variable, described later.
# Summary:
# Files named __init__.py are used to mark directories on disk as Python package directories.
# If you have the files
#     mydir/spam/__init__.py
#     mydir/spam/module.py
# and mydir is on your path, you can import the code in module.py as
#     import spam.module
# or
#     from spam import module
# If you remove the __init__.py file, Python will no longer look for submodules inside that directory, so attempts to import the module will fail.
# The __init__.py file is usually empty, but can be used to export selected portions of the package under a more convenient name, hold convenience functions, etc. Given the example above, the contents of the init module can be accessed as
#     import spam
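# A hedged example (an assumption, not part of this package): besides being
# left empty, an __init__.py may re-export names and set the __all__ variable
# mentioned above, so that ``from package import *`` only pulls in the listed
# names:
#
#     from .module import useful_function
#     __all__ = ['useful_function']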
|
hyowon/servo | refs/heads/master | tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/test_encoding.py | 445 | from __future__ import absolute_import, division, unicode_literals
import os
import unittest
try:
unittest.TestCase.assertEqual
except AttributeError:
unittest.TestCase.assertEqual = unittest.TestCase.assertEquals
from .support import get_data_files, TestData, test_dir, errorMessage
from html5lib import HTMLParser, inputstream
class Html5EncodingTestCase(unittest.TestCase):
def test_codec_name_a(self):
self.assertEqual(inputstream.codecName("utf-8"), "utf-8")
def test_codec_name_b(self):
self.assertEqual(inputstream.codecName("utf8"), "utf-8")
def test_codec_name_c(self):
self.assertEqual(inputstream.codecName(" utf8 "), "utf-8")
def test_codec_name_d(self):
self.assertEqual(inputstream.codecName("ISO_8859--1"), "windows-1252")
def runParserEncodingTest(data, encoding):
p = HTMLParser()
assert p.documentEncoding is None
p.parse(data, useChardet=False)
encoding = encoding.lower().decode("ascii")
assert encoding == p.documentEncoding, errorMessage(data, encoding, p.documentEncoding)
def runPreScanEncodingTest(data, encoding):
stream = inputstream.HTMLBinaryInputStream(data, chardet=False)
encoding = encoding.lower().decode("ascii")
# Very crude way to ignore irrelevant tests
if len(data) > stream.numBytesMeta:
return
assert encoding == stream.charEncoding[0], errorMessage(data, encoding, stream.charEncoding[0])
def test_encoding():
for filename in get_data_files("encoding"):
tests = TestData(filename, b"data", encoding=None)
for idx, test in enumerate(tests):
yield (runParserEncodingTest, test[b'data'], test[b'encoding'])
yield (runPreScanEncodingTest, test[b'data'], test[b'encoding'])
try:
try:
import charade # flake8: noqa
except ImportError:
import chardet # flake8: noqa
except ImportError:
print("charade/chardet not found, skipping chardet tests")
else:
def test_chardet():
with open(os.path.join(test_dir, "encoding", "chardet", "test_big5.txt"), "rb") as fp:
encoding = inputstream.HTMLInputStream(fp.read()).charEncoding
assert encoding[0].lower() == "big5"
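# Note (an assumption about the intended test runner): test_encoding above is
# a generator test yielding (function, *args) tuples, which is the nose
# collection protocol; under pytest these cases would instead be expressed
# with @pytest.mark.parametrize.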
|
tornadomeet/mxnet | refs/heads/master | example/ssd/tools/rand_sampler.py | 55 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import math
class RandSampler(object):
"""
Random sampler base class, used for data augmentation
Parameters:
----------
max_trials : int
maximum number of trials; if this number is exceeded, give up
max_sample : int
maximum random crop samples to be generated
"""
def __init__(self, max_trials, max_sample):
assert max_trials > 0
self.max_trials = int(max_trials)
assert max_sample >= 0
self.max_sample = int(max_sample)
def sample(self, label):
"""
Interface for calling sampling function
Parameters:
----------
label : numpy.array (n x 5 matrix)
ground-truths
Returns:
----------
list of (crop_box, label) tuples, if failed, return empty list []
"""
raise NotImplementedError
class RandCropper(RandSampler):
"""
Random cropping original images with various settings
Parameters:
----------
min_scale : float
minimum crop scale, (0, 1]
max_scale : float
maximum crop scale, (0, 1], must be larger than min_scale
min_aspect_ratio : float
minimum crop aspect ratio, (0, 1]
max_aspect_ratio : float
maximum crop aspect ratio, [1, inf)
min_overlap : float
threshold of minimum overlap between a random crop and any gt
max_trials : int
maximum number of trials; if this number is exceeded, give up
max_sample : int
maximum random crop samples to be generated
"""
def __init__(self, min_scale=1., max_scale=1.,
min_aspect_ratio=1., max_aspect_ratio=1.,
min_overlap=0., max_trials=50, max_sample=1):
super(RandCropper, self).__init__(max_trials, max_sample)
assert min_scale <= max_scale, "min_scale must be <= max_scale"
assert 0 < min_scale and min_scale <= 1, "min_scale must be in (0, 1]"
assert 0 < max_scale and max_scale <= 1, "max_scale must be in (0, 1]"
self.min_scale = min_scale
self.max_scale = max_scale
assert 0 < min_aspect_ratio and min_aspect_ratio <= 1, "min_ratio must be in (0, 1]"
assert 1 <= max_aspect_ratio, "max_ratio must be >= 1"
self.min_aspect_ratio = min_aspect_ratio
self.max_aspect_ratio = max_aspect_ratio
assert 0 <= min_overlap and min_overlap <= 1, "min_overlap must be in [0, 1]"
self.min_overlap = min_overlap
self.config = {'gt_constraint' : 'center'}
def sample(self, label):
"""
generate random cropping boxes according to parameters
if satisfactory crops are generated, apply them to the ground-truth as well
Parameters:
----------
label : numpy.array (n x 5 matrix)
ground-truths
Returns:
----------
list of (crop_box, label) tuples, if failed, return empty list []
"""
samples = []
count = 0
for trial in range(self.max_trials):
if count >= self.max_sample:
return samples
scale = np.random.uniform(self.min_scale, self.max_scale)
min_ratio = max(self.min_aspect_ratio, scale * scale)
max_ratio = min(self.max_aspect_ratio, 1. / scale / scale)
ratio = math.sqrt(np.random.uniform(min_ratio, max_ratio))
width = scale * ratio
height = scale / ratio
left = np.random.uniform(0., 1 - width)
top = np.random.uniform(0., 1 - height)
rand_box = (left, top, left + width, top + height)
valid_mask = np.where(label[:, 0] > -1)[0]
gt = label[valid_mask, :]
ious = self._check_satisfy(rand_box, gt)
if ious is not None:
# transform gt labels after crop, discard bad ones
l, t, r, b = rand_box
new_gt_boxes = []
new_width = r - l
new_height = b - t
for i in range(valid_mask.size):
if ious[i] > 0:
xmin = max(0., (gt[i, 1] - l) / new_width)
ymin = max(0., (gt[i, 2] - t) / new_height)
xmax = min(1., (gt[i, 3] - l) / new_width)
ymax = min(1., (gt[i, 4] - t) / new_height)
new_gt_boxes.append([gt[i, 0], xmin, ymin, xmax, ymax])
if not new_gt_boxes:
continue
new_gt_boxes = np.array(new_gt_boxes)
label = np.lib.pad(new_gt_boxes,
((0, label.shape[0]-new_gt_boxes.shape[0]), (0,0)), \
'constant', constant_values=(-1, -1))
samples.append((rand_box, label))
count += 1
return samples
def _check_satisfy(self, rand_box, gt_boxes):
"""
check if overlap with any gt box is larger than threshold
"""
l, t, r, b = rand_box
num_gt = gt_boxes.shape[0]
ls = np.ones(num_gt) * l
ts = np.ones(num_gt) * t
rs = np.ones(num_gt) * r
bs = np.ones(num_gt) * b
mask = np.where(ls < gt_boxes[:, 1])[0]
ls[mask] = gt_boxes[mask, 1]
mask = np.where(ts < gt_boxes[:, 2])[0]
ts[mask] = gt_boxes[mask, 2]
mask = np.where(rs > gt_boxes[:, 3])[0]
rs[mask] = gt_boxes[mask, 3]
mask = np.where(bs > gt_boxes[:, 4])[0]
bs[mask] = gt_boxes[mask, 4]
w = rs - ls
w[w < 0] = 0
h = bs - ts
h[h < 0] = 0
inter_area = h * w
union_area = np.ones(num_gt) * max(0, r - l) * max(0, b - t)
union_area += (gt_boxes[:, 3] - gt_boxes[:, 1]) * (gt_boxes[:, 4] - gt_boxes[:, 2])
union_area -= inter_area
ious = inter_area / union_area
ious[union_area <= 0] = 0
max_iou = np.amax(ious)
if max_iou < self.min_overlap:
return None
# check ground-truth constraint
if self.config['gt_constraint'] == 'center':
for i in range(ious.shape[0]):
if ious[i] > 0:
gt_x = (gt_boxes[i, 1] + gt_boxes[i, 3]) / 2.0
gt_y = (gt_boxes[i, 2] + gt_boxes[i, 4]) / 2.0
if gt_x < l or gt_x > r or gt_y < t or gt_y > b:
return None
elif self.config['gt_constraint'] == 'corner':
for i in range(ious.shape[0]):
if ious[i] > 0:
if gt_boxes[i, 1] < l or gt_boxes[i, 3] > r \
or gt_boxes[i, 2] < t or gt_boxes[i, 4] > b:
return None
return ious
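# Illustrative sketch (not in the original file): exercising RandCropper on a
# single normalized ground-truth box. Labels are rows of
# (class_id, xmin, ymin, xmax, ymax) with coordinates in [0, 1]; rows whose
# class_id is -1 are treated as padding and ignored.
def _demo_rand_cropper():
    label = np.array([[0., 0.25, 0.25, 0.75, 0.75]])
    sampler = RandCropper(min_scale=0.5, max_scale=1.0, min_overlap=0.3,
                          max_trials=50, max_sample=2)
    # Each sample is a (crop_box, relabeled_gt) tuple; the list may be empty
    # if no random crop satisfied the overlap constraint within max_trials.
    return sampler.sample(label)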
class RandPadder(RandSampler):
"""
Random padding of original images with various settings
Parameters:
----------
min_scale : float
minimum padding scale, [1, inf)
max_scale : float
maximum padding scale, [1, inf), must be larger than min_scale
min_aspect_ratio : float
minimum crop aspect ratio, (0, 1]
max_aspect_ratio : float
maximum crop aspect ratio, [1, inf)
min_gt_scale : float
minimum ground-truth scale to be satisfied after padding,
either width or height, [0, 1]
max_trials : int
maximum number of trials; if this number is exceeded, give up
max_sample : int
maximum random crop samples to be generated
"""
def __init__(self, min_scale=1., max_scale=1., min_aspect_ratio=1., \
max_aspect_ratio=1., min_gt_scale=.01, max_trials=50,
max_sample=1):
super(RandPadder, self).__init__(max_trials, max_sample)
assert min_scale <= max_scale, "min_scale must be <= max_scale"
assert min_scale >= 1, "min_scale must be >= 1"
self.min_scale = min_scale
self.max_scale = max_scale
assert 0 < min_aspect_ratio and min_aspect_ratio <= 1, "min_ratio must be in (0, 1]"
assert 1 <= max_aspect_ratio, "max_ratio must be >= 1"
self.min_aspect_ratio = min_aspect_ratio
self.max_aspect_ratio = max_aspect_ratio
assert 0 <= min_gt_scale and min_gt_scale <= 1, "min_gt_scale must be in [0, 1]"
self.min_gt_scale = min_gt_scale
def sample(self, label):
"""
generate random padding boxes according to parameters
if satisfactory padding is generated, apply it to the ground-truth as well
Parameters:
----------
label : numpy.array (n x 5 matrix)
ground-truths
Returns:
----------
list of (crop_box, label) tuples, if failed, return empty list []
"""
samples = []
count = 0
for trial in range(self.max_trials):
if count >= self.max_sample:
return samples
scale = np.random.uniform(self.min_scale, self.max_scale)
min_ratio = max(self.min_aspect_ratio, scale * scale)
max_ratio = min(self.max_aspect_ratio, 1. / scale / scale)
ratio = math.sqrt(np.random.uniform(min_ratio, max_ratio))
width = scale * ratio
if width < 1:
continue
height = scale / ratio
if height < 1:
continue
left = np.random.uniform(0., 1 - width)
top = np.random.uniform(0., 1 - height)
right = left + width
bot = top + height
rand_box = (left, top, right, bot)
valid_mask = np.where(label[:, 0] > -1)[0]
gt = label[valid_mask, :]
new_gt_boxes = []
for i in range(gt.shape[0]):
xmin = (gt[i, 1] - left) / width
ymin = (gt[i, 2] - top) / height
xmax = (gt[i, 3] - left) / width
ymax = (gt[i, 4] - top) / height
new_size = min(xmax - xmin, ymax - ymin)
if new_size < self.min_gt_scale:
new_gt_boxes = []
break
new_gt_boxes.append([gt[i, 0], xmin, ymin, xmax, ymax])
if not new_gt_boxes:
continue
new_gt_boxes = np.array(new_gt_boxes)
label = np.lib.pad(new_gt_boxes,
((0, label.shape[0]-new_gt_boxes.shape[0]), (0,0)), \
'constant', constant_values=(-1, -1))
samples.append((rand_box, label))
count += 1
return samples
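# A similar sketch for RandPadder (hypothetical values): padding places the
# original image inside a larger canvas, so scales are >= 1 and the
# ground-truth boxes shrink relative to the returned rand_box.
def _demo_rand_padder():
    label = np.array([[0., 0.25, 0.25, 0.75, 0.75]])
    sampler = RandPadder(min_scale=1.5, max_scale=3.0, min_gt_scale=0.05,
                         max_trials=50, max_sample=2)
    return sampler.sample(label)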
|
obreitwi/yccp | refs/heads/master | setup.py | 1 |
from setuptools import setup
import os
import os.path as osp
versionfile = osp.join(
osp.dirname(osp.abspath(__file__)), "yccp", "version.py")
with open(versionfile) as f:
code = compile(f.read(), versionfile, 'exec')
exec(code, globals(), locals())
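# Note (an assumption about the project layout): yccp/version.py is expected
# to define __version__ as an iterable of version components, e.g.
#     __version__ = (1, 0, 0)
# so that ".".join(map(str, __version__)) below yields "1.0.0".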
setup(
name="yccp",
version=".".join(map(str, __version__)),
install_requires=["PyYAML>=3.12"],
packages=["yccp", "yccp.cli", "yccp.sweeps"],
entry_points={
"console_scripts" : [
"yccp-sbn=yccp.cli.sort_by_numbers:main"
]
},
url="https://github.com/obreitwi/yccp",
license="GNUv3",
zip_safe=True,
)
|
guillecura/tttsite | refs/heads/master | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/lexers/web.py | 197 | # -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Lexers for web-related languages and markup.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
import copy
from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
include, this
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Other, Punctuation, Literal
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
html_doctype_matches, unirange
from pygments.lexers.agile import RubyLexer
from pygments.lexers.compiled import ScalaLexer
__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'JsonLexer', 'CssLexer',
'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer', 'ScssLexer',
'ObjectiveJLexer', 'CoffeeScriptLexer', 'LiveScriptLexer',
'DuelLexer', 'ScamlLexer', 'JadeLexer', 'XQueryLexer',
'DtdLexer', 'DartLexer', 'LassoLexer', 'QmlLexer', 'TypeScriptLexer']
class JavascriptLexer(RegexLexer):
"""
For JavaScript source code.
"""
name = 'JavaScript'
aliases = ['js', 'javascript']
filenames = ['*.js', ]
mimetypes = ['application/javascript', 'application/x-javascript',
'text/x-javascript', 'text/javascript', ]
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
(r'', Text, '#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|yield|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class JsonLexer(RegexLexer):
"""
For JSON data structures.
*New in Pygments 1.5.*
"""
name = 'JSON'
aliases = ['json']
filenames = ['*.json']
mimetypes = ['application/json']
# integer part of a number
int_part = r'-?(0|[1-9]\d*)'
# fractional part of a number
frac_part = r'\.\d+'
# exponential part of a number
exp_part = r'[eE](\+|-)?\d+'
flags = re.DOTALL
tokens = {
'whitespace': [
(r'\s+', Text),
],
# represents a simple terminal value
'simplevalue': [
(r'(true|false|null)\b', Keyword.Constant),
(('%(int_part)s(%(frac_part)s%(exp_part)s|'
'%(exp_part)s|%(frac_part)s)') % vars(),
Number.Float),
(int_part, Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
],
# the right hand side of an object, after the attribute name
'objectattribute': [
include('value'),
(r':', Punctuation),
# comma terminates the attribute but expects more
(r',', Punctuation, '#pop'),
# a closing bracket terminates the entire object, so pop twice
(r'}', Punctuation, ('#pop', '#pop')),
],
# a json object - { attr, attr, ... }
'objectvalue': [
include('whitespace'),
(r'"(\\\\|\\"|[^"])*"', Name.Tag, 'objectattribute'),
(r'}', Punctuation, '#pop'),
],
# json array - [ value, value, ... ]
'arrayvalue': [
include('whitespace'),
include('value'),
(r',', Punctuation),
(r']', Punctuation, '#pop'),
],
# a json value - either a simple value or a complex value (object or array)
'value': [
include('whitespace'),
include('simplevalue'),
(r'{', Punctuation, 'objectvalue'),
(r'\[', Punctuation, 'arrayvalue'),
],
# the root of a json document should be a value
'root': [
include('value'),
],
}
JSONLexer = JsonLexer # for backwards compatibility with Pygments 1.5
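# Usage sketch (not part of this module): tokenizing a small JSON document
# with the lexer above, using pygments' standard highlight() entry point.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     from pygments.lexers.web import JsonLexer
#     print(highlight('{"a": [1, 2.5, true]}', JsonLexer(), TerminalFormatter()))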
class ActionScriptLexer(RegexLexer):
"""
For ActionScript source code.
*New in Pygments 0.9.*
"""
name = 'ActionScript'
aliases = ['as', 'actionscript']
filenames = ['*.as']
mimetypes = ['application/x-actionscript3', 'text/x-actionscript3',
'text/actionscript3']
flags = re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
(r'[~\^\*!%&<>\|+=:;,/?\\-]+', Operator),
(r'[{}\[\]();.]+', Punctuation),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|var|with|new|typeof|arguments|instanceof|this|'
r'switch)\b', Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
Keyword.Constant),
(r'(Accessibility|AccessibilityProperties|ActionScriptVersion|'
r'ActivityEvent|AntiAliasType|ApplicationDomain|AsBroadcaster|Array|'
r'AsyncErrorEvent|AVM1Movie|BevelFilter|Bitmap|BitmapData|'
r'BitmapDataChannel|BitmapFilter|BitmapFilterQuality|BitmapFilterType|'
r'BlendMode|BlurFilter|Boolean|ByteArray|Camera|Capabilities|CapsStyle|'
r'Class|Color|ColorMatrixFilter|ColorTransform|ContextMenu|'
r'ContextMenuBuiltInItems|ContextMenuEvent|ContextMenuItem|'
r'ConvultionFilter|CSMSettings|DataEvent|Date|DefinitionError|'
r'DeleteObjectSample|Dictionary|DisplacmentMapFilter|DisplayObject|'
r'DisplacmentMapFilterMode|DisplayObjectContainer|DropShadowFilter|'
r'Endian|EOFError|Error|ErrorEvent|EvalError|Event|EventDispatcher|'
r'EventPhase|ExternalInterface|FileFilter|FileReference|'
r'FileReferenceList|FocusDirection|FocusEvent|Font|FontStyle|FontType|'
r'FrameLabel|FullScreenEvent|Function|GlowFilter|GradientBevelFilter|'
r'GradientGlowFilter|GradientType|Graphics|GridFitType|HTTPStatusEvent|'
r'IBitmapDrawable|ID3Info|IDataInput|IDataOutput|IDynamicPropertyOutput'
r'IDynamicPropertyWriter|IEventDispatcher|IExternalizable|'
r'IllegalOperationError|IME|IMEConversionMode|IMEEvent|int|'
r'InteractiveObject|InterpolationMethod|InvalidSWFError|InvokeEvent|'
r'IOError|IOErrorEvent|JointStyle|Key|Keyboard|KeyboardEvent|KeyLocation|'
r'LineScaleMode|Loader|LoaderContext|LoaderInfo|LoadVars|LocalConnection|'
r'Locale|Math|Matrix|MemoryError|Microphone|MorphShape|Mouse|MouseEvent|'
r'MovieClip|MovieClipLoader|Namespace|NetConnection|NetStatusEvent|'
r'NetStream|NewObjectSample|Number|Object|ObjectEncoding|PixelSnapping|'
r'Point|PrintJob|PrintJobOptions|PrintJobOrientation|ProgressEvent|Proxy|'
r'QName|RangeError|Rectangle|ReferenceError|RegExp|Responder|Sample|Scene|'
r'ScriptTimeoutError|Security|SecurityDomain|SecurityError|'
r'SecurityErrorEvent|SecurityPanel|Selection|Shape|SharedObject|'
r'SharedObjectFlushStatus|SimpleButton|Socket|Sound|SoundChannel|'
r'SoundLoaderContext|SoundMixer|SoundTransform|SpreadMethod|Sprite|'
r'StackFrame|StackOverflowError|Stage|StageAlign|StageDisplayState|'
r'StageQuality|StageScaleMode|StaticText|StatusEvent|String|StyleSheet|'
r'SWFVersion|SyncEvent|SyntaxError|System|TextColorType|TextField|'
r'TextFieldAutoSize|TextFieldType|TextFormat|TextFormatAlign|'
r'TextLineMetrics|TextRenderer|TextSnapshot|Timer|TimerEvent|Transform|'
r'TypeError|uint|URIError|URLLoader|URLLoaderDataFormat|URLRequest|'
r'URLRequestHeader|URLRequestMethod|URLStream|URLVariabeles|VerifyError|'
r'Video|XML|XMLDocument|XMLList|XMLNode|XMLNodeType|XMLSocket|XMLUI)\b',
Name.Builtin),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b',Name.Function),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class ActionScript3Lexer(RegexLexer):
"""
For ActionScript 3 source code.
*New in Pygments 0.11.*
"""
name = 'ActionScript 3'
aliases = ['as3', 'actionscript3']
filenames = ['*.as']
mimetypes = ['application/x-actionscript', 'text/x-actionscript',
'text/actionscript']
identifier = r'[$a-zA-Z_][a-zA-Z0-9_]*'
typeidentifier = identifier + '(?:\.<\w+>)?'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'\s+', Text),
(r'(function\s+)(' + identifier + r')(\s*)(\()',
bygroups(Keyword.Declaration, Name.Function, Text, Operator),
'funcparams'),
(r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r')',
bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
Keyword.Type)),
(r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
bygroups(Keyword, Text, Name.Namespace, Text)),
(r'(new)(\s+)(' + typeidentifier + r')(\s*)(\()',
bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
(r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
r'switch|import|include|as|is)\b',
Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
Keyword.Constant),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b', Name.Function),
(identifier, Name),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[~\^\*!%&<>\|+=:;,/?\\{}\[\]().-]+', Operator),
],
'funcparams': [
(r'\s+', Text),
(r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
typeidentifier + r'|\*)(\s*)',
bygroups(Text, Punctuation, Name, Text, Operator, Text,
Keyword.Type, Text), 'defval'),
(r'\)', Operator, 'type')
],
'type': [
(r'(\s*)(:)(\s*)(' + typeidentifier + r'|\*)',
bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
(r'\s*', Text, '#pop:2')
],
'defval': [
(r'(=)(\s*)([^(),]+)(\s*)(,?)',
bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
(r',?', Operator, '#pop')
]
}
def analyse_text(text):
if re.match(r'\w+\s*:\s*\w', text):
return 0.3
return 0
class CssLexer(RegexLexer):
"""
For CSS (Cascading Style Sheets).
"""
name = 'CSS'
aliases = ['css']
filenames = ['*.css']
mimetypes = ['text/css']
tokens = {
'root': [
include('basics'),
],
'basics': [
(r'\s+', Text),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'{', Punctuation, 'content'),
(r'\:[a-zA-Z0-9_-]+', Name.Decorator),
(r'\.[a-zA-Z0-9_-]+', Name.Class),
(r'\#[a-zA-Z0-9_-]+', Name.Function),
(r'@[a-zA-Z0-9_-]+', Keyword, 'atrule'),
(r'[a-zA-Z0-9_-]+', Name.Tag),
(r'[~\^\*!%&\[\]\(\)<>\|+=@:;,./?-]', Operator),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single)
],
'atrule': [
(r'{', Punctuation, 'atcontent'),
(r';', Punctuation, '#pop'),
include('basics'),
],
'atcontent': [
include('basics'),
(r'}', Punctuation, '#pop:2'),
],
'content': [
(r'\s+', Text),
(r'}', Punctuation, '#pop'),
(r'url\(.*?\)', String.Other),
(r'^@.*?$', Comment.Preproc),
(r'(azimuth|background-attachment|background-color|'
r'background-image|background-position|background-repeat|'
r'background|border-bottom-color|border-bottom-style|'
r'border-bottom-width|border-left-color|border-left-style|'
r'border-left-width|border-right|border-right-color|'
r'border-right-style|border-right-width|border-top-color|'
r'border-top-style|border-top-width|border-bottom|'
r'border-collapse|border-left|border-width|border-color|'
r'border-spacing|border-style|border-top|border|caption-side|'
r'clear|clip|color|content|counter-increment|counter-reset|'
r'cue-after|cue-before|cue|cursor|direction|display|'
r'elevation|empty-cells|float|font-family|font-size|'
r'font-size-adjust|font-stretch|font-style|font-variant|'
r'font-weight|font|height|letter-spacing|line-height|'
r'list-style-type|list-style-image|list-style-position|'
r'list-style|margin-bottom|margin-left|margin-right|'
r'margin-top|margin|marker-offset|marks|max-height|max-width|'
r'min-height|min-width|opacity|orphans|outline|outline-color|'
r'outline-style|outline-width|overflow(?:-x|-y)?|padding-bottom|'
r'padding-left|padding-right|padding-top|padding|page|'
r'page-break-after|page-break-before|page-break-inside|'
r'pause-after|pause-before|pause|pitch|pitch-range|'
r'play-during|position|quotes|richness|right|size|'
r'speak-header|speak-numeral|speak-punctuation|speak|'
r'speech-rate|stress|table-layout|text-align|text-decoration|'
r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
r'vertical-align|visibility|voice-family|volume|white-space|'
r'widows|width|word-spacing|z-index|bottom|left|'
r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
r'behind|below|bidi-override|blink|block|bold|bolder|both|'
r'capitalize|center-left|center-right|center|circle|'
r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
r'inherit|inline-table|inline|inset|inside|invert|italic|'
r'justify|katakana-iroha|katakana|landscape|larger|large|'
r'left-side|leftwards|level|lighter|line-through|list-item|'
r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
r'lower|low|medium|message-box|middle|mix|monospace|'
r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
r'open-quote|outset|outside|overline|pointer|portrait|px|'
r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
r'rightwards|s-resize|sans-serif|scroll|se-resize|'
r'semi-condensed|semi-expanded|separate|serif|show|silent|'
r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
r'spell-out|square|static|status-bar|super|sw-resize|'
r'table-caption|table-cell|table-column|table-column-group|'
r'table-footer-group|table-header-group|table-row|'
r'table-row-group|text|text-bottom|text-top|thick|thin|'
r'transparent|ultra-condensed|ultra-expanded|underline|'
r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Keyword),
(r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
r'blueviolet|peachpuff)\b', Name.Builtin),
(r'\!important', Comment.Preproc),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\#[a-zA-Z0-9]{1,6}', Number),
(r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|\%|pt|pc|in|mm|cm|ex|s)\b', Number),
(r'-?[0-9]+', Number),
(r'[~\^\*!%&<>\|+=@:,./?-]+', Operator),
(r'[\[\]();]+', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name)
]
}
class ObjectiveJLexer(RegexLexer):
"""
For Objective-J source code with preprocessor directives.
*New in Pygments 1.3.*
"""
name = 'Objective-J'
aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
filenames = ['*.j']
mimetypes = ['text/x-objective-j']
#: optional Comment or Whitespace
_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
include('whitespace'),
# function definition
(r'^(' + _ws + r'[\+-]' + _ws + r')([\(a-zA-Z_].*?[^\(])(' + _ws + '{)',
bygroups(using(this), using(this, state='function_signature'),
using(this))),
# class definition
(r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
'classname'),
(r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
'forward_classname'),
(r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),
include('statements'),
('[{\(\)}]', Punctuation),
(';', Punctuation),
],
'whitespace': [
(r'(@import)(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(@import)(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)("(?:\\\\|\\"|[^"])*")',
bygroups(Comment.Preproc, Text, String.Double)),
(r'(#(?:include|import))(\s+)(<(?:\\\\|\\>|[^>])*>)',
bygroups(Comment.Preproc, Text, String.Double)),
(r'#if\s+0', Comment.Preproc, 'if0'),
(r'#', Comment.Preproc, 'macro'),
(r'\n', Text),
(r'\s+', Text),
(r'\\\n', Text), # line continuation
(r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'<!--', Comment),
],
'slashstartsregex': [
include('whitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
(r'', Text, '#pop'),
],
'badregex': [
(r'\n', Text, '#pop'),
],
'statements': [
(r'(L|@)?"', String, 'string'),
(r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
String.Char),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
(r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|'
r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),
(r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(@selector|@private|@protected|@public|@encode|'
r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),
(r'(int|long|float|short|double|char|unsigned|signed|void|'
r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
Keyword.Type),
(r'(self|super)\b', Name.Builtin),
(r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
r'SQRT2)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'([$a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r')(?=\()',
bygroups(Name.Function, using(this))),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'classname' : [
# interface definition that inherits
(r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r':' + _ws +
r')([a-zA-Z_][a-zA-Z0-9_]*)?',
bygroups(Name.Class, using(this), Name.Class), '#pop'),
# interface definition for a category
(r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r'\()([a-zA-Z_][a-zA-Z0-9_]*)(\))',
bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
# simple interface / implementation
(r'([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop'),
],
'forward_classname' : [
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
bygroups(Name.Class, Text), '#push'),
(r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
bygroups(Name.Class, Text), '#pop'),
],
'function_signature': [
include('whitespace'),
# start of a selector w/ parameters
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_][a-zA-Z0-9_]+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), 'function_parameters'),
# no-param function
(r'(\(' + _ws + r')' # open paren
r'([a-zA-Z_][a-zA-Z0-9_]+)' # return type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_][a-zA-Z0-9_]+)', # function name
bygroups(using(this), Keyword.Type, using(this),
Name.Function), "#pop"),
# no return type given, start of a selector w/ parameters
(r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
bygroups (Name.Function), 'function_parameters'),
# no return type given, no-param function
(r'([$a-zA-Z_][a-zA-Z0-9_]+)', # function name
bygroups(Name.Function), "#pop"),
('', Text, '#pop'),
],
'function_parameters': [
include('whitespace'),
# parameters
(r'(\(' + _ws + ')' # open paren
r'([^\)]+)' # type
r'(' + _ws + r'\)' + _ws + r')' # close paren
r'([$a-zA-Z_][a-zA-Z0-9_]+)', # param name
bygroups(using(this), Keyword.Type, using(this), Text)),
# one piece of a selector name
(r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
Name.Function),
# smallest possible selector piece
(r'(:)', Name.Function),
# var args
(r'(,' + _ws + r'\.\.\.)', using(this)),
# param name
(r'([$a-zA-Z_][a-zA-Z0-9_]+)', Text),
],
'expression' : [
(r'([$a-zA-Z_][a-zA-Z0-9_]*)(\()', bygroups(Name.Function,
Punctuation)),
(r'(\))', Punctuation, "#pop"),
],
'string': [
(r'"', String, '#pop'),
(r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
(r'[^\\"\n]+', String), # all other characters
(r'\\\n', String), # line continuation
(r'\\', String), # stray backslash
],
'macro': [
(r'[^/\n]+', Comment.Preproc),
(r'/[*](.|\n)*?[*]/', Comment.Multiline),
(r'//.*?\n', Comment.Single, '#pop'),
(r'/', Comment.Preproc),
(r'(?<=\\)\n', Comment.Preproc),
(r'\n', Comment.Preproc, '#pop'),
],
'if0': [
(r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
(r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
(r'.*?\n', Comment),
]
}
def analyse_text(text):
if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
# special directive found in most Objective-J files
return True
return False
class HtmlLexer(RegexLexer):
"""
For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS are highlighted
by the appropriate lexer.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
(r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
# note: this allows tag names not used in HTML like <x:with-dash>,
# this is to support yet-unknown template engines and the like
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
(r'[a-zA-Z0-9_:-]+', Name.Attribute),
(r'/?\s*>', Name.Tag, '#pop'),
],
'script-content': [
(r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
],
'style-content': [
(r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
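# Sketch (not part of the original source): analyse_text feeds pygments'
# lexer guessing, so a document with a matching doctype should resolve to
# this lexer.
#
#     from pygments.lexers import guess_lexer
#     guess_lexer('<!DOCTYPE html>\n<html><body></body></html>')  # -> HtmlLexer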
class PhpLexer(RegexLexer):
"""
For `PHP <http://www.php.net/>`_ source code.
For PHP embedded in HTML, use the `HtmlPhpLexer`.
Additional options accepted:
`startinline`
If given and ``True`` the lexer starts highlighting with
php code (i.e.: no starting ``<?php`` required). The default
is ``False``.
`funcnamehighlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabledmodules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted
except the special ``'unknown'`` module that includes functions
that are known to php but are undocumented.
To get a list of allowed modules have a look into the
`_phpbuiltins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._phpbuiltins import MODULES
>>> MODULES.keys()
['PHP Options/Info', 'Zip', 'dba', ...]
In fact the names of those modules match the module names from
the php documentation.
"""
name = 'PHP'
aliases = ['php', 'php3', 'php4', 'php5']
filenames = ['*.php', '*.php[345]', '*.inc']
mimetypes = ['text/x-php']
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'<\?(php)?', Comment.Preproc, 'php'),
(r'[^<]+', Other),
(r'<', Other)
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
(r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
# put the empty comment here, it is otherwise seen as
# the start of a docstring
(r'/\*\*/', Comment.Multiline),
(r'/\*\*.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
(r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Operator, Text, Name.Attribute)),
(r'[~!%^&*+=|:.<>/?@-]+', Operator),
(r'[\[\]{}();,]+', Punctuation),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
(r'(function)(\s+)(&?)(\s*)',
bygroups(Keyword, Text, Operator, Text), 'functionname'),
(r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Constant)),
(r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
r'FALSE|print|for|require|continue|foreach|require_once|'
r'declare|return|default|static|do|switch|die|stdClass|'
r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
r'catch|throw|this|use|namespace|trait)\b', Keyword),
(r'(true|false|null)\b', Keyword.Constant),
(r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
(r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
(r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other),
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0[0-7]+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
],
'classname': [
(r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop')
],
'functionname': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'[^{$"\\]+', String.Double),
(r'\\([nrt\"$\\]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape),
(r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
String.Interpol),
(r'(\{\$\{)(.*?)(\}\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\{)(\$.*?)(\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\$\{)(\S+)(\})',
bygroups(String.Interpol, Name.Variable, String.Interpol)),
(r'[${\\]+', String.Double)
],
}
def __init__(self, **options):
self.funcnamehighlighting = get_bool_opt(
options, 'funcnamehighlighting', True)
self.disabledmodules = get_list_opt(
options, 'disabledmodules', ['unknown'])
self.startinline = get_bool_opt(options, 'startinline', False)
# private option argument for the lexer itself
if '_startinline' in options:
self.startinline = options.pop('_startinline')
# collect activated functions in a set
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._phpbuiltins import MODULES
for key, value in MODULES.iteritems():
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
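# Post-process the token stream: identifiers tokenized as Name.Other
# that match one of the collected builtin names are re-tagged as
# Name.Builtin, so funcnamehighlighting takes effect without bloating
# the regexes above.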
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.startinline:
stack.append('php')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Other:
if value in self._functions:
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if re.search(r'<\?(?!xml)', text):
rv += 0.3
if '?>' in text:
rv += 0.1
return rv
class DtdLexer(RegexLexer):
"""
A lexer for DTDs (Document Type Definitions).
*New in Pygments 1.5.*
"""
flags = re.MULTILINE | re.DOTALL
name = 'DTD'
aliases = ['dtd']
filenames = ['*.dtd']
mimetypes = ['application/xml-dtd']
tokens = {
'root': [
include('common'),
(r'(<!ELEMENT)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'element'),
(r'(<!ATTLIST)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'attlist'),
(r'(<!ENTITY)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Entity), 'entity'),
(r'(<!NOTATION)(\s+)(\S+)',
bygroups(Keyword, Text, Name.Tag), 'notation'),
(r'(<!\[)([^\[\s]+)(\s*)(\[)', # conditional sections
bygroups(Keyword, Name.Entity, Text, Keyword)),
(r'(<!DOCTYPE)(\s+)([^>\s]+)',
bygroups(Keyword, Text, Name.Tag)),
(r'PUBLIC|SYSTEM', Keyword.Constant),
(r'[\[\]>]', Keyword),
],
'common': [
(r'\s+', Text),
(r'(%|&)[^;]*;', Name.Entity),
('<!--', Comment, 'comment'),
(r'[(|)*,?+]', Operator),
(r'"[^"]*"', String.Double),
(r'\'[^\']*\'', String.Single),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'element': [
include('common'),
(r'EMPTY|ANY|#PCDATA', Keyword.Constant),
(r'[^>\s\|()?+*,]+', Name.Tag),
(r'>', Keyword, '#pop'),
],
'attlist': [
include('common'),
(r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
Keyword.Constant),
(r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
(r'xml:space|xml:lang', Keyword.Reserved),
(r'[^>\s\|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
'entity': [
include('common'),
(r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
(r'[^>\s\|()?+*,]+', Name.Entity),
(r'>', Keyword, '#pop'),
],
'notation': [
include('common'),
(r'SYSTEM|PUBLIC', Keyword.Constant),
(r'[^>\s\|()?+*,]+', Name.Attribute),
(r'>', Keyword, '#pop'),
],
}
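# For example, '<!ELEMENT note (to, from, body)>' enters the 'element'
# state at '<!ELEMENT', tags 'note' (and the child names) as Name.Tag,
# while the parentheses and commas of the content model are matched by
# the shared 'common' rules.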
def analyse_text(text):
if not looks_like_xml(text) and \
('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
return 0.8
class XmlLexer(RegexLexer):
"""
Generic lexer for XML (eXtensible Markup Language).
"""
flags = re.MULTILINE | re.DOTALL | re.UNICODE
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
'*.wsdl', '*.wsf']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[\w:.-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.5
class XsltLexer(XmlLexer):
'''
A lexer for XSLT.
*New in Pygments 0.10.*
'''
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt', '*.xpl'] # xpl is XProc
mimetypes = ['application/xsl+xml', 'application/xslt+xml']
EXTRA_KEYWORDS = set([
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
])
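# Tags such as <xsl:template> are first tokenized by XmlLexer as
# Name.Tag; the override below re-tags them as Keyword whenever the
# local name is one of the XSLT instruction elements listed above.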
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class MxmlLexer(RegexLexer):
"""
For MXML markup.
Nested AS3 in <script> tags is highlighted by the appropriate lexer.
*New in Pygments 1.1.*
"""
flags = re.MULTILINE | re.DOTALL
name = 'MXML'
aliases = ['mxml']
filenames = ['*.mxml']
mimetypes = ['text/xml', 'application/xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
bygroups(String, using(ActionScript3Lexer), String)),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
class HaxeLexer(ExtendedRegexLexer):
"""
For Haxe source code (http://haxe.org/).
*New in Pygments 1.3.*
"""
name = 'Haxe'
aliases = ['hx', 'Haxe', 'haxe', 'haXe', 'hxsl']
filenames = ['*.hx', '*.hxsl']
mimetypes = ['text/haxe', 'text/x-haxe', 'text/x-hx']
# keywords extracted from lexer.mll in the haxe compiler source
keyword = (r'(?:function|class|static|var|if|else|while|do|for|'
r'break|return|continue|extends|implements|import|'
r'switch|case|default|public|private|try|untyped|'
r'catch|new|this|throw|extern|enum|in|interface|'
r'cast|override|dynamic|typedef|package|'
r'inline|using|null|true|false|abstract)\b')
# idtype in lexer.mll
typeid = r'_*[A-Z][_a-zA-Z0-9]*'
# combined ident and dollar and idtype
ident = r'(?:_*[a-z][_a-zA-Z0-9]*|_+[0-9][_a-zA-Z0-9]*|' + typeid + \
'|_+|\$[_a-zA-Z0-9]+)'
binop = (r'(?:%=|&=|\|=|\^=|\+=|\-=|\*=|/=|<<=|>\s*>\s*=|>\s*>\s*>\s*=|==|'
r'!=|<=|>\s*=|&&|\|\||<<|>>>|>\s*>|\.\.\.|<|>|%|&|\||\^|\+|\*|'
r'/|\-|=>|=)')
# ident except keywords
ident_no_keyword = r'(?!' + keyword + ')' + ident
flags = re.DOTALL | re.MULTILINE
preproc_stack = []
def preproc_callback(self, match, ctx):
proc = match.group(2)
if proc == 'if':
# store the current stack
self.preproc_stack.append(ctx.stack[:])
elif proc in ['else', 'elseif']:
# restore the stack back to right before #if
if self.preproc_stack: ctx.stack = self.preproc_stack[-1][:]
elif proc == 'end':
# remove the saved stack of previous #if
if self.preproc_stack: self.preproc_stack.pop()
# #if and #elseif should be followed by an expr
if proc in ['if', 'elseif']:
ctx.stack.append('preproc-expr')
# #error can optionally be followed by the error msg
if proc in ['error']:
ctx.stack.append('preproc-error')
yield match.start(), Comment.Preproc, '#' + proc
ctx.pos = match.end()
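# For example, given Haxe source like
#
#   #if flash
#   var x = 1;
#   #elseif js
#   var x = 2;
#   #end
#
# the callback above snapshots the state stack at '#if', restores the
# snapshot at '#elseif'/'#else', and discards it at '#end', so every
# branch is tokenized from the same starting state.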
tokens = {
'root': [
include('spaces'),
include('meta'),
(r'(?:package)\b', Keyword.Namespace, ('semicolon', 'package')),
(r'(?:import)\b', Keyword.Namespace, ('semicolon', 'import')),
(r'(?:using)\b', Keyword.Namespace, ('semicolon', 'using')),
(r'(?:extern|private)\b', Keyword.Declaration),
(r'(?:abstract)\b', Keyword.Declaration, 'abstract'),
(r'(?:class|interface)\b', Keyword.Declaration, 'class'),
(r'(?:enum)\b', Keyword.Declaration, 'enum'),
(r'(?:typedef)\b', Keyword.Declaration, 'typedef'),
# top-level expression
# although top-level expressions are not supported in Haxe, it is
# common to write them in web pages; the positive lookahead here
# prevents an infinite loop at EOF
(r'(?=.)', Text, 'expr-statement'),
],
# space/tab/comment/preproc
'spaces': [
(r'\s+', Text),
(r'//[^\n\r]*', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'(#)(if|elseif|else|end|error)\b', preproc_callback),
],
'string-single-interpol': [
(r'\$\{', String.Interpol, ('string-interpol-close', 'expr')),
(r'\$\$', String.Escape),
(r'\$(?=' + ident + ')', String.Interpol, 'ident'),
include('string-single'),
],
'string-single': [
(r"'", String.Single, '#pop'),
(r'\\.', String.Escape),
(r'.', String.Single),
],
'string-double': [
(r'"', String.Double, '#pop'),
(r'\\.', String.Escape),
(r'.', String.Double),
],
'string-interpol-close': [
(r'\$'+ident, String.Interpol),
(r'\}', String.Interpol, '#pop'),
],
'package': [
include('spaces'),
(ident, Name.Namespace),
(r'\.', Punctuation, 'import-ident'),
(r'', Text, '#pop'),
],
'import': [
include('spaces'),
(ident, Name.Namespace),
(r'\*', Keyword), # wildcard import
(r'\.', Punctuation, 'import-ident'),
(r'in', Keyword.Namespace, 'ident'),
(r'', Text, '#pop'),
],
'import-ident': [
include('spaces'),
(r'\*', Keyword, '#pop'), # wildcard import
(ident, Name.Namespace, '#pop'),
],
'using': [
include('spaces'),
(ident, Name.Namespace),
(r'\.', Punctuation, 'import-ident'),
(r'', Text, '#pop'),
],
'preproc-error': [
(r'\s+', Comment.Preproc),
(r"'", String.Single, ('#pop', 'string-single')),
(r'"', String.Double, ('#pop', 'string-double')),
(r'', Text, '#pop'),
],
'preproc-expr': [
(r'\s+', Comment.Preproc),
(r'\!', Comment.Preproc),
(r'\(', Comment.Preproc, ('#pop', 'preproc-parenthesis')),
(ident, Comment.Preproc, '#pop'),
(r"'", String.Single, ('#pop', 'string-single')),
(r'"', String.Double, ('#pop', 'string-double')),
],
'preproc-parenthesis': [
(r'\s+', Comment.Preproc),
(r'\)', Comment.Preproc, '#pop'),
('', Text, 'preproc-expr-in-parenthesis'),
],
'preproc-expr-chain': [
(r'\s+', Comment.Preproc),
(binop, Comment.Preproc, ('#pop', 'preproc-expr-in-parenthesis')),
(r'', Text, '#pop'),
],
# same as 'preproc-expr' but able to chain 'preproc-expr-chain'
'preproc-expr-in-parenthesis': [
(r'\s+', Comment.Preproc),
(r'\!', Comment.Preproc),
(r'\(', Comment.Preproc,
('#pop', 'preproc-expr-chain', 'preproc-parenthesis')),
(ident, Comment.Preproc, ('#pop', 'preproc-expr-chain')),
(r"'", String.Single,
('#pop', 'preproc-expr-chain', 'string-single')),
(r'"', String.Double,
('#pop', 'preproc-expr-chain', 'string-double')),
],
'abstract' : [
include('spaces'),
(r'', Text, ('#pop', 'abstract-body', 'abstract-relation',
'abstract-opaque', 'type-param-constraint', 'type-name')),
],
'abstract-body' : [
include('spaces'),
(r'\{', Punctuation, ('#pop', 'class-body')),
],
'abstract-opaque' : [
include('spaces'),
(r'\(', Punctuation, ('#pop', 'parenthesis-close', 'type')),
(r'', Text, '#pop'),
],
'abstract-relation': [
include('spaces'),
(r'(?:to|from)', Keyword.Declaration, 'type'),
(r',', Punctuation),
(r'', Text, '#pop'),
],
'meta': [
include('spaces'),
(r'@', Name.Decorator, ('meta-body', 'meta-ident', 'meta-colon')),
],
# optional colon
'meta-colon': [
include('spaces'),
(r':', Name.Decorator, '#pop'),
(r'', Text, '#pop'),
],
# same as 'ident' but sets the token to Name.Decorator instead of Name
'meta-ident': [
include('spaces'),
(ident, Name.Decorator, '#pop'),
],
'meta-body': [
include('spaces'),
(r'\(', Name.Decorator, ('#pop', 'meta-call')),
(r'', Text, '#pop'),
],
'meta-call': [
include('spaces'),
(r'\)', Name.Decorator, '#pop'),
(r'', Text, ('#pop', 'meta-call-sep', 'expr')),
],
'meta-call-sep': [
include('spaces'),
(r'\)', Name.Decorator, '#pop'),
(r',', Punctuation, ('#pop', 'meta-call')),
],
'typedef': [
include('spaces'),
(r'', Text, ('#pop', 'typedef-body', 'type-param-constraint',
'type-name')),
],
'typedef-body': [
include('spaces'),
(r'=', Operator, ('#pop', 'optional-semicolon', 'type')),
],
'enum': [
include('spaces'),
(r'', Text, ('#pop', 'enum-body', 'bracket-open',
'type-param-constraint', 'type-name')),
],
'enum-body': [
include('spaces'),
include('meta'),
(r'\}', Punctuation, '#pop'),
(ident_no_keyword, Name, ('enum-member', 'type-param-constraint')),
],
'enum-member': [
include('spaces'),
(r'\(', Punctuation,
('#pop', 'semicolon', 'flag', 'function-param')),
(r'', Punctuation, ('#pop', 'semicolon', 'flag')),
],
'class': [
include('spaces'),
(r'', Text, ('#pop', 'class-body', 'bracket-open', 'extends',
'type-param-constraint', 'type-name')),
],
'extends': [
include('spaces'),
(r'(?:extends|implements)\b', Keyword.Declaration, 'type'),
(r',', Punctuation), # the comma is made optional here, since haxe2
# requires the comma but haxe3 does not allow it
(r'', Text, '#pop'),
],
'bracket-open': [
include('spaces'),
(r'\{', Punctuation, '#pop'),
],
'bracket-close': [
include('spaces'),
(r'\}', Punctuation, '#pop'),
],
'class-body': [
include('spaces'),
include('meta'),
(r'\}', Punctuation, '#pop'),
(r'(?:static|public|private|override|dynamic|inline|macro)\b',
Keyword.Declaration),
(r'', Text, 'class-member'),
],
'class-member': [
include('spaces'),
(r'(var)\b', Keyword.Declaration,
('#pop', 'optional-semicolon', 'prop')),
(r'(function)\b', Keyword.Declaration,
('#pop', 'optional-semicolon', 'class-method')),
],
# local function, anonymous or not
'function-local': [
include('spaces'),
(r'(' + ident_no_keyword + ')?', Name.Function,
('#pop', 'expr', 'flag', 'function-param',
'parenthesis-open', 'type-param-constraint')),
],
'optional-expr': [
include('spaces'),
include('expr'),
(r'', Text, '#pop'),
],
'class-method': [
include('spaces'),
(ident, Name.Function, ('#pop', 'optional-expr', 'flag',
'function-param', 'parenthesis-open',
'type-param-constraint')),
],
# function arguments
'function-param': [
include('spaces'),
(r'\)', Punctuation, '#pop'),
(r'\?', Punctuation),
(ident_no_keyword, Name,
('#pop', 'function-param-sep', 'assign', 'flag')),
],
'function-param-sep': [
include('spaces'),
(r'\)', Punctuation, '#pop'),
(r',', Punctuation, ('#pop', 'function-param')),
],
# class property
# e.g. var prop(default, null):String;
'prop': [
include('spaces'),
(ident_no_keyword, Name, ('#pop', 'assign', 'flag', 'prop-get-set')),
],
'prop-get-set': [
include('spaces'),
(r'\(', Punctuation, ('#pop', 'parenthesis-close',
'prop-get-set-opt', 'comma', 'prop-get-set-opt')),
(r'', Text, '#pop'),
],
'prop-get-set-opt': [
include('spaces'),
(r'(?:default|null|never|dynamic|get|set)\b', Keyword, '#pop'),
(ident_no_keyword, Text, '#pop'), # custom getter/setter
],
'expr-statement': [
include('spaces'),
# make the semicolon optional here, to avoid having to check whether
# the last token was a closing bracket or not
(r'', Text, ('#pop', 'optional-semicolon', 'expr')),
],
'expr': [
include('spaces'),
(r'@', Name.Decorator, ('#pop', 'optional-expr', 'meta-body',
'meta-ident', 'meta-colon')),
(r'(?:\+\+|\-\-|~(?!/)|!|\-)', Operator),
(r'\(', Punctuation, ('#pop', 'expr-chain', 'parenthesis')),
(r'(?:inline)\b', Keyword.Declaration),
(r'(?:function)\b', Keyword.Declaration, ('#pop', 'expr-chain',
'function-local')),
(r'\{', Punctuation, ('#pop', 'expr-chain', 'bracket')),
(r'(?:true|false|null)\b', Keyword.Constant, ('#pop', 'expr-chain')),
(r'(?:this)\b', Keyword, ('#pop', 'expr-chain')),
(r'(?:cast)\b', Keyword, ('#pop', 'expr-chain', 'cast')),
(r'(?:try)\b', Keyword, ('#pop', 'catch', 'expr')),
(r'(?:var)\b', Keyword.Declaration, ('#pop', 'var')),
(r'(?:new)\b', Keyword, ('#pop', 'expr-chain', 'new')),
(r'(?:switch)\b', Keyword, ('#pop', 'switch')),
(r'(?:if)\b', Keyword, ('#pop', 'if')),
(r'(?:do)\b', Keyword, ('#pop', 'do')),
(r'(?:while)\b', Keyword, ('#pop', 'while')),
(r'(?:for)\b', Keyword, ('#pop', 'for')),
(r'(?:untyped|throw)\b', Keyword),
(r'(?:return)\b', Keyword, ('#pop', 'optional-expr')),
(r'(?:macro)\b', Keyword, ('#pop', 'macro')),
(r'(?:continue|break)\b', Keyword, '#pop'),
(r'(?:\$\s*[a-z]\b|\$(?!'+ident+'))', Name, ('#pop', 'dollar')),
(ident_no_keyword, Name, ('#pop', 'expr-chain')),
# Float
(r'\.[0-9]+', Number.Float, ('#pop', 'expr-chain')),
(r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
(r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, ('#pop', 'expr-chain')),
(r'[0-9]+\.[0-9]+', Number.Float, ('#pop', 'expr-chain')),
(r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, ('#pop', 'expr-chain')),
# Int
(r'0x[0-9a-fA-F]+', Number.Hex, ('#pop', 'expr-chain')),
(r'[0-9]+', Number.Integer, ('#pop', 'expr-chain')),
# String
(r"'", String.Single, ('#pop', 'expr-chain', 'string-single-interpol')),
(r'"', String.Double, ('#pop', 'expr-chain', 'string-double')),
# EReg
(r'~/(\\\\|\\/|[^/\n])*/[gimsu]*', String.Regex, ('#pop', 'expr-chain')),
# Array
(r'\[', Punctuation, ('#pop', 'expr-chain', 'array-decl')),
],
'expr-chain': [
include('spaces'),
(r'(?:\+\+|\-\-)', Operator),
(binop, Operator, ('#pop', 'expr')),
(r'(?:in)\b', Keyword, ('#pop', 'expr')),
(r'\?', Operator, ('#pop', 'expr', 'ternary', 'expr')),
(r'(\.)(' + ident_no_keyword + ')', bygroups(Punctuation, Name)),
(r'\[', Punctuation, 'array-access'),
(r'\(', Punctuation, 'call'),
(r'', Text, '#pop'),
],
# macro reification
'macro': [
include('spaces'),
(r':', Punctuation, ('#pop', 'type')),
(r'', Text, ('#pop', 'expr')),
],
# cast can be written as "cast expr" or "cast(expr, type)"
'cast': [
include('spaces'),
(r'\(', Punctuation, ('#pop', 'parenthesis-close',
'cast-type', 'expr')),
(r'', Text, ('#pop', 'expr')),
],
# optionally give a type as the 2nd argument of cast()
'cast-type': [
include('spaces'),
(r',', Punctuation, ('#pop', 'type')),
(r'', Text, '#pop'),
],
'catch': [
include('spaces'),
(r'(?:catch)\b', Keyword, ('expr', 'function-param',
'parenthesis-open')),
(r'', Text, '#pop'),
],
# do-while loop
'do': [
include('spaces'),
(r'', Punctuation, ('#pop', 'do-while', 'expr')),
],
# the while after do
'do-while': [
include('spaces'),
(r'(?:while)\b', Keyword, ('#pop', 'parenthesis',
'parenthesis-open')),
],
'while': [
include('spaces'),
(r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')),
],
'for': [
include('spaces'),
(r'\(', Punctuation, ('#pop', 'expr', 'parenthesis')),
],
'if': [
include('spaces'),
(r'\(', Punctuation, ('#pop', 'else', 'optional-semicolon', 'expr',
'parenthesis')),
],
'else': [
include('spaces'),
(r'(?:else)\b', Keyword, ('#pop', 'expr')),
(r'', Text, '#pop'),
],
'switch': [
include('spaces'),
(r'', Text, ('#pop', 'switch-body', 'bracket-open', 'expr')),
],
'switch-body': [
include('spaces'),
(r'(?:case|default)\b', Keyword, ('case-block', 'case')),
(r'\}', Punctuation, '#pop'),
],
'case': [
include('spaces'),
(r':', Punctuation, '#pop'),
(r'', Text, ('#pop', 'case-sep', 'case-guard', 'expr')),
],
'case-sep': [
include('spaces'),
(r':', Punctuation, '#pop'),
(r',', Punctuation, ('#pop', 'case')),
],
'case-guard': [
include('spaces'),
(r'(?:if)\b', Keyword, ('#pop', 'parenthesis', 'parenthesis-open')),
(r'', Text, '#pop'),
],
# optionally multiple exprs under a case
'case-block': [
include('spaces'),
(r'(?!(?:case|default)\b|\})', Keyword, 'expr-statement'),
(r'', Text, '#pop'),
],
'new': [
include('spaces'),
(r'', Text, ('#pop', 'call', 'parenthesis-open', 'type')),
],
'array-decl': [
include('spaces'),
(r'\]', Punctuation, '#pop'),
(r'', Text, ('#pop', 'array-decl-sep', 'expr')),
],
'array-decl-sep': [
include('spaces'),
(r'\]', Punctuation, '#pop'),
(r',', Punctuation, ('#pop', 'array-decl')),
],
'array-access': [
include('spaces'),
(r'', Text, ('#pop', 'array-access-close', 'expr')),
],
'array-access-close': [
include('spaces'),
(r'\]', Punctuation, '#pop'),
],
'comma': [
include('spaces'),
(r',', Punctuation, '#pop'),
],
'colon': [
include('spaces'),
(r':', Punctuation, '#pop'),
],
'semicolon': [
include('spaces'),
(r';', Punctuation, '#pop'),
],
'optional-semicolon': [
include('spaces'),
(r';', Punctuation, '#pop'),
(r'', Text, '#pop'),
],
# identifier that CAN be a Haxe keyword
'ident': [
include('spaces'),
(ident, Name, '#pop'),
],
'dollar': [
include('spaces'),
(r'\{', Keyword, ('#pop', 'bracket-close', 'expr')),
(r'', Text, ('#pop', 'expr-chain')),
],
'type-name': [
include('spaces'),
(typeid, Name, '#pop'),
],
'type-full-name': [
include('spaces'),
(r'\.', Punctuation, 'ident'),
(r'', Text, '#pop'),
],
'type': [
include('spaces'),
(r'\?', Punctuation),
(ident, Name, ('#pop', 'type-check', 'type-full-name')),
(r'\{', Punctuation, ('#pop', 'type-check', 'type-struct')),
(r'\(', Punctuation, ('#pop', 'type-check', 'type-parenthesis')),
],
'type-parenthesis': [
include('spaces'),
(r'', Text, ('#pop', 'parenthesis-close', 'type')),
],
'type-check': [
include('spaces'),
(r'->', Punctuation, ('#pop', 'type')),
(r'<(?!=)', Punctuation, 'type-param'),
(r'', Text, '#pop'),
],
'type-struct': [
include('spaces'),
(r'\}', Punctuation, '#pop'),
(r'\?', Punctuation),
(r'>', Punctuation, ('comma', 'type')),
(ident_no_keyword, Name, ('#pop', 'type-struct-sep', 'type', 'colon')),
include('class-body'),
],
'type-struct-sep': [
include('spaces'),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation, ('#pop', 'type-struct')),
],
# type-param can be a normal type or a constant literal...
'type-param-type': [
# Float
(r'\.[0-9]+', Number.Float, '#pop'),
(r'[0-9]+[eE][\+\-]?[0-9]+', Number.Float, '#pop'),
(r'[0-9]+\.[0-9]*[eE][\+\-]?[0-9]+', Number.Float, '#pop'),
(r'[0-9]+\.[0-9]+', Number.Float, '#pop'),
(r'[0-9]+\.(?!' + ident + '|\.\.)', Number.Float, '#pop'),
# Int
(r'0x[0-9a-fA-F]+', Number.Hex, '#pop'),
(r'[0-9]+', Number.Integer, '#pop'),
# String
(r"'", String.Single, ('#pop', 'string-single')),
(r'"', String.Double, ('#pop', 'string-double')),
# EReg
(r'~/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex, '#pop'),
# Array
(r'\[', Operator, ('#pop', 'array-decl')),
include('type'),
],
# type-param part of a type
# i.e. the <A,B> part in Map<A,B>
'type-param': [
include('spaces'),
(r'', Text, ('#pop', 'type-param-sep', 'type-param-type')),
],
'type-param-sep': [
include('spaces'),
(r'>', Punctuation, '#pop'),
(r',', Punctuation, ('#pop', 'type-param')),
],
# optional type-param that may include a constraint
# i.e. <T:Constraint, T2:(ConstraintA,ConstraintB)>
'type-param-constraint': [
include('spaces'),
(r'<(?!=)', Punctuation, ('#pop', 'type-param-constraint-sep',
'type-param-constraint-flag', 'type-name')),
(r'', Text, '#pop'),
],
'type-param-constraint-sep': [
include('spaces'),
(r'>', Punctuation, '#pop'),
(r',', Punctuation, ('#pop', 'type-param-constraint-sep',
'type-param-constraint-flag', 'type-name')),
],
# the optional constraint inside type-param
'type-param-constraint-flag': [
include('spaces'),
(r':', Punctuation, ('#pop', 'type-param-constraint-flag-type')),
(r'', Text, '#pop'),
],
'type-param-constraint-flag-type': [
include('spaces'),
(r'\(', Punctuation, ('#pop', 'type-param-constraint-flag-type-sep',
'type')),
(r'', Text, ('#pop', 'type')),
],
'type-param-constraint-flag-type-sep': [
include('spaces'),
(r'\)', Punctuation, '#pop'),
(r',', Punctuation, 'type'),
],
# a parenthesized expr that contains exactly one expr
'parenthesis': [
include('spaces'),
(r'', Text, ('#pop', 'parenthesis-close', 'expr')),
],
'parenthesis-open': [
include('spaces'),
(r'\(', Punctuation, '#pop'),
],
'parenthesis-close': [
include('spaces'),
(r'\)', Punctuation, '#pop'),
],
'var': [
include('spaces'),
(ident_no_keyword, Text, ('#pop', 'var-sep', 'assign', 'flag')),
],
# optionally more var decls.
'var-sep': [
include('spaces'),
(r',', Punctuation, ('#pop', 'var')),
(r'', Text, '#pop'),
],
# optional assignment
'assign': [
include('spaces'),
(r'=', Operator, ('#pop', 'expr')),
(r'', Text, '#pop'),
],
# optional type flag
'flag': [
include('spaces'),
(r':', Punctuation, ('#pop', 'type')),
(r'', Text, '#pop'),
],
# colon as part of a ternary operator (?:)
'ternary': [
include('spaces'),
(r':', Operator, '#pop'),
],
# function call
'call': [
include('spaces'),
(r'\)', Punctuation, '#pop'),
(r'', Text, ('#pop', 'call-sep', 'expr')),
],
# after a call param
'call-sep': [
include('spaces'),
(r'\)', Punctuation, '#pop'),
(r',', Punctuation, ('#pop', 'call')),
],
# bracket can be block or object
'bracket': [
include('spaces'),
(r'(?!(?:\$\s*[a-z]\b|\$(?!'+ident+')))' + ident_no_keyword, Name,
('#pop', 'bracket-check')),
(r"'", String.Single, ('#pop', 'bracket-check', 'string-single')),
(r'"', String.Double, ('#pop', 'bracket-check', 'string-double')),
(r'', Text, ('#pop', 'block')),
],
'bracket-check': [
include('spaces'),
(r':', Punctuation, ('#pop', 'object-sep', 'expr')), # is object
(r'', Text, ('#pop', 'block', 'optional-semicolon', 'expr-chain')), # is block
],
# code block
'block': [
include('spaces'),
(r'\}', Punctuation, '#pop'),
(r'', Text, 'expr-statement'),
],
# an object of key-value pairs
'object': [
include('spaces'),
(r'\}', Punctuation, '#pop'),
(r'', Text, ('#pop', 'object-sep', 'expr', 'colon', 'ident-or-string'))
],
# a key of an object
'ident-or-string': [
include('spaces'),
(ident_no_keyword, Name, '#pop'),
(r"'", String.Single, ('#pop', 'string-single')),
(r'"', String.Double, ('#pop', 'string-double')),
],
# after a key-value pair in object
'object-sep': [
include('spaces'),
(r'\}', Punctuation, '#pop'),
(r',', Punctuation, ('#pop', 'object')),
],
}
def analyse_text(text):
if re.match(r'\w+\s*:\s*\w', text): return 0.3
def _indentation(lexer, match, ctx):
indentation = match.group(0)
yield match.start(), Text, indentation
ctx.last_indentation = indentation
ctx.pos = match.end()
if hasattr(ctx, 'block_state') and ctx.block_state and \
indentation.startswith(ctx.block_indentation) and \
indentation != ctx.block_indentation:
ctx.stack.append(ctx.block_state)
else:
ctx.block_state = None
ctx.block_indentation = None
ctx.stack.append('content')
def _starts_block(token, state):
def callback(lexer, match, ctx):
yield match.start(), token, match.group(0)
if hasattr(ctx, 'last_indentation'):
ctx.block_indentation = ctx.last_indentation
else:
ctx.block_indentation = ''
ctx.block_state = state
ctx.pos = match.end()
return callback
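# Together, _indentation and _starts_block implement indentation-scoped
# blocks for the Haml-family lexers below: _starts_block records the
# indentation of the line that opened a comment/filter block, and
# _indentation re-enters that block state for every following line that
# is indented more deeply than the opener.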
class HamlLexer(ExtendedRegexLexer):
"""
For Haml markup.
*New in Pygments 1.3.*
"""
name = 'Haml'
aliases = ['haml', 'HAML']
filenames = ['*.haml']
mimetypes = ['text/x-haml']
flags = re.IGNORECASE
# Haml can include " |\n" anywhere,
# which is ignored and used to wrap long lines.
# To accommodate this, use this custom faux dot instead.
_dot = r'(?: \|\n(?=.* \|)|.)'
# In certain places, a comma at the end of the line
# allows line wrapping as well.
_comma_dot = r'(?:,\s*\n|' + _dot + ')'
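# For example, a wrapped Ruby expression such as
#
#   %p= some_helper(arg, |
#     other_arg) |
#
# is consumed as one logical line, because _dot also matches the ' |\n'
# continuation when the following line contains ' |' as well.
# (The Haml snippet is illustrative, not taken from a real template.)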
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
(r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'root'),
(r'', Text, 'plain'),
],
'content': [
include('css'),
(r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'haml-comment-block'), '#pop'),
(r'(-)(' + _comma_dot + r'*\n)',
bygroups(Punctuation, using(RubyLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
(r'\[' + _dot + '*?\]', using(RubyLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[a-z0-9_:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'[a-z0-9_]+', Name.Variable, '#pop'),
(r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
(r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'haml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
common_sass_tokens = {
'value': [
(r'[ \t]+', Text),
(r'[!$][\w-]+', Name.Variable),
(r'url\(', String.Other, 'string-url'),
(r'[a-z_-][\w-]*(?=\()', Name.Function),
(r'(azimuth|background-attachment|background-color|'
r'background-image|background-position|background-repeat|'
r'background|border-bottom-color|border-bottom-style|'
r'border-bottom-width|border-left-color|border-left-style|'
r'border-left-width|border-right|border-right-color|'
r'border-right-style|border-right-width|border-top-color|'
r'border-top-style|border-top-width|border-bottom|'
r'border-collapse|border-left|border-width|border-color|'
r'border-spacing|border-style|border-top|border|caption-side|'
r'clear|clip|color|content|counter-increment|counter-reset|'
r'cue-after|cue-before|cue|cursor|direction|display|'
r'elevation|empty-cells|float|font-family|font-size|'
r'font-size-adjust|font-stretch|font-style|font-variant|'
r'font-weight|font|height|letter-spacing|line-height|'
r'list-style-type|list-style-image|list-style-position|'
r'list-style|margin-bottom|margin-left|margin-right|'
r'margin-top|margin|marker-offset|marks|max-height|max-width|'
r'min-height|min-width|opacity|orphans|outline|outline-color|'
r'outline-style|outline-width|overflow|padding-bottom|'
r'padding-left|padding-right|padding-top|padding|page|'
r'page-break-after|page-break-before|page-break-inside|'
r'pause-after|pause-before|pause|pitch|pitch-range|'
r'play-during|position|quotes|richness|right|size|'
r'speak-header|speak-numeral|speak-punctuation|speak|'
r'speech-rate|stress|table-layout|text-align|text-decoration|'
r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
r'vertical-align|visibility|voice-family|volume|white-space|'
r'widows|width|word-spacing|z-index|bottom|left|'
r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
r'behind|below|bidi-override|blink|block|bold|bolder|both|'
r'capitalize|center-left|center-right|center|circle|'
r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
r'inherit|inline-table|inline|inset|inside|invert|italic|'
r'justify|katakana-iroha|katakana|landscape|larger|large|'
r'left-side|leftwards|level|lighter|line-through|list-item|'
r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
r'lower|low|medium|message-box|middle|mix|monospace|'
r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
r'open-quote|outset|outside|overline|pointer|portrait|px|'
r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
r'rightwards|s-resize|sans-serif|scroll|se-resize|'
r'semi-condensed|semi-expanded|separate|serif|show|silent|'
r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
r'spell-out|square|static|status-bar|super|sw-resize|'
r'table-caption|table-cell|table-column|table-column-group|'
r'table-footer-group|table-header-group|table-row|'
r'table-row-group|text|text-bottom|text-top|thick|thin|'
r'transparent|ultra-condensed|ultra-expanded|underline|'
r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant),
(r'(indigo|gold|firebrick|indianred|darkolivegreen|'
r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
r'mediumslateblue|springgreen|crimson|lightsalmon|brown|'
r'turquoise|olivedrab|cyan|skyblue|darkturquoise|'
r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|'
r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
r'violet|orchid|ghostwhite|honeydew|cornflowerblue|'
r'darkblue|darkkhaki|mediumpurple|cornsilk|bisque|slategray|'
r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
r'gainsboro|mediumturquoise|floralwhite|coral|lightgrey|'
r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
r'lightcoral|orangered|navajowhite|palegreen|burlywood|'
r'seashell|mediumspringgreen|papayawhip|blanchedalmond|'
r'peru|aquamarine|darkslategray|ivory|dodgerblue|'
r'lemonchiffon|chocolate|orange|forestgreen|slateblue|'
r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
r'plum|darkgoldenrod|sandybrown|magenta|tan|'
r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
r'lightyellow|lavenderblush|linen|mediumaquamarine|'
r'blueviolet|peachpuff)\b', Name.Entity),
(r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|'
r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin),
(r'\!(important|default)', Name.Exception),
(r'(true|false)', Name.Pseudo),
(r'(and|or|not)', Operator.Word),
(r'/\*', Comment.Multiline, 'inline-comment'),
(r'//[^\n]*', Comment.Single),
(r'\#[a-z0-9]{1,6}', Number.Hex),
(r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
(r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
(r'#{', String.Interpol, 'interpolation'),
(r'[~\^\*!&%<>\|+=@:,./?-]+', Operator),
(r'[\[\]()]+', Punctuation),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
(r'[a-z_-][\w-]*', Name),
],
'interpolation': [
(r'\}', String.Interpol, '#pop'),
include('value'),
],
'selector': [
(r'[ \t]+', Text),
(r'\:', Name.Decorator, 'pseudo-class'),
(r'\.', Name.Class, 'class'),
(r'\#', Name.Namespace, 'id'),
(r'[a-zA-Z0-9_-]+', Name.Tag),
(r'#\{', String.Interpol, 'interpolation'),
(r'&', Keyword),
(r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator),
(r'"', String.Double, 'string-double'),
(r"'", String.Single, 'string-single'),
],
'string-double': [
(r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r'"', String.Double, '#pop'),
],
'string-single': [
(r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
(r'#\{', String.Interpol, 'interpolation'),
(r"'", String.Double, '#pop'),
],
'string-url': [
(r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
(r'#\{', String.Interpol, 'interpolation'),
(r'\)', String.Other, '#pop'),
],
'pseudo-class': [
(r'[\w-]+', Name.Decorator),
(r'#\{', String.Interpol, 'interpolation'),
(r'', Text, '#pop'),
],
'class': [
(r'[\w-]+', Name.Class),
(r'#\{', String.Interpol, 'interpolation'),
(r'', Text, '#pop'),
],
'id': [
(r'[\w-]+', Name.Namespace),
(r'#\{', String.Interpol, 'interpolation'),
(r'', Text, '#pop'),
],
'for': [
(r'(from|to|through)', Operator.Word),
include('value'),
],
}
class SassLexer(ExtendedRegexLexer):
"""
For Sass stylesheets.
*New in Pygments 1.3.*
"""
name = 'Sass'
aliases = ['sass', 'SASS']
filenames = ['*.sass']
mimetypes = ['text/x-sass']
flags = re.IGNORECASE
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'content': [
(r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
'root'),
(r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
'root'),
(r'@import', Keyword, 'import'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'@[a-z0-9_-]+', Keyword, 'selector'),
(r'=[\w-]+', Name.Function, 'value'),
(r'\+[\w-]+', Name.Decorator, 'value'),
(r'([!$][\w-]\w*)([ \t]*(?:(?:\|\|)?=|:))',
bygroups(Name.Variable, Operator), 'value'),
(r':', Name.Attribute, 'old-style-attr'),
(r'(?=.+?[=:]([^a-z]|$))', Name.Attribute, 'new-style-attr'),
(r'', Text, 'selector'),
],
'single-comment': [
(r'.+', Comment.Single),
(r'\n', Text, 'root'),
],
'multi-comment': [
(r'.+', Comment.Multiline),
(r'\n', Text, 'root'),
],
'import': [
(r'[ \t]+', Text),
(r'\S+', String),
(r'\n', Text, 'root'),
],
'old-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#{', String.Interpol, 'interpolation'),
(r'[ \t]*=', Operator, 'value'),
(r'', Text, 'value'),
],
'new-style-attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#{', String.Interpol, 'interpolation'),
(r'[ \t]*[=:]', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in common_sass_tokens.iteritems():
tokens[group] = copy.copy(common)
tokens['value'].append((r'\n', Text, 'root'))
tokens['selector'].append((r'\n', Text, 'root'))
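# Sass is indentation-based, so a newline terminates a value or a
# selector and returns to 'root'; ScssLexer below shares the same
# common states but terminates on ';', '{' and '}' instead.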
class ScssLexer(RegexLexer):
"""
For SCSS stylesheets.
"""
name = 'SCSS'
aliases = ['scss']
filenames = ['*.scss']
mimetypes = ['text/x-scss']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'@import', Keyword, 'value'),
(r'@for', Keyword, 'for'),
(r'@(debug|warn|if|while)', Keyword, 'value'),
(r'(@mixin)( [\w-]+)', bygroups(Keyword, Name.Function), 'value'),
(r'(@include)( [\w-]+)', bygroups(Keyword, Name.Decorator), 'value'),
(r'@extend', Keyword, 'selector'),
(r'@[a-z0-9_-]+', Keyword, 'selector'),
(r'(\$[\w-]*\w)([ \t]*:)', bygroups(Name.Variable, Operator), 'value'),
(r'(?=[^;{}][;}])', Name.Attribute, 'attr'),
(r'(?=[^;{}:]+:[^a-z])', Name.Attribute, 'attr'),
(r'', Text, 'selector'),
],
'attr': [
(r'[^\s:="\[]+', Name.Attribute),
(r'#{', String.Interpol, 'interpolation'),
(r'[ \t]*:', Operator, 'value'),
],
'inline-comment': [
(r"(\\#|#(?=[^{])|\*(?=[^/])|[^#*])+", Comment.Multiline),
(r'#\{', String.Interpol, 'interpolation'),
(r"\*/", Comment, '#pop'),
],
}
for group, common in common_sass_tokens.iteritems():
tokens[group] = copy.copy(common)
tokens['value'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
tokens['selector'].extend([(r'\n', Text), (r'[;{}]', Punctuation, 'root')])
class CoffeeScriptLexer(RegexLexer):
"""
For `CoffeeScript`_ source code.
.. _CoffeeScript: http://coffeescript.org
*New in Pygments 1.3.*
"""
name = 'CoffeeScript'
aliases = ['coffee-script', 'coffeescript', 'coffee']
filenames = ['*.coffee']
mimetypes = ['text/coffeescript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'###[^#].*?###', Comment.Multiline),
(r'#(?!##[^#]).*?\n', Comment.Single),
],
'multilineregex': [
(r'[^/#]+', String.Regex),
(r'///([gim]+\b|\B)', String.Regex, '#pop'),
(r'#{', String.Interpol, 'interpoling_string'),
(r'[/#]', String.Regex),
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'///', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'', Text, '#pop'),
],
'root': [
# the next expr leads to an infinite loop root -> slashstartsregex,
# so it is left disabled
#(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
r'=(?!>)|-(?!>)|[<>+*`%&\|\^/])=?',
Operator, 'slashstartsregex'),
(r'(?:\([^()]+\))?\s*[=-]>', Name.Function),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![\.\$])(for|own|in|of|while|until|'
r'loop|break|return|continue|'
r'switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'extends|this|class|by)\b', Keyword, 'slashstartsregex'),
(r'(?<![\.\$])(true|false|yes|no|on|off|null|'
r'NaN|Infinity|undefined)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
Name.Builtin),
(r'[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable,
'slashstartsregex'),
(r'@[$a-zA-Z_][a-zA-Z0-9_\.:\$]*\s*[:=]\s', Name.Variable.Instance,
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-Z_][a-zA-Z0-9_\$]*', Name.Other, 'slashstartsregex'),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all CoffeeScript strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string' : [
(r'}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
(r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#{', String.Interpol, "interpoling_string"),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single-quoted strings don't need " escapes
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-strings
(r'#{', String.Interpol, "interpoling_string"),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
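# Note that 'interpoling_string' includes 'root', so arbitrary
# CoffeeScript expressions inside #{...} interpolations, including
# nested strings with interpolations of their own, are tokenized
# recursively.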
class LiveScriptLexer(RegexLexer):
"""
For `LiveScript`_ source code.
.. _LiveScript: http://gkz.github.com/LiveScript/
*New in Pygments 1.6.*
"""
name = 'LiveScript'
aliases = ['live-script', 'livescript']
filenames = ['*.ls']
mimetypes = ['text/livescript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'/\*.*?\*/', Comment.Multiline),
(r'#.*?\n', Comment.Single),
],
'multilineregex': [
include('commentsandwhitespace'),
(r'//([gim]+\b|\B)', String.Regex, '#pop'),
(r'/', String.Regex),
(r'[^/#]+', String.Regex)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'//', String.Regex, ('#pop', 'multilineregex')),
(r'/(?! )(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'', Text, '#pop'),
],
'root': [
# the next expr leads to an infinite loop root -> slashstartsregex,
# so it is left disabled
#(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'(?:\([^()]+\))?[ ]*[~-]{1,2}>|'
r'(?:\(?[^()\n]+\)?)?[ ]*<[~-]{1,2}', Name.Function),
(r'\+\+|&&|(?<![\.\$])\b(?:and|x?or|is|isnt|not)\b|\?|:|=|'
r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|'
r'~(?!\~?>)|-(?!\-?>)|<(?!\[)|(?<!\])>|'
r'[+*`%&\|\^/])=?',
Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(?<![\.\$])(for|own|in|of|while|until|loop|break|'
r'return|continue|switch|when|then|if|unless|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
r'extends|this|class|by|const|var|to|til)\b', Keyword,
'slashstartsregex'),
(r'(?<![\.\$])(true|false|yes|no|on|off|'
r'null|NaN|Infinity|undefined|void)\b',
Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'eval|isFinite|isNaN|parseFloat|parseInt|document|window)\b',
Name.Builtin),
(r'[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable,
'slashstartsregex'),
(r'@[$a-zA-Z_][a-zA-Z0-9_\.\-:\$]*\s*[:=]\s', Name.Variable.Instance,
'slashstartsregex'),
(r'@', Name.Other, 'slashstartsregex'),
(r'@?[$a-zA-Z_][a-zA-Z0-9_\-]*', Name.Other, 'slashstartsregex'),
(r'[0-9]+\.[0-9]+([eE][0-9]+)?[fd]?(?:[a-zA-Z_]+)?', Number.Float),
(r'[0-9]+(~[0-9a-z]+)?(?:[a-zA-Z_]+)?', Number.Integer),
('"""', String, 'tdqs'),
("'''", String, 'tsqs'),
('"', String, 'dqs'),
("'", String, 'sqs'),
(r'\\[\w$-]+', String),
(r'<\[.*\]>', String),
],
'strings': [
(r'[^#\\\'"]+', String),
# note that all LiveScript strings are multi-line.
# hashmarks, quotes and backslashes must be parsed one at a time
],
'interpoling_string' : [
(r'}', String.Interpol, "#pop"),
include('root')
],
'dqs': [
(r'"', String, '#pop'),
(r'\\.|\'', String), # double-quoted strings don't need ' escapes
(r'#{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r'#|\\.|"', String), # single-quoted strings don't need " escapes
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
(r'\\.|\'|"', String), # no need to escape quotes in triple-strings
(r'#{', String.Interpol, "interpoling_string"),
(r'#', String),
include('strings'),
],
'tsqs': [
(r"'''", String, '#pop'),
(r'#|\\.|\'|"', String), # no need to escape quotes in triple-strings
include('strings')
],
}
class DuelLexer(RegexLexer):
"""
Lexer for Duel Views Engine (formerly JBST) markup with JavaScript code blocks.
See http://duelengine.org/.
See http://jsonml.org/jbst/.
*New in Pygments 1.4.*
"""
name = 'Duel'
aliases = ['duel', 'Duel Engine', 'Duel View', 'JBST', 'jbst', 'JsonML+BST']
filenames = ['*.duel','*.jbst']
mimetypes = ['text/x-duel','text/x-jbst']
flags = re.DOTALL
tokens = {
'root': [
(r'(<%[@=#!:]?)(.*?)(%>)',
bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
(r'(<%\$)(.*?)(:)(.*?)(%>)',
bygroups(Name.Tag, Name.Function, Punctuation, String, Name.Tag)),
(r'(<%--)(.*?)(--%>)',
bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
(r'(<script.*?>)(.*?)(</script>)',
bygroups(using(HtmlLexer),
using(JavascriptLexer), using(HtmlLexer))),
(r'(.+?)(?=<)', using(HtmlLexer)),
(r'.+', using(HtmlLexer)),
],
}
class ScamlLexer(ExtendedRegexLexer):
"""
For `Scaml markup <http://scalate.fusesource.org/>`_. Scaml is Haml for Scala.
*New in Pygments 1.4.*
"""
name = 'Scaml'
aliases = ['scaml', 'SCAML']
filenames = ['*.scaml']
mimetypes = ['text/x-scaml']
flags = re.IGNORECASE
# Scaml does not yet support the " |\n" notation to
# wrap long lines. Once it does, use the custom faux
# dot instead.
# _dot = r'(?: \|\n(?=.* \|)|.)'
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
(r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'root'),
(r'', Text, 'plain'),
],
'content': [
include('css'),
(r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
include('eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[a-z0-9_:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'[a-z0-9_]+', Name.Variable, '#pop'),
(r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
(r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class JadeLexer(ExtendedRegexLexer):
"""
For Jade markup.
Jade is a variant of Scaml, see:
http://scalate.fusesource.org/documentation/scaml-reference.html
*New in Pygments 1.4.*
"""
name = 'Jade'
aliases = ['jade', 'JADE']
filenames = ['*.jade']
mimetypes = ['text/x-jade']
flags = re.IGNORECASE
_dot = r'.'
tokens = {
'root': [
(r'[ \t]*\n', Text),
(r'[ \t]*', _indentation),
],
'css': [
(r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
(r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
],
'eval-or-plain': [
(r'[&!]?==', Punctuation, 'plain'),
(r'([&!]?[=~])(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)), 'root'),
(r'', Text, 'plain'),
],
'content': [
include('css'),
(r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
(r'(/)(\[' + _dot + '*?\])(' + _dot + r'*\n)',
bygroups(Comment, Comment.Special, Comment),
'#pop'),
(r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
'#pop'),
(r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
'scaml-comment-block'), '#pop'),
(r'(-@\s*)(import)?(' + _dot + r'*\n)',
bygroups(Punctuation, Keyword, using(ScalaLexer)),
'#pop'),
(r'(-)(' + _dot + r'*\n)',
bygroups(Punctuation, using(ScalaLexer)),
'#pop'),
(r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
'#pop'),
(r'[a-z0-9_:-]+', Name.Tag, 'tag'),
(r'\|', Text, 'eval-or-plain'),
],
'tag': [
include('css'),
(r'\{(,\n|' + _dot + ')*?\}', using(ScalaLexer)),
(r'\[' + _dot + '*?\]', using(ScalaLexer)),
(r'\(', Text, 'html-attributes'),
(r'/[ \t]*\n', Punctuation, '#pop:2'),
(r'[<>]{1,2}(?=[ \t=])', Punctuation),
include('eval-or-plain'),
],
'plain': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
'html-attributes': [
(r'\s+', Text),
(r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
(r'[a-z0-9_:-]+', Name.Attribute),
(r'\)', Text, '#pop'),
],
'html-attribute-value': [
(r'[ \t]+', Text),
(r'[a-z0-9_]+', Name.Variable, '#pop'),
(r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
(r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
(r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
(r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
],
'html-comment-block': [
(_dot + '+', Comment),
(r'\n', Text, 'root'),
],
'scaml-comment-block': [
(_dot + '+', Comment.Preproc),
(r'\n', Text, 'root'),
],
'filter-block': [
(r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
(r'(#\{)(' + _dot + '*?)(\})',
bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
(r'\n', Text, 'root'),
],
}
class XQueryLexer(ExtendedRegexLexer):
"""
An XQuery lexer, parsing a stream and outputting the tokens needed to
highlight XQuery code.
*New in Pygments 1.4.*
"""
name = 'XQuery'
aliases = ['xquery', 'xqy', 'xq', 'xql', 'xqm']
filenames = ['*.xqy', '*.xquery', '*.xq', '*.xql', '*.xqm']
mimetypes = ['text/xquery', 'application/xquery']
xquery_parse_state = []
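# XQuery is too context-sensitive for the lexer's own state stack
# alone, so a second stack (xquery_parse_state) holds the states to
# return to: the pushstate_* callbacks below record the continuation
# state when a construct is entered, and the popstate_* callbacks
# restore it when the construct closes.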
# FIX UNICODE LATER
#ncnamestartchar = (
# ur"[A-Z]|_|[a-z]|[\u00C0-\u00D6]|[\u00D8-\u00F6]|[\u00F8-\u02FF]|"
# ur"[\u0370-\u037D]|[\u037F-\u1FFF]|[\u200C-\u200D]|[\u2070-\u218F]|"
# ur"[\u2C00-\u2FEF]|[\u3001-\uD7FF]|[\uF900-\uFDCF]|[\uFDF0-\uFFFD]|"
# ur"[\u10000-\uEFFFF]"
#)
ncnamestartchar = r"(?:[A-Z]|_|[a-z])"
# FIX UNICODE LATER
#ncnamechar = ncnamestartchar + (ur"|-|\.|[0-9]|\u00B7|[\u0300-\u036F]|"
# ur"[\u203F-\u2040]")
ncnamechar = r"(?:" + ncnamestartchar + r"|-|\.|[0-9])"
ncname = "(?:%s+%s*)" % (ncnamestartchar, ncnamechar)
pitarget_namestartchar = r"(?:[A-KN-WY-Z]|_|:|[a-kn-wy-z])"
pitarget_namechar = r"(?:" + pitarget_namestartchar + r"|-|\.|[0-9])"
pitarget = "%s+%s*" % (pitarget_namestartchar, pitarget_namechar)
prefixedname = "%s:%s" % (ncname, ncname)
unprefixedname = ncname
qname = "(?:%s|%s)" % (prefixedname, unprefixedname)
entityref = r'(?:&(?:lt|gt|amp|quot|apos|nbsp);)'
charref = r'(?:&#[0-9]+;|&#x[0-9a-fA-F]+;)'
stringdouble = r'(?:"(?:' + entityref + r'|' + charref + r'|""|[^&"])*")'
stringsingle = r"(?:'(?:" + entityref + r"|" + charref + r"|''|[^&'])*')"
# FIX UNICODE LATER
#elementcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
elementcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
#quotattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0021]|[\u0023-\u0025]|'
# ur'[\u0027-\u003b]|[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
quotattrcontentchar = r'[A-Za-z]|\s|\d|[!#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_\'`\|~]'
#aposattrcontentchar = (ur'\t|\r|\n|[\u0020-\u0025]|[\u0028-\u003b]|'
# ur'[\u003d-\u007a]|\u007c|[\u007e-\u007F]')
aposattrcontentchar = r'[A-Za-z]|\s|\d|[!"#$%\(\)\*\+,\-\./\:;=\?\@\[\\\]^_`\|~]'
# CHAR elements - fix the above elementcontentchar, quotattrcontentchar,
# aposattrcontentchar
#x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
flags = re.DOTALL | re.MULTILINE | re.UNICODE
def punctuation_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def operator_root_callback(lexer, match, ctx):
yield match.start(), Operator, match.group(1)
# transition to root always - don't pop off stack
ctx.stack = ['root']
ctx.pos = match.end()
def popstate_tag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append(lexer.xquery_parse_state.pop())
ctx.pos = match.end()
def popstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
next_state = lexer.xquery_parse_state.pop()
if next_state == 'occurrenceindicator':
if re.match("[?*+]+", match.group(2)):
yield match.start(), Punctuation, match.group(2)
ctx.stack.append('operator')
ctx.pos = match.end()
else:
ctx.stack.append('operator')
ctx.pos = match.end(1)
else:
ctx.stack.append(next_state)
ctx.pos = match.end(1)
def popstate_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
# if we have run out of our state stack, pop whatever is on the pygments
# state stack
if len(lexer.xquery_parse_state) == 0:
ctx.stack.pop()
elif len(ctx.stack) > 1:
ctx.stack.append(lexer.xquery_parse_state.pop())
else:
# not clearly needed, but as a safeguard default back to the root state
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_element_content_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('element_content')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.pos = match.end()
def pushstate_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append(ctx.state.pop)
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_order_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_root_validate_withmode(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Keyword, match.group(3)
ctx.stack = ['root']
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_operator_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_processing_instruction_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('processing_instruction')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_element_content_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_cdata_section_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('cdata_section')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_element_content_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('element_content')
ctx.pos = match.end()
def pushstate_operator_xmlcomment_callback(lexer, match, ctx):
yield match.start(), String.Doc, match.group(1)
ctx.stack.append('xml_comment')
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
def pushstate_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('kindtest')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_kindtestforpi_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtestforpi')
ctx.pos = match.end()
def pushstate_operator_kindtest_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_occurrenceindicator_kindtest_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('occurrenceindicator')
ctx.stack.append('kindtest')
ctx.pos = match.end()
def pushstate_operator_starttag_callback(lexer, match, ctx):
yield match.start(), Name.Tag, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack.append('start_tag')
ctx.pos = match.end()
def pushstate_operator_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']  # replace the stack wholesale rather than appending
ctx.pos = match.end()
def pushstate_operator_root_construct_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.stack = ['root']
ctx.pos = match.end()
def pushstate_root_callback(lexer, match, ctx):
yield match.start(), Punctuation, match.group(1)
cur_state = ctx.stack.pop()
lexer.xquery_parse_state.append(cur_state)
ctx.stack = ['root']  # replace the stack wholesale rather than appending
ctx.pos = match.end()
def pushstate_operator_attribute_callback(lexer, match, ctx):
yield match.start(), Name.Attribute, match.group(1)
ctx.stack.append('operator')
ctx.pos = match.end()
def pushstate_operator_callback(lexer, match, ctx):
yield match.start(), Keyword, match.group(1)
yield match.start(), Text, match.group(2)
yield match.start(), Punctuation, match.group(3)
lexer.xquery_parse_state.append('operator')
ctx.pos = match.end()
tokens = {
'comment': [
# xquery comments
(r'(:\))', Comment, '#pop'),
(r'(\(:)', Comment, '#push'),
(r'[^:)]', Comment),
(r'([^:)]|:|\))', Comment),
],
'whitespace': [
(r'\s+', Text),
],
'operator': [
include('whitespace'),
(r'(\})', popstate_callback),
(r'\(:', Comment, 'comment'),
(r'(\{)', pushstate_root_callback),
(r'then|else|external|at|div|except', Keyword, 'root'),
(r'order by', Keyword, 'root'),
(r'is|mod|order\s+by|stable\s+order\s+by', Keyword, 'root'),
(r'and|or', Operator.Word, 'root'),
(r'(eq|ge|gt|le|lt|ne|idiv|intersect|in)(?=\b)',
Operator.Word, 'root'),
(r'return|satisfies|to|union|where|preserve\s+strip',
Keyword, 'root'),
(r'(>=|>>|>|<=|<<|<|-|\*|!=|\+|\||:=|=)',
operator_root_callback),
(r'(::|;|\[|//|/|,)',
punctuation_root_callback),
(r'(castable|cast)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(instance)(\s+)(of)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(treat)(\s+)(as)\b',
bygroups(Keyword, Text, Keyword), 'itemtype'),
(r'(case|as)\b', Keyword, 'itemtype'),
(r'(\))(\s*)(as)',
bygroups(Punctuation, Text, Keyword), 'itemtype'),
(r'\$', Name.Variable, 'varname'),
(r'(for|let)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
#(r'\)|\?|\]', Punctuation, '#push'),
(r'\)|\?|\]', Punctuation),
(r'(empty)(\s+)(greatest|least)', bygroups(Keyword, Text, Keyword)),
(r'ascending|descending|default', Keyword, '#push'),
(r'external', Keyword),
(r'collation', Keyword, 'uritooperator'),
# finally catch all string literals and stay in operator state
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'(catch)(\s*)', bygroups(Keyword, Text), 'root'),
],
'uritooperator': [
(stringdouble, String.Double, '#pop'),
(stringsingle, String.Single, '#pop'),
],
'namespacedecl': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'(at)(\s+)('+stringdouble+')', bygroups(Keyword, Text, String.Double)),
(r"(at)(\s+)("+stringsingle+')', bygroups(Keyword, Text, String.Single)),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r',', Punctuation),
(r'=', Operator),
(r';', Punctuation, 'root'),
(ncname, Name.Namespace),
],
'namespacekeyword': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double, 'namespacedecl'),
(stringsingle, String.Single, 'namespacedecl'),
(r'inherit|no-inherit', Keyword, 'root'),
(r'namespace', Keyword, 'namespacedecl'),
(r'(default)(\s+)(element)', bygroups(Keyword, Text, Keyword)),
(r'preserve|no-preserve', Keyword),
(r',', Punctuation),
],
'varname': [
(r'\(:', Comment, 'comment'),
(qname, Name.Variable, 'operator'),
],
'singletype': [
(r'\(:', Comment, 'comment'),
(ncname + r'(:\*)', Name.Variable, 'operator'),
(qname, Name.Variable, 'operator'),
],
'itemtype': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\$', Punctuation, 'varname'),
(r'(void)(\s*)(\()(\s*)(\))',
bygroups(Keyword, Text, Punctuation, Text, Punctuation), 'operator'),
(r'(element|attribute|schema-element|schema-attribute|comment|text|'
r'node|binary|document-node|empty-sequence)(\s*)(\()',
pushstate_occurrenceindicator_kindtest_callback),
# Marklogic specific type?
(r'(processing-instruction)(\s*)(\()',
bygroups(Keyword, Text, Punctuation),
('occurrenceindicator', 'kindtestforpi')),
(r'(item)(\s*)(\()(\s*)(\))(?=[*+?])',
bygroups(Keyword, Text, Punctuation, Text, Punctuation),
'occurrenceindicator'),
(r'\(\#', Punctuation, 'pragma'),
(r';', Punctuation, '#pop'),
(r'then|else', Keyword, '#pop'),
(r'(at)(\s+)(' + stringdouble + ')',
bygroups(Keyword, Text, String.Double), 'namespacedecl'),
(r'(at)(\s+)(' + stringsingle + ')',
bygroups(Keyword, Text, String.Single), 'namespacedecl'),
(r'except|intersect|in|is|return|satisfies|to|union|where',
Keyword, 'root'),
(r'and|div|eq|ge|gt|le|lt|ne|idiv|mod|or', Operator.Word, 'root'),
(r':=|=|,|>=|>>|>|\[|\(|<=|<<|<|-|!=|\|', Operator, 'root'),
(r'external|at', Keyword, 'root'),
(r'(stable)(\s+)(order)(\s+)(by)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'root'),
(r'(castable|cast)(\s+)(as)',
bygroups(Keyword, Text, Keyword), 'singletype'),
(r'(treat)(\s+)(as)', bygroups(Keyword, Text, Keyword)),
(r'(instance)(\s+)(of)', bygroups(Keyword, Text, Keyword)),
(r'case|as', Keyword, 'itemtype'),
(r'(\))(\s*)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(ncname + r':\*', Keyword.Type, 'operator'),
(qname, Keyword.Type, 'occurrenceindicator'),
],
'kindtest': [
(r'\(:', Comment, 'comment'),
(r'{', Punctuation, 'root'),
(r'(\))([*+?]?)', popstate_kindtest_callback),
(r'\*', Name, 'closekindtest'),
(qname, Name, 'closekindtest'),
(r'(element|schema-element)(\s*)(\()', pushstate_kindtest_callback),
],
'kindtestforpi': [
(r'\(:', Comment, 'comment'),
(r'\)', Punctuation, '#pop'),
(ncname, Name.Variable),
(stringdouble, String.Double),
(stringsingle, String.Single),
],
'closekindtest': [
(r'\(:', Comment, 'comment'),
(r'(\))', popstate_callback),
(r',', Punctuation),
(r'(\{)', pushstate_operator_root_callback),
(r'\?', Punctuation),
],
'xml_comment': [
(r'(-->)', popstate_xmlcomment_callback),
(r'[^-]{1,2}', Literal),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'processing_instruction': [
(r'\s+', Text, 'processing_instruction_content'),
(r'\?>', String.Doc, '#pop'),
(pitarget, Name),
],
'processing_instruction_content': [
(r'\?>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'cdata_section': [
(r']]>', String.Doc, '#pop'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
],
'start_tag': [
include('whitespace'),
(r'(/>)', popstate_tag_callback),
(r'>', Name.Tag, 'element_content'),
(r'"', Punctuation, 'quot_attribute_content'),
(r"'", Punctuation, 'apos_attribute_content'),
(r'=', Operator),
(qname, Name.Tag),
],
'quot_attribute_content': [
(r'"', Punctuation, 'start_tag'),
(r'(\{)', pushstate_root_callback),
(r'""', Name.Attribute),
(quotattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'apos_attribute_content': [
(r"'", Punctuation, 'start_tag'),
(r'\{', Punctuation, 'root'),
(r"''", Name.Attribute),
(aposattrcontentchar, Name.Attribute),
(entityref, Name.Attribute),
(charref, Name.Attribute),
(r'\{\{|\}\}', Name.Attribute),
],
'element_content': [
(r'</', Name.Tag, 'end_tag'),
(r'(\{)', pushstate_root_callback),
(r'(<!--)', pushstate_element_content_xmlcomment_callback),
(r'(<\?)', pushstate_element_content_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_element_content_cdata_section_callback),
(r'(<)', pushstate_element_content_starttag_callback),
(elementcontentchar, Literal),
(entityref, Literal),
(charref, Literal),
(r'\{\{|\}\}', Literal),
],
'end_tag': [
include('whitespace'),
(r'(>)', popstate_tag_callback),
(qname, Name.Tag),
],
'xmlspace_decl': [
(r'\(:', Comment, 'comment'),
(r'preserve|strip', Keyword, '#pop'),
],
'declareordering': [
(r'\(:', Comment, 'comment'),
include('whitespace'),
(r'ordered|unordered', Keyword, '#pop'),
],
'xqueryversion': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(stringdouble, String.Double),
(stringsingle, String.Single),
(r'encoding', Keyword),
(r';', Punctuation, '#pop'),
],
'pragma': [
(qname, Name.Variable, 'pragmacontents'),
],
'pragmacontents': [
(r'#\)', Punctuation, 'operator'),
(ur'\t|\r|\n|[\u0020-\uD7FF]|[\uE000-\uFFFD]|' +
unirange(0x10000, 0x10ffff), Literal),
(r'(\s+)', Text),
],
'occurrenceindicator': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
(r'\*|\?|\+', Operator, 'operator'),
(r':=', Operator, 'root'),
(r'', Text, 'operator'),
],
'option': [
include('whitespace'),
(qname, Name.Variable, '#pop'),
],
'qname_braren': [
include('whitespace'),
(r'(\{)', pushstate_operator_root_callback),
(r'(\()', Punctuation, 'root'),
],
'element_qname': [
(qname, Name.Variable, 'root'),
],
'attribute_qname': [
(qname, Name.Variable, 'root'),
],
'root': [
include('whitespace'),
(r'\(:', Comment, 'comment'),
# handle operator state
# order on numbers matters - handle most complex first
(r'\d+(\.\d*)?[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+)[eE][\+\-]?\d+', Number.Double, 'operator'),
(r'(\.\d+|\d+\.\d*)', Number, 'operator'),
(r'(\d+)', Number.Integer, 'operator'),
(r'(\.\.|\.|\))', Punctuation, 'operator'),
(r'(declare)(\s+)(construction)',
bygroups(Keyword, Text, Keyword), 'operator'),
(r'(declare)(\s+)(default)(\s+)(order)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'operator'),
(ncname + ':\*', Name, 'operator'),
('\*:'+ncname, Name.Tag, 'operator'),
('\*', Name.Tag, 'operator'),
(stringdouble, String.Double, 'operator'),
(stringsingle, String.Single, 'operator'),
(r'(\})', popstate_callback),
#NAMESPACE DECL
(r'(declare)(\s+)(default)(\s+)(collation)',
bygroups(Keyword, Text, Keyword, Text, Keyword)),
(r'(module|declare)(\s+)(namespace)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
(r'(declare)(\s+)(base-uri)',
bygroups(Keyword, Text, Keyword), 'namespacedecl'),
#NAMESPACE KEYWORD
(r'(declare)(\s+)(default)(\s+)(element|function)',
bygroups(Keyword, Text, Keyword, Text, Keyword), 'namespacekeyword'),
(r'(import)(\s+)(schema|module)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'namespacekeyword'),
(r'(declare)(\s+)(copy-namespaces)',
bygroups(Keyword, Text, Keyword), 'namespacekeyword'),
#VARNAMEs
(r'(for|let|some|every)(\s+)(\$)',
bygroups(Keyword, Text, Name.Variable), 'varname'),
(r'\$', Name.Variable, 'varname'),
(r'(declare)(\s+)(variable)(\s+)(\$)',
bygroups(Keyword, Text, Keyword, Text, Name.Variable), 'varname'),
#ITEMTYPE
(r'(\))(\s+)(as)', bygroups(Operator, Text, Keyword), 'itemtype'),
(r'(element|attribute|schema-element|schema-attribute|comment|'
r'text|node|document-node|empty-sequence)(\s+)(\()',
pushstate_operator_kindtest_callback),
(r'(processing-instruction)(\s+)(\()',
pushstate_operator_kindtestforpi_callback),
(r'(<!--)', pushstate_operator_xmlcomment_callback),
(r'(<\?)', pushstate_operator_processing_instruction_callback),
(r'(<!\[CDATA\[)', pushstate_operator_cdata_section_callback),
# (r'</', Name.Tag, 'end_tag'),
(r'(<)', pushstate_operator_starttag_callback),
(r'(declare)(\s+)(boundary-space)',
bygroups(Keyword, Text, Keyword), 'xmlspace_decl'),
(r'(validate)(\s+)(lax|strict)',
pushstate_operator_root_validate_withmode),
(r'(validate)(\s*)(\{)', pushstate_operator_root_validate),
(r'(typeswitch)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'(element|attribute)(\s*)(\{)',
pushstate_operator_root_construct_callback),
(r'(document|text|processing-instruction|comment)(\s*)(\{)',
pushstate_operator_root_construct_callback),
#ATTRIBUTE
(r'(attribute)(\s+)(?=' + qname + r')',
bygroups(Keyword, Text), 'attribute_qname'),
#ELEMENT
(r'(element)(\s+)(?=' +qname+ r')',
bygroups(Keyword, Text), 'element_qname'),
#PROCESSING_INSTRUCTION
(r'(processing-instruction)(\s+)(' + ncname + r')(\s*)(\{)',
bygroups(Keyword, Text, Name.Variable, Text, Punctuation),
'operator'),
(r'(declare|define)(\s+)(function)',
bygroups(Keyword, Text, Keyword)),
(r'(\{)', pushstate_operator_root_callback),
(r'(unordered|ordered)(\s*)(\{)',
pushstate_operator_order_callback),
(r'(declare)(\s+)(ordering)',
bygroups(Keyword, Text, Keyword), 'declareordering'),
(r'(xquery)(\s+)(version)',
bygroups(Keyword.Pseudo, Text, Keyword.Pseudo), 'xqueryversion'),
(r'(\(#)', Punctuation, 'pragma'),
# sometimes return can occur in root state
(r'return', Keyword),
(r'(declare)(\s+)(option)', bygroups(Keyword, Text, Keyword),
'option'),
#URI LITERALS - single and double quoted
(r'(at)(\s+)('+stringdouble+')', String.Double, 'namespacedecl'),
(r'(at)(\s+)('+stringsingle+')', String.Single, 'namespacedecl'),
(r'(ancestor-or-self|ancestor|attribute|child|descendant-or-self)(::)',
bygroups(Keyword, Punctuation)),
(r'(descendant|following-sibling|following|parent|preceding-sibling'
r'|preceding|self)(::)', bygroups(Keyword, Punctuation)),
(r'(if)(\s*)(\()', bygroups(Keyword, Text, Punctuation)),
(r'then|else', Keyword),
# ML specific
(r'(try)(\s*)', bygroups(Keyword, Text), 'root'),
(r'(catch)(\s*)(\()(\$)',
bygroups(Keyword, Text, Punctuation, Name.Variable), 'varname'),
(r'(@'+qname+')', Name.Attribute),
(r'(@'+ncname+')', Name.Attribute),
(r'@\*:'+ncname, Name.Attribute),
(r'(@)', Name.Attribute),
(r'//|/|\+|-|;|,|\(|\)', Punctuation),
# STANDALONE QNAMES
(qname + r'(?=\s*{)', Name.Tag, 'qname_braren'),
(qname + r'(?=\s*\([^:])', Name.Function, 'qname_braren'),
(qname, Name.Tag, 'operator'),
]
}
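# Editorial sketch (not part of the original source): the lexer above keeps its
# own xquery_parse_state stack beside the pygments context stack, and the
# callbacks shuttle states between the two on every tag, comment and '{...}'
# boundary. A minimal way to exercise it, assuming the enclosing class is the
# usual pygments XQueryLexer and pygments itself is importable:
def _demo_xquery_lexer():
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    code = u'for $x in (1, 2, 3) return <item>{$x}</item>'
    # the element constructor pushes start_tag/element_content states and the
    # enclosed {...} expression round-trips through 'root'
    print highlight(code, XQueryLexer(), TerminalFormatter())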
class DartLexer(RegexLexer):
"""
For `Dart <http://dartlang.org/>`_ source code.
*New in Pygments 1.5.*
"""
name = 'Dart'
aliases = ['dart']
filenames = ['*.dart']
mimetypes = ['text/x-dart']
flags = re.MULTILINE | re.DOTALL
tokens = {
'root': [
include('string_literal'),
(r'#!(.*?)$', Comment.Preproc),
(r'\b(import|export)\b', Keyword, 'import_decl'),
(r'\b(library|source|part of|part)\b', Keyword),
(r'[^\S\n]+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'\b(class)\b(\s+)',
bygroups(Keyword.Declaration, Text), 'class'),
(r'\b(assert|break|case|catch|continue|default|do|else|finally|for|'
r'if|in|is|new|return|super|switch|this|throw|try|while)\b',
Keyword),
(r'\b(abstract|const|extends|factory|final|get|implements|'
r'native|operator|set|static|typedef|var)\b', Keyword.Declaration),
(r'\b(bool|double|Dynamic|int|num|Object|String|void)\b', Keyword.Type),
(r'\b(false|null|true)\b', Keyword.Constant),
(r'[~!%^&*+=|?:<>/-]|as', Operator),
(r'[a-zA-Z_$][a-zA-Z0-9_]*:', Name.Label),
(r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
(r'[(){}\[\],.;]', Punctuation),
(r'0[xX][0-9a-fA-F]+', Number.Hex),
# DIGIT+ (‘.’ DIGIT*)? EXPONENT?
(r'\d+(\.\d*)?([eE][+-]?\d+)?', Number),
(r'\.\d+([eE][+-]?\d+)?', Number), # ‘.’ DIGIT+ EXPONENT?
(r'\n', Text)
# pseudo-keyword negate intentionally left out
],
'class': [
(r'[a-zA-Z_$][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import_decl': [
include('string_literal'),
(r'\s+', Text),
(r'\b(as|show|hide)\b', Keyword),
(r'[a-zA-Z_$][a-zA-Z0-9_]*', Name),
(r'\,', Punctuation),
(r'\;', Punctuation, '#pop')
],
'string_literal': [
# Raw strings.
(r'r"""([\s|\S]*?)"""', String.Double),
(r"r'''([\s|\S]*?)'''", String.Single),
(r'r"(.*?)"', String.Double),
(r"r'(.*?)'", String.Single),
# Normal Strings.
(r'"""', String.Double, 'string_double_multiline'),
(r"'''", String.Single, 'string_single_multiline'),
(r'"', String.Double, 'string_double'),
(r"'", String.Single, 'string_single')
],
'string_common': [
(r"\\(x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|u\{[0-9A-Fa-f]*\}|[a-z\'\"$\\])",
String.Escape),
(r'(\$)([a-zA-Z_][a-zA-Z0-9_]*)', bygroups(String.Interpol, Name)),
(r'(\$\{)(.*?)(\})',
bygroups(String.Interpol, using(this), String.Interpol))
],
'string_double': [
(r'"', String.Double, '#pop'),
(r'[^\"$\\\n]+', String.Double),
include('string_common'),
(r'\$+', String.Double)
],
'string_double_multiline': [
(r'"""', String.Double, '#pop'),
(r'[^\"$\\]+', String.Double),
include('string_common'),
(r'(\$|\")+', String.Double)
],
'string_single': [
(r"'", String.Single, '#pop'),
(r"[^\'$\\\n]+", String.Single),
include('string_common'),
(r'\$+', String.Single)
],
'string_single_multiline': [
(r"'''", String.Single, '#pop'),
(r'[^\'$\\]+', String.Single),
include('string_common'),
(r'(\$|\')+', String.Single)
]
}
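# Editorial sketch (not part of the original source): the string_common state
# above re-enters the lexer for ${...} interpolations via using(this), so
# embedded expressions are highlighted as Dart code rather than as raw string
# text. Illustrative only:
def _demo_dart_interpolation():
    from pygments import highlight
    from pygments.formatters import NullFormatter
    print highlight(u'var s = "sum: ${1 + n}";', DartLexer(), NullFormatter())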
class TypeScriptLexer(RegexLexer):
"""
For `TypeScript <http://www.typescriptlang.org/>`_ source code.
*New in Pygments 1.6.*
"""
name = 'TypeScript'
aliases = ['ts']
filenames = ['*.ts']
mimetypes = ['text/x-typescript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
(r'', Text, '#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
# Match stuff like: module name {...}
(r'\b(module)(\s*)(\s*[a-zA-Z0-9_?.$][\w?.$]*)(\s*)',
bygroups(Keyword.Reserved, Text, Name.Other, Text), 'slashstartsregex'),
# Match variable type keywords
(r'\b(string|bool|number)\b', Keyword.Type),
# Match stuff like: constructor
(r'\b(constructor|declare|interface|as|AS)\b', Keyword.Reserved),
# Match stuff like: super(argument, list)
(r'(super)(\s*)(\([a-zA-Z0-9,_?.$\s]+\s*\))',
bygroups(Keyword.Reserved, Text), 'slashstartsregex'),
# Match stuff like: function() {...}
(r'([a-zA-Z_?.$][\w?.$]*)\(\) \{', Name.Other, 'slashstartsregex'),
# Match stuff like: (function: return type)
(r'([a-zA-Z0-9_?.$][\w?.$]*)(\s*:\s*)([a-zA-Z0-9_?.$][\w?.$]*)',
bygroups(Name.Other, Text, Keyword.Type)),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
class LassoLexer(RegexLexer):
"""
For `Lasso <http://www.lassosoft.com/>`_ source code, covering both Lasso 9
syntax and LassoScript for Lasso 8.6 and earlier. For Lasso embedded in
HTML, use the `LassoHtmlLexer`.
Additional options accepted:
`builtinshighlighting`
If given and ``True``, highlight builtin types, traits, methods, and
members (default: ``True``).
`requiredelimiters`
If given and ``True``, only highlight code between delimiters as Lasso
(default: ``False``).
*New in Pygments 1.6.*
"""
name = 'Lasso'
aliases = ['lasso', 'lassoscript']
filenames = ['*.lasso', '*.lasso[89]']
alias_filenames = ['*.incl', '*.inc', '*.las']
mimetypes = ['text/x-lasso']
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'^#!.+lasso9\b', Comment.Preproc, 'lasso'),
(r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
(r'\[noprocess\]', Comment.Preproc, ('delimiters', 'noprocess')),
(r'\[', Comment.Preproc, ('delimiters', 'squarebrackets')),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc,
('delimiters', 'anglebrackets')),
(r'<', Other, 'delimiters'),
(r'\s+', Other),
(r'', Other, ('delimiters', 'lassofile')),
],
'delimiters': [
(r'\[no_square_brackets\]', Comment.Preproc, 'nosquarebrackets'),
(r'\[noprocess\]', Comment.Preproc, 'noprocess'),
(r'\[', Comment.Preproc, 'squarebrackets'),
(r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
(r'<', Other),
(r'[^[<]+', Other),
],
'nosquarebrackets': [
(r'<\?(LassoScript|lasso|=)', Comment.Preproc, 'anglebrackets'),
(r'<', Other),
(r'[^<]+', Other),
],
'noprocess': [
(r'\[/noprocess\]', Comment.Preproc, '#pop'),
(r'\[', Other),
(r'[^[]', Other),
],
'squarebrackets': [
(r'\]', Comment.Preproc, '#pop'),
include('lasso'),
],
'anglebrackets': [
(r'\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'lassofile': [
(r'\]', Comment.Preproc, '#pop'),
(r'\?>', Comment.Preproc, '#pop'),
include('lasso'),
],
'whitespacecomments': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*\*!.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
],
'lasso': [
# whitespace/comments
include('whitespacecomments'),
# literals
(r'\d*\.\d+(e[+-]?\d+)?', Number.Float),
(r'0x[\da-f]+', Number.Hex),
(r'\d+', Number.Integer),
(r'([+-]?)(infinity|NaN)\b', bygroups(Operator, Number)),
(r"'", String.Single, 'singlestring'),
(r'"', String.Double, 'doublestring'),
(r'`[^`]*`', String.Backtick),
# names
(r'\$[a-z_][\w.]*', Name.Variable),
(r'#([a-z_][\w.]*|\d+)', Name.Variable.Instance),
(r"(\.)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Name.Variable.Class)),
(r"(self)(\s*->\s*)('[a-z_][\w.]*')",
bygroups(Name.Builtin.Pseudo, Operator, Name.Variable.Class)),
(r'(\.\.?)([a-z_][\w.]*)',
bygroups(Name.Builtin.Pseudo, Name.Other.Member)),
(r'(->\\?\s*|&\s*)([a-z_][\w.]*)',
bygroups(Operator, Name.Other.Member)),
(r'(self|inherited|global|void)\b', Name.Builtin.Pseudo),
(r'-[a-z_][\w.]*', Name.Attribute),
(r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
(r'(error_(code|msg)_\w+|Error_AddError|Error_ColumnRestriction|'
r'Error_DatabaseConnectionUnavailable|Error_DatabaseTimeout|'
r'Error_DeleteError|Error_FieldRestriction|Error_FileNotFound|'
r'Error_InvalidDatabase|Error_InvalidPassword|'
r'Error_InvalidUsername|Error_ModuleNotFound|'
r'Error_NoError|Error_NoPermission|Error_OutOfMemory|'
r'Error_ReqColumnMissing|Error_ReqFieldMissing|'
r'Error_RequiredColumnMissing|Error_RequiredFieldMissing|'
r'Error_UpdateError)\b', Name.Exception),
# definitions
(r'(define)(\s+)([a-z_][\w.]*)(\s*=>\s*)(type|trait|thread)\b',
bygroups(Keyword.Declaration, Text, Name.Class, Operator, Keyword)),
(r'(define)(\s+)([a-z_][\w.]*)(\s*->\s*)([a-z_][\w.]*=?|[-+*/%<>]|==)',
bygroups(Keyword.Declaration, Text, Name.Class, Operator,
Name.Function), 'signature'),
(r'(define)(\s+)([a-z_][\w.]*)',
bygroups(Keyword.Declaration, Text, Name.Function), 'signature'),
(r'(public|protected|private|provide)(\s+)(([a-z_][\w.]*=?|'
r'[-+*/%<>]|==)(?=\s*\())', bygroups(Keyword, Text, Name.Function),
'signature'),
(r'(public|protected|private)(\s+)([a-z_][\w.]*)',
bygroups(Keyword, Text, Name.Function)),
# keywords
(r'(true|false|none|minimal|full|all)\b', Keyword.Constant),
(r'(local|var|variable|data(?=\s))\b', Keyword.Declaration),
(r'(array|date|decimal|duration|integer|map|pair|string|tag|xml|'
r'null|list|queue|set|stack|staticarray)\b', Keyword.Type),
(r'([a-z_][\w.]*)(\s+)(in)\b', bygroups(Name, Text, Keyword)),
(r'(let|into)(\s+)([a-z_][\w.]*)', bygroups(Keyword, Text, Name)),
(r'require\b', Keyword, 'requiresection'),
(r'(/?)(Namespace_Using)\b', bygroups(Punctuation, Keyword.Namespace)),
(r'(/?)(Cache|Database_Names|Database_SchemaNames|'
r'Database_TableNames|Define_Tag|Define_Type|Email_Batch|'
r'Encode_Set|HTML_Comment|Handle|Handle_Error|Header|If|Inline|'
r'Iterate|LJAX_Target|Link|Link_CurrentAction|Link_CurrentGroup|'
r'Link_CurrentRecord|Link_Detail|Link_FirstGroup|'
r'Link_FirstRecord|Link_LastGroup|Link_LastRecord|Link_NextGroup|'
r'Link_NextRecord|Link_PrevGroup|Link_PrevRecord|Log|Loop|'
r'NoProcess|Output_None|Portal|Private|Protect|Records|Referer|'
r'Referrer|Repeating|ResultSet|Rows|Search_Args|Search_Arguments|'
r'Select|Sort_Args|Sort_Arguments|Thread_Atomic|Value_List|While|'
r'Abort|Case|Else|If_Empty|If_False|If_Null|If_True|Loop_Abort|'
r'Loop_Continue|Loop_Count|Params|Params_Up|Return|Return_Value|'
r'Run_Children|SOAP_DefineTag|SOAP_LastRequest|SOAP_LastResponse|'
r'Tag_Name|ascending|average|by|define|descending|do|equals|'
r'frozen|group|handle_failure|import|in|into|join|let|match|max|'
r'min|on|order|parent|protected|provide|public|require|skip|'
r'split_thread|sum|take|thread|to|trait|type|where|with|yield)\b',
bygroups(Punctuation, Keyword)),
# other
(r',', Punctuation, 'commamember'),
(r'(and|or|not)\b', Operator.Word),
(r'([a-z_][\w.]*)(\s*::\s*)?([a-z_][\w.]*)?(\s*=(?!=))',
bygroups(Name, Punctuation, Name.Label, Operator)),
(r'(/?)([\w.]+)', bygroups(Punctuation, Name.Other)),
(r'(=)(bw|ew|cn|lte?|gte?|n?eq|ft|n?rx)\b',
bygroups(Operator, Operator.Word)),
(r':=|[-+*/%=<>&|!?\\]+', Operator),
(r'[{}():;,@^]', Punctuation),
],
'singlestring': [
(r"'", String.Single, '#pop'),
(r"[^'\\]+", String.Single),
include('escape'),
(r"\\+", String.Single),
],
'doublestring': [
(r'"', String.Double, '#pop'),
(r'[^"\\]+', String.Double),
include('escape'),
(r'\\+', String.Double),
],
'escape': [
(r'\\(U[\da-f]{8}|u[\da-f]{4}|x[\da-f]{1,2}|[0-7]{1,3}|:[^:]+:|'
r'[abefnrtv?\"\'\\]|$)', String.Escape),
],
'signature': [
(r'=>', Operator, '#pop'),
(r'\)', Punctuation, '#pop'),
(r'[(,]', Punctuation, 'parameter'),
include('lasso'),
],
'parameter': [
(r'\)', Punctuation, '#pop'),
(r'-?[a-z_][\w.]*', Name.Attribute, '#pop'),
(r'\.\.\.', Name.Builtin.Pseudo),
include('lasso'),
],
'requiresection': [
(r'(([a-z_][\w.]*=?|[-+*/%<>]|==)(?=\s*\())', Name, 'requiresignature'),
(r'(([a-z_][\w.]*=?|[-+*/%<>]|==)(?=(\s*::\s*[\w.]+)?\s*,))', Name),
(r'[a-z_][\w.]*=?|[-+*/%<>]|==', Name, '#pop'),
(r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
(r',', Punctuation),
include('whitespacecomments'),
],
'requiresignature': [
(r'(\)(?=(\s*::\s*[\w.]+)?\s*,))', Punctuation, '#pop'),
(r'\)', Punctuation, '#pop:2'),
(r'-?[a-z_][\w.]*', Name.Attribute),
(r'(::\s*)([a-z_][\w.]*)', bygroups(Punctuation, Name.Label)),
(r'\.\.\.', Name.Builtin.Pseudo),
(r'[(,]', Punctuation),
include('whitespacecomments'),
],
'commamember': [
(r'(([a-z_][\w.]*=?|[-+*/%<>]|==)'
r'(?=\s*(\(([^()]*\([^()]*\))*[^)]*\)\s*)?(::[\w.\s]+)?=>))',
Name.Function, 'signature'),
include('whitespacecomments'),
(r'', Text, '#pop'),
],
}
def __init__(self, **options):
self.builtinshighlighting = get_bool_opt(
options, 'builtinshighlighting', True)
self.requiredelimiters = get_bool_opt(
options, 'requiredelimiters', False)
self._builtins = set()
self._members = set()
if self.builtinshighlighting:
from pygments.lexers._lassobuiltins import BUILTINS, MEMBERS
for key, value in BUILTINS.iteritems():
self._builtins.update(value)
for key, value in MEMBERS.iteritems():
self._members.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.requiredelimiters:
stack.append('delimiters')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if (token is Name.Other and value.lower() in self._builtins or
token is Name.Other.Member and value.lower() in self._members):
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if 'bin/lasso9' in text:
rv += 0.8
if re.search(r'<\?(=|lasso)|\A\[', text, re.I):
rv += 0.4
if re.search(r'local\(', text, re.I):
rv += 0.4
if '?>' in text:
rv += 0.1
return rv
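# Editorial sketch (not part of the original source): the two options
# documented in the class docstring are ordinary lexer constructor keywords.
# Illustrative only:
def _demo_lasso_options():
    from pygments import highlight
    from pygments.formatters import NullFormatter
    # treat only [...] / <?lasso ?> delimited regions as Lasso, and skip the
    # builtins lookup tables otherwise loaded in __init__
    lexer = LassoLexer(requiredelimiters=True, builtinshighlighting=False)
    print highlight(u"[local('x' = 1)]", lexer, NullFormatter())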
class QmlLexer(RegexLexer):
"""
For QML files. See http://doc.qt.digia.com/4.7/qdeclarativeintroduction.html.
*New in Pygments 1.6.*
"""
# QML is based on JavaScript, so much of this is taken from the
# JavascriptLexer above.
name = 'QML'
aliases = ['qml', 'Qt Meta Language', 'Qt modeling Language']
filenames = ['*.qml',]
mimetypes = [ 'application/x-qml',]
# pasted from JavascriptLexer, with some additions
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
(r'', Text, '#pop')
],
'badregex': [
(r'\n', Text, '#pop')
],
'root' : [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
# QML insertions
(r'\bid\s*:\s*[A-Za-z][_A-Za-z.0-9]*',Keyword.Declaration,
'slashstartsregex'),
(r'\b[A-Za-z][_A-Za-z.0-9]*\s*:',Keyword, 'slashstartsregex'),
# the rest from JavascriptLexer
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|let|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
|
ljgabc/lfs | refs/heads/master | usr/lib/python2.7/test/test_fnmatch.py | 130 | """Test cases for the fnmatch module."""
from test import test_support
import unittest
from fnmatch import fnmatch, fnmatchcase, _MAXCACHE, _cache, _purge
class FnmatchTestCase(unittest.TestCase):
def tearDown(self):
_purge()
def check_match(self, filename, pattern, should_match=1, fn=fnmatch):
if should_match:
self.assertTrue(fn(filename, pattern),
"expected %r to match pattern %r"
% (filename, pattern))
else:
self.assertTrue(not fn(filename, pattern),
"expected %r not to match pattern %r"
% (filename, pattern))
def test_fnmatch(self):
check = self.check_match
check('abc', 'abc')
check('abc', '?*?')
check('abc', '???*')
check('abc', '*???')
check('abc', '???')
check('abc', '*')
check('abc', 'ab[cd]')
check('abc', 'ab[!de]')
check('abc', 'ab[de]', 0)
check('a', '??', 0)
check('a', 'b', 0)
# these test that '\' is handled correctly in character sets;
# see SF bug #409651
check('\\', r'[\]')
check('a', r'[!\]')
check('\\', r'[!\]', 0)
# test that filenames with newlines in them are handled correctly.
# http://bugs.python.org/issue6665
check('foo\nbar', 'foo*')
check('foo\nbar\n', 'foo*')
check('\nfoo', 'foo*', False)
check('\n', '*')
def test_fnmatchcase(self):
check = self.check_match
check('AbC', 'abc', 0, fnmatchcase)
check('abc', 'AbC', 0, fnmatchcase)
def test_cache_clearing(self):
# check that caches do not grow too large
# http://bugs.python.org/issue7846
# string pattern cache
for i in range(_MAXCACHE + 1):
fnmatch('foo', '?' * i)
self.assertLessEqual(len(_cache), _MAXCACHE)
def test_main():
test_support.run_unittest(FnmatchTestCase)
if __name__ == "__main__":
test_main()
|
WhySoGeeky/DroidPot | refs/heads/master | modules/monitor/system_call/system_call.py | 1 | import os, sys, shutil
from lib.common.commands.adb import Adb
from lib.common.abstract import Monitor
from yapsy.IPlugin import IPlugin
adb = Adb()
class system_call(Monitor, IPlugin):
def __init__(self):
super(system_call, self).__init__()
self.compatible_device = []
def prepare(self, params, session, device_serial):
"""
This method handles the preparation of the device for monitoring. Any file modifications needed before the session should be made here.
:param params: session's configuration created from web interface
:param session: session object
:param device_serial: device's serial from [adb devices] command
:return:
"""
cur_file = os.path.dirname(os.path.realpath("__file__"))
print "system call directory is %s"%cur_file
#will be map and links to /system/bin
shutil.copy2(src=os.path.join(cur_file,"modules/monitor/system_call","daemons","bootstart.sh"), dst=os.path.join(session.ramdisk_dir, "bin"))
shutil.copy2(src=os.path.join(cur_file,"modules/monitor/system_call","daemons","bootstart2.sh"), dst=os.path.join(session.ramdisk_dir, "bin"))
for param_key, param_value in params.iteritems():
if param_key == "api_monitoring" and param_value == "on":
ramdisk_dir = session.ramdisk_dir
init_file = open(os.path.join(ramdisk_dir, "init.rc"), 'a')
init_file.write("\non property:dev.bootcomplete=1")
init_file.write("\n start bootstart")
init_file.write("\n")
init_file.write("\nservice bootstart /system/bin/sh /bin/bootstart.sh")
init_file.write("\n class late-start")
init_file.write("\n user root")
init_file.write("\n group root")
init_file.write("\n disable")
init_file.write("\n oneshot")
init_file.write("\n")
if param_key == "syscall_monitoring" and param_value == "on":
ramdisk_dir = session.ramdisk_dir
init_file = open(os.path.join(ramdisk_dir, "init.rc"), 'a')
init_file.write("\non property:dev.bootcomplete=1")
init_file.write("\n start bootstart")
init_file.write("\n")
init_file.write("\nservice bootstart /system/bin/sh /bin/bootstart2.sh")
init_file.write("\n class late-start")
init_file.write("\n user root")
init_file.write("\n group root")
init_file.write("\n disable")
init_file.write("\n oneshot")
init_file.write("\n")
def preSession(self, params, module, session, device_serial):
"""
This method handles the manipulation required on the device just before the malicious apk is installed on it.
:param params: session's configuration created from web interface
:param module:
:param session: session object
:param device_serial: device's serial from [adb devices] command
:return:
"""
# clear logcat first so only this session's entries are captured
clear_logcat_command = "logcat -c"
adb.shell(clear_logcat_command, root=True, device_serial=device_serial)
def postSession(self, params, module, session, device_serial):
"""
This method handles the manipulation or extraction of information from the device after the monitoring session has ended
:param params: session's configuration created from web interface
:param module:
:param session: session object
:param device_serial: device's serial from [adb devices] command
:return:
"""
# capture the api hook log from logcat; the -d option gives a one-shot dump instead of streaming
logcat_dump_command = "logcat -d -v time -s hook-ioctl"
result = adb.shell(logcat_dump_command, root=True, device_serial=device_serial)
if result.std_output:
return result.std_output
else:
return result.std_error
def get_view(self):
"""
Get the Django configuration form.
If you don't know what to do with this method, DON'T CHANGE ANYTHING.
:return: Django configuration form
"""
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from view_system_call import ConfigForm
sys.path.remove(os.path.dirname(os.path.abspath(__file__)))
return ConfigForm
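# Editorial sketch (not part of the original module): the methods above form
# the monitor lifecycle -- prepare() patches the ramdisk before boot,
# preSession() runs just before the sample is installed, and postSession()
# pulls the captured log. The driver below is hypothetical; the real params
# and session objects are supplied by the DroidPot framework.
def _demo_lifecycle(session, device_serial):
    params = {"api_monitoring": "on", "syscall_monitoring": "off"}
    monitor = system_call()
    monitor.prepare(params, session, device_serial)
    monitor.preSession(params, None, session, device_serial)
    # ... install and exercise the malicious apk here ...
    return monitor.postSession(params, None, session, device_serial)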
|
Mellthas/quodlibet | refs/heads/master | quodlibet/tests/test_qltk__editutils.py | 4 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from tests import TestCase
from quodlibet.formats import DUMMY_SONG
from quodlibet.qltk._editutils import FilterCheckButton, \
OverwriteWarning, WriteFailedError, FilterPluginBox, EditingPluginHandler
class FCB(FilterCheckButton):
_section = _key = _label = "foo"
class FCB2(FCB):
_order = 1.0
class FCB3(FCB):
_order = 1.2
class FCB4(FCB):
_order = 1.3
class FCB5(FCB):
_order = 1.3
class FCB1(FCB):
_order = 1.4
class TFilterCheckButton(TestCase):
def setUp(self):
self.fcb1 = FCB1()
self.fcb2 = FCB2()
self.fcb3 = FCB3()
self.fcb4 = FCB4()
self.fcb5 = FCB5()
def test_filter(self):
self.failUnlessRaises(NotImplementedError, self.fcb1.filter, "", "")
def test_filter_list(self):
self.failUnlessRaises(
NotImplementedError, self.fcb1.filter_list, [""], [""])
def test_cmp(self):
l = [self.fcb1, self.fcb2, self.fcb3, self.fcb4, self.fcb5]
l.sort()
self.failUnlessEqual(
l, [self.fcb2, self.fcb3, self.fcb4, self.fcb5, self.fcb1])
def tearDown(self):
for cb in [self.fcb1, self.fcb2, self.fcb3, self.fcb4, self.fcb5]:
cb.destroy()
class TEditDialogs(TestCase):
def test_overwrite(self):
OverwriteWarning(None, DUMMY_SONG).destroy()
def test_write_failed(self):
WriteFailedError(None, DUMMY_SONG).destroy()
class TFilterPluginBox(TestCase):
def test_main(self):
handler = EditingPluginHandler()
x = FilterPluginBox(handler)
self.assertEqual(x.filters, [])
x.destroy()
|
vrpolak/slowsort | refs/heads/master | mutable_stable_lazy_zigzag_pairing_weak_heap.py | 1 | """Module that defines mutable stable zigzag pairing weak heap."""
from weakref import ref
from pep_3140 import Deque
from pep_3140 import List
from sorted_using_weak_heap import sorted_using_mutable_stable_weak_heap
from mutable_priority_weak_queue import MutablePriorityWeakQueue
def _ref_or_none(item):
"""Return weak reference if item is (or points to) non-None, else return None."""
if isinstance(item, ref):
item = item()
return None if item is None else ref(item)
def _item_or_none(reference):
"""Return weakly referenced item, or None if reference is None or the item is dead."""
return None if reference is None else reference()
class MutableStableLazyZigzagPairingWeakHeap(MutablePriorityWeakQueue):
"""A weak heap that is mutable, stable, lazy and zigzag pairing.
Heap: An implementation, usable as a queue, least priority value in, first out.
Weak: Not a container. Items can vanish from the queue, iff they get garbage collected.
Lazy: Least element is determined only upon pop, in the hope of getting more relevant comparisons.
Mutable: Self is altered regularly to avoid excessive object creation.
Stable: Two include methods allow the caller to decide the tiebreaker.
Pairing: Most sub-heap comparisons are on pairs of "equal" sub-heaps.
Zigzag: The odd sub-heap is left at alternating ends.
This implementation uses Deque to store an ordered collection of sub-heaps."""
def __init__(self, top_item=None, forest=None):
"""Initialize a queue."""
self.top_ref = _ref_or_none(top_item)
self.forest = forest if forest is not None else Deque()
def ensure_top_demoted(self):
"""In case heap has a top, demote it so merge is easier."""
self.top_ref = _ref_or_none(self.top_ref)
if self.top_ref is None:
return
demoted = MutableStableLazyZigzagPairingWeakHeap(self.top_ref, self.forest)
self.top_ref = None
self.forest = Deque([demoted])
def add(self, item):
"""Add item to self, prioritized after current items, do not compare yet."""
self.ensure_top_demoted()
self.forest.append(MutableStableLazyZigzagPairingWeakHeap(top_item=item))
def _include_after(self, heap):
"""Include another heap, forest-ordered after current items."""
# Do not ensure top promoted, as it was the previous top who won comparison.
self.forest.append(heap)
def _include_before(self, heap):
"""Include another heap, forest-ordered before current items."""
# Do not ensure top promoted, as it was the previous top who won comparison.
self.forest.appendleft(heap)
def peek(self):
"""Return least priority item or None if empty, this includes promoting top, but not extraction.
This also acts as ensure_top_promoted.
Do pairwise includes in zigzag fashion until there is only one tree. Then upgrade.
Return the top item (not weakref) to make sure top stay promoted (instead of vanishing).
"""
top_item = _item_or_none(self.top_ref)
if (top_item is not None) or (not self.forest):
return top_item
# In order to prevent returning None on nonempty heap,
# we need to track any comparison winner, to prevent it from dying.
protected = None
while len(self.forest) > 1:
# zig
new_forest = Deque()
while len(self.forest) > 1:
latter_heap = self.forest.pop()
former_heap = self.forest.pop()
# We need to peek and check for None as items might got deleted.
latter_item = latter_heap.peek()
former_item = former_heap.peek()
if latter_item is None:
if former_item is None:
continue
else:
self.forest.append(former_heap)
continue
if former_item is None:
self.forest.append(latter_heap)
continue
if latter_item < former_item:
protected = latter_item
latter_heap._include_before(former_heap)
new_forest.appendleft(latter_heap)
else:
protected = former_item
former_heap._include_after(latter_heap)
new_forest.appendleft(former_heap)
if self.forest:
new_forest.appendleft(self.forest.pop())
self.forest = new_forest
# zag
new_forest = Deque()
while len(self.forest) > 1:
former_heap = self.forest.popleft()
latter_heap = self.forest.popleft()
former_item = former_heap.peek()
latter_item = latter_heap.peek()
if latter_item is None:
if former_item is None:
continue
else:
self.forest.appendleft(former_heap)
continue
if former_item is None:
self.forest.appendleft(latter_heap)
continue
if latter_item < former_item:
protected = latter_item
latter_heap._include_before(former_heap)
new_forest.append(latter_heap)
else:
protected = former_item
former_heap._include_after(latter_heap)
new_forest.append(former_heap)
if self.forest:
new_forest.append(self.forest.pop())
self.forest = new_forest
new_state = self.forest.pop()
self.top_ref = new_state.top_ref
self.forest = new_state.forest
top_item = _item_or_none(self.top_ref)
return protected if top_item is None else top_item
def pop(self):
"""Extract the least item from self and return that, or None if empty."""
item = self.peek()
self.top_ref = None
return item
def mslzpwh_sorted(source):
"""Return new List of items, sorted using the mslzpw heap."""
return sorted_using_mutable_stable_weak_heap(MutableStableLazyZigzagPairingWeakHeap, source)
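# Editorial usage sketch (not part of the original module), exercising the
# weak semantics described in the class docstring. Items must support weak
# references, so plain ints will not do; a trivial wrapper class is used.
# Under CPython, dropping the last strong reference collects the object
# immediately, so it silently vanishes from the heap.
def _demo_weak_heap():
    class Item(object):
        def __init__(self, value):
            self.value = value
        def __lt__(self, other):
            return self.value < other.value
    heap = MutableStableLazyZigzagPairingWeakHeap()
    doomed = Item(0)
    kept = [Item(3), Item(1)]
    for item in [doomed] + kept:
        heap.add(item)
    del doomed  # last strong reference gone; Item(0) drops out of the heap
    assert heap.pop() is kept[1]  # Item(1) is the least *surviving* item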
|
helldorado/ansible | refs/heads/devel | hacking/create_deprecated_issues.py | 17 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# (c) 2017, Matt Martz <matt@sivel.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import argparse
import os
import time
from collections import defaultdict
from ansible.release import __version__ as ansible_version
ansible_major_version = '.'.join(ansible_version.split('.')[:2])
try:
from github3 import GitHub
except ImportError:
raise SystemExit(
'This script needs the github3.py library installed to work'
)
if not os.getenv('GITHUB_TOKEN'):
raise SystemExit(
'Please set the GITHUB_TOKEN env var with your github oauth token'
)
deprecated = defaultdict(list)
parser = argparse.ArgumentParser()
parser.add_argument('--template', default='deprecated_issue_template.md',
type=argparse.FileType('r'),
help='Path to markdown file template to be used for issue '
'body. Default: %(default)s')
parser.add_argument('problems', nargs=1, type=argparse.FileType('r'),
help='Path to file containing pylint output for the '
'ansible-deprecated-version check')
args = parser.parse_args()
body_tmpl = args.template.read()
args.template.close()
text = args.problems[0].read()
args.problems[0].close()
for line in text.splitlines():
path = line.split(':')[0]
if path.endswith('__init__.py'):
component = os.path.basename(os.path.dirname(path))
else:
component, ext_ = os.path.splitext(os.path.basename(path).lstrip('_'))
title = (
'%s contains deprecated call to be removed in %s' %
(component, ansible_major_version)
)
deprecated[component].append(
dict(title=title, path=path, line=line)
)
g = GitHub(token=os.getenv('GITHUB_TOKEN'))
repo = g.repository('ansible', 'ansible')
# Not enabled by default, this fetches the column of a project,
# so that we can later add the issue to a project column
# You will need the project and column IDs for this to work
# and then update the below lines
# project = repo.project(2141803)
# project_column = project.column(4348504)
for component, items in deprecated.items():
title = items[0]['title']
path = '\n'.join(set((i['path']) for i in items))
line = '\n'.join(i['line'] for i in items)
body = body_tmpl % dict(component=component, path=path,
line=line,
version=ansible_major_version)
issue = repo.create_issue(title, body=body, labels=['deprecated'])
print(issue)
# Sleep a little, so that the API doesn't block us
time.sleep(0.5)
# Uncomment the next 2 lines if you want to add issues to a project
# Needs to be done in combination with the above code for selecting
# the project/column
# project_column.create_card_with_issue(issue)
# time.sleep(0.5)
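# Editorial note (not part of the original script): the `problems` argument is
# expected to be pylint output where each line starts with a file path followed
# by ':', e.g. (hypothetical path and message):
#   lib/ansible/modules/cloud/foo.py:42: [ansible-deprecated-version] ...
# Only the path before the first ':' is used above. Example invocation:
#   GITHUB_TOKEN=... ./create_deprecated_issues.py pylint-deprecated.txt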
|
byndcivilization/toy-infrastructure | refs/heads/master | flask-app/venv/lib/python3.6/site-packages/pip/_vendor/distro.py | 330 | # Copyright 2015,2016 Nir Cohen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The ``distro`` package (``distro`` stands for Linux Distribution) provides
information about the Linux distribution it runs on, such as a reliable
machine-readable distro ID, or version information.
It is a renewed alternative implementation for Python's original
:py:func:`platform.linux_distribution` function, but it provides much more
functionality. An alternative implementation became necessary because Python
3.5 deprecated this function, and Python 3.7 is expected to remove it
altogether. Its predecessor function :py:func:`platform.dist` has been
deprecated since Python 2.6 and is also expected to be removed in Python 3.7.
Still, there are many cases in which access to Linux distribution information
is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
more information.
"""
import os
import re
import sys
import json
import shlex
import logging
import subprocess
if not sys.platform.startswith('linux'):
raise ImportError('Unsupported platform: {0}'.format(sys.platform))
_UNIXCONFDIR = '/etc'
_OS_RELEASE_BASENAME = 'os-release'
#: Translation table for normalizing the "ID" attribute defined in os-release
#: files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as defined in the os-release file, translated to lower case,
#: with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_OS_ID = {}
#: Translation table for normalizing the "Distributor ID" attribute returned by
#: the lsb_release command, for use by the :func:`distro.id` method.
#:
#: * Key: Value as returned by the lsb_release command, translated to lower
#: case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_LSB_ID = {
'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux
'redhatenterpriseworkstation': 'rhel', # RHEL 6.7
}
#: Translation table for normalizing the distro ID derived from the file name
#: of distro release files, for use by the :func:`distro.id` method.
#:
#: * Key: Value as derived from the file name of a distro release file,
#: translated to lower case, with blanks translated to underscores.
#:
#: * Value: Normalized value.
NORMALIZED_DISTRO_ID = {
'redhat': 'rhel', # RHEL 6.x, 7.x
}
# Pattern for content of distro release file (reversed)
_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
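# (Editorial note, illustrative only: the pattern above is applied to the
# *reversed* first line of a distro release file. For
#     'CentOS Linux release 7.1.1503 (Core)'
# the reversed line is matched and the groups, reversed back, give
# codename='Core', version_id='7.1.1503', name='CentOS Linux'.)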
# Pattern for base file name of distro release file
_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
r'(\w+)[-_](release|version)$')
# Base file names to be ignored when searching for distro release file
_DISTRO_RELEASE_IGNORE_BASENAMES = (
'debian_version',
'lsb-release',
'oem-release',
_OS_RELEASE_BASENAME,
'system-release'
)
def linux_distribution(full_distribution_name=True):
"""
Return information about the current Linux distribution as a tuple
``(id_name, version, codename)`` with items as follows:
* ``id_name``: If *full_distribution_name* is false, the result of
:func:`distro.id`. Otherwise, the result of :func:`distro.name`.
* ``version``: The result of :func:`distro.version`.
* ``codename``: The result of :func:`distro.codename`.
The interface of this function is compatible with the original
:py:func:`platform.linux_distribution` function, supporting a subset of
its parameters.
The data it returns may not exactly be the same, because it uses more data
sources than the original function, and that may lead to different data if
the Linux distribution is not consistent across multiple data sources it
provides (there are indeed such distributions ...).
Another reason for differences is the fact that the :func:`distro.id`
method normalizes the distro ID string to a reliable machine-readable value
for a number of popular Linux distributions.
"""
return _distro.linux_distribution(full_distribution_name)
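# Editorial sketch (not part of the original module); illustrative output for
# a hypothetical CentOS 7 host:
#
#   >>> import distro
#   >>> distro.linux_distribution(full_distribution_name=False)
#   ('centos', '7.1.1503', 'Core')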
def id():
"""
Return the distro ID of the current Linux distribution, as a
machine-readable string.
For a number of Linux distributions, the returned distro ID value is
*reliable*, in the sense that it is documented and that it does not change
across releases of the distribution.
This package maintains the following reliable distro ID values:
============== =========================================
Distro ID Distribution
============== =========================================
"ubuntu" Ubuntu
"debian" Debian
"rhel" RedHat Enterprise Linux
"centos" CentOS
"fedora" Fedora
"sles" SUSE Linux Enterprise Server
"opensuse" openSUSE
"amazon" Amazon Linux
"arch" Arch Linux
"cloudlinux" CloudLinux OS
"exherbo" Exherbo Linux
"gentoo" GenToo Linux
"ibm_powerkvm" IBM PowerKVM
"kvmibm" KVM for IBM z Systems
"linuxmint" Linux Mint
"mageia" Mageia
"mandriva" Mandriva Linux
"parallels" Parallels
"pidora" Pidora
"raspbian" Raspbian
"oracle" Oracle Linux (and Oracle Enterprise Linux)
"scientific" Scientific Linux
"slackware" Slackware
"xenserver" XenServer
============== =========================================
If you need a distro with a reliable ID added to this set, or if you
find that the :func:`distro.id` function returns a different distro ID
for one of the listed distros, please create an issue in the
`distro issue tracker`_.
**Lookup hierarchy and transformations:**
First, the ID is obtained from the following sources, in the specified
order. The first available and non-empty value is used:
* the value of the "ID" attribute of the os-release file,
* the value of the "Distributor ID" attribute returned by the lsb_release
command,
* the first part of the file name of the distro release file,
The so determined ID value then passes the following transformations,
before it is returned by this method:
* it is translated to lower case,
* blanks (which should not be there anyway) are translated to underscores,
* a normalization of the ID is performed, based upon
`normalization tables`_. The purpose of this normalization is to ensure
that the ID is as reliable as possible, even across incompatible changes
in the Linux distributions. A common reason for an incompatible change is
the addition of an os-release file, or the addition of the lsb_release
command, with ID values that differ from what was previously determined
from the distro release file name.
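    For example (hypothetical output; the actual value depends on the
    host):

    .. sourcecode:: python

        >>> import distro
        >>> distro.id()
        'ubuntu'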
"""
return _distro.id()
def name(pretty=False):
"""
Return the name of the current Linux distribution, as a human-readable
string.
If *pretty* is false, the name is returned without version or codename.
(e.g. "CentOS Linux")
If *pretty* is true, the version and codename are appended.
(e.g. "CentOS Linux 7.1.1503 (Core)")
**Lookup hierarchy:**
The name is obtained from the following sources, in the specified order.
The first available and non-empty value is used:
* If *pretty* is false:
- the value of the "NAME" attribute of the os-release file,
- the value of the "Distributor ID" attribute returned by the lsb_release
command,
- the value of the "<name>" field of the distro release file.
* If *pretty* is true:
- the value of the "PRETTY_NAME" attribute of the os-release file,
- the value of the "Description" attribute returned by the lsb_release
command,
- the value of the "<name>" field of the distro release file, appended
with the value of the pretty version ("<version_id>" and "<codename>"
fields) of the distro release file, if available.
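    For example (hypothetical output, reusing the CentOS values from
    above):

    .. sourcecode:: python

        >>> import distro
        >>> distro.name()
        'CentOS Linux'
        >>> distro.name(pretty=True)
        'CentOS Linux 7.1.1503 (Core)'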
"""
return _distro.name(pretty)
def version(pretty=False, best=False):
"""
Return the version of the current Linux distribution, as a human-readable
string.
If *pretty* is false, the version is returned without codename (e.g.
"7.0").
    If *pretty* is true, the codename in parentheses is appended, if the
    codename is non-empty (e.g. "7.0 (Maipo)").
Some distributions provide version numbers with different precisions in
the different sources of distribution information. Examining the different
sources in a fixed priority order does not always yield the most precise
version (e.g. for Debian 8.2, or CentOS 7.1).
The *best* parameter can be used to control the approach for the returned
version:
If *best* is false, the first non-empty version number in priority order of
the examined sources is returned.
If *best* is true, the most precise version number out of all examined
sources is returned.
**Lookup hierarchy:**
In all cases, the version number is obtained from the following sources.
If *best* is false, this order represents the priority order:
* the value of the "VERSION_ID" attribute of the os-release file,
* the value of the "Release" attribute returned by the lsb_release
command,
* the version number parsed from the "<version_id>" field of the first line
of the distro release file,
* the version number parsed from the "PRETTY_NAME" attribute of the
os-release file, if it follows the format of the distro release files.
* the version number parsed from the "Description" attribute returned by
the lsb_release command, if it follows the format of the distro release
files.
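    For example, on a hypothetical host where the os-release file reports
    "7.0" but the distro release file carries the more precise "7.0.1406":

    .. sourcecode:: python

        >>> import distro
        >>> distro.version()
        '7.0'
        >>> distro.version(best=True)
        '7.0.1406'
        >>> distro.version(pretty=True)
        '7.0 (Maipo)'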
"""
return _distro.version(pretty, best)
def version_parts(best=False):
"""
Return the version of the current Linux distribution as a tuple
``(major, minor, build_number)`` with items as follows:
* ``major``: The result of :func:`distro.major_version`.
* ``minor``: The result of :func:`distro.minor_version`.
* ``build_number``: The result of :func:`distro.build_number`.
For a description of the *best* parameter, see the :func:`distro.version`
method.
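    For example (hypothetical values, matching the example in
    :func:`distro.version`):

    .. sourcecode:: python

        >>> import distro
        >>> distro.version_parts(best=True)
        ('7', '0', '1406')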
"""
return _distro.version_parts(best)
def major_version(best=False):
"""
Return the major version of the current Linux distribution, as a string,
if provided.
Otherwise, the empty string is returned. The major version is the first
part of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.major_version(best)
def minor_version(best=False):
"""
Return the minor version of the current Linux distribution, as a string,
if provided.
Otherwise, the empty string is returned. The minor version is the second
part of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.minor_version(best)
def build_number(best=False):
"""
Return the build number of the current Linux distribution, as a string,
if provided.
Otherwise, the empty string is returned. The build number is the third part
of the dot-separated version string.
For a description of the *best* parameter, see the :func:`distro.version`
method.
"""
return _distro.build_number(best)
def like():
"""
Return a space-separated list of distro IDs of distributions that are
closely related to the current Linux distribution in regards to packaging
and programming interfaces, for example distributions the current
distribution is a derivative from.
**Lookup hierarchy:**
This information item is only provided by the os-release file.
For details, see the description of the "ID_LIKE" attribute in the
`os-release man page
<http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
"""
return _distro.like()
def codename():
"""
Return the codename for the release of the current Linux distribution,
as a string.
If the distribution does not have a codename, an empty string is returned.
Note that the returned codename is not always really a codename. For
example, openSUSE returns "x86_64". This function does not handle such
cases in any special way and just returns the string it finds, if any.
**Lookup hierarchy:**
* the codename within the "VERSION" attribute of the os-release file, if
provided,
* the value of the "Codename" attribute returned by the lsb_release
command,
* the value of the "<codename>" field of the distro release file.
"""
return _distro.codename()
def info(pretty=False, best=False):
"""
Return certain machine-readable information items about the current Linux
distribution in a dictionary, as shown in the following example:
.. sourcecode:: python
{
'id': 'rhel',
'version': '7.0',
'version_parts': {
'major': '7',
'minor': '0',
'build_number': ''
},
'like': 'fedora',
'codename': 'Maipo'
}
The dictionary structure and keys are always the same, regardless of which
information items are available in the underlying data sources. The values
for the various keys are as follows:
* ``id``: The result of :func:`distro.id`.
* ``version``: The result of :func:`distro.version`.
* ``version_parts -> major``: The result of :func:`distro.major_version`.
* ``version_parts -> minor``: The result of :func:`distro.minor_version`.
* ``version_parts -> build_number``: The result of
:func:`distro.build_number`.
* ``like``: The result of :func:`distro.like`.
* ``codename``: The result of :func:`distro.codename`.
For a description of the *pretty* and *best* parameters, see the
:func:`distro.version` method.
"""
return _distro.info(pretty, best)
def os_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the os-release file data source of the current Linux distribution.
See `os-release file`_ for details about these information items.
"""
return _distro.os_release_info()
def lsb_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the lsb_release command data source of the current Linux distribution.
See `lsb_release command output`_ for details about these information
items.
"""
return _distro.lsb_release_info()
def distro_release_info():
"""
Return a dictionary containing key-value pairs for the information items
from the distro release file data source of the current Linux distribution.
See `distro release file`_ for details about these information items.
"""
return _distro.distro_release_info()
def os_release_attr(attribute):
"""
Return a single named information item from the os-release file data source
of the current Linux distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `os-release file`_ for details about these information items.
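    For example (attribute keys are the lower-cased os-release attribute
    names; the value shown is hypothetical):

    .. sourcecode:: python

        >>> import distro
        >>> distro.os_release_attr('id_like')
        'fedora'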
"""
return _distro.os_release_attr(attribute)
def lsb_release_attr(attribute):
"""
Return a single named information item from the lsb_release command output
data source of the current Linux distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `lsb_release command output`_ for details about these information
items.
"""
return _distro.lsb_release_attr(attribute)
def distro_release_attr(attribute):
"""
Return a single named information item from the distro release file
data source of the current Linux distribution.
Parameters:
* ``attribute`` (string): Key of the information item.
Returns:
* (string): Value of the information item, if the item exists.
The empty string, if the item does not exist.
See `distro release file`_ for details about these information items.
"""
return _distro.distro_release_attr(attribute)
class LinuxDistribution(object):
"""
Provides information about a Linux distribution.
This package creates a private module-global instance of this class with
default initialization arguments, that is used by the
`consolidated accessor functions`_ and `single source accessor functions`_.
By using default initialization arguments, that module-global instance
returns data about the current Linux distribution (i.e. the distro this
package runs on).
Normally, it is not necessary to create additional instances of this class.
However, in situations where control is needed over the exact data sources
that are used, instances of this class can be created with a specific
distro release file, or a specific os-release file, or without invoking the
lsb_release command.
"""
def __init__(self,
include_lsb=True,
os_release_file='',
distro_release_file=''):
"""
The initialization method of this class gathers information from the
available data sources, and stores that in private instance attributes.
Subsequent access to the information items uses these private instance
attributes, so that the data sources are read only once.
Parameters:
* ``include_lsb`` (bool): Controls whether the
`lsb_release command output`_ is included as a data source.
If the lsb_release command is not available in the program execution
path, the data source for the lsb_release command will be empty.
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is to be used as a data source.
An empty string (the default) will cause the default path name to
be used (see `os-release file`_ for details).
If the specified or defaulted os-release file does not exist, the
data source for the os-release file will be empty.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is to be used as a data source.
An empty string (the default) will cause a default search algorithm
to be used (see `distro release file`_ for details).
If the specified distro release file does not exist, or if no default
distro release file can be found, the data source for the distro
release file will be empty.
Public instance attributes:
* ``os_release_file`` (string): The path name of the
`os-release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
* ``distro_release_file`` (string): The path name of the
`distro release file`_ that is actually used as a data source. The
empty string if no distro release file is used as a data source.
Raises:
* :py:exc:`IOError`: Some I/O issue with an os-release file or distro
release file.
* :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
some issue (other than not being available in the program execution
path).
* :py:exc:`UnicodeError`: A data source has unexpected characters or
uses an unexpected encoding.
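        As a minimal sketch, an instance that reads a specific os-release
        file (the path below is hypothetical) and skips the lsb_release
        command could be created like this:

        .. sourcecode:: python

            ld = LinuxDistribution(
                include_lsb=False,
                os_release_file='/path/to/os-release')
            print(ld.id(), ld.version())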
"""
self.os_release_file = os_release_file or \
os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
self.distro_release_file = distro_release_file or '' # updated later
self._os_release_info = self._get_os_release_info()
self._lsb_release_info = self._get_lsb_release_info() \
if include_lsb else {}
self._distro_release_info = self._get_distro_release_info()
def __repr__(self):
"""Return repr of all info
"""
return \
"LinuxDistribution(" \
"os_release_file={0!r}, " \
"distro_release_file={1!r}, " \
"_os_release_info={2!r}, " \
"_lsb_release_info={3!r}, " \
"_distro_release_info={4!r})".format(
self.os_release_file,
self.distro_release_file,
self._os_release_info,
self._lsb_release_info,
self._distro_release_info)
def linux_distribution(self, full_distribution_name=True):
"""
Return information about the Linux distribution that is compatible
with Python's :func:`platform.linux_distribution`, supporting a subset
of its parameters.
For details, see :func:`distro.linux_distribution`.
"""
return (
self.name() if full_distribution_name else self.id(),
self.version(),
self.codename()
)
def id(self):
"""Return the distro ID of the Linux distribution, as a string.
For details, see :func:`distro.id`.
"""
def normalize(distro_id, table):
distro_id = distro_id.lower().replace(' ', '_')
return table.get(distro_id, distro_id)
distro_id = self.os_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_OS_ID)
distro_id = self.lsb_release_attr('distributor_id')
if distro_id:
return normalize(distro_id, NORMALIZED_LSB_ID)
distro_id = self.distro_release_attr('id')
if distro_id:
return normalize(distro_id, NORMALIZED_DISTRO_ID)
return ''
def name(self, pretty=False):
"""
Return the name of the Linux distribution, as a string.
For details, see :func:`distro.name`.
"""
name = self.os_release_attr('name') \
or self.lsb_release_attr('distributor_id') \
or self.distro_release_attr('name')
if pretty:
name = self.os_release_attr('pretty_name') \
or self.lsb_release_attr('description')
if not name:
name = self.distro_release_attr('name')
version = self.version(pretty=True)
if version:
name = name + ' ' + version
return name or ''
def version(self, pretty=False, best=False):
"""
Return the version of the Linux distribution, as a string.
For details, see :func:`distro.version`.
"""
versions = [
self.os_release_attr('version_id'),
self.lsb_release_attr('release'),
self.distro_release_attr('version_id'),
self._parse_distro_release_content(
self.os_release_attr('pretty_name')).get('version_id', ''),
self._parse_distro_release_content(
self.lsb_release_attr('description')).get('version_id', '')
]
version = ''
if best:
# This algorithm uses the last version in priority order that has
# the best precision. If the versions are not in conflict, that
# does not matter; otherwise, using the last one instead of the
# first one might be considered a surprise.
for v in versions:
if v.count(".") > version.count(".") or version == '':
version = v
else:
for v in versions:
if v != '':
version = v
break
if pretty and version and self.codename():
version = u'{0} ({1})'.format(version, self.codename())
return version
def version_parts(self, best=False):
"""
Return the version of the Linux distribution, as a tuple of version
numbers.
For details, see :func:`distro.version_parts`.
"""
version_str = self.version(best=best)
if version_str:
version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
matches = version_regex.match(version_str)
if matches:
major, minor, build_number = matches.groups()
return major, minor or '', build_number or ''
return '', '', ''
def major_version(self, best=False):
"""
Return the major version number of the current distribution.
For details, see :func:`distro.major_version`.
"""
return self.version_parts(best)[0]
def minor_version(self, best=False):
"""
Return the minor version number of the Linux distribution.
For details, see :func:`distro.minor_version`.
"""
return self.version_parts(best)[1]
def build_number(self, best=False):
"""
Return the build number of the Linux distribution.
For details, see :func:`distro.build_number`.
"""
return self.version_parts(best)[2]
def like(self):
"""
Return the IDs of distributions that are like the Linux distribution.
For details, see :func:`distro.like`.
"""
return self.os_release_attr('id_like') or ''
def codename(self):
"""
Return the codename of the Linux distribution.
For details, see :func:`distro.codename`.
"""
return self.os_release_attr('codename') \
or self.lsb_release_attr('codename') \
or self.distro_release_attr('codename') \
or ''
def info(self, pretty=False, best=False):
"""
Return certain machine-readable information about the Linux
distribution.
For details, see :func:`distro.info`.
"""
return dict(
id=self.id(),
version=self.version(pretty, best),
version_parts=dict(
major=self.major_version(best),
minor=self.minor_version(best),
build_number=self.build_number(best)
),
like=self.like(),
codename=self.codename(),
)
def os_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the os-release file data source of the Linux distribution.
For details, see :func:`distro.os_release_info`.
"""
return self._os_release_info
def lsb_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the lsb_release command data source of the Linux
distribution.
For details, see :func:`distro.lsb_release_info`.
"""
return self._lsb_release_info
def distro_release_info(self):
"""
Return a dictionary containing key-value pairs for the information
items from the distro release file data source of the Linux
distribution.
For details, see :func:`distro.distro_release_info`.
"""
return self._distro_release_info
def os_release_attr(self, attribute):
"""
Return a single named information item from the os-release file data
source of the Linux distribution.
For details, see :func:`distro.os_release_attr`.
"""
return self._os_release_info.get(attribute, '')
def lsb_release_attr(self, attribute):
"""
Return a single named information item from the lsb_release command
output data source of the Linux distribution.
For details, see :func:`distro.lsb_release_attr`.
"""
return self._lsb_release_info.get(attribute, '')
def distro_release_attr(self, attribute):
"""
Return a single named information item from the distro release file
data source of the Linux distribution.
For details, see :func:`distro.distro_release_attr`.
"""
return self._distro_release_info.get(attribute, '')
def _get_os_release_info(self):
"""
Get the information items from the specified os-release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(self.os_release_file):
with open(self.os_release_file) as release_file:
return self._parse_os_release_content(release_file)
return {}
@staticmethod
def _parse_os_release_content(lines):
"""
Parse the lines of an os-release file.
Parameters:
* lines: Iterable through the lines in the os-release file.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
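        For example, parsing the content below yields the dictionary shown
        (a minimal sketch; real os-release files carry more attributes):

        .. sourcecode:: python

            content = 'NAME="Ubuntu" VERSION="16.04 LTS (Xenial Xerus)"'
            LinuxDistribution._parse_os_release_content(content)
            # -> {'name': 'Ubuntu',
            #     'version': '16.04 LTS (Xenial Xerus)',
            #     'codename': 'Xenial Xerus'}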
"""
props = {}
lexer = shlex.shlex(lines, posix=True)
lexer.whitespace_split = True
# The shlex module defines its `wordchars` variable using literals,
# making it dependent on the encoding of the Python source file.
# In Python 2.6 and 2.7, the shlex source file is encoded in
# 'iso-8859-1', and the `wordchars` variable is defined as a byte
# string. This causes a UnicodeDecodeError to be raised when the
# parsed content is a unicode object. The following fix resolves that
# (... but it should be fixed in shlex...):
if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
tokens = list(lexer)
for token in tokens:
# At this point, all shell-like parsing has been done (i.e.
# comments processed, quotes and backslash escape sequences
# processed, multi-line values assembled, trailing newlines
# stripped, etc.), so the tokens are now either:
# * variable assignments: var=value
# * commands or their arguments (not allowed in os-release)
if '=' in token:
k, v = token.split('=', 1)
if isinstance(v, bytes):
v = v.decode('utf-8')
props[k.lower()] = v
if k == 'VERSION':
# this handles cases in which the codename is in
# the `(CODENAME)` (rhel, centos, fedora) format
# or in the `, CODENAME` format (Ubuntu).
codename = re.search(r'(\(\D+\))|,(\s+)?\D+', v)
if codename:
codename = codename.group()
codename = codename.strip('()')
codename = codename.strip(',')
codename = codename.strip()
                        # codename appears within parentheses.
props['codename'] = codename
else:
props['codename'] = ''
else:
# Ignore any tokens that are not variable assignments
pass
return props
def _get_lsb_release_info(self):
"""
Get the information items from the lsb_release command output.
Returns:
A dictionary containing all information items.
"""
cmd = 'lsb_release -a'
process = subprocess.Popen(
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = process.communicate()
stdout, stderr = stdout.decode('utf-8'), stderr.decode('utf-8')
code = process.returncode
if code == 0:
content = stdout.splitlines()
return self._parse_lsb_release_content(content)
elif code == 127: # Command not found
return {}
else:
if sys.version_info[:2] >= (3, 5):
raise subprocess.CalledProcessError(code, cmd, stdout, stderr)
elif sys.version_info[:2] >= (2, 7):
raise subprocess.CalledProcessError(code, cmd, stdout)
elif sys.version_info[:2] == (2, 6):
raise subprocess.CalledProcessError(code, cmd)
@staticmethod
def _parse_lsb_release_content(lines):
"""
Parse the output of the lsb_release command.
Parameters:
* lines: Iterable through the lines of the lsb_release output.
Each line must be a unicode string or a UTF-8 encoded byte
string.
Returns:
A dictionary containing all information items.
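        For example (a minimal sketch of typical lsb_release output):

        .. sourcecode:: python

            lines = ['Distributor ID: Ubuntu', 'Release: 16.04']
            LinuxDistribution._parse_lsb_release_content(lines)
            # -> {'distributor_id': 'Ubuntu', 'release': '16.04'}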
"""
props = {}
for line in lines:
line = line.decode('utf-8') if isinstance(line, bytes) else line
kv = line.strip('\n').split(':', 1)
if len(kv) != 2:
# Ignore lines without colon.
continue
k, v = kv
props.update({k.replace(' ', '_').lower(): v.strip()})
return props
def _get_distro_release_info(self):
"""
Get the information items from the specified distro release file.
Returns:
A dictionary containing all information items.
"""
if self.distro_release_file:
# If it was specified, we use it and parse what we can, even if
# its file name or content does not match the expected pattern.
distro_info = self._parse_distro_release_file(
self.distro_release_file)
basename = os.path.basename(self.distro_release_file)
# The file name pattern for user-specified distro release files
# is somewhat more tolerant (compared to when searching for the
# file), because we want to use what was specified as best as
# possible.
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
distro_info['id'] = match.group(1)
return distro_info
else:
basenames = os.listdir(_UNIXCONFDIR)
# We sort for repeatability in cases where there are multiple
# distro specific files; e.g. CentOS, Oracle, Enterprise all
# containing `redhat-release` on top of their own.
basenames.sort()
for basename in basenames:
if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
continue
match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
if match:
filepath = os.path.join(_UNIXCONFDIR, basename)
distro_info = self._parse_distro_release_file(filepath)
if 'name' in distro_info:
# The name is always present if the pattern matches
self.distro_release_file = filepath
distro_info['id'] = match.group(1)
return distro_info
return {}
def _parse_distro_release_file(self, filepath):
"""
Parse a distro release file.
Parameters:
* filepath: Path name of the distro release file.
Returns:
A dictionary containing all information items.
"""
if os.path.isfile(filepath):
with open(filepath) as fp:
# Only parse the first line. For instance, on SLES there
# are multiple lines. We don't want them...
return self._parse_distro_release_content(fp.readline())
return {}
@staticmethod
def _parse_distro_release_content(line):
"""
Parse a line from a distro release file.
Parameters:
* line: Line from the distro release file. Must be a unicode string
or a UTF-8 encoded byte string.
Returns:
A dictionary containing all information items.
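        For example, a typical first line of a distro release file parses
        as follows (illustrative):

        .. sourcecode:: python

            line = 'CentOS Linux release 7.1.1503 (Core)'
            LinuxDistribution._parse_distro_release_content(line)
            # -> {'name': 'CentOS Linux',
            #     'version_id': '7.1.1503',
            #     'codename': 'Core'}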
"""
if isinstance(line, bytes):
line = line.decode('utf-8')
matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
distro_info['name'] = matches.group(3)[::-1]
if matches.group(2):
distro_info['version_id'] = matches.group(2)[::-1]
if matches.group(1):
distro_info['codename'] = matches.group(1)[::-1]
elif line:
distro_info['name'] = line.strip()
return distro_info
_distro = LinuxDistribution()
def main():
import argparse
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler(sys.stdout))
parser = argparse.ArgumentParser(description="Linux distro info tool")
parser.add_argument(
'--json',
'-j',
help="Output in machine readable format",
action="store_true")
args = parser.parse_args()
if args.json:
logger.info(json.dumps(info(), indent=4, sort_keys=True))
else:
logger.info('Name: %s', name(pretty=True))
distribution_version = version(pretty=True)
if distribution_version:
logger.info('Version: %s', distribution_version)
distribution_codename = codename()
if distribution_codename:
logger.info('Codename: %s', distribution_codename)
if __name__ == '__main__':
main()
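# A minimal sketch of running this module as a script (the output below is
# hypothetical and depends on the host):
#
#     $ python distro.py
#     Name: CentOS Linux 7.1.1503 (Core)
#     Version: 7.1.1503 (Core)
#     Codename: Core
#
# Passing --json/-j prints the distro.info() dictionary as JSON instead.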
|
ycsoft/FatCat-Server | refs/heads/master | LIBS/boost_1_58_0/libs/python/test/callbacks.py | 12 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from callbacks_ext import *
>>> def double(x):
... return x + x
...
>>> apply_int_int(double, 42)
84
>>> apply_void_int(double, 42)
>>> def identity(x):
... return x
Once we have array conversion support, this test will fail. Er,
succeed<wink>:
>>> try: apply_to_string_literal(identity)
... except ReferenceError: pass # expected
... else: print 'expected an exception!'
>>> try: apply_X_ref_handle(lambda ignored:X(42), None)
... except ReferenceError: pass # expected
... else: print 'expected an exception!'
>>> x = X(42)
>>> x.y = X(7)
>>> apply_X_ref_handle(lambda z:z.y, x).value()
7
>>> x = apply_X_X(identity, X(42))
>>> x.value()
42
>>> x_count()
1
>>> del x
>>> x_count()
0
>>> def increment(x):
... x.set(x.value() + 1)
...
>>> x = X(42)
>>> apply_void_X_ref(increment, x)
>>> x.value()
43
>>> apply_void_X_cref(increment, x)
>>> x.value() # const-ness is not respected, sorry!
44
>>> last_x = 1
>>> def decrement(x):
... global last_x
... last_x = x
... if x is not None:
... x.set(x.value() - 1)
>>> apply_void_X_ptr(decrement, x)
>>> x.value()
43
>>> last_x.value()
43
>>> increment(last_x)
>>> x.value()
44
>>> last_x.value()
44
>>> apply_void_X_ptr(decrement, None)
>>> assert last_x is None
>>> x.value()
44
>>> last_x = 1
>>> apply_void_X_deep_ptr(decrement, None)
>>> assert last_x is None
>>> x.value()
44
>>> apply_void_X_deep_ptr(decrement, x)
>>> x.value()
44
>>> last_x.value()
43
>>> y = apply_X_ref_handle(identity, x)
>>> assert y.value() == x.value()
>>> increment(x)
>>> assert y.value() == x.value()
>>> y = apply_X_ptr_handle_cref(identity, x)
>>> assert y.value() == x.value()
>>> increment(x)
>>> assert y.value() == x.value()
>>> y = apply_X_ptr_handle_cref(identity, None)
>>> y
>>> def new_x(ignored):
... return X(666)
...
>>> try: apply_X_ref_handle(new_x, 1)
... except ReferenceError: pass
... else: print 'no error'
>>> try: apply_X_ptr_handle_cref(new_x, 1)
... except ReferenceError: pass
... else: print 'no error'
>>> try: apply_cstring_cstring(identity, 'hello')
... except ReferenceError: pass
... else: print 'no error'
>>> apply_char_char(identity, 'x')
'x'
>>> apply_cstring_pyobject(identity, 'hello')
'hello'
>>> apply_cstring_pyobject(identity, None)
>>> apply_char_char(identity, 'x')
'x'
>>> assert apply_to_own_type(identity) is type(identity)
>>> assert apply_object_object(identity, identity) is identity
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
|
blueshaedow/Polarbear | refs/heads/master | firmware/tmk_core/tool/mbed/mbed-sdk/workspace_tools/host_tests/udpecho_server.py | 52 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from SocketServer import BaseRequestHandler, UDPServer
from private_settings import SERVER_ADDRESS
class UDP_EchoHandler(BaseRequestHandler):
def handle(self):
data, socket = self.request
print "client:", self.client_address
print "data:", data
socket.sendto(data, self.client_address)
server = UDPServer((SERVER_ADDRESS, 7195), UDP_EchoHandler)
print "listening for connections"
server.serve_forever()
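# A minimal client sketch for manual testing (assumes the server above is
# reachable at SERVER_ADDRESS; Python 2 syntax to match this module):
#
#     import socket
#     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#     sock.sendto("hello", (SERVER_ADDRESS, 7195))
#     print sock.recvfrom(1024)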
|
esikachev/scenario | refs/heads/test_cases | sahara/plugins/hdp/versions/version_2_0_6/edp_engine.py | 8 | # Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from sahara.plugins.hdp import edp_engine
from sahara.service.edp import hdfs_helper
class EdpOozieEngine(edp_engine.EdpOozieEngine):
def create_hdfs_dir(self, remote, dir_name):
hdfs_helper.create_dir_hadoop2(remote, dir_name, self.get_hdfs_user())
def get_resource_manager_uri(self, cluster):
return cluster['info']['Yarn']['ResourceManager']
|
jbenden/ansible | refs/heads/devel | lib/ansible/modules/remote_management/foreman/katello.py | 9 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Eric D Helms <ericdhelms@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: katello
short_description: Manage Katello Resources
description:
- Allows the management of Katello resources inside your Foreman server
version_added: "2.3"
author: "Eric D Helms (@ehelms)"
requirements:
- "nailgun >= 0.28.0"
- "python >= 2.6"
- datetime
options:
server_url:
description:
- URL of Foreman server
required: true
username:
description:
- Username on Foreman server
required: true
password:
description:
- Password for user accessing Foreman server
required: true
entity:
description:
- The Foreman resource that the action will be performed on (e.g. organization, host)
required: true
    params:
        description:
            - Parameters associated to the entity resource to set or edit in dictionary format (e.g. name, description)
        required: true
    action:
        description:
            - The action to perform on the entity (e.g. sync for products and repositories, publish or promote for content views), if any
        required: false
    verify_ssl:
        description:
            - Whether to verify the TLS certificate of the Foreman server
        required: false
        default: false
'''
EXAMPLES = '''
---
# Simple Example:
- name: "Create Product"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "product"
params:
name: "Centos 7"
# Abstraction Example:
# katello.yml
---
- name: "{{ name }}"
local_action:
module: katello
username: "admin"
password: "admin"
server_url: "https://fakeserver.com"
entity: "{{ entity }}"
params: "{{ params }}"
# tasks.yml
---
- include: katello.yml
vars:
name: "Create Dev Environment"
entity: "lifecycle_environment"
params:
name: "Dev"
prior: "Library"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create Centos Product"
entity: "product"
params:
name: "Centos 7"
organization: "Default Organization"
- include: katello.yml
vars:
name: "Create 7.2 Repository"
entity: "repository"
params:
name: "Centos 7.2"
product: "Centos 7"
organization: "Default Organization"
content_type: "yum"
url: "http://mirror.centos.org/centos/7/os/x86_64/"
- include: katello.yml
vars:
name: "Create Centos 7 View"
entity: "content_view"
params:
name: "Centos 7 View"
organization: "Default Organization"
repositories:
- name: "Centos 7.2"
product: "Centos 7"
- include: katello.yml
vars:
name: "Enable RHEL Product"
entity: "repository_set"
params:
name: "Red Hat Enterprise Linux 7 Server (RPMs)"
product: "Red Hat Enterprise Linux Server"
organization: "Default Organization"
basearch: "x86_64"
releasever: "7"
'''
RETURN = '''# '''
import datetime
import os
import traceback
try:
from nailgun import entities, entity_fields, entity_mixins
from nailgun.config import ServerConfig
HAS_NAILGUN_PACKAGE = True
except:
HAS_NAILGUN_PACKAGE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
class NailGun(object):
def __init__(self, server, entities, module):
self._server = server
self._entities = entities
self._module = module
entity_mixins.TASK_TIMEOUT = 1000
def find_organization(self, name, **params):
org = self._entities.Organization(self._server, name=name, **params)
response = org.search(set(), {'search': 'name={}'.format(name)})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No organization found for %s" % name)
def find_lifecycle_environment(self, name, organization):
org = self.find_organization(organization)
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=name, organization=org)
response = lifecycle_env.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Lifecycle Found found for %s" % name)
def find_product(self, name, organization):
org = self.find_organization(organization)
product = self._entities.Product(self._server, name=name, organization=org)
response = product.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Product found for %s" % name)
def find_repository(self, name, product, organization):
product = self.find_product(product, organization)
repository = self._entities.Repository(self._server, name=name, product=product)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Repository found for %s" % name)
def find_content_view(self, name, organization):
org = self.find_organization(organization)
content_view = self._entities.ContentView(self._server, name=name, organization=org)
response = content_view.search()
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View found for %s" % name)
def organization(self, params):
name = params['name']
del params['name']
org = self.find_organization(name, **params)
if org:
org = self._entities.Organization(self._server, name=name, id=org.id, **params)
org.update()
else:
org = self._entities.Organization(self._server, name=name, **params)
org.create()
return True
def manifest(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
        # A context manager guarantees the file handle is closed; the
        # original try/finally raised NameError when open() itself failed.
        with open(os.getcwd() + params['content'], 'r') as manifest_file:
            content = manifest_file.read()
manifest = self._entities.Subscription(self._server)
try:
manifest.upload(
data={'organization_id': org.id},
files={'content': content}
)
return True
except Exception as e:
if "Import is the same as existing data" in e.message:
return False
else:
self._module.fail_json(msg="Manifest import failed with %s" % to_native(e),
exception=traceback.format_exc())
def product(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
product = self._entities.Product(self._server, **params)
response = product.search()
if len(response) == 1:
product.id = response[0].id
product.update()
else:
product.create()
return True
def sync_product(self, params):
org = self.find_organization(params['organization'])
product = self.find_product(params['name'], org.name)
return product.sync()
def repository(self, params):
product = self.find_product(params['product'], params['organization'])
params['product'] = product.id
del params['organization']
repository = self._entities.Repository(self._server, **params)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
response = repository.search()
if len(response) == 1:
repository.id = response[0].id
repository.update()
else:
repository.create()
return True
def sync_repository(self, params):
org = self.find_organization(params['organization'])
repository = self.find_repository(params['name'], params['product'], org.name)
return repository.sync()
def repository_set(self, params):
product = self.find_product(params['product'], params['organization'])
del params['product']
del params['organization']
if not product:
return False
else:
reposet = self._entities.RepositorySet(self._server, product=product, name=params['name'])
reposet = reposet.search()[0]
formatted_name = [params['name'].replace('(', '').replace(')', '')]
formatted_name.append(params['basearch'])
if 'releasever' in params:
formatted_name.append(params['releasever'])
formatted_name = ' '.join(formatted_name)
repository = self._entities.Repository(self._server, product=product, name=formatted_name)
repository._fields['organization'] = entity_fields.OneToOneField(entities.Organization)
repository.organization = product.organization
repository = repository.search()
if len(repository) == 0:
if 'releasever' in params:
reposet.enable(data={'basearch': params['basearch'], 'releasever': params['releasever']})
else:
reposet.enable(data={'basearch': params['basearch']})
return True
def sync_plan(self, params):
org = self.find_organization(params['organization'])
params['organization'] = org.id
params['sync_date'] = datetime.datetime.strptime(params['sync_date'], "%H:%M")
products = params['products']
del params['products']
sync_plan = self._entities.SyncPlan(
self._server,
name=params['name'],
organization=org
)
response = sync_plan.search()
sync_plan.sync_date = params['sync_date']
sync_plan.interval = params['interval']
if len(response) == 1:
sync_plan.id = response[0].id
sync_plan.update()
else:
response = sync_plan.create()
sync_plan.id = response[0].id
if products:
ids = []
for name in products:
product = self.find_product(name, org.name)
ids.append(product.id)
sync_plan.add_products(data={'product_ids': ids})
return True
def content_view(self, params):
org = self.find_organization(params['organization'])
content_view = self._entities.ContentView(self._server, name=params['name'], organization=org)
response = content_view.search()
if len(response) == 1:
content_view.id = response[0].id
content_view.update()
else:
content_view = content_view.create()
if params['repositories']:
repos = []
for repository in params['repositories']:
repository = self.find_repository(repository['name'], repository['product'], org.name)
repos.append(repository)
content_view.repository = repos
content_view.update(['repository'])
def find_content_view_version(self, name, organization, environment):
env = self.find_lifecycle_environment(environment, organization)
content_view = self.find_content_view(name, organization)
content_view_version = self._entities.ContentViewVersion(self._server, content_view=content_view)
response = content_view_version.search(['content_view'], {'environment_id': env.id})
if len(response) == 1:
return response[0]
else:
self._module.fail_json(msg="No Content View version found for %s" % response)
def publish(self, params):
content_view = self.find_content_view(params['name'], params['organization'])
return content_view.publish()
def promote(self, params):
to_environment = self.find_lifecycle_environment(params['to_environment'], params['organization'])
version = self.find_content_view_version(params['name'], params['organization'], params['from_environment'])
data = {'environment_id': to_environment.id}
return version.promote(data=data)
def lifecycle_environment(self, params):
org = self.find_organization(params['organization'])
prior_env = self.find_lifecycle_environment(params['prior'], params['organization'])
lifecycle_env = self._entities.LifecycleEnvironment(self._server, name=params['name'], organization=org, prior=prior_env)
response = lifecycle_env.search()
if len(response) == 1:
lifecycle_env.id = response[0].id
lifecycle_env.update()
else:
lifecycle_env.create()
return True
def activation_key(self, params):
org = self.find_organization(params['organization'])
activation_key = self._entities.ActivationKey(self._server, name=params['name'], organization=org)
response = activation_key.search()
if len(response) == 1:
activation_key.id = response[0].id
activation_key.update()
else:
activation_key.create()
if params['content_view']:
content_view = self.find_content_view(params['content_view'], params['organization'])
lifecycle_environment = self.find_lifecycle_environment(params['lifecycle_environment'], params['organization'])
activation_key.content_view = content_view
activation_key.environment = lifecycle_environment
activation_key.update()
return True
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True),
username=dict(required=True, no_log=True),
password=dict(required=True, no_log=True),
entity=dict(required=True, no_log=False),
action=dict(required=False, no_log=False),
verify_ssl=dict(required=False, type='bool', default=False),
params=dict(required=True, no_log=True, type='dict'),
),
supports_check_mode=True
)
if not HAS_NAILGUN_PACKAGE:
module.fail_json(msg="Missing required nailgun module (check docs or install with: pip install nailgun")
server_url = module.params['server_url']
username = module.params['username']
password = module.params['password']
entity = module.params['entity']
action = module.params['action']
params = module.params['params']
verify_ssl = module.params['verify_ssl']
server = ServerConfig(
url=server_url,
auth=(username, password),
verify=verify_ssl
)
ng = NailGun(server, entities, module)
    # Let's make a connection to the server with the username and password
try:
org = entities.Organization(server)
org.search()
except Exception as e:
module.fail_json(msg="Failed to connect to Foreman server: %s " % e)
result = False
if entity == 'product':
if action == 'sync':
result = ng.sync_product(params)
else:
result = ng.product(params)
elif entity == 'repository':
if action == 'sync':
result = ng.sync_repository(params)
else:
result = ng.repository(params)
elif entity == 'manifest':
result = ng.manifest(params)
elif entity == 'repository_set':
result = ng.repository_set(params)
elif entity == 'sync_plan':
result = ng.sync_plan(params)
elif entity == 'content_view':
if action == 'publish':
result = ng.publish(params)
elif action == 'promote':
result = ng.promote(params)
else:
result = ng.content_view(params)
elif entity == 'lifecycle_environment':
result = ng.lifecycle_environment(params)
elif entity == 'activation_key':
result = ng.activation_key(params)
else:
module.fail_json(changed=False, result="Unsupported entity supplied")
module.exit_json(changed=result, result="%s updated" % entity)
if __name__ == '__main__':
main()
|
carmark/vbox | refs/heads/master | src/VBox/GuestHost/OpenGL/util/debug_opcodes.py | 22 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import cPickle
import string
import re
import apiutil
apiutil.CopyrightC()
print """
#include "cr_debugopcodes.h"
#include <stdio.h>
"""
print """void crDebugOpcodes( FILE *fp, unsigned char *ptr, unsigned int num_opcodes )
{
\tunsigned int i;
\tfor (i = 0 ; i < num_opcodes ; i++)
\t{
\t\tswitch(*(ptr--))
\t\t{
"""
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
keys.sort()
for func_name in keys:
if "pack" in apiutil.ChromiumProps(func_name):
print '\t\tcase %s:' % apiutil.OpcodeName( func_name )
print '\t\t\tfprintf( fp, "%s\\n" ); ' % apiutil.OpcodeName( func_name )
print '\t\t\tbreak;'
print """
\t\t}
\t}
}
"""
|
nitely/http-lazy-headers | refs/heads/master | http_lazy_headers/shared/common/__init__.py | 803 | # -*- coding: utf-8 -*-
|
a-x-/httpie | refs/heads/master | httpie/compat.py | 46 | """
Python 2.6, 2.7, and 3.x compatibility.
"""
import sys
is_py2 = sys.version_info[0] == 2
is_py26 = sys.version_info[:2] == (2, 6)
is_py27 = sys.version_info[:2] == (2, 7)
is_py3 = sys.version_info[0] == 3
is_pypy = 'pypy' in sys.version.lower()
is_windows = 'win32' in str(sys.platform).lower()
if is_py2:
bytes = str
str = unicode
elif is_py3:
str = str
bytes = bytes
try: # pragma: no cover
# noinspection PyUnresolvedReferences,PyCompatibility
from urllib.parse import urlsplit
except ImportError: # pragma: no cover
# noinspection PyUnresolvedReferences,PyCompatibility
from urlparse import urlsplit
try: # pragma: no cover
# noinspection PyCompatibility
from urllib.request import urlopen
except ImportError: # pragma: no cover
# noinspection PyCompatibility
from urllib2 import urlopen
try: # pragma: no cover
from collections import OrderedDict
except ImportError: # pragma: no cover
# Python 2.6 OrderedDict class, needed for headers, parameters, etc .###
# <https://pypi.python.org/pypi/ordereddict/1.1>
# noinspection PyCompatibility
from UserDict import DictMixin
# noinspection PyShadowingBuiltins
class OrderedDict(dict, DictMixin):
# Copyright (c) 2009 Raymond Hettinger
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
# noinspection PyMissingConstructor
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d'
% len(args))
try:
self.__end
except AttributeError:
self.clear()
self.update(*args, **kwds)
def clear(self):
self.__end = end = []
# noinspection PyUnusedLocal
end += [None, end, end] # sentinel node for doubly linked list
self.__map = {} # key --> [key, prev, next]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
end = self.__end
curr = end[1]
curr[2] = end[1] = self.__map[key] = [key, curr, end]
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
key, prev, next = self.__map.pop(key)
prev[2] = next
next[1] = prev
def __iter__(self):
end = self.__end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.__end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
def popitem(self, last=True):
if not self:
raise KeyError('dictionary is empty')
if last:
key = reversed(self).next()
else:
key = iter(self).next()
value = self.pop(key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
tmp = self.__map, self.__end
del self.__map, self.__end
inst_dict = vars(self).copy()
self.__map, self.__end = tmp
if inst_dict:
return self.__class__, (items,), inst_dict
return self.__class__, (items,)
def keys(self):
return list(self)
setdefault = DictMixin.setdefault
update = DictMixin.update
pop = DictMixin.pop
values = DictMixin.values
items = DictMixin.items
iterkeys = DictMixin.iterkeys
itervalues = DictMixin.itervalues
iteritems = DictMixin.iteritems
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
def copy(self):
return self.__class__(self)
# noinspection PyMethodOverriding
@classmethod
def fromkeys(cls, iterable, value=None):
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
if isinstance(other, OrderedDict):
if len(self) != len(other):
return False
for p, q in zip(self.items(), other.items()):
if p != q:
return False
return True
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
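# Minimal usage sketch (behaves the same with the stdlib OrderedDict and
# this backport):
#
#     d = OrderedDict([('b', 1), ('a', 2)])
#     d['c'] = 3
#     assert list(d.keys()) == ['b', 'a', 'c']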
|
sungkim11/mhargadh | refs/heads/master | django/contrib/gis/gdal/tests/test_geom.py | 154 | from django.contrib.gis.gdal import OGRGeometry, OGRGeomType, \
OGRException, OGRIndexError, SpatialReference, CoordTransform, \
GDAL_VERSION
from django.utils import unittest
from django.contrib.gis.geometry.test_data import TestDataMixin
class OGRGeomTest(unittest.TestCase, TestDataMixin):
"This tests the OGR Geometry."
def test00a_geomtype(self):
"Testing OGRGeomType object."
# OGRGeomType should initialize on all these inputs.
try:
g = OGRGeomType(1)
g = OGRGeomType(7)
g = OGRGeomType('point')
g = OGRGeomType('GeometrycollectioN')
g = OGRGeomType('LINearrING')
g = OGRGeomType('Unknown')
except:
self.fail('Could not create an OGRGeomType object!')
        # Should raise an OGRException on these invalid inputs
self.assertRaises(OGRException, OGRGeomType, 23)
self.assertRaises(OGRException, OGRGeomType, 'fooD')
self.assertRaises(OGRException, OGRGeomType, 9)
# Equivalence can take strings, ints, and other OGRGeomTypes
self.assertEqual(True, OGRGeomType(1) == OGRGeomType(1))
self.assertEqual(True, OGRGeomType(7) == 'GeometryCollection')
self.assertEqual(True, OGRGeomType('point') == 'POINT')
self.assertEqual(False, OGRGeomType('point') == 2)
self.assertEqual(True, OGRGeomType('unknown') == 0)
self.assertEqual(True, OGRGeomType(6) == 'MULtiPolyGON')
self.assertEqual(False, OGRGeomType(1) != OGRGeomType('point'))
self.assertEqual(True, OGRGeomType('POINT') != OGRGeomType(6))
# Testing the Django field name equivalent property.
self.assertEqual('PointField', OGRGeomType('Point').django)
self.assertEqual('GeometryField', OGRGeomType('Unknown').django)
self.assertEqual(None, OGRGeomType('none').django)
# 'Geometry' initialization implies an unknown geometry type.
gt = OGRGeomType('Geometry')
self.assertEqual(0, gt.num)
self.assertEqual('Unknown', gt.name)
def test00b_geomtype_25d(self):
"Testing OGRGeomType object with 25D types."
wkb25bit = OGRGeomType.wkb25bit
self.assertTrue(OGRGeomType(wkb25bit + 1) == 'Point25D')
self.assertTrue(OGRGeomType('MultiLineString25D') == (5 + wkb25bit))
self.assertEqual('GeometryCollectionField', OGRGeomType('GeometryCollection25D').django)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
self.assertEqual(g.wkt, geom.wkt)
def test01a_ewkt(self):
"Testing EWKT input/output."
for ewkt_val in ('POINT (1 2 3)', 'LINEARRING (0 0,1 1,2 1,0 0)'):
# First with ewkt output when no SRID in EWKT
self.assertEqual(ewkt_val, OGRGeometry(ewkt_val).ewkt)
            # Now test consumption with an SRID specified.
ewkt_val = 'SRID=4326;%s' % ewkt_val
geom = OGRGeometry(ewkt_val)
self.assertEqual(ewkt_val, geom.ewkt)
self.assertEqual(4326, geom.srs.srid)
def test01b_gml(self):
"Testing GML output."
for g in self.geometries.wkt_out:
geom = OGRGeometry(g.wkt)
exp_gml = g.gml
if GDAL_VERSION >= (1, 8):
# In GDAL 1.8, the non-conformant GML tag <gml:GeometryCollection> was
# replaced with <gml:MultiGeometry>.
exp_gml = exp_gml.replace('GeometryCollection', 'MultiGeometry')
self.assertEqual(exp_gml, geom.gml)
def test01c_hex(self):
"Testing HEX input/output."
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
self.assertEqual(g.hex, geom1.hex)
# Constructing w/HEX
geom2 = OGRGeometry(g.hex)
self.assertEqual(geom1, geom2)
def test01d_wkb(self):
"Testing WKB input/output."
from binascii import b2a_hex
for g in self.geometries.hex_wkt:
geom1 = OGRGeometry(g.wkt)
wkb = geom1.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
# Constructing w/WKB.
geom2 = OGRGeometry(wkb)
self.assertEqual(geom1, geom2)
def test01e_json(self):
"Testing GeoJSON input/output."
from django.contrib.gis.gdal.prototypes.geom import GEOJSON
if not GEOJSON: return
for g in self.geometries.json_geoms:
geom = OGRGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(OGRGeometry(g.wkt), OGRGeometry(geom.json))
def test02_points(self):
"Testing Point objects."
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.points:
if not hasattr(p, 'z'): # No 3D
pnt = OGRGeometry(p.wkt)
self.assertEqual(1, pnt.geom_type)
self.assertEqual('POINT', pnt.geom_name)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual((p.x, p.y), pnt.tuple)
def test03_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mgeom1 = OGRGeometry(mp.wkt) # First one from WKT
self.assertEqual(4, mgeom1.geom_type)
self.assertEqual('MULTIPOINT', mgeom1.geom_name)
mgeom2 = OGRGeometry('MULTIPOINT') # Creating empty multipoint
mgeom3 = OGRGeometry('MULTIPOINT')
for g in mgeom1:
mgeom2.add(g) # adding each point from the multipoints
mgeom3.add(g.wkt) # should take WKT as well
self.assertEqual(mgeom1, mgeom2) # they should equal
self.assertEqual(mgeom1, mgeom3)
self.assertEqual(mp.coords, mgeom2.coords)
self.assertEqual(mp.n_p, mgeom2.point_count)
def test04_linestring(self):
"Testing LineString objects."
prev = OGRGeometry('POINT(0 0)')
for ls in self.geometries.linestrings:
linestr = OGRGeometry(ls.wkt)
self.assertEqual(2, linestr.geom_type)
self.assertEqual('LINESTRING', linestr.geom_name)
self.assertEqual(ls.n_p, linestr.point_count)
self.assertEqual(ls.coords, linestr.tuple)
self.assertEqual(True, linestr == OGRGeometry(ls.wkt))
self.assertEqual(True, linestr != prev)
self.assertRaises(OGRIndexError, linestr.__getitem__, len(linestr))
prev = linestr
# Testing the x, y properties.
x = [tmpx for tmpx, tmpy in ls.coords]
y = [tmpy for tmpx, tmpy in ls.coords]
self.assertEqual(x, linestr.x)
self.assertEqual(y, linestr.y)
def test05_multilinestring(self):
"Testing MultiLineString objects."
prev = OGRGeometry('POINT(0 0)')
for mls in self.geometries.multilinestrings:
mlinestr = OGRGeometry(mls.wkt)
self.assertEqual(5, mlinestr.geom_type)
self.assertEqual('MULTILINESTRING', mlinestr.geom_name)
self.assertEqual(mls.n_p, mlinestr.point_count)
self.assertEqual(mls.coords, mlinestr.tuple)
self.assertEqual(True, mlinestr == OGRGeometry(mls.wkt))
self.assertEqual(True, mlinestr != prev)
prev = mlinestr
for ls in mlinestr:
self.assertEqual(2, ls.geom_type)
self.assertEqual('LINESTRING', ls.geom_name)
self.assertRaises(OGRIndexError, mlinestr.__getitem__, len(mlinestr))
def test06_linearring(self):
"Testing LinearRing objects."
prev = OGRGeometry('POINT(0 0)')
for rr in self.geometries.linearrings:
lr = OGRGeometry(rr.wkt)
#self.assertEqual(101, lr.geom_type.num)
self.assertEqual('LINEARRING', lr.geom_name)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr == OGRGeometry(rr.wkt))
self.assertEqual(True, lr != prev)
prev = lr
def test07a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180,-90,180,90)
p = OGRGeometry.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = OGRGeometry('POINT(0 0)')
for p in self.geometries.polygons:
poly = OGRGeometry(p.wkt)
self.assertEqual(3, poly.geom_type)
self.assertEqual('POLYGON', poly.geom_name)
self.assertEqual(p.n_p, poly.point_count)
self.assertEqual(p.n_i + 1, len(poly))
# Testing area & centroid.
self.assertAlmostEqual(p.area, poly.area, 9)
x, y = poly.centroid.tuple
self.assertAlmostEqual(p.centroid[0], x, 9)
self.assertAlmostEqual(p.centroid[1], y, 9)
# Testing equivalence
self.assertEqual(True, poly == OGRGeometry(p.wkt))
self.assertEqual(True, poly != prev)
if p.ext_ring_cs:
ring = poly[0]
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple)
self.assertEqual(len(p.ext_ring_cs), ring.point_count)
for r in poly:
self.assertEqual('LINEARRING', r.geom_name)
def test07b_closepolygons(self):
"Testing closing Polygon objects."
# Both rings in this geometry are not closed.
poly = OGRGeometry('POLYGON((0 0, 5 0, 5 5, 0 5), (1 1, 2 1, 2 2, 2 1))')
self.assertEqual(8, poly.point_count)
print "\nBEGIN - expecting IllegalArgumentException; safe to ignore.\n"
try:
c = poly.centroid
except OGRException:
# Should raise an OGR exception, rings are not closed
pass
else:
self.fail('Should have raised an OGRException!')
print "\nEND - expecting IllegalArgumentException; safe to ignore.\n"
# Closing the rings -- doesn't work on GDAL versions 1.4.1 and below:
# http://trac.osgeo.org/gdal/ticket/1673
        if GDAL_VERSION <= (1, 4, 1):
            return
poly.close_rings()
self.assertEqual(10, poly.point_count) # Two closing points should've been added
self.assertEqual(OGRGeometry('POINT(2.5 2.5)'), poly.centroid)
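    # Editor's note: `close_rings()` appends a copy of a ring's first point
    # whenever its last point differs, so the count above grows by exactly one
    # point per unclosed ring (8 -> 10 for the two rings in this polygon).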
def test08_multipolygons(self):
"Testing MultiPolygon objects."
prev = OGRGeometry('POINT(0 0)')
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
self.assertEqual(6, mpoly.geom_type)
self.assertEqual('MULTIPOLYGON', mpoly.geom_name)
if mp.valid:
self.assertEqual(mp.n_p, mpoly.point_count)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(OGRIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual('POLYGON', p.geom_name)
self.assertEqual(3, p.geom_type)
self.assertEqual(mpoly.wkt, OGRGeometry(mp.wkt).wkt)
def test09a_srs(self):
"Testing OGR Geometries with Spatial Reference objects."
for mp in self.geometries.multipolygons:
# Creating a geometry w/spatial reference
sr = SpatialReference('WGS84')
mpoly = OGRGeometry(mp.wkt, sr)
self.assertEqual(sr.wkt, mpoly.srs.wkt)
# Ensuring that SRS is propagated to clones.
klone = mpoly.clone()
self.assertEqual(sr.wkt, klone.srs.wkt)
            # Ensuring all child geometries (polygons and their rings)
            # return the assigned spatial reference as well.
for poly in mpoly:
self.assertEqual(sr.wkt, poly.srs.wkt)
for ring in poly:
self.assertEqual(sr.wkt, ring.srs.wkt)
            # Ensuring the SRS propagates in topological ops.
a = OGRGeometry(self.geometries.topology_geoms[0].wkt_a, sr)
b = OGRGeometry(self.geometries.topology_geoms[0].wkt_b, sr)
diff = a.difference(b)
union = a.union(b)
self.assertEqual(sr.wkt, diff.srs.wkt)
self.assertEqual(sr.srid, union.srs.srid)
# Instantiating w/an integer SRID
mpoly = OGRGeometry(mp.wkt, 4326)
self.assertEqual(4326, mpoly.srid)
mpoly.srs = SpatialReference(4269)
self.assertEqual(4269, mpoly.srid)
self.assertEqual('NAD83', mpoly.srs.name)
            # Incrementing through the multipolygon after the spatial reference
# has been re-assigned.
for poly in mpoly:
self.assertEqual(mpoly.srs.wkt, poly.srs.wkt)
poly.srs = 32140
for ring in poly:
# Changing each ring in the polygon
self.assertEqual(32140, ring.srs.srid)
self.assertEqual('NAD83 / Texas South Central', ring.srs.name)
ring.srs = str(SpatialReference(4326)) # back to WGS84
self.assertEqual(4326, ring.srs.srid)
# Using the `srid` property.
ring.srid = 4322
self.assertEqual('WGS 72', ring.srs.name)
self.assertEqual(4322, ring.srid)
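    # Editor-added sketch (not part of the original suite; assumes
    # `SpatialReference` is imported at the top of this module, as the tests
    # above already use it) showing the srid/srs round trip.
    def test09a1_srs_assignment_sketch(self):
        "Sketch: `srid` and `srs` are two views of the same spatial reference."
        pnt = OGRGeometry('POINT(-95.36 29.76)', 4326)
        self.assertEqual(4326, pnt.srid)
        pnt.srs = SpatialReference('WGS84')   # assign an object this time
        self.assertEqual(4326, pnt.srs.srid)  # same EPSG code either way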
def test09b_srs_transform(self):
"Testing transform()."
orig = OGRGeometry('POINT (-104.609 38.255)', 4326)
trans = OGRGeometry('POINT (992385.4472045 481455.4944650)', 2774)
        # Using an srid, a SpatialReference object, and a CoordTransform object
        # for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(SpatialReference('EPSG:2774'))
ct = CoordTransform(SpatialReference('WGS84'), SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
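    # Editor's note: as the clone test above shows, `transform()` mutates the
    # geometry in place by default; `clone=True` leaves the original intact
    # and returns a transformed copy instead.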
def test09c_transform_dim(self):
"Testing coordinate dimension is the same on transformed geometries."
ls_orig = OGRGeometry('LINESTRING(-104.609 38.255)', 4326)
ls_trans = OGRGeometry('LINESTRING(992385.4472045 481455.4944650)', 2774)
prec = 3
ls_orig.transform(ls_trans.srs)
# Making sure the coordinate dimension is still 2D.
self.assertEqual(2, ls_orig.coord_dim)
self.assertAlmostEqual(ls_trans.x[0], ls_orig.x[0], prec)
self.assertAlmostEqual(ls_trans.y[0], ls_orig.y[0], prec)
def test10_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test11_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
i1 = OGRGeometry(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test12_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
d1 = OGRGeometry(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test13_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = OGRGeometry(self.geometries.topology_geoms[i].wkt_a)
b = OGRGeometry(self.geometries.topology_geoms[i].wkt_b)
u1 = OGRGeometry(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
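    # Editor's note: tests 10-13 above pin down the operator mapping on
    # OGRGeometry: `-`/`-=` -> difference(), `&`/`&=` -> intersection(),
    # `^`/`^=` -> sym_difference(), `|`/`|=` -> union().  The editor-added
    # sketch below (not part of the original suite) checks each operator
    # against its named method on a pair of literal overlapping squares.
    def test13a_operator_method_sketch(self):
        "Sketch: each set-op operator agrees with its named method."
        a = OGRGeometry('POLYGON((0 0, 2 0, 2 2, 0 2, 0 0))')
        b = OGRGeometry('POLYGON((1 1, 3 1, 3 3, 1 3, 1 1))')
        self.assertEqual(a.union(b), a | b)
        self.assertEqual(a.intersection(b), a & b)
        self.assertEqual(a.difference(b), a - b)
        self.assertEqual(a.sym_difference(b), a ^ b)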
def test14_add(self):
"Testing GeometryCollection.add()."
# Can't insert a Point into a MultiPolygon.
mp = OGRGeometry('MultiPolygon')
pnt = OGRGeometry('POINT(5 23)')
self.assertRaises(OGRException, mp.add, pnt)
        # GeometryCollection.add may take an OGRGeometry (if it is another
        # collection of the same type, all child geometries are added
        # individually) or WKT.
for mp in self.geometries.multipolygons:
mpoly = OGRGeometry(mp.wkt)
mp1 = OGRGeometry('MultiPolygon')
mp2 = OGRGeometry('MultiPolygon')
mp3 = OGRGeometry('MultiPolygon')
for poly in mpoly:
mp1.add(poly) # Adding a geometry at a time
mp2.add(poly.wkt) # Adding WKT
mp3.add(mpoly) # Adding a MultiPolygon's entire contents at once.
            for tmp in (mp1, mp2, mp3):
                self.assertEqual(mpoly, tmp)
def test15_extent(self):
"Testing `extent` property."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = OGRGeometry('MULTIPOINT(5 23, 0 0, 10 50)')
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
# Testing on the 'real world' Polygon.
poly = OGRGeometry(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test16_25D(self):
"Testing 2.5D geometries."
pnt_25d = OGRGeometry('POINT(1 2 3)')
self.assertEqual('Point25D', pnt_25d.geom_type.name)
self.assertEqual(3.0, pnt_25d.z)
self.assertEqual(3, pnt_25d.coord_dim)
ls_25d = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)')
self.assertEqual('LineString25D', ls_25d.geom_type.name)
self.assertEqual([1.0, 2.0, 3.0], ls_25d.z)
self.assertEqual(3, ls_25d.coord_dim)
def test17_pickle(self):
"Testing pickle support."
import cPickle
g1 = OGRGeometry('LINESTRING(1 1 1,2 2 2,3 3 3)', 'WGS84')
g2 = cPickle.loads(cPickle.dumps(g1))
self.assertEqual(g1, g2)
self.assertEqual(4326, g2.srs.srid)
self.assertEqual(g1.srs.wkt, g2.srs.wkt)
def test18_ogrgeometry_transform_workaround(self):
"Testing coordinate dimensions on geometries after transformation."
# A bug in GDAL versions prior to 1.7 changes the coordinate
# dimension of a geometry after it has been transformed.
# This test ensures that the bug workarounds employed within
# `OGRGeometry.transform` indeed work.
wkt_2d = "MULTILINESTRING ((0 0,1 1,2 2))"
wkt_3d = "MULTILINESTRING ((0 0 0,1 1 1,2 2 2))"
srid = 4326
# For both the 2D and 3D MultiLineString, ensure _both_ the dimension
# of the collection and the component LineString have the expected
# coordinate dimension after transform.
geom = OGRGeometry(wkt_2d, srid)
geom.transform(srid)
self.assertEqual(2, geom.coord_dim)
self.assertEqual(2, geom[0].coord_dim)
self.assertEqual(wkt_2d, geom.wkt)
geom = OGRGeometry(wkt_3d, srid)
geom.transform(srid)
self.assertEqual(3, geom.coord_dim)
self.assertEqual(3, geom[0].coord_dim)
self.assertEqual(wkt_3d, geom.wkt)
def test19_equivalence_regression(self):
"Testing equivalence methods with non-OGRGeometry instances."
self.assertNotEqual(None, OGRGeometry('POINT(0 0)'))
self.assertEqual(False, OGRGeometry('LINESTRING(0 0, 1 1)') == 3)
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(OGRGeomTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
|
PaulAYoung/f2014_iolab | refs/heads/master | pymongoProject/venv/lib/python2.7/site-packages/flask/testsuite/test_apps/moduleapp/apps/admin/__init__.py | 629 | from flask import Module, render_template
admin = Module(__name__, url_prefix='/admin')
@admin.route('/')
def index():
return render_template('admin/index.html')
@admin.route('/index2')
def index2():
return render_template('./admin/index.html')
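# Editor's note: `flask.Module` is the deprecated pre-0.7 ancestor of today's
# `Blueprint`, with `url_prefix` playing the same role in both APIs; this
# fixture appears to keep the legacy API because the test suite it belongs to
# exercises module-based app layouts.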
|
vladmm/intellij-community | refs/heads/master | python/testData/inspections/PyProtectedMemberInspection/truePositive.py | 75 | __author__ = 'ktisha'
class A:
def __init__(self):
self._a = 1
def foo(self):
        self.b = 1
print <weak_warning descr="Access to a protected member _a of a class">A()._a</weak_warning> |
zhouyao1994/incubator-superset | refs/heads/master | superset/connectors/sqla/__init__.py | 7 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from . import models, views
|
Malthus/script.revolve.helper | refs/heads/master | resources/populatesubmenufromskinvariables.py | 1 | # * Function: Revolve/PopulateSubmenuFromSkinVariables
import sys
import xbmc
import baselibrary
import xbmclibrary
FUNCTIONNAME = 'Revolve/PopulateSubmenuFromSkinVariables'
DEFAULTTARGETMASK = 'MySubmenu%02dOption'
DEFAULTTARGETWINDOW = '0'
TOTALITEMS = 20
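# Editor's note: the masks are old-style %-format templates; with the default
# target mask, index 1 expands to 'MySubmenu01Option' and index 20 to
# 'MySubmenu20Option', which is how the per-option skin settings and window
# properties below are paired up.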
def copyProperties(sourcemask, targetmask, targetwindow):
for index in range (1, TOTALITEMS + 1):
sourcebase = sourcemask % (index)
targetbase = targetmask % (index)
xbmclibrary.copySkinSettingToProperty(sourcebase + '.Type', targetbase + '.Type', targetwindow)
xbmclibrary.copyBooleanSkinSettingToProperty(sourcebase + '.Active', targetbase + '.Active', targetwindow)
xbmclibrary.copySkinSettingToProperty(sourcebase + '.Name', targetbase + '.Name', targetwindow)
xbmclibrary.copySkinSettingToProperty(sourcebase + '.Subtitle', targetbase + '.Subtitle', targetwindow)
xbmclibrary.copySkinSettingToProperty(sourcebase + '.BackgroundImage', targetbase + '.BackgroundImage', targetwindow)
xbmclibrary.copySkinSettingToProperty(sourcebase + '.MenuTitle', targetbase + '.MenuTitle', targetwindow)
xbmclibrary.copySkinSettingToProperty(sourcebase + '.SourceInfo', targetbase + '.SourceInfo', targetwindow)
xbmclibrary.copySkinSettingToProperty(sourcebase + '.Window', targetbase + '.Window', targetwindow)
xbmclibrary.copySkinSettingToProperty(sourcebase + '.Action', targetbase + '.Action', targetwindow)
def execute(arguments):
if len(arguments) > 2:
sourcemask = arguments[2]
targetmask = baselibrary.extractArgument(arguments, 3, DEFAULTTARGETMASK)
targetwindow = baselibrary.extractArgument(arguments, 4, DEFAULTTARGETWINDOW)
copyProperties(sourcemask, targetmask, targetwindow)
else:
xbmclibrary.writeErrorMessage(FUNCTIONNAME, FUNCTIONNAME + ' terminates: Missing argument(s) in call to script.')
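# Editor's note: a hedged usage sketch. Assuming the add-on's entry point
# forwards sys.argv to execute() -- so arguments[1] is the function name and
# arguments[2:] are its parameters (an assumption, not confirmed here) -- a
# skin would invoke this roughly as:
#   RunScript(script.revolve.helper, populatesubmenufromskinvariables,
#             Menu%02dOption, MySubmenu%02dOption, 1111)
# where the third argument is the source mask and the last two fall back to
# DEFAULTTARGETMASK and DEFAULTTARGETWINDOW when omitted.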
|
yongxin1029/pywinauto | refs/heads/master | examples/notepad_item.py | 19 | # GUI Application automation and testing library
# Copyright (C) 2006 Mark Mc Mahon
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place,
# Suite 330,
# Boston, MA 02111-1307 USA
"Run some automations to test things"
__revision__ = "$Revision: 214 $"
import time
from pywinauto import application
from pywinauto import tests
from pywinauto.findbestmatch import MatchError
from pywinauto import findwindows
#application.set_timing(3, .5, 10, .5, .4, .2, .2, .1, .2, .5)
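# Editor's note: the trailing underscores on `start_` and `window_` are the
# historical pywinauto 0.x spelling; the underscore kept these real methods
# from colliding with the attribute-style best-match lookup used for dialog
# and control names (e.g. app['Notepad']). Later releases renamed them to
# start()/window().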
"Run a quick test on Notepad"
app = application.Application()
app.start_(ur"notepad.exe")
app['Notepad'].Wait('ready')
app['Notepad'].MenuSelect("File->PageSetup")
# ----- Page Setup Dialog ----
# Select the 4th combobox item
app['PageSetupDlg']['ComboBox1'].Select(4)
# Select the 'Letter' combobox item
app['PageSetupDlg']['ComboBox1'].Select("Letter")
# ----- Next Page Setup Dialog ----
app['PageSetupDlg']['Printer'].Click()
app['PageSetupDlg']['Network'].Click()
# ----- Connect To Printer Dialog ----
# Select a checkbox
app['ConnectToPrinter']['ExpandByDef'].Check()
# Uncheck it again - but use Click this time!
app['ConnectToPrinter']['ExpandByDef'].Click()
app['ConnectToPrinter']['OK'].CloseClick()
# ----- 2nd Page Setup Dialog again ----
app['PageSetupDlg2']['Properties'].Click()
# ----- Document Properties Dialog ----
doc_props = app.window_(title_re = ".*Document Properties")
# Two ways of selecting tabs
doc_props['TabCtrl'].Select(2)
doc_props['TabCtrl'].Select("Layout")
# click a Radio button
doc_props['RotatedLandscape'].Click()
doc_props['Portrait'].Click()
# open the Advanced options dialog in two steps
advbutton = doc_props['Advanced']
advbutton.Click()
# ----- Advanced Options Dialog ----
# close the 4 windows
app.window_(title_re = ".* Advanced Options")['Ok'].Click()
# ----- Document Properties Dialog again ----
doc_props['Cancel'].CloseClick()
# ----- 2nd Page Setup Dialog again ----
app['PageSetup2']['OK'].CloseClick()
# ----- Page Setup Dialog ----
app['PageSetup']['Ok'].CloseClick()
# type some text
app['Notepad']['Edit'].SetEditText(u"I am typing s\xe4me text to Notepad"
"\r\n\r\nAnd then I am going to quit")
# exit notepad
app['NotepadDialog'].MenuSelect("File->Exit")
app['Notepad']['No'].CloseClick()
|
JetBrains/intellij-community | refs/heads/master | python/testData/formatter/blankLineBeforeFunction_after.py | 79 | class C:
x = 1
def foo(self): pass
|
credativUK/OCB | refs/heads/7.0-local | addons/sale/wizard/sale_line_invoice.py | 14 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
from openerp import netsvc
class sale_order_line_make_invoice(osv.osv_memory):
_name = "sale.order.line.make.invoice"
_description = "Sale OrderLine Make_invoice"
def make_invoices(self, cr, uid, ids, context=None):
"""
To make invoices.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
        @return: A dictionary of fields with values.
"""
if context is None: context = {}
res = False
invoices = {}
#TODO: merge with sale.py/make_invoice
def make_invoice(order, lines):
"""
            Create an invoice for the given sale order.
            @param order: sale.order browse record
            @param lines: list of account.invoice.line ids to link
            @return: the id of the created account.invoice
"""
a = order.partner_id.property_account_receivable.id
if order.partner_id and order.partner_id.property_payment_term.id:
pay_term = order.partner_id.property_payment_term.id
else:
pay_term = False
inv = {
'name': order.client_order_ref or '',
'origin': order.name,
'type': 'out_invoice',
'reference': "P%dSO%d" % (order.partner_id.id, order.id),
'account_id': a,
'partner_id': order.partner_invoice_id.id,
'invoice_line': [(6, 0, lines)],
'currency_id' : order.pricelist_id.currency_id.id,
'comment': order.note,
'payment_term': pay_term,
'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,
'user_id': order.user_id and order.user_id.id or False,
'company_id': order.company_id and order.company_id.id or False,
'date_invoice': fields.date.today(),
}
inv_id = self.pool.get('account.invoice').create(cr, uid, inv)
return inv_id
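        # Editor's note: `(6, 0, lines)` above is the standard OpenERP
        # relational write command meaning "replace the whole set with these
        # ids", so the invoice is created already linked to every generated
        # invoice line.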
sales_order_line_obj = self.pool.get('sale.order.line')
sales_order_obj = self.pool.get('sale.order')
wf_service = netsvc.LocalService('workflow')
for line in sales_order_line_obj.browse(cr, uid, context.get('active_ids', []), context=context):
if (not line.invoiced) and (line.state not in ('draft', 'cancel')):
if not line.order_id in invoices:
invoices[line.order_id] = []
line_id = sales_order_line_obj.invoice_line_create(cr, uid, [line.id])
for lid in line_id:
invoices[line.order_id].append(lid)
for order, il in invoices.items():
res = make_invoice(order, il)
cr.execute('INSERT INTO sale_order_invoice_rel \
(order_id,invoice_id) values (%s,%s)', (order.id, res))
flag = True
data_sale = sales_order_obj.browse(cr, uid, order.id, context=context)
for line in data_sale.order_line:
if not line.invoiced:
flag = False
break
if flag:
wf_service.trg_validate(uid, 'sale.order', order.id, 'manual_invoice', cr)
if not invoices:
raise osv.except_osv(_('Warning!'), _('Invoice cannot be created for this Sales Order Line due to one of the following reasons:\n1.The state of this sales order line is either "draft" or "cancel"!\n2.The Sales Order Line is Invoiced!'))
if context.get('open_invoices', False):
return self.open_invoices(cr, uid, ids, res, context=context)
return {'type': 'ir.actions.act_window_close'}
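    # Editor's note: the `open_invoices` context flag is presumably supplied by
    # the calling window action; when truthy the wizard chains into the invoice
    # view built by open_invoices() below instead of simply closing.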
def open_invoices(self, cr, uid, ids, invoice_ids, context=None):
""" open a view on one of the given invoice_ids """
ir_model_data = self.pool.get('ir.model.data')
form_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_form')
form_id = form_res and form_res[1] or False
tree_res = ir_model_data.get_object_reference(cr, uid, 'account', 'invoice_tree')
tree_id = tree_res and tree_res[1] or False
return {
'name': _('Invoice'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'account.invoice',
'res_id': invoice_ids,
'view_id': False,
'views': [(form_id, 'form'), (tree_id, 'tree')],
'context': {'type': 'out_invoice'},
'type': 'ir.actions.act_window',
}
sale_order_line_make_invoice()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|